metadata (dict) | text (string, lengths 60–3.49M)
---|---
{
"source": "jmscslgroup/sparkle",
"score": 3
} |
#### File: sparkle/control/carfollowing.py
```python
import rospy
import sys, getopt
import numpy as np
import glob
import os
from geometry_msgs.msg import Twist, Pose, Point, Vector3
from std_msgs.msg import Float64
from nav_msgs.msg import Odometry
class carfollowing(object):
"""
CAR following constructor
Parameters
----------
thisvehicle: `string`, Name of the ego vehicle
leadervehicle: `string`, Name of the leader vehicle
Returns
--------
Nothing
"""
def __init__(self, thisvehicle, leadervehicle, **kwargs):
self.counter = 1.0 # used to throttle log messages at publish time; currently we log roughly every 100 publishes.
self.thisvehicle = thisvehicle
self.leadervehicle= leadervehicle
self.carfollowingmodel = kwargs.get("carfollowingmodel", None)
rospy.init_node("car_following", anonymous=True)
print("carfollowing node initialized with following vehicle [{}] and leader vehicle [{}]".format(thisvehicle, leadervehicle))
self.FirstTime = rospy.Time.now() # Capture time when this node is initialized.
print("Time just after initialization is {}".format(self.FirstTime))
self.init = False
self.firstMessage = {"leadervel": False, "vel": False, "distance": False, "leaderaccel": False, "accel": False, "leaderodom": False, "odom": False}
# Get all rosparams
self.useSensorDistance = rospy.get_param('useSensorDistance', False)
self.leaderposX = rospy.get_param('leaderX_init', 0.0)
self.leaderposY = rospy.get_param('leaderY_init', 0.0)
self.posX = rospy.get_param('X_init', 10.0)
self.posY = rospy.get_param('Y_init', 10.0)
self.leaderaccel = rospy.get_param('leaderaccel_init', 0.0) # initialize leader's acceleration
self.accel = rospy.get_param('accel_init', 0.0) # initialize this vehicle's acceleration
self.str_angle = rospy.get_param('str_angle', 0.0) #steering angle
#print(self.leaderposY)
#print(self.posY)
if self.useSensorDistance:
self.distance = rospy.get_param('distance_init', 9.45) #initialize the distance between the vehicle and its leader
else:
self.distance = np.sqrt( (self.leaderposX - self.posX)**2 + (self.leaderposY - self.posY)**2 )
#print("self.distance: {}".format(self.distance))
V_init = self.V(self.distance)
self.leadervel = 0.0 #V_init#initialize leader's velocity
self.vel = 0.0 #V_init #initialize velocity of this vehicle
# Initialize Velocity Message
self.vel_msg = Twist()
self.vel_msg.linear.x = self.leadervel
self.vel_msg.linear.y = 0.0
self.vel_msg.linear.z = 0.0
self.vel_msg.angular.x = 0.0
self.vel_msg.angular.y = 0.0
self.vel_msg.angular.z = self.str_angle
# Initialize Acceleration Message
self.accel_msg = Float64()
self.accel_msg.data = 0.0
# Initialize local variables
leadervel = Twist()
vel = Twist()
distance = Float64()
leaderodom = Odometry()
odom = Odometry()
leaderaccel = Vector3()
accel = Vector3()
odom.pose.pose.position.x = self.posX
odom.pose.pose.position.y = self.posY
leaderodom.pose.pose.position.x = self.leaderposX
leaderodom.pose.pose.position.y = self.leaderposY
leadervel.linear.x = self.leadervel
vel.linear.x = self.vel
distance.data = self.distance
leaderaccel.x = self.leaderaccel
accel.x = self.accel
self.agentdata = {"leadervel": leadervel , "vel": vel,
"distance": distance, "leaderaccel": leaderaccel,
"accel": accel, "odom": odom ,
"leaderodom": leaderodom}
# Create Publisher
self.pubvel = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.pubaccel = rospy.Publisher('cmd_accel', Float64, queue_size=1)
## Create all subscribers
config = ("leadervel", self.carfollowingmodel)
rospy.Subscriber('leadervel', Twist, self.callback, config)
config = ("vel", self.carfollowingmodel)
rospy.Subscriber('vel', Twist, self.callback, config)
config = ("leaderodom", self.carfollowingmodel)
rospy.Subscriber('leaderodom', Odometry, self.callback, config)
config = ("odom", self.carfollowingmodel)
rospy.Subscriber('odom', Odometry, self.callback, config)
config = ("distance", self.carfollowingmodel)
rospy.Subscriber('distance', Float64, self.callback, config)
config = ("leaderaccel", self.carfollowingmodel)
rospy.Subscriber('leaderaccel', Point, self.callback, config)
config = ("accel", self.carfollowingmodel)
rospy.Subscriber('accel',Point, self.callback, config)
# We also want to publish immediately after receiving a new data point
self.publishNow = False
def callback(self, data, args):
"""
Callback function that receives data from a subscriber, parses it, and stores it in the class's dictionary variable
Parameters
------------
data: `obj`, data sent by the subscriber - its type is determined at runtime
args: `tuple`, (agent, carfollowingmodel) where agent is the expected sender and carfollowingmodel is the model callable (or None for the default)
"""
agent = args[0]
carfollowingmodel = args[1]
#print("carfollowingmodel: {}".format(carfollowingmodel))
if 'oldTime' not in self.__dict__:
self.oldTime = rospy.Time.now()
return
newTime = rospy.Time.now()
deltaT = newTime - self.oldTime
deltaT = deltaT.to_sec()
#print("callback deltaT: {}".format(deltaT))
net_deltaT = newTime - self.FirstTime
net_deltaT = net_deltaT.to_sec()
self.oldTime = newTime
self.agentdata[agent] = data
#print("\n-----------")
#print("Agent is {}".format(agent))
#print("Data: {}".format(data))
#print("Args: {}".format(args))
if self.useSensorDistance:
self.distance = self.agentdata["distance"].data
else:
self.distance = np.sqrt( (self.agentdata["leaderodom"].pose.pose.position.x - self.agentdata["odom"].pose.pose.position.x)**2 + (self.agentdata["leaderodom"].pose.pose.position.y - self.agentdata["odom"].pose.pose.position.y)**2 )
self.agentdata['distance'].data = self.distance
self.firstMessage[agent] = True
# if all true in first message dictionary, then set self init to true
if all(x is True for x in self.firstMessage.values()) and not self.init:
#print("self.init: {}".format(self.init))
#print('deltaT: {}'.format(deltaT))
self.init = True
return
if carfollowingmodel is None:
newvelocity, newaccel = self.cf_algorithm(self.L2Norm(self.agentdata['leadervel'].linear), self.L2Norm(self.agentdata['leaderaccel']),
self.L2Norm(self.agentdata['vel'].linear), self.L2Norm(self.agentdata['accel']),
self.agentdata['distance'].data, deltaT)
else:
newvelocity, newaccel = carfollowingmodel(self.L2Norm(self.agentdata['leadervel'].linear),
self.L2Norm(self.agentdata['leaderaccel']),
self.L2Norm(self.agentdata['vel'].linear),
self.L2Norm(self.agentdata['accel']),
self.agentdata['distance'].data, deltaT)
self.vel_msg.linear.x = newvelocity
self.vel_msg.linear.y = 0.0
self.vel_msg.linear.z = 0.0
self.vel_msg.angular.x = 0.0
self.vel_msg.angular.y = 0.0
self.vel_msg.angular.z = self.str_angle
self.accel_msg.data = newaccel
# We always publish right away
self.publishNow = True
@staticmethod
def L2Norm(vector3data):
"""
Calculates the L2Norm of geometry_msgs Vector3
Parameters
-------------
vector3data: `geometry_msgs/Vector3`, 3D Vector data whose L2 Norm is to be calculated
Returns
-----------
L2 norm of the 3D vector data
"""
magnitude = np.sqrt(vector3data.x**2 + vector3data.y**2 + vector3data.z**2)
return magnitude
def publish(self):
if self.publishNow:
self.counter = self.counter + 1
if self.counter % 100== 0:
#print("PublishNow:{}".format(self.publishNow))
rospy.loginfo(rospy.get_caller_id() + " publishing new velocity {0} m/s.".format(self.vel_msg.linear.x))
self.pubvel.publish(self.vel_msg)
self.pubaccel.publish(self.accel_msg)
# after we publish, we ensure to wait until a new odom point arrives
self.publishNow = False
@staticmethod
def follower_stopper(leadervel, leaderaccel, vel, accel, distance, deltaT, **kwargs):
"""
Implements follower stopper model
Parameters
-------------
leadervel: `double` , leader's velocity at current time step
leaderaccel: `double`, leader's acceleration at current time step
vel: `double`, current vehicle's velocity at the current time step
accel: `double`, current vehicle's acceleration at the current time step
distance: `double`, distance between the leader vehicle and the current vehicle at the current time step
deltaT: `double`, timestep
kwargs: `dictionary`, variable argument dictionary
Returns
--------
Car following model returns new commanded velocity for the next time step as per followerstopper
"""
dx1 = 4.5
dx3 = 6.0
dx2 = (dx1 + dx3)/2.0
a1 = 1.5
a3 = 0.5
a2 = (a1 + a3)/2.0
cmd_vel = 0.0
r = 6.5 # m/s, maximum commanded velocity
v = min(max(leadervel, 0.0), r) # clamp the leader's velocity to [0, r]
if distance <= dx1:
cmd_vel = 0.0
elif (distance <=dx2) and (distance >=dx1):
cmd_vel = v*((distance - dx1)/(dx2 - dx1))
elif(distance <=dx3) and (distance >=dx2):
cmd_vel = v + (r-v)*((distance - dx2)/(dx3 - dx2))
elif distance > dx3:
cmd_vel = r
return cmd_vel
def cf_algorithm(self, leadervel, leaderaccel, vel, accel, distance, deltaT, **kwargs):
"""
Implements a car following model
Parameters
-------------
leadervel: `double` , leader's velocity at current time step
leaderaccel: `double`, leader's acceleration at current time step
vel: `double`, current vehicle's velocity at the current time step
accel: `double`, current vehicle's acceleration at the current time step
distance: `double`, distance between the leader vehicle and the current vehicle at the current time step
deltaT: `double`, timestep
kwargs: `dictionary`, variable argument dictionary
Returns
--------
Car following model returns new commanded velocity for the next time step
"""
b = 5.0 # [m^2/s] ,follow-the-leader strength
a = 0.5 # [1/s], optimal velocity strength
nu = 2.0 #power of distance in denominator of follow-the-leader term
#print("cf_algorithm leadervel: {}".format(leadervel))
#print("cf_algorithm vel: {}".format(vel))
#print("cf_algorithm distance: {}".format(distance))
accel = (b*(leadervel - vel)/(distance**nu)) + a*(self.V(distance) - vel)
#print("cf_algorithm new acceleration: {}".format(accel))
# print("cf_algorithm deltaT: {}".format(deltaT))
#deltaT = 0.01
deltav = accel*deltaT
newvelocity = vel + deltav
#print("cf_algorithm New Velocity : {}".format(newvelocity))
return newvelocity, accel
@staticmethod
def V(distance):
"""
Optimal Velocity Function
Parameters
-------------
distance: `double`, current distance between the current vehicle and the leader vehicle.
Returns
----------
Function returns the optimal Velocity
"""
#print("V Function distance", distance)
d0 = 5.00 # [m] reference vehicle distance
Vm = 15/3.6 # [m/s] maximum velocity for the optimal velocity function
V = Vm * (np.tanh((distance/d0) - 2) + np.tanh(2))/(1 + np.tanh(2))
return V
def usage():
print("carfollowing -t nebula -l magna -r 50.0")
def main(argv):
#print("Main of carfollowing")
thisvehicle='nebula'
leadervehicle='magna'
rate=20.0
print('Argv: {}'.format(argv))
try:
opts, args = getopt.getopt(argv, "l:r:t:", ["leadervehicle=", "rate=", "thisvehicle="])
except getopt.GetoptError:
usage()
sys.exit()
for opt, arg in opts:
if opt in ("-t", "--thisvehicle"):
thisvehicle=arg
elif opt in ("-l", "--leadervehicle"):
leadervehicle=arg
print('arg={}'.format(arg))
elif opt in ("-r", "-rate"):
rate=arg
else:
usage()
sys.exit()
print("This Vehicle is {}.\nLeader Vehicle is {}\n".format(thisvehicle, leadervehicle))
node = carfollowing(thisvehicle, leadervehicle)
rosrate = rospy.Rate(rate)
while not rospy.is_shutdown():
if rospy.get_param("/execute", False):
#print("Sending velocity {} m/s to {}".format(node.vel_msg.linear.x, node.thisvehicle))
node.publish()
rosrate.sleep()
if __name__ == '__main__':
main(sys.argv[1:])
```
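The `cf_algorithm` above combines a follow-the-leader term with an optimal-velocity term and integrates it with a single explicit Euler step. Below is a minimal standalone sketch of that update, assuming the same parameter values hard-coded in the class (`b`, `a`, `nu`, `d0`, `Vm`); the numbers in the example call are illustrative only and the sketch is not part of the sparkle package.
```python
import numpy as np

def optimal_velocity(distance, d0=5.0, vm=15 / 3.6):
    # Optimal velocity function V(d), mirroring carfollowing.V above
    return vm * (np.tanh(distance / d0 - 2) + np.tanh(2)) / (1 + np.tanh(2))

def cf_step(leadervel, vel, distance, dt, b=5.0, a=0.5, nu=2.0):
    # Follow-the-leader + optimal-velocity acceleration, then one Euler step
    accel = b * (leadervel - vel) / (distance ** nu) + a * (optimal_velocity(distance) - vel)
    return vel + accel * dt, accel

# One 10 ms step: follower at 2 m/s, leader at 3 m/s, 9.45 m apart (illustrative values)
print(cf_step(leadervel=3.0, vel=2.0, distance=9.45, dt=0.01))
```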
#### File: src/sparkle/gazebo_rtf.py
```python
import sys
import rospy
import csv
import time
import datetime
import socket
from os.path import expanduser
from gazebo_msgs.srv import GetPhysicsProperties
from gazebo_msgs.srv import SetPhysicsProperties
class saveparam:
def __init__(self, ns):
"""
"""
pass
def main(argv):
rospy.wait_for_service('gazebo/get_physics_properties')
rospy.wait_for_service('gazebo/set_physics_properties')
max_update_rate = float(argv[0])
time_step = float(argv[1])
get_physics_properties_prox = rospy.ServiceProxy('gazebo/get_physics_properties', GetPhysicsProperties)
physics_properties = get_physics_properties_prox()
time.sleep(4)
get_physics_properties_prox = rospy.ServiceProxy('gazebo/get_physics_properties', GetPhysicsProperties)
physics_properties = get_physics_properties_prox()
print("Current max_update_rate is {}".format( physics_properties.max_update_rate))
while(physics_properties.max_update_rate != max_update_rate):
print("max_update_rate is not yet {}; setting the physics properties again.".format(max_update_rate))
physics_properties.max_update_rate = max_update_rate
physics_properties.time_step = time_step
set_physics_properties_prox = rospy.ServiceProxy('gazebo/set_physics_properties', SetPhysicsProperties)
set_physics_properties_prox(physics_properties.time_step,
physics_properties.max_update_rate,
physics_properties.gravity,
physics_properties.ode_config)
get_physics_properties_prox = rospy.ServiceProxy('gazebo/get_physics_properties', GetPhysicsProperties)
physics_properties = get_physics_properties_prox()
print("New max_update_rate is {}".format( physics_properties.max_update_rate))
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: sparkle/launch/launch.py
```python
import roslaunch
import time
""" This class is an utility class for creating and launching roslaunch, as well as terminating it """
'''
Summary of Class layout:
This class requires a ros package 'Sparkle'
Attributes:
1. launchfile: The full path of the launch file
2. theta: angular separation of two consecutive vehicles on the circle
and all attributes of super class
Functions:
1. __init__(launchfile, **kwargs): basically a constructor
'''
class launch:
'''
`launch`: A class facilitating roslaunch with runtime arguments and termination
Parameters
-------------
launchfile: `string`
Full path of the roslaunch file to be launched. Must be a string.
kwargs
variable keyword arguments passed as run-time argument for the launch file
Attributes
------------
launchfile: `string`
Full path of the roslaunch file to be launched. Must be a string.
runtime_args: `list`
A list of run time arguments to be passed to launch file execution
uuid: `string`
Unique Identifier for the launch
parent: `roslaunch.parent.ROSLaunchParent`
Handle to the roslaunch parent process used to start and shut down the launch
See Also
---------
layout: superclass of `lane`
'''
def __init__(self, launchfile, **kwargs):
self.launchfile = launchfile
try:
roslaunch.rlutil.resolve_launch_arguments([self.launchfile])
except roslaunch.RLException:
print("Unable to find {}".format(self.launchfile))
self.runtime_args = []
self.uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
print(self.uuid)
roslaunch.configure_logging(self.uuid)
for key in kwargs.keys():
self.runtime_args.append("{}:={}".format(key, kwargs[key]))
if len(self.runtime_args) > 0:
self.parent = roslaunch.parent.ROSLaunchParent(self.uuid, [(self.launchfile, self.runtime_args)])
else:
self.parent = roslaunch.parent.ROSLaunchParent(self.uuid,[self.launchfile])
def start(self):
'''
Calls `start()` function to execute roslaunch
'''
self.parent.start()
time.sleep(5)
if len(self.runtime_args) > 0:
print("{} started with run-time arguments {}".format(self.launchfile, self.runtime_args))
else:
print("{} started.".format(self.launchfile))
def shutdown(self):
'''
Calls `shutdown()` function to terminate the execution of the launch file
'''
self.parent.shutdown()
print("{} Terminated.".format(self.launchfile))
```
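A hedged usage sketch of the `launch` helper above; the launch-file path, the import path, and the `n_vehicles` run-time argument are hypothetical placeholders, not taken from the repository.
```python
import time

from launch import launch  # assuming this module is importable as `launch`

# Hypothetical launch file and run-time argument
sim = launch("/path/to/sparkle/launch/simulation.launch", n_vehicles=5)
sim.start()      # runs roslaunch with n_vehicles:=5
time.sleep(30)   # let the simulation run for a while
sim.shutdown()   # terminate the roslaunch parent
```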
#### File: sparkle/log/gzstats.py
```python
import signal
import pandas as pd
import sys, math, time, datetime
import matplotlib.pyplot as pt
import matplotlib.animation as animation
from matplotlib import style
import numpy as np
from matplotlib.pyplot import cm
import pickle
import bagpy
import seaborn as sea
class gzstats(object):
'''
__init__ takes file name of gz stats dump (either relative path or full path)
'''
def __init__(self, statfile='Circle_Test_n_20_updateRate_1_2019-12-02-13-13-35_gzStats.txt'):
self.statfile = statfile
dataframe = pd.read_csv(self.statfile, sep=' ' , header=None, names=["Factor", "SimTime", "RealTime", "SimStatus"])
# delete the last two rows, they may be incomplete
dataframe.drop(dataframe.tail(2).index,inplace=True)
df = dataframe.replace(r'\w+\[([\S\.]+)\]', r'\1', regex=True)
df['Factor'] = df['Factor'].astype('float')
df['SimTime'] = df['SimTime'].astype('float')
df['RealTime'] = df['RealTime'].astype('float')
df['SimStatus'] = df['SimStatus'].apply(lambda x: self._status(x))
self.dataframe = df
# Mean Real Time Factor throughout the simulation
self.rtf_avg = round(np.mean(self.dataframe['Factor']), 5)
# Mean Standard Deviation of RTF throughout the Simulation
self.rtf_std = round(np.std(self.dataframe['Factor']), 5)
# Pause Ratio is the fraction of the time that the simulation stayed paused
self.pause_ratio = round(self._calcPausePercentage(self.dataframe['SimStatus']), 5)
'''
Private function to use in Sim Status Lambda
'''
def _status(self, x):
if(x == 'F'):
x = 0.0
elif(x == 'T'):
x = 1.0
else:
x = None
return x
'''
Private function to calculate pause percentage
'''
def _calcPausePercentage(self, data):
return (sum(data)/len(data))
def plotRTF(self, save=True):
SimTime = self.dataframe['SimTime']
RealTime = self.dataframe['RealTime']
Factor = self.dataframe['Factor']
pt.style.use('seaborn')
pt.rcParams["figure.figsize"] = (18,10)
pt.rcParams[ 'font.family'] = 'Roboto'
pt.rcParams[ 'font.weight'] = 'bold'
params = {'legend.fontsize': 12, 'legend.handlelength': 2, 'legend.loc': 'upper right'}
pt.rcParams.update(params)
fig, (ax1, ax2) = pt.subplots(2, 1)
# Change the color and its transparency
ax1.fill_between( SimTime, Factor, color="skyblue", alpha=0.2)
ax1.plot(SimTime, Factor, color="Slateblue", alpha=0.6, linestyle='-', linewidth='1', marker='.', markersize = 3)
ax1.set_axisbelow(True)
ax1.minorticks_on()
ax1.tick_params(axis="x", labelsize=14)
ax1.tick_params(axis="y", labelsize=14)
pt.grid(True)
ax1.set_xlabel('Sim Time', fontsize=14)
ax1.set_ylabel('Real Time Factor', fontsize=14)
ax1.legend(['Real time factor average: ' + str(self.rtf_avg) + ', std: ' + str(self.rtf_std)])
ax1.set_title( self.statfile[0:-4]+ "\n " + "Real Time Factor vs Sim Time", fontsize=10)
# Change the color and its transparency
ax2.fill_between( RealTime, Factor, color="lightcoral", alpha=0.2)
ax2.plot(RealTime, Factor, color="crimson", alpha=0.6, linestyle='-', linewidth='1', marker='.', markersize = 3)
ax2.set_axisbelow(True)
ax2.minorticks_on()
ax2.tick_params(axis="x", labelsize=14)
ax2.tick_params(axis="y", labelsize=14)
ax2.set_xlabel('Real Time', fontsize=14)
ax2.set_ylabel('Real Time Factor', fontsize=14)
ax2.legend(['Real time factor average: ' + str(self.rtf_avg) + ', std: ' + str(self.rtf_std)])
ax2.set_title( self.statfile[0:-4]+ "\n" + "Real Time Factor vs Real Time", fontsize=10)
pt.tight_layout()
if save:
current_fig = pt.gcf()
fileToSave = self.statfile[0:-4]
pickle.dump(fig, open(fileToSave + "_RTF.pickle", 'wb'))
current_fig.savefig(fileToSave + "_RTF.pdf", dpi=300)
pt.show()
def plotSimStatus(self, save=True):
SimTime = self.dataframe['SimTime']
RealTime = self.dataframe['RealTime']
SimStatus = self.dataframe['SimStatus']
pt.style.use('seaborn')
pt.rcParams["figure.figsize"] = (18,10)
params = {'legend.fontsize': 16, 'legend.handlelength': 2, 'legend.loc': 'upper left'}
pt.rcParams.update(params)
fig, (ax1, ax2) = pt.subplots(2, 1)
# Change the color and its transparency
ax1.plot(SimTime, SimStatus, color="Slateblue", alpha=0.6, linestyle='-', linewidth='0.5', marker='.', markersize = 10)
ax1.set_axisbelow(True)
ax1.minorticks_on()
ax1.tick_params(axis="x", labelsize=14)
ax1.tick_params(axis="y", labelsize=14)
pt.grid(True)
ax1.set_xlabel('Sim Time', fontsize=14)
ax1.set_ylabel('Gazebo Sim Pause Status', fontsize=16)
ax1.legend(['Pause percentage: ' + str(self.pause_ratio*100)+ '%'])
ax1.set_title( self.statfile[0:-4]+ "\n" + "Sim Status (Pause/Unpause) vs Sim Time")
# Change the color and its transparency
ax2.plot(RealTime, SimStatus, color="crimson", alpha=0.6, linestyle='-', linewidth='0.5', marker='.', markersize = 10)
ax2.set_axisbelow(True)
ax2.minorticks_on()
ax2.tick_params(axis="x", labelsize=14)
ax2.tick_params(axis="y", labelsize=14)
ax2.set_xlabel('Real Time', fontsize=14)
ax2.set_ylabel('Gazebo Sim Pause Status', fontsize=14)
ax2.legend(['Pause percentage: ' + str(self.pause_ratio*100) + '%'])
ax2.set_title( self.statfile[0:-4]+ "\n" + "Sim Status (Pause/Unpause) vs Real Time")
if save:
current_fig = pt.gcf()
fileToSave = self.statfile[0:-4]
pickle.dump(fig, open(fileToSave + "_SimStatus.pickle", 'wb'))
current_fig.savefig(fileToSave + "_SimStatus.pdf", dpi=300)
# pt.show()
```
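A hedged usage sketch of the `gzstats` class above; the statistics file name is simply the constructor's default and stands in for a dump captured from the `gz stats` command-line tool.
```python
from gzstats import gzstats  # assuming the module is importable as `gzstats`

stats = gzstats(statfile='Circle_Test_n_20_updateRate_1_2019-12-02-13-13-35_gzStats.txt')
print(stats.rtf_avg, stats.rtf_std, stats.pause_ratio)
stats.plotRTF(save=False)
stats.plotSimStatus(save=False)
```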
#### File: sparkle/log/rosbag_record.py
```python
import rospy
import subprocess
import os
import signal
class RosbagRecord:
def __init__(self):
rospy.init_node('bagrecorder')
rospy.loginfo(rospy.get_name() + ' start')
if rospy.has_param('~record_script') and rospy.has_param('~record_folder'):
self.record_script = rospy.get_param('~record_script')
self.record_folder = rospy.get_param('~record_folder')
rospy.on_shutdown(self.stop_recording_handler)
# Start recording.
command = "source " + self.record_script
self.p = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True, cwd=self.record_folder,
executable='/bin/bash')
# Wait for shutdown signal to close rosbag record
rospy.spin()
else:
rospy.signal_shutdown(rospy.get_name() + ' no record script or folder specified.')
def terminate_ros_node(self, s):
# Adapted from http://answers.ros.org/question/10714/start-and-stop-rosbag-within-a-python-script/
list_cmd = subprocess.Popen("rosnode list", shell=True, stdout=subprocess.PIPE)
list_output = list_cmd.stdout.read().decode('utf-8')
retcode = list_cmd.wait()
assert retcode == 0, "List command returned %d" % retcode
for node_name in list_output.split("\n"):
if node_name.startswith(s):
os.system("rosnode kill " + node_name)
def stop_recording_handler(self):
rospy.loginfo(rospy.get_name() + ' stop recording.')
self.terminate_ros_node("/bagrecorder")
``` |
{
"source": "jmsegrev/django-class-fixtures",
"score": 2
} |
#### File: class_fixtures/tests/models.py
```python
from django.db import models
# For relationless, M2M, M2M w/through model and concrete inheritance testing
class Band(models.Model):
name = models.CharField(max_length=255)
class MetalBand(Band):
leather_pants_worn = models.BooleanField(default=True)
class Musician(models.Model):
name = models.CharField(max_length=100)
member_of = models.ManyToManyField(Band, through='Membership')
class Membership(models.Model):
musician = models.ForeignKey(Musician)
band = models.ForeignKey(Band)
date_joined = models.DateField(null=True)
instrument = models.CharField(max_length=100)
class Roadie(models.Model):
name = models.CharField(max_length=100)
hauls_for = models.ManyToManyField(Band)
# For normal FK, FK to self and non-inheritance OneToOneField testing
class Company(models.Model):
name = models.CharField(max_length=100)
class Employee(models.Model):
name = models.CharField(max_length=100)
company = models.ForeignKey(Company)
manager = models.ForeignKey('self', null=True)
cog_in_the_machine = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if ' corp' in self.company.name.lower():
self.cog_in_the_machine = True
super(Employee, self).save(*args, **kwargs)
class EmployeeHistory(models.Model):
employee = models.OneToOneField(Employee)
date_joined = models.DateField()
# For natural key testing
class CompetencyManager(models.Manager):
def get_by_natural_key(self, framework, level):
return self.get(framework=framework, level=level)
class Competency(models.Model):
LEVEL_CHOICES = (
(0, "None"),
(1, "Beginner"),
(2, "Intermediate"),
(3, "Advanced"),
(4, "Guru"),
)
framework = models.CharField(max_length=100)
level = models.SmallIntegerField(choices=LEVEL_CHOICES)
objects = CompetencyManager()
def natural_key(self):
return (self.framework, self.level)
class Meta(object):
unique_together = (('framework', 'level'),)
class JobPosting(models.Model):
title = models.CharField(max_length=100)
main_competency = models.ForeignKey(Competency, related_name='main_competency_for')
additional_competencies = models.ManyToManyField(Competency, related_name='extra_competency_for')
# For initial data only, to prevent messing up object counts for other models
# in tests. Due to custom routing in tests.runtests.AlternateDBTestRouter, the
# schemas for these models will not be synched to the "alternate" database.
class Party(models.Model):
name = models.CharField(max_length=100)
class Politician(models.Model):
name = models.CharField(max_length=100)
party = models.ForeignKey(Party)
minimum_bribe = models.DecimalField(max_digits=10, decimal_places=2, null=True)
# For testing dumpdata with a complex model. Skip some fields whose serialized
# representation is identical to that of others, e.g. FileField == CharField.
class ComprehensiveModel(models.Model):
bigint = models.BigIntegerField()
boolean = models.BooleanField()
char = models.CharField(max_length=255)
date = models.DateField()
datetime = models.DateTimeField()
decimal = models.DecimalField(max_digits=6, decimal_places=2)
floatf = models.FloatField()
integer = models.IntegerField()
nullboolean = models.NullBooleanField()
text = models.TextField()
time = models.TimeField()
``` |
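A hedged sketch of how the natural key defined on `Competency` above might be used; the framework name, level, and job title are illustrative assumptions, and the competencies are assumed to already exist in the test database.
```python
from class_fixtures.tests.models import Competency, JobPosting

# Look up competencies by their natural key (framework, level) instead of pk
django_advanced = Competency.objects.get_by_natural_key("Django", 3)
django_beginner = Competency.objects.get_by_natural_key("Django", 1)

posting = JobPosting.objects.create(title="Backend Developer",
                                    main_competency=django_advanced)
posting.additional_competencies.add(django_beginner)
```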
{
"source": "jmservera/hassio-mqtt-proxy",
"score": 2
} |
#### File: hassio-mqtt-proxy/tests/common.py
```python
import os
def get_fixture_path(filename):
return os.path.join(os.path.dirname(__file__), "fixtures", filename)
``` |
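A hedged usage sketch of the fixture helper above; `sample.json` is a hypothetical file assumed to live in `tests/fixtures/` next to this module.
```python
import json

from tests.common import get_fixture_path  # assuming this import path

with open(get_fixture_path("sample.json")) as handle:
    payload = json.load(handle)
print(payload)
```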
{
"source": "jmservera/TwitterAnalyticsLambda",
"score": 2
} |
#### File: TwitterAnalyticsLambda/TweetIngest/send.py
```python
import sys
import logging
import datetime
import time
import os
import enhancedjsonencoder
from azure.eventhub import EventHubClient, Sender, EventData
from telemetry import Telemetry
class EventHubSender(object):
def __init__(self, connectionString):
print("Initiating EventHubSender with "+connectionString)
if not connectionString:
raise ValueError("No EventHubs URL supplied.")
self.address=connectionString
self.client=EventHubClient.from_connection_string(connectionString)
self.sender=self.client.add_sender()
self.client.run()
self.encoder=enhancedjsonencoder.EnhancedJSONEncoder()
def __del__(self):
self.sender.close()
def close(self):
self.__del__()
def sendTweet(self,tweet):
try:
tweetjson= self.encoder.encode(tweet)
self.sender.send(EventData(tweetjson))
Telemetry.IncrementMessages()
except:
raise
```
#### File: TwitterAnalyticsLambda/TweetIngest/telemetry.py
```python
class __telemetry(object):
def __init__(self):
self._receivedTweets=0
self._sentMessages=0
@property
def ReceivedTweets(self):
return self._receivedTweets
def IncrementTweets(self):
self._receivedTweets=self._receivedTweets+1
return self._receivedTweets
@property
def SentMessages(self):
return self._sentMessages
def IncrementMessages(self):
self._sentMessages=self._sentMessages+1
return self._sentMessages
Telemetry=__telemetry()
``` |
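A hedged sketch showing the module-level singleton above in use; because `Telemetry` is instantiated at import time, every importer of `telemetry.py` shares the same counters.
```python
from telemetry import Telemetry  # assuming the module is importable as `telemetry`

Telemetry.IncrementTweets()
Telemetry.IncrementMessages()
print(Telemetry.ReceivedTweets, Telemetry.SentMessages)  # 1 1
```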
{
"source": "jmsevillam/Herramientas-Computacionales-UniAndes",
"score": 3
} |
#### File: Hw4/Solution/problem2.py
```python
def fac(n):
if n==1 or n==0:
return 1
else:
return n*fac(n-1)
print(fac(10))
```
#### File: Hw4/Solution/problem5a.py
```python
def decode(word1,word2,code):
if len(word1)==1:
code+=word1+word2
return code
else:
code+=word1[0]+word2[0]
return decode(word1[1:],word2[1:],code)
Alice='Ti rga eoe esg o h ore"ermetsCmuainls'
Bob='hspormdcdsamsaefrte<NAME>ae"'
print(decode(Alice,Bob,''))
```
#### File: Hw4/Solution/problem5b.py
```python
def decode(word1,word2):
word=''
for i in range(len(word1)):
word+=word1[i]+word2[i]
return word
Alice='Ti rga eoe esg o h ore"ermetsCmuainls'
Bob='hspormdcdsamsaefrtecus Hraina optcoae"'
print(decode(Alice,Bob))
```
#### File: Hw5/Solution/problem1.py
```python
class Dog:
def __init__(self,name,posx,posy):
self.name=name
self.posx=posx
self.posy=posy
self.awaken=False
self.hungry=False
self.counter=0
def awake(self):
if self.awaken:
print(self.name+' is already awake')
else:
self.awaken=True
print(self.name+' is no longer asleep')
def move(self,x1,y1):
if self.hungry:
print(self.name+' is hungry')
elif self.awaken:
self.posx+=x1
self.posy+=y1
self.counter+=1
else:
print(self.name+' is asleep')
if self.counter>=3:
self.hungry=True
def feed(self):
self.counter=0
self.hungry=False
print(self.name+' is no longer hungry')
MyDog=Dog('Lambda',0,0)
print(MyDog.posx,MyDog.posy)
MyDog.move(1,1)
MyDog.awake()
MyDog.move(1,0)
print(MyDog.posx,MyDog.posy)
MyDog.move(0,1)
print(MyDog.posx,MyDog.posy)
MyDog.move(1,1)
print(MyDog.posx,MyDog.posy)
MyDog.move(1,1)
print(MyDog.posx,MyDog.posy)
MyDog.feed()
MyDog.move(1,0)
print(MyDog.posx,MyDog.posy)
```
#### File: Hw5/Solution/problem2.py
```python
import random
class vehicle:
def __init__(self,Color,Wheels,MaxVel):
self.Color=Color
self.Wheels=Wheels
self.VMax=MaxVel
def Move(self):
v=random.random()*self.VMax
print('The velocity is: '+str(v))
def Park(self):
print('The vehicle is parked')
class Bicycle(vehicle):
def __init__(self,Color,Wheels,MaxVel):
vehicle.__init__(self,Color,Wheels,MaxVel)
def Do_some_exercise(self):
print('Doing exercise')
class Motorcycle(vehicle):
def __init__(self,Color,Wheels,MaxVel):
vehicle.__init__(self,Color,Wheels,MaxVel)
def Put_Helmet(self):
print("Helmet's on")
class Car(vehicle):
def __init__(self,Color,Wheels,MaxVel):
vehicle.__init__(self,Color,Wheels,MaxVel)
def Turn_on_Radio(self):
print("Radio's on")
b=Bicycle('Red',2,10)
print(b.Color)
b.Do_some_exercise()
b.Move()
b.Park()
m=Motorcycle('black',2,10)
print(m.Color)
m.Put_Helmet()
m.Move()
m.Park()
c=Car('gray',2,10)
print(c.Color)
c.Turn_on_Radio()
c.Move()
c.Park()
``` |
{
"source": "jmsevillam/Intro_to_Computational_Physics",
"score": 3
} |
#### File: Maps/Standard Map/Standard Map.py
```python
import numpy as np
import matplotlib.pylab as plt
def std_map(theta,p,K):
p=((p+K*np.sin(theta)))
theta=(theta+p)%(2.0*np.pi)
return theta,p
N=2000
for K in np.linspace(0,20,51):
print(int(K*100))
theta=np.random.random(N)*2*np.pi
p=2.*np.random.random(N)-1
for t in range(1000):
theta,p=std_map(theta,p,K*0.3)
if K%1==0:
plt.plot(theta,(p+np.pi)%(2.*np.pi),'.',color='k',markersize=0.01)
if K%1==0:
plt.savefig(str(int(K*100)).zfill(4)+'.png')
plt.close()
```
#### File: Strange Attractors/Lorentz System/lorentz.py
```python
import numpy as np
class Lorentz:
"""This is the Lorentz Class"""
def __init__(self,x,y,z,sigma,beta,rho):
self.x=np.array([x])
self.y=np.array([y])
self.z=np.array([z])
self.sigma=sigma
self.beta=beta
self.rho=rho
def time_step(self,dt):
"""This calculate the next step"""
x1=self.x[-1]
y1=self.y[-1]
z1=self.z[-1]
dx=self.sigma*(y1-x1)
dy=x1*(self.rho-z1)-y1
dz=x1*y1-self.beta*z1
self.x=np.append(self.x,x1+dx*dt)
self.y=np.append(self.y,y1+dy*dt)
self.z=np.append(self.z,z1+dz*dt)
```
#### File: Strange Attractors/Lorentz System/main_lorentz.py
```python
import numpy as np
import matplotlib.pylab as plt
from scipy.optimize import curve_fit
from mpl_toolkits.mplot3d import Axes3D
import lorentz as lo
sis1=lo.Lorentz(1.0,1.0,1.0,10.0,8.0/3.0,28.0)
sis2=lo.Lorentz(1.1,1.0,1.0,10.0,8.0/3.0,28.0)
time=np.array([0])
for i in range(10000):
sis1.time_step(0.01)
sis2.time_step(0.01)
time=np.append(time,(i+1)*0.01)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(sis1.x,sis1.y,sis1.z,'ok',markersize=.1)
ax.plot(sis2.x,sis2.y,sis2.z,'or',markersize=.1)
plt.show()
dis=np.sqrt((sis1.x-sis2.x)**2+(sis1.y-sis2.y)**2+(sis1.z-sis2.z)**2)
def f(x,a,b):
return a*np.exp(b*x)
popt,pcov=curve_fit(f,time[300:800],dis[300:800])
plt.plot(time,dis)
plt.plot(time,f(time,*popt))
plt.yscale('log')
plt.show()
```
#### File: Electrodynamics/Poisson/2D_Poisson.py
```python
import numpy as np
import matplotlib.pylab as plt
Tolerance=0.
size=200
positive_charges=8
negative_charges=7
x_pos=np.random.randint(.05*size,.95*size,positive_charges)
y_pos=np.random.randint(.05*size,.95*size,positive_charges)
x_neg=np.random.randint(.05*size,.95*size,negative_charges)
y_neg=np.random.randint(.05*size,.95*size,negative_charges)
def geometry(mat):
mat[:,0]=0
mat[:,-1]=0
mat[0,:]=0
mat[-1,:]=0
mat[x_pos,y_pos]=1
mat[x_neg,y_neg]=-1
def time_step(mat):
copia=np.zeros(mat.shape)
copia[1:-1,1:-1]=(mat[2:,1:-1]+mat[0:-2,1:-1]+mat[1:-1,2:]+mat[1:-1,:-2])/4.
geometry(copia)
diff=(np.sqrt((mat-copia)**2).sum()/(100*100))
return diff,copia
mat=2*np.random.random((size,size))-1
for t in range(1000000):
diff,mat=time_step(mat)
if t%5000==0:
print(t,diff)
if diff<=Tolerance:
print(t,diff)
break
pl=plt.imshow(mat,cmap='RdBu',vmin=-1,vmax=1)
plt.colorbar(pl)
plt.show()
``` |
{
"source": "jmsevillam/LearningML",
"score": 4
} |
#### File: projects/Chess/Chess.py
```python
import numpy as np
import matplotlib.pylab as plt
def possible(x,y,board):
if x<0 or y<0 or x>=N or y>=M:
return False
elif x+y*N in board:
return False
else:
return True
def move(x,y,board):
option={1:[1,-2],
2:[-1,-2],
3:[-2,1],
4:[-2,-1],
5:[-1,2],
6:[1,2],
7:[2,-1],
8:[2,1]}
i=0
while True:
i+=1
index=np.random.randint(1,9)
dx=x+option[index][0]
dy=y+option[index][1]
if possible(dx,dy,board):
break
elif i>20:
board=[]
board.append(dx+N*dy)
return dx,dy,board
def print_board(board,name):
x=np.zeros(N*M)
for i in board:
x[i]=1.
x[board[-1]]=2
x_plot=x.reshape((N,M))
plt.imshow(x_plot)
plt.savefig(name)
plt.close()
N=8
M=8
x,y,board=0,0,[0]
name=0
while len(board)<N*M:
x,y,board=move(x,y,board)
#print(board,len(board))
print_board(board,"images/"+str(name).zfill(3))
name+=1
if name==100:
break
``` |
{
"source": "jmsfltchr/GraphSAGE",
"score": 3
} |
#### File: neighbour_traversal/test/test_neighbour_traversal.py
```python
import unittest
import grakn
from grakn.service.Session.Concept.Concept import Concept, Role
from grakn_graphsage.src.neighbour_traversal.neighbour_traversal import build_neighbourhood_generator, NeighbourRole, \
ConceptWithNeighbourhood, NEIGHBOUR_PLAYS, TARGET_PLAYS
class TestNeighbourTraversalFromEntity(unittest.TestCase):
def setUp(self):
self._client = grakn.Grakn(uri="localhost:48555")
self._session = self._client.session(keyspace="genealogy")
def tearDown(self):
self.tx.close()
def _assert_type_instances_correct(self, concept_with_neighbourhood):
self.assertTrue(isinstance(concept_with_neighbourhood, ConceptWithNeighbourhood))
self.assertTrue(isinstance(concept_with_neighbourhood.concept, Concept))
self.assertTrue(type(concept_with_neighbourhood.neighbourhood).__name__ in ('generator', 'chain'))
neighbour_role = next(concept_with_neighbourhood.neighbourhood, None)
if neighbour_role is not None:
self.assertTrue(isinstance(neighbour_role, NeighbourRole))
self.assertTrue(isinstance(neighbour_role.role, Role) or neighbour_role.role == 'UNKNOWN_ROLE')
self.assertIn(neighbour_role.target_or_neighbour_plays, [TARGET_PLAYS, NEIGHBOUR_PLAYS])
self.assertTrue(self._assert_type_instances_correct(neighbour_role.neighbour))
return True
def test_neighbour_traversal_structure(self):
self.tx = self._session.transaction(grakn.TxType.WRITE)
# concept = list(tx.query("match $x isa person, has firstname {}, has surname {}; get $x;".format("Jacob",
# "Young")))[0]
identifier = "<NAME>"
concept = list(self.tx.query("match $x isa person, has identifier '{}'; get $x;".format(identifier)))[0].get('x')
concept_with_neighbourhood = build_neighbourhood_generator(self.tx, concept, 2)
self._assert_type_instances_correct(concept_with_neighbourhood)
# neighbour_roles = [neighbour_role for neighbour_role in concept_with_neighbourhood.neighbourhood]
self.tx.close()
``` |
{
"source": "jmsfltchr/research",
"score": 2
} |
#### File: graph/thing/queries_to_graph.py
```python
import warnings
from functools import reduce
import networkx as nx
from kglib.utils.grakn.object.thing import build_thing
from kglib.utils.graph.thing.concept_dict_to_graph import concept_dict_to_graph
def concept_dict_from_concept_map(concept_map, tx):
"""
Given a concept map, build a dictionary of the variables present and the concepts they refer to, locally storing any
information required about those concepts.
Args:
concept_map: A dict of Concepts provided by Grakn keyed by query variables
tx: The Grakn transaction used to look up any additional information required about those concepts
Returns:
A dictionary of concepts keyed by query variables
"""
return {variable: build_thing(grakn_concept, tx) for variable, grakn_concept in concept_map.map().items()}
def combine_2_graphs(graph1, graph2):
"""
Combine two graphs into one. Do this by recognising common nodes between the two.
Args:
graph1: Graph to compare
graph2: Graph to compare
Returns:
Combined graph
"""
for node, data in graph1.nodes(data=True):
if graph2.has_node(node):
data2 = graph2.nodes[node]
if data2 != data:
raise ValueError((f'Found non-matching node properties for node {node} '
f'between graphs {graph1} and {graph2}:\n'
f'In graph {graph1}: {data}\n'
f'In graph {graph2}: {data2}'))
for sender, receiver, keys, data in graph1.edges(data=True, keys=True):
if graph2.has_edge(sender, receiver, keys):
data2 = graph2.edges[sender, receiver, keys]
if data2 != data:
raise ValueError((f'Found non-matching edge properties for edge {sender, receiver, keys} '
f'between graphs {graph1} and {graph2}:\n'
f'In graph {graph1}: {data}\n'
f'In graph {graph2}: {data2}'))
return nx.compose(graph1, graph2)
def combine_n_graphs(graphs_list):
"""
Combine N graphs into one. Do this by recognising common nodes between the two.
Args:
graphs_list: List of graphs to combine
Returns:
Combined graph
"""
return reduce(lambda x, y: combine_2_graphs(x, y), graphs_list)
def build_graph_from_queries(query_sampler_variable_graph_tuples, grakn_transaction,
concept_dict_converter=concept_dict_to_graph, infer=True):
"""
Builds a graph of Things, interconnected by roles (and *has*), from a set of queries and the graphs representing
those queries (variable graphs), over a Grakn transaction
Args:
infer: whether to use Grakn's inference engine
query_sampler_variable_graph_tuples: A list of tuples, each tuple containing a query, a sampling function,
and a variable_graph
grakn_transaction: A Grakn transaction
concept_dict_converter: The function to use to convert from concept_dicts to a Grakn model. This could be
a typical model or a mathematical model
Returns:
A networkx graph
"""
query_concept_graphs = []
for query, sampler, variable_graph in query_sampler_variable_graph_tuples:
concept_maps = sampler(grakn_transaction.query(query, infer=infer))
concept_dicts = [concept_dict_from_concept_map(concept_map, grakn_transaction) for concept_map in concept_maps]
answer_concept_graphs = []
for concept_dict in concept_dicts:
try:
answer_concept_graphs.append(concept_dict_converter(concept_dict, variable_graph))
except ValueError as e:
raise ValueError(str(e) + f'Encountered processing query:\n \"{query}\"')
if len(answer_concept_graphs) > 1:
query_concept_graph = combine_n_graphs(answer_concept_graphs)
query_concept_graphs.append(query_concept_graph)
else:
if len(answer_concept_graphs) > 0:
query_concept_graphs.append(answer_concept_graphs[0])
else:
warnings.warn(f'There were no results for query: \n\"{query}\"\nand so nothing will be added to the '
f'graph for this query')
if len(query_concept_graphs) == 0:
# Raise exception when none of the queries returned any results
raise RuntimeError(f'The graph from queries: {[query_sampler_variable_graph_tuple[0] for query_sampler_variable_graph_tuple in query_sampler_variable_graph_tuples]}\n'
f'could not be created, since none of these queries returned results')
concept_graph = combine_n_graphs(query_concept_graphs)
return concept_graph
``` |
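A minimal sketch of `combine_n_graphs` on plain networkx multigraphs; the node and edge names are illustrative assumptions rather than real Grakn concepts, and the import path is inferred from the module's own imports. Shared nodes with identical data are merged, while mismatching data raises `ValueError`.
```python
import networkx as nx

# assuming the module path used elsewhere in kglib
from kglib.utils.graph.thing.queries_to_graph import combine_n_graphs

g1 = nx.MultiDiGraph()
g1.add_node("person", type="entity")
g1.add_edge("person", "name", 0, type="has")

g2 = nx.MultiDiGraph()
g2.add_node("person", type="entity")  # shared node with identical data
g2.add_edge("person", "employment", 0, type="plays")

combined = combine_n_graphs([g1, g2])
print(combined.number_of_nodes(), combined.number_of_edges())  # 3 2
```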
{
"source": "jmsfltchr/typedb-client-python",
"score": 2
} |
#### File: concept/proto/concept_proto_reader.py
```python
import grakn_protocol.protobuf.concept_pb2 as concept_proto
from grakn.common.exception import GraknClientException
from grakn.concept.answer.concept_map import ConceptMap
from grakn.concept.thing.attribute import BooleanAttribute, LongAttribute, DoubleAttribute, StringAttribute, \
DateTimeAttribute
from grakn.concept.thing.entity import Entity
from grakn.concept.thing.relation import Relation
from grakn.concept.type.attribute_type import BooleanAttributeType, LongAttributeType, DoubleAttributeType, \
StringAttributeType, DateTimeAttributeType, AttributeType
from grakn.concept.type.entity_type import EntityType
from grakn.concept.type.relation_type import RelationType
from grakn.concept.type.role_type import RoleType
from grakn.concept.type.thing_type import ThingType
def iid(iid_proto: bytes):
return "0x" + iid_proto.hex()
def concept(con_proto: concept_proto.Concept):
if con_proto.HasField(ConceptMap._THING):
concept = thing(con_proto.thing)
else:
concept = type_(con_proto.type)
return concept
def thing(thing_proto: concept_proto.Thing):
if thing_proto.encoding == concept_proto.Thing.Encoding.Value("ENTITY"):
return Entity._of(thing_proto)
elif thing_proto.encoding == concept_proto.Thing.Encoding.Value("RELATION"):
return Relation._of(thing_proto)
elif thing_proto.encoding == concept_proto.Thing.Encoding.Value("ATTRIBUTE"):
return attribute(thing_proto)
else:
raise GraknClientException("The encoding " + thing_proto.encoding + " was not recognised.")
def attribute(thing_proto: concept_proto.Thing):
if thing_proto.value_type == concept_proto.AttributeType.ValueType.Value("BOOLEAN"):
return BooleanAttribute._of(thing_proto)
elif thing_proto.value_type == concept_proto.AttributeType.ValueType.Value("LONG"):
return LongAttribute._of(thing_proto)
elif thing_proto.value_type == concept_proto.AttributeType.ValueType.Value("DOUBLE"):
return DoubleAttribute._of(thing_proto)
elif thing_proto.value_type == concept_proto.AttributeType.ValueType.Value("STRING"):
return StringAttribute._of(thing_proto)
elif thing_proto.value_type == concept_proto.AttributeType.ValueType.Value("DATETIME"):
return DateTimeAttribute._of(thing_proto)
else:
raise GraknClientException("The value type " + str(thing_proto.value_type) + " was not recognised.")
def type_(type_proto: concept_proto.Type):
if type_proto.encoding == concept_proto.Type.Encoding.Value("ROLE_TYPE"):
return RoleType._of(type_proto)
else:
return thing_type(type_proto)
def thing_type(type_proto: concept_proto.Type):
if type_proto.encoding == concept_proto.Type.Encoding.Value("ENTITY_TYPE"):
return EntityType._of(type_proto)
elif type_proto.encoding == concept_proto.Type.Encoding.Value("RELATION_TYPE"):
return RelationType._of(type_proto)
elif type_proto.encoding == concept_proto.Type.Encoding.Value("ATTRIBUTE_TYPE"):
return attribute_type(type_proto)
elif type_proto.encoding == concept_proto.Type.Encoding.Value("THING_TYPE"):
return ThingType(type_proto.label, type_proto.root)
else:
raise GraknClientException("The encoding " + str(type_proto.encoding) + " was not recognised.")
def attribute_type(type_proto: concept_proto.Type):
if type_proto.value_type == concept_proto.AttributeType.ValueType.Value("BOOLEAN"):
return BooleanAttributeType._of(type_proto)
elif type_proto.value_type == concept_proto.AttributeType.ValueType.Value("LONG"):
return LongAttributeType._of(type_proto)
elif type_proto.value_type == concept_proto.AttributeType.ValueType.Value("DOUBLE"):
return DoubleAttributeType._of(type_proto)
elif type_proto.value_type == concept_proto.AttributeType.ValueType.Value("STRING"):
return StringAttributeType._of(type_proto)
elif type_proto.value_type == concept_proto.AttributeType.ValueType.Value("DATETIME"):
return DateTimeAttributeType._of(type_proto)
elif type_proto.value_type == concept_proto.AttributeType.ValueType.Value("OBJECT"):
return AttributeType(type_proto.label, type_proto.root)
else:
raise GraknClientException("The value type " + str(type_proto.value_type) + " was not recognised.")
```
#### File: concept/type/type.py
```python
from typing import Callable, List
import grakn_protocol.protobuf.concept_pb2 as concept_proto
import grakn_protocol.protobuf.transaction_pb2 as transaction_proto
from grakn.common.exception import GraknClientException
from grakn.concept.proto import concept_proto_builder, concept_proto_reader
from grakn.concept.concept import Concept, RemoteConcept
class Type(Concept):
def __init__(self, label: str, is_root: bool):
if not label:
raise GraknClientException("Label must be a non-empty string.")
self._label = label
self._is_root = is_root
self._hash = hash(label)
def get_label(self):
return self._label
def is_root(self):
return self._is_root
def is_type(self):
return True
def __str__(self):
return type(self).__name__ + "[label:" + self.get_label() + "]"
def __eq__(self, other):
if other is self:
return True
if not other or type(self) != type(other):
return False
return self.get_label() == other.get_label()
def __hash__(self):
return self._hash
class RemoteType(RemoteConcept):
def __init__(self, transaction, label: str, is_root: bool):
if not transaction:
raise GraknClientException("Transaction must be set.")
if not label:
raise GraknClientException("Label must be a non-empty string.")
self._transaction = transaction
self._label = label
self._is_root = is_root
self._hash = hash((self._transaction, label))
def get_label(self):
return self._label
def is_root(self):
return self._is_root
def set_label(self, label: str):
req = concept_proto.Type.Req()
set_label_req = concept_proto.Type.SetLabel.Req()
set_label_req.label = label
req.type_set_label_req.CopyFrom(set_label_req)
self._execute(req)
self._label = label
self._hash = hash((self._transaction, label))
def is_abstract(self):
req = concept_proto.Type.Req()
req.type_is_abstract_req.CopyFrom(concept_proto.Type.IsAbstract.Req())
res = self._execute(req)
return res.type_is_abstract_res.abstract
def is_type(self):
return True
def set_supertype(self, _type: Type):
req = concept_proto.Type.Req()
supertype_req = concept_proto.Type.SetSupertype.Req()
supertype_req.type.CopyFrom(concept_proto_builder.type_(_type))
req.type_set_supertype_req.CopyFrom(supertype_req)
self._execute(req)
def get_supertype(self):
req = concept_proto.Type.Req()
req.type_get_supertype_req.CopyFrom(concept_proto.Type.GetSupertype.Req())
res = self._execute(req).type_get_supertype_res
return concept_proto_reader.type_(res.type) if res.WhichOneof("res") == "type" else None
def get_supertypes(self):
method = concept_proto.Type.Req()
method.type_get_supertypes_req.CopyFrom(concept_proto.Type.GetSupertypes.Req())
return self._type_stream(method, lambda res: res.type_get_supertypes_res.types)
def get_subtypes(self):
method = concept_proto.Type.Req()
method.type_get_subtypes_req.CopyFrom(concept_proto.Type.GetSubtypes.Req())
return self._type_stream(method, lambda res: res.type_get_subtypes_res.types)
def delete(self):
method = concept_proto.Type.Req()
method.type_delete_req.CopyFrom(concept_proto.Type.Delete.Req())
self._execute(method)
def is_deleted(self):
return not self._transaction.concepts()._get_thing_type(self.get_label())
def _type_stream(self, method: concept_proto.Type.Req, type_list_getter: Callable[[concept_proto.Type.Res], List[concept_proto.Type]]):
method.label = self.get_label()
request = transaction_proto.Transaction.Req()
request.type_req.CopyFrom(method)
return map(lambda type_proto: concept_proto_reader.type_(type_proto), self._transaction._stream(request, lambda res: type_list_getter(res.type_res)))
def _thing_stream(self, method: concept_proto.Type.Req, thing_list_getter: Callable[[concept_proto.Type.Res], List[concept_proto.Thing]]):
method.label = self.get_label()
request = transaction_proto.Transaction.Req()
request.type_req.CopyFrom(method)
return map(lambda thing_proto: concept_proto_reader.thing(thing_proto), self._transaction._stream(request, lambda res: thing_list_getter(res.type_res)))
def _execute(self, method: concept_proto.Type.Req):
method.label = self.get_label()
request = transaction_proto.Transaction.Req()
request.type_req.CopyFrom(method)
return self._transaction._execute(request).type_res
def __str__(self):
return type(self).__name__ + "[label:" + self.get_label() + "]"
def __eq__(self, other):
if other is self:
return True
if not other or type(self) != type(other):
return False
return self._transaction is other._transaction and self.get_label() == other.get_label()
def __hash__(self):
return self._hash
```
#### File: grakn/logic/logic_manager.py
```python
from typing import Callable, List
import grakn_protocol.protobuf.logic_pb2 as logic_proto
import grakn_protocol.protobuf.transaction_pb2 as transaction_proto
from grakn.logic.rule import Rule
class LogicManager:
def __init__(self, transaction):
self._transaction = transaction
def put_rule(self, label: str, when: str, then: str):
req = logic_proto.LogicManager.Req()
put_rule_req = logic_proto.LogicManager.PutRule.Req()
put_rule_req.label = label
put_rule_req.when = when
put_rule_req.then = then
req.put_rule_req.CopyFrom(put_rule_req)
res = self._execute(req)
return Rule._of(res.put_rule_res.rule)
def get_rule(self, label: str):
req = logic_proto.LogicManager.Req()
get_rule_req = logic_proto.LogicManager.GetRule.Req()
get_rule_req.label = label
req.get_rule_req.CopyFrom(get_rule_req)
response = self._execute(req)
return Rule._of(response.get_rule_res.rule) if response.get_rule_res.WhichOneof("res") == "rule" else None
def get_rules(self):
method = logic_proto.LogicManager.Req()
method.get_rules_req.CopyFrom(logic_proto.LogicManager.GetRules.Req())
return self._rule_stream(method, lambda res: res.get_rules_res.rules)
def _execute(self, request: logic_proto.LogicManager.Req):
req = transaction_proto.Transaction.Req()
req.logic_manager_req.CopyFrom(request)
return self._transaction._execute(req).logic_manager_res
def _rule_stream(self, method: logic_proto.LogicManager.Req, rule_list_getter: Callable[[logic_proto.LogicManager.Res], List[logic_proto.Rule]]):
request = transaction_proto.Transaction.Req()
request.logic_manager_req.CopyFrom(method)
return map(Rule._of, self._transaction._stream(request, lambda res: rule_list_getter(res.logic_manager_res)))
```
#### File: type/thingtype/thing_type_steps.py
```python
from behave import *
from hamcrest import *
from grakn.common.exception import GraknClientException
from grakn.concept.thing.thing import Thing
from tests.behaviour.config.parameters import parse_bool, parse_list, RootLabel
from tests.behaviour.context import Context
@step("put {root_label:RootLabel} type: {type_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
if root_label == RootLabel.ENTITY:
context.tx().concepts().put_entity_type(type_label)
elif root_label == RootLabel.RELATION:
context.tx().concepts().put_relation_type(type_label)
else:
raise ValueError("Unrecognised value")
@step("delete {root_label:RootLabel} type: {type_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).delete()
assert False
except GraknClientException:
pass
@step("delete {root_label:RootLabel} type: {type_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
context.get_thing_type(root_label, type_label).as_remote(context.tx()).delete()
@step("{root_label:RootLabel}({type_label}) is null: {is_null}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, is_null):
is_null = parse_bool(is_null)
assert_that(context.get_thing_type(root_label, type_label) is None, is_(is_null))
@step("{root_label:RootLabel}({type_label}) set label: {new_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, new_label: str):
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_label(new_label)
@step("{root_label:RootLabel}({type_label}) get label: {get_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, get_label: str):
assert_that(context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_label(), is_(get_label))
@step("{root_label:RootLabel}({type_label}) set abstract: {is_abstract}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, is_abstract):
is_abstract = parse_bool(is_abstract)
thing_type = context.get_thing_type(root_label, type_label)
if is_abstract:
thing_type.as_remote(context.tx()).set_abstract()
else:
thing_type.as_remote(context.tx()).unset_abstract()
@step("{root_label:RootLabel}({type_label}) is abstract: {is_abstract}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, is_abstract):
is_abstract = parse_bool(is_abstract)
assert_that(context.get_thing_type(root_label, type_label).as_remote(context.tx()).is_abstract(), is_(is_abstract))
@step("{root_label:RootLabel}({type_label}) set supertype: {super_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, super_label: str):
if root_label == RootLabel.ENTITY:
entity_supertype = context.tx().concepts().get_entity_type(super_label)
try:
context.tx().concepts().get_entity_type(type_label).as_remote(context.tx()).set_supertype(entity_supertype)
assert False
except GraknClientException:
pass
elif root_label == RootLabel.ATTRIBUTE:
attribute_supertype = context.tx().concepts().get_attribute_type(super_label)
try:
context.tx().concepts().get_attribute_type(type_label).as_remote(context.tx()).set_supertype(attribute_supertype)
assert False
except GraknClientException:
pass
elif root_label == RootLabel.RELATION:
relation_supertype = context.tx().concepts().get_relation_type(super_label)
try:
context.tx().concepts().get_relation_type(type_label).as_remote(context.tx()).set_supertype(relation_supertype)
assert False
except GraknClientException:
pass
else:
raise ValueError("Unrecognised value")
@step("{root_label:RootLabel}({type_label}) set supertype: {super_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, super_label: str):
if root_label == RootLabel.ENTITY:
entity_supertype = context.tx().concepts().get_entity_type(super_label)
context.tx().concepts().get_entity_type(type_label).as_remote(context.tx()).set_supertype(entity_supertype)
elif root_label == RootLabel.ATTRIBUTE:
attribute_supertype = context.tx().concepts().get_attribute_type(super_label)
context.tx().concepts().get_attribute_type(type_label).as_remote(context.tx()).set_supertype(attribute_supertype)
elif root_label == RootLabel.RELATION:
relation_supertype = context.tx().concepts().get_relation_type(super_label)
context.tx().concepts().get_relation_type(type_label).as_remote(context.tx()).set_supertype(relation_supertype)
else:
raise ValueError("Unrecognised value")
@step("{root_label:RootLabel}({type_label}) get supertype: {super_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, super_label: str):
supertype = context.get_thing_type(root_label, super_label)
assert_that(context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_supertype(), is_(supertype))
@step("{root_label:RootLabel}({type_label}) get supertypes contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_supertypes()))
for super_label in super_labels:
assert_that(super_label, is_in(actuals))
@step("{root_label:RootLabel}({type_label}) get supertypes do not contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_supertypes()))
for super_label in super_labels:
assert_that(super_label, not_(is_in(actuals)))
@step("{root_label:RootLabel}({type_label}) get subtypes contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
sub_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_subtypes()))
for sub_label in sub_labels:
assert_that(sub_label, is_in(actuals))
@step("{root_label:RootLabel}({type_label}) get subtypes do not contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
sub_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_subtypes()))
for sub_label in sub_labels:
assert_that(sub_label, not_(is_in(actuals)))
@step("{root_label:RootLabel}({type_label}) set owns key type: {att_type_label} as {overridden_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str, overridden_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
overridden_type = context.tx().concepts().get_attribute_type(overridden_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, overridden_type, is_key=True)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set owns key type: {att_type_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, is_key=True)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set owns key type: {att_type_label} as {overridden_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str, overridden_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
overridden_type = context.tx().concepts().get_attribute_type(overridden_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, overridden_type, is_key=True)
@step("{root_label:RootLabel}({type_label}) set owns key type: {att_type_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, is_key=True)
@step("{root_label:RootLabel}({type_label}) unset owns attribute type: {att_type_label}; throws exception")
@step("{root_label:RootLabel}({type_label}) unset owns key type: {att_type_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).unset_owns(attribute_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) unset owns attribute type: {att_type_label}")
@step("{root_label:RootLabel}({type_label}) unset owns key type: {att_type_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).unset_owns(attribute_type)
@step("{root_label:RootLabel}({type_label}) get owns key types contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
attribute_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_owns(keys_only=True)))
for attribute_label in attribute_labels:
assert_that(attribute_label, is_in(actuals))
@step("{root_label:RootLabel}({type_label}) get owns key types do not contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
attribute_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_owns(keys_only=True)))
for attribute_label in attribute_labels:
assert_that(attribute_label, not_(is_in(actuals)))
@step("{root_label:RootLabel}({type_label}) set owns attribute type: {att_type_label} as {overridden_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str, overridden_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
overridden_type = context.tx().concepts().get_attribute_type(overridden_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, overridden_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set owns attribute type: {att_type_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set owns attribute type: {att_type_label} as {overridden_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str, overridden_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
overridden_type = context.tx().concepts().get_attribute_type(overridden_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type, overridden_type)
@step("{root_label:RootLabel}({type_label}) set owns attribute type: {att_type_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, att_type_label: str):
attribute_type = context.tx().concepts().get_attribute_type(att_type_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_owns(attribute_type)
@step("{root_label:RootLabel}({type_label}) get owns attribute types contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
attribute_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_owns()))
for attribute_label in attribute_labels:
assert_that(attribute_label, is_in(actuals))
@step("{root_label:RootLabel}({type_label}) get owns attribute types do not contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
attribute_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_owns()))
for attribute_label in attribute_labels:
assert_that(attribute_label, not_(is_in(actuals)))
@step("{root_label:RootLabel}({type_label}) set plays role: {scope}:{role_label} as {overridden_scope}:{overridden_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str, overridden_scope: str, overridden_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
overridden_type = context.tx().concepts().get_relation_type(overridden_scope).as_remote(context.tx()).get_relates(overridden_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_plays(role_type, overridden_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set plays role: {scope}:{role_label} as {overridden_scope}:{overridden_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str, overridden_scope: str, overridden_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
overridden_type = context.tx().concepts().get_relation_type(overridden_scope).as_remote(context.tx()).get_relates(overridden_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_plays(role_type, overridden_type)
@step("{root_label:RootLabel}({type_label}) set plays role: {scope}:{role_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_plays(role_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) set plays role: {scope}:{role_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).set_plays(role_type)
@step("{root_label:RootLabel}({type_label}) unset plays role: {scope}:{role_label}; throws exception")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
try:
context.get_thing_type(root_label, type_label).as_remote(context.tx()).unset_plays(role_type)
assert False
except GraknClientException:
pass
@step("{root_label:RootLabel}({type_label}) unset plays role: {scope}:{role_label}")
def step_impl(context: Context, root_label: RootLabel, type_label: str, scope: str, role_label: str):
role_type = context.tx().concepts().get_relation_type(scope).as_remote(context.tx()).get_relates(role_label)
context.get_thing_type(root_label, type_label).as_remote(context.tx()).unset_plays(role_type)
@step("{root_label:RootLabel}({type_label}) get playing roles contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
role_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_scoped_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_plays()))
for role_label in role_labels:
assert_that(role_label, is_in(actuals))
@step("{root_label:RootLabel}({type_label}) get playing roles do not contain")
def step_impl(context: Context, root_label: RootLabel, type_label: str):
role_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_scoped_label(), context.get_thing_type(root_label, type_label).as_remote(context.tx()).get_plays()))
for role_label in role_labels:
assert_that(role_label, not_(is_in(actuals)))
@step("thing type root get supertypes contain")
def step_impl(context: Context):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.tx().concepts().get_root_thing_type().as_remote(context.tx()).get_supertypes()))
for super_label in super_labels:
assert_that(super_label, is_in(actuals))
@step("thing type root get supertypes do not contain")
def step_impl(context: Context):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.tx().concepts().get_root_thing_type().as_remote(context.tx()).get_supertypes()))
for super_label in super_labels:
assert_that(super_label, not_(is_in(actuals)))
@step("thing type root get subtypes contain")
def step_impl(context: Context):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.tx().concepts().get_root_thing_type().as_remote(context.tx()).get_subtypes()))
for super_label in super_labels:
assert_that(super_label, is_in(actuals))
@step("thing type root get subtypes do not contain")
def step_impl(context: Context):
super_labels = parse_list(context.table)
actuals = list(map(lambda t: t.get_label(), context.tx().concepts().get_root_thing_type().as_remote(context.tx()).get_subtypes()))
for super_label in super_labels:
assert_that(super_label, not_(is_in(actuals)))
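# Illustrative usage (not part of the original source): a Gherkin line such as
#   entity(person) set owns key type: email
# would be matched by the "{root_label:RootLabel}({type_label}) set owns key type: {att_type_label}"
# pattern above, binding root_label=RootLabel.ENTITY, type_label="person" and
# att_type_label="email".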
``` |
{
"source": "jmsgrogan/MicropocketAssay",
"score": 3
} |
#### File: cornea/parameters/parameter_collection.py
```python
import os
import random
from copy import deepcopy
import cPickle as pickle
import chaste.core
from microvessel_chaste.utility import *
_simulation_domains = ["Planar_2D",
"Planar_2D_Finite",
"Circle_2D",
"Planar_3D",
"Planar_3D_Finite",
"Circle_3D",
"Hemisphere"]
class Study():
def __init__(self, work_dir, parameter_collection):
self.work_dir = work_dir
self.parameter_collection = parameter_collection
self.range = []
self.random_realisations = 3
self.simulation_domains = _simulation_domains
def get_task_list(self):
task_list = []
for eachParameterSet in self.range:
for idx in range(eachParameterSet[1]):
if eachParameterSet[1] == 1:
param_value = self.parameter_collection.get_parameter(eachParameterSet[0]).value
else:
param = self.parameter_collection.get_parameter(eachParameterSet[0])
param_range = (param.max - param.min)*param.value
param_value = param.min*param.value + float(idx)/float(eachParameterSet[1])*param_range
for eachDomain in self.simulation_domains:
for jdx in range(self.random_realisations):
local_collection = deepcopy(self.parameter_collection)
local_collection.get_parameter("DomainType").value = eachDomain
local_collection.get_parameter("RandomSeed").value = random.randint(0, 1e6)
local_collection.get_parameter(eachParameterSet[0]).value = param_value
simulation_path = self.work_dir + "/ParamName_" + eachParameterSet[0].replace(" ", "") + "/"
simulation_path += "ParamValue_" + str(idx) + "/DomainType_" + eachDomain.replace(" ", "") + "/Run_"+str(jdx)+"/"
task_list.append([simulation_path, local_collection])
return task_list
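# Worked example (added for clarity): for a parameter with value=10.0, min_val=0.5,
# max_val=1.5 swept over 4 points, param_range = (1.5 - 0.5)*10.0 = 10.0 and the
# sampled values are 0.5*10.0 + idx/4*10.0, i.e. 5.0, 7.5, 10.0 and 12.5, each run
# for every simulation domain and random realisation.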
class SimulationParameterCollection:
def __init__(self, random_seed=1234):
self.collection = {}
self.random_seed = random_seed
def add_parameter(self, parameter):
self.collection[parameter.name] = parameter
def save(self, file_name):
if not os.path.exists(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
with open(file_name, 'wb') as fp:
pickle.dump([self.collection, self.random_seed], fp)
# Human friendly version
output_file = open(os.path.splitext(file_name)[0]+".csv", "w")
for eachKey in self.collection.keys():
param = self.collection[eachKey]
output_file.write(param.name + " , " + str(param.value) + "\n")
output_file.close()
def load(self, file_name):
with open(file_name, 'rb') as fp:
self.collection, self.random_seed = pickle.load(fp)
def get_parameter(self, name):
return self.collection[name]
class Parameter:
def __init__(self, name, value, min_val=1.0, max_val=1.0,
symbol=None, nice_name=None, lit_source=None):
self.name = name
self.value = value
self.min = min_val
self.max = max_val
self.value_as_string = ""
self.store_value_as_string()
self.symbol = symbol
self.nice_name = nice_name
self.lit_source = lit_source
def __getstate__(self):
self.store_value_as_string()
d = dict(self.__dict__)
del d['value']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.update_value_from_string()
def store_value_as_string(self):
if hasattr(self.value, 'GetValue'):
symbol = get_symbol(type(self.value))
self.value_as_string = str(self.value.GetValue()) + " " + symbol
else:
self.value_as_string = str(self.value)
def update_value_from_string(self):
split_string = self.value_as_string.split()
left_string = split_string[0]
if left_string == "False":
self.value = False
elif left_string == "True":
self.value = True
elif left_string in _simulation_domains:
self.value = self.value_as_string
else:
self.value = float(left_string)
if len(split_string) > 1 and (left_string not in _simulation_domains):
unit_string = ''.join(split_string[1:])
self.value *= get_unit(unit_string)
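# Note (added for clarity, not in the original source): __getstate__ above drops the
# live `value`, which may carry microvessel_chaste unit objects that do not pickle
# cleanly, and keeps only `value_as_string`; __setstate__ rebuilds the value from that
# string, so a SimulationParameterCollection survives a pickle save()/load() round trip.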
if __name__ == "__main__":
import cornea.parameters.default_parameters
work_dir = "Python/Cornea/TestParameters/"
file_handler = chaste.core.OutputFileHandler(work_dir, True)
collection = cornea.parameters.default_parameters.get_default_collection()
collection.save(file_handler.GetOutputDirectoryFullPath()+"/parameters.p")
collection.load(file_handler.GetOutputDirectoryFullPath()+"/parameters.p")
for eachParameter in collection.collection.values():
print eachParameter.value
```
#### File: cornea/postprocessing/batch_postprocess_comps.py
```python
import os
import pickle
import matplotlib
import matplotlib.pyplot as plt
import chaste
from microvessel_chaste.utility import *
from cornea.postprocessing.plotting_tools import *
from cornea.postprocessing import plot_collection
# Matplotlib global settings
matplotlib.rcParams.update({'font.size': 18})
plt.locator_params(nticks=4)
class PostProcessingTaskManager(object):
def __init__(self, relative_work_dir):
self.tasks = []
self.study_data = None
self.parameters = [OutputParameter(name="Line_density"),
OutputParameter(name="Tip_density"), ]
self.break_indices = []
file_handler = chaste.core.OutputFileHandler(relative_work_dir, False)
self.work_dir = file_handler.GetOutputDirectoryFullPath()
self.load_study_data()
def load_study_data(self):
f = open(self.work_dir + "study_data.p", 'r')
self.study_data = pickle.load(f)
def setup_density_line_plots(self):
for eachStudy in self.study_data["study_names"]:
for eachDomain in self.study_data["domain_types"]:
num_random = len(self.study_data["random_seeds"])
for idx in range(num_random):
local_work_dir = get_path(self.work_dir, eachStudy, eachDomain, str(idx))
for eachParam in self.parameters:
fig_dir = local_work_dir + "/density_plots/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
fig_path = fig_dir + "/" + eachParam.name + ".png"
task = plot_collection.DensityLinePlot(self.work_dir,
eachStudy,
eachDomain,
idx,
eachParam,
fig_path)
self.tasks.append(task)
def setup_line_density_plot_merge(self):
for eachStudy in self.study_data["study_names"]:
for eachDomain in self.study_data["domain_types"]:
num_random = len(self.study_data["random_seeds"])
outer_figure_paths = []
for idx in range(num_random):
local_work_dir = get_path(self.work_dir,
eachStudy,
eachDomain,
str(idx))
figure_paths = []
for eachParam in self.parameters:
fig_path = local_work_dir + "/density_plots/" + eachParam.name + ".png"
figure_paths.append(fig_path)
merge_path = local_work_dir + "/density_plots/merge.png"
outer_figure_paths.append(merge_path)
task = plot_collection.MergePlots(self.work_dir,
merge_path,
figure_paths,
merge_axis=0)
self.tasks.append(task)
merge_path = get_path(self.work_dir, eachStudy, eachDomain) + "/merge.png"
task = plot_collection.MergePlots(self.work_dir,
merge_path,
outer_figure_paths,
merge_axis=1)
self.tasks.append(task)
def setup_box_plots(self):
for eachStudy in self.study_data["study_names"]:
local_work_dir = get_path(self.work_dir, eachStudy)
fig_dir = local_work_dir + "/box_plots/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
task = plot_collection.BoxPlot(self.work_dir,
eachStudy,
self.study_data["domain_types"],
len(self.study_data["random_seeds"]),
self.parameters,
fig_dir)
self.tasks.append(task)
def setup_pde_plots(self):
for eachStudy in self.study_data["study_names"]:
local_work_dir = get_path(self.work_dir, eachStudy)
fig_dir = local_work_dir + "/pde_plots/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
task = plot_collection.PdePlot(self.work_dir,
eachStudy,
self.study_data["domain_types"],
len(self.study_data["random_seeds"]),
self.parameters,
fig_dir)
self.tasks.append(task)
def setup_max_tip_density_plots(self):
for eachStudy in self.study_data["study_names"]:
local_work_dir = get_path(self.work_dir, eachStudy)
fig_dir = local_work_dir + "/max_density_plots/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
task = plot_collection.MaxTipDensityPlot(self.work_dir,
eachStudy,
self.study_data["domain_types"],
len(self.study_data["random_seeds"]),
self.parameters,
fig_dir)
self.tasks.append(task)
def setup_max_conc_plots(self):
for eachStudy in self.study_data["study_names"]:
local_work_dir = get_path(self.work_dir, eachStudy)
fig_dir = local_work_dir + "/max_conc_plots/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
task = plot_collection.MaxConcPlot(self.work_dir,
eachStudy,
self.study_data["domain_types"],
len(self.study_data["random_seeds"]),
self.parameters,
fig_dir)
self.tasks.append(task)
def setup_front_pos_plots(self):
for eachStudy in self.study_data["study_names"]:
local_work_dir = get_path(self.work_dir, eachStudy)
fig_dir = local_work_dir + "/front_position/"
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
task = plot_collection.FrontPosPlot(self.work_dir,
eachStudy,
self.study_data["domain_types"],
len(self.study_data["random_seeds"]),
self.parameters,
fig_dir)
self.tasks.append(task)
```
#### File: cornea/postprocessing/sampling_grid.py
```python
import vtk
import numpy as np
from microvessel_chaste.utility import *
def GetDensityMetrics(file_path, domain, pc):
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file_path)
reader.Update()
grid = reader.GetOutput()
line_density = grid.GetCellData().GetArray("Line Density")
line_density_volume = 0.0
total_volume = 0.0
offset = pc.get_parameter("LimbalOffset").value
offset = offset.Convert(1.0e-6*metres)
height = pc.get_parameter("PelletHeight").value
height = height.Convert(1.0e-6*metres)
for idx in range(grid.GetNumberOfCells()):
points = grid.GetCell(idx).GetPoints()
loc = np.zeros(3)
for jdx in range(points.GetNumberOfPoints()):
if jdx == 0:
loc = np.array(points.GetPoint(jdx))
else:
loc += np.array(points.GetPoint(jdx))
loc /= float(points.GetNumberOfPoints())
cell_volume = abs(vtk.vtkMeshQuality.HexVolume(grid.GetCell(idx)))
total_volume += cell_volume
cell_line_density = line_density.GetTuple1(idx)
if cell_line_density >= 1.e-9:
line_density_volume += cell_volume
line_fraction = line_density_volume/total_volume
return [line_fraction, 1.0]
def DoLineSampling(file_path, domain, pc):
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file_path)
reader.Update()
sample_spacing = 60.0
height = pc.get_parameter("PelletHeight").value
height = height.Convert(1.0e-6*metres)
radius = pc.get_parameter("CorneaRadius").value
radius = radius.Convert(1.0e-6*metres)
width = 2.0*np.pi*radius
num_samples = int(height/sample_spacing)
points = vtk.vtkPoints()
for idx in range(num_samples):
points.InsertNextPoint(width/2.0, float(idx*sample_spacing), 0.0)
poly = vtk.vtkPolyData()
poly.SetPoints(points)
probe = vtk.vtkProbeFilter()
probe.SetSourceData(reader.GetOutput())
probe.SetInputData(poly)
probe.Update()
results = probe.GetOutput().GetPointData().GetArray("Line Density")
for idx in range(results.GetNumberOfTuples()):
print "y", points.GetPoint(idx)[1], " rho ", results.GetTuple1(idx)
``` |
{
"source": "jmsgrogan/PyChaste",
"score": 2
} |
#### File: dynamic/wrapper_generators/doxygen_extractor.py
```python
class doxygen_doc_extractor:
"""
Extracts Doxygen styled documentation from source or generates it from description.
"""
def __init__(self):
#for caching source
self.file_name = None
self.source = None
#__init__
def __call__(self, declaration):
try:
if self.file_name != declaration.location.file_name:
self.file_name = declaration.location.file_name
self.source = open(declaration.location.file_name).readlines()
find_block_end = False
doc_lines = []
for lcount in xrange(declaration.location.line-2, -1, -1):
line = self.source[lcount]
if not find_block_end:
try:
if line.rstrip()[-2:] == "*/":
find_block_end = True
except:
pass
if find_block_end:
try:
if line.lstrip()[:2] == "/*":
find_block_end = False
except:
pass
final_str = self.clear_str(line)
if not find_block_end and self.is_code(line):
break
if final_str:
doc_lines.insert(0, final_str)
except:
pass
finally:
if doc_lines:
final_doc_lines = [ line.replace("\n","\\n") for line in doc_lines[:-1] ]
final_doc_lines.append(doc_lines[-1].replace("\n",""))
return '"' + ''.join(final_doc_lines) + '"'
else:
return '\"\"'
#__call__()
def clear_str(self, tmp_str):
"""
Replace */! by space and \brief, @fn, \param, etc
"""
tmp_str = tmp_str.replace("/**", "")
tmp_str = tmp_str.replace("*", "")
tmp_str = tmp_str.replace("*/", "")
# tmp_str = reduce(clean, [tmp_str, '/','*','!',"\\brief","@brief","\\fn","@fn","\\ref","@ref", "\"", "\'", "\\c"])
#
# #commands list taken form : http://www.stack.nl/~dimitri/doxygen/commands.html
# replacement_list = [
# # "a",
# "addindex",
# "addtogroup",
# "anchor",
# "arg",
# "attention",
# "author",
# # "b",
# # "brief",
# "bug",
# # "c",
# "callgraph",
# "callergraph",
# "category",
# "class",
# ("code","[Code]"),
# "cond",
# "copybrief",
# "copydetails",
# "copydoc",
# "date",
# "def",
# "defgroup",
# "deprecated",
# "details",
# "dir",
# "dontinclude",
# ("dot","[Dot]"),
# "dotfile",
# "e",
# "else",
# "elseif",
# "em",
# ("endcode","[/Code]"),
# "endcond",
# ("enddot","[/Dot]"),
# "endhtmlonly",
# "endif",
# "endlatexonly",
# "endlink",
# "endmanonly",
# "endmsc",
# "endverbatim",
# "endxmlonly",
# "enum",
# "example",
# "exception",
# "extends",
# "f$",
# "f[",
# "f]",
# "f{",
# "f}",
# "file",
# # "fn",
# "headerfile",
# "hideinitializer",
# "htmlinclude",
# "htmlonly",
# "if",
# "ifnot",
# "image",
# "implements",
# "include",
# "includelineno",
# "ingroup",
# "internal",
# "invariant",
# "interface",
# "latexonly",
# "li",
# "line",
# "link",
# "mainpage",
# "manonly",
# "memberof",
# "msc",
# # "n",
# "name",
# "namespace",
# "nosubgrouping",
# "note",
# "overload",
# # "p",
# "package",
# "page",
# "par",
# "paragraph",
# "param",
# "post",
# "pre",
# # "private (PHP only)",
# # "privatesection (PHP only)",
# "property",
# # "protected (PHP only)",
# # "protectedsection (PHP only)",
# "protocol",
# # "public (PHP only)",
# # "publicsection (PHP only)",
# # "ref",
# "relates",
# "relatesalso",
# "remarks",
# "return",
# "retval",
# "sa",
# "section",
# "see",
# "showinitializer",
# "since",
# "skip",
# "skipline",
# "struct",
# "subpage",
# "subsection",
# "subsubsection",
# "test",
# "throw",
# ("todo","TODO"),
# "tparam",
# "typedef",
# "union",
# "until",
# "var",
# "verbatim",
# "verbinclude",
# "version",
# "warning",
# "weakgroup",
# "xmlonly",
# "xrefitem",
# # "$",
# # "@",
# # "\",
# # "&",
# # "~",
# # "<",
# # ">",
# # "#",
# # "%",
# ]
#
# for command in replacement_list:
# try:
# old,new = command
# except ValueError:
# old = command
# new = command.capitalize()+":"
# tmp_str = clean(tmp_str, "@"+old, new)
# tmp_str = clean(tmp_str, "\\"+old, new)
return tmp_str.lstrip()
#clean_str()
def is_code(self, tmp_str):
"""
Detects if tmp_str is code or not
"""
try:
beg = tmp_str.lstrip()[:2]
return beg != "//" and beg != "/*"
except:
pass
return False
#is_code()
#class doxygen_doc_extractor
```
#### File: chaste/cell_based/fortests.py
```python
"""
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
import chaste.core
import chaste.cell_based
def SetupNotebookTest():
simulation_time = chaste.cell_based.SimulationTime.Instance()
simulation_time.SetStartTime(0.0)
chaste.core.RandomNumberGenerator.Instance().Reseed(0)
chaste.cell_based.CellId.ResetMaxCellId()
def TearDownNotebookTest():
simulation_time = chaste.cell_based.SimulationTime.Instance()
simulation_time.Destroy()
chaste.core.RandomNumberGenerator.Instance().Destroy()
#chaste.cell_based.CellPropertyRegistry.Instance().Clear()
class AbstractCellBasedTestSuite(unittest.TestCase):
def setUp(self):
simulation_time = chaste.cell_based.SimulationTime.Instance()
simulation_time.SetStartTime(0.0)
chaste.core.RandomNumberGenerator.Instance().Reseed(0)
chaste.cell_based.CellId.ResetMaxCellId()
def tearDown(self):
simulation_time = chaste.cell_based.SimulationTime.Instance()
simulation_time.Destroy()
chaste.core.RandomNumberGenerator.Instance().Destroy()
#chaste.cell_based.CellPropertyRegistry.Instance().Clear()
class AbstractCellBasedWithTimingsTestSuite(AbstractCellBasedTestSuite):
def setUp(self):
chaste.core.Timer().Reset()
super(AbstractCellBasedWithTimingsTestSuite, self).setUp()
def tearDown(self):
super(AbstractCellBasedWithTimingsTestSuite, self).tearDown()
chaste.core.Timer().Print("Test elapsed");
``` |
{
"source": "jmsgrogan/Stack3D",
"score": 3
} |
#### File: stack3d/formats/extract_zeiss_metadata.py
```python
from argparse import ArgumentParser
import os
import subprocess
import logging
import utility
import ome_schema
def extract_metadata(input_path, output_path):
"""
Extract OME metadata from the input file and write it out as a nicely formatted xml using
bftools. (http://www.openmicroscopy.org/site/support/bio-formats5.3/users/comlinetools/display.html)
"""
bf_tools_dir = os.getenv('BFTOOLS_DIR', os.getcwd()) + "/"
command = bf_tools_dir +"showinf -omexml-only -nopix " + input_path + " | " + bf_tools_dir + "xmlindent > " + output_path
p = subprocess.Popen(command, shell=True)
p.wait()
def get_metadata_as_class(input_xml_path):
"""
Return the OME metadata from the input XML file as a Python class. The class is automatically generated
using pyxbgen (http://pyxb.sourceforge.net/pyxbgen_cli.html) and the current OME XML Schema
(https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd).
If you need to use a newer schema you need to regenerate the file ome_schema.py by doing:
pip install pyxb
pyxbgen -m ome_schema -u https://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd
where the web address points to the new schema. You can then access the elements of the OME XML as
instance attributes etc.
"""
xml = open(input_xml_path).read()
image_metadata = ome_schema.CreateFromDocument(xml)
return image_metadata
def integer_color_to_rgb(color):
"""
Convert integer color to (r,g,b)
"""
return ((color >> 16) & 255, (color >> 8) & 255, color & 255)
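# Illustrative check (not in the original source): 0xFF0000 == 16711680, so
# integer_color_to_rgb(16711680) returns (255, 0, 0).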
def print_metadata_overview(image_metadata):
"""
Print a reader-friendly metadata summary
"""
print "Number of Images: ", len(image_metadata.Image)
print "Image '0' - Name: ", image_metadata.Image[0].Name
print "Image '0' - Num Channels: ", image_metadata.Image[0].Pixels.SizeC
print "Image '0' - Num Times: ", image_metadata.Image[0].Pixels.SizeT
pixel_size_x = image_metadata.Image[0].Pixels.PhysicalSizeX
pixel_size_y = image_metadata.Image[0].Pixels.PhysicalSizeY
pixel_size_z = image_metadata.Image[0].Pixels.PhysicalSizeZ
pixel_unit_x = image_metadata.Image[0].Pixels.PhysicalSizeXUnit
pixel_unit_y = image_metadata.Image[0].Pixels.PhysicalSizeYUnit
pixel_unit_z = image_metadata.Image[0].Pixels.PhysicalSizeZUnit
print "Image '0' - Pixel Physical Size X: ", pixel_size_x, pixel_unit_x
print "Image '0' - Pixel Physical Size Y: ", pixel_size_y, pixel_unit_y
print "Image '0' - Pixel Physical Size Z: ", pixel_size_z, pixel_unit_z
print "Image '0' - Pixel Size X: ", image_metadata.Image[0].Pixels.SizeX
print "Image '0' - Pixel Size Y:", image_metadata.Image[0].Pixels.SizeY
print "Image '0' - Pixel Size Z:", image_metadata.Image[0].Pixels.SizeZ
print "Image '0' - Pixel Dimension Order: ", image_metadata.Image[0].Pixels.DimensionOrder
print "Image '0' - Pixel Bits: ", image_metadata.Image[0].Pixels.SignificantBits
for idx, eachChannel in enumerate(image_metadata.Image[0].Pixels.Channel):
print "Image '0' - Channel " +str(idx) + " Color: ", integer_color_to_rgb(eachChannel.Color)
if __name__ == "__main__":
# Do setup
tool_name = "extract_metadata"
utility.do_setup(tool_name)
logger1 = logging.getLogger('format_conversion.'+tool_name)
# Suppress XML Parse warnings
pyxb_logger = logging.getLogger('pyxb')
pyxb_logger.setLevel(logging.CRITICAL)
parser = ArgumentParser()
parser.add_argument("-i", "--input_file", type=str, help='Input file in a ZEISS format.')
parser.add_argument("-o", "--output_file", type=str, help='Output metadata file.')
parser.add_argument("--verbose", type=bool, help='Output a simple metadata summary.')
args = parser.parse_args()
logger1.info('Reading Metadata At: ' + args.input_file)
extract_metadata(args.input_file, args.output_file)
if(args.verbose):
image_metadata = get_metadata_as_class(args.output_file)
print_metadata_overview(image_metadata)
logger1.info('Completed Reading Metadata')
``` |
{
"source": "JMSHDev/regent.dev",
"score": 2
} |
#### File: api/device/models.py
```python
import hashlib
import random
import string
import logging
from django.db import models
LOG = logging.getLogger(__name__)
class Device(models.Model):
name = models.CharField(max_length=50, unique=True)
customer = models.CharField(max_length=50)
agent_status = models.CharField(max_length=10, default="offline")
program_status = models.CharField(max_length=10, default="down")
last_updated = models.DateTimeField(auto_now=True)
def delete_mqtt_credentials(self):
self.auth.all().delete()
self.acl.all().delete()
class MqttAuth(models.Model):
username = models.CharField(max_length=100, unique=True)
password = models.CharField(max_length=100)
salt = models.CharField(max_length=10)
activated = models.BooleanField(default=False)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="auth", related_query_name="auth", null=True
)
def __str__(self):
return "activated" if self.activated else "not activated"
@classmethod
def create(cls, username, password, activated, device=None):
salt = "".join(random.choice(string.ascii_letters) for _ in range(10))
password = hashlib.sha256((password + salt).encode("utf-8")).hexdigest()
return MqttAuth(username=username, password=password, salt=salt, activated=activated, device=device)
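    # Illustrative sketch (not part of the original model): verifying a candidate
    # password would mirror create() by re-hashing with the stored salt, e.g.
    #   hashlib.sha256((candidate + auth.salt).encode("utf-8")).hexdigest() == auth.password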
class MqttAcl(models.Model):
allow = models.SmallIntegerField()
ipaddr = models.CharField(max_length=60, null=True)
username = models.CharField(max_length=100, null=True)
clientid = models.CharField(max_length=100, null=True)
access = models.SmallIntegerField()
topic = models.CharField(max_length=100)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="acl", related_query_name="acl", null=True
)
class Telemetry(models.Model):
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="telemetry", related_query_name="telemetry"
)
created_on = models.DateTimeField(auto_now_add=True)
state = models.JSONField()
```
#### File: api/device/views.py
```python
from rest_framework.reverse import reverse
from rest_framework.decorators import action, api_view
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, ListModelMixin
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_403_FORBIDDEN, HTTP_200_OK
from rest_framework.permissions import IsAuthenticated, AllowAny
from django_filters import FilterSet, DateFilter, CharFilter
from device.serializers import (
DeviceSerializer,
RegisterDeviceSerializer,
ActivateDeviceSerializer,
MqttMessageSerializer,
TelemetrySerializer,
)
from device.models import Device, Telemetry
from device.services.device_registration import register, activate
from device.services.device_state import update
class DeviceViewSet(RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, ListModelMixin, GenericViewSet):
queryset = Device.objects.all()
serializer_class = DeviceSerializer
permission_classes = [IsAuthenticated]
@action(detail=False, permission_classes=[AllowAny], methods=["post"], serializer_class=RegisterDeviceSerializer)
def register(self, request, *args, **kwargs):
serializer = RegisterDeviceSerializer(data=request.data)
if serializer.is_valid():
try:
reg_result = register(serializer.data["customer_id"], serializer.data["device_id"])
if reg_result["success"]:
return Response(reg_result["content"], HTTP_201_CREATED)
else:
return Response(reg_result["content"], HTTP_403_FORBIDDEN)
except Exception as exp:
return Response("Error while registering the device.", HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, HTTP_400_BAD_REQUEST)
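    # Illustrative request body for the register action above (field names follow the
    # RegisterDeviceSerializer usage; the URL prefix depends on the router configuration
    # and is assumed here):
    #   POST /api/devices/register/   {"customer_id": "acme", "device_id": "pump-01"}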
@action(detail=False, permission_classes=[AllowAny], methods=["post"], serializer_class=ActivateDeviceSerializer)
def activate(self, request, *args, **kwargs):
serializer = ActivateDeviceSerializer(data=request.data)
if serializer.is_valid():
try:
act_result = activate(serializer.data["device_id"], serializer.data["password"])
if act_result["success"]:
return Response(act_result["content"], HTTP_200_OK)
else:
return Response(act_result["content"], HTTP_403_FORBIDDEN)
except Exception as exp:
return Response("Error whole activating the device.", HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, HTTP_400_BAD_REQUEST)
def perform_destroy(self, instance):
instance.delete_mqtt_credentials()
instance.delete()
class UpdateDeviceState(APIView):
permission_classes = [AllowAny]
def post(self, request, format=None):
serializer = MqttMessageSerializer(data=request.data)
if serializer.is_valid():
try:
act_result = update(serializer.data)
if act_result["success"]:
return Response(act_result["content"], HTTP_200_OK)
else:
return Response(act_result["content"], HTTP_403_FORBIDDEN)
except Exception as exp:
return Response("Error while updating the device.", HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, HTTP_400_BAD_REQUEST)
class PingViewSet(GenericViewSet, ListModelMixin):
permission_classes = [IsAuthenticated]
def list(self, request, *args, **kwargs):
return Response(data={"id": request.GET.get("id")}, status=HTTP_200_OK)
@api_view(["GET"])
def privateapi_root(request, format=None):
return Response(
{
"update-device": reverse("update-device", request=request, format=format),
}
)
class TelemetryFilter(FilterSet):
start = DateFilter(field_name="created_on", lookup_expr="gte")
end = DateFilter(field_name="created_on", lookup_expr="lte")
device = CharFilter(field_name="device__name")
class Meta:
model = Telemetry
fields = ("start", "end")
class TelemetryViewSet(RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, ListModelMixin, GenericViewSet):
queryset = Telemetry.objects.all()
serializer_class = TelemetrySerializer
permission_classes = [IsAuthenticated]
filterset_class = TelemetryFilter
``` |
{
"source": "jmshen1994/SetExpan",
"score": 3
} |
#### File: src/dataProcessing/entityResolutionAndFilter.py
```python
from textblob import Word
from collections import defaultdict
import sys
def resolution(surfaceName):
'''
input: a surface name of entity
output: the "normalized" entity name
process: 1) lowercase
2) lemmatization
'''
tmp = [Word(ele.lower()).lemmatize() for ele in surfaceName.split()]
# tmp = [ele.lower() for ele in surfaceName.split()]
return " ".join(tmp)
def main(corpusName, min_sup = -1):
data = corpusName
min_sup = int(min_sup)
inputFileName = '../../data/'+data+'/intermediate/entitylist.txt'
outputFileName = '../../data/'+data+'/intermediate/entity2id.txt'
uniqueEntityNameFileOut = '../../data/'+data+'/intermediate/eidlist.txt'
eid = 0
ename2eid = {}
normalized_ename2eid = {}
normalized_ename2freq = defaultdict(int)
with open(inputFileName,"r") as fin:
for line in fin:
segs = line.strip().split("\t")
ename = segs[0]
freq = int(segs[1])
normalized_ename = resolution(ename)
if normalized_ename in normalized_ename2eid: # already exist
ename2eid[ename] = normalized_ename2eid[normalized_ename]
normalized_ename2freq[normalized_ename] += freq
else: # a new entity
normalized_ename2eid[normalized_ename] = eid
normalized_ename2freq[normalized_ename] += freq
ename2eid[ename] = eid
eid += 1
print("Number of entities between (potential) filtering = %s" % eid)
filtered_eid = set()
if min_sup != -1:
print("Filtering entities with too small occurrences")
for ele in normalized_ename2freq.items():
if ele[1] < min_sup:
## add the eid into the filtered set
filtered_eid.add(normalized_ename2eid[ele[0]])
print("Number of filtered entities = %s" % len(filtered_eid))
with open(outputFileName,"w") as fout:
for ele in sorted(ename2eid.items(), key = lambda x:x[0]):
if ele[1] not in filtered_eid:
fout.write(ele[0]+"\t"+str(ele[1])+"\n")
with open(uniqueEntityNameFileOut,"w") as fout:
for ele in sorted(normalized_ename2eid.items(), key = lambda x:x[1] ):
if ele[1] not in filtered_eid:
fout.write(ele[0]+"\t"+str(ele[1])+"\n")
if __name__ == '__main__':
corpusName = sys.argv[1]
min_sup = sys.argv[2]
main(corpusName, min_sup)
```
#### File: src/dataProcessing/extractFeatures.py
```python
import sys
import json
import itertools
def getSkipgrams(tokens, start, end):
positions = [(-1, 1), (-2, 1), (-3, 1), (-1, 3), (-2, 2), (-1, 2)]
skipgrams = []
for pos in positions:
skipgrams.append(' '.join(tokens[start+pos[0]:start])+' __ '+' '.join(tokens[end+1:end+1+pos[1]]))
return skipgrams
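# Worked example (illustrative): with tokens = ["the", "city", "of", "los", "angeles",
# "is", "big"] and an entity mention spanning start=3, end=4 ("los angeles"), the
# position pair (-1, 1) joins tokens[2:3] and tokens[5:6] around the placeholder,
# giving the skipgram "of __ is".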
def processSentence(sent):
sentInfo = json.loads(sent)
eidSkipgrams = {}
eidTypes = {}
eidPairs = []
tokens = sentInfo['tokens']
eids = set()
for em in sentInfo['entityMentions']:
eid = em['entityId']
typeList = em['type'].split(",")
start = em['start']
end = em['end']
eids.add(eid)
for type in typeList:
key = (eid, type)
if key in eidTypes:
eidTypes[key] += 1
else:
eidTypes[key] = 1
for skipgram in getSkipgrams(tokens, start, end):
key = (eid, skipgram)
if key in eidSkipgrams:
eidSkipgrams[key] += 1
else:
eidSkipgrams[key] = 1
for pair in itertools.combinations(eids, 2):
eidPairs.append(frozenset(pair))
return eidSkipgrams, eidTypes, eidPairs
def writeMapToFile(map, outFilename):
with open(outFilename, 'w') as fout:
for key in map:
lkey = list(key)
fout.write(str(lkey[0])+'\t'+str(lkey[1])+'\t'+str(map[key])+'\n')
def updateMapFromMap(fromMap, toMap):
for key in fromMap:
if key in toMap:
toMap[key] += fromMap[key]
else:
toMap[key] = fromMap[key]
return toMap
def updateMapFromList(fromList, toMap):
for ele in fromList:
if ele in toMap:
toMap[ele] += 1
else:
toMap[ele] = 1
return toMap
def extractFeatures(dataname):
outputFolder = '../../data/'+dataname+'/intermediate/'
infilename = '../../data/'+dataname+'/source/sentences.json'
eidSkipgramCounts = {}
eidTypeCounts = {}
eidPairCounts = {}
with open(infilename, 'r') as fin:
ct = 0
for line in fin:
eidSkipgrams, eidTypes, eidPairs = processSentence(line)
updateMapFromMap(eidSkipgrams, eidSkipgramCounts)
updateMapFromMap(eidTypes, eidTypeCounts)
updateMapFromList(eidPairs, eidPairCounts)
ct += 1
if ct % 100000 == 0 and ct != 0:
print('processed ' + str(ct) + ' lines')
writeMapToFile(eidSkipgramCounts, outputFolder+'eidSkipgramCounts.txt')
writeMapToFile(eidTypeCounts, outputFolder+'eidTypeCounts.txt')
writeMapToFile(eidPairCounts, outputFolder+'eidPairCounts.txt')
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Usage: extractFeatures.py -data')
exit(1)
corpusName = sys.argv[1]
extractFeatures(corpusName)
```
#### File: src/dataProcessing/prepareFormatForEmbed_type.py
```python
import sys
import json
import itertools
from collections import Counter
from collections import defaultdict
def getEid2EnameMap(inputFile):
eid2ename = {}
with open(inputFile,"r") as fin:
for line in fin:
line = line.strip()
if line:
segs = line.split("\t")
eid = int(segs[1])
ename = "_".join(segs[0].split())
eid2ename[eid] = ename
return eid2ename
data=sys.argv[1]
#infname = '/shared/data/zeqiuwu1/fts-summer/data/'+data+'/source/sentences.json'
infname = '../../data/'+data+'/intermediate/eidTypeCounts.txt'
etfname = '../../data/'+data+'/intermediate/ef.txt'
tsetfname = '../../data/'+data+'/intermediate/f.txt'
eidTypeCounts = defaultdict(float)
typeSet = set()
inputEntityMapName = '../../data/'+data+'/intermediate/entity2id.txt'
eid2ename = getEid2EnameMap(inputEntityMapName)
with open(infname, 'r') as fin:
line_cnt = 0
for line in fin:
seg = line.strip('\r\n').split('\t')
eid = int(seg[0])
t = seg[1]
if t not in typeSet:
typeSet.add(t)
eidTypeCounts[(eid,t)] += float(seg[2])
'''
for line in fin:
line_cnt += 1
if (line_cnt % 100000 == 0 and line_cnt != 0):
print(line_cnt)
sentInfo = json.loads(line.strip('\r\n'))
tokens = sentInfo['tokens']
for em in sentInfo['entityMentions']:
eid = em['entityId']
types = em['type'].split(',')
if len(types) < 1:
continue
for t in types:
if t not in typeSet:
typeSet.add(t)
eidTypeCounts[(eid,t)] += 1
'''
print("Finish parsing data")
with open(etfname, "w") as fout:
for k,v in eidTypeCounts.items():
fout.write(eid2ename[k[0]] + "\t" + k[1] + "\t" + str(v) + "\n")
with open(tsetfname, "w") as fout:
for k in typeSet:
fout.write(str(k)+"\n")
```
#### File: src/SetExpan/util.py
```python
from collections import defaultdict
import set_expan
import eid_pair_TFIDF_selection
import extract_seed_edges
import extract_entity_pair_skipgrams
def loadEidToEntityMap(filename):
eid2ename = {}
ename2eid = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid2ename[int(seg[1])] = seg[0]
ename2eid[seg[0].lower()] = int(seg[1])
return eid2ename, ename2eid
def loadFeaturesAndEidMap(filename):
featuresetByEid = defaultdict(set)
eidsByFeature = defaultdict(set)
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid = int(seg[0])
feature = seg[1]
featuresetByEid[eid].add(feature)
eidsByFeature[feature].add(eid)
return featuresetByEid, eidsByFeature
def loadFeaturesAndEidPairMap(filename):
featuresetByEidPair = defaultdict(set)
eidPairsByFeature = defaultdict(set)
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eidPair = (int(seg[0]), int(seg[1]))
feature = seg[2]
featuresetByEidPair[eidPair].add(feature)
eidPairsByFeature[feature].add(eidPair)
return featuresetByEidPair, eidPairsByFeature
def loadWeightByEidAndFeatureMap(filename, idx = -1):
''' Load the (eid, feature) -> strength
:param filename:
:param idx: The index column of weight, default is the last column
:return:
'''
weightByEidAndFeatureMap = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid = int(seg[0])
feature = seg[1]
weight = float(seg[idx])
weightByEidAndFeatureMap[(eid, feature)] = weight
return weightByEidAndFeatureMap
def loadWeightByEidPairAndFeatureMap(filename, idx = -1):
''' Load the ((eid1, eid2), feature) -> strength
:param filename:
:param idx: The index column of weight, default is the last column
:return:
'''
weightByEidPairAndFeatureMap = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eidPair = (int(seg[0]), int(seg[1]))
feature = seg[2]
weight = float(seg[idx])
weightByEidPairAndFeatureMap[(eidPair, feature)] = weight
return weightByEidPairAndFeatureMap
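# Expected file layouts (inferred from the parsers above): tab-separated lines such as
#   42<TAB>skipgram __ foo<TAB>0.87          for (eid, feature, weight) files, and
#   42<TAB>77<TAB>skipgram __ foo<TAB>0.87   for (eid1, eid2, feature, weight) files.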
``` |
{
"source": "jmshen1994/SetRank",
"score": 2
} |
#### File: code/SetRank/autoSetRank_TREC.py
```python
import argparse
import sys
from collections import Counter
from collections import defaultdict
import itertools
import time
import numpy as np
import math
import pickle
import setRank_TREC
def string2dict(s):
d = {ele.split(":")[0]: float(ele.split(":")[1]) for ele in s.split(",")}
return d
def dict2string(d):
s = ",".join(str(k)+":"+str(d[k]) for k in d)
return s
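# Round-trip example (illustrative):
#   string2dict("title:20.0,abstract:5.0") == {"title": 20.0, "abstract": 5.0}
#   dict2string({"title": 20.0}) == "title:20.0"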
def multiSetRank(query_words_string, query_entities_string, kb, params_set, DEBUG=False):
bulk = []
for params in params_set:
retrieval_query = setRank_TREC.generate_retrieval_query(
query_string=query_words_string, entity_string=query_entities_string, field_weights=params, DEBUG=DEBUG
)
rescore_query = setRank_TREC.generate_rescore_query(
query_string=query_words_string, entity_string=query_entities_string, kb=kb, params=params, DEBUG=DEBUG
)
search_body = {
"size": 20,
"_source": ["pmid"],
"query": retrieval_query,
"rescore": {
"window_size": 1000,
"query": {
"rescore_query": rescore_query,
"query_weight": 0,
"rescore_query_weight": 1
}
}
}
op_dict = {"index": setRank_TREC.FLAGS_INDEX_NAME, "type": setRank_TREC.FLAGS_TYPE_NAME}
bulk.append(op_dict)
bulk.append(search_body)
start = time.time()
resp = setRank_TREC.es.msearch(body=bulk, request_timeout=1800)["responses"]
end = time.time()
print("Finish retrieve %s pre-rankers' results using %s seconds" % (len(bulk)/2, (end-start)))
rankings = []
for res in resp:
ranking = [hit["_source"]["pmid"] for hit in res["hits"]["hits"]]
rankings.append(ranking)
return rankings
def rankAggregate(doc_rankings, maxIters=10, distanceMetric='KT', checkConverge=False, DEBUG=False):
## Step 1: Construct the document pool
docCounter = sorted(Counter(itertools.chain(*doc_rankings)).items(), key=lambda x: -x[1])
docno2docid = {} # docid aligns with the frequency of docno in all ranking list
docid2docno = {}
for idx, ele in enumerate(docCounter): # notice: a small docid indicates most frequent documents
docno2docid[ele[0]] = idx
docid2docno[idx] = ele[0]
rankings = []
docid2positions = defaultdict(list) # docid -> [(position in rank list, len of rank list)]
for i, doc_ranking in enumerate(doc_rankings):
ranking = []
k = len(doc_ranking) # current rank list i is of length k
for j, docno in enumerate(doc_ranking):
docid = docno2docid[docno]
ranking.append(docid)
docid2positions[docid].append((i, j, k)) # current document is at position j of rank list i which is of size k
rankings.append(ranking)
p = len(doc_rankings)
K = len(docno2docid)
if DEBUG:
print("Number of ranker p = %s" % p)
print("Size of document pool K = %s" % K)
# for _, r in enumerate(rankings):
# print("Ranking list %s : \n \t\t%s" % (_, r))
# for j in sorted(docid2positions.keys()):
# print(j, docid2positions[j])
# for docid in docid2docno:
# print(docid, "=>", docid2docno[docid])
## Step 2: Iteratively apply weighted rank aggregation
alphas = np.ones(p) / p
prev_aggregated_rank = None
convergedFlag = False
for iter in range(maxIters):
## weighted Borda Counting
docid2scores = defaultdict(float)
for docid in docid2positions:
score = 0.0
for pos in docid2positions[docid]:
score += (alphas[pos[0]] * (pos[2]-pos[1]))
docid2scores[docid] = score
aggregated_rank = [ele[0] for ele in sorted(docid2scores.items(), key = lambda x:-x[1])]
docid2rank = {docid:r for r, docid in enumerate(aggregated_rank)}
if DEBUG:
print("Iteration: %s, aggregated list: %s" % (iter, aggregated_rank))
# print("Iteration: %s, docid2rank: %s" % (iter, docid2rank))
if aggregated_rank == prev_aggregated_rank:
print("Converged at iteration %s" % iter)
convergedFlag = True
break
else:
if DEBUG and prev_aggregated_rank:
# print("alpha:", alphas)
differences = [] # (docno, prev_rank, current_rank)
for i in range(len(prev_aggregated_rank)):
if docid2rank[prev_aggregated_rank[i]] != i:
differences.append((docid2docno[prev_aggregated_rank[i]], i, docid2rank[prev_aggregated_rank[i]]))
for ele in differences:
print("Position changed doc:", ele)
prev_aggregated_rank = aggregated_rank
## confidence score alignment
positions2discouts = {}
consider_not_appeared_docs = False
for r_id, r in enumerate(rankings):
k = len(r)
distance = 0.0
## Include influence of those not appeared documents
if consider_not_appeared_docs:
not_appeared_docs = set(docid2rank.keys()) - set(r) # set of docids that are not appeared in current rank list
for a in range(k-1):
for b in range(a+1,k) :
pi_a = docid2rank[r[a]]
pi_b = docid2rank[r[b]]
if pi_a > pi_b: # a position inversion
if distanceMetric == "dKT": # discounted KT distance
if (pi_a, pi_b) in positions2discouts:
discount = positions2discouts[(pi_a, pi_b)]
else:
# change zero-index to one-index
discount = (1.0 / math.log(1+pi_b+1, 2)) - (1.0 / math.log(1+pi_a+1, 2))
positions2discouts[(pi_a, pi_b)] = discount
distance += (discount * 1.0)
elif distanceMetric == 'KT': # normal KT distance
distance += 1.0
else:
print("[ERROR] Unsupported distanceMetric: %s" % distanceMetric)
if consider_not_appeared_docs:
for not_appeared_doc in not_appeared_docs:
pi_appear = docid2rank[r[a]]
pi_not_appeared_doc = docid2rank[not_appeared_doc]
if pi_not_appeared_doc > pi_appear:
if distanceMetric == "dKT": # discounted KT distance
if (pi_not_appeared_doc, pi_appear) in positions2discouts:
discount = positions2discouts[(pi_not_appeared_doc, pi_appear)]
else:
# change zero-index to one-index
discount = (1.0 / math.log(1 + pi_appear + 1, 2)) - (1.0 / math.log(1 + pi_not_appeared_doc + 1, 2))
positions2discouts[(pi_not_appeared_doc, pi_appear)] = discount
distance += (discount * 1.0)
elif distanceMetric == 'KT': # normal KT distance
distance += 1.0
else:
print("[ERROR] Unsupported distanceMetric: %s" % distanceMetric)
alphas[r_id] = math.exp(-1.0 * distance)
# print("positions2discouts", positions2discouts)
Z = sum(alphas)
print("Iteration: %s, confidence scores normalizer = %s" % (iter, Z))
alphas = alphas / Z
# print("Iteration: %s, confidence scores: %s" % (iter, alphas))
if not convergedFlag:
print("Not converged after %s iterations" % maxIters)
aggregated_rank_docno = [docid2docno[docid] for docid in aggregated_rank]
return (alphas, aggregated_rank_docno)
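# Note on the discounted Kendall tau (dKT) used above: an inversion between aggregate
# positions pi_a > pi_b contributes 1/log2(pi_b + 2) - 1/log2(pi_a + 2) (positions are
# zero-indexed, hence the +2), so disagreements near the top of the aggregated list are
# penalised more heavily than those near the bottom.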
def rankAggregateCorpus(corpus_doc_rankings, maxIters=10, distanceMetric="KT", checkConverge=False, DEBUG=False):
p = len(corpus_doc_rankings[0]) # number of distinct rankers
if DEBUG:
print("Number of ranker p = %s" % p)
alphas = np.ones(p) / p
for iter in range(maxIters):
if DEBUG:
print("Iteration: %s" % iter)
# print("Alphas: %s" % alphas)
alpha_distances = np.zeros(p)
## go through the query set
for qid, doc_rankings in enumerate(corpus_doc_rankings):
## obtain the docid
docCounter = sorted(Counter(itertools.chain(*doc_rankings)).items(), key=lambda x: -x[1])
docno2docid = {}
docid2docno = {}
for idx, ele in enumerate(docCounter): # notice: a small docid indicates most frequent documents
docno2docid[ele[0]] = idx
docid2docno[idx] = ele[0]
rankings = []
docid2positions = defaultdict(list) # docid -> [(position in rank list, len of rank list)]
for i, doc_ranking in enumerate(doc_rankings):
ranking = []
k = len(doc_ranking) # current rank list i is of length k
for j, docno in enumerate(doc_ranking):
docid = docno2docid[docno]
ranking.append(docid)
docid2positions[docid].append((i, j, k)) # current document is at position j of rank list i which is of size k
rankings.append(ranking)
## weighted Borda Counting
docid2scores = defaultdict(float)
for docid in docid2positions:
score = 0.0
for pos in docid2positions[docid]:
score += (alphas[pos[0]] * (pos[2] - pos[1]))
docid2scores[docid] = score
aggregated_rank = [ele[0] for ele in sorted(docid2scores.items(), key=lambda x: -x[1])]
docid2rank = {docid: r for r, docid in enumerate(aggregated_rank)}
## accumlate each parameter's dKT
positions2discouts = {}
query_distance_sum = 0
for r_id, r in enumerate(rankings): # r_id is the index of its corresponding parameter
k = len(r)
distance = 0.0
for a in range(k - 1):
for b in range(a + 1, k):
pi_a = docid2rank[r[a]]
pi_b = docid2rank[r[b]]
if pi_a > pi_b: # a position inversion
if distanceMetric == "dKT": # discounted KT distance
if (pi_a, pi_b) in positions2discouts:
discount = positions2discouts[(pi_a, pi_b)]
else:
# change zero-index to one-index
discount = (1.0 / math.log(1 + pi_b + 1, 2)) - (1.0 / math.log(1 + pi_a + 1, 2))
positions2discouts[(pi_a, pi_b)] = discount
distance += (discount * 1.0)
query_distance_sum += (discount * 1.0)
elif distanceMetric == 'KT': # normal KT distance
distance += 1.0
query_distance_sum += 1.0
else:
print("[ERROR] Unsupported distanceMetric: %s" % distanceMetric)
# accumlate the distance
alpha_distances[r_id] += distance
# if DEBUG:
# print("query_distance_sum for query %s = %s" % (qid,query_distance_sum))
if DEBUG:
Z_distance = sum(alpha_distances)
print("Sum of distances at iteration %s = %s" % (iter, Z_distance))
# print("Distances at iteration %s = %s" % (iter, alpha_distances))
## Adjust confidence score
# alpha_distances = np.exp(-1.0 * alpha_distances)
alpha_distances = 1.0 / alpha_distances
Z = sum(alpha_distances)
alphas = alpha_distances / Z
return alphas
def main(args):
queries = setRank_TREC.load_query(args)
kb = setRank_TREC.load_kb(args)
result_all = []
## Step 1: determine the anchor parameter and the parameters that we want to tune
anchor_params = {
'title': 20.0, 'abstract': 5.0,
'title_ana': 20.0, 'abstract_ana': 5.0,
'title_mu': 1000.0, 'abstract_mu': 1000.0,
'title_ana_mu': 1000.0, 'abstract_ana_mu': 1000.0,
"entity_lambda": 0.5, "type_interaction": 1.0,
"consider_entity_set": 1.0, "consider_word_set" : 1.0, "consider_type":1.0, "word_dependency":1.0
}
params_names = ["title", "abstract", "title_mu", "abstract_mu", "entity_lambda"]
## Step 2: fix the parameter set that we want to tune, based on the mode
if args.mode == "tune" or args.mode == "tune-best-rank":
params_values = [
[5.0, 10.0, 15.0, 20.0], # -> title
[1.0, 3.0, 5.0, 7.0], # -> abstract
[500, 1000, 1500, 2000], # -> title_mu
[500, 1000, 1500, 2000], # -> abstract_mu
[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], # -> entity_lambda
]
all_combinations = list(itertools.product(*params_values))
params_set = []
for ele in all_combinations:
tmp_params = anchor_params.copy()
for param_index, param_value in enumerate(ele):
tmp_params[params_names[param_index]] = param_value
params_set.append(tmp_params)
elif args.mode == "rank":
params_values = [
[15.0, 3.0, 15.0, 0.3, 1.0],
[15.0, 3.0, 15.0, 0.3, 0.5],
[15.0, 3.0, 15.0, 0.3, 1.5],
[20.0, 3.0, 15.0, 0.3, 1.5],
[15.0, 3.0, 15.0, 0.4, 0.5]
]
params_set = []
for ele in params_values:
print("ele:", ele)
tmp_params = anchor_params.copy()
for param_index, param_value in enumerate(ele):
print("param_index", param_index, "param_value", param_value)
tmp_params[params_names[param_index]] = param_value
params_set.append(tmp_params)
else:
print("Unsupported mode: %s" % args.mode)
return
## Step 3: auto model selection over either query or corpus level
if args.agglevel == "query":
saved_result = (int(args.load_pre_saved_rankings) == 1) ## load results from query
if saved_result:
print("=== Loading pre-saved ranking results ===")
with open(args.pre_saved_rankings, "rb") as fin:
all_docno_rankings = pickle.load(fin) # a list of docno_rankings
else:
print("=== Cannot load pre-saved ranking results, generate rankings from scratch ===")
all_docno_rankings = {} # query_id -> docno_rankings
confidence_over_all_queries = np.zeros(len(params_set))
for query in queries:
query_id = query[0]
query_string = query[1]
query_entities_list = []
for k, v in query[2].items():
for i in range(v):
query_entities_list.append(k)
query_entities_string = " ".join(query_entities_list)
print("=== Running query: %s (id = %s) ===" % (query_string, query_id))
if saved_result:
rankings = all_docno_rankings[query_id]
else:
rankings = multiSetRank(query_string, query_entities_string, kb, params_set, DEBUG=False)
all_docno_rankings[query_id] = rankings
(confidences, aggregated_rank) = rankAggregate(rankings, DEBUG=True)
confidence_over_all_queries += confidences
if args.mode == "tune-best-rank": # use the best parameter to rank this query again
best_parameter = params_set[np.argmax(confidences)]
print("Best parameters for query %s: %s" % (query_id, best_parameter))
res = setRank_TREC.setRank(query_string, query_entities_string, kb, best_parameter)
rank = 1
for hit in res['hits']['hits']:
result_all.append([query_id, "Q0", hit["_source"]["docno"], str(rank), str(hit["_score"]), "autoSetRank"])
rank += 1
else:
rank = 1
for docno in aggregated_rank:
result_all.append([query_id, "Q0", docno, str(rank), str(100-rank), "autoSetRank"])
rank += 1
## save results
if not saved_result:
with open(args.pre_saved_rankings, "wb") as fout:
print("=== Save rankings for next time's usage ===")
pickle.dump(all_docno_rankings, fout, protocol=pickle.HIGHEST_PROTOCOL)
elif args.agglevel == "corpus": ## corpus level aggregation
load_data_from_pickle = False
if load_data_from_pickle:
with open("all_rankings.pickle", "rb") as fin:
all_docno_rankings = pickle.load(fin)
else:
## step 1: obtain all query
all_docno_rankings = []
for query in queries:
query_id = query[0]
query_string = query[1]
query_entities_list = []
for k, v in query[2].items():
for i in range(v):
query_entities_list.append(k)
query_entities_string = " ".join(query_entities_list)
print("=== Running query %s (id = %s) ===" % (query_string, query_id))
rankings = multiSetRank(query_string, query_entities_string, kb, params_set, DEBUG=False)
all_docno_rankings.append(rankings)
with open(args.pre_saved_rankings, "wb") as fout:
pickle.dump(all_docno_rankings, fout, protocol=pickle.HIGHEST_PROTOCOL)
## step 2: rank aggregation
confidence_over_all_queries = rankAggregateCorpus(all_docno_rankings, DEBUG=True)
else:
print("[ERROR] Unsupported agglevel configuration: %s" % args.agglevel)
return
params2confidence = [(params, confidence_over_all_queries[i]) for i, params in enumerate(params_set)]
for ele in sorted(params2confidence, key = lambda x:-x[1])[0:10]:
print("Confidence = %s, parameters = %s" % (ele[1], ele[0]))
if args.mode == "query": # save results only for query level aggregation
setRank_TREC.save_results(args, result_all)
print("Finish saving results to path: %s" % args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='autoSetRank_TREC.py',
description='Use rank aggregation to automatically learn parameters in setRank'
                                    ' algorithm on TREC dataset.')
parser.add_argument('-query', required=False, default="../../data/TREC_BIO/trec_query.json",
help='File name of test queries.')
parser.add_argument('-output', required=False, default="../results/trec/auto-tune.run",
help='File name of output results.')
parser.add_argument('-kb', required=False, default="../../data/TREC_BIO/trec_entity_type.tsv")
parser.add_argument('-mode', required=False, default="tune",
help="mode can be 'tune', 'rank', 'tune-best-rank'."
"tune: aggregate over all candidate parameters and save the aggregated rank list "
" based on all the candidate rank lists,"
"rank: use topK (selected) parameters to obtain pre-ranked list and aggregate them"
"tune-best-rank: first tune the query level best parameters and return the rank list for that"
" query using the parameter suits it best, only works for aggLevel=query")
parser.add_argument('-agglevel', required=False, default="query",
help="agglevel can be 'query' or 'corpus', and it represents the level of rank aggregation")
parser.add_argument('-pre_saved_rankings', required=False, default="",
help="name of (previously saved OR about to be saved) ranking results")
parser.add_argument('-load_pre_saved_rankings', required=False, default="0",
help="set load_pre_saved_rankings to True if using presaved rankings")
args = parser.parse_args()
sys.exit(main(args))
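# Example invocation (illustrative; the paths are the argparse defaults above and the
# pickle file name is hypothetical):
#   python autoSetRank_TREC.py -mode tune -agglevel query \
#       -query ../../data/TREC_BIO/trec_query.json \
#       -kb ../../data/TREC_BIO/trec_entity_type.tsv \
#       -output ../results/trec/auto-tune.run \
#       -pre_saved_rankings trec_rankings.pickle -load_pre_saved_rankings 0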
``` |
{
"source": "jmshi/reconnection",
"score": 2
} |
#### File: jmshi/reconnection/csheet3d.py
```python
import athena_read as ath
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import cPickle as pickle
from my_colors import get_jhcolors
############################################
def plot_jhist(dv,ncells,jmax,jmaxnorm,diss,dissnorm,size,orient,theta,ntime,vsheet=False,size2=None):
nframe = 2
nplot = 3
matplotlib.rcParams['figure.figsize'] = (12, 12.0*nframe/nplot)
fig = plt.figure()
fraction=0.046;pad=0.04
gs = gridspec.GridSpec(2, 3)
# calc dissipation rate per sheet for normalization or weighted average
ncelltot= float(np.sum(np.array(ncells)))
diss_sheet = np.array(ncells)*np.array(diss)
disstot= float(np.sum(diss_sheet))
for i in np.arange(6):
if (i==0):
hdata = np.array(size)[:,0]
vmin=-2; vmax=0.5 #in log10
nbins = 100
xlab = r'$l/H$'
# modify to plot the aspect ratio: xi/lambda
hdata = np.array(size)[:,1]/np.array(size)[:,2]
vmin=-0.5; vmax=3.0 #in log10
nbins = 100
xlab = r'$\xi/\lambda$'
#
print 'non-weighted averaged (l,xi,lambda,aspect) = (',np.average(np.array(size)[:,0]),' ,',\
np.average(np.array(size)[:,1]),' ,',\
np.average(np.array(size)[:,2]),',',\
np.average(np.array(size)[:,1]/np.array(size)[:,2]),')'
print 'vol-weighted averaged (l,xi,lambda,aspect) = (',np.sum(np.array(size)[:,0]*np.array(ncells))/ncelltot,' ,',\
np.sum(np.array(size)[:,1]*np.array(ncells))/ncelltot,' ,',\
np.sum(np.array(size)[:,2]*np.array(ncells))/ncelltot,', ',\
np.sum(np.array(size)[:,1]/np.array(size)[:,2]*np.array(ncells))/ncelltot,')'
print 'dis-weighted averaged (l,xi,lambda,aspect) = (',np.sum(np.array(size)[:,0]*diss_sheet)/disstot,' ,',\
np.sum(np.array(size)[:,1]*diss_sheet)/disstot,' ,',\
np.sum(np.array(size)[:,2]*diss_sheet)/disstot,', ',\
np.sum(np.array(size)[:,1]/np.array(size)[:,2]*diss_sheet)/disstot,')'
index,coef=-3.2,1e-2
if (i==1):
hdata = np.array(size)[:,0]
vmin=-3;vmax=1
nbins = 100
xlab1 = r'$\xi$'
xlab =r'$l$'
xlab2 = r'$\lambda$'
hdata1 = np.array(size)[:,1]
hdata2 = np.array(size)[:,2]
index,coef=-3.0,1e-2
if size2:
print "reach size2 loop"
hdata3 = np.array(size2)[:,2]
if (i==2):
hdata = (theta)
vmin = np.log10(np.min(hdata))
vmax = np.log10(np.max(hdata))
nbins = 100
if vsheet:
xlab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
print 'average theta/E_nu = ', np.average(hdata)
else:
xlab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
print 'average theta/E_eta = ', np.average(hdata)
index,coef=-2.0,1e-5
if (i==3):
hdata = (np.array(jmaxnorm))
vmin = np.log10(2) #np.log10(np.min(hdata))
vmax = np.log10(20) #np.log10(np.max(hdata))
nbins=100
if vsheet:
xlab = r'$\omega_{max}/\omega_{rms}$'
print 'average w_max/w_rms = ',np.average(hdata)
else:
xlab = r'$j_{max}/j_{rms}$'
print 'average j_max/j_rms = ',np.average(hdata)
if (i==4):
hdata = np.array(dissnorm)
vmin = np.log10(5) #np.log10(np.min(hdata))
vmax = np.log10(50) #np.log10(np.max(hdata))
nbins = 100
if vsheet:
xlab = r'$\langle\varepsilon_{\nu,sh}\rangle_i /\langle\varepsilon_{\nu}\rangle$'
print 'average epsilon = ', np.average(hdata)
else:
xlab = r'$\langle\varepsilon_{\eta,sh}\rangle_i /\langle\varepsilon_{\eta}\rangle$'
print 'average epsilon = ', np.average(hdata)
if (i==5):
hdata = np.array(ncells)*dv #/((x[1]-x[0])**3)
#vmin = -3; vmax = 0
vmin = np.log10(np.min(hdata))
vmax = np.log10(np.max(hdata))
nbins = 100
xlab = r'$v_{sh}/V$'
print 'average volume fraction per sheet = ',np.average(hdata)/8.0
print 'dis-weighted average volume fraction per sheet = ',np.average(hdata*diss_sheet)\
/8.0/np.average(diss_sheet)
print 'average volume fraction for all sheets = ',np.sum(hdata)/float(ntime)/8.0
print 'time averaged number of current sheets = ', len(ncells)/float(ntime)
#print 'dis-weighted average volume fraction for all sheets = ',np.sum(hdata*diss_sheet)\
# /float(ntime)/8.0/(disstot)
hist, bins = np.histogram(hdata, bins=np.logspace(vmin,vmax,nbins),density=1)
center = 10**((np.log10(bins[:-1])+np.log10(bins[1:]))/2)
if (i==1):
hist1, bins1 = np.histogram(hdata1, bins=np.logspace(vmin,vmax,nbins),density=1)
hist2, bins2 = np.histogram(hdata2, bins=np.logspace(vmin,vmax,nbins),density=1)
if size2:
hist3, bins3 = np.histogram(hdata3, bins=np.logspace(vmin,vmax,nbins),density=1)
fig.add_subplot(gs[i])
if (i< 3):
plt.plot(center,hist,'k-',label=xlab)
#n, bins, patches = plt.hist(hdata, bins=np.logspace(vmin,vmax,nbins), \
# normed=1, facecolor='green', alpha=0.5,label=xlab)
if i==1:
plt.plot(center,hist1,'b-',label=xlab1)
plt.plot(center,hist2,'g-',label=xlab2)
#n, bins, patches = plt.hist(hdata1, bins=np.logspace(vmin,vmax,nbins), \
# normed=1, facecolor='blue', alpha=0.35,label=xlab1)
if size2:
plt.plot(center,hist3,'g--')
plt.legend(loc=1,fontsize=20)
xlab = r'$(l,\xi,\lambda)/H$'
#plt.text(vmin,7000,'total counts: '+str(len(hdata)),size=20,color='g')
plt.plot(center,coef*center**index,'k:')
else:
plt.plot(center,hist,'k-')
#n, bins, patches = plt.hist(hdata, bins=np.logspace(vmin,vmax,nbins), normed=1, facecolor='blue', alpha=0.5)
plt.xlabel(xlab,size=20)
plt.ylabel('probability density',size=20)
plt.xlim([10**vmin,10**vmax])
plt.gca().set_xscale("log")
plt.gca().set_yscale("log",nonposy='clip')
#if(i==5):
# plt.xlim([1e4,10**4.3])
plt.tight_layout()
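# Illustrative call (the per-sheet lists ncells, jmax, diss, size, orient come from the
# jprop.* pickles loaded in get_normtheta further below, dv is the cell volume, and the
# normalized arrays jmaxnorm/dissnorm/theta can be built exactly as in get_normtheta):
#   plot_jhist(dv, ncells, jmax, jmaxnorm, diss, dissnorm, size, orient, theta, ntime)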
## only use the following for the paper
def plot_jhist_2panel(dv,ncells,jmax,jmaxnorm,diss,dissnorm,size,orient,theta,ntime,vsheet=False,size2=None):
nframe = 2
nplot = 1
matplotlib.rcParams['figure.figsize'] = (8.0,4.0)
fig = plt.figure()
fraction=0.046;pad=0.04
gs = gridspec.GridSpec(nplot, nframe)
# calc dissipation rate per sheet for normalization or weighted average
ncelltot= float(np.sum(np.array(ncells)))
diss_sheet = np.array(ncells)*np.array(diss)
disstot= float(np.sum(diss_sheet))
for i in np.arange(2):
if (i==0):
hdata = np.array(size)[:,0]
vmin=-3;vmax=1
nbins = 100
xlab1 = r'$\xi$'
xlab =r'$l$'
xlab2 = r'$\lambda_2$'
hdata1 = np.array(size)[:,1]
hdata2 = np.array(size)[:,2]
index,coef=-3.0,1e-2
if size2:
print "reach size2 loop"
hdata3 = np.array(size2)[:,2]
xlab3 = r'$\lambda_1$'
if (i==1):
hdata = (theta)
vmin = np.log10(np.min(hdata))
vmax = np.log10(np.max(hdata))
nbins = 100
if vsheet:
xlab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
print 'average theta/E_nu = ', np.average(hdata)
else:
xlab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
print 'average theta/E_eta = ', np.average(hdata)
index,coef=-2.0,1e-5
hist, bins = np.histogram(hdata, bins=np.logspace(vmin,vmax,nbins),density=1)
center = 10**((np.log10(bins[:-1])+np.log10(bins[1:]))/2)
if (i==0):
hist1, bins1 = np.histogram(hdata1, bins=np.logspace(vmin,vmax,nbins),density=1)
hist2, bins2 = np.histogram(hdata2, bins=np.logspace(vmin,vmax,nbins),density=1)
if size2:
hist3, bins3 = np.histogram(hdata3, bins=np.logspace(vmin,vmax,nbins),density=1)
fig.add_subplot(gs[i])
if (i< 3):
plt.plot(center,hist,'k-',label=xlab)
#n, bins, patches = plt.hist(hdata, bins=np.logspace(vmin,vmax,nbins), \
# normed=1, facecolor='green', alpha=0.5,label=xlab)
if i==0:
plt.plot(center,hist1,'b-',label=xlab1)
if size2:
plt.plot(center,hist3,'r-',label=xlab3)
plt.plot(center,hist2,'g-',label=xlab2)
#n, bins, patches = plt.hist(hdata1, bins=np.logspace(vmin,vmax,nbins), \
# normed=1, facecolor='blue', alpha=0.35,label=xlab1)
plt.legend(loc=1,fontsize=20,handlelength=1.5,handletextpad=0.05)
xlab = r'$(l,\xi,\lambda)/H$'
#plt.text(vmin,7000,'total counts: '+str(len(hdata)),size=20,color='g')
plt.plot(center,coef*center**index,'k:')
else:
plt.plot(center,hist,'k-')
#n, bins, patches = plt.hist(hdata, bins=np.logspace(vmin,vmax,nbins), normed=1, facecolor='blue', alpha=0.5)
plt.xlabel(xlab,fontsize=20)
plt.ylabel('probability density',fontsize=20)
plt.xlim([10**vmin,10**vmax])
plt.gca().set_xscale("log")
plt.gca().set_yscale("log",nonposy='clip')
#if(i==5):
# plt.xlim([1e4,10**4.3])
plt.tight_layout()
def plot_jcorr(ncells,jmax,jmaxnorm,diss,dissnorm,size,orient,theta,disscut=4e-5,ntime=21,project=np.array([1,0,0]),cmap='jet',vsheet=False):
"""
plot the correlation between quantities within current sheet
"""
nframe = 3
nplot = 3
enlarge = 2
matplotlib.rcParams['figure.figsize'] = (10*enlarge, enlarge*8.0*nframe/nplot)
fig = plt.figure()
fraction=0.046;pad=0.04
gs = gridspec.GridSpec(nframe, nplot)
mask = (theta > disscut)
# calc dissipation rate per sheet for normalization or weighted average
ncelltot= float(np.sum(np.array(ncells)))
diss_sheet = np.array(ncells)*np.array(diss)
disstot= float(np.sum(diss_sheet))
for i in np.arange(9):
#######################################
# (1) orientation
#######################################
if (i==0):
vec2d = np.array(orient)[mask,0]#np.cross(np.array(orient)[mask,0],project)
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{l}_x$'
ylab = r'$\hat{l}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,0]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
if (i==1):
vec2d = np.array(orient)[mask,1]
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{\xi}_x$'
ylab = r'$\hat{\xi}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,1]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
if (i==2):
vec2d = np.array(orient)[mask,2]
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{\lambda}_x$'
ylab = r'$\hat{\lambda}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,2]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
#######################################
# (2) 2d histogram of sheet dimensions
#######################################
if (i==3):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,1]
xlab = r'$l$'
ylab = r'$\xi$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
if (i==4):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,2]
xlab = r'$l$'
ylab = r'$\lambda$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
if (i==5):
hdata_x = np.array(size)[:,1]
hdata_y = np.array(size)[:,2]
xlab = r'$\xi$'
ylab = r'$\lambda$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
# modify to plot the corr btw xi/lambda and lambda
hdata_x = np.array(size)[:,1]
hdata_y = np.array(size)[:,1]/np.array(size)[:,2]
xlab = r'$\xi$'
ylab = r'$\xi/\lambda$'
xmin,xmax = -3,1.
ymin,ymax = 0.0, 3. #-0.5,3.0
nbin = 100
#######################################
# (3) dissipation below
#######################################
if (i==6):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(theta)
xlab = r'$l$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
if (i==7):
hdata_x = np.array(size)[:,1]
hdata_y = np.array(theta)
xlab = r'$\xi$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
# modify to plot corr btwn theta and aspect ratio
hdata_x = np.array(size)[:,1]/np.array(size)[:,2]
hdata_y = np.array(theta)
xlab = r'$\xi/\lambda$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -0.5,2.5
nbin = 100
if (i==8):
hdata_x = np.array(size)[:,2]
hdata_y = np.array(theta)
xlab = r'$\lambda$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
if i !=-1:
fig.add_subplot(gs[i],aspect='equal')
else:
fig.add_subplot(gs[i])
if i<3:
plt.scatter(hdata_x,hdata_y,s=0.02, marker = '.' )
plt.ylim([ymin,ymax])
plt.xlim([xmin,xmax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
if i==0: # add tilt angle measured in Zhdankin paper -17.5degree
tilt = -17.5/180.*np.pi
rx,ry = 2.*np.sin(tilt),2.*np.cos(tilt)
plt.plot([rx,-rx],[ry,-ry],'k:')
elif i<6:
#plt.scatter(hdata_x,hdata_y,s=0.01,marker='.')
#hst are binned in the dissipation values (normalized with total dissipation)
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=(np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)),\
weights=theta) #diss_sheet/disstot)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-5,vmax=-1,cmap=cmap)
#vmax=np.max(loghst),cmap=cmap)
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
#print 'sum(hst) = ',np.sum(hst)
#print 'min/max of xe = ',np.min(xe),np.max(xe)
#print 'min/max of ye = ',np.min(ye),np.max(ye)
plt.colorbar(pad=pad,fraction=fraction*0.8)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
if vsheet:
plt.title(r'$\theta_{\nu}/\mathcal{E}_{\nu}$',fontsize=15)
else:
plt.title(r'$\theta_{\eta}/\mathcal{E}_{\eta}$',fontsize=15)
if i==3:
index,coef=1.0,0.4
plt.plot(xe,coef*xe**index,'k:')
#plt.title(r'$\mathrm{Histogram\ of\ d_i:}$')
else:
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=[np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)],\
normed=True)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-3,vmax=9)
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
plt.colorbar(pad=pad,fraction=fraction)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
plt.title(r'$\log$'+'(PDF)',fontsize=15)
if i==6:
index,coef=2.0,1.5e-3
plt.plot(ye,coef*ye**index,'k:')
plt.tight_layout()
# linear regression
#n_abovezero = np.min([len(hdata_x[hdata_x > 0]),len(hdata_y[hdata_y > 0])])
#clean_data = np.log10(np.array(zip(hdata_x[:n_abovezero],hdata_y[:n_abovezero])))
#print clean_data
#coeff = np.polyfit(clean_data[:,0], clean_data[:,1], 1)
#print coeff
#break
#yfit = 10**(coeff[0]*clean_data[:,0]+coeff[1])
#ax.plot(clean_data[:,0],yfit,'r-')
# use the following to generate the figure used in the paper
def plot_jcorr_4panel(ncells,jmax,jmaxnorm,diss,dissnorm,size,orient,theta,disscut=4e-5,ntime=21,project=np.array([1,0,0]),cmap='jet',vsheet=False):
"""
plot the correlation between quantities within current sheet
"""
nframe = 2
nplot = 2
enlarge = 1
matplotlib.rcParams['figure.figsize'] = (8*enlarge, enlarge*7.0*nframe/nplot)
fig = plt.figure()
fraction=0.046;pad=0.04
gs = gridspec.GridSpec(nframe, nplot)
mask = (theta > disscut)
# calc dissipation rate per sheet for normalization or weighted average
ncelltot= float(np.sum(np.array(ncells)))
diss_sheet = np.array(ncells)*np.array(diss)
disstot= float(np.sum(diss_sheet))
for i in np.arange(4):
#######################################
# (1) orientation
# (1) replace it with aspect ratio vs. xi
#######################################
if (i==2):
#vec2d = np.array(orient)[mask,0]#np.cross(np.array(orient)[mask,0],project)
#hdata_x = vec2d[:,2]
#hdata_y = vec2d[:,1]
#xlab = r'$\hat{l}_x$'
#ylab = r'$\hat{l}_y$'
#xmin,xmax = -1,1
#ymin,ymax = -1,1
# modify to plot the corr btw xi/lambda and lambda
hdata_x = np.array(size)[:,1]
hdata_y = np.array(size)[:,1]/np.array(size)[:,2]
xlab = r'$\xi$'
ylab = r'$\xi/\lambda$'
xmin,xmax = -3,1.
ymin,ymax = 0.0, 3. #-0.5,3.0
nbin = 100
#######################################
# (2) 2d histogram of sheet dimensions
#######################################
if (i==0):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,1]
xlab = r'$l$'
ylab = r'$\xi$'
xmin,xmax = -3.0,1.0 #2.7,1
ymin,ymax = -3.0,1.0 #-2.7,0
nbin = 100
if (i==1):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,2]
xlab = r'$l$'
ylab = r'$\lambda_2$'
xmin,xmax = -3.0,1.0 #-2.7,1
ymin,ymax = -3.0,-1.8 #-2.7,0
nbin = 100
#######################################
# (3) dissipation below
#######################################
if (i==3):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(theta)
xlab = r'$l$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
#if i==2:
# fig.add_subplot(gs[i] ,aspect='equal')
#else:
fig.add_subplot(gs[i])
#if i==2:
# plt.scatter(hdata_x,hdata_y,s=0.03, marker = '.' )
# plt.ylim([ymin,ymax])
# plt.xlim([xmin,xmax])
# plt.xlabel(xlab,size=20)
# plt.ylabel(ylab,size=20)
# # add tilt angle measured in Zhdankin paper -17.5degree
# tilt = -17.5/180.*np.pi
# rx,ry = 2.*np.sin(tilt),2.*np.cos(tilt)
# plt.plot([rx,-rx],[ry,-ry],'k:')
#if i<2:
if i<3:
#plt.scatter(hdata_x,hdata_y,s=0.01,marker='.')
#hst are binned in the dissipation values (normalized with total dissipation)
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=(np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)),\
weights=theta) #diss_sheet/disstot)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-5,vmax=-1,cmap=cmap)
#vmax=np.max(loghst),cmap=cmap)
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
#print 'sum(hst) = ',np.sum(hst)
#print 'min/max of xe = ',np.min(xe),np.max(xe)
#print 'min/max of ye = ',np.min(ye),np.max(ye)
plt.colorbar(pad=pad,fraction=fraction)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
if vsheet:
plt.title(r'$\theta_{\nu}/\mathcal{E}_{\nu}$',fontsize=15)
else:
plt.title(r'$\theta_{\eta}/\mathcal{E}_{\eta}$',fontsize=15)
if i==0:
index,coef=1.0,0.4
plt.plot(xe,coef*xe**index,'k:')
if i==1:
index,coef=1./3.,0.008
xe = 10**(np.linspace(-3.0,0.4,100))
plt.plot(xe,coef*xe**index,'k:')
index,coef=0.,0.0065
xe = 10**(np.linspace(-1.8,1.0,50))
plt.plot(xe,coef*xe**index,'k:')
if i==2:
index,coef=2./3.,85.
xe = 10**(np.linspace(-3.0,0.4,100))
plt.plot(xe,coef*xe**index,'k:')
index,coef=1.,160.
xe = 10**(np.linspace(-1.8,1.0,50))
plt.plot(xe,coef*xe**index,'k:')
#plt.title(r'$\mathrm{Histogram\ of\ d_i:}$')
if i==3:
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=[np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)],\
normed=True)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-3,vmax=9,cmap=cmap)
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
plt.colorbar(pad=pad,fraction=fraction)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
plt.title(r'$\log$'+'(PDF)',fontsize=15)
index,coef=2.25,2.2e-3
plt.plot(ye,coef*ye**index,'k:')
plt.tight_layout()
# use the following to generate the figure used in the paper
def plot_jcorr_null(size,theta,size1,theta1,cmap1='hot_r',cmap='bone_r'):
"""
compare sheets with null and sheets w/o null
draw nonull sheets first then overlay with null sheets
then draw colorbar of these two..
"""
matplotlib.rcParams['figure.figsize'] = (5.5,5)
fig = plt.figure()
fraction=0.046;pad=0.04
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,1]
xlab = r'$l$'
ylab = r'$\xi$'
xmin,xmax = -3.0,1.0 #2.7,1
ymin,ymax = -3.0,1.0 #-2.7,0
nbin = 100
# 1) plot all sheets
cs0 = plt.scatter(hdata_x,hdata_y,s=3,marker='s',color='b')
#hst are binned in the dissipation values (normalized with total dissipation)
#hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=(np.logspace(ymin,ymax,nbin),\
# np.logspace(xmin,xmax,nbin)),\
# weights=theta) #diss_sheet/disstot)
#xx,yy = np.meshgrid(xe,ye)
#loghst = np.log10(hst+1e-12)
#cs1 = plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-8,vmax=-4,cmap=cmap)
#cbar1 = fig.colorbar(cs1,pad=pad,fraction=fraction,orientation='horizontal',ticks=[-6,-5,-4])
##plt.colorbar(cs1,pad=pad,fraction=fraction,orientation='horizontal',location='top')
#2) now plot the null sheets
hdata_x = np.array(size1)[:,0]
hdata_y = np.array(size1)[:,1]
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=(np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)),\
weights=theta1) #diss_sheet/disstot)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
cs2 = plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-5,vmax=-1,alpha=0.8,cmap=cmap1)
#plt.colorbar(cs2,pad=pad,fraction=fraction,orientation='vertical')
cbar2 = fig.colorbar(cs2,pad=pad,fraction=fraction,orientation='vertical',ticks=[-5,-4,-3,-2,-1])
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
plt.title(r'$\theta_{\eta}/\mathcal{E}_{\eta}$',fontsize=20)
index,coef=1.0,0.4
plt.plot(xe,coef*xe**index,'k:')
plt.axes().set_aspect('equal')
# script to get big Epsilon normalized with total dissipation
def get_path(targname='x2y4z1r64pmre4000'):
direname='/tigress/jiming/reconnect/athena/bin/'
targname=targname+'/'
return direname+targname
def get_normtheta(targ,ts,te,tstride,eta=2.5e-4,vsheet=False,thick=False,null=False,wth=1):
direname = get_path(targ)
# 1. load the volume averaged data
if vsheet:
fname = direname+'Unstra.out2.wth.tab'
else:
fname = direname+'Unstra.out2.jth.tab'
dtype = np.dtype([('time', 'f8'), ('jth2', 'f8'),('jsh2','f8'),('jrms2','f8')])
ahist = np.loadtxt(fname, dtype=dtype, usecols=(0,1,2,3))
tt = ahist['time']
jrms2 = ahist['jrms2']
jsh2 = ahist['jsh2']
# pick out ts to te
istart,iend = 0,0
if wth==1:
for i in np.arange(len(tt)):
if ts == int(tt[i]):
istart = i
break
for i in np.arange(len(tt)):
if (te-1) == int(tt[i]):
iend = i
break
    else: # wth=2: rho*omega^2 is computed instead of omega alone
for i in np.arange(len(tt)):
if ts == int(tt[i]):
istart = i
for i in np.arange(len(tt)):
if (te-1) == int(tt[i]):
iend = i
iend +=1
if iend==istart:
iend+=1
tt = tt[istart:iend]
jrms2 = jrms2[istart:iend]
jsh2 = jsh2[istart:iend]
jrms = np.sqrt(jrms2)
if vsheet:
print '<epsilon_{nu,sh}>/<epsilon_{nu}> = ',np.average(jsh2/jrms2)
else:
print '<epsilon_{eta,sh}>/<epsilon_{eta}> = ',np.average(jsh2/jrms2)
# 2. now load current sheets for each frame
# for each sheet we calc total dissipation
# and normalize it with jrms^2
if vsheet:
if wth == 1:
basename='vprop.'
appdname='.p'
else:
basename='vprop2.'
appdname='.p'
else:
basename='jprop.'
appdname='.p'
if thick:
basename='jprop_thick.'
if null:
basename='jprop_null.'
first_time = True
for i in np.arange(ts,te,tstride):
fname = direname+basename+str(i).zfill(5)+appdname
ncells,jmax,diss,size,orient = pickle.load( open(fname, "rb" ) )
if i != int(tt[i-ts]):
print 'at t = ',tt[i-ts]
return 0
else:
# sum(eta jsh^2)/ sum(eta jrms2) dissipation rate per sheet normalized
# with rms dissipation.equivalently it's theta normalized with
# instantaneous total dissipation (although the number of grid cells not included here)
diss =np.array(diss)/eta
ncells = np.array(ncells)
if first_time:
dissnorm = diss/jrms2[i-ts]
theta = diss*ncells/jrms2[i-ts]
jmaxnorm = np.array(jmax)/jrms[i-ts]
first_time = False
else:
dissnorm = np.hstack((dissnorm,diss/jrms2[i-ts]))
theta = np.hstack((theta,diss*ncells/jrms2[i-ts]))
jmaxnorm = np.hstack((jmaxnorm,np.array(jmax)/jrms[i-ts]))
# print 'at t = ',tt[i-ts]
# print 'jsh2 from pickled file: ',np.sum(diss*ncells)/float(np.sum(ncells))
# print 'jsh2 from tabulat file:',jsh2[i-ts]
# print 'jrms2 from tabulat file:',jrms2[i-ts]
return dissnorm,theta,jmaxnorm
# nbin = 100; nt = te-ts+1
# jth2 = np.zeros(nbin)
# cdfj2 = np.zeros(nbin)
# cdfvol = np.zeros(nbin)
# theta1 = get_normtheta('x2y4z1r64pm1re4000',50,51,1)
# theta2 = get_normtheta('x2y4z1r64pm1re4000',50,101,1)
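# Illustrative call (run name and frame range follow the commented examples above):
#   dissnorm, theta, jmaxnorm = get_normtheta('x2y4z1r64pm1re4000', 50, 101, 1, eta=2.5e-4)
# dissnorm holds each sheet's mean dissipation normalized by the frame's rms value,
# theta the sheet's total dissipation in the same normalization, and jmaxnorm the
# sheet's peak current density divided by j_rms of that frame.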
def get_curl(fname='Unstra.out2.00008.athdf'):
"""load 3d bfield and calc the current density"""
# ---
def curl(vx,vy,vz,dx,dy,dz):
[dzvx,dyvx,dxvx] = np.gradient(vx)
[dzvy,dyvy,dxvy] = np.gradient(vy)
[dzvz,dyvz,dxvz] = np.gradient(vz)
j2 = (dyvz/dy-dzvy/dz)**2
j2 += (dzvx/dz-dxvz/dx)**2
j2 += (dxvy/dx-dyvx/dy)**2
return j2
# ---
#data=ath.athdf(fname,quantities=['B1','B2','B3'])
time,data=ath.athdf(fname,quantities=['Bcc1'])
bx = data['Bcc1']
time,data=ath.athdf(fname,quantities=['Bcc2'])
by = data['Bcc2']
time,data=ath.athdf(fname,quantities=['Bcc3'])
bz = data['Bcc3']
x = data['x1f'];y = data['x2f']; z = data['x3f']
dx = dz = x[1]-x[0]; dy = y[1]-y[0]
#j2 = curl(bx,by,bz,dx,dy,dz)
j2 = curl(bx,by,bz,dx,dy,dz)
time,data=ath.athdf(fname,quantities=['vel1'])
bx = data['vel1']
time,data=ath.athdf(fname,quantities=['vel2'])
by = data['vel2']
time,data=ath.athdf(fname,quantities=['vel3'])
bz = data['vel3']
time,data=ath.athdf(fname,quantities=['rho'])
d = data['rho']
#w2 = curl(bx,by,bz,dx,dy,dz)*data['rho']
w2 = curl(bx,by,bz,dx,dy,dz)*d
return time,x,y,z,j2,w2
def plot_3slice(input_data,xslice=512,yslice=504,zslice=256,dmin=0,dmax=100,cmap='OrRd',figsize_x=6,figsize_y=8,label=None):
tmp = input_data
matplotlib.rcParams['figure.figsize'] = (figsize_x, figsize_y)
matplotlib.rcParams['image.cmap'] = cmap
gs = gridspec.GridSpec(2, 2,width_ratios=[2, 1],height_ratios=[4, 1])
ax1 = plt.subplot(gs[0])
fig1 = plt.imshow(tmp[zslice,:,:],origin='lower',extent =[-1,1,-2,2],vmin=dmin,vmax=dmax)
plt.ylabel('y',fontsize=15)
ax2 = plt.subplot(gs[1])
fig2 = plt.imshow(np.transpose(tmp[:,:,xslice]),origin='lower',extent=[-0.5,0.5,-2,2],vmin=dmin,vmax=dmax)
ax3 = plt.subplot(gs[2])
fig3 = plt.imshow(tmp[:,yslice,:],origin='lower',extent=[-1,1,-0.5,0.5],vmin=dmin,vmax=dmax)
plt.ylabel('z',fontsize=15)
plt.xlabel('x',fontsize=15)
ax4 = plt.subplot(gs[3])
ax4.axis('off')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("left", size="8%", pad=0.15)
ax1.tick_params(labelbottom='off')
ax2.tick_params(labelleft='off')
#ax2.get_yaxis().set_ticks([])
plt.colorbar(cax=cax)
    if label is not None:
ax3.text(1.8,0.08,label,fontsize=20,rotation=90)
plt.tight_layout()
def show_contour(x,y,input_data,dmin=0,dmax=100,cmap='OrRd',xspan=(0,0),yspan=(0,0),figsize_x=6,figsize_y=8,xlabel='x',ylabel='y'):
tmp = input_data
matplotlib.rcParams['figure.figsize'] = (figsize_x, figsize_y)
matplotlib.rcParams['image.cmap'] = cmap
#gs = gridspec.GridSpec(2, 2,width_ratios=[2, 1],height_ratios=[4, 1])
if xspan[0] == 0 and xspan[1] == 0:
xmin = -1; xmax=1
else:
xmin = xspan[0]; xmax=xspan[1]
if yspan[0] == 0 and yspan[1] == 0:
ymin = -2; ymax=2
else:
ymin = yspan[0]; ymax=yspan[1]
# reduce the img to required size
ids=np.abs(x-xmin).argmin()
ide=np.abs(x-xmax).argmin()
jds=np.abs(y-ymin).argmin()
jde=np.abs(y-ymax).argmin()
ndim = np.shape(tmp)
jde = jde if jde < ndim[0] else ndim[0]-1
ide = ide if ide < ndim[1] else ndim[1]-1
tmp = tmp[jds:jde,ids:ide]
fig1 = plt.imshow(tmp,origin='lower',extent =[xmin,xmax,ymin,ymax],vmin=dmin,vmax=dmax)
plt.ylabel(ylabel,fontsize=15)
plt.xlabel(xlabel,fontsize=15)
plt.colorbar(pad=0.05,fraction=0.05)
# plot 8-panels in x-z plane (jmag,bx,by,bz,wmag/rho,vx,dvy,vz)
def plot_8panel(t,x,y,j2u,w2u,b1,b2,b3,v1,v2,v3,d,xspan=(-1,1),yspan=(-0.5,0.5),yslice=502,step=10,nslice=1,isrho=False):
matplotlib.rcParams['figure.figsize'] = (15,40)
x1b = (x + 0.5*(x[1]-x[0]))[:-1]
x3b = (y + 0.5*(y[1]-y[0]))[:-1]
nx,nz = len(x1b),len(x3b)
count = 1
jmin,jmax = 0,5
bmin,bmax = -2,2
vmin,vmax = -1.5,1.5
rmin,rmax = 0,2
#xspan,yspan = (0.3,0.7),(-0.5,-0.1)
ncol,nrow = 4,22
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
cmap = 'seismic'
#yslice= 502
#step = 10
wu = np.sqrt(w2u)
wu /= np.average(wu)
ju = np.sqrt(j2u)
ju /= np.average(ju)
for i in (np.arange(nslice)-nslice/2):
ind = yslice+step*i
for nvar in np.arange(8): #// j2,vx,bx,by
cmap = 'seismic'
if nvar == 0:
var = ju[:,ind,:] #ysli(np.sqrt(j2u)/np.average(np.sqrt(j2u)))[:,yslice+step*i,:]
cmap = 'OrRd'
dmin,dmax = jmin,jmax
label = r'$|j|$'
elif nvar == 1:
var = b1[:,ind,:]
dmin,dmax = bmin,bmax
label = r'$B_x$'
elif nvar == 2:
var = b2[:,ind,:]
dmin,dmax = bmin,bmax
label = r'$B_y$'
elif nvar == 3:
var = b3[:,ind,:]
dmin,dmax = bmin,bmax
label = r'$B_z$'
elif nvar == 4:
if isrho:
var = d[:,ind,:]
dmin,dmax = rmin,rmax
label = r'$\rho$'
else:
var = wu[:,ind,:] #(np.sqrt(w2u)/np.average(np.sqrt(w2u)))[:,yslice+step*i,:]
cmap = 'OrRd'
dmin,dmax = jmin,jmax
label = r'$|\omega|$'
elif nvar == 5:
var = v1[:,ind,:]
dmin,dmax = vmin,vmax
label = r'$v_x$'
elif nvar == 6:
var = v2[:,ind,:]+1.5*1.0*np.array(np.tile(x1b,(nz,1)))
dmin,dmax = vmin,vmax
label = r'$\delta v_y$'
elif nvar == 7:
var = v3[:,ind,:]
dmin,dmax = vmin,vmax
label = r'$v_z$'
else:
print 'out plotting bound'
plt.subplot(nrow,ncol,count)
show_contour(x,y,var,dmin=dmin,dmax=dmax,xspan=xspan,yspan=yspan,cmap=cmap,xlabel='x',ylabel='z')
plt.text(xspan[1]-0.1,yspan[1]-0.05,label,fontsize=15)
if nvar > 0:
plt.tick_params(labelbottom='off')
plt.tick_params(labelleft='off')
count += 1
plt.tight_layout()
# plot 6-panels in x-z plane (jmag,bx,by,wmag/rho,vx,dvy)
def plot_6panel(t,x,y,j2u,w2u,b1,b2,v1,v2,d,xspan=(-1,1),yspan=(-0.5,0.5),yslice=502,nslice=1,isrho=False):
matplotlib.rcParams['figure.figsize'] = (10,3.5)
x1b = (x + 0.5*(x[1]-x[0]))[:-1]
x3b = (y + 0.5*(y[1]-y[0]))[:-1]
nx,nz = len(x1b),len(x3b)
count = 1
jmin,jmax = 0,5
bmin,bmax = -2,2
vmin,vmax = -1.5,1.5
rmin,rmax = 0,2
#xspan,yspan = (0.3,0.7),(-0.5,-0.1)
ncol,nrow = 3,2
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
cmap = 'seismic'
#yslice= 502
#step = 10
wu = np.sqrt(w2u)
wu /= np.average(wu)
ju = np.sqrt(j2u)
ju /= np.average(ju)
ind = yslice
for nvar in np.arange(6): #// j2,vx,bx,by
cmap = 'seismic'
if nvar == 0:
var = ju[:,ind,:] #ysli(np.sqrt(j2u)/np.average(np.sqrt(j2u)))[:,yslice+step*i,:]
cmap = 'OrRd'
dmin,dmax = jmin,jmax
label = r'$|j|$'
elif nvar == 1:
var = b1[:,ind,:]
dmin,dmax = bmin,bmax
label = r'$B_x$'
elif nvar == 2:
var = b2[:,ind,:]
dmin,dmax = bmin,bmax
label = r'$B_y$'
elif nvar == 3:
if isrho:
var = d[:,ind,:]
dmin,dmax = rmin,rmax
label = r'$\rho$'
else:
var = wu[:,ind,:] #(np.sqrt(w2u)/np.average(np.sqrt(w2u)))[:,yslice+step*i,:]
cmap = 'OrRd'
dmin,dmax = jmin,jmax
label = r'$|\omega|$'
elif nvar == 4:
var = v1[:,ind,:]
dmin,dmax = vmin,vmax
label = r'$v_x$'
elif nvar == 5:
var = v2[:,ind,:]+1.5*1.0*np.array(np.tile(x1b,(nz,1)))
dmin,dmax = vmin,vmax
label = r'$\delta v_y$'
else:
print 'out plotting bound'
plt.subplot(nrow,ncol,count)
show_contour(x,y,var,dmin=dmin,dmax=dmax,xspan=xspan,yspan=yspan,cmap=cmap,xlabel='x',ylabel='z')
plt.text(xspan[1]-0.2,yspan[1]+0.1,label,fontsize=15)
if nvar > 0:
plt.tick_params(labelbottom='off')
plt.tick_params(labelleft='off')
count += 1
plt.tight_layout()
def plot_exx(ncells,jmax,jmaxnorm,diss,dissnorm,size,orient,theta,disscut=4e-5,ntime=21,project=np.array([1,0,0]),cmap='jet',vsheet=False):
"""
plot the correlation between quantities within current sheet
"""
nframe = 3
nplot = 3
enlarge = 2
matplotlib.rcParams['figure.figsize'] = (10*enlarge, enlarge*8.0*nframe/nplot)
fig = plt.figure()
fraction=0.046;pad=0.04
gs = gridspec.GridSpec(nframe, nplot)
mask = (theta > disscut)
# calc dissipation rate per sheet for normalization or weighted average
ncelltot= float(np.sum(np.array(ncells)))
diss_sheet = np.array(ncells)*np.array(diss)
disstot= float(np.sum(diss_sheet))
for i in np.arange(9):
#######################################
# (1) orientation
#######################################
if (i==0):
vec2d = np.array(orient)[mask,0]#np.cross(np.array(orient)[mask,0],project)
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{l}_x$'
ylab = r'$\hat{l}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,0]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{l}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
if (i==1):
vec2d = np.array(orient)[mask,1]
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{\xi}_x$'
ylab = r'$\hat{\xi}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,1]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{xi}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
if (i==2):
vec2d = np.array(orient)[mask,2]
hdata_x = vec2d[:,2]
hdata_y = vec2d[:,1]
xlab = r'$\hat{\lambda}_x$'
ylab = r'$\hat{\lambda}_y$'
xmin,xmax = -1,1
ymin,ymax = -1,1
vec2d = np.array(orient)[:,2]
vecnorm = np.average(vec2d,axis=0)
vecnorm /= np.linalg.norm(vecnorm)
print 'non-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(np.array(ncells)*vec2d.T,axis=1)/ncelltot
vecnorm /= np.linalg.norm(vecnorm)
print 'vol-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
vecnorm = np.average(diss_sheet*vec2d.T,axis=1)/disstot
vecnorm /= np.linalg.norm(vecnorm)
print 'dis-weighted averaged (\hat{\lambda}) = (',vecnorm[2],' ,',\
vecnorm[1],' ,',\
vecnorm[0],')'
#######################################
# (2) 2d histogram of sheet dimensions
#######################################
if (i==3):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,1]
xlab = r'$l$'
ylab = r'$\xi$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
if (i==4):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(size)[:,2]
xlab = r'$l$'
ylab = r'$\lambda$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
if (i==5):
hdata_x = np.array(size)[:,1]
hdata_y = np.array(size)[:,2]
xlab = r'$\xi$'
ylab = r'$\lambda$'
xmin,xmax = -2.7,1
ymin,ymax = -2.7,0
nbin = 100
#######################################
# (3) dissipation below
#######################################
if (i==6):
hdata_x = np.array(size)[:,0]
hdata_y = np.array(theta)
xlab = r'$l$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
if (i==7):
hdata_x = np.array(size)[:,1]
hdata_y = np.array(theta)
xlab = r'$\xi$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
if (i==8):
hdata_x = np.array(size)[:,2]
hdata_y = np.array(theta)
xlab = r'$\lambda$'
if vsheet:
ylab = r'$\theta_{\nu}/\mathcal{E}_{\nu}$'
else:
ylab = r'$\theta_{\eta}/\mathcal{E}_{\eta}$'
ymin = np.log10(np.min(hdata_y))
ymax = np.log10(np.max(hdata_y))
xmin,xmax = -2.7,1
nbin = 100
fig.add_subplot(gs[i],aspect='equal')
if i<3:
plt.scatter(hdata_x,hdata_y,s=0.02, marker = '.' )
plt.ylim([ymin,ymax])
plt.xlim([xmin,xmax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
if i==0: # add tilt angle measured in Zhdankin paper -17.5degree
tilt = -17.5/180.*np.pi
rx,ry = 2.*np.sin(tilt),2.*np.cos(tilt)
plt.plot([rx,-rx],[ry,-ry],'k:')
elif i<6:
#plt.scatter(hdata_x,hdata_y,s=0.01,marker='.')
#hst are binned in the dissipation values (normalized with total dissipation)
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=(np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)),\
weights=theta) #diss_sheet/disstot)
xx,yy = np.meshgrid(xe,ye)
#return hst, xe,ye,xx,yy
loghst = np.log10(hst+1e-12)
#plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-5,vmax=-1,cmap=cmap)
plt.plot(xe,np.log10(np.sum(hst*xx[:-1],axis=0)+1e-12))
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
#print 'sum(hst) = ',np.sum(hst)
#print 'min/max of xe = ',np.min(xe),np.max(xe)
#print 'min/max of ye = ',np.min(ye),np.max(ye)
#plt.colorbar(pad=pad,fraction=fraction*0.8)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
#plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
#plt.ylabel(ylab,size=20)
#if vsheet:
# plt.title(r'$\theta_{\nu}/\mathcal{E}_{\nu}$',fontsize=15)
#else:
# plt.title(r'$\theta_{\eta}/\mathcal{E}_{\eta}$',fontsize=15)
#if i==3:
# index,coef=1.0,0.4
# plt.plot(xe,coef*xe**index,'k:')
#plt.title(r'$\mathrm{Histogram\ of\ d_i:}$')
else:
hst,xe,ye = np.histogram2d(hdata_y,hdata_x,bins=[np.logspace(ymin,ymax,nbin),\
np.logspace(xmin,xmax,nbin)],\
normed=True)
xx,yy = np.meshgrid(xe,ye)
loghst = np.log10(hst+1e-12)
plt.pcolormesh(yy,xx,np.transpose(loghst),vmin=-3,vmax=9)
#print 'min/max of hst = ',np.min(loghst),np.max(loghst)
plt.colorbar(pad=pad,fraction=fraction)
plt.xscale("log")
plt.yscale("log")
plt.xlim([10**xmin,10**xmax])
plt.ylim([10**ymin,10**ymax])
plt.xlabel(xlab,size=20)
plt.ylabel(ylab,size=20)
plt.title(r'$\log$'+'(PDF)',fontsize=15)
if i==6:
index,coef=2.0,1.5e-3
plt.plot(ye,coef*ye**index,'k:')
plt.tight_layout()
```
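A minimal usage sketch of the helpers above, assuming the module is importable as `csheet3d` under Python 2, that an Athena++ dump named `Unstra.out2.00008.athdf` sits in the working directory, and that `athena_read.athdf` returns `(time, data)` as `get_curl` expects; slice indices and color limits are illustrative only.
```python
import numpy as np
import matplotlib.pyplot as plt
from csheet3d import get_curl, plot_3slice

# Load one snapshot and compute the squared current density and (rho-weighted) vorticity.
time, x, y, z, j2, w2 = get_curl('Unstra.out2.00008.athdf')

# Normalize |j| by its volume average, as the plotting routines above assume.
ju = np.sqrt(j2)
ju /= np.average(ju)

# Three orthogonal slices through the box, colored by |j|/<|j|>.
plot_3slice(ju, xslice=ju.shape[2] // 2, yslice=ju.shape[1] // 2,
            zslice=ju.shape[0] // 2, dmin=0, dmax=5, label=r'$|j|$')
plt.show()
```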
#### File: jmshi/reconnection/dump_hist2d.py
```python
import numpy as np
import my_athena_read as myath
import os
import sys
def dump_hist2d(targname,ts=0,te=5,resol=64,pres=1.0,nx=256,nz=64,nvar=3):
direname='/tigress/jiming/reconnect/athena/bin/'
basename='Unstra.out2.'
appdname='athdf'
dumprate = 0.5 # in unit of orbits
nframe = te-ts+1
have = np.zeros((nz,nframe,nvar))
zave = np.zeros((nx,nframe,nvar))
tave = np.zeros(nframe)
    for i in range(ts,te+1):
        fname = direname+targname+'/'+basename+str(i).zfill(5)+'.'+appdname
        if os.path.isfile(fname):
            idx = i - ts  # arrays are sized nframe = te-ts+1, so index frames relative to ts
            time,data = myath.athdf(fname,quantities=['Bcc1','Bcc2','Bcc3','rho'])
            tave[idx] = time
            have[:,idx,0] = np.average(data['rho'],axis=(1,2)) # rho
            have[:,idx,1] = np.average(data['Bcc2'],axis=(1,2)) # By
            have[:,idx,2] = np.average(-data['Bcc2']*data['Bcc1'],axis=(1,2))/pres # maxwell stress
            zave[:,idx,0] = np.average(data['rho'],axis=(0,1)) # rho
            zave[:,idx,1] = np.average(data['Bcc2'],axis=(0,1)) # By
            zave[:,idx,2] = np.average(-data['Bcc2']*data['Bcc1'],axis=(0,1))/pres # maxwell stress
# dump have/zave data
fmt = '%.15e'
halfcellwidth=(data['x1f'][-1:]-data['x1v'])[0]
xx = data['x1f'][:-1]+halfcellwidth
zz = data['x3f'][:-1]+halfcellwidth
dst = direname+targname+'/'+'tmp/'
if not os.path.exists(dst):
os.makedirs(dst)
np.savetxt(dst+'xx.dat',xx,fmt=fmt)
np.savetxt(dst+'zz.dat',zz,fmt=fmt)
np.savetxt(dst+'tt.dat',tave,fmt=fmt)
for i in range(nvar):
np.savetxt(dst+'have'+str(i).zfill(2)+'.dat',have[...,i],fmt=fmt)
np.savetxt(dst+'zave'+str(i).zfill(2)+'.dat',zave[...,i],fmt=fmt)
return
if __name__=='__main__':
if len(sys.argv)>=4:
ts=int(sys.argv[2])
te=int(sys.argv[3])
else:
ts=0;te=100
print "use ts=0 te=100 for the calc"
print "plz specify ts/te if other than that"
resol=64;pres=1.0;nx=4*64;nz=64*1;nvar=3
targ = sys.argv[ 1 ];
if(targ[0:6]=='x2y4z1'):
Lx,Lz=2,1
if(targ[0:6]=='x4y4z1'):
Lx,Lz=4,1
if(targ[0:6]=='x2y8z1'):
Lx,Lz=2,1
if(targ[6:9]=='r64'):
resol=64
if(targ[6:10]=='r128'):
resol=128
if(targ[6:10]=='r256'):
resol=256
if(targ[6:10]=='r512'):
resol=512
if(targ[0:8]=='adb.r256'):
resol=256
Lx,Ly,Lz = 2,4,1
nx=resol*Lx
nz=resol*Lz
print 'calculating have/zave for '+targ+' within ts= '+str(ts)+' te= '+str(te)
dump_hist2d(targ,ts=ts,te=te,resol=resol,nx=nx,nz=nz,nvar=nvar)
```
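A brief sketch of reading the text files written by `dump_hist2d` back in for a space-time plot; the run directory is hypothetical and the quantity index follows the ordering used in the dump (00: rho, 01: By, 02: Maxwell stress).
```python
import numpy as np
import matplotlib.pyplot as plt

# dump_hist2d writes its averaged profiles under <run dir>/tmp/.
dst = '/tigress/jiming/reconnect/athena/bin/x2y4z1r64pm1re4000/tmp/'
zz = np.loadtxt(dst + 'zz.dat')            # vertical cell centers, length nz
tt = np.loadtxt(dst + 'tt.dat')            # output times, length nframe
have_by = np.loadtxt(dst + 'have01.dat')   # horizontally averaged By, shape (nz, nframe)

# Space-time ("butterfly") diagram of the mean toroidal field.
plt.pcolormesh(tt, zz, have_by, cmap='seismic', vmin=-0.1, vmax=0.1, shading='auto')
plt.xlabel('t')
plt.ylabel('z')
plt.colorbar(label=r'$\langle B_y\rangle$')
plt.show()
```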
#### File: jmshi/reconnection/my_colors.py
```python
import matplotlib # in python
import matplotlib.pyplot as plt
import numpy as np
def get_jhcolors(debug=False):
ct = np.loadtxt('jhcolors.tab')
cm = matplotlib.colors.ListedColormap(ct/255.0)
if debug:
plt.imshow([[-1,1],[1,-1]], cmap=cm) # for example
plt.colorbar(pad=0.01,fraction=0.045,orientation='vertical')
return cm
```
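For example, assuming `jhcolors.tab` (an N x 3 table of 0-255 RGB values) is present in the working directory, the returned colormap can be passed to any matplotlib routine, e.g. via the `cmap` argument of `show_contour` in `csheet3d.py`:
```python
import numpy as np
import matplotlib.pyplot as plt
from my_colors import get_jhcolors

cm = get_jhcolors()
# Random field used purely for illustration.
plt.imshow(np.random.randn(64, 64), cmap=cm, vmin=-3, vmax=3)
plt.colorbar()
plt.show()
```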
#### File: jmshi/reconnection/oldathena_read.py
```python
import numpy as np
#=======================================================================================
def vtk(filename):
"""Read .vtk files and return dict of arrays of data."""
# Python module
import struct
# Read raw data
with open(filename, 'r') as data_file:
raw_data = data_file.read()
# Skip header
current_index = 0
current_char = raw_data[current_index]
# while current_char == '#':
# while current_char != '\n':
# current_index += 1
# current_char = raw_data[current_index]
# current_index += 1
# current_char = raw_data[current_index]
# Skip the first line
while current_char != '\n':
current_index += 1
current_char = raw_data[current_index]
#print current_char
# Extract time info from the second line after time=...
while current_char != '=':
current_index += 1
current_char = raw_data[current_index]
#print current_char
stime = ""
while current_char != ' ':
current_index += 1
current_char = raw_data[current_index]
stime += current_char
current_index += 1
current_char = raw_data[current_index]
time = float(stime)
#print 'time = ',time
while current_char != '\n':
current_index += 1
current_char = raw_data[current_index]
current_index += 1
# Function for skipping though the file
def skip_string(expected_string):
expected_string_len = len(expected_string)
if raw_data[current_index:current_index+expected_string_len] != expected_string:
raise AthenaError('File not formatted as expected')
return current_index+expected_string_len
# Read metadata
current_index = skip_string('BINARY\nDATASET RECTILINEAR_GRID\nDIMENSIONS ')
end_of_line_index = current_index + 1
while raw_data[end_of_line_index] != '\n':
end_of_line_index += 1
face_dimensions = map(int, raw_data[current_index:end_of_line_index].split(' '))
current_index = end_of_line_index + 1
# Function for reading interface locations
def read_faces(letter, num_faces):
identifier_string = '{0}_COORDINATES {1} float\n'.format(letter,num_faces)
begin_index = skip_string(identifier_string)
format_string = '>' + 'f'*num_faces
end_index = begin_index + 4*num_faces
vals = np.array(struct.unpack(format_string, raw_data[begin_index:end_index]))
return vals,end_index+1
# Read interface locations
x_faces,current_index = read_faces('X', face_dimensions[0])
y_faces,current_index = read_faces('Y', face_dimensions[1])
z_faces,current_index = read_faces('Z', face_dimensions[2])
# Prepare to read quantities defined on grid
cell_dimensions = np.array([max(dim-1,1)
for dim in face_dimensions])
num_cells = cell_dimensions.prod()
current_index = skip_string('CELL_DATA {0}\n'.format(num_cells))
if raw_data[current_index:current_index+1] == '\n':
current_index = skip_string('\n') # extra newline inserted by join script
data = {}
# Function for reading scalar data
def read_cell_scalars():
begin_index = skip_string('SCALARS ')
end_of_word_index = begin_index + 1
while raw_data[end_of_word_index] != ' ':
end_of_word_index += 1
array_name = raw_data[begin_index:end_of_word_index]
string_to_skip = 'SCALARS {0} float\nLOOKUP_TABLE default\n'.format(array_name)
begin_index = skip_string(string_to_skip)
format_string = '>' + 'f'*num_cells
end_index = begin_index + 4*num_cells
data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index])
dimensions = tuple(cell_dimensions[::-1])
data[array_name] = np.array(data[array_name]).reshape(dimensions)
return end_index+1
# Function for reading vector data
def read_cell_vectors():
begin_index = skip_string('VECTORS ')
end_of_word_index = begin_index + 1
while raw_data[end_of_word_index] != '\n':
end_of_word_index += 1
array_name = raw_data[begin_index:end_of_word_index]
string_to_skip = 'VECTORS {0}\n'.format(array_name)
array_name = array_name[:-6] # remove ' float'
begin_index = skip_string(string_to_skip)
format_string = '>' + 'f'*num_cells*3
end_index = begin_index + 4*num_cells*3
data[array_name] = struct.unpack(format_string, raw_data[begin_index:end_index])
dimensions = tuple(np.append(cell_dimensions[::-1],3))
data[array_name] = np.array(data[array_name]).reshape(dimensions)
return end_index+1
# Read quantities defined on grid
while current_index < len(raw_data):
expected_string = 'SCALARS'
expected_string_len = len(expected_string)
if raw_data[current_index:current_index+expected_string_len] == expected_string:
current_index = read_cell_scalars()
continue
expected_string = 'VECTORS'
expected_string_len = len(expected_string)
if raw_data[current_index:current_index+expected_string_len] == expected_string:
current_index = read_cell_vectors()
continue
raise AthenaError('File not formatted as expected')
return time,x_faces,y_faces,z_faces,data
#=======================================================================================
def athdf(filename, data=None, quantities=None):
"""Read .athdf files and populate dict of arrays of data."""
# Python module for reading hdf5 files
import h5py
# Open file
with h5py.File(filename, 'r') as f:
# Create list of all quantities if none given
if data is not None:
quantities = data.values()
elif quantities is None:
quantities = f[u'MeshBlock0'].keys()
quantities = [q for q in quantities \
if q != u'x1f' and q != u'x2f' and q != u'x3f']
# Get block count, dimensions, and sizes
num_blocks = len(f.keys())
dims = 0
block_size = []
coords = [u'x1f',u'x2f',u'x3f']
for key in coords:
if key in f[u'MeshBlock0'].keys():
dims += 1
block_size.append(len(f[u'MeshBlock0'][key][:]) - 1)
coords = coords[:dims]
# Order blocks
edges = np.empty((num_blocks,dims))
for block_num,block_name in zip(range(num_blocks),f.keys()):
for dim,coord in zip(range(dims),coords):
edges[block_num,dim] = f[block_name][coord][0]
edges_unique = []
for dim in range(dims):
edges_unique.append(set(edges[:,dim]))
indices = np.empty((num_blocks,3,2), dtype=int)
for block_num in range(num_blocks):
for dim in range(dims):
num_prior = sum(edge < edges[block_num,dim] for edge in edges_unique[dim])
indices[block_num,dim,0] = num_prior * block_size[dim]
indices[block_num,dim,1] = (num_prior+1) * block_size[dim]
for dim in range(dims,3):
indices[block_num,dim,0] = 0
indices[block_num,dim,1] = 1
# Prepare arrays if needed
nx1 = block_size[0] * len(edges_unique[0])
nx2 = block_size[1] * len(edges_unique[1]) if dims >= 2 else 1
nx3 = block_size[2] * len(edges_unique[2]) if dims >= 3 else 1
if data is None:
data = {}
for q in quantities:
data[q] = np.empty((nx3,nx2,nx1))
data[u'x1f'] = np.empty(nx1+1)
if dims >= 2:
data[u'x2f'] = np.empty(nx2+1)
if dims >= 3:
data[u'x3f'] = np.empty(nx3+1)
# Read interface data
for n,block_name in zip(range(num_blocks),f.keys()):
for dim,coord in zip(range(dims),coords):
need_interfaces = True
for dim_other in range(dims):
if dim_other == dim:
continue
if indices[n,dim_other,0] != 0:
need_interfaces = False
if not need_interfaces:
continue
data[coord][indices[n,dim,0]:indices[n,dim,1]] = f[block_name][coord][:-1]
if indices[n,dim,1] == block_size[dim] * len(edges_unique[dim]):
data[coord][indices[n,dim,1]] = f[block_name][coord][-1]
# Read value data
for n,block_name in zip(range(num_blocks),f.keys()):
kl = indices[n,2,0]
ku = indices[n,2,1]
jl = indices[n,1,0]
ju = indices[n,1,1]
il = indices[n,0,0]
iu = indices[n,0,1]
for q in quantities:
data[q][kl:ku,jl:ju,il:iu] = f[block_name][q][:]
return data
#=======================================================================================
class AthenaError(RuntimeError):
"""General exception class for Athena++ read functions."""
pass
```
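A minimal reading sketch; the module targets Python 2, the file names are placeholders, and the quantity names ('rho', 'Bcc1', ...) are assumptions about what a given dump actually contains.
```python
import oldathena_read as ath

# Legacy VTK dump: returns the time stamp, the face coordinates, and a dict of arrays.
time, xf, yf, zf, data = ath.vtk('Unstra.0008.vtk')
rho = data['rho']          # shape (nz, ny, nx), assuming 'rho' was written to the dump

# HDF5 dump: returns a dict holding the requested quantities plus 'x1f'/'x2f'/'x3f'.
hdata = ath.athdf('Unstra.out2.00008.athdf', quantities=['Bcc1', 'Bcc2'])
bx, by = hdata['Bcc1'], hdata['Bcc2']
print bx.shape, by.shape   # Python 2 print, consistent with the rest of this repository
```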
#### File: jmshi/reconnection/time_track.py
```python
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
#import athena4_read as ath
import athena_read as ath
import scipy.ndimage.measurements as measurements
import scipy.ndimage.morphology as morphology
import cPickle as pickle
import time
import multiprocessing as mp
from itertools import product
import sys
#from scipy.interpolate import RegularGridInterpolator
from skimage.transform import resize
from collections import defaultdict
from sklearn.decomposition import PCA
def loadData(fname):
jlist_sorted = pickle.load( open(fname, "rb" ) )
return jlist_sorted
def track(varlist):
tstamp = varlist[0]
rank = varlist[1] # rank of jsheet under investigation
js0 = varlist[2] # content of the jsheet at the rank
ts,te = varlist[3],varlist[4] # time frames to lookup
dt = np.pi*0.001
fdir = '/tigress/jiming/reconnect/athena/bin/'
basename = 'box.'
rank +=1
tstride = 1
size = len(js0)
js_track = []
js_track += js0 #set initial jsheet dict
js_time = []
js_time += [ts*dt] #set initial jsheet time
#fname = fdir+targ+'/'+basename+str(ts).zfill(5)+'.tab'
#fhandler1=open(fname,'a') # for print outs
for frame in np.arange(ts+1,te+1,tstride):
fname = fdir+targ+'/'+basename+str(frame).zfill(5)+'.vtk.jlist.p'
jlist = loadData(fname)
js_self,js_merg,js_tag =[],[],[] #set seg. of jsheet to zero
nself,nmerg = 0,0
upper = max([num for num,item in enumerate(jlist) if len(item)>=1000])
for js in jlist[0:upper]:
js0_in_js = set(js0).intersection(set(js))
inside = len(js0_in_js)
outside = len(js)-inside
if inside > outside: # successor found; append list; count+1
js_self += [js]
js_tag += [frame*dt]
nself += 1
elif inside >0: #potential merger; start counting
js_merg += [js]
js_tag += [frame*dt]
nmerg += 1
else:
pass
if js_merg == []:
lenmergmax = 0
else:
lenmergmax = max([len(item) for item in js_merg])
if js_self == []:
lenselfmax = 0
lenselfsec = 0
else:
sorted_self = sorted(js_self,key=len)
lenselfmax = len(sorted_self[-1]) #max([len(item) for item in js_self])
if len(js_self)>=2:
lenselfsec = len(sorted_self[-2])
else:
lenselfsec = 0
if nself == 1 and nmerg == 0: # single successor keep tracking
#js_track += js_self
#js_time += [dt*frame]
js0 = js_self[0] # set current jsheet as initial for next step
elif nself == 1 and nmerg > 0: # incoming sheet to merge
#js_track += js_self
#js_time += js_tag
#js_track += js_merg
flag = 0
tmp = np.array([tstamp,rank-1,flag,size,(frame-ts),nself,lenselfmax,lenselfsec,nmerg,lenmergmax],dtype='i')
#np.savetxt(fhandler1,tmp,fmt='%i %i %i %f %i %i %i %i')
#print 'jsheet = ',rank-1, size,' merged @Dt = ',frame*dt,'nself,nmerg = ',nself,nmerg
print tmp
break # break out the ts+1:te+1 loop, go to next init jsheet
elif nself >1: # self-disruption
#js_track += js_self
#js_track += js_merg
#js_time += js_tag
if lenselfsec >=800:
flag = 1
tmp = np.array([tstamp,rank-1,flag,size,(frame-ts),nself,lenselfmax,lenselfsec,nmerg,lenmergmax],dtype='i')
#print 'jsheet = ',rank-1, size, 'self-des @ Dt = ',frame*dt, 'nself,nmerg = ',nself,nmerg
print tmp
break
else:
js0 = sorted_self[-1]
elif nself == 0 and nmerg == 1: #somehow large displacement occurs
js0 = js_merg[0]
else:
flag = 2
tmp = np.array([tstamp,rank-1,flag,size,(frame-ts),nself,lenselfmax,lenselfsec,nmerg,lenmergmax],dtype='i')
#print '[warning] rank,size,nself,nmerg,time = ',rank-1,size,nself,nmerg,frame*dt
print tmp
break
return
#print 'rank,size,nself,nmerg,time = ',rank-1,size,nself,nmerg,frame*dt
if __name__=='__main__':
if len( sys.argv ) < 5:
print "Please specify input targ,ts,te,tstride"
exit( )
targ = sys.argv[1]
#targ = x2y4z1r64pm1re4000
ts,te,tstride = int(sys.argv[2]),int(sys.argv[3]),int(sys.argv[4])
dt = np.pi*0.001
rank = 0
fdir = '/tigress/jiming/reconnect/athena/bin/'
basename = 'box.'
frame=ts
fname = fdir+targ+'/'+basename+str(frame).zfill(5)+'.vtk.jlist.p'
jlist_sorted = loadData(fname)
print 'ts= ',ts
p = mp.Pool(processes=6)
varlist = [[ts,rank,jlist_sorted[rank],ts,te] for rank in range(0,6)]
result = p.map(track,tuple(varlist))
# for js0 in jlist_sorted[0:2]: # only analyze top 10
# varlist = [rank,js0,ts,te]
# track(varlist)
# rank +=1
#size = len(js0)
#js_track = []
#js_track += js0 #set initial jsheet dict
#js_time = []
#js_time += [ts*dt] #set initial jsheet time
#for frame in np.arange(ts+1,te+1,tstride):
# fname = fdir+targ+'/'+basename+str(frame).zfill(5)+'.vtk.jlist.p'
# jlist = loadData(fname)
# js_self,js_merg,js_tag =[],[],[] #set seg. of jsheet to zero
# nself,nmerg = 0,0
# for js in jlist[0:100]:
# js0_in_js = set(js0).intersection(set(js))
# inside = len(js0_in_js)
# outside = len(js)-inside
# if inside >= outside: # successor found; append list; count+1
# js_self += js
# js_tag += [frame*dt]
# nself += 1
# elif inside >0: #potential merger; start counting
# js_merg += js
# js_tag += [frame*dt]
# nmerg += 1
# else:
# pass
#
# if nself == 1 and nmerg == 0: # single successor keep tracking
# js_track += js_self
# js_time += [dt*frame]
# js0 = js_self # set current jsheet as initial for next step
# elif nself == 1 and nmerg > 0: # incoming sheet to merge
# js_track += js_self
# js_time += js_tag
# js_track += js_merg
# print 'jsheet = ',rank-1, size,' merged @Dt = ',frame*dt,'nself,nmerg = ',nself,nmerg
# break # break out the ts+1:te+1 loop, go to next init jsheet
# elif nself >1: # self-disruption
# js_track += js_self
# js_track += js_merg
# js_time += js_tag
# print 'jsheet = ',rank-1, size, 'self-des @ Dt = ',frame*dt, 'nself,nmerg = ',nself,nmerg
# break
# else:
# print '[warning] rank,size,nself,nmerg,time = ',rank-1,size,nself,nmerg,frame*dt
# break
#print 'rank,size,nself,nmerg,time = ',rank-1,size,nself,nmerg,frame*dt
# end of the script
``` |
{
"source": "jmshnds/eventstore_grpc",
"score": 2
} |
#### File: eventstore_grpc/eventstore_grpc/client.py
```python
import abc
import grpc
from eventstore_grpc import mixins, connection
from eventstore_grpc import options, discovery
from typing import List, Union
class ClientBase(abc.ABC):
"""Handles EventStoreDB operations."""
def __init__(
self,
connection_string: str,
):
"""Initializes the EventStoreDBClient.
Args:
connection_string: the string to connect to the gRPC channel.
"""
self._connection_string = connection_string
self._connection = connection.Connection.from_connection_string(
connection_string
)
self.channel = self._connection.channel
def __repr__(self):
return f"{self.__class__.__name__}('{self._connection_string}')"
def close(self):
if getattr(self, "_subscriptions_manager", None):
self.unsubscribe_all()
self.channel.close()
class EventStoreDBClient(
mixins.Streams,
mixins.Projections,
mixins.Users,
mixins.Operations,
mixins.Gossip,
mixins.Persistent,
mixins.Subscriptions,
ClientBase,
):
pass
```
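A minimal usage sketch for the client defined above. The `esdb://` connection string format is taken from this repository's test fixtures; the exact host and port are placeholders.
```python
from eventstore_grpc import EventStoreDBClient

# Hypothetical insecure, single-node connection string (see tests/conftest.py further below).
client = EventStoreDBClient("esdb://localhost:2113")
print(client)   # EventStoreDBClient('esdb://localhost:2113')
client.close()  # closes the underlying gRPC channel (and stops any active subscriptions)
```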
#### File: eventstore_grpc/eventstore_grpc/event_data.py
```python
import abc
import uuid
from typing import Any, Dict, Callable
import json
class EventData(abc.ABC):
"""Events abstract class.
When appending events to EventStoreDB they must first all be wrapped in an Event
object. This allows you to specify the content of the event, the type of event,
and whether it is in JSON format. In its simplest form you need the three
following arguments.
* `event_id`: this takes the format of `Uuid` and is used to uniquely identify the
event you are trying to append. If two events with the same `Uuid` are appended
to the same stream in quick succession, EventStoreDB will only append one copy
of the event to the stream.
* `type`: an event type should be supplied for each event. This is a unique string
used to identify the type of event you are saving.
It is common to see the explicit event code type name used as the type, as
it makes serializing and de-serializing of the event easy. However, we recommend
against this as it couples the storage to the type and will make it more
difficult if you need to version the event at a later date.
* `data`: representation of your event data. It is recommended that you store your
events as JSON objects, as this will allow you to make use of all of
EventStoreDB's functionality, such as projections. Ultimately though, you can
save it using whatever format you like, as eventually it will be stored as
encoded bytes.
* `metadata`: it is common to need to store additional information alongside your
event that is not part of the event data itself. This can be correlation IDs,
timestamps, access information, etc. EventStoreDB allows you to store a
separate byte array containing this information to keep it separate.
* `is_json`: a simple boolean field to tell EventStoreDB whether the event is stored
as JSON, `True` by default.
"""
def __init__(
self,
event_id: uuid.UUID,
type: str,
data: Any,
metadata: Any,
is_json: bool = True,
):
self.event_id = event_id
self.type = type
self.data = data
self.metadata = metadata
self.is_json = is_json
def __repr__(self):
return f"{self.__class__.__name__}(**{self.__dict__!r})"
def __str__(self):
return f"{self.type} => {self.data}"
@property
def data_content_type(self):
return "application/json" if self.is_json else "application/octet-stream"
@property
def serialized_metadata(self):
return json.dumps(self.metadata).encode()
@property
@abc.abstractmethod
def serialized_data(self):
raise NotImplementedError
@classmethod
@abc.abstractmethod
def deserialize_data(cls, data: str):
raise NotImplementedError
class JSONEventData(EventData):
"""An Event carrying data as a JSON payload."""
def __init__(
self,
type: str,
data: Dict, # TODO: should be JSON-able.
metadata: Any = None,
event_id: uuid.UUID = None,
):
if event_id is None:
event_id = uuid.uuid4()
if metadata is None:
metadata = {}
super().__init__(event_id, type, data, metadata, True)
@property
def serialized_data(self):
return json.dumps(self.data).encode()
@classmethod
def deserialize_data(cls, data: str):
return json.loads(data)
```
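A short, hypothetical sketch showing how the `JSONEventData` class above can be constructed; the event type and payload are invented for illustration, and the import path is assumed from the file layout.
```python
from eventstore_grpc.event_data import JSONEventData

event = JSONEventData(
    type="OrderPlaced",                      # unique string identifying the event type
    data={"order_id": 1, "total": 9.99},     # JSON-serializable payload
    metadata={"correlation_id": "abc-123"},  # optional side-channel information
)
print(event)                    # OrderPlaced => {'order_id': 1, 'total': 9.99}
print(event.data_content_type)  # application/json
print(event.serialized_data)    # b'{"order_id": 1, "total": 9.99}'
```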
#### File: eventstore_grpc/mixins/persistent.py
```python
from typing import Union, Optional, Dict, Callable
from eventstore_grpc import constants, persistent
from eventstore_grpc.proto import persistent_pb2, persistent_pb2_grpc
class Persistent:
"""Handles Persistent operations."""
def create_persistent_subscription(
self,
stream: str,
group_name: str,
resolve_link_to_s: bool = False,
from_revision: Union[int, str] = constants.START,
extra_statistics: bool = False,
message_timeout_ms: int = 30000,
checkpoint_after_ms: int = 2000,
max_retry_count: int = 10,
min_checkpoint_count: int = 10,
max_checkpoint_count: int = 1000,
max_subscriber_count: Union[str, int] = 0,
live_buffer_size: int = 20,
history_buffer_size: int = 500,
strategy: str = "ROUND_ROBIN",
**kwargs
) -> persistent_pb2.CreateResp:
stub = persistent_pb2_grpc.PersistentSubscriptionsStub(self.channel)
result = persistent.create_persistent_subscription(
stub=stub,
stream=stream,
group_name=group_name,
resolve_link_to_s=resolve_link_to_s,
from_revision=from_revision,
extra_statistics=extra_statistics,
message_timeout_ms=message_timeout_ms,
checkpoint_after_ms=checkpoint_after_ms,
max_retry_count=max_retry_count,
min_checkpoint_count=min_checkpoint_count,
max_checkpoint_count=max_checkpoint_count,
max_subscriber_count=max_subscriber_count,
live_buffer_size=live_buffer_size,
history_buffer_size=history_buffer_size,
strategy=strategy,
**kwargs,
)
return result
def update_persistent_subscription(
self,
resolve_link_to_s: bool = False,
from_revision: Union[str, int] = constants.START,
extra_statistics: bool = False,
message_timeout_ms: int = 30000,
checkpoint_after_ms: int = 2000,
max_retry_count: int = 10,
min_checkpoint_count: int = 10,
max_checkpoint_count: int = 1000,
max_subscriber_count: Union[str, int] = 0,
live_buffer_size: int = 20,
history_buffer_size: int = 500,
strategy: str = "ROUND_ROBIN",
**kwargs
) -> persistent_pb2.UpdateResp:
stub = persistent_pb2_grpc.PersistentSubscriptionsStub(self.channel)
result = persistent.update_persistent_subscription(
stub,
resolve_link_to_s=resolve_link_to_s,
from_revision=from_revision,
extra_statistics=extra_statistics,
message_timeout_ms=message_timeout_ms,
checkpoint_after_ms=checkpoint_after_ms,
max_retry_count=max_retry_count,
min_checkpoint_count=min_checkpoint_count,
max_checkpoint_count=max_checkpoint_count,
max_subscriber_count=max_subscriber_count,
live_buffer_size=live_buffer_size,
history_buffer_size=history_buffer_size,
strategy=strategy,
**kwargs,
)
return result
def delete_persistent_subscription(
self, stream: str, group: str
) -> persistent_pb2.DeleteResp:
stub = persistent_pb2_grpc.PersistentSubscriptionsStub(self.channel)
result = persistent.delete_persistent_subscription(
stub=stub, stream=stream, group=group
)
return result
```
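A hypothetical call against the mixin above, assuming `client` is an already-connected `EventStoreDBClient` (which mixes in `Persistent`); the stream and group names are illustrative.
```python
# Create, then later remove, a persistent subscription group on a stream.
resp = client.create_persistent_subscription(
    stream="orders",
    group_name="order-workers",
    message_timeout_ms=10000,
    strategy="ROUND_ROBIN",
)
client.delete_persistent_subscription(stream="orders", group="order-workers")
```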
#### File: eventstore_grpc/mixins/streams.py
```python
from typing import Union, List, Dict
from eventstore_grpc.streams import append, read, delete, tombstone
from eventstore_grpc import event_data, constants
from eventstore_grpc.proto import streams_pb2_grpc
class Streams:
"""Handles streams operations."""
def append_to_stream(
self,
stream: str,
expected_version: Union[str, int],
events: Union[event_data.EventData, List[event_data.EventData]],
user_credentials: append.UserCredentials = None,
**kwargs
):
"""Appends new events to a stream."""
stub = streams_pb2_grpc.StreamsStub(self.channel)
result = append.append_to_stream(
stub,
stream=stream,
expected_version=expected_version,
events=events,
user_credentials=user_credentials,
**kwargs
)
return result
def read_from_stream(
self,
stream: str,
count: int = None,
from_revision: Union[int, str] = constants.START,
options: dict = None,
**kwargs
):
"""Reads events from a stream."""
options = options or {}
options.update(
{"from_revision": from_revision}
) # TODO: Also the functional api should use from_revision as a param. (and maybe all the options?)
stub = streams_pb2_grpc.StreamsStub(self.channel)
result = read.read_from_stream(
stub, stream=stream, count=count, options=options, **kwargs
)
return result
def read_from_all(
self,
from_position: Union[Dict[str, int], str] = constants.START,
count: int = None,
direction: str = None,
**kwargs
):
stub = streams_pb2_grpc.StreamsStub(self.channel)
result = read.read_from_all(
stub,
from_position=from_position,
count=count,
direction=direction,
**kwargs
)
return result
def delete_stream(self, stream: str, expected_version: Union[int, str], **kwargs):
"""Deletes a stream."""
stub = streams_pb2_grpc.StreamsStub(self.channel)
result = delete.delete_stream(
stub, stream=stream, expected_version=expected_version, **kwargs
)
return result
def tombstone_stream(
self, stream: str, expected_version: Union[int, str], **kwargs
):
"""Tombstones a stream."""
stub = streams_pb2_grpc.StreamsStub(self.channel)
result = tombstone.tombstone_stream(
stub, stream=stream, expected_version=expected_version, **kwargs
)
return result
```
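A hypothetical round trip through the Streams mixin above, again assuming a connected `client`. The `expected_version="any"` sentinel is an assumption; an explicit revision integer also fits the signature.
```python
from eventstore_grpc import constants
from eventstore_grpc.event_data import JSONEventData

event = JSONEventData(type="OrderPlaced", data={"order_id": 1})
append_resp = client.append_to_stream(
    stream="orders",
    expected_version="any",  # assumed sentinel value
    events=event,
)
read_resp = client.read_from_stream(stream="orders", count=10, from_revision=constants.START)
```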
#### File: eventstore_grpc/operations/resign_node.py
```python
from eventstore_grpc.proto import operations_pb2, operations_pb2_grpc, shared_pb2
def resign_node(stub: operations_pb2_grpc.OperationsStub, **kwargs) -> shared_pb2.Empty:
"""Resign Node."""
return stub.ResignNode(shared_pb2.Empty(), **kwargs)
```
#### File: eventstore_grpc/options/base_options.py
```python
import dataclasses
from typing import Dict, Optional, List, Tuple
import base64
import grpc
@dataclasses.dataclass
class BaseOptions:
credentials: Optional[Dict[str, str]] = None
requires_leader: Optional[bool] = None
@property
def metadata(self):
return as_metadata(self.credentials, self.requires_leader)
def as_metadata(
credentials: Optional[Dict[str, str]] = None, requires_leader: Optional[bool] = None
) -> Optional[List[Optional[Tuple[str, str]]]]:
"""Returns a valid grpc "metadata" object."""
metadata = []
if credentials is not None:
if all(elm in ["username", "password"] for elm in credentials):
token = f"{credentials['username']}:{credentials['password']}"
token = base64.b64encode(token.encode("ascii")).decode("ascii")
auth = ("authorization", f"Bearer {token}")
metadata.append(auth)
if requires_leader is not None:
req_leader = ("requires-leader", str(requires_leader).lower())
metadata.append(req_leader)
return metadata if metadata else None
class EventStoreDBMetadataPlugin(grpc.AuthMetadataPlugin):
def __init__(
self, username: str = None, password: str = None, requires_leader: bool = None
):
self._username = username
self._password = password
self._requires_leader = requires_leader
self._metadata = None
@staticmethod
def _compile_token(username: str, password: str):
if username.strip() and password.strip():
_key = f"{username}:{password}".encode("ascii")
token = base64.b64encode(_key).decode("ascii")
return ("authorization", f"Basic {token}")
@staticmethod
def _compile_requires_leader(requires_leader: bool):
if requires_leader is not None:
return ("requires-leader", str(requires_leader).lower())
def compile(self):
metadata = []
compiled_token = self._compile_token(self._username, self._password)
compiled_requires_leader = self._compile_requires_leader(self._requires_leader)
if compiled_token: # good place to use the walrus op here :)
metadata.append(compiled_token)
if compiled_requires_leader:
metadata.append(compiled_requires_leader)
if metadata:
self._metadata = tuple(metadata)
return self
def __call__(self, context, callback):
self.compile()
callback(self._metadata, None)
def as_credentials(
username: Optional[str] = None,
password: Optional[str] = None,
requires_leader: Optional[bool] = None,
) -> grpc.AuthMetadataPlugin:
return grpc.metadata_call_credentials(
EventStoreDBMetadataPlugin(username, password, requires_leader),
EventStoreDBMetadataPlugin.__name__,
)
```
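A sketch of wiring the helpers above into a gRPC channel. Only standard `grpc` calls are used; the target address is a placeholder and the module import path is assumed from the file layout.
```python
import grpc
from eventstore_grpc.options.base_options import as_credentials, as_metadata

call_creds = as_credentials(username="admin", password="<PASSWORD>", requires_leader=True)
channel_creds = grpc.ssl_channel_credentials()  # optionally pass root_certificates=...
channel = grpc.secure_channel(
    "localhost:2113",
    grpc.composite_channel_credentials(channel_creds, call_creds),
)

# Alternatively, build plain per-call metadata from a credentials dict:
metadata = as_metadata({"username": "admin", "password": "<PASSWORD>"}, requires_leader=True)
```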
#### File: eventstore_grpc/persistent/create.py
```python
from typing import Union
from eventstore_grpc.proto import persistent_pb2, persistent_pb2_grpc, shared_pb2
from eventstore_grpc import constants
def create_persistent_subscription(
stub: persistent_pb2_grpc.PersistentSubscriptionsStub,
stream: str,
group_name: str,
resolve_link_to_s: bool = False,
from_revision: Union[int, str] = constants.START,
extra_statistics: bool = False,
message_timeout_ms: int = 30000,
checkpoint_after_ms: int = 2000,
max_retry_count: int = 10,
min_checkpoint_count: int = 10,
max_checkpoint_count: int = 1000,
max_subscriber_count: Union[str, int] = 0,
live_buffer_size: int = 20,
history_buffer_size: int = 500,
strategy: str = "ROUND_ROBIN",
**kwargs
) -> persistent_pb2.CreateResp:
"""Creates a persistent subscription."""
request = persistent_pb2.CreateReq()
options = persistent_pb2.CreateReq.Options()
identifier = shared_pb2.StreamIdentifier()
request_settings = persistent_pb2.CreateReq.Settings()
request_settings.resolve_links = resolve_link_to_s
if isinstance(from_revision, int):
request_settings.revision = from_revision
elif from_revision == constants.START:
request_settings.revision = 0
request_settings.extra_statistics = extra_statistics
request_settings.message_timeout_ms = message_timeout_ms
request_settings.checkpoint_after_ms = checkpoint_after_ms
request_settings.max_retry_count = max_retry_count
request_settings.min_checkpoint_count = min_checkpoint_count
request_settings.max_checkpoint_count = max_checkpoint_count
if isinstance(max_subscriber_count, int):
request_settings.max_subscriber_count = max_subscriber_count
elif max_subscriber_count == "UNLIMITED":
request_settings.max_subscriber_count = 0
request_settings.live_buffer_size = live_buffer_size
request_settings.history_buffer_size = history_buffer_size
if strategy == "DISPATCH_TO_SINGLE":
request_settings.named_consumer_strategy = (
persistent_pb2.CreateReq.DispatchToSingle
)
elif strategy == "PINNED":
request_settings.named_consumer_strategy = persistent_pb2.CreateReq.Pinned
elif strategy == "ROUND_ROBIN":
request_settings.named_consumer_strategy = persistent_pb2.CreateReq.RoundRobin
identifier.streamName = stream.encode()
options.group_name = group_name
options.stream_identifier.CopyFrom(identifier)
options.settings.CopyFrom(request_settings)
request.options.CopyFrom(options)
response = stub.Create(request, **kwargs)
return response
```
#### File: eventstore_grpc/persistent/delete.py
```python
from eventstore_grpc.proto import persistent_pb2, persistent_pb2_grpc, shared_pb2
def delete_persistent_subscription(
stub: persistent_pb2_grpc.PersistentSubscriptionsStub, stream: str, group: str
) -> persistent_pb2.DeleteResp:
"""Deletes a persistent subscription."""
request = persistent_pb2.DeleteReq()
options = persistent_pb2.DeleteReq.Options()
identifier = shared_pb2.StreamIdentifier()
identifier.streamName = stream.encode()
options.stream_identifier.CopyFrom(identifier)
options.group_name = group
request.options.CopyFrom(options)
response = stub.Delete(request)
return response
```
#### File: eventstore_grpc/projections/create_transient.py
```python
from eventstore_grpc.proto import projections_pb2, projections_pb2_grpc, shared_pb2
def create_transient_projection(
stub: projections_pb2_grpc.ProjectionsStub,
name: str,
query: str,
**kwargs,
) -> projections_pb2.CreateResp:
"""Creates a one time projection."""
request = projections_pb2.CreateReq()
options = projections_pb2.CreateReq.Options()
transient = projections_pb2.CreateReq.Options.Transient()
transient.name = name
options.transient.CopyFrom(transient)
options.query = query
request.options.CopyFrom(options)
response = stub.Create(request, **kwargs)
return response
```
#### File: eventstore_grpc/projections/disable.py
```python
from eventstore_grpc.proto import projections_pb2, projections_pb2_grpc
def disable_projection(
stub: projections_pb2_grpc.ProjectionsStub,
name: str,
write_checkpoint: bool = True,
**kwargs
) -> projections_pb2.DisableResp:
"""Disables a projection."""
request = projections_pb2.DisableReq()
options = projections_pb2.DisableReq.Options()
options.name = name
options.write_checkpoint = write_checkpoint
request.options.CopyFrom(options)
response = stub.Disable(request, **kwargs)
return response
```
#### File: eventstore_grpc/projections/reset.py
```python
from eventstore_grpc.proto import projections_pb2, projections_pb2_grpc
def reset_projection(
stub: projections_pb2_grpc.ProjectionsStub, name: str, write_checkpoint: bool = True, **kwargs
) -> projections_pb2.ResetResp:
"""Resets a projection."""
request = projections_pb2.ResetReq()
options = projections_pb2.ResetReq.Options()
options.name = name
options.write_checkpoint = write_checkpoint
request.options.CopyFrom(options)
response = stub.Reset(request, **kwargs)
return response
```
#### File: eventstore_grpc/projections/update.py
```python
from typing import Optional
from eventstore_grpc.proto import projections_pb2, projections_pb2_grpc, shared_pb2
def update_projection(
stub: projections_pb2_grpc.ProjectionsStub,
name: str,
query: str,
track_emitted_streams: Optional[bool] = None,
**kwargs,
) -> projections_pb2.UpdateResp:
"""Updates a projection."""
request = projections_pb2.UpdateReq()
options = projections_pb2.UpdateReq.Options()
options.name = name
options.query = query
if track_emitted_streams is None:
options.no_emit_options.CopyFrom(shared_pb2.Empty())
else:
options.emit_enabled = track_emitted_streams
request.options.CopyFrom(options)
response = stub.Update(request, **kwargs)
return response
```
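A hypothetical call to the helper above, assuming `channel` is an open `grpc.Channel` to an EventStoreDB node; the projection name and query are illustrative.
```python
from eventstore_grpc.proto import projections_pb2_grpc

stub = projections_pb2_grpc.ProjectionsStub(channel)
resp = update_projection(
    stub,
    name="orders-by-customer",
    query="fromStream('orders').when({ $any: function(s, e) { linkTo('by-customer-' + e.data.customer, e); } })",
    track_emitted_streams=True,
)
```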
#### File: jmshnds/eventstore_grpc/run_threading_experiment.py
```python
import logging
import time
import random
import sys
import threading
from eventstore_grpc.subscription import Subscription
logging.basicConfig(level=logging.DEBUG)
SLOW_DOWN = False
STOP_FIRST_THREAD = False
if "--stop-first-thread" in sys.argv:
STOP_FIRST_THREAD = True
if "--slow-down" in sys.argv:
SLOW_DOWN = True
def some_stream():
counter = 0
while True:
if counter == 10:
break
counter += 1
yield counter
if SLOW_DOWN:
time.sleep(random.randint(2,10))
class Counter:
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
def update(self, value: int):
self.count = value
counter = Counter()
def some_handler(element, current_thread: threading.Thread = None, *args, **kwargs):
"""Handles the elment received from the stream."""
current_value = counter.count
if SLOW_DOWN:
time.sleep(random.randint(1,5))
counter.update(current_value + 1)
print(f"\033[92mHandling iteration number {element:^3} \033[38;5;226m[{name:^20}]\033[92m-[\033[38;5;117m{counter.count:^4}\033[92m] [{id(counter)}]\033[0m")
number_of_streams = 10
stream_names = [f"thread_stream_{i}" for i in range(number_of_streams)]
subscriptions = {}
for stream_name in stream_names:
subscription = Subscription(some_stream, some_handler, name=stream_name)
if stream_name in subscriptions:
registered_subscription = subscriptions[stream_name]
registered_subscription.subscribed = False
subscriptions[stream_name] = subscription
subscriptions[stream_name].start()
if STOP_FIRST_THREAD:
first_thread = "thread_stream_0"
if SLOW_DOWN:
time.sleep(random.randint(2,5))
subscriptions[first_thread].subscribed = False
```
#### File: eventstore_grpc/tests/conftest.py
```python
import pathlib
import pytest
import time
import grpc
from eventstore_grpc import gossip, as_credentials, EventStoreDBClient
from eventstore_grpc.proto import gossip_pb2_grpc
CERTS = pathlib.Path(__file__).parent / "certs"
pytest_plugins = ("docker_compose",)
def pytest_configure():
CERTS.mkdir(parents=True, exist_ok=True)
def pytest_unconfigure():
for elm in CERTS.iterdir():
if elm.is_dir():
for file in elm.iterdir():
file.unlink()
elm.rmdir()
else:
elm.unlink()
CERTS.rmdir()
@pytest.fixture(scope="session", autouse=True)
def wait_for_ready(session_scoped_container_getter):
target = "localhost:2112" # Cluster nodes are at ports 2111, 2112.
with open(CERTS / "ca" / "ca.crt", "rb") as f:
root_certificates = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=root_certificates)
with grpc.secure_channel(target, credentials=credentials) as channel:
stub = gossip_pb2_grpc.GossipStub(channel)
count = 0
while True:
count += 1
print("\033[38;5;220mTrying to reach cluster...\033[0m", end="\r")
if count > 10:
break
try:
result = gossip.read(stub)
except:
time.sleep(3)
continue
if all(member.is_alive for member in result.members):
break
time.sleep(10)
print("\033[38;5;42mEventStore DB Cluster is up and running! ✨\033[0m")
@pytest.fixture
def secure_cluster_connection_string():
rc = CERTS / "ca" / "ca.crt"
print(f"rc => {rc}")
return f"esdb://localhost:2111,localhost:2112?tls&rootCertificate={rc}"
@pytest.fixture
def insecure_single_connection_string():
return "esdb://localhost:2113"
@pytest.fixture(params=["cluster"])
def connection_string(
request, secure_cluster_connection_string, insecure_single_connection_string
):
if request.param == "single":
return insecure_single_connection_string
elif request.param == "cluster":
return secure_cluster_connection_string
else:
return None
@pytest.fixture
def credentials():
return as_credentials(username="admin", password="<PASSWORD>")
@pytest.fixture
def secure_client(secure_cluster_connection_string):
return EventStoreDBClient(secure_cluster_connection_string)
@pytest.fixture
def insecure_client(insecure_single_connection_string):
return EventStoreDBClient(insecure_single_connection_string)
@pytest.fixture
def client(connection_string):
return EventStoreDBClient(connection_string)
``` |
{
"source": "jmshnds/PySnake",
"score": 3
} |
#### File: jmshnds/PySnake/food.py
```python
from gameObject import GameObject
class Food(GameObject):
def __init__(self, x, y):
super().__init__(x, y)
# Represents if the food can be eaten or not
self.isPoison = False
def draw(self, draw, screen, color, shape):
draw.rect(screen, color, (shape[0]*self.x, shape[1]*self.y, shape[2], shape[3]))
``` |
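A hypothetical pygame snippet showing how the `Food.draw` signature above might be called; the grid size, colors, and import path are assumptions.
```python
import pygame
from food import Food

pygame.init()
screen = pygame.display.set_mode((400, 400))

food = Food(5, 7)  # grid coordinates
# shape = (cell_width, cell_height, block_width, block_height): the position is scaled by
# the cell size, while the last two values are the drawn rectangle's absolute size.
food.draw(pygame.draw, screen, (255, 0, 0), (20, 20, 20, 20))
pygame.display.flip()
```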
{
"source": "jmsierra/ansible-oracle-collection",
"score": 3
} |
#### File: plugins/modules/role.py
```python
DOCUMENTATION = '''
---
module: jmsierra.oracle.role
short_description: Manage roles in an Oracle database
description:
- Manage roles in an Oracle database
- Creates, alters and drops roles, supporting different authentication types (none, password, external, global, application)
version_added: "0.2.0"
options:
hostname:
description:
- The Oracle database host
required: false
default: localhost
port:
description:
- The listener port number on the host
required: false
default: 1521
service_name:
description:
- The database service name to connect to
required: true
user:
description:
- The Oracle user name to connect to the database
required: true
password:
description:
- The Oracle user password for '<PASSWORD>'
required: true
mode:
description:
- The mode with which to connect to the database
required: true
default: normal
choices: ['normal','sysdba']
role:
description:
- The name of the role to manage
required: false
default: null
state:
description:
- The intended state of the role (present=the role is created or altered, absent=the role is dropped).
default: present
choices: ['present','absent']
auth:
description:
- The authentication type for the role.
default: none
choices: ['none','password','external','global','application']
auth_conf:
description:
- Extra authentication configuration (the password when auth=password, or the authentication package schema.name when auth=application).
required: false
default: null
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author:
- <NAME>, <EMAIL>, @oravirt
- https://github.com/jmsierra
'''
EXAMPLES = '''
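# Illustrative examples (not part of the original module documentation);
# parameter names follow the argument_spec defined in main() below.
# Create a password-authenticated role
- jmsierra.oracle.role:
    hostname: dbserver
    service_name: orcl
    user: system
    password: manager
    role: app_role
    auth: password
    auth_conf: app_role_pw
    state: present
# Drop a role
- jmsierra.oracle.role:
    hostname: dbserver
    service_name: orcl
    user: system
    password: manager
    role: app_role
    state: absent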
'''
try:
import cx_Oracle
except ImportError:
cx_oracle_exists = False
else:
cx_oracle_exists = True
def clean_string(item):
item = item.replace("'","").replace(", ",",").lstrip(" ").rstrip(",").replace("[","").replace("]","")
return item
def clean_list(item):
item = [p.replace("'","").replace(", ",",").lstrip(" ").rstrip(",").replace("[","").replace("]","") for p in item]
return item
# Check if the user/role exists
def check_role_exists(module, msg, cursor, role, auth):
if not(role):
module.fail_json(msg='Error: Missing role name', changed=False)
return False
role = clean_string(role)
#sql = 'select count(*) from dba_roles where role = upper(\'%s\')' % role
sql = 'select lower(role), lower(authentication_type) from dba_roles where role = upper(\'%s\')' % role
try:
cursor.execute(sql)
result = cursor.fetchone()
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = error.message+ 'sql: ' + sql
return False
if result is not None:
msg[0] = 'The role (%s) already exists' % role
return True
# Create the role
def create_role(module, msg, cursor, role, auth, auth_conf):
if not(role) or not (auth):
module.fail_json(msg='Error: Missing role name', changed=False)
return False
# This is the default role creation
sql = 'create role %s ' % role
if auth == 'password':
if not auth_conf:
module.fail_json(msg='Missing password', changed=False)
return False
else:
sql += 'identified by %s' % auth_conf
if auth == 'application':
if not (auth_conf):
module.fail_json(msg='Missing authentication package (schema.name)', changed=False)
return False
else:
sql += 'identified using %s' % auth_conf
if auth == 'external':
sql += 'identified externally '
if auth == 'global':
sql += 'identified globally'
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = 'Blergh, something went wrong while creating the role - %s sql: %s' % (error.message, sql)
return False
msg[0] = 'The role (%s) has been created successfully, authentication: %s' % (role, auth)
return True
def modify_role(module, msg, cursor, role, auth, auth_conf):
if not(role) or not (auth):
module.fail_json(msg='Error: Missing role name', changed=False)
return False
sql = 'alter role %s ' % (role)
currauth = get_role_specs(module, msg, cursor, role)
if currauth.lower() == auth.lower():
module.exit_json(msg='The role (%s) already exists' % role, changed=False)
else:
if auth == 'none':
sql += ' not identified '
if auth == 'password':
if not auth_conf:
module.fail_json(msg='Missing password for authentication_type %s' % (auth), changed=False)
return False
else:
sql += ' identified by %s' % auth_conf
if auth == 'application':
if not (auth_conf):
module.fail_json(msg='Missing authentication package (schema.name)', changed=False)
return False
else:
sql += 'identified using %s' % auth_conf
if auth == 'external':
sql += 'identified externally '
if auth == 'global':
sql += 'identified globally'
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = 'Blergh, something went wrong while altering the role - %s sql: %s' % (error.message, sql)
return False
msg[0] = 'The role (%s) has been changed successfully, authentication: %s, previous: %s' % (role, auth, currauth)
return True
def get_role_specs(module, msg, cursor, role):
sql = 'select lower(authentication_type) from dba_roles where role = upper(\'%s\')' % role
try:
cursor.execute(sql)
result = (cursor.fetchall()[0][0])
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = 'Blergh, something went wrong while getting the role auth scheme - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg[0], changed=False)
return False
#module.exit_json(msg='Result: %s, sql: %s' % (result, sql), changed=False)
return result
# Create the role
def drop_role(module, msg, cursor, role):
if not(role):
module.fail_json(msg='Error: Missing role name', changed=False)
return False
sql = 'drop role %s' % role
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = 'Blergh, something went wrong while dropping the role - %s sql: %s' % (error.message, sql)
return False
msg[0] = 'The role (%s) has been successfully dropped' % role
return True
def main():
msg = ['']
module = AnsibleModule(
argument_spec = dict(
hostname = dict(default='localhost'),
port = dict(default=1521),
service_name = dict(required=True),
user = dict(required=False),
password = dict(required=False, no_log=True),
mode = dict(default='normal', choices=["normal","sysdba"]),
role = dict(default=None),
state = dict(default="present", choices=["present", "absent"]),
auth = dict(default='none', choices=["none", "password", "external", "global", "application"]),
auth_conf = dict(default=None)
))
hostname = module.params["hostname"]
port = module.params["port"]
service_name = module.params["service_name"]
user = module.params["user"]
password = module.params["password"]
mode = module.params["mode"]
role = module.params["role"]
state = module.params["state"]
auth = module.params["auth"]
auth_conf = module.params["auth_conf"]
if not cx_oracle_exists:
module.fail_json(msg="The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set")
wallet_connect = '/@%s' % service_name
try:
if (not user and not password ): # If neither user or password is supplied, the use of an oracle wallet is assumed
if mode == 'sysdba':
connect = wallet_connect
conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA)
else:
connect = wallet_connect
conn = cx_Oracle.connect(wallet_connect)
elif (user and password ):
if mode == 'sysdba':
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name)
connect = dsn
conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA)
else:
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name)
connect = dsn
conn = cx_Oracle.connect(user, password, dsn)
elif (not(user) or not(password)):
module.fail_json(msg='Missing username or password for cx_Oracle')
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg[0] = 'Could not connect to database - %s, connect descriptor: %s' % (error.message, connect)
module.fail_json(msg=msg[0], changed=False)
cursor = conn.cursor()
if state == 'present':
if not check_role_exists(module, msg, cursor, role, auth):
if create_role(module, msg, cursor, role, auth, auth_conf):
module.exit_json(msg=msg[0], changed=True)
else:
module.fail_json(msg=msg[0], changed=False)
elif modify_role(module, msg, cursor, role, auth, auth_conf):
module.exit_json(msg=msg[0], changed=True)
else:
module.fail_json(msg=msg[0], changed=False)
elif state == 'absent':
if check_role_exists(module, msg, cursor, role, auth):
if drop_role(module, msg, cursor, role):
module.exit_json(msg=msg[0], changed=True)
else:
module.exit_json(msg='The role (%s) doesn\'t exist' % role, changed=False)
module.exit_json(msg=msg[0], changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
```
#### File: plugins/modules/user.py
```python
DOCUMENTATION = '''
---
module: jmsierra.oracle.user
short_description: Manage users/schemas in an Oracle database
description:
- Manage users/schemas in an Oracle database
- Can be run locally on the controlmachine or on a remote host
version_added: "0.2.0"
options:
hostname:
description:
- The Oracle database host
required: false
default: localhost
port:
description:
- The listener port number on the host
required: false
default: 1521
service_name:
description:
- The database service name to connect to
required: true
user:
description:
- The Oracle user name to connect to the database
required: false
password:
description:
- The Oracle user password for '<PASSWORD>'
required: false
mode:
description:
- The mode with which to connect to the database
required: false
default: normal
choices: ['normal','sysdba']
schema:
description:
- The schema that you want to manage
required: false
default: None
schema_password:
description:
- The password for the new schema. i.e '..identified by password'
required: false
default: null
schema_password_hash:
description:
- The password hash for the new schema. i.e '..identified by values 'XXXXXXX'
required: false
default: None
default_tablespace:
description:
- The default tablespace for the new schema. The tablespace must exist
required: false
default: None
default_temp_tablespace:
description:
- The default tablespace for the new schema. The tablespace must exist
required: false
default: None
update_password:
description:
- always will update passwords if they differ. on_create will only set the password for newly created users.
required: false
default: always
choices: ['always','on_create']
authentication_type:
description:
- The type of authentication for the user.
required: false
default: password
choices: ['password','external','global']
profile:
description:
- The profile for the user
required: false
default: None
state:
description:
- Whether the user should exist. Absent removes the user, locked/unlocked locks or unlocks the user
required: False
default: present
choices: ['present','absent','locked','unlocked']
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author:
- <NAME>, <EMAIL>, @oravirt
- https://github.com/jmsierra
'''
EXAMPLES = '''
# Create a new schema on a remote db by running the module on the controlmachine (i.e: delegate_to: localhost)
oracle_user: hostname=remote-db-server service_name=orcl user=system password=manager schema=myschema schema_password=<PASSWORD> default_tablespace=test state=present
# Create a new schema on a remote db
oracle_user: hostname=localhost service_name=orcl user=system password=<PASSWORD> schema=myschema schema_password=<PASSWORD> default_tablespace=test state=present
# Drop a schema on a remote db
oracle_user: hostname=localhost service_name=orcl user=system password=<PASSWORD> schema=myschema state=absent
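# Illustrative example using the fully-qualified collection name documented above (assumed equivalent to the oracle_user examples)
- jmsierra.oracle.user:
    hostname: localhost
    service_name: orcl
    user: system
    password: <PASSWORD>
    schema: myschema
    schema_password: <PASSWORD>
    default_tablespace: test
    state: present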
'''
try:
import cx_Oracle
except ImportError:
cx_oracle_exists = False
else:
cx_oracle_exists = True
def clean_string(item):
item = item.replace("'","").replace(", ",",").lstrip(" ").rstrip(",").replace("[","").replace("]","")
return item
def clean_list(item):
item = [p.replace("'","").replace(", ",",").lstrip(" ").rstrip(",").replace("[","").replace("]","") for p in item]
return item
# Check if the user/schema exists
def check_user_exists(module, msg, cursor, schema):
sql = 'select count(*) from dba_users where username = upper(\'%s\')' % schema
try:
cursor.execute(sql)
result = cursor.fetchone()
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = error.message+ 'sql: ' + sql
module.fail_json(msg=msg)
if result is not None and result[0] > 0:
msg = 'The schema (%s) already exists' % schema
return True
return False
# Create the user/schema
def create_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, profile, authentication_type, state, container, container_data):
total_sql = []
if not (schema):
msg = 'Error: Missing schema name'
return False
if not (schema_password) and authentication_type == 'password':
if not (schema_password_hash):
msg = 'Error: Missing schema password or password hash'
module.fail_json(msg=msg, Changed=False)
if authentication_type == 'password':
if (schema_password_hash):
sql = 'create user %s identified by values \"%s\" ' % (schema, schema_password_hash)
else:
sql = 'create user %s identified by \"%s\" '% (schema, schema_password)
elif authentication_type == 'global':
sql = 'create user %s identified globally ' % (schema)
elif authentication_type == 'external':
sql = 'create user %s identified externally ' % (schema)
if (default_tablespace):
sql += 'default tablespace %s '% default_tablespace
sql += 'quota unlimited on %s '% default_tablespace
if (default_temp_tablespace):
sql += 'temporary tablespace %s '% default_temp_tablespace
if (profile):
sql += ' profile %s' % profile
if container:
sql += ' container=%s' % (container)
if state == 'locked':
sql += ' account lock'
if state == 'expired':
sql += ' password expire'
if state == 'expired & locked':
sql += ' account lock password expire'
total_sql.append(sql)
if container_data:
altersql = 'alter user %s set container_data=%s container=current' % (schema, container)
total_sql.append(altersql)
# module.exit_json(msg=total_sql, changed=True)
for a in total_sql:
execute_sql(module, cursor, a)
return True
# Get the current password hash for the user
def get_user_password_hash(module, cursor, schema):
sql = 'select password from sys.user$ where name = upper(\'%s\')' % schema
try:
cursor.execute(sql)
pwhashresult = cursor.fetchone()[0]
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = error.message+ ': sql: ' + sql
module.fail_json(msg=msg)
return pwhashresult
# Modify the user/schema
def modify_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, update_password, profile, authentication_type, state, container_data):
sql_get_curr_def = 'select lower(account_status)'
sql = 'alter user %s' % schema
if update_password == 'always':
if authentication_type == 'password':
if schema_password_hash:
sql += ' identified by values \'%s\'' % (schema_password_hash)
elif schema_password:
sql += ' identified by %s ' % (schema_password)
elif authentication_type == 'external':
sql += ' identified externally '
sql_get_curr_def += ' ,lower(authentication_type)'
elif authentication_type == 'global':
sql += ' identified globally '
sql_get_curr_def += ' ,lower(authentication_type)'
if default_tablespace:
sql += ' default tablespace %s' % default_tablespace
sql += ' quota unlimited on %s '% default_tablespace
sql_get_curr_def += ' ,lower(default_tablespace)'
if default_temp_tablespace:
sql += ' temporary tablespace %s ' % default_temp_tablespace
sql_get_curr_def += ' ,lower(temporary_tablespace)'
if profile:
sql += ' profile %s ' % profile
sql_get_curr_def += ' ,lower(profile)'
want_account_status = ''
if state == 'present' or state == 'unlocked':
want_account_status = 'open'
sql += ' account unlock'
elif state == 'locked':
want_account_status = state
sql += ' account lock'
elif state == 'expired':
want_account_status = state
sql += ' password expire'
elif state == 'expired & locked':
want_account_status = state
sql += ' account lock password expire'
wanted_list = []
wanted_list.append(want_account_status)
if authentication_type != 'password' and update_password == '<PASSWORD>':
wanted_list.append(authentication_type)
if default_tablespace:
wanted_list.append(default_tablespace)
if default_temp_tablespace:
wanted_list.append(default_temp_tablespace)
if profile:
wanted_list.append(profile)
sql_get_curr_def += ' from dba_users where username = upper(\'%s\')' % schema
if update_password == '<PASSWORD>':
old_pw_hash = get_user_password_hash(module, cursor, schema)
wanted_list = [x.lower() for x in wanted_list]
curr_defaults = execute_sql_get(module, cursor, sql_get_curr_def)
curr_defaults = [list(t) for t in curr_defaults]
if (schema_password_hash):
if update_password == '<PASSWORD>':
# if (wanted_list in curr_defaults) and (old_pw_hash == schema_password_hash):
# # Everything is kosher, exit changed=False
# module.exit_json(msg='The schema (%s) is in the intented state' % (schema), changed=False)
# else:
# # Make the change and exit changed=True
execute_sql(module, cursor, sql)
module.exit_json(msg='Successfully altered the user (%s)' % (schema), changed=True)
else:
if (wanted_list in curr_defaults):
module.exit_json(msg='The schema (%s) is in the intended state' % (schema), changed=False)
else:
# Make the change and exit changed=True
execute_sql(module, cursor, sql)
module.exit_json(msg='Successfully altered the user (%s)' % (schema), changed=True)
else:
if (wanted_list in curr_defaults):
if update_password == '<PASSWORD>':
## DISABLING THE PRE/POST-CHECK
# change everything and compare hash pre/post. If same => exit change=False else exit change=True
execute_sql(module, cursor, sql)
# new_pw_hash = get_user_password_hash(module, cursor, schema)
# if new_pw_hash == old_pw_hash:
# module.exit_json(msg='The schema (%s) is in the intented state' % (schema), changed=False)
# else:
module.exit_json(msg='Successfully altered the user (%s)' % (schema), changed=True)
else:
module.exit_json(msg='The schema (%s) is in the intended state' % (schema), changed=False)
else:
# do the complete change -> exit with change=True
# module.exit_json(msg=sql)
execute_sql(module, cursor, sql)
module.exit_json(msg='Successfully altered the user (%s, %s)' % (schema, sql), changed=True)
return True
# Run the actual modification
def execute_sql(module, cursor, sql):
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Blergh, something went wrong while executing sql - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg, changed=False)
return True
def execute_sql_get(module, cursor, sql):
try:
cursor.execute(sql)
result = cursor.fetchall()
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = error.message+ ': sql: ' + sql
module.fail_json(msg=msg)
return result
# Drop the user
def drop_user(module, cursor, schema):
black_list = ['sys','system','dbsnmp']
if schema.lower() in black_list:
msg = 'Trying to drop an internal user: %s. Not allowed' % schema
return False
sql = 'drop user %s cascade' % schema
try:
cursor.execute(sql)
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Blergh, something went wrong while dropping the schema - %s sql: %s' % (error.message, sql)
module.fail_json(msg=msg)
return True
def main():
msg = ['']
module = AnsibleModule(
argument_spec = dict(
oracle_home = dict(required=False, aliases=['oh']),
hostname = dict(default='localhost'),
port = dict(default=1521),
service_name = dict(required=True, aliases = ['tns']),
user = dict(required=False),
password = dict(required=False, no_log=True),
mode = dict(default='normal', choices=["normal","sysdba"]),
schema = dict(default=None,aliases=['name']),
schema_password = dict(default=None, no_log=True),
schema_password_hash = dict(default=None, no_log=True),
state = dict(default="present", choices=["present", "absent", "locked", "unlocked", "expired", "expired & locked"]),
default_tablespace = dict(default=None),
default_temp_tablespace = dict(default=None),
update_password = dict(default='always', choices=['on_create','always']),
profile = dict(default=None),
authentication_type = dict(default='password', choices=['password','external','global']),
container = dict(default=None),
container_data = dict(default=None)
),
mutually_exclusive=[['schema_password', 'schema_password_hash']]
)
oracle_home = module.params["oracle_home"]
hostname = module.params["hostname"]
port = module.params["port"]
service_name = module.params["service_name"]
user = module.params["user"]
password = module.params["password"]
mode = module.params["mode"]
schema = module.params["schema"]
schema_password = module.params["schema_password"]
schema_password_hash = module.params["schema_password_hash"]
state = module.params["state"]
default_tablespace = module.params["default_tablespace"]
default_temp_tablespace = module.params["default_temp_tablespace"]
update_password = module.params["update_password"]
profile = module.params["profile"]
authentication_type = module.params["authentication_type"]
container = module.params["container"]
container_data = module.params["container_data"]
if not cx_oracle_exists:
module.fail_json(msg="The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set")
if oracle_home is not None:
os.environ['ORACLE_HOME'] = oracle_home.rstrip('/')
#os.environ['LD_LIBRARY_PATH'] = ld_library_path
elif 'ORACLE_HOME' in os.environ:
oracle_home = os.environ['ORACLE_HOME']
wallet_connect = '/@%s' % service_name
try:
if (not user and not password ): # If neither user or password is supplied, the use of an oracle wallet is assumed
if mode == 'sysdba':
connect = wallet_connect
conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA)
else:
connect = wallet_connect
conn = cx_Oracle.connect(wallet_connect)
elif (user and password ):
if mode == 'sysdba':
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name)
connect = dsn
conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA)
else:
dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name)
connect = dsn
conn = cx_Oracle.connect(user, password, dsn)
elif (not(user) or not(password)):
module.fail_json(msg='Missing username or password for cx_Oracle')
except cx_Oracle.DatabaseError as exc:
error, = exc.args
msg = 'Could not connect to database - %s, connect descriptor: %s' % (error.message, connect)
module.fail_json(msg=msg, changed=False)
cursor = conn.cursor()
if state not in ('absent'):
if not check_user_exists(module, msg, cursor, schema):
if create_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, profile, authentication_type, state, container, container_data):
msg = 'The schema %s has been created successfully' % (schema)
module.exit_json(msg=msg, changed=True)
else:
modify_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, update_password, profile, authentication_type, state, container_data)
# elif state in ('unlocked','locked', ''):
# if not check_user_exists(msg, cursor, schema):
# # if create_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, profile, authentication_type, state, container):
# msg = 'The schema %s doesn\'t exist' % schema
# module.fail_json(msg=msg, changed=False)
# else:
# modify_user(module, cursor, schema, schema_password, schema_password_hash, default_tablespace, default_temp_tablespace, update_password, profile, authentication_type, state)
elif state == 'absent':
if check_user_exists(module, msg, cursor, schema):
if drop_user(module, cursor, schema):
msg = 'The schema (%s) has been dropped successfully' % schema
module.exit_json(msg=msg, changed=True)
else:
module.exit_json(msg='The schema (%s) doesn\'t exist' % schema, changed=False)
module.exit_json(msg='Unhandled exit', changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
``` |
{
"source": "jmsimons/email_scheduler",
"score": 3
} |
#### File: email_scheduler/email_scheduler/scheduler_app.py
```python
import time, schedule
from smtplib import SMTP_SSL
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class EmailScheduler:
def __init__(self, config):
self.config = config
def run(self):
try:
while True:
time.sleep(5)
schedule.run_pending()
except KeyboardInterrupt:
print("EmailScheduler is shutting down, goodbye!")
def add_email_event(self, filename):
with open(f"messages/{filename}") as f:
line = f.readline().strip()
# print(line)
recipient = line.split("::")[1]
subject = f.readline().split("::")[1]
f.readline() # Burn line containing message::
message = f.read()
print(f"Scheduling Message:\nTo: {recipient}, Subject: {subject}\n{message}" )
schedule.every().monday.at("23:16").do(self.send_email, [recipient], subject, message)
def send_email(self, recipients, subject, message):
with SMTP_SSL("smtp.gmail.com", 465) as smtp:
smtp.login(self.config["email"], self.config["password"])
for recipient in recipients:
email_msg = MIMEMultipart()
email_msg["From"] = self.config["email"]
email_msg["To"] = recipient
email_msg["Subject"] = subject
email_body = MIMEText(message)
email_msg.attach(email_body)
email_msg = email_msg.as_string()
try:
smtp.sendmail(self.config["email"], recipient, email_msg)
print(f"{time.time()} - Sent message to {recipient}")
except:
print(f"{time.time()} - Error sending message to {recipient}")
``` |
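A hypothetical message file and driver for the scheduler above. The `messages/` file layout is inferred from `add_email_event`, and the package import path and Gmail credentials are placeholders.
```python
# messages/weekly_update.txt -- layout inferred from add_email_event():
#   recipient::<EMAIL>
#   subject::Weekly update
#   message::
#   Hello, this is the weekly update body.

from email_scheduler.scheduler_app import EmailScheduler

config = {"email": "<EMAIL>", "password": "<PASSWORD>"}  # Gmail account credentials (placeholders)
scheduler = EmailScheduler(config)
scheduler.add_email_event("weekly_update.txt")  # schedules the message for Mondays at 23:16
scheduler.run()                                 # blocks, checking for pending jobs every 5 seconds
```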
{
"source": "jmsimons/flask_template",
"score": 2
} |
#### File: jmsimons/flask_template/run.py
```python
import os
from webapp import app
def setup_webapp_db(): # TODO: Move this function into setup
from webapp import db, bcrypt, User
# from webapp.models import User, Project
print('Building webapp database...')
hashed_password = bcrypt.generate_password_hash("<PASSWORD>").decode('utf-8')
db.create_all()
db.session.add(User(username = "admin", email = "<EMAIL>", password = hashed_password))
db.session.commit()
db.session.close()
if __name__ == '__main__':
if not os.path.exists('webapp/assets/webapp.db'): # TODO: remove after moving db_setup into setup routine
print("No database detected")
# setup_webapp_db()
print('Webapp running with PID:', os.getpid())
app.run(debug = True, host = '0.0.0.0', port = 5000)
# TODO: while True: RunProcessManager and other backend-tasks
```
#### File: flask_template/webapp/prepare_functions.py
```python
from webapp import login_manager, mail, db_manager
def compile_expense_miles():
pass
def prep_expense_summary(user_id, year):
expense_miles = db_manager.get_user_expense_miles(user_id)
cash_expenses = 0
user_vehicles = db_manager.get_user_vehicles(user_id = user_id)
for vehicle in user_vehicles:
vehicle_expenses = db_manager.get_vehicle_expenses(vehicle["id"])
if vehicle_expenses:
cash_expenses += sum([i["amount"] for i in vehicle_expenses])
return {"total_cash": cash_expenses, "total_miles": expense_miles}
```
#### File: flask_template/webapp/user_functions.py
```python
from flask import url_for
from flask_mail import Message
from webapp import login_manager, mail, db_manager
from webapp.models import User
# User-management functions
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
def send_reset_email(user):
token = user.get_reset_token()
message = Message('Password Reset Request', sender = '<EMAIL>', recipients = [user.email])
body = 'To reset your password, visit the following link:\n{}\n\nIf you did not make this request then simply ignore this email and no change will be made.\n'
message.body = body.format(url_for('reset_token', token = token, _external = True))
mail.send(message)
def send_admin_invite(name, email):
token = db_manager.add_admin_invite()
message = Message('Admin Invite Notice', sender = '<EMAIL>', recipients = [email])
body = "Hello {} You've been invited to become an admin! To sign up, visit the following link:\n{}\n\nIf you suspect that you have recieved this message in error, please ignore it.\n"
message.body = body.format(name, url_for('register_user', token = token, _external = True))
mail.send(message)
``` |
{
"source": "jmsimons/LURKlandia",
"score": 3
} |
#### File: LURKlandia/MAINland/fd_test.py
```python
import threading, select, pickle, os, struct
class Q:
def __init__(self): ### Q object that uses a threading Lock and bytes pipe file for put() and get() ###
self.lock = threading.Lock()
self.rq, self.wq = os.pipe() # Pipe returns two file descriptors, this class's fileno and read mothods reference self.fq, where the put method references self.wq #
def put(self, item): ## writes 2-byte length for subsefquent pickle object written ##
with self.lock:
item = pickle.dumps(item)
bytes_to_write = struct.pack('<h', len(item))
print(bytes_to_write)
os.write(self.wq, bytes_to_write)
os.write(self.wq, item)
def get(self): ## Reads 2-bytes for subsequent pickle object to read ##
with self.lock:
bytes_to_read = struct.unpack('<h', os.read(self.rq, 2))[0]
print(bytes_to_read)
data = os.read(self.rq, bytes_to_read)
print(data)
item = pickle.loads(data)
return item
def fileno(self):
fno = self.rq
return fno
q = Q()
def select_loop(pollable_file): ### Watches for activity on pollable_file ###
while True:
print('Loop start...')
readers, _, _ = select.select([pollable_file], [], [])
for r in readers:
print('Reading...')
item = r.get()['text']
print(item)
if item is None: return # Exits select_loop (and its thread) when {'text': None} is read
# Loads three dictionaries to q #
q.put({'text': 'Hell'})
q.put({'text': 'O'})
q.put({'text': 'World'})
# Starts thread #
thread = threading.Thread(target = select_loop, args = (q, )) # Passes in q to select_loop as pollable_file #
thread.start()
# Loads 100 more sets of the same three dictionaries to q #
for i in range(100):
q.put({'text': 'Hell'})
q.put({'text': 'O'})
q.put({'text': 'World'})
# Loads dictionary which signals thread to terminate #
q.put({'text': None})
thread.join()
```
#### File: LURKlandia/MAINland/game.py
```python
from game_components import Character, Room, Loot
# from models import Session, Room, Connection, Monster, Loot
from player import Player
from LURKp import LURKprot
from random import randint
import queue, threading, time, models
class Game:
def __init__(self):
self.settings = {
'landlord': 'Mr. Rowanitz',
'start_stat_limit': 100,
'start_room': 1,
'returning_room': 5,
'stash_player_after': 600
}
self.actions = {
2: self.change_room,
3: self.stage_fight,
4: self.stage_pvp_fight,
5: self.loot,
6: self.start_player,
12: self.stash_player
}
self.rooms = {}
self.players = {}
self.queue = queue.Queue() # Queue of dictionaries to process in game_loop #
self.remove = queue.Queue() # Queue of Player objects to stash on socket error #
self.load_map()
self.lurk = LURKprot()
self.thread = threading.Thread(target = self.game_loop)
self.thread.start()
def load_map(self): ### Loads Rooms, Connections, Monsters, and Lootables from database ###
session = models.Session()
rooms = session.query(models.Room).all()
connections = session.query(models.Connection).all()
monsters = session.query(models.Monster).all()
lootables = session.query(models.Loot).all()
for room in rooms:
number, name, desc = room.id, room.name, room.description
self.rooms[number] = Room(number, name, desc)
for connection in connections:
room1, room2 = connection.room1, connection.room2
self.rooms[room1].connections.append(room2)
for monster in monsters:
name, attack, defense, regen, health, gold, room, desc = monster.name, monster.attack, monster.defense, monster.regen, monster.health, monster.gold, monster.room, monster.description
self.rooms[room].monsters[name] = Character(name, attack, True, True, True, True, True, defense, regen, health, gold, room, desc)
for lootable in lootables:
room, name, value, rewards, message = lootable.room, lootable.name, lootable.value, lootable.rewards, lootable.message
self.rooms[room].lootables.append(Loot(room, name, value, rewards, message))
def game_loop(self):
while True:
game_queue_size = self.queue.qsize()
if game_queue_size: print('Processing', game_queue_size, 'actions...')
for _ in range(game_queue_size):
action = self.queue.get() # Return a tuple of (player.name, message_dict)
action_type = action[1]['type']
self.actions[action_type](action)
remove_queue_size = self.remove.qsize()
if remove_queue_size: print('Processing', remove_queue_size, 'removals...')
for _ in range(remove_queue_size):
player, time_added = self.remove.get()
if time.time() - time_added >= self.settings['stash_player_after']:
self.stash_player((player, {}))
def new_conn(self, conn): ### Passes conn to static method LURKprot.decode(), returns success or failure on player creation ###
message_dict = LURKprot.decode(conn = conn)
if message_dict and 'type' in message_dict and message_dict['type'] == 10:
self.new_player(conn, message_dict)
return True
else:
return False
    def new_player(self, conn, characer_dict): ### Checks availability of name or inactive Player object, creates/updates Player or responds with error message ###
name = characer_dict['name']
if name in self.players:
            try: self.players[name].conn.send(bytes(1)) # attempt writing to the socket to see if it's alive
except:
print('Found existing player with broken conn, replacing conn...')
self.players[name].conn = conn
return
error_message = self.lurk.get_err_message(2)
self.lurk.encode(error_message, conn = conn)
# elif player in database: # This will check long-term player storage
else:
            stats_total = sum((characer_dict['attack'], characer_dict['defense'], characer_dict['regen']))
stats_limit = self.settings['start_stat_limit']
if stats_total == stats_limit:
self.players[name] = Player(self, conn, characer_dict = characer_dict)
else:
                for stat in ('attack', 'defense', 'regen'): # recalculates each stat as a ratio of the game stat limit #
                    characer_dict[stat] = int(characer_dict[stat] / stats_total * stats_limit)
                stats_total = sum((characer_dict['attack'], characer_dict['defense'], characer_dict['regen']))
                stats_delta = stats_limit - stats_total
                print(stats_delta)
                for i in range(abs(stats_delta)): # distributes any rounding remainder one point at a time, round-robin #
                    stat = ('attack', 'defense', 'regen')[i % 3]
                    characer_dict[stat] += 1 if stats_delta > 0 else -1
                self.players[name] = Player(self, conn, characer_dict = characer_dict)
def start_player(self, action): ### Updates player.character to indicate 'started' and adds player to the appropriate room ###
name, message_dict = action
player = self.players[name]
player.character.started = True
player.character.room = self.settings['start_room']
        self.rooms[self.settings['start_room']].players.append(name)
def stash_player(self, action): ### Removes Player object from players and current room, adds or updates player record in long-term storage ###
name, message_dict = action
fair_well = self.lurk.get_chat_message(self.settings['landlord'], name, 'Sad to see you going so sooooon. Fair well!')
self.players[name].send_queue.put(fair_well)
self.players[name].character.started = False
self.players[name].character.ready = False
self.players[name].active = False
def change_room(self, action): ### Checks that new room is a connection of current room, removes player from current room and adds to new room ###
name, message_dict = action
player = self.players[name]
new_room = message_dict['room']
        current_room = player.character.room
        if new_room in self.rooms[current_room].connections:
            self.rooms[current_room].players.remove(name)
            self.update_room(current_room)
            self.rooms[new_room].players.append(name)
self.update_room(new_room)
player.character.room = new_room
def update_room(self, room): ### Sends updated characters, connections, and other info to all players in room ###
current_room = self.rooms[room].get_dict()
player_characters = [self.players[i].character.get_dict() for i in self.rooms[room].players]
monster_characters = [i.get_dict() for i in self.rooms[room].monsters.values()]
        connecting_rooms = [self.rooms[i].get_dict() for i in self.rooms[room].connections]
for player in self.rooms[room].players:
self.players[player].send_queue.put(current_room)
for update_list in (player_characters, monster_characters, connecting_rooms):
self.players[player].send_queue.put(update_list)
def process_fight(self, room, players_list, monsters_list = None): ### Calculates attack and damage taken for each character's turn, finally calls self.update_room ###
        if monsters_list: # whole room fight
            for player_char in players_list: # Each player attacks first
                attack = player_char['attack']
                for monster_char in monsters_list:
                    if monster_char['health'] > 0:
                        calc_attack = randint(int(attack * 0.75), int(attack * 1.25)) # consider moving this above the for loop if functionality is slow #
                        damage_taken = calc_attack - monster_char['defense']
                        self.rooms[room].monsters[monster_char['name']].health -= damage_taken
            for monster_char in monsters_list: # Then monsters attack
                attack = monster_char['attack']
                for player_char in players_list:
                    calc_attack = randint(int(attack * 0.5), attack) # consider moving this above the for loop if functionality is slow #
                    damage_taken = calc_attack - player_char['defense']
                    self.players[player_char['name']].character.health -= damage_taken
else: # pvp fight
player1, player2 = players_list
calc_attack = randint(int(player1['attack'] * 0.75), int(player1['attack'] * 1.25))
damage_taken = calc_attack - player2['defense']
self.players[player2['name']].character.health -= damage_taken
calc_attack = randint(int(player2['attack'] * 0.75), int(player2['attack'] * 1.25))
damage_taken = calc_attack - player1['defense']
self.players[player1['name']].character.health -= damage_taken
self.update_room(room)
def stage_fight(self, action): ### Prepares character list for room, passes characters to calculate_attack ###
name, message_dict = action
room = self.players[name].character.room
if self.rooms[room].monsters:
players_list = self.rooms[room].players.copy()
players_list.remove(name)
players_list.insert(0, name)
players_list = [self.players[i].character.get_fight_stats() for i in players_list]
players_list = [i for i in players_list if i and i['health'] > 0]
            monsters_list = [i.get_fight_stats() for i in self.rooms[room].monsters.values()]
self.process_fight(room, players_list, monsters_list)
else:
message_dict = self.lurk.get_err_message(3)
self.players[name].send_queue.put(message_dict)
def stage_pvp_fight(self, action): ### Commences attack sequence, calculating attack and damage taken for each character's turn, and finally calls self.update_room ###
name, message_dict = action
target = message_dict['name']
room = message_dict['room']
if target in self.rooms[room].players:
players_list = [name, target]
players_list = [self.players[i].character.get_fight_stats() for i in players_list]
self.process_fight(room, players_list)
else:
message_dict = self.lurk.get_err_message(6) # text = 'Target player not in room'
self.players[name].send_queue.put(message_dict)
def loot(self, action):
name, message_dict = action
target = message_dict['name']
if self.players[name].character.room == self.players[target].character.room:
self.players[name].character.gold += self.players[target].character.gold
else:
message_dict = self.lurk.get_err_message(6)
self.players[name].send_queue.put(message_dict)
```
#### File: LURKlandia/MAINland/LURKland.py
```python
from game import Game
import socket, select, logging
host = socket.gethostname()
port = 5050
def setup_logging():
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(filename = f'LURK.log', level = logging.DEBUG)
def main(server_conn):
game = Game()
conns = [server_conn]
new_conns = list()
while True: ### The main process should only handle initial connections, passing subsequent messages into game ###
print(f'Listening for conns: {new_conns}')
        conns.extend(new_conns) # move newly accepted conns into the select list exactly once
        new_conns.clear()
        readers, _, _ = select.select(conns, [], [])
        for reader in readers:
            if reader == server_conn:
                conn, addr = server_conn.accept()
                print("New Client; Address:", addr, "Conn:", conn)
                logging.log(logging.INFO, f"New Client; Address: {addr}")
                new_conns.append(conn)
            else:
                if game.new_conn(reader):
                    conns.remove(reader) # player created successfully, stop watching this conn here
if __name__ == '__main__':
setup_logging()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while port <= 5054:
try:
s.bind((host, port))
break
except socket.error as e:
print('Error binding port:', str(e))
port += 1
s.listen(5)
print(f'Listening on port: {port}')
main(s)
```
#### File: LURKlandia/Multiprocessing/LURKland.py
```python
from game import Game
from client_manager import ClientManager
import socket, select, logging, os
from multiprocessing import Pipe, Process, Queue
host = '0.0.0.0'
port = 5050
def setup_logging():
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(filename = 'LURK.log', level = logging.DEBUG)
def spawn_game_process(game_queue, client_queue):
p = Process(target = Game, args = (game_queue, client_queue), daemon = True)
p.start()
print('Conn Process PID:', os.getpid())
print('Game Process PID:', p.pid)
def main(server_conn):
game_queue = Queue()
client_queue = Queue()
spawn_game_process(game_queue, client_queue)
conns = []
attempts = []
client_manager = ClientManager(game_queue, client_queue)
conns.append(server_conn)
while True: ### The main process should only handle initial connections, passing subsequent messages into game ###
# print(f'Listening for conns: {conns}')
        readers, _, _ = select.select(conns, [], [])
for reader in readers:
if reader == server_conn:
conn, addr = server_conn.accept()
print("New Client; Address:", addr, "Conn:", conn)
logging.log(logging.INFO, f"New Client Address: {addr}")
conns.insert(0, conn)
attempts.insert(0, 0)
client_manager.greet_conn(conn)
else:
index = conns.index(reader)
attempts[index] += 1
if client_manager.approve_conn(reader):
conns.pop(index)
attempts.pop(index)
elif attempts[index] == 5:
                    print('Closing conn after 5 attempts:', reader)
conns.pop(index)
attempts.pop(index)
if __name__ == '__main__':
setup_logging()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while port <= 5054:
try:
s.bind((host, port))
break
except socket.error as e:
print('Error binding port:', str(e))
port += 1
s.listen(10)
print(f'Listening on: {host}:{port}')
# TODO: add logic that kills the child process(es)
main(s)
```
#### File: LURKlandia/Multiprocessing/player.py
```python
import threading, multiprocessing, select, pickle, os, struct, time
from LURKp import LURKprot
from game_components import Character
class Q:
def __init__(self): ### Queue object that uses a threading Lock and bytes pipe file for put() and get() ###
self.lock = multiprocessing.Lock()
        self.rq, self.wq = os.pipe() # Pipe returns two file descriptors; this class's fileno and get methods read from self.rq, while put writes to self.wq #
    def put(self, item): ## Writes a 2-byte length prefix followed by the pickled object ##
with self.lock:
item = pickle.dumps(item)
bytes_to_write = struct.pack('<H', len(item))
os.write(self.wq, bytes_to_write)
os.write(self.wq, item)
def get(self): ## Reads 2-byte length for subsequent pickle object read ##
with self.lock:
bytes_to_read = struct.unpack('<H', os.read(self.rq, 2))[0]
data = os.read(self.rq, bytes_to_read)
item = pickle.loads(data)
return item
def fileno(self): ## Returns the read file descriptor ##
fno = self.rq
return fno
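# Illustrative usage sketch (added commentary, not part of the original module): because Q
# exposes fileno(), an instance can be passed to select.select() alongside sockets, which is
# how Player multiplexes its outgoing queue with its connection below. For example:
#
#   q = Q()
#   q.put({'type': 1})
#   readable, _, _ = select.select([q], [], [])
#   if q in readable:
#       message = q.get()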
class Player: ### This class holds Player data and processes incoming and outgoing socket communication in its own thread ###
def __init__(self, client_man_object, name, conn):
# !!! Never update self.character from within this class's thread, only from game thread !!! #
self.name = name
self.send_queue = Q()
self.client_manager = client_man_object
self.new_thread(conn)
def new_thread(self, conn):
self.conn = conn
self.lurk = LURKprot(conn = conn)
self.thread = threading.Thread(target = self.conn_loop, name = f'{self.name}_thread')
self.active = True
self.thread.start()
def conn_loop(self): ### Routes incoming message to other player or game queue, outgoing message to conn, or flags self.conn as broken ###
print('Conn Loop Started:', threading.current_thread().name)
while self.active:
            readers, _, errors = select.select([self.conn, self.send_queue], [], [self.conn]) # send_queue exposes fileno(), so select can watch it alongside the socket
for reader in readers:
if reader == self.conn:
message_dict = self.lurk.decode()
# print(message_dict)
if message_dict and message_dict['type'] in (1, 2, 3, 4, 5, 6, 12):
# print('Incoming from', self.character.name, message_dict)
self.client_manager.game_queue.put((self.name, message_dict))
else:
self.active = False
print(f'Problem data from {self.name}: {message_dict}')
else:
message_dict = reader.get()
# print('Outgoing to', self.character.name, message_dict)
self.lurk.encode(message_dict)
for error in errors:
print('Error in', self.name, error)
self.active = False
# self.game.remove.put((self.name, time.time()))
print(self.name, 'conn_loop ended!')
``` |
{
"source": "jmsimons/time_keeper_gui",
"score": 2
} |
#### File: time_keeper_gui/timekeeper/ReportEdit.py
```python
from tkinter import ttk, Tk, StringVar, END, N, S, E, W, NORMAL, DISABLED
from tkinter.scrolledtext import ScrolledText
import time, clipboard
from timekeeper.grid_table import GridTable
from timekeeper.Export import Export
from timekeeper.Popups import PopConfirm, EntryBox
class ReportEditApp(): ### 'Report hours' and 'edit jobs/shifts' application window gui elements, data, and methods ###
def __init__(self, db, **kwargs):
self.db = db
self.root = Tk()
self.root.title('Report and Edit')
# self.root.geometry("600x400")
self.root.resizable(False, False)
self.container = ttk.Frame(self.root)
self.frame = ttk.Frame(self.container)
self.grid_configure()
filter_label = ttk.Label(self.frame, text = "Filter:")
self.view_selection = StringVar(self.root)
self.job_selection = StringVar(self.root)
self.start_selection = StringVar(self.root)
self.end_selection = StringVar(self.root)
self.export_selection = StringVar(self.root)
self.build_menus()
self.search_input = ttk.Entry(self.frame, width = 16)#, placeholder = 'Search Notes...'
self.totals_label = ttk.Label(self.frame, text = 'Hours: \tShifts:')
self.table_frame = ttk.Frame(self.frame)
self.build_table(**kwargs)
self.build_date_select()
export_options = ('Text', 'PDF')
self.export_menu = ttk.OptionMenu(self.frame, self.export_selection, 'Export', *export_options)
self.view_shift_button = ttk.Button(self.frame, text = 'View/Edit Shift', command = self.view_item)
filter_label.grid(column = 1, row = 1, sticky = W, pady = 5)
self.view_menu.grid(column = 2, row = 1, sticky = E)
self.job_menu.grid(column = 1, columnspan = 2, row = 2, sticky = (W, E))
self.search_input.grid(column = 5, row = 2, sticky = (W, E))
self.totals_label.grid(column = 1, columnspan = 5, row = 4, sticky = W, pady = 5)
self.table_frame.grid(column = 1, columnspan = 5, row = 5, sticky = (N, S, E, W))
self.export_menu.grid(column = 1, columnspan = 2, row = 6, sticky = W)
self.view_shift_button.grid(column = 5, row = 6, sticky = E, pady = 5)
self.frame.grid(column = 0, row = 0, padx = 5, sticky = (N, S, E, W))
self.container.grid(column = 0, row = 0, sticky = (N, S, E, W))
self.view_menu.bind("<ButtonRelease-1>", self.build_table)
self.job_menu.bind("<ButtonRelease-1>", self.filter_data)
self.per_start_menu.bind("<ButtonRelease-1>", self.filter_data)
self.per_end_menu.bind("<ButtonRelease-1>", self.filter_data)
self.search_input.bind('<KeyRelease>', self.filter_data)
self.search_input.bind("<Return>", self.filter_data)
self.export_menu.bind("<ButtonRelease-1>", self.export)
def grid_configure(self):
# TODO: this isn't working, trying to figure out why. The goal is to set fixed column-widths so that the ui doesn't redraw when a widget changes size.
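        # Added note: columnconfigure weights only control how *extra* space is distributed,
        # so weights alone will not pin column widths; grid_columnconfigure's minsize option
        # (possibly combined with uniform) is usually what fixed-width columns need.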
self.root.columnconfigure(0, weight = 0)
self.container.columnconfigure(0, weight = 0)
self.frame.columnconfigure(0, weight = 0)
self.frame.columnconfigure(1, weight = 1)
self.frame.columnconfigure(2, weight = 2)
self.frame.columnconfigure(3, weight = 2)
self.frame.columnconfigure(4, weight = 2)
self.frame.columnconfigure(5, weight = 2)
def build_menus(self):
self.job_choices = ['All Jobs'] + self.db.report_jobs()
self.job_menu = ttk.OptionMenu(self.frame, self.job_selection, 'All Jobs', *self.job_choices)
self.job_menu.config(width = 10)
self.view_choices = ("Shifts", "Tasks", "Jobs")
self.view_menu = ttk.OptionMenu(self.frame, self.view_selection, self.view_choices[0], *self.view_choices)
        self.view_menu.config(width = 5)
def build_table(self, event = None, **kwargs):
self.table_view = ttk.Treeview(self.table_frame, selectmode = 'browse')
vbar = ttk.Scrollbar(self.table_frame, orient = 'vertical', command = self.table_view.yview)
self.table_view.config(yscrollcommand = vbar.set)
self.table_view.grid(column = 1, columnspan = 1, row = 1, sticky = (N, S, E, W))
vbar.grid(column = 2, row = 1, sticky = (N, S))
view_selection = self.view_selection.get()
if view_selection == self.view_choices[0]:
self.job_menu.config(state = NORMAL)
self.shifts_table()
elif view_selection == self.view_choices[1]:
self.job_menu.config(state = NORMAL)
self.tasks_table()
elif view_selection == self.view_choices[2]:
# self.job_menu set to 'All Jobs'
self.job_menu.config(state = DISABLED)
self.jobs_table()
self.tid_lookup = {}
self.get_rows(**kwargs)
self.populate_table()
self.table_view.bind("<Double-Button-1>", self.view_item)
def shifts_table(self):
self.table_view['columns'] = ('1', '2', '3', '4', '5')
self.table_view['show'] = 'headings'
self.table_view.column('1', width = 120, anchor = 'w')
self.table_view.column('2', width = 80, anchor = 'w')
self.table_view.column('3', width = 70, anchor = 'w')
self.table_view.column('4', width = 40, anchor = 'w')
self.table_view.column('5', width = 150, anchor = 'w')
self.table_view.heading('1', text = 'Job')
self.table_view.heading('2', text = 'Date')
self.table_view.heading('3', text = 'Start')
self.table_view.heading('4', text = 'Hours')
self.table_view.heading('5', text = 'Notes')
def tasks_table(self):
self.table_view['columns'] = ('1', '2', '3', '4')
self.table_view['show'] = 'headings'
self.table_view.column('1', width = 120, anchor = 'w')
self.table_view.column('2', width = 80, anchor = 'w')
self.table_view.column('3', width = 100, anchor = 'w')
self.table_view.column('4', width = 160, anchor = 'w')
self.table_view.heading('1', text = 'Job')
self.table_view.heading('2', text = 'Date')
self.table_view.heading('3', text = 'Created')
self.table_view.heading('4', text = 'Title')
def jobs_table(self):
self.table_view['columns'] = ('1', '2', '3', '4', '5')
self.table_view['show'] = 'headings'
self.table_view.column('1', width = 120, anchor = 'w')
self.table_view.column('2', width = 90, anchor = 'w')
self.table_view.column('3', width = 90, anchor = 'w')
self.table_view.column('4', width = 80, anchor = 'center')
self.table_view.column('5', width = 80, anchor = 'center')
self.table_view.heading('1', text = 'Job')
self.table_view.heading('2', text = 'Date')
self.table_view.heading('3', text = 'Created')
self.table_view.heading('4', text = 'Shifts')
self.table_view.heading('5', text = 'Hours')
def build_date_select(self):
format = '%Y/%m/%d'
# TODO: Replace 17 hour offset with time zone setting from config
start = int(self.shifts[0]['start'] / 86400) * 86400 - (17 * 3600)
end = int(time.time() / 86400 + 1) * 86400 - (17 * 3600) + (24 * 3600) + 1
self.seconds_range = [i for i in range(start, end, 86400)][::-1]
self.date_range = [time.strftime(format, time.localtime(i)) for i in self.seconds_range]
self.per_start_menu = ttk.OptionMenu(self.frame, self.start_selection, self.date_range[0], *self.date_range)
self.per_end_menu = ttk.OptionMenu(self.frame, self.end_selection, self.date_range[0], *self.date_range)
self.start_selection.set(self.date_range[-1])
self.end_selection.set(self.date_range[0])
self.per_start_menu.grid(column = 3, row = 2, sticky = (W, E))
self.per_end_menu.grid(column = 4, row = 2, sticky = (W, E))
def filter_data(self, event = None):
job_name = self.job_selection.get()
if job_name == self.job_choices[0]:
job_name = None
period_start = self.start_selection.get()
period_start = self.seconds_range[self.date_range.index(period_start)]
period_end = self.end_selection.get()
period_end = self.seconds_range[self.date_range.index(period_end)]
search_term = self.search_input.get()
if search_term.strip() == '': search_term = None
self.clear_table()
self.get_rows(job_name = job_name, period_start = period_start, period_end = period_end, search_term = search_term)
self.populate_table()
def get_rows(self, **kwargs):
view_selection = self.view_selection.get()
if view_selection == self.view_choices[0]:
self.get_shifts(**kwargs)
elif view_selection == self.view_choices[1]:
self.get_tasks(**kwargs)
elif view_selection == self.view_choices[2]:
self.get_jobs()
if not "job_name" in kwargs or not kwargs["job_name"]:
job_name = 'All Jobs'
else:
job_name = kwargs["job_name"]
self.job_selection.set(job_name)
self.total_shifts = len(self.shifts)
self.total_hours = sum([i['hours'] for i in self.shifts])
self.total_hours = round(self.total_hours, 2)
def get_shifts(self, **kwargs):
self.shifts = self.db.report_shifts(**kwargs)
self.table_items = []
for shift in self.shifts:
if len(shift['notes']) < 20:
notes = shift['notes']
else:
notes = shift['notes'][:20].split('\n')[0]
start = shift['str_start'].split()
self.table_items.append((shift["id"], (shift['job'], start[0], start[1], shift['hours'], notes)))
def get_tasks(self, **kwargs):
tasks = self.db.report_tasks(**kwargs)
self.table_items = [(i["id"], (i["job_name"], i["time_created"].split()[0], i["time_created"].split()[1], i["title"])) for i in tasks]
def get_jobs(self):
jobs = self.db.report_jobs(return_dict = True, return_details = True)
self.table_items = []
for job in jobs:
self.table_items.append((job["id"], (job["name"], job["created"].split()[0], job["created"].split()[1], job["total_shifts"], job["total_hours"])))
def populate_table(self):
self.totals_label['text'] = f'Hours: {self.total_hours}\tShifts: {self.total_shifts}'
for item in self.table_items[::-1]:
tid = self.table_view.insert('', 'end', values = item[1])
self.tid_lookup[tid] = item[0]
def clear_table(self):
for tid in self.tid_lookup:
self.table_view.delete(tid)
self.tid_lookup = {}
def export(self, event = None):
doc_type = self.export_selection.get()
self.export_selection.set('Export')
job_selection = self.job_selection.get()
period_start = self.start_selection.get()
period_end = self.end_selection.get()
search_term = self.search_input.get()
if search_term in ('', ' '): search_term = None
export_shifts = [i for i in self.shifts]
for shift in export_shifts:
shift["tasks"] = self.db.report_tasks(shift_id = shift["id"])
Export(doc_type, job_selection, period_start, period_end, search_term, self.total_hours, export_shifts)
def view_item(self, event = None):
try: tid = self.table_view.focus()
except: return
item_type = self.view_selection.get().lower().rstrip('s')
item_id = self.tid_lookup[tid]
if item_type == "shift":
item = self.db.get_shift(item_id)
elif item_type == "task":
item = self.db.get_task(item_id)
elif item_type == "job":
# return
item = self.db.get_job(item_id)
item_view = ViewEditPane(self, item_type, item)
item_view.root.mainloop()
class ViewEditPane():
def __init__(self, ReportEdit, item_type, item_dict):
self.report_edit = ReportEdit
self.item_type = item_type
self.item = item_dict
self.root = Tk()
title = f"{item_type.capitalize()} Record"
self.root.title(title)
self.root.resizable(False, False)
# self.root.overrideredirect(True)
self.edit_selection = StringVar(self.root)
self.container = ttk.Frame(self.root)
self.frame = ttk.Frame(self.container)
self.edit_frame = ttk.Frame(self.frame)
if self.item_type == "shift": self.load_shift()
elif self.item_type == "task": self.load_task()
elif self.item_type == "job": self.load_job()
self.notes = ScrolledText(self.frame, width = 60, height = 15, relief = 'sunken')
delete_button_text = f"Delete {item_type.capitalize()}"
self.delete_button = ttk.Button(self.edit_frame, text = delete_button_text, command = self.delete_prompt)
self.done_button = ttk.Button(self.frame, text = 'Done', command = self.root.destroy)
self.edit_menu = ttk.OptionMenu(self.edit_frame, self.edit_selection, 'Edit', *self.edit_options)
self.notes.grid(column = 1, columnspan = 2, row = 4, sticky = (N, S, E, W), pady = 5)
self.edit_menu.grid(column = 1, row = 1, sticky = W)
self.delete_button.grid(column = 2, row = 1, sticky = E)
self.done_button.grid(column = 2, row = 5, sticky = E)
self.edit_frame.grid(column = 1, row = 5, sticky = W)
self.frame.grid(column = 0, row = 0, padx = 5, pady = 5)
self.container.grid()
self.notes.config(state = NORMAL)
self.notes.delete('1.0', END)
self.notes.insert(END, self.item['notes'])
self.notes.config(state = DISABLED)
self.root.bind("<Command-c>", self.copy_notes)
self.edit_menu.bind("<ButtonRelease-1>", self.edit_item)
def load_shift(self): ## Load Shift view elements ##
self.job_label = ttk.Label(self.frame, text = f"Job:\t{self.item['job']}")
self.start_label = ttk.Label(self.frame, text = f"Start:\t{self.item['str_start']}")
self.end_label = ttk.Label(self.frame, text = f"End:\t{self.item['str_end']}")
self.break_label = ttk.Label(self.frame, text = f"Break:\t{self.item['break']} (minutes)")
self.hours_label = ttk.Label(self.frame, text = f"Hours:\t{self.item['hours']}")
self.job_label.grid(column = 1, row = 1, sticky = W, ipadx = 2)
self.start_label.grid(column = 1, row = 2, sticky = W, ipadx = 5, pady = 2)
self.end_label.grid(column = 1, row = 3, sticky = W, ipadx = 5, ipady = 0)
self.break_label.grid(column = 2, row = 2, sticky = W, ipadx = 5, ipady = 0)
self.hours_label.grid(column = 2, row = 3, sticky = W, ipadx = 5, ipady = 0)
self.edit_options = ('Job', 'Start', 'End', 'Break', 'Notes')
def load_task(self): ## Load Task view elements ##
completion_status = "Complete" if self.item["complete"] else "Incomplete"
self.job_label = ttk.Label(self.frame, text = f"Job:\t{self.item['job_name']}")
self.time_created_label = ttk.Label(self.frame, text = f"Created:\t{self.item['time_created']}")
self.completion_status_label = ttk.Label(self.frame, text = f"Status:\t{completion_status}")
self.job_label.grid(column = 1, row = 1, sticky = W, ipadx = 2)
self.time_created_label.grid(column = 1, row = 2, sticky = W, ipadx = 2)
self.completion_status_label.grid(column = 2, row = 2, sticky = W, ipadx = 2)
self.edit_options = ("Title", "Notes")
def load_job(self): ## Load Job view elements ##
self.job_label = ttk.Label(self.frame, text = f"Name:\t{self.item['name']}")
self.time_created_label = ttk.Label(self.frame, text = f"Created:\t{self.item['created']}")
self.total_shifts_label = ttk.Label(self.frame, text = f"Shifts:\t{self.item['total_shifts']}")
self.total_hours_label = ttk.Label(self.frame, text = f"Total Hours:\t{self.item['total_hours']}")
self.export_button = ttk.Button(self.frame, text = 'Export Records', command = self.load_job)
self.job_label.grid(column = 1, row = 1, sticky = W, ipadx = 2)
self.time_created_label.grid(column = 1, row = 2, sticky = W, ipadx = 2)
self.total_shifts_label.grid(column = 1, row = 3, sticky = W, ipadx = 2)
self.total_hours_label.grid(column = 2, row = 3, sticky = W, ipadx = 2)
self.export_button.grid(column = 2, row = 1, sticky = E, ipadx = 2)
self.edit_options = ("Name", "Notes")
def edit_item(self, event):
key = self.edit_selection.get().lower()
if key == 'edit': return
self.edit_selection.set('Edit')
if self.item_type == "shift":
if key in ('start', 'end'):
key = 'str_' + key
elif self.item_type == "task": pass
elif self.item_type == "job": pass
entry = EntryBox(self.save_property, key, self.item[key])
entry.root.mainloop()
def save_property(self, key, value):
if self.item_type == "shift":
self.item = self.report_edit.db.update_shift_field(self.item['id'], key, value)
self.load_shift()
elif self.item_type == "task":
self.item = self.report_edit.db.update_task_field(self.item['id'], key, value)
self.load_task()
elif self.item_type == "job":
self.item = self.report_edit.db.update_job_field(self.item['id'], key, value)
self.load_job()
self.report_edit.filter_data()
def copy_notes(self, event = None):
clipboard.copy(self.notes.get(1.0, END))
print('Notes Copied')
def delete_prompt(self):
popup = PopConfirm("Permanently delete shift?", self.delete_shift)
popup.root.mainloop()
def delete_shift(self):
self.report_edit.db.remove_shift(self.item['id'])
self.root.destroy()
self.report_edit.filter_data()
``` |
{
"source": "jmsimons/VScope",
"score": 3
} |
#### File: VScope/main/load_project.py
```python
def dict(project_path): ### Loads dictionary data from project/assets directory ###
project_dict = {}
with open('{}/assets/project_dict.txt'.format(project_path)) as f:
for line in f:
key, value = line.rstrip().split('::')
if value == "False": value = False
elif value == "True": value = True
elif value == "None": value = None
else:
try: value = int(value)
except: pass
project_dict[key] = value
return project_dict
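# Illustrative input sketch (assumed format, not taken from a real project): each line of
# assets/project_dict.txt is a single "key::value" pair that the parser above converts back
# into Python types, e.g.:
#
#   name::myproject
#   proc::4
#   sub::True
#   temp::None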
```
#### File: VScope/main/pickle_utils.py
```python
import pickle
def dump(object, filepath):
with open(filepath, 'wb') as wb:
pickle.dump(object, wb)
def load(filepath):
with open(filepath, 'rb') as rb:
return pickle.load(rb)
project_dict = {'name':None, # Project Name
'orgn':None, # Project Organism
'path':None, # Path to main project directory (default: .../vscope/projects)
'temp':None, # (Optional) Path to temporary project dir, points to a faster storage medium
'repo':None, # Reads data repository (Local, fastq-dump, wget, upload)
'acc':None, # Accession/Samples list contains sample ID's for each sample in project
'ref':None, # Reference genome file (.fa, .fasta, .fas)
'ver':None, # Reference genome version # (ie 7.0.1)
'gff':None, # Reference genome exons file (.gff, .gff3)
'gtf':None, # Reference genome exons file (.gtf)
'grp':None, # Group size for final analysis
'sub':True, # Subtraction boolean (Perform Subtraction?)
'anno':True, # Post-subtraction SnpEff annotation
'proc':1, # Number of concurrent samples
'dlay':0, # Number of minutes to delay between sample launches
'thds':1, # Number of additional threads to use for assembly and other tasks
}
# dump(project_dict, '../project_dict.pkl')
```
#### File: VScope/main/sample.py
```python
import os, time, subprocess
from sys import exit
class sample: ### Data and methods for managing workflow on a single sample ###
def __init__(self, sample, project_dict, config):
# Define Class Attributes #
self.config = config
self.proj_dict = project_dict
self.name = sample
self.threads = self.proj_dict['thds']
if self.threads == 'Auto':
self.threads = self.config['CPU_CORES']
proj_path = project_dict['path'].rstrip('/')
if self.proj_dict['temp']:
self.aln_path = '{}/{}/aln'.format(self.proj_dict['temp'].rstrip('/'), self.name) # Static aln directory path #
else:
self.aln_path = '{}/{}/aln'.format(proj_path, self.name) # Static aln directory path #
self.out_path = '{}/{}/out'.format(proj_path, self.name) # Static output directory path #
self.reference = '{}/reference/{}'.format(proj_path, self.proj_dict['ref'])
self.files = {'fastq':[], 'gz':[], 'sam':[], 'bam':[], 'cram':[], 'txt':[], 'bai':[], 'vcf':[]}
# Set-up/Check Sample Environment #
if not os.path.exists(self.aln_path): # Sets up sample aln directory #
os.makedirs(self.aln_path)
if not os.path.exists(self.out_path): # Sets up sample out directory #
os.makedirs(self.out_path)
os.chdir('{}/{}'.format(proj_path, self.name))
print('Working in...', os.getcwd())
self.refresh_complete() # Update internal completed files list #
for key in self.files: # Scans for existing files by extension (ignores log files) #
self.add_file(key)
self.write('Sample initialized!')
    def write(self, string, output_file = '', nl = 1): ### Writes to log files; the log file name is built from the sample name plus the optional output_file suffix ###
while nl:
string += '\n'
nl -= 1
if output_file == 'complete':
output = string
else:
format = '%Y/%m/%d %H:%M:%S'
stamp = time.strftime(format, time.localtime(time.time()))
output = '>{} {}'.format(stamp, string)
if not output_file: output_file = '{}.log'.format(self.name)
else: output_file = '{}.{}.log'.format(self.name, output_file)
with open(output_file, 'a+') as f:
f.write(output)
def add_file(self, ext): ### Adds files of a given extension to their respective lists and the master complete.log list ###
print('Scanning for {} files'.format(ext))
dirs = (self.out_path, self.aln_path)
for dir in dirs:
for f in sorted(os.listdir(dir)):
if f[0] != '.':
f = f.split('.')
n, e = f[0], f[-1]
if len(f) == 3:
n = '{}.{}'.format(n, f[1])
                    if e == ext:
if n not in self.files[e]:
if 'ign' not in n:
print('Adding {}.{}'.format(n, e))
self.write('Adding {}.{}'.format(n, e))
self.files[e].append(n)
if '{}.{}'.format(n, e) not in self.complete: # adds new files to complete.log #
self.write('{}.{}'.format(n, e), output_file = 'complete')
self.files[ext].sort()
self.refresh_complete()
def refresh_complete(self): ### Creates/updates complete.log file and internal master complete list ###
self.complete = []
if not os.path.exists('{}.complete.log'.format(self.name)):
self.write('', nl = 0, output_file = 'complete')
with open('{}.complete.log'.format(self.name)) as f:
for line in f:
self.complete.append(line.strip())
def process(self, stage_desc, command, output_file): ### Executes a shell command and waits for completion, writes stdout and stderr to specified log file, exits program on non-0 return code ###
self.write(stage_desc)
print('Executing:', command)
pipe = subprocess.PIPE
try: proc = subprocess.run(command, shell = True, stdout = pipe, stderr = pipe) # starts shell process, returns 0 if successful #
except:
            self.exit('Halt: Sample Interrupted!')
print('[{!r} exited with {}]'.format(command, proc.returncode))
self.write('[{!r} exited with {}]'.format(command, proc.returncode))
if proc.stdout: self.write('[stdout]\n{}'.format(proc.stdout.decode()), output_file = output_file)
if proc.stderr: self.write('[stderr]\n{}'.format(proc.stderr.decode()), output_file = output_file)
if proc.returncode: # proc.returncode evaluates to True if non-zero #
self.exit('Halt: Process exited with error!') # exit called with error #
def exit(self, code = 0):
if code: ### Process returncode is non-0 or KeyboardInterupt, called from process() upon error, deletes unfinished files ###
dirs = (self.out_path, self.aln_path)
for dir in dirs:
for f in sorted(os.listdir(dir)):
if f[0] != '.': # Ignores filenames that start with '.' #
f = f.split('.')
n, e = f[0], f[-1]
if len(f) == 3:
n = '{}.{}'.format(n, f[1])
if e != 'log': # Ignores log files #
                            if n+'.'+e not in self.complete: # Removes from dir if not listed in self.complete (assumed to be incomplete) #
f = '{}.{}'.format(n, e)
print('Removing {}'.format(f))
self.write('Removing {}'.format(f))
os.remove('{}/{}'.format(dir, f))
print(code)
self.write(code)
exit() # sys.exit() when called with error #
else: ### Successful completion of workflow ###
print('{} workflow complete.'.format(self.name))
self.write('{} workflow complete.'.format(self.name))
file_list = [] # Retained files list returned to the calling script #
for key in self.files:
for file in self.files[key]:
file_list.append('{}.{}'.format(file, key))
return file_list
```
#### File: VScope/webapp/routes.py
```python
import time
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, current_user, logout_user, login_required
from werkzeug.utils import secure_filename
from webapp import app, config, db, bcrypt, send_reset_email
from webapp.models import User, Project
from webapp.forms import RegisterUser, Login, RequestResetForm, ResetPasswordForm, ProjectSetupA, ProjectSetupB, ProjectSetupC, ConfirmSetup, StartStopProject, ArchiveRemove, AnnotateReport
from webapp.collect_data import list_file, list_ext, get_projects, get_archive, project_dict, sample_status, status_list
from webapp.project_actions import setup_project, run_project, stop_project, run_subtraction, run_annotation, run_analysis, archive_project, remove_project
### User Authentication Web Routes ###
@app.route('/register_user', methods = ['GET', 'POST'])
def register_user():
form = RegisterUser()
if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(user)
db.session.commit()
return render_template('register_user.html', title = 'Register User', form = form)
@app.route('/login', methods = ['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = Login()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
return redirect(url_for('home'))
flash('Login Unsuccessful. Please check email and password.')
return render_template('login.html', title = 'Login', form = form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('home'))
# @app.route('/manage_users')
# @login_required
# def manage_users():
# users = User.query.all()
# return render_template('users.html', users = users)
@app.route('/search_user')
@login_required
def search_user():
form = SearchUser()
return render_template('search_user.html', form = form)
@app.route('/user_details/<user>')
@login_required
def manage_users(user):
users = User.query.all()
return render_template('users.html', users = users)
@app.route('/reset_password', methods = ['GET', 'POST'])
def reset_request():
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Reset Password', form = form)
@app.route('/reset_password/reset_password/<token>', methods = ['GET', 'POST'])
def reset_token(token):
user = User.verify_reset_token(token)
if not user:
flash('That is an invalid or expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user.password = hashed_password
db.session.commit()
flash('Your password has been updated! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Reset Password', form = form)
### Main App Web Routes ###
@app.route('/')
@login_required
def home():
projects = get_projects()
return render_template('home.html', projects = projects)
@app.route('/<project_id>', methods = ['GET', 'POST'])
@login_required
def project(project_id):
form = StartStopProject()
project = get_projects()[project_id]
if request.method == 'POST':
if form.start.data:
if run_project(project['path']):
flash('Project Running', 'info')
time.sleep(2)
elif form.stop.data:
if stop_project(project):
flash('Project Stopped', 'info')
time.sleep(2)
return redirect(url_for('project', project_id = project_id))
processing = status_list(project)
return render_template('project.html', title = 'Project Dashboard', project = project, processing = processing, form = form)
@app.route('/<project_id>/<sample>')
@login_required
def sample(project_id, sample):
print('Sample:', sample)
projects = get_projects()
project = projects[project_id]
status = sample_status(project['path'], sample)
logs = list_ext(project['path'], sample, 'log')
return render_template('sample.html', title = 'Sample Dashboard', project = project, sample = status, logs = logs)
@app.route('/view/<project_id>/<document>')
@app.route('/view/<project_id>/<sample>/<document>')
@login_required
def file_view(project_id, document, sample = None):
projects = get_projects()
project = projects[project_id]
title = document
if sample: document = list_file('{}/{}/{}'.format(project['path'], sample, document))
else: document = list_file('{}/{}'.format(project['path'], document))
return render_template('file_view.html', title = title, document = document, sample = sample)
@app.route('/<project_id>/accession')
@login_required
def accession(project_id):
projects = get_projects()
project = projects[project_id]
title = '{} Samples'.format(project_id)
accession = list_file('{}/assets/{}'.format(project['path'], 'accession.txt'))
return render_template('accession.html', title = title, project_id = project_id, accession = accession)
@app.route('/project_setup', methods = ['GET', 'POST'])
@login_required
def project_setup_A():
form = ProjectSetupA()
if form.validate_on_submit():
project_id = form.project_name.data
project = Project(project_id)
project.organism = form.organism.data
db.session.add(project)
db.session.commit()
return redirect(url_for('project_setup_B', project_id = project_id))
return render_template('proj_setup_A.html', title = 'New Project', form = form)
@app.route('/project_setup-<project_id>', methods = ['GET', 'POST'])
@login_required
def project_setup_B(project_id):
global config
form = ProjectSetupB()
if form.validate_on_submit():
project = Project.query.filter_by(name = project_id).first()
project.reads_path = form.sample_path.data
project.exons_ver = form.ex_version.data
acc_filename = form.accession.data.filename
project.accession = acc_filename
form.accession.data.save('{}/Stage/{}'.format(config['BASE_PATH'], acc_filename))
ref_filename = form.reference.data.filename
project.reference = ref_filename
form.reference.data.save('{}/Stage/{}'.format(config['BASE_PATH'], ref_filename))
exon_filename = form.exons.data.filename
project.exons = exon_filename
form.exons.data.save('{}/Stage/{}'.format(config['BASE_PATH'], exon_filename))
db.session.commit()
if form.submit1.data:
setup_project(project)
time.sleep(1)
return redirect(url_for('project', project_id = project_id))
elif form.submit2.data:
            return redirect(url_for('project_setup_C', project_id = project_id))
return render_template('proj_setup_B.html', title = 'New Project', form = form)
@app.route('/project_setup--<project_id>', methods = ['GET', 'POST'])
@login_required
def project_setup_C(project_id):
form = ProjectSetupC()
if request.method == 'POST':
project = Project.query.filter_by(name = project_id).first()
project.concurrent = form.processes.data
project.threads = form.threads.data
project.delay = form.time_delay.data
project.active = True
db.session.add(project)
db.session.commit()
setup_project(project)
time.sleep(2)
return redirect(url_for('project', project_id = project_id))
return render_template('proj_setup_C.html', title = 'New Project', form = form)
# @app.route('/confirm_setup-<project_id>', methods = ['GET', 'POST'])
# @login_required
# def confirm_setup(project_id):
# global config
# form = ConfirmSetup()
# form_data = new.get_dict()
# if request.method == 'POST':
# pass
# return render_template('confirm_setup.html', form = form, form_data = form_data)
@app.route('/<project_id>/archive_remove', methods = ['GET', 'POST'])
@login_required
def archive_remove(project_id):
form = ArchiveRemove()
if form.validate_on_submit():
project = get_projects()[project_id]
if form.archive.data:
archive_project(project['path'])
flash('{} has been added to archived projects.'.format(project_id), 'info')
if form.remove.data:
remove_project(project_id)
flash('{} has been removed from active projects.'.format(project_id), 'info')
return redirect(url_for('home'))
return redirect(url_for('project', project_id = project_id))
return render_template('archive_remove.html', project_id = project_id, form = form)
@app.route('/archived_projects')
@login_required
def archive():
projects = get_archive()
return render_template('archive.html', projects = projects)
@app.route('/<project_id>/annotate_report/', methods = ['GET', 'POST'])
@login_required
def annotate_report(project_id):
form = AnnotateReport()
if request.method == 'POST':
project = get_projects()[project_id]
# print(request.form)
if 'annotate' in request.form:
# print('Annotating Project VCFs')
run_annotation(project['path'])
elif 'analyse' in request.form:
print('Analysing Quality Threshold Impact to Variant Spectrum')
run_analysis(project['path'])
elif 'filter' in request.form:
print('Filtering Variants at a Threshold of:', request.form['quality'])
print('Removing Common Variants')
run_subtraction(project['path'], request.form['quality'])
return render_template('annotate_report.html', project_id = project_id, form = form)
``` |
{
"source": "jmsking/pykube",
"score": 2
} |
#### File: pykube/tests/test_config.py
```python
import os
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from . import TestCase
from pykube import config
from pykube import exceptions
BASEDIR = Path("tests")
GOOD_CONFIG_FILE_PATH = BASEDIR / "test_config.yaml"
DEFAULTUSER_CONFIG_FILE_PATH = BASEDIR / "test_config_default_user.yaml"
def test_from_service_account_no_file(tmpdir):
with pytest.raises(FileNotFoundError):
config.KubeConfig.from_service_account(path=str(tmpdir))
def test_from_service_account(tmpdir):
namespace_file = Path(tmpdir) / "namespace"
token_file = Path(tmpdir) / "token"
ca_file = Path(tmpdir) / "ca.crt"
with namespace_file.open("w") as fd:
fd.write("mynamespace")
with token_file.open("w") as fd:
fd.write("mytok")
with ca_file.open("w") as fd:
fd.write("myca")
os.environ["KUBERNETES_SERVICE_HOST"] = "127.0.0.1"
os.environ["KUBERNETES_SERVICE_PORT"] = "9443"
cfg = config.KubeConfig.from_service_account(path=str(tmpdir))
assert cfg.doc["clusters"][0]["cluster"] == {
"server": "https://127.0.0.1:9443",
"certificate-authority": str(ca_file),
}
assert cfg.doc["users"][0]["user"]["token"] == "<PASSWORD>"
assert cfg.namespace == "mynamespace"
def test_from_url():
cfg = config.KubeConfig.from_url("http://localhost:8080")
assert cfg.doc["clusters"][0]["cluster"] == {"server": "http://localhost:8080"}
assert "users" not in cfg.doc
@pytest.fixture
def kubeconfig(tmpdir):
kubeconfig = tmpdir.join("kubeconfig")
kubeconfig.write(
"""
apiVersion: v1
clusters:
- cluster: {server: 'https://localhost:9443'}
name: test
contexts:
- context: {cluster: test, user: test}
name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test
user: {token: <PASSWORD>}
"""
)
return kubeconfig
@pytest.mark.parametrize(
"kubeconfig_env,expected_path",
[(None, "~/.kube/config"), ("/some/path", "/some/path")],
)
def test_from_default_kubeconfig(
kubeconfig_env, expected_path, monkeypatch, kubeconfig
):
mock = MagicMock()
mock.return_value.expanduser.return_value = Path(kubeconfig)
monkeypatch.setattr(config, "Path", mock)
if kubeconfig_env is None:
monkeypatch.delenv("KUBECONFIG", raising=False)
else:
monkeypatch.setenv("KUBECONFIG", kubeconfig_env)
cfg = config.KubeConfig.from_file()
mock.assert_called_with(expected_path)
assert cfg.doc["clusters"][0]["cluster"] == {"server": "https://localhost:9443"}
class TestConfig(TestCase):
def setUp(self):
self.cfg = config.KubeConfig.from_file(GOOD_CONFIG_FILE_PATH)
def tearDown(self):
self.cfg = None
def test_init(self):
"""
Test Config instance creation.
"""
# Ensure that a valid creation works
self.assertEqual(GOOD_CONFIG_FILE_PATH, self.cfg.filepath)
# Ensure that if a file does not exist the creation fails
self.assertRaises(
exceptions.PyKubeError, config.KubeConfig.from_file, "doesnotexist"
)
def test_set_current_context(self):
"""
Verify set_current_context works as expected.
"""
self.cfg.set_current_context("new_context")
self.assertEqual("new_context", self.cfg.current_context)
def test_clusters(self):
"""
Verify clusters works as expected.
"""
self.assertEqual(
{"server": "http://localhost"}, self.cfg.clusters.get("thecluster", None)
)
def test_users(self):
"""
Verify users works as expected.
"""
self.assertEqual("data", self.cfg.users.get("admin", None))
def test_contexts(self):
"""
Verify contexts works as expected.
"""
self.assertEqual(
{"cluster": "thecluster", "user": "admin"},
self.cfg.contexts.get("thename", None),
)
def test_cluster(self):
"""
Verify cluster works as expected.
"""
# Without a current_context this should fail
try:
cluster = self.cfg.cluster
self.fail(
"cluster was found without a current context set: {}".format(cluster)
)
except exceptions.PyKubeError:
# We should get an error
pass
self.cfg.set_current_context("thename")
self.assertEqual({"server": "http://localhost"}, self.cfg.cluster)
def test_user(self):
"""
Verify user works as expected.
"""
# Without a current_context this should fail
try:
user = self.cfg.user
self.fail("user was found without a current context set: {}".format(user))
except exceptions.PyKubeError:
# We should get an error
pass
self.cfg.set_current_context("thename")
self.assertEqual("data", self.cfg.user)
def test_default_user(self):
"""
User can sometimes be specified as 'default' with no corresponding definition
"""
test_config = config.KubeConfig.from_file(DEFAULTUSER_CONFIG_FILE_PATH)
test_config.set_current_context("a_context")
self.assertIsNotNone(test_config.user)
def test_namespace(self):
self.cfg.set_current_context("thename")
self.assertEqual("default", self.cfg.namespace)
self.cfg.set_current_context("context_with_namespace")
self.assertEqual("foospace", self.cfg.namespace)
```
#### File: pykube/tests/test_session.py
```python
import copy
import logging
import tempfile
from pathlib import Path
from . import TestCase
BASE_CONFIG = {
"clusters": [
{"name": "test-cluster", "cluster": {"server": "http://localhost:8080"}}
],
"contexts": [
{
"name": "test-cluster",
"context": {"cluster": "test-cluster", "user": "test-user"},
}
],
"users": [{"name": "test-user", "user": {}}],
"current-context": "test-cluster",
}
_log = logging.getLogger(__name__)
class TestSession(TestCase):
def setUp(self):
self.config = copy.deepcopy(BASE_CONFIG)
def test_build_session_auth_provider(self):
"""Test that HTTPClient correctly parses the auth-provider config.
Observed in GKE with kubelet v1.3.
"""
self.config.update(
{
"users": [
{
"name": "test-user",
"user": {
"auth-provider": {
"config": {
"access-token": "<PASSWORD>",
"expiry": "2016-08-24T16:19:17.19878675-07:00",
},
},
},
},
]
}
)
gcloud_content = """
{
"client_id": "myclientid",
"client_secret": "myclientsecret",
"refresh_token": "<PASSWORD>",
"type": "authorized_user"
}
"""
_log.info("Built config: %s", self.config)
try:
tmp = Path(tempfile.mktemp())
with tmp.open("w") as f:
f.write(gcloud_content)
# TODO: this no longer works due to refactoring, GCP session handling is now done in KubernetesHTTPAdapter
# session = pykube.session.GCPSession(pykube.KubeConfig(doc=self.config), tmp)
# self.assertEquals(session.oauth.token['access_token'], 'abc')
# self.assertEquals(session.oauth.token['refresh_token'], 'myrefreshtoken')
# self.assertEquals(session.credentials.get('client_id'), 'myclientid')
# self.assertEquals(session.credentials.get('client_secret'), 'myclientsecret')
finally:
if tmp.exists():
tmp.unlink()
``` |
{
"source": "jmskinner/pii_firewall",
"score": 2
} |
#### File: pii_firewall/local_threading/worker.py
```python
from multiprocessing import Process
from work_strategies.work_strategy_factory import WorkerStrategyFactory
from work_strategies.work_base_strategy import WorkerNullStrategy
class Worker(Process):
def __init__(self, task_queue,write_queue,signal_queue,w_id):
super(Worker, self).__init__()
self.task_queue = task_queue
self.write_queue = write_queue
self.signal_queue = signal_queue
self.exec_strategy = WorkerNullStrategy()
self.current_task = None
self.id = w_id
def __reassign_strategy(self, task):
self.exec_strategy = WorkerStrategyFactory.make_strategy(task)
def run(self):
for task in iter(self.task_queue.get, None):
print(f"Worker {self.id} is working on a {task.domain} task at {task.in_endpoint}..")
if not WorkerStrategyFactory.check_strategy(self.exec_strategy, task):
self.__reassign_strategy(task)
try:
self.exec_strategy.execute(self, task)
except Exception:
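                # intentionally swallow per-task failures so one bad file does not stop the
                # worker; a fuller implementation would log the exception and the task here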
pass
print(f"Worker {self.id} is shutting down now")
self.signal_queue.put(1)
```
#### File: jmskinner/pii_firewall/pii_firewall.py
```python
import mimetypes
import os
import threading
from local_threading.worker import Worker
from local_threading.writer import Writer
from multiprocessing import Queue
from tasking.task import Task
from pathlib import Path
class PIIFirewall:
def __init__(self, config):
self.runtime_config = config['runtime_config']
self.task_config = config['task_config']
self.task_queue = Queue()
self.write_queue = Queue()
self.worker_signals = Queue()
self.writer_signals = Queue()
self.max_num_workers = self.runtime_config['max_cpus']
self.max_num_writers = self.runtime_config['max_threads']
def run(self):
reader_threads = []
for endpoint in self.runtime_config['endpoints']:
input_src, output_dest = endpoint.split("-->")
input_src = input_src.strip()
output_dest = output_dest.strip()
if os.path.isdir(input_src):
all_files = self.__get_all_files_endpoint(input_src)
reader_thread = threading.Thread(target=self.__ingest_files, args=(all_files, output_dest))
reader_threads.append(reader_thread)
else:
print("Not a directory, skipping for now")
# spin up all the readers
for reader_thread in reader_threads:
reader_thread.start()
# spin up all the workers
worker_processes = []
for worker_id in range(self.max_num_workers):
            # note that this is a process, not a thread
worker = Worker(self.task_queue, self.write_queue, self.worker_signals, worker_id)
worker_processes.append(worker)
worker.start()
# spin up writers
writer_threads = []
for writer_id in range(self.max_num_writers):
            # note that this is a thread, not a process
writer = Writer(self.write_queue,self.writer_signals,writer_id)
writer_threads.append(writer)
writer.start()
# wait for readers to be done
for reader_thread in reader_threads:
reader_thread.join()
# let the workers know they can shutdown
for worker in range(self.max_num_workers):
self.task_queue.put(None)
# wait while the workers finish
for worker in worker_processes:
worker.join()
# once all the workers are done we let the writers know
for writer in range(self.max_num_writers):
self.write_queue.put(None)
# we make main thread wait while the writers finish
for writer in writer_threads:
writer.join()
def __get_mime_type(self, filename):
return mimetypes.guess_type(filename)[0].split("/")
def __get_all_files_endpoint(self, input_src):
all_files = []
for path, subdirs, files in os.walk(input_src):
for name in files:
if name[0] != ".":
all_files.append(os.path.join(path, name))
return all_files
def __ingest_files(self, all_files, dest):
for in_endpoint in all_files:
domain, file_type = self.__get_mime_type(in_endpoint)
out_endpoint, profile_endpoint = self.__make_output_dest(domain, file_type, in_endpoint, dest)
task = Task(domain, file_type, in_endpoint, out_endpoint,profile_endpoint,self.task_config)
self.task_queue.put(task)
print(f"Task at {task.in_endpoint} was placed on queue")
def __make_output_dest(self, domain, file_type, file_name, dest):
left_index = file_name.rfind("/")
right_index = file_name.rfind(".")
ext = file_name[right_index:]
title = file_name[left_index+1:right_index]
dir_path = os.path.join(dest,"data",domain,file_type)
pro_path = os.path.join(dest,"profiles",domain,file_type)
Path(dir_path).mkdir(parents=True, exist_ok=True)
Path(pro_path).mkdir(parents=True, exist_ok=True)
out_path = os.path.join(dir_path,title+"_post_pii_" + ext)
profile_path = os.path.join(pro_path,title+".json")
return out_path,profile_path
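# Illustrative configuration sketch (key names inferred from the accesses above; the paths
# and values are hypothetical): PIIFirewall expects a runtime_config whose endpoints are
# written as "input --> output", plus worker/writer limits, and a task_config handed to tasks:
#
#   config = {
#       'runtime_config': {
#           'endpoints': ['/data/inbox --> /data/outbox'],
#           'max_cpus': 4,
#           'max_threads': 2,
#       },
#       'task_config': {'max_thread_per_task': 4},
#   }
#   PIIFirewall(config).run()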
```
#### File: pii_firewall/work_strategies/work_pdf_strategy.py
```python
from urllib.request import urlopen
from work_strategies.work_base_strategy import WorkerBaseStrategy
from pdf2image import convert_from_path, convert_from_bytes
from presidio_image_redactor import ImageRedactorEngine
from threading import Lock,Thread, Semaphore
from PIL import Image, ImageChops
from presidio_image_redactor.image_analyzer_engine import ImageAnalyzerEngine
class WorkerPDFStrategy(WorkerBaseStrategy):
hyperthread_image_processing = True
## if we wanted to limit this across classes we could change this to active and remove it from the constructor
# thread_semaphore = Semaphore(5)
def __init__(self,domain, task_type):
super().__init__(domain, task_type)
self.my_lock = Lock()
self.image_analyzer = ImageAnalyzerEngine()
self.text_redactor = ImageRedactorEngine()
self.thread_semaphore = None
def _fetch(self, task):
try:
if task.in_is_local:
task.data = convert_from_path(task.in_endpoint)
else:
data = urlopen(task.in_endpoint).read()
                task.data = convert_from_bytes(data)
except Exception:
print(f'Error reading pdf from source: {task.in_endpoint}')
return task
def _process(self, task):
redacted_images = {}
local_threads = []
pdf_img_list = []
# self.thread_semaphore = Semaphore(task.config['max_thread_per_task']+1)
# print(task.config['max_thread_per_task'])
# In case we have a large doc, don't spin up too many threads
for pos,image in enumerate(task.data):
# self.thread_semaphore.acquire()
thread = Thread(target=self._redact_an_image, args=(image,pos,redacted_images,task))
local_threads.append(thread)
for thread in local_threads:
thread.start()
# wait for the threads to finish
for thread in local_threads:
thread.join()
# reassemble the doc in proper order
for num, page in sorted(redacted_images.items()):
pdf_img_list.append(page)
task.data = pdf_img_list
return task
def _push(self, worker, task):
print(f"Worker {worker.id} pushed task at {task.in_endpoint}")
worker.write_queue.put(task)
def _redact_an_image(self,img,key,output,task):
self.my_lock.acquire()
try:
temp = ImageChops.duplicate(img)
image_result = self.image_analyzer.analyze(temp)
if len(image_result) > 0:
task.profile["page"+str(key)] = str(image_result)
output[key] = self.text_redactor.redact(img, self.REDACT_COLOR)
        except Exception:
            print(f"Incompatible PDF type occurred on page {key+1} in the doc located at {task.in_endpoint}... ignoring this page")
        finally:
            # release the lock exactly once, whether or not redaction succeeded
            self.my_lock.release()
            # self.thread_semaphore.release()
```
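A standalone sketch (with a trivial stand-in for the redactor) of the fan-out used in `_process` above: one thread per page, results keyed by page position, then reassembled with `sorted(...)` so page order survives out-of-order thread completion:
```python
import threading

def fan_out(pages, redact):
    results = {}
    def work(pos, page):
        results[pos] = redact(page)   # each thread writes a unique key
    threads = [threading.Thread(target=work, args=(i, p)) for i, p in enumerate(pages)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return [page for _, page in sorted(results.items())]

# toy example: upper-casing stands in for per-page redaction
print(fan_out(["page-a", "page-b", "page-c"], str.upper))
# -> ['PAGE-A', 'PAGE-B', 'PAGE-C']
```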
#### File: pii_firewall/write_strategies/write_image_strategy.py
```python
from write_strategies.write_base_strategy import WriterBaseStrategy
import json
class WriterImageStrategy(WriterBaseStrategy):
def _write_data(self, task):
task.data.save(task.out_endpoint)
def _write_profile(self, task):
with open(task.profile_endpoint, "w") as outfile:
json.dump(task.profile, outfile)
``` |
{
"source": "jms/klickbrick",
"score": 2
} |
#### File: features/steps/hello.py
```python
import subprocess
from behave import *
use_step_matcher("re")
@when("the user run klickbrick 'hello'")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
args = "poetry run klickbrick hello".split()
context.response = subprocess.run(args, capture_output=True, text=True).stdout
@then("the CLI prints 'Hello World'")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
assert "Hello World\n" == context.response
```
#### File: klickbrick/klickbrick/klickbrick.py
```python
import argparse
import sys
from typing import List, Any
from klickbrick.script import greeting
class Klickbrick:
def __init__(self, arguments: List[Any]):
self.parser = argparse.ArgumentParser(prog="klickbrick", usage="%(prog)s [options]")
self.subparsers = self.parser.add_subparsers(help="sub-command help")
self.subparsers.dest = "hello"
self.subparsers.required = True
hello_parser = self.subparsers.add_parser(name="hello", description="A friendly Hello World")
hello_parser.add_argument("-n", "--name", type=str, default="World")
toolchain_parser = self.subparsers.add_parser(name="onboard", description="Toolchain commands")
toolchain_parser.add_argument("--dry-run", type=str)
toolchain_parser.add_argument("--name", type=str, required=True)
toolchain_parser.add_argument("--email", type=str, required=True)
toolchain_parser.add_argument("--commit-template", type=str)
options = self.parser.parse_args(arguments) # type: argparse.Namespace
getattr(self, arguments[0])(options.name)
@staticmethod
def hello(name: str):
print(greeting(name))
@staticmethod
def onboard(name: str, email: str):
"""
git tasks:
git config --global user.name "<NAME>"
git config --global user.email <EMAIL>
git config --global init.defaultBranch main
git config --global commit.template ~/.gitmessage.txt
:param name:
:param email:
:return:
"""
commit_template: str = "~/.gitmessage.txt"
pass
def main():
Klickbrick(sys.argv[1:])
if __name__ == '__main__':
main()
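# Usage sketch (hedged; mirrors the behave feature above, flags come from the parser definitions):
#   poetry run klickbrick hello              -> prints "Hello World"
#   poetry run klickbrick hello --name Ada   -> prints the greeting for "Ada"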
``` |
{
"source": "jmsl31/coverage.py",
"score": 3
} |
#### File: coverage.py/lab/genpy.py
```python
import collections
from itertools import cycle, product
import random
import re
import sys
import coverage
from coverage.parser import PythonParser
class PythonSpinner(object):
"""Spin Python source from a simple AST."""
def __init__(self):
self.lines = []
self.lines.append("async def func():")
self.indent = 4
@property
def lineno(self):
return len(self.lines) + 1
@classmethod
def generate_python(cls, ast):
spinner = cls()
spinner.gen_python_internal(ast)
return "\n".join(spinner.lines)
def add_line(self, line):
g = "g{}".format(self.lineno)
self.lines.append(' ' * self.indent + line.format(g=g, lineno=self.lineno))
def add_block(self, node):
self.indent += 4
self.gen_python_internal(node)
self.indent -= 4
def maybe_block(self, node, nodei, keyword):
if len(node) > nodei and node[nodei] is not None:
self.add_line(keyword + ":")
self.add_block(node[nodei])
def gen_python_internal(self, ast):
for node in ast:
if isinstance(node, list):
op = node[0]
if op == "if":
self.add_line("if {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "for":
self.add_line("for x in {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "while":
self.add_line("while {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "try":
self.add_line("try:")
self.add_block(node[1])
# 'except' clauses are different, because there can be any
# number.
if len(node) > 2 and node[2] is not None:
for except_node in node[2]:
self.add_line("except Exception{}:".format(self.lineno))
self.add_block(except_node)
self.maybe_block(node, 3, "else")
self.maybe_block(node, 4, "finally")
elif op == "with":
self.add_line("with {g} as x:")
self.add_block(node[1])
else:
raise Exception("Bad list node: {!r}".format(node))
else:
op = node
if op == "assign":
self.add_line("x = {lineno}")
elif op in ["break", "continue"]:
self.add_line(op)
elif op == "return":
self.add_line("return")
elif op == "yield":
self.add_line("yield {lineno}")
else:
raise Exception("Bad atom node: {!r}".format(node))
def weighted_choice(rand, choices):
"""Choose from a list of [(choice, weight), ...] options, randomly."""
total = sum(w for c, w in choices)
r = rand.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
class RandomAstMaker(object):
def __init__(self, seed):
self.r = random.Random()
self.r.seed(seed)
self.depth = 0
self.bc_allowed = set()
def roll(self, prob=0.5):
return self.r.random() <= prob
def choose(self, choices):
"""Roll the dice to choose an option."""
return weighted_choice(self.r, choices)
STMT_CHOICES = [
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 20), ("return", 1), ("yield", 0)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
# Last element has to have no compound statements, to limit depth.
[("assign", 10), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
]
def make_body(self, parent):
body = []
choices = self.STMT_CHOICES[self.depth]
self.depth += 1
nstmts = self.choose([(1, 10), (2, 25), (3, 10), (4, 10), (5, 5)])
for _ in range(nstmts):
stmt = self.choose(choices)
if stmt == "if":
body.append(["if", self.make_body("if")])
if self.roll():
body[-1].append(self.make_body("ifelse"))
elif stmt == "for":
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed | set(["break", "continue"])
body.append(["for", self.make_body("for")])
self.bc_allowed = old_allowed
if self.roll():
body[-1].append(self.make_body("forelse"))
elif stmt == "while":
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed | set(["break", "continue"])
body.append(["while", self.make_body("while")])
self.bc_allowed = old_allowed
if self.roll():
body[-1].append(self.make_body("whileelse"))
elif stmt == "try":
else_clause = self.make_body("try") if self.roll() else None
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed - set(["continue"])
finally_clause = self.make_body("finally") if self.roll() else None
self.bc_allowed = old_allowed
if else_clause:
with_exceptions = True
elif not else_clause and not finally_clause:
with_exceptions = True
else:
with_exceptions = self.roll()
if with_exceptions:
num_exceptions = self.choose([(1, 50), (2, 50)])
exceptions = [self.make_body("except") for _ in range(num_exceptions)]
else:
exceptions = None
body.append(
["try", self.make_body("tryelse"), exceptions, else_clause, finally_clause]
)
elif stmt == "with":
body.append(["with", self.make_body("with")])
elif stmt == "return":
body.append(stmt)
break
elif stmt == "yield":
body.append("yield")
elif stmt in ["break", "continue"]:
if stmt in self.bc_allowed:
                    # A break or continue as the very first statement of a loop
                    # body is not interesting, so in that case insert an
                    # assignment first.
if not body and (parent in ["for", "while"]):
body.append("assign")
body.append(stmt)
break
else:
stmt = "assign"
if stmt == "assign":
# Don't put two assignments in a row, there's no point.
if not body or body[-1] != "assign":
body.append("assign")
self.depth -= 1
return body
def async_alternatives(source):
parts = re.split(r"(for |with )", source)
nchoices = len(parts) // 2
#print("{} choices".format(nchoices))
def constant(s):
return [s]
def maybe_async(s):
return [s, "async "+s]
choices = [f(x) for f, x in zip(cycle([constant, maybe_async]), parts)]
for result in product(*choices):
source = "".join(result)
yield source
def compare_alternatives(source):
all_all_arcs = collections.defaultdict(list)
for i, alternate_source in enumerate(async_alternatives(source)):
parser = PythonParser(alternate_source)
arcs = parser.arcs()
all_all_arcs[tuple(arcs)].append((i, alternate_source))
return len(all_all_arcs)
def show_a_bunch():
longest = ""
for i in range(100):
maker = RandomAstMaker(i)
source = PythonSpinner.generate_python(maker.make_body("def"))
try:
print("-"*80, "\n", source, sep="")
compile(source, "<string>", "exec")
except Exception as ex:
print("Oops: {}\n{}".format(ex, source))
if len(source) > len(longest):
longest = source
def show_alternatives():
for i in range(1000):
maker = RandomAstMaker(i)
source = PythonSpinner.generate_python(maker.make_body("def"))
nlines = len(source.splitlines())
if nlines < 15:
nalt = compare_alternatives(source)
if nalt > 1:
print("--- {:3} lines, {:2} alternatives ---------".format(nlines, nalt))
print(source)
if __name__ == "__main__":
show_alternatives()
```
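A short usage sketch of `weighted_choice` (illustrative weights; it assumes lab/genpy.py above is importable as `genpy`). Over many draws the counts should be roughly proportional to the weights:
```python
import collections
import random

from genpy import weighted_choice  # assumption: lab/genpy.py is on the path as "genpy"

rand = random.Random(42)
choices = [("assign", 70), ("return", 20), ("yield", 10)]
counts = collections.Counter(weighted_choice(rand, choices) for _ in range(10000))
print(counts)   # expected to be roughly 7000 / 2000 / 1000
```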
#### File: coverage.py/lab/show_pyc.py
```python
import binascii
import dis
import marshal
import struct
import sys
import time
import types
def show_pyc_file(fname):
f = open(fname, "rb")
magic = f.read(4)
moddate = f.read(4)
modtime = time.asctime(time.localtime(struct.unpack('<L', moddate)[0]))
print("magic %s" % (binascii.hexlify(magic)))
print("moddate %s (%s)" % (binascii.hexlify(moddate), modtime))
if sys.version_info >= (3, 3):
# 3.3 added another long to the header (size).
size = f.read(4)
print("pysize %s (%d)" % (binascii.hexlify(size), struct.unpack('<L', size)[0]))
code = marshal.load(f)
show_code(code)
def show_py_file(fname):
text = open(fname).read().replace('\r\n', '\n')
show_py_text(text, fname=fname)
def show_py_text(text, fname="<string>"):
code = compile(text, fname, "exec")
show_code(code)
CO_FLAGS = [
('CO_OPTIMIZED', 0x00001),
('CO_NEWLOCALS', 0x00002),
('CO_VARARGS', 0x00004),
('CO_VARKEYWORDS', 0x00008),
('CO_NESTED', 0x00010),
('CO_GENERATOR', 0x00020),
('CO_NOFREE', 0x00040),
('CO_COROUTINE', 0x00080),
('CO_ITERABLE_COROUTINE', 0x00100),
('CO_ASYNC_GENERATOR', 0x00200),
('CO_GENERATOR_ALLOWED', 0x01000),
('CO_FUTURE_DIVISION', 0x02000),
('CO_FUTURE_ABSOLUTE_IMPORT', 0x04000),
('CO_FUTURE_WITH_STATEMENT', 0x08000),
('CO_FUTURE_PRINT_FUNCTION', 0x10000),
('CO_FUTURE_UNICODE_LITERALS', 0x20000),
('CO_FUTURE_BARRY_AS_BDFL', 0x40000),
('CO_FUTURE_GENERATOR_STOP', 0x80000),
]
def show_code(code, indent='', number=None):
label = ""
if number is not None:
label = "%d: " % number
print("%s%scode" % (indent, label))
indent += ' '
print("%sname %r" % (indent, code.co_name))
print("%sargcount %d" % (indent, code.co_argcount))
print("%snlocals %d" % (indent, code.co_nlocals))
print("%sstacksize %d" % (indent, code.co_stacksize))
print("%sflags %04x: %s" % (indent, code.co_flags, flag_words(code.co_flags, CO_FLAGS)))
show_hex("code", code.co_code, indent=indent)
dis.disassemble(code)
print("%sconsts" % indent)
for i, const in enumerate(code.co_consts):
if type(const) == types.CodeType:
show_code(const, indent+' ', number=i)
else:
print(" %s%d: %r" % (indent, i, const))
print("%snames %r" % (indent, code.co_names))
print("%svarnames %r" % (indent, code.co_varnames))
print("%sfreevars %r" % (indent, code.co_freevars))
print("%scellvars %r" % (indent, code.co_cellvars))
print("%sfilename %r" % (indent, code.co_filename))
print("%sfirstlineno %d" % (indent, code.co_firstlineno))
show_hex("lnotab", code.co_lnotab, indent=indent)
def show_hex(label, h, indent):
h = binascii.hexlify(h)
if len(h) < 60:
print("%s%s %s" % (indent, label, h.decode('ascii')))
else:
print("%s%s" % (indent, label))
for i in range(0, len(h), 60):
print("%s %s" % (indent, h[i:i+60].decode('ascii')))
def flag_words(flags, flag_defs):
words = []
for word, flag in flag_defs:
if flag & flags:
words.append(word)
return ", ".join(words)
def show_file(fname):
if fname.endswith('pyc'):
show_pyc_file(fname)
elif fname.endswith('py'):
show_py_file(fname)
else:
print("Odd file:", fname)
def main(args):
if args[0] == '-c':
show_py_text(" ".join(args[1:]).replace(";", "\n"))
else:
for a in args:
show_file(a)
if __name__ == '__main__':
main(sys.argv[1:])
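# Usage sketch (illustrative paths):
#   python show_pyc.py module.pyc                # dump the pyc header, code object and disassembly
#   python show_pyc.py module.py                 # compile the source first, then dump it
#   python show_pyc.py -c "x = 1; print(x)"      # compile an inline snippet (';' is turned into newlines)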
``` |
{
"source": "jmslagmay/apoleid",
"score": 3
} |
#### File: fingerprinting/get_rssi/station.py
```python
import socket, select, string, sys
import cflib.drivers.crazyradio as crazyradio
from time import sleep
#def get_rssi(sock, cradio, station_no):
def get_rssi(sock, station_no):
cradio = crazyradio.Crazyradio()
cradio.set_data_rate(cradio.DR_2MPS)
cradio.set_channel(70)
count = 0
#delay = (float(station_no) - 1) / 10
#delay = (float(station_no) - 1)
delay = (station_no - 1) * 2
#addr = 0xff - station_no - 1
#print (delay)
#print (addr)
rss = 0
total = 0
#cradio = crazyradio.Crazyradio()
#cradio.set_data_rate(cradio.DR_2MPS)
#cradio.set_channel(70)
#RSS_list = []
sleep(delay)
while count < 100:
pk = cradio.send_packet([0xff, ])
if pk.ack and len(pk.data) > 2 and \
pk.data[0] & 0xf3 == 0xf3 and pk.data[1] == 0x01:
#print("RSSI: -{}dBm".format(pk.data[2]))
#print ("RSSI: %d" % pk.data[2])
count += 1
rss = pk.data[2]
#RSS_list.append(rss)
#print("hello")
total += 1
else:
#print("No RSS")
count += 1
#print("hi")
#rss = 10000
drone = 0
#check if there is a drone present
if total > 1:
drone = 1
print("Drone detected")
else:
drone = 0
print("No drone")
count = 0
rss = 0
x = 20
#get x no. of RSS then average
if drone:
#for i in range (0, len(RSS_list)):
# rss_sum += RSS_list[i]
#rss = int(rss_sum / len(RSS_list))
while count < x:
pk = cradio.send_packet([0xff, ])
if pk.ack and len(pk.data) > 2 and \
pk.data[0] & 0xf3 == 0xf3 and pk.data[1] == 0x01:
count += 1
rss += pk.data[2]
rss = rss / x
rss = int(rss)
else:
rss = 10000
cradio.close()
print("RSSI: %d" % rss)
return rss
#main function
if __name__ == "__main__":
if(len(sys.argv) < 3) :
print ('Usage : python3 filename hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
station_no = int(input("Station No: "))
#channel = 70 + station_no
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
# connect to remote host
try :
s.connect((host, port))
except :
print ('Unable to connect')
sys.exit()
print ('Connected to remote host. Start sending messages')
try:
while 1:
socket_list = [s]
# Get the list sockets which are readable
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
#incoming message from remote server
if sock == s:
rcv_data = sock.recv(4096)
data = rcv_data.decode('ascii')
if not data :
print ('\nDisconnected from chat server')
sys.exit()
else :
#sys.stdout.write(data)
#print(data)
if " " in data:
parsed_data = data.split(" ")
if parsed_data[0] == "get_rssi":
#print ("GET RSSI!!!!!!!!!!!")
#print (str(station_no) + " rss " + "35")
#cradio = crazyradio.Crazyradio()
#cradio.set_data_rate(cradio.DR_2MPS)
#cradio.set_channel(70)
#rss = get_rssi(sock, cradio, station_no)
rss = get_rssi(sock, station_no)
reply = "rss " + str(station_no) + " " + str(rss)
s.send(reply.encode('ascii'))
else:
print (data)
else:
if data == "get_rssi":
#cradio = crazyradio.Crazyradio()
#cradio.set_data_rate(cradio.DR_2MPS)
#cradio.set_channel(70)
#rss = get_rssi(sock, cradio, station_no)
rss = get_rssi(sock, station_no)
reply = "rss " + str(station_no) + " " + str(rss)
s.send(reply.encode('ascii'))
else:
print (data)
#user entered a message
else :
#msg = sys.stdin.readline()
msg = input("")
#print (len(msg))
s.send(msg.encode('ascii'))
if msg == "/quit\n":
print ('Disconnecting from server...')
break
except KeyboardInterrupt:
msg = '/quit\n'
s.send(msg.encode('ascii'))
print ('Disconnecting from server...')
print ('\nDisconnected from chat server')
s.close()
``` |
{
"source": "jmsmdy/decentralized-electronic-health-record",
"score": 2
} |
#### File: backend/records/fields.py
```python
from django.core.exceptions import ValidationError
from django.forms import MultiValueField, ChoiceField
from .widgets import BootstrapMultiBinaryRadio
class MultiBinaryField(MultiValueField):
widget = BootstrapMultiBinaryRadio
default_error_messages = {
'invalid_date': 'Enter a valid date.',
'invalid_time': 'Enter a valid time.',
}
def __init__(self, *, choices=None, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = tuple([ChoiceField(choices=[('yes', 'Yes'), ('no', 'No'), ('unknown', "I don't know")]) for choice in choices])
super().__init__(fields, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
for data in data_list:
if data in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
result = '---'.join(data_list)
return result
return None
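# Usage sketch (hypothetical answers): with three sub-fields the compressed value is a
# '---'-joined string, e.g. compress(['yes', 'no', 'unknown']) -> 'yes---no---unknown'.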
``` |
{
"source": "jmsmg/jmsmg.github.io",
"score": 3
} |
#### File: python/k-digital training/quarter.py
```python
import math
def quarterYear(month):
return math.ceil( month / 3.0 )
print(quarterYear(6))
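# Worked examples: ceil(1/3) = 1, ceil(6/3) = 2, ceil(12/3) = 4,
# so quarterYear(1) -> 1, quarterYear(6) -> 2 (printed above), quarterYear(12) -> 4.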
``` |
{
"source": "jmsnur/mytaxi-test",
"score": 2
} |
#### File: site-packages/mysqlx/authentication.py
```python
import hashlib
import struct
from .compat import PY3, UNICODE_TYPES, hexlify
class MySQL41AuthPlugin(object):
def __init__(self, username, password):
self._username = username
self._password = password.encode("<PASSWORD>") \
if isinstance(password, UNICODE_TYPES) else password
def name(self):
return "MySQL 4.1 Authentication Plugin"
def auth_name(self):
return "MYSQL41"
def xor_string(self, hash1, hash2):
"""Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
"""
if PY3:
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
else:
xored = [ord(h1) ^ ord(h2) for (h1, h2) in zip(hash1, hash2)]
return struct.pack("20B", *xored)
def build_authentication_response(self, data):
"""Hashing for MySQL 4.1 authentication
"""
if self._password:
h1 = hashlib.sha1(self._password).digest()
h2 = hashlib.sha1(h1).digest()
auth_response = self.xor_string(
h1, hashlib.sha1(data + h2).digest())
return "{0}\0{1}\0*{2}\0".format("", self._username,
hexlify(auth_response))
else:
return "{0}\0{1}\0".format("", self._username)
```
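A minimal standalone sketch (not part of the connector) of the MySQL 4.1 scramble that `build_authentication_response` computes above: the client sends `SHA1(password) XOR SHA1(nonce + SHA1(SHA1(password)))`, and the server, which stores only the double hash, recomputes the mask to recover and verify `SHA1(password)`. The nonce below is a placeholder:
```python
import hashlib
import struct

def scramble(password: bytes, nonce: bytes) -> bytes:
    h1 = hashlib.sha1(password).digest()          # SHA1(password)
    h2 = hashlib.sha1(h1).digest()                # SHA1(SHA1(password)), what the server stores
    mask = hashlib.sha1(nonce + h2).digest()      # per-connection mask
    return struct.pack("20B", *(a ^ b for a, b in zip(h1, mask)))

# hypothetical 20-byte nonce sent by the server during the handshake
print(scramble(b"secret", b"12345678901234567890").hex())
```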
#### File: site-packages/mysqlx/dbdoc.py
```python
import json
import uuid
from .compat import STRING_TYPES
class DbDoc(object):
"""Represents a generic document in JSON format.
Args:
value (object): The value can be a JSON string or a dict.
Raises:
ValueError: If ``value`` type is not a basestring or dict.
"""
def __init__(self, value):
# TODO: Handle exceptions. What happens if it doesn't load properly?
if isinstance(value, dict):
self.__dict__ = value
elif isinstance(value, STRING_TYPES):
self.__dict__ = json.loads(value)
else:
raise ValueError("Unable to handle type: {0}".format(type(value)))
def __getitem__(self, index):
return self.__dict__[index]
def keys(self):
return self.__dict__.keys()
def ensure_id(self):
if "_id" not in self.__dict__:
self.__dict__["_id"] = uuid.uuid4().hex
return self.__dict__["_id"]
def __str__(self):
return json.dumps(self.__dict__)
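# Usage sketch (hypothetical document):
#   doc = DbDoc({"name": "test"})        # from a dict
#   doc = DbDoc('{"name": "test"}')      # or from a JSON string
#   doc.ensure_id()                      # adds a uuid4 hex "_id" only if one is missing
#   str(doc)                             # serializes back to JSON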
``` |
{
"source": "JMSoler7/database",
"score": 3
} |
#### File: JMSoler7/database/main.py
```python
from faker import Faker
class Person():
name = Faker().name()
def __str__(self):
self.name = Faker().name()
return self.name
if __name__ == "__main__":
people = []
for i in range(10):
people.append(Person())
for person in people:
        print(person.name)
``` |
{
"source": "JMSoler7/django-mptt-admin",
"score": 2
} |
#### File: django_mptt_example/tests/test_util.py
```python
from uuid import UUID
from django.test import TestCase
from django_mptt_admin.util import get_tree_queryset, get_tree_from_queryset, get_javascript_value, serialize_id
from ..models import Country
from .utils import read_testdata
class UtilTestCase(TestCase):
def setUp(self):
super(UtilTestCase, self).setUp()
read_testdata()
def test_get_tree_queryset(self):
# get default queryset
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
# subtree
qs = get_tree_queryset(Country, node_id=Country.objects.get(name='Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
# max_level 1
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
# max_level True
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
# exclude root
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
def test_get_tree_from_queryset(self):
tree = get_tree_from_queryset(get_tree_queryset(Country))
root = tree[0]
self.assertEqual(root['name'], 'root')
continents = root['children']
self.assertEqual(len(continents), 7)
self.assertEqual(continents[0]['name'], 'Africa')
african_countries = continents[0]['children']
self.assertEqual(african_countries[0]['name'], 'Algeria')
# format label
tree = get_tree_from_queryset(get_tree_queryset(Country), item_label_field_name='code')
root = tree[0]
continents = root['children']
african_countries = continents[0]['children']
self.assertEqual(african_countries[0]['name'], 'DZ')
def test_get_javascript_value(self):
self.assertEqual(get_javascript_value(True), 'true')
self.assertEqual(get_javascript_value(False), 'false')
self.assertEqual(get_javascript_value(10), '10')
def test_serialize_id(self):
self.assertEqual(serialize_id(10), 10)
self.assertEqual(serialize_id('10'), '10')
self.assertEqual(
serialize_id(UUID('7b6dd6ba55fb400ca0f59cde381c987f')),
'7b6dd6ba-55fb-400c-a0f5-9cde381c987f'
)
```
#### File: django_mptt_example/tests/test_views.py
```python
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.auth.models import User
from django_webtest import WebTest
from ..models import Country
from .utils import read_testdata
SCRIPT_JS_NAMESPACE = 'script[src="/static/django_mptt_admin/jquery_namespace.js"]'
SCRIPT_JS_DJANGO_MPTT_ADMIN = 'script[src="/static/django_mptt_admin/django_mptt_admin.js"]'
class DjangoMpttAdminWebTests(WebTest):
def setUp(self):
super(DjangoMpttAdminWebTests, self).setUp()
USERNAME = 'admin'
PASSWORD = 'p'
self.admin = User.objects.create_superuser(USERNAME, '<EMAIL>', PASSWORD)
self.login(USERNAME, PASSWORD)
read_testdata()
def test_tree_view(self):
# - get countries admin page
countries_page = self.app.get('/django_mptt_example/country/')
tree_element = countries_page.pyquery('#tree')
# check savestate key
self.assertEqual(tree_element.attr('data-save_state'), 'django_mptt_example_country')
# check url
json_url = tree_element.attr('data-url')
self.assertEqual(json_url, '/django_mptt_example/country/tree_json/')
self.assertEqual(tree_element.attr('data-csrf-cookie-name'), 'csrftoken')
# check that js is included
self.assertEqual(len(countries_page.pyquery(SCRIPT_JS_NAMESPACE)), 1)
self.assertEqual(len(countries_page.pyquery(SCRIPT_JS_DJANGO_MPTT_ADMIN)), 1)
def test_load_json(self):
base_url = '/django_mptt_example/country/tree_json/'
# -- load json
json_data = self.app.get(base_url).json
self.assertEqual(len(json_data), 1)
root = json_data[0]
self.assertEqual(root['name'], 'root')
self.assertEqual(len(root['children']), 7)
africa_id = Country.objects.get(name='Africa').id
africa = root['children'][0]
change_url = '/django_mptt_example/country/{0:d}/change/'.format(africa_id)
self.assertEqual(
africa,
dict(
name='Africa',
id=africa_id,
url=change_url,
move_url='/django_mptt_example/country/{0:d}/move/'.format(africa_id),
load_on_demand=True,
)
)
# no children loaded beyond level 1
self.assertFalse(hasattr(africa, 'children'))
# -- load subtree
json_data = self.app.get('{0!s}?node={1:d}'.format(base_url, africa_id)).json
self.assertEqual(len(json_data), 58)
self.assertEqual(json_data[0]['name'], 'Algeria')
# -- issue 8; selected node does not exist
self.app.get('{0!s}?selected_node=9999999'.format(base_url))
def test_grid_view(self):
# - get grid page
grid_page = self.app.get('/django_mptt_example/country/grid/')
# get first row
row_index = 0
first_row = grid_page.pyquery('#result_list tbody tr').eq(row_index)
# 'name' column
self.assertEqual(first_row.find('td').eq(1).text(), 'Afghanistan')
# 'code' column
self.assertEqual(first_row.find('th').text(), 'AF')
# link to edit page
afghanistan_id = Country.objects.get(name='Afghanistan').id
change_url = '/django_mptt_example/country/{0:d}/change/'.format(afghanistan_id)
self.assertEqual(first_row.find('a').attr('href'), change_url)
# check that js is not included
self.assertEqual(len(grid_page.pyquery(SCRIPT_JS_NAMESPACE)), 0)
self.assertEqual(len(grid_page.pyquery(SCRIPT_JS_DJANGO_MPTT_ADMIN)), 0)
def test_move_view(self):
def get_continents():
return ','.join(c.name for c in Country.objects.filter(level=1).order_by('lft'))
def do_move(source_id, target_id, position):
countries_page = self.app.get('/django_mptt_example/country/')
csrf_token = countries_page.form['csrfmiddlewaretoken'].value
response = self.app.post(
'/django_mptt_example/country/{0:d}/move/'.format(source_id),
params=dict(
csrfmiddlewaretoken=csrf_token,
target_id=target_id,
position=position,
)
)
self.assertEqual(response.json, dict(success=True))
# setup
bouvet_island = Country.objects.get(code='BV')
oceania = Country.objects.get(name='Oceania')
self.assertEqual(bouvet_island.parent.name, 'Antarctica')
# - move Bouvet Island under Oceania
do_move(bouvet_island.id, oceania.id, 'inside')
bouvet_island = Country.objects.get(code='BV')
self.assertEqual(bouvet_island.parent.name, 'Oceania')
# - move Antartica before Africa
self.assertEqual(get_continents(), 'Africa,Antarctica,Asia,Europe,North America,Oceania,South America')
do_move(
Country.objects.get(name='Antarctica', level=1).id,
Country.objects.get(name='Africa').id,
'before'
)
self.assertEqual(get_continents(), 'Antarctica,Africa,Asia,Europe,North America,Oceania,South America')
# move Antarctica after Europe
do_move(
Country.objects.get(name='Antarctica', level=1).id,
Country.objects.get(name='Europe').id,
'after'
)
self.assertEqual(get_continents(), 'Africa,Asia,Europe,Antarctica,North America,Oceania,South America')
# unknown position
self.assertRaisesMessage(
Exception,
'Unknown position',
lambda: do_move(
Country.objects.get(name='Antarctica', level=1).id,
Country.objects.get(name='Europe').id,
'unknown'
)
)
def login(self, username, password):
login_page = self.app.get('/login/')
form = login_page.form
form['username'] = username
form['password'] = password
form.submit()
response = self.app.get('/')
self.assertEqual(response.context['user'].username, username)
def test_popup(self):
# popup must return grid view
grid_page = self.app.get('/django_mptt_example/country/?{0!s}=true'.format(IS_POPUP_VAR))
first_row = grid_page.pyquery('#result_list tbody tr').eq(0)
self.assertEqual(first_row.find('td').eq(0).text(), 'Afghanistan')
def test_permissions(self):
# admin2 doesn't have access because he's no superuser
admin2 = User.objects.create_user('admin2', '<EMAIL>', 'p')
admin2.is_staff = True
admin2.save()
self.app.get('/logout/')
self.login('admin2', 'p')
# tree view
self.app.get('/django_mptt_example/country/', status=403)
def test_filter(self):
# - tree view with all continents
countries_page = self.app.get('/django_mptt_example/country/')
self.assertEqual(
countries_page.pyquery('#changelist-filter li a').text(),
"All Africa Antarctica Asia Europe North America Oceania South America"
)
# - filter on 'Europe'
countries_page = self.app.get('/django_mptt_example/country/?continent=Europe')
tree_div = countries_page.pyquery('#tree')
self.assertEqual(tree_div.attr('data-url'), '/django_mptt_example/country/tree_json/?continent=Europe')
self.assertEqual(
tree_div.attr('data-insert_at_url'),
'/django_mptt_example/country/add/?_changelist_filters=continent%3DEurope'
)
# test json data
# add `selected_node` parameter
json_data = self.app.get('/django_mptt_example/country/tree_json/?continent=Europe&selected_node=2').json
self.assertEqual(len(json_data), 1)
root = json_data[0]
self.assertEqual(root['name'], 'Europe')
self.assertEqual(len(root['children']), 50)
country_node = root['children'][0]
self.assertEqual(
country_node['url'],
"/django_mptt_example/country/{0!s}/change/?_changelist_filters=continent%3DEurope".format(country_node['id'])
)
# check urls
object_tool_buttons = countries_page.pyquery('.object-tools a')
self.assertEqual(len(object_tool_buttons), 2)
self.assertEqual(
object_tool_buttons.eq(0).attr('href'),
'/django_mptt_example/country/add/?_changelist_filters=continent%3DEurope'
)
self.assertEqual(
object_tool_buttons.eq(1).attr('href'),
'/django_mptt_example/country/grid/?continent=Europe'
)
# - grid view; filter on 'Europe'
grid_page = self.app.get('/django_mptt_example/country/grid/?continent=Europe')
object_tool_buttons = grid_page.pyquery('.object-tools a')
self.assertEqual(len(object_tool_buttons), 2)
self.assertEqual(
object_tool_buttons.eq(0).attr('href'),
'/django_mptt_example/country/add/?_changelist_filters=continent%3DEurope'
)
self.assertEqual(
object_tool_buttons.eq(1).attr('href'),
'/django_mptt_example/country/?continent=Europe'
)
```
#### File: django_mptt_example/tests/utils.py
```python
import os
from django.core import serializers
def read_testdata():
fixture_filename = os.path.join(os.path.dirname(__file__), 'testdata/countries.json')
with open(fixture_filename) as f:
for obj in serializers.deserialize("json", f.read()):
obj.save()
``` |
{
"source": "jmsplank/phdhelper",
"score": 2
} |
#### File: phdhelper/suMMSary/suMMSary.py
```python
import numpy as np
import pyspedas
from phdhelper.helpers import title_print
from phdhelper.helpers.CONSTANTS import c, k_B, m_e, m_i, mu_0, q
from pytplot import data_quants
import matplotlib.pyplot as plt
from datetime import datetime as dt
from cached_property import cached_property
class EventHandler:
FPI = None
FPI_DIST = None
FSM = None
FGM = None
trange = None
probe = None
def load_FGM(self):
self.FGM = pyspedas.mms.fgm(
trange=self.trange, probe=self.probe, data_rate="brst"
)
def load_FSM(self):
raise NotImplementedError()
def load_FPI_DIST(self):
self.FPI_DIST = pyspedas.mms.fpi(
trange=self.trange,
probe=self.probe,
data_rate="brst",
datatype="dis-dist",
)
def load_FPI(self):
self.FPI = pyspedas.mms.fpi(
trange=self.trange, probe=self.probe, data_rate="brst"
)
@staticmethod
def get_tplot_data(var_str, sl=None, time=False):
if not time:
if sl is None:
# Get all data
return data_quants[var_str].values
else:
return data_quants[var_str].values[sl]
else:
if sl is None:
# Get all data
return data_quants[var_str].coords["time"].values
else:
return data_quants[var_str].coords["time"].values[sl]
class TimeMMS(EventHandler):
def __init__(self, kw):
self.kw = kw
@cached_property
def timestamp(self):
return self.get_tplot_data(self.kw, time=True)
@cached_property
def date_time(self):
return np.array([dt.utcfromtimestamp(t) for t in self.timestamp])
def date_string(self, fmt="%H:%M"):
return np.array([dt.strftime(t, fmt) for t in self.date_time])
class Species(EventHandler):
def __init__(self, kw) -> None:
self.kw = kw
@cached_property
def value(self):
return self.get_tplot_data(self.kw)
@cached_property
def time(self):
return TimeMMS(self.kw)
def plot(self):
plt.plot(self.value)
def __repr__(self):
return (
f"Species({self.kw})"
"Available properties:"
" value"
"Available methods:"
" plot"
)
class MultiSpecies:
def __init__(self, ion_kw: str, electron_kw: str) -> None:
self.ion_kw = ion_kw
self.electron_kw = electron_kw
@cached_property
def ion(self):
return Species(self.ion_kw)
@cached_property
def electron(self):
return Species(self.electron_kw)
class Event(EventHandler):
def __init__(
self, trange: str, required_instruments: str, probe: str = "1"
) -> None:
self.trange = trange
self.required_instruments = required_instruments.upper()
self.probe = probe
if "FGM" in required_instruments:
self.load_FGM()
if "FPI" in required_instruments:
self.load_FPI()
if "FSM" in required_instruments:
self.load_FSM()
if "FPI_DIST" in required_instruments:
self.load_FPI_DIST()
@cached_property
def B(self):
return Species(f"mms{self.probe}_fgm_b_gse_brst_l2")
@cached_property
def v(self):
return MultiSpecies(
f"mms{self.probe}_dis_bulkv_gse_brst",
f"mms{self.probe}_des_bulkv_gse_brst",
)
@cached_property
def T(self):
return MultiSpecies(
f"mms{self.probe}_dis_temppara_brst",
f"mms{self.probe}_dis_tempperp_brst",
)
@cached_property
def E(self):
return MultiSpecies(
f"mms{self.probe}_dis_energyspectr_omni_brst",
f"mms{self.probe}_des_energyspectr_omni_brst",
)
# @property
# def v_0(self, species="i"):
# title_print("Calculating background flow speed")
# species = self.Species(species)
# if species.ion:
# self.v_0_i = np.mean(np.linalg.norm(self.v_i, axis=1))
# if species.elec:
# self.v_0_e = np.mean(np.linalg.norm(self.v_e, axis=1))
# @property
# def v_A(self):
# title_print("Calculating Alfven speed")
# self.v_A = self.mean_B / np.sqrt(mu_0 * self.number_density_i) / 1e3
# @property
# def number_density(self, species="i"):
# species = self.Species(species)
# if species.ion:
# self.number_density_i = (
# self.get_tplot_data(f"mms{self.probe}_dis_numberdensity_brst") * 1e6
# ).mean()
# if species.elec:
# self.number_density_e = (
# self.get_tplot_data(f"mms{self.probe}_des_numberdensity_brst") * 1e6
# ).mean()
# @property
# def beta(self, species="i"):
# title_print("Calculating plasma betas")
# species = self.Species(species)
# magPress = self.mean_B ** 2 / (2 * mu_0)
# if species.ion:
# self.beta_i = (
# self.number_density_i * k_B * self.T_i[:, 0].mean()
# ) / magPress
# if species.elec:
# self.beta_e = (
# self.number_density_e * k_B * self.T_e[:, 0].mean()
# ) / magPress
# @property
# def rho(self, species="i"):
# title_print("Calculating gyroradius")
# species = self.Species(species)
# if species.ion:
# i_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_i) / 1e3
# i_gyrofrequency = q * self.mean_B / m_i
# self.rho_i = i_thermal_velocity / i_gyrofrequency
# if species.elec:
# e_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_e) / 1e3
# e_gyrofrequency = q * self.mean_B / m_e
# self.rho_e = e_thermal_velocity / e_gyrofrequency
# @property
# def p(self, species="i"):
# title_print("Calculating Intertial length")
# species = self.Species(species)
# if species.ion:
# i_plasma_frequency = 1.32e3 * np.sqrt(self.number_density_i)
# self.p_i = c / i_plasma_frequency
# self.p_i /= 1e3
# if species.elec:
# e_plasma_frequency = 5.64e4 * np.sqrt(self.number_density_e)
# self.p_e = c / e_plasma_frequency
# self.p_e /= 1e3
# @property
# def time(self, var="B"):
# title_print("Getting time arrays")
# var = var.split("|")
# if "B" in var:
# self.time_B = self.get_tplot_data(
# f"mms{self.probe}_fgm_b_gse_brst_l2", time=True
# )
# if "V" in var:
# self.time_V = self.get_tplot_data(
# f"mms{self.probe}_dis_bulkv_gse_brst", time=True
# )
# if "e" in var:
# self.time_e = self.get_tplot_data(
# f"mms{self.probe}_des_temppara_brst", time=True
# )
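# Usage sketch (hypothetical time range and probe; instrument keywords are matched by substring):
#   event = Event(trange=["2020-03-18/02:00", "2020-03-18/02:40"],
#                 required_instruments="FGM|FPI", probe="1")
#   b = event.B.value                      # magnetic field samples (cached on first access)
#   t = event.B.time.date_string("%H:%M")  # matching timestamps formatted as strings
#   v_ion = event.v.ion.value              # ion bulk velocity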
``` |
{
"source": "jms/potential-bassoon",
"score": 2
} |
#### File: srl/services/parse.py
```python
from django.conf import settings
import string
from math import floor
alphabet = '0123456789ABCDEFGHJKLMNPQRSTUVWXYZ_abcdefghijkmnopqrstuvwxyz'
def encode(url_id):
return numtosxgf(url_id, settings.SHORT_URL_MAX_LEN)
def decode(code):
return sxgtonum(code)
# http://faruk.akgul.org/blog/tantek-celiks-newbase60-in-python-and-java/
# encode number to base60
def numtosxg(n):
s = ''
char_list = alphabet
if not isinstance(n, (int,)) or n == 0:
return '0'
while n > 0:
n, i = divmod(n, 60)
s = char_list[i] + s
return s
# number to base60, padding chars (string length)
def numtosxgf(n, f):
s = numtosxg(n)
if not isinstance(f, (int,)):
f = 1
f -= len(s)
while f > 0:
s = '0' + s
f -= 1
return s
# convert string to number base60
def sxgtonum(s):
n = 0
j = len(s)
for i in range(0, j):
c = ord(s[i])
if 48 <= c <= 57:
c -= 48
elif 65 <= c <= 72:
c -= 55
elif c == 73 or c == 108: # typo capital I, lowercase l to 1
c = 1
elif 74 <= c <= 78:
c -= 56
elif c == 79: # error correct typo capital O to 0
c = 0
elif 80 <= c <= 90:
c -= 57
elif c == 95:
c = 34
elif 97 <= c <= 107:
c -= 62
elif 109 <= c <= 122:
c -= 63
else:
c = 0 # treat all other noise as 0
n = 60 * n + c
return n
def to_base62(num, b=62):
if b <= 0 or b > 62:
return 0
base = string.digits + string.ascii_letters
r = num % b
res = base[r]
q = floor(num / b)
while q:
r = q % b
q = floor(q / b)
res = base[int(r)] + res
return res
def to_base10(num, b=62):
base = string.digits + string.ascii_letters
limit = len(num)
res = 0
for i in range(limit):
res = b * res + base.find(num[i])
return res
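# Round-trip sketch (worked values):
#   numtosxg(123)      -> '23'    (123 = 2*60 + 3)
#   sxgtonum('23')     -> 123     (also tolerant of common typos: 'I'/'l' -> 1, 'O' -> 0)
#   numtosxgf(123, 4)  -> '0023'  (zero-padded to the configured short-URL length)
#   to_base62(125)     -> '21'    and to_base10('21') -> 125  (125 = 2*62 + 1)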
``` |
{
"source": "jmspring/active-learning-detect",
"score": 2
} |
#### File: active-learning-detect/db/postgres-client.py
```python
import sys
import string
import pg8000
import random
import os
import time
import logging
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from functions.pipeline.shared.db_access import ImageTagDataAccess
from functions.pipeline.shared.db_provider import PostGresProvider, DatabaseInfo
from functions.pipeline.shared.db_access.db_access_v2 import generate_test_image_infos
def get_transformed_id_to_url_map(id_to_url_map):
updated_image_id_url_map = {}
for image_id, old_url in id_to_url_map.items():
replaced_path = old_url.replace('new-uploads','perm-uploads')
file_name_to_replace = extract_image_name_no_suffix(replaced_path)
transformed_path = replaced_path.replace(file_name_to_replace,str(image_id))
updated_image_id_url_map[image_id] = transformed_path
return updated_image_id_url_map
def pretty_print_audit_history(conn, list_of_image_ids):
if(len(list_of_image_ids) > 0):
cursor = conn.cursor()
image_ids_as_strings = [str(i) for i in list_of_image_ids]
images_to_audit = '{0}'.format(', '.join(image_ids_as_strings))
query = ("SELECT a.imageid,c.originalimagename, b.tagstatename, d.username, a.ArchiveDtim FROM image_tagging_state_audit a "
"JOIN tagstate b ON a.tagstateid = b.tagstateid "
"JOIN image_info c on a.imageid = c.imageid "
"JOIN user_info d on a.modifiedbyuser = d.userid "
"WHERE a.ImageId in ({0}) "
"ORDER BY a.ImageId,ArchiveDtim ASC")
cursor.execute(query.format(images_to_audit))
row = cursor.fetchone()
print()
if(row != None):
print("ImageId\tImgName\tTagState\tUser\tLoggedTime")
while row:
print("{0}\t{1}\t{2}\t{3}\t{4}".format(str(row[0]),str(row[1]),str(row[2]),str(row[3]),str(row[4])))
row = cursor.fetchone()
else:
print("No images!")
def extract_image_name_no_suffix(url):
start_idx = url.rfind('/')+1
end_idx = url.rfind('.')
return url[start_idx:end_idx]
def extract_image_id_from_urls(list_of_image_urls):
extracted_image_ids = []
for url in list_of_image_urls:
extracted_id = int(extract_image_name_no_suffix(url))
extracted_image_ids.append(extracted_id)
return extracted_image_ids
def main(num_of_images,user_name):
try:
if(os.getenv("DB_HOST") is None or os.getenv("DB_USER") is None or os.getenv("DB_NAME") is None or os.getenv("DB_PASS") is None):
print("Please set environment variables for DB_HOST, DB_USER, DB_NAME, DB_PASS")
return
if(num_of_images < 5 or num_of_images > 20):
print("Number of images should be between 5 and 20")
return
if(not user_name):
print("User name cannot be empty or whitespace")
return
#################################################################
# Below we simulate the following scenarios:
# Creating a User
# Onboarding of new images
# Checking out images to tag
# Checking in images that have or have not been tagged
#################################################################
db_config = DatabaseInfo(os.getenv("DB_HOST"),os.getenv("DB_NAME"),os.getenv("DB_USER"),os.getenv("DB_PASS"))
pg = PostGresProvider(db_config)
data_access = ImageTagDataAccess(pg)
user_id = data_access.create_user(user_name)
NUMBER_OF_IMAGES = num_of_images
# Simulate new images from VOTT getting created in some blob store
mocked_images = generate_test_image_infos(NUMBER_OF_IMAGES)
print()
print("***\tSubject matter experts use the CLI to upload new images...")
time.sleep(1)
print()
# Simulate the data access layer creating entries in the DB for the new images
# and returning a map of the original image url to generaled image id
url_to_image_id_map = data_access.add_new_images(mocked_images, user_id)
print()
print("***\tBehind the scenes Az Functions move the images to a new blob location")
time.sleep(1)
print()
#Invert the above map since the client will now be using the image id as a key
image_to_url = {v: k for k, v in url_to_image_id_map.items()}
# Simulates when the client has moved images to a new blob store container
# and creates a payload for the data access layer with a map for image id to new urls
updated_image_id_url_map = get_transformed_id_to_url_map(image_to_url)
# Simulates the call the client makes to the data access layer
# with the new payload. Image urls get updated in the DB
data_access.update_image_urls(updated_image_id_url_map, user_id)
print()
print("***\tThe newly uploaded images are now onboarded with a 'ready to tag' state. See audit history")
print()
time.sleep(1)
        # Print the audit history of all the newly onboarded images
        # involved in the simulation to prove the state tracking for onboarding.
image_ids = list(updated_image_id_url_map.keys())
pretty_print_audit_history(pg.get_connection(),image_ids)
time.sleep(3)
print()
print("***\tSubject matter experts use the CLI to retrieve images in a 'ready to tag' state")
time.sleep(2)
print()
list_of_image_urls = data_access.get_images_for_tagging(NUMBER_OF_IMAGES, user_id)
print()
print("***\tLet's wait for image taggers to get through the set of images....")
time.sleep(5)
print()
print("***\tDone! Though the subject matter experts didn't complete tagging all images")
time.sleep(2)
print()
'''
print("***\tRegardless the SMEs use the CLI to post the VOTT json results")
print()
# Since we rename the original image name to a integer that matchs the DB image id
# we need to extract out the image ids. Below this code is simulates extracting
# image ids from the VOTT JSON
extracted_image_ids = extract_image_id_from_urls(list_of_image_urls)
# Let assume 3 images got tagged and 2 images did not. The client will
# call corresponding methods to update tagged and untagged states
completed_tagged_ids = []
incomplete_tagged_ids = []
num_of_incomplete = NUMBER_OF_IMAGES/5
for idx, img_id in enumerate(extracted_image_ids):
if(idx > num_of_incomplete):
completed_tagged_ids.append(img_id)
else:
incomplete_tagged_ids.append(img_id)
data_access.update_tagged_images(completed_tagged_ids,user_id)
data_access.update_incomplete_images(incomplete_tagged_ids,user_id)
print()
print("***\tVOTT json results are posted. Lets take a look at the audit history")
time.sleep(2)
# Finally lets look at the audit history again. We expect to see some images as tagged
# and some as incomplete
print()
pretty_print_audit_history(pg.get_connection(),image_ids)
print()
print("Success!")
'''
#__verify_connect_to_db(get_connection())
#get_unvisited_items(get_connection(),count_of_images)
except Exception as e: print(e)
if __name__ == "__main__":
#print(sys.path)
console = logging.StreamHandler()
log = logging.getLogger()
log.setLevel(logging.getLevelName('DEBUG'))
log.addHandler(console)
if (len(sys.argv) != 3):
print("Usage: {0} (Number of Images) (User Name)".format(sys.argv[0]))
else:
main(int(sys.argv[1]), str(sys.argv[2]))
```
#### File: shared/onboarding/__init__.py
```python
import os
import logging
from enum import Enum
from datetime import datetime
import time
import asyncio
TIMEOUT_SECONDS = 1
class CopyStatus(Enum):
SUCCESS = "success",
PENDING = "pending",
ABORTED = "aborted",
FAILED = "failed",
TIMEOUT = "timeout" # custom status
class DeleteStatus(Enum):
SUCCESS = "success",
PENDING = "pending",
ABORTED = "aborted",
FAILED = "failed",
TIMEOUT = "timeout" # custom status
# Initiates copy of images from temporary to permanent storage, and checks the status of each operation.
# Returns two dictionaries, copy_succeeded_dict and copy_error_dict, in the format {sourceURL : destinationURL }.
def copy_images_to_permanent_storage(image_id_url_map, copy_source, copy_destination, blob_service):
copy_initiated_dict = {} # Dictionary of images for which copy was successfully initiated
copy_error_dict = {} # Dictionary of images for which some error/exception occurred
# Create new blob names
for key, value in image_id_url_map.items():
original_blob_url = key
# original_blob_name = original_blob_url.split("/")[-1]
file_extension = os.path.splitext(original_blob_url)[1]
image_id = value
new_blob_name = (str(image_id) + file_extension)
# Create the destination blob URL
destination_blob_path = blob_service.make_blob_url(copy_destination, new_blob_name)
# Copy blob from temp storage to permanent storage
logging.info("Now initiating copy of image from temporary to permanent storage...")
# Log source and destination paths for debugging
logging.info("Source path: " + original_blob_url)
logging.info("Destination path: " + destination_blob_path)
try:
blob_service.copy_blob(copy_destination, new_blob_name, original_blob_url)
logging.info("Done.")
# Add to list of items for which we need to check status if copy was initiated successfully
copy_initiated_dict[original_blob_url] = destination_blob_path
except Exception as e:
logging.error("ERROR: Exception thrown during copy attempt: " + str(e))
copy_error_dict[original_blob_url] = destination_blob_path
# Wait a few seconds before checking status
time.sleep(TIMEOUT_SECONDS)
copy_succeeded_dict = {} # Dictionary of copy operations that were successful
# Get copy status of each item. If status is succeeded, add to success list. Otherwise, add to error list.
for key, value in copy_initiated_dict.items():
target_blob_properties = blob_service.get_blob_properties(copy_destination, value.split("/")[-1])
copy_properties = target_blob_properties.properties.copy
# logging.info("Copy status of image" + value.split("/")[-1] + " is: " + copy_properties.status) # Debugging
# if copy_properties.status == CopyStatus.SUCCESS: # Note: Want to remove hard-coding, but this line does not work
if copy_properties.status == "success":
copy_succeeded_dict[key] = value
else:
copy_error_dict[key] = value
# Debugging
# logging.info("copy_succeeded_dict:")
# for key, value in copy_succeeded_dict.items():
# logging.info("Key: " + key + " Value: " + value)
# logging.info("copy_error_dict:")
# for key, value in copy_error_dict.items():
# logging.info("Key: " + key + " Value: " + value)
return copy_succeeded_dict, copy_error_dict
# Initiates deletion of images from temporary storage, and then checks whether the images still exist in the container.
# Returns two dictionaries, delete_succeeded_dict and delete_error_dict, in the format {sourceURL : destinationURL }.
def delete_images_from_temp_storage(delete_images_dict, delete_location, blob_service):
delete_initiated_dict = {} # Dictionary of images for which delete was successfully initiated
delete_error_dict = {} # Dictionary of images for which some error/exception occurred
# Delete blobs from container
for key, value in delete_images_dict.items():
logging.info("Now initiating delete of image from temp storage...")
logging.info("Image to be deleted: " + key)
try:
blob_service.delete_blob(delete_location, key.split("/")[-1])
logging.info("Done.")
# Add to list of items to check status if delete was initiated successfully
delete_initiated_dict[key] = value
except Exception as e:
logging.error("ERROR: Exception thrown during delete attempt: " + str(e))
delete_error_dict[key] = value
# Wait a few seconds before checking status
time.sleep(TIMEOUT_SECONDS)
delete_succeeded_dict = {} # Dictionary of delete operations that were successful
# List blobs in the source container. For each image in delete_initiated_dict, if the blob no longer exists,
# add to delete_succeeded_dict. If the blob still exists, add to delete_error_dict.
    remaining_blob_names = [blob.name for blob in blob_service.list_blobs(delete_location)]
    for key, value in delete_initiated_dict.items():
        blob_name = key.split('/')[-1]
        if blob_name in remaining_blob_names:
delete_error_dict[key] = value
else:
delete_succeeded_dict[key] = value
return delete_succeeded_dict, delete_error_dict
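# Usage sketch (hypothetical account/containers; blob_service is an azure-storage BlockBlobService):
#   copied, copy_errors = copy_images_to_permanent_storage(
#       {"https://acct.blob.core.windows.net/new-uploads/cat.jpg": 42},
#       "new-uploads", "perm-uploads", blob_service)
#   deleted, delete_errors = delete_images_from_temp_storage(copied, "new-uploads", blob_service)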
``` |
{
"source": "jmspring/azure_blob_examples",
"score": 2
} |
#### File: jmspring/azure_blob_examples/container_copy.py
```python
import json
import os
import sys
import uuid
import time
from datetime import datetime, timedelta
from azure.storage.blob import BlockBlobService, ContainerPermissions
def main():
# get command line args
account = sys.argv[1]
secret = sys.argv[2]
srcContainer = sys.argv[3]
files = sys.argv[4:]
# generate container name
destContainer = str(uuid.uuid4()).replace('-', '')
try:
# connect to blob store
bs = BlockBlobService(account_name=account, account_key=secret)
# create and setup container, by default a container is private
bs.create_container(destContainer)
bs.set_container_acl(destContainer)
# perform blob copy
copyStartTime = int(round(time.time() * 1000))
copyProps = {}
for f in files:
srcUrl = 'https://{}.blob.core.windows.net/{}/{}'.format(account, srcContainer, f)
cp = bs.copy_blob(destContainer, f, srcUrl)
copyProps[f] = cp
# wait for copy to finish
while len(copyProps.keys()) > 0:
for f, prop in copyProps.items():
bp = bs.get_blob_properties(destContainer, f)
                copyProps[f] = None if bp.properties.copy.status != 'pending' else bp
copyProps = { k:v for k, v in copyProps.items() if v }
# copy completed
copyEndTime = int(round(time.time() * 1000))
print('Blob copy completed in {}ms'.format(copyEndTime - copyStartTime), file=sys.stderr)
# generate SAS token, read only, valid for an hour
token = bs.generate_container_shared_access_signature(destContainer, ContainerPermissions.READ | ContainerPermissions.LIST, datetime.utcnow() + timedelta(hours=1))
# return information
result = {
'storage_account': account,
'container': destContainer,
'sas_token': token
}
print(json.dumps(result, indent=4, sort_keys=True))
except Exception as e:
print(e, file=sys.stderr)
if __name__ == "__main__":
main()
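# Usage sketch (placeholder credentials):
#   python container_copy.py <account> <account_key> <source_container> a.jpg b.jpg
# Prints JSON naming the new container and a read/list SAS token valid for one hour, usable as e.g.
#   https://<account>.blob.core.windows.net/<container>/a.jpg?<sas_token>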
``` |
{
"source": "jmspring/azure-scripts",
"score": 2
} |
#### File: resource-custodian/ResourceCustodian/__init__.py
```python
import datetime
import logging
import datetime
import os
import sys
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.monitor import MonitorClient
import azure.functions as func
def main(mytimer: func.TimerRequest) -> None:
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
# setup clients
subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
credentials = ServicePrincipalCredentials(
client_id=os.environ['AZURE_CLIENT_ID'],
secret=os.environ['AZURE_CLIENT_SECRET'],
tenant=os.environ['AZURE_TENANT_ID']
)
rgClient = ResourceManagementClient(credentials, subscription_id)
mClient = MonitorClient(credentials, subscription_id)
# retrieve resource groups
filter_prefix=os.environ['AZURE_RESOURCE_GROUP_PREFIX']
matcthing_resource_groups = []
for item in rgClient.resource_groups.list():
if item.name.lower().startswith(filter_prefix):
matcthing_resource_groups.append(item.name)
# check create date for each resource group
inactivity_window=int(os.environ['AZURE_RESOURCE_GROUP_INACTIVITY'])
inactivity_date = datetime.datetime.today() - datetime.timedelta(minutes=inactivity_window)
groups_to_remove = []
for rg in matcthing_resource_groups:
filter = " and ".join([ "eventTimestamp ge {}".format(inactivity_date), "resourceGroupName eq '{}'".format(rg) ])
select = ",".join([
"eventTimestamp",
"eventName",
"operationName",
"resourceGroupName"
])
activity_logs = mClient.activity_logs.list( filter=filter, select=select )
group_active = False;
for log in activity_logs:
group_active = True
break
if not group_active:
groups_to_remove.append(rg)
# remove groups
async_handles = []
for rg in groups_to_remove:
logging.info("deleting resource group: {}".format(rg))
async_handles.append(rgClient.resource_groups.delete(rg))
for handle in async_handles:
handle.wait()
logging.info('Resource Custodian ran at %s', utc_timestamp)
``` |
{
"source": "jmspring/deis_certificate_manager",
"score": 3
} |
#### File: jmspring/deis_certificate_manager/deis_certificate_manager.py
```python
import requests
import sys
import time
import json
import threading
from os import environ
import atexit
from flask import Flask
from flask import Response
from flask import request
from deis_rest_client import DeisRestClient
# globals
shutdownRequested = False
certhandlerThread = None
stats = {
'applications': {
'already_configured': [],
'certificate_added': [],
'problem': []
}
}
def environment_variables():
env = {
'deisUrl': environ.get('DEIS_CONTROLLER_URL', None),
'deisUsername': environ.get('DEIS_USERNAME', None),
'deisPassword': environ.get('DEIS_PASSWORD', None),
'domainName': environ.get('DEIS_APPLICATION_DOMAIN_NAME', None),
'letsEncryptEmail': environ.get('LETS_ENCRYPT_CERTIFICATE_EMAIL', None),
'letsEncryptServer': environ.get('LETS_ENCRYPT_SERVER', None),
'workerImage': environ.get('DEIS_CERTIFICATE_WORKER_IMAGE', None),
'proxies': environ.get('HTTP_PROXIES', None),
}
return env
def required_environment_vars_set(env):
if env['deisUrl'] != None and \
env['deisUsername'] != None and \
env['deisPassword'] != None and \
env['domainName'] != None and \
env['letsEncryptEmail'] != None and \
env['letsEncryptServer'] != None and \
env['workerImage'] != None:
return True
return False
def mask_sensitive_environment_variables(env):
for key in env.keys():
if 'PASSWORD' in key.upper():
env[key] = '********'
return env
def wait_for_cert_worker(worker, domain, timeout=30, proxies=None):
ready = False
start = time.time()
while not ready:
r = requests.get('http://' + worker + '.' + domain + '/', proxies=proxies)
if r.status_code == 200:
ready = True
elif time.time() - start > timeout:
break
else:
time.sleep(1)
return ready
def request_certificate(worker, domain, proxies=None):
r = requests.get('http://' + worker + '.' + domain + '/generate_cert', proxies=proxies)
if r.status_code != 201:
        print('Error requesting certificate: {}'.format(r.status_code))
return None
return r.json()
def all_certificate_domains(certs):
domains = list((certs[cert]['domains']) for cert in certs)
return [domain for subdomains in domains for domain in subdomains]
def all_application_domains(client, apps):
result = []
for app in apps:
domains = client.list_domains(app)
if domains and domains['count'] > 0:
r = list((domain['domain']) for domain in domains['results'])
result.extend(r)
return result
def is_certificate_needed(client, fqdn, certs, apps):
app = fqdn[:fqdn.index('.')]
# does the app already have a certificate?
if app in certs:
# is the app properly configured?
cert = certs[app]
# does the certificate contain the fqdn in it's domain?
if cert['domains'] and len(cert['domains']) > 0:
if fqdn in cert['domains']:
domains = client.list_domains(app)
if not domains:
raise SystemError('Unable to list domains for app {}'.format(app))
else:
if domains['count'] > 0:
if fqdn in list((domain['domain']) for domain in domains['results']):
return False
else:
raise SystemError('Domain {} not associated with application {}'.format(fqdn, app))
else:
raise SystemError('No domains for application {}'.format(app))
else:
raise SystemError('FQDN ({}) not in certificate domains.'.format(fqdn))
else:
raise SystemError('Certificate with name {} contains no domains.'.format(app))
# make sure an existing domain with the same fqdn doesn't exist
domains = all_certificate_domains(certs)
if domains and len(domains) > 0:
if fqdn in domains:
            raise SystemError('Domain ({}) already associated with another certificate.'.format(fqdn))
domains = all_application_domains(client, apps)
if fqdn in domains:
# make sure the fqdn isn't associated with the correct app
domains = client.list_domains(app)
if fqdn in domains:
return True
else:
            raise SystemError('Domain ({}) already associated with another application.'.format(app))
return True
def applications(client):
result = None
apps = client.list_applications()
if apps and apps['count'] > 0:
result = dict((app['id'], app) for app in apps['results'])
return result
def certificates(client):
result = None
certs = client.list_certs()
if certs and certs['count'] > 0:
result = dict((cert['name'], cert) for cert in certs['results'])
return result
def get_certificate_for_application(client, fqdn, letsEncryptEmail, letsEncryptServer, workerImage, proxies = None):
error = None
certinfo = None
domainName = fqdn[fqdn.index('.') + 1:]
# create an application for the worker process
app = client.create_application()
if app:
appid = app['id']
# create the configuration for the application
config = {
'APPLICATION_FQDN': fqdn,
'LETS_ENCRYPT_EMAIL': letsEncryptEmail,
'LETS_ENCRYPT_SERVER': letsEncryptServer
}
if client.create_config(appid, config):
# assign the domain to the worker app
if client.add_domain(appid, fqdn):
# deploy the worker application
if client.deploy_application(appid, workerImage):
# the worker image can take awhile to be ready, so try
# waiting for awhile until it comes up
if wait_for_cert_worker(appid, domainName, proxies=proxies):
certinfo = request_certificate(appid, domainName, proxies)
if not certinfo:
error = SystemError('Unable to retrieve certificate for {}'.format(appid))
else:
error = SystemError('Timeout waiting for certificate worker task.')
else:
error = SystemError('Unable to deploy certificate worker task.')
else:
error = SystemError('Unable to assign domain {} to worker task {}'.format(fqdn, appid))
else:
error = SystemError('Unable to set config for app {}'.format(appid))
# clean up
if not client.destroy_application(appid):
error = SystemError('Unable to destroy worker application: {}'.format(appid))
else:
error = SystemError('Unable to create application for worker process.')
if certinfo:
return certinfo
else:
if error:
return error
else:
            return SystemError('An undefined error occurred while trying to get the certificate.')
def install_certificate_for_app(client, fqdn, cert, key):
app = fqdn[0:fqdn.index('.')]
error = None
# add certificate to the application
if client.add_certificate(app, cert, key):
# add the domain to the application
if client.add_domain(app, fqdn):
# add the domain to the certificate
if client.add_domain_to_certificate(app, fqdn):
return True
else:
error = SystemError('Unable to add domain {} to certificate {}'.format(fqdn, app))
else:
error = SystemError('Unable to add domain {} to app {}.'.format(fqdn, app))
else:
error = SystemError('Unable to add certificate {}.'.format(app))
return error
def application_already_handled(app):
global stats
if app in stats['applications']['already_configured'] or \
app in stats['applications']['problem'] or \
app in stats['applications']['certificate_added']:
return True
return False
def application_check_loop():
global shutdownRequested
env = {}
environment_ready = False
client = None
last_run = 0
while not shutdownRequested:
if not environment_ready:
env = environment_variables()
if required_environment_vars_set(env):
client = DeisRestClient(env['deisUrl'], \
env['deisUsername'], \
env['deisPassword'], \
env['proxies'])
client.login()
environment_ready = True
continue
else:
# check every ten seconds
            if time.time() - last_run >= 10:
last_run = time.time()
apps = applications(client)
certs = certificates(client)
for app in apps:
# has the application already been handled?
if application_already_handled(app):
continue
fqdn = '{}.{}'.format(app, env['domainName'])
try:
needcert = is_certificate_needed(client, fqdn, certs, apps)
if not needcert:
stats['applications']['already_configured'].append(app)
continue
except SystemError as se:
print 'Error: {}'.format(se)
stats['applications']['problem'].append(app)
continue
# get the certificate for the application
cert = get_certificate_for_application(client, \
fqdn, \
env['letsEncryptEmail'], \
env['letsEncryptServer'], \
env['workerImage'], \
proxies=env['proxies'])
if type(cert) is not dict:
stats['applications']['problem'].append(app)
print 'Error: {}'.format(cert)
continue
# install the certificate
r = install_certificate_for_app(client, fqdn, cert['cert'], cert['key'])
if type(r) is not bool:
stats['applications']['problem'].append(app)
print 'Error: {}'.format(r)
else:
stats['applications']['certificate_added'].append(app)
time.sleep(0.25)
def shutdown_server():
global certhandlerThread
global shutdownRequested
shutdownRequested = True
# shutdown flask
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
    # wait for the certificate handler thread to shut down
if certhandlerThread:
t = certhandlerThread
certhandlerThread = None
t.join()
def create_app():
app = Flask(__name__)
@app.route('/config')
def config():
masked_env = mask_sensitive_environment_variables(environment_variables())
output = json.dumps(masked_env, indent=4) + '\n'
return Response(output, mimetype='text/plain')
@app.route('/shutdown')
def shutdown():
shutdown_server()
return Response('ok\n', mimetype='text/plain')
@app.route('/stats')
def stats():
global stats
output = '{}\n'.format(json.dumps(stats, sort_keys=True, indent=4, separators=(',', ': ')))
return Response(output, mimetype='text/plain')
def interrupt():
global certhandlerThread
global shutdownRequested
shutdownRequested = True
if certhandlerThread:
certhandlerThread.join()
certhandlerThread = None
def start_handler():
global certhandlerThread
certhandlerThread = threading.Thread(target=application_check_loop)
certhandlerThread.start()
start_handler()
atexit.register(interrupt)
return app
if __name__ == '__main__':
app = create_app()
# Bind to PORT if defined, otherwise default to 5000.
port = int(environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
```
#### File: jmspring/deis_certificate_manager/deis_rest_client.py
```python
import requests
class DeisRestClient:
username = None
    password = None
token = None
baseUrl = None
proxies = None
def __init__(self, baseUrl, username, password, proxies = None):
self.baseUrl = baseUrl
if self.baseUrl.endswith('/'):
self.baseUrl = self.baseUrl[:-1]
self.username = username
self.password = password
if proxies:
self.proxies = proxies
def login(self):
if self.username == None or self.password == None or self.baseUrl == None:
return False
r = requests.post(self.baseUrl + '/v2/auth/login/',
json = { 'username': self.username, 'password': self.password },
proxies = self.proxies)
if r.status_code != 200:
return False
self.token = r.json()['token']
return True
def list_applications(self):
if not self.token:
if not self.login():
return None
r = requests.get(self.baseUrl + '/v2/apps/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 200:
return False
return r.json()
def application_detail(self, application):
if not self.token:
if not self.login():
return None
r = requests.get(self.baseUrl + '/v2/apps/' + application + '/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 200:
return False
return r.json()
def create_application(self):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/apps/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 201:
return None
return r.json()
def destroy_application(self, app):
if not self.token:
if not self.login():
return None
r = requests.delete(self.baseUrl + '/v2/apps/' + app + '/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 204:
return False
return True
def deploy_application(self, app, image):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/apps/' + app + '/builds/',
headers={ 'authorization': 'token ' + self.token },
json={ 'image': image },
proxies = self.proxies)
if r.status_code != 201:
return None
return r.json()
def create_config(self, app, config):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/apps/' + app + '/config/',
headers={ 'authorization': 'token ' + self.token },
json={ 'values': config },
proxies = self.proxies)
if r.status_code != 201:
return None
return r.json()
def list_certs(self):
if not self.token:
if not self.login():
return None
r = requests.get(self.baseUrl + '/v2/certs/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 200:
return False
return r.json()
def list_domains(self, app):
if not self.token:
if not self.login():
return None
r = requests.get(self.baseUrl + '/v2/apps/' + app + '/domains/',
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 200:
return False
return r.json()
def add_domain(self, app, domain):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/apps/' + app + '/domains/',
headers={ 'authorization': 'token ' + self.token },
json = { 'domain': domain },
proxies = self.proxies)
if r.status_code != 201:
return False
return True
def remove_domain(self, app, domain):
if not self.token:
if not self.login():
return None
r = requests.delete(self.baseUrl + '/v2/apps/' + app + '/domain/' + domain,
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 204:
return False
return True
def add_certificate(self, name, cert, key):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/certs/',
headers={ 'authorization': 'token ' + self.token },
json = { 'name': name, 'certificate': cert, 'key': key },
proxies = self.proxies)
if r.status_code != 201:
return False
return True
def add_domain_to_certificate(self, cert, domain):
if not self.token:
if not self.login():
return None
r = requests.post(self.baseUrl + '/v2/certs/' + cert + '/domain/',
headers={ 'authorization': 'token ' + self.token },
json = { 'domain': domain },
proxies = self.proxies)
if r.status_code != 201:
return False
return True
def get_certificate(self, cert):
if not self.token:
if not self.login():
return None
r = requests.get(self.baseUrl + '/v2/certs/' + cert,
headers={ 'authorization': 'token ' + self.token },
proxies = self.proxies)
if r.status_code != 200:
return False
return r.json()
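# Hedged usage sketch (not part of the original module); the controller URL and
# credentials below are placeholders, showing how deis_certificate_manager.py
# drives this client.
if __name__ == '__main__':
    client = DeisRestClient('http://deis.example.com', 'admin', 'secret')
    if client.login():
        print(client.list_applications())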
``` |
{
"source": "JMS-Software-Development/django-sockpuppet",
"score": 2
} |
#### File: management/commands/_base.py
```python
from pathlib import Path
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
class BaseGenerateCommand(BaseCommand):
def lookup_app_path(self, app_name):
try:
config = apps.get_app_config(app_name)
except LookupError as e:
raise CommandError(str(e))
module_path = config.module.__path__[0]
return module_path
def call_stdout(self, msg, _type="WARNING"):
style = getattr(self.style, _type)
self.stdout.write(style(msg))
def create_file(self, folder, filename, contents):
if folder == "":
base_path = Path.cwd()
else:
base_path = self.module_path
filepath = base_path / folder / filename
if filepath.exists():
partial_path = "/".join(filepath.parts[-4:])
self.call_stdout(
"{} already exists, so it will be skipped".format(partial_path)
)
return
try:
filepath.parent.mkdir(parents=True)
except FileExistsError:
pass
with filepath.open(mode="w") as f:
f.write(contents)
```
#### File: management/commands/generate_reflex.py
```python
import keyword
from pathlib import Path
from django.core.management import CommandError
from django.template.loader import get_template
from ._base import BaseGenerateCommand
from ...utils import pascalcase
TEMPLATES = {
"_reflex.py": "sockpuppet/scaffolds/reflex.py",
"_controller.js": "sockpuppet/scaffolds/controller.js",
".js": "sockpuppet/scaffolds/application.js",
".py": "sockpuppet/scaffolds/view.py",
".html": "sockpuppet/scaffolds/template.html",
}
class Command(BaseGenerateCommand):
help = "Scaffold for reflex. Includes javascript and python."
def add_arguments(self, parser):
parser.add_argument(
"app_name",
nargs=1,
type=str,
help="The app where the generated files should be placed",
)
parser.add_argument(
"reflex_name",
nargs="?",
type=str,
help="The name of the reflex and javascript controller",
default="example",
)
parser.add_argument(
"--javascript",
dest="javascript",
action="store_true",
            help="Include this to generate a setup that includes javascript with controllers",
)
parser.set_defaults(javascript=False)
def handle(self, *args, **options):
app_name = options["app_name"][0]
reflex_name = options["reflex_name"].lower()
using_javascript = options["javascript"]
if not reflex_name.isidentifier():
raise CommandError(
f"The reflex name ({reflex_name}) must be a valid Python identifier."
)
if reflex_name == "_":
raise CommandError("The reflex name must not be a single underscore.")
if reflex_name in keyword.kwlist:
raise CommandError(
f"The reflex name ({reflex_name}) can't be a Python keyword."
)
module_path = self.lookup_app_path(app_name)
self.module_path = Path(module_path)
paths = [
(False, "reflexes", "_reflex.py"),
(True, "javascript", ".js"),
(True, "javascript/controllers", "_controller.js"),
(False, "views", ".py"),
(False, "templates", ".html"),
]
for without_js, path, suffix in paths:
template_name = TEMPLATES[suffix]
template = get_template(template_name)
rendered = template.render(
{
"class_name": pascalcase(reflex_name),
"reflex_name": reflex_name,
"using_javascript": using_javascript,
}
)
if without_js and not using_javascript:
# skipping these templates
continue
self.create_file(path, "{}{}".format(reflex_name, suffix), rendered)
self.create_file("views", "__init__.py", "")
self.create_file("reflexes", "__init__.py", "")
self.call_stdout("Scaffolding generated!", _type="SUCCESS")
if (self.module_path / "views.py").exists():
msg = "We created a views directory which means that you need to move your initial views there"
self.call_stdout("")
self.call_stdout(msg, _type="WARNING")
self.call_stdout("Last step is to add the view to urls.py", _type="SUCCESS")
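# Hedged usage note (illustrative app/reflex names): this management command is
# invoked as
#   python manage.py generate_reflex myapp my_reflex --javascript
# where "myapp" must be an installed Django app and "my_reflex" a valid identifier.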
```
#### File: django-sockpuppet/sockpuppet/utils.py
```python
try:
from lxml import etree
from io import StringIO
from lxml.cssselect import CSSSelector
HAS_LXML = True
except ImportError:
HAS_LXML = False
from bs4 import BeautifulSoup
def pascalcase(value: str) -> str:
"""capitalizes the first letter of each _-separated component.
This method preserves already pascalized strings."""
components = value.split("_")
if len(components) == 1:
return value[0].upper() + value[1:]
else:
components[0] = components[0][0].upper() + components[0][1:]
return "".join(x.title() for x in components)
def camelcase(value: str) -> str:
"""capitalizes the first letter of each _-separated component except the first one.
This method preserves already camelcased strings."""
components = value.split("_")
if len(components) == 1:
return value[0].lower() + value[1:]
else:
components[0] = components[0][0].lower() + components[0][1:]
return components[0].lower() + "".join(x.title() for x in components[1:])
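# Illustrative examples (hypothetical inputs):
#   pascalcase("my_reflex") -> "MyReflex",  pascalcase("Example") -> "Example"
#   camelcase("my_reflex")  -> "myReflex",  camelcase("Example")  -> "example"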
def camelize_value(value):
"""camelizes all keys/values in a given dict or list"""
if isinstance(value, list):
value = [camelize_value(val) for val in value]
elif isinstance(value, dict):
value = {camelcase(key): camelize_value(val) for key, val in value.items()}
return value
def _lxml_selectors(html, selectors):
parser = etree.HTMLParser()
document = etree.parse(StringIO(html), parser)
selectors = [CSSSelector(selector) for selector in selectors]
selectors = [selector for selector in selectors if selector(document)]
return document, selectors
def _bs_selectors(html, selectors):
document = BeautifulSoup(html)
selectors = [selector for selector in selectors if document.select(selector)]
return document, selectors
def get_document_and_selectors(html, selectors):
if HAS_LXML:
return _lxml_selectors(html, selectors)
return _bs_selectors(html, selectors)
def parse_out_html(document, selector):
if HAS_LXML:
return "".join(
[
etree.tostring(e, method="html").decode("utf-8")
for e in selector(document)
]
)
return "".join([e.decode_contents() for e in document.select(selector)])
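# Hedged example (hypothetical markup): with lxml available,
#   doc, sels = get_document_and_selectors('<div id="a">hi</div>', ['#a'])
# returns a parsed tree plus compiled CSSSelector objects, and
#   parse_out_html(doc, sels[0])
# serializes the matched element; with the BeautifulSoup fallback the selectors
# stay plain strings and the matched elements' inner contents are returned instead.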
``` |
{
"source": "jmsstudio/practice-python",
"score": 4
} |
#### File: jmsstudio/practice-python/guess_the_number.py
```python
import random
import simplegui
range_limit = 100
def new_game():
# initialize global variables
global secret_number, range_limit, num_guess
print "=== GUESS THE NUMBER ==="
num_guess = 0
secret_number = random.randrange(0, range_limit)
def range100():
# changes the range to [0,100) and starts a new game
global range_limit, max_guesses
range_limit = 100
max_guesses = 7
new_game()
def range1000():
# changes the range to [0,1000) and starts a new game
global range_limit, max_guesses
range_limit = 1000
max_guesses = 10
new_game()
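# Why 7 and 10 guesses: binary search halves the range each time, so
# ceil(log2(100)) = 7 guesses suffice for [0,100) and ceil(log2(1000)) = 10
# guesses suffice for [0,1000).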
def input_guess(guess):
# main game logic
global secret_number, max_guesses, num_guess
num_guess += 1
_guess = int(guess)
print "Guess was " + str(_guess)
if num_guess > max_guesses:
print "Game over - number of guesses exceeded " + str(max_guesses)
new_game()
else:
if _guess > secret_number:
print "Higher"
elif _guess < secret_number:
print "Lower"
else:
print "Correct"
new_game()
frame = simplegui.create_frame("Guess the number", 200, 300)
frame.add_button("Reset - Range [0-100)", range100)
frame.add_button("Reset - Range [0-1000)", range1000)
frame.add_input("Guess the number", input_guess, 40)
frame.start()
range100()
``` |
{
"source": "jmstadt/cell_segmentation",
"score": 2
} |
#### File: jmstadt/cell_segmentation/app.py
```python
from fastai.vision import *
from fastai.metrics import error_rate
from flask import Flask, request, url_for, flash, redirect
from werkzeug.utils import secure_filename
from flask import send_from_directory
import requests
import numpy as np
import os
from os import rename, listdir
from PIL import Image
import class_def
from class_def import SegLabelListCustom
from class_def import SegItemListCustom
path = ''
export_file_url = 'https://www.dropbox.com/s/bjszupvu7a15ccb/cell_export.pkl?dl=1'
export_file_name = 'cell_export.pkl'
def down_load_file(filename, url):
"""
    Download a URL to a file
"""
with open(filename, 'wb') as fout:
response = requests.get(url, stream=True)
response.raise_for_status()
# Write response data to file
for block in response.iter_content(4096):
fout.write(block)
def download_if_not_exists(filename, url):
"""
Download a URL to a file if the file
does not exist already.
Returns
-------
True if the file was downloaded,
False if it already existed
"""
if not os.path.exists(filename):
down_load_file(filename, url)
return True
return False
download_if_not_exists(export_file_name, export_file_url)
class SegLabelListCustom(SegmentationLabelList):
def open(self, fn): return open_mask(fn, div=True)
class SegItemListCustom(SegmentationItemList):
_label_cls = SegLabelListCustom
learn = load_learner(path, export_file_name)
UPLOAD_FOLDER = ''
ALLOWED_EXTENSIONS = set(['jpg', 'png'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
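# Illustrative: allowed_file('cells.png') -> True, allowed_file('notes.txt') -> False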
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
#filename = file.filename
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
image = open_image(filename)
image_url = url_for('uploaded_file', filename=filename)
think = learn.predict(image)
think_np = np.array(think[1])
think_np.shape = (256,256)
think_np = think_np.astype(int)
think_np[think_np > 0] = 255
think_im = Image.fromarray((think_np).astype('uint8'), mode='L')
think_im.save(os.path.join(app.config['UPLOAD_FOLDER'], 'think2_im.png'))
think_im_url = url_for('uploaded_file', filename='think2_im.png')
print(think_im_url)
#image.show(y=learn.predict(image)[0])
return '''<h1>The cell image is:</h1>
<img src= "{}" height = "85" width="200"/>
<h1>The cell segmentation is:</h1>
<img src= "{}" height = "85" width="200"/>'''.format(image_url, think_im_url)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload an image of Cells</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
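# Hedged run sketch (not in the original file), mirroring the sibling apps'
# pattern of binding to PORT if defined and defaulting to 5000 otherwise.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))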
``` |
{
"source": "jmstadt/Employee_Turnover_Prediction",
"score": 3
} |
#### File: jmstadt/Employee_Turnover_Prediction/app.py
```python
from fastai.tabular import *
from flask import Flask, request
import requests
import os.path
path = ''
export_file_url = 'https://www.dropbox.com/s/w55aizavo8ntqps/HR_data_Employee_Turnover_export.pkl?dl=1'
export_file_name = 'HR_data_Employee_Turnover_export.pkl'
def down_load_file(filename, url):
"""
    Download a URL to a file
"""
with open(filename, 'wb') as fout:
response = requests.get(url, stream=True)
response.raise_for_status()
# Write response data to file
for block in response.iter_content(4096):
fout.write(block)
def download_if_not_exists(filename, url):
"""
Download a URL to a file if the file
does not exist already.
Returns
-------
True if the file was downloaded,
False if it already existed
"""
if not os.path.exists(filename):
down_load_file(filename, url)
return True
return False
download_if_not_exists(export_file_name, export_file_url)
learn = load_learner(path, export_file_name)
app = Flask(__name__)
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST': #this block is only entered when the form is submitted
satisfaction_level = request.form.get('satisfaction_level')
last_evaluation = request.form.get('last_evaluation')
number_project = request.form.get('number_project')
average_montly_hours = request.form.get('average_montly_hours')
time_spend_company = request.form.get('time_spend_company')
Work_accident = request.form.get('Work_accident')
promotion_last_5years = request.form.get('promotion_last_5years')
sales = request.form.get('sales')
salary = request.form.get('salary')
inf_df = pd.DataFrame(columns=['satisfaction_level','last_evaluation', 'number_project',
'average_montly_hours', 'time_spend_company',
'Work_accident',
'promotion_last_5years', 'sales', 'salary'])
inf_df.loc[0] = [satisfaction_level, last_evaluation, number_project, average_montly_hours,
time_spend_company, Work_accident,
promotion_last_5years, sales, salary]
inf_df['satisfaction_level'] = inf_df['satisfaction_level'].astype(float)
inf_df['last_evaluation'] = inf_df['last_evaluation'].astype(float)
inf_df['number_project'] = inf_df['number_project'].astype(int)
inf_df['average_montly_hours'] = inf_df['average_montly_hours'].astype(int)
inf_df['time_spend_company'] = inf_df['time_spend_company'].astype(int)
inf_df['Work_accident'] = inf_df['Work_accident'].astype(int)
inf_df['promotion_last_5years'] = inf_df['promotion_last_5years'].astype(int)
inf_row = inf_df.iloc[0]
pred = learn.predict(inf_row)
return '''<h3>The input Satisfaction Level is: {}</h3>
<h3>The input Last Evaluation is: {}</h3>
<h3>The input Number of Projects is: {}</h3>
<h3>The input Average Monthly Hours are: {}</h3>
<h3>The input Number of Years Employed is: {}</h3>
<h3>The input on whether the employee had a work accident is (0 for No, 1 for Yes): {}</h3>
<h3>The input on whether the employee has been promoted in the last 5 years (0 for No, 1 for Yes) is: {}</h3>
<h3>The input Department is: {}</h3>
<h3>The input Salary Level is: {}</h3>
<h1>The employee risk of leaving (Category 1 for Yes, Category 0 for No): {}</h1>'''.format(satisfaction_level,
last_evaluation,
number_project,
average_montly_hours,
time_spend_company,
Work_accident,
promotion_last_5years,
sales, salary,
pred
)
return '''<form method="POST">
<h1>Predicting whether an employee is at risk of leaving</h1>
Satisfaction Level, enter a two digit decimal number between 0 and 1: <input type="number" name="satisfaction_level" step=0.01 min=0 max =1 required="required"><br>
Last Evaluation, enter a two digit decimal number between 0 and 1: <input type="number" name="last_evaluation" step=0.01 min=0 max =1 required="required"><br>
How many projects has the employee worked? enter a number between 0 and 9: <input type="number" name="number_project" step=1 min=0 max =9 required="required"><br>
        What is the average number of hours the employee works per month? <input type="number" name ="average_montly_hours" step=1 min=1 max=310 required="required"><br><br>
        How many years has the employee been employed? <input type="number" name ="time_spend_company" step=1 min=0 max=50 required="required"><br><br>
        Has the employee had a work accident (0 for No, 1 for Yes)? <input type="number" name ="Work_accident" step=1 min=0 max=1 required="required"><br><br>
        Has the employee been promoted in the last 5 years? (0 for No, 1 for Yes) <input type="number" name ="promotion_last_5years" step=1 min=0 max=1 required="required"><br><br>
Select the employee's Department: <select name="sales">
<option value="sales">sales</option>
<option value="accounting">accounting</option>
<option value="hr">hr</option>
<option value="technical">technical</option>
<option value="support">support</option>
<option value="management">management</option>
<option value="IT">IT</option>
<option value="product_mng">product_mng</option>
<option value="marketing">marketing</option>
<option value="RandD">RandD</option>
</select><br>
Is the employee's salary low, medium, or high?: <select name="salary">
<option value="low">low</option>
<option value="medium">medium</option>
<option value="high">high</option>
</select><br>
<input type="submit" value="Submit"><br>
</form>'''
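# Hedged usage note (illustrative values; the field names are the ones defined in
# the form above):
#   curl -X POST http://127.0.0.1:5000/predict \
#        -d satisfaction_level=0.4 -d last_evaluation=0.7 -d number_project=3 \
#        -d average_montly_hours=160 -d time_spend_company=4 -d Work_accident=0 \
#        -d promotion_last_5years=0 -d sales=sales -d salary=low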
#if __name__ == '__main__':
# app.run(port=5000, debug=False)
``` |
{
"source": "jmstar85/rl_practice",
"score": 3
} |
#### File: rl_practice/dqn_mario/agent.py
```python
import tensorflow as tf
import numpy as np
from network import DQN
from replay_buffer import ReplayBuffer
class DQNAgent:
def __init__(self, sess, state_size, action_size):
self.sess = sess
self.state_size = state_size
self.action_size = action_size
# hyper parameter
self.batch_size = 32
self.discount_factor = 0.99
self.learning_rate = 0.00025
# epsilon
self.s_epsilon = 1.0
self.e_epsilon = 0.01
self.n_epsilon_decay = 100000
self.epsilon = self.s_epsilon
# replay buffer
self.buffer = ReplayBuffer(50000)
# place holder
self.actions = tf.placeholder(tf.int32, shape=None)
self.targets = tf.placeholder(tf.float32, shape=None)
# network
self.policy_net = DQN(self.state_size, self.action_size, net_name="policy_net")
self.target_net = DQN(self.state_size, self.action_size, net_name="target_net")
self.sess.run(tf.global_variables_initializer())
self.update_target_network()
# optimizer
self.loss_op, self.train_op = self._build_op()
    def _build_op(self):
        """Define the loss function and optimizer used to train the network."""
    def select_action(self, state):
        """Select an action with an epsilon-greedy policy."""
    def update_model(self):
        """Train the policy network."""
    def update_target_network(self):
        """Copy the policy network's variables to the target network so the target network stays up to date."""
def save_model(self, filename):
"""Save model."""
saver = tf.train.Saver()
path = "./save/" + filename + ".ckpt"
save_path = saver.save(self.sess, path)
print("[Model saved in path: %s !!!]" % save_path)
def load_model(self, filename):
"""Load model."""
saver = tf.train.Saver()
path = "./save/" + filename + ".ckpt"
saver.restore(self.sess, path)
print("[Model restored !!!]")
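# Illustrative sketch only -- the stub methods above are intentionally left as
# exercises. This standalone helper shows the epsilon-greedy rule that
# select_action is expected to implement: explore with probability epsilon,
# otherwise pick the greedy action.
def _epsilon_greedy_example(q_values, epsilon, action_size):
    if np.random.rand() < epsilon:
        return np.random.randint(action_size)
    return int(np.argmax(q_values))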
```
#### File: rl_practice/dqn_mario/wrapper.py
```python
import numpy as np
from collections import deque
import gym
import cv2
IMG_SIZE = 84
def _process_frame_mario(frame):
if frame is not None: # for future meta implementation
img = np.reshape(frame, [240, 256, 3]).astype(np.float32)
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
x_t = np.expand_dims(cv2.resize(img, (IMG_SIZE, IMG_SIZE)), axis=-1)
x_t.astype(np.uint8)
else:
x_t = np.zeros((IMG_SIZE, IMG_SIZE, 1))
return x_t
class ProcessFrameMario(gym.Wrapper):
def __init__(self, env=None):
super(ProcessFrameMario, self).__init__(env)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(1, IMG_SIZE, IMG_SIZE))
self.prev_time = 400
self.prev_stat = 0
self.prev_score = 0
self.prev_dist = 40
def _step(self, action):
'''
Implementing custom rewards
Time = -0.1
Distance = +1 or 0
Player Status = +/- 5
Score = 2.5 x [Increase in Score]
Done = +50 [Game Completed] or -50 [Game Incomplete]
'''
obs, reward, done, info = self.env.step(action)
return _process_frame_mario(obs), reward, done, info
def _reset(self):
return _process_frame_mario(self.env.reset())
def change_level(self, level):
self.env.change_level(level)
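# Hedged sketch (not wired into the wrappers): one way the custom reward scheme
# described in ProcessFrameMario._step's docstring could be computed from
# per-step deltas; deriving the deltas from the env's info dict (e.g. via the
# prev_* fields tracked above) is left to the caller.
def _custom_mario_reward(time_delta, dist_delta, status_delta, score_delta, done, completed):
    reward = -0.1 * time_delta                   # Time = -0.1 per unit elapsed
    reward += 1.0 if dist_delta > 0 else 0.0     # Distance = +1 or 0
    reward += 5.0 * status_delta                 # Player status = +/- 5
    reward += 2.5 * score_delta                  # Score = 2.5 x increase in score
    if done:
        reward += 50.0 if completed else -50.0   # Game completed vs incomplete
    return reward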
class BufferSkipFrames(gym.Wrapper):
def __init__(self, env=None, skip=4, shape=(IMG_SIZE, IMG_SIZE)):
super(BufferSkipFrames, self).__init__(env)
self.counter = 0
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(IMG_SIZE, IMG_SIZE, 4))
self.skip = skip
self.buffer = deque(maxlen=self.skip)
def _step(self, action):
obs, reward, done, info = self.env.step(action)
counter = 1
total_reward = reward
self.buffer.append(obs)
for i in range(self.skip - 1):
if not done:
obs, reward, done, info = self.env.step(action)
total_reward += reward
counter += 1
self.buffer.append(obs)
else:
self.buffer.append(obs)
frame = np.stack(self.buffer, axis=0)
frame = np.reshape(frame, (IMG_SIZE, IMG_SIZE, 4))
return frame, total_reward, done, info
def _reset(self):
self.buffer.clear()
obs = self.env.reset()
for i in range(self.skip):
self.buffer.append(obs)
frame = np.stack(self.buffer, axis=0)
frame = np.reshape(frame, (IMG_SIZE, IMG_SIZE, 4))
return frame
def change_level(self, level):
self.env.change_level(level)
class NormalizedEnv(gym.ObservationWrapper):
def __init__(self, env=None):
super(NormalizedEnv, self).__init__(env)
self.state_mean = 0
self.state_std = 0
self.alpha = 0.9999
self.num_steps = 0
def _observation(self, observation):
if observation is not None: # for future meta implementation
self.num_steps += 1
self.state_mean = self.state_mean * self.alpha + \
observation.mean() * (1 - self.alpha)
self.state_std = self.state_std * self.alpha + \
observation.std() * (1 - self.alpha)
unbiased_mean = self.state_mean / (1 - pow(self.alpha, self.num_steps))
unbiased_std = self.state_std / (1 - pow(self.alpha, self.num_steps))
return (observation - unbiased_mean) / (unbiased_std + 1e-8)
else:
return observation
def change_level(self, level):
self.env.change_level(level)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = info['life']
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
self.done = True
self.lives = lives
return obs, reward, self.done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped._life
return obs
def wrap_mario(env):
# assert 'SuperMarioBros' in env.spec.id
env = ProcessFrameMario(env)
env = NormalizedEnv(env)
env = BufferSkipFrames(env)
env = EpisodicLifeEnv(env)
return env
``` |
{
"source": "jmstevens/aesopbot",
"score": 3
} |
#### File: features/genius/song_structure.py
```python
import itertools
import os
import pickle
import re
import json
# from nltk.corpus import stopwords
artists = ['<NAME>','Drake','Chance The Rapper','<NAME>','Logic','Future','Chief Keef','Eminem','Kanye West','JAY-Z','Big Sean',
'Lil Uzi Vert','Tyler, The Creator','Earl Sweatshirt','2 Chainz','G-Eazy','ScHoolboy Q','Young Thug','<NAME>$$', 'Wu Tang Clan',
'Flatbush Zombies','A$AP Rocky','A$AP Ferg','Dumbfoundead','<NAME>','Waka Flocka Flame','Nas','A Tribe Called Quest','Vic Mensa',
'$UICIDEBOY$','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Yonas','<NAME>','<NAME>',
'Three 6 Mafia','<NAME>','RiFF RAFF','<NAME>','<NAME>','Tyga','<NAME>','<NAME>','<NAME>','<NAME>','Migos','Rihanna',
'<NAME>','21 Savage','<NAME>','<NAME>','<NAME>','XXXTENTACION','Lil Pump','Ski Mask the Slump God','<NAME>',
'SmokePurpp','A Boogie Wit Da Hoodie','Playboi Carti','Ugly God','Wiz Khalifa','<NAME>','Beyoncé','<NAME>','Meek Mill', 'Aesop Rock']
def get_lyrics_file():
with open('data/raw/artist_lyrics.json') as f:
data = json.load(f)
return data
def get_verses(data):
verse_lines = list()
for k in data['artists']:
for v in k['songs']:
song = v['lyrics']
lines = song.splitlines()
for l in range(len(lines)):
title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
if '[' in lines[l] and 'verse' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
verse_lines.append(section_lines)
return verse_lines
def clean_verses(verses):
verses_list = []
bad_characters = ['"',"'",'_','(',')','$',',','.','?','!','—','%','=']
for song in verses:
verses = list()
for line in song:
if line == '\n':
continue
if '[' in line:
continue
new_word = []
separate = line.split()
words = []
for word in separate:
for character in word:
character = character.lower()
if character not in bad_characters:
new_word.append(character)
w = ''.join(new_word)
# if w not in total_stop_words:
words.append(w)
new_word = []
if words != []:
words = words + ['<eol>']
verses.append(words)
verses = verses + ['<eov>']
verses_list.append(verses)
return verses_list
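# Illustrative example (hypothetical input):
#   clean_verses([["Hello world!", "Bye."]])
#   -> [[['hello', 'world', '<eol>'], ['bye', '<eol>'], '<eov>']]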
def segment_to_verses(verse_list):
verses = []
    for i in verse_list:
verse = ''
for j in i:
if isinstance(j, list):
verse = verse + ' ' + ' '.join(j)
else:
verse = verse + ' ' + j
verses.append(verse)
return verses
# with open('data/raw/artist_lyrics.json') as f:
# data = json.load(f)
#
# verse_lines = list()
# for k in data['artists']:
# for v in k['songs']:
# song = v['lyrics']
# lines = song.splitlines()
# for l in range(len(lines)):
# title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
# if '[' in lines[l] and 'verse' in title:
# section_lines = []
# count = l + 1
# done = False
# while count < len(lines) and not done:
# if '[' not in lines[count]:
# if lines[count] != '':
# section_lines.append(lines[count])
# count += 1
# else:
# done = True
# verse_lines.append(section_lines)
#
#
#
#
#
def get_verses(all_lyrics,artist):
#finds total verses, hooks, bridges, choruses written by a specific artist
one_song_verse_lines = []
one_song_chorus_lines = []
one_song_hook_lines = []
one_song_bridge_lines = []
total_verse_lines = []
total_chorus_lines = []
total_hook_lines = []
total_bridge_lines = []
total_lines = []
Songs = {}
with open('data/raw/artist_lyrics.json') as f:
data = json.load(f)
verse_lines = list()
for k in data['artists']:
for v in k['songs']:
song = v['lyrics']
lines = song.splitlines()
for l in range(len(lines)):
title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
if '[' in lines[l] and 'verse' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
verse_lines.append(section_lines)
all_lyrics = data['artists']
for _artist in all_lyrics:
for artist,songs in _artist.items():
for _song in songs:
print(_song)
if isinstance(_song, dict):
song_title = _song['title']
song_lyrics = _song['lyrics']
art = _song['artist']
clean_title = song_title.replace('(','').replace('.','').split()
if art == artist and 'Ft' not in clean_title:
lines = song_lyrics.splitlines()
for l in range(len(lines)):
title = [x.lower() for x in lines[l].replace('[', '').replace(']', '').split()]
if '[' in lines[l] and 'verse' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
total_verse_lines.append(section_lines)
one_song_verse_lines.append(section_lines)
print(section_lines)
elif '[' in lines[l] and 'chorus' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
total_chorus_lines.append(section_lines)
one_song_chorus_lines.append(section_lines)
elif '[' in lines[l] and 'hook' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
total_hook_lines.append(section_lines)
one_song_hook_lines.append(section_lines)
elif '[' in lines[l] and 'bridge' in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
total_bridge_lines.append(section_lines)
one_song_bridge_lines.append(section_lines)
artist_first_name = artist.split()[0].lower()
total_lines = []
one_song_verse_lines = []
one_song_chorus_lines = []
one_song_hook_lines = []
one_song_bridge_lines = []
if 'Ft' in clean_title:
lines = song_lyrics.splitlines()
for l in range(len(lines)):
title = [x.lower() for x in lines[l].replace('[','').replace(']','').replace('-','').replace(':','').split()]
if '[' in lines[l] and 'verse' in title and artist_first_name in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count += 1
else:
done = True
total_verse_lines.append(section_lines)
one_song_verse_lines.append(section_lines)
elif '[' in lines[l] and 'chorus' in title and artist_first_name in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count+=1
else:
done = True
total_chorus_lines.append(section_lines)
one_song_chorus_lines.append(section_lines)
elif '[' in lines[l] and 'hook' in title and artist_first_name in title:
section_lines = []
count = l + 1
done = False
while count < len(lines) and not done:
if '[' not in lines[count]:
if lines[count] != '':
section_lines.append(lines[count])
count+=1
else:
done = True
total_hook_lines.append(section_lines)
one_song_hook_lines.append(section_lines)
if len(one_song_verse_lines) > 0:
total_lines.append(total_verse_lines)
if len(one_song_chorus_lines) > 0:
total_lines.append(total_chorus_lines)
if len(one_song_hook_lines) > 0:
total_lines.append(total_hook_lines)
if len(one_song_bridge_lines) > 0:
total_lines.append(total_bridge_lines)
if len(total_lines) > 0:
Songs[song_title] = list(itertools.chain.from_iterable(total_lines))
#FIXME: Songs has all duplicates
Lines = {'Verses':total_verse_lines,'Choruses':total_chorus_lines,'Hooks':total_hook_lines,'Bridges':total_bridge_lines}
return Lines, Songs
def clean_name(song_title):
return re.sub(r'[^\x00-\x7F]+', ' ', song_title)
def clean_song_titles(song_dict):
keys = [x.replace(u'\xa0', u' ').replace(u'\u200b',u'') for x in song_dict.keys()]
new_keys = []
featured_artists = []
for k in keys:
if '(' in k:
if 'ft' in k.lower():
new_k = k.split('(')[0][:-1]
featured_artists.append(k.split('(')[1][:-1])
new_keys.append(new_k)
else:
new_keys.append(k)
else:
new_keys.append(k)
return new_keys,featured_artists
def make_one_list(song_lyrics):
sentence_list = []
bad_characters = ['"',"'",'_','(',')','$',',','.','?','!','—']
bad_words = ['it', 'the', 'you', 'they', 'she', 'he', 'this', 'my', 'to', 'me', 'in', 'like', 'yeah', "you're",
"that's", "really", "couldn't",
'youre','get','want','come','uh','put','got','one','im',
'ran','em','right','gon','need','take','dont','every',
'turn','back','lets','better','look','see','til',
'aint','tryna','oh','still','yo',"don't","i'm",'gotta',
'know','go','yuh']
stopword = set(stopwords.words('english'))
total_stop_words = bad_words + list(stopword)
lines = song_lyrics.splitlines()
for line in lines:
if line == '\n':
continue
if '[' in line:
continue
new_word = []
separate = line.split()
words = []
for word in separate:
for character in word:
character = character.lower()
if character not in bad_characters:
new_word.append(character)
w = ''.join(new_word)
if w not in total_stop_words:
words.append(w)
new_word = []
if words != []:
sentence_list.append(words)
return sentence_list
def Get_Artist_Lyrics(artist):
song_lyrics_path = 'Song Lyrics/'
artist_dict = {}
for root,dirs,files in os.walk(song_lyrics_path):
for f in files:
name = root + f
artist_dict[f.replace('_',' ')] = name
all_lyrics = {}
for art,path in artist_dict.items():
with open(path,'rb') as f:
all_lyrics[art] = pickle.load(f)
Lines, Songs = get_verses(all_lyrics,artist)
return Lines, Songs, all_lyrics
def Get_Lyrics():
song_lyrics_path = 'data/raw/'
artist_dict = {}
for root,dirs,files in os.walk(song_lyrics_path):
for f in files:
name = root + f
artist_dict[f.replace('_',' ')] = name
json.loads(song_lyrics_path)
all_lyrics = []
for art,path in artist_dict.items():
with open(path,'rb') as f:
lyrics = pickle.load(f)
for title,song_lyrics in lyrics.items():
lyrics = make_one_list(song_lyrics)
all_lyrics+=lyrics
return all_lyrics
def Get_All_Lyrics(all_lyrics):
sentence_list = []
bad_characters = ['"',"'",'_','(',')','$',',','.','?','!','—']
for artist,songs in all_lyrics.items():
for title,song in songs.items():
new_word = []
separate = song.split()
words = []
for word in separate:
for character in word:
character = character.lower()
if character not in bad_characters:
new_word.append(character)
words.append(''.join(new_word))
new_word = []
sentence_list.append(words)
return sentence_list
def get_song_structures(artist):
Lines, Songs, All_lyrics = Get_Artist_Lyrics(artist)
song_structures = {}
for section, lines in Lines.items():
rhyming_sections = []
for lines_ in lines:
rhyming_words = [line.split()[-2:] for line in lines_ ]
rhyming_sections.append(rhyming_words)
song_structures[section] = rhyming_sections
return song_structures
song_structures = get_song_structures('Kendrick Lamar')
```
#### File: src/models/aesop_gpt2.py
```python
import math
import tensorflow
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_addons as tfa
import json
import os
import time
from ftfy import fix_text
#:os.chdir('../')
import pickle
import numpy as np
import string, os
from gensim.models import KeyedVectors
import gensim.downloader as api
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Dropout, LSTM, Activation, Bidirectional
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.callbacks import LambdaCallback, ModelCheckpoint
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow.keras.utils as ku
from sklearn.model_selection import train_test_split
import random
import sys
from datetime import date
from collections import Counter
import matplotlib.pyplot as plt
from src.features.build import Lyrics
from src.features.transform_data import Transform
from random import shuffle
from tensorflow.python.framework import tensor_shape
from tokenizers import CharBPETokenizer, BertWordPieceTokenizer
from transformers import GPT2Model, GPT2Config, GPT2Tokenizer
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
def clean_text(txt):
txt = "".join(v for v in txt if v not in string.punctuation).lower()
txt = txt.encode("utf8").decode("ascii",'ignore')
return txt
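# Illustrative: clean_text("Hello, World!") -> "hello world"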
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
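# Illustrative: split_input_target([1, 2, 3, 4]) -> ([1, 2, 3], [2, 3, 4])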
def load_data():
data_dir = 'data/processed/verses.txt'
with open(data_dir, "r") as fp: # Unpickling
lyrics = fp.read()
lyrics_clean = clean_text(lyrics)
def word_based():
_t = Lyrics(32,10000)
#arr = _t.verse_lines
corpus = _t.lyrics
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
## tokenization
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
# convert data to sequence of tokens
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
return input_sequences, total_words
inp_sequences, total_words = get_sequence_of_tokens(corpus)
num_words = total_words
print(inp_sequences[:10])
input_sequences = inp_sequences
max_sequence_len = max([len(x) for x in input_sequences])
    input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len+1, padding='pre'))
    predictors, label = input_sequences[:, :-1], input_sequences[:, -1]
    return tokenizer, num_words, tf.data.Dataset.from_tensor_slices((predictors, label))
# In[ ]:
# def tf_encode(pt, en):
# result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
# result_pt.set_shape([None])
# result_en.set_shape([None])
#
# return result_pt, result_en
#
#
# def filter_max_length(x, y, max_length=MAX_LENGTH):
# return tf.logical_and(tf.size(x) <= max_length,
# tf.size(y) <= max_length)
#
# def fetch_dataset(train_dataset, val_dataset, batch_size, padded_shapes=([-1], [-1]), epoch=25, buffer_size=10000):
# train_dataset = train_dataset.map(tf_encode)
# train_dataset = train_dataset.filter(filter_max_length)
# # cache the dataset to memory to get a speedup while reading from it.
# train_dataset = train_dataset.cache()
# train_dataset = train_dataset.shuffle(buffer_size).padded_batch(batch_size)
# train_dataset = train_dataset.repeat(epoch)
# train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
#
#
# val_dataset = val_dataset.map(tf_encode)
# val_dataset = val_dataset.filter(filter_max_length).padded_batch(batch_size)
# return train_dataset, val_dataset
def verse_pairs_approach(target_vocab_size=2**12):
_t = Transform()
arr = [i for i in _t.verse_lines if len(i) > 0]
dataset = list()
for verse in arr:
if max([len(i.split()) for i in verse]) > 1 and max([len(i.split()) for i in verse]) < 25:
chunk_number = len(verse) // 4
# chunks = [verse[x:x+chunk_number] for x in range(0, len(verse), chunk_number)]
if chunk_number != 0:
chunks = ['<START> ' + ''.join([ j + ' <NEWLINE> ' for j in verse[x:x+chunk_number]]) + ' <END>' for x in range(0, len(verse), chunk_number)]
chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
dataset.append((chunks[:2], chunks[2:]))
# for i in arr:
# tmp = [ ' <NEWLINE> '.join([clean_text(j[0]), clean_text(j[1])]) for j in zip(i[0::2],i[1::2])]
# dataset.append([z for z in zip(tmp[0::2], tmp[1::2])])
example = [x[0] for x in dataset]
target = [x[1] for x in dataset]
print(example[:2], target[:2])
X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
len(X_train)
train_examples = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_examples = tf.data.Dataset.from_tensor_slices((X_test, y_test))
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size, reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>'])#, reserved_tokens=['<UNK>'])
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size,reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>']) #reserved_tokens=['<UNK>'])
BUFFER_SIZE = 15000
BATCH_SIZE = 32
def encode(lang1, lang2):
lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(lang1.numpy()) + [tokenizer_pt.vocab_size+1]
lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(lang2.numpy()) + [tokenizer_en.vocab_size+1]
return lang1, lang2
def tf_encode(pt, en):
result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
return result_pt, result_en
MAX_LENGTH = 125
def filter_max_length(x, y, max_length=MAX_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
train_dataset = train_examples.map(tf_encode)
train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_examples.map(tf_encode)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE)
return train_dataset, val_dataset, tokenizer_en, tokenizer_pt
def verse_by_verse(test_size=.10, shuffle=False, target_vocab_size=2**12):
_t = Transform()
arr = _t.verse_lines
dataset = list()
for verse in arr:
x = verse[0::2]
y = verse[1::2]
#[print(i) for i in zip(x, y)]
# dataset +=
#print(dataset[0])
if shuffle:
np.random.shuffle(dataset)
train = dataset[:round(len(dataset) * test_size)]
test = dataset[round(len(dataset) * test_size):]
train_examples = tf.data.Dataset.from_tensor_slices(train)
val_examples = tf.data.Dataset.from_tensor_slices(test)
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
(pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)
return train_examples, val_examples, tokenizer_en, tokenizer_pt
def fill_in_the_blank(test_size=.10, shuffle=False, target_vocab_size=2**12):
_t = Transform()
arr = _t.verse_lines
data_dir = 'data/processed/verses.txt'
with open(data_dir, "rb") as fp: # Unpickling
lyrics = pickle.load(fp)
    arr = [[j for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j] for i in list(np.array(lyrics)) if len(i.split(' \n ')) > 0]
    #tokenizer = BertWordPieceTokenizer()
#tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#special_tokens_dict = {'bos_token':'|START|', 'eos_token':'|END|', 'unk_token':'|UNK|', 'sep_token':'|SEP|', 'pad_token':'|PAD|', 'cls_token':'|CLS|', 'mask_token':'|MASK|'}
#num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
#print('We have added', num_added_toks, 'tokens')
#model.resize_token_embeddings(len(tokenizer))
#tokenizer.add_tokens(['<START>','<END>'])
dataset = list()
for verse in arr:
num_times = random.randint(1, 5)
try:
if max([len(i.split()) for i in verse]) > 1 and max([len(i.split()) for i in verse]) < 50:
chunk_number = len(verse) // 3
chunks = [verse[x:x+chunk_number] for x in range(0, len(verse), chunk_number)]
#chunks = ['<START> ' + ''.join([ j for j in verse[x:x+chunk_number]]) for x in range(0, len(verse), chunk_number)]
#chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
chunk_list = [' '.join(chunk_verse).split() for chunk_verse in chunks]
for chunk in chunk_list:
for _ in range(0, num_times,1):
mask = np.random.random(len(chunk))
mask_bool = random.uniform(.3, .4)
mask_x = mask > mask_bool
mask_y = mask < mask_bool
x = '<START> ' + ' '.join(['[MASK]' if not x else chunk[i] for i, x in enumerate(mask_x)]) + ' <END>'
#x = ' '.join(np.array(verse)[mask_x].tolist())
#y = ' '.join(np.array(chunk).tolist())
#$y = ' '.join(['' if not x else chunk[i] for i, x in enumerate(mask_y)])
#y = '|<GAP>|'.join(['' if not x else chunk[i] for i, x in enumerate(mask_y)])
y = '<START> ' + ' '.join(['[MASK]' if x else chunk[i] for i, x in enumerate(mask_x)]) + ' <END>'
# = ' '.join([np.array(i)[mask_y] for i in chunk])
# x = ' '.join(np.array(chunk)[mask_x].tolist())
# y = ' '.join(np.array(chunk)[mask_y].tolist())
#x = ' '.join([' ' if not x else chunk.split(' ')[i] for i, x in enumerate(mask_x)])
#x = ' '.join([' ' if not x else chunk.split(' ')[i] for i, x in enumerate(mask_x)])
#y = chunk
dataset.append((x, y))
except ValueError:
pass
print(dataset[0])
example = np.array(pad_sequences([tokenizer.encode(x[0]) for x in dataset], padding='post'))
target = np.array(pad_sequences([tokenizer.encode(x[1]) for x in dataset], padding='post'))
# target = [tokenizer.encode(x[1]).ids for x in dataset]
print(len(dataset))
print(dataset[0])
X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
train_examples = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_examples = tf.data.Dataset.from_tensor_slices((X_test, y_test))
#tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
# (pt.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)#, reserved_tokens=['<UNK>'])
#tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
# (en.numpy() for pt, en in train_examples), target_vocab_size=target_vocab_size)#,reserved_tokens=['<UNK>'])
BUFFER_SIZE = 15000
BATCH_SIZE = 64
def encode(lang1, lang2):
lang1 = [tokenizer.get_vocab_size()] + tokenizer.encode(lang1.numpy()).ids + [tokenizer.get_vocab_size()+1]
lang2 = [tokenizer.get_vocab_size()] + tokenizer.encode(lang2.numpy()).ids + [tokenizer.get_vocab_size()+1]
return lang1, lang2
#
def tf_encode(pt, en):
result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
result_pt.set_shape([None])
result_en.set_shape([None])
#
return result_pt, result_en
#
MAX_LENGTH = 125
#
#
#
def filter_max_length(x, y, max_length=MAX_LENGTH):
return tf.logical_and(tf.size(x) <= max_length,
tf.size(y) <= max_length)
#train_dataset = train_examples.map(tf_encode)
#train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_examples.cache()
# train_dataset = train_dataset.repeat(25)
    train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
#val_dataset = val_examples.map(tf_encode)
val_dataset = val_examples.padded_batch(BATCH_SIZE)
return train_dataset, val_dataset, tokenizer#, tokenizer_pt
def window_based(test_size=.10, shuffle=False, target_vocab_size=2**12):
test_size = 1 - test_size
dataset = list()
_t = Lyrics(32, 1000)
data_dir = 'data/processed/verses_encoded.txt'
with open(data_dir, "r") as fp: # Unpickling
lyrics = fp.read()
tokenizer = BertWordPieceTokenizer()
tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer.add_tokens(['<START>','<END>','<NEWLINE>'])
arr = [[clean_text(j).replace('newline','<NEWLINE>').replace('start','<START>').replace('end','<END>') for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j] for i in list(np.array(lyrics.split('\n\n'))) if len(i.split(' \n ')) > 0]
# print(arr)
# for verse in arr:
# chunk_number = len(verse) // 5
# if chunk_number > 0:
# chunks = ['<START> ' + ''.join([ j.replace('\n','').replace('\n\n','') + ' <NEWLINE> ' for j in verse[x:x+chunk_number]]) + ' <END>' for x in range(0, len(verse), chunk_number)]
# chunks = [chunk for chunk in chunks if len(chunk.split('<NEWLINE>')) > 2]
# print()
# dataset.append(chunks)
# train = dataset
train = [y for x in arr for y in x]
train = [tokenizer.encode(i).ids for i in train]
train = [y for x in train for y in x]
# train.split('<NEWLINE>')
# print(train)
# train = ' <EOV> '.join(dataset)
# print(train)
# tokenizer.add_tokens(['<START>','<END>','<NEWLINE>','<EOV>'])
# target = _t.target
# target = [x[1] for x in dataset]
# print(len(dataset))
# X_train, X_test, y_train, y_test = train_test_split(example, target, test_size=0.10, shuffle=True)
# train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
# print(len(dataset))
# np.random.shuffle(dataset)
# train_test = dataset[:round(len(dataset) * test_size)]
# train = train_test[:round(len(train_test) * test_size)]
# test = train_test[round(len(train_test) * test_size):]
# val = dataset[round(len(dataset) * test_size):]
# train_dataset = tf.data.Dataset.from_tensor_slices(train)
# tokenizer = BertWordPieceTokenizer("data/processed/vocab.txt", lowercase=True)
# tokenizer = tfds.features.text.SubwordTextEncoder.build_from_corpus((en.numpy() for en in train_dataset), target_vocab_size=target_vocab_size, reserved_tokens=['<UNK>','<NEWLINE>','<START>','<END>'])
train_dataset = tf.data.Dataset.from_tensor_slices(train)
seq_length = 40
# examples_per_epoch = len(train.split())//(seq_length+1)
# data = [i for i in flattened_list if len(i) < 100]
sequences = train_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
# Batch size
BATCH_SIZE = 128
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 20000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.repeat(50)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
print(dataset)
return dataset, tokenizer
def simple_method(sequence_size, testSetRatio=0.15):
testSetRatio = 1-testSetRatio
data_dir = 'data/processed/verses_test.txt'
with open(data_dir, "rb") as fp: # Unpickling
lyrics = pickle.load(fp)
arr = [' <NEWLINE> '.join([clean_text(j) for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j]) for i in list(np.array(lyrics)) if len(i.split(' \n ')) > 0]
#tokenizer = BertWordPieceTokenizer()
#tokenizer.train(['data/processed/verses_encoded.txt'])
tokenizer = GPT2Tokenizer.from_pretrained('gpt2-xl')
#tokenizer.train(['data/processed/verses_encoded.txt'])
special_tokens_dict = {'eos_token':'<END>','sep_token':'<NEWLINE>','bos_token':'<START>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print(tokenizer.encode(' <NEWLINE> '))
tokenizer.save_pretrained('src/data/tokenizers')
dataset = list()
for verse in arr:
tmp = list()
verse = ' <START> ' + verse + ' <END> '
verse_split = verse.split(' <NEWLINE> ')
for line in verse_split:
tmp = tmp + tokenizer.encode(line + ' <NEWLINE>', add_prefix_space=True)
if tmp:
dataset.append(tmp)
print(dataset[0])
# dataset = [[item for sublist in verse.split(' \n ') for tokenizer.encode(item, add_prefix_space=True) in sublist] for verse in arr]
np.random.shuffle(dataset)
verse_length = [len(verse) for verse in dataset]
verse_average = sum(verse_length) / len(verse_length)
print(f'Average number of words in a verse {verse_average}')
# dataset = dataset[
train = dataset[:round(len(dataset) * testSetRatio)]
test = dataset[round(len(dataset) * testSetRatio):]
print(f'train size {len(train)}')
print(f'test size {len(test)}')
trainTensor = simple_pipeline(train, sequence_size)
testTensor = simple_pipeline(test, sequence_size)
return trainTensor, testTensor, tokenizer
def simple_pipeline(dataset, sequence_size):
dataset = [y for x in dataset for y in x]
assert isinstance(dataset[0], int)
print(f'number of tokens {len(dataset)}: \n{dataset[:5]}')
train = tf.data.Dataset.from_tensor_slices(dataset)
train = train.window(sequence_size, drop_remainder=True)
for window in train.take(5):
print(list(window.as_numpy_iterator()))
train = train.flat_map(lambda window: window.batch(sequence_size))
train = train.shuffle(10000).batch(64)
train = train.map(lambda windows: (windows[:,:-1], windows[:,1:]))
# train = train.cache()
train = train.prefetch(tf.data.experimental.AUTOTUNE)
return train
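# Hedged sketch (added for illustration, not part of the original pipeline):
# the windows[:, :-1] / windows[:, 1:] map above turns each fixed-size window
# of token ids into (input, target) pairs shifted by one position.
def _shift_example():
    window = tf.constant([[5, 6, 7, 8]])
    inputs, targets = window[:, :-1], window[:, 1:]  # ([5, 6, 7], [6, 7, 8])
    return inputs, targets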
def gelu(x):
with tf.name_scope("gelu"):
        # 0.5 is the standard coefficient in the tanh approximation of GELU
        cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def shape_as_list_2(x):
return [int(i) for i in tf.shape(x)]
def get_padding_mask(seq):
with tf.name_scope("Padding_Mask"):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def attention_mask(size):
with tf.name_scope("attention_mask"):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
def create_masks(inp):
with tf.name_scope("attn_masking"):
# Encoder padding mask
att_mask = attention_mask(tf.shape(inp)[1])
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
padding_mask = get_padding_mask(inp)
mask = tf.maximum(padding_mask, att_mask)
return mask
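# Hedged usage sketch (illustration only): combined causal + padding mask for
# a toy batch where the last token id is padding (0). Masked positions are 1.0
# and get pushed to -1e9 when added to the attention logits.
def _mask_example():
    toy_ids = tf.constant([[7, 4, 9, 0]])
    return create_masks(toy_ids)  # shape (1, 1, 4, 4)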
def scaled_dot_product_attention(q, k, v, training, mask=None):
"""
Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) #(..., seq_len, seq_len_k)
    # scale matmul_qk (this is a module-level helper, so there is no self here;
    # scale unconditionally, matching the class method further below)
    dk = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) for scores to add up to 1
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output, attention_weights
def print_out(q, k, v):
temp_out, temp_attn = scaled_dot_product_attention(
q, k, v, None
)
print('Attention weights are:')
print(temp_attn)
print('Output is:')
print(temp_out)
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, att_dropout=0.4,
residual_dropout=0.45, scale=True):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
self.att_dropout = att_dropout
self.residual_dropout=residual_dropout
self.scale=scale
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.c_attn = Conv1d(self.d_model, self.d_model * 3)
self.c_proj = Conv1d(self.d_model, self.d_model)
def multihead_attention(self, q, k, v, training, mask=None):
"""
Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) #(..., seq_len, seq_len_k)
# scale matmul_qk
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32)
matmul_qk = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor
if mask is not None:
matmul_qk += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) for scores to add up to 1
attention_weights = tf.nn.softmax(matmul_qk, axis=-1)
if training:
attention_weights = tf.nn.dropout(attention_weights, rate=self.att_dropout, name="attn_dropout") # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v)
return output, attention_weights
def split_heads(self, x):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
batch_size = tf.shape(x)[0]
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def merge_heads(self, x):
batch_size = tf.shape(x)[0]
scaled_attention = tf.transpose(x, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
merged = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model))
return merged
def call(self, x, mask=None, past_layer=None, training=True):
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query) # (batch_size, seq_len, d_model)
key = self.split_heads(key) # (batch_size, seq_len, d_model)
value = self.split_heads(value) # (batch_size, seq_len, d_model)
if past_layer is not None:
past_key, past_value = tf.unstack(past_layer, axis=1)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=2)
present = tf.stack([key, value], axis=1)
scaled_attention, attention_weights = self.multihead_attention(query, key, value, training, mask)
concat_attention = self.merge_heads(scaled_attention)
output = self.c_proj(concat_attention)
if training:
output = tf.nn.dropout(output, rate=self.residual_dropout, name="resit_dropout")
return output, present
class EmbeddingLayer(tf.keras.layers.Layer):
def __init__(self, vocab_size, embedding_size, initializer=None, stddev=0.01, mean=0.0):
super(EmbeddingLayer, self).__init__()
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.stddev = stddev
self.mean = mean
self.initializer = initializer
if self.initializer is None:
self.initializer = tf.random_normal_initializer(mean=self.mean, stddev=self.stddev)
def build(self, input_shape):
with tf.name_scope("embedding_weights"):
self.embedding_weights = self.add_weight("weights", shape=[self.vocab_size, self.embedding_size],
dtype="float32",
initializer=self.initializer
)
super(EmbeddingLayer, self).build(input_shape)
def call(self, inputs, mode="embedding", scale=False):
if mode == "embedding":
return self.embedding(inputs, scale=scale)
elif mode == "projection":
return self.projection(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def embedding(self, inputs, scale=False):
with tf.name_scope("embedding"):
# Create binary mask of size [batch_size, length]
mask = tf.cast(tf.not_equal(inputs, 0), tf.float32)
inputs = tf.cast(inputs, tf.int32)
embeddings = tf.nn.embedding_lookup(self.embedding_weights, inputs)
embeddings *= tf.expand_dims(mask, -1)
# Scale embedding by the sqrt of the hidden size
if scale:
embeddings *= self.embedding_size ** 0.5
return embeddings
def projection(self, inputs):
with tf.name_scope("output_layer"):
batch_size = tf.shape(inputs)[0]
seq_len = tf.shape(inputs)[1]
h_flat = tf.reshape(inputs, [-1, self.embedding_size])
logits = tf.matmul(h_flat, self.embedding_weights, transpose_b=True)
return tf.reshape(logits, [batch_size, seq_len, self.vocab_size])
class PositionEmbeddingLayer(tf.keras.layers.Layer):
def __init__(self, position_seq, pos_embedding_size, trainable=True, stddev=0.02, mean=0.0):
super(PositionEmbeddingLayer, self).__init__()
self.position_seq = position_seq
self.hidden_size = pos_embedding_size
self.trainable = trainable
self.stddev = stddev
self.mean = mean
if trainable:
self.position_embedding = EmbeddingLayer(self.position_seq, self.hidden_size,
stddev=self.stddev, mean=self.mean)
def call(self, inputs, start=1):
with tf.name_scope("pos_embedding"):
if self.trainable:
batch_size = tf.shape(inputs)[0]
batch_seq = tf.shape(inputs)[1]
positions = tf.reshape(tf.tile(tf.range(start, batch_seq + start), [batch_size]),
[batch_size, batch_seq])
positions = tf.cast(positions, tf.int32)
position_mask = tf.cast(tf.not_equal(inputs, 0), tf.int32)
positions *= position_mask
return self.position_embedding(positions)
            else:
                # pass the hidden size explicitly; get_position_sinusoid is a
                # static method that requires it
                return self.get_position_sinusoid(self.position_seq, self.hidden_size)
@staticmethod
def get_position_sinusoid(seq_len, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
position = tf.cast(tf.range(seq_len), tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.cast(num_timescales, tf.float32) - 1))
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
return signal
class Conv1d(tf.keras.layers.Layer):
def __init__(self,
hidden_size,
filter_size,
weights_init_stdev=0.02,
weights_mean=0.0,
bias_init=0.0):
super(Conv1d, self).__init__()
self.weights_init_stdev = weights_init_stdev
self.weights_mean = weights_mean
self.bias_init = bias_init
self.hidden_size = hidden_size
self.filter_size = filter_size
def build(self, input_shape):
self.weight = self.add_weight(
"cov1d_weights",
shape=[self.hidden_size, self.filter_size],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
stddev=self.weights_init_stdev,
mean=self.weights_mean))
self.bias = self.add_weight("conv1d_biases",
shape=[self.filter_size],
initializer=tf.constant_initializer(self.bias_init))
super(Conv1d, self).build(input_shape)
def call(self, inputs):
output_shape = [tf.shape(inputs)[0], tf.shape(inputs)[1]] + [self.filter_size]
inputs = tf.reshape(inputs, [-1, self.hidden_size]) # shape [batch, seq , features] => [batch*seq, features]
outputs = tf.matmul(inputs, self.weight) + self.bias
outputs = tf.reshape(outputs, output_shape) # Reshape => [batch, seq, filter_size]
return outputs
class FeedForward(tf.keras.layers.Layer):
def __init__(self, hidden_size, filter_size, dropout_rate=0.45, activation=tf.nn.relu):
super(FeedForward, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.activation = activation
self.dropout_rate = dropout_rate
self.dense_layer = Conv1d(self.hidden_size, self.filter_size)
self.output_dense_layer = Conv1d(self.filter_size, self.hidden_size)
def call(self, x, training=False):
output = self.dense_layer(x)
output = self.activation(output)
output = self.output_dense_layer(output)
if training:
output = tf.nn.dropout(output, rate=self.dropout_rate, name="feed_forward_dropout")
return output
class LayerNormalization(tf.keras.layers.Layer):
def __init__(self, hidden_size):
super(LayerNormalization, self).__init__()
self.hidden_size = hidden_size
def build(self, input_shape):
self.gamma = self.add_weight(
"layer_norm_scale",
shape=[self.hidden_size],
dtype="float32",
initializer=tf.ones_initializer(),
experimental_autocast=False)
self.beta = self.add_weight(
"layer_norm_bias",
shape=[self.hidden_size],
dtype="float32",
initializer=tf.zeros_initializer(),
experimental_autocast=False)
super(LayerNormalization, self).build(input_shape)
def call(self, x, epsilon=1e-6, input_dtype=tf.float32):
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
normalized = (x - mean) * tf.math.rsqrt(variance + epsilon)
return tf.cast(normalized * self.gamma + self.beta, input_dtype)
def argmax(logits):
return tf.argmax(logits)
def top_k_logits(logits, k):
if k == 0:
return logits
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits
)
# Nucleus sampling (https://arxiv.org/pdf/1904.09751.pdf)
def top_p_logits(logits, p):
    """Taken from the OpenAI GPT-2 implementation"""
batch = tf.shape(logits)[0]
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
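# Hedged usage sketch (illustration only; the logits below are made up):
# filter a single row of logits with top-k and nucleus (top-p) truncation
# before categorical sampling, mirroring the order used in Gpt2.sample_sequence.
def _sampling_filter_example():
    toy_logits = tf.constant([[2.0, 1.0, 0.5, 0.1, -1.0]])
    filtered = top_p_logits(top_k_logits(toy_logits, k=3), p=0.9)
    return tf.random.categorical(filtered, num_samples=1, dtype=tf.int32)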
train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Inputs"),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Targets"),
tf.TensorSpec(shape=(None), dtype=tf.int32, name="Step")
]
test_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Inputs"),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name="Targets"),
tf.TensorSpec(shape=(None), dtype=tf.int32, name="Step")
]
class Gpt2(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff, max_seq_len, vocab_size, tokenizer,
optimizer="adam", learning_rate=0.005, rev_embedding_projection=True):
super(Gpt2, self).__init__()
self.rev_embedding_projection = rev_embedding_projection
self.num_layers = num_layers
self.num_heads = num_heads
self.dff = dff
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.d_model = d_model
self.tokenizer = tokenizer
self.learning_rate = learning_rate
self.optimizer_t = optimizer
self.dataset = None
self.mirrored_strategy = None
self.embedding = EmbeddingLayer(
self.vocab_size, self.d_model)
self.pos_embedding = PositionEmbeddingLayer(
self.max_seq_len, self.d_model)
self.decoder_layers = [DecoderLayer(self.d_model, self.num_heads, self.dff)
for _ in range(self.num_layers)]
self.layer_norm = LayerNormalization(self.d_model)
if not self.rev_embedding_projection:
self.output_layer = OutputLayer(self.vocab_size)
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
self.accuracy_object = tf.keras.metrics.SparseCategoricalAccuracy(
name='accuracy')
self.train_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32)]
self.test_step_signature = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32)]
def call(self, x, training=True, past=None):
x = tf.cast(x, tf.int32)
batch, sequence = tf.shape(x)[0], tf.shape(x)[1]
if past is None:
pasts = [None] * self.num_layers
else:
pasts = past
assert len(pasts) == self.num_layers
att_mask = create_masks(x)
past_length = 1 if past is None else tf.shape(past)[-2]
with tf.name_scope("embeddings"):
embedded_x = self.embedding(x)
hidden_states = embedded_x + self.pos_embedding(x, start=past_length)
presents = []
for decoder_layer, past in zip(self.decoder_layers, pasts):
hidden_states, present = decoder_layer(hidden_states, training, att_mask, past=past)
presents.append(present)
hidden_states = self.layer_norm(hidden_states)
if self.rev_embedding_projection:
logits = self.embedding(hidden_states, mode="projection")
else:
logits = self.output_layer(hidden_states)
return logits, presents
@staticmethod
def get_padded_accuracy(labels, logits):
with tf.name_scope("padded_accuracy"):
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
padded_labels = tf.cast(labels, tf.int32)
nonpad_seq = tf.math.count_nonzero(weights, dtype=tf.dtypes.float32, )
acc = tf.cast(tf.equal(outputs, padded_labels), tf.float32)
accuracy = tf.reduce_sum(tf.cast(acc * weights, tf.float32)) / nonpad_seq
return tf.cast(accuracy, tf.float32)
def create_optimizer(self):
optimizer = self.optimizer_t.lower()
with tf.name_scope("optimizer"):
if optimizer == "adam":
self.optimizer = tf.keras.optimizers.Adam(self.learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
elif optimizer == "adadelta":
self.optimizer = tf.keras.optimizers.Adadelta(self.learning_rate)
elif optimizer == "rms":
self.optimizer = tf.keras.optimizers.RMSprop(self.learning_rate)
else:
self.optimizer = tf.keras.optimizers.SGD(self.learning_rate)
return self.optimizer
def get_loss(self, real, pred):
with tf.name_scope("loss_layer"):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = self.loss_object(real, pred)
with tf.name_scope("loss_masking"):
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
loss_ = tf.reduce_sum(loss_, axis=1)
sequence_avg_loss = loss_ / tf.reduce_sum(mask, axis=1)
return sequence_avg_loss
def create_checkpoint_manager(self, checkpoint_path, max_to_keep=5, load_model=True):
with tf.name_scope('checkpoint_manager'):
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self)
self.ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=max_to_keep)
if load_model: # If want to load trained weights
ckpt.restore(self.ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored...............')
else:
print("Initializing model from scratch..........")
def load_model(self, filepath):
ckpt = tf.train.Checkpoint(model=self)
ckpt_manager = tf.train.CheckpointManager(ckpt, filepath, max_to_keep=5)
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Model Restored..........................")
def create_summary_writer(self, summary_path):
train_summary_path = summary_path + "/train"
test_summary_path = summary_path + "/test"
with tf.name_scope('summary'):
self.train_writer = tf.summary.create_file_writer(train_summary_path)
self.test_writer = tf.summary.create_file_writer(test_summary_path)
return self.train_writer, self.test_writer
@tf.function(input_signature=train_step_signature)
def train_step(self, inputs, targets, step, grad_clip=True, clip_value=2.5):
with tf.GradientTape() as tape:
predictions, _ = self(inputs, training=True)
loss = tf.reduce_mean(self.get_loss(targets, predictions))
with tf.name_scope("gradients"):
gradients = tape.gradient(loss, self.trainable_variables)
if grad_clip:
gradients = [(tf.clip_by_value(grad, -clip_value, clip_value))
for grad in gradients]
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
accuracy = self.get_padded_accuracy(targets, predictions)
with tf.name_scope("summary_writer"):
with self.train_writer.as_default():
tf.summary.scalar("loss", loss, step=tf.cast(step, tf.int64))
tf.summary.scalar("accuracy", accuracy, step=tf.cast(step, tf.int64))
return loss, accuracy
@tf.function(input_signature=test_step_signature)
def test_step(self, inputs, targets, step, grad_clip=True, clip_value=2.5):
with tf.GradientTape() as tape:
predictions, _ = self(inputs, training=False)
test_loss = tf.reduce_mean(self.get_loss(targets, predictions))
test_accuracy = self.get_padded_accuracy(targets, predictions)
with tf.name_scope("summary_writer"):
with self.test_writer.as_default():
tf.summary.scalar("test_loss", test_loss, step=tf.cast(step, tf.int64))
tf.summary.scalar("test_accuracy", test_accuracy, step=tf.cast(step, tf.int64))
return test_loss, test_accuracy
def fit(self, train_dataset, test_dataset, EPOCHS=50):
for epoch in range(EPOCHS):
tf.summary.trace_on(graph=True, profiler=True)
print('EPOCH :{}'.format(epoch))
if not epoch == 0:
step = epoch * step
test_step = epoch * test_step
tf.summary.trace_on(graph=True, profiler=True)
for (step, (inputs, targets)) in enumerate(train_dataset):
train_loss, train_acc = self.train_step(inputs, targets, step)
if step % 100 == 0:
print('Step {} Train_Loss {:.4f} Train_Accuracy {:.4f}'.format(
step, train_loss, train_acc))
if step == 25:
with self.train_writer.as_default():
tf.summary.trace_export(
name="gpt-2",
step=step,
profiler_outdir='logs/train')
if step % 5000 == 0:
ckpt_save_path = self.ckpt_manager.save()
print('Saving checkpoint for step {} at {}'.format(step,
ckpt_save_path))
# tf.summary.trace_on(graph=True, profiler=True)
for (test_step, (inputs, targets)) in enumerate(test_dataset):
test_loss, test_acc = self.test_step(inputs, targets, test_step)
if not epoch == 0:
test_step = epoch * test_step
if test_step % 100 == 0:
print('Step {} Test_Loss {:.4f} Test_Accuracy {:.4f}'.format(
test_step, test_loss, test_acc))
if test_step == 25:
with self.test_writer.as_default():
tf.summary.trace_export(
name="gpt2_test",
step=test_step,
profiler_outdir='logs/test')
def beam_search(self, predictions, top_k=25):
#start with an empty sequence with zero score
output_sequences = [([], 0)]
#looping through all the predictions
for token_probs in predictions:
new_sequences = []
#append new tokens to old sequences and re-score
for old_seq, old_score in output_sequences:
for char_index in range(len(token_probs)):
new_seq = old_seq + [char_index]
#considering log-likelihood for scoring
new_score = old_score + math.log(token_probs[char_index])
new_sequences.append((new_seq, new_score))
#sort all new sequences in the de-creasing order of their score
output_sequences = sorted(new_sequences, key = lambda val: val[1], reverse = True)
#select top-k based on score
# *Note- best sequence is with the highest score
output_sequences = output_sequences[:top_k]
return output_sequences
def sample_sequence(self,seq_len, context=None,temperature=.96,
top_k=25,
top_p=.95,
nucleus_sampling=True):
# vocab_size=2**15
# model_gen = Gpt2(num_layers=self.num_layers, d_model=self.d_model, num_heads=self.num_heads, dff=self.dff, max_seq_len=self.max_seq_len, vocab_size=self.tokenizer.get_vocab_size(), tokenizer=self.tokenizer, optimizer="adam")
# model_gen.create_optimizer()
# model_gen.create_checkpoint_manager('checkpoint')
bos=self.tokenizer.bos_token_id#.encode('<START>')#.ids[0]
eos=self.tokenizer.eos_token_id#.ids[0]
        if context is None:
print("Give some context to model.................")
return
context_str = context
context = tf.expand_dims(([bos] + self.tokenizer.encode(context)), 0)
# context = tf.expand_dims(([bos] + [self.tokenizer.encode(context)]), 0)
prev = context
print(prev)
output = context
past = None
for i in range(seq_len):
#context = tf.expand_dims((self.tokenicontext).ids), 0)
#prev = context
#output = context
past = None
logits, past = self(prev, training=False, past=past)
# print(logits)
#logits = (tf.nn.softmax(logits[-1, -5:, :].numpy(),axis=-1) / tf.cast(1.25, tf.float32)).numpy()
logits = logits[:,-1,:] / tf.cast(temperature, tf.float32)
#predictions = beam_search_decoder(logits, 5)
#np.random.shuffle(predictions)
#print([self.tokenizer.decode(i) for i in predictions])
#predictions = predictions[0][0]
# print(logits)
logits = top_k_logits(logits, k=top_k)
# print(logits)
if nucleus_sampling:
logits = top_p_logits(logits, p=top_p)
samples = tf.random.categorical(logits, num_samples=1, dtype=tf.int32)
if tf.equal(samples, eos):
print("Predicted end of sequence.")
break
# print("shape.........")
# print(tf.shape(output))
# print(tf.shape(samples))
#context_str = context_str + ' ' + self.tokenizer.decode(predictions)
#context = tf.expand_dims(([bos] + self.tokenizer.encode(context_str), 0))
prev = samples
output = tf.concat([output, samples], axis=-1)
# print(tf.shape(output))
# print(output)
# print("--------------------------")
result = tf.squeeze(output, axis=0)
pred = [int(i) for i in result]
generated_seq = self.tokenizer.decode([i for i in pred[1:]])
#generated_seq = generated_seq.replace("|SEP|", "\n")
generated_seq = ' '.join(generated_seq.split())
generated_seq = generated_seq.replace("<NEWLINE>", "\n").replace("<|>","\n").replace("<|NEWLINE|NEWLINE|>","\n").replace("<|NEWLINE|NEWLINE|NEWLINE|>","\n")
return generated_seq
from math import log
from numpy import array
from numpy import argmax
# beam search
def beam_search_decoder(data, k):
sequences = [[list(), 0.0]]
# walk over each step in sequence
for row in data:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in range(len(row)):
candidate = [seq + [j], score - log(row[j])]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup:tup[1])
# select k best
sequences = ordered[:k]
return sequences
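# Hedged usage sketch (illustration only; the probabilities are made up):
# decode a toy 3-step distribution over a 4-token vocabulary with beam width 2.
# Each returned entry is [token_sequence, cumulative negative log-likelihood].
def _beam_search_demo():
    toy_probs = array([[0.1, 0.2, 0.3, 0.4],
                       [0.4, 0.3, 0.2, 0.1],
                       [0.25, 0.25, 0.25, 0.25]])
    return beam_search_decoder(toy_probs, k=2)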
class OutputLayer(tf.keras.layers.Layer):
def __init__(self, output_dim, proj_weights=None, kernel_initializer=None):
super(OutputLayer, self).__init__()
self.proj_weights = proj_weights
self.output_dim = output_dim
self.layer_weights = None
self.kernel_initializer = kernel_initializer
def build(self, input_shape):
if self.proj_weights is None:
input_dim = tensor_shape.dimension_value(input_shape[-1])
self.layer_weights = self.add_weight(
'output_layer_weights',
shape=[input_dim, self.output_dim],
initializer=self.kernel_initializer,
trainable=True)
super(OutputLayer, self).build(input_shape)
    def call(self, x):
        # Minimal working projection to vocabulary logits; the original body
        # referenced undefined names (out, self.feed_forward, present) and
        # appeared to be copy-pasted from DecoderLayer.call.
        batch, sequence = tf.shape(x)[0], tf.shape(x)[1]
        h_flat = tf.reshape(x, [-1, tf.shape(x)[-1]])
        if self.proj_weights is None:
            logits = tf.matmul(h_flat, self.layer_weights)
        else:
            logits = tf.matmul(h_flat, self.proj_weights, transpose_b=True)
        return tf.reshape(logits, [batch, sequence, self.output_dim])
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff,
dr_rate=0.45):
super(DecoderLayer, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.dff = dff
self.dr_rate = dr_rate
self.mha = MultiHeadAttention(self.d_model, self.num_heads)
self.feed_forward = FeedForward(self.d_model, self.dff, self.dr_rate)
self.layer_norm1 = LayerNormalization(self.d_model)
self.layer_norm2 = LayerNormalization(self.d_model)
def call(self, x, training, mask, past=None):
out, present = self.mha(self.layer_norm1(x), mask=mask, past_layer=past,
training=training) # (batch_size, input_seq_len, d_model)
with tf.name_scope("residual_conn"):
x = x + out
out = self.feed_forward(self.layer_norm2(x), training=training) # (batch_size, input_seq_len, d_model)
with tf.name_scope("residual_conn"):
x = x + out
return x, present
def run():
sequence_size = 12
trainTensor, testTensor, tokenizer = simple_method(sequence_size)
model = Gpt2(6, 512, 8, 512, sequence_size, vocab_size=tokenizer.vocab_size+3, tokenizer=tokenizer, optimizer='adam')
opt = model.create_optimizer()
model.create_checkpoint_manager('checkpoint')
model.create_summary_writer('logs')
model.compile(loss=model.loss_object, optimizer=opt)
model.fit(trainTensor, testTensor)
# model.save('aesop')
``` |
{
"source": "jmstriegel/ESP8266_Weather_Station",
"score": 3
} |
#### File: appengine_service/api/station.py
```python
import webapp2
import time
from datetime import datetime
import re
import json
from google.appengine.api import memcache
CACHETIME = 120
class UpdateHandler(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/json'
data = {
't': int(time.time()),
'10_0': validPMData(self.request.get('pm10_0')),
'2_5': validPMData(self.request.get('pm2_5')),
'1_0': validPMData(self.request.get('pm1_0')),
'rssi': validRSSI(self.request.get('rssi')),
'ip': self.request.remote_addr,
}
device_id = validID(self.request.get('id'))
key = 'device:%s' % device_id
rawdata = memcache.get(key)
devicedata = {
'id': device_id,
'latest': {},
'history': []
}
if (rawdata is not None):
devicedata = json.loads(rawdata)
timebin = getTimeBinMinute(data['t'])
devicedata['history'] = flushHistory(devicedata['history'], timebin)
devicedata['history'].append(data)
devicedata['latest'] = data
memcache.set(key, json.dumps(devicedata), CACHETIME)
key = 'activedevices'
rawdata = memcache.get(key)
devices = {}
if (rawdata is not None):
devices = json.loads(rawdata)
devices[device_id] = data['t']
devices = removeExpiredDevices(devices)
memcache.set(key, json.dumps(devices), CACHETIME)
result = {
't': data['t']
}
self.response.out.write(json.dumps(result))
class ListHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/json'
devices = {}
key = 'activedevices'
rawdata = memcache.get(key)
if (rawdata is not None):
devices = json.loads(rawdata)
devices = removeExpiredDevices(devices)
device_arr = []
for device_id, t in devices.iteritems():
device_arr.append(device_id)
response = {
'devices': device_arr
}
self.response.out.write(json.dumps(response))
class StatusHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/json'
device_id = validID(self.request.get('id'))
key = 'device:%s' % device_id
rawdata = memcache.get(key)
devicedata = {
'id': device_id,
'latest': {},
'history': []
}
now = int(time.time())
timediff = -1
if (rawdata is not None):
devicedata = json.loads(rawdata)
timediff = now - devicedata['latest']['t']
result = {
'id': device_id,
'latest': devicedata['latest'],
't': now,
'recency': timediff
}
self.response.out.write(json.dumps(result))
def getTimeBinMinute(timestamp):
d = datetime.fromtimestamp(timestamp)
return d.strftime('%Y%m%d%H%M')
def flushHistory(history, currenttimebin):
bins = {}
pruned = []
for data in history:
timebin = getTimeBinMinute(data['t'])
if timebin == currenttimebin:
pruned.append(data)
else:
if timebin not in bins:
bins[timebin] = []
bins[timebin].append(data)
# TODO: arrays in bins should be averaged and stored
return pruned
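# Hedged sketch (added for illustration; the TODO above is not implemented in
# the original source): average the readings collected in one completed time
# bin so they could be persisted in downsampled form. Field names mirror the
# payload built in UpdateHandler.
def averageBin(samples):
    if not samples:
        return {}
    avg = {'t': samples[-1]['t']}
    for field in ('10_0', '2_5', '1_0', 'rssi'):
        values = [s[field] for s in samples if field in s]
        avg[field] = sum(values) / float(len(values)) if values else -1
    return avg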
def removeExpiredDevices(devices):
newdata = {}
cutoff = int(time.time()) - CACHETIME
for device_id, t in devices.iteritems():
if(t > cutoff):
newdata[device_id] = t
return newdata
def validPMData(data):
if (data is None or data == ""):
return -1
else:
return int(data)
def validRSSI(data):
if (data is None or data == ""):
return 0
else:
return int(data)
def validID(data):
if (data is None or data == ""):
return "default"
else:
return str(data)
app = webapp2.WSGIApplication(
[
('/api/station/update', UpdateHandler),
('/api/station/list', ListHandler),
('/api/station/status', StatusHandler)
],
debug=True
)
```
#### File: ESP8266_Weather_Station/appengine_service/index.py
```python
import json
import os
import pickle
import webapp2
from google.appengine.ext.webapp import template
class IndexHandler( webapp2.RequestHandler ):
def get( self ):
self.response.headers['Content-Type'] = 'text/html'
tmpl = os.path.join(os.path.dirname(__file__), 'views/index.html')
tmplvars = {
'device_id': ''
}
device_id = self.request.get('id')
if device_id is not None:
tmplvars['device_id'] = device_id
self.response.out.write(template.render(tmpl, tmplvars))
app = webapp2.WSGIApplication(
[
('/', IndexHandler)
],
debug=True
)
``` |
{
"source": "jmsull/gzpt",
"score": 2
} |
#### File: gzpt/gzpt/utils.py
```python
from nbodykit.cosmology import Planck15
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as ius
#true utils
def W_TH(k,R):
    '''Fourier-space top hat window
    Input: wavenumber k - arraylike
           R - the filtering scale
    (see W_TH_real below for the configuration-space version)
    '''
x=k*R
return (3/x**3)*(np.sin(x) - x*np.cos(x))
def W_TH_real(r,R):
'''Top hat window
Input:
position r - arraylike
R - the filtering scale
'''
V = 4*np.pi/3 *R**3
    if np.ndim(r) > 0:  # len()/.shape fail for scalar input, so check dimensionality instead
        indicator = np.ones(np.shape(r))
        indicator[r>R] = 0
        return indicator/V
    else:
        if r > R:
            return 0.
        else:
            return 1/V
def Nk(k,L=3200.):
"""Number of k modes for a given box size. Default is cc box size."""
kf = 2.*np.pi/L
vf = (kf)**3
dk = kf #fftpower uses fundamental mode for dk by default
Nk = (4*np.pi*k**2 * dk)/vf
return Nk
def match(rlow,xlow,rhigh,xhigh,mp=10,extrap=1,npts=100,bare=False):
"""For plotting 2pcfs - interpolate small-scale pair counts and large scale FFT grid 2PCF"""
"""Output is r^2 xi """
rlow,xlow = rlow[rlow>0],xlow[rlow>0] #check if zero because sometimes that happens for nbodykit 2pcf
m = mp #match_point
rconc,xconc = np.concatenate([rlow[rlow<m],rhigh[rhigh>=m]]),np.concatenate([xlow[rlow<m],xhigh[rhigh>=m]])
r = np.logspace(np.log10(rlow.min()),np.log10(rhigh.max()),npts)
s = ius(rconc,rconc**2 * xconc,ext=extrap)(r)
if(bare): #return multiplied by r^2 by default
s = s/r**2
return r,s
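# Hedged usage sketch (illustration only; the power-law xi below is a toy
# stand-in for measured correlation functions): stitch a small-scale and a
# large-scale estimate at the default match point of 10 Mpc/h.
def _match_example():
    r_small = np.logspace(-1, 1.2, 50)
    r_large = np.logspace(0.8, 2.3, 50)
    xi_toy = lambda r: (r / 5.0)**(-1.8)
    return match(r_small, xi_toy(r_small), r_large, xi_toy(r_large))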
def Delta(z,mdef,cosmo=Planck15):
if(mdef=='vir'):
'''Bryan + Norman 1998 fit'''
xv = 1- cosmo.Omega_m(z)
return ((18*np.pi**2 - 82*xv -39*xv**2)/(1-xv) * cosmo.rho_crit(z)*1e10)
elif(mdef=='200m'):
return 200*cosmo.rho_m(z)*1e10
elif(mdef=='200c'):
return 200*cosmo.rho_crit(z)*1e10
elif(mdef=='Lag'):
return cosmo.rho_m(z)*1e10
elif(mdef=='exc'):
"Approx. Baldauf++ 13 fitting value for z=0"
return 30*cosmo.rho_m(z)*1e10
else:
print("Mass definition not avaliable!")
raise ValueError
def rDelta(M,z,mdef='vir',cosmo=Planck15):
"Choosing vir since closest to what M_FoF finds with ll=0.2 (White 2000 Table 1)"
return ((3/(4*np.pi*Delta(z,mdef,cosmo)))*M)**(1/3)
def mDelta(r,z,mdef='vir',cosmo=Planck15):
return 4/3 *np.pi*Delta(z,mdef,cosmo)*r**3
@np.vectorize
def sigma(M,z,mdef,P=None,kmin=1e-5,kmax=1e2,num_pts=100,cosmo=Planck15):
'''
Get sigma from P using trapezoidal rule
Input:
M: Mass defining smoothing scale
z: redshift
mdef: choice of mass definition for smoothing window
optional
P: Power spectrum callable, if none use linear
kmin: lower integration range
kmax: upper integration range
num_pts: points to use in integration
'''
growth = (1/(1+z))
if P is None:
kk,Pkk = np.loadtxt('/Users/jsull/Cosmology_Codes/flowpm/flowpm/data/Planck15_a1p00.txt',unpack=True)
def P(k):
return np.interp(k,kk,Pkk)
k = np.logspace(np.log10(kmin),np.log10(kmax),num_pts)
"""using EdS growth"""
def I(k):
I = k**2 * P(k) * np.abs(W_TH(k,rDelta(M,z,mdef)))**2
return I
Ig = growth*np.sqrt((1/(2*np.pi**2))* np.trapz(I(k),x=k))
return Ig
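# Hedged usage sketch (illustration only; the power law below is a toy
# stand-in for a real linear power spectrum): evaluate sigma for a
# 1e13 Msun/h halo at z = 0 with the 'vir' mass definition.
def _sigma_example():
    toy_P = lambda k: 1e4 * (k / 0.05)**(-2.0)
    return sigma(1e13, 0.0, 'vir', P=toy_P)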
```
#### File: jmsull/gzpt/setup.py
```python
from setuptools import setup
from io import open
# read the contents of the README file
#with open('README.md', "r") as f:
# long_description = f.read()
def find_version(path):
with open(path, 'r') as fp:
file = fp.read()
import re
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
file, re.M)
if match:
return match.group(1)
raise RuntimeError("Version not found")
setup(name='gzpt',
version = find_version("gzpt/__init__.py"),
description='Hybrid Analytic Model for Matter and Tracer Two-point Correlators',
# long_description="See README.md",#long_description,
url='https://github.com/jmsull/gzpt',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['gzpt'],
install_requires=['wheel', 'numpy>=1.16.5', 'scipy','pyfftw'],
tests_require=['numpy>=1.16.5','scipy','pyfftw'],
extras_require={
'testing': ["numpy"],
},
python_requires='>=3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Physics'
],
keywords='cosmology')
``` |
{
"source": "jmsung/APC",
"score": 3
} |
#### File: APC/scripts/anisotropy.py
```python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Created by <NAME> (<EMAIL>)
Anisotropy data analysis
The equation for the curve as published by Marchand et al. in Nature Cell Biology in 2001 is as follows:
y = a + (b-a) / [(c(x+K)/K*d)+1], where
a is the anisotropy without protein,
b is anisotropy with protein,
c is the Kd for ligand,
d is the total concentration of protein.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from pathlib import Path
import os
import shutil
from timeit import default_timer as timer
from scipy.stats import norm
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
# User input ----------------------------------------------------------------
red_x = np.array([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])
red_y = np.array([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])
red_p = np.array([0.191, 0.248, 0.05, 1])
black_x = np.array([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])
black_y = np.array([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])
black_p = np.array([0.183, 0.278, 1.5, 16])
# ---------------------------------------------------------------------------
def red_anisotropy(x, K):
a = red_p[0]
b = red_p[1]
c = red_p[2]
d = red_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
def black_anisotropy(x, K):
a = black_p[0]
b = black_p[1]
c = black_p[2]
d = black_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
def main():
    # Fit the dissociation constant K for each dataset. Use distinct names
    # (red_K, black_K) so the fitted values do not shadow the module-level
    # parameter arrays red_p / black_p that the model functions read.
    red_K, _ = curve_fit(red_anisotropy, red_x, red_y, p0=[0.078])
    black_K, _ = curve_fit(black_anisotropy, black_x, black_y, p0=[0.1])
    # Plot the result
    fit_x = np.linspace(0, 100, 1000)
    fig, (ax1, ax2) = plt.subplots(figsize=(20, 10), ncols=2, nrows=1, dpi=300)
    ax1.plot(red_x, red_y, 'ro', ms=10)
    ax1.plot(fit_x, red_anisotropy(fit_x, red_K), 'r', lw=2)
    ax1.set_xlabel('[dark D] um')
    ax1.set_ylabel('Anisotropy')
    ax1.set_title('Red K = %f' %(red_K[0]))
    ax1.set_ylim([0.15, 0.3])
    ax2.plot(black_x, black_y, 'ko', ms=10)
    ax2.plot(fit_x, black_anisotropy(fit_x, black_K), 'k', lw=2)
    ax2.set_xlabel('[dark D] um')
    ax2.set_ylabel('Anisotropy')
    ax2.set_title('Black K = %f' %(black_K[0]))
ax2.set_ylim([0.15, 0.3])
fig.savefig('plot_anisotropy.png')
plt.close(fig)
if __name__ == "__main__":
main()
```
#### File: scripts/archive/hmm_processive ([email protected]).py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.optimize  # scipy.optimize.minimize / curve_fit are used below; submodules are not loaded by "import scipy" alone
from hmmlearn import hmm
import time
# User parameters
n_sample = 2
n_frame = 200
SNR = 10
time_bound = 10
time_unbound = 20
print("Number of sample = %d" %(n_sample))
print("Number of frame = %d" %(n_frame))
print("SNR = %d\n" %(SNR))
print("Time bound (true) = %.1f" %(time_bound))
print("Time unbound (true) = %.1f\n" %(time_unbound))
def icdf(data, time):
data = np.array(data)
cdf = np.zeros(len(time))
for i in time:
cdf[i] = sum(data <= time[i])
icdf = 1 - cdf/max(cdf)
icdf = icdf - min(icdf) + 0.001
return icdf
def outliers(data, m = 5.):
data = np.array(data)
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return s > m
def Gaussian(m, s, x):
return np.exp(-(x-m)**2/(2*s**2))/(2*np.pi*s**2)**0.5
def LL_G(param, x):
[m, s] = param
return np.sum(np.log(Gaussian(m, s, x)))
def MLE_G(x):
m = np.mean(x)
s = np.std(x)
fun = lambda *args: -LL_G(*args)
p0 = [m, s]
result = scipy.optimize.minimize(fun, p0, args=(x))
return result
def Exp3(m, a, b, x):
return abs(a)*np.exp(-x/abs(m)) + b
def Exp2(m, b, x):
return np.exp(-x/abs(m)) + b
def Exp1(m, x):
return np.exp(-x/abs(m))
def Exp_pdf(m, x):
return np.exp(-x/abs(m))/abs(m)
def LL_E(param, x):
[m] = param
return np.sum(np.log(Exp_pdf(m, x)))
def MLE_E(x):
m = np.mean(x)
fun = lambda *args: -LL_E(*args)
p0 = [m]
result = scipy.optimize.minimize(fun, p0, args=(x))
return result
def get_dwell(X):
t_ub = []
t_bu = []
Z = [i for i in X]
# stop if only bound or unbound state
if max(Z) == min(Z):
return [], []
for i in range(n_frame-1):
if Z[i] == 0 and Z[i+1] == 1: # time at binding
t_ub.append(i)
elif Z[i] == 1 and Z[i+1] == 0: # time at unbinding
t_bu.append(i)
else:
pass
# Either binding or unbinding is zero event
if len(t_bu)*len(t_ub) == 0:
return [], []
t_ub = np.array(t_ub)
t_bu = np.array(t_bu)
if t_ub[0] < t_bu[0]: # if binding starts first
t_b = t_bu - t_ub[:len(t_bu)]
if len(t_ub) > 1:
t_u = t_ub[1:] - t_bu[:len(t_ub[1:])]
else:
return t_b, []
else: # if unbinding starts first
t_u = t_ub - t_bu[:len(t_ub)]
if len(t_bu) > 1:
t_b = t_bu[1:] - t_ub[:len(t_bu[1:])]
else:
return [], t_u
return t_b, t_u
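# Hedged usage sketch (illustration only): dwell times extracted from a short
# hand-written state trace. get_dwell reads the module-level n_frame, so the
# toy trace is padded to that length with zeros (unbound state).
def _get_dwell_example():
    toy = np.zeros(n_frame)
    toy[5:15] = 1    # bound event of ~10 frames
    toy[40:70] = 1   # bound event of ~30 frames
    return get_dwell(toy)  # ([10, 30], [25])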
class Data:
# Initialize an object with the parameters
def __init__(self, n_sample, n_frame, SNR, time_bound, time_unbound):
self.n_sample = n_sample
self.n_frame = n_frame
self.SNR = SNR
self.time_bound = time_bound
self.time_unbound = time_unbound
# Build a HMM model and simulate n_sample traces
def generate(self):
# Build an HMM instance and set parameters
self.model = hmm.GaussianHMM(n_components=2, covariance_type="full")
# The transition probability matrix
tp_ub = 1/self.time_unbound
tp_uu = 1 - tp_ub
tp_bu = 1/self.time_bound
tp_bb = 1 - tp_bu
# Set the parameters to generate samples
self.model.startprob_ = np.array([0.5, 0.5])
self.model.transmat_ = np.array([[tp_uu, tp_ub],
[tp_bu, tp_bb]])
self.model.means_ = np.array([[0], [1]])
self.model.covars_ = np.tile(np.identity(1), (2, 1, 1)) / self.SNR**2
# Generate list of n_samples and concatenate them
self.X = [None] * self.n_sample
self.Z_true = [None] * self.n_sample
self.X_conc = np.empty(shape=(0, 1))
self.Z_true_conc = np.empty(shape=(0, 1))
for i in range(self.n_sample):
self.X[i], Z = self.model.sample(self.n_frame)
self.Z_true[i] = Z.reshape(self.n_frame, 1)
self.X_conc = np.concatenate((self.X_conc, self.X[i]), axis=0)
self.Z_true_conc = np.concatenate((self.Z_true_conc, self.Z_true[i]), axis=0)
# HMM prediction
def predict(self):
# Set a new model for traidning
self.remodel = hmm.GaussianHMM(n_components=2, covariance_type="full", n_iter=100)
# Set initial parameters for training
self.remodel.startprob_ = np.array([0.5, 0.5])
self.remodel.transmat_ = np.array([[0.5, 0.5],
[0.5, 0.5]])
self.remodel.means_ = np.array([0, 1])
self.remodel.covars_ = np.tile(np.identity(1), (2, 1, 1)) / self.SNR**2
self.Z_predict = [None] * self.n_sample
self.converged = [None] * self.n_sample
self.X_mean = [None] * self.n_sample
self.X_var = [None] * self.n_sample
self.SNR = np.zeros(self.n_sample)
self.tp = [None] * self.n_sample
self.tp_ub = np.zeros(self.n_sample)
self.tp_bu = np.zeros(self.n_sample)
self.tb_HMM = np.zeros(self.n_sample)
self.tu_HMM = np.zeros(self.n_sample)
for i in range(n_sample):
# Estimate model parameters (training)
self.remodel.fit(self.X[i])
# Find most likely state sequence corresponding to X
Z_predict = self.remodel.predict(self.X[i])
Z_predict = Z_predict.reshape(self.n_frame, 1)
X_mean = self.remodel.means_ # Mean
X_var = self.remodel.covars_ # Covariance
### Simplify the following
tp = self.remodel.transmat_ # Transition probability
self.converged[i] = self.remodel.monitor_.converged # Check convergence
self.SNR[i] = (abs(X_mean[1][0]-X_mean[0][0])/(np.mean(X_var))**0.5)
# Assign them such that X[state==0]=0 and X[state==1]=1
if X_mean[0] <= X_mean[1]:
self.Z_predict[i] = Z_predict
self.X_mean[i] = [X_mean[0][0], X_mean[1][0]]
self.X_var[i] = [X_var[0][0][0], X_var[1][0][0]]
self.tp[i] = [[tp[0][0], tp[0][1]],
[tp[1][0], tp[1][1]]]
else:
self.Z_predict[i] = 1 - Z_predict
self.X_mean[i] = [X_mean[1][0], X_mean[0][0]]
self.X_var[i] = [X_var[1][0][0], X_var[0][0][0]]
self.tp[i] = [[tp[1][1], tp[1][0]],
[tp[0][1], tp[0][0]]]
# HMM estimate of bound (tb) and unbound time (tu)
self.tp_ub[i] = self.tp[i][0][1] + 1/n_frame # Transition prob from unbound to bound
self.tp_bu[i] = self.tp[i][1][0] + 1/n_frame # Transition prob from bound to unbound
self.tb_HMM[i] = 1/self.tp_bu[i] # Bound time
self.tu_HMM[i] = 1/self.tp_ub[i] # Unbound time
# Check the convergence
print("%.1f %% converged." %(sum([int(i) for i in self.converged])/self.n_sample*100))
# Label only good data
cond1 = np.array(self.tb_HMM) <= n_frame*0.5
# cond1 = ~outliers(self.tb_HMM)
cond2 = np.array(self.tu_HMM) <= n_frame*0.5
# cond2 = ~outliers(self.tu_HMM)
cond3 = ~outliers(self.SNR)
self.good_data = cond1 & cond2 & cond3
# Log transition probability
self.log_tp_ub = np.log10(np.array(self.tp_ub[self.good_data]))
self.log_tp_bu = np.log10(np.array(self.tp_bu[self.good_data]))
# MLE fitting with a Gaussian function
result_bu = MLE_G(self.log_tp_bu)
result_ub = MLE_G(self.log_tp_ub)
self.m_b, self.s_b = result_bu["x"]
self.m_u, self.s_u = result_ub["x"]
self.tb_MLE = 1/10**(self.m_b)
self.tu_MLE = 1/10**(self.m_u)
error_tb = 100*(self.tb_MLE/self.time_bound-1)
error_tu = 100*(self.tu_MLE/self.time_unbound-1)
print("Time bound (MLE) = %.1f (%.1f %%)" %(self.tb_MLE, error_tb))
print("Time unbound (MLE) = %.1f (%.1f %%) \n" %(self.tu_MLE, error_tu))
# # ----------------------------------------------------------------------
# # HMM prediction with concatenated data
# self.remodel.fit(self.X_conc) # Fit (train) to find the parameters
# Z_predict_conc = self.remodel.predict(self.X_conc) # Predict the most likely trajectory
# self.Z_predict_conc = Z_predict_conc.reshape(self.n_frame*self.n_sample, 1)
# self.converged_conc = self.remodel.monitor_.converged # Check the convergence
# self.tp_conc = self.remodel.transmat_ # Transition probability
# # Reorder state number such that X[Z=0] < X[Z=1]
# if self.X_conc[Z_predict_conc == 0].mean() > self.X_conc[Z_predict_conc == 1].mean():
# self.Z_predict_conc = 1 - self.Z_predict_conc
# self.tp_conc = np.array([[self.tp_conc[1][1], self.tp_conc[1][0]],
# [self.tp_conc[0][1], self.tp_conc[0][0]]])#
#
# self.tp_bu_conc = self.tp_conc[1][0] + 1/n_frame # Transition prob from unbound to bound
# self.tp_ub_conc = self.tp_conc[0][1] + 1/n_frame # Transition prob from bound to unbound
# self.tb_HMM_conc = 1/self.tp_bu_conc # Bound time
# self.tu_HMM_conc = 1/self.tp_ub_conc # Unbound time
# error_tb = 100*(self.tb_HMM_conc/self.time_bound-1)
# error_tu = 100*(self.tu_HMM_conc/self.time_unbound-1)
# print("HMM_concatenated is %s" %(["not converged.", "converged."][int(self.converged_conc)]))
# print("Time bound (HMM, conc) = %.1f (%.1f %%)" %(self.tb_HMM_conc, error_tb))
# print("Time unbound (HMM, conc) = %.1f (%.1f %%)\n" %(self.tu_HMM_conc, error_tu))
def threshold(self):
self.Z_Threshold = [None] * self.n_sample
self.X_Threshold = [None] * self.n_sample
for i in range(self.n_sample):
self.Z_Threshold[i] = np.zeros(self.n_frame)
for j in range(self.n_frame):
if self.X[i][j] > 0.5:
self.Z_Threshold[i][j] = 1
# self.Z_Threshold[i][self.X[i]>0.5] = 1
# self.X_Threshold[i][self.Z_Threshold[i]==0] = np.mean(self.X[i][self.Z_Threshold[i]==0])
# self.X_Threshold[i][self.Z_Threshold[i]==1] = np.mean(self.X[i][self.Z_Threshold[i]==1])
def analyze_pdf(self):
dwell_b = []
dwell_u = []
for i in range(self.n_sample):
if self.good_data[i]:
tb, tu = get_dwell(self.Z_predict[i])
dwell_b.extend(tb)
dwell_u.extend(tu)
dwell_b = np.array(dwell_b)
dwell_u = np.array(dwell_u)
self.dwell_b = dwell_b[dwell_b < self.n_frame*0.5]
self.dwell_u = dwell_u[dwell_u < self.n_frame*0.5]
self.dwell_b_min = 0#np.min(self.dwell_b)
self.dwell_u_min = 0#np.min(self.dwell_u)
# MLE fitting with an Exponential function
result_b = MLE_E(self.dwell_b - self.dwell_b_min)
result_u = MLE_E(self.dwell_u - self.dwell_u_min)
self.tb_pdf = float(result_b["x"]) + self.dwell_b_min
self.tu_pdf = float(result_u["x"]) + self.dwell_u_min
error_tb = 100*(self.tb_pdf/self.time_bound-1)
error_tu = 100*(self.tu_pdf/self.time_unbound-1)
print("Time bound (PDF) = %.1f (%.1f %%)" %(self.tb_pdf, error_tb))
print("Time unbound (PDF) = %.1f (%.1f %%) \n" %(self.tu_pdf, error_tu))
def analyze_icdf(self):
self.t_b = np.arange(max(self.dwell_b))
self.t_u = np.arange(max(self.dwell_u))
# Inverse cumulative distrubution function from dwell time data
self.icdf_b = icdf(self.dwell_b, self.t_b)
self.icdf_u = icdf(self.dwell_u, self.t_u)
p_b = [self.tb_pdf, 1, 0]
p_u = [self.tu_pdf, 1, 0]
# Curve fit of the icdf
# p_b, c_b = scipy.optimize.curve_fit(Exp3, t_b[10:30], icdf_b[10:30], p0=p_b)#, [m_b, 1, 0])#, sigma = 1/icdf_b**0.5)
# p_u, c_u = scipy.optimize.curve_fit(Exp3, t_u[10:30], icdf_u[10:30], p0=p_u)#, sigma = 1/icdf_u**0.5)
self.p_b, self.c_b = scipy.optimize.curve_fit(lambda t,a,b,c: b*np.exp(-t/a)+c, self.t_b, self.icdf_b, p0=p_b)#, sigma=1/icdf_b**0.5)
self.p_u, self.c_u = scipy.optimize.curve_fit(lambda t,a,b,c: b*np.exp(-t/a)+c, self.t_u, self.icdf_u, p0=p_u)#, sigma=1/icdf_u**0.5)
print('Time bound (icdf) = %.1f (%.1f %%)' %(self.p_b[0], 100*(self.p_b[0]/time_bound-1)))
print('Time unbound (icdf) = %.1f (%.1f %%) \n' %(self.p_u[0], 100*(self.p_u[0]/time_unbound-1)))
def plot_trace(self):
fig = plt.figure('trace', clear=True)
for i in range(1):
# Mean values for true and predicted states
X_true = np.zeros((n_frame,1))
X_predict = np.zeros((n_frame,1))
X_threshold = np.zeros((n_frame,1))
for j in range(2):
X_true[self.Z_true[i]==j] = self.X[i][self.Z_true[i]==j].mean()
X_predict[self.Z_predict[i]==j] = self.X[i][self.Z_predict[i]==j].mean()
X_threshold[self.Z_Threshold[i]==j] = self.X[i][self.Z_Threshold[i]==j].mean()
# Percent of error
percent_error = sum(abs(self.Z_true[i] - self.Z_predict[i]))/self.n_frame*100
# Plot the sampled data
sp = fig.add_subplot(2, 4, i+1)
sp.plot(self.X[i], "k", label="observations", ms=1, lw=1, alpha=0.5)
sp.plot(X_true, "b", label="states", lw=2, alpha=1)
sp.plot(X_predict, "r", label="predictions", lw=2, alpha=1)
sp.set_ylabel('Intensity')
# sp.set_title("SNR = %.1f, HMM" % (self.SNR[i])) #show both signal and noise
sp.set_title("SNR = %.1f, Error = %.1f %%" % (self.SNR[i], percent_error)) #show both signal and noise
# sp = fig.add_subplot(2, 1, i+2)
# sp.plot(self.X[i], "k", label="observations", ms=1, lw=1, alpha=0.5)
# sp.plot(X_true, "b", label="states", lw=2, alpha=1)
# sp.plot(X_threshold, "r", label="threshold", lw=2, alpha=1)
# sp.set_xlabel('Frame')
# sp.set_ylabel('Intensity')
# sp.set_title("SNR = %.1f, Threshoding" % (self.SNR[i])) #show both signal and noise
# sp.set_title("SNR = %.1f, Error = %.1f %%" % (self.SNR[i], percent_error)) #show both signal and noise
plt.show()
def plot_cluster(self):
fig = plt.figure('cluster', clear=True)
sp = fig.add_subplot(231)
sp.hist(self.SNR, bins ='scott', color='k', histtype='step', lw=1)
sp.hist(self.SNR[self.good_data], bins ='scott', color='r', histtype='step', lw=1)
sp.set_title('SNR')
sp = fig.add_subplot(232)
sp.hist(self.tb_HMM, bins ='scott', color='k', histtype='step', lw=1)
sp.hist(self.tb_HMM[self.good_data], bins ='scott', color='r', histtype='step', lw=1)
sp.set_title('Time bound (True = %.1f)' %(self.time_bound))
sp = fig.add_subplot(233)
sp.hist(self.tu_HMM, bins ='scott', color='k', histtype='step', lw=1)
sp.hist(self.tu_HMM[self.good_data], bins ='scott', color='r', histtype='step', lw=1)
sp.set_title('Time unbound (True = %.1f)' %(self.time_unbound))
sp = fig.add_subplot(234)
sp.plot(self.SNR, self.tb_HMM, 'k.', alpha=0.5)
sp.plot(self.SNR[self.good_data], self.tb_HMM[self.good_data], 'r.', alpha=0.5)
sp.set_xlabel('SNR')
sp.set_ylabel('Time bound')
sp = fig.add_subplot(235)
sp.plot(self.SNR, self.tu_HMM, 'k.', alpha=0.5)
sp.plot(self.SNR[self.good_data], self.tu_HMM[self.good_data], 'r.', alpha=0.5)
sp.set_xlabel('SNR')
sp.set_ylabel('Time unbound')
sp = fig.add_subplot(236)
sp.plot(self.tb_HMM, self.tu_HMM, 'k.', alpha=0.5)
sp.plot(self.tb_HMM[self.good_data], self.tu_HMM[self.good_data], 'r.', alpha=0.5)
sp.set_xlabel('Time bound')
sp.set_ylabel('Time unbound')
plt.show()
def plot_HMM(self):
fig = plt.figure('HMM', clear=True)
sp = fig.add_subplot(121)
sp.hist(self.log_tp_bu, bins ='scott', color='k', histtype='step', density='True', lw=1)
x = np.linspace(min(self.log_tp_bu), max(self.log_tp_bu), 100)
sp.plot(x, Gaussian(self.m_b, self.s_b, x), 'r', lw=2)
sp.axvline(x=self.m_b, color='k', linestyle='dotted', lw=1)
sp.set_xlabel('Log10(TP_bu)')
error_b = 100*(self.tb_MLE/time_bound-1)
sp.set_title("Bound time = %.1f (%.1f %%)" %(self.tb_MLE, error_b))
sp = fig.add_subplot(122)
sp.hist(self.log_tp_ub, bins ='scott', color='k', histtype='step', density='True', lw=1)
x = np.linspace(min(self.log_tp_ub), max(self.log_tp_ub), 100)
sp.plot(x, Gaussian(self.m_u, self.s_u, x), 'r', lw=2)
sp.axvline(x=self.m_u, color='k', linestyle='dotted', lw=1)
sp.set_xlabel('Log10(TP_ub)')
error_u = 100*(self.tu_MLE/time_unbound-1)
sp.set_title("Unbound time = %.1f (%.1f %%)" %(self.tu_MLE, error_u))
plt.show()
def plot_pdf(self):
# Plot the result
fig = plt.figure('PDF', clear=True)
sp = fig.add_subplot(121)
sp.hist(self.dwell_b - self.dwell_b_min, bins=np.linspace(0,max(self.dwell_b),20), color='k', histtype='step', density='True', lw=1)
sp.plot(self.t_b, Exp_pdf(self.tb_pdf, self.t_b), 'r', lw=1)
sp.set_xlim([0, max(self.dwell_b)])
sp.set_xlabel('Frames')
sp.set_ylabel('Probability')
error_b = 100*(self.tb_pdf/time_bound-1)
sp.set_title('Time bound (PDF) = %.1f (%.1f %%)' %(self.tb_pdf, error_b))
sp = fig.add_subplot(122)
sp.hist(self.dwell_u - self.dwell_u_min, bins=np.linspace(0,max(self.dwell_u),20), color='k', histtype='step', density='True', lw=1)
sp.plot(self.t_u, Exp_pdf(self.tu_pdf, self.t_u), 'r', lw=1)
sp.set_xlim([0, max(self.dwell_u)])
sp.set_xlabel('Frames')
sp.set_ylabel('Probability')
error_u = 100*(self.tu_pdf/time_unbound-1)
sp.set_title('Time unbound (PDF) = %.1f (%.1f %%)' %(self.tu_pdf, error_u))
plt.show()
def plot_icdf(self):
fig = plt.figure('ICDF', clear=True)
sp = fig.add_subplot(121)
sp.plot(self.t_b, self.icdf_b, 'ko', ms=2)
sp.plot(self.t_b, Exp3(self.p_b[0], self.p_b[1], self.p_b[2], self.t_b), 'r', lw=1)
sp.set_xlim([0, max(self.t_b)])
sp.set_ylim([0, 1])
sp.set_xlabel('Frames')
sp.set_ylabel('Pobability')
sp.set_title('Time bound (icdf) = %.1f (%.1f %%)' %(self.p_b[0], 100*(self.p_b[0]/self.time_bound-1)))
sp = fig.add_subplot(122)
sp.plot(self.t_u, self.icdf_u, 'ko', ms=2)
sp.plot(self.t_u, Exp3(self.p_u[0], self.p_u[1], self.p_u[2], self.t_u), 'r', lw=1)
sp.set_xlim([0, max(self.t_u)])
sp.set_ylim([0, 1])
sp.set_xlabel('Frames')
sp.set_ylabel('Probability')
sp.set_title('Time unbound (icdf) = %.1f (%.1f %%)' %(self.p_u[0], 100*(self.p_u[0]/self.time_unbound-1)))
def main(n_sample, n_frame, SNR, time_bound, time_unbound):
start_time = time.time()
# Initialize
data = Data(n_sample, n_frame, SNR, time_bound, time_unbound)
# Generate list of data array
data.generate()
# Predict from list of data array
data.predict()
# Test thresholding
data.threshold()
# PDF analysis
# data.analyze_pdf()
# ICDF analysis
# data.analyze_icdf()
# Plot the result
data.plot_trace()
# data.plot_cluster()
data.plot_HMM()
data.plot_pdf()
# data.plot_icdf()
print("Calculation time = %d (s)" %(time.time()-start_time))
if __name__ == "__main__":
main(n_sample, n_frame, SNR, time_bound, time_unbound)
# To do
# Independent signal generation
# Cutoff > Median estimate
# TP = Beta distribution
``` |
{
"source": "jmsung/codewars",
"score": 4
} |
#### File: codewars/scripts/myfunc.py
```python
def sign(n):
""" Return sign 1 (n >= 0) or -1 (n < 0) """
if not (isinstance(n, float) or isinstance(n, int)):
raise ValueError("Input must be real number.")
import math
return math.copysign(1, n)
def num2lst(n):
""" Convert from a multiple digit number to a list of single digit """
return [int(i) if n > 0 else -int(i) for i in str(abs(n))]
def lst2num(lst):
""" Convert from a list of single digits to a multiple digit number """
# check that all the elements have the same sign
if not all(sign(element) == sign(lst[0])
for element in lst if element != 0):
raise ValueError("All the element in lst must have the same sign.")
num = int("".join([str(abs(i)) for i in lst]))
return num if sign(lst[0]) > 0 else -num
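# Illustrative round trip: num2lst(-345) -> [-3, -4, -5] and
# lst2num([-3, -4, -5]) -> -345, so lst2num(num2lst(n)) == n for any
# non-zero integer n. Note that sign() returns a float and sign(0) == 1.0.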
def list_same_digit(n):
""" Return the sorted list of the same digits from the input number
Parameters
----------
n (int): An int number
Return
------
digits (list of int): combination of digits """
pass
def sum_squared(x):
""" Return the sum of the squared elements
Parameters
----------
x (list): List of numbers
Return
------
ss (float): sum of the squared """
ss = sum(map(lambda i : i * i, x))
# ss = sum([i**2 for i in x])
return ss
``` |
{
"source": "jmsung/trap_analysis",
"score": 3
} |
#### File: scripts/Analysis/trap_func.py
```python
import numpy as np
import scipy
import os
import shutil
def step(t, tb, tu, Ab, Au, s1, s2):
return (Ab-Au) * (scipy.special.erf(s1*(t-tb)) - scipy.special.erf(s2*(t-tu)))/2 + Au
def sine(t, f, A, ph, b): # Sine function
return A * np.sin(2*np.pi*f*t - ph) + b
def triangle(t, f, A, ph, b):
t = 2 * np.pi * f * t - ph + np.pi/2
t, w = np.asarray(t), np.asarray(0.5)
w = np.asarray(w + (t - t))
t = np.asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = np.zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
np.place(y, mask1, np.nan)
# take t modulo 2*pi
tmod = np.mod(t, 2 * np.pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * np.pi)
tsub = np.extract(mask2, tmod)
wsub = np.extract(mask2, w)
np.place(y, mask2, tsub / (np.pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = np.extract(mask3, tmod)
wsub = np.extract(mask3, w)
np.place(y, mask3, (np.pi * (wsub + 1) - tsub) / (np.pi * (1 - wsub)))
return A*y + b
def trapzoid(t, f, A, ph, b, m):
t = 2 * np.pi * f * t - ph + np.pi/2
t, w = np.asarray(t), np.asarray(0.5)
w = np.asarray(w + (t - t))
t = np.asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = np.zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
np.place(y, mask1, np.nan)
# take t modulo 2*pi
tmod = np.mod(t, 2 * np.pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * np.pi)
tsub = np.extract(mask2, tmod)
wsub = np.extract(mask2, w)
np.place(y, mask2, tsub / (np.pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = np.extract(mask3, tmod)
wsub = np.extract(mask3, w)
np.place(y, mask3, (np.pi * (wsub + 1) - tsub) / (np.pi * (1 - wsub)))
y[y > A*m] = A*m
y[y < -A*m] = -A*m
return A*y + b
def square(t, f, A, ph, b):
duty = 0.5
t = 2 * np.pi * f * t - ph
t, w = np.asarray(t), np.asarray(duty)
w = np.asarray(w + (t - t))
t = np.asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = np.zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
np.place(y, mask1, np.nan)
# on the interval 0 to duty*2*pi function is 1
tmod = np.mod(t, 2 * np.pi)
mask2 = (1 - mask1) & (tmod < w * 2 * np.pi)
np.place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
np.place(y, mask3, -1)
return A*y + b
def exp(F, t0, dF):
dF = abs(dF)
return t0*np.exp(-F/dF)
def running_mean(x, N = 10): # Running mean
cumsum = np.cumsum(np.insert(x, 0, 0))
x0 = (cumsum[N:] - cumsum[:-N]) / float(N)
x1 = np.mean(x[:N])*np.ones(int(N/2))
x2 = np.mean(x[-N:])*np.ones(int(N/2))
return np.concatenate((x1, x0, x2))
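# Note: the cumulative-sum trick above computes the N-point moving average in
# O(len(x)); both ends are padded with the mean of the first/last N samples,
# so for even N the returned array has len(x) + 1 elements.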
def running_std(x, N = 11): # Running standard deviation
s = np.ones(len(x))*100
s[:int(N/2)] = np.std(x[:int(N/2)])
s[-int(N/2):] = np.std(x[-int(N/2):])
for i in range(len(x)-(N-1)):
s[i+int(N/2)] = np.std(x[i:N+i])
if any(s == 100):
print('wrong')
return s
def reject_outliers(data, m = 2.):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s < m]
def find_outliers(data, m = 5):
d = np.abs(data - np.median(data))
mdev = np.median(d)
cutoff = np.median(data) + m*mdev
i_outliers = data > cutoff
return cutoff, i_outliers
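# Note: both helpers above are based on the median absolute deviation (MAD).
# reject_outliers() keeps the points whose deviation from the median is below
# m*MAD, while find_outliers() returns the one-sided cutoff median + m*MAD
# together with a boolean mask of the points that exceed it.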
def make_folder(name):
path = os.path.join(os.getcwd(), name)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
os.makedirs(path)
return path
```
#### File: scripts/Calibration/Calibration_BrightNoise.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.fft import fft
import matplotlib.pyplot as plt
from nptdms import TdmsFile
from scipy.optimize import curve_fit, minimize
from scipy.stats import norm
import os
import shutil
### User input ##################################
# First you need to change directory (cd) to where the file is located
# Update the file name
files = ['Dark_Power000',
'Bright_Power010',
'Bright_Power020',
'Bright_Power030',
'Bright_Power040',
'Bright_Power050',
'Bright_Power060',
'Bright_Power070',
'Bright_Power080',
'Bright_Power090',
'Bright_Power099',
'Bright_Power100']
f_sample = 20000 # Sampling frequency (Hz)
dt = 1/f_sample # Time interval during sampling (s)
t_total = 100 # Total time (s)
N_total = int(f_sample * t_total) # Total number of data
# Split the data into windows of t_window seconds, compute a PSD per window and average them
t_window = 0.1 # Time for one window in sec
N_window = int(f_sample * t_window) # Num of data in a window
df = 1/t_window # Freq interval for a window
N_avg = int(t_total / t_window) # Num of windows for averaging
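# Worked example with the defaults above: N_window = 20000 * 0.1 = 2000
# samples per window, df = 1/0.1 = 10 Hz frequency resolution, and
# N_avg = 100 / 0.1 = 1000 windows are averaged per file.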
###############################################
class Data:
def __init__(self, fname, power):
self.fname = fname
self.power = power
def read(self):
# File information below
tdms_file = TdmsFile(self.fname+'.tdms') # Reads a tdms file.
group_name = "Trap" # Get the group name
channels = tdms_file.group_channels(group_name) # Get the channel object
self.ch = np.zeros((len(channels), N_total)) # Make a 2D array (ch, timetrace) for trap data
for i, channel in enumerate(channels):
self.ch[i,] = channel.data[range(N_total)]
self.x = self.ch[0] - np.mean(self.ch[0])
self.y = self.ch[1] - np.mean(self.ch[1])
self.s = self.ch[2]
def analyze(self):
x = self.x.reshape((N_avg, N_window))
y = self.y.reshape((N_avg, N_window))
s = self.s.reshape((N_avg, N_window))
PSD_X = np.zeros((N_avg, int(N_window/2)-1))
PSD_Y = np.zeros((N_avg, int(N_window/2)-1))
PSD_S = np.zeros((N_avg, int(N_window/2)-1))
PSD_XY = np.zeros((N_avg, int(N_window/2)-1))
for j in range(N_avg): # per window
PSD_X0 = np.abs(fft(x[j]))**2/t_window
PSD_Y0 = np.abs(fft(y[j]))**2/t_window
PSD_S0 = np.abs(fft(s[j]))**2/t_window
PSD_XY0 = fft(x[j])*np.conj(fft(y[j]))/t_window
PSD_XY0 = PSD_XY0/(PSD_X0*PSD_Y0)**0.5
PSD_X[j] = PSD_X0[1:int(N_window/2)]
PSD_Y[j] = PSD_Y0[1:int(N_window/2)]
PSD_S[j] = PSD_S0[1:int(N_window/2)]
PSD_XY[j] = PSD_XY0[1:int(N_window/2)]
self.PSD_X = np.mean(PSD_X, axis=0)
self.PSD_Y = np.mean(PSD_Y, axis=0)
self.PSD_S = np.mean(PSD_S, axis=0)
self.PSD_XY = np.mean(PSD_XY, axis=0)
self.f = df * np.arange(1, N_window/2)
def plot(self): # PSD
# PSD fitting (log-log)
# PSD (lin)
t = dt * np.arange(N_window)
fig = plt.figure(1, figsize = (20, 10), dpi=300)
sp = fig.add_subplot(221)
sp.loglog(self.f, self.PSD_X, 'k', lw=1)
# sp.set_ylim([1e-12, 5e-9])
sp.set_xlabel('Frequency (Hz)')
sp.set_ylabel('PSD_X (V^2/s)')
sp = fig.add_subplot(222)
sp.loglog(self.f, self.PSD_Y, 'k', lw=1)
# sp.set_ylim([1e-12, 5e-9])
sp.set_xlabel('Frequency (Hz)')
sp.set_ylabel('PSD_Y (V^2/s)')
sp = fig.add_subplot(223)
sp.plot(self.f, self.PSD_XY, 'k', lw=1)
# sp.set_ylim([1e-12, 5e-9])
sp.set_xlabel('Frequency (Hz)')
sp.set_ylabel('PSD_XY')
sp = fig.add_subplot(224)
sp.loglog(self.f, self.PSD_S, 'k', lw=1)
# sp.set_ylim([1e-12, 5e-9])
sp.set_xlabel('Frequency (Hz)')
sp.set_ylabel('PSD_S (V^2/s)')
sp.set_title('Trap power = %d %%' %(self.power))
fig.savefig(self.fname)
plt.close(fig)
def main():
for fname in files:
print(fname)
power = int(fname[-3:])
data = Data(fname, power)
data.read()
data.analyze()
data.plot()
if __name__ == "__main__":
main()
```
#### File: scripts/Calibration/Calibration_Height.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import Calibration_Bead
import Calibration_Bead_Hydro
from scipy.optimize import curve_fit
files = [['X_H0100', 'X_H0200', 'X_H0300', 'X_H0400', 'X_H0500', 'X_H0600', 'X_H0700', 'X_H0800', 'X_H0900', 'X_H1000', 'X_H1100', 'X_H1200', 'X_H1300', 'X_H1400', 'X_H1500'],
['Y_H0100', 'Y_H0200', 'Y_H0300', 'Y_H0400', 'Y_H0500', 'Y_H0600', 'Y_H0700', 'Y_H0800', 'Y_H0900', 'Y_H1000', 'Y_H1100', 'Y_H1200', 'Y_H1300', 'Y_H1400', 'Y_H1500']]
files = [['X_H0200', 'X_H0300', 'X_H0400', 'X_H0500', 'X_H0600', 'X_H0700', 'X_H0800', 'X_H0900', 'X_H1000', 'X_H1100', 'X_H1200', 'X_H1300', 'X_H1400', 'X_H1500'],
['Y_H0200', 'Y_H0300', 'Y_H0400', 'Y_H0500', 'Y_H0600', 'Y_H0700', 'Y_H0800', 'Y_H0900', 'Y_H1000', 'Y_H1100', 'Y_H1200', 'Y_H1300', 'Y_H1400', 'Y_H1500']]
#files = [['X_H0100', 'X_H0500', 'X_H0900'],
# ['Y_H0100', 'Y_H0500', 'Y_H0900']]
R = 430
fd = 50
Ad = 50
power = 100
def Faxen(H, offset, B):
h = H+offset
x = 1 - 9*R/16/h + (R**3)/8/(h**3) - 45*(R**4)/256/(h**4) - (R**5)/16/(h**5)
return B/x
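# Note: the polynomial above appears to be Faxen's correction for the drag on
# a sphere of radius R moving parallel to a surface, with h the distance from
# the surface to the bead centre; B is the bulk value and offset absorbs any
# error in the nominal height.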
def main():
fig = plt.figure(10, figsize = (20, 10), dpi=300)
for i in range(2):
h = np.zeros(len(files[i]))
b = np.zeros(len(files[i]))
db = np.zeros(len(files[i]))
k = np.zeros(len(files[i]))
dk = np.zeros(len(files[i]))
r = np.zeros(len(files[i]))
dr = np.zeros(len(files[i]))
for j in range(len(files[i])):
fname = files[i][j]
axis = fname[0]
h[j] = int(fname[-4:])+R
print(fname)
print(h[j])
b[j], db[j], k[j], dk[j], r[j], dr[j] = Calibration_Bead_Hydro.main(fname, axis, power, fd, Ad, h[j])
# b[j], db[j], k[j], dk[j], r[j], dr[j] = Calibration_Bead.main(fname, axis, power, fd, Ad)
# Beta * Kappa
bk = b*k
dbk = bk*((db/b)**2 + (dk/k)**2)**0.5
mbk = np.average(bk, weights = 1/dbk**2)
# Beta
mb = np.average(b, weights = 1/db**2)
# Kappa
mk = np.average(k, weights = 1/dk**2)
# Stoke ratio
p_r, cov = curve_fit(Faxen, h, r, p0=[0, 3], sigma=dr)
offset = p_r[0]
ratio = p_r[1]
h = h + offset
x = np.linspace(min(h)-10, max(h)+100, 100)
r_fit = Faxen(x, 0, ratio)
sp = fig.add_subplot(2,4,4*i+1)
sp.axhline(y=mb, color='k', linestyle='solid', linewidth=1)
sp.axvline(x=R, color='k', linestyle='dashed', linewidth=1)
sp.errorbar(h, b, yerr=db, fmt='o', ecolor='k', color='k')
sp.set_xlim((0, max(h)+100))
sp.set_xlabel('Bead center to surface [nm]')
sp.set_ylabel('Beta [nm/V]')
sp.set_title('Beta [nm/V] = %d +/- %d' %(mb, np.std(b)))
sp = fig.add_subplot(2,4,4*i+2)
sp.errorbar(h, k, yerr=dk, fmt='o', ecolor='k', color='k')
sp.axvline(x=R, color='k', linestyle='dashed', linewidth=1)
sp.axhline(y=mk, color='k', linestyle='solid', linewidth=1)
sp.set_xlim((0, max(h)+100))
sp.set_xlabel('Bead center to surface [nm]')
sp.set_ylabel('Kappa [pN/nm]')
sp.set_title('Kappa [pN/nm] = %.3f +/- %.3f' %(mk, np.std(k)))
sp = fig.add_subplot(2,4,4*i+3)
sp.errorbar(h, b*k, yerr=dbk, fmt='o', ecolor='k', color='k')
sp.axvline(x=R, color='k', linestyle='dashed', linewidth=1)
sp.axhline(y=mbk, color='k', linestyle='solid', linewidth=1)
sp.set_xlim((0, max(h)+100))
sp.set_xlabel('Bead center to surface [nm]')
sp.set_ylabel('Beta*Kappa [pN/V]')
sp.set_title('Beta*Kappa [pN/V] = %.1f +/- %.1f' %(mbk, np.std(dbk)))
sp = fig.add_subplot(2,4,4*i+4)
sp.plot(x, r_fit, 'r')
sp.errorbar(h, r, yerr=dr, fmt='o', ecolor='k', color='k')
sp.axvline(x=R, color='k', linestyle='dashed', linewidth=1)
sp.axhline(y=ratio, color='r', linestyle='dashed', linewidth=1)
sp.axhline(y=1, color='k', linestyle='solid', linewidth=1)
sp.set_xlim((0, max(h)+100))
sp.set_xlabel('Bead center to surface [nm]')
sp.set_ylabel('Stoke ratio')
sp.set_title('Stoke ratio = %.1f, Offset = %.1f nm' %(ratio, offset))
fig.savefig('Calibration result.png')
plt.close(fig)
if __name__ == "__main__":
main()
```
#### File: scripts/HFS/HFS_event_detection.py
```python
import numpy as np
import os
from pylab import *
from scipy import convolve
from scipy.optimize import leastsq
from operator import itemgetter
from itertools import groupby
#-------------------------- USER INPUT ------------------------------
# Sampling frequency
fsample=40000.0 # [Hz]
# Molecule 1
mol1={}
mol1['directory']='C:/Users/chaoliu/Documents/Spudich Lab/trap harmonic force spectroscopy/HFS/aGFP'
mol1['datadir']='Antibody1'
mol1['cal_b1']=149.0 # Calibration factor [nm/V] 114.0
mol1['cal_b2']=80.0 # Calibration factor [nm/V] 63.0
mol1['cal_k1']=0.114 # Trap strength [pN/nm] 0.21
mol1['cal_k2']=0.0679 # Trap strength [pN/nm] 0.13
mol1['fd']=100.0 # Oscillation frequency [Hz]
mol1['phasecut']=1 # Threshold for the phase [rad] 1.0
mol1['ampthreshold1']=10.0 # Threshold for the amplitude [nm] 10.0
mol1['ampthreshold2']=20.0 # Threshold for the amplitude [nm] 6.0
## Molecule 2
#mol2={}
#mol2['directory']='Molecule_2'
#mol2['datadir']='Cal1_M2'
#mol2['cal_b1']=
#mol2['cal_b2']=
#mol2['cal_k1']=
#mol2['cal_k2']=
#mol2['fd']=
#mol2['phasecut']=
#mol2['ampthreshold1']=
#mol2['ampthreshold2']=
# List of molecules to be analyzed
molecules=[mol1] # [mol1, mol2, ...]
#-------------------------- END USER INPUT --------------------------
# Auxiliary function
def mergeevents(events):
merged=True
while any(merged):
mergedevents=[]
merged=repeat(False,len(events)-1)
n=0
while n<len(events):
if n<len(events)-1 and (events[n+1][0]-events[n][1] < 500):
mergedevents.append((events[n][0],events[n+1][1]))
merged[n]=True
n+=2
else:
mergedevents.append((events[n][0],events[n][1]))
n+=1
events=mergedevents
return events
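# Note: mergeevents() repeatedly joins events separated by fewer than 500
# samples until no further merges occur, e.g.
# [(0, 100), (400, 600), (5000, 5100)] -> [(0, 600), (5000, 5100)].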
# Close all figures at startup
close('all')
nevents_all=0
for mol in molecules:
# Get all parameters for the relevant molecule
directory=mol['directory']
datadir=mol['datadir']
cal_b1=mol['cal_b1']
cal_b2=mol['cal_b2']
cal_k1=mol['cal_k1']
cal_k2=mol['cal_k2']
fd=mol['fd']
phase_threshold=mol['phasecut']
amp_threshold1=mol['ampthreshold1']
amp_threshold2=mol['ampthreshold2']
print directory
# Make results directory if it does not exist
resultsdir=directory+'/Results_WaveletAnalysis' #_test8
if not os.path.exists(resultsdir):
os.mkdir(resultsdir)
# Make list of all real data arrays in data directory
datafiles=os.listdir(directory+'/'+datadir)
if '.DS_Store' in datafiles:
datafiles=datafiles[1:]
if 'processed.txt' in datafiles:
datafiles.pop()
# Sort datafiles according to numeric value
datafiles.sort(key=lambda s: int(s.rsplit('.')[0]))
# Plot
plotcolors=['black','gray']
highlightcolors=['yellow','cyan']
# Period of stage oscillation
T=fsample/fd # [data points]
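# For Molecule 1 above (fsample = 40 kHz, fd = 100 Hz) this gives
# T = 400 data points per oscillation period.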
# List to store binding times, row is event number and columns contain at1,at2,det1,det2
bindingtimes_all=[]
F0_all=[]
dF_all=[]
bindingtimes_all_Fourier=[]
F0_all_Fourier=[]
dF_all_Fourier=[]
avchi2_all=[]
nevent_save=0
# Loop over datafiles
for datafile in datafiles:
print 'Analyzing '+datafile
# List for tuples of start and end positions of events
allevents=[]
# Load data from data file
data = np.load(directory+'/'+datadir+'/'+datafile)
# Correct for arbitrary, fixed calibration performed in script
# reading binary data from trap output files
data[:,1]*=(cal_b1/(80.0))
data[:,2]*=(cal_b2/(80.0))
# Position data from the two beads
data1=data[:,1]
data2=data[:,2]
# Loop over the two beads
for beadnumber in [1,2]:
events=[]
# Loop over all data in blocks of 10,000 data points
for datai in range(0,int((shape(data)[0]/10000.0))):
#print 1.0*datai/int((shape(data)[0]/10000.0)), datafile, directory
# Array of time in units of data points
t=np.arange(datai*10000,(datai+1)*10000)
# Fit sine-function to piezo position, to determine phase, assuming known period T
parsinit=array([20.0,0.0,0.0])
pars=leastsq(lambda pars: data[t,5]-(pars[0]*sin(t*2*pi/T+pars[1])+pars[2]),parsinit)[0]
# Coerce amplitude into positive number
if pars[0]<0.0:
piezoamp=abs(pars[0])
piezophase=pars[1]+pi
else:
piezoamp=pars[0]
piezophase=pars[1]
# Eliminate 2*pi degeneracy
piezophase=mod(piezophase,2*pi)
# Fitted sine function and its corresponding cosine
piezofit=piezoamp*sin(t*2*pi/T+piezophase)+pars[2]
piezofit2=piezoamp*sin(t*2*pi/T+piezophase+pi/2.0)+pars[2]
# Calculate wavelet-sine and wavelet-cosine transforms
# and in turn the amplitude and phase of the signals
pdata1=data[t,beadnumber]*(piezofit-mean(piezofit))
pdata2=data[t,beadnumber]*(piezofit2-mean(piezofit2))
pdata=sqrt(pdata1**2+pdata2**2)
y=convolve(pdata,ones(T)/T,mode='valid')
tsub=t[0]+arange(len(y))+(T-1)/2.0
y1=2*convolve(pdata1,ones(T)/T,mode='valid')
y2=2*convolve(pdata2,ones(T)/T,mode='valid')
y=sqrt(y1**2+y2**2)
yamp=y/abs(piezoamp)
test=zeros(len(y))
for n in range(len(y)):
if y1[n]>0.0 and y2[n]>0.0:
test[n]=arctan(y2[n]/y1[n])
elif y1[n]<0.0 and y2[n]>0.0:
test[n]=arctan(y2[n]/y1[n])+pi
elif y1[n]>0.0 and y2[n]<0.0:
test[n]=arctan(y2[n]/y1[n])
elif y1[n]<0.0 and y2[n]<0.0:
test[n]=arctan(y2[n]/y1[n])-pi
yphase=test
# Locate events using phase and amplitude
if beadnumber==1:
binding1=yamp > amp_threshold1
else:
binding1=yamp > amp_threshold2
binding2=yphase < phase_threshold
# Require that both criteria are satisfied simultaneously
binding=binding1*binding2
# Get binding times
tbinding=tsub[binding]
tbinding=tbinding.astype(int)
# Find groups of consecutive time points
groups=[]
for k, g in groupby(enumerate(tbinding), lambda (i,x):i-x):
groups.append(map(itemgetter(1), g))
for element in groups:
tbinding=element
events.append((tbinding[0],tbinding[-1]))
# Merge events if they happen to be located over
# a 10,000 data point break in the sequence
n=0
tempevents=[]
while n < len(events)-1:
if mod(events[n][1]-10000+int(T/2+1),10000)==0 and \
mod(events[n+1][0]-int(T/2-1),10000)==0:
tempevents.append((events[n][0],events[n+1][1]))
n+=2
else:
tempevents.append(events[n])
n+=1
events=tempevents
if events!=[]:
allevents+=events
events=allevents
events.sort(key=lambda tup: tup[0])
# Merge events from the two beads if they overlap in time
if events!=[]:
merged=True
while any(merged):
mergedevents=[]
merged=repeat(False,len(events)-1)
n=0
while n<len(events):
if n<len(events)-1 and (events[n+1][0]<events[n][1]):
mergedevents.append((events[n][0],max(events[n+1][1],events[n][1])))
merged[n]=True
n+=2
else:
mergedevents.append((events[n][0],events[n][1]))
n+=1
events=mergedevents
# Ignore a possible early event
if events[0][0]< 5*T:
events=events[1:]
nevents_all+=len(events)
# Loop over possible events
for nevent in range(len(events)):
event=events[nevent]
# Test if threshold criteria is passed for more than one period
if event[1]-event[0]>T:
try:
bindingtimes_Fourier=[]
F0_Fourier=[]
dF_Fourier=[]
dFcor_Fourier=[]
phi_Fourier=[]
avchi2=[]
# Prepare to plot results for duration of event +/- 25 periods
figure(1,figsize=(16,10))
starttime=event[0]
endtime=event[1]
tevent=np.arange(starttime,endtime)
tplot=np.arange(max(0,starttime-25*int(T)),min(endtime+25*int(T),shape(data)[0]))
# Plot position of bead 1
subplot(511)
plot(tplot,data[tplot,1],linestyle='-',color='k',markersize=1,linewidth=0.5)
plot(tevent,data[tevent,1],'y-',markersize=1,linewidth=1.0)
# Plot position of bead 2
subplot(512)
plot(tplot,data[tplot,2],linestyle='-',color='gray',markersize=1,linewidth=0.5)
plot(tevent,data[tevent,2],'c-',markersize=1,linewidth=1.0)
# Plot position of piezo-stage
parsinit=array([20.0,0.0,0.0])
pars=leastsq(lambda pars: data[tplot,5]-(pars[0]*sin(tplot*2*pi/T+pars[1])+pars[2]),parsinit)[0]
if pars[0]<0.0:
piezoamp=abs(pars[0])
piezophase=pars[1]+pi
else:
piezoamp=pars[0]
piezophase=pars[1]
piezophase=mod(piezophase,2*pi)
piezofit=piezoamp*sin(tplot*2*pi/T+piezophase)+pars[2]
piezofit2=piezoamp*sin(tplot*2*pi/T+piezophase+pi/2.0)+pars[2]
subplot(513)
plot(tplot,data[tplot,5],'k-',markersize=1,linewidth=0.5)
plot(tplot,piezofit,'g-',markersize=1,linewidth=1.0)
# Redo analysis for approved events (not optimal)
for beadnumber in [1,2]:
if beadnumber==1:
cal_k=cal_k1
yampthreshold=amp_threshold1
elif beadnumber==2:
cal_k=cal_k2
yampthreshold=amp_threshold2
t=tplot
pdata1=data[t,beadnumber]*(piezofit-mean(piezofit))
pdata2=data[t,beadnumber]*(piezofit2-mean(piezofit2))
pdata=sqrt(pdata1**2+pdata2**2)
y=convolve(pdata,ones(T)/T,mode='valid')
tsub=t[0]+arange(len(y))+(T-1)/2.0
y1=2*convolve(pdata1,ones(T)/T,mode='valid')
y2=2*convolve(pdata2,ones(T)/T,mode='valid')
y=sqrt(y1**2+y2**2)
yamp=y/abs(piezoamp)
test=zeros(len(y))
for n in range(len(y)):
if y1[n]>0.0 and y2[n]>0.0:
test[n]=arctan(y2[n]/y1[n])
elif y1[n]<0.0 and y2[n]>0.0:
test[n]=arctan(y2[n]/y1[n])+pi
elif y1[n]>0.0 and y2[n]<0.0:
test[n]=arctan(y2[n]/y1[n])
elif y1[n]<0.0 and y2[n]<0.0:
test[n]=arctan(y2[n]/y1[n])-pi
yphase=test
# Calculate period to use for averaging
select=(tsub>starttime+T/2.0)*(tsub<endtime-T/2.0)
# Calculate average amplitude and phase of bound state
boundamplevel=mean(yamp[select])
boundphaselevel=mean(yphase[select])
# Plot thresholds
subplot(514)
plot(tsub,yamp,linestyle='-',color=plotcolors[beadnumber-1])
hlines(amp_threshold1,tplot[0],tplot[-1],linestyle='dotted')
hlines(amp_threshold2,tplot[0],tplot[-1],linestyle='dotted')
plot(tsub[select],yamp[select],color=highlightcolors[beadnumber-1],linestyle='-',markersize=1,linewidth=1.0)
hlines(boundamplevel,starttime,endtime,linestyle='dashed',color='black')
subplot(515)
plot(tsub,yphase,linestyle='-',color=plotcolors[beadnumber-1])
plot(tsub[select],yphase[select],color=highlightcolors[beadnumber-1],linestyle='-',markersize=1,linewidth=1.0)
hlines(phase_threshold,tplot[0],tplot[-1],linestyle='dotted')
hlines(boundphaselevel,starttime,endtime,linestyle='dashed',color='black')
ylim(-pi,pi)
# Find surrounding intervals that do not overlap with other events
tunbound1=max(starttime-25*int(T),0)
if nevent>=1 and tunbound1<events[nevent-1][1]:
tunbound1=events[nevent-1][1]
tunbound2=min(endtime+25*int(T),shape(data)[0])
if nevent<=len(events)-2 and tunbound2>events[nevent+1][0]:
tunbound2=events[nevent+1][0]
tunbound_before=arange(tunbound1,starttime-int(T))
tunbound_after=arange(endtime+int(T),tunbound2)
tunbound=arange(max(starttime-4*int(T),0),min(endtime+4*int(T),shape(data)[0]))
tunbound_fit=append(tunbound_before,tunbound_after)
# Determine parameters using wavelets
deltax=mean(yamp[select])
phi=mean(yphase[select])
dF_Fourier.append(mean(yamp[select])*cal_k)
phi_Fourier.append(mean(yphase[select]))
# Find average amplitude of unbound states
select1=(tsub>tunbound1)*(tsub<starttime-T)
select2=(tsub>endtime+T)*(tsub<tunbound2)
select=np.logical_or(select1,select2)
unboundamplevel=mean(yamp[select])
# Locate interval of increasing amplitude
select=(tsub>starttime-T)*(tsub<min(starttime+T/2.0,(starttime+endtime)/2.0))
rise=yamp[select]
trise=tsub[select]
t1s=[]
zero_crossings = np.where(np.diff(np.sign(rise-(boundamplevel+unboundamplevel)/2.0)))[0]
for element in zero_crossings:
vlines(element,-10,10)
t1s.append(trise[element])
# Binding times are calculated as full-width-at-half-max
# In case of multiple candidate times for binding, use the one closest to initial value
t1_Fourier=t1s[argmin(abs((t1s-starttime)))]
# Save binding time
bindingtimes_Fourier.append(t1_Fourier)
# Locate interval of decreasing amplitude
select=(tsub>max(endtime-T/2.0,(starttime+endtime)/2.0))*(tsub<endtime+T)
fall=yamp[select]
tfall=tsub[select]
t2s=[]
zero_crossings = np.where(np.diff(np.sign(fall-(boundamplevel+unboundamplevel)/2.0)))[0]
for element in zero_crossings:
vlines(element,-10,10)
t2s.append(tfall[element])
# Binding times are calculated as full-width-at-half-max
# In case of multiple candidate times for binding, use the one closest to initial value
t2_Fourier=t2s[argmin(abs((t2s-endtime)))]
for panel in [511,512]:
subplot(panel)
if beadnumber==1:
vlines(t1_Fourier,ylim()[0],ylim()[1],linestyle='dashed',color='k')
vlines(t2_Fourier,ylim()[0],ylim()[1],linestyle='dashed',color='k')
elif beadnumber==2:
vlines(t1_Fourier,ylim()[0],ylim()[1],linestyle='dashed',color='gray')
vlines(t2_Fourier,ylim()[0],ylim()[1],linestyle='dashed',color='gray')
# Save unbinding time
bindingtimes_Fourier.append(t2_Fourier)
# Fit harmonic function to binding region
parsinit=array([20.0,piezophase,0.0])
pars=leastsq(lambda pars: data[tevent,beadnumber]-(pars[0]*sin(tevent*2*pi/T+pars[1])+pars[2]),parsinit)[0]
subplot(510+beadnumber)
sinebinding=pars[0]*sin(tunbound*2*pi/T+pars[1])+pars[2]
plot(tunbound,sinebinding,'g-',lw=1.0)
# Coerce amplitude into a positive number and adjust phase accordingly
if pars[0]<0.0:
amp_bound=abs(pars[0])
phase_bound=pars[1]+pi
else:
amp_bound=pars[0]
phase_bound=pars[1]
piezophase=mod(piezophase,2*pi)
offset_bound=pars[2]
# Calculate average chi-squared for the bound state
t12=np.arange(int(t1_Fourier),int(t2_Fourier))
dev=data[t12,beadnumber]-(amp_bound*sin(t12*2*pi/T+phase_bound)+offset_bound)
ssdev=sum(dev**2)
avssdev=ssdev/(t2_Fourier-t1_Fourier)
avchi2.append(avssdev)
# Fit sine-function to early detached state
figure(1)
parsinit=array([5.0,piezophase,0.0])
pars=leastsq(lambda pars: data[tunbound_fit,beadnumber]-(pars[0]*sin(tunbound_fit*2*pi/T+pars[1])+pars[2]),parsinit)[0]
subplot(510+beadnumber)
sineunbound_fit=pars[0]*sin(tunbound_fit*2*pi/T+pars[1])+pars[2]
sineunbound=pars[0]*sin(tunbound*2*pi/T+pars[1])+pars[2]
sineunbound_before=pars[0]*sin(tunbound_before*2*pi/T+pars[1])+pars[2]
sineunbound_after=pars[0]*sin(tunbound_after*2*pi/T+pars[1])+pars[2]
plot(tunbound_before,sineunbound_before,'b-',lw=2.0)
plot(tunbound_after,sineunbound_after,'b-',lw=2.0)
tmed=arange(starttime-T,endtime+T)
sineunbound_med=pars[0]*sin(tmed*2*pi/T+pars[1])+pars[2]
plot(tmed,sineunbound_med,'b--',lw=2.0)
if pars[0]<0.0:
amp_unbound=abs(pars[0])
phase_unbound=pars[1]+pi
else:
amp_unbound=pars[0]
phase_unbound=pars[1]
offset_unbound=pars[2]
# Determine F0 from raw trajectory
t=tplot
pdata=data[t,beadnumber]
y=convolve(pdata,ones(T)/T,mode='valid')
tsub=t[0]+arange(len(y))+(T-1)/2.0
subplot(510+beadnumber)
if endtime-starttime>T:
select=(tsub>starttime+T/2.0)*(tsub<endtime-T/2.0)
else:
select=(tsub>starttime)*(tsub<endtime)
boundlevel=mean(y[select])
hlines(boundlevel,starttime,endtime,color='k',linestyle='dashed')
select1=(tsub>tunbound1)*(tsub<starttime-T)
select2=(tsub>endtime+T)*(tsub<tunbound2)
select=np.logical_or(select1,select2)
unboundlevel=mean(y[select])
F0_Fourier.append((boundlevel-unboundlevel)*cal_k)
# Polish the plots
for panel in [511,512,513,514,515]:
subplot(panel)
tlim=np.arange(max(0,starttime-5*int(T)),min(endtime+5*int(T),shape(data)[0]))
xlim(tlim[0],tlim[-1])
if panel==511:
text(xlim()[0]+0.95*(xlim()[1]-xlim()[0]),ylim()[1]-20,str(round(avchi2[0],2)))
elif panel==512:
text(xlim()[0]+0.95*(xlim()[1]-xlim()[0]),ylim()[1]-20,str(round(avchi2[1],2)))
subplot(511)
ylabel('$x_1$ (nm)')
subplot(512)
ylabel('$x_2$ (nm)')
subplot(513)
ylabel('$x_\mathrm{stage}$ (nm)')
subplot(514)
ylabel('Amplitude (nm)')
subplot(515)
ylabel('Phase (rad)')
xlabel('Time (frames)')
# Save the diagnostics figure
savefig(resultsdir+'/'+'event'+str(nevent_save)+'.png')
close('all')
F0_all_Fourier.append(F0_Fourier)
dF_all_Fourier.append(dF_Fourier)
bindingtimes_all_Fourier.append(bindingtimes_Fourier)
avchi2_all.append(avchi2)
nevent_save+=1
except (IndexError,ValueError,TypeError,RuntimeError):
pass
# Save the results to files
np.savetxt(directory+'/'+'bindingtimes_Wavelet.txt',bindingtimes_all_Fourier)
np.savetxt(directory+'/'+'F0_Wavelet.txt',F0_all_Fourier)
np.savetxt(directory+'/'+'dF_Wavelet.txt',dF_all_Fourier)
np.savetxt(directory+'/'+'avchi2.txt',avchi2_all)
``` |
{
"source": "jmsutariya/cube-tracker",
"score": 3
} |
#### File: cube-tracker/rubikscolorresolver/base.py
```python
from math import ceil, sqrt
import sys
if sys.version_info < (3, 4):
raise SystemError("Must be using Python 3.4 or higher")
def is_micropython():
return sys.implementation.name == "micropython"
if is_micropython():
from ucollections import OrderedDict
else:
from collections import OrderedDict
# @timed_function
def get_lab_distance(lab1, lab2):
"""
http://www.w3resource.com/python-exercises/math/python-math-exercise-79.php
In mathematics, the Euclidean distance or Euclidean metric is the "ordinary"
(i.e. straight-line) distance between two points in Euclidean space. With this
distance, Euclidean space becomes a metric space. The associated norm is called
the Euclidean norm.
"""
return sqrt(((lab1.L - lab2.L) ** 2) + ((lab1.a - lab2.a) ** 2) + ((lab1.b - lab2.b) ** 2))
edge_color_pair_map = {
# Up (white)
"Gr/Wh": "Gr/Wh",
"Wh/Gr": "Gr/Wh",
"Bu/Wh": "Bu/Wh",
"Wh/Bu": "Bu/Wh",
"OR/Wh": "OR/Wh",
"Wh/OR": "OR/Wh",
"Rd/Wh": "Rd/Wh",
"Wh/Rd": "Rd/Wh",
# Left (orange)
"Gr/OR": "Gr/OR",
"OR/Gr": "Gr/OR",
"Bu/OR": "Bu/OR",
"OR/Bu": "Bu/OR",
# Right (red)
"Gr/Rd": "Gr/Rd",
"Rd/Gr": "Gr/Rd",
"Bu/Rd": "Bu/Rd",
"Rd/Bu": "Bu/Rd",
# Down (yellow)
"Gr/Ye": "Gr/Ye",
"Ye/Gr": "Gr/Ye",
"Bu/Ye": "Bu/Ye",
"Ye/Bu": "Bu/Ye",
"OR/Ye": "OR/Ye",
"Ye/OR": "OR/Ye",
"Rd/Ye": "Rd/Ye",
"Ye/Rd": "Rd/Ye",
}
class ListMissingValue(Exception):
pass
# @timed_function
def find_index_for_value(list_foo, target, min_index):
for (index, value) in enumerate(list_foo):
if value == target and index >= min_index:
return index
raise ListMissingValue("Did not find %s in list %s".format(target, list_foo))
# @timed_function
def get_swap_count(listA, listB, debug=False):
"""
How many swaps do we have to make in listB for it to match listA
Example:
A = [1, 2, 3, 0, 4]
B = [3, 4, 1, 0, 2]
would require 2 swaps
"""
A_length = len(listA)
B_length = len(listB)
swaps = 0
index = 0
if A_length != B_length:
#log.info("listA %s" % " ".join(listA))
#log.info("listB %s" % " ".join(listB))
assert False, "listA (len %d) and listB (len %d) must be the same length" % (
A_length,
B_length,
)
#if debug:
# log.info("INIT")
# log.info("listA: %s" % " ".join(listA))
# log.info("listB: %s" % " ".join(listB))
# log.info("")
while listA != listB:
if listA[index] != listB[index]:
listA_value = listA[index]
listB_index_with_A_value = find_index_for_value(
listB, listA_value, index + 1
)
tmp = listB[index]
listB[index] = listB[listB_index_with_A_value]
listB[listB_index_with_A_value] = tmp
swaps += 1
#if debug:
# log.info("index %d, swaps %d" % (index, swaps))
# log.info("listA: %s" % " ".join(listA))
# log.info("listB: %s" % " ".join(listB))
# log.info("")
index += 1
#if debug:
# log.info("swaps: %d" % swaps)
# log.info("")
return swaps
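# Illustrative check: with the docstring example,
# get_swap_count([1, 2, 3, 0, 4], [3, 4, 1, 0, 2]) returns 2. Note that listB
# is modified in place (it ends up equal to listA) while listA is untouched.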
class LabColor(object):
# @timed_function
def __init__(self, L, a, b, red, green, blue):
self.L = L
self.a = a
self.b = b
self.red = red
self.green = green
self.blue = blue
def __str__(self):
return "Lab (%s, %s, %s)" % (self.L, self.a, self.b)
def __repr__(self):
return self.__str__()
def __lt__(self, other):
if self.L != other.L:
return self.L < other.L
if self.a != other.a:
return self.a < other.a
return self.b < other.b
# @timed_function
def rgb2lab(inputColor):
(red, green, blue) = inputColor
# XYZ -> Standard-RGB
# https://www.easyrgb.com/en/math.php
var_R = red / 255
var_G = green / 255
var_B = blue / 255
if var_R > 0.04045:
var_R = pow(((var_R + 0.055) / 1.055), 2.4)
else:
var_R = var_R / 12.92
if var_G > 0.04045:
var_G = pow(((var_G + 0.055) / 1.055), 2.4)
else:
var_G = var_G / 12.92
if var_B > 0.04045:
var_B = pow(((var_B + 0.055) / 1.055), 2.4)
else:
var_B = var_B / 12.92
var_R = var_R * 100
var_G = var_G * 100
var_B = var_B * 100
X = var_R * 0.4124 + var_G * 0.3576 + var_B * 0.1805
Y = var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
Z = var_R * 0.0193 + var_G * 0.1192 + var_B * 0.9505
reference_X = 95.047
reference_Y = 100.0
reference_Z = 108.883
# XYZ -> CIE-L*ab
# https://www.easyrgb.com/en/math.php
var_X = X / reference_X
var_Y = Y / reference_Y
var_Z = Z / reference_Z
if var_X > 0.008856:
var_X = pow(var_X, 1 / 3)
else:
var_X = (7.787 * var_X) + (16 / 116)
if var_Y > 0.008856:
var_Y = pow(var_Y, 1 / 3)
else:
var_Y = (7.787 * var_Y) + (16 / 116)
if var_Z > 0.008856:
var_Z = pow(var_Z, 1 / 3)
else:
var_Z = (7.787 * var_Z) + (16 / 116)
L = (116 * var_Y) - 16
a = 500 * (var_X - var_Y)
b = 200 * (var_Y - var_Z)
# log.info("RGB ({}, {}, {}), L {}, a {}, b {}".format(red, green, blue, L, a, b))
return LabColor(L, a, b, red, green, blue)
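# Sanity check: with the D65 reference white used above,
# rgb2lab((255, 255, 255)) gives approximately L = 100, a = 0, b = 0, and
# get_lab_distance() of any colour with itself is 0.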
class Square(object):
def __init__(self, side, cube, position, red, green, blue):
#self.cube = cube
self.side = side
self.position = position
self.rgb = (red, green, blue)
self.lab = rgb2lab((red, green, blue))
self.color_name = None
self.side_name = None # ULFRBD
def __str__(self):
return "{}{}-{}".format(self.side, self.position, self.color_name)
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return self.position < other.position
class Side(object):
def __init__(self, cube, width, name):
self.cube = cube
self.name = name # U, L, etc
self.color = None
self.squares = OrderedDict()
self.width = width
self.squares_per_side = width * width
self.center_squares = []
self.edge_squares = []
self.corner_squares = []
self.wing_partner = {}
if self.name == "U":
index = 0
elif self.name == "L":
index = 1
elif self.name == "F":
index = 2
elif self.name == "R":
index = 3
elif self.name == "B":
index = 4
elif self.name == "D":
index = 5
self.min_pos = (index * self.squares_per_side) + 1
self.max_pos = (index * self.squares_per_side) + self.squares_per_side
# If this is a cube of odd width (3x3x3) then define a mid_pos
if self.width % 2 == 0:
self.mid_pos = None
else:
self.mid_pos = (self.min_pos + self.max_pos) / 2
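# Example: for a 3x3x3 cube the U side has min_pos 1 and max_pos 9, so
# mid_pos = (1 + 9) / 2 = 5.0 (a float, since Python 3 division is used).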
self.corner_pos = (
self.min_pos,
self.min_pos + self.width - 1,
self.max_pos - self.width + 1,
self.max_pos,
)
self.edge_pos = []
self.edge_north_pos = []
self.edge_west_pos = []
self.edge_south_pos = []
self.edge_east_pos = []
self.center_pos = []
for position in range(self.min_pos, self.max_pos):
if position in self.corner_pos:
pass
# Edges at the north
elif position > self.corner_pos[0] and position < self.corner_pos[1]:
self.edge_pos.append(position)
self.edge_north_pos.append(position)
# Edges at the south
elif position > self.corner_pos[2] and position < self.corner_pos[3]:
self.edge_pos.append(position)
self.edge_south_pos.append(position)
elif (position - 1) % self.width == 0:
west_edge = position
east_edge = west_edge + self.width - 1
# Edges on the west
self.edge_pos.append(west_edge)
self.edge_west_pos.append(west_edge)
# Edges on the east
self.edge_pos.append(east_edge)
self.edge_east_pos.append(east_edge)
# Center squares
for x in range(west_edge + 1, east_edge):
self.center_pos.append(x)
def __str__(self):
return "side-{}".format(self.name)
def __repr__(self):
return self.__str__()
# @timed_function
def set_square(self, position, red, green, blue):
self.squares[position] = Square(self, self.cube, position, red, green, blue)
if position in self.center_pos:
self.center_squares.append(self.squares[position])
elif position in self.edge_pos:
self.edge_squares.append(self.squares[position])
elif position in self.corner_pos:
self.corner_squares.append(self.squares[position])
else:
raise Exception("Could not determine egde vs corner vs center")
# @timed_function
def calculate_wing_partners(self):
for (pos1, pos2) in self.cube.all_edge_positions:
if pos1 >= self.min_pos and pos1 <= self.max_pos:
self.wing_partner[pos1] = pos2
elif pos2 >= self.min_pos and pos2 <= self.max_pos:
self.wing_partner[pos2] = pos1
# @timed_function
def get_wing_partner(self, wing_index):
try:
return self.wing_partner[wing_index]
except KeyError:
#log.info("wing_partner\n%s\n".format(self.wing_partner))
raise
class RubiksColorSolverGenericBase(object):
def __init__(self, width):
self.width = width
self.height = width
self.squares_per_side = self.width * self.width
self.orbits = int(ceil((self.width - 2) / 2.0))
self.state = []
self.orange_baseline = None
self.red_baseline = None
self.all_edge_positions = []
self.write_debug_file = False
if self.width % 2 == 0:
self.even = True
self.odd = False
else:
self.even = False
self.odd = True
#if not os.path.exists(HTML_DIRECTORY):
# os.makedirs(HTML_DIRECTORY)
self.sides = {
"U": Side(self, self.width, "U"),
"L": Side(self, self.width, "L"),
"F": Side(self, self.width, "F"),
"R": Side(self, self.width, "R"),
"B": Side(self, self.width, "B"),
"D": Side(self, self.width, "D"),
}
self.sideU = self.sides["U"]
self.sideL = self.sides["L"]
self.sideF = self.sides["F"]
self.sideR = self.sides["R"]
self.sideB = self.sides["B"]
self.sideD = self.sides["D"]
self.side_order = ("U", "L", "F", "R", "B", "D")
self.pos2side = {}
self.pos2square = {}
# U and B
for (pos1, pos2) in zip(self.sideU.edge_north_pos, reversed(self.sideB.edge_north_pos)):
self.all_edge_positions.append((pos1, pos2))
# U and L
for (pos1, pos2) in zip(self.sideU.edge_west_pos, self.sideL.edge_north_pos):
self.all_edge_positions.append((pos1, pos2))
# U and F
for (pos1, pos2) in zip(self.sideU.edge_south_pos, self.sideF.edge_north_pos):
self.all_edge_positions.append((pos1, pos2))
# U and R
for (pos1, pos2) in zip(self.sideU.edge_east_pos, reversed(self.sideR.edge_north_pos)):
self.all_edge_positions.append((pos1, pos2))
# F and L
for (pos1, pos2) in zip(self.sideF.edge_west_pos, self.sideL.edge_east_pos):
self.all_edge_positions.append((pos1, pos2))
# F and R
for (pos1, pos2) in zip(self.sideF.edge_east_pos, self.sideR.edge_west_pos):
self.all_edge_positions.append((pos1, pos2))
# F and D
for (pos1, pos2) in zip(self.sideF.edge_south_pos, self.sideD.edge_north_pos):
self.all_edge_positions.append((pos1, pos2))
# L and B
for (pos1, pos2) in zip(self.sideL.edge_west_pos, self.sideB.edge_east_pos):
self.all_edge_positions.append((pos1, pos2))
# L and D
for (pos1, pos2) in zip(self.sideL.edge_south_pos, reversed(self.sideD.edge_west_pos)):
self.all_edge_positions.append((pos1, pos2))
# R and D
for (pos1, pos2) in zip(self.sideR.edge_south_pos, self.sideD.edge_east_pos):
self.all_edge_positions.append((pos1, pos2))
# R and B
for (pos1, pos2) in zip(self.sideR.edge_east_pos, self.sideB.edge_west_pos):
self.all_edge_positions.append((pos1, pos2))
# B and D
for (pos1, pos2) in zip(reversed(self.sideB.edge_south_pos), self.sideD.edge_south_pos):
self.all_edge_positions.append((pos1, pos2))
for side in self.sides.values():
side.calculate_wing_partners()
self.calculate_pos2side()
# @timed_function
def calculate_pos2side(self):
for side in self.sides.values():
for x in range(side.min_pos, side.max_pos + 1):
self.pos2side[x] = side
# @timed_function
def calculate_pos2square(self):
for side in self.sides.values():
for (position, square) in side.squares.items():
self.pos2square[position] = square
# @timed_function
def print_cube(self):
data = []
for x in range(3 * self.height):
data.append([])
color_codes = {"OR": 90, "Rd": 91, "Gr": 92, "Ye": 93, "Bu": 94, "Wh": 97}
for side_name in self.side_order:
side = self.sides[side_name]
if side_name == "U":
line_number = 0
prefix = (" " * self.width * 3) + " "
elif side_name in ("L", "F", "R", "B"):
line_number = self.width
prefix = ""
else:
line_number = self.width * 2
prefix = (" " * self.width * 3) + " "
# rows
for y in range(self.width):
data[line_number].append(prefix)
# cols
for x in range(self.width):
color_name = side.squares[
side.min_pos + (y * self.width) + x
].color_name
color_code = color_codes.get(color_name)
if color_name is None:
color_code = 97
data[line_number].append("\033[%dmFo\033[0m" % color_code)
else:
data[line_number].append(
"\033[%dm%s\033[0m" % (color_code, color_name)
)
line_number += 1
output = []
for row in data:
output.append(" ".join(row))
print("Cube\n\n%s\n" % "\n".join(output))
#log.info("Cube\n\n%s\n" % "\n".join(output))
# @timed_function
def cube_for_kociemba_strict(self):
#log.info("color_to_side_name:\n{}\n".format(self.color_to_side_name))
data = []
for side in (self.sideU, self.sideR, self.sideF, self.sideD, self.sideL, self.sideB):
for x in range(side.min_pos, side.max_pos + 1):
square = side.squares[x]
data.append(square.side_name)
return data
# @timed_function
def validate_edge_orbit(self, orbit_id):
if self.width == 2:
from rubikscolorresolver.cube_333 import edge_orbit_wing_pairs
elif self.width == 3:
from rubikscolorresolver.cube_333 import edge_orbit_wing_pairs
elif self.width == 4:
from rubikscolorresolver.cube_444 import edge_orbit_wing_pairs
elif self.width == 5:
from rubikscolorresolver.cube_555 import edge_orbit_wing_pairs
elif self.width == 6:
from rubikscolorresolver.cube_666 import edge_orbit_wing_pairs
elif self.width == 7:
from rubikscolorresolver.cube_777 import edge_orbit_wing_pairs
valid = True
# We need to see which orange/red we can flip that will make the edges valid
wing_pair_counts = {}
for (square1_position, square2_position) in edge_orbit_wing_pairs[orbit_id]:
square1 = self.pos2square[square1_position]
square2 = self.pos2square[square2_position]
wing_pair_string = ", ".join(
sorted([square1.color_name, square2.color_name])
)
# log.info("orbit {}: ({}, {}) is ({})".format(orbit_id, square1_position, square2_position, wing_pair_string))
if wing_pair_string not in wing_pair_counts:
wing_pair_counts[wing_pair_string] = 0
wing_pair_counts[wing_pair_string] += 1
# Are all counts the same?
target_count = None
for (_wing_pair, count) in wing_pair_counts.items():
if target_count is None:
target_count = count
else:
if count != target_count:
valid = False
break
#if not valid:
# log.info("wing_pair_counts:\n{}\n".format(wing_pair_counts))
# log.warning("valid: {}".format(valid))
# assert valid, "Cube is invalid"
return valid
# @timed_function
def find_corners_by_color(self):
green_white_corners = []
green_yellow_corners = []
blue_white_corners = []
blue_yellow_corners = []
if self.width == 2:
from rubikscolorresolver.cube_222 import corner_tuples
elif self.width == 3:
from rubikscolorresolver.cube_333 import corner_tuples
elif self.width == 4:
from rubikscolorresolver.cube_444 import corner_tuples
elif self.width == 5:
from rubikscolorresolver.cube_555 import corner_tuples
elif self.width == 6:
from rubikscolorresolver.cube_666 import corner_tuples
elif self.width == 7:
from rubikscolorresolver.cube_777 import corner_tuples
for corner_tuple in corner_tuples:
corner_colors = []
for position in corner_tuple:
#square = self.pos2square[position]
#corner_colors.add(square.color_name)
corner_colors.append(self.pos2square[position].color_name)
if "Gr" in corner_colors:
if "Wh" in corner_colors:
green_white_corners.append(corner_tuple)
elif "Ye" in corner_colors:
green_yellow_corners.append(corner_tuple)
elif "Bu" in corner_colors:
if "Wh" in corner_colors:
blue_white_corners.append(corner_tuple)
elif "Ye" in corner_colors:
blue_yellow_corners.append(corner_tuple)
return (
green_white_corners,
green_yellow_corners,
blue_white_corners,
blue_yellow_corners,
)
# @timed_function
def find_edges_by_color(self, orbit_id):
if self.width == 2:
from rubikscolorresolver.cube_333 import edge_orbit_wing_pairs
elif self.width == 3:
from rubikscolorresolver.cube_333 import edge_orbit_wing_pairs
elif self.width == 4:
from rubikscolorresolver.cube_444 import edge_orbit_wing_pairs
elif self.width == 5:
from rubikscolorresolver.cube_555 import edge_orbit_wing_pairs
elif self.width == 6:
from rubikscolorresolver.cube_666 import edge_orbit_wing_pairs
elif self.width == 7:
from rubikscolorresolver.cube_777 import edge_orbit_wing_pairs
green_red_orange_color_names = ("Gr", "Rd", "OR")
blue_red_orange_color_names = ("Bu", "Rd", "OR")
white_red_orange_color_names = ("Wh", "Rd", "OR")
yellow_red_orange_color_names = ("Ye", "Rd", "OR")
green_red_or_orange_edges = []
blue_red_or_orange_edges = []
white_red_or_orange_edges = []
yellow_red_or_orange_edges = []
for (square_index, partner_index) in edge_orbit_wing_pairs[orbit_id]:
square = self.pos2square[square_index]
partner = self.pos2square[partner_index]
if (
square.color_name in green_red_orange_color_names
and partner.color_name in green_red_orange_color_names
):
if square.color_name == "Gr":
green_red_or_orange_edges.append((square, partner))
else:
green_red_or_orange_edges.append((partner, square))
elif (
square.color_name in blue_red_orange_color_names
and partner.color_name in blue_red_orange_color_names
):
if square.color_name == "Bu":
blue_red_or_orange_edges.append((square, partner))
else:
blue_red_or_orange_edges.append((partner, square))
elif (
square.color_name in white_red_orange_color_names
and partner.color_name in white_red_orange_color_names
):
if square.color_name == "Wh":
white_red_or_orange_edges.append((square, partner))
else:
white_red_or_orange_edges.append((partner, square))
elif (
square.color_name in yellow_red_orange_color_names
and partner.color_name in yellow_red_orange_color_names
):
if square.color_name == "Ye":
yellow_red_or_orange_edges.append((square, partner))
else:
yellow_red_or_orange_edges.append((partner, square))
return (
green_red_or_orange_edges,
blue_red_or_orange_edges,
white_red_or_orange_edges,
yellow_red_or_orange_edges,
)
# @timed_function
def sanity_check_edges_red_orange_count_for_orbit(self, target_orbit_id):
if (self.width == 4 or self.width == 6 or (self.width == 5 and target_orbit_id == 0)):
high_low_edge_per_color = self.get_high_low_per_edge_color(target_orbit_id)
else:
high_low_edge_per_color = None
def fix_orange_vs_red_for_color(target_color, target_color_red_or_orange_edges):
if len(target_color_red_or_orange_edges) == 2:
red_orange_permutations = (("OR", "Rd"), ("Rd", "OR"))
elif len(target_color_red_or_orange_edges) == 4:
# 4!/(2!*2!) = 6
red_orange_permutations = (
("OR", "OR", "Rd", "Rd"),
("OR", "Rd", "OR", "Rd"),
("OR", "Rd", "Rd", "OR"),
("Rd", "Rd", "OR", "OR"),
("Rd", "OR", "Rd", "OR"),
("Rd", "OR", "OR", "Rd"),
)
else:
raise Exception(
"There should be either 2 or 4 but we have %s"
% target_color_red_or_orange_edges
)
min_distance = None
min_distance_permutation = None
for red_orange_permutation in red_orange_permutations:
distance = 0
for (index, (target_color_square, partner_square)) in enumerate(target_color_red_or_orange_edges):
red_orange = red_orange_permutation[index]
if red_orange == "OR":
distance += sqrt(((partner_square.lab.L - self.orange_baseline.L) ** 2) + ((partner_square.lab.a - self.orange_baseline.a) ** 2) + ((partner_square.lab.b - self.orange_baseline.b) ** 2))
elif red_orange == "Rd":
distance += sqrt(((partner_square.lab.L - self.red_baseline.L) ** 2) + ((partner_square.lab.a - self.red_baseline.a) ** 2) + ((partner_square.lab.b - self.red_baseline.b) ** 2))
else:
raise Exception(red_orange)
partner_square.color_name = red_orange
partner_square.side_name = self.color_to_side_name[partner_square.color_name]
if (self.width == 4 or self.width == 6 or (self.width == 5 and target_orbit_id == 0)):
for (index, (target_color_square, partner_square)) in enumerate(target_color_red_or_orange_edges):
red_orange = red_orange_permutation[index]
edge_color_pair = edge_color_pair_map[
"%s/%s"
% (
target_color_square.color_name,
partner_square.color_name,
)
]
# log.info("high_low_edge_per_color\n%s".format(high_low_edge_per_color))
if len(high_low_edge_per_color[edge_color_pair]) != 2:
# log.warning("*" * 40)
# log.warning("edge_color_pair %s high_low is %s" % (edge_color_pair, high_low_edge_per_color[edge_color_pair]))
# log.warning("*" * 40)
distance += 999
if min_distance is None or distance < min_distance:
min_distance = distance
min_distance_permutation = red_orange_permutation
'''
log.info(
"target edge %s, red_orange_permutation %s, distance %s (NEW MIN)"
% (target_color, ",".join(red_orange_permutation), distance)
)
else:
log.info(
"target edge %s, red_orange_permutation %s, distance %s)"
% (target_color, ",".join(red_orange_permutation), distance)
)
log.info("min_distance_permutation %s" % ",".join(min_distance_permutation))
'''
for (index, (target_color_square, partner_square)) in enumerate(target_color_red_or_orange_edges):
if partner_square.color_name != min_distance_permutation[index]:
'''
log.warning(
"change %s edge partner %s from %s to %s"
% (
target_color,
partner_square,
partner_square.color_name,
min_distance_permutation[index],
)
)
'''
partner_square.color_name = min_distance_permutation[index]
partner_square.side_name = self.color_to_side_name[
partner_square.color_name
]
'''
else:
log.info(
"%s edge partner %s is %s"
% (target_color, partner_square, partner_square.color_name)
)
log.info("\n\n")
'''
(
green_red_or_orange_edges,
blue_red_or_orange_edges,
white_red_or_orange_edges,
yellow_red_or_orange_edges,
) = self.find_edges_by_color(target_orbit_id)
#log.info(
# "orbit %s green_red_or_orange_edges %s"
# % (target_orbit_id, green_red_or_orange_edges)
#)
fix_orange_vs_red_for_color("green", green_red_or_orange_edges)
#log.info(
# "orbit %s blue_red_or_orange_edges %s"
# % (target_orbit_id, blue_red_or_orange_edges)
#)
fix_orange_vs_red_for_color("blue", blue_red_or_orange_edges)
#log.info(
# "orbit %s white_red_or_orange_edges %s"
# % (target_orbit_id, white_red_or_orange_edges)
#)
fix_orange_vs_red_for_color("white", white_red_or_orange_edges)
#log.info(
# "orbit %s yellow_red_or_orange_edges %s"
# % (target_orbit_id, yellow_red_or_orange_edges)
#)
fix_orange_vs_red_for_color("yellow", yellow_red_or_orange_edges)
self.validate_edge_orbit(target_orbit_id)
# @timed_function
def get_high_low_per_edge_color(self, target_orbit_id):
if self.width == 4:
from rubikscolorresolver.cube_444 import edge_orbit_wing_pairs, highlow_edge_values
elif self.width == 5:
from rubikscolorresolver.cube_555 import edge_orbit_wing_pairs, highlow_edge_values
elif self.width == 6:
from rubikscolorresolver.cube_666 import edge_orbit_wing_pairs, highlow_edge_values
else:
raise Exception("Add support for %sx%sx%s" % (self.width, self.width, self.width))
high_low_per_edge_color = {
"Gr/Wh": set(),
"Bu/Wh": set(),
"OR/Wh": set(),
"Rd/Wh": set(),
"Gr/OR": set(),
"Bu/OR": set(),
"Gr/Rd": set(),
"Bu/Rd": set(),
"Gr/Ye": set(),
"Bu/Ye": set(),
"OR/Ye": set(),
"Rd/Ye": set(),
}
for (square_index, partner_index) in edge_orbit_wing_pairs[target_orbit_id]:
square = self.pos2square[square_index]
partner = self.pos2square[partner_index]
# The highlow_edge_values lookup has the same key structure for 4x4x4, 5x5x5 and 6x6x6
highlow = highlow_edge_values[(square_index, partner_index, square.side_name, partner.side_name)]
edge_color_pair = edge_color_pair_map["%s/%s" % (square.color_name, partner.color_name)]
high_low_per_edge_color[edge_color_pair].add(highlow)
# log.info("high_low_per_edge_color for orbit %d\n%s" % (target_orbit_id, high_low_per_edge_color))
# log.info("")
return high_low_per_edge_color
# @timed_function
def sanity_check_edge_squares(self):
for orbit_id in range(self.orbits):
self.sanity_check_edges_red_orange_count_for_orbit(orbit_id)
# @timed_function
def assign_green_white_corners(self, green_white_corners):
# log.info("Gr/Wh corner tuples %s".format(green_white_corners))
valid_green_orange_white = (
["Gr", "OR", "Wh"],
["Wh", "Gr", "OR"],
["OR", "Wh", "Gr"],
)
valid_green_white_red = (
["Gr", "Wh", "Rd"],
["Rd", "Gr", "Wh"],
["Wh", "Rd", "Gr"],
)
for (corner1_index, corner2_index, corner3_index) in green_white_corners:
corner1 = self.pos2square[corner1_index]
corner2 = self.pos2square[corner2_index]
corner3 = self.pos2square[corner3_index]
color_seq = [x.color_name for x in (corner1, corner2, corner3)]
# If this is the case we must flip the orange to red or vice versa
if (
color_seq not in valid_green_orange_white
and color_seq not in valid_green_white_red
):
if corner1.color_name == "OR":
corner1.color_name = "Rd"
#log.warning(
# "change Gr/Wh corner partner %s from OR to Rd" % corner1
#)
elif corner1.color_name == "Rd":
corner1.color_name = "OR"
#log.warning(
# "change Gr/Wh corner partner %s from Rd to OR" % corner1
#)
elif corner2.color_name == "OR":
corner2.color_name = "Rd"
#log.warning(
# "change Gr/Wh corner partner %s from OR to Rd" % corner2
#)
elif corner2.color_name == "Rd":
corner2.color_name = "OR"
#log.warning(
# "change Gr/Wh corner partner %s from Rd to OR" % corner2
#)
elif corner3.color_name == "OR":
corner3.color_name = "Rd"
#log.warning(
# "change Gr/Wh corner partner %s from OR to Rd" % corner3
#)
elif corner3.color_name == "Rd":
corner3.color_name = "OR"
#log.warning(
# "change Gr/Wh corner partner %s from Rd to OR" % corner3
#)
# @timed_function
def assign_green_yellow_corners(self, green_yellow_corners):
valid_green_yellow_orange = (
["Gr", "Ye", "OR"],
["OR", "Gr", "Ye"],
["Ye", "OR", "Gr"],
)
valid_green_red_yellow = (
["Gr", "Rd", "Ye"],
["Ye", "Gr", "Rd"],
["Rd", "Ye", "Gr"],
)
for (corner1_index, corner2_index, corner3_index) in green_yellow_corners:
corner1 = self.pos2square[corner1_index]
corner2 = self.pos2square[corner2_index]
corner3 = self.pos2square[corner3_index]
color_seq = [x.color_name for x in (corner1, corner2, corner3)]
# If this is the case we must flip the orange to red or vice versa
if (
color_seq not in valid_green_yellow_orange
and color_seq not in valid_green_red_yellow
):
if corner1.color_name == "OR":
corner1.color_name = "Rd"
#log.warning(
# "change Gr/Ye corner partner %s from OR to Rd" % corner1
#)
elif corner1.color_name == "Rd":
corner1.color_name = "OR"
#log.warning(
# "change Gr/Ye corner partner %s from Rd to OR" % corner1
#)
elif corner2.color_name == "OR":
corner2.color_name = "Rd"
#log.warning(
# "change Gr/Ye corner partner %s from OR to Rd" % corner2
#)
elif corner2.color_name == "Rd":
corner2.color_name = "OR"
#log.warning(
# "change Gr/Ye corner partner %s from Rd to OR" % corner2
#)
elif corner3.color_name == "OR":
corner3.color_name = "Rd"
#log.warning(
# "change Gr/Ye corner partner %s from OR to Rd" % corner3
#)
elif corner3.color_name == "Rd":
corner3.color_name = "OR"
#log.warning(
# "change Gr/Ye corner partner %s from Rd to OR" % corner3
#)
# @timed_function
def assign_blue_white_corners(self, blue_white_corners):
# log.info("Bu/Wh corner tuples %s".format(blue_white_corners))
valid_blue_white_orange = (
["Bu", "Wh", "OR"],
["OR", "Bu", "Wh"],
["Wh", "OR", "Bu"],
)
valid_blue_red_white = (
["Bu", "Rd", "Wh"],
["Wh", "Bu", "Rd"],
["Rd", "Wh", "Bu"],
)
for (corner1_index, corner2_index, corner3_index) in blue_white_corners:
corner1 = self.pos2square[corner1_index]
corner2 = self.pos2square[corner2_index]
corner3 = self.pos2square[corner3_index]
color_seq = [x.color_name for x in (corner1, corner2, corner3)]
# If this is the case we must flip the orange to red or vice versa
if (
color_seq not in valid_blue_white_orange
and color_seq not in valid_blue_red_white
):
if corner1.color_name == "OR":
corner1.color_name = "Rd"
#log.warning(
# "change Bu/Wh corner partner %s from OR to Rd" % corner1
#)
elif corner1.color_name == "Rd":
corner1.color_name = "OR"
#log.warning(
# "change Bu/Wh corner partner %s from Rd to OR" % corner1
#)
elif corner2.color_name == "OR":
corner2.color_name = "Rd"
#log.warning(
# "change Bu/Wh corner partner %s from OR to Rd" % corner2
#)
elif corner2.color_name == "Rd":
corner2.color_name = "OR"
#log.warning(
# "change Bu/Wh corner partner %s from Rd to OR" % corner2
#)
elif corner3.color_name == "OR":
corner3.color_name = "Rd"
#log.warning(
# "change Bu/Wh corner partner %s from OR to Rd" % corner3
#)
elif corner3.color_name == "Rd":
corner3.color_name = "OR"
#log.warning(
# "change Bu/Wh corner partner %s from Rd to OR" % corner3
#)
# @timed_function
def assign_blue_yellow_corners(self, blue_yellow_corners):
valid_blue_yellow_red = (
["Bu", "Ye", "Rd"],
["Rd", "Bu", "Ye"],
["Ye", "Rd", "Bu"],
)
valid_blue_orange_yellow = (
["Bu", "OR", "Ye"],
["Ye", "Bu", "OR"],
["OR", "Ye", "Bu"],
)
for (corner1_index, corner2_index, corner3_index) in blue_yellow_corners:
corner1 = self.pos2square[corner1_index]
corner2 = self.pos2square[corner2_index]
corner3 = self.pos2square[corner3_index]
color_seq = [x.color_name for x in (corner1, corner2, corner3)]
# If this is the case we must flip the orange to red or vice versa
if (
color_seq not in valid_blue_yellow_red
and color_seq not in valid_blue_orange_yellow
):
if corner1.color_name == "OR":
corner1.color_name = "Rd"
#log.warning(
# "change Bu/Ye corner partner %s from OR to Rd" % corner1
#)
elif corner1.color_name == "Rd":
corner1.color_name = "OR"
#log.warning(
# "change Bu/Ye corner partner %s from Rd to OR" % corner1
#)
elif corner2.color_name == "OR":
corner2.color_name = "Rd"
#log.warning(
# "change Bu/Ye corner partner %s from OR to Rd" % corner2
#)
elif corner2.color_name == "Rd":
corner2.color_name = "OR"
#log.warning(
# "change Bu/Ye corner partner %s from Rd to OR" % corner2
#)
elif corner3.color_name == "OR":
corner3.color_name = "Rd"
#log.warning(
# "change Bu/Ye corner partner %s from OR to Rd" % corner3
#)
elif corner3.color_name == "Rd":
corner3.color_name = "OR"
#log.warning(
# "change Bu/Ye corner partner %s from Rd to OR" % corner3
#)
# @timed_function
def sanity_check_corner_squares(self):
(green_white_corners, green_yellow_corners, blue_white_corners, blue_yellow_corners) = self.find_corners_by_color()
self.assign_green_white_corners(green_white_corners)
self.assign_green_yellow_corners(green_yellow_corners)
self.assign_blue_white_corners(blue_white_corners)
self.assign_blue_yellow_corners(blue_yellow_corners)
# @timed_function
def get_corner_swap_count(self, debug=False):
needed_corners = ["BLU", "BRU", "FLU", "FRU", "DFL", "DFR", "BDL", "BDR"]
to_check = [
(
self.sideU.corner_pos[0],
self.sideL.corner_pos[0],
self.sideB.corner_pos[1],
), # ULB
(
self.sideU.corner_pos[1],
self.sideR.corner_pos[1],
self.sideB.corner_pos[0],
), # URB
(
self.sideU.corner_pos[2],
self.sideL.corner_pos[1],
self.sideF.corner_pos[0],
), # ULF
(
self.sideU.corner_pos[3],
self.sideF.corner_pos[1],
self.sideR.corner_pos[0],
), # UFR
(
self.sideD.corner_pos[0],
self.sideL.corner_pos[3],
self.sideF.corner_pos[2],
), # DLF
(
self.sideD.corner_pos[1],
self.sideF.corner_pos[3],
self.sideR.corner_pos[2],
), # DFR
(
self.sideD.corner_pos[2],
self.sideL.corner_pos[2],
self.sideB.corner_pos[3],
), # DLB
(
self.sideD.corner_pos[3],
self.sideR.corner_pos[3],
self.sideB.corner_pos[2],
), # DRB
]
current_corners = []
for (square_index1, square_index2, square_index3) in to_check:
square1 = self.pos2square[square_index1]
square2 = self.pos2square[square_index2]
square3 = self.pos2square[square_index3]
corner_str = "".join(
sorted([square1.side_name, square2.side_name, square3.side_name])
)
current_corners.append(corner_str)
return get_swap_count(needed_corners, current_corners, debug)
# @timed_function
def corner_swaps_even(self, debug=False):
if self.get_corner_swap_count(debug) % 2 == 0:
return True
return False
# @timed_function
def corner_swaps_odd(self, debug=False):
if self.get_corner_swap_count(debug) % 2 == 1:
return True
return False
# @timed_function
def get_edge_swap_count(self, orbit, debug=False):
needed_edges = []
to_check = []
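        # Each loop below appends only the first square index of its edge group and
        # then breaks, so exactly one wing per edge is recorded for this check.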
# Upper
for square_index in self.sideU.edge_north_pos:
to_check.append(square_index)
needed_edges.append("UB")
break
for square_index in reversed(self.sideU.edge_west_pos):
to_check.append(square_index)
needed_edges.append("UL")
break
for square_index in reversed(self.sideU.edge_south_pos):
to_check.append(square_index)
needed_edges.append("UF")
break
for square_index in self.sideU.edge_east_pos:
to_check.append(square_index)
needed_edges.append("UR")
break
# Left
for square_index in reversed(self.sideL.edge_west_pos):
to_check.append(square_index)
needed_edges.append("LB")
break
for square_index in self.sideL.edge_east_pos:
to_check.append(square_index)
needed_edges.append("LF")
break
# Right
for square_index in reversed(self.sideR.edge_west_pos):
to_check.append(square_index)
needed_edges.append("RF")
break
for square_index in self.sideR.edge_east_pos:
to_check.append(square_index)
needed_edges.append("RB")
break
# Down
for square_index in self.sideD.edge_north_pos:
to_check.append(square_index)
needed_edges.append("DF")
break
for square_index in reversed(self.sideD.edge_west_pos):
to_check.append(square_index)
needed_edges.append("DL")
break
for square_index in reversed(self.sideD.edge_south_pos):
to_check.append(square_index)
needed_edges.append("DB")
break
for square_index in self.sideD.edge_east_pos:
to_check.append(square_index)
needed_edges.append("DR")
break
current_edges = []
for square_index in to_check:
side = self.pos2side[square_index]
partner_index = side.get_wing_partner(square_index)
square1 = self.pos2square[square_index]
square2 = self.pos2square[partner_index]
if square1.side_name in ("U", "D"):
wing_str = square1.side_name + square2.side_name
elif square2.side_name in ("U", "D"):
wing_str = square2.side_name + square1.side_name
elif square1.side_name in ("L", "R"):
wing_str = square1.side_name + square2.side_name
elif square2.side_name in ("L", "R"):
wing_str = square2.side_name + square1.side_name
else:
raise Exception(
"Could not determine wing_str for (%s, %s)" % (square1, square2)
)
current_edges.append(wing_str)
#if debug:
# log.info("current edges: %s" % " ".join(current_edges))
return get_swap_count(needed_edges, current_edges, debug)
# @timed_function
def edge_swaps_even(self, orbit, debug):
if self.get_edge_swap_count(orbit, debug) % 2 == 0:
return True
return False
# @timed_function
def edge_swaps_odd(self, orbit, debug):
if self.get_edge_swap_count(orbit, debug) % 2 == 1:
return True
return False
# @timed_function
def validate_all_corners_found(self):
needed_corners = ["BLU", "BRU", "FLU", "FRU", "DFL", "DFR", "BDL", "BDR"]
to_check = [
(
self.sideU.corner_pos[0],
self.sideL.corner_pos[0],
self.sideB.corner_pos[1],
), # ULB
(
self.sideU.corner_pos[1],
self.sideR.corner_pos[1],
self.sideB.corner_pos[0],
), # URB
(
self.sideU.corner_pos[2],
self.sideL.corner_pos[1],
self.sideF.corner_pos[0],
), # ULF
(
self.sideU.corner_pos[3],
self.sideF.corner_pos[1],
self.sideR.corner_pos[0],
), # UFR
(
self.sideD.corner_pos[0],
self.sideL.corner_pos[3],
self.sideF.corner_pos[2],
), # DLF
(
self.sideD.corner_pos[1],
self.sideF.corner_pos[3],
self.sideR.corner_pos[2],
), # DFR
(
self.sideD.corner_pos[2],
self.sideL.corner_pos[2],
self.sideB.corner_pos[3],
), # DLB
(
self.sideD.corner_pos[3],
self.sideR.corner_pos[3],
self.sideB.corner_pos[2],
), # DRB
]
current_corners = []
for (square_index1, square_index2, square_index3) in to_check:
square1 = self.pos2square[square_index1]
square2 = self.pos2square[square_index2]
square3 = self.pos2square[square_index3]
corner_str = "".join(
sorted([square1.side_name, square2.side_name, square3.side_name])
)
current_corners.append(corner_str)
# We need a way to validate all of the needed_corners are present and
# if not, what do we flip so that we do have all of the needed corners?
for corner in needed_corners:
if corner not in current_corners:
raise Exception("corner {} is missing".format(corner))
# @timed_function
def validate_odd_cube_midge_vs_corner_parity(self):
"""
http://www.ryanheise.com/cube/parity.html
When considering the permutation of all edges and corners together, the
overall parity must be even, as dictated by laws of the cube. However,
when considering only edges or corners alone, it is possible for their
parity to be either even or odd. To obey the laws of the cube, if the edge
parity is even then the corner parity must also be even, and if the edge
parity is odd then the corner parity must also be odd.
"""
if self.even:
return
# TODO add support for 555 and 777
if self.width != 3:
return
debug = False
ref_get_lab_distance = get_lab_distance
try:
edges_even = self.edge_swaps_even(None, debug)
corners_even = self.corner_swaps_even(debug)
if edges_even == corners_even:
return
#log.warning(
# "edges_even %s != corners_even %s, swap most ambiguous orange or red edges to create valid parity"
# % (edges_even, corners_even)
#)
except ListMissingValue:
#log.warning(
# "Either edges or corners are off, swap most ambiguous orange or red edges to create valid parity"
#)
pass
# Reasonable assumptions we can make about why our parity is off:
# - we have a red vs orange backwards somewhere
# - the error will be made on an edge, not a corner. Corners are much easier to get
# correct because once you have correctly IDed green, white, blue and yellow you
# can figure out which corner squares are red and which are orange. Green, white,
# yellow and blue are easy to get correct so it is extremely rare for us to mislabel
# a corner
green_orange_position = None
green_red_position = None
blue_orange_position = None
blue_red_position = None
for side in (self.sideU, self.sideL, self.sideF, self.sideR, self.sideB, self.sideD):
for square in side.edge_squares:
partner_position = side.get_wing_partner(square.position)
partner = self.pos2square[partner_position]
if square.color_name == "Gr" and partner.color_name == "OR":
green_orange_position = partner_position
elif square.color_name == "Gr" and partner.color_name == "Rd":
green_red_position = partner_position
elif square.color_name == "Bu" and partner.color_name == "OR":
blue_orange_position = partner_position
elif square.color_name == "Bu" and partner.color_name == "Rd":
blue_red_position = partner_position
#log.debug("green_orange_position %s".format(green_orange_position))
#log.debug("green_red_position %s".format(green_red_position))
#log.debug("blue_orange_position %s".format(blue_orange_position))
#log.debug("blue_red_position %s".format(blue_red_position))
square_green_orange = self.pos2square[green_orange_position]
square_green_red = self.pos2square[green_red_position]
square_blue_orange = self.pos2square[blue_orange_position]
square_blue_red = self.pos2square[blue_red_position]
# To correct the parity we can swap orange/red for the green edges or
# we can swap orange/red for the blue edges. Which will result in the
# lowest color distance with our orange/red baselines?
distance_swap_green_edge = 0
distance_swap_green_edge += ref_get_lab_distance(square_blue_orange.lab, self.orange_baseline)
distance_swap_green_edge += ref_get_lab_distance(square_blue_red.lab, self.red_baseline)
distance_swap_green_edge += ref_get_lab_distance(square_green_orange.lab, self.red_baseline)
distance_swap_green_edge += ref_get_lab_distance(square_green_red.lab, self.orange_baseline)
distance_swap_blue_edge = 0
distance_swap_blue_edge += ref_get_lab_distance(square_green_orange.lab, self.orange_baseline)
distance_swap_blue_edge += ref_get_lab_distance(square_green_red.lab, self.red_baseline)
distance_swap_blue_edge += ref_get_lab_distance(square_blue_orange.lab, self.red_baseline)
distance_swap_blue_edge += ref_get_lab_distance(square_blue_red.lab, self.orange_baseline)
#log.info("distance_swap_green_edge %s" % distance_swap_green_edge)
#log.info("distance_swap_blue_edge %s" % distance_swap_blue_edge)
if distance_swap_green_edge < distance_swap_blue_edge:
'''
log.warning(
"edge parity correction: change %s from %s to Rd"
% (square_green_orange, square_green_orange.color_name)
)
log.warning(
"edge parity correction: change %s from %s to OR"
% (square_green_red, square_green_red.color_name)
)
'''
square_green_orange.color_name = "Rd"
square_green_red.color_name = "OR"
square_green_orange.side_name = self.color_to_side_name[square_green_orange.color_name]
square_green_red.side_name = self.color_to_side_name[square_green_red.color_name]
else:
'''
log.warning(
"edge parity correction: change %s from %s to Rd"
% (square_blue_orange, square_blue_orange.color_name)
)
log.warning(
"edge parity correction: change %s from %s to OR"
% (square_blue_red, square_blue_red.color_name)
)
'''
square_blue_orange.color_name = "Rd"
square_blue_red.color_name = "OR"
square_blue_orange.side_name = self.color_to_side_name[square_blue_orange.color_name]
square_blue_red.side_name = self.color_to_side_name[square_blue_red.color_name]
edges_even = self.edge_swaps_even(None, debug)
corners_even = self.corner_swaps_even(debug)
assert edges_even == corners_even, (
"parity is still broken, edges_even %s, corners_even %s"
% (edges_even, corners_even)
)
``` |
{
"source": "jmsv/ety-python",
"score": 3
} |
#### File: ety/data/generate.py
```python
import csv
import hashlib
import os
import io
import json
import gc
import requests
import six
from clint.textui import progress
def prepare(source_dir):
"""
    Create data source directory if it does not exist
"""
if not os.path.exists(source_dir):
os.makedirs(source_dir)
def download_dataset(url, dl_path):
"""
Download filtered etymwn from jmsv.me mirror, displaying progress bar
"""
r = requests.get(url, stream=True)
with open(dl_path, "wb") as f:
total_length = int(r.headers.get("content-length"))
chunk_size = 4096
for chunk in progress.bar(
r.iter_content(chunk_size=chunk_size),
expected_size=(total_length / chunk_size) + 1,
):
if chunk:
f.write(chunk)
f.flush()
print("Downloaded to " + dl_path)
def verify_local_data(url, dl_path):
"""
Compare actual file checksum with expected served checksum
Return bool determines whether or not data is (re)downloaded
:return: True if local file matches, otherwise False
"""
try:
with open(dl_path, "rb") as f:
# Local file checksum
actual = hashlib.md5(f.read()).hexdigest()
except EnvironmentError:
        # Return False if the file doesn't exist
return False
expected = requests.get("%s.checksum" % url).text.strip()
return actual == expected
def fix_anomalous_lang_code(code):
if code == "wit":
return "wnw"
return code
def split_elements(compound):
"""
Split source tsv elements at colon
e.g.: 'rel:etymology' => ['rel', 'etymology']
:return: Elements as list
"""
elements = [fix_anomalous_lang_code(e.strip()) for e in compound.split(":")]
if len(elements) == 2:
return elements
return [fix_anomalous_lang_code(elements[0]), ":".join(elements[1:])]
def generate_json(source_path, dir):
"""
Reads source tsv and restructures data as described:
https://github.com/jmsv/ety-python/issues/24
"""
result = {}
print("Loading source tsv")
with io.open(source_path, "r", newline="", encoding="utf-8") as source:
reader = csv.reader(source, delimiter="\t")
source_rows = list(reader)
gc.collect()
print("Structuring data")
for row in progress.bar(source_rows):
source_lang, source_word = split_elements(row[0])
if source_lang not in result:
result[source_lang] = {}
if source_word not in result[source_lang]:
result[source_lang][source_word] = []
dest_lang, dest_word = split_elements(row[2])
result[source_lang][source_word].append({dest_word: dest_lang})
del source_lang, source_word, dest_lang, dest_word
    # Save data to separate files for languages, may be required in the future
# print('Saving language files')
# for key in progress.bar(result):
# with io.open(os.path.join(dir, 'data/ety-%s.json' % key), 'w') as f:
# f.write(json.dumps(result[key], sort_keys=False))
# Save data
print("Writing etymologies file")
with io.open(os.path.join(dir, "etymologies.json"), "w") as f:
json.dump(result, f)
def main():
"""
Define paths, download data if required, generate json dataset
"""
dir = os.path.dirname(os.path.realpath(__file__))
source_dir = os.path.join(dir, "source")
source_path = os.path.join(source_dir, "etymwn.tsv")
source_url = "https://data.jmsv.me/etymwn-filtered.tsv"
# Exit if not Python 3
if not six.PY3:
print("Script should be run as Python 3, exiting")
exit(1)
prepare(source_dir)
# (Re)download data if required
if not verify_local_data(source_url, source_path):
print("Downloading source data")
download_dataset(source_url, source_path)
# If checksum still doesn't match, exit
if verify_local_data(source_url, source_path):
print("Verified local source data")
else:
print("Error verifying local source data, exiting")
exit(1)
else:
print("Verified local source data")
generate_json(source_path, dir)
print("Done")
if __name__ == "__main__":
main()
``` |
{
"source": "jmsv/hackthemidlands18",
"score": 3
} |
#### File: hackthemidlands18/siege-weapon/main.py
```python
import serial
import serial.tools.list_ports
import time
import paho.mqtt.client as mqtt
class Device:
def __init__(self, details):
self.ser = serial.Serial(details)
def close(self):
self.ser.close()
def write(self, message):
self.ser.write(message)
class SerialDevices:
@staticmethod
def select_device(details):
return details
@staticmethod
def list_devices():
return serial.tools.list_ports.comports()
class Motor(Device):
def __init__(self, device):
Device.__init__(self, device)
def rotate(self, direction, sleep_time):
self.write(str.encode(str(direction)))
time.sleep(sleep_time)
self._stop()
pass
def _stop(self):
self.write(str.encode(str(0)))
def main():
mqttc = mqtt.Client()
mqttc.connect(
'broker.mqttdashboard.com',
1883,
60
)
device_list = SerialDevices.list_devices()
device = Motor(
SerialDevices.select_device(device_list[0].device)
)
# Our actions
def on_connect(client, userdata, flags, rc):
# subscribe to the channel
client.subscribe('covhack/superior-siege-weapon')
def on_message(client, userdata, msg):
device.rotate(1, 15)
mqttc.on_connect = on_connect
mqttc.on_message = on_message
mqttc.loop_forever()
if __name__ == "__main__":
main()
``` |
{
"source": "jmsv/jameshash",
"score": 4
} |
#### File: jmsv/jameshash/tests.py
```python
import unittest
from jameshash import hash_password, check_password, merge_lists, largest_prime_factor
class CheckHashes(unittest.TestCase):
def test_hash_password(self):
"""
Checks hashes are as expected for password/username input
"""
self.assertEqual(
hash_password("password", "username"),
"<PASSWORD>"
)
self.assertEqual(
hash_password("password1", "username"),
"<PASSWORD>"
)
self.assertEqual(
hash_password("<PASSWORD>", "username"),
"<PASSWORD>"
)
def test_check_password(self):
"""
Checks password/username/hash checker returns correct True/False
"""
# Check True is returned for correct password/username/hash
self.assertTrue(check_password(
"<PASSWORD>", "<PASSWORD>_",
"<PASSWORD>")
)
# Check False is returned for wrong password/username/hash
self.assertFalse(check_password(
"guessed password", "<PASSWORD>_",
"<PASSWORD>")
)
self.assertFalse(check_password(
"<PASSWORD>", "<PASSWORD>",
"<PASSWORD>")
)
def test_merge_lists(self):
"""
        Test method to merge two lists together in the format:
[1, 2, 3, 4, 5, 6] & ['a', 'b', 'c']
=> [1, 'a', 2, 'b', 3, 'c', 4, 5, 6]
"""
self.assertEqual(
merge_lists([1, 2, 3, 4, 5], ['a', 'b', 'c']),
[1, 'a', 2, 'b', 3, 'c', 4, 5]
)
self.assertEqual(
merge_lists(['a', 'b', 'c', 'd', 'e'], ['x', 'y', 'z']),
['a', 'x', 'b', 'y', 'c', 'z', 'd', 'e']
)
def test_largest_prime_factor(self):
"""
        Test method that calculates a number's largest prime factor
:return:
"""
self.assertEqual(largest_prime_factor(35), 7)
# High-value test cases from:
# http://www.javascripter.net/math/calculators/primefactorscalculator.htm
self.assertEqual(largest_prime_factor(39865), 67)
self.assertEqual(largest_prime_factor(9007199254740991), 20394401)
self.assertEqual(largest_prime_factor(9007199254740993), 28059810762433)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmsv/rockstar-py",
"score": 3
} |
#### File: rockstar-py/rockstarpy/command_line.py
```python
import sys
import argparse
from rockstarpy import convert
parser = argparse.ArgumentParser(description="Python transpiler for the esoteric language Rockstar")
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('--input', action='store', help='Input file (.rock)')
input_group.add_argument('--stdin', action='store_true', help='Stream in stdin')
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument('--output', action='store', help='Output file (.py)', default='output.py')
output_group.add_argument('--stdout', action='store_true', help='Stream to stdout')
parser.add_argument('-v', action='version', help='Version', version='1.3.6')
args = parser.parse_args()
def command_line():
    # connect input
if args.stdin:
lyrics = sys.stdin
else:
lyrics = open(args.input, 'r')
# connect output
if args.stdout:
enc = False
output = sys.stdout
else:
enc = True
output = open(args.output, 'wb', 0)
# Read, Convert, Write, loop
for line in lyrics:
output.write( encode( convert.convert_line(line), enc ) )
# close input
if not args.stdin:
lyrics.close()
# close output
if not args.stdout:
output.close()
def encode(line, enc):
if enc:
return line.encode()
return line
```
#### File: rockstar-py/rockstarpy/convert.py
```python
import re
global ident
global regex_variables
global most_recently_named
ident = 0
regex_variables = r'\b(?:(?:[Aa]n?|[Tt]he|[Mm]y|[Yy]our) [a-z]+|[A-Z][A-Za-z]+(?: [A-Z][A-Za-z]+)*)\b'
most_recently_named = ''
simple_subs = {
'(':'#',
')':'',
'Give back':'return',
'Take it to the top':'continue',
'Break it down':'break',
' false ':' False ',
' wrong ':' False ',
' no ':' False ',
' lies ':' False ',
' null ':' False ',
' nothing ':' False ',
' nowhere ':' False ',
' nobody ':' False ',
' empty ':' False ',
' gone ':' False ',
' mysterious ':' False ',
' true ':' True ',
' right ':' True ',
' yes ':' True ',
' ok ':' True ',
' plus ':' + ',
' with ':' + ',
' minus ':' - ',
' without ':' - ',
' times ':' * ',
' of ':' * ',
' over ':' / ',
' is higher than ':' > ',
' is greater than ':' > ',
' is bigger than ':' > ',
' is stronger than ':' > ',
' is lower than ':' < ',
' is less than ':' < ',
' is smaller than ':' < ',
' is weaker than ':' < ',
' is as high as ':' >= ',
' is as great as ':' >= ',
' is as big as ':' >= ',
' is as strong as ':' >= ',
' is as low as ':' <= ',
' is as little as ':' <= ',
' is as small as ':' <= ',
' is as weak as ':' <= ',
' is not ':' != ',
' ain\'t ':' != ',
'Until ':'while not ',
'While ':'while '
}
def get_comments(line):
if '(' in line:
line, comment = line.split('(')
comment = ' #' + comment.strip(')\n ')
else:
comment = ''
return line, comment
def create_function(line):
global ident
match = re.match(r'\b({0}) takes ({0}(?: and {0})*)\b'.format(regex_variables), line)
if match:
ident += 1
line = 'def {}({}):'.format(match.group(1), match.group(2).replace(' and', ','))
return line
def create_while(line):
global ident
if line.startswith('while '):
line = line.replace(' is ', ' == ')
line += ':'
ident += 1
return line
def create_if(line):
global ident
match = re.match(r'If .*', line)
if match:
ident += 1
line = line.replace(' is ', ' == ')
line = line.replace('If', 'if')
line += ':'
return line
def find_poetic_number_literal(line):
poetic_type_literals_keywords = ['true', 'false', 'nothing', 'nobody', 'nowhere', 'empty', 'wrong', 'gone', 'no', 'lies', 'right', 'yes', 'ok', 'mysterious']
match = re.match(r'\b({})(?: is|\'s| was| were) (.+)'.format(regex_variables), line)
if match and match.group(2).split()[0] not in poetic_type_literals_keywords:
line = '{} = '.format(match.group(1))
for word_number in match.group(2).split():
period = '.' if word_number.endswith('.') else ''
alpha_word = re.sub('[^A-Za-z]', '', word_number)
line += str(len(alpha_word) % 10) + period
return line
def find_proper_variables(line):
match_list = re.findall(r'\b[A-Z][A-Za-z]+(?: [A-Z][A-Za-z]+)*\b', line)
if match_list:
for match in match_list:
line = line.replace(match, match.replace(' ', '_'))
return line
def find_common_variables(line):
match_list = re.findall(r'\b([Aa]n?|[Tt]he|[Mm]y|[Yy]our) ([a-z]+)\b', line)
if match_list:
for match in match_list:
line = line.replace(' '.join(match), '{}_{}'.format(*match).lower())
return line
def find_named(line):
match = re.match(r'([A-Za-z]+(?:_[A-Za-z]+)*) [+-]?= .+', line)
if match:
return match.group(1)
def get_strings(line):
says_match = re.match(r'({}) says (.*)'.format(regex_variables), line)
if says_match:
line = says_match.group(1) + ' = "{}"'
return line, says_match.group(2)
quotes_match = re.match(r'([^\"]* )(\".*\"(?:, ?\".*\")*)([^\"]*)', line)
if quotes_match:
line = quotes_match.group(1) + '{}' + quotes_match.group(3)
return line, quotes_match.group(2)
return line, None
def convert_line(line):
global ident
global most_recently_named
if line == '\n':
ident = ident - 1 if ident > 0 else 0
return ''
else:
line_ident = ' ' * ident
line, comments = get_comments(line)
line, line_strings = get_strings(line)
py_line = find_poetic_number_literal(line)
for key in simple_subs:
py_line = py_line.strip()
py_line += ' '
py_line = py_line.replace(key, simple_subs[key])
py_line = py_line.strip('\n ,.;')
most_recently_named_keywords = [' it ', ' he ', ' she ', ' him ', ' her ', ' them ', ' they ',
' ze ', ' hir ', ' zie ', ' zir ', ' xe ', ' xem ', ' ve ', ' ver ']
for keyword in most_recently_named_keywords:
py_line = py_line.replace(keyword, ' {} '.format(most_recently_named))
py_line = create_function(py_line)
py_line = create_while(py_line)
py_line = create_if(py_line)
line_ident = ' ' * (ident - 1) if py_line == 'Else' else line_ident
py_line = 'else:' if py_line == 'Else' else py_line
py_line = re.sub(r'Put (.*) into ({})'.format(regex_variables), r'\g<2> = \g<1>', py_line)
py_line = re.sub(r'Build ({}) up'.format(regex_variables), r'\g<1> += 1', py_line)
py_line = re.sub(r'Knock ({}) down'.format(regex_variables), r'\g<1> -= 1', py_line)
py_line = re.sub(r'Listen to ({})'.format(regex_variables), r'\g<1> = input()', py_line)
py_line = re.sub(r'(?:Say|Shout|Whisper|Scream) (.*)', r'print(\g<1>)', py_line)
py_line = py_line.replace(' is ', ' = ', 1)
py_line = re.sub(r'({0}) taking ((?:{0}|\"[^\"]*\"|[0-9]+)(?:, ?(?:{0}|\"[^\"]*\"|[0-9]+))*)'.format(regex_variables), r'\g<1>(\g<2>)', py_line)
py_line = find_proper_variables(py_line)
py_line = find_common_variables(py_line)
line_named = find_named(py_line)
most_recently_named = line_named if line_named else most_recently_named
py_line = py_line.format(line_strings) if line_strings else py_line
return line_ident + py_line + comments + '\n'
```
#### File: rockstar-py/tests/run_tests.py
```python
import os
import sys
import difflib
sys.path=[os.path.dirname(os.path.dirname(os.path.realpath(__file__)))]+sys.path
from rockstarpy import convert
def check_files_identical(expected, actual):
diff = difflib.unified_diff(
expected,
actual,
fromfile='expected',
tofile='actual',
)
line = None
for line in diff:
print(line, end='')
if line is not None:
print()
assert False, "There are differences"
def main():
files = os.listdir('.')
rock_files = filter(lambda f: f.split(".")[-1] in ['rock','rockstar','lyrics'] , files)
py_files = set(filter(lambda f: f.endswith('.py'), files))
for rock_file in rock_files:
print("testing", rock_file)
file_name = os.path.splitext(rock_file)[0] # take off extension
py_file = file_name + ".py"
        assert py_file in py_files, "Did not create a corresponding expected output for " + rock_file
converted_code = ''
with open(rock_file, 'r') as rockstar_file:
for line in rockstar_file:
converted_code += convert.convert_line(line)
with open(file_name +".py", 'r') as expected:
expected_code = expected.read()
check_files_identical(expected_code, converted_code)
if __name__ == '__main__':
main()
``` |
{
"source": "jmsv/specsavers",
"score": 3
} |
#### File: specsavers/specsavers/api.py
```python
import json
import requests
from requests_html import HTMLSession
def retry_on_token_failure(func):
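    # Wraps an Api method so that an AuthenticationError (e.g. an expired access
    # token) triggers one token refresh followed by a single retry of the call.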
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
        except AuthenticationError:
            api = args[0]
            # The token attribute is name-mangled inside the class, so refresh it via
            # its mangled name; a plain __token assignment here would only create a
            # new, unused class attribute.
            api.__class__._Api__token = api.fetch_token()
            result = func(*args, **kwargs)
return result
return wrapper
class AuthenticationError(Exception):
...
class Api:
base_url = "https://www.specsavers.co.uk"
__token = ""
def __init__(self):
if not self.__class__.__token:
self.__class__.__token = self.fetch_token()
def fetch_token(self):
html = self.fetch_booking_page()
script_tags = html.find("script")
token_script = [
element for element in script_tags
if "data-integrity" in element.attrs]
if not token_script:
return ""
return token_script[0].attrs["data-integrity"]
def fetch_booking_page(self):
page = HTMLSession().get(f"{self.base_url}/book/nottingham")
return page.html
def store_exists(self, store_name):
store_page = requests.head(f"{self.base_url}/book/{store_name}")
if store_page.status_code == 200:
return True
else:
return False
@retry_on_token_failure
def fetch_store_details(self, store_name):
url = (f"{self.base_url}/appointment/api/v1/store"
f"?url-name={store_name}")
store_details = self.__make_request(url)
try:
return store_details.json()
except json.decoder.JSONDecodeError:
return {}
@retry_on_token_failure
def fetch_appointments(self, store, date, kind):
url = (f"{self.base_url}/appointment/api/v1/appointment/"
f"slot?epos={store.epos}&business-type=opticians&"
f"appointment-type={kind}"
f"&date-from={date.iso8601().split('T')[0]}")
appointment_details = self.__make_request(url)
try:
return appointment_details.json()
except json.decoder.JSONDecodeError:
return {}
def __make_request(self, url):
request = requests.get(
url, headers={"X-Access-Token": self.__token})
if request.status_code == 401:
raise AuthenticationError("Unable to authenticate.")
return request
def list_of_store_names(self, latitude, longitude):
html = self.fetch_store_select_page(latitude, longitude)
store_divs = html.find(".store-name")
return [
div.find("a", first=True).attrs["href"].replace("/stores/", "")
for div in store_divs]
def fetch_store_select_page(self, latitude, longitude):
page = HTMLSession().get(
f"{self.base_url}/stores/select-a-store/x"
f"/{latitude},{longitude}")
return page.html
```
#### File: specsavers/tests/test_api.py
```python
import unittest
from requests_html import HTML
class TestApi(unittest.TestCase):
def setUp(self):
from specsavers.api import Api
self.test_token = (
"<KEY>
"<KEY>"
"iwiZXhwIjoxNTIzNzA4MDU5fQ.2U5GYS_SbZq"
"4bEUpTA6Em0l4XF3jrMtjBOZyCxZMe3Q")
Api._Api__token = self.test_token
with open("tests/page_book.html", "r") as book:
self.booking_page = book.read()
with open("tests/page_store_select.html", "r") as store_select:
self.store_select_page = store_select.read()
Api.fetch_booking_page = lambda *_: HTML(html=self.booking_page)
Api.fetch_store_select_page = \
lambda *_: HTML(html=self.store_select_page)
self.api = Api()
def test_getting_token_from_html(self):
token = self.api.fetch_token()
self.assertEqual(token, self.test_token)
def test_parsing_for_store_names(self):
store_names = self.api.list_of_store_names(1234, 5678)
self.assertEqual(store_names, ["woolwich", "barking", "eastham"])
def test_(self):
...
```
#### File: specsavers/tests/test_store.py
```python
import unittest
from tests import MockApi
class TestStore(unittest.TestCase):
def setUp(self):
import specsavers
self.specsavers = specsavers
self.specsavers.Store.api = MockApi
self.specsavers.StoreList.api = MockApi
def test_no_fetching_details_on_search(self):
store = self.specsavers.Store("nottingham", from_search=True)
self.assertEqual(store.json, {})
def test_details_fetched_once_attr_accessed(self):
store = self.specsavers.Store("nottingham", from_search=True)
store.business_type
self.assertEqual(store.business_type, "opticians")
def test_store_has_attributes(self):
store = self.specsavers.Store("nottingham")
self.assertIn("business_type", dir(store))
def test_store_has_attributes_once_attr_accessed(self):
store = self.specsavers.Store("nottingham", from_search=True)
store.epos
self.assertIn("business_type", dir(store))
def test_find_gets_store(self):
store = self.specsavers.find("nottingham")
self.assertIsInstance(store, self.specsavers.Store)
def test_locate_gets_store_list(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
self.assertIsInstance(store_list, self.specsavers.StoreList)
def test_store_list_indexable(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
store = store_list[0]
self.assertIsInstance(store, self.specsavers.Store)
def test_store_list_sliceable(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
store_slice = store_list[0:1]
self.assertIsInstance(store_slice, self.specsavers.StoreList)
def test_store_list_lookupable(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
store = store_list["woolwich"]
self.assertIsInstance(store, self.specsavers.Store)
def test_store_list_iterable(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
for store in store_list:
self.assertTrue(True)
def test_store_iterating_no_details(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
for store in store_list:
self.assertEqual(store.json, {})
def test_store_list_items_keep_fetched_data(self):
store_list = self.specsavers.locate(
latitude=51.507879, longitude=0.087732)
# NOTE: This test doesn't fetch the Store object from the StoreList,
# it instead accesses the object by index. This test ensures
        #       that the Store correctly keeps its information once fetched.
self.assertEqual(store_list[0].json, {})
store_list[0].epos
self.assertTrue(
store_list[0].json,
f"{store_list[0]} does not have json populated")
def test_getting_appointments(self):
store = self.specsavers.Store("nottingham")
appointments = store.appointments("2018-04-15")
from specsavers.appointment import Appointment
self.assertIsInstance(appointments[0], Appointment)
``` |
{
"source": "jmsv/weighted-dict",
"score": 3
} |
#### File: jmsv/weighted-dict/tests.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import unittest
from collections import Counter
from weightedDict import WeightedDict
class TestWeightedDict(unittest.TestCase):
# Usage example
def test_main(self):
random.seed(42)
wdict = WeightedDict()
wdict['dog'] = 38.2
wdict['cat'] = 201.7
wdict['cow'] = 222.3
wdict['ostrich'] = 0.
wdict['cow'] = 31.5 # Change the weight for cow
wdict['unicorn'] = 0.01
wdict['wolf'] = 128.1
wdict['bear'] = 12.1
wdict['aardvark'] = 9.1
print(wdict['dog'])
print(wdict.sample())
print(wdict.keys())
wdict.pop('cat') # Remove the cat
dasum = 0.
tallies = {}
num_samples = 100000
for i in wdict:
tallies[i] = 0
for _ in range(num_samples):
tallies[wdict.sample()] += 1
for i in wdict:
dasum += wdict[i]
for i in wdict:
print(i, tallies[i], '%.2f' % (num_samples * wdict[i] / dasum))
print(wdict)
# A more rigorous test
def test_big(self):
random.seed(42)
dstr = 'bcdefghijklmnopqrstuvwxyz'
data = {i: j for i, j in zip(dstr, [x + 1 for x in range(len(dstr))])}
foo = WeightedDict()
for i in dstr:
foo[i] = data[i]
# Check the sampling
bar = Counter()
dnum = 10000
for _ in range(dnum):
bar[foo.sample()] += 1
den = sum(data.values())
vals = {i: int(dnum * (j / den)) for i, j in data.items()}
self.assertEqual(set(vals.keys()), set(bar.keys()))
dsum = 0
for i in sorted(vals):
dif = abs(vals[i] - bar[i])
dsum += dif
print(i, vals[i], bar[i])
print('Total percent from max: ' + str(100 * float(dsum) / dnum) + '%')
self.assertLess((100 * float(dsum) / dnum), 10)
# Check insert and deletion consistency.
data2 = data.copy()
for ii in range(30000):
foo.check_tree()
toggle = random.choice(dstr)
print(ii, toggle, dstr)
if toggle not in data2:
data2[toggle] = data[toggle]
foo[toggle] = data[toggle]
else:
data2.pop(toggle)
foo.pop(toggle)
self.assertEqual(tuple(foo.keys()), tuple(sorted(data2.keys())))
for i, j in data2.items():
self.assertLess(abs(foo[i] - j), .000000001)
# Test emptying the tree
if ii % 10000 == 0:
dkeys = foo.keys()
random.shuffle(dkeys)
for toggle in dkeys:
foo.check_tree()
data2.pop(toggle)
foo.pop(toggle)
self.assertEqual(
tuple(foo.keys()), tuple(sorted(data2.keys())))
for i, j in data2.items():
self.assertLess(abs(foo[i] - j), .000000001)
print(foo)
print('Success. Yay!')
# Note that the test output isn't identical across Python versions (2
# & 3) because random.seed has changed. We could use version=1, but
# that's not compatible with Python 2.
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JMSwag/flask-sse",
"score": 3
} |
#### File: flask-sse/tests/test_message.py
```python
import pytest
from flask_sse import Message
def test_empty_message():
with pytest.raises(TypeError):
m = Message()
def test_simple_data():
m = Message("foo")
assert m.data == "foo"
assert m.type == None
assert m.id == None
assert m.retry == None
assert m.to_dict() == {"data": "foo"}
assert repr(m) == "Message('foo')"
assert str(m) == 'data:foo\n\n'
def test_data_dict():
m = Message({"message": "Hello!"})
assert m.data == {"message": "Hello!"}
assert m.type == None
assert m.id == None
assert m.retry == None
assert m.to_dict() == {"data": {"message": "Hello!"}}
assert repr(m) == "Message({'message': 'Hello!'})"
assert str(m) == 'data:{"message": "Hello!"}\n\n'
def test_multiline_data():
m = Message("foo\nbar")
assert m.data == "foo\nbar"
assert m.type == None
assert m.id == None
assert m.retry == None
assert m.to_dict() == {"data": "foo\nbar"}
assert repr(m) == "Message('foo\\nbar')"
assert str(m) == 'data:foo\ndata:bar\n\n'
def test_all_args():
m = Message("foo", type="example", id=5, retry=500)
assert m.data == "foo"
assert m.type == "example"
assert m.id == 5
assert m.retry == 500
assert m.to_dict() == {
"data": "foo",
"type": "example",
"id": 5,
"retry": 500,
}
assert repr(m) == "Message('foo', type='example', id=5, retry=500)"
assert str(m) == 'event:example\ndata:foo\nid:5\nretry:500\n\n'
def test_equality():
m1 = Message("abc")
m2 = Message("abc")
assert m1 == m2
m3 = Message("abc", type="example")
assert m1 != m3
m4 = Message("def")
assert m1 != m4
``` |
{
"source": "jmswaney/tif2jp2",
"score": 2
} |
#### File: tif2jp2/src/tif_downsampler.py
```python
import tkinter as tk
from tkinter import filedialog
from tkinter import ttk
from pathlib import Path
import tifffile
# from PIL import Image
import multiprocessing
from skimage.measure import block_reduce
from raw import raw_imread
DOWNSAMPLE_OPTIONS = ['2', '4', '8']
def save_as_downsampled(arg_dict):
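    # Reads a single tif/raw image, downsamples it by block reduction, and writes a
    # compressed tif into the mirrored subdirectory under the output path.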
input_path = arg_dict['input_path']
output_path = arg_dict['output_path']
tif_path = arg_dict['tif_path']
downsample_factor = arg_dict['downsample_factor']
    if tif_path.suffix.lower() in ('.tif', '.tiff'):
tif_img = tifffile.imread(str(tif_path))
else:
tif_img = raw_imread(str(tif_path))
downsampled_tif = block_reduce(tif_img, block_size=(downsample_factor, downsample_factor))
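    # Note: block_reduce defaults to summing each block; the cast back to the input
    # dtype below can overflow for integer images. Passing func=numpy.mean would give
    # an averaged downsample instead (assumption about intent, not original behaviour).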
downsampled_tif = downsampled_tif.astype(tif_img.dtype)
# img = Image.fromarray(tif_img)
output_subdir = output_path.joinpath(tif_path.relative_to(input_path).parent)
output_subdir.mkdir(parents=True, exist_ok=True)
downsampled_filename = tif_path.stem + '.tif'
downsampled_path = output_subdir.joinpath(downsampled_filename)
tifffile.imsave(str(downsampled_path), downsampled_tif, compress=1)
# img.save(downsampled_path, quality_mode='rates', quality_layers=[20])
class MainApplication(tk.Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.build_elements()
def build_elements(self):
self.parent.title('tif_downsampler')
# Setup the grid layout
self.parent.rowconfigure(5, weight=1)
self.parent.columnconfigure(5, weight=1)
self.grid(sticky=tk.W + tk.E + tk.N + tk.S)
        # Add an entry box for the input directory
self.input_entry = tk.Entry(self, width=60)
self.input_entry.grid(row=1, column=1, padx=2, pady=2, sticky=tk.W)
self.input_entry.insert(0, 'Browse to the root image directory with tifs -->')
# Add a progress bar
self.progress_bar = ttk.Progressbar(self, length=360, mode='determinate')
self.progress_bar.grid(row=2, column=1, padx=2, pady=2, sticky=tk.E)
# Create variables to store the directories
self.input_path = None
self.output_path = None
# Make a browse button
self.browse_btn = tk.Button(self, text='Browse', width=10, command=self.get_directory)
self.browse_btn.grid(row=1, column=2, sticky=tk.W)
# Make a convert button
self.convert_btn = tk.Button(self, text='Convert', width=10, command=self.convert)
self.convert_btn.grid(row=2, column=2, sticky=tk.W)
# Give options for downsampling
self.downsample_factor = tk.StringVar(self.parent)
self.downsample_factor.set(DOWNSAMPLE_OPTIONS[0])
self.dropdown = tk.OptionMenu(self.parent, self.downsample_factor, *DOWNSAMPLE_OPTIONS)
self.dropdown.grid(row=0, column=3, sticky=tk.W)
def set_entry_text(self, text):
self.input_entry.delete(0, tk.END)
self.input_entry.insert(0, text)
def get_directory(self):
browse_str = filedialog.askdirectory(parent=self.parent, title='Please select the root image directory')
in_p = Path(browse_str)
if in_p.exists():
self.set_entry_text(str(in_p))
self.input_path = in_p
def convert(self):
if self.input_path is not None and self.input_path.exists() and self.input_path.is_dir():
self.output_path = Path(self.input_path.parent).joinpath(str(self.input_path)+'_downsampled')
self.output_path.mkdir(exist_ok=True)
tif_paths = list(self.input_path.glob('**/*.tif*'))
nb_tifs = len(tif_paths)
            if nb_tifs == 0:
                tif_paths = list(self.input_path.glob('**/*.raw'))
                nb_tifs = len(tif_paths)
self.progress_bar['value'] = 0
self.progress_bar['maximum'] = nb_tifs-1
arg_dicts = []
for i, tif_path in enumerate(tif_paths):
arg_dict = {
'input_path': self.input_path,
'output_path': self.output_path,
'tif_path': tif_path,
'downsample_factor': int(self.downsample_factor.get()),
}
arg_dicts.append(arg_dict)
nb_cpu = multiprocessing.cpu_count()
nb_processes = max(1, nb_cpu-1)
with multiprocessing.Pool(processes=nb_processes) as p:
for i, _ in enumerate(p.imap_unordered(save_as_downsampled, arg_dicts)):
self.progress_bar['value'] = i
self.parent.update()
if __name__ == '__main__':
multiprocessing.freeze_support()
root = tk.Tk()
app = MainApplication(root)
root.mainloop()
``` |
{
"source": "jmswen/eden",
"score": 2
} |
#### File: eden/cli/config.py
```python
import binascii
import collections
import datetime
import errno
import fcntl
import json
import os
import shutil
import stat
import tempfile
import types
import typing
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type, Union, cast
import eden.thrift
import facebook.eden.ttypes as eden_ttypes
import toml
from . import configinterpolator, configutil, util
from .util import EdenStartError, HealthStatus, print_stderr, readlink_retry_estale
# Use --etcEdenDir to change the value used for a given invocation
# of the eden cli.
DEFAULT_ETC_EDEN_DIR = "/etc/eden"
# These are INI files that hold config data.
# CONFIG_DOT_D is relative to DEFAULT_ETC_EDEN_DIR, or whatever the
# effective value is for that path
CONFIG_DOT_D = "config.d"
# USER_CONFIG is relative to the HOME dir for the user
USER_CONFIG = ".edenrc"
# These paths are relative to the user's client directory.
CLIENTS_DIR = "clients"
CONFIG_JSON = "config.json"
# These are files in a client directory.
CLONE_SUCCEEDED = "clone-succeeded"
MOUNT_CONFIG = "config.toml"
SNAPSHOT = "SNAPSHOT"
SNAPSHOT_MAGIC = b"eden\x00\x00\x00\x01"
DEFAULT_REVISION = { # supported repo name -> default bookmark
"git": "refs/heads/master",
"hg": ".",
}
SUPPORTED_REPOS = DEFAULT_REVISION.keys()
REPO_FOR_EXTENSION = {".git": "git", ".hg": "hg"}
# Create a readme file with this name in the mount point directory.
# The intention is for this to contain instructions telling users what to do if their
# Eden mount is not currently mounted.
NOT_MOUNTED_README_PATH = "README_EDEN.txt"
# The path under /etc/eden where site-specific contents for the not-mounted README can
# be found.
NOT_MOUNTED_SITE_SPECIFIC_README_PATH = "NOT_MOUNTED_README.txt"
# The default contents for the not-mounted README if a site-specific template
# is not found.
NOT_MOUNTED_DEFAULT_TEXT = """\
This directory is the mount point for a virtual checkout managed by Eden.
If you are seeing this file that means that your repository checkout is not
currently mounted. This could either be because the edenfs daemon is not
currently running, or it simply does not have this checkout mounted yet.
You can run "eden doctor" to check for problems with Eden and try to have it
automatically remount your checkouts.
"""
assert sorted(REPO_FOR_EXTENSION.values()) == sorted(SUPPORTED_REPOS)
class UsageError(Exception):
pass
class CheckoutConfig(typing.NamedTuple):
"""Configuration for an Eden checkout. A checkout stores its config in config.toml
it its state directory (.eden/clients/<checkout_name>/config.toml)
- backing_repo: The path where the true repo resides on disk. For mercurial backing
repositories this does not include the final ".hg" directory component.
- scm_type: "hg" or "git"
- bind_mounts: dict where keys are private pathnames under ~/.eden where the
files are actually stored and values are the relative pathnames in the
EdenFS mount that maps to them.
"""
backing_repo: Path
scm_type: str
bind_mounts: Dict[str, str]
default_revision: str
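# Illustrative only (made-up values): a mercurial checkout could be represented as
#   CheckoutConfig(backing_repo=Path("/home/alice/repos/myrepo"), scm_type="hg",
#                  bind_mounts={"buck-out": "buck-out"}, default_revision=".")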
class EdenInstance:
"""This class contains information about a particular edenfs instance.
It provides APIs for communicating with edenfs over thrift and for examining and
modifying the list of checkouts managed by this edenfs instance.
"""
def __init__(
self,
config_dir: Union[Path, str, None],
etc_eden_dir: Union[Path, str, None],
home_dir: Union[Path, str, None],
interpolate_dict: Optional[Dict[str, str]] = None,
) -> None:
self._etc_eden_dir = Path(etc_eden_dir or DEFAULT_ETC_EDEN_DIR)
self._home_dir = Path(home_dir) if home_dir is not None else util.get_home_dir()
self._user_config_path = self._home_dir / USER_CONFIG
self._interpolate_dict = interpolate_dict
# TODO: We should eventually read the default config_dir path from the config
# files rather than always using ~/local/.eden
#
# We call resolve() to resolve any symlinks in the config directory location.
# This is particularly important when starting edenfs, since edenfs in some
# cases will try to access this path as root (e.g., when creating bind mounts).
# In some cases this path may traverse symlinks that are readable as the
# original user but not as root: this can happen if the user has a home
# directory on NFS, which may not be readable as root.
if config_dir:
self._config_dir = Path(config_dir)
elif os.name == "nt":
self._config_dir = self._home_dir / ".eden"
else:
self._config_dir = self._home_dir / "local" / ".eden"
self._config_dir = self._config_dir.resolve(strict=False)
def __repr__(self) -> str:
return f"EdenInstance({self._config_dir!r})"
@property
def state_dir(self) -> Path:
return self._config_dir
def _loadConfig(self) -> configutil.EdenConfigParser:
""" to facilitate templatizing a centrally deployed config, we
allow a limited set of env vars to be expanded.
${HOME} will be replaced by the user's home dir,
${USER} will be replaced by the user's login name.
These are coupled with the equivalent code in
eden/fs/config/CheckoutConfig.cpp and must be kept in sync.
"""
parser = configutil.EdenConfigParser(
interpolation=configinterpolator.EdenConfigInterpolator(
self._config_variables
)
)
for path in self.get_rc_files():
try:
toml_cfg = _load_toml_config(path)
except FileNotFoundError:
# Ignore missing config files. Eg. user_config_path is optional
continue
parser.read_dict(toml_cfg)
return parser
@property
def _config_variables(self) -> Dict[str, str]:
return (
self._interpolate_dict
if self._interpolate_dict is not None
else {
"USER": os.environ.get("USER", ""),
"USER_ID": str(os.getuid()),
"HOME": str(self._home_dir),
}
)
def get_rc_files(self) -> List[Path]:
result: List[Path] = []
config_d = self._etc_eden_dir / CONFIG_DOT_D
try:
rc_entries = os.listdir(config_d)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
rc_entries = []
for name in rc_entries:
if name.endswith(".toml"):
result.append(config_d / name)
result.sort()
result.append(self._user_config_path)
return result
def get_repository_list(
self, parser: Union[configutil.EdenConfigParser, "ConfigUpdater", None] = None
) -> List[str]:
result = []
if not parser:
parser = self._loadConfig()
for section in parser.sections():
header = section.split(" ")
if len(header) == 2 and header[0] == "repository":
result.append(header[1])
return sorted(result)
def get_config_value(self, key: str, default: str) -> str:
parser = self._loadConfig()
section, option = key.split(".", 1)
return parser.get_str(section, option, default=default)
def should_use_experimental_systemd_mode(self) -> bool:
# TODO(T33122320): Delete this environment variable when systemd is properly
# integrated.
env_var_value = os.getenv("EDEN_EXPERIMENTAL_SYSTEMD")
if env_var_value == "1":
return True
if env_var_value == "0":
return False
if self._loadConfig().get_bool(
"service", "experimental_systemd", default=False
):
return True
return False
def get_fallback_systemd_xdg_runtime_dir(self) -> str:
xdg_runtime_dir = self.get_config_value(
"service.fallback_systemd_xdg_runtime_dir", default=""
)
if xdg_runtime_dir == "":
user_id = self._config_variables["USER_ID"]
xdg_runtime_dir = f"/run/user/{user_id}"
return xdg_runtime_dir
def print_full_config(self, file: typing.TextIO) -> None:
parser = self._loadConfig()
data: Dict[str, Mapping[str, str]] = {}
for section in parser.sections():
data[section] = parser.get_section_str_to_any(section)
toml.dump(data, file) # pyre-ignore[T39129461]
def find_config_for_alias(self, alias: str) -> Optional[CheckoutConfig]:
"""Looks through the existing config files and searches for a
[repository <alias>] section that defines a config:
- If no such section is found, returns None.
- If the appropriate section is found, returns a CheckoutConfig if all of
the fields for the config data are present and well-formed.
- Otherwise, throws an Exception.
"""
parser = self._loadConfig()
repository_header = f"repository {alias}"
if not parser.has_section(repository_header):
return None
bind_mounts_header = f"bindmounts {alias}"
if parser.has_section(bind_mounts_header):
# Convert the EdenConfigParser section into a dict so it is JSON
# serializable for the `eden info` command.
bind_mounts = dict(parser.get_section_str_to_str(bind_mounts_header))
else:
bind_mounts = {}
scm_type = parser.get_str(repository_header, "type", default="")
if not scm_type:
raise Exception(f'repository "{alias}" missing key "type".')
if scm_type not in SUPPORTED_REPOS:
raise Exception(f'repository "{alias}" has unsupported type.')
path = parser.get_str(repository_header, "path", default="")
if not path:
raise Exception(f'repository "{alias}" missing key "path".')
default_revision = (
parser.get_str(repository_header, "default-revision", default="")
or parser.get_str("clone", "default-revision", default="")
or DEFAULT_REVISION[scm_type]
)
return CheckoutConfig(
backing_repo=Path(path),
scm_type=scm_type,
bind_mounts=bind_mounts,
default_revision=default_revision,
)
def get_mount_paths(self) -> List[str]:
"""Return the paths of the set mount points stored in config.json"""
return [str(path) for path in self._get_directory_map().keys()]
def get_all_client_config_info(self) -> Dict[str, collections.OrderedDict]:
info = {}
for path in self.get_mount_paths():
info[path] = self.get_client_info(path)
return info
def get_thrift_client(self) -> eden.thrift.EdenClient:
return eden.thrift.create_thrift_client(str(self._config_dir))
def get_client_info(self, path: Union[Path, str]) -> collections.OrderedDict:
path = Path(path).resolve(strict=False)
client_dir = self._get_client_dir_for_mount_point(path)
checkout = EdenCheckout(self, path, client_dir)
checkout_config = checkout.get_config()
snapshot = checkout.get_snapshot()
return collections.OrderedDict(
[
("bind-mounts", checkout_config.bind_mounts),
("mount", str(path)),
("scm_type", checkout_config.scm_type),
("snapshot", snapshot),
("client-dir", str(client_dir)),
]
)
def add_repository(
self, name: str, repo_type: str, source: str, with_buck: bool = False
) -> None:
# Check if repository already exists
with ConfigUpdater(self._user_config_path) as config:
if name in self.get_repository_list(config):
raise UsageError(
"""\
repository %s already exists. You will need to edit the ~/.edenrc config file \
by hand to make changes to the repository or remove it."""
% name
)
# Create a directory for client to store repository metadata
bind_mounts = {}
if with_buck:
bind_mount_name = "buck-out"
bind_mounts[bind_mount_name] = "buck-out"
# Add repository to INI file
config["repository " + name] = {"type": repo_type, "path": source}
if bind_mounts:
config["bindmounts " + name] = bind_mounts
config.save()
def clone(
self, checkout_config: CheckoutConfig, path: str, snapshot_id: str
) -> None:
if path in self._get_directory_map():
raise Exception(
"""\
mount path %s is already configured (see `eden list`). \
Do you want to run `eden mount %s` instead?"""
% (path, path)
)
# Create the mount point directory
self._create_mount_point_dir(path)
# Create client directory
clients_dir = self._get_clients_dir()
clients_dir.mkdir(parents=True, exist_ok=True)
client_dir = self._create_client_dir_for_path(clients_dir, path)
# Store snapshot ID
checkout = EdenCheckout(self, Path(path), Path(client_dir))
if snapshot_id:
checkout.save_snapshot(snapshot_id)
else:
raise Exception("snapshot id not provided")
# Create bind mounts directories
bind_mounts_dir = os.path.join(client_dir, "bind-mounts")
util.mkdir_p(bind_mounts_dir)
for mount in checkout_config.bind_mounts:
util.mkdir_p(os.path.join(bind_mounts_dir, mount))
checkout.save_config(checkout_config)
# Prepare to mount
mount_info = eden_ttypes.MountArgument(
mountPoint=os.fsencode(path), edenClientPath=os.fsencode(client_dir)
)
with self.get_thrift_client() as client:
client.mount(mount_info)
self._post_clone_checkout_setup(checkout, snapshot_id)
# Add mapping of mount path to client directory in config.json
self._add_path_to_directory_map(Path(path), os.path.basename(client_dir))
def _create_mount_point_dir(self, path: str) -> None:
# Create the directory
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# If the path already exists, make sure it is an empty directory.
# listdir() will throw its own error if the path is not a directory.
if len(os.listdir(path)) > 0:
raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY), path)
# Populate the directory with a file containing instructions about how to get
# Eden to remount the checkout. If Eden is not running or does not have this
# checkout mounted users will see this file.
help_path = Path(path) / NOT_MOUNTED_README_PATH
site_readme_path = self._etc_eden_dir / NOT_MOUNTED_SITE_SPECIFIC_README_PATH
help_contents: Optional[str] = NOT_MOUNTED_DEFAULT_TEXT
try:
# Create a symlink to the site-specific readme file. This helps ensure that
# users will see up-to-date contents if the site-specific file is updated
# later.
with site_readme_path.open("r") as f:
try:
help_path.symlink_to(site_readme_path)
help_contents = None
except OSError as ex:
# EPERM can indicate that the underlying filesystem does not support
# symlinks. Read the contents from the site-specific file in this
# case. We will copy them into the file instead of making a
# symlink.
if ex.errno == errno.EPERM:
help_contents = f.read()
else:
raise
except OSError as ex:
if ex.errno == errno.ENOENT:
# If the site-specific readme file does not exist use default contents
help_contents = NOT_MOUNTED_DEFAULT_TEXT
else:
raise
if help_contents is not None:
with help_path.open("w") as f:
f.write(help_contents)
os.fchmod(f.fileno(), 0o444)
def _create_client_dir_for_path(self, clients_dir: Path, path: str) -> Path:
"""Tries to create a new subdirectory of clients_dir based on the
basename of the specified path. Tries appending an increasing sequence
of integers to the basename if there is a collision until it finds an
available directory name.
"""
basename = os.path.basename(path)
if basename == "":
raise Exception("Suspicious attempt to clone into: %s" % path)
i = 0
while True:
if i == 0:
dir_name = basename
else:
dir_name = f"{basename}-{i}"
client_dir = clients_dir / dir_name
try:
client_dir.mkdir()
return client_dir
except OSError as e:
if e.errno == errno.EEXIST:
# A directory with the specified name already exists: try
# again with the next candidate name.
i += 1
continue
raise
def _post_clone_checkout_setup(
self, checkout: "EdenCheckout", commit_id: str
) -> None:
# First, check to see if the post-clone setup has been run successfully
# before.
clone_success_path = checkout.state_dir / CLONE_SUCCEEDED
is_initial_mount = not clone_success_path.is_file()
if is_initial_mount and checkout.get_config().scm_type == "hg":
from . import hg_util
hg_util.setup_hg_dir(checkout, commit_id)
clone_success_path.touch()
def mount(self, path: Union[Path, str]) -> int:
# Load the config info for this client, to make sure we
# know about the client.
path = Path(path).resolve(strict=False)
client_dir = self._get_client_dir_for_mount_point(path)
checkout = EdenCheckout(self, path, client_dir)
# Call checkout.get_config() for the side-effect of it raising an
# Exception if the config is in an invalid state.
checkout.get_config()
# Make sure the mount path exists
path.mkdir(parents=True, exist_ok=True)
# Check if it is already mounted.
try:
root = path / ".eden" / "root"
target = readlink_retry_estale(root)
if Path(target) == path:
print_stderr(
f"ERROR: Mount point in use! {path} is already mounted by Eden."
)
return 1
else:
# If we are here, MOUNT/.eden/root is a symlink, but it does not
# point to MOUNT. This suggests `path` is a subdirectory of an
# existing mount, though we should never reach this point
# because _get_client_dir_for_mount_point() above should have
# already thrown an exception. We return non-zero here just in
# case.
print_stderr(
f"ERROR: Mount point in use! "
f"{path} is already mounted by Eden as part of {root}."
)
return 1
except OSError as ex:
err = ex.errno
if err != errno.ENOENT and err != errno.EINVAL:
raise
# Ask eden to mount the path
mount_info = eden_ttypes.MountArgument(
mountPoint=bytes(path), edenClientPath=bytes(client_dir)
)
with self.get_thrift_client() as client:
client.mount(mount_info)
return 0
def unmount(self, path: str) -> None:
"""Ask edenfs to unmount the specified checkout."""
with self.get_thrift_client() as client:
# In some cases edenfs can take a long time unmounting while it waits for
# inodes to become unreferenced. Ideally we should have edenfs timeout and
# forcibly clean up the mount point in this situation.
#
# For now at least time out here so the CLI commands do not hang in this
# case.
client._socket.setTimeout(15000)
client.unmount(os.fsencode(path))
def destroy_mount(self, path: Union[Path, str]) -> None:
"""Delete the specified mount point from the configuration file and remove
the mount directory, if it exists.
This should normally be called after unmounting the mount point.
"""
path = Path(path)
shutil.rmtree(self._get_client_dir_for_mount_point(path))
self._remove_path_from_directory_map(path)
# Delete the mount point
# It should normally contain the readme file that we put there, but nothing
# else. We only delete these specific files for now rather than using
# shutil.rmtree() to avoid deleting files we did not create.
#
# Previous versions of Eden made the mount point directory read-only
# as part of "eden clone". Make sure it is writable now so we can clean it up.
path.chmod(0o755)
try:
(path / NOT_MOUNTED_README_PATH).unlink()
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
path.rmdir()
def check_health(self, timeout: Optional[float] = None) -> HealthStatus:
"""
Get the status of the edenfs daemon.
Returns a HealthStatus object containing health information.
"""
return util.check_health(
lambda: self.get_thrift_client(), self._config_dir, timeout=timeout
)
def get_edenfs_start_cmd(
self,
daemon_binary: str,
extra_args: Optional[List[str]] = None,
takeover: bool = False,
gdb: bool = False,
gdb_args: Optional[List[str]] = None,
strace_file: Optional[str] = None,
foreground: bool = False,
) -> Tuple[List[str], Dict[str, str]]:
"""Get the command and environment to use to start edenfs."""
if gdb and strace_file is not None:
raise EdenStartError("cannot run eden under gdb and " "strace together")
# Compute the command.
cmd = [
daemon_binary,
"--edenfs",
"--edenDir",
str(self._config_dir),
"--etcEdenDir",
str(self._etc_eden_dir),
"--configPath",
str(self._user_config_path),
]
if gdb:
gdb_args = gdb_args or []
cmd = ["gdb"] + gdb_args + ["--args"] + cmd
foreground = True
if strace_file is not None:
cmd = ["strace", "-fttT", "-o", strace_file] + cmd
if extra_args:
cmd.extend(extra_args)
if self.should_use_experimental_systemd_mode():
# TODO(T33122320): Delete this after making 'eden restart' and other
# callers support systemd mode. (--foreground should never set
# --experimentalSystemd.)
cmd.append("--experimentalSystemd")
if takeover:
cmd.append("--takeover")
if foreground:
cmd.append("--foreground")
eden_env = self._build_eden_environment()
# Run edenfs using sudo, unless we already have root privileges,
# or the edenfs binary is setuid root.
if os.geteuid() != 0:
s = os.stat(daemon_binary)
if not (s.st_uid == 0 and (s.st_mode & stat.S_ISUID)):
# We need to run edenfs under sudo
sudo_cmd = ["/usr/bin/sudo"]
# Add environment variable settings
# Depending on the sudo configuration, these may not
# necessarily get passed through automatically even when
# using "sudo -E".
for key, value in eden_env.items():
sudo_cmd.append("%s=%s" % (key, value))
cmd = sudo_cmd + cmd
return cmd, eden_env
def get_log_path(self) -> Path:
return self._config_dir / "logs" / "edenfs.log"
def _build_eden_environment(self) -> Dict[str, str]:
# Reset $PATH to the following contents, so that everyone has the
# same consistent settings.
path_dirs = ["/opt/facebook/hg/bin", "/usr/local/bin", "/bin", "/usr/bin"]
eden_env = {"PATH": ":".join(path_dirs)}
# Preserve the following environment settings
preserve = [
"USER",
"LOGNAME",
"HOME",
"EMAIL",
"NAME",
"ASAN_OPTIONS",
# When we import data from mercurial, the remotefilelog extension
# may need to SSH to a remote mercurial server to get the file
# contents. Preserve SSH environment variables needed to do this.
"SSH_AUTH_SOCK",
"SSH_AGENT_PID",
"KRB5CCNAME",
]
for name, value in os.environ.items():
# Preserve any environment variable starting with "TESTPILOT_".
# TestPilot uses a few environment variables to keep track of
# processes started during test runs, so it can track down and kill
# runaway processes that weren't cleaned up by the test itself.
# We want to make sure this behavior works during the eden
# integration tests.
# Similarly, we want to preserve EDENFS_ env vars which are
# populated by our own test infra to relay paths to important
# build artifacts in our build tree.
if name.startswith("TESTPILOT_") or name.startswith("EDENFS_"):
eden_env[name] = value
elif name in preserve:
eden_env[name] = value
else:
# Drop any environment variable not matching the above cases
pass
return eden_env
def get_checkout_config_for_path(self, path: str) -> Optional[CheckoutConfig]:
client_link = os.path.join(path, ".eden", "client")
try:
client_dir = readlink_retry_estale(client_link)
except OSError:
return None
checkout = EdenCheckout(self, Path(path), Path(client_dir))
return checkout.get_config()
def get_checkouts(self) -> List["EdenCheckout"]:
"""Return information about all configured checkouts defined in Eden's
configuration file."""
dir_map = self._get_directory_map()
checkouts: List[EdenCheckout] = []
clients_dir = Path(self._get_clients_dir())
for mount_path, client_name in dir_map.items():
checkout_data_dir = clients_dir / client_name
checkouts.append(EdenCheckout(self, mount_path, checkout_data_dir))
return checkouts
def _get_directory_map(self) -> Dict[Path, str]:
"""
Parse config.json which holds a mapping of mount paths to their
respective client directory and return contents in a dictionary.
"""
directory_map = self._config_dir / CONFIG_JSON
try:
with directory_map.open() as f:
data = json.load(f)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
data = {}
except json.JSONDecodeError:
raise Exception(f"invalid JSON data found in {directory_map}")
if not isinstance(data, dict):
raise Exception(f"invalid data found in {directory_map}")
result: Dict[Path, str] = {}
for k, v in data.items():
if not isinstance(k, str) or not isinstance(v, str):
raise Exception(f"invalid data found in {directory_map}")
result[Path(k)] = v
return result
def _add_path_to_directory_map(self, path: Path, dir_name: str) -> None:
config_data = self._get_directory_map()
if path in config_data:
raise Exception("mount path %s already exists." % path)
config_data[path] = dir_name
self._write_directory_map(config_data)
def _remove_path_from_directory_map(self, path: Path) -> None:
config_data = self._get_directory_map()
if path in config_data:
del config_data[path]
self._write_directory_map(config_data)
def _write_directory_map(self, config_data: Dict[Path, str]) -> None:
json_data = {str(path): name for path, name in config_data.items()}
directory_map = self._config_dir / CONFIG_JSON
with directory_map.open("w") as f:
json.dump(json_data, f, indent=2, sort_keys=True)
f.write("\n")
def _get_client_dir_for_mount_point(self, path: Path) -> Path:
# The caller is responsible for making sure the path is already
# a normalized, absolute path.
assert path.is_absolute()
config_data = self._get_directory_map()
if path not in config_data:
raise Exception(f"could not find mount path {path}")
return self._get_clients_dir() / config_data[path]
def _get_clients_dir(self) -> Path:
return self._config_dir / CLIENTS_DIR
def get_server_build_info(self) -> Dict[str, str]:
with self.get_thrift_client() as client:
try:
return client.getRegexExportedValues("^build_.*")
except AttributeError:
# on macOS currently, we don't know about getRegexExportedValues
return {}
def get_uptime(self) -> datetime.timedelta:
now = datetime.datetime.now()
with self.get_thrift_client() as client:
since_in_seconds = client.aliveSince()
since = datetime.datetime.fromtimestamp(since_in_seconds)
return now - since
class ConfigUpdater(object):
"""
A helper class to safely update an eden config file.
This acquires a lock on the config file, reads it in, and then provides APIs
to save it back. This ensures that another process cannot change the file
in between the time that we read it and when we write it back.
This also saves the file to a temporary name first, then renames it into
place, so that the main config file is always in a good state, and never
has partially written contents.
"""
def __init__(self, path: Path) -> None:
self.path = path
self._lock_path = self.path.with_suffix(".lock")
self._lock_file: Optional[typing.TextIO] = None
self.config = configutil.EdenConfigParser()
# Acquire a lock.
# This makes sure that another process can't modify the config in the
# middle of a read-modify-write operation. (We can't stop a user
# from manually editing the file while we work, but we can stop
# other eden CLI processes.)
self._acquire_lock()
try:
toml_cfg = _load_toml_config(self.path)
self.config.read_dict(toml_cfg)
except FileNotFoundError:
pass
def __enter__(self) -> "ConfigUpdater":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
exc_traceback: Optional[types.TracebackType],
) -> bool:
self.close()
return False
def __del__(self) -> None:
self.close()
def sections(self) -> List[str]:
return self.config.sections()
def __setitem__(self, key: str, value: Dict[str, Any]) -> None:
self.config[key] = value
def _acquire_lock(self) -> None:
while True:
self._lock_file = typing.cast(typing.TextIO, open(self._lock_path, "w+"))
fcntl.flock(self._lock_file.fileno(), fcntl.LOCK_EX)
# The original creator of the lock file will unlink it when
# it is finished. Make sure we grab the lock on the file still on
# disk, and not an unlinked file.
st1 = os.fstat(self._lock_file.fileno())
st2 = os.lstat(self._lock_path)
if st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino:
# We got the real lock
return
# We acquired a lock on an old deleted file.
# Close it, and try to acquire the current lock file again.
self._lock_file.close()
self._lock_file = None
continue
def _unlock(self) -> None:
assert self._lock_file is not None
# Remove the file on disk before we unlock it.
# This way processes currently waiting in _acquire_lock() that already
# opened our lock file will see that it isn't the current file on disk
# once they acquire the lock.
os.unlink(self._lock_path)
self._lock_file.close()
self._lock_file = None
def close(self) -> None:
if self._lock_file is not None:
self._unlock()
def save(self) -> None:
if self._lock_file is None:
raise Exception("Cannot save the config without holding the lock")
try:
st = os.stat(self.path)
perms = st.st_mode & 0o777
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
perms = 0o644
# Write the contents to a temporary file first, then atomically rename
# it to the desired destination. This makes sure the .edenrc file
# always has valid contents at all points in time.
prefix = USER_CONFIG + ".tmp."
dirname = self.path.parent
tmpf = tempfile.NamedTemporaryFile(
"w", dir=str(dirname), prefix=prefix, delete=False
)
try:
toml_config = self.config.to_raw_dict()
toml_data = toml.dumps(typing.cast(Mapping[str, Any], toml_config))
tmpf.write(toml_data)
tmpf.close()
os.chmod(tmpf.name, perms)
os.rename(tmpf.name, self.path)
except BaseException:
# Remove temporary file on error
try:
os.unlink(tmpf.name)
except Exception:
pass
raise
class EdenCheckout:
"""Information about a particular Eden checkout."""
def __init__(self, instance: EdenInstance, path: Path, state_dir: Path) -> None:
self.instance = instance
self.path = path
self.state_dir = state_dir
self._config: Optional[CheckoutConfig] = None
def __repr__(self) -> str:
return f"EdenCheckout({self.instance!r}, {self.path!r}, {self.state_dir!r})"
def get_relative_path(self, path: Path, already_resolved: bool = False) -> Path:
"""Compute the relative path to a given location inside an eden checkout.
If the checkout is currently mounted this function is able to correctly resolve
paths that refer into the checkout via alternative bind mount locations.
e.g. if the checkout is located at "/home/user/foo/eden_checkout" but
"/home/user" is also bind-mounted to "/data/user" this will still be able to
correctly resolve an input path of "/data/user/foo/eden_checkout/test"
"""
if not already_resolved:
path = path.resolve(strict=False)
# First try using path.relative_to()
# This should work in the common case
try:
return path.relative_to(self.path)
except ValueError:
pass
# path.relative_to() may fail if the checkout is bind-mounted to an alternate
# location, and the input path points into it using the bind mount location.
# In this case search upwards from the input path looking for the checkout root.
try:
path_stat = path.lstat()
except OSError as ex:
raise Exception(
f"unable to stat {path} to find relative location inside "
f"checkout {self.path}: {ex}"
)
try:
root_stat = self.path.lstat()
except OSError as ex:
raise Exception(f"unable to stat checkout at {self.path}: {ex}")
if (path_stat.st_dev, path_stat.st_ino) == (root_stat.st_dev, root_stat.st_ino):
# This is the checkout root
return Path()
# pyre-fixme[22]: The cast is redundant.
curdir = cast(Path, path.parent)
path_parts = [path.name]
while True:
stat = curdir.lstat()
if (stat.st_dev, stat.st_ino) == (root_stat.st_dev, root_stat.st_ino):
path_parts.reverse()
return Path(*path_parts)
if curdir.parent == curdir:
raise Exception(
f"unable to determine relative location of {path} "
f"inside {self.path}"
)
path_parts.append(curdir.name)
# pyre-fixme[22]: The cast is redundant.
curdir = typing.cast(Path, curdir.parent)
def get_config(self) -> CheckoutConfig:
if self._config is None:
self._config = self._read_config()
return self._config
def save_config(self, checkout_config: CheckoutConfig) -> None:
# Store information about the mount in the config.toml file.
config_data = {
"repository": {
"path": str(checkout_config.backing_repo),
"type": checkout_config.scm_type,
},
"bind-mounts": checkout_config.bind_mounts,
}
with self._config_path().open("w") as f:
# pyre-fixme[6]: Expected `_Writable` for 2nd param but got `IO[]`.
toml.dump(config_data, f)
# Update our local config cache
self._config = checkout_config
def _config_path(self) -> Path:
return self.state_dir / MOUNT_CONFIG
def _read_config(self) -> CheckoutConfig:
"""Returns CheckoutConfig or raises an Exception if the config.toml
under self.state_dir is not properly formatted or does not exist.
"""
config_path = self._config_path()
config = _load_toml_config(config_path)
repo_field = config.get("repository")
if isinstance(repo_field, dict):
repository = repo_field
else:
raise Exception(f"{config_path} is missing [repository]")
def get_field(key: str) -> str:
value = repository.get(key)
if isinstance(value, str):
return value
raise Exception(f"{config_path} is missing {key} in " "[repository]")
scm_type = get_field("type")
if scm_type not in SUPPORTED_REPOS:
raise Exception(
f'repository "{config_path}" has unsupported type ' f'"{scm_type}"'
)
bind_mounts = {}
bind_mounts_dict = config.get("bind-mounts")
if bind_mounts_dict is not None:
if not isinstance(bind_mounts_dict, dict):
raise Exception(
f"{config_path} has an invalid " "[bind-mounts] section"
)
for key, value in bind_mounts_dict.items():
if not isinstance(value, str):
raise Exception(
f"{config_path} has invalid value in "
f"[bind-mounts] for {key}: {value} "
"(string expected)"
)
bind_mounts[key] = value
return CheckoutConfig(
backing_repo=Path(get_field("path")),
scm_type=scm_type,
bind_mounts=bind_mounts,
default_revision=(
repository.get("default-revision") or DEFAULT_REVISION[scm_type]
),
)
def get_snapshot(self) -> str:
"""Return the hex version of the parent hash in the SNAPSHOT file."""
snapshot_path = self.state_dir / SNAPSHOT
with snapshot_path.open("rb") as f:
assert f.read(8) == SNAPSHOT_MAGIC
return binascii.hexlify(f.read(20)).decode("utf-8")
def save_snapshot(self, commit_id: str) -> None:
"""Write a new parent commit ID into the SNAPSHOT file."""
snapshot_path = self.state_dir / SNAPSHOT
assert len(commit_id) == 40
commit_bin = binascii.unhexlify(commit_id)
# TODO: It would be nicer to write this out atomically using a temporary file
# followed by a rename.
snapshot_path.write_bytes(SNAPSHOT_MAGIC + commit_bin)
def find_eden(
path: Union[str, Path],
etc_eden_dir: Optional[str] = None,
home_dir: Optional[str] = None,
state_dir: Optional[str] = None,
) -> Tuple[EdenInstance, Optional[EdenCheckout], Optional[Path]]:
"""Look up the EdenInstance and EdenCheckout for a path.
If the input path points into an Eden checkout, this returns a tuple of
(EdenInstance, EdenCheckout, rel_path), where EdenInstance contains information for
the edenfs instance serving this checkout, EdenCheckout contains information about
the checkout, and rel_path contains the relative location of the input path inside
the checkout. The checkout does not need to be currently mounted for this to work.
If the input path does not point inside a known Eden checkout, this returns
(EdenInstance, None, None)
"""
if isinstance(path, str):
path = Path(path)
path = path.resolve(strict=False)
# First check to see if this looks like a mounted checkout
eden_state_dir = None
checkout_root = None
checkout_state_dir = None
try:
eden_socket_path = readlink_retry_estale(path.joinpath(".eden", "socket"))
eden_state_dir = os.path.dirname(eden_socket_path)
checkout_root = Path(readlink_retry_estale(path.joinpath(".eden", "root")))
checkout_state_dir = Path(
readlink_retry_estale(path.joinpath(".eden", "client"))
)
except OSError:
# We will get an OSError if any of these symlinks do not exist
# Fall through and we will handle this below.
pass
if eden_state_dir is None:
# Use the state directory argument supplied by the caller.
# If this is None the EdenInstance constructor will pick the correct location.
eden_state_dir = state_dir
elif state_dir is not None:
# We found a state directory from the checkout and the user also specified an
# explicit state directory. Make sure they match.
_check_same_eden_directory(Path(eden_state_dir), Path(state_dir))
instance = EdenInstance(
eden_state_dir, etc_eden_dir=etc_eden_dir, home_dir=home_dir
)
checkout: Optional[EdenCheckout] = None
rel_path: Optional[Path] = None
if checkout_root is None:
all_checkouts = instance._get_directory_map()
for checkout_path_str, checkout_name in all_checkouts.items():
checkout_path = Path(checkout_path_str)
try:
rel_path = path.relative_to(checkout_path)
except ValueError:
continue
checkout_state_dir = instance.state_dir.joinpath(CLIENTS_DIR, checkout_name)
checkout = EdenCheckout(instance, checkout_path, checkout_state_dir)
break
else:
# This path does not appear to be inside a known checkout
checkout = None
rel_path = None
elif checkout_state_dir is None:
all_checkouts = instance._get_directory_map()
checkout_name_value = all_checkouts.get(checkout_root)
if checkout_name_value is None:
raise Exception(f"unknown checkout {checkout_root}")
checkout_state_dir = instance.state_dir.joinpath(
CLIENTS_DIR, checkout_name_value
)
checkout = EdenCheckout(instance, checkout_root, checkout_state_dir)
rel_path = checkout.get_relative_path(path, already_resolved=True)
else:
checkout = EdenCheckout(instance, checkout_root, checkout_state_dir)
rel_path = checkout.get_relative_path(path, already_resolved=True)
return (instance, checkout, rel_path)
def _check_same_eden_directory(found_path: Path, path_arg: Path) -> None:
s1 = found_path.lstat()
s2 = path_arg.lstat()
if (s1.st_dev, s1.st_ino) != (s2.st_dev, s2.st_ino):
raise Exception(
f"the specified directory is managed by the edenfs instance at "
f"{found_path}, which is different from the explicitly requested "
f"instance at {path_arg}"
)
def _verify_mount_point(mount_point: str) -> None:
if os.path.isdir(mount_point):
return
parent_dir = os.path.dirname(mount_point)
if os.path.isdir(parent_dir):
os.mkdir(mount_point)
else:
raise Exception(
(
"%s must be a directory in order to mount a client at %s. "
+ "If this is the correct location, run `mkdir -p %s` to create "
+ "the directory."
)
% (parent_dir, mount_point, parent_dir)
)
_TomlConfigDict = Mapping[str, Mapping[str, Any]]
def _load_toml_config(path: Path) -> _TomlConfigDict:
return typing.cast(_TomlConfigDict, toml.load(str(path)))
```
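The `find_eden()` helper defined above maps an arbitrary filesystem path to its `EdenInstance`, the containing `EdenCheckout` (if any), and the path's location relative to the checkout root. A minimal usage sketch follows; the import path is assumed from the `from .config import EdenInstance` import in daemon.py below, and the checkout path is purely illustrative:
```python
from pathlib import Path

from eden.cli.config import find_eden  # module path assumed from daemon.py's imports

# Resolve which edenfs instance (if any) manages this path and where the path
# sits inside the checkout. The checkout does not need to be mounted.
instance, checkout, rel_path = find_eden(Path.home() / "my-checkout")  # illustrative path

if checkout is None:
    print("not inside a known Eden checkout")
else:
    print(f"checkout root:  {checkout.path}")
    print(f"state dir:      {checkout.state_dir}")
    print(f"relative path:  {rel_path}")
    print(f"parent commit:  {checkout.get_snapshot()}")
```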
#### File: eden/cli/daemon.py
```python
import asyncio
import errno
import getpass
import os
import pathlib
import signal
import subprocess
import sys
from typing import Dict, List, NoReturn, Optional, Tuple
from .config import EdenInstance
from .logfile import forward_log_file
from .systemd import (
EdenFSSystemdServiceConfig,
SystemdConnectionRefusedError,
SystemdFileNotFoundError,
SystemdServiceFailedToStartError,
SystemdUserBus,
edenfs_systemd_service_name,
print_service_status_using_systemctl_for_diagnostics_async,
)
from .util import ShutdownError, poll_until, print_stderr
def wait_for_process_exit(pid: int, timeout: float) -> bool:
"""Wait for the specified process ID to exit.
Returns True if the process exits within the specified timeout, and False if the
timeout expires while the process is still alive.
"""
def process_exited() -> Optional[bool]:
if did_process_exit(pid):
return True
else:
return None
try:
poll_until(process_exited, timeout=timeout)
return True
except TimeoutError:
return False
def wait_for_shutdown(pid: int, timeout: float, kill_timeout: float = 5.0) -> bool:
"""Wait for a process to exit.
If it does not exit within `timeout` seconds kill it with SIGKILL.
Returns True if the process exited on its own or False if it only exited
after SIGKILL.
Throws a ShutdownError if we failed to kill the process with SIGKILL
(either because we failed to send the signal, or if the process still did
not exit within kill_timeout seconds after sending SIGKILL).
"""
# Wait until the process exits on its own.
if wait_for_process_exit(pid, timeout):
return True
# client.shutdown() failed to terminate the process within the specified
# timeout. Take a more aggressive approach by sending SIGKILL.
print_stderr(
"error: sent shutdown request, but edenfs did not exit "
"within {} seconds. Attempting SIGKILL.",
timeout,
)
sigkill_process(pid, timeout=kill_timeout)
return False
def sigkill_process(pid: int, timeout: float = 5.0) -> None:
"""Send SIGKILL to a process, and wait for it to exit.
If timeout is greater than 0, this waits for the process to exit after sending the
signal. Throws a ShutdownError exception if the process does not exit within the
specified timeout.
Returns successfully if the specified process did not exist in the first place.
This is done to handle situations where the process exited on its own just before we
could send SIGKILL.
"""
try:
os.kill(pid, signal.SIGKILL)
except OSError as ex:
if ex.errno == errno.ESRCH:
# The process exited before the SIGKILL was received.
# Treat this just like a normal shutdown since it exited on its
# own.
return
elif ex.errno == errno.EPERM:
raise ShutdownError(
"Received EPERM when sending SIGKILL. "
"Perhaps edenfs failed to drop root privileges properly?"
)
else:
raise
if timeout <= 0:
return
if not wait_for_process_exit(pid, timeout):
raise ShutdownError(
"edenfs process {} did not terminate within {} seconds of "
"sending SIGKILL.".format(pid, timeout)
)
def is_zombie_process(pid: int) -> bool:
try:
with open(f"/proc/{pid}/stat", "rb") as proc_stat:
line = proc_stat.read()
pieces = line.split()
if len(pieces) > 2 and pieces[2] == b"Z":
return True
except (FileNotFoundError, ProcessLookupError):
pass
return False
def did_process_exit(pid: int) -> bool:
try:
os.kill(pid, 0)
except OSError as ex:
if ex.errno == errno.ESRCH:
# The process has exited
return True
# EPERM is okay (and means the process is still running),
# anything else is unexpected
elif ex.errno != errno.EPERM:
raise
if is_zombie_process(pid):
return True
# Still running
return False
def _find_default_daemon_binary() -> Optional[str]:
# By default, we look for the daemon executable in the corresponding libexec
# directory.
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
candidate = os.path.normpath(os.path.join(script_dir, "../libexec/eden/edenfs"))
permissions = os.R_OK | os.X_OK
if os.access(candidate, permissions):
return candidate
# This is where the binary will be found relative to this file when it is
# run out of buck-out in debug mode.
candidate = os.path.normpath(os.path.join(script_dir, "../fs/service/edenfs"))
if os.access(candidate, permissions):
return candidate
else:
return None
class DaemonBinaryNotFound(Exception):
def __init__(self) -> None:
super().__init__("unable to find edenfs executable")
def _find_daemon_binary(explicit_daemon_binary: Optional[str]) -> str:
if explicit_daemon_binary is not None:
return explicit_daemon_binary
daemon_binary = _find_default_daemon_binary()
if daemon_binary is None:
raise DaemonBinaryNotFound()
return daemon_binary
def exec_daemon(
instance: EdenInstance,
daemon_binary: Optional[str] = None,
edenfs_args: Optional[List[str]] = None,
takeover: bool = False,
gdb: bool = False,
gdb_args: Optional[List[str]] = None,
strace_file: Optional[str] = None,
foreground: bool = False,
) -> NoReturn:
"""Execute the edenfs daemon.
This method uses os.exec() to replace the current process with the edenfs daemon.
It does not return on success. It may throw an exception on error.
"""
try:
cmd, env = _get_daemon_args(
instance=instance,
daemon_binary=daemon_binary,
edenfs_args=edenfs_args,
takeover=takeover,
gdb=gdb,
gdb_args=gdb_args,
strace_file=strace_file,
foreground=foreground,
)
except DaemonBinaryNotFound as e:
print_stderr(f"error: {e}")
os._exit(1)
os.execve(cmd[0], cmd, env)
# Throw an exception just to let mypy know that we should never reach here
# and will never return normally.
raise Exception("execve should never return")
def start_daemon(
instance: EdenInstance,
daemon_binary: Optional[str] = None,
edenfs_args: Optional[List[str]] = None,
) -> int:
"""Start the edenfs daemon."""
try:
cmd, env = _get_daemon_args(
instance=instance, daemon_binary=daemon_binary, edenfs_args=edenfs_args
)
except DaemonBinaryNotFound as e:
print_stderr(f"error: {e}")
return 1
return subprocess.call(cmd, env=env)
def start_systemd_service(
instance: EdenInstance,
daemon_binary: Optional[str] = None,
edenfs_args: Optional[List[str]] = None,
) -> int:
try:
daemon_binary = _find_daemon_binary(daemon_binary)
except DaemonBinaryNotFound as e:
print_stderr(f"error: {e}")
return 1
service_config = EdenFSSystemdServiceConfig(
eden_dir=instance.state_dir,
edenfs_executable_path=pathlib.Path(daemon_binary),
extra_edenfs_arguments=edenfs_args or [],
)
service_config.write_config_file()
service_name = edenfs_systemd_service_name(instance.state_dir)
xdg_runtime_dir = _get_systemd_xdg_runtime_dir(config=instance)
startup_log_path = service_config.startup_log_file_path
startup_log_path.write_bytes(b"")
with forward_log_file(startup_log_path, sys.stderr.buffer) as log_forwarder:
loop = asyncio.get_event_loop()
async def start_service_async() -> int:
with SystemdUserBus(
event_loop=loop, xdg_runtime_dir=xdg_runtime_dir
) as systemd:
service_name_bytes = service_name.encode()
active_state = await systemd.get_unit_active_state_async(
service_name_bytes
)
if active_state == b"active":
print_stderr("error: edenfs systemd service is already running")
await print_service_status_using_systemctl_for_diagnostics_async(
service_name=service_name, xdg_runtime_dir=xdg_runtime_dir
)
return 1
await systemd.start_service_and_wait_async(service_name_bytes)
return 0
try:
start_task = loop.create_task(start_service_async())
loop.create_task(log_forwarder.poll_forever_async())
return loop.run_until_complete(start_task)
except (SystemdConnectionRefusedError, SystemdFileNotFoundError):
print_stderr(
f"error: The systemd user manager is not running. Run the "
f"following command to\n"
f"start it, then try again:\n"
f"\n"
f" sudo systemctl start user@{getpass.getuser()}.service"
)
return 1
except SystemdServiceFailedToStartError as e:
print_stderr(f"error: {e}")
return 1
finally:
log_forwarder.poll()
def _get_systemd_xdg_runtime_dir(config: EdenInstance) -> str:
xdg_runtime_dir = os.getenv("XDG_RUNTIME_DIR")
if xdg_runtime_dir is None:
xdg_runtime_dir = config.get_fallback_systemd_xdg_runtime_dir()
print_stderr(
f"warning: The XDG_RUNTIME_DIR environment variable is not set; "
f"using fallback: {xdg_runtime_dir!r}"
)
return xdg_runtime_dir
def _get_daemon_args(
instance: EdenInstance,
daemon_binary: Optional[str] = None,
edenfs_args: Optional[List[str]] = None,
takeover: bool = False,
gdb: bool = False,
gdb_args: Optional[List[str]] = None,
strace_file: Optional[str] = None,
foreground: bool = False,
) -> Tuple[List[str], Dict[str, str]]:
"""Get the command and environment to use to start edenfs."""
daemon_binary = _find_daemon_binary(daemon_binary)
return instance.get_edenfs_start_cmd(
daemon_binary,
edenfs_args,
takeover=takeover,
gdb=gdb,
gdb_args=gdb_args,
strace_file=strace_file,
foreground=foreground,
)
```
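The shutdown helpers above implement a two-stage policy: wait up to `timeout` seconds for a graceful exit, then escalate to SIGKILL and raise `ShutdownError` if even that fails. A small sketch of how a caller might drive them (the PID is invented for the example):
```python
from eden.cli.daemon import wait_for_shutdown
from eden.cli.util import ShutdownError


def stop_edenfs(pid: int) -> None:
    # wait_for_shutdown() returns True for a clean exit and False if it had to
    # fall back to SIGKILL; ShutdownError means even SIGKILL did not work.
    try:
        clean = wait_for_shutdown(pid, timeout=15.0)
    except ShutdownError as ex:
        print(f"failed to stop edenfs process {pid}: {ex}")
        return
    state = "exited cleanly" if clean else "was killed with SIGKILL"
    print(f"edenfs process {pid} {state}")


stop_edenfs(12345)  # hypothetical PID
```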
#### File: cli/doctor/check_filesystems.py
```python
import os
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import List, Set
from eden.cli.config import EdenInstance
from eden.cli.doctor.problem import Problem, ProblemSeverity, ProblemTracker
def check_using_nfs_path(tracker: ProblemTracker, mount_path: Path) -> None:
check_shared_path(tracker, mount_path)
def check_eden_directory(tracker: ProblemTracker, instance: EdenInstance) -> None:
if not is_nfs_mounted(str(instance.state_dir)):
return
msg = (
f"Eden's state directory is on an NFS file system: {instance.state_dir}\n"
f" This will likely cause performance problems and/or other errors."
)
# On FB devservers the default Eden state directory path is ~/local/.eden
# Normally ~/local is expected to be a symlink to local disk (for users who are
# still using NFS home directories in the first place). The most common cause of
# the Eden state directory being on NFS is for users that somehow have a regular
# directory at ~/local rather than a symlink. Suggest checking this as a
# remediation.
remediation = (
"The most common cause for this is if your ~/local symlink does not point "
"to local disk. Make sure that ~/local is a symlink pointing to local disk "
"and then restart Eden."
)
tracker.add_problem(Problem(msg, remediation))
def get_shared_path(mount_path: Path) -> Path:
return mount_path / ".hg" / "sharedpath"
def read_shared_path(tracker: ProblemTracker, shared_path: Path) -> str:
try:
return shared_path.read_text()
except (FileNotFoundError, IsADirectoryError):
raise
except Exception as e:
tracker.add_problem(Problem(f"Failed to read .hg/sharedpath: {e}"))
raise
def check_shared_path(tracker: ProblemTracker, mount_path: Path) -> None:
shared_path = get_shared_path(mount_path)
try:
dst_shared_path = read_shared_path(tracker, shared_path)
except Exception:
return
if is_nfs_mounted(dst_shared_path):
msg = (
f"The Mercurial data directory for {shared_path} is at"
f" {dst_shared_path} which is on a NFS filesystem."
f" Accessing files and directories in this repository will be slow."
)
problem = Problem(msg, severity=ProblemSeverity.ADVICE)
tracker.add_problem(problem)
def fstype_for_path(path: str) -> str:
if sys.platform == "linux2":
try:
args = ["stat", "-fc", "%T", "--", path]
return subprocess.check_output(args).decode("ascii").strip()
except subprocess.CalledProcessError:
return "unknown"
return "unknown"
def is_nfs_mounted(path: str) -> bool:
return fstype_for_path(path) == "nfs"
def get_mountpt(path: str) -> str:
if not os.path.exists(path):
return path
path = os.path.realpath(path)
path_stat = os.lstat(path)
while True:
parent = os.path.dirname(path)
parent_stat = os.lstat(parent)
if parent == path or parent_stat.st_dev != path_stat.st_dev:
return path
path, path_stat = parent, parent_stat
def get_mount_pts_set(
tracker: ProblemTracker, mount_paths: List[str], instance: EdenInstance
) -> Set[str]:
eden_locations = [str(instance.state_dir), tempfile.gettempdir()]
for mount_path in mount_paths:
try:
eden_repo_path = read_shared_path(
tracker, get_shared_path(Path(mount_path))
)
except Exception:
continue
eden_locations.append(eden_repo_path)
try:
hg_cache_dir = subprocess.check_output(
["hg", "config", "remotefilelog.cachepath"],
cwd=mount_path,
env=dict(os.environ, HGPLAIN="1"),
)
except subprocess.CalledProcessError:
continue
eden_locations.append(hg_cache_dir.decode("utf-8").rstrip("\n"))
# Set is used to skip duplicate mount folders
return {get_mountpt(eden_location) for eden_location in eden_locations}
def check_disk_usage(
tracker: ProblemTracker, mount_paths: List[str], instance: EdenInstance
) -> None:
prob_advice_space_used_ratio_threshold = 0.90
prob_error_absolute_space_used_threshold = 1024 * 1024 * 1024 # 1GB
eden_mount_pts_set = get_mount_pts_set(tracker, mount_paths, instance)
for eden_mount_pt in eden_mount_pts_set:
if eden_mount_pt and os.path.exists(eden_mount_pt):
disk_status = os.statvfs(eden_mount_pt)
avail = disk_status.f_frsize * disk_status.f_bavail
size = disk_status.f_frsize * disk_status.f_blocks
if size == 0:
continue
used = size - avail
used_percent = float(used) / size
message = (
"Eden lazily loads your files and needs enough disk space to "
"store these files when loaded."
)
extra_message = instance.get_config_value(
"doctor.low-disk-space-message", ""
)
if extra_message:
message = f"{message} {extra_message}"
if avail <= prob_error_absolute_space_used_threshold:
tracker.add_problem(
Problem(
f"{eden_mount_pt} "
f"has only {str(avail)} bytes available. "
f"{message}",
severity=ProblemSeverity.ERROR,
)
)
elif used_percent >= prob_advice_space_used_ratio_threshold:
tracker.add_problem(
Problem(
f"{eden_mount_pt} "
f"is {used_percent:.2%} full. "
f"{message}",
severity=ProblemSeverity.ADVICE,
)
)
```
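`get_mountpt()` walks up the directory tree until the device ID changes, which is how the checks above map paths (the Eden state directory, hg shared paths, the remotefilelog cache) to the mount points whose free space actually matters. A quick illustrative use, with the path invented for the example:
```python
from eden.cli.doctor.check_filesystems import get_mountpt, is_nfs_mounted

state_dir = "/home/user/local/.eden"  # illustrative path
mount_point = get_mountpt(state_dir)
print(f"{state_dir} lives on the mount at {mount_point}")
print(f"NFS: {is_nfs_mounted(mount_point)}")
```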
#### File: cli/doctor/check_hg.py
```python
import abc
import binascii
import os
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
import eden.dirstate
import facebook.eden.ttypes as eden_ttypes
from eden.cli import hg_util
from eden.cli.config import EdenCheckout
from eden.cli.doctor.problem import FixableProblem, ProblemTracker, UnexpectedCheckError
from thrift.Thrift import TApplicationException
class HgChecker:
errors: List[str] = []
def __init__(self, checkout: EdenCheckout) -> None:
self.checkout = checkout
def check(self) -> bool:
self.errors = self.check_for_error()
return not self.errors
@abc.abstractmethod
def check_for_error(self) -> List[str]:
"""Check for errors.
Returns a list of errors, or an empty list if no problems were found.
"""
raise NotImplementedError()
@abc.abstractmethod
def repair(self) -> None:
raise NotImplementedError()
class HgFileChecker(HgChecker):
def __init__(self, checkout: EdenCheckout, name: str) -> None:
super().__init__(checkout)
self.name = name
self.problem: Optional[str] = None
@property
def path(self) -> Path:
return self.checkout.path / ".hg" / self.name
@property
def short_path(self) -> str:
return os.path.join(".hg", self.name)
def check_for_error(self) -> List[str]:
try:
data = self.path.read_bytes()
except IOError as ex:
return [f"error reading {self.short_path}: {ex}"]
return self.check_data(data)
def check_data(self, data: bytes) -> List[str]:
return []
class DirstateChecker(HgFileChecker):
_null_commit_id = 20 * b"\x00"
_old_snapshot: Optional[bytes] = None
_old_dirstate_parents: Optional[Tuple[bytes, bytes]] = None
_tuples_dict: Dict[bytes, Tuple[str, int, int]] = {}
_copymap: Dict[bytes, bytes] = {}
_new_parents: Optional[Tuple[bytes, bytes]] = None
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "dirstate")
def check_for_error(self) -> List[str]:
errors: List[str] = []
self._get_old_dirstate_info(errors)
self._get_old_snapshot(errors)
self._new_parents = self._select_new_parents(errors)
# If we need to update state make sure we reported an error
if (
self._new_parents != self._old_dirstate_parents
or self._new_parents[0] != self._old_snapshot
):
assert errors
return errors
def _get_old_dirstate_info(self, errors: List[str]) -> None:
# Read the data from the dirstate file
try:
with self.path.open("rb") as f:
parents, tuples_dict, copymap = eden.dirstate.read(f, str(self.path))
self._old_dirstate_parents = parents
self._tuples_dict = {os.fsencode(k): v for k, v in tuples_dict.items()}
self._copymap = {os.fsencode(k): os.fsencode(v) for k, v in copymap.items()}
except IOError as ex:
errors.append(f"error reading {self.short_path}: {ex}")
return
except eden.dirstate.DirstateParseException as ex:
errors.append(f"error parsing {self.short_path}: {ex}")
return
# Make sure the commits are valid, and discard them otherwise
old_p0 = self._check_commit(errors, parents[0], "mercurial's p0 commit")
old_p1 = self._check_commit(errors, parents[1], "mercurial's p1 commit")
if old_p0 is None:
self._old_dirstate_parents = None
else:
if old_p1 is None:
old_p1 = self._null_commit_id
self._old_dirstate_parents = (old_p0, old_p1)
def _get_old_snapshot(self, errors: List[str]) -> None:
# Get the commit ID from the snapshot file
try:
snapshot_hex = self.checkout.get_snapshot()
self._old_snapshot = binascii.unhexlify(snapshot_hex)
except Exception as ex:
errors.append(f"error parsing Eden snapshot ID: {ex}")
return
self._old_snapshot = self._check_commit(
errors, self._old_snapshot, "Eden's snapshot file"
)
def _check_commit(
self, errors: List[str], commit: bytes, name: str
) -> Optional[bytes]:
if self._is_commit_hash_valid(commit):
return commit
commit_hex = self._commit_hex(commit)
errors.append(f"{name} points to a bad commit: {commit_hex}")
return None
def _select_new_parents(self, errors: List[str]) -> Tuple[bytes, bytes]:
if self._old_snapshot is None and self._old_dirstate_parents is None:
last_resort = self._get_last_resort_commit()
return (last_resort, self._null_commit_id)
elif self._old_dirstate_parents is None:
assert self._old_snapshot is not None # to make mypy happy
return (self._old_snapshot, self._null_commit_id)
else:
if (
self._old_snapshot is not None
and self._old_snapshot != self._old_dirstate_parents[0]
):
p0_hex = self._commit_hex(self._old_dirstate_parents[0])
snapshot_hex = self._commit_hex(self._old_snapshot)
errors.append(
f"mercurial's parent commit is {p0_hex}, but Eden's internal "
f"parent commit is {snapshot_hex}"
)
return self._old_dirstate_parents
def repair(self) -> None:
# If the .hg directory was missing entirely check_for_error() won't have been
# called yet. Call it now to compute self._new_parents
if self._new_parents is None:
self.check_for_error()
assert self._new_parents is not None
if self._new_parents != self._old_dirstate_parents:
with self.path.open("wb") as f:
eden.dirstate.write(
f, self._new_parents, self._tuples_dict, self._copymap
)
if self._new_parents[0] != self._old_snapshot:
parents = eden_ttypes.WorkingDirectoryParents(parent1=self._new_parents[0])
if self._new_parents[1] != self._null_commit_id:
parents.parent2 = self._new_parents[1]
with self.checkout.instance.get_thrift_client() as client:
client.resetParentCommits(bytes(self.checkout.path), parents)
def _commit_hex(self, commit: bytes) -> str:
return binascii.hexlify(commit).decode("utf-8")
def _is_commit_hash_valid(self, commit_hash: bytes) -> bool:
# The null commit ID is always valid
if commit_hash == self._null_commit_id:
return True
try:
with self.checkout.instance.get_thrift_client() as client:
client.getScmStatusBetweenRevisions(
bytes(self.checkout.path), commit_hash, commit_hash
)
return True
except (TApplicationException, eden_ttypes.EdenError) as ex:
if "RepoLookupError: unknown revision" in str(ex):
return False
raise
def _get_last_resort_commit(self) -> bytes:
try:
return get_tip_commit_hash(self.checkout.path)
except Exception:
return self._null_commit_id
class HgrcChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "hgrc")
def repair(self) -> None:
hgrc_data = hg_util.get_hgrc_data(self.checkout)
self.path.write_text(hgrc_data)
class RequiresChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "requires")
def check_data(self, data: bytes) -> List[str]:
requirements = data.splitlines()
if b"eden" not in requirements:
return [".hg/requires file does not include Eden as a requirement"]
return []
def repair(self) -> None:
requires_data = hg_util.get_requires_data(self.checkout)
self.path.write_text(requires_data)
class SharedPathChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "sharedpath")
def check_data(self, data: bytes) -> List[str]:
# TODO: make sure the sharedpath file points to a valid .hg directory that
# does not use Eden itself. However, we can't fix errors about the sharedpath
# file pointing to a bad repo, so those should probably be reported as
# completely separate problems to the ProblemTracker.
#
# backing_repo = Path(os.fsdecode(data))
return []
def repair(self) -> None:
backing_hg_dir = hg_util.get_backing_hg_dir(self.checkout)
self.path.write_bytes(bytes(backing_hg_dir))
class SharedChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "shared")
def check_data(self, data: bytes) -> List[str]:
# This file normally contains "bookmarks" for most users, but it's fine
# for it to be empty if users don't want to share bookmarks.
# Therefore we don't do any other validation of the contents of this file.
return []
def repair(self) -> None:
self.path.write_text("bookmarks\n")
class BookmarksChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "bookmarks")
def repair(self) -> None:
self.path.touch()
class BranchChecker(HgFileChecker):
def __init__(self, checkout: EdenCheckout) -> None:
super().__init__(checkout, "branch")
def repair(self) -> None:
self.path.write_text("default\n")
def get_tip_commit_hash(repo: Path) -> bytes:
# Ask Mercurial for the tip commit hash; callers fall back to the null commit ID on failure.
args = ["hg", "log", "-T", "{node}", "-r", "tip"]
env = dict(os.environ, HGPLAIN="1")
result = subprocess.run(
args,
env=env,
cwd=str(repo),
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return binascii.unhexlify(result.stdout.strip())
def check_hg(tracker: ProblemTracker, checkout: EdenCheckout) -> None:
checker_classes: List[Type[HgChecker]] = [
DirstateChecker,
HgrcChecker,
RequiresChecker,
SharedPathChecker,
SharedChecker,
BookmarksChecker,
BranchChecker,
]
checkers = [checker_class(checkout) for checker_class in checker_classes]
hg_path = checkout.path / ".hg"
if not os.path.exists(hg_path):
description = f"Missing hg directory: {checkout.path}/.hg"
tracker.add_problem(HgDirectoryError(checkout, checkers, description))
return
bad_checkers: List[HgChecker] = []
for checker in checkers:
try:
if checker.check():
continue
bad_checkers.append(checker)
except Exception:
tracker.add_problem(UnexpectedCheckError())
if bad_checkers:
msg = (
f"No contents present in hg directory: {checkout.path}/.hg"
if len(bad_checkers) == len(checkers)
else None
)
tracker.add_problem(HgDirectoryError(checkout, bad_checkers, msg))
class HgDirectoryError(FixableProblem):
def __init__(
self,
checkout: EdenCheckout,
checkers: List[HgChecker],
description: Optional[str] = None,
) -> None:
self._checkout = checkout
self._checkers = checkers
self._description = description
def description(self) -> str:
if self._description is not None:
return self._description
all_errors = []
for checker in self._checkers:
all_errors.extend(checker.errors)
problems = "\n ".join(all_errors)
return (
f"Found inconsistent/missing data in {self._checkout.path}/.hg:\n "
+ problems
)
def dry_run_msg(self) -> str:
return f"Would repair hg directory contents for {self._checkout.path}"
def start_msg(self) -> str:
return f"Repairing hg directory contents for {self._checkout.path}"
def perform_fix(self) -> None:
hg_path = self._checkout.path / ".hg"
# Make sure the hg directory exists
hg_path.mkdir(exist_ok=True)
for checker in self._checkers:
checker.repair()
```
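`check_hg()` runs every checker against a checkout's `.hg` directory and reports a single `HgDirectoryError` covering whatever failed; fixing that problem calls each failing checker's `repair()`. A sketch of a standalone driver, assuming the `ProblemTracker`/`description()` interface used by the tests further below; the tracker subclass and checkout path here are stand-ins, not part of this module:
```python
from eden.cli.config import find_eden
from eden.cli.doctor.check_hg import check_hg
from eden.cli.doctor.problem import ProblemBase, ProblemTracker


class PrintingTracker(ProblemTracker):
    # Stand-in tracker that just prints each reported problem.
    def add_problem(self, problem: ProblemBase) -> None:
        print(problem.description())


_instance, checkout, _rel = find_eden("/data/users/me/my-checkout")  # illustrative path
if checkout is not None:
    check_hg(PrintingTracker(), checkout)
```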
#### File: cli/doctor/check_rogue_edenfs.py
```python
import shlex
from typing import List
from eden.cli import process_finder
from eden.cli.doctor.problem import Problem, ProblemSeverity, ProblemTracker
def check_many_edenfs_are_running(
tracker: ProblemTracker, process_finder: process_finder.ProcessFinder
) -> None:
rogue_pids_list = process_finder.find_rogue_pids()
if len(rogue_pids_list) > 0:
rogue_pids_problem = ManyEdenFsRunning(rogue_pids_list)
tracker.add_problem(rogue_pids_problem)
class ManyEdenFsRunning(Problem):
_rogue_pids_list: List[process_finder.ProcessID]
def __init__(self, rogue_pids_list: List[process_finder.ProcessID]) -> None:
self._rogue_pids_list = sorted(rogue_pids_list)
self.set_manual_remediation_message()
def description(self) -> str:
return (
"Many edenfs processes are running. "
"Please keep only one for each config directory."
)
def severity(self) -> ProblemSeverity:
return ProblemSeverity.ADVICE
def set_manual_remediation_message(self) -> None:
if self._rogue_pids_list is not None:
kill_command = ["kill", "-9"]
kill_command.extend(map(str, self._rogue_pids_list))
self._remediation = " ".join(map(shlex.quote, kill_command))
```
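The manual remediation built above is just a `kill -9` command listing the rogue PIDs in sorted order, quoted for copy-pasting into a shell. The formatting logic is small enough to show in isolation (the PIDs here are made up):
```python
import shlex

rogue_pids = [4189, 105, 877]  # invented example PIDs
kill_command = ["kill", "-9"] + [str(pid) for pid in sorted(rogue_pids)]
print(" ".join(map(shlex.quote, kill_command)))  # prints: kill -9 105 877 4189
```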
#### File: doctor/test/disk_usage_test.py
```python
import collections
import typing
from typing import List, Optional
from unittest.mock import patch
import eden.cli.doctor as doctor
from eden.cli.config import EdenInstance
from eden.cli.doctor.problem import ProblemBase, ProblemTracker
from eden.cli.doctor.test.lib.fake_eden_instance import FakeEdenInstance
from eden.cli.doctor.test.lib.testcase import DoctorTestBase
class DiskUsageTest(DoctorTestBase):
def _mock_disk_usage(self, blocks, avail, frsize=1024) -> None:
"""Mock test for disk usage."""
mock_statvfs_patcher = patch("eden.cli.doctor.os.statvfs")
mock_statvfs = mock_statvfs_patcher.start()
self.addCleanup(mock_statvfs_patcher.stop)
statvfs_tuple = collections.namedtuple("statvfs", "f_blocks f_bavail f_frsize")
mock_statvfs.return_value = statvfs_tuple(blocks, avail, frsize)
mock_getmountpt_and_deviceid_patcher = patch(
"eden.cli.doctor.check_filesystems.get_mountpt"
)
mock_getmountpt_and_deviceid = mock_getmountpt_and_deviceid_patcher.start()
self.addCleanup(mock_getmountpt_and_deviceid_patcher.stop)
mock_getmountpt_and_deviceid.return_value = "/"
def _check_disk_usage(
self, instance: Optional[FakeEdenInstance] = None
) -> List[ProblemBase]:
problem_collector = ProblemCollector()
if instance is None:
instance = FakeEdenInstance(self.make_temporary_directory())
doctor.check_filesystems.check_disk_usage(
tracker=problem_collector,
mount_paths=["/"],
instance=typing.cast(EdenInstance, instance),
)
return problem_collector.problems
def test_low_free_absolute_disk_is_major(self):
self._mock_disk_usage(blocks=100000000, avail=500000)
problems = self._check_disk_usage()
self.assertEqual(
problems[0].description(),
"/ has only 512000000 bytes available. "
"Eden lazily loads your files and needs enough disk "
"space to store these files when loaded.",
)
self.assertEqual(problems[0].severity(), doctor.ProblemSeverity.ERROR)
def test_low_percentage_free_but_high_absolute_free_disk_is_minor(self):
self._mock_disk_usage(blocks=100000000, avail=2000000)
problems = self._check_disk_usage()
self.assertEqual(
problems[0].description(),
"/ is 98.00% full. "
"Eden lazily loads your files and needs enough disk "
"space to store these files when loaded.",
)
self.assertEqual(problems[0].severity(), doctor.ProblemSeverity.ADVICE)
def test_high_percentage_free_but_small_disk_is_major(self):
self._mock_disk_usage(blocks=800000, avail=500000)
problems = self._check_disk_usage()
self.assertEqual(
problems[0].description(),
"/ has only 512000000 bytes available. "
"Eden lazily loads your files and needs enough disk "
"space to store these files when loaded.",
)
self.assertEqual(problems[0].severity(), doctor.ProblemSeverity.ERROR)
def test_disk_usage_normal(self):
self._mock_disk_usage(blocks=100000000, avail=50000000)
problems = self._check_disk_usage()
self.assertEqual(len(problems), 0)
def test_issue_includes_custom_message_from_config(self) -> None:
self._mock_disk_usage(blocks=100000000, avail=500000)
instance = FakeEdenInstance(
self.make_temporary_directory(),
config={
"doctor.low-disk-space-message": "Ask your administrator for help."
},
)
problems = self._check_disk_usage(instance=instance)
self.assertEqual(
problems[0].description(),
"/ has only 512000000 bytes available. "
"Eden lazily loads your files and needs enough disk "
"space to store these files when loaded. Ask your administrator "
"for help.",
)
self._mock_disk_usage(blocks=100000000, avail=2000000)
instance = FakeEdenInstance(
self.make_temporary_directory(),
config={
"doctor.low-disk-space-message": "Ask your administrator for help."
},
)
problems = self._check_disk_usage(instance=instance)
self.assertEqual(
problems[0].description(),
"/ is 98.00% full. "
"Eden lazily loads your files and needs enough disk "
"space to store these files when loaded. Ask your administrator "
"for help.",
)
class ProblemCollector(ProblemTracker):
problems: List[ProblemBase]
def __init__(self) -> None:
super().__init__()
self.problems = []
def add_problem(self, problem: ProblemBase) -> None:
self.problems.append(problem)
```
#### File: test/lib/fake_process_finder.py
```python
import errno
import os
from pathlib import Path
from typing import Dict, List, Union
from eden.cli import process_finder
class FakeProcessFinder(process_finder.LinuxProcessFinder):
def __init__(self, tmp_dir: str) -> None:
self.proc_path = Path(tmp_dir)
self._file_contents: Dict[Path, Union[bytes, Exception]] = {}
def add_process(self, pid: int, cmdline: List[str]) -> None:
pid_dir = self.proc_path / str(pid)
pid_dir.mkdir()
command = os.path.basename(cmdline[0])
(pid_dir / "comm").write_bytes(command.encode("utf-8") + b"\n")
cmdline_bytes = b"".join((arg.encode("utf-8") + b"\0") for arg in cmdline)
(pid_dir / "cmdline").write_bytes(cmdline_bytes)
def add_edenfs(self, pid: int, eden_dir: str, set_lockfile: bool = True) -> None:
if set_lockfile:
self.set_file_contents(Path(eden_dir) / "lock", f"{pid}\n".encode("utf-8"))
cmdline = [
"/usr/bin/edenfs",
"--edenfs",
"--edenDir",
eden_dir,
"--etcEdenDir",
"/etc/eden",
"--configPath",
"/home/user/.edenrc",
]
self.add_process(pid, cmdline)
def set_file_contents(self, path: Union[Path, str], contents: bytes) -> None:
self._file_contents[Path(path)] = contents
def set_file_exception(self, path: Union[Path, str], exception: Exception) -> None:
self._file_contents[Path(path)] = exception
def read_lock_file(self, path: Path) -> bytes:
contents = self._file_contents.get(path, None)
if contents is None:
raise FileNotFoundError(errno.ENOENT, str(path))
if isinstance(contents, Exception):
raise contents
return contents
```
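`FakeProcessFinder` fabricates `/proc`-style `comm` and `cmdline` entries under a temporary directory so doctor checks can be exercised without spawning real processes. A small illustrative setup, using the module path that the test helpers below import from; the PIDs and state directory are invented:
```python
import tempfile

from eden.cli.doctor.test.lib.fake_process_finder import FakeProcessFinder

with tempfile.TemporaryDirectory() as tmp_dir:
    finder = FakeProcessFinder(tmp_dir)
    # One edenfs process recorded in the state directory's lock file, and a
    # second one claiming the same state directory without holding the lock.
    finder.add_edenfs(1234, "/home/user/.eden")
    finder.add_edenfs(5678, "/home/user/.eden", set_lockfile=False)
```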
#### File: test/lib/testcase.py
```python
import binascii
import unittest
from typing import Tuple
import eden.cli.doctor as doctor
import eden.dirstate
from eden.cli.config import EdenCheckout
from eden.cli.test.lib.output import TestOutput
from eden.test_support.temporary_directory import TemporaryDirectoryMixin
from .fake_process_finder import FakeProcessFinder
class DoctorTestBase(unittest.TestCase, TemporaryDirectoryMixin):
def create_fixer(self, dry_run: bool) -> Tuple[doctor.ProblemFixer, TestOutput]:
out = TestOutput()
if not dry_run:
fixer = doctor.ProblemFixer(out)
else:
fixer = doctor.DryRunFixer(out)
return fixer, out
def assert_results(
self,
fixer: doctor.ProblemFixer,
num_problems: int = 0,
num_fixed_problems: int = 0,
num_failed_fixes: int = 0,
num_manual_fixes: int = 0,
) -> None:
self.assertEqual(num_problems, fixer.num_problems)
self.assertEqual(num_fixed_problems, fixer.num_fixed_problems)
self.assertEqual(num_failed_fixes, fixer.num_failed_fixes)
self.assertEqual(num_manual_fixes, fixer.num_manual_fixes)
def assert_dirstate_p0(self, checkout: EdenCheckout, commit: str) -> None:
dirstate_path = checkout.path / ".hg" / "dirstate"
with dirstate_path.open("rb") as f:
parents, _tuples_dict, _copymap = eden.dirstate.read(f, str(dirstate_path))
self.assertEqual(binascii.hexlify(parents[0]).decode("utf-8"), commit)
def make_process_finder(self) -> FakeProcessFinder:
return FakeProcessFinder(self.make_temporary_directory())
```
#### File: eden/cli/overlay.py
```python
import contextlib
import errno
import fcntl
import logging
import os
import shutil
import stat
import struct
import tempfile
import time
import typing
from pathlib import Path
from typing import BinaryIO, Iterator, Optional, Tuple
from facebook.eden.overlay.ttypes import OverlayDir, OverlayEntry
class InvalidOverlayFile(Exception):
pass
class NoSuchOverlayFile(Exception):
def __init__(self, inode_number: int) -> None:
super().__init__(f"inode {inode_number} is not materialized in the overlay")
self.inode_number = inode_number
class InodeLookupError(Exception):
def __init__(self, msg: str, errnum: int) -> None:
super().__init__(msg)
self.errno = errnum
class OverlayHeader:
LENGTH = 64
VERSION_1 = 1
TYPE_DIR = b"OVDR"
TYPE_FILE = b"OVFL"
STRUCT_FORMAT = ">4sIQQQQQQ8s"
@classmethod
def parse(cls, data: bytes, type: Optional[bytes] = None) -> "OverlayHeader":
# A 0-length file is somewhat common on unclean reboot,
# so use a separate exception message for this case.
if len(data) == 0:
raise InvalidOverlayFile("zero-sized overlay file")
if len(data) < cls.LENGTH:
raise InvalidOverlayFile(
"overlay file is too short to contain a header: length={len(data)}"
)
(
header_id,
version,
atime_sec,
atime_nsec,
ctime_sec,
ctime_nsec,
mtime_sec,
mtime_nsec,
padding,
) = struct.unpack(cls.STRUCT_FORMAT, data)
if header_id not in (cls.TYPE_DIR, cls.TYPE_FILE):
raise InvalidOverlayFile(
"overlay file is too short to contain a header: length={len(data)}"
)
if version != cls.VERSION_1:
raise InvalidOverlayFile(f"unsupported overlay file version {version}")
return OverlayHeader(
header_id,
version,
atime_sec,
atime_nsec,
ctime_sec,
ctime_nsec,
mtime_sec,
mtime_nsec,
)
def __init__(
self,
type: bytes,
version: int,
atime_sec: int = 0,
atime_nsec: int = 0,
ctime_sec: int = 0,
ctime_nsec: int = 0,
mtime_sec: int = 0,
mtime_nsec: int = 0,
padding: bytes = b"\0\0\0\0\0\0\0\0",
) -> None:
self.type = type
self.version = version
self.atime_sec = atime_sec
self.atime_nsec = atime_nsec
self.ctime_sec = ctime_sec
self.ctime_nsec = ctime_nsec
self.mtime_sec = mtime_sec
self.mtime_nsec = mtime_nsec
self.padding = padding
@property
def atime(self) -> float:
return self.atime_sec + (self.atime_nsec / 1000000000.0)
@atime.setter
def atime(self, value: float) -> None:
self.atime_sec = int(value)
self.atime_nsec = int((value - self.atime_sec) * 1000000000)
@property
def ctime(self) -> float:
return self.ctime_sec + (self.ctime_nsec / 1000000000.0)
@ctime.setter
def ctime(self, value: float) -> None:
self.ctime_sec = int(value)
self.ctime_nsec = int((value - self.ctime_sec) * 1000000000)
@property
def mtime(self) -> float:
return self.mtime_sec + (self.mtime_nsec / 1000000000.0)
@mtime.setter
def mtime(self, value: float) -> None:
self.mtime_sec = int(value)
self.mtime_nsec = int((value - self.mtime_sec) * 1000000000)
def serialize(self) -> bytes:
return struct.pack(
self.STRUCT_FORMAT,
self.type,
self.version,
self.atime_sec,
self.atime_nsec,
self.ctime_sec,
self.ctime_nsec,
self.mtime_sec,
self.mtime_nsec,
self.padding,
)
class Overlay:
ROOT_INODE_NUMBER = 1
NEXT_INODE_NUMBER_PATH = "next-inode-number"
def __init__(self, path: str) -> None:
self.path = path
@contextlib.contextmanager
def try_lock(self) -> Iterator[bool]:
info_path = os.path.join(self.path, "info")
try:
lock_file = open(info_path, "rb")
except OSError:
yield False
return
try:
fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
yield True
except OSError:
yield False
finally:
# Release the lock once the yield returns
lock_file.close()
def get_path(self, inode_number: int) -> str:
dir_name = "{:02x}".format(inode_number % 256)
return os.path.join(self.path, dir_name, str(inode_number))
def open_overlay_file(self, inode_number: int) -> BinaryIO:
try:
return typing.cast(BinaryIO, open(self.get_path(inode_number), "rb"))
except OSError as ex:
if ex.errno == errno.ENOENT:
raise NoSuchOverlayFile(inode_number)
raise
def read_header(self, f: BinaryIO) -> OverlayHeader:
data = f.read(OverlayHeader.LENGTH)
return OverlayHeader.parse(data)
def check_header(
self, f: BinaryIO, inode_number: int, expected_type: bytes
) -> OverlayHeader:
data = f.read(OverlayHeader.LENGTH)
header = OverlayHeader.parse(data)
if header.type != expected_type:
raise InvalidOverlayFile(
f"unexpected type for inode {inode_number} in overlay: "
f"expected {expected_type!r} but found {header.type!r}"
)
return header
def read_dir_inode(self, inode_number: int) -> OverlayDir:
return self.read_dir_inode_tuple(inode_number)[1]
def read_dir_inode_tuple(
self, inode_number: int
) -> Tuple[OverlayHeader, OverlayDir]:
with self.open_overlay_file(inode_number) as f:
header = self.check_header(f, inode_number, OverlayHeader.TYPE_DIR)
data = f.read()
return (header, self.parse_dir_inode_data(data))
def parse_dir_inode_data(self, data: bytes) -> OverlayDir:
from thrift.util import Serializer
from thrift.protocol import TCompactProtocol
# Initialize entries to the empty dictionary.
# This value will be used if the serialized data does not have any value
# for this field.
tree_data = OverlayDir(entries={})
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
Serializer.deserialize(protocol_factory, data, tree_data)
return tree_data
def open_file_inode(self, inode_number: int) -> BinaryIO:
return self.open_file_inode_tuple(inode_number)[1]
def open_file_inode_tuple(
self, inode_number: int
) -> Tuple[OverlayHeader, BinaryIO]:
"""Open the overlay file for the specified inode number.
Returns the header information and a file object opened to the start of the
file inode contents.
"""
f = self.open_overlay_file(inode_number)
try:
header = self.check_header(f, inode_number, OverlayHeader.TYPE_FILE)
except Exception:
f.close()
raise
return (header, f)
def lookup_path(self, path: Path) -> Optional[int]:
"""
Lookup a path in the overlay.
Returns the inode number corresponding to the path, if the path is materialized.
- If an inode number is found for this path, returns the inode number.
- If one of the parent directories is not materialized, returns None.
Without checking the source control data we cannot tell if this logical path
exists or not.
- If this path or one of its parent directories does not exist throws an
InodeLookupError
May throw other exceptions on error.
"""
assert not path.is_absolute()
if not path.parts:
return self.ROOT_INODE_NUMBER
parent_inode_number = self.ROOT_INODE_NUMBER
index = 0
while True:
parent_dir = self.read_dir_inode(parent_inode_number)
desired = path.parts[index]
index += 1
entries = [] if parent_dir.entries is None else parent_dir.entries.items()
entry: Optional[OverlayEntry] = None
            for name, entry in entries:  # noqa: ignore=B007
                if name == desired:
                    break
            else:
                # No entry matched the desired name.
                entry = None
            if entry is None:
raise InodeLookupError(f"{path} does not exist", errno.ENOENT)
if index >= len(path.parts):
return entry.inodeNumber
if entry.mode is None or stat.S_IFMT(entry.mode) != stat.S_IFDIR:
non_dir_path = os.path.sep.join(path.parts[:index])
raise InodeLookupError(
f"error looking up {path}: {non_dir_path} is not a directory",
errno.ENOTDIR,
)
if entry.hash:
# This directory along the chain is not materialized
return None
parent_inode_number = entry.inodeNumber
def extract_file(
self, inode_number: int, output_path: Path, mode: int, remove: bool = False
) -> None:
"""Copy the specified file inode out of the overlay.
If remove=True the data for this inode will be removed from the overlay after it
has been extracted.
"""
with self.open_overlay_file(inode_number) as inf:
header = self.read_header(inf)
if header.type != OverlayHeader.TYPE_FILE:
raise Exception(
f"expected inode {inode_number} to be a regular file; "
f"found unexpected type {header.type!r}"
)
output_path.parent.mkdir(parents=True, exist_ok=True)
file_type = stat.S_IFMT(mode)
if file_type == stat.S_IFLNK:
contents = inf.read()
os.symlink(contents, bytes(output_path))
elif file_type == stat.S_IFREG:
with output_path.open("wb") as outf:
shutil.copyfileobj(inf, outf) # type: ignore
# Note: the file permissions bits are now stored in the inode table
# rather than the overlay. The mode bits in the overlay will
# reflect the correct file type only. Always extract orphan inodes
# with permissions 0o600 (read+write to owner only).
os.fchmod(outf.fileno(), 0o600)
else:
# We don't copy out sockets, fifos, or other unusual file types.
# These shouldn't have any actual file contents anyway.
logging.debug(
f"skipping inode {inode_number} at {output_path} with "
f"unsupported file type {file_type:#o}"
)
        if remove:
            path = Path(self.get_path(inode_number))
            path.unlink()
def extract_dir(
self, inode_number: int, output_path: Path, remove: bool = False
) -> None:
"""Recursively copy the specified directory inode out of the overlay.
All of its materialized children will be copied out. Children that still have
the same contents as a committed source control object will not be copied out.
If remove=True the data for the extracted inodes will be removed from the
overlay after they have been extracted.
"""
data = self.read_dir_inode(inode_number)
for name, entry in data.entries.items():
overlay_path = Path(self.get_path(entry.inodeNumber))
if not overlay_path.exists():
# Skip children that do not exist in the overlay.
# Note that we explicitly check for existence of the child even if
# entry.hash is set (i.e., if the inode is not materialized):
#
# - Non-materialized directories can have data in the overlay if they
# contain allocated inode numbers. We still recurse into the
# directory in this case. This makes sure we remove the overlay files
# when remove=True, and also ensures that we will find any
# materialized file data inside this subdirectory if Eden crashed in
# the middle of trying to materialize a file but before it marked the
# parent directories materialized.
# - Even for files we can have the same race on crash: eden may have
# crashed while materializing a file before it could mark the parent
# directories materialized. (In theory the file contents should still
# be the same as the source control state in this case, but it seems
# better to err on the safe side and extract it anyway.)
continue
entry_output_path = output_path.joinpath(name)
file_type = stat.S_IFMT(entry.mode)
if file_type == stat.S_IFDIR:
self.extract_dir(entry.inodeNumber, entry_output_path, remove=remove)
else:
self.extract_file(
entry.inodeNumber, entry_output_path, entry.mode, remove=remove
)
if remove:
path = Path(self.get_path(inode_number))
path.unlink()
def write_empty_file(self, inode_number: int) -> None:
self._write_inode(inode_number, OverlayHeader.TYPE_FILE, b"")
def write_empty_dir(self, inode_number: int) -> None:
from thrift.util import Serializer
from thrift.protocol import TCompactProtocol
empty_tree = OverlayDir()
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
contents = typing.cast(
bytes, Serializer.serialize(protocol_factory, empty_tree)
)
self._write_inode(inode_number, OverlayHeader.TYPE_DIR, contents)
def _write_inode(self, inode_number: int, header_type: bytes, body: bytes) -> None:
now_sec = int(time.time())
header = OverlayHeader(
type=header_type,
version=OverlayHeader.VERSION_1,
atime_sec=now_sec,
mtime_sec=now_sec,
ctime_sec=now_sec,
)
header_data = header.serialize()
path = Path(self.get_path(inode_number))
path.write_bytes(header_data + body)
def read_next_inode_number(self) -> Optional[int]:
file_path = os.path.join(self.path, self.NEXT_INODE_NUMBER_PATH)
try:
with open(file_path, "rb") as f:
data = f.read()
except FileNotFoundError:
return None
if len(data) != 8:
raise Exception(
f"invalid data in {self.NEXT_INODE_NUMBER_PATH} file: "
f"expected file to contain 8 bytes, but is {len(data)} "
f"bytes"
)
return struct.unpack("@Q", data)[0]
def write_next_inode_number(self, next_inode: int) -> None:
contents = struct.pack("@Q", next_inode)
file_path = os.path.join(self.path, self.NEXT_INODE_NUMBER_PATH)
fd, tmp_path = tempfile.mkstemp(
prefix=self.NEXT_INODE_NUMBER_PATH, dir=self.path
)
try:
os.write(fd, contents)
os.fdatasync(fd)
os.fchmod(fd, 0o644)
os.rename(tmp_path, file_path)
except Exception:
try:
os.unlink(tmp_path)
except Exception:
pass
            raise
        finally:
            # Always close the file descriptor returned by mkstemp().
            os.close(fd)
```
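
A minimal usage sketch for the `Overlay` class above: recovering one materialized file from an unmounted checkout. The state-directory path, the relative file path, and the import location are assumptions for illustration; the calls themselves (`lookup_path`, `extract_file`) are the ones defined in the file.

```python
# Hypothetical recovery sketch (paths and import location assumed): copy a
# single materialized file out of an Eden overlay directory without mounting
# the checkout, using only the Overlay API shown above.
import stat
from pathlib import Path

from eden.cli.overlay import InodeLookupError, Overlay  # import path assumed

overlay = Overlay("/home/alice/.eden/clients/fbsource/local")

try:
    # lookup_path() takes a relative path; it returns None when a parent
    # directory is not materialized, and raises InodeLookupError when the
    # path does not exist in the overlay at all.
    inode = overlay.lookup_path(Path("buck-out/log/build.trace"))
except InodeLookupError as ex:
    print(f"lookup failed: {ex} (errno={ex.errno})")
else:
    if inode is not None:
        # The overlay stores only the file type in the mode bits, so pass the
        # type we expect; extract_file() recreates the file with mode 0o600.
        overlay.extract_file(inode, Path("/tmp/recovered/build.trace"), stat.S_IFREG)
```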
#### File: eden/cli/rage.py
```python
import getpass
import io
import socket
import subprocess
import traceback
from pathlib import Path
from typing import IO
from . import (
debug as debug_mod,
doctor as doctor_mod,
filesystem,
mtab,
process_finder,
stats as stats_mod,
ui as ui_mod,
)
from .config import EdenInstance
def print_diagnostic_info(instance: EdenInstance, out: IO[bytes]) -> None:
out.write(b"User : %s\n" % getpass.getuser().encode())
out.write(b"Hostname : %s\n" % socket.gethostname().encode())
print_rpm_version(out)
health_status = instance.check_health()
if health_status.is_healthy():
out.write(b"\n")
debug_mod.do_buildinfo(instance, out)
out.write(b"uptime: ")
debug_mod.do_uptime(instance, out)
print_eden_doctor_report(instance, out)
print_tail_of_log_file(instance.get_log_path(), out)
print_running_eden_process(out)
if health_status.is_healthy() and health_status.pid is not None:
print_edenfs_process_tree(health_status.pid, out)
out.write(b"\nList of mount points:\n")
mountpoint_paths = []
for key in sorted(instance.get_mount_paths()):
key_bytes = key.encode()
        out.write(key_bytes)
        out.write(b"\n")
mountpoint_paths.append(key_bytes)
for key, val in instance.get_all_client_config_info().items():
out.write(b"\nMount point info for path %s:\n" % key.encode())
for k, v in val.items():
out.write("{:>10} : {}\n".format(k, v).encode())
if health_status.is_healthy():
with io.StringIO() as stats_stream:
stats_mod.do_stats_general(instance, out=stats_stream)
out.write(stats_stream.getvalue().encode())
def print_rpm_version(out: IO[bytes]) -> None:
try:
queryformat = "%{VERSION}"
output = subprocess.check_output(["rpm", "-q", "--qf", queryformat, "fb-eden"])
out.write(b"Rpm Version : %s\n" % output)
except Exception as e:
out.write(b"Error getting the Rpm version : %s\n" % str(e).encode())
def print_eden_doctor_report(instance: EdenInstance, out: IO[bytes]) -> None:
dry_run = True
doctor_output = io.StringIO()
try:
doctor_rc = doctor_mod.cure_what_ails_you(
instance=instance,
dry_run=dry_run,
mount_table=mtab.new(),
fs_util=filesystem.LinuxFsUtil(),
process_finder=process_finder.new(),
out=ui_mod.PlainOutput(doctor_output),
)
out.write(
b"\neden doctor --dry-run (exit code %d):\n%s\n"
% (doctor_rc, doctor_output.getvalue().encode())
)
except Exception:
out.write(b"\nUnexpected exception thrown while running eden doctor checks:\n")
out.write(traceback.format_exc().encode("utf-8") + b"\n")
def print_tail_of_log_file(path: Path, out: IO[bytes]) -> None:
try:
out.write(b"\nMost recent Eden logs:\n")
LOG_AMOUNT = 20 * 1024
with path.open("rb") as logfile:
size = logfile.seek(0, io.SEEK_END)
logfile.seek(max(0, size - LOG_AMOUNT), io.SEEK_SET)
data = logfile.read()
out.write(data)
except Exception as e:
out.write(b"Error reading the log file: %s\n" % str(e).encode())
def print_running_eden_process(out: IO[bytes]) -> None:
try:
out.write(b"\nList of running Eden processes:\n")
output = subprocess.check_output(
["ps", "-eo", "pid,ppid,comm,start_time,etime"]
)
output = output.decode()
lines = output.split("\n")
format_str = "{:>20} {:>20} {:>10} {:>20} {:>20}\n"
out.write(
format_str.format(
"Pid", "PPid", "Command", "Start Time", "Elapsed Time"
).encode()
)
for line in lines:
if "edenfs" in line:
word = line.split()
out.write(format_str.format(*word).encode())
except Exception as e:
out.write(b"Error getting the eden processes: %s\n" % str(e).encode())
def print_edenfs_process_tree(pid: int, out: IO[bytes]) -> None:
try:
out.write(b"\nedenfs process tree:\n")
output = subprocess.check_output(
["ps", "f", "-o", "pid,s,comm,start_time,etime,cputime,drs", "-s", str(pid)]
)
out.write(output)
except Exception as e:
out.write(b"Error getting edenfs process tree: %s\n" % str(e).encode())
```
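
The log-tailing helper above avoids reading the whole log by seeking to `size - LOG_AMOUNT` before reading. A standalone sketch of that pattern follows; the log path is made up for illustration.

```python
# Standalone sketch of the "read only the last N bytes" pattern used by
# print_tail_of_log_file() above. The path is hypothetical.
import io


def tail_bytes(path: str, amount: int = 20 * 1024) -> bytes:
    with open(path, "rb") as f:
        size = f.seek(0, io.SEEK_END)  # seek() returns the new offset, i.e. the file size
        f.seek(max(0, size - amount), io.SEEK_SET)
        return f.read()


if __name__ == "__main__":
    print(tail_bytes("/var/log/edenfs/edenfs.log").decode("utf-8", errors="replace"))
```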
#### File: eden/cli/stats_print.py
```python
from typing import TextIO
def write_heading(heading: str, out: TextIO) -> None:
border = "*" * len(heading)
out.write(_center_strip_right(border, 80))
out.write(_center_strip_right(heading, 80))
out.write(_center_strip_right(border, 80) + "\n")
def write_mem_status_table(fuse_counters, out: TextIO) -> None:
format_str = "{:>40} {:^1} {:<20}"
keys = [
"memory_free",
"memory_free_percent",
"memory_usage",
"memory_usage_percent",
]
for key in keys:
if key.endswith("_percent"):
value = "%d%s" % (fuse_counters[key], "%")
else:
value = "%f(GB)" % (fuse_counters[key] / (10 ** 6))
centered_text = format_str.format(key.replace("_", " "), ":", value)
out.write(centered_text.rstrip() + "\n")
LATENCY_FORMAT_STR = "{:<12} {:^4} {:^10} {:>10} {:>15} {:>10} {:>10}\n"
# Prints one operation's latency rows: the average and the 50th, 90th, and 99th percentiles.
def write_latency_record(operation: str, matrix, out: TextIO) -> None:
border = "-" * 80
percentile = {0: "avg", 1: "p50", 2: "p90", 3: "p99"}
for i in range(len(percentile)):
operation_name = ""
if i == int(len(percentile) / 2):
operation_name = operation
out.write(
LATENCY_FORMAT_STR.format(
operation_name,
"|",
percentile[i],
matrix[i][0],
matrix[i][1],
matrix[i][2],
matrix[i][3],
)
)
out.write(border + "\n")
def write_latency_table(table, out: TextIO) -> None:
out.write(
LATENCY_FORMAT_STR.format(
"SystemCall",
"|",
"Percentile",
"Last Minute",
"Last 10 Minutes",
"Last Hour",
"All Time",
)
)
border = "-" * 80
out.write(border + "\n")
for key in table:
write_latency_record(key, table[key], out)
def write_table(table, heading: str, out: TextIO) -> None:
key_width = max([len(heading)] + list(map(len, table.keys()))) + 2
format_str = "{:<{}}{:>15}{:>15}{:>15}{:>15}\n"
out.write(
format_str.format(
heading, key_width, "Last Minute", "Last 10m", "Last Hour", "All Time"
)
)
border = "-" * (key_width + 60)
out.write(border + "\n")
for key in table:
value = table[key]
out.write(
format_str.format(key, key_width, value[0], value[1], value[2], value[3])
)
def _center_strip_right(text: str, width: int) -> str:
"""Returns a string with sufficient leading whitespace such that `text`
would be centered within the specified `width` plus a trailing newline."""
space = (width - len(text)) // 2
return space * " " + text + "\n"
def format_size(size: int) -> str:
if size > 1000000000:
return "{:.1f} GB".format(size / 1000000000)
if size > 1000000:
return "{:.1f} MB".format(size / 1000000)
if size > 1000:
return "{:.1f} KB".format(size / 1000)
if size > 0:
return "{} B".format(size)
return "0"
```
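
The table helpers above can be exercised on their own. Here is a small illustration with fabricated numbers; the import path is assumed from the file layout.

```python
# Fabricated data to show the expected shapes: write_table() takes a mapping of
# row name -> [last minute, last 10 minutes, last hour, all time].
import sys

from eden.cli.stats_print import format_size, write_heading, write_table  # path assumed

write_heading("Example I/O counts", sys.stdout)
write_table(
    {"read": [12, 110, 560, 120000], "write": [3, 40, 210, 56000]},
    "SystemCall",
    sys.stdout,
)

print(format_size(123_456_789))  # -> 123.5 MB
print(format_size(512))          # -> 512 B
```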
#### File: eden/cli/stats.py
```python
import argparse
import io
import logging
import os
import sys
import textwrap
from typing import Dict, List, Optional, cast
from . import cmd_util, stats_print, subcmd as subcmd_mod
from .config import EdenInstance
from .subcmd import Subcmd
stats_cmd = subcmd_mod.Decorator()
log = logging.getLogger("eden.cli.stats")
DiagInfoCounters = Dict[str, int]
Table = Dict[str, List[int]]
Table2D = Dict[str, List[List[Optional[str]]]]
# TODO: https://github.com/python/typeshed/issues/1240
stdoutWrapper = cast(io.TextIOWrapper, sys.stdout)
# Shows general information such as memory usage, the list of mount points, and
# the number of loaded, unloaded, and materialized inodes in each mount point.
def do_stats_general(
instance: EdenInstance, out: io.TextIOWrapper = stdoutWrapper
) -> None:
with instance.get_thrift_client() as client:
stat_info = client.getStatInfo()
private_bytes = stats_print.format_size(stat_info.privateBytes)
resident_bytes = stats_print.format_size(stat_info.vmRSSBytes)
if stat_info.blobCacheStats is not None:
blob_cache_size = stats_print.format_size(
stat_info.blobCacheStats.totalSizeInBytes
)
blob_cache_entry_count = stat_info.blobCacheStats.entryCount
else:
blob_cache_size = None
blob_cache_entry_count = None
out.write(
textwrap.dedent(
f"""\
edenfs memory usage
▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔
private bytes: {private_bytes} ({resident_bytes} resident)
"""
)
)
if blob_cache_size is not None and blob_cache_entry_count is not None:
out.write(f"blob cache: {blob_cache_size} in {blob_cache_entry_count} blobs\n")
out.write(
textwrap.dedent(
f"""\
active mounts
▔▔▔▔▔▔▔▔▔▔▔▔▔
"""
)
)
inode_info = stat_info.mountPointInfo
for key in inode_info:
info = inode_info[key]
mount_path = os.fsdecode(key)
in_memory = info.loadedInodeCount
files = info.loadedFileCount
trees = info.loadedTreeCount
if stat_info.mountPointJournalInfo is None:
journal = None
else:
journal = stat_info.mountPointJournalInfo.get(key)
if journal is None:
journalLine = ""
else:
entries = journal.entryCount
mem = journal.memoryUsage
journalLine = (
f"- Journal entry count: {entries} "
f"(memory usage: {stats_print.format_size(mem)})\n"
)
out.write(
textwrap.dedent(
f"""\
{mount_path}
- Inodes in memory: {in_memory} ({trees} trees, {files} files)
- Unloaded, tracked inodes: {info.unloadedInodeCount}
- Loaded and materialized inodes: {info.materializedInodeCount}
{journalLine}
"""
)
)
@stats_cmd("memory", "Show memory statistics for Eden")
class MemoryCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
stats_print.write_heading("Memory Stats for EdenFS", out)
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
diag_info = client.getStatInfo()
stats_print.write_mem_status_table(diag_info.counters, out)
# print memory counters
heading = "Average values of Memory usage and availability"
out.write("\n\n %s \n\n" % heading.center(80, " "))
mem_counters = get_memory_counters(diag_info.counters)
stats_print.write_table(mem_counters, "", out)
return 0
# Returns all the memory counters in ServiceData in a table format.
def get_memory_counters(counters: DiagInfoCounters) -> Table:
table: Table = {}
index = {"60": 0, "600": 1, "3600": 2}
for key in counters:
if key.startswith("memory") and key.find(".") != -1:
tokens = key.split(".")
memKey = tokens[0].replace("_", " ")
if memKey not in table.keys():
table[memKey] = [0, 0, 0, 0]
if len(tokens) == 2:
table[memKey][3] = counters[key]
else:
table[memKey][index[tokens[2]]] = counters[key]
return table
@stats_cmd("io", "Show information about the number of I/O calls")
class IoCmd(Subcmd):
def setup_parser(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-A",
"--all",
action="store_true",
default=False,
help="Show status for all the system calls",
)
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
stats_print.write_heading("Counts of I/O operations performed in EdenFs", out)
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
diag_info = client.getStatInfo()
            # If --all was passed, args.all is True and every system call is reported.
fuse_counters = get_fuse_counters(diag_info.counters, args.all)
stats_print.write_table(fuse_counters, "SystemCall", out)
return 0
# Filters the FUSE counters out of the full ServiceData counter set and returns
# them in a printable table. If all_flg is True, counters for every system call
# are returned; otherwise only the frequently used I/O system calls listed in
# `syscalls` below are included.
def get_fuse_counters(counters: DiagInfoCounters, all_flg: bool) -> Table:
table: Table = {}
index = {"60": 0, "600": 1, "3600": 2}
    # Frequently used I/O system calls; when all_flg is False, only these are reported.
syscalls = [
"open",
"read",
"write",
"symlink",
"readlink",
"mkdir",
"mknod",
"opendir",
"readdir",
"rmdir",
]
for key in counters:
if key.startswith("fuse") and key.find(".count") >= 0:
tokens = key.split(".")
syscall = tokens[1][:-3] # _us
if not all_flg and syscall not in syscalls:
continue
if syscall not in table.keys():
table[syscall] = [0, 0, 0, 0]
if len(tokens) == 3:
table[syscall][3] = int(counters[key])
else:
table[syscall][index[tokens[3]]] = int(counters[key])
return table
def insert_latency_record(
table: Table2D, value: int, operation: str, percentile: str, period: Optional[str]
) -> None:
period_table = {"60": 0, "600": 1, "3600": 2}
percentile_table = {"avg": 0, "p50": 1, "p90": 2, "p99": 3}
def with_microsecond_units(i: int) -> str:
if i:
return str(i) + " \u03BCs" # mu for micro
else:
return str(i) + " "
if operation not in table.keys():
# pyre-ignore[6]: T38220626
        table[operation] = [
            # One row per percentile; one column per time window plus "all time".
            ["" for _ in range(len(period_table) + 1)]
            for _ in range(len(percentile_table))
        ]
pct_index = percentile_table[percentile]
if period:
period_index = period_table[period]
else:
period_index = len(period_table)
table[operation][pct_index][period_index] = with_microsecond_units(value)
@stats_cmd("latency", "Show information about the latency of I/O calls")
class LatencyCmd(Subcmd):
def setup_parser(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"-A",
"--all",
action="store_true",
default=False,
help="Show status for all the system calls",
)
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
diag_info = client.getStatInfo()
table = get_fuse_latency(diag_info.counters, args.all)
stats_print.write_heading(
"Latencies of I/O operations performed in EdenFs", out
)
stats_print.write_latency_table(table, out)
return 0
# Returns the FUSE latency information from ServiceData in table form.
# If all_flg is True, latencies for every system call are returned; otherwise
# only the frequently used I/O system calls listed in `syscalls` are included.
def get_fuse_latency(counters: DiagInfoCounters, all_flg: bool) -> Table2D:
table: Table2D = {}
syscalls = [
"open",
"read",
"write",
"symlink",
"readlink",
"mkdir",
"mknod",
"opendir",
"readdir",
"rmdir",
]
for key in counters:
if key.startswith("fuse") and key.find(".count") == -1:
tokens = key.split(".")
syscall = tokens[1][:-3]
if not all_flg and syscall not in syscalls:
continue
percentile = tokens[2]
period = None
if len(tokens) > 3:
period = tokens[3]
insert_latency_record(table, counters[key], syscall, percentile, period)
return table
@stats_cmd(
"hgimporter",
"Show the number of requests to hg-debugedenimporthelper",
aliases=[
"debugedenimporthelper",
"hg-debugedenimporthelper",
"hg",
"hg-import",
"hg-importer",
"hgimport",
],
)
class HgImporterCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
stats_print.write_heading(
"Counts of HgImporter requests performed in EdenFS", out
)
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
counters = client.getCounters()
hg_importer_counters = get_hg_importer_counters(counters)
stats_print.write_table(hg_importer_counters, "HgImporter Request", out)
return 0
def get_hg_importer_counters(counters: DiagInfoCounters) -> Table:
zero = [0, 0, 0, 0]
table: Table = {
"cat_file": zero,
"fetch_tree": zero,
"manifest": zero,
"manifest_node_for_commit": zero,
"prefetch_files": zero,
}
for key in counters:
segments = key.split(".")
if (
len(segments) == 3
and segments[0] == "hg_importer"
and segments[2] == "count"
):
call_name = segments[1]
last_minute = counters[key + ".60"]
last_10_minutes = counters[key + ".600"]
last_hour = counters[key + ".3600"]
all_time = counters[key]
table[call_name] = [last_minute, last_10_minutes, last_hour, all_time]
return table
@stats_cmd("thrift", "Show the number of received thrift calls")
class ThriftCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
stats_print.write_heading("Counts of Thrift calls performed in EdenFs", out)
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
counters = client.getCounters()
thrift_counters = get_thrift_counters(counters)
stats_print.write_table(thrift_counters, "Thrift Call", out)
return 0
def get_thrift_counters(counters: DiagInfoCounters) -> Table:
table: Table = {}
for key in counters:
segments = key.split(".")
if (
len(segments) == 5
and segments[:2] == ["thrift", "EdenService"]
and segments[-2:] == ["num_calls", "sum"]
):
call_name = segments[2]
last_minute = counters[key + ".60"]
last_10_minutes = counters[key + ".600"]
last_hour = counters[key + ".3600"]
all_time = counters[key]
table[call_name] = [last_minute, last_10_minutes, last_hour, all_time]
return table
@stats_cmd("thrift-latency", "Show the latency of received thrift calls")
class ThriftLatencyCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
out = sys.stdout
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
diag_info = client.getStatInfo()
table = get_thrift_latency(diag_info.counters)
stats_print.write_heading(
"Latency of Thrift processing time performed in EdenFs", out
)
stats_print.write_latency_table(table, out)
return 0
def get_thrift_latency(counters: DiagInfoCounters) -> Table2D:
table: Table2D = {}
for key in counters:
if key.startswith("thrift.EdenService.") and key.find("time_process_us") != -1:
tokens = key.split(".")
if len(tokens) < 5:
continue
method = tokens[2]
percentile = tokens[4]
period = None
if len(tokens) > 5:
period = tokens[5]
insert_latency_record(table, counters[key], method, percentile, period)
return table
@stats_cmd("hg-latency", "Show the latency of hg backing store")
class HgBackingStoreLatencyCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
return backing_store_latency("hg", args)
@stats_cmd("mononoke", "Show the latency of mononoke backing store")
class MononokeBackingStoreLatencyCmd(Subcmd):
def run(self, args: argparse.Namespace) -> int:
return backing_store_latency("mononoke", args)
def backing_store_latency(store: str, args: argparse.Namespace) -> int:
out = sys.stdout
instance = cmd_util.get_eden_instance(args)
with instance.get_thrift_client() as client:
diag_info = client.getStatInfo()
table = get_store_latency(diag_info.counters, store)
stats_print.write_heading(
"Latency of {} backing store operations in EdenFs".format(store), out
)
stats_print.write_latency_table(table, out)
return 0
def get_store_latency(counters: DiagInfoCounters, store: str) -> Table2D:
table: Table2D = {}
for key in counters:
if key.startswith("store.{}".format(store)) and key.find(".count") == -1:
tokens = key.split(".")
method = tokens[2]
percentile = tokens[3]
period = None
if len(tokens) > 4:
period = tokens[4]
insert_latency_record(table, counters[key], method, percentile, period)
return table
class StatsCmd(Subcmd):
NAME = "stats"
HELP = "Prints statistics information for eden"
def setup_parser(self, parser: argparse.ArgumentParser) -> None:
self.add_subcommands(parser, stats_cmd.commands)
def run(self, args: argparse.Namespace) -> int:
instance = cmd_util.get_eden_instance(args)
do_stats_general(instance)
return 0
```
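
To make the counter-folding logic above concrete, here is a tiny example with made-up counter keys (real edenfs counter names may differ, and the import path is assumed); it shows how `get_memory_counters()` maps the `.60`/`.600`/`.3600` suffixes onto the four table columns.

```python
# Made-up counter keys; the counter names exported by a real edenfs may differ.
from eden.cli.stats import get_memory_counters  # import path assumed

counters = {
    "memory_free.avg": 1000,      # no time-window suffix -> "all time" column
    "memory_free.avg.60": 900,    # last minute
    "memory_free.avg.600": 950,   # last 10 minutes
    "memory_free.avg.3600": 975,  # last hour
}
print(get_memory_counters(counters))
# -> {'memory free': [900, 950, 975, 1000]}
```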
#### File: cli/test/config_test.py
```python
import configparser
import io
import os
import unittest
from pathlib import Path
import toml
import toml.decoder
from eden.test_support.environment_variable import EnvironmentVariableMixin
from eden.test_support.temporary_directory import TemporaryDirectoryMixin
from .. import config as config_mod, configutil, util
from ..config import EdenInstance
from ..configinterpolator import EdenConfigInterpolator
from ..configutil import EdenConfigParser, UnexpectedType
def get_toml_test_file_invalid():
cfg_file = """
[core thisIsNotAllowed]
"""
return cfg_file
def get_toml_test_file_defaults():
cfg_file = """
[core]
systemIgnoreFile = "/etc/eden/gitignore"
ignoreFile = "/home/${USER}/.gitignore"
[clone]
default-revision = "master"
[rage]
reporter = 'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/'
"""
return cfg_file
def get_toml_test_file_fbsource_repo():
cfg_file = """
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out"
buck-out = "buck-out"
"""
return cfg_file
def get_toml_test_file_user_rc():
cfg_file = """
[core]
ignoreFile = "/home/${USER}/.gitignore-override"
edenDirectory = "/home/${USER}/.eden"
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource-override"
["bindmounts fbsource"]
fbcode-buck-out = "fbcode/buck-out-override"
["repository git"]
type = "git"
path = "/home/${USER}/src/git/.git"
"""
return cfg_file
class TomlConfigTest(
unittest.TestCase, TemporaryDirectoryMixin, EnvironmentVariableMixin
):
def setUp(self) -> None:
self._test_dir = self.make_temporary_directory()
self._user = "bob"
self._state_dir = os.path.join(self._test_dir, ".eden")
self._etc_eden_dir = os.path.join(self._test_dir, "etc/eden")
self._config_d = os.path.join(self._test_dir, "etc/eden/config.d")
self._home_dir = os.path.join(self._test_dir, "home", self._user)
self._interpolate_dict = {
"USER": self._user,
"USER_ID": "42",
"HOME": self._home_dir,
}
os.mkdir(self._state_dir)
util.mkdir_p(self._config_d)
util.mkdir_p(self._home_dir)
self.unset_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD")
def copy_config_files(self) -> None:
path = os.path.join(self._config_d, "defaults.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_defaults())
path = os.path.join(self._config_d, "fbsource.repo.toml")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_fbsource_repo())
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(get_toml_test_file_user_rc())
def assert_core_config(self, cfg: EdenInstance) -> None:
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore-override",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
self.assertEqual(
cfg.get_config_value("core.edenDirectory", default=""),
f"/home/{self._user}/.eden",
)
def assert_git_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("git")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/home/{self._user}/src/git/.git"))
self.assertEqual(cc.scm_type, "git")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def assert_fbsource_repo_config(self, cfg: EdenInstance) -> None:
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(
cc.backing_repo, Path(f"/data/users/{self._user}/fbsource-override")
)
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out-override", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_load_config(self) -> None:
self.copy_config_files()
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
        # Verify the list of rc files that were loaded and their order.
exp_rc_files = [
Path(self._config_d) / "defaults.toml",
Path(self._config_d) / "fbsource.repo.toml",
Path(self._home_dir) / ".edenrc",
]
self.assertEqual(cfg.get_rc_files(), exp_rc_files)
def test_no_dot_edenrc(self) -> None:
self.copy_config_files()
os.remove(os.path.join(self._home_dir, ".edenrc"))
cfg = self.get_config()
cfg._loadConfig()
exp_repos = ["fbsource"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assertEqual(
cfg.get_config_value("rage.reporter", default=""),
'arc paste --title "eden rage from $(hostname)" --conduit-uri=https://phabricator.intern.facebook.com/api/',
)
self.assertEqual(
cfg.get_config_value("core.ignoreFile", default=""),
f"/home/{self._user}/.gitignore",
)
self.assertEqual(
cfg.get_config_value("core.systemIgnoreFile", default=""),
"/etc/eden/gitignore",
)
cc = cfg.find_config_for_alias("fbsource")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbsource"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(
cc.bind_mounts,
{"fbcode-buck-out": "fbcode/buck-out", "buck-out": "buck-out"},
)
self.assertEqual(cc.default_revision, "master")
def test_add_existing_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
with self.assertRaisesRegex(
config_mod.UsageError,
"repository fbsource already exists. You will need to edit "
"the ~/.edenrc config file by hand to make changes to the "
"repository or remove it.",
):
cfg.add_repository("fbsource", "hg", f"/data/users/{self._user}/fbsource")
def test_add_repo(self) -> None:
self.copy_config_files()
cfg = self.get_config()
cfg.add_repository("fbandroid", "hg", f"/data/users/{self._user}/fbandroid")
# Lets reload our config
cfg = self.get_config()
# Check the various config sections
self.assert_core_config(cfg)
exp_repos = ["fbandroid", "fbsource", "git"]
self.assertEqual(cfg.get_repository_list(), exp_repos)
self.assert_fbsource_repo_config(cfg)
self.assert_git_repo_config(cfg)
# Check the newly added repo
cc = cfg.find_config_for_alias("fbandroid")
assert cc is not None
self.assertEqual(cc.backing_repo, Path(f"/data/users/{self._user}/fbandroid"))
self.assertEqual(cc.scm_type, "hg")
self.assertEqual(cc.bind_mounts, {})
self.assertEqual(cc.default_revision, "master")
def test_missing_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_invalid_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "invalidrepotype"
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" has unsupported type.'
)
def test_empty_type_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = ""
path = "/tmp/myrepo"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "type".'
)
def test_missing_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_empty_path_option_in_repository_is_an_error(self) -> None:
self.write_user_config(
"""
["repository myrepo"]
type = "hg"
path = ""
"""
)
with self.assertRaises(Exception) as expectation:
cfg = self.get_config()
cfg.find_config_for_alias("myrepo")
self.assertEqual(
str(expectation.exception), 'repository "myrepo" missing key "path".'
)
def test_toml_error(self) -> None:
self.copy_config_files()
self.write_user_config(get_toml_test_file_invalid())
cfg = self.get_config()
with self.assertRaises(toml.decoder.TomlDecodeError):
cfg._loadConfig()
def test_get_config_value_returns_default_if_section_is_missing(self) -> None:
self.assertEqual(
self.get_config().get_config_value(
"missing_section.test_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_default_if_option_is_missing(self) -> None:
self.write_user_config(
"""[test_section]
other_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.missing_option", default="test default"
),
"test default",
)
def test_get_config_value_returns_value_for_string_option(self) -> None:
self.write_user_config(
"""[test_section]
test_option = "test value"
"""
)
self.assertEqual(
self.get_config().get_config_value(
"test_section.test_option", default="test default"
),
"test value",
)
def test_experimental_systemd_is_disabled_by_default(self) -> None:
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_environment_variable(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_is_enabled_with_user_config_setting(self) -> None:
self.write_user_config(
"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
def test_experimental_systemd_environment_variable_overrides_config(self) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "1")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "0")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_empty_experimental_systemd_environment_variable_does_not_override_config(
self
) -> None:
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = true
"""
)
self.assertTrue(self.get_config().should_use_experimental_systemd_mode())
self.set_environment_variable("EDEN_EXPERIMENTAL_SYSTEMD", "")
self.write_user_config(
f"""[service]
experimental_systemd = false
"""
)
self.assertFalse(self.get_config().should_use_experimental_systemd_mode())
def test_user_id_variable_is_set_to_process_uid(self) -> None:
config = self.get_config_without_stub_variables()
self.write_user_config(
"""
[testsection]
testoption = "My user ID is ${USER_ID}."
"""
)
self.assertEqual(
config.get_config_value("testsection.testoption", default=""),
f"My user ID is {os.getuid()}.",
)
def test_default_fallback_systemd_xdg_runtime_dir_is_run_user_uid(self) -> None:
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/run/user/42"
)
def test_configured_fallback_systemd_xdg_runtime_dir_expands_user_and_user_id(
self
) -> None:
self.write_user_config(
"""
[service]
fallback_systemd_xdg_runtime_dir = "/var/run/${USER}/${USER_ID}"
"""
)
self.assertEqual(
self.get_config().get_fallback_systemd_xdg_runtime_dir(), "/var/run/bob/42"
)
def test_printed_config_is_valid_toml(self) -> None:
self.write_user_config(
"""
[clone]
default-revision = "master"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
printed_config.seek(0)
parsed_config = toml.load(printed_config)
self.assertIn("clone", parsed_config)
self.assertEqual(parsed_config["clone"].get("default-revision"), "master")
def test_printed_config_expands_variables(self) -> None:
self.write_user_config(
"""
["repository fbsource"]
type = "hg"
path = "/data/users/${USER}/fbsource"
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertIn("/data/users/bob/fbsource", printed_config.getvalue())
def test_printed_config_writes_booleans_as_booleans(self) -> None:
self.write_user_config(
"""
[service]
experimental_systemd = true
"""
)
printed_config = io.StringIO()
self.get_config().print_full_config(file=printed_config)
self.assertRegex(printed_config.getvalue(), r"experimental_systemd\s*=\s*true")
def get_config(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, self._interpolate_dict
)
def get_config_without_stub_variables(self) -> EdenInstance:
return EdenInstance(
self._state_dir, self._etc_eden_dir, self._home_dir, interpolate_dict=None
)
def write_user_config(self, content: str) -> None:
path = os.path.join(self._home_dir, ".edenrc")
with open(path, "w") as text_file:
text_file.write(content)
class EdenConfigParserTest(unittest.TestCase):
unsupported_value = {"dict of string to string": ""}
def test_loading_config_with_unsupported_type_is_not_an_error(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
def test_querying_bool_returns_bool(self) -> None:
for value in [True, False]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
self.assertEqual(
parser.get_bool("test_section", "test_option", default=True), value
)
self.assertEqual(
parser.get_bool("test_section", "test_option", default=False), value
)
def test_querying_bool_with_non_boolean_value_fails(self) -> None:
for value in ["not a boolean", "", "true", "True", 0]:
with self.subTest(value=value):
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_bool_with_value_of_unsupported_type_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_bool("test_section", "test_option", default=False)
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, bool)
def test_querying_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": True}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_str("test_section", "test_option", default="")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, True)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_returns_mapping(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value", "b": "b value"}})
section = parser.get_section_str_to_str("test_section")
self.assertCountEqual(section, {"a", "b"})
self.assertEqual(section["a"], "a value")
self.assertEqual(section["b"], "b value")
def test_querying_section_str_to_any_fails_if_option_has_unsupported_type(
self
) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"unsupported": self.unsupported_value}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_any("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "unsupported")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, self.unsupported_value)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertIsNone(expectation.exception.expected_type)
def test_querying_section_str_to_any_interpolates_options(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict({"test_section": {"test_option": "hello ${USER}"}})
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section.get("test_option"), "hello alice")
def test_querying_section_str_to_any_returns_any_supported_type(self) -> None:
parser = EdenConfigParser()
parser.read_dict(
{
"test_section": {
"bool_option": True,
"string_array_option": ["hello", "world"],
"string_option": "hello",
}
}
)
section = parser.get_section_str_to_any("test_section")
self.assertEqual(section["bool_option"], True)
self.assertEqual(list(section["string_array_option"]), ["hello", "world"])
self.assertEqual(section["string_option"], "hello")
def test_querying_section_str_to_str_with_non_string_value_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": False}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_section_str_to_str("test_section")
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "a")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, False)
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, str)
def test_querying_section_str_to_str_of_missing_section_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"a": "a value"}})
with self.assertRaises(configparser.NoSectionError) as expectation:
parser.get_section_str_to_str("not_test_section")
section: str = expectation.exception.section # type: ignore
self.assertEqual(section, "not_test_section")
def test_querying_strs_with_empty_array_returns_empty_sequence(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "test_option", default=["default value"]
)
),
[],
)
def test_querying_strs_with_array_of_strings_returns_strs(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": ["first", "second", "3rd"]}})
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["first", "second", "3rd"],
)
def test_querying_strs_with_array_of_non_strings_fails(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"test_option": [123]}})
with self.assertRaises(UnexpectedType) as expectation:
parser.get_strs("test_section", "test_option", default=[])
# pyre-fixme[16]: `_E` has no attribute `section`.
self.assertEqual(expectation.exception.section, "test_section")
# pyre-fixme[16]: `_E` has no attribute `option`.
self.assertEqual(expectation.exception.option, "test_option")
# pyre-fixme[16]: `_E` has no attribute `value`.
self.assertEqual(expectation.exception.value, [123])
# pyre-fixme[16]: `_E` has no attribute `expected_type`.
self.assertEqual(expectation.exception.expected_type, configutil.Strs)
def test_querying_missing_value_as_strs_returns_default(self) -> None:
parser = EdenConfigParser()
parser.read_dict({"test_section": {"bogus_option": []}})
self.assertEqual(
list(
parser.get_strs(
"test_section", "missing_option", default=["default value"]
)
),
["default value"],
)
def test_str_sequences_are_interpolated(self) -> None:
parser = EdenConfigParser(
interpolation=EdenConfigInterpolator({"USER": "alice"})
)
parser.read_dict(
{
"test_section": {
"test_option": ["sudo", "-u", "${USER}", "echo", "Hello, ${USER}!"]
}
}
)
self.assertEqual(
list(parser.get_strs("test_section", "test_option", default=[])),
["sudo", "-u", "alice", "echo", "Hello, alice!"],
)
def test_unexpected_type_error_messages_are_helpful(self) -> None:
self.assertEqual(
'Expected boolean for service.experimental_systemd, but got string: "true"',
str(
UnexpectedType(
section="service",
option="experimental_systemd",
value="true",
expected_type=bool,
)
),
)
self.assertEqual(
"Expected string for repository myrepo.path, but got boolean: true",
str(
UnexpectedType(
section="repository myrepo",
option="path",
value=True,
expected_type=str,
)
),
)
self.assertRegex(
str(
UnexpectedType(
section="section", option="option", value={}, expected_type=None
)
),
r"^Unexpected dict for section.option: \{\s*\}$",
)
self.assertEqual(
"Expected array of strings for service.command, but got array: [ 123,]",
str(
UnexpectedType(
section="service",
option="command",
value=[123],
expected_type=configutil.Strs,
)
),
)
```
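
Condensing the parser behaviour exercised by the tests above into one sketch (import paths assumed from the file layout): values keep their TOML types, and `${VAR}` placeholders are interpolated when sections are read back.

```python
# Condensed from the behaviour exercised by EdenConfigParserTest above.
from eden.cli.configinterpolator import EdenConfigInterpolator  # path assumed
from eden.cli.configutil import EdenConfigParser  # path assumed

parser = EdenConfigParser(interpolation=EdenConfigInterpolator({"USER": "alice"}))
parser.read_dict(
    {
        "service": {"experimental_systemd": True},
        "repository fbsource": {"type": "hg", "path": "/data/users/${USER}/fbsource"},
    }
)

assert parser.get_bool("service", "experimental_systemd", default=False) is True
repo = parser.get_section_str_to_any("repository fbsource")
assert repo["path"] == "/data/users/alice/fbsource"  # ${USER} interpolated
```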
#### File: cli/test/interp_test.py
```python
import configparser
import unittest
from .. import configinterpolator
class InterpolatorTest(unittest.TestCase):
def test_basic_subs(self):
defaults = {"USER": "wez", "RECURSIVE": "a${RECURSIVE}b"}
parser = configparser.ConfigParser(
interpolation=configinterpolator.EdenConfigInterpolator(defaults)
)
parser.add_section("section")
parser.set("section", "user", "${USER}")
parser.set("section", "rec", "${RECURSIVE}")
parser.set("section", "simple", "value")
self.assertEqual("wez", parser.get("section", "user"))
self.assertEqual("value", parser.get("section", "simple"))
self.assertEqual("a${RECURSIVE}b", parser.get("section", "rec"))
actual = {}
for section in parser.sections():
actual[section] = dict(parser.items(section))
expect = {
"section": {"user": "wez", "simple": "value", "rec": "a${RECURSIVE}b"}
}
self.assertEqual(expect, actual)
```
#### File: cli/test/tabulate_test.py
```python
import unittest
from eden.cli.tabulate import tabulate
eol = ""
class TabulateTest(unittest.TestCase):
def test_tabulate(self):
output = tabulate(
["a", "b", "c"],
rows=[
{"a": "a_1", "b": "b_1", "c": "see_1"},
{"a": "a_two", "b": "b_2", "c": "c_2"},
],
)
self.assertEqual(
output,
f"""\
A B C {eol}
a_1 b_1 see_1{eol}
a_two b_2 c_2 {eol}""",
)
def test_tabulate_header_labels(self):
output = tabulate(
["a", "b", "c"],
rows=[
{"a": "a_1", "b": "b_1", "c": "see_1"},
{"a": "a_two", "b": "b_2", "c": "c_2"},
],
header_labels={
"a": "Col1",
"b": "bee",
# omitting c so that we can test defaulting
},
)
self.assertEqual(
output,
f"""\
Col1 bee C {eol}
a_1 b_1 see_1{eol}
a_two b_2 c_2 {eol}""",
)
```
#### File: fs/service/client.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from typing import Any, Optional, cast # noqa: F401
from facebook.eden import EdenService
from thrift.protocol.THeaderProtocol import THeaderProtocol
from thrift.Thrift import TApplicationException
from thrift.transport.THeaderTransport import THeaderTransport
from thrift.transport.TTransport import TTransportException
from .windows_thrift import EdenTSocket
SOCKET_PATH = "socket"
class EdenNotRunningError(Exception):
def __init__(self, eden_dir):
# type: (str) -> None
msg = "edenfs daemon does not appear to be running: tried %s" % eden_dir
super(EdenNotRunningError, self).__init__(msg)
self.eden_dir = eden_dir
# Monkey-patch EdenService.EdenError's __str__() behavior to just return the
# error message. By default it returns the same data as __repr__(), which is
# ugly to show to users.
def _eden_thrift_error_str(ex):
# type: (EdenService.EdenError) -> str
return ex.message
# TODO: https://github.com/python/mypy/issues/2427
cast(Any, EdenService.EdenError).__str__ = _eden_thrift_error_str
class EdenClient(EdenService.Client):
"""
EdenClient is a subclass of EdenService.Client that provides
a few additional conveniences:
- Smarter constructor
- Implement the context manager __enter__ and __exit__ methods, so it can
be used in with statements.
"""
def __init__(self, eden_dir=None, socket_path=None):
# type: (Optional[str], Optional[str]) -> None
if socket_path is not None:
self._socket_path = socket_path
elif eden_dir is not None:
self._socket_path = os.path.join(eden_dir, SOCKET_PATH)
else:
raise TypeError("one of eden_dir or socket_path is required")
self._socket = EdenTSocket(unix_socket=self._socket_path)
        # We used to set a timeout here, but picking the right duration is hard,
        # and retrying an arbitrary thrift call is not necessarily safe. So we
        # just leave the client with no timeout.
# self.set_timeout(60)
self.set_timeout(None)
transport = THeaderTransport(self._socket)
self._transport = transport # type: Optional[THeaderTransport]
self._protocol = THeaderProtocol(transport)
super(EdenClient, self).__init__(self._protocol)
def __enter__(self):
# type: () -> EdenClient
self.open()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# type: (Any, Any, Any) -> Optional[bool]
self.close()
return False
def open(self):
# type: () -> None
try:
assert self._transport is not None
self._transport.open()
except TTransportException as ex:
self.close()
if ex.type == TTransportException.NOT_OPEN:
raise EdenNotRunningError(self._socket_path)
raise
def close(self):
# type: () -> None
if self._transport is not None:
self._transport.close()
self._transport = None
def shutdown(self):
# type: () -> None
self.initiateShutdown(
"EdenClient.shutdown() invoked with no reason by pid=%s uid=%s"
% (os.getpid(), os.getuid())
)
def initiateShutdown(self, reason):
# type: (str) -> None
"""Helper for stopping the server.
To swing through the transition from calling the base shutdown() method
with context to the initiateShutdown() method with a reason, we want to
try the latter method first, falling back to the old way to handle the
case where we deploy a newer client while an older server is still
running on the local system."""
try:
super().initiateShutdown(reason)
except TApplicationException as ex:
if ex.type == TApplicationException.UNKNOWN_METHOD:
# Running an older server build, fall back to the old shutdown
# method with no context
super().shutdown()
else:
raise
def set_timeout(self, timeout):
# type: (Optional[float]) -> None
if timeout is None:
timeout_ms = None
else:
timeout_ms = timeout * 1000
self.set_timeout_ms(timeout_ms)
def set_timeout_ms(self, timeout_ms):
# type: (Optional[float]) -> None
self._socket.setTimeout(timeout_ms)
def create_thrift_client(eden_dir=None, socket_path=None):
# type: (Optional[str], Optional[str]) -> EdenClient
"""Construct a thrift client to speak to the running eden server
instance associated with the specified mount point.
@return Returns a context manager for EdenService.Client.
"""
return EdenClient(eden_dir=eden_dir, socket_path=socket_path)
```
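
A hedged usage sketch for the client above: it assumes a running edenfs instance and a hypothetical state directory, and uses `getCounters()`, the same thrift call the stats commands rely on elsewhere in the CLI.

```python
# Hypothetical usage; requires a running edenfs whose state directory is the
# path below. getCounters() is the thrift call also used by the stats commands.
from eden.fs.service.client import EdenNotRunningError, create_thrift_client  # path assumed

try:
    with create_thrift_client(eden_dir="/home/alice/local/.eden") as client:
        counters = client.getCounters()
        print(f"edenfs reported {len(counters)} counters")
except EdenNotRunningError as ex:
    print(f"edenfs does not appear to be running: {ex}")
```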
#### File: eden/integration/chown_test.py
```python
import grp
import os
import pwd
from .lib import repobase, testcase
class ChownTest(testcase.EdenRepoTest):
nobody_uid: int
nobody_gid: int
def populate_repo(self) -> None:
self.repo.write_file("README.md", "tbd\n")
self.repo.write_file("proj/src/main.c", "int main() { return 0; }\n")
self.repo.write_file("proj/src/lib.c", "void foo() {}\n")
self.repo.write_file("proj/src/include/lib.h", "#pragma once\nvoid foo();\n")
self.repo.write_file(
"proj/test/test.sh", "#!/bin/bash\necho test\n", mode=0o755
)
self.repo.write_file("doc/foo.txt", "foo\n")
self.repo.write_file("doc/bar.txt", "bar\n")
self.repo.symlink("proj/doc", "../doc")
self.repo.commit("Initial commit.")
def create_repo(self, name: str) -> repobase.Repository:
return self.create_hg_repo("main")
def setup_eden_test(self) -> None:
super().setup_eden_test()
self.nobody_uid = pwd.getpwnam("nobody").pw_uid
self.nobody_gid = grp.getgrnam("nobody").gr_gid
def assert_path(self, path: str):
stat = os.lstat(path)
self.assertEqual(
stat.st_uid,
self.nobody_uid,
f"{stat.st_uid} uid does not match expected \
{self.nobody_uid} for path {path}",
)
self.assertEqual(
stat.st_gid,
self.nobody_gid,
f"{stat.st_gid} gid does not match expected \
{self.nobody_gid} for path {path}",
)
def assert_chown_worked(self, mount: str) -> None:
for root, dirs, files in os.walk(mount, followlinks=False):
# Avoid checking anything in .eden since the
# symlinks don't have o+r permissions
if root.endswith(".eden"):
continue
for d in dirs:
self.assert_path(os.path.join(root, d))
for f in files:
self.assert_path(os.path.join(root, f))
def run_chown(self, mount: str) -> None:
self.eden.run_cmd("chown", mount, str(self.nobody_uid), str(self.nobody_gid))
def test_chown(self) -> None:
self.run_chown(self.mount)
self.assert_chown_worked(self.mount)
def test_chown_with_overlay(self) -> None:
with open(os.path.join(self.mount, "notinrepo"), "w") as f:
f.write("created\n")
self.run_chown(self.mount)
self.assert_chown_worked(self.mount)
def test_chown_with_bindmount(self) -> None:
edenrc = os.path.join(os.environ["HOME"], ".edenrc")
with open(edenrc, "w") as f:
f.write(
"""\
["repository {repo_name}"]
path = "{repo_path}"
type = "{repo_type}"
["bindmounts {repo_name}"]
buck-out = "buck-out"
""".format(
repo_name=self.repo_name,
repo_path=self.repo.get_canonical_root(),
repo_type=self.repo.get_type(),
)
)
basename = "eden_mount"
tmp_mount = os.path.join(self.tmp_dir, basename)
self.eden.run_cmd("clone", self.repo_name, tmp_mount)
with open(os.path.join(tmp_mount, "buck-out", "bindmountedfile"), "w") as f:
f.write("created\n")
self.run_chown(tmp_mount)
self.assert_chown_worked(tmp_mount)
```
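The assertions in `ChownTest` amount to an `lstat`-based ownership sweep over the mount. The standalone sketch below restates that check outside the test harness; the function name and boolean return are illustrative, and like the test it skips `.eden` and does not follow symlinks.
```python
import os


def tree_owned_by(root: str, uid: int, gid: int) -> bool:
    """Return True if every entry under root (skipping .eden) is owned by uid/gid."""
    for dirpath, dirnames, filenames in os.walk(root, followlinks=False):
        if dirpath.endswith(".eden"):
            continue
        for name in dirnames + filenames:
            st = os.lstat(os.path.join(dirpath, name))
            if st.st_uid != uid or st.st_gid != gid:
                return False
    return True
```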
#### File: eden/integration/glob_test.py
```python
from typing import List, Optional
from facebook.eden.ttypes import EdenError, GlobParams
from .lib import testcase
@testcase.eden_repo_test
class GlobTest(testcase.EdenRepoTest):
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.write_file("adir/file", "foo!\n")
self.repo.write_file("bdir/file", "bar!\n")
self.repo.write_file("bdir/otherfile", "foo!\n")
self.repo.symlink("slink", "hello")
self.repo.write_file("cdir/subdir/new.txt", "and improved")
self.repo.write_file("ddir/notdotfile", "")
self.repo.write_file("ddir/subdir/notdotfile", "")
self.repo.write_file("ddir/subdir/.dotfile", "")
self.repo.write_file("java/com/example/package.html", "")
self.repo.write_file("java/com/example/Example.java", "")
self.repo.write_file("java/com/example/foo/Foo.java", "")
self.repo.write_file("java/com/example/foo/bar/Bar.java", "")
self.repo.write_file("java/com/example/foo/bar/baz/Baz.java", "")
self.repo.commit("Commit 1.")
def setUp(self) -> None:
super().setUp()
self.client = self.get_thrift_client()
self.client.open()
self.addCleanup(self.client.close)
def test_exact_path_component_match(self) -> None:
self.assert_glob(["hello"], [b"hello"])
self.assert_glob(["ddir/subdir/.dotfile"], [b"ddir/subdir/.dotfile"])
def test_wildcard_path_component_match(self) -> None:
self.assert_glob(["hel*"], [b"hello"])
self.assert_glob(["ad*"], [b"adir"])
self.assert_glob(["a*/file"], [b"adir/file"])
def test_no_accidental_substring_match(self) -> None:
self.assert_glob(["hell"], [], msg="No accidental substring match")
def test_match_all_files_in_directory(self) -> None:
self.assert_glob(["bdir/*"], [b"bdir/file", b"bdir/otherfile"])
def test_match_all_files_in_directory_with_dotfile(self) -> None:
self.assert_glob(["ddir/subdir/*"], [b"ddir/subdir/notdotfile"])
def test_overlapping_globs(self) -> None:
self.assert_glob(
["adir/*", "**/file"],
[b"adir/file", b"bdir/file"],
msg="De-duplicate results from multiple globs",
)
def test_recursive_wildcard_prefix(self) -> None:
self.assert_glob(["**/file"], [b"adir/file", b"bdir/file"])
def test_recursive_wildcard_suffix(self) -> None:
self.assert_glob(["adir/**"], [b"adir/file"])
self.assert_glob(["adir/**/*"], [b"adir/file"])
def test_recursive_wildcard_suffix_with_dotfile(self) -> None:
self.assert_glob(
["ddir/**"], [b"ddir/notdotfile", b"ddir/subdir", b"ddir/subdir/notdotfile"]
)
self.assert_glob(
["ddir/**"],
[
b"ddir/subdir",
b"ddir/subdir/.dotfile",
b"ddir/notdotfile",
b"ddir/subdir/notdotfile",
],
include_dotfiles=True,
)
self.assert_glob(
["ddir/**/*"],
[b"ddir/notdotfile", b"ddir/subdir", b"ddir/subdir/notdotfile"],
)
self.assert_glob(
["ddir/**/*"],
[
b"ddir/subdir",
b"ddir/subdir/.dotfile",
b"ddir/notdotfile",
b"ddir/subdir/notdotfile",
],
include_dotfiles=True,
)
def test_qualified_recursive_wildcard(self) -> None:
self.assert_glob(
["java/com/**/*.java"],
[
b"java/com/example/Example.java",
b"java/com/example/foo/Foo.java",
b"java/com/example/foo/bar/Bar.java",
b"java/com/example/foo/bar/baz/Baz.java",
],
)
self.assert_glob(
["java/com/example/*/*.java"], [b"java/com/example/foo/Foo.java"]
)
def test_malformed_query(self) -> None:
with self.assertRaises(EdenError) as ctx:
self.client.glob(self.mount_path_bytes, ["adir["])
self.assertIn("unterminated bracket sequence", str(ctx.exception))
with self.assertRaises(EdenError) as ctx:
self.client.globFiles(GlobParams(self.mount_path_bytes, ["adir["], True))
self.assertIn("unterminated bracket sequence", str(ctx.exception))
def assert_glob(
self,
globs: List[str],
expected_matches: List[bytes],
include_dotfiles: bool = False,
msg: Optional[str] = None,
) -> None:
params = GlobParams(self.mount_path_bytes, globs, include_dotfiles)
self.assertCountEqual(
expected_matches, self.client.globFiles(params).matchingFiles, msg=msg
)
# Also verify behavior of legacy Thrift API.
if include_dotfiles:
self.assertCountEqual(
expected_matches,
self.client.glob(self.mount_path_bytes, globs),
msg=msg,
)
```
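Outside the `assert_glob` helper, the newer Thrift API is driven exactly as the test does: build a `GlobParams` (mount path, patterns, include_dotfiles) and read `matchingFiles` off the result. The sketch below assumes `client` and `mount_path_bytes` come from the surrounding test harness (`get_thrift_client()` / `mount_path_bytes`); the function name is illustrative.
```python
from typing import List

from facebook.eden.ttypes import GlobParams


def java_sources(client, mount_path_bytes: bytes) -> List[bytes]:
    # The third positional argument is include_dotfiles, as in assert_glob above.
    params = GlobParams(mount_path_bytes, ["java/com/**/*.java"], False)
    return client.globFiles(params).matchingFiles
```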
#### File: integration/hg/debug_hg_get_dirstate_tuple_test.py
```python
from textwrap import dedent
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test
class DebugHgGetDirstateTupleTest(EdenHgTestCase):
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("hello", "hola\n")
repo.write_file("dir/file", "blah\n")
repo.commit("Initial commit.")
def test_get_dirstate_tuple_normal_file(self) -> None:
output = self.eden.run_cmd(
"debug", "hg_get_dirstate_tuple", self.get_path("hello")
)
expected = dedent(
"""\
hello
status = Normal
mode = 0o100644
mergeState = NotApplicable
"""
)
self.assertEqual(expected, output)
```
#### File: integration/hg/files_test.py
```python
import os
import subprocess
from typing import List
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test
class FilesTest(EdenHgTestCase):
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("README.md", "docs\n")
repo.write_file("LICENSE", "legal legal\n")
repo.write_file("src/main.cpp", "code\n")
repo.write_file("src/lib.cpp", "more code\n")
repo.write_file("src/stuff.cpp", "more code\n")
repo.write_file("src/util.py", "utils\n")
repo.write_file("src/lib/module.cpp", "module\n")
repo.write_file("src/lib/foo.cpp", "foo\n")
repo.write_file("src/include/stuff.h", "header\n")
repo.write_file("test/test1.py", "test\n")
repo.write_file("test/test2.py", "test\n")
repo.commit("Initial commit.")
def _assert_files(self, args: List[str], expected: List[str], cwd=None) -> None:
stdout = self.hg("files", *args, cwd=cwd)
results = stdout.splitlines()
# `hg files` currently produces results in sorted order,
# so we check for exact ordering here.
self.assertEqual(expected, results)
def test_all_files(self) -> None:
self._assert_files(
[],
[
"LICENSE",
"README.md",
"src/include/stuff.h",
"src/lib.cpp",
"src/lib/foo.cpp",
"src/lib/module.cpp",
"src/main.cpp",
"src/stuff.cpp",
"src/util.py",
"test/test1.py",
"test/test2.py",
],
)
def test_globs(self) -> None:
self._assert_files(
["glob:src/*.cpp"], ["src/lib.cpp", "src/main.cpp", "src/stuff.cpp"]
)
self._assert_files(
["glob:**.cpp"],
[
"src/lib.cpp",
"src/lib/foo.cpp",
"src/lib/module.cpp",
"src/main.cpp",
"src/stuff.cpp",
],
)
def test_subdirectory(self) -> None:
self._assert_files(
[],
[
"../LICENSE",
"../README.md",
"include/stuff.h",
"lib.cpp",
"lib/foo.cpp",
"lib/module.cpp",
"main.cpp",
"stuff.cpp",
"util.py",
"../test/test1.py",
"../test/test2.py",
],
cwd=os.path.join(self.repo.path, "src"),
)
self._assert_files(
["."],
[
"include/stuff.h",
"lib.cpp",
"lib/foo.cpp",
"lib/module.cpp",
"main.cpp",
"stuff.cpp",
"util.py",
],
cwd=os.path.join(self.repo.path, "src"),
)
self._assert_files(
["glob:*.cpp"],
["lib.cpp", "main.cpp", "stuff.cpp"],
cwd=os.path.join(self.repo.path, "src"),
)
def test_bad_matches(self) -> None:
# No matches at all should return 1
proc = self.repo.run_hg(
"files",
"foobar",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
)
self.assertEqual(proc.stdout, b"")
self.assertEqual(proc.stderr, b"")
self.assertEqual(proc.returncode, 1)
# Some matching and some non-matching patterns returns 0
# and does not print any diagnostics about the non-matching patterns.
proc = self.repo.run_hg(
"files",
"foobar",
"test",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False,
)
self.assertEqual(proc.stdout, b"test/test1.py\ntest/test2.py\n")
self.assertEqual(proc.stderr, b"")
self.assertEqual(proc.returncode, 0)
def test_files_with_changes(self) -> None:
self.write_file("src/new.cpp", "new file\n")
self.hg("add", "src/new.cpp")
self.hg("rm", "src/lib/foo.cpp")
self.write_file("src/untracked.cpp", "should not be included\n")
self._assert_files(
[],
[
"LICENSE",
"README.md",
"src/include/stuff.h",
"src/lib.cpp",
"src/lib/module.cpp",
"src/main.cpp",
"src/new.cpp",
"src/stuff.cpp",
"src/util.py",
"test/test1.py",
"test/test2.py",
],
)
self._assert_files(
["glob:**.cpp"],
[
"src/lib.cpp",
"src/lib/module.cpp",
"src/main.cpp",
"src/new.cpp",
"src/stuff.cpp",
],
)
```
#### File: integration/hg/histedit_test.py
```python
import os
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
from .lib.histedit_command import HisteditCommand
@hg_test
class HisteditTest(EdenHgTestCase):
_commit1: str
_commit2: str
_commit3: str
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("first", "")
self._commit1 = repo.commit("first commit")
repo.write_file("second", "")
self._commit2 = repo.commit("second commit")
repo.write_file("third", "")
self._commit3 = repo.commit("third commit")
def test_stop_at_earlier_commit_in_the_stack_without_reordering(self) -> None:
commits = self.repo.log()
self.assertEqual([self._commit1, self._commit2, self._commit3], commits)
# histedit, stopping in the middle of the stack.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.stop(self._commit2)
histedit.pick(self._commit3)
# We expect histedit to terminate with a nonzero exit code in this case.
with self.assertRaises(hgrepo.HgError) as context:
histedit.run(self)
head = self.repo.log(revset=".")[0]
expected_msg = (
"Changes committed as %s. " "You may amend the changeset now." % head[:12]
)
self.assertIn(expected_msg, str(context.exception))
# Verify the new commit stack and the histedit termination state.
# Note that the hash of commit[0] is unpredictable because Hg gives it a
# new hash in anticipation of the user amending it.
parent = self.repo.log(revset=".^")[0]
self.assertEqual(self._commit1, parent)
self.assertEqual(["first commit", "second commit"], self.repo.log("{desc}"))
# Make sure the working copy is in the expected state.
self.assert_status_empty(op="histedit")
self.assertSetEqual(
{".eden", ".hg", "first", "second"},
set(os.listdir(self.repo.get_canonical_root())),
)
self.hg("histedit", "--continue")
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_reordering_commits_without_merge_conflicts(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit2)
histedit.pick(self._commit3)
histedit.pick(self._commit1)
histedit.run(self)
self.assertEqual(
["second commit", "third commit", "first commit"], self.repo.log("{desc}")
)
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_drop_commit_without_merge_conflicts(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.drop(self._commit2)
histedit.pick(self._commit3)
histedit.run(self)
self.assertEqual(["first commit", "third commit"], self.repo.log("{desc}"))
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_roll_two_commits_into_parent(self) -> None:
self.assertEqual(
["first commit", "second commit", "third commit"], self.repo.log("{desc}")
)
# histedit, reordering the stack in a conflict-free way.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.roll(self._commit2)
histedit.roll(self._commit3)
histedit.run(self)
self.assertEqual(["first commit"], self.repo.log("{desc}"))
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
def test_abort_after_merge_conflict(self) -> None:
self.write_file("will_have_confict.txt", "original\n")
self.hg("add", "will_have_confict.txt")
commit4 = self.repo.commit("commit4")
self.write_file("will_have_confict.txt", "1\n")
commit5 = self.repo.commit("commit5")
self.write_file("will_have_confict.txt", "2\n")
commit6 = self.repo.commit("commit6")
histedit = HisteditCommand()
histedit.pick(commit4)
histedit.pick(commit6)
histedit.pick(commit5)
original_commits = self.repo.log()
with self.assertRaises(hgrepo.HgError) as context:
histedit.run(self, ancestor=commit4)
expected_msg = (
"Fix up the change (pick %s)\n" % commit6[:12]
) + " (hg histedit --continue to resume)"
self.assertIn(expected_msg, str(context.exception))
self.assert_status({"will_have_confict.txt": "M"}, op="histedit")
self.assert_file_regex(
"will_have_confict.txt",
"""\
<<<<<<< local: .*
original
=======
2
>>>>>>> histedit: .*
""",
)
self.hg("histedit", "--abort")
self.assertEqual("2\n", self.read_file("will_have_confict.txt"))
self.assertListEqual(
original_commits,
self.repo.log(),
msg="The original commit hashes should be restored by the abort.",
)
self.assert_status_empty()
```
#### File: integration/hg/non_eden_operation_test.py
```python
import os
from eden.integration.hg.lib.hg_extension_test_base import EdenHgTestCase, hg_test
from eden.integration.lib import hgrepo
@hg_test
class NonEdenOperationTest(EdenHgTestCase):
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("hello.txt", "hola")
def test_hg_clone_non_eden_repo_within_eden_repo(self):
"""Regression test to ensure that running `hg` commands from an
Eden-backed Hg repo on a non-Eden-backed Hg repo work as expected."""
non_eden_hg_repo = os.path.join(self.tmp_dir, "non-eden-hg-repo")
os.mkdir(non_eden_hg_repo)
# Create the non-Eden Hg repo.
self.hg("init", cwd=non_eden_hg_repo)
first_file = os.path.join(non_eden_hg_repo, "first.txt")
with open(first_file, "w") as f:
f.write("First file in non-Eden-backed Hg repo.\n")
self.hg(
"commit",
"--config",
"ui.username=<NAME> <<EMAIL>>",
"-Am",
"first commit",
cwd=non_eden_hg_repo,
)
# Run `hg clone` from the Eden repo.
clone_of_non_eden_hg_repo = os.path.join(self.tmp_dir, "clone-target")
self.hg(
"clone", non_eden_hg_repo, clone_of_non_eden_hg_repo, cwd=self.repo.path
)
dest_first_file = os.path.join(clone_of_non_eden_hg_repo, "first.txt")
with open(dest_first_file, "r") as f:
contents = f.read()
self.assertEqual("First file in non-Eden-backed Hg repo.\n", contents)
```
#### File: integration/hg/status_test.py
```python
import os
from eden.integration.lib.hgrepo import HgRepository
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test("Flatmanifest", "Treemanifest", "TreeOnly")
class StatusTest(EdenHgTestCase):
def populate_backing_repo(self, repo: HgRepository) -> None:
repo.write_file("hello.txt", "hola")
repo.write_file("subdir/file.txt", "contents")
repo.commit("Initial commit.")
def test_status(self) -> None:
"""Test various `hg status` states in the root of an Eden mount."""
self.assert_status_empty()
self.touch("world.txt")
self.assert_status({"world.txt": "?"})
self.hg("add", "world.txt")
self.assert_status({"world.txt": "A"})
self.rm("hello.txt")
self.assert_status({"hello.txt": "!", "world.txt": "A"})
with open(self.get_path("hello.txt"), "w") as f:
f.write("new contents")
self.assert_status({"hello.txt": "M", "world.txt": "A"})
self.hg("forget", "hello.txt")
self.assert_status({"hello.txt": "R", "world.txt": "A"})
self.assertEqual("new contents", self.read_file("hello.txt"))
self.hg("rm", "hello.txt")
self.assert_status({"hello.txt": "R", "world.txt": "A"})
# If the file is already forgotten, `hg rm` does not remove it from
# disk.
self.assertEqual("new contents", self.read_file("hello.txt"))
self.hg("add", "hello.txt")
self.assert_status({"hello.txt": "M", "world.txt": "A"})
self.assertEqual("new contents", self.read_file("hello.txt"))
self.hg("rm", "--force", "hello.txt")
self.assert_status({"hello.txt": "R", "world.txt": "A"})
self.assertFalse(os.path.exists(self.get_path("hello.txt")))
def test_manual_revert(self) -> None:
self.assert_status_empty()
self.write_file("dir1/a.txt", "original contents\n")
self.hg("add", "dir1/a.txt")
self.repo.commit("create a.txt")
self.assert_status_empty()
self.write_file("dir1/a.txt", "updated contents\n")
self.repo.commit("modify a.txt")
self.assert_status_empty()
self.write_file("dir1/a.txt", "original contents\n")
self.repo.commit("revert a.txt")
self.assert_status_empty()
def test_truncation_upon_open_modifies_file(self) -> None:
fd = os.open(os.path.join(self.mount, "subdir/file.txt"), os.O_TRUNC)
try:
self.assert_status({"subdir/file.txt": "M"})
finally:
os.close(fd)
def test_truncation_after_open_modifies_file(self) -> None:
fd = os.open(os.path.join(self.mount, "subdir/file.txt"), os.O_WRONLY)
try:
os.ftruncate(fd, 0)
self.assert_status({"subdir/file.txt": "M"})
finally:
os.close(fd)
def test_partial_truncation_after_open_modifies_file(self) -> None:
fd = os.open(os.path.join(self.mount, "subdir/file.txt"), os.O_WRONLY)
try:
os.ftruncate(fd, 1)
self.assert_status({"subdir/file.txt": "M"})
finally:
os.close(fd)
# Define a separate TestCase class purely to test with different initial
# repository contents.
@hg_test
class StatusRevertTest(EdenHgTestCase):
commit1: str
commit2: str
commit3: str
commit4: str
def populate_backing_repo(self, repo: HgRepository) -> None:
repo.write_file("dir1/a.txt", "original contents of a\n")
repo.write_file("dir1/b.txt", "b.txt\n")
repo.write_file("dir1/c.txt", "c.txt\n")
repo.write_file("dir2/x.txt", "x.txt\n")
repo.write_file("dir2/y.txt", "y.txt\n")
self.commit1 = repo.commit("Initial commit.")
repo.write_file("dir1/a.txt", "updated contents of a\n", add=False)
self.commit2 = repo.commit("commit 2")
repo.write_file("dir1/b.txt", "updated b\n", add=False)
self.commit3 = repo.commit("commit 3")
repo.write_file("dir1/a.txt", "original contents of a\n")
self.commit4 = repo.commit("commit 4")
def test_reverted_contents(self) -> None:
self.assert_status_empty()
# Read dir1/a.txt so it is loaded by edenfs
self.read_file("dir1/a.txt")
# Reset the state from commit4 to commit1 without actually doing a
# checkout. dir1/a.txt has the same contents in commit4 as in commit1,
# but different blob hashes.
self.hg("reset", "--keep", self.commit1)
# Only dir1/b.txt should be reported as modified.
# dir1/a.txt should not show up in the status output.
self.assert_status({"dir1/b.txt": "M"})
```
#### File: integration/hg/storage_engine_test.py
```python
import abc
import os
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
from .lib.histedit_command import HisteditCommand
class _Hidden:
    # This _Hidden class exists solely to hide the abstract StorageEngineTest class from
# the unittest framework, so it does not find it during test discovery. The
# unittest code is unfortunately not smart enough to skip abstract classes.
class StorageEngineTest(EdenHgTestCase, metaclass=abc.ABCMeta):
_commit1: str
_commit2: str
_commit3: str
# These tests were originally copied from histedit_test.py. It doesn't
# matter which tests are used as long as commits are created and checked out
# and a realistic workflow is verified against each storage engine.
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("first", "")
self._commit1 = repo.commit("first commit")
repo.write_file("second", "")
self._commit2 = repo.commit("second commit")
repo.write_file("third", "")
self._commit3 = repo.commit("third commit")
@abc.abstractmethod
def select_storage_engine(self) -> str:
raise NotImplementedError()
def test_stop_at_earlier_commit_in_the_stack_without_reordering(self) -> None:
commits = self.repo.log()
self.assertEqual([self._commit1, self._commit2, self._commit3], commits)
# histedit, stopping in the middle of the stack.
histedit = HisteditCommand()
histedit.pick(self._commit1)
histedit.stop(self._commit2)
histedit.pick(self._commit3)
# We expect histedit to terminate with a nonzero exit code in this case.
with self.assertRaises(hgrepo.HgError) as context:
histedit.run(self)
head = self.repo.log(revset=".")[0]
expected_msg = (
"Changes committed as %s. "
"You may amend the changeset now." % head[:12]
)
self.assertIn(expected_msg, str(context.exception))
# Verify the new commit stack and the histedit termination state.
# Note that the hash of commit[0] is unpredictable because Hg gives it a
# new hash in anticipation of the user amending it.
parent = self.repo.log(revset=".^")[0]
self.assertEqual(self._commit1, parent)
self.assertEqual(["first commit", "second commit"], self.repo.log("{desc}"))
# Make sure the working copy is in the expected state.
self.assert_status_empty(op="histedit")
self.assertSetEqual(
{".eden", ".hg", "first", "second"},
set(os.listdir(self.repo.get_canonical_root())),
)
self.hg("histedit", "--continue")
self.assertEqual(
["first commit", "second commit", "third commit"],
self.repo.log("{desc}"),
)
self.assert_status_empty()
self.assertSetEqual(
{".eden", ".hg", "first", "second", "third"},
set(os.listdir(self.repo.get_canonical_root())),
)
# Each LocalStore implementation may complete its futures from different
# threads. Verify that Eden works the same with all of them.
@hg_test
class HisteditMemoryStorageEngineTest(_Hidden.StorageEngineTest):
def select_storage_engine(self) -> str:
return "memory"
@hg_test
class HisteditSQLiteStorageEngineTest(_Hidden.StorageEngineTest):
def select_storage_engine(self) -> str:
return "sqlite"
@hg_test
class HisteditRocksDBStorageEngineTest(_Hidden.StorageEngineTest):
def select_storage_engine(self) -> str:
return "rocksdb"
```
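The `_Hidden` wrapper above is a general trick for keeping an abstract base test class out of unittest's discovery. A condensed, generic version of the pattern is sketched below; `BaseCase`, `backend`, and `MemoryCase` are illustrative names rather than anything from Eden.
```python
import abc
import unittest


class _Hidden:
    # unittest discovery only collects module-level TestCase subclasses, so
    # nesting the abstract base inside a plain holder class keeps the loader
    # from trying to run (and fail to instantiate) it directly.
    class BaseCase(unittest.TestCase, metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def backend(self) -> str:
            raise NotImplementedError()

        def test_backend_reports_a_name(self) -> None:
            self.assertTrue(self.backend())


class MemoryCase(_Hidden.BaseCase):
    def backend(self) -> str:
        return "memory"
```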
#### File: integration/hg/update_test.py
```python
import logging
import os
import threading
import unittest
from textwrap import dedent
from typing import Dict, List, Set
from eden.integration.hg.lib.hg_extension_test_base import EdenHgTestCase, hg_test
from eden.integration.lib import hgrepo
@hg_test
class UpdateTest(EdenHgTestCase):
commit1: str
commit2: str
commit3: str
def edenfs_logging_settings(self) -> Dict[str, str]:
return {
"eden.fs.inodes.TreeInode": "DBG5",
"eden.fs.inodes.CheckoutAction": "DBG5",
"eden.fs.inodes.CheckoutContext": "DBG5",
}
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("hello.txt", "hola")
repo.write_file(".gitignore", "ignoreme\n")
repo.write_file("foo/.gitignore", "*.log\n")
repo.write_file("foo/bar.txt", "test\n")
repo.write_file("foo/subdir/test.txt", "test\n")
self.commit1 = repo.commit("Initial commit.")
repo.write_file("foo/.gitignore", "*.log\n/_*\n")
self.commit2 = repo.commit("Update foo/.gitignore")
repo.write_file("foo/bar.txt", "updated in commit 3\n")
        self.commit3 = repo.commit("Update foo/bar.txt")
def test_update_clean_reverts_modified_files(self) -> None:
"""Test using `hg update --clean .` to revert file modifications."""
self.assert_status_empty()
self.write_file("hello.txt", "saluton")
self.assert_status({"hello.txt": "M"})
self.repo.update(".", clean=True)
self.assertEqual("hola", self.read_file("hello.txt"))
self.assert_status_empty()
def test_update_clean_removes_added_and_removed_statuses(self) -> None:
"""Test using `hg update --clean .` in the presence of added and removed
files."""
self.write_file("bar/some_new_file.txt", "new file\n")
self.hg("add", "bar/some_new_file.txt")
self.hg("remove", "foo/bar.txt")
self.assertFalse(os.path.isfile(self.get_path("foo/bar.txt")))
self.assert_status({"foo/bar.txt": "R", "bar/some_new_file.txt": "A"})
self.repo.update(".", clean=True)
self.assert_status({"bar/some_new_file.txt": "?"})
self.assertTrue(os.path.isfile(self.get_path("foo/bar.txt")))
self.assert_dirstate_empty()
def test_update_with_gitignores(self) -> None:
"""
Test `hg update` with gitignore files.
This exercises the normal checkout and ignore logic, but also exercises
some additional interesting cases: The `hg status` calls cause eden to
create FileInode objects for the .gitignore files, even though they
have never been requested via FUSE APIs. When we update them via
checkout, this triggers FUSE inode invalidation events. We want to
make sure the invalidation doesn't cause any errors even though the
kernel didn't previously know that these inode objects existed.
"""
# Call `hg status`, which causes eden to internally create FileInode
# objects for the .gitignore files.
self.assert_status_empty()
self.write_file("foo/subdir/test.log", "log data")
self.write_file("foo/_data", "data file")
self.assert_status_empty(
check_ignored=False, msg="test.log and _data should be ignored"
)
self.assert_status({"foo/subdir/test.log": "I", "foo/_data": "I"})
# Call `hg update` to move from commit2 to commit1, which will
# change the contents of foo/.gitignore. This will cause edenfs
# to send an inode invalidation event to FUSE, but FUSE never knew
# about this inode in the first place. edenfs should ignore the
# resulting ENOENT error in response to the invalidation request.
self.repo.update(self.commit1)
self.assert_status({"foo/_data": "?"}, check_ignored=False)
self.assert_status({"foo/subdir/test.log": "I", "foo/_data": "?"})
self.assertEqual("*.log\n", self.read_file("foo/.gitignore"))
self.assertEqual("test\n", self.read_file("foo/bar.txt"))
def test_update_with_new_commits(self) -> None:
"""
Test running `hg update` to check out commits that were created after
the edenfs daemon originally started.
This makes sure edenfs can correctly import new commits that appear in
the backing store repository.
"""
new_contents = "New contents for bar.txt\n"
self.backing_repo.write_file("foo/bar.txt", new_contents)
new_commit = self.backing_repo.commit("Update foo/bar.txt")
self.assert_status_empty()
self.assertNotEqual(new_contents, self.read_file("foo/bar.txt"))
self.repo.update(new_commit)
self.assertEqual(new_contents, self.read_file("foo/bar.txt"))
self.assert_status_empty()
def test_reset(self) -> None:
"""
Test `hg reset`
"""
self.assert_status_empty()
self.assertEqual("updated in commit 3\n", self.read_file("foo/bar.txt"))
self.repo.reset(self.commit2, keep=True)
self.assert_status({"foo/bar.txt": "M"})
self.assertEqual("updated in commit 3\n", self.read_file("foo/bar.txt"))
self.repo.update(self.commit2, clean=True)
self.assert_status_empty()
self.assertEqual("test\n", self.read_file("foo/bar.txt"))
def test_update_replace_untracked_dir(self) -> None:
"""
Create a local untracked directory, then run "hg update -C" to
        check out a commit where this directory exists in source control.
"""
self.assert_status_empty()
# Write some new files in the eden working directory
self.mkdir("new_project")
self.write_file("new_project/newcode.c", "test\n")
self.write_file("new_project/Makefile", "all:\n\techo done!\n")
self.write_file("new_project/.gitignore", "*.o\n")
self.write_file("new_project/newcode.o", "\x00\x01\x02\x03\x04")
# Add the same files to a commit in the backing repository
self.backing_repo.write_file("new_project/newcode.c", "test\n")
self.backing_repo.write_file("new_project/Makefile", "all:\n\techo done!\n")
self.backing_repo.write_file("new_project/.gitignore", "*.o\n")
new_commit = self.backing_repo.commit("Add new_project")
# Check the status before we update
self.assert_status(
{
"new_project/newcode.o": "I",
"new_project/newcode.c": "?",
"new_project/Makefile": "?",
"new_project/.gitignore": "?",
}
)
# Now run "hg update -C new_commit"
self.repo.update(new_commit, clean=True)
self.assert_status({"new_project/newcode.o": "I"})
def test_update_with_merge_flag_and_conflict(self) -> None:
self.write_file("foo/bar.txt", "changing yet again\n")
with self.assertRaises(hgrepo.HgError) as context:
self.hg("update", ".^", "--merge")
self.assertIn(
b"1 conflicts while merging foo/bar.txt! "
b"(edit, then use 'hg resolve --mark')",
# pyre-fixme[16]: `_E` has no attribute `stderr`.
context.exception.stderr,
)
self.assert_status({"foo/bar.txt": "M"}, op="merge")
self.assert_file_regex(
"foo/bar.txt",
"""\
<<<<<<< working copy: .*
changing yet again
=======
test
>>>>>>> destination: .*
""",
)
def test_merge_update_added_file_with_same_contents_in_destination(self) -> None:
base_commit = self.repo.get_head_hash()
file_contents = "new file\n"
self.write_file("bar/some_new_file.txt", file_contents)
self.hg("add", "bar/some_new_file.txt")
self.write_file("foo/bar.txt", "Modify existing file.\n")
new_commit = self.repo.commit("add some_new_file.txt")
self.assert_status_empty()
self.repo.update(base_commit)
self.assert_status_empty()
self.write_file("bar/some_new_file.txt", file_contents)
self.hg("add", "bar/some_new_file.txt")
self.assert_status({"bar/some_new_file.txt": "A"})
# Note the update fails even though some_new_file.txt is the same in
# both the working copy and the destination.
with self.assertRaises(hgrepo.HgError) as context:
self.repo.update(new_commit)
# pyre-fixme[16]: `_E` has no attribute `stderr`.
self.assertIn(b"abort: conflicting changes", context.exception.stderr)
self.assertEqual(
base_commit,
self.repo.get_head_hash(),
msg="We should still be on the base commit because "
"the merge was aborted.",
)
self.assert_dirstate({"bar/some_new_file.txt": ("a", 0, "MERGE_BOTH")})
self.assert_status({"bar/some_new_file.txt": "A"})
self.assertEqual(file_contents, self.read_file("bar/some_new_file.txt"))
# Now do the update with --merge specified.
self.repo.update(new_commit, merge=True)
# The repository will have no file changes, but will still report
# as being in the middle of a merge, even though everything was
# automatically resolved.
self.assert_status_empty(op="merge")
self.assertEqual(
new_commit,
self.repo.get_head_hash(),
msg="Should be expected commit hash because nothing has changed.",
)
def test_merge_update_added_file_with_conflict_in_destination(self) -> None:
self._test_merge_update_file_with_conflict_in_destination(True)
def test_merge_update_untracked_file_with_conflict_in_destination(self) -> None:
self._test_merge_update_file_with_conflict_in_destination(False)
def _test_merge_update_file_with_conflict_in_destination(
self, add_before_updating: bool
) -> None:
base_commit = self.repo.get_head_hash()
original_contents = "Original contents.\n"
self.write_file("some_new_file.txt", original_contents)
self.hg("add", "some_new_file.txt")
self.write_file("foo/bar.txt", "Modify existing file.\n")
commit = self.repo.commit("Commit a new file.")
self.assert_status_empty()
# Do an `hg prev` and re-create the new file with different contents.
self.repo.update(base_commit)
self.assert_status_empty()
self.assertFalse(os.path.exists(self.get_path("some_new_file.txt")))
modified_contents = "Re-create the file with different contents.\n"
self.write_file("some_new_file.txt", modified_contents)
if add_before_updating:
self.hg("add", "some_new_file.txt")
self.assert_status({"some_new_file.txt": "A"})
else:
self.assert_status({"some_new_file.txt": "?"})
# Verify `hg next` updates such that the original contents and commit
# hash are restored. No conflicts should be reported.
path_to_backup = ".hg/origbackups/some_new_file.txt"
expected_backup_file = os.path.join(self.mount, path_to_backup)
self.assertFalse(os.path.isfile(expected_backup_file))
with self.assertRaises(hgrepo.HgError) as context:
self.repo.update(commit, merge=True)
self.assertIn(
b"warning: 1 conflicts while merging some_new_file.txt! "
b"(edit, then use 'hg resolve --mark')",
# pyre-fixme[16]: `_E` has no attribute `stderr`.
context.exception.stderr,
)
self.assertEqual(
commit,
self.repo.get_head_hash(),
msg="Even though we have a merge conflict, "
"we should still be at the new commit.",
)
self.assert_dirstate_empty()
self.assert_status({"some_new_file.txt": "M"}, op="merge")
merge_contents = dedent(
"""\
<<<<<<< working copy: .*
Re-create the file with different contents.
=======
Original contents.
>>>>>>> destination: .*
"""
)
self.assertRegex(self.read_file("some_new_file.txt"), merge_contents)
self.assert_unresolved(["some_new_file.txt"])
# Verify the previous version of the file was backed up as expected.
self.assertTrue(os.path.isfile(expected_backup_file))
self.assertEqual(modified_contents, self.read_file(path_to_backup))
# Resolve the merge conflict and complete the update
resolved_contents = "Merged contents.\n"
self.write_file("some_new_file.txt", resolved_contents)
self.hg("resolve", "--mark", "some_new_file.txt")
self.assert_dirstate_empty()
self.assert_status({"some_new_file.txt": "M"}, op="merge")
self.repo.commit("Resolved file changes.")
self.assert_dirstate_empty()
self.assert_status_empty()
self.assertEqual(resolved_contents, self.read_file("some_new_file.txt"))
def test_update_modified_file_to_removed_file_taking_other(self) -> None:
self.write_file("some_new_file.txt", "I am new!\n")
self.hg("add", "some_new_file.txt")
self.repo.commit("Commit a new file.")
self.write_file("some_new_file.txt", "Make some changes to that new file.\n")
self.hg("update", ".^", "--merge", "--tool", ":other")
self.assertFalse(os.path.exists(self.get_path("some_new_file.txt")))
self.assertFalse(
os.path.isfile(
os.path.join(self.mount, ".hg/origbackups/some_new_file.txt")
),
msg="There should not be a backup file because "
":other was specified explicitly.",
)
def test_update_modified_file_to_removed_file_taking_local(self) -> None:
self.write_file("some_new_file.txt", "I am new!\n")
self.hg("add", "some_new_file.txt")
self.repo.commit("Commit a new file.")
new_contents = "Make some changes to that new file.\n"
self.write_file("some_new_file.txt", new_contents)
self.hg("update", ".^", "--merge", "--tool", ":local")
self.assertEqual(new_contents, self.read_file("some_new_file.txt"))
self.assert_status({"some_new_file.txt": "A"}, op="merge")
def test_update_untracked_added_conflict(self) -> None:
# Create a commit with a newly-created file foo/new_file.txt
self.write_file("foo/new_file.txt", "new file\n")
self.hg("add", "foo/new_file.txt")
new_commit = self.repo.commit("Add foo/new_file.txt")
# Switch back to commit 3
self.hg("update", self.commit3)
# Write foo/new_file.txt as an untracked file
self.write_file("foo/new_file.txt", "different contents\n")
# Try to switch back to the new commit
result = self.repo.run_hg(
"update",
new_commit,
"--config",
"experimental.updatecheck=noconflict",
check=False,
)
self.assertEqual(
"abort: conflicting changes:\n"
" foo/new_file.txt\n"
"(commit or update --clean to discard changes)\n",
result.stderr.decode("utf-8"),
)
self.assertNotEqual(0, result.returncode)
self.assert_status({"foo/new_file.txt": "?"})
def test_update_ignores_untracked_directory(self) -> None:
base_commit = self.repo.get_head_hash()
self.mkdir("foo/bar")
self.write_file("foo/bar/a.txt", "File in directory two levels deep.\n")
self.write_file("foo/bar/b.txt", "Another file.\n")
self.hg("add", "foo/bar/a.txt")
self.assert_status({"foo/bar/a.txt": "A", "foo/bar/b.txt": "?"})
self.repo.commit("Commit only a.txt.")
self.assert_status({"foo/bar/b.txt": "?"})
self.repo.update(base_commit)
self.assert_status({"foo/bar/b.txt": "?"})
self.assertFalse(os.path.exists(self.get_path("foo/bar/a.txt")))
self.assertTrue(os.path.exists(self.get_path("foo/bar/b.txt")))
@unittest.skipIf(
"SANDCASTLE" in os.environ,
"This test seems to leave behind unkillable processes "
"on sandcastle. Disable it for now.",
)
def test_dir_locking(self) -> None:
"""
Test performing checkouts that modify the directory foo/ while other
clients are simultaneously renaming untracked files under foo/
This exercises the interaction of the kernel's inode locks and Eden's
own user-space locking. We previously had some situations where
deadlock could occur because FUSE requests holding kernel inode lock
were blocked on userspace locks that were held by other threads blocked
on the kernel inode lock.
"""
num_checkout_changed_files = 500
num_rename_threads = 4
num_checkouts = 4
# Create a new commit in the backing repository with many new files in
# the foo/ directory
for n in range(num_checkout_changed_files):
path = os.path.join(self.backing_repo.path, "foo", "tracked.%d" % n)
with open(path, "w") as f:
f.write("file %d\n" % n)
self.backing_repo.add_files(["foo"])
new_commit = self.backing_repo.commit("Add many files under foo/")
# Spawn several threads that repeatedly rename ignored files under foo/
stop = threading.Event()
def rename_worker(thread_id):
logging.info("rename thread %d starting", thread_id)
path1 = os.path.join(self.repo.path, "foo", "_%d.log" % thread_id)
path2 = os.path.join(self.repo.path, "foo", "_%d.log2" % thread_id)
with open(path1, "w") as f:
f.write("ignored %d\n" % thread_id)
count = 0
while not stop.is_set():
os.rename(path1, path2)
os.rename(path2, path1)
count += 1
logging.info("rename thread %d performed %d renames", thread_id, count)
threads = []
for n in range(num_rename_threads):
thread = threading.Thread(target=rename_worker, args=(n,))
threads.append(thread)
thread.start()
logging.info("===== starting checkouts")
commits = [new_commit, self.commit3]
for n in range(num_checkouts):
self.repo.update(commits[n % len(commits)])
logging.info("===== checkouts complete")
stop.set()
for thread in threads:
thread.join()
logging.info("===== threads stopped")
# For the most part this test is mainly checking to ensure that
# we reach this point without causing a deadlock.
# However go ahead and check that the repository is left in an expected
# state too.
if num_checkouts % 2 == 0:
self.assertEqual(self.commit3, self.repo.get_head_hash())
else:
self.assertEqual(new_commit, self.repo.get_head_hash())
# Assert that the status is empty other than the ignored files
# created by the rename threads
self.assert_status(
{
os.path.join("foo", "_%d.log" % thread_id): "I"
for thread_id in range(num_rename_threads)
}
)
@hg_test
class UpdateCacheInvalidationTest(EdenHgTestCase):
commit1: str
def edenfs_logging_settings(self) -> Dict[str, str]:
return {
"eden.fs.inodes.TreeInode": "DBG5",
"eden.fs.inodes.CheckoutAction": "DBG5",
"eden.fs.inodes.CheckoutContext": "DBG5",
"eden.fs.fuse.FuseChannel": "DBG3",
}
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("dir/file1", "one")
repo.write_file("dir/file2", "two")
self.commit1 = repo.commit("Initial commit.")
repo.remove_file("dir/file1")
self.commit2 = repo.commit("Remove file1")
repo.write_file("dir/file3", "three")
self.commit3 = repo.commit("Add file3")
repo.update(self.commit1)
repo.write_file("dir/file2", "new two")
self.commit4 = repo.commit("Change file2")
def _populate_kernel_caches(self):
# Populate the kernel's readdir caches.
for _dirpath, _dirnames, _filenames in os.walk(self.repo.path):
pass
def _list_contents(self, path) -> Set[str]:
return set(os.listdir(os.path.join(self.repo.path, path)))
def _scan_contents(self, path) -> List[os.DirEntry]:
entries = list(os.scandir(os.path.join(self.repo.path, path)))
entries.sort(key=lambda entry: entry.name)
return entries
def test_update_adding_file_invalidates_tree_inode_caches(self):
self.repo.update(self.commit2)
self._populate_kernel_caches()
self.assertEqual({"file2"}, self._list_contents("dir"))
# The checkout operation should invalidate the kernel's caches.
self.repo.update(self.commit3)
self.assertEqual({"file2", "file3"}, self._list_contents("dir"))
def test_update_removing_file_invalidates_tree_inode_caches(self):
self.repo.update(self.commit1)
self._populate_kernel_caches()
self.assertEqual({"file1", "file2"}, self._list_contents("dir"))
# The checkout operation should invalidate the kernel's caches.
self.repo.update(self.commit2)
self.assertEqual({"file2"}, self._list_contents("dir"))
def test_changing_file_contents_creates_new_inode_and_flushes_dcache(self):
self.repo.update(self.commit1)
self._populate_kernel_caches()
before = self._scan_contents("dir")
self.repo.update(self.commit4)
after = self._scan_contents("dir")
self.assertEqual(["file1", "file2"], [x.name for x in before])
self.assertEqual(["file1", "file2"], [x.name for x in after])
self.assertEqual(before[0].inode(), after[0].inode())
self.assertNotEqual(before[1].inode(), after[1].inode())
def test_clean_update_removes_added_file(self) -> None:
self.repo.update(self.commit1)
self.write_file("dir/new_file.txt", "new file")
self.hg("add", "dir/new_file.txt")
self.assertTrue(os.path.isfile(self.get_path("dir/new_file.txt")))
self.assert_status({"dir/new_file.txt": "A"})
self._populate_kernel_caches()
self.repo.update(".", clean=True)
self.assert_status({"dir/new_file.txt": "?"})
self.assertTrue(os.path.isfile(self.get_path("dir/new_file.txt")))
self.assert_dirstate_empty()
self.assertEqual({"file1", "file2", "new_file.txt"}, self._list_contents("dir"))
def test_clean_update_adds_removed_file(self) -> None:
self.hg("remove", "dir/file1")
self.assertFalse(os.path.isfile(self.get_path("dir/file1")))
self.assert_status({"dir/file1": "R"})
self._populate_kernel_caches()
self.repo.update(".", clean=True)
self.assert_status({})
self.assertTrue(os.path.isfile(self.get_path("dir/file1")))
self.assert_dirstate_empty()
self.assertEqual({"file1", "file2"}, self._list_contents("dir"))
```
#### File: eden/integration/hypothesis_simple_test.py
```python
import os
import stat
import hypothesis
from eden.test_support.hypothesis import FILENAME_STRATEGY
from .lib import testcase
@testcase.eden_repo_test
class HypothesisSimpleTest(testcase.EdenRepoTest):
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.write_file("adir/file", "foo!\n")
self.repo.write_file("bdir/test.sh", "#!/bin/bash\necho test\n", mode=0o755)
self.repo.write_file("bdir/noexec.sh", "#!/bin/bash\necho test\n")
self.repo.symlink("slink", "hello")
self.repo.commit("Initial commit.")
@hypothesis.given(FILENAME_STRATEGY)
def test_create(self, basename: str) -> None:
filename = os.path.join(self.mount, basename)
# Ensure that we don't proceed if hypothesis has selected a name that
# conflicts with the names we generated in the repo.
hypothesis.assume(not os.path.exists(filename))
with open(filename, "w") as f:
f.write("created\n")
self.assert_checkout_root_entries(
{".eden", "adir", "bdir", "hello", basename, "slink"}
)
with open(filename, "r") as f:
self.assertEqual(f.read(), "created\n")
st = os.lstat(filename)
self.assertEqual(st.st_size, 8)
self.assertTrue(stat.S_ISREG(st.st_mode))
```
#### File: eden/integration/info_test.py
```python
import json
import os
from .lib import testcase
@testcase.eden_repo_test
class InfoTest(testcase.EdenRepoTest):
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.commit("Initial commit.")
def test_info_with_bind_mounts(self) -> None:
edenrc = os.path.join(os.environ["HOME"], ".edenrc")
with open(edenrc, "w") as f:
f.write(
"""\
["repository {repo_name}"]
path = "{repo_path}"
type = "{repo_type}"
["bindmounts {repo_name}"]
buck-out = "buck-out"
""".format(
repo_name=self.repo_name,
repo_path=self.repo.get_canonical_root(),
repo_type=self.repo.get_type(),
)
)
basename = "eden_mount"
tmp = os.path.join(self.tmp_dir, basename)
self.eden.run_cmd("clone", self.repo_name, tmp)
info = self.eden.run_cmd("info", tmp)
client_info = json.loads(info)
client_dir = os.path.join(self.eden_dir, "clients", basename)
self.assertEqual(
{
"bind-mounts": {"buck-out": "buck-out"},
"client-dir": client_dir,
"scm_type": self.repo.get_type(),
"mount": tmp,
"snapshot": self.repo.get_head_hash(),
},
client_info,
)
def test_relative_path(self) -> None:
"""
Test calling "eden info <relative_path>" and make sure it gives
the expected results.
"""
info = self.eden.run_cmd("info", os.path.relpath(self.mount))
client_info = json.loads(info)
client_dir = os.path.join(
self.eden_dir, "clients", os.path.basename(self.mount)
)
self.assertEqual(
{
"bind-mounts": {},
"client-dir": client_dir,
"scm_type": self.repo.get_type(),
"mount": self.mount,
"snapshot": self.repo.get_head_hash(),
},
client_info,
)
def test_through_symlink(self) -> None:
"""
Test calling "eden info" through a symlink and make sure it gives
the expected results. This makes sure "eden info" resolves the path
correctly before looking it up in the configuration.
"""
link1 = os.path.join(self.tmp_dir, "link1")
os.symlink(self.mount, link1)
info1 = json.loads(self.eden.run_cmd("info", link1))
self.assertEqual(self.mount, info1["mount"])
# Create a non-normalized symlink pointing to the parent directory
# of the mount
link2 = os.path.join(self.tmp_dir, "mounts_link")
os.symlink(self.mount + "//..", link2)
mount_through_link2 = os.path.join(link2, self.repo_name)
info2 = json.loads(self.eden.run_cmd("info", mount_through_link2))
self.assertEqual(self.mount, info2["mount"])
```
#### File: integration/lib/error.py
```python
import shlex
import subprocess
class CommandError(subprocess.CalledProcessError):
"""
    A wrapper around subprocess.CalledProcessError that also includes
    the process's stderr when converted to a string.
"""
def __init__(self, orig: subprocess.CalledProcessError) -> None:
super().__init__(
orig.returncode, orig.cmd, output=orig.output, stderr=orig.stderr
)
def __str__(self) -> str:
if not self.stderr:
return super().__str__()
cmd_str = " ".join(shlex.quote(arg) for arg in self.cmd)
stderr_str = self.stderr
if isinstance(self.stderr, bytes):
stderr_str = self.stderr.decode("utf-8", errors="replace")
# Indent the stderr output just to help indicate where it starts
# and ends in the test output.
stderr_str = stderr_str.replace("\n", "\n ")
msg = "Command [%s] failed with status %s\nstderr:\n %s" % (
cmd_str,
self.returncode,
stderr_str,
)
return msg
```
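`CommandError` exists so that a failed subprocess surfaces its captured stderr in test output. The sketch below shows the intended wrap-and-reraise shape, which is the same pattern `GitRepository.git()` uses in the next file; `run_checked` and its argument are illustrative.
```python
import subprocess
from typing import Sequence


def run_checked(cmd: Sequence[str]) -> subprocess.CompletedProcess:
    """Run a command, re-raising failures as CommandError so stderr is readable."""
    try:
        return subprocess.run(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
        )
    except subprocess.CalledProcessError as ex:
        raise CommandError(ex) from ex
```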
#### File: integration/lib/gitrepo.py
```python
import datetime
import os
import subprocess
import tempfile
import time
import typing
from typing import Dict, List, Optional
from . import repobase
from .error import CommandError
from .find_executables import FindExe
class GitError(CommandError):
pass
class GitRepository(repobase.Repository):
def __init__(self, path: str) -> None:
super().__init__(path)
self.git_bin = FindExe.GIT
def git(
self, *args: str, encoding: str = "utf-8", env: Optional[Dict[str, str]] = None
) -> str:
"""
Invoke a git command inside the repository.
All non-keyword arguments are treated as arguments to git.
A keyword argument of "env" can be used to specify a dictionary of
additional environment variables to be passed to git. (These will be
added to the current environment.)
"env" is currently the only valid keyword argument.
Example usage:
repo.git('commit', '-m', 'my new commit',
env={'GIT_AUTHOR_NAME': '<NAME>'})
"""
cmd = [self.git_bin] + list(args)
git_env = None
if env is not None:
git_env = os.environ.copy()
git_env.update(env)
try:
completed_process = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
cwd=self.path,
env=git_env,
)
except subprocess.CalledProcessError as ex:
raise GitError(ex) from ex
return typing.cast(str, completed_process.stdout.decode(encoding))
def init(self) -> None:
self.git("init")
def get_type(self) -> str:
return "git"
def get_head_hash(self) -> str:
return self.git("rev-parse", "HEAD").rstrip()
def get_canonical_root(self) -> str:
return os.path.join(self.path, ".git")
def add_files(self, paths: List[str]) -> None:
self.git("add", *paths)
def remove_files(self, paths: List[str], force: bool = False) -> None:
if force:
self.git("rm", "--force", *paths)
else:
self.git("rm", *paths)
def commit(
self,
message: str,
author_name: Optional[str] = None,
author_email: Optional[str] = None,
date: Optional[datetime.datetime] = None,
amend: bool = False,
committer_name: Optional[str] = None,
committer_email: Optional[str] = None,
committer_date: Optional[datetime.datetime] = None,
) -> str:
if author_name is None:
author_name = self.author_name
if author_email is None:
author_email = self.author_email
if date is None:
date = self.get_commit_time()
date_str = time.strftime("%Y-%m-%dT%H:%M:%S%z", date.utctimetuple())
if committer_name is None:
committer_name = author_name
if committer_email is None:
committer_email = author_email
if committer_date is None:
committer_date = date
committer_date_str = time.strftime(
"%Y-%m-%dT%H:%M:%S%z", committer_date.utctimetuple()
)
# Specify all arguments to `git commit` to ensure the resulting hashes
# are the same every time this test is run.
git_commit_env = {
"GIT_AUTHOR_NAME": author_name,
"GIT_AUTHOR_EMAIL": author_email,
# pyre-fixme[18]: Global name `date_str` is undefined.
"GIT_AUTHOR_DATE": date_str,
"GIT_COMMITTER_NAME": committer_name,
"GIT_COMMITTER_EMAIL": committer_email,
# pyre-fixme[18]: Global name `committer_date_str` is undefined.
"GIT_COMMITTER_DATE": committer_date_str,
}
with tempfile.NamedTemporaryFile(
prefix="eden_commit_msg.", mode="w", encoding="utf-8"
) as msgf:
msgf.write(message)
msgf.flush()
args = ["commit", "-F", msgf.name]
if amend:
args.append("--amend")
self.git(*args, env=git_commit_env)
# Get the commit ID and return it
return self.git("rev-parse", "HEAD").strip()
```
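As a rough usage sketch, the helpers above compose as follows. It assumes a `git` binary reachable through `FindExe.GIT`, and that `write_file` (defined on the `repobase.Repository` base class later in this section) stages new files by default; `make_scratch_repo` is an illustrative name.
```python
import tempfile


def make_scratch_repo() -> str:
    """Create a throwaway git repo with one commit and return the commit hash."""
    tmp = tempfile.mkdtemp(prefix="scratch_repo.")
    repo = GitRepository(tmp)
    repo.init()
    repo.write_file("README.md", "hello\n")  # add=True stages the file
    return repo.commit("Initial commit.")
```
Because the base class pins the author identity and advances a synthetic commit clock, repeated runs should produce the same commit hash.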
#### File: integration/lib/overlay.py
```python
import os
import pathlib
import stat
import tempfile
import eden.integration.lib.edenclient as edenclient
class OverlayStore:
def __init__(self, eden: edenclient.EdenFS, mount: pathlib.Path) -> None:
self.eden = eden
self.mount = mount
self.overlay_dir = eden.overlay_dir_for_mount(mount)
def materialize_file(self, path: pathlib.Path) -> pathlib.Path:
"""Force the file inode at the specified path to be materialized and recorded in
the overlay. Returns the path to the overlay file that stores the data for this
inode in the overlay.
"""
path_in_mount = self.mount / path
# Opening the file in write mode will materialize it
with path_in_mount.open("w+b") as f:
s = os.fstat(f.fileno())
return self._get_overlay_path(s.st_ino)
def materialize_dir(self, path: pathlib.Path) -> pathlib.Path:
"""Force the directory inode at the specified path to be materialized and
recorded in the overlay. Returns the path to the overlay file that stores the
data for this inode in the overlay.
"""
path_in_mount = self.mount / path
s = os.lstat(path_in_mount)
assert stat.S_ISDIR(s.st_mode)
# Creating and then removing a file inside the directory will materialize it
with tempfile.NamedTemporaryFile(dir=str(path_in_mount)):
pass
return self._get_overlay_path(s.st_ino)
def _get_overlay_path(self, inode_number: int) -> pathlib.Path:
subdir = "{:02x}".format(inode_number % 256)
return self.overlay_dir / subdir / str(inode_number)
def delete_cached_next_inode_number(self) -> None:
(self.overlay_dir / "next-inode-number").unlink()
```
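The sharding in `_get_overlay_path` is simply `inode_number % 256` rendered as two hex digits. A tiny worked example (the overlay directory here is a placeholder):
```python
import pathlib


def overlay_path(overlay_dir: pathlib.Path, inode_number: int) -> pathlib.Path:
    subdir = "{:02x}".format(inode_number % 256)
    return overlay_dir / subdir / str(inode_number)


# 1234 % 256 == 210 == 0xd2, so inode 1234 lands in the "d2" bucket.
assert overlay_path(pathlib.Path("/overlay"), 1234) == pathlib.Path("/overlay/d2/1234")
```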
#### File: integration/lib/repobase.py
```python
import datetime
import errno
import os
from typing import List, Optional
class Repository(object):
def __init__(self, path: str) -> None:
self.path = path
# Default author and timestamp info for commits
self.author_name = "<NAME>"
self.author_email = "<EMAIL>"
self.commit_time = datetime.datetime(year=2000, month=1, day=1)
self.commit_time_delta = datetime.timedelta(seconds=1)
def get_commit_time(self) -> datetime.datetime:
"""
Get a datetime object to use for the next commit.
Rather than using real wall clock time, we use an internally maintained
date to ensure that we get the same commit hashes across repeated test
runs.
The date is advanced for each commit made.
"""
current = self.commit_time
self.commit_time += self.commit_time_delta
return current
def init(self) -> None:
raise NotImplementedError("subclasses must implement init()")
def get_type(self) -> str:
"""Returns the type of this repo as a string: "git" or "hg"."""
raise NotImplementedError("subclasses must implement get_type()")
def get_head_hash(self) -> str:
"""Returns the 40-character hex hash for HEAD."""
raise NotImplementedError("subclasses must implement get_head_hash()")
def commit(
self,
message: str,
author_name: Optional[str] = None,
author_email: Optional[str] = None,
date: Optional[datetime.datetime] = None,
amend: bool = False,
) -> str:
"""
Create a commit.
Returns the new commit hash as a 40-character hexadecimal string.
"""
raise NotImplementedError("subclasses must implement commit()")
def add_file(self, path: str) -> None:
self.add_files([path])
def add_files(self, paths: List[str]) -> None:
raise NotImplementedError("subclasses must implement add_files()")
def remove_file(self, path: str) -> None:
self.remove_files([path])
def remove_files(self, paths: List[str], force: bool = False) -> None:
raise NotImplementedError("subclasses must implement remove_files()")
def get_path(self, *args: str) -> str:
for arg in args:
assert not os.path.isabs(arg), "must not be absolute: %r" % (arg,)
return os.path.join(self.path, *args)
def get_canonical_root(self) -> str:
"""Returns cwd to use when calling scm commands."""
raise NotImplementedError("subclasses must implement get_canonical_root()")
def mkdir(self, path: str) -> None:
full_path = self.get_path(path)
try:
os.makedirs(full_path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def make_parent_dir(self, path: str) -> None:
dirname = os.path.dirname(path)
if dirname:
self.mkdir(dirname)
def write_file(
self, path: str, contents: str, mode: Optional[int] = None, add: bool = True
) -> None:
"""
Create or overwrite a file with the given contents.
"""
self.make_parent_dir(path)
if mode is None:
mode = 0o644
full_path = self.get_path(path)
with open(full_path, "w") as f:
f.write(contents)
os.chmod(full_path, mode)
if add:
self.add_file(path)
def symlink(self, path: str, contents: str, add: bool = True) -> None:
"""
Create a symlink at the specified path, pointed at the given
destination path contents.
"""
self.make_parent_dir(path)
full_path = self.get_path(path)
try:
os.unlink(full_path)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
os.symlink(contents, full_path)
if add:
self.add_file(path)
```
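One detail worth highlighting in `Repository` above is the deterministic commit clock: every `get_commit_time()` call returns the current synthetic time and advances it by one second, so repeated test runs produce identical timestamps (and therefore identical commit hashes). A minimal illustration, using a placeholder path since the constructor performs no I/O:
```python
import datetime

repo = Repository("/tmp/example")  # placeholder path; nothing is touched on disk
first = repo.get_commit_time()
second = repo.get_commit_time()

assert first == datetime.datetime(2000, 1, 1)
assert second - first == datetime.timedelta(seconds=1)
```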
#### File: integration/lib/systemd.py
```python
import abc
import contextlib
import logging
import os
import os.path
import pathlib
import re
import subprocess
import sys
import tempfile
import types
import typing
from eden.cli.daemon import wait_for_process_exit
from eden.cli.util import poll_until
from .find_executables import FindExe
from .linux import LinuxCgroup, ProcessID
from .temporary_directory import create_tmp_dir
logger = logging.getLogger(__name__)
SystemdUnitName = str
class SystemdUserServiceManager:
"""A running 'systemd --user' process manageable using 'systemctl --user'."""
def __init__(
self, xdg_runtime_dir: pathlib.Path, process_id: typing.Optional[ProcessID]
) -> None:
super().__init__()
self.__xdg_runtime_dir = xdg_runtime_dir
self.__process_id = process_id
@property
def xdg_runtime_dir(self) -> pathlib.Path:
return self.__xdg_runtime_dir
@property
def process_id(self) -> ProcessID:
if self.__process_id is None:
raise NotImplementedError()
return self.__process_id
def is_alive(self) -> bool:
result = self._systemctl.run(
["--user", "show-environment"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if result.returncode == 0:
return True
if result.returncode == 1:
logger.warning(f'{self} is not alive: {result.stdout.decode("utf-8")}')
return False
result.check_returncode()
return False
def enable_runtime_unit_from_file(self, unit_file: pathlib.Path) -> None:
self._systemctl.check_call(["enable", "--runtime", "--", unit_file])
self._systemctl.check_call(["daemon-reload"])
self.sanity_check_enabled_unit(unit_file=unit_file)
def sanity_check_enabled_unit(self, unit_file: pathlib.Path) -> None:
unit_name = unit_file.name
if "@" in unit_name:
unit_name = unit_name.replace("@", "@testinstance")
self.sanity_check_enabled_unit_fragment(
unit_name=unit_name, expected_unit_file=unit_file
)
self.sanity_check_enabled_unit_sources(
unit_name=unit_name, expected_unit_file=unit_file
)
def sanity_check_enabled_unit_fragment(
self, unit_name: SystemdUnitName, expected_unit_file: pathlib.Path
) -> None:
service = SystemdService(unit_name=unit_name, systemd=self)
actual_unit_file = service.query_fragment_path()
if actual_unit_file != expected_unit_file:
raise Exception(
f"Enabled unit's FragmentPath does not match unit file\n"
f"Expected: {expected_unit_file}\n"
f"Actual: {actual_unit_file}"
)
def sanity_check_enabled_unit_sources(
self, unit_name: SystemdUnitName, expected_unit_file: pathlib.Path
) -> None:
actual_unit_sources = self._systemctl.check_output(["cat", "--", unit_name])
expected_unit_sources = b""
for file in [expected_unit_file]:
expected_unit_sources += b"# " + bytes(file) + b"\n"
expected_unit_sources += file.read_bytes()
if actual_unit_sources != expected_unit_sources:
raise Exception(
"Enabled unit does not match unit file\n"
"Expected: {repr(expected_unit_sources)}\n"
"Actual: {repr(actual_unit_sources)}"
)
def systemd_run(
self,
command: typing.Sequence[str],
properties: typing.Mapping[str, str],
extra_env: typing.Mapping[str, str],
unit_name: typing.Optional[SystemdUnitName] = None,
) -> "SystemdService":
systemd_run_command = ["systemd-run", "--user"]
for name, value in properties.items():
systemd_run_command.extend(("--property", f"{name}={value}"))
for name, value in extra_env.items():
systemd_run_command.extend(("--setenv", f"{name}={value}"))
if unit_name is not None:
systemd_run_command.extend(("--unit", unit_name))
systemd_run_command.append("--")
systemd_run_command.extend(command)
try:
output = subprocess.check_output(
systemd_run_command, env=self.env, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
sys.stderr.buffer.write(e.output)
raise
match = re.match(
r"^Running as unit: (?P<unit>.*)$",
output.decode("utf-8"),
flags=re.MULTILINE,
)
if match is None:
raise Exception("Failed to parse unit from command output")
return self.get_service(match.group("unit"))
def get_active_unit_names(self) -> typing.List[SystemdUnitName]:
def parse_line(line: str) -> SystemdUnitName:
parts = re.split(r" +", line)
return parts[0]
stdout = self._systemctl.check_output(
[
"list-units",
"--all",
"--full",
"--no-legend",
"--no-pager",
"--plain",
"--state=active",
]
)
return [parse_line(line) for line in stdout.decode("utf-8").splitlines()]
def get_unit_paths(self) -> typing.List[pathlib.Path]:
stdout = subprocess.check_output(
["systemd-analyze", "--user", "unit-paths"], env=self.env
)
return [pathlib.Path(line) for line in stdout.decode("utf-8").splitlines()]
def get_service(self, unit_name: SystemdUnitName) -> "SystemdService":
return SystemdService(unit_name=unit_name, systemd=self)
def exit(self) -> None:
process_id = self.process_id
# We intentionally do not check the result from the systemctl command here.
# It may fail with a "Connection reset by peer" error due to systemd exiting.
# We simply confirm that systemd exits after running this command.
self._systemctl.run(["start", "exit.target"])
if not wait_for_process_exit(process_id, timeout=60):
raise TimeoutError()
@property
def env(self) -> typing.Dict[str, str]:
env = dict(os.environ)
env.update(self.extra_env)
return env
@property
def extra_env(self) -> typing.Dict[str, str]:
return {
"DBUS_SESSION_BUS_ADDRESS": "",
"XDG_RUNTIME_DIR": str(self.xdg_runtime_dir),
}
@property
def _systemctl(self) -> "_SystemctlCLI":
return _SystemctlCLI(env=self.env)
def __str__(self) -> str:
return f"systemd ({self.xdg_runtime_dir})"
def __repr__(self) -> str:
return (
f"SystemdUserServiceManager("
f"xdg_runtime_dir={repr(self.xdg_runtime_dir)}, "
f"process_id={self.process_id}"
f")"
)
class SystemdService:
def __init__(
self, unit_name: SystemdUnitName, systemd: SystemdUserServiceManager
) -> None:
super().__init__()
self.__systemd = systemd
self.__unit_name = unit_name
@property
def unit_name(self) -> SystemdUnitName:
return self.__unit_name
def start(self) -> None:
self.__systemctl.check_call(["start", "--", self.unit_name])
def stop(self) -> None:
self.__systemctl.check_call(["stop", "--", self.unit_name])
def restart(self) -> None:
self.__systemctl.check_call(["restart", "--", self.unit_name])
def poll_until_inactive(self, timeout: float) -> None:
def check_inactive() -> typing.Optional[bool]:
return True if self.query_active_state() == "inactive" else None
poll_until(check_inactive, timeout=timeout)
def query_active_state(self) -> str:
return self.__query_property("ActiveState").decode("utf-8")
def query_sub_state(self) -> str:
return self.__query_property("SubState").decode("utf-8")
def query_main_process_id(self) -> typing.Optional[ProcessID]:
return ProcessID(self.__query_property("MainPID"))
def query_cgroup(self) -> LinuxCgroup:
return LinuxCgroup(self.__query_property("ControlGroup"))
def query_process_ids(self) -> typing.Sequence[ProcessID]:
return self.query_cgroup().query_process_ids()
def query_fragment_path(self) -> pathlib.Path:
return pathlib.Path(os.fsdecode(self.__query_property("FragmentPath")))
def __query_property(self, property: str) -> bytes:
stdout = self.__systemctl.check_output(
["show", f"--property={property}", "--", self.unit_name]
)
prefix = property.encode("utf-8") + b"="
if not stdout.startswith(prefix):
raise Exception(f"Failed to parse output of systemctl show: {stdout}")
return stdout[len(prefix) :].rstrip(b"\n")
@property
def __systemctl(self) -> "_SystemctlCLI":
return self.__systemd._systemctl
def __str__(self) -> str:
return f"{self.unit_name} (XDG_RUNTIME_DIR={self.__systemd.xdg_runtime_dir})"
def __repr__(self) -> str:
return (
f"SystemdService(unit_name={repr(self.unit_name)}, "
f"systemd={repr(self.__systemd)})"
)
class _SystemctlCLI:
def __init__(self, env: typing.Dict[str, str]) -> None:
super().__init__()
self.__env = env
def check_call(
self, command_arguments: typing.Sequence[typing.Union[str, pathlib.Path]]
) -> None:
"""Run 'systemctl --user' with the given arguments.
See also subprocess.check_call.
"""
subprocess.check_call(self.__command(command_arguments), env=self.__env)
def check_output(
self, command_arguments: typing.Sequence[typing.Union[str, pathlib.Path]]
) -> bytes:
"""Run 'systemctl --user' and return the command's output.
See also subprocess.check_output.
"""
return subprocess.check_output(
self.__command(command_arguments), env=self.__env
)
def run(
self,
command_arguments: typing.Sequence[typing.Union[str, pathlib.Path]],
stdout: "subprocess._FILE" = None,
stderr: "subprocess._FILE" = None,
) -> subprocess.CompletedProcess:
"""Run 'systemctl --user' and return the command's output and exit status.
See also subprocess.run.
"""
return subprocess.run(
self.__command(command_arguments),
env=self.__env,
stdout=stdout,
stderr=stderr,
)
def __command(
self, command_arguments: typing.Sequence[typing.Union[str, pathlib.Path]]
) -> typing.Sequence[str]:
command = ["systemctl", "--user"]
command.extend(str(arg) for arg in command_arguments)
return command
class SystemdUserServiceManagerMixin(metaclass=abc.ABCMeta):
def make_temporary_systemd_user_service_manager(self) -> SystemdUserServiceManager:
context_manager = temporary_systemd_user_service_manager()
exit = context_manager.__exit__
systemd = context_manager.__enter__()
self.addCleanup(lambda: exit(None, None, None))
return systemd
def addCleanup(
self,
function: typing.Callable[..., typing.Any],
*args: typing.Any,
**kwargs: typing.Any,
) -> None:
raise NotImplementedError()
@contextlib.contextmanager
def temporary_systemd_user_service_manager() -> typing.Iterator[
SystemdUserServiceManager
]:
"""Create an isolated systemd instance for tests."""
parent_systemd = SystemdUserServiceManager(
xdg_runtime_dir=_get_current_xdg_runtime_dir(), process_id=None
)
def should_create_managed() -> bool:
forced_type_variable = "EDEN_TEST_FORCE_SYSTEMD_USER_SERVICE_MANAGER_TYPE"
forced_type = os.getenv(forced_type_variable)
if forced_type is not None and forced_type:
if forced_type == "managed":
return True
if forced_type == "unmanaged":
return False
raise ValueError(
f"Unsupported value for {forced_type_variable}: {forced_type!r}"
)
if not _is_system_booted_with_systemd():
return False
# It's possible that the system was booted with systemd but PID 1 is not
# systemd. This happens on Sandcastle (Facebook's CI) which runs tests
# in a Linux process namespace. If this is the case, systemctl and
# systemd-run fail with the following message:
#
# > Failed to connect to bus: No data available
#
# If we can't talk to the system's systemd for this reason, run our
# temporary systemd user service manager unmanaged.
if os.getuid() == 0 and not parent_systemd.is_alive():
return False
return True
lifetime_duration = 30
with create_tmp_dir() as xdg_runtime_dir:
if should_create_managed():
with _transient_managed_systemd_user_service_manager(
xdg_runtime_dir=xdg_runtime_dir,
parent_systemd=parent_systemd,
lifetime_duration=lifetime_duration,
) as child_systemd:
yield child_systemd
else:
with _TransientUnmanagedSystemdUserServiceManager(
xdg_runtime_dir=xdg_runtime_dir, lifetime_duration=lifetime_duration
) as systemd:
yield systemd
def _is_system_booted_with_systemd() -> bool:
"""See the sd_booted(3) manual page."""
return pathlib.Path("/run/systemd/system/").exists()
@contextlib.contextmanager
def _transient_managed_systemd_user_service_manager(
xdg_runtime_dir: pathlib.Path,
parent_systemd: SystemdUserServiceManager,
lifetime_duration: int,
) -> typing.Iterator[SystemdUserServiceManager]:
"""Create an isolated systemd instance using 'systemd-run systemd'."""
child_systemd_service = parent_systemd.systemd_run(
command=["/usr/lib/systemd/systemd", "--user", "--unit=basic.target"],
properties={
"Description": f"Eden test systemd user service manager "
f"({xdg_runtime_dir})",
"CollectMode": "inactive-or-failed",
"Restart": "no",
"RuntimeMaxSec": str(lifetime_duration),
"TimeoutStartSec": str(lifetime_duration),
"Type": "notify",
},
extra_env={"XDG_RUNTIME_DIR": str(xdg_runtime_dir)},
)
child_systemd = SystemdUserServiceManager(
xdg_runtime_dir=xdg_runtime_dir,
process_id=child_systemd_service.query_main_process_id(),
)
try:
yield child_systemd
finally:
try:
child_systemd_service.stop()
except Exception:
logger.warning(
f"Failed to stop systemd user service manager ({child_systemd})"
)
# Ignore the exception.
class _TransientUnmanagedSystemdUserServiceManager:
"""Create an isolated systemd instance as a child process.
This class does not work if a user systemd instance is already running.
"""
__cleanups: contextlib.ExitStack
__lifetime_duration: int
__xdg_runtime_dir: pathlib.Path
def __init__(self, xdg_runtime_dir: pathlib.Path, lifetime_duration: int) -> None:
super().__init__()
self.__xdg_runtime_dir = xdg_runtime_dir
self.__lifetime_duration = lifetime_duration
self.__cleanups = contextlib.ExitStack()
def start_systemd_process(self) -> subprocess.Popen:
self._create_run_systemd_directory()
cgroup = self.create_cgroup()
env = self.base_systemd_environment
env["XDG_RUNTIME_DIR"] = str(self.__xdg_runtime_dir)
# HACK(strager): Work around 'systemd --user' refusing to start if the
# system is not managed by systemd.
env["LD_PRELOAD"] = str(
pathlib.Path(
typing.cast(str, FindExe.FORCE_SD_BOOTED) # T38947910
).resolve(strict=True)
)
process = subprocess.Popen(
[
"timeout",
f"{self.__lifetime_duration}s",
"/usr/lib/systemd/systemd",
"--user",
"--unit=basic.target",
"--log-target=console",
],
stdin=subprocess.DEVNULL,
env=env,
preexec_fn=lambda: cgroup.add_current_process(),
)
self.__cleanups.callback(lambda: self.stop_systemd_process(process))
return process
def _create_run_systemd_directory(self) -> None:
"""Create /run/systemd/ if it doesn't already exist.
'systemctl --user daemon-reload' checks for disk free space by calling
statvfs(3) on /run/systemd [1]. If the directory is missing,
daemon-reload fails. Call this function to make daemon-reload pass this
check.
daemon-reload is right in expecting that /run/systemd exists, since
systemd requires sd_booted(3) to return true. However, we force
sd_booted(3) to return true, so /run/systemd might not exist.
[1] https://github.com/systemd/systemd/blob/v239/src/core/dbus-manager.c#L1277
"""
path = pathlib.Path("/run/systemd")
try:
path.mkdir(exist_ok=False, mode=0o755, parents=False)
except OSError:
logging.warning(
f"Failed to create {path}; ignoring error, but systemd might "
f"fail to reload",
exc_info=True,
)
@property
def base_systemd_environment(self) -> typing.Dict[str, str]:
# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Environment%20variables%20in%20spawned%20processes
return {"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
def stop_systemd_process(self, systemd_process: subprocess.Popen) -> None:
systemd_process.terminate()
try:
systemd_process.wait(timeout=15)
return
except subprocess.TimeoutExpired:
pass
logger.warning(
"Failed to terminate systemd user service manager. Attempting to kill."
)
systemd_process.kill()
systemd_process.wait(timeout=3)
def create_cgroup(self) -> LinuxCgroup:
parent_cgroup = LinuxCgroup.from_current_process()
path = tempfile.mkdtemp(
prefix="edenfs_test.", dir=str(parent_cgroup.sys_fs_cgroup_path)
)
cgroup = LinuxCgroup.from_sys_fs_cgroup_path(pathlib.PosixPath(path))
self.__cleanups.callback(lambda: cgroup.delete_recursive())
return cgroup
def wait_until_systemd_is_alive(
self,
systemd_process: subprocess.Popen,
child_systemd: SystemdUserServiceManager,
) -> None:
while True:
systemd_did_exit = systemd_process.poll() is not None
if systemd_did_exit:
raise Exception("systemd failed to start")
if child_systemd.is_alive():
return
def __enter__(self) -> SystemdUserServiceManager:
systemd_process = self.start_systemd_process()
child_systemd = SystemdUserServiceManager(
xdg_runtime_dir=self.__xdg_runtime_dir, process_id=systemd_process.pid
)
self.wait_until_systemd_is_alive(systemd_process, child_systemd)
return child_systemd
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Optional[types.TracebackType],
) -> typing.Optional[bool]:
self.__cleanups.close()
return None
def _get_current_xdg_runtime_dir() -> pathlib.Path:
problems = []
path = None
if path is None:
path_from_env = os.environ.get("XDG_RUNTIME_DIR")
if path_from_env is None or path_from_env == "":
problems.append("$XDG_RUNTIME_DIR is not set")
else:
path = pathlib.Path(path_from_env)
if path is None:
if os.getuid() == 0:
path = pathlib.Path("/run")
else:
path = pathlib.Path("/run/user") / str(os.getuid())
assert path is not None
if not path.exists():
problems.append(f"'{path}' does not exist")
raise Exception(
"Could not determine XDG_RUNTIME_DIR: " + ", and ".join(problems)
)
return path
```
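The `SystemdService.__query_property` helper above boils down to running `systemctl --user show --property=NAME -- UNIT` and stripping the `NAME=` prefix from its output. A minimal standalone sketch of that query and parsing, assuming a reachable systemd user instance and an illustrative unit name:
```python
import subprocess

def query_user_unit_property(unit_name: str, prop: str) -> str:
    """Return one property (e.g. ActiveState) of a 'systemctl --user' unit."""
    stdout = subprocess.check_output(
        ["systemctl", "--user", "show", f"--property={prop}", "--", unit_name]
    )
    prefix = prop.encode("utf-8") + b"="
    if not stdout.startswith(prefix):
        raise Exception(f"Failed to parse output of systemctl show: {stdout!r}")
    return stdout[len(prefix):].rstrip(b"\n").decode("utf-8")

if __name__ == "__main__":
    # "dbus.service" is only an illustrative unit; substitute one that exists
    # in your user session.
    print(query_user_unit_property("dbus.service", "ActiveState"))
```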
#### File: eden/integration/setattr_test.py
```python
import errno
import os
import stat
import subprocess
import time
from .lib import testcase
@testcase.eden_repo_test
class SetAttrTest(testcase.EdenRepoTest):
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.commit("Initial commit.")
# mtime should not get changed on permission changes
def test_chmod(self) -> None:
filename = os.path.join(self.mount, "hello")
st = os.lstat(filename)
os.chmod(filename, st.st_mode | stat.S_IROTH)
new_st = os.lstat(filename)
self.assertGreaterEqual(new_st.st_atime, st.st_atime)
self.assertEqual(new_st.st_mtime, st.st_mtime)
self.assertEqual(new_st.st_atime, st.st_atime)
self.assertGreaterEqual(new_st.st_ctime, st.st_ctime)
self.assertEqual(new_st.st_mode, st.st_mode | stat.S_IROTH)
def test_chown_as_root(self) -> None:
if not self._can_always_chown():
# Don't skip. Skipped tests show up in the metrics and have tasks
# created for them.
return
filename = os.path.join(self.mount, "hello")
# If root, any ownership change is legal.
st = os.lstat(filename)
os.chown(filename, st.st_uid, st.st_gid)
os.chown(filename, st.st_uid + 1, st.st_gid)
newst = os.lstat(filename)
self.assertEqual(st.st_uid + 1, newst.st_uid)
self.assertEqual(st.st_gid, newst.st_gid)
os.chown(filename, st.st_uid, st.st_gid + 1)
newst = os.lstat(filename)
self.assertEqual(st.st_uid, newst.st_uid)
self.assertEqual(st.st_gid + 1, newst.st_gid)
def test_chown_uid_as_nonroot_fails(self) -> None:
if self._can_always_chown():
# Don't skip. Skipped tests show up in the metrics and have tasks
# created for them.
return
filename = os.path.join(self.mount, "hello")
# Chown should fail with EPERM unless we are setting it
# to the same current ownership.
st = os.lstat(filename)
os.chown(filename, st.st_uid, st.st_gid)
with self.assertRaises(OSError) as context:
os.chown(filename, st.st_uid + 1, st.st_gid)
self.assertEqual(
errno.EPERM,
context.exception.errno,
msg="changing uid of a file should raise EPERM",
)
def test_chown_gid_as_nonroot_succeeds_if_member(self) -> None:
if self._can_always_chown():
# Don't skip. Skipped tests show up in the metrics and have tasks
# created for them.
return
filename = os.path.join(self.mount, "hello")
st = os.lstat(filename)
os.chown(filename, st.st_uid, self._get_member_group())
def test_chown_gid_as_nonroot_fails_if_not_member(self) -> None:
if self._can_always_chown():
# Don't skip. Skipped tests show up in the metrics and have tasks
# created for them.
return
filename = os.path.join(self.mount, "hello")
st = os.lstat(filename)
with self.assertRaises(OSError) as context:
os.chown(filename, st.st_uid, self._get_non_member_group())
self.assertEqual(
errno.EPERM,
context.exception.errno,
msg="changing gid of a file should raise EPERM",
)
def _can_always_chown(self):
# Could instead check if the process doesn't have the CAP_CHOWN capability.
return 0 == os.geteuid()
def _get_member_group(self):
"""Find a group that this user is a member of."""
# This is a bit hard to do: we need to find a group the user is a member
# of that's not the effective or real gid. If there are none then we
# must skip.
groups = os.getgroups()
for gid in groups:
if gid != os.getgid() and gid != os.getegid():
return gid
self.skipTest("no usable groups found")
def _get_non_member_group(self):
"""Find a group that this user is not a member of."""
# All that matters is that we return a gid outside of the set of this
# user's groups.
user_groups = set(os.getgroups())
return max(user_groups) + 1
def test_truncate(self) -> None:
filename = os.path.join(self.mount, "hello")
st = os.lstat(filename)
with open(filename, "r+") as f:
f.truncate(0)
self.assertEqual("", f.read())
new_st = os.lstat(filename)
self.assertEqual(new_st.st_size, 0)
self.assertGreaterEqual(new_st.st_atime, st.st_atime)
self.assertGreaterEqual(new_st.st_ctime, st.st_ctime)
self.assertGreaterEqual(new_st.st_mtime, st.st_mtime)
def test_utime(self) -> None:
filename = os.path.join(self.mount, "hello")
# Update the atime and mtime to a time 5 seconds in the past.
#
# We round to the nearest second to avoid timestamp granularity issues.
# (Eden currently uses the underlying overlay filesystem to store the
# timestamps, and it might not necessarily support high resolution
# timestamps.)
timestamp = int(time.time() - 5)
os.utime(filename, (timestamp, timestamp))
st = os.lstat(filename)
self.assertEqual(st.st_atime, timestamp)
self.assertEqual(st.st_mtime, timestamp)
def test_touch(self) -> None:
filename = os.path.join(self.mount, "hello")
now = time.time()
subprocess.check_call(["touch", filename])
st = os.lstat(filename)
self.assertGreaterEqual(st.st_atime, now)
self.assertGreaterEqual(st.st_mtime, now)
newfile = os.path.join(self.mount, "touched-new-file")
now = time.time()
subprocess.check_call(["touch", newfile])
st = os.lstat(newfile)
self.assertGreaterEqual(st.st_atime, now)
self.assertGreaterEqual(st.st_mtime, now)
def test_umask(self) -> None:
original_umask = os.umask(0o177)
try:
filename = os.path.join(self.mount, "test1")
with open(filename, "w") as f:
f.write("garbage")
self.assertEqual(os.stat(filename).st_mode & 0o777, 0o600)
filename = os.path.join(self.mount, "test2")
os.umask(0o777)
with open(filename, "w") as f:
f.write("garbage")
self.assertEqual(os.stat(filename).st_mode & 0o777, 0o000)
finally:
os.umask(original_umask)
def test_dir_addfile(self) -> None:
dirname = os.path.join(self.mount, "test_dir")
self.mkdir("test_dir")
st = os.lstat(dirname)
self.write_file("test_file", "test string")
new_st = os.lstat(dirname)
self.assertEqual(new_st.st_atime, st.st_atime)
self.assertGreaterEqual(new_st.st_ctime, st.st_ctime)
self.assertGreaterEqual(new_st.st_mtime, st.st_mtime)
def test_dir_delfile(self) -> None:
dirname = os.path.join(self.mount, "test_dir")
self.mkdir("test_dir")
self.write_file("test_file", "test string")
st = os.lstat(dirname)
self.rm("test_file")
new_st = os.lstat(dirname)
self.assertEqual(new_st.st_atime, st.st_atime)
self.assertGreaterEqual(new_st.st_ctime, st.st_ctime)
self.assertGreaterEqual(new_st.st_mtime, st.st_mtime)
def test_dir_change_filecontents(self) -> None:
dirname = os.path.join(self.mount, "test_dir")
self.mkdir("test_dir")
self.write_file("test_file", "test string")
st = os.lstat(dirname)
self.write_file("test_file", "test string 1")
new_st = os.lstat(dirname)
self.assertEqual(new_st.st_mtime, st.st_mtime)
self.assertEqual(new_st.st_ctime, st.st_ctime)
self.assertEqual(new_st.st_mtime, st.st_mtime)
# Changing the permissions of a directory should change
# only the directory's ctime, not its mtime or atime.
def test_dir_change_perm(self) -> None:
dirname = os.path.join(self.mount, "test_dir")
self.mkdir("test_dir")
st = os.lstat(dirname)
os.chmod(dirname, st.st_mode | stat.S_IROTH)
new_st = os.lstat(dirname)
self.assertEqual(new_st.st_mtime, st.st_mtime)
self.assertEqual(new_st.st_atime, st.st_atime)
self.assertGreaterEqual(new_st.st_ctime, st.st_ctime)
# A read() call on a file in EdenFS should update the file's atime.
# An open() call alone should not change any of the file's timestamps.
def test_timestamp_openfiles(self) -> None:
filename = os.path.join(self.mount, "hello")
st = os.lstat(filename)
with open(filename, "r") as f:
new_st = os.lstat(filename)
self.assertEqual(new_st.st_mtime, st.st_mtime)
self.assertEqual(new_st.st_atime, st.st_atime)
self.assertEqual(new_st.st_ctime, st.st_ctime)
f.read()
f.close()
new_st = os.lstat(filename)
self.assertEqual(new_st.st_mtime, st.st_mtime)
self.assertGreater(new_st.st_atime, st.st_atime)
self.assertEqual(new_st.st_ctime, st.st_ctime)
```
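The timestamp expectations in `test_chmod` (ctime advances, mtime stays put) are ordinary POSIX semantics rather than anything EdenFS-specific. A minimal standalone sketch checking the same invariant against a temporary file on the local filesystem instead of an Eden mount:
```python
import os
import stat
import tempfile

def chmod_changes_ctime_not_mtime() -> None:
    with tempfile.NamedTemporaryFile(delete=False) as f:
        path = f.name
        f.write(b"hola\n")
    before = os.lstat(path)
    os.chmod(path, before.st_mode | stat.S_IROTH)
    after = os.lstat(path)
    # A permission change updates the inode (ctime) but not the data (mtime).
    assert after.st_mtime == before.st_mtime
    assert after.st_ctime >= before.st_ctime
    assert stat.S_IMODE(after.st_mode) == stat.S_IMODE(before.st_mode) | stat.S_IROTH
    os.unlink(path)

if __name__ == "__main__":
    chmod_changes_ctime_not_mtime()
    print("ok")
```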
#### File: integration/snapshot/gen_snapshot.py
```python
import argparse
import logging
import sys
import time
from pathlib import Path
from eden.integration.lib.find_executables import FindExe
from . import snapshot as snapshot_mod
def main() -> int:
ap = argparse.ArgumentParser()
ap.add_argument(
"-l",
"--list",
action="store_true",
help="List all known snapshot generator types.",
)
ap.add_argument("-o", "--output", help="The output file path.")
ap.add_argument(
"name",
nargs="?",
help="The name of the snapshot generator to run. Use --list to list the "
"available generators.",
)
args = ap.parse_args()
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
if args.list:
print("Available generators:")
for name, snapshot_class in snapshot_mod.snapshot_types.items():
print(f" {name}: {snapshot_class.DESCRIPTION}")
return 0
if args.name is None:
ap.error("must specify a snapshot type or --list")
return 1
snapshot_type = snapshot_mod.snapshot_types.get(args.name)
if snapshot_type is None:
ap.error(
f'unknown snapshot type "{args.name}". '
"Use --list to see a list of available generators."
)
return 1
if args.output is not None:
output_path = Path(args.output)
else:
date_stamp = time.strftime("%Y%m%d")
base_name = f"{args.name}-{date_stamp}.tar.xz"
output_path = Path(
FindExe.REPO_ROOT, "eden", "test-data", "snapshots", base_name
)
logging.info(f'Running "{args.name}" snapshot generator')
with snapshot_mod.generate(snapshot_type) as snapshot:
snapshot.create_tarball(output_path)
logging.info(f"Successfully generated {output_path}")
return 0
if __name__ == "__main__":
rc = main()
sys.exit(rc)
```
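gen_snapshot.py follows a common registry-plus-`--list` CLI shape: a mapping from generator names to implementations, a flag to list them, and a positional argument to select one. A minimal sketch of that shape with a hypothetical generator table, independent of the Eden snapshot machinery:
```python
import argparse
import sys
from typing import Callable, Dict

# Hypothetical registry standing in for snapshot_mod.snapshot_types.
GENERATORS: Dict[str, Callable[[], None]] = {
    "basic": lambda: print("generating basic snapshot..."),
}

def main() -> int:
    ap = argparse.ArgumentParser()
    ap.add_argument("-l", "--list", action="store_true", help="List generators.")
    ap.add_argument("name", nargs="?", help="The generator to run.")
    args = ap.parse_args()
    if args.list:
        for name in GENERATORS:
            print(name)
        return 0
    if args.name is None:
        ap.error("must specify a generator or --list")
    generator = GENERATORS.get(args.name)
    if generator is None:
        ap.error(f"unknown generator {args.name!r}")
    generator()
    return 0

if __name__ == "__main__":
    sys.exit(main())
```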
#### File: integration/snapshot/test_snapshots.py
```python
import os
import stat
import unittest
from pathlib import Path
from typing import Callable
from eden.integration.lib import edenclient
from . import snapshot as snapshot_mod, verify as verify_mod
class Test(unittest.TestCase):
"""Tests to verify the contents of various saved snapshots.
All of the test functions in this class are dynamically added by register_tests()
"""
def _test_snapshot(self, snapshot_path: Path) -> None:
with snapshot_mod.create_tmp_dir() as tmp_dir:
snapshot = snapshot_mod.unpack_into(snapshot_path, tmp_dir)
self._run_test(snapshot)
def _run_test(self, snapshot: snapshot_mod.BaseSnapshot) -> None:
verifier = verify_mod.SnapshotVerifier()
snapshot.verify(verifier)
# Fail the test if any errors were found.
# The individual errors will have been printed out previously
# as they were found.
if verifier.errors:
self.fail(f"found {len(verifier.errors)} errors")
class InfraTests(unittest.TestCase):
"""Tests for the snapshot generation/verification code itself."""
NUM_SNAPSHOTS = 0
def test_snapshot_list(self) -> None:
# Ensure that at least one snapshot file was found, so that the tests will
# fail if we somehow can't find the snapshot data directory correctly.
self.assertGreater(self.NUM_SNAPSHOTS, 0)
def test_verify_directory(self) -> None:
expected = verify_mod.ExpectedFileSet()
expected.add_file("a/b/normal.txt", b"abc\n", 0o644)
expected.add_file("a/b/normal_exe.exe", b"abc\n", 0o755)
expected.add_file("a/b/missing.txt", b"abc\n", 0o644)
expected.add_file("a/b/wrong_perms.txt", b"abc\n", 0o644)
expected.add_file("a/b/wrong_file_type.txt", b"abc\n", 0o644)
expected.add_socket("a/normal.sock", 0o644)
expected.add_socket("a/exe.sock", 0o755)
expected.add_symlink("a/normal.link", b"symlink contents", 0o777)
expected.add_symlink("a/missing.link", b"missing symlink", 0o777)
# Define a subclass of HgSnapshot. We define this solely so we can use its
# helper write_file(), make_socket(), and mkdir() methods.
class MockSnapshot(snapshot_mod.HgSnapshot):
def populate_backing_repo(self) -> None:
pass
def populate_checkout(self) -> None:
pass
def verify_snapshot_data(
self, verifier: verify_mod.SnapshotVerifier, eden: edenclient.EdenFS
) -> None:
pass
with snapshot_mod.create_tmp_dir() as tmp_dir:
snapshot = MockSnapshot(tmp_dir)
snapshot.data_dir.mkdir()
snapshot.checkout_path.mkdir()
snapshot.write_file("a/b/normal.txt", b"abc\n", 0o644)
snapshot.write_file("a/b/normal_exe.exe", b"abc\n", 0o755)
snapshot.write_file("a/b/wrong_perms.txt", b"abc\n", 0o755)
snapshot.make_socket("a/b/wrong_file_type.txt", 0o755)
snapshot.make_socket("a/normal.sock", 0o644)
snapshot.make_socket("a/exe.sock", 0o755)
os.symlink(b"symlink contents", snapshot.checkout_path / "a/normal.link")
# The verifier code only checks files, not directories, so it should not
# complain about extra directories that may be present.
snapshot.mkdir("a/b/c/extra_dir", 0o755)
verifier = verify_mod.SnapshotVerifier()
verifier.verify_directory(snapshot.checkout_path, expected)
expected_errors = [
"a/b/missing.txt: file not present in snapshot",
"a/missing.link: file not present in snapshot",
f"a/b/wrong_file_type.txt: expected file type to be {stat.S_IFREG:#o}, "
f"found {stat.S_IFSOCK:#o}",
f"a/b/wrong_file_type.txt: expected permissions to be 0o644, found 0o755",
"a/b/wrong_perms.txt: expected permissions to be 0o644, found 0o755",
]
self.assertEqual(sorted(verifier.errors), sorted(expected_errors))
def register_tests() -> None:
# Create one test function for each snapshot
snapshot_dir = Path("eden/test-data/snapshots").resolve()
for snapshot in snapshot_dir.iterdir():
# We don't use Path.stem here since it only strips off the very last suffix,
# so foo.tar.bz2 becomes foo.tar rather than foo.
stem = snapshot.name.split(".", 1)[0]
setattr(Test, f"test_{stem}", _create_test_fn(snapshot))
InfraTests.NUM_SNAPSHOTS += 1
def _create_test_fn(snapshot: Path) -> Callable[[Test], None]:
def test_fn(self: Test) -> None:
self._test_snapshot(snapshot)
return test_fn
register_tests()
```
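`register_tests()` attaches one generated test method per snapshot file to the `Test` class before unittest discovery runs, using a factory function so each closure captures its own snapshot path. A minimal sketch of the same pattern with hypothetical inputs:
```python
import unittest
from typing import Callable

class DataDrivenTest(unittest.TestCase):
    def _check(self, value: int) -> None:
        self.assertGreaterEqual(value, 0)

def _make_test(value: int) -> Callable[[DataDrivenTest], None]:
    # A factory is used so each generated method captures its own 'value';
    # a bare lambda in the loop would capture the loop variable by reference.
    def test_fn(self: DataDrivenTest) -> None:
        self._check(value)
    return test_fn

# Hypothetical inputs standing in for the snapshot files found on disk.
for name, value in [("small", 1), ("large", 100)]:
    setattr(DataDrivenTest, f"test_{name}", _make_test(value))

if __name__ == "__main__":
    unittest.main()
```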
#### File: snapshot/types/basic.py
```python
from eden.integration.lib import edenclient
from eden.integration.snapshot import verify as verify_mod
from eden.integration.snapshot.snapshot import HgSnapshot, snapshot_class
@snapshot_class(
"basic",
"A simple directory structure with a mix of loaded, materialized, "
"and unloaded files.",
)
class BasicSnapshot(HgSnapshot):
def populate_backing_repo(self) -> None:
repo = self.backing_repo
repo.write_file("README.md", "project docs")
repo.write_file(".gitignore", "ignored.txt\n")
repo.write_file("main/loaded_dir/loaded_file.c", "loaded")
repo.write_file("main/loaded_dir/not_loaded_file.c", "not loaded")
repo.write_file("main/loaded_dir/not_loaded_exe.sh", "not loaded", mode=0o755)
repo.write_file("main/loaded_dir/not_loaded_subdir/a.txt", "some contents\n")
repo.write_file(
"main/loaded_dir/not_loaded_subdir/b.exe", "other contents", mode=0o755
)
repo.write_file("main/loaded_dir/loaded_subdir/dir1/file1.txt", "text\n")
repo.write_file("main/loaded_dir/loaded_subdir/dir2/file2.txt", "more text\n")
repo.write_file(
"main/materialized_subdir/script.sh", "original script contents", mode=0o755
)
repo.write_file("main/materialized_subdir/test.c", "original test contents")
repo.write_file("main/materialized_subdir/unmodified.txt", "original contents")
repo.symlink("main/materialized_subdir/modified_symlink.lnk", "original link")
repo.write_file("main/mode_changes/normal_to_exe.txt", "will change mode")
repo.write_file(
"main/mode_changes/exe_to_normal.txt", "will change mode", mode=0o755
)
repo.write_file("main/mode_changes/normal_to_readonly.txt", "will be readonly")
repo.write_file("never_accessed/foo/bar/baz.txt", "baz\n")
repo.write_file("never_accessed/foo/bar/xyz.txt", "xyz\n")
repo.write_file("never_accessed/foo/file.txt", "data\n")
repo.symlink("never_accessed/foo/some.lnk", "link destination")
repo.commit("Initial commit.")
def populate_checkout(self) -> None:
# Load the main/loaded_dir directory and some of its children.
# Listing a directory allocates inode numbers for its children, which causes
# the directory to be tracked in the overlay even if it has not been modified.
self.list_dir("main/loaded_dir")
self.list_dir("main/loaded_dir/loaded_subdir/dir1")
self.list_dir("main/loaded_dir/loaded_subdir/dir2")
self.read_file("main/loaded_dir/loaded_file.c")
self.read_file("main/loaded_dir/loaded_subdir/dir1/file1.txt")
self.read_file("main/loaded_dir/loaded_subdir/dir2/file2.txt")
# Modify some files in main/materialized_subdir to force them to be materialized
self.write_file(
"main/materialized_subdir/script.sh", b"new script contents", 0o755
)
self.write_file("main/materialized_subdir/test.c", b"new test contents")
self.symlink("main/materialized_subdir/modified_symlink.lnk", b"new link")
self.symlink("main/materialized_subdir/new_symlink.lnk", b"new link")
self.make_socket("main/materialized_subdir/test/socket.sock", mode=0o600)
# Test materializing some files by changing their mode
self.chmod("main/mode_changes/normal_to_exe.txt", 0o755)
self.chmod("main/mode_changes/exe_to_normal.txt", 0o644)
self.chmod("main/mode_changes/normal_to_readonly.txt", 0o400)
# Create a new top-level directory with some new files
self.write_file("untracked/new/normal.txt", b"new src contents")
self.write_file("untracked/new/normal2.txt", b"extra src contents")
self.write_file("untracked/new/readonly.txt", b"new readonly contents", 0o400)
self.write_file("untracked/new/subdir/abc.txt", b"abc")
self.write_file("untracked/new/subdir/xyz.txt", b"xyz")
self.write_file("untracked/executable.exe", b"do stuff", mode=0o755)
self.make_socket("untracked/everybody.sock", mode=0o666)
self.make_socket("untracked/owner_only.sock", mode=0o600)
# Create some untracked files in an existing tracked directory
self.write_file("main/untracked.txt", b"new new untracked file")
self.write_file("main/ignored.txt", b"new ignored file")
self.write_file("main/untracked_dir/foo.txt", b"foobar")
def get_expected_files(self) -> verify_mod.ExpectedFileSet:
# Confirm that the files look like what we expect
files = verify_mod.ExpectedFileSet()
# TODO: These symlink permissions should ideally be 0o777
files.add_symlink(".eden/root", bytes(self.checkout_path), 0o770)
files.add_symlink(
".eden/client", bytes(self.eden_state_dir / "clients" / "checkout"), 0o770
)
files.add_symlink(".eden/socket", bytes(self.eden_state_dir / "socket"), 0o770)
files.add_symlink(".eden/this-dir", bytes(self.checkout_path / ".eden"), 0o770)
files.add_file("README.md", b"project docs", 0o644)
files.add_file(".gitignore", b"ignored.txt\n", 0o644)
files.add_file("main/loaded_dir/loaded_file.c", b"loaded", 0o644)
files.add_file("main/loaded_dir/not_loaded_file.c", b"not loaded", 0o644)
files.add_file("main/loaded_dir/not_loaded_exe.sh", b"not loaded", 0o755)
files.add_file("main/loaded_dir/loaded_subdir/dir1/file1.txt", b"text\n", 0o644)
files.add_file(
"main/loaded_dir/loaded_subdir/dir2/file2.txt", b"more text\n", 0o644
)
files.add_file(
"main/loaded_dir/not_loaded_subdir/a.txt", b"some contents\n", 0o644
)
files.add_file(
"main/loaded_dir/not_loaded_subdir/b.exe", b"other contents", 0o755
)
files.add_file(
"main/materialized_subdir/script.sh", b"new script contents", 0o755
)
files.add_file("main/materialized_subdir/test.c", b"new test contents", 0o644)
files.add_file(
"main/materialized_subdir/unmodified.txt", b"original contents", 0o644
)
files.add_symlink(
"main/materialized_subdir/modified_symlink.lnk", b"new link", 0o770
)
files.add_symlink(
"main/materialized_subdir/new_symlink.lnk", b"new link", 0o770
)
files.add_socket("main/materialized_subdir/test/socket.sock", 0o600)
files.add_file(
"main/mode_changes/normal_to_exe.txt", b"will change mode", 0o755
)
files.add_file(
"main/mode_changes/exe_to_normal.txt", b"will change mode", 0o644
)
files.add_file(
"main/mode_changes/normal_to_readonly.txt", b"will be readonly", 0o400
)
files.add_file("main/untracked.txt", b"new new untracked file", 0o644)
files.add_file("main/ignored.txt", b"new ignored file", 0o644)
files.add_file("main/untracked_dir/foo.txt", b"foobar", 0o644)
files.add_file("never_accessed/foo/bar/baz.txt", b"baz\n", 0o644)
files.add_file("never_accessed/foo/bar/xyz.txt", b"xyz\n", 0o644)
files.add_file("never_accessed/foo/file.txt", b"data\n", 0o644)
files.add_symlink("never_accessed/foo/some.lnk", b"link destination", 0o755)
files.add_file("untracked/new/normal.txt", b"new src contents", 0o644)
files.add_file("untracked/new/normal2.txt", b"extra src contents", 0o644)
files.add_file("untracked/new/readonly.txt", b"new readonly contents", 0o400)
files.add_file("untracked/new/subdir/abc.txt", b"abc", 0o644)
files.add_file("untracked/new/subdir/xyz.txt", b"xyz", 0o644)
files.add_file("untracked/executable.exe", b"do stuff", 0o755)
files.add_socket("untracked/everybody.sock", 0o666)
files.add_socket("untracked/owner_only.sock", 0o600)
return files
def verify_snapshot_data(
self, verifier: verify_mod.SnapshotVerifier, eden: edenclient.EdenFS
) -> None:
# Confirm that `hg status` reports the correct information
self.verify_hg_status(verifier)
expected_files = self.get_expected_files()
verifier.verify_directory(self.checkout_path, expected_files)
def verify_hg_status(self, verifier: verify_mod.SnapshotVerifier) -> None:
expected_status = {
"main/materialized_subdir/script.sh": "M",
"main/materialized_subdir/test.c": "M",
"main/materialized_subdir/modified_symlink.lnk": "M",
"main/materialized_subdir/new_symlink.lnk": "?",
"main/materialized_subdir/test/socket.sock": "?",
"main/mode_changes/normal_to_exe.txt": "M",
"main/mode_changes/exe_to_normal.txt": "M",
# We changed the mode on main/mode_changes/normal_to_readonly.txt,
# but the change isn't significant to mercurial.
"untracked/new/normal.txt": "?",
"untracked/new/normal2.txt": "?",
"untracked/new/readonly.txt": "?",
"untracked/new/subdir/abc.txt": "?",
"untracked/new/subdir/xyz.txt": "?",
"untracked/executable.exe": "?",
"untracked/everybody.sock": "?",
"untracked/owner_only.sock": "?",
"main/untracked.txt": "?",
"main/ignored.txt": "I",
"main/untracked_dir/foo.txt": "?",
}
repo = self.hg_repo(self.checkout_path)
verifier.verify_hg_status(repo, expected_status)
```
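`verify_hg_status()` (defined in verify.py below) compares against a dict of path to one-character status code ('M' modified, '?' untracked, 'I' ignored). A minimal sketch of building such a dict by parsing `hg status` output directly, assuming a Mercurial checkout at an illustrative path:
```python
import subprocess
from typing import Dict

def hg_status(repo_path: str, include_ignored: bool = True) -> Dict[str, str]:
    """Return a mapping of repo-relative path -> one-character hg status code."""
    if include_ignored:
        # '-i' alone would show *only* ignored files, so spell out every code.
        cmd = ["hg", "status", "-mardui"]
    else:
        cmd = ["hg", "status"]
    output = subprocess.check_output(cmd, cwd=repo_path)
    status: Dict[str, str] = {}
    for line in output.decode("utf-8").splitlines():
        # Each line looks like "M path/to/file": a status code, a space, a path.
        status[line[2:]] = line[0]
    return status

if __name__ == "__main__":
    # Illustrative path; point this at a real Mercurial checkout.
    for path, code in sorted(hg_status(".").items()):
        print(code, path)
```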
#### File: integration/snapshot/verify.py
```python
import abc
import collections.abc
import os
import stat as stat_mod
import typing
from pathlib import Path
from typing import Dict, Iterator, List, Optional, TypeVar, Union
from eden.integration.lib import hgrepo
_AnyPath = Union[Path, str]
class _DefaultObject:
pass
_DEFAULT_OBJECT: _DefaultObject = _DefaultObject()
class ExpectedFileBase(metaclass=abc.ABCMeta):
def __init__(
self, path: _AnyPath, contents: bytes, perms: int, file_type: int
) -> None:
self.path: Path = Path(path)
self.contents: bytes = contents
self.permissions: int = perms
self.file_type: int = file_type
def verify(
self, verifier: "SnapshotVerifier", path: Path, stat_info: os.stat_result
) -> None:
found_perms = stat_mod.S_IMODE(stat_info.st_mode)
if found_perms != self.permissions:
verifier.error(
f"{self.path}: expected permissions to be {self.permissions:#o}, "
f"found {found_perms:#o}"
)
found_file_type = stat_mod.S_IFMT(stat_info.st_mode)
if found_file_type != self.file_type:
verifier.error(
f"{self.path}: expected file type to be {self.file_type:#o}, "
f"found {found_file_type:#o}"
)
else:
self.verify_contents(verifier, path)
@abc.abstractmethod
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
pass
def _error(self, msg: str) -> None:
raise ValueError(msg)
class ExpectedFile(ExpectedFileBase):
def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o644) -> None:
super().__init__(path, contents, perms, stat_mod.S_IFREG)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
with path.open("rb") as f:
actual_contents = f.read()
if actual_contents != self.contents:
verifier.error(
f"file contents mismatch for {self.path}:\n"
f"expected: {self.contents!r}\n"
f"actual: {actual_contents!r}"
)
class ExpectedSymlink(ExpectedFileBase):
def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o777) -> None:
super().__init__(path, contents, perms, stat_mod.S_IFLNK)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
actual_contents = os.readlink(bytes(path))
if actual_contents != self.contents:
verifier.error(
f"symlink contents mismatch for {self.path}:\n"
f"expected: {self.contents!r}\n"
f"actual: {actual_contents!r}"
)
class ExpectedSocket(ExpectedFileBase):
def __init__(self, path: _AnyPath, perms: int = 0o755) -> None:
super().__init__(path, b"", perms, stat_mod.S_IFSOCK)
def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
pass
_ExpectedFile = TypeVar("_ExpectedFile", bound=ExpectedFileBase)
class ExpectedFileSet(collections.abc.Mapping):
"""
ExpectedFileSet is a container of ExpectedFileBase objects that also provides
helper methods for accessing and updating entries by path.
"""
def __init__(self) -> None:
self._entries: Dict[Path, ExpectedFileBase] = {}
def __len__(self) -> int:
return len(self._entries)
def __iter__(self) -> Iterator[ExpectedFileBase]:
return iter(self._entries.values())
def __getitem__(self, path: _AnyPath) -> ExpectedFileBase:
key = Path(path)
return self._entries[key]
def __delitem__(self, path: _AnyPath) -> None:
key = Path(path)
del self._entries[key]
def __contains__(self, path: object) -> bool:
if isinstance(path, str):
key = Path(path)
elif isinstance(path, Path):
key = path
else:
return False
return key in self._entries
@typing.overload
def pop(self, path: _AnyPath) -> ExpectedFileBase:
...
@typing.overload # noqa: F811
def pop(self, path: _AnyPath, default: ExpectedFileBase) -> ExpectedFileBase:
...
@typing.overload # noqa: F811
def pop(self, path: _AnyPath, default: None) -> Optional[ExpectedFileBase]:
...
def pop( # noqa: F811
self,
path: _AnyPath,
default: Union[ExpectedFileBase, None, _DefaultObject] = _DEFAULT_OBJECT,
) -> Optional[ExpectedFileBase]:
key = Path(path)
if default is _DEFAULT_OBJECT:
return self._entries.pop(key)
else:
tmp = typing.cast(Optional[ExpectedFileBase], default)
return self._entries.pop(key, tmp)
def add_file(
self, path: _AnyPath, contents: bytes, perms: int = 0o644
) -> ExpectedFile:
return self.add(ExpectedFile(path=path, contents=contents, perms=perms))
def add_symlink(
self, path: _AnyPath, contents: bytes, perms: int = 0o777
) -> ExpectedSymlink:
return self.add(ExpectedSymlink(path=path, contents=contents, perms=perms))
def add_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
return self.add(ExpectedSocket(path=path, perms=perms))
def add(self, entry: _ExpectedFile) -> _ExpectedFile:
assert entry.path not in self
self._entries[entry.path] = entry
return entry
def set_file(
self, path: _AnyPath, contents: bytes, perms: int = 0o644
) -> ExpectedFile:
return self.set(ExpectedFile(path=path, contents=contents, perms=perms))
def set_symlink(
self, path: _AnyPath, contents: bytes, perms: int = 0o777
) -> ExpectedSymlink:
return self.set(ExpectedSymlink(path=path, contents=contents, perms=perms))
def set_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
return self.set(ExpectedSocket(path=path, perms=perms))
def set(self, entry: _ExpectedFile) -> _ExpectedFile:
self._entries[entry.path] = entry
return entry
class SnapshotVerifier:
def __init__(self) -> None:
self.errors: List[str] = []
self.quiet: bool = False
def error(self, message: str) -> None:
self.errors.append(message)
if not self.quiet:
print(f"==ERROR== {message}")
def verify_directory(self, path: Path, expected: ExpectedFileSet) -> None:
"""Confirm that the contents of a directory match the expected file state."""
found_files = enumerate_directory(path)
for expected_entry in expected:
file_stat = found_files.pop(expected_entry.path, None)
if file_stat is None:
self.error(f"{expected_entry.path}: file not present in snapshot")
continue
full_path = path / expected_entry.path
try:
expected_entry.verify(self, full_path, file_stat)
except AssertionError as ex:
self.error(f"{expected_entry.path}: {ex}")
continue
for path, stat_info in found_files.items():
if stat_mod.S_ISDIR(stat_info.st_mode):
# Don't require directories to be listed explicitly in the input files
continue
if str(path.parents[0]) == ".hg":
# Don't complain about files inside the .hg directory that the caller
# did not explicitly specify. Mercurial can create a variety of files
# here, and we don't care about checking the exact list of files it
# happened to create when the snapshot was generated.
continue
self.error(f"{path}: unexpected file present in snapshot")
def verify_hg_status(
self,
repo: hgrepo.HgRepository,
expected: Dict[str, str],
check_ignored: bool = True,
) -> None:
actual_status = repo.status(include_ignored=check_ignored)
for path, expected_char in expected.items():
actual_char = actual_status.pop(path, None)
if expected_char != actual_char:
self.error(
f"{path}: unexpected hg status difference: "
f"reported as {actual_char}, expected {expected_char}"
)
for path, actual_char in actual_status.items():
self.error(
f"{path}: unexpected hg status difference: "
f"reported as {actual_char}, expected None"
)
def enumerate_directory(path: Path) -> Dict[Path, os.stat_result]:
"""
Recursively walk a directory and return a dictionary of all of the files and
directories it contains.
Returns a dictionary of [path -> os.stat_result]
The returned paths are relative to the input directory.
"""
entries: Dict[Path, os.stat_result] = {}
_enumerate_directory_helper(path, Path(), entries)
return entries
def _enumerate_directory_helper(
root_path: Path, rel_path: Path, results: Dict[Path, os.stat_result]
) -> None:
for entry in os.scandir(root_path / rel_path):
# Current versions of typeshed don't know about the follow_symlinks argument,
# so ignore type errors on the next line.
stat_info: os.stat_result = entry.stat(follow_symlinks=False) # type: ignore
entry_path: Path = rel_path / entry.name
results[entry_path] = stat_info
if stat_mod.S_ISDIR(stat_info.st_mode):
_enumerate_directory_helper(root_path, entry_path, results)
```
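`enumerate_directory()` above builds a flat map from relative paths to `lstat` results by recursing with `os.scandir` and `follow_symlinks=False`. An equivalent standalone sketch, usable outside the snapshot verifier:
```python
import os
import stat
from pathlib import Path
from typing import Dict

def enumerate_tree(root: Path) -> Dict[Path, os.stat_result]:
    """Map every entry under 'root' (files, dirs, symlinks) to its lstat result."""
    results: Dict[Path, os.stat_result] = {}
    def walk(rel: Path) -> None:
        for entry in os.scandir(root / rel):
            info = entry.stat(follow_symlinks=False)
            rel_entry = rel / entry.name
            results[rel_entry] = info
            # Symlinks to directories report S_IFLNK here, so there is no risk
            # of recursing through a symlink loop.
            if stat.S_ISDIR(info.st_mode):
                walk(rel_entry)
    walk(Path())
    return results

if __name__ == "__main__":
    for path, info in sorted(enumerate_tree(Path(".")).items()):
        print(stat.filemode(info.st_mode), path)
```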
#### File: eden/integration/start_test.py
```python
import os
import pathlib
import subprocess
import sys
import typing
import unittest
from typing import List, Optional
import pexpect
from eden.cli.config import EdenInstance
from eden.cli.util import HealthStatus
from eden.test_support.environment_variable import EnvironmentVariableMixin
from fb303.ttypes import fb_status
from .lib import testcase
from .lib.edenfs_systemd import EdenFSSystemdMixin
from .lib.fake_edenfs import get_fake_edenfs_argv
from .lib.find_executables import FindExe
from .lib.pexpect import PexpectAssertionMixin, wait_for_pexpect_process
from .lib.service_test_case import (
ServiceTestCaseBase,
SystemdServiceTestCaseMarker,
service_test,
)
from .lib.systemd import SystemdUserServiceManagerMixin
class StartTest(testcase.EdenTestCase):
def test_start_if_necessary(self) -> None:
# Confirm there are no checkouts configured, then stop edenfs
checkouts = self.eden.list_cmd_simple()
self.assertEqual({}, checkouts)
self.assertTrue(self.eden.is_healthy())
self.eden.shutdown()
self.assertFalse(self.eden.is_healthy())
# `eden start --if-necessary` should not start eden
output = self.eden.run_cmd("start", "--if-necessary")
self.assertEqual("No Eden mount points configured.\n", output)
self.assertFalse(self.eden.is_healthy())
# Restart eden and create a checkout
self.eden.start()
self.assertTrue(self.eden.is_healthy())
# Create a repository with one commit
repo = self.create_hg_repo("testrepo")
repo.write_file("README", "test\n")
repo.commit("Initial commit.")
# Create an Eden checkout of this repository
checkout_dir = os.path.join(self.mounts_dir, "test_checkout")
self.eden.clone(repo.path, checkout_dir)
checkouts = self.eden.list_cmd_simple()
self.assertEqual({checkout_dir: "RUNNING"}, checkouts)
# Stop edenfs
self.eden.shutdown()
self.assertFalse(self.eden.is_healthy())
# `eden start --if-necessary` should start edenfs now
if eden_start_needs_allow_root_option(systemd=False):
output = self.eden.run_cmd(
"start", "--if-necessary", "--", "--allowRoot", capture_stderr=True
)
else:
output = self.eden.run_cmd("start", "--if-necessary", capture_stderr=True)
self.assertIn("Started edenfs", output)
self.assertTrue(self.eden.is_healthy())
# Stop edenfs. We didn't start it through self.eden.start()
# so the self.eden class doesn't really know it is running and that
# it needs to be shut down.
self.eden.run_cmd("stop")
@testcase.eden_repo_test
class StartWithRepoTest(
testcase.EdenRepoTest,
EnvironmentVariableMixin,
SystemdUserServiceManagerMixin,
EdenFSSystemdMixin,
):
"""Test 'eden start' with a repo and checkout already configured.
"""
def setUp(self) -> None:
super().setUp()
self.eden.shutdown()
def test_eden_start_mounts_checkouts(self) -> None:
self.run_eden_start(systemd=False)
self.assert_checkout_is_mounted()
def test_eden_start_with_systemd_mounts_checkouts(self) -> None:
self.set_up_edenfs_systemd_service()
self.run_eden_start(systemd=True)
self.assert_checkout_is_mounted()
def run_eden_start(self, systemd: bool) -> None:
run_eden_start_with_real_daemon(
eden_dir=pathlib.Path(self.eden_dir),
etc_eden_dir=pathlib.Path(self.etc_eden_dir),
home_dir=pathlib.Path(self.home_dir),
systemd=systemd,
)
def assert_checkout_is_mounted(self) -> None:
file = pathlib.Path(self.mount) / "hello"
self.assertTrue(file.is_file())
self.assertEqual(file.read_text(), "hola\n")
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.commit("Initial commit.")
def select_storage_engine(self) -> str:
"""We need to persist data across restarts."""
return "rocksdb"
class DirectInvokeTest(unittest.TestCase):
def test_no_args(self) -> None:
"""Directly invoking edenfs with no arguments should fail."""
self._check_error([])
def test_eden_cmd_arg(self) -> None:
"""Directly invoking edenfs with an eden command should fail."""
self._check_error(["restart"])
def _check_error(self, args: List[str], err: Optional[str] = None) -> None:
cmd = [typing.cast(str, FindExe.EDEN_DAEMON)] # T38947910
cmd.extend(args)
out = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.assertEqual(os.EX_USAGE, out.returncode)
self.assertEqual(b"", out.stdout)
if err is None:
err = """\
error: the edenfs daemon should not normally be invoked manually
Did you mean to run "eden" instead of "edenfs"?
"""
self.maxDiff = 5000
self.assertMultiLineEqual(err, out.stderr.decode("utf-8", errors="replace"))
class StartFakeEdenFSTestBase(ServiceTestCaseBase, PexpectAssertionMixin):
def setUp(self) -> None:
super().setUp()
self.eden_dir = pathlib.Path(self.make_temporary_directory())
def spawn_start(
self,
eden_dir: typing.Optional[pathlib.Path] = None,
extra_args: typing.Optional[typing.Sequence[str]] = None,
) -> "pexpect.spawn[str]":
if eden_dir is None:
eden_dir = self.eden_dir
args = (
["--config-dir", str(eden_dir)]
+ self.get_required_eden_cli_args()
+ [
"start",
"--daemon-binary",
typing.cast(str, FindExe.FAKE_EDENFS), # T38947910
]
)
if extra_args:
args.extend(extra_args)
return pexpect.spawn(
FindExe.EDEN_CLI, args, encoding="utf-8", logfile=sys.stderr
)
@service_test
class StartFakeEdenFSTest(StartFakeEdenFSTestBase, PexpectAssertionMixin):
def test_eden_start_launches_separate_processes_for_separate_eden_dirs(
self
) -> None:
eden_dir_1 = self.eden_dir
eden_dir_2 = pathlib.Path(self.make_temporary_directory())
start_1_process = self.spawn_start(eden_dir=eden_dir_1)
self.assert_process_succeeds(start_1_process)
start_2_process = self.spawn_start(eden_dir=eden_dir_2)
self.assert_process_succeeds(start_2_process)
instance_1_health: HealthStatus = EdenInstance(
str(eden_dir_1), etc_eden_dir=None, home_dir=None
).check_health()
self.assertEqual(
instance_1_health.status,
fb_status.ALIVE,
f"First edenfs process should be healthy, but it isn't: "
f"{instance_1_health}",
)
instance_2_health: HealthStatus = EdenInstance(
str(eden_dir_2), etc_eden_dir=None, home_dir=None
).check_health()
self.assertEqual(
instance_2_health.status,
fb_status.ALIVE,
f"Second edenfs process should be healthy, but it isn't: "
f"{instance_2_health}",
)
self.assertNotEqual(
instance_1_health.pid,
instance_2_health.pid,
"The two edenfs processes should have separate process IDs",
)
def test_daemon_command_arguments_should_forward_to_edenfs(self) -> None:
extra_daemon_args = ["--allowExtraArgs", "--", "hello world", "--ignoredOption"]
start_process = self.spawn_start(extra_args=["--"] + extra_daemon_args)
wait_for_pexpect_process(start_process)
argv = get_fake_edenfs_argv(self.eden_dir)
self.assertEqual(
argv[-len(extra_daemon_args) :],
extra_daemon_args,
f"fake_edenfs should have received arguments verbatim\nargv: {argv}",
)
def test_daemon_command_arguments_should_forward_to_edenfs_without_leading_dashdash(
self
) -> None:
start_process = self.spawn_start(
extra_args=[
"hello world",
"another fake_edenfs argument",
"--",
"--allowExtraArgs",
"arg_after_dashdash",
]
)
self.assert_process_succeeds(start_process)
expected_extra_daemon_args = [
"hello world",
"another fake_edenfs argument",
"--allowExtraArgs",
"arg_after_dashdash",
]
argv = get_fake_edenfs_argv(self.eden_dir)
self.assertEqual(
argv[-len(expected_extra_daemon_args) :],
expected_extra_daemon_args,
f"fake_edenfs should have received extra arguments\nargv: {argv}",
)
def test_eden_start_resolves_explicit_config_dir_symlinks(self) -> None:
# Test resolution of symlinks in the Eden state directory when the
# --config-dir argument is specified to the Eden CLI.
link1 = self.tmp_dir / "link1"
link2 = self.tmp_dir / "link2"
link1.symlink_to(self.eden_dir, target_is_directory=True)
link2.symlink_to(link1)
self._test_eden_start_resolves_config_symlinks(link2, self.eden_dir)
def test_eden_start_resolves_auto_config_dir_symlinks(self) -> None:
# Test resolution of symlinks in the Eden state directory if we don't specify
# --config-dir and let the Eden CLI automatically figure out the location.
# This is how Eden normally runs in practice most of the time.
#
# Set up symlinks in the home directory location normally used by Eden.
home_local_dir = self.home_dir / "local"
data_dir = self.tmp_dir / "data"
data_dir.mkdir()
home_local_dir.symlink_to(data_dir, target_is_directory=True)
resolved_eden_dir = data_dir / ".eden"
self._test_eden_start_resolves_config_symlinks(None, resolved_eden_dir)
def _test_eden_start_resolves_config_symlinks(
self, input_path: Optional[pathlib.Path], resolved_path: pathlib.Path
) -> None:
# Test that the eden CLI resolves symlinks in the Eden state directory path.
#
# These must be resolved by the CLI and not the edenfs process: in some cases
# where the symlinks are on an NFS mount point they can be resolved by the user
# but not by root. The edenfs privhelper process runs as root, so it may not be
# able to resolve these symlinks. Making sure the symlinks are fully resolved
# by the CLI enables Eden to still work in these situations.
if input_path is not None:
config_dir_args = ["--config-dir", str(input_path)]
else:
config_dir_args = []
args = (
self.get_required_eden_cli_args()
+ config_dir_args
+ [
"start",
"--daemon-binary",
typing.cast(str, FindExe.FAKE_EDENFS), # T38947910
]
)
start_process: pexpect.spawn[str] = pexpect.spawn(
FindExe.EDEN_CLI, args, encoding="utf-8", logfile=sys.stderr
)
wait_for_pexpect_process(start_process)
argv = get_fake_edenfs_argv(resolved_path)
self.assert_eden_dir(argv, resolved_path)
def assert_eden_dir(self, argv: List[str], expected: pathlib.Path) -> None:
try:
index = argv.index("--edenDir")
except ValueError:
self.fail(f"--edenDir not present in arguments: {argv}")
actual_config_dir = argv[index + 1]
self.assertEqual(str(expected), actual_config_dir, f"bad config dir: {argv}")
def test_eden_start_fails_if_edenfs_is_already_running(self) -> None:
with self.spawn_fake_edenfs(self.eden_dir) as daemon_pid:
start_process = self.spawn_start()
start_process.expect_exact(f"edenfs is already running (pid {daemon_pid})")
self.assert_process_fails(start_process, 1)
def test_eden_start_fails_if_edenfs_fails_during_startup(self) -> None:
start_process = self.spawn_start(extra_args=["--", "--failDuringStartup"])
start_process.expect_exact(
"Started successfully, but reporting failure because "
"--failDuringStartup was specified"
)
self.assert_process_fails(start_process, 1)
@service_test
class StartWithSystemdTest(StartFakeEdenFSTestBase, SystemdServiceTestCaseMarker):
def test_eden_start_fails_if_service_is_running(self) -> None:
with self.spawn_fake_edenfs(self.eden_dir):
# Make fake_edenfs inaccessible and undetectable (without talking to
# systemd), but keep the systemd service alive.
(self.eden_dir / "lock").unlink()
(self.eden_dir / "socket").unlink()
health: HealthStatus = EdenInstance(
str(self.eden_dir), etc_eden_dir=None, home_dir=None
).check_health()
self.assertEqual(health.status, fb_status.DEAD)
service = self.get_edenfs_systemd_service(eden_dir=self.eden_dir)
self.assertEqual(service.query_active_state(), "active")
start_process = self.spawn_start()
start_process.expect_exact(
f"error: edenfs systemd service is already running"
)
# edenfsctl should show the output of 'systemctl status'.
start_process.expect(r"\bfb-edenfs@.*?\.service\b")
start_process.expect(r"Active:[^\n]*active \(running\)")
self.assert_process_fails(start_process, 1)
def run_eden_start_with_real_daemon(
eden_dir: pathlib.Path,
etc_eden_dir: pathlib.Path,
home_dir: pathlib.Path,
systemd: bool,
) -> None:
env = dict(os.environ)
if systemd:
env["EDEN_EXPERIMENTAL_SYSTEMD"] = "1"
else:
env.pop("EDEN_EXPERIMENTAL_SYSTEMD", None)
command = [
typing.cast(str, FindExe.EDEN_CLI), # T38947910
"--config-dir",
str(eden_dir),
"--etc-eden-dir",
str(etc_eden_dir),
"--home-dir",
str(home_dir),
"start",
"--daemon-binary",
typing.cast(str, FindExe.EDEN_DAEMON), # T38947910
]
if eden_start_needs_allow_root_option(systemd=systemd):
command.extend(["--", "--allowRoot"])
subprocess.check_call(command, env=env)
def eden_start_needs_allow_root_option(systemd: bool) -> bool:
return not systemd and "SANDCASTLE" in os.environ
```
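The start tests above drive the CLI through pexpect: spawn a process, assert on expected output, then check the exit status. A minimal sketch of that spawn/expect/exit-status flow against a plain `echo` command, assuming only that the third-party pexpect package is installed (POSIX only):
```python
import sys
import pexpect

def main() -> None:
    # Spawn a trivial command instead of the Eden CLI; the expect/exitstatus
    # flow mirrors what the integration tests do with 'eden start'.
    child = pexpect.spawn(
        "/bin/echo", ["Started edenfs"], encoding="utf-8", logfile=sys.stderr
    )
    child.expect_exact("Started edenfs")
    child.expect(pexpect.EOF)
    child.close()
    assert child.exitstatus == 0, f"unexpected exit status: {child.exitstatus}"

if __name__ == "__main__":
    main()
```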
#### File: eden/integration/thrift_test.py
```python
import binascii
import hashlib
import os
from pathlib import Path
from facebook.eden.ttypes import ScmFileStatus, SHA1Result, TimeSpec
from .lib import testcase
@testcase.eden_repo_test
class ThriftTest(testcase.EdenRepoTest):
commit1: str
commit2: str
def populate_repo(self) -> None:
self.repo.write_file("hello", "hola\n")
self.repo.write_file("README", "docs\n")
self.repo.write_file("adir/file", "foo!\n")
self.repo.write_file("bdir/file", "bar!\n")
self.repo.symlink("slink", "hello")
self.commit1 = self.repo.commit("Initial commit.")
self.repo.write_file("bdir/file", "bar?\n")
self.repo.write_file("cdir/subdir/new.txt", "and improved")
self.repo.remove_file("README")
self.commit2 = self.repo.commit("Commit 2.")
def setUp(self) -> None:
super().setUp()
self.client = self.get_thrift_client()
self.client.open()
self.addCleanup(self.client.close)
def get_loaded_inodes_count(self, path: str) -> int:
result = self.client.debugInodeStatus(self.mount_path_bytes, os.fsencode(path))
inode_count = 0
for item in result:
assert item.entries is not None
for inode in item.entries:
if inode.loaded:
inode_count += 1
return inode_count
def test_list_mounts(self) -> None:
mounts = self.client.listMounts()
self.assertEqual(1, len(mounts))
mount = mounts[0]
self.assertEqual(self.mount_path_bytes, mount.mountPoint)
assert mount.edenClientPath is not None
# The client path should always be inside the main eden directory
# Path.relative_to() will throw a ValueError if self.eden.eden_dir is not a
# directory prefix of mount.edenClientPath
Path(os.fsdecode(mount.edenClientPath)).relative_to(self.eden.eden_dir)
def test_get_sha1(self) -> None:
expected_sha1_for_hello = hashlib.sha1(b"hola\n").digest()
result_for_hello = SHA1Result(expected_sha1_for_hello)
expected_sha1_for_adir_file = hashlib.sha1(b"foo!\n").digest()
result_for_adir_file = SHA1Result(expected_sha1_for_adir_file)
self.assertEqual(
[result_for_hello, result_for_adir_file],
self.client.getSHA1(self.mount_path_bytes, [b"hello", b"adir/file"]),
)
def test_get_sha1_throws_for_path_with_dot_components(self) -> None:
results = self.client.getSHA1(self.mount_path_bytes, [b"./hello"])
self.assertEqual(1, len(results))
self.assert_error(
results[0], "std::domain_error: PathComponent must not be . or .."
)
def test_get_sha1_throws_for_empty_string(self) -> None:
results = self.client.getSHA1(self.mount_path_bytes, [b""])
self.assertEqual(1, len(results))
self.assert_error(results[0], "path cannot be the empty string")
def test_get_sha1_throws_for_directory(self) -> None:
results = self.client.getSHA1(self.mount_path_bytes, [b"adir"])
self.assertEqual(1, len(results))
self.assert_error(results[0], "adir: Is a directory")
def test_get_sha1_throws_for_non_existent_file(self) -> None:
results = self.client.getSHA1(self.mount_path_bytes, [b"i_do_not_exist"])
self.assertEqual(1, len(results))
self.assert_error(results[0], "i_do_not_exist: No such file or directory")
def test_get_sha1_throws_for_symlink(self) -> None:
"""Fails because caller should resolve the symlink themselves."""
results = self.client.getSHA1(self.mount_path_bytes, [b"slink"])
self.assertEqual(1, len(results))
self.assert_error(results[0], "slink: file is a symlink: Invalid argument")
def assert_error(self, sha1result: SHA1Result, error_message: str) -> None:
self.assertIsNotNone(sha1result, msg="Must pass a SHA1Result")
self.assertEqual(
SHA1Result.ERROR, sha1result.getType(), msg="SHA1Result must be an error"
)
error = sha1result.get_error()
self.assertIsNotNone(error)
self.assertEqual(error_message, error.message)
def test_unload_free_inodes(self) -> None:
for i in range(100):
self.write_file("testfile%d.txt" % i, "unload test case")
inode_count_before_unload = self.get_loaded_inodes_count("")
self.assertGreater(
inode_count_before_unload, 100, "Number of loaded inodes should increase"
)
age = TimeSpec()
age.seconds = 0
age.nanoSeconds = 0
unload_count = self.client.unloadInodeForPath(self.mount_path_bytes, b"", age)
self.assertGreaterEqual(
unload_count, 100, "Number of loaded inodes should reduce after unload"
)
def test_unload_thrift_api_accepts_single_dot_as_root(self) -> None:
self.write_file("testfile.txt", "unload test case")
age = TimeSpec()
age.seconds = 0
age.nanoSeconds = 0
unload_count = self.client.unloadInodeForPath(self.mount_path_bytes, b".", age)
self.assertGreater(
unload_count, 0, "Number of loaded inodes should reduce after unload"
)
def get_counter(self, name: str) -> float:
return self.get_counters()[name]
def test_invalidate_inode_cache(self) -> None:
filename = "bdir/file"
full_dirname = os.path.join(self.mount, "bdir/")
# Exercise eden a bit to make sure counters are ready
for _ in range(20):
fn = os.path.join(self.mount, "_tmp_")
with open(fn, "w") as f:
f.write("foo!\n")
os.unlink(fn)
reads = self.get_counter("fuse.read_us.count")
self.read_file(filename)
reads_1read = self.get_counter("fuse.read_us.count")
self.assertEqual(reads_1read, reads + 1)
self.read_file(filename)
reads_2read = self.get_counter("fuse.read_us.count")
self.assertEqual(reads_1read, reads_2read)
self.client.invalidateKernelInodeCache(self.mount_path_bytes, b"bdir/file")
self.read_file(filename)
reads_3read = self.get_counter("fuse.read_us.count")
self.assertEqual(reads_2read + 1, reads_3read)
lookups = self.get_counter("fuse.lookup_us.count")
# -hl makes ls do a lookup of the file to determine its type
os.system("ls -hl " + full_dirname + " > /dev/null")
lookups_1ls = self.get_counter("fuse.lookup_us.count")
# equal, since the file was already looked up above.
self.assertEqual(lookups, lookups_1ls)
self.client.invalidateKernelInodeCache(self.mount_path_bytes, b"bdir")
os.system("ls -hl " + full_dirname + " > /dev/null")
lookups_2ls = self.get_counter("fuse.lookup_us.count")
self.assertEqual(lookups_1ls + 1, lookups_2ls)
def test_diff_revisions(self) -> None:
# Convert the commit hashes to binary for the thrift call
with self.get_thrift_client() as client:
diff = client.getScmStatusBetweenRevisions(
os.fsencode(self.mount),
binascii.unhexlify(self.commit1),
binascii.unhexlify(self.commit2),
)
self.assertDictEqual(diff.errors, {})
self.assertDictEqual(
diff.entries,
{
b"cdir/subdir/new.txt": ScmFileStatus.ADDED,
b"bdir/file": ScmFileStatus.MODIFIED,
b"README": ScmFileStatus.REMOVED,
},
)
def test_diff_revisions_hex(self) -> None:
# Watchman currently calls getScmStatusBetweenRevisions()
# with 40-byte hexadecimal commit IDs, so make sure that works.
with self.get_thrift_client() as client:
diff = client.getScmStatusBetweenRevisions(
os.fsencode(self.mount),
self.commit1.encode("utf-8"),
self.commit2.encode("utf-8"),
)
self.assertDictEqual(diff.errors, {})
self.assertDictEqual(
diff.entries,
{
b"cdir/subdir/new.txt": ScmFileStatus.ADDED,
b"bdir/file": ScmFileStatus.MODIFIED,
b"README": ScmFileStatus.REMOVED,
},
)
```
#### File: eden/py/dirstate.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import binascii
import hashlib
import struct
from typing import IO, Any, Callable, Dict, Tuple
from six import iteritems
# Version number for the format of the .hg/dirstate file that is read/written by
# this library.
CURRENT_DIRSTATE_VERSION = 1
# Valid values for the merge state.
MERGE_STATE_NOT_APPLICABLE = 0
MERGE_STATE_BOTH_PARENTS = -1
MERGE_STATE_OTHER_PARENT = -2
def write(file, parents, tuples_dict, copymap):
# type: (IO[bytes], Tuple[bytes, bytes], Dict[bytes, Tuple[str, int, int]], Dict[bytes, bytes]) -> None
#
# The serialization format of the dirstate is as follows:
# - The first 40 bytes are the hashes of the two parent pointers.
# - The next 4 bytes are the version number of the format.
# - The next section is the dirstate tuples. Each dirstate tuple is
# represented as follows:
# - The first byte is '\x01'.
# - The second byte represents the status. It is the ASCII value of
# 'n', 'm', 'r', 'a', '?', as appropriate.
# - The next four bytes are an unsigned integer representing mode_t.
# - The seventh byte (which is signed) represents the merge state:
# - 0 is NotApplicable
# - -1 is BothParents
# - -2 is OtherParent
# - The next two bytes are an unsigned short representing the length of
# the path, in bytes.
# - The bytes of the path itself. Note that a path cannot contain \0.
# - The next section is the copymap. Each entry in the copymap is
# represented as follows.
# - The first byte is '\x02'.
# - An unsigned short (two bytes) representing the length, followed by
# that number of bytes, which constitutes the relative path name of the
# *destination* of the copy.
# - An unsigned short (two bytes) representing the length, followed by
# that number of bytes, which constitutes the relative path name of the
# *source* of the copy.
# - The last section is the checksum. Although the other tuples can be
# interleaved or reordered without issue, the checksum must come last.
# The checksum is a function of all of the bytes written up to this point
# plus the \xFF header for the checksum section.
# - The first byte is '\xFF' to distinguish it from the other fields.
# - Because we use SHA-256 as the hash algorithm for the checksum, the
# remaining 32 bytes are used for the hash.
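# As a quick illustration (added here for clarity, not from the original source):
# a tuple ('n', 0o100644, MERGE_STATE_NOT_APPLICABLE) for the path b"foo" would be
# serialized, following the layout above, as
#   b"\x01"                                       (tuple header byte)
#   + struct.pack(">BIb", ord("n"), 0o100644, 0) == b"n\x00\x00\x81\xa4\x00"
#   + struct.pack(">H", 3) + b"foo"               == b"\x00\x03foo"
# i.e. the byte sequence 01 6e 00 00 81 a4 00 00 03 66 6f 6f.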
sha = hashlib.sha256()
def hashing_write(data):
# type: (bytes) -> None
sha.update(data)
file.write(data)
hashing_write(parents[0])
hashing_write(parents[1])
hashing_write(struct.pack(">I", CURRENT_DIRSTATE_VERSION))
for path, dirstate_tuple in iteritems(tuples_dict):
status, mode, merge_state = dirstate_tuple
hashing_write(b"\x01")
hashing_write(struct.pack(">BIb", ord(status), mode, merge_state))
_write_path(hashing_write, path)
for dest, source in iteritems(copymap):
hashing_write(b"\x02")
_write_path(hashing_write, dest)
_write_path(hashing_write, source)
hashing_write(b"\xFF")
# Writing the checksum itself, so we use file.write() instead of hashing_write().
file.write(sha.digest())
def read(fp, filename): # noqa: C901
# type: (IO[bytes], str) -> Tuple[Tuple[bytes, bytes], Dict[str, Tuple[str, int, int]], Dict[str, str]]
"""Returns a tuple of (parents, tuples_dict, copymap) if successful.
Any exception from create_file(), such as IOError with errno == ENOENT, will
be bubbled up to the caller.
If contents of the dirstate file do not match the expected format, then a
DirstateParseException will be thrown.
"""
parents = None
tuples_dict = {}
copymap = {}
sha = hashlib.sha256()
def hashing_read(num):
data = fp.read(num)
sha.update(data)
return data
parent_bytes = hashing_read(40)
num_parents_bytes = len(parent_bytes)
if num_parents_bytes != 40:
raise DirstateParseException(
"Reached EOF while reading dirstate parents in {}.\n".format(filename)
)
parents = parent_bytes[:20], parent_bytes[20:40]
binary_version = hashing_read(4)
if len(binary_version) != 4:
raise DirstateParseException(
"Reached EOF while reading the version number in {}.\n".format(filename)
)
version = struct.unpack(">I", binary_version)[0] # type: int
if version != CURRENT_DIRSTATE_VERSION:
raise DirstateParseException(
"Unknown dirstate version in {}. Found {} but expected {}.\n".format(
filename, version, CURRENT_DIRSTATE_VERSION
)
)
while True:
header = hashing_read(1)
if not header:
# We have reached the end of the file.
break
elif header == b"\x01":
scalars = hashing_read(6)
if len(scalars) != 6:
raise DirstateParseException(
"Malformed dirstate tuple in ".format(filename)
+ ". Aborting read().\n"
)
path = _read_path(hashing_read, filename)
status = 0 # type: int
mode = 0 # type: int
merge = 0 # type: int
status, mode, merge = struct.unpack(">BIb", scalars)
# TODO(mbolin): Verify status and merge?
tuples_dict[path] = (chr(status), mode, merge)
elif header == b"\x02":
dest = _read_path(hashing_read, filename)
source = _read_path(hashing_read, filename)
copymap[dest] = source
elif header == b"\xFF":
# Reading the checksum, so we use fp.read() instead of
# hashing_read().
binary_checksum = fp.read(32)
if len(binary_checksum) != 32:
raise DirstateParseException(
"Reached EOF while reading checksum hash in {}.\n".format(filename)
)
digest = sha.digest()
if binary_checksum == digest:
if fp.read(1) == b"":
# There is no more data, as expected.
break
else:
raise DirstateParseException(
"Suspicious data is present after "
"the end of the valid checksum in {}.\n".format(filename)
)
else:
raise DirstateParseException(
"Checksum mismatch when reading {}. Observed checksum is "
"{}, but the checksum in the file is {}.\n".format(
filename,
binascii.hexlify(digest),
binascii.hexlify(binary_checksum),
)
)
else:
raise DirstateParseException(
"Unexpected header byte "
"when reading {}: 0x{0:x}.".format(filename, header)
+ " Ignoring remaining dirstate data.\n"
)
return parents, tuples_dict, copymap
def _write_path(writer, path):
# type: (Callable[[bytes], None], bytes) -> None
writer(struct.pack(">H", len(path)))
writer(path)
def _read_path(reader, filename):
# type: (Callable[[int], bytes], str) -> str
binary_path_len = reader(2)
if len(binary_path_len) != 2:
raise DirstateParseException(
"Reached EOF while reading path length in {}.\n".format(filename)
)
path_len = struct.unpack(">H", binary_path_len)[0] # type: int
path = reader(path_len)
if len(path) == path_len:
if isinstance(path, str):
# Python 2.
return path
else:
# Python 3
return str(path, "utf8")
else:
raise DirstateParseException(
"Reached EOF while reading path in {}.\n".format(filename)
)
class DirstateParseException(Exception):
pass
``` |
{
"source": "jmswu/resistor_solver",
"score": 3
} |
#### File: jmswu/resistor_solver/resistor_solver.py
```python
from resistor import Resistor
from resistor_data import ResistorData
class ResistorSolver:
"""
Find a solution for non-standard resistor values.
"""
_target = Resistor()
_dataset_list = list()
def __init__(self, target, tolerance=0.05, extension=".resistor"):
"""
Create a ResistorSolver object with the target value and tolerance.
:param target: target resistor value
:param tolerance: acceptable tolerance
:param extension: file extension used to locate resistor data set files
"""
self._target = Resistor(val=target, tolerance=tolerance)
# extract the data from the data set file and put them into a data set list
self._dataset_list = ResistorData(extension).get_dataset()
def solution(self):
"""
:return: a list of solutions, each of the form [name, R1, R2, RParallel, error]
"""
result = list()
# loop through all data set
for data_set in self._dataset_list:
# loop through all the values
index = 0
for val_1 in data_set.data():
index = index + 1
for val_2 in data_set.data()[index:]:
r1 = Resistor(val=val_1)
r2 = Resistor(val=val_2)
rp = r1.parallel_with(r2)
# test the value
if self._target.in_tolerance(rp.value()):
error = (self._target.value() - rp.value()) / self._target.value()
result.append([data_set.name(), r1.value(), r2.value(), rp.value(), error])
return result
``` |
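The solver above rests on two small pieces of arithmetic that are easy to check by hand: the parallel combination of two resistors and the relative error against the target. As a rough standalone sketch (the function names `parallel` and `solve` and the sample values are illustrative only, and it assumes `Resistor.parallel_with()` implements the usual 1/Rp = 1/R1 + 1/R2 rule, which the classes shown here do not spell out):

```python
from itertools import combinations


def parallel(r1, r2):
    """Parallel combination of two resistances (ohms)."""
    return (r1 * r2) / (r1 + r2)


def solve(target, values, tolerance=0.05):
    """Return [R1, R2, Rparallel, error] rows whose parallel value is within tolerance of target."""
    results = []
    for r1, r2 in combinations(values, 2):
        rp = parallel(r1, r2)
        error = (target - rp) / target
        if abs(error) <= tolerance:
            results.append([r1, r2, rp, error])
    return results


# Example: approximate a non-standard 833 ohm value from a small set of stock values.
print(solve(833.0, [1000.0, 4700.0, 5600.0, 10000.0]))
```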
{
"source": "jmsz/lab3",
"score": 2
} |
#### File: lab3/scripts/timing.py
```python
from __future__ import division
from lab3_analysis_functions import *
# In[2]:
filename = './data/cs2.h5'
hf = tables.open_file(filename, "r")
raw_data_1 = import_data(filename)
event_data_cs1= hf.root.EventData.read()
hf.close()
# In[3]:
# filename = '/Users/Asia/Desktop/204/lab3/lab3/data/am1.h5'
# hf = tables.open_file(filename, "r")
# raw_data_1 = import_data(filename)
# event_data_am1= hf.root.EventData.read()
# hf.close()
# filename = '/Users/Asia/Desktop/204/lab3/lab3/data/am2.h5'
# hf = tables.open_file(filename, "r")
# raw_data_2 = import_data(filename)
# event_data_am2= hf.root.EventData.read()
# hf.close()
# filename = '/Users/Asia/Desktop/204/lab3/lab3/data/cs1.h5'
# hf = tables.open_file(filename, "r")
# raw_data_1 = import_data(filename)
# event_data_cs1= hf.root.EventData.read()
# hf.close()
# filename = '/Users/Asia/Desktop/204/lab3/lab3/data/cs2.h5'
# hf = tables.open_file(filename, "r")
# raw_data_2 = import_data(filename)
# event_data_cs2= hf.root.EventData.read()
# hf.close()
# In[4]:
# event_data_am = np.concatenate((event_data_am1, event_data_am2))
# event_data_cs = np.concatenate((event_data_cs1, event_data_cs2))
# In[5]:
# sort events by timestamp
event_data_cs1 = event_data_cs1[np.argsort(event_data_cs1['timestamp'])]
# In[6]:
time_btwn_events = np.diff(event_data_cs1['timestamp'])
counts, bin_edges = np.histogram(time_btwn_events, bins=30, range = [0, 30])
bins = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
for i in range(0, len(bins), 1):
bins[i] = i*10
plt.figure(4, figsize=(7, 5))
plt.cla()
plt.clf()
plt.plot(bins, counts, 'ko')
plt.xlabel('time to next event (ns)')
plt.ylabel('counts')
plt.title("Time-to-Next-Event Histogram")
plt.xlim([0, 300])
plt.savefig("./figures/time-to-next-event.pdf")
#plt.show()
# In[7]:
print('applying energy calibration to data files...')
print('(this part takes a bit of time)')
filename = './data/calibration_long.txt'
calibration = np.genfromtxt(filename,delimiter=' ')
slopes = calibration[:,0]
intercepts = calibration[:,1]
for i in range(0, 152, 1):
mask = (event_data_cs1['detector'] == i)
event_data_cs1['ADC_value'][mask] = calculate_energies(event_data_cs1['ADC_value'][mask], slopes[i], intercepts[i])
#if i == 90:
#print('still going... almost done')
if i == 151:
print('phew! done with that part')
# In[8]:
counts, bin_edges = np.histogram(event_data_cs1['ADC_value'], bins=2048, range = [300, 700])
bins = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
plt.cla()
plt.clf()
plt.plot(bins, counts)
plt.savefig('./figures/energy-spectrum.pdf')
#plt.show()
print('an energy spectrum of the Cs-137 peak has been plotted in energy-spectrum.pdf')
mask_1 = ((660 < event_data_cs1['ADC_value']) & (event_data_cs1['ADC_value'] < 663))
counts, bin_edges = np.histogram(event_data_cs1['ADC_value'][mask_1], bins=500, range = [657, 665])
bins = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
plt.cla()
plt.clf()
plt.plot(bins, counts)
plt.savefig('./figures/energy-spectrum-cut.pdf')
#plt.show()
# In[11]:
raw_data_1 = fast_baseline_correction(raw_data_1)
# In[12]:
def make_test_file(masked_data, rawdata):
f = open('test.csv', 'w')
for i in masked_data:
#print(i)
f.write(str(i[0]) +','+ str(i[1]) + ','+str(i[2]) + ','+ str(i[3]) + ','+ str(i[4]) + ','+ str(i[5]) + ','+ str(i[6]) + '\n')
f.close()
f = open('test_trace.csv', 'w')
for i in masked_data['rid']:
x = rawdata[i]
for j in x:
f.write(str(j) + ' ')
f.write('\n')
f.close()
#make_test_file(event_data_cs1[mask_1], raw_data_1)
# In[13]:
# full energy only
mask_1 = ((640 < event_data_cs1['ADC_value']) & (event_data_cs1['ADC_value'] < 680))
#print(event_data_cs1['ADC_value'][mask_1][0:10])
# In[14]:
#dc1, dc2, ac1, ac2
face1 = np.arange(0, 38) # dc1
face2 = np.arange(38, 76) # dc2
face3 = np.arange(76, 114) # ac1
face4 = np.arange(114, 152) # ac2
# In[15]:
def calculate_t50(signal, plot=False):
signal = signal[:-100]
x = np.linspace(0, len(signal) -100, len(signal) -99)
# print(x)
sig = savgol_filter(signal, 15, 3) # window size 15, polynomial order 3
if sig.all() == 0:
return -1000
elif np.argmax(sig) == len(sig) - 1:
return -1000
else:
maxval = np.amax(sig)
fiftyval = maxval* 0.5
fiftyindex = 0
for i in range(0, len(sig), 1):
if sig[i] <= fiftyval:
fiftyindex = i
if (fiftyindex + 3) >= len(sig):
x_fit_low = np.linspace((fiftyindex - 2), len(sig), len(sig)-2 - fiftyindex)
sig_fit_low = sig[int(fiftyindex - 2): len(sig)]
else:
x_fit_low = np.linspace((fiftyindex - 2), int(fiftyindex + 3), 5)
sig_fit_low = sig[int(fiftyindex - 2): int(fiftyindex + 3)]
if len(x_fit_low) != len(sig_fit_low):
#print(fiftyindex)
#print(len(x_fit_low))
#print(len(sig_fit_low))
#print(x_fit_low)
#print(sig_fit_low)
#plt.plot(signal)
#plt.plot(sig)
#plt.show()
#plt.plot(signal[int(fiftyindex - 4):int(fiftyindex + 5)], 'o')
#plt.plot(sig[int(fiftyindex -4): int(fiftyindex + 5)], 'o')
#plt.show()
x_fit_low = np.linspace((fiftyindex - 1), int(fiftyindex + 2), 4)
sig_fit_low = sig[int(fiftyindex - 1): int(fiftyindex + 3)]
#print(len(x_fit_low))
#print(len(sig_fit_low))
x_fit_low = np.array(x_fit_low)
sig_fit_low = np.array(sig_fit_low)
if len(x_fit_low) < 1:
#print('x empty')
#plt.plot(signal)
#plt.plot(sig)
#plt.show()
return -1000
else:
m, b = np.polyfit(x_fit_low, sig_fit_low, deg=1)
fit_low = b + m * x_fit_low
rise_low = ((fiftyval - b )/ m)
t50 = (rise_low) * 10# ns
if plot==True:
plt.figure(figsize=(10,5))
plt.plot(signal, '-', label = 'raw signal')
plt.plot(sig, label = 'smoothed signal')
plt.plot(x_fit_low, fit_low,'-', linewidth = 5.0,alpha=0.7, label = 'fit')
plt.plot(t50/10, fiftyval, 'ro', label='t50')
plt.title('T50 Fitting')
plt.ylabel('ADC value')
plt.xlabel('ADC sample (10 ns sampling time)')
plt.legend()
#plt.savefig('t50_fitting.pdf')
plt.show()
return t50
# In[16]:
#x = 7000
diff1vals = []
flag = 0 # 1 = detector1, 2 = detector2, 3 = detector1 neighbors, 4 = detector2 neighbors, 5 = other
delta_t50_values_1 = []
delta_t50_values_2 = []
for t in range(0, len(event_data_cs1['timestamp'][mask_1]), 1):
#for t in range(0, x, 1):
diff0 = np.abs(event_data_cs1['timestamp'][mask_1][t] - event_data_cs1['timestamp'][mask_1][t-1])
diff1 = np.abs(event_data_cs1['timestamp'][mask_1][t] - event_data_cs1['timestamp'][mask_1][t-2])
# diff2 = np.abs(event_data_cs1['timestamp'][mask_1][t+1] - event_data_cs1['timestamp'][mask_1][t])
# diff3 = np.abs(event_data_cs1['timestamp'][mask_1][t+2] - event_data_cs1['timestamp'][mask_1][t])
if diff0 < 7 and diff1 > 7:
# print('0', diff0)
# print('1', diff1)
# print('2', diff2)
# print('3', diff3)
detector1 = (event_data_cs1['detector'][mask_1][t-1])
detector2 = (event_data_cs1['detector'][mask_1][t])
#print('-------------')
if detector1 in face1 and detector2 in face1:
# print('face1 neighbors')
flag = 3
elif detector1 in face2 and detector2 in face2:
# print('face2 neighbors')
flag = 3
elif detector1 in face3 and detector2 in face3:
# print('face3 neighbors')
flag = 4
elif detector1 in face4 and detector2 in face4:
# print('face4 neighbors')
flag = 4
elif detector1 in face1 and detector2 in face3:
# print('detector1')
flag = 1
elif detector1 in face3 and detector2 in face1:
# print('detector1')
flag = 1
elif detector1 in face2 and detector2 in face4:
# print('detector2')
flag = 2
elif detector1 in face4 and detector2 in face2:
# print('detector2')
flag = 2
else:
flag= 5
# print('other')
if flag == 1:
rid1 = (event_data_cs1['rid'][mask_1][t-1])
rid2 = (event_data_cs1['rid'][mask_1][t])
adc1 = (event_data_cs1['ADC_value'][mask_1][t-1])
adc2 = (event_data_cs1['ADC_value'][mask_1][t])
t501 = calculate_t50(raw_data_1[rid1], plot=False)
t502 = calculate_t50(raw_data_1[rid2], plot=False)
if rid1 > rid2: # electrons - holes = ac - dc
#deltat50 = -(diff0 * 10) - t502 + t501 #- diff0 * 10
deltat50 = (diff0 * 10) + t502 - t501
else: # rid1 < rid2
#deltat50 = (diff0 * 10) + t502 - t501 #- diff0 * 10
deltat50 = -(diff0 * 10) - t502 + t501
deltat50 = round(float(deltat50),4)
delta_t50_values_1.append(deltat50)
diff1vals.append(diff0)
elif flag == 2:
rid1 = (event_data_cs1['rid'][mask_1][t-1])
rid2 = (event_data_cs1['rid'][mask_1][t])
adc1 = (event_data_cs1['ADC_value'][mask_1][t-1])
adc2 = (event_data_cs1['ADC_value'][mask_1][t])
t501 = calculate_t50(raw_data_1[rid1], plot=False)
t502 = calculate_t50(raw_data_1[rid2], plot=False)
if rid1 > rid2: # electrons - holes = ac - dc
#deltat50 = -(diff0 * 10) - t502 + t501 #- diff0 * 10
deltat50 = (diff0 * 10) + t502 - t501
else: # rid1 < rid2
#deltat50 = (diff0 * 10) + t502 - t501 #- diff0 * 10
deltat50 = -(diff0 * 10) - t502 + t501
deltat50 = round(float(deltat50),4)
delta_t50_values_2.append(deltat50)
diff1vals.append(diff0)
else:
continue
# In[20]:
#print(len(delta_t50_values_1))
#print(len(delta_t50_values_2))
delta_t50_1 = np.asarray(delta_t50_values_1)#, dtype=float)
#print(type(delta_t50_1))
counts_det1, bin_edges = np.histogram(delta_t50_1, bins=200, range = [-225, 275])
bins_det1 = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
plt.cla()
plt.clf()
#plt.axvline(-160,color='k')
#plt.axvline(210,color='k')
plt.plot(bins_det1, counts_det1)
plt.title('$\Delta$t50 Values for Detector 1')
plt.ylabel('counts')
plt.xlabel('$\Delta$t50 (ns)')
plt.savefig('./figures/t50s_det1.pdf')
#plt.show()
delta_t50_2 = np.asarray(delta_t50_values_2)#, dtype=float)
#print(type(delta_t50_1))
counts_det2, bin_edges = np.histogram(delta_t50_2, bins=200, range = [-225, 275])
bins_det2 = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
plt.cla()
plt.clf()
#plt.axvline(-160,color='k')
#plt.axvline(210,color='k')
plt.plot(bins_det2, counts_det2)
plt.title('$\Delta$t50 Values for Detector 2')
plt.ylabel('counts')
plt.xlabel('$\Delta$t50 (ns)')
plt.savefig('./figures/t50s_det2.pdf')
#plt.show()
cut_counts_det1, bin_edges = np.histogram(delta_t50_1, bins=200, range = [-160, 210])
cut_bins_det1 = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
cut_counts_det2, bin_edges = np.histogram(delta_t50_2, bins=200, range=[-160, 210])
cut_bins_det2 = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
# In[21]:
# method 1 = linear fit
x = (np.linspace(0, 15, len(cut_bins_det1)))
m, b = np.polyfit(cut_bins_det1, x, deg=1)
x_values = b + m * bins_det1
y_values = counts_det1
plt.xlim([-3,18])
plt.cla()
plt.clf()
plt.plot(x_values, y_values,'b--', label='method 1')
#plt.show()
# method 2
z_0 = 5.2 + 0.75  # mm (base value 5.2 mm plus a 0.75 mm offset)
k_c = 0.04  # mm/ns
c = 299.792  # mm/ns (speed of light, approx. 3e8 m/s)
z_coord_eq_1 = []
#print(len(delta_t50_values_2))
for i in delta_t50_values_2:
z = z_0 + k_c * i
z_coord_eq_1.append(z)
z_coord_eq_1= np.asarray(z_coord_eq_1)#, dtype=float)
#print(type(delta_t50_1))
z_coord_eq_1_y, bin_edges = np.histogram(z_coord_eq_1, bins=200)
z_coord_eq_1_x = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
plt.plot(z_coord_eq_1_x, z_coord_eq_1_y,'r--', label = 'method 2')
plt.xlim([-3,18])
plt.title('Interaction Depth Profile')
plt.xlabel('depth (mm)')
plt.ylabel('counts')
plt.legend()
plt.savefig('./figures/interactiondepths.pdf')
#plt.show()
# In[ ]:
# In[82]:
#Geant4 comparison
hf = tables.open_file("./data/hits.h5", "r")
event_pointers = hf.root.EventPointers.read()
event_lengths = hf.root.EventLengths.read()
idata = hf.root.InteractionData.read()
# energy weighted z coordinates for full E deposition in a single crystal
event_energies = []
z_values = []
z_values_all = []
for i in range(0, len(event_pointers), 1):
#print('---', i)
pointer = event_pointers[i]
length = event_lengths[i]
energy = np.sum(idata['energy'][pointer:pointer+length])
z_coords = (idata['z'][pointer:pointer+length])
if (energy > 661.6):
neg = 0
pos = 0
for j in z_coords:
z_values_all.append(j)
if j > 0:
pos = 1
if j < 0:
neg = 1
if pos == 1 and neg == 0:
event_energies.append(energy)
z_val = []
for j in idata[pointer:pointer+length]:
z_val.append(j['z'] * j['energy'] / energy)
z_coord_1 = np.sum(np.asarray(z_val))
z_values.append(z_coord_1)
elif pos == 0 and neg == 1:
event_energies.append(energy)
z_val = []
for j in idata[pointer:pointer+length]:
z_val.append(j['z'] * j['energy'] / energy)
z_coord_1 = np.sum(np.asarray(z_val))
z_values.append(z_coord_1)
event_energies = np.asarray(event_energies)
z_values = np.asarray(z_values)
# In[83]:
counts_weight, bin_edges = np.histogram(z_values, bins=200, range=[-25,0])
bins_weight = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
counts_all, bin_edges = np.histogram(z_values_all, bins=200, range=[-25,0])
bins_all = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
# single interaction z coordinates for full E deposition in a single crystal
event_energies = []
z_values = []
for i in range(0, len(event_pointers), 1):
pointer = event_pointers[i]
length = event_lengths[i]
energy = np.sum(idata['energy'][pointer:pointer+length])
if energy > 661.6:
event_energies.append(energy)
if length ==1:
z_values.append(idata['z'][pointer:pointer+length])
#elif length > 1:
#print(length)
event_energies = np.asarray(event_energies)
z_values = np.asarray(z_values)
counts, bin_edges = np.histogram(z_values, bins=200,range=[-25,0])
bins = (bin_edges[1:]+bin_edges[:-1])/2 # bin centers from bin edges
for i in range(0, len(counts), 1):
bins[i] = bins[i] + 20
for i in range(0, len(counts_weight), 1):
bins_weight[i] = bins_weight[i] + 20
plt.cla()
plt.clf()
plt.plot(bins, counts,'b')
#plt.plot(bins_all,counts_all,'r')
#plt.plot(bins_weight,counts_weight,'g')
plt.xlim([-5, 20])
plt.savefig('./figures/detector1_g4_zpos.pdf')
#plt.show()
plt.cla()
plt.clf()
#plt.plot(bins,counts,'b')
#plt.plot(bins_all,counts_all,'r')
plt.plot(bins_weight, counts_weight,'g')
plt.xlim([-5, 20])
plt.savefig('./figures/detector1_g4_zpos_weighted.pdf')
#plt.show()
# In[89]:
y1 = z_coord_eq_1_y / np.sum(z_coord_eq_1_y)
y2 = counts / np.sum(counts)
plt.cla()
plt.clf()
plt.plot(z_coord_eq_1_x, y1,'r--', label = 'method 2')
plt.plot(bins, y2,'b')
plt.xlim([-3,18])
plt.title('Comparison with Simulated Data')
plt.xlabel('depth (mm)')
plt.ylabel('counts')
plt.legend()
plt.savefig('./figures/g4_comp.pdf')
#plt.show()
# In[ ]:
``` |
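The depth reconstruction in the script above ("method 2") is a linear map from the measured Δt50 to an interaction depth, z = z_0 + k_c · Δt50, with z_0 = 5.2 + 0.75 ≈ 5.95 mm and k_c = 0.04 mm/ns taken from the code. A minimal sketch of just that step, using the same constants (the function name is illustrative and the constants should be treated as the script's assumptions rather than detector ground truth):

```python
import numpy as np

Z_0 = 5.2 + 0.75  # mm, offset used in the script above
K_C = 0.04        # mm/ns, delta-t50 to depth conversion used in the script above


def depth_from_delta_t50(delta_t50_ns):
    """Map delta-t50 values (ns) to interaction depths (mm): z = z_0 + k_c * dt."""
    return Z_0 + K_C * np.asarray(delta_t50_ns)


# Example: delta-t50 values of -100 ns, 0 ns and 150 ns map to roughly 1.95, 5.95 and 11.95 mm.
print(depth_from_delta_t50([-100.0, 0.0, 150.0]))
```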
{
"source": "jm-szlendak/particle-filter",
"score": 3
} |
#### File: particle-filter/src/pf.py
```python
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
"""
Based on Dakota Nelson's particle filter implementation for a laser scanner.
Visit: https://github.com/DakotaNelson/particle-filter
"""
import rospy
from std_msgs.msg import Header, String, ColorRGBA
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped, PoseArray, Pose, Point, Quaternion, Vector3, \
PoseWithCovariance
from beacon_msgs.msg import BeaconPositionAndDistance, BeaconsScan
from nav_msgs.srv import GetMap
from visualization_msgs.msg import Marker, MarkerArray
from copy import deepcopy
import tf
from tf import TransformListener
from tf import TransformBroadcaster
from tf.transformations import euler_from_quaternion, rotation_matrix, quaternion_from_matrix
from random import gauss
import math
import random
import time
import numpy as np
from numpy.random import random_sample, normal
import scipy
from scripts.helper_functions import (convert_pose_inverse_transform,
convert_translation_rotation_to_pose,
convert_pose_to_xy_and_theta,
angle_diff, sum_vectors, gaussian)
class Particle(object):
""" Represents a hypothesis (particle) of the robot's pose consisting of x,y and theta (yaw)
Attributes:
x: the x-coordinate of the hypothesis relative to the map frame
y: the y-coordinate of the hypothesis relative to the map frame
theta: the yaw of the hypothesis relative to the map frame
w: the particle weight (the class does not ensure that particle weights are normalized)
"""
def __init__(self, x=0.0, y=0.0, theta=0.0, w=1.0):
""" Construct a new Particle
x: the x-coordinate of the hypothesis relative to the map frame
y: the y-coordinate of the hypothesis relative to the map frame
theta: the yaw of the hypothesis relative to the map frame
w: the particle weight (the class does not ensure that particle weights are normalized) """
self.w = w
self.theta = theta
self.x = x
self.y = y
def as_pose(self):
""" A helper function to convert a particle to a geometry_msgs/Pose message """
orientation_tuple = tf.transformations.quaternion_from_euler(0, 0, self.theta)
return Pose(position=Point(x=self.x, y=self.y, z=0),
orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2],
w=orientation_tuple[3]))
class ParticleFilter:
""" The class that represents a Particle Filter ROS Node
Attributes list:
initialized: a Boolean flag to communicate to other class methods that initializaiton is complete
base_frame: the name of the robot base coordinate frame (should be "base_link" for most robots)
map_frame: the name of the map coordinate frame (should be "map" in most cases)
odom_frame: the name of the odometry coordinate frame (should be "odom" in most cases)
beacons_topic: the name of the beacon scan topic to listen to
n_particles: the number of particles in the filter
d_thresh: the amount of linear movement before triggering a filter update
a_thresh: the amount of angular movement before triggering a filter update
laser_max_distance: the maximum distance to an obstacle we should use in a likelihood calculation
pose_listener: a subscriber that listens for new approximate pose estimates (i.e. generated through the rviz GUI)
particle_pub: a publisher for the particle cloud
beacon_subscriber: listens for new beacon scan data on topic self.beacons_topic
tf_listener: listener for coordinate transforms
tf_broadcaster: broadcaster for coordinate transforms
particle_cloud: a list of particles representing a probability distribution over robot poses
current_odom_xy_theta: the pose of the robot in the odometry frame when the last filter update was performed.
The pose is expressed as a list [x,y,theta] (where theta is the yaw)
map: the map we will be localizing ourselves in. The map should be of type nav_msgs/OccupancyGrid
"""
def __init__(self):
self.initialized = False # make sure we don't perform updates before everything is setup
rospy.init_node('pf') # tell roscore that we are creating a new node named "pf"
robot_name = rospy.get_namespace()[1:]
self.base_frame = robot_name + "base_link" # the frame of the robot base
self.map_frame = "map" # the name of the map coordinate frame
self.odom_frame = robot_name + "odom" # the name of the odometry coordinate frame
self.beacons_topic = "beacon_localization/distances/probabilistic" # the topic where we will get laser scans from TODO: filter parametrization
self.n_particles = 500 # the number of particles to use
self.d_thresh = 0.2 # the amount of linear movement before performing an update
self.a_thresh = math.pi / 6 # the amount of angular movement before performing an update
self.laser_max_distance = 2.0 # maximum penalty to assess in the likelihood field model
self.sigma = 1 # guess for how inaccurate the beacon scans are, in meters
# Setup pubs and subs
# pose_listener responds to selection of a new approximate robot location (for instance using rviz)
self.pose_listener = rospy.Subscriber("initialpose", PoseWithCovarianceStamped, self.update_initial_pose)
# publish the current particle cloud. This enables viewing particles in rviz.
self.particle_pub = rospy.Publisher("bl/particlecloud", PoseArray, queue_size=10)
self.pose_pub = rospy.Publisher('bl/pose', PoseWithCovarianceStamped, queue_size=10)
self.marker_pub = rospy.Publisher("markers", MarkerArray, queue_size=10)
# beacon_subscriber listens for beacon distance data published on self.beacons_topic
self.beacon_subscriber = rospy.Subscriber(self.beacons_topic, BeaconsScan, self.scan_received)
# enable listening for and broadcasting coordinate transforms
self.tf_listener = TransformListener()
self.tf_broadcaster = TransformBroadcaster()
self.particle_cloud = []
self.current_odom_xy_theta = []
self.robot_pose = None
self.odom_pose = None
self.receiver_pose = None
self.initialized = True
rospy.loginfo('Initialized particle filter')
def update_robot_pose(self):
""" Update the estimate of the robot's pose given the updated particles.
Computed by taking the weighted average of poses.
"""
# first make sure that the particle weights are normalized
self.normalize_particles()
x = 0
y = 0
theta = 0
angles = []
x_var = 0
y_var = 0
for particle in self.particle_cloud:
x += particle.x * particle.w
y += particle.y * particle.w
v = [particle.w * math.cos(math.radians(particle.theta)),
particle.w * math.sin(math.radians(particle.theta))]
angles.append(v)
theta = sum_vectors(angles)
for particle in self.particle_cloud:
x_var += (particle.x - x)**2 * particle.w
y_var += (particle.y - y) ** 2 * particle.w
print 'Position x, y, xvar, yvar', x, y, x_var, y_var
covar = np.zeros((6, 6))
covar[0, 0] = x_var
covar[1, 1] = y_var
orientation_tuple = tf.transformations.quaternion_from_euler(0, 0, theta)
self.robot_pose = PoseWithCovarianceStamped(
header=Header(
stamp=rospy.Time.now(),
frame_id=self.map_frame
),
pose=PoseWithCovariance(
pose=Pose(
position=Point(x=x, y=y),
orientation=Quaternion(
x=orientation_tuple[0],
y=orientation_tuple[1],
z=orientation_tuple[2],
w=orientation_tuple[3])),
covariance=covar.flatten()
)
)
def update_particles_with_odom(self, msg):
""" Update the particles using the newly given odometry pose.
The function computes the value delta which is a tuple (x,y,theta)
that indicates the change in position and angle between the odometry
when the particles were last updated and the current odometry.
msg: this is not really needed to implement this, but is here just in case.
"""
new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
# compute the change in x,y,theta since our last update
if self.current_odom_xy_theta:
old_odom_xy_theta = self.current_odom_xy_theta
delta = (new_odom_xy_theta[0] - self.current_odom_xy_theta[0],
new_odom_xy_theta[1] - self.current_odom_xy_theta[1],
new_odom_xy_theta[2] - self.current_odom_xy_theta[2])
self.current_odom_xy_theta = new_odom_xy_theta
else:
self.current_odom_xy_theta = new_odom_xy_theta
return
for particle in self.particle_cloud:
r1 = math.atan2(delta[1], delta[0]) - old_odom_xy_theta[2]
d = math.sqrt((delta[0] ** 2) + (delta[1] ** 2))
particle.theta += r1 % 360
particle.x += d * math.cos(particle.theta) + normal(0, 0.1)
particle.y += d * math.sin(particle.theta) + normal(0, 0.1)
particle.theta += (delta[2] - r1 + normal(0, 0.1)) % 360
# For added difficulty: Implement sample_motion_odometry (Prob Rob p 136)
def resample_particles(self):
""" Resample the particles according to the new particle weights.
The weights stored with each particle should define the probability that a particular
particle is selected in the resampling step. You may want to make use of the given helper
function draw_random_sample.
"""
# make sure the distribution is normalized
self.normalize_particles()
new_particles = []
for i in range(len(self.particle_cloud)):
# resample the same # of particles
choice = random_sample()
# all the particle weights sum to 1
csum = 0 # cumulative sum
for particle in self.particle_cloud:
csum += particle.w
if csum >= choice:
# if the random choice fell within the particle's weight
new_particles.append(deepcopy(particle))
break
self.particle_cloud = new_particles
def update_particles_with_beacons(self, msg):
for particle in self.particle_cloud:
total_probability = 1.0
for beacon in msg.beacons:
distance = math.sqrt(
(beacon.pose.position.x - particle.x) ** 2 +
(beacon.pose.position.y - particle.y) ** 2
)
total_probability *= gaussian(distance, self.sigma, beacon.distance)
total_probability /= len(msg.beacons)
particle.w = total_probability
@staticmethod
def weighted_values(values, probabilities, size):
""" Return a random sample of size elements from the set values with the specified probabilities
values: the values to sample from (numpy.ndarray)
probabilities: the probability of selecting each element in values (numpy.ndarray)
size: the number of samples
"""
bins = np.add.accumulate(probabilities)
return values[np.digitize(random_sample(size), bins)]
@staticmethod
def draw_random_sample(choices, probabilities, n):
""" Return a random sample of n elements from the set choices with the specified probabilities
choices: the values to sample from represented as a list
probabilities: the probability of selecting each element in choices represented as a list
n: the number of samples
"""
values = np.array(range(len(choices)))
probs = np.array(probabilities)
bins = np.add.accumulate(probs)
inds = values[np.digitize(random_sample(n), bins)]
samples = []
for i in inds:
samples.append(deepcopy(choices[int(i)]))
return samples
def update_initial_pose(self, msg):
""" Callback function to handle re-initializing the particle filter based on a pose estimate.
These pose estimates could be generated by another ROS Node or could come from the rviz GUI """
xy_theta = convert_pose_to_xy_and_theta(msg.pose.pose)
self.initialize_particle_cloud(xy_theta)
self.fix_map_to_odom_transform(msg)
def initialize_particle_cloud(self, xy_theta=None):
""" Initialize the particle cloud.
Arguments
xy_theta: a triple consisting of the mean x, y, and theta (yaw) to initialize the
particle cloud around. If this input is omitted, the odometry will be used """
if xy_theta is None:
xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
rad = 1 # meters
self.particle_cloud = []
self.particle_cloud.append(Particle(xy_theta[0], xy_theta[1], xy_theta[2]))
for i in range(self.n_particles - 1):
# initial facing of the particle
theta = random.random() * 360
# compute params to generate x,y in a circle
other_theta = random.random() * 360
radius = random.random() * rad
# x => straight ahead
x = radius * math.sin(other_theta) + xy_theta[0]
y = radius * math.cos(other_theta) + xy_theta[1]
particle = Particle(x, y, theta)
self.particle_cloud.append(particle)
self.normalize_particles()
self.update_robot_pose()
def normalize_particles(self):
""" Make sure the particle weights define a valid distribution (i.e. sum to 1.0) """
tot_weight = sum([particle.w for particle in self.particle_cloud]) or 1
for particle in self.particle_cloud:
particle.w = particle.w / tot_weight
def publish_particles(self, msg):
particles_conv = []
for p in self.particle_cloud:
particles_conv.append(p.as_pose())
# actually send the message so that we can view it in rviz
self.particle_pub.publish(PoseArray(header=Header(stamp=rospy.Time.now(),
frame_id=self.map_frame),
poses=particles_conv))
marker_array = []
for index, particle in enumerate(self.particle_cloud):
marker = Marker(header=Header(stamp=rospy.Time.now(),
frame_id=self.map_frame),
pose=particle.as_pose(),
type=0,
scale=Vector3(x=particle.w * 2, y=particle.w * 1, z=particle.w * 5),
id=index,
color=ColorRGBA(r=1, a=1))
marker_array.append(marker)
self.marker_pub.publish(MarkerArray(markers=marker_array))
self.pose_pub.publish(self.robot_pose)
def scan_received(self, msg):
""" This is the default logic for what to do when processing scan data.
Feel free to modify this, however, I hope it will provide a good
guide. The input msg is an object of type beacon_msgs/BeaconsScan """
rospy.loginfo('Scan received')
if not self.initialized:
# wait for initialization to complete
rospy.loginfo('not initialized')
return
if not (self.tf_listener.canTransform(self.base_frame, msg.header.frame_id, msg.header.stamp)):
return
if not (self.tf_listener.canTransform(self.base_frame, self.odom_frame, msg.header.stamp)):
self.tf_listener.waitForTransform(self.base_frame, self.odom_frame, msg.header.stamp, rospy.Duration(1.0/5))
# calculate pose of BLE receiver relative to the robot base
p = PoseStamped(header=Header(stamp=rospy.Time(0),
frame_id=msg.header.frame_id))
self.receiver_pose = self.tf_listener.transformPose(self.base_frame, p)
# find out where the robot thinks it is based on its odometry
p = PoseStamped(header=Header(stamp=msg.header.stamp,
frame_id=self.base_frame),
pose=Pose())
# self.tf_listener.waitForTransform()
self.odom_pose = self.tf_listener.transformPose(self.odom_frame, p)
# store the odometry pose in a more convenient format (x,y,theta)
new_odom_xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)
if not self.particle_cloud:
# now that we have all of the necessary transforms we can update the particle cloud
self.initialize_particle_cloud()
# cache the last odometric pose so we can only update our particle filter if we move more than self.d_thresh or self.a_thresh
self.current_odom_xy_theta = new_odom_xy_theta
# update our map to odom transform now that the particles are initialized
self.fix_map_to_odom_transform(msg)
elif (math.fabs(new_odom_xy_theta[0] - self.current_odom_xy_theta[0]) > self.d_thresh or
math.fabs(new_odom_xy_theta[1] - self.current_odom_xy_theta[1]) > self.d_thresh or
math.fabs(new_odom_xy_theta[2] - self.current_odom_xy_theta[2]) > self.a_thresh):
# we have moved far enough to do an update!
self.update_particles_with_odom(msg) # update based on odometry
self.update_particles_with_beacons(msg) # update based on beacon distances
self.update_robot_pose() # update robot's pose
self.resample_particles() # resample particles to focus on areas of high density
self.fix_map_to_odom_transform(msg) # update map to odom transform now that we have new particles
# publish particles (so things like rviz can see them)
self.publish_particles(msg)
def fix_map_to_odom_transform(self, msg):
""" This method constantly updates the offset of the map and
odometry coordinate systems based on the latest results from
the localizer """
(translation, rotation) = convert_pose_inverse_transform(self.robot_pose.pose.pose)
p = PoseStamped(pose=convert_translation_rotation_to_pose(translation, rotation),
header=Header(stamp=msg.header.stamp, frame_id=self.base_frame))
self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, p)
(self.translation, self.rotation) = convert_pose_inverse_transform(self.odom_to_map.pose)
def broadcast_last_transform(self):
""" Make sure that we are always broadcasting the last map
to odom transformation. This is necessary so things like
move_base can work properly. """
if not (hasattr(self, 'translation') and hasattr(self, 'rotation')):
return
self.tf_broadcaster.sendTransform(self.translation,
self.rotation,
rospy.get_rostime(),
self.odom_frame,
self.map_frame)
if __name__ == '__main__':
n = ParticleFilter()
r = rospy.Rate(5)
while not (rospy.is_shutdown()):
# in the main loop all we do is continuously broadcast the latest map to odom transform
n.broadcast_last_transform()
r.sleep()
``` |
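The resampling helpers in `ParticleFilter` (`weighted_values` and `draw_random_sample`) both rely on the same trick: turn the weights into a cumulative distribution with `np.add.accumulate` and map uniform random draws onto it with `np.digitize`. A small self-contained sketch of that technique (illustrative only, with a made-up function name, not tied to ROS or to the classes above):

```python
import numpy as np


def draw_weighted_indices(weights, n):
    """Draw n indices with probability proportional to the given weights."""
    weights = np.asarray(weights, dtype=float)
    probs = weights / weights.sum()   # normalize, like normalize_particles()
    bins = np.add.accumulate(probs)   # cumulative distribution
    return np.digitize(np.random.random_sample(n), bins)


# Example: index 2 (weight 0.6) should dominate the resampled set.
counts = np.bincount(draw_weighted_indices([0.1, 0.3, 0.6], 1000), minlength=3)
print(counts)
```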
{
"source": "jmtatsch/keras-contrib",
"score": 2
} |
#### File: keras-contrib/keras_contrib/constraints.py
```python
from __future__ import absolute_import
from . import backend as K
from keras.constraints import *
class Clip(Constraint):
"""Clips weights to [-c, c].
# Arguments
c: Clipping parameter.
"""
def __init__(self, c=0.01):
self.c = c
def __call__(self, p):
return K.clip(p, -self.c, self.c)
def get_config(self):
return {'name': self.__class__.__name__,
'c': self.c}
# Aliases.
clip = Clip
```
#### File: tests/keras_contrib/optimizers_test.py
```python
from __future__ import print_function
from keras.utils.test_utils import get_test_data
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils.np_utils import to_categorical
from keras_contrib import optimizers
import pytest
import numpy as np
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_test_data(num_train=1000,
num_test=200,
input_shape=(10,),
classification=True,
num_classes=2)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
def get_model(input_dim, nb_hidden, output_dim):
model = Sequential()
model.add(Dense(nb_hidden, input_shape=(input_dim,)))
model.add(Activation('relu'))
model.add(Dense(output_dim))
model.add(Activation('softmax'))
return model
def _test_optimizer(optimizer, target=0.89):
model = get_model(X_train.shape[1], 10, y_train.shape[1])
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=12, batch_size=16,
validation_data=(X_test, y_test), verbose=2)
config = optimizer.get_config()
assert type(config) == dict
assert history.history['val_acc'][-1] >= target
if __name__ == '__main__':
pytest.main([__file__])
``` |
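For context, a weight constraint such as `Clip` is normally attached to a layer through Keras's `kernel_constraint`/`bias_constraint` arguments and is applied after each gradient update. A hedged usage sketch (the model itself is made up for illustration and assumes the standard Keras 2 layer API used elsewhere in this repo):

```python
from keras.models import Sequential
from keras.layers import Dense

from keras_contrib.constraints import clip  # alias for the Clip class defined above

# Clip every kernel weight of the hidden layer to [-0.01, 0.01] after each update.
model = Sequential()
model.add(Dense(32, input_shape=(10,), activation="relu",
                kernel_constraint=clip(0.01)))
model.add(Dense(2, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
```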
{
"source": "jmtatsch/PSPNet-Keras-tensorflow",
"score": 3
} |
#### File: PSPNet-Keras-tensorflow/pspnet/utils.py
```python
from __future__ import print_function
import colorsys
import requests
from os.path import join
import numpy as np
from keras.models import Model
def preprocess_image(img, mean=np.array([[[123.68, 116.779, 103.939]]])): # mean in rgb order
"""Preprocess an image as input."""
float_img = img.astype('float16')
centered_image = float_img - mean
bgr_image = centered_image[:, :, ::-1] # RGB => BGR
return bgr_image
def class_image_to_image(class_id_image, class_id_to_rgb_map):
"""Map the class image to a rgb-color image."""
colored_image = np.zeros((class_id_image.shape[0], class_id_image.shape[1], 3), np.uint8)
for class_id in np.nditer(np.unique(class_id_image)): # get all the unique classes and color at once
try:
cl = class_id_to_rgb_map[int(class_id)]
colored_image[class_id_image[:, :] == class_id] = cl.color
except KeyError as key_error:
print("Warning: could not resolve color of class_id %s" % key_error)
return colored_image
def gt_image_to_class_image(gt_image, class_id_to_rgb_map):
"""Map the rgb-color gt_image to a class image."""
class_image = np.zeros((gt_image.shape[0], gt_image.shape[1]), np.uint8)
for class_id in np.nditer(np.unique(gt_image)): # get all the unique classes and color at once
class_id = int(class_id)
try:
class_color = list(class_id_to_rgb_map[class_id].color)
# print("treating class %i i.e. color %s" % (class_id, class_color))
class_image[np.where((gt_image == class_color).all(axis=2))] = class_id
except KeyError as key_error:
print("Warning: could not resolve class_id %s" % key_error)
return class_image
def color_class_image(class_image, id2label):
"""Color classes according to their original colormap."""
if id2label:
colored_image = class_image_to_image(class_image, id2label)
else:
colored_image = add_color(class_image)
return colored_image
def add_color(img):
"""Color classes a good distance away from each other."""
h, w = img.shape
img_color = np.zeros((h, w, 3))
for i in range(1, 151):
img_color[img == i] = to_color(i)
return img_color * 255 # values are in [0.0, 1.0]; scale to [0, 255]
def to_color(category):
"""Map each category color a good distance from each other on the HSV color space."""
v = (category-1)*(137.5/360)
return colorsys.hsv_to_rgb(v, 1, 1)
def download_weights(name):
"""Download Keras weights from Dropbox."""
print("Downloading Keras weights from Dropbox ...")
link_dict = {'pspnet50_ade20k.h5': 'https://www.dropbox.com/s/0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1',
'pspnet50_ade20k.json': 'https://www.dropbox.com/s/v41lvku2lx7lh6m/pspnet50_ade20k.json?dl=1',
'pspnet101_cityscapes.h5': 'https://www.dropbox.com/s/c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1',
'pspnet101_cityscapes.json': 'https://www.dropbox.com/s/fswowe8e3o14tdm/pspnet101_cityscapes.json?dl=1',
'pspnet101_voc2012.h5': 'https://www.dropbox.com/s/uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1',
'pspnet101_voc2012.json': 'https://www.dropbox.com/s/rr5taqu19f5fuzy/pspnet101_voc2012.json?dl=1'}
for key in link_dict:
if name in key:
url = link_dict[key]
print("Downloading %s from %s" % (key, url))
response = requests.get(url)
with open(join("..", "weights", "keras", key), 'wb') as f:
f.write(response.content)
def download_npy_weights(name):
"""Download Numpy weights from Dropbox."""
print("Downloading Keras weights from Dropbox ...")
link_dict = {'pspnet50_ade20k.npy': 'https://www.dropbox.com/s/slzgd07ijcyl45b/pspnet50_ade20k.npy?dl=1',
'pspnet101_cityscapes.npy': 'https://www.dropbox.com/s/oimz7wrgqhfmkk9/pspnet101_cityscapes.npy?dl=1',
'pspnet101_voc2012.npy': 'https://www.dropbox.com/s/ht3qup5r1nourbg/pspnet101_voc2012.npy?dl=1'}
for key in link_dict:
if name in key:
url = link_dict[key]
print("Downloading %s from %s" % (key, url))
response = requests.get(url)
with open(join("..", "weights", "npy", key), 'wb') as f:
f.write(response.content)
def debug(model, data):
"""Debug model by printing the activations in each layer."""
names = [layer.name for layer in model.layers]
for name in names[:]:
print_activation(model, name, data)
def print_activation(model, layer_name, data):
"""Print the activations in each layer."""
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
io = intermediate_layer_model.predict(data)
print(layer_name, array_to_str(io))
def array_to_str(a):
"""Dump activation parameters into a string."""
return "{} {} {} {} {}".format(a.dtype, a.shape, np.min(a),
np.max(a), np.mean(a))
``` |
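The coloring helpers above space classes out along the hue axis: `to_color` maps class i to hue (i-1)·137.5/360 (a golden-angle step, so consecutive classes land far apart on the color wheel) and `add_color` paints every pixel of a class with that RGB value. A small sketch of how that looks on a toy class map (the 2x3 array is made up for illustration):

```python
import colorsys

import numpy as np


def to_color(category):
    """Golden-angle hue spacing, mirroring utils.to_color above."""
    v = (category - 1) * (137.5 / 360)
    return colorsys.hsv_to_rgb(v, 1, 1)


# A tiny 2x3 "segmentation" with three classes (0 is background and stays black).
class_map = np.array([[0, 1, 2],
                      [3, 3, 1]])
colored = np.zeros(class_map.shape + (3,))
for class_id in np.unique(class_map):
    if class_id > 0:
        colored[class_map == class_id] = to_color(class_id)
print((colored * 255).astype(np.uint8))
```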
{
"source": "jmtaysom/idom",
"score": 2
} |
#### File: source/_exts/idom_view.py
```python
import os
from docutils.nodes import raw
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util.docutils import SphinxDirective
from docs.examples import get_normalized_example_name
_IDOM_EXAMPLE_HOST = os.environ.get("IDOM_DOC_EXAMPLE_SERVER_HOST", "")
_IDOM_STATIC_HOST = os.environ.get("IDOM_DOC_STATIC_SERVER_HOST", "/docs").rstrip("/")
class IteractiveWidget(SphinxDirective):
has_content = False
required_arguments = 1
_next_id = 0
option_spec = {
"activate-button": directives.flag,
"margin": float,
}
def run(self):
IteractiveWidget._next_id += 1
container_id = f"idom-widget-{IteractiveWidget._next_id}"
view_id = get_normalized_example_name(
self.arguments[0],
# only used if example name starts with "/"
self.get_source_info()[0],
)
return [
raw(
"",
f"""
<div>
<div
id="{container_id}"
class="interactive widget-container center-content"
style="margin-bottom: {self.options.get("margin", 0)}px;"
/>
<script type="module">
import {{ mountWidgetExample }} from "{_IDOM_STATIC_HOST}/_static/custom.js";
mountWidgetExample(
"{container_id}",
"{view_id}",
"{_IDOM_EXAMPLE_HOST}",
{"true" if "activate-button" in self.options else "false"},
);
</script>
</div>
""",
format="html",
)
]
def setup(app: Sphinx) -> None:
app.add_directive("idom-view", IteractiveWidget)
```
#### File: source/_exts/patched_html_translator.py
```python
from sphinx.util.docutils import is_html5_writer_available
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.html5 import HTML5Translator
class PatchedHTMLTranslator(
HTML5Translator if is_html5_writer_available() else HTMLTranslator
):
def starttag(self, node, tagname, *args, **attrs):
if (
tagname == "a"
and "target" not in attrs
and (
"external" in attrs.get("class", "")
or "external" in attrs.get("classes", [])
)
):
attrs["target"] = "_blank"
attrs["ref"] = "noopener noreferrer"
return super().starttag(node, tagname, *args, **attrs)
def setup(app):
app.set_translator("html", PatchedHTMLTranslator)
```
#### File: dangers-of-mutability/_examples/dict_remove.py
```python
from idom import component, html, run, use_state
@component
def Definitions():
term_to_add, set_term_to_add = use_state(None)
definition_to_add, set_definition_to_add = use_state(None)
all_terms, set_all_terms = use_state({})
def handle_term_to_add_change(event):
set_term_to_add(event["target"]["value"])
def handle_definition_to_add_change(event):
set_definition_to_add(event["target"]["value"])
def handle_add_click(event):
if term_to_add and definition_to_add:
set_all_terms({**all_terms, term_to_add: definition_to_add})
set_term_to_add(None)
set_definition_to_add(None)
def make_delete_click_handler(term_to_delete):
def handle_click(event):
set_all_terms({t: d for t, d in all_terms.items() if t != term_to_delete})
return handle_click
return html.div(
html.button({"onClick": handle_add_click}, "add term"),
html.label(
"Term: ",
html.input({"value": term_to_add, "onChange": handle_term_to_add_change}),
),
html.label(
"Definition: ",
html.input(
{
"value": definition_to_add,
"onChange": handle_definition_to_add_change,
}
),
),
html.hr(),
[
html.div(
html.button(
{"onClick": make_delete_click_handler(term)}, "delete term"
),
html.dt(term),
html.dd(definition),
key=term,
)
for term, definition in all_terms.items()
],
)
run(Definitions)
```
#### File: dangers-of-mutability/_examples/set_remove.py
```python
from idom import component, html, run, use_state
@component
def Grid():
line_size = 5
selected_indices, set_selected_indices = use_state({1, 2, 4})
def make_handle_click(index):
def handle_click(event):
if index in selected_indices:
set_selected_indices(selected_indices - {index})
else:
set_selected_indices(selected_indices | {index})
return handle_click
return html.div(
{"style": {"display": "flex", "flex-direction": "row"}},
[
html.div(
{
"onClick": make_handle_click(index),
"style": {
"height": "30px",
"width": "30px",
"backgroundColor": (
"black" if index in selected_indices else "white"
),
"outline": "1px solid grey",
"cursor": "pointer",
},
},
key=index,
)
for index in range(line_size)
],
)
run(Grid)
```
#### File: multiple-state-updates/_examples/set_color_3_times.py
```python
from idom import component, html, run, use_state
@component
def ColorButton():
color, set_color = use_state("gray")
def handle_click(event):
set_color("orange")
set_color("pink")
set_color("blue")
def handle_reset(event):
set_color("gray")
return html.div(
html.button(
{"onClick": handle_click, "style": {"backgroundColor": color}}, "Set Color"
),
html.button(
{"onClick": handle_reset, "style": {"backgroundColor": color}}, "Reset"
),
)
run(ColorButton)
```
#### File: responding-to-events/_examples/button_prints_message.py
```python
from idom import component, html, run
@component
def PrintButton(display_text, message_text):
def handle_event(event):
print(message_text)
return html.button({"onClick": handle_event}, display_text)
@component
def App():
return html.div(
PrintButton("Play", "Playing"),
PrintButton("Pause", "Paused"),
)
run(App)
```
#### File: state-as-a-snapshot/_examples/print_chat_message.py
```python
import asyncio
from idom import component, event, html, run, use_state
@component
def App():
recipient, set_recipient = use_state("Alice")
message, set_message = use_state("")
@event(prevent_default=True)
async def handle_submit(event):
set_message("")
print("About to send message...")
await asyncio.sleep(5)
print(f"Sent '{message}' to {recipient}")
return html.form(
{"onSubmit": handle_submit, "style": {"display": "inline-grid"}},
html.label(
"To: ",
html.select(
{
"value": recipient,
"onChange": lambda event: set_recipient(event["target"]["value"]),
},
html.option({"value": "Alice"}, "Alice"),
html.option({"value": "Bob"}, "Bob"),
),
),
html.input(
{
"type": "text",
"placeholder": "Your message...",
"value": message,
"onChange": lambda event: set_message(event["target"]["value"]),
}
),
html.button({"type": "submit"}, "Send"),
)
run(App)
```
#### File: your-first-components/_examples/nested_photos.py
```python
from idom import component, html, run
@component
def Photo():
return html.img(
{
"src": "https://picsum.photos/id/274/500/300",
"style": {"width": "30%"},
"alt": "<NAME>",
}
)
@component
def Gallery():
return html.section(
html.h1("Famous Musicians"),
Photo(),
Photo(),
Photo(),
)
run(Gallery)
```
#### File: your-first-components/_examples/simple_photo.py
```python
from idom import component, html, run
@component
def Photo():
return html.img(
{
"src": "https://picsum.photos/id/237/500/300",
"style": {"width": "50%"},
"alt": "Puppy",
}
)
run(Photo)
```
#### File: getting-started/_examples/debug_error_example.py
```python
from idom import component, html, run
@component
def App():
return html.div(GoodComponent(), BadComponent())
@component
def GoodComponent():
return html.p("This component rendered successfuly")
@component
def BadComponent():
raise RuntimeError("This component raised an error")
run(App)
```
#### File: getting-started/_examples/run_starlette.py
```python
from idom import run
from idom.backend import starlette as starlette_server
# the run() function is the entry point for examples
starlette_server.configure = lambda _, cmpt: run(cmpt)
from starlette.applications import Starlette
from idom import component, html
from idom.backend.starlette import configure
@component
def HelloWorld():
return html.h1("Hello, world!")
app = Starlette()
configure(app, HelloWorld)
```
#### File: _static/embed-idom-view/main.py
```python
from sanic import Sanic
from sanic.response import file
from idom import component, html
from idom.backend.sanic import Options, configure
app = Sanic("MyApp")
@app.route("/")
async def index(request):
return await file("index.html")
@component
def IdomView():
return html.code("This text came from an IDOM App")
configure(app, IdomView, Options(url_prefix="/_idom"))
app.run(host="127.0.0.1", port=5000)
```
#### File: reference/_examples/material_ui_switch.py
```python
import idom
mui = idom.web.module_from_template("react", "@material-ui/core@^5.0", fallback="⌛")
Switch = idom.web.export(mui, "Switch")
@idom.component
def DayNightSwitch():
checked, set_checked = idom.hooks.use_state(False)
return idom.html.div(
Switch(
{
"checked": checked,
"onChange": lambda event, checked: set_checked(checked),
}
),
"🌞" if checked else "🌚",
)
idom.run(DayNightSwitch)
```
#### File: reference/_examples/network_graph.py
```python
import random
import idom
react_cytoscapejs = idom.web.module_from_template(
# we need to use this template because react-cytoscapejs uses a default export
"react",
"react-cytoscapejs",
exports_default=True,
fallback="⌛",
)
Cytoscape = idom.web.export(react_cytoscapejs, "default")
@idom.component
def RandomNetworkGraph():
return Cytoscape(
{
"style": {"width": "100%", "height": "200px"},
"elements": random_network(20),
"layout": {"name": "cose"},
}
)
def random_network(number_of_nodes):
conns = []
nodes = [{"data": {"id": 0, "label": 0}}]
for src_node_id in range(1, number_of_nodes + 1):
tgt_node = random.choice(nodes)
src_node = {"data": {"id": src_node_id, "label": src_node_id}}
new_conn = {"data": {"source": src_node_id, "target": tgt_node["data"]["id"]}}
nodes.append(src_node)
conns.append(new_conn)
return nodes + conns
idom.run(RandomNetworkGraph)
```
#### File: idom/scripts/changes_since_release.py
```python
from __future__ import annotations
import sys
from common.github_utils import (
REPO_NAME,
date_range_query,
last_release_date,
search_idom_repo,
)
SECTION_FORMAT_TEMPLATES = {
"md": lambda title: f"# {title}",
"rst": lambda title: f"**{title}**\n",
"text": lambda title: f"{title}\n{'-' * len(title)}",
}
ISSUE_FORMAT_TEMPLATES = {
"md": lambda title, number, **_: f"- {title} - [#{number}](https://github.com/{REPO_NAME}/issues/{number})",
"rst": lambda title, number, **_: f"- {title} - :issue:`{number}`",
"text": lambda title, number, **_: f"- {title} - #{number}",
}
PULL_REQUEST_FORMAT_TEMPLATES = {
"md": lambda title, number, **_: f"- {title} - [#{number}](https://github.com/{REPO_NAME}/pull/{number})",
"rst": lambda title, number, **_: f"- {title} - :pull:`{number}`",
"text": lambda title, number, **_: f"- {title} - #{number}",
}
def show_issues(format: str):
print(SECTION_FORMAT_TEMPLATES[format]("Closed Issues"))
template = ISSUE_FORMAT_TEMPLATES[format]
query = f"type:issue closed:{date_range_query(last_release_date())}"
for issue in search_idom_repo(query):
print(template(**issue))
def show_pull_requests(format: str = "text"):
print(SECTION_FORMAT_TEMPLATES[format]("Merged Pull Requests"))
template = PULL_REQUEST_FORMAT_TEMPLATES[format]
query = f"type:pr merged:{date_range_query(last_release_date())}"
for pull in search_idom_repo(query):
print(template(**pull))
def main(format: str = "text"):
for func in [show_issues, show_pull_requests]:
func(format)
print()
if __name__ == "__main__":
main(*sys.argv[1:])
```
#### File: idom/backend/starlette.py
```python
from __future__ import annotations
import asyncio
import json
import logging
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict, Tuple, Union
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
from starlette.types import Receive, Scope, Send
from starlette.websockets import WebSocket, WebSocketDisconnect
from idom.backend.types import Location
from idom.config import IDOM_WEB_MODULES_DIR
from idom.core.hooks import Context, create_context, use_context
from idom.core.layout import Layout, LayoutEvent
from idom.core.serve import (
RecvCoroutine,
SendCoroutine,
VdomJsonPatch,
serve_json_patch,
)
from idom.core.types import RootComponentConstructor
from ._asgi import serve_development_asgi
from .utils import CLIENT_BUILD_DIR, safe_client_build_dir_path
logger = logging.getLogger(__name__)
WebSocketContext: type[Context[WebSocket | None]] = create_context(
None, "WebSocketContext"
)
def configure(
app: Starlette,
constructor: RootComponentConstructor,
options: Options | None = None,
) -> None:
"""Return a :class:`StarletteServer` where each client has its own state.
Implements the :class:`~idom.server.proto.ServerFactory` protocol
Parameters:
app: An application instance
constructor: A component constructor
options: Options for configuring server behavior
"""
options = options or Options()
# this route should take priority so set it up first
_setup_single_view_dispatcher_route(options, app, constructor)
_setup_common_routes(options, app)
def create_development_app() -> Starlette:
"""Return a :class:`Starlette` app instance in debug mode"""
return Starlette(debug=True)
async def serve_development_app(
app: Starlette,
host: str,
port: int,
started: asyncio.Event | None = None,
) -> None:
"""Run a development server for starlette"""
await serve_development_asgi(app, host, port, started)
def use_location() -> Location:
"""Get the current route as a string"""
scope = use_scope()
pathname = "/" + scope["path_params"].get("path", "")
search = scope["query_string"].decode()
return Location(pathname, "?" + search if search else "")
def use_scope() -> Scope:
"""Get the current ASGI scope dictionary"""
return use_websocket().scope
def use_websocket() -> WebSocket:
"""Get the current WebSocket object"""
websocket = use_context(WebSocketContext)
if websocket is None:
raise RuntimeError( # pragma: no cover
"No websocket. Are you running with a Starllette server?"
)
return websocket
@dataclass
class Options:
"""Optionsuration options for :class:`StarletteRenderServer`"""
cors: Union[bool, Dict[str, Any]] = False
"""Enable or configure Cross Origin Resource Sharing (CORS)
For more information see docs for ``starlette.middleware.cors.CORSMiddleware``
"""
serve_static_files: bool = True
"""Whether or not to serve static files (i.e. web modules)"""
url_prefix: str = ""
"""The URL prefix where IDOM resources will be served from"""
def _setup_common_routes(options: Options, app: Starlette) -> None:
cors_options = options.cors
if cors_options: # pragma: no cover
cors_params = (
cors_options if isinstance(cors_options, dict) else {"allow_origins": ["*"]}
)
app.add_middleware(CORSMiddleware, **cors_params)
# This really should be added to the APIRouter, but there's a bug in Starlette
# BUG: https://github.com/tiangolo/fastapi/issues/1469
url_prefix = options.url_prefix
if options.serve_static_files:
wm_dir = IDOM_WEB_MODULES_DIR.current
web_module_files = StaticFiles(directory=wm_dir, html=True, check_dir=False)
app.mount(url_prefix + "/_api/modules", web_module_files)
app.mount(url_prefix + "/{_:path}/_api/modules", web_module_files)
# register this last so it takes least priority
app.mount(url_prefix + "/", single_page_app_files())
def single_page_app_files() -> Callable[..., Awaitable[None]]:
static_files_app = StaticFiles(
directory=CLIENT_BUILD_DIR,
html=True,
check_dir=False,
)
async def spa_app(scope: Scope, receive: Receive, send: Send) -> None:
# Path safety is the responsibility of starlette.staticfiles.StaticFiles -
# using `safe_client_build_dir_path` is for convenience in this case.
path = safe_client_build_dir_path(scope["path"]).name
return await static_files_app({**scope, "path": path}, receive, send)
return spa_app
def _setup_single_view_dispatcher_route(
options: Options, app: Starlette, constructor: RootComponentConstructor
) -> None:
@app.websocket_route(options.url_prefix + "/_api/stream")
@app.websocket_route(options.url_prefix + "/{path:path}/_api/stream")
async def model_stream(socket: WebSocket) -> None:
await socket.accept()
send, recv = _make_send_recv_callbacks(socket)
try:
await serve_json_patch(
Layout(WebSocketContext(constructor(), value=socket)),
send,
recv,
)
except WebSocketDisconnect as error:
logger.info(f"WebSocket disconnect: {error.code}")
def _make_send_recv_callbacks(
socket: WebSocket,
) -> Tuple[SendCoroutine, RecvCoroutine]:
async def sock_send(value: VdomJsonPatch) -> None:
await socket.send_text(json.dumps(value))
async def sock_recv() -> LayoutEvent:
return LayoutEvent(**json.loads(await socket.receive_text()))
return sock_send, sock_recv
```
#### File: idom/backend/tornado.py
```python
from __future__ import annotations
import asyncio
import json
from asyncio import Queue as AsyncQueue
from asyncio.futures import Future
from dataclasses import dataclass
from typing import Any, List, Tuple, Type, Union
from urllib.parse import urljoin
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPServerRequest
from tornado.log import enable_pretty_logging
from tornado.platform.asyncio import AsyncIOMainLoop
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.websocket import WebSocketHandler
from tornado.wsgi import WSGIContainer
from idom.backend.types import Location
from idom.config import IDOM_WEB_MODULES_DIR
from idom.core.hooks import Context, create_context, use_context
from idom.core.layout import Layout, LayoutEvent
from idom.core.serve import VdomJsonPatch, serve_json_patch
from idom.core.types import ComponentConstructor
from .utils import CLIENT_BUILD_DIR, safe_client_build_dir_path
ConnectionContext: type[Context[Connection | None]] = create_context(
None, "ConnectionContext"
)
def configure(
app: Application,
component: ComponentConstructor,
options: Options | None = None,
) -> None:
"""Return a :class:`TornadoServer` where each client has its own state.
Implements the :class:`~idom.server.proto.ServerFactory` protocol
Parameters:
app: A tornado ``Application`` instance.
component: A root component constructor
options: Options for configuring how the component is mounted to the server.
"""
options = options or Options()
_add_handler(
app,
options,
(
# this route should take priority so set it up first
_setup_single_view_dispatcher_route(component)
+ _setup_common_routes(options)
),
)
def create_development_app() -> Application:
return Application(debug=True)
async def serve_development_app(
app: Application,
host: str,
port: int,
started: asyncio.Event | None = None,
) -> None:
enable_pretty_logging()
# setup up tornado to use asyncio
AsyncIOMainLoop().install()
server = HTTPServer(app)
server.listen(port, host)
if started:
# at this point the server is accepting connections
started.set()
try:
# block forever - tornado has already set up its own background tasks
await asyncio.get_event_loop().create_future()
finally:
# stop accepting new connections
server.stop()
# wait for existing connections to complete
await server.close_all_connections()
def use_location() -> Location:
"""Get the current route as a string"""
conn = use_connection()
search = conn.request.query
return Location(pathname="/" + conn.path, search="?" + search if search else "")
def use_scope() -> dict[str, Any]:
"""Get the current WSGI environment dictionary"""
return WSGIContainer.environ(use_request())
def use_request() -> HTTPServerRequest:
"""Get the current ``HTTPServerRequest``"""
return use_connection().request
def use_connection() -> Connection:
connection = use_context(ConnectionContext)
if connection is None:
raise RuntimeError( # pragma: no cover
"No connection. Are you running with a Tornado server?"
)
return connection
@dataclass
class Connection:
"""A simple wrapper for holding connection information"""
request: HTTPServerRequest
"""The current request object"""
path: str
"""The current path being served"""
@dataclass
class Options:
"""Render server options for :class:`TornadoRenderServer` subclasses"""
serve_static_files: bool = True
"""Whether or not to serve static files (i.e. web modules)"""
url_prefix: str = ""
"""The URL prefix where IDOM resources will be served from"""
_RouteHandlerSpecs = List[Tuple[str, Type[RequestHandler], Any]]
def _setup_common_routes(options: Options) -> _RouteHandlerSpecs:
handlers: _RouteHandlerSpecs = []
if options.serve_static_files:
handlers.append(
(
r"/.*/?_api/modules/(.*)",
StaticFileHandler,
{"path": str(IDOM_WEB_MODULES_DIR.current)},
)
)
# register last to give lowest priority
handlers.append(
(
r"/(.*)",
SpaStaticFileHandler,
{"path": str(CLIENT_BUILD_DIR)},
)
)
return handlers
def _add_handler(
app: Application, options: Options, handlers: _RouteHandlerSpecs
) -> None:
prefixed_handlers: List[Any] = [
(urljoin(options.url_prefix, route_pattern),) + tuple(handler_info)
for route_pattern, *handler_info in handlers
]
app.add_handlers(r".*", prefixed_handlers)
def _setup_single_view_dispatcher_route(
constructor: ComponentConstructor,
) -> _RouteHandlerSpecs:
return [
(
r"/(.*)/_api/stream",
ModelStreamHandler,
{"component_constructor": constructor},
),
(
r"/_api/stream",
ModelStreamHandler,
{"component_constructor": constructor},
),
]
class SpaStaticFileHandler(StaticFileHandler):
async def get(self, path: str, include_body: bool = True) -> None:
# Path safety is the responsibility of tornado.web.StaticFileHandler -
# using `safe_client_build_dir_path` is for convenience in this case.
return await super().get(safe_client_build_dir_path(path).name, include_body)
class ModelStreamHandler(WebSocketHandler):
"""A web-socket handler that serves up a new model stream to each new client"""
_dispatch_future: Future[None]
_message_queue: AsyncQueue[str]
def initialize(self, component_constructor: ComponentConstructor) -> None:
self._component_constructor = component_constructor
async def open(self, path: str = "", *args: Any, **kwargs: Any) -> None:
message_queue: "AsyncQueue[str]" = AsyncQueue()
async def send(value: VdomJsonPatch) -> None:
await self.write_message(json.dumps(value))
async def recv() -> LayoutEvent:
return LayoutEvent(**json.loads(await message_queue.get()))
self._message_queue = message_queue
self._dispatch_future = asyncio.ensure_future(
serve_json_patch(
Layout(
ConnectionContext(
self._component_constructor(),
value=Connection(self.request, path),
)
),
send,
recv,
)
)
async def on_message(self, message: Union[str, bytes]) -> None:
await self._message_queue.put(
message if isinstance(message, str) else message.decode()
)
def on_close(self) -> None:
if not self._dispatch_future.done():
self._dispatch_future.cancel()
```
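A minimal sketch of wiring the `configure` helper above into a plain Tornado application; the port and the empty `url_prefix` are arbitrary choices, and the import paths assume the package layout shown in the file header.
```python
from tornado.ioloop import IOLoop
from tornado.web import Application

from idom import component, html
from idom.backend.tornado import Options, configure

@component
def HelloWorld():
    return html.h1("Hello, world!")

# Mount the component's stream and static-file handlers on the app,
# then serve it on an arbitrary local port.
app = Application()
configure(app, HelloWorld, Options(url_prefix=""))
app.listen(8000)
IOLoop.current().start()
```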
#### File: idom/core/_event_proxy.py
```python
from typing import Any, Dict, Sequence
from warnings import warn
def _wrap_in_warning_event_proxies(values: Sequence[Any]) -> Sequence[Any]:
return [_EventProxy(x) if isinstance(x, dict) else x for x in values]
class _EventProxy(Dict[Any, Any]):
def __getitem__(self, key: Any) -> Any: # pragma: no cover
try:
return super().__getitem__(key)
except KeyError:
target = self.get("target")
if isinstance(target, dict) and key in target:
warn(
f"The event key event[{key!r}] has been moved event['target'][{key!r}",
DeprecationWarning,
stacklevel=2,
)
return target[key]
else:
raise
def get(self, key: Any, default: Any = None) -> Any: # pragma: no cover
try:
return super().__getitem__(key)
except KeyError:
target = self.get("target")
if isinstance(target, dict) and key in target:
warn(
f"The event key event[{key!r}] has been moved event['target'][{key!r}",
DeprecationWarning,
stacklevel=2,
)
return target[key]
else:
return default
```
#### File: idom/core/_fixed_jsonpatch.py
```python
from jsonpatch import _ST_REMOVE
from jsonpatch import DiffBuilder as _DiffBuilder
from jsonpatch import JsonPatch as _JsonPatch
from jsonpatch import RemoveOperation, _path_join, basestring
from jsonpointer import JsonPointer
def apply_patch(doc, patch, in_place=False, pointer_cls=JsonPointer):
if isinstance(patch, basestring):
patch = JsonPatch.from_string(patch, pointer_cls=pointer_cls)
else:
patch = JsonPatch(patch, pointer_cls=pointer_cls)
return patch.apply(doc, in_place)
def make_patch(src, dst, pointer_cls=JsonPointer):
return JsonPatch.from_diff(src, dst, pointer_cls=pointer_cls)
class JsonPatch(_JsonPatch):
@classmethod
def from_diff(
cls,
src,
dst,
optimization=True,
dumps=None,
pointer_cls=JsonPointer,
):
json_dumper = dumps or cls.json_dumper
builder = DiffBuilder(src, dst, json_dumper, pointer_cls=pointer_cls)
builder._compare_values("", None, src, dst)
ops = list(builder.execute())
return cls(ops, pointer_cls=pointer_cls)
class DiffBuilder(_DiffBuilder):
def _item_removed(self, path, key, item):
new_op = RemoveOperation(
{
"op": "remove",
"path": _path_join(path, key),
}
)
new_index = self.insert(new_op)
self.store_index(item, new_index, _ST_REMOVE)
```
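A short, illustrative round trip through the patched helpers above (not part of the original module); it only assumes the standard `jsonpatch` behaviour that `JsonPatch.apply` returns the patched document and `JsonPatch.to_string` serializes it.
```python
from idom.core._fixed_jsonpatch import apply_patch, make_patch

src = {"tagName": "div", "attributes": {"count": 1}, "children": ["a", "b"]}
dst = {"tagName": "div", "attributes": {"count": 2}, "children": ["a"]}

# Diff the two documents, then re-apply the resulting patch to the source.
patch = make_patch(src, dst)
assert patch.apply(src) == dst

# `apply_patch` also accepts the serialized form of a patch.
assert apply_patch(src, patch.to_string()) == dst
```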
#### File: idom/testing/common.py
```python
from __future__ import annotations
import asyncio
import inspect
import shutil
import time
from functools import wraps
from typing import Any, Awaitable, Callable, Generic, Optional, TypeVar, cast
from uuid import uuid4
from weakref import ref
from typing_extensions import ParamSpec
from idom.config import IDOM_TESTING_DEFAULT_TIMEOUT, IDOM_WEB_MODULES_DIR
from idom.core.events import EventHandler, to_event_handler_function
from idom.core.hooks import LifeCycleHook, current_hook
def clear_idom_web_modules_dir() -> None:
"""Clear the directory where IDOM stores registered web modules"""
for path in IDOM_WEB_MODULES_DIR.current.iterdir():
shutil.rmtree(path) if path.is_dir() else path.unlink()
_P = ParamSpec("_P")
_R = TypeVar("_R")
_RC = TypeVar("_RC", covariant=True)
_DEFAULT_POLL_DELAY = 0.1
class poll(Generic[_R]): # noqa: N801
"""Wait until the result of an sync or async function meets some condition"""
def __init__(
self,
function: Callable[_P, Awaitable[_R] | _R],
*args: _P.args,
**kwargs: _P.kwargs,
) -> None:
coro: Callable[_P, Awaitable[_R]]
if not inspect.iscoroutinefunction(function):
async def coro(*args: _P.args, **kwargs: _P.kwargs) -> _R:
return cast(_R, function(*args, **kwargs))
else:
coro = cast(Callable[_P, Awaitable[_R]], function)
self._func = coro
self._args = args
self._kwargs = kwargs
async def until(
self,
condition: Callable[[_R], bool],
timeout: float = IDOM_TESTING_DEFAULT_TIMEOUT.current,
delay: float = _DEFAULT_POLL_DELAY,
) -> None:
"""Check that the coroutines result meets a condition within the timeout"""
started_at = time.time()
while True:
await asyncio.sleep(delay)
result = await self._func(*self._args, **self._kwargs)
if condition(result):
break
elif (time.time() - started_at) > timeout: # pragma: no cover
raise TimeoutError(
f"Condition not met within {timeout} "
f"seconds - last value was {result!r}"
)
async def until_is(
self,
right: _R,
timeout: float = IDOM_TESTING_DEFAULT_TIMEOUT.current,
delay: float = _DEFAULT_POLL_DELAY,
) -> None:
"""Wait until the result is identical to the given value"""
return await self.until(lambda left: left is right, timeout, delay)
async def until_equals(
self,
right: _R,
timeout: float = IDOM_TESTING_DEFAULT_TIMEOUT.current,
delay: float = _DEFAULT_POLL_DELAY,
) -> None:
"""Wait until the result is equal to the given value"""
return await self.until(lambda left: left == right, timeout, delay)
class HookCatcher:
"""Utility for capturing a LifeCycleHook from a component
Example:
.. code-block::
hooks = HookCatcher(index_by_kwarg="thing")
@idom.component
@hooks.capture
def MyComponent(thing):
...
... # render the component
# grab the hook from the last render where MyComponent(thing='something')
hooks.index["something"]
# or grab the hook from the component's last render
hooks.latest
After the first render of ``MyComponent`` the ``HookCatcher`` will have
captured the component's ``LifeCycleHook``.
"""
latest: LifeCycleHook
def __init__(self, index_by_kwarg: Optional[str] = None):
self.index_by_kwarg = index_by_kwarg
self.index: dict[Any, LifeCycleHook] = {}
def capture(self, render_function: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator for capturing a ``LifeCycleHook`` on each render of a component"""
# The render function holds a reference to `self` and, via the `LifeCycleHook`,
# the component. Some tests check whether components are garbage collected, thus
# we must use a `ref` here to ensure these checks pass once the catcher itself
# has been collected.
self_ref = ref(self)
@wraps(render_function)
def wrapper(*args: Any, **kwargs: Any) -> Any:
self = self_ref()
assert self is not None, "Hook catcher has been garbage collected"
hook = current_hook()
if self.index_by_kwarg is not None:
self.index[kwargs[self.index_by_kwarg]] = hook
self.latest = hook
return render_function(*args, **kwargs)
return wrapper
class StaticEventHandler:
"""Utility for capturing the target of one event handler
Example:
.. code-block::
static_handler = StaticEventHandler()
@idom.component
def MyComponent():
state, set_state = idom.hooks.use_state(0)
handler = static_handler.use(lambda event: set_state(state + 1))
return idom.html.button({"onClick": handler}, "Click me!")
# gives the target ID for onClick from the last render of MyComponent
static_handler.target
If you need to capture event handlers from different instances of a component
then you should create multiple ``StaticEventHandler`` instances.
.. code-block::
static_handlers_by_key = {
"first": StaticEventHandler(),
"second": StaticEventHandler(),
}
@idom.component
def Parent():
return idom.html.div(Child(key="first"), Child(key="second"))
@idom.component
def Child(key):
state, set_state = idom.hooks.use_state(0)
handler = static_handlers_by_key[key].use(lambda event: set_state(state + 1))
return idom.html.button({"onClick": handler}, "Click me!")
# grab the individual targets for each instance above
first_target = static_handlers_by_key["first"].target
second_target = static_handlers_by_key["second"].target
"""
def __init__(self) -> None:
self.target = uuid4().hex
def use(
self,
function: Callable[..., Any],
stop_propagation: bool = False,
prevent_default: bool = False,
) -> EventHandler:
return EventHandler(
to_event_handler_function(function),
stop_propagation,
prevent_default,
self.target,
)
```
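A hypothetical use of the `poll` utility defined above; the import path follows the file header, and the toy counter stands in for a real browser or server query.
```python
import asyncio

from idom.testing.common import poll  # assumed import path for the class above

async def main():
    calls = {"count": 0}

    def next_value():
        calls["count"] += 1
        return calls["count"]

    # Re-invoke `next_value` every 0.1s (the default delay) until the
    # condition holds, failing with TimeoutError after 5 seconds.
    await poll(next_value).until(lambda value: value >= 3, timeout=5)
    await poll(next_value).until_equals(4, timeout=5)

asyncio.run(main())
```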
#### File: idom/testing/display.py
```python
from __future__ import annotations
from contextlib import AsyncExitStack
from types import TracebackType
from typing import Any
from playwright.async_api import Browser, BrowserContext, Page, async_playwright
from idom import html
from idom.types import RootComponentConstructor
from .server import ServerFixture
class DisplayFixture:
"""A fixture for running web-based tests using ``playwright``"""
_exit_stack: AsyncExitStack
def __init__(
self,
server: ServerFixture | None = None,
driver: Browser | BrowserContext | Page | None = None,
) -> None:
if server is not None:
self.server = server
if driver is not None:
if isinstance(driver, Page):
self.page = driver
else:
self._browser = driver
self._next_view_id = 0
async def show(
self,
component: RootComponentConstructor,
) -> None:
self._next_view_id += 1
view_id = f"display-{self._next_view_id}"
self.server.mount(lambda: html.div({"id": view_id}, component()))
await self.goto("/")
await self.page.wait_for_selector(f"#{view_id}", state="attached")
async def goto(self, path: str, query: Any | None = None) -> None:
await self.page.goto(self.server.url(path, query))
async def __aenter__(self) -> DisplayFixture:
es = self._exit_stack = AsyncExitStack()
browser: Browser | BrowserContext
if not hasattr(self, "page"):
if not hasattr(self, "_browser"):
pw = await es.enter_async_context(async_playwright())
browser = await pw.chromium.launch()
else:
browser = self._browser
self.page = await browser.new_page()
if not hasattr(self, "server"):
self.server = ServerFixture()
await es.enter_async_context(self.server)
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.server.mount(None)
await self._exit_stack.aclose()
```
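A hedged end-to-end sketch of the fixture above: it assumes the import path implied by the file header, that `ServerFixture` can be constructed with no arguments, and that a Playwright Chromium browser is installed locally.
```python
import asyncio

from idom import component, html
from idom.testing.display import DisplayFixture  # assumed import path

@component
def Hello():
    return html.h1({"id": "hello"}, "Hello!")

async def main():
    # The fixture starts a server and a headless browser page, shows the
    # component, and tears everything down on exit.
    async with DisplayFixture() as display:
        await display.show(Hello)
        await display.page.wait_for_selector("#hello")

asyncio.run(main())
```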
#### File: idom/web/utils.py
```python
import logging
import re
from pathlib import Path, PurePosixPath
from typing import Set, Tuple
from urllib.parse import urlparse
import requests
logger = logging.getLogger(__name__)
def module_name_suffix(name: str) -> str:
if name.startswith("@"):
name = name[1:]
head, _, tail = name.partition("@") # handle version identifier
version, _, tail = tail.partition("/") # get section after version
return PurePosixPath(tail or head).suffix or ".js"
def resolve_module_exports_from_file(
file: Path,
max_depth: int,
is_re_export: bool = False,
) -> Set[str]:
if max_depth == 0:
logger.warning(f"Did not resolve all exports for {file} - max depth reached")
return set()
elif not file.exists():
logger.warning(f"Did not resolve exports for unknown file {file}")
return set()
export_names, references = resolve_module_exports_from_source(
file.read_text(), exclude_default=is_re_export
)
for ref in references:
if urlparse(ref).scheme: # is an absolute URL
export_names.update(
resolve_module_exports_from_url(ref, max_depth - 1, is_re_export=True)
)
else:
path = file.parent.joinpath(*ref.split("/"))
export_names.update(
resolve_module_exports_from_file(path, max_depth - 1, is_re_export=True)
)
return export_names
def resolve_module_exports_from_url(
url: str,
max_depth: int,
is_re_export: bool = False,
) -> Set[str]:
if max_depth == 0:
logger.warning(f"Did not resolve all exports for {url} - max depth reached")
return set()
try:
text = requests.get(url).text
except requests.exceptions.ConnectionError as error:
reason = "" if error is None else " - {error.errno}"
logger.warning("Did not resolve exports for url " + url + reason)
return set()
export_names, references = resolve_module_exports_from_source(
text, exclude_default=is_re_export
)
for ref in references:
url = _resolve_relative_url(url, ref)
export_names.update(
resolve_module_exports_from_url(url, max_depth - 1, is_re_export=True)
)
return export_names
def resolve_module_exports_from_source(
content: str, exclude_default: bool
) -> Tuple[Set[str], Set[str]]:
names: Set[str] = set()
references: Set[str] = set()
if _JS_DEFAULT_EXPORT_PATTERN.search(content):
names.add("default")
# Exporting functions and classes
names.update(_JS_FUNC_OR_CLS_EXPORT_PATTERN.findall(content))
for export in _JS_GENERAL_EXPORT_PATTERN.findall(content):
export = export.rstrip(";").strip()
# Exporting individual features
if export.startswith("let "):
names.update(let.split("=", 1)[0] for let in export[4:].split(","))
# Renaming exports and export list
elif export.startswith("{") and export.endswith("}"):
names.update(
item.split(" as ", 1)[-1] for item in export.strip("{}").split(",")
)
# Exporting destructured assignments with renaming
elif export.startswith("const "):
names.update(
item.split(":", 1)[0]
for item in export[6:].split("=", 1)[0].strip("{}").split(",")
)
# Default exports
elif export.startswith("default "):
names.add("default")
# Aggregating modules
elif export.startswith("* as "):
names.add(export[5:].split(" from ", 1)[0])
elif export.startswith("* "):
references.add(export[2:].split("from ", 1)[-1].strip("'\""))
elif export.startswith("{") and " from " in export:
names.update(
item.split(" as ", 1)[-1]
for item in export.split(" from ")[0].strip("{}").split(",")
)
elif not (export.startswith("function ") or export.startswith("class ")):
logger.warning(f"Unknown export type {export!r}")
names = {n.strip() for n in names}
references = {r.strip() for r in references}
if exclude_default and "default" in names:
names.remove("default")
return names, references
def _resolve_relative_url(base_url: str, rel_url: str) -> str:
if not rel_url.startswith("."):
return rel_url
base_url = base_url.rsplit("/", 1)[0]
if rel_url.startswith("./"):
return base_url + rel_url[1:]
while rel_url.startswith("../"):
base_url = base_url.rsplit("/", 1)[0]
rel_url = rel_url[3:]
return f"{base_url}/{rel_url}"
_JS_DEFAULT_EXPORT_PATTERN = re.compile(
r";?\s*export\s+default\s",
)
_JS_FUNC_OR_CLS_EXPORT_PATTERN = re.compile(
r";?\s*export\s+(?:function|class)\s+([a-zA-Z_$][0-9a-zA-Z_$]*)"
)
_JS_GENERAL_EXPORT_PATTERN = re.compile(
r"(?:^|;|})\s*export(?=\s+|{)(.*?)(?=;|$)", re.MULTILINE
)
```
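For orientation, a small self-contained exercise of the source parser above on an in-memory JavaScript snippet (the snippet itself is made up); the comments describe the expected, not guaranteed, output.
```python
from idom.web.utils import resolve_module_exports_from_source

js_source = """
export default function App() {}
export function useThing() {}
export { internalName as publicName };
export * from "./other-module.js";
"""

names, references = resolve_module_exports_from_source(js_source, exclude_default=False)
print(sorted(names))       # should include 'default', 'publicName', 'useThing'
print(sorted(references))  # should include './other-module.js'
```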
#### File: src/idom/widgets.py
```python
from __future__ import annotations
from base64 import b64encode
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
)
from typing_extensions import Protocol
import idom
from . import html
from .core import hooks
from .core.component import component
from .core.types import ComponentConstructor, VdomDict
from .utils import Ref
def image(
format: str,
value: Union[str, bytes] = "",
attributes: Optional[Dict[str, Any]] = None,
) -> VdomDict:
"""Utility for constructing an image from a string or bytes
The source value will automatically be encoded to base64
"""
if format == "svg":
format = "svg+xml"
if isinstance(value, str):
bytes_value = value.encode()
else:
bytes_value = value
base64_value = b64encode(bytes_value).decode()
src = f"data:image/{format};base64,{base64_value}"
return {"tagName": "img", "attributes": {"src": src, **(attributes or {})}}
_Value = TypeVar("_Value")
def use_linked_inputs(
attributes: Sequence[Dict[str, Any]],
on_change: Callable[[_Value], None] = lambda value: None,
cast: _CastFunc[_Value] = lambda value: value,
initial_value: str = "",
ignore_empty: bool = True,
) -> List[VdomDict]:
"""Return a list of linked inputs equal to the number of given attributes.
Parameters:
attributes:
The attributes of each returned input element. If the number of generated
inputs is variable, you may need to assign each one a
:ref:`key <Organizing Items With Keys>` by including a ``"key"`` in each
attribute dictionary.
on_change:
A callback which is triggered when any input is changed. This callback need
not update the 'value' field in the attributes of the inputs since that is
handled automatically.
cast:
Cast the 'value' of changed inputs that is passed to ``on_change``.
initial_value:
Initialize the 'value' field of the inputs.
ignore_empty:
Do not trigger ``on_change`` if the 'value' is an empty string.
"""
value, set_value = idom.hooks.use_state(initial_value)
def sync_inputs(event: Dict[str, Any]) -> None:
new_value = event["value"]
set_value(new_value)
if not new_value and ignore_empty:
return None
on_change(cast(new_value))
inputs: list[VdomDict] = []
for attrs in attributes:
# we're going to mutate this so copy it
attrs = attrs.copy()
key = attrs.pop("key", None)
attrs.update({"onChange": sync_inputs, "value": value})
inputs.append(html.input(attrs, key=key))
return inputs
_CastTo = TypeVar("_CastTo", covariant=True)
class _CastFunc(Protocol[_CastTo]):
def __call__(self, value: str) -> _CastTo:
...
MountFunc = Callable[["Callable[[], Any] | None"], None]
def hotswap(update_on_change: bool = False) -> Tuple[MountFunc, ComponentConstructor]:
"""Swap out components from a layout on the fly.
Since you can't change the component functions used to create a layout
in an imperative manner, you can use ``hotswap`` to do this so
long as you set things up ahead of time.
Parameters:
update_on_change: Whether or not all views of the layout should be updated on a swap.
Example:
.. code-block:: python
import idom
show, root = idom.hotswap()
PerClientStateServer(root).run_in_thread("localhost", 8765)
@idom.component
def DivOne(self):
return {"tagName": "div", "children": [1]}
show(DivOne)
# displaying the output now will show DivOne
@idom.component
def DivTwo(self):
return {"tagName": "div", "children": [2]}
show(DivTwo)
# displaying the output now will show DivTwo
"""
constructor_ref: Ref[Callable[[], Any]] = Ref(lambda: None)
if update_on_change:
set_constructor_callbacks: Set[Callable[[Callable[[], Any]], None]] = set()
@component
def HotSwap() -> Any:
# new displays will adopt the latest constructor and arguments
constructor, set_constructor = _use_callable(constructor_ref.current)
def add_callback() -> Callable[[], None]:
set_constructor_callbacks.add(set_constructor)
return lambda: set_constructor_callbacks.remove(set_constructor)
hooks.use_effect(add_callback)
return constructor()
def swap(constructor: Callable[[], Any] | None) -> None:
constructor = constructor_ref.current = constructor or (lambda: None)
for set_constructor in set_constructor_callbacks:
set_constructor(constructor)
return None
else:
@component
def HotSwap() -> Any:
return constructor_ref.current()
def swap(constructor: Callable[[], Any] | None) -> None:
constructor_ref.current = constructor or (lambda: None)
return None
return swap, HotSwap
_Func = Callable[..., Any]
def _use_callable(initial_func: _Func) -> Tuple[_Func, Callable[[_Func], None]]:
state = hooks.use_state(lambda: initial_func)
return state[0], lambda new: state[1](lambda old: new)
```
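A brief sketch of the `image` helper defined above embedded in a component; the SVG markup is arbitrary and `run` serves the view with the default backend.
```python
from idom import component, html, run
from idom.widgets import image

@component
def Logo():
    # `image` base64-encodes the markup and builds a data-URI <img> element.
    svg = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16"></svg>'
    return html.div(image("svg", svg, {"alt": "tiny square"}))

run(Logo)
```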
#### File: tests/test_core/test_dispatcher.py
```python
import asyncio
from typing import Any, Sequence
import idom
from idom.core.layout import Layout, LayoutEvent, LayoutUpdate
from idom.core.serve import VdomJsonPatch, serve_json_patch
from idom.testing import StaticEventHandler
EVENT_NAME = "onEvent"
STATIC_EVENT_HANDLER = StaticEventHandler()
def test_vdom_json_patch_create_from_apply_to():
update = LayoutUpdate("", {"a": 1, "b": [1]}, {"a": 2, "b": [1, 2]})
patch = VdomJsonPatch.create_from(update)
result = patch.apply_to({"a": 1, "b": [1]})
assert result == {"a": 2, "b": [1, 2]}
def make_send_recv_callbacks(events_to_inject):
changes = []
# We need a semaphore here to simulate receiving an event after each update is sent.
# The effect is that the send() and recv() callbacks trade off control. If we did
not do this, it would be hard to determine when to halt because, while we might have
# received all the events, they might not have been sent since the two callbacks are
# executed in separate loops.
sem = asyncio.Semaphore(0)
async def send(patch):
changes.append(patch)
sem.release()
if not events_to_inject:
raise idom.Stop()
async def recv():
await sem.acquire()
try:
return events_to_inject.pop(0)
except IndexError:
# wait forever
await asyncio.Event().wait()
return changes, send, recv
def make_events_and_expected_model():
events = [LayoutEvent(STATIC_EVENT_HANDLER.target, [])] * 4
expected_model = {
"tagName": "",
"children": [
{
"tagName": "div",
"attributes": {"count": 4},
"eventHandlers": {
EVENT_NAME: {
"target": STATIC_EVENT_HANDLER.target,
"preventDefault": False,
"stopPropagation": False,
}
},
}
],
}
return events, expected_model
def assert_changes_produce_expected_model(
changes: Sequence[LayoutUpdate],
expected_model: Any,
) -> None:
model_from_changes = {}
for update in changes:
model_from_changes = update.apply_to(model_from_changes)
assert model_from_changes == expected_model
@idom.component
def Counter():
count, change_count = idom.hooks.use_reducer(
(lambda old_count, diff: old_count + diff),
initial_value=0,
)
handler = STATIC_EVENT_HANDLER.use(lambda: change_count(1))
return idom.html.div({EVENT_NAME: handler, "count": count})
async def test_dispatch():
events, expected_model = make_events_and_expected_model()
changes, send, recv = make_send_recv_callbacks(events)
await asyncio.wait_for(serve_json_patch(Layout(Counter()), send, recv), 1)
assert_changes_produce_expected_model(changes, expected_model)
async def test_dispatcher_handles_more_than_one_event_at_a_time():
block_and_never_set = asyncio.Event()
will_block = asyncio.Event()
second_event_did_execute = asyncio.Event()
blocked_handler = StaticEventHandler()
non_blocked_handler = StaticEventHandler()
@idom.component
def ComponentWithTwoEventHandlers():
@blocked_handler.use
async def block_forever():
will_block.set()
await block_and_never_set.wait()
@non_blocked_handler.use
async def handle_event():
second_event_did_execute.set()
return idom.html.div(
idom.html.button({"onClick": block_forever}),
idom.html.button({"onClick": handle_event}),
)
send_queue = asyncio.Queue()
recv_queue = asyncio.Queue()
asyncio.ensure_future(
serve_json_patch(
idom.Layout(ComponentWithTwoEventHandlers()),
send_queue.put,
recv_queue.get,
)
)
await recv_queue.put(LayoutEvent(blocked_handler.target, []))
await will_block.wait()
await recv_queue.put(LayoutEvent(non_blocked_handler.target, []))
await second_event_did_execute.wait()
```
#### File: idom/tests/test_html.py
```python
import pytest
from idom import component, config, html, use_state
from idom.testing import DisplayFixture, poll
from idom.utils import Ref
def use_toggle(initial=True):
state, set_state = use_state(initial)
return state, lambda: set_state(not state)
def use_counter(initial_value):
state, set_state = use_state(initial_value)
return state, lambda: set_state(state + 1)
async def test_script_mount_unmount(display: DisplayFixture):
toggle_is_mounted = Ref()
@component
def Root():
is_mounted, toggle_is_mounted.current = use_toggle()
return html.div(
html.div({"id": "mount-state", "data-value": False}),
HasScript() if is_mounted else html.div(),
)
@component
def HasScript():
return html.script(
"""() => {
const mapping = {"false": false, "true": true};
const mountStateEl = document.getElementById("mount-state");
mountStateEl.setAttribute(
"data-value", !mapping[mountStateEl.getAttribute("data-value")]);
return () => mountStateEl.setAttribute(
"data-value", !mapping[mountStateEl.getAttribute("data-value")]);
}"""
)
await display.show(Root)
mount_state = await display.page.wait_for_selector("#mount-state", state="attached")
poll_mount_state = poll(mount_state.get_attribute, "data-value")
await poll_mount_state.until_equals("true")
toggle_is_mounted.current()
await poll_mount_state.until_equals("false")
toggle_is_mounted.current()
await poll_mount_state.until_equals("true")
async def test_script_re_run_on_content_change(display: DisplayFixture):
incr_count = Ref()
@component
def HasScript():
count, incr_count.current = use_counter(1)
return html.div(
html.div({"id": "mount-count", "data-value": 0}),
html.div({"id": "unmount-count", "data-value": 0}),
html.script(
f"""() => {{
const mountCountEl = document.getElementById("mount-count");
const unmountCountEl = document.getElementById("unmount-count");
mountCountEl.setAttribute("data-value", {count});
return () => unmountCountEl.setAttribute("data-value", {count});
}}"""
),
)
await display.show(HasScript)
mount_count = await display.page.wait_for_selector("#mount-count", state="attached")
poll_mount_count = poll(mount_count.get_attribute, "data-value")
unmount_count = await display.page.wait_for_selector(
"#unmount-count", state="attached"
)
poll_unmount_count = poll(unmount_count.get_attribute, "data-value")
await poll_mount_count.until_equals("1")
await poll_unmount_count.until_equals("0")
incr_count.current()
await poll_mount_count.until_equals("2")
await poll_unmount_count.until_equals("1")
incr_count.current()
await poll_mount_count.until_equals("3")
await poll_unmount_count.until_equals("2")
async def test_script_from_src(display: DisplayFixture):
incr_src_id = Ref()
file_name_template = "__some_js_script_{src_id}__.js"
@component
def HasScript():
src_id, incr_src_id.current = use_counter(0)
if src_id == 0:
# on initial display we haven't added the file yet.
return html.div()
else:
return html.div(
html.div({"id": "run-count", "data-value": 0}),
html.script(
{"src": f"/_api/modules/{file_name_template.format(src_id=src_id)}"}
),
)
await display.show(HasScript)
for i in range(1, 4):
script_file = config.IDOM_WEB_MODULES_DIR.current / file_name_template.format(
src_id=i
)
script_file.write_text(
f"""
let runCountEl = document.getElementById("run-count");
runCountEl.setAttribute("data-value", {i});
"""
)
incr_src_id.current()
run_count = await display.page.wait_for_selector("#run-count", state="attached")
poll_run_count = poll(run_count.get_attribute, "data-value")
await poll_run_count.until_equals("1")
def test_script_may_only_have_one_child():
with pytest.raises(ValueError, match="'script' nodes may have, at most, one child"):
html.script("one child", "two child")
def test_child_of_script_must_be_string():
with pytest.raises(ValueError, match="The child of a 'script' must be a string"):
html.script(1)
def test_simple_fragment():
assert html._() == {"tagName": ""}
assert html._(1, 2, 3) == {"tagName": "", "children": [1, 2, 3]}
assert html._(key="something") == {"tagName": "", "key": "something"}
assert html._(1, 2, 3, key="something") == {
"tagName": "",
"key": "something",
"children": [1, 2, 3],
}
def test_fragment_can_have_no_attributes():
with pytest.raises(TypeError, match="Fragments cannot have attributes"):
html._({"some-attribute": 1})
```
#### File: tests/test_server/test_utils.py
```python
import asyncio
import threading
import time
from contextlib import ExitStack
import pytest
from playwright.async_api import Page
from idom.backend import flask as flask_implementation
from idom.backend.utils import find_available_port
from idom.backend.utils import run as sync_run
from idom.backend.utils import traversal_safe_path
from idom.sample import SampleApp as SampleApp
from tests.tooling.loop import open_event_loop
@pytest.fixture
def exit_stack():
with ExitStack() as es:
yield es
def test_find_available_port():
assert find_available_port("localhost", port_min=5000, port_max=6000)
with pytest.raises(RuntimeError, match="no available port"):
# check that if port range is exhausted we raise
find_available_port("localhost", port_min=0, port_max=0)
async def test_run(page: Page, exit_stack: ExitStack):
loop = exit_stack.enter_context(open_event_loop(as_current=False))
host = "127.0.0.1"
port = find_available_port(host)
url = f"http://{host}:{port}"
def run_in_thread():
asyncio.set_event_loop(loop)
sync_run(
SampleApp,
host,
port,
implementation=flask_implementation,
)
threading.Thread(target=run_in_thread, daemon=True).start()
# give the server a moment to start
time.sleep(0.5)
await page.goto(url)
await page.wait_for_selector("#sample")
@pytest.mark.parametrize(
"bad_path",
[
"../escaped",
"ok/../../escaped",
"ok/ok-again/../../ok-yet-again/../../../escaped",
],
)
def test_catch_unsafe_relative_path_traversal(tmp_path, bad_path):
with pytest.raises(ValueError, match="Unsafe path"):
traversal_safe_path(tmp_path, *bad_path.split("/"))
```
#### File: tests/tooling/loop.py
```python
import asyncio
import sys
import threading
import time
from asyncio import wait_for
from contextlib import contextmanager
from typing import Iterator
from idom.config import IDOM_TESTING_DEFAULT_TIMEOUT
TIMEOUT = 3
@contextmanager
def open_event_loop(as_current: bool = True) -> Iterator[asyncio.AbstractEventLoop]:
"""Open a new event loop and cleanly stop it
Args:
as_current: whether to make this loop the current loop in this thread
"""
loop = asyncio.new_event_loop()
try:
if as_current:
asyncio.set_event_loop(loop)
loop.set_debug(True)
yield loop
finally:
try:
_cancel_all_tasks(loop, as_current)
if as_current:
loop.run_until_complete(wait_for(loop.shutdown_asyncgens(), TIMEOUT))
if sys.version_info >= (3, 9):
# shutdown_default_executor only available in Python 3.9+
loop.run_until_complete(
wait_for(loop.shutdown_default_executor(), TIMEOUT)
)
finally:
if as_current:
asyncio.set_event_loop(None)
start = time.time()
while loop.is_running():
if (time.time() - start) > IDOM_TESTING_DEFAULT_TIMEOUT.current:
raise TimeoutError(
"Failed to stop loop after "
f"{IDOM_TESTING_DEFAULT_TIMEOUT.current} seconds"
)
time.sleep(0.1)
loop.close()
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop, is_current: bool) -> None:
to_cancel = asyncio.all_tasks(loop)
if not to_cancel:
return
done = threading.Event()
count = len(to_cancel)
def one_task_finished(future):
nonlocal count
count -= 1
if count == 0:
done.set()
for task in to_cancel:
loop.call_soon_threadsafe(task.cancel)
task.add_done_callback(one_task_finished)
if is_current:
loop.run_until_complete(
wait_for(
asyncio.gather(*to_cancel, loop=loop, return_exceptions=True), TIMEOUT
)
)
else:
# user was responsible for cancelling all tasks
if not done.wait(timeout=3):
raise TimeoutError("Could not stop event loop in time")
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during event loop shutdown",
"exception": task.exception(),
"task": task,
}
)
``` |
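A quick, hypothetical use of the context manager above; the import path matches the one used by the test module earlier in this collection, and the coroutine is a stand-in for real test work.
```python
import asyncio

from tests.tooling.loop import open_event_loop  # path as laid out above

async def do_work():
    await asyncio.sleep(0.01)
    return 42

# The helper owns loop setup and shutdown, including cancelling leftover tasks.
with open_event_loop() as loop:
    assert loop.run_until_complete(do_work()) == 42
```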
{
"source": "jmtchllrx/pyMuse",
"score": 2
} |
#### File: hsaudiotag/tests/ogg_test.py
```python
from .. import ogg
from .util import TestData, eq_
def test_page_valid_on_test1():
fp = open(TestData.filepath('ogg/test1.ogg'), 'rb')
page = ogg.VorbisPage(fp)
assert page.valid
eq_(0, page.page_number)
eq_(0, page.position)
eq_(30, page.size)
fp.seek(page.start_offset + page.header_size)
data = fp.read(page.size)
eq_(data, page.read())
page = next(page)
assert page.valid
eq_(1, page.page_number)
eq_(0, page.position)
eq_(0x10f1, page.size)
page = next(page)
assert page.valid
eq_(2, page.page_number)
eq_(0, page.position)
eq_(0x91, page.size)
page = next(page)
assert page.valid
eq_(3, page.page_number)
eq_(0x2800, page.position)
eq_(0x1019, page.size)
fp.close()
def test_file_valid_on_test1():
o = ogg.Vorbis(TestData.filepath('ogg/test1.ogg'))
eq_(o.size, 101785)
eq_(o.bitrate, 160)
eq_(o.sample_rate, 44100)
eq_(o.sample_count, 0x6d3eae)
eq_(o.duration, 162)
eq_(o.artist, 'The White Stripes')
eq_(o.album, 'The White Stripes')
eq_(o.title, 'Astro')
eq_(o.genre, '')
eq_(o.comment, '')
eq_(o.year, '1999')
eq_(o.track, 8)
eq_(o.audio_offset, 0x1158)
eq_(o.audio_size, 101785 - 0x1158)
def test_file_valid_on_test2():
o = ogg.Vorbis(TestData.filepath('ogg/test2.ogg'))
eq_(103168, o.size)
eq_(199, o.bitrate)
eq_(44100, o.sample_rate)
eq_(0xb2a2c8, o.sample_count)
eq_(265, o.duration)
eq_('<NAME>', o.artist)
eq_('Le coeur dans la t\u00eate', o.album)
eq_('Le coeur dans la t\u00eate', o.title)
eq_('Pop', o.genre)
eq_('', o.comment)
eq_('2005', o.year)
eq_(3, o.track)
eq_(0xf79, o.audio_offset)
eq_(103168 - 0xf79, o.audio_size)
def test_lowercase_fieldnames():
# Support ogg files with lowercase fieldnames (artist, album, etc.)
o = ogg.Vorbis(TestData.filepath('ogg/lowercase.ogg'))
eq_(o.artist, 'The White Stripes')
eq_(o.album, 'The White Stripes')
eq_(o.title, 'Astro')
def test_track_with_slash():
# A track number field with a slash (for example, 1/20) is supported and will return the first
# number of the field.
# FILE NOTE: Because I had added 4 bytes to the TRACKNUMBER field in the test file and that I
# wasn't sure where I had to adjust the vorbis comment offset other than just in front of the
# field, I removed 4 bytes in the otherwise unused TRACKTOTAL (now TRACKT) field.
o = ogg.Vorbis(TestData.filepath('ogg/track_with_slash.ogg'))
eq_(o.track, 18)
def test_small():
# Previously, a small (<64kb) OGG file couldn't be read due to a hardcoded 64kb offset. Tix #2.
o = ogg.Vorbis(TestData.filepath('ogg/small.ogg'))
eq_(o.bitrate, 60)
eq_(o.duration, 4)
def verify_emptyness(o):
eq_(0, o.bitrate)
eq_(0, o.sample_rate)
eq_(0, o.sample_count)
eq_(0, o.duration)
eq_('', o.artist)
eq_('', o.album)
eq_('', o.title)
eq_('', o.genre)
eq_('', o.comment)
eq_('', o.year)
eq_(0, o.track)
eq_(0, o.audio_offset)
eq_(0, o.audio_size)
def test_invalid_zerofile():
o = ogg.Vorbis(TestData.filepath('zerofile'))
verify_emptyness(o)
def test_invalid_zerofill():
o = ogg.Vorbis(TestData.filepath('zerofill'))
verify_emptyness(o)
def test_invalid_randomfile():
o = ogg.Vorbis(TestData.filepath('randomfile'))
verify_emptyness(o)
def test_invalid_mp3():
o = ogg.Vorbis(TestData.filepath('mpeg/test1.mp3'))
verify_emptyness(o)
def test_invalid_wma():
o = ogg.Vorbis(TestData.filepath('wma/test1.wma'))
verify_emptyness(o)
def test_invalid_mp4():
o = ogg.Vorbis(TestData.filepath('mp4/test1.m4a'))
verify_emptyness(o)
``` |
{
"source": "jmtcsngr/htsget-compliance",
"score": 2
} |
#### File: compliance/config/methods.py
```python
from ga4gh.htsget.compliance.config import constants as c
def format_url(test_case, kwargs, use_reads=True):
datatype_urlpath = kwargs["reads_base_path"] \
if use_reads \
else kwargs["variants_base_path"]
template = c.BASE_URL + datatype_urlpath + c.ID_URLPATH
url = template.format(**{
"base_url": kwargs["htsget_url"],
"obj_id": test_case["obj_id"]
})
return url
def format_reads_url(test_case, kwargs):
return format_url(test_case, kwargs)
def format_variants_url(test_case, kwargs):
return format_url(test_case, kwargs, use_reads=False)
FORMAT_READS_URL = format_reads_url
FORMAT_VARIANTS_URL = format_variants_url
```
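An illustrative call to the formatter above; the keyword arguments and object id are placeholders, and the exact output depends on the URL templates defined in `config.constants`.
```python
from ga4gh.htsget.compliance.config.methods import format_reads_url

test_case = {"obj_id": "some-read-object-id"}
kwargs = {
    "htsget_url": "http://localhost:3000",
    "reads_base_path": "/reads/",
    "variants_base_path": "/variants/",
}

# Expected to resolve to something like http://localhost:3000/reads/some-read-object-id
print(format_reads_url(test_case, kwargs))
```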
#### File: htsget/compliance/schema_validator.py
```python
import inspect
import json
import os
from jsonschema import validate
from jsonschema import RefResolver
from jsonschema.exceptions import ValidationError
from ga4gh.htsget.compliance.config import constants as c
class SchemaValidator(object):
"""Validates htsget response matches JSON schema
Attributes:
SUCCESS (int): constant. indicates successful validation
FAILURE (int): constant. indicates unsuccessful validation
schema_file (str): filename containing JSON schema
schema_dir (str): path to local dir containing htsget JSON schemas
schema_path (str): full path to htsget response JSON schema file
resolver (RefResolver): resolves external references to the schema dir
schema_json (dict): loaded htsget response JSON schema
"""
SUCCESS = 1
FAILURE = -1
def __init__(self):
"""Instantiates a SchemaValidator object"""
self.schema_file = c.SCHEMA_HTSGET_RESPONSE
self.schema_dir = os.path.join(
os.path.dirname(
os.path.dirname(inspect.getmodule(self).__file__)
),
"schemas"
)
self.schema_path = os.path.join(self.schema_dir, self.schema_file)
self.resolver = RefResolver('file://{}/'.format(self.schema_dir), None)
self.schema_json = json.loads(open(self.schema_path, 'r').read())
def validate_instance(self, instance_json):
"""Validate a JSON object/response against the htsget response schema
Args:
instance_json (dict): loaded JSON object to validate
Returns:
dict: contains success/failure of validation, and message
"""
# setup validation object
# test status initialized as passing
validation_result = {
"status": SchemaValidator.SUCCESS,
"exception_class": "",
"message": ""
}
try:
# api method to compare json instance to the schema
validate(instance=instance_json, schema=self.schema_json,
resolver=self.resolver)
except ValidationError as e:
# if the api method raises an error, the result dictionary set
# to include failure status and error message
validation_result["status"] = SchemaValidator.FAILURE
validation_result["exception_class"] = str(e.__class__.__name__)
validation_result["message"] = e.message
return validation_result
```
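A minimal sketch of validating a hand-written response against the bundled schema; the response body below is only a guess at what a valid htsget ticket looks like.
```python
from ga4gh.htsget.compliance.schema_validator import SchemaValidator

response_json = {
    "htsget": {
        "format": "BAM",
        "urls": [{"url": "https://example.org/reads/data-block-0"}],
    }
}

validator = SchemaValidator()
result = validator.validate_instance(response_json)
if result["status"] == SchemaValidator.FAILURE:
    print("validation failed:", result["message"])
else:
    print("response matches the htsget response schema")
```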
#### File: htsget/compliance/test_case.py
```python
import json
import requests
from ga4gh.htsget.compliance.config import constants as c
from ga4gh.htsget.compliance.schema_validator import SchemaValidator
from ga4gh.htsget.compliance.file_validator import FileValidator
from ga4gh.htsget.compliance.filepart_aggregator import FilepartAggregator
from ga4gh.testbed.models.report_case import ReportCase
class TestCase(object):
def __init__(self, props, kwargs):
"""Instantiates a TestCase object"""
self.set_name(props["name"])
self.set_url_function(props["url_function"])
self.set_url_params(props["url_params"])
self.set_obj_id(props["obj_id"])
self.set_expected_response_status(props["expected_response_status"])
self.set_expected_contents(props["expected_contents"])
self.set_kwargs(kwargs)
def validate_response_code(self, response):
if self.get_expected_response_status() != response.status_code:
raise Exception("incorrect status code")
def validate_response_schema(self, response):
response_json = response.json()
sv = SchemaValidator()
validation_result = sv.validate_instance(response_json)
if validation_result["status"] == SchemaValidator.FAILURE:
raise Exception(validation_result["message"])
def validate_file_contents(self, response):
aggregator = FilepartAggregator(response)
aggregator.aggregate()
returned_filepath = aggregator.get_output_filepath()
expected_filepath = self.get_expected_contents()
file_validator = FileValidator(returned_filepath, expected_filepath)
validation_result = file_validator.validate()
if validation_result == FileValidator.FAILURE:
raise Exception("returned file does not match expected")
def execute_test(self):
report_case = ReportCase()
report_case.set_name(self.get_name())
try:
url = self.get_formatted_url()
params = self.get_url_params()
report_case.add_debug_msg("URL: " + url)
report_case.add_debug_msg("PARAMS: " + str(params))
response = requests.get(url, params=params)
self.validate_response_code(response)
self.validate_response_schema(response)
self.validate_file_contents(response)
report_case.set_status_success()
except Exception as e:
# any raised exceptions will set the ReportCase status to failure
report_case.set_status_failure()
report_case.set_error(str(e))
return report_case
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_url_function(self, url_function):
self.url_function = url_function
def get_url_function(self):
return self.url_function
def set_url_params(self, url_params):
self.url_params = url_params
def get_url_params(self):
return self.url_params
def get_formatted_url(self):
func = self.get_url_function()
return func(self.get_props_dict(), self.get_kwargs())
def set_obj_id(self, obj_id):
self.obj_id = obj_id
def get_obj_id(self):
return self.obj_id
def set_expected_response_status(self, expected_response_status):
self.expected_response_status = expected_response_status
def get_expected_response_status(self):
return self.expected_response_status
def set_expected_contents(self, expected_contents):
self.expected_contents = expected_contents
def get_expected_contents(self):
return self.expected_contents
def get_props_dict(self):
return {
"name": self.get_name(),
"url_function": self.get_url_function(),
"obj_id": self.get_obj_id(),
"expected_response_status": self.get_expected_response_status()
}
def set_kwargs(self, kwargs):
self.kwargs = kwargs
def get_kwargs(self):
return self.kwargs
```
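A hypothetical pairing of `props` and `kwargs` for the class above, wired to the URL formatter from `config.methods`; every concrete value (ids, paths, ports) is a placeholder.
```python
from ga4gh.htsget.compliance.config import methods
from ga4gh.htsget.compliance.test_case import TestCase

props = {
    "name": "reads: basic ticket request",
    "url_function": methods.FORMAT_READS_URL,
    "url_params": {},
    "obj_id": "some-read-object-id",
    "expected_response_status": 200,
    "expected_contents": "/path/to/expected_file.bam",
}
kwargs = {
    "htsget_url": "http://localhost:3000",
    "reads_base_path": "/reads/",
    "variants_base_path": "/variants/",
}

# execute_test() issues the request, runs the three validations, and returns
# a ReportCase describing success or failure.
report_case = TestCase(props, kwargs).execute_test()
```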
#### File: testbed/models/report_summary.py
```python
from ga4gh.testbed import constants as c
class ReportSummary(object):
"""Summarizes statuses of all cases in a group or report
Attributes:
run (int): total number of cases run
passed (int): total number of cases passed
warned (int): total number of cases passed with warning(s)
failed (int): total number of cases failed
skipped (int): total number of cases skipped
increment_dict (dict): increments the correct attribute by status code
"""
def __init__(self):
"""Instantiate a ReportSummary"""
self.run = 0
self.passed = 0
self.warned = 0
self.failed = 0
self.skipped = 0
self.increment_dict = {
c.RESULT_SUCCESS: self.increment_passed,
c.RESULT_WARNING: self.increment_warned,
c.RESULT_FAILURE: self.increment_failed,
c.RESULT_SKIPPED: self.increment_skipped
}
def increment_run(self):
"""increment run count by 1"""
self.run += 1
def get_run(self):
"""get run count
Returns:
int: run count
"""
return self.run
def increment_passed(self):
"""increment pass count by 1"""
self.passed += 1
def get_passed(self):
"""get passed count
Returns:
int: passed count
"""
return self.passed
def increment_warned(self):
"""increment warn count by 1"""
self.warned += 1
def get_warned(self):
"""get warn count
Returns:
int: warn count
"""
return self.warned
def increment_failed(self):
"""increment fail count by 1"""
self.failed += 1
def get_failed(self):
"""get fail count
Returns:
int: fail count
"""
return self.failed
def increment_skipped(self):
"""increment skip count by 1"""
self.skipped += 1
def get_skipped(self):
"""get skip count
Returns:
int: skip count
"""
return self.skipped
def increment(self, status):
"""increment run and status-specific counts by 1
For a given status code, increment the total number of cases run, as
well as the count for that status by 1
Args:
status (int): status code to increment
"""
self.increment_run()
self.increment_dict[status]()
def add_from_summary(self, summary_obj):
"""add all counts from another summary to the existing summary
Used when aggregating summaries from multiple groups for the overall
report.
Args:
summary_obj (ReportSummary): another ReportSummary object
"""
self.run += summary_obj.run
self.passed += summary_obj.passed
self.warned += summary_obj.warned
self.failed += summary_obj.failed
self.skipped += summary_obj.skipped
def as_json(self):
"""Dump ReportSummary as simple python dictionary
The returned dictionary can be easily converted to JSON via
'json.dumps', either on its own or as part of a larger object (e.g.
ReportGroup, Report)
Returns:
dict: ReportSummary object as python dictionary
"""
return {
"run": self.run,
"passed": self.passed,
"warned": self.warned,
"failed": self.failed,
"skipped": self.skipped
}
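# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): drive the
# counters with the status constants assumed to exist in ga4gh.testbed.constants.
if __name__ == "__main__":
    summary = ReportSummary()
    for status in [c.RESULT_SUCCESS, c.RESULT_SUCCESS, c.RESULT_FAILURE]:
        summary.increment(status)      # bumps 'run' and the matching status bucket
    print(summary.as_json())           # e.g. {'run': 3, 'passed': 2, 'failed': 1, ...}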
``` |
{
"source": "jmtd/cekit",
"score": 3
} |
#### File: cekit/concreate/generator.py
```python
import logging
import os
import subprocess
from jinja2 import Environment, FileSystemLoader
from concreate import tools
from concreate.descriptor import Descriptor
from concreate.errors import ConcreateError
from concreate.module import copy_module_to_target
from concreate.template_helper import TemplateHelper
logger = logging.getLogger('concreate')
class Generator(object):
def __init__(self, descriptor_path, target, overrides):
self.descriptor = Descriptor(descriptor_path, 'image').process()
self.target = target
self.effective_descriptor = self.descriptor
if overrides:
self.effective_descriptor = self.override(overrides)
def prepare_modules(self, descriptor=None):
"""
Prepare module to be used for Dockerfile generation.
This means:
1. Place module to args.target/image/modules/ directory
2. Fetch its artifacts to target/image/sources directory
        3. Merge modules descriptor with image descriptor
Arguments:
descriptor: Module descriptor used to dig required modules,
if descriptor is not provided image descriptor is used.
"""
if not descriptor:
descriptor = self.descriptor
        # If the descriptor doesn't require any modules we can start merging descriptors
        # and fetching artifacts. There is nothing left to do except for this
if 'modules' not in descriptor:
self.effective_descriptor.merge(descriptor)
return
for module in descriptor['modules']:
version = None
if 'version' in module:
version = module['version']
req_module = copy_module_to_target(module['name'],
version,
os.path.join(self.target, 'image', 'modules'))
# If there is any required module it needs to be prepared too
self.prepare_modules(req_module.descriptor)
self.effective_descriptor.merge(descriptor)
def fetch_artifacts(self):
""" Goes through artifacts section of image descriptor
and fetches all of them
"""
if 'artifacts' not in self.descriptor:
return
artifacts = self.descriptor['artifacts']
for artifact_dict in artifacts:
artifact = tools.Artifact(artifact_dict)
artifact.fetch()
def override(self, overrides_path):
logger.info("Using overrides file from '%s'." % overrides_path)
descriptor = Descriptor(overrides_path, 'overrides').process()
descriptor.merge(self.effective_descriptor)
return descriptor
def render_dockerfile(self):
""" Renders Dockerfile to $target/image/Dockerfile
Args:
template_file - a path to jinja2 template file
"""
logger.info("Rendering Dockerfile...")
template_file = os.path.join(os.path.dirname(__file__),
'templates',
'template.jinja')
loader = FileSystemLoader(os.path.dirname(template_file))
env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
env.globals['helper'] = TemplateHelper()
template = env.get_template(os.path.basename(template_file))
dockerfile = os.path.join(self.target,
'image',
'Dockerfile')
with open(dockerfile, 'wb') as f:
f.write(template.render(self.effective_descriptor.descriptor).encode('utf-8'))
logger.debug("Done")
def prepare_repositories(self):
"""Udates descriptor with added repositories"""
self.descriptor['additional_repos'] = \
tools.prepare_external_repositories(os.path.join(self.target,
'image'))
def build(self):
"""
        After the source files are generated, the container image can be built.
        We're using Docker to build the image currently.
        Built image will be available under two tags:
1. version defined in the image descriptor
2. 'latest'
"""
# Desired tag of the image
tag = "%s:%s" % (self.effective_descriptor['name'], self.effective_descriptor['version'])
latest_tag = "%s:latest" % self.effective_descriptor['name']
logger.info("Building %s container image..." % tag)
ret = subprocess.call(["docker", "build", "-t", tag, "-t", latest_tag, os.path.join(self.target, 'image')])
if ret == 0:
logger.info("Image built and available under following tags: %s and %s" % (tag, latest_tag))
else:
raise ConcreateError("Image build failed, see logs above.")
```
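For orientation, a minimal driver for the `Generator` class above might look like the sketch below. The descriptor, target and overrides paths are placeholders, and the `concreate.generator` import path is assumed from the file location; this is not part of the original repository.
```python
# Hypothetical driver for Generator (paths below are placeholders).
from concreate.generator import Generator

gen = Generator('image.yaml', target='target', overrides='overrides.yaml')
gen.prepare_modules()       # merge module descriptors into the effective descriptor
gen.fetch_artifacts()       # download artifacts listed in the image descriptor
gen.prepare_repositories()  # add any external repositories to the descriptor
gen.render_dockerfile()     # write target/image/Dockerfile from the jinja template
gen.build()                 # docker build, tagged <name>:<version> and <name>:latest
```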
#### File: cekit/tests/test_unit_descriptor.py
```python
import os
import tempfile
import unittest
import yaml
from concreate import descriptor
from concreate.errors import ConcreateError
class TestMergingDictionaries(unittest.TestCase):
def test_merging_plain_dictionaries(self):
dict1 = {'a': 1,
'b': 2}
dict2 = {'b': 5,
'c': 3}
expected = {'a': 1,
'b': 2,
'c': 3}
self.assertEqual(expected,
descriptor.merge_dictionaries(dict1, dict2))
def test_merging_emdedded_dictionaires(self):
dict1 = {'a': 1,
'b': {'b1': 10,
'b2': 20}}
dict2 = {'b': {'b2': 50,
'b3': 30},
'c': 3}
expected = {'a': 1,
'b': {'b1': 10,
'b2': 20,
'b3': 30},
'c': 3}
self.assertEqual(expected,
descriptor.merge_dictionaries(dict1, dict2))
class TestMergingLists(unittest.TestCase):
def test_descriptor_schema_version(self):
img_descriptor = descriptor.Descriptor.__new__(descriptor.Descriptor)
img_descriptor.descriptor = {'schema_version': 1}
img_descriptor.check_schema_version()
def test_descriptor_schema_version_bad_version(self):
img_descriptor = descriptor.Descriptor.__new__(descriptor.Descriptor)
img_descriptor.descriptor = {'schema_version': 123}
with self.assertRaises(ConcreateError):
img_descriptor.check_schema_version()
def test_merging_plain_lists(self):
list1 = [1, 2, 3]
list2 = [2, 3, 4, 5]
expected = [1, 2, 3, 4, 5]
self.assertEqual(descriptor.merge_lists(list1, list2),
expected)
def test_merging_plain_list_oflist(self):
list1 = [1, 2, 3]
list2 = [3, 4, []]
with self.assertRaises(ConcreateError):
descriptor.merge_lists(list1, list2)
def test_merging_list_of_dictionaries(self):
list1 = [{'name': 1,
'a': 1,
'b': 2}, 'a']
list2 = [{'name': 1,
'b': 3,
'c': 3},
{'name': 2,
'a': 123}]
expected = [{'name': 1,
'a': 1,
'b': 2,
'c': 3},
'a',
{'name': 2,
'a': 123}]
self.assertEqual(expected,
descriptor.merge_lists(list1, list2))
class TestLabels(unittest.TestCase):
def setUp(self):
_, self.descriptor = tempfile.mkstemp()
def tearDown(self):
os.remove(self.descriptor)
def prepare_descriptor(self, data={}):
image = {'name': 'image/name', 'version': 1.0,
'from': 'from/image', 'schema_version': 1}
image.update(data)
with open(self.descriptor, 'w') as outfile:
yaml.dump(image, outfile, default_flow_style=False)
def test_no_labels_should_be_added(self):
self.prepare_descriptor()
img_descriptor = descriptor.Descriptor(self.descriptor, 'image')
img_descriptor._process_labels()
self.assertIsNone(img_descriptor.label('description'))
self.assertIsNone(img_descriptor.label('summary'))
self.assertIsNone(img_descriptor.label('maintainer'))
def test_description_label_should_be_added(self):
self.prepare_descriptor({'description': 'This is image description'})
img_descriptor = descriptor.Descriptor(self.descriptor, 'image')
img_descriptor._process_labels()
self.assertIsNone(img_descriptor.label('maintainer'))
self.assertEqual(img_descriptor.label('description').get(
'value'), 'This is image description')
# In this case the 'summary' label should be also set
self.assertEqual(img_descriptor.label('summary').get(
'value'), 'This is image description')
def test_description_and_summary_labels_should_not_be_overriden(self):
self.prepare_descriptor({'description': 'This is image description', 'labels': [
{'name': 'summary', 'value': 'summary value'},
{'name': 'description', 'value': 'description value'}]})
img_descriptor = descriptor.Descriptor(self.descriptor, 'image')
img_descriptor._process_labels()
self.assertIsNone(img_descriptor.label('maintainer'))
self.assertEqual(img_descriptor.label(
'description').get('value'), 'description value')
self.assertEqual(img_descriptor.label(
'summary').get('value'), 'summary value')
``` |
{
"source": "jmtomczak/git_flow",
"score": 2
} |
#### File: git_flow/models/idf.py
```python
import numpy as np
import torch
import torch.nn as nn
from utils.nn import RoundStraightThrough
class IDF(nn.Module):
def __init__(self, nett, num_flows, D=2):
super(IDF, self).__init__()
print('IDF by JT.')
self.t = torch.nn.ModuleList([nett() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb) = torch.chunk(x, 2, 1)
t = self.t[index](xa)
if forward:
yb = xb + self.round(t)
else:
yb = xb - self.round(t)
return torch.cat((xa, yb), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum(1)
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF2(nn.Module):
def __init__(self, nett_a, nett_b, num_flows, D=2):
super(IDF2, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb) = torch.chunk(x, 2, 1)
if forward:
ya = xa + self.round(self.t_a[index](xb))
yb = xb + self.round(self.t_b[index](ya))
else:
yb = xb - self.round(self.t_b[index](xa))
ya = xa - self.round(self.t_a[index](yb))
return torch.cat((ya, yb), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum()
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF4(nn.Module):
def __init__(self, nett_a, nett_b, nett_c, nett_d, num_flows, D=2):
super(IDF4, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.t_c = torch.nn.ModuleList([nett_c() for _ in range(num_flows)])
self.t_d = torch.nn.ModuleList([nett_d() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb, xc, xd) = torch.chunk(x, 4, 1)
if forward:
ya = xa + self.round(self.t_a[index](torch.cat((xb, xc, xd), 1)))
yb = xb + self.round(self.t_b[index](torch.cat((ya, xc, xd), 1)))
yc = xc + self.round(self.t_c[index](torch.cat((ya, yb, xd), 1)))
yd = xd + self.round(self.t_d[index](torch.cat((ya, yb, yc), 1)))
else:
yd = xd - self.round(self.t_d[index](torch.cat((xa, xb, xc), 1)))
yc = xc - self.round(self.t_c[index](torch.cat((xa, xb, yd), 1)))
yb = xb - self.round(self.t_b[index](torch.cat((xa, yc, yd), 1)))
ya = xa - self.round(self.t_a[index](torch.cat((yb, yc, yd), 1)))
return torch.cat((ya, yb, yc, yd), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum()
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
class IDF8(nn.Module):
def __init__(self, nett_a, nett_b, nett_c, nett_d, nett_e, nett_f, nett_g, nett_h, num_flows, D=2):
super(IDF8, self).__init__()
print('IDF by JT.')
self.t_a = torch.nn.ModuleList([nett_a() for _ in range(num_flows)])
self.t_b = torch.nn.ModuleList([nett_b() for _ in range(num_flows)])
self.t_c = torch.nn.ModuleList([nett_c() for _ in range(num_flows)])
self.t_d = torch.nn.ModuleList([nett_d() for _ in range(num_flows)])
self.t_e = torch.nn.ModuleList([nett_e() for _ in range(num_flows)])
self.t_f = torch.nn.ModuleList([nett_f() for _ in range(num_flows)])
self.t_g = torch.nn.ModuleList([nett_g() for _ in range(num_flows)])
self.t_h = torch.nn.ModuleList([nett_h() for _ in range(num_flows)])
self.num_flows = num_flows
self.round = RoundStraightThrough.apply
self.p = nn.Parameter(torch.zeros(1, D))
self.mu = nn.Parameter(torch.ones(1, D) * 0.5)
def coupling(self, x, index, forward=True):
(xa, xb, xc, xd, xe, xf, xg, xh) = torch.chunk(x, 8, 1)
if forward:
ya = xa + self.round(self.t_a[index](torch.cat((xb, xc, xd, xe, xf, xg, xh), 1)))
yb = xb + self.round(self.t_b[index](torch.cat((ya, xc, xd, xe, xf, xg, xh), 1)))
yc = xc + self.round(self.t_c[index](torch.cat((ya, yb, xd, xe, xf, xg, xh), 1)))
yd = xd + self.round(self.t_d[index](torch.cat((ya, yb, yc, xe, xf, xg, xh), 1)))
ye = xe + self.round(self.t_e[index](torch.cat((ya, yb, yc, yd, xf, xg, xh), 1)))
yf = xf + self.round(self.t_f[index](torch.cat((ya, yb, yc, yd, ye, xg, xh), 1)))
yg = xg + self.round(self.t_g[index](torch.cat((ya, yb, yc, yd, ye, yf, xh), 1)))
yh = xh + self.round(self.t_h[index](torch.cat((ya, yb, yc, yd, ye, yf, yg), 1)))
else:
yh = xh - self.round(self.t_h[index](torch.cat((xa, xb, xc, xd, xe, xf, xg), 1)))
yg = xg - self.round(self.t_g[index](torch.cat((xa, xb, xc, xd, xe, xf, yh), 1)))
yf = xf - self.round(self.t_f[index](torch.cat((xa, xb, xc, xd, xe, yg, yh), 1)))
ye = xe - self.round(self.t_e[index](torch.cat((xa, xb, xc, xd, yf, yg, yh), 1)))
yd = xd - self.round(self.t_d[index](torch.cat((xa, xb, xc, ye, yf, yg, yh), 1)))
yc = xc - self.round(self.t_c[index](torch.cat((xa, xb, yd, ye, yf, yg, yh), 1)))
yb = xb - self.round(self.t_b[index](torch.cat((xa, yc, yd, ye, yf, yg, yh), 1)))
ya = xa - self.round(self.t_a[index](torch.cat((yb, yc, yd, ye, yf, yg, yh), 1)))
return torch.cat((ya, yb, yc, yd, ye, yf, yg, yh), 1)
def permute(self, x):
return x.flip(1)
def f(self, x):
z = x
for i in range(self.num_flows):
z = self.coupling(z, i, forward=True)
z = self.permute(z)
return z
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z = self.f(x)
return self.log_prior(z)
def sample(self, batchSize, D=2, intMax=100):
# sample z:
z = self.prior_sample(batchSize=batchSize, D=D, intMax=intMax)
# x = f^-1(z)
x = self.f_inv(z)
return x.view(batchSize, 1, D)
def log_integer_probability(self, x, p, mu):
# Chakraborty & Chakravarty, "A new discrete probability distribution with integer support on (−∞, ∞)",
# Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743
log_p = torch.log(1. - p) + (x - mu) * torch.log(p) \
- torch.log(1. + torch.exp((x - mu) * torch.log(p))) \
- torch.log(1. + torch.exp((x - mu + 1.) * torch.log(p)))
return log_p
def log_prior(self, x):
p = torch.sigmoid(self.p)
log_p = self.log_integer_probability(x, p, self.mu)
return log_p.sum()
def prior_sample(self, batchSize, D=2, intMax=100):
ints = np.expand_dims(np.arange(-intMax, intMax + 1), 0)
for d in range(D):
p = torch.sigmoid(self.p[:, [d]])
mu = self.mu[:, d]
log_p = self.log_integer_probability(torch.from_numpy(ints), p, mu)
if d == 0:
z = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
else:
z_new = torch.from_numpy(np.random.choice(ints[0], (batchSize, 1),
p=torch.exp(log_p[0]).detach().numpy()).astype(np.float32))
z = torch.cat((z, z_new), 1)
return z
```
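A minimal sketch of how the `IDF` class above could be exercised is shown below. The translation network, hidden width, number of flows and the `models.idf` import path are all assumptions for illustration, not part of the original repository.
```python
# Minimal sketch (sizes and nets are arbitrary): IDF over D=2 integer-valued inputs.
import torch
import torch.nn as nn

from models.idf import IDF  # assumed import path

D = 2
nett = lambda: nn.Sequential(nn.Linear(D // 2, 16), nn.ReLU(), nn.Linear(16, D // 2))
model = IDF(nett, num_flows=4, D=D)

x = torch.round(torch.randn(8, D) * 3.0)   # fake integer-valued data, batch of 8
log_p = model(x)                           # log-prior of z = f(x), one value per example
# model.sample(batchSize=5, D=D) would draw new points through the inverse flow.
```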
#### File: git_flow/models/realnvp.py
```python
import numpy as np
import torch
import torch.nn as nn
class RealNVP(nn.Module):
def __init__(self, nets, nett, num_flows, prior, dequantization=True):
super(RealNVP, self).__init__()
print('RealNVP by JT.')
self.dequantization = dequantization
self.prior = prior
self.t = torch.nn.ModuleList([nett() for _ in range(num_flows)])
self.s = torch.nn.ModuleList([nets() for _ in range(num_flows)])
self.num_flows = num_flows
def coupling(self, x, index, forward=True):
(xa, xb) = torch.chunk(x, 2, 1)
s = self.s[index](xa)
t = self.t[index](xa)
if forward:
yb = (xb - t) * torch.exp(-s)
else:
yb = torch.exp(s) * xb + t
return torch.cat((xa, yb), 1), s, t
def permute(self, x):
return x.flip(1)
def f(self, x):
log_det_J, z = x.new_zeros(x.shape[0]), x
for i in range(self.num_flows):
z, s, _ = self.coupling(z, i, forward=True)
z = self.permute(z)
log_det_J = log_det_J - s.sum(dim=1)
return z, log_det_J
def f_inv(self, z):
x = z
for i in reversed(range(self.num_flows)):
x = self.permute(x)
x, _, _ = self.coupling(x, i, forward=False)
return x
def forward(self, x):
z, log_det_J = self.f(x)
return self.prior.log_prob(z) + log_det_J
def sample(self, batchSize, D=2):
z = self.prior.sample((batchSize, D))
z = z[:, 0, :]
x = self.f_inv(z)
return x.view(-1, D)
```
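Similarly, a hedged sketch for `RealNVP` with a standard-normal prior; the scale/translation networks, their sizes and the `models.realnvp` import path are assumptions made only for this example.
```python
# Minimal sketch: RealNVP on D=2 with a standard-normal prior (sizes are arbitrary).
import torch
import torch.nn as nn
import torch.distributions as td

from models.realnvp import RealNVP  # assumed import path

D = 2
nets = lambda: nn.Sequential(nn.Linear(D // 2, 16), nn.ReLU(), nn.Linear(16, D // 2), nn.Tanh())
nett = lambda: nn.Sequential(nn.Linear(D // 2, 16), nn.ReLU(), nn.Linear(16, D // 2))
prior = td.MultivariateNormal(torch.zeros(D), torch.eye(D))
flow = RealNVP(nets, nett, num_flows=4, prior=prior, dequantization=False)

x = torch.randn(8, D)
nll = -flow(x).mean()         # negative log-likelihood, usable as a training loss
samples = flow.sample(8, D)   # shape (8, D), drawn through the inverse flow
```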
#### File: git_flow/utils/nn.py
```python
import torch
import torch.nn as nn
class RoundStraightThrough(torch.autograd.Function):
def __init__(self):
super().__init__()
@staticmethod
def forward(ctx, input):
rounded = torch.round(input, out=None)
return rounded
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
class Swish(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class Reshape3d(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
def forward(self, x):
B = x.shape[0]
return x.view(B, self.size[0], self.size[1], self.size[2])
class Flatten(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
B = x.shape[0]
return x.view(B, -1)
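# ---------------------------------------------------------------------------
# Quick sanity check of the straight-through estimator (illustrative, not part
# of the original module): the forward pass rounds, the backward pass behaves
# like the identity so gradients flow through the rounding.
if __name__ == "__main__":
    round_st = RoundStraightThrough.apply
    x = torch.tensor([0.2, 1.7, -0.6], requires_grad=True)
    y = round_st(x)
    y.sum().backward()
    print(y)       # tensor([ 0.,  2., -1.])
    print(x.grad)  # tensor([1., 1., 1.]) -- gradient of the identity, not of round()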
``` |
{
"source": "jmtomczak/popi4sb",
"score": 3
} |
#### File: popi4sb/algorithms/population_optimization_algorithms.py
```python
import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from algorithms.general_method import GeneralMethod
from simulators.ode_simulator import calculate_fitness
class DE(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized Differential Evolution (DE).')
def proposal(self, theta, E=None):
indices_1 = np.random.permutation(theta.shape[0])
indices_2 = np.random.permutation(theta.shape[0])
theta_1 = theta[indices_1]
theta_2 = theta[indices_2]
de_noise = self.config_method['gamma'] * (theta_1 - theta_2)
if self.config_method['best']:
tht = theta[[np.argmin(E)]]
else:
tht = theta
theta_new = tht + de_noise
p_1 = np.random.binomial(1, self.config_method['CR'], tht.shape)
return p_1 * theta_new + (1. - p_1) * tht
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
theta_cat = np.concatenate((theta, theta_new), 0)
E_cat = np.concatenate((E_old, E_new), 0)
indx = np.argsort(E_cat.squeeze())
return theta_cat[indx[:theta.shape[0]],:], E_cat[indx[:theta.shape[0]],:]
class RevDE(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized RevDE.')
        gamma = self.config_method['gamma']
        R = np.asarray([[1, gamma, -gamma],
                        [-gamma, 1. - gamma ** 2, gamma + gamma ** 2],
                        [gamma + gamma ** 2, -gamma + gamma ** 2 + gamma ** 3,
                         1. - 2. * gamma ** 2 - gamma ** 3]])
self.R = np.expand_dims(R, 0) # 1 x 3 x 3
def proposal(self, theta, E=None):
theta_0 = np.expand_dims(theta, 1) # B x 1 x D
indices_1 = np.random.permutation(theta.shape[0])
indices_2 = np.random.permutation(theta.shape[0])
theta_1 = np.expand_dims(theta[indices_1], 1)
theta_2 = np.expand_dims(theta[indices_2], 1)
tht = np.concatenate((theta_0, theta_1, theta_2), 1) # B x 3 x D
y = np.matmul(self.R, tht)
theta_new = np.concatenate((y[:,0], y[:,1], y[:,2]), 0)
p_1 = np.random.binomial(1, self.config_method['CR'], theta_new.shape)
return p_1 * theta_new + (1. - p_1) * np.concatenate((tht[:,0], tht[:,1], tht[:,2]), 0)
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
theta_cat = np.concatenate((theta, theta_new), 0)
E_cat = np.concatenate((E_old, E_new), 0)
indx = np.argsort(E_cat.squeeze())
return theta_cat[indx[:theta.shape[0]],:], E_cat[indx[:theta.shape[0]],:]
class RevDEknn(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized RevDE+knn.')
R = np.asarray([[1, self.config_method['gamma'], -self.config_method['gamma']],
[-self.config_method['gamma'], 1. - self.config_method['gamma'] ** 2,
self.config_method['gamma'] + self.config_method['gamma'] ** 2],
[self.config_method['gamma'] + self.config_method['gamma'] ** 2,
-self.config_method['gamma'] + self.config_method['gamma'] ** 2 + self.config_method['gamma'] ** 3,
1. - 2. * self.config_method['gamma'] ** 2 - self.config_method['gamma'] ** 3]])
self.R = np.expand_dims(R, 0) # 1 x 3 x 3
self.nn = KNeighborsRegressor(n_neighbors=3)
self.X = None
self.E = None
def proposal(self, theta, E=None):
if self.X is None:
self.X = theta
self.E = E
else:
if self.X.shape[0] < 10000:
self.X = np.concatenate((self.X, theta), 0)
self.E = np.concatenate((self.E, E), 0)
self.nn.fit(self.X, self.E)
theta_0 = np.expand_dims(theta, 1) # B x 1 x D
indices_1 = np.random.permutation(theta.shape[0])
indices_2 = np.random.permutation(theta.shape[0])
theta_1 = np.expand_dims(theta[indices_1], 1)
theta_2 = np.expand_dims(theta[indices_2], 1)
tht = np.concatenate((theta_0, theta_1, theta_2), 1) # B x 3 x D
y = np.matmul(self.R, tht)
theta_new = np.concatenate((y[:,0], y[:,1], y[:,2]), 0)
p_1 = np.random.binomial(1, self.config_method['CR'], theta_new.shape)
theta_new = p_1 * theta_new + (1. - p_1) * np.concatenate((tht[:,0], tht[:,1], tht[:,2]), 0)
E_pred = self.nn.predict((theta_new))
ind = np.argsort(E_pred.squeeze())
return theta_new[ind[:theta.shape[0]]]
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
theta_cat = np.concatenate((theta, theta_new), 0)
E_cat = np.concatenate((E_old, E_new), 0)
indx = np.argsort(E_cat.squeeze())
return theta_cat[indx[:theta.shape[0]],:], E_cat[indx[:theta.shape[0]],:]
class ES(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized ES.')
self.sigma = config_method['std']
self.c = 0.817
def proposal(self, theta, E=None):
noise = self.sigma * np.random.randn(theta.shape[0], theta.shape[1])
if self.config_method['best']:
tht = theta[[np.argmin(E)]]
else:
tht = theta
theta_new = tht + noise
p_1 = np.random.binomial(1, self.config_method['CR'], tht.shape)
return p_1 * theta_new + (1. - p_1) * tht
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
m = (E_new < E_old) * 1.
if np.mean(m) < 0.2:
self.sigma = self.sigma * self.c
elif np.mean(m) > 0.2:
self.sigma = self.sigma / self.c
return m * theta_new + (1. - m) * theta, m * E_new + (1. - m) * E_old
class EDA(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized EDA.')
def estimate(self, x):
m = np.mean(x, 0, keepdims=True)
z = np.expand_dims(x - m, 2)
S = np.mean(np.matmul(z, np.transpose(z, [0, 2, 1])), 0)
L = np.linalg.cholesky(S)
return m, L
def proposal(self, theta, E=None):
# Fit Gaussian:
# 1) Calculate mean and covariance matrix
m, L = self.estimate(theta)
        # 2) Generate new points (same number as the original theta):
# x_new = mean + epsilon * L
theta_new = m + np.dot( np.random.randn(theta.shape[0], theta.shape[1]), L )
return theta_new
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
theta_cat = np.concatenate((theta, theta_new), 0)
E_cat = np.concatenate((E_old, E_new), 0)
indx = np.argsort(E_cat.squeeze())
return theta_cat[indx[:theta.shape[0]], :], E_cat[indx[:theta.shape[0]], :]
class EDAknn(GeneralMethod):
def __init__(self, config_method, config_model):
super().__init__(config_method, config_model)
print('Initialized EDA+knn.')
self.nn = KNeighborsRegressor(n_neighbors=3)
self.X = None
self.E = None
def estimate(self, x):
m = np.mean(x, 0, keepdims=True)
z = np.expand_dims(x - m, 2)
S = np.mean(np.matmul(z, np.transpose(z, [0, 2, 1])), 0)
L = np.linalg.cholesky(S)
return m, L
def proposal(self, theta, E=None):
if self.X is None:
self.X = theta
self.E = E
else:
if self.X.shape[0] < 10000:
self.X = np.concatenate((self.X, theta), 0)
self.E = np.concatenate((self.E, E), 0)
self.nn.fit(self.X, self.E)
# Fit Gaussian:
# 1) Calculate mean and covariance matrix
m, L = self.estimate(theta)
        # 2) Generate new points (5 x more than the original theta):
# x_new = mean + epsilon * L
theta_new = m + np.dot( np.random.randn(theta.shape[0] * 5, theta.shape[1]), L )
E_pred = self.nn.predict((theta_new))
ind = np.argsort(E_pred.squeeze())
return theta_new[ind[:theta.shape[0]]]
def step(self, theta, E_old, x_obs, mod, params):
# (1. Generate)
theta_new = self.proposal(theta, E_old)
theta_new = np.clip(theta_new, a_min=self.config_method['clip_min'], a_max=self.config_method['clip_max'])
# (2. Evaluate)
E_new = calculate_fitness(x_obs, theta_new, mod, params, self.dist, self.config_model, self.config_method)
# (3. Select)
theta_cat = np.concatenate((theta, theta_new), 0)
E_cat = np.concatenate((E_old, E_new), 0)
indx = np.argsort(E_cat.squeeze())
return theta_cat[indx[:theta.shape[0]], :], E_cat[indx[:theta.shape[0]], :]
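# ---------------------------------------------------------------------------
# Illustrative check (not part of the original module): the R matrix used by
# RevDE is the linear form of three chained DE moves that reuse the freshly
# generated candidates, i.e. y1 = x1 + g*(x2 - x3), y2 = x2 + g*(x3 - y1),
# y3 = x3 + g*(y1 - y2).
if __name__ == "__main__":
    g = 0.5  # arbitrary gamma for the check
    R = np.asarray([[1, g, -g],
                    [-g, 1. - g ** 2, g + g ** 2],
                    [g + g ** 2, -g + g ** 2 + g ** 3, 1. - 2. * g ** 2 - g ** 3]])
    x1, x2, x3 = np.random.randn(3)
    y1 = x1 + g * (x2 - x3)
    y2 = x2 + g * (x3 - y1)
    y3 = x3 + g * (y1 - y2)
    assert np.allclose(R.dot(np.array([x1, x2, x3])), [y1, y2, y3])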
``` |
{
"source": "jmtoung/26p",
"score": 3
} |
#### File: jmtoung/26p/modelPaymentMethod.py
```python
from google.appengine.ext import ndb
import ndbTools
class PaymentMethod(ndb.Model):
purchaser = ndb.KeyProperty(kind='Purchaser')
store = ndb.KeyProperty(kind='Store')
type = ndb.StringProperty(choices=['Credit Card', 'Debit Card', 'Bank Account', 'Cash', 'Paypal'])
cardType = ndb.StringProperty(choices=['Visa', 'Master Card', 'Discover', 'American Express', 'Paypal'])
accountNumber = ndb.StringProperty()
expirationDate = ndb.DateProperty()
paymentDirection = ndb.StringProperty(choices=['Outgoing', 'Incoming'], repeated=True)
displayName = ndb.ComputedProperty(lambda self: ComputeDisplayName(self))
def GetPaymentMethod(key):
return PaymentMethod.query(ancestor=ndb.Key(urlsafe=key)).fetch()
# if property == "key":
# return ndb.Key(urlsafe=value).get()
# #else:
# #stores = [s for s in Store.query(Store._properties[property] == value)]
# #return stores[0]
def ComputeDisplayName(self):
if self.type=="Credit Card":
result = ""
for x in [self.cardType, self.accountNumber, self.expirationDate]:
if x:
result = result + " " + str(x)
return result
else:
return 'NOT CODED YET'
def AddPaymentMethod(d):
return ndbTools.AddData(PaymentMethod, d, d['purchaser']['key'])
```
#### File: jmtoung/26p/ndbTools.py
```python
from google.appengine.ext import ndb
import re
import datetime
# function for getting entity from GAE by key
def GetData(key):
return ndb.Key(urlsafe=key).get()
# function for querying GAE
def QueryData(model, queryParams):
newQueryParams = {}
if 'ancestor' in queryParams:
if isinstance(queryParams['ancestor'], basestring):
ancestor = ndb.Key(urlsafe=queryParams['ancestor'])
elif isinstance(queryParams['ancestor'], dict):
ancestor = ndb.Key(urlsafe=queryParams['ancestor']['key'])
else:
            raise Exception('invalid ancestor parameter in queryParams: ' + str(queryParams['ancestor']))
newQueryParams['ancestor'] = ancestor
if 'filters' in queryParams:
filterNode = []
for f in queryParams['filters']:
filterNode.append(ndb.FilterNode(f['property'], f['operator'], f['value']))
newQueryParams['filters'] = ndb.AND(*filterNode)
query = model.query(**newQueryParams)
if 'orders' in queryParams:
if queryParams['orders']['direction'] == 'ASC':
query = query.order(getattr(model, queryParams['orders']['property']))
elif queryParams['orders']['direction'] == 'DESC':
query = query.order(-getattr(model, queryParams['orders']['property']))
else:
raise Exception('invalid direction parameter for order: ' + queryParams['orders']['direction'])
return query.fetch()
# function for adding a new GAE entity
def AddData(model, data, parent=None):
entity = _CreateEntity(model, data, parent)
entity.put()
return entity
# private helper function for adding data
def _CreateEntity(model, data, parent=None):
entity = None
if parent:
key = None
        if isinstance(parent, ndb.Key):
key = parent
elif isinstance(parent, basestring):
key = ndb.Key(urlsafe=parent)
elif isinstance(parent, dict):
key = ndb.Key(urlsafe=parent['key'])
elif isinstance(parent, list):
key = ndb.Key(pairs=parent)
else:
raise Exception('invalid parent parameter to CreateEntity method')
if key.get():
entity = model(parent=key)
else:
entity = model()
for x in model._properties:
property = model._properties[x]
if x in data:
if property._repeated:
values = _CheckValueIntegrityList(property, data[x])
setattr(entity, x, values)
else:
value = _CheckValueIntegrity(property, data[x])
setattr(entity, x, value)
else:
if property._required:
raise Exception('required attribute ' + x + ' not defined')
return entity
# private helper function for adding data
def _CheckValueIntegrityList(property, data):
values = []
for d in data:
value = _CheckValueIntegrity(property, d)
if value not in values:
values.append(value)
return values
# private helper function for adding data
def _CheckValueIntegrity(property, data):
if isinstance(property, ndb.model.StringProperty):
if isinstance(data, basestring):
return data
else:
raise Exception('property ' + str(property) + ' expects StringProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.TextProperty):
if isinstance(data, basestring):
return data
else:
raise Exception('property ' + str(property) + ' expects TextProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.BooleanProperty):
if isinstance(data, bool):
return data
else:
raise Exception('property ' + str(property) + ' expects BooleanProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.IntegerProperty):
if isinstance(data, int):
return data
else:
raise Exception('property ' + str(property) + ' expects IntegerProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.FloatProperty):
if isinstance(data, float):
return data
elif isinstance(data, int):
return float(data)
else:
raise Exception('property ' + str(property) + ' expects FloatProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.DateProperty):
if isinstance(data, basestring):
return AsDateObject(data)
else:
raise Exception('property ' + str(property) + ' expects StringProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.TimeProperty):
if isinstance(data, basestring):
return AsTimeObject(data)
else:
raise Exception('property ' + str(property) + ' expects StringProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.DateTimeProperty):
if isinstance(data, basestring):
return AsDateTimeObject(data)
else:
raise Exception('property ' + str(property) + ' expects StringProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
elif isinstance(property, ndb.model.StructuredProperty):
return _CreateEntity(property._modelclass, data)
elif isinstance(property, ndb.model.KeyProperty):
(urlsafe, key) = None, None
if isinstance(data, basestring):
urlsafe = data
elif isinstance(data, dict):
urlsafe = data['key']
else:
raise Exception('property ' + str(property) + ' expects KeyProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
if urlsafe:
key = ndb.Key(urlsafe=urlsafe)
if key and key.get():
return key
else:
raise Exception('key points to nonexistent object: ' + str(key))
else:
raise Exception('property ' + str(property) + ' not yet implemented')
def DeleteData(key):
if isinstance(key, basestring):
key = ndb.Key(urlsafe=key)
    elif isinstance(key, ndb.Key):
pass
else:
raise Exception('invalid key parameter passed to DeleteData')
key.delete()
return
# function for getting an object referenced by 'key'
def GetData(key):
if isinstance(key, basestring):
return ndb.Key(urlsafe=key).get()
elif isinstance(key, dict):
        return ndb.Key(urlsafe=key['key']).get()
else:
raise Exception('unknown key type')
# function for saving a python dict object (obj) into GAE entity (referenced by 'key')
def SaveData(key, obj):
entity = ndb.Key(urlsafe=key).get()
for x in entity.__class__._properties:
property = entity.__class__._properties[x]
if isinstance(property, ndb.model.ComputedProperty):
continue
if property._repeated:
values = []
for d in obj[x]:
value = _CheckValueIntegrity(property, d)
if value not in values:
values.append(value)
setattr(entity, x, values)
else:
value = _CheckValueIntegrity(property, obj[x])
setattr(entity, x, value)
entity.put()
return entity
# function for returning a ndb object as a dictionary
def AsDict(obj, level = 0):
dict = None
entity = None
if isinstance(obj, ndb.Key):
dict = {'id': obj.id(), 'key': obj.urlsafe()}
if level > 2:
return obj.urlsafe()
entity = obj.get()
if entity is None:
return None
elif isinstance(obj, ndb.Model):
dict = {}
entity = obj
else:
raise Exception('invalid parameter obj passed to AsDict')
for p in entity._properties:
attr = getattr(entity, p)
#if p == "shipping":
#print '~~~'
#print attr
#print '~~~'
if isinstance(attr, ndb.Key):
attr = AsDict(attr, level = (level + 1))
elif isinstance(attr, list):
#print 'p: ' + str(p)
#print 'attr: ' + str(attr)
attr = [AsDict(a, level = (level + 1)) for a in attr]
attr = [a for a in attr if not a is None]
elif isinstance(attr, (datetime.datetime, datetime.date, datetime.time)):
attr = str(attr)
dict[p] = attr
return dict
# function for returning a ndb object as a dictionary
def AsDictBACKUP(obj, level = 0):
if isinstance(obj, ndb.Key):
dict = {'id': obj.id(), 'key': obj.urlsafe()}
if obj.get() is None:
return None
if level > 2:
return obj.urlsafe()
for p in obj.get()._properties:
attr = getattr(obj.get(), p)
if p == "shipping":
print '~~~'
print attr
print '~~~'
if isinstance(attr, ndb.Key):
attr = AsDict(attr, level = (level + 1))
elif isinstance(attr, list):
print 'p: ' + str(p)
print 'attr: ' + str(attr)
attr = [AsDict(a, level = (level + 1)) for a in attr]
attr = [a for a in attr if not a is None]
elif isinstance(attr, (datetime.datetime, datetime.date, datetime.time)):
attr = str(attr)
dict[p] = attr
return dict
elif isinstance(obj, ndb.Model):
dict = {}
for p in obj._properties:
print '@@@'
print p
print getattr(obj, p)
print '@@@'
print obj
print type(obj)
print isinstance(obj, ndb.Model)
print '/////'
return '{error: "AsDict()" requires entity key}'
# function for returning a date and time string as a date object
def AsDateTimeObject(dt):
dateTimeMatch = re.match('.*(\d{4}-\d{2}-\d{2}).*(\d{2}:\d{2}).*',dt)
if dateTimeMatch:
dateValue = dateTimeMatch.groups()[0].split('-')
dateValue = [int(dv) for dv in dateValue]
timeValue = dateTimeMatch.groups()[1].split(':')
timeValue = [int(tv) for tv in timeValue]
return datetime.datetime(dateValue[0], dateValue[1], dateValue[2], timeValue[0], timeValue[1])
else:
raise Exception('invalid DateTime parameter: ' + str(dt))
def AsDateObject(d):
dateMatch = re.match('.*(\d{4}-\d{2}-\d{2}).*',d)
if dateMatch:
dateValue = dateMatch.groups()[0].split('-')
dateValue = [int(dv) for dv in dateValue]
return datetime.date(dateValue[0], dateValue[1], dateValue[2])
else:
        raise Exception('invalid Date parameter: ' + str(d))
def AsTimeObject(t):
timeMatch = re.match('.*(\d{2}:\d{2}).*',t)
if timeMatch:
timeValue = timeMatch.groups()[0].split(':')
timeValue = [int(tv) for tv in timeValue]
return datetime.time(timeValue[0], timeValue[1])
else:
        raise Exception('invalid Time parameter: ' + str(t))
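# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how these helpers are
# meant to be driven with a toy ndb model. The model, field names and values
# below are made up; running this requires the App Engine SDK and its local
# datastore/memcache stubs.
if __name__ == '__main__':
    from google.appengine.ext import testbed
    tb = testbed.Testbed()
    tb.activate()
    tb.init_datastore_v3_stub()
    tb.init_memcache_stub()

    class Note(ndb.Model):
        title = ndb.StringProperty(required=True)
        created = ndb.DateProperty()

    note = AddData(Note, {'title': 'groceries', 'created': '2014-02-01'})
    print AsDict(note.key)   # {'id': ..., 'key': ..., 'title': 'groceries', ...}
    print QueryData(Note, {'filters': [
        {'property': 'title', 'operator': '=', 'value': 'groceries'}]})
    DeleteData(note.key.urlsafe())   # accepts a urlsafe string or an ndb.Key
    tb.deactivate()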
``` |
{
"source": "jmtsuji/atlas",
"score": 2
} |
#### File: atlas/atlas/conf.py
```python
import logging
import multiprocessing
import os
import sys
import tempfile
from snakemake import utils
from snakemake.io import load_configfile
import pandas as pd
import numpy as np
from collections import defaultdict
import click
sys.path.append(os.path.dirname(__file__))
from default_values import make_default_config
# default globals
ADAPTERS = "adapters.fa"
RRNA = "silva_rfam_all_rRNAs.fa"
PHIX = "phiX174_virus.fa"
ADDITIONAL_SAMPLEFILE_HEADERS=[]#,'Contigs']
def get_samples_from_fastq(path):
"""
creates table sampleID R1 R2 with the absolute paths of fastq files in a given folder
"""
samples = defaultdict(dict)
seen = set()
for dir_name, sub_dirs, files in os.walk(os.path.abspath(path)):
for fname in files:
if ".fastq" in fname or ".fq" in fname:
sample_id = fname.split(".fastq")[0].split(".fq")[0]
sample_id = sample_id.replace("_R1", "").replace("_r1", "").replace("_R2", "").replace("_r2", "")
sample_id = sample_id.replace("_", "-").replace(" ", "-")
fq_path = os.path.join(dir_name, fname)
if fq_path in seen: continue
if "_R2" in fname or "_r2" in fname:
if 'R2' in samples[sample_id]:
logging.error(f"Duplicate sample {sample_id} was found after renaming; skipping... \n Samples: \n{samples}")
samples[sample_id]['R2'] = fq_path
else:
if 'R1' in samples[sample_id]:
logging.error(f"Duplicate sample {sample_id} was found after renaming; skipping... \n Samples: \n{samples}")
samples[sample_id]['R1'] = fq_path
samples= pd.DataFrame(samples).T
if samples.isnull().any().any():
logging.error(f"Missing files:\n\n {samples}")
exit(1)
if samples.shape[0]==0:
logging.error(f"No files found in {path}\n"
"I'm looking for files with .fq or .fastq extension. ")
exit(1)
return samples
def validate_sample_table(sampleTable):
Expected_Headers =['BinGroup'] + ADDITIONAL_SAMPLEFILE_HEADERS
for h in Expected_Headers:
if not (h in sampleTable.columns):
logging.error(f"expect '{h}' to be found in samples.tsv")
exit(1)
elif sampleTable[h].isnull().any():
logging.error(f"Found empty values in the sample table column '{h}'")
exit(1)
if not sampleTable.index.is_unique:
        duplicated_samples = ', '.join(sampleTable.index[sampleTable.index.duplicated()])
logging.error( f"Expect Samples to be unique. Found {duplicated_samples} more than once")
exit(1)
def prepare_sample_table(path_to_fastq,reads_are_QC=False,outfile='samples.tsv'):
"""
Write the file `samples.tsv` and complete the sample names and paths for all
files in `path`.
Args:
path_to_fastq (str): fastq/fasta data directory
"""
samples = get_samples_from_fastq(path_to_fastq)
columns= samples.columns # R1 and R2 or only R1 , who knows
if 'R2' not in columns:
assert len(columns) == 1, "expect columns to be only ['R1']"
columns=['se']
if reads_are_QC:
samples.columns= ['Reads_QC_'+c for c in columns]
else:
samples.columns= ['Reads_raw_'+c for c in columns]
Headers = ADDITIONAL_SAMPLEFILE_HEADERS
for h in Headers:
samples[h]=np.nan
samples['BinGroup']= samples.index
validate_sample_table(samples)
logging.info("Found %d samples under %s" % (len(samples), path_to_fastq))
if os.path.exists(outfile):
logging.error(f"Output file {outfile} already exists I don't dare to overwrite it.")
exit(1)
else:
samples.to_csv(outfile,sep='\t')
def load_sample_table(sample_table='samples.tsv'):
sampleTable = pd.read_csv(sample_table,index_col=0,sep='\t')
validate_sample_table(sampleTable)
return sampleTable
def make_config(database_dir, threads, assembler, data_type='metagenome',interleaved_fastq=False,config='config.yaml'):
"""
Reads template config file with comments from ./template_config.yaml
updates it by the parameters provided.
Args:
config (str): output file path for yaml
database_dir (str): location of downloaded databases
threads (int): number of threads per node to utilize
assembler (str): either spades or megahit
data_type (str): this is either metagenome or metatranscriptome
"""
from ruamel.yaml import YAML #used for yaml reading with comments
yaml = YAML()
yaml.version = (1, 1)
yaml.default_flow_style = False
template_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"template_config.yaml")
with open(template_conf_file) as template_config:
conf = yaml.load(template_config)
conf["tmpdir"] = tempfile.gettempdir()
conf["threads"] = multiprocessing.cpu_count() if not threads else threads
conf["preprocess_adapters"] = os.path.join(database_dir, "adapters.fa")
conf["contaminant_references"] = {"PhiX":os.path.join(database_dir, "phiX174_virus.fa")}
if data_type == 'metatranscriptome':
conf["contaminant_references"]["rRNA"]= os.path.join(database_dir, "silva_rfam_all_rRNAs.fa"),
conf["data_type"]= data_type
conf["interleaved_fastqs"]=interleaved_fastq
conf["assembler"] = assembler
conf["database_dir"] = database_dir
#conf["refseq_namemap"] = os.path.join(database_dir, "refseq.db")
#conf["refseq_tree"] = os.path.join(database_dir, "refseq.tree")
#conf["diamond_db"] = os.path.join(database_dir, "refseq.dmnd")
if os.path.exists(config):
logging.warning(f"Config file {config} already exists, I didn't dare to overwrite it. continue...")
else:
with open(config, "w") as f:
yaml.dump(conf, f)
logging.info(
"Configuration file written to %s\n"
"You may want to edit it using any text editor."% config
)
def validate_config(config, workflow):
conf = load_configfile(config)
# validate_sample_defs(conf, workflow)
# could later add more validation steps
def update_config(config):
"""
Populates config file with default config values.
And made changes if necessary.
"""
# in old version java_mem was used, new is mem
if ('java_mem' in config) and (not ('mem' in config)):
config['mem']=config['java_mem']
# get default values and update them with values specified in config file
default_config = make_default_config()
utils.update_config(default_config, config)
return default_config
@click.command(
"init",
short_help="prepare configuration file and sample table for atlas run",
)
@click.argument("path_to_fastq",type=click.Path(readable=True))
@click.option(
"-d",
"--db-dir",
default=os.path.join(os.path.realpath("."), "databases"),
type=click.Path(dir_okay=True,writable=True,resolve_path=True),
show_default=True,
help="location to store databases (need ~50GB)",
)
@click.option("-w",
"--working-dir",
type=click.Path(dir_okay=True,writable=True,resolve_path=True),
help="location to run atlas",
default="."
)
@click.option(
"--assembler",
default="spades",
type=click.Choice(["megahit", "spades"]),
show_default=True,
help="assembler",
)
@click.option(
"--data-type",
default="metagenome",
type=click.Choice(["metagenome", "metatranscriptome"]),
show_default=True,
help="sample data type",
)
@click.option(
"--interleaved-fastq",
is_flag=True,
default=False,
help="fastq files are paired-end in one files (interleaved)",
)
@click.option(
"--threads",
default=8,
type=int,
help="number of threads to use per multi-threaded job",
)
@click.option(
"--skip-qc",
is_flag=True,
help="Skip QC, if reads are already pre-processed",
)
def run_init(path_to_fastq,db_dir, working_dir, assembler, data_type, interleaved_fastq,threads,skip_qc=False):
"""Write the file CONFIG and complete the sample names and paths for all
FASTQ files in PATH.
PATH is traversed recursively and adds any file with '.fastq' or '.fq' in
the file name with the file name minus extension as the sample ID.
"""
if not os.path.exists(working_dir): os.makedirs(working_dir)
config=os.path.join(working_dir,'config.yaml')
if not os.path.exists(db_dir): os.makedirs(db_dir)
sample_file= os.path.join(working_dir,'samples.tsv')
make_config(db_dir, threads, assembler,data_type,interleaved_fastq,config)
prepare_sample_table(path_to_fastq,reads_are_QC=skip_qc,outfile=sample_file)
```
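For orientation, the two helpers above can also be called directly instead of going through the `atlas init` CLI; the paths below are placeholders, and the call assumes the packaged `template_config.yaml` sits next to the module as in the code above.
```python
# Hypothetical direct use of make_config / prepare_sample_table (paths are placeholders).
make_config(database_dir='databases', threads=4, assembler='megahit',
            data_type='metagenome', interleaved_fastq=False, config='config.yaml')
prepare_sample_table('path/to/fastq_dir', reads_are_QC=False, outfile='samples.tsv')
# samples.tsv then contains one row per sample, with absolute fastq paths in
# Reads_raw_R1 / Reads_raw_R2 (or Reads_raw_se) plus a BinGroup column.
```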
#### File: atlas/atlas/group_species.py
```python
import pandas as pd
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
from sklearn.metrics import silhouette_score
import numpy as np
from common import genome_pdist as gd
def automatic_cluster_species(Dist,seed_tresholds= [0.92,0.97],linkage_method='average'):
linkage = hc.linkage(sp.distance.squareform(Dist), method=linkage_method)
def get_Nclusters(treshold):
labels= hc.fcluster(linkage,(1-treshold),criterion='distance')
return max(labels)
N_range= [get_Nclusters(t) for t in seed_tresholds]
    assert (N_range[1]-N_range[0])< 60, "Would need to evaluate more than 60 cluster numbers between the seed tresholds"
assert ~np.isnan(N_range).any(), "N range is not defined"
Scores= gd.evaluate_clusters_range(np.arange(min(N_range),max(N_range)+1),Dist,linkage_method=linkage_method)
if N_range[0]==N_range[1]:
labels= hc.fcluster(linkage,(1-seed_tresholds[0]),criterion='distance')
else:
N_species= Scores.Silhouette_score.idxmax()
labels= hc.fcluster(linkage,N_species,criterion='maxclust')
return Scores,labels
def treshold_based_clustering(Dist,treshold,linkage_method='average'):
    assert (treshold>0.9)&(treshold<1), f"treshold should be between 0.9 and 1 or 'auto', treshold was {treshold}"
linkage = hc.linkage(sp.distance.squareform(Dist), method=linkage_method)
labels = hc.fcluster(linkage,(1-treshold),criterion='distance')
Scores= gd.evaluate_clusters_tresholds([treshold],Dist,linkage_method=linkage_method)
return Scores,labels
if __name__=='__main__':
linkage_method= snakemake.params.linkage_method
treshold = snakemake.params.treshold
quality_score_formula = snakemake.config['quality_score']
Q= gd.load_quality(snakemake.input.quality)
quality_score= Q.eval(quality_score_formula)
M= gd.load_mummer(snakemake.input.dists)
Dist= 1-gd.pairewise2matrix(M,fillna=0.9)
if treshold=='auto':
Scores,labels= automatic_cluster_species(Dist,linkage_method=linkage_method)
else:
Scores, labels = treshold_based_clustering(Dist,treshold,linkage_method=linkage_method)
Scores.to_csv(snakemake.output.scores,sep='\t')
mag2Species= pd.DataFrame(index=Q.index,columns=['SpeciesNr','Species'])
mag2Species.index.name='genome'
mag2Species.loc[Dist.index,'SpeciesNr']= labels
speciesNr= labels.max()
missing_species=mag2Species.SpeciesNr.isnull()
mag2Species.loc[missing_species,'SpeciesNr']=np.arange(speciesNr+1,
speciesNr+1+missing_species.sum())
print(f"Identified { mag2Species.SpeciesNr.max()} species")
n_leading_zeros= len(str(max(labels)))
format_int='sp{:0'+str(n_leading_zeros)+'d}'
mag2Species['Species']=mag2Species.SpeciesNr.apply(format_int.format)
mag2Species['Representative_Species']=gd.best_genome_from_table(mag2Species.Species,quality_score)
mag2Species.to_csv(snakemake.output.cluster_file,sep='\t')
```
#### File: atlas/report/assembly_report.py
```python
import os,sys
f = open(os.devnull, 'w'); sys.stdout = f # silence cufflinks, which prints odd output when imported
from cufflinks import iplot
log=open(snakemake.log[0],"w")
sys.stderr= log
sys.stdout= log
import pandas as pd
import plotly.graph_objs as go
from plotly import offline
from snakemake.utils import report
PLOTLY_PARAMS = dict(
include_plotlyjs=False, show_link=False, output_type="div", image_height=700
)
atlas_dir= os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))
sys.path.append(os.path.join(atlas_dir,'scripts'))
from utils.parsers_bbmap import parse_bbmap_log_file
def parse_map_stats(sample_data, out_tsv):
stats_df = pd.DataFrame()
for sample in sample_data.keys():
df = pd.read_csv(sample_data[sample]["contig_stats"],sep='\t')
        assert df.shape[0] == 1, "Assumed only one row in file {}; found {} rows".format(
            sample_data[sample]["contig_stats"], df.shape[0]
        )
df = df.iloc[0]
df.name = sample
genes_df = pd.read_csv(sample_data[sample]["gene_table"], index_col=0,sep='\t')
df["N_Predicted_Genes"] = genes_df.shape[0]
used_reads,mapped_reads= parse_bbmap_log_file(sample_data[sample]["mapping_log"])
df["Assembled_Reads"] = mapped_reads
df["Percent_Assembled_Reads"] = mapped_reads/used_reads *100
stats_df = stats_df.append(df)
stats_df = stats_df.loc[:, ~ stats_df.columns.str.startswith("scaf_")]
stats_df.columns = stats_df.columns.str.replace("ctg_", "")
stats_df.to_csv(out_tsv, sep="\t")
return stats_df
def main(samples, contig_stats, gene_tables, mapping_logs, report_out, combined_stats):
sample_data = {}
for sample in samples:
sample_data[sample] = {}
for c_stat in contig_stats:
# underscore version was for simplified local testing
# if "%s_" % sample in c_stat:
if "%s/" % sample in c_stat:
sample_data[sample]["contig_stats"] = c_stat
for g_table in gene_tables:
# if "%s_" % sample in g_table:
if "%s/" % sample in g_table:
sample_data[sample]["gene_table"] = g_table
for mapping_log in mapping_logs:
# if "%s_" % sample in mapping_log:
if "%s/" % sample in mapping_log:
sample_data[sample]["mapping_log"] = mapping_log
df = parse_map_stats(sample_data, combined_stats)
div = {}
labels = {
"Percent_Assembled_Reads": "Percent of Assembled Reads",
"contig_bp": "Total BP",
"n_contigs": "Contigs (count)",
"N_Predicted_Genes": "Predicted Genes (count)",
}
for variable in [
"Percent_Assembled_Reads", "contig_bp", "n_contigs", "N_Predicted_Genes"
]:
y_axis_label = labels[variable]
div[variable] = offline.plot(
df[variable].iplot(
asFigure=True,
kind="bar",
xTitle="Samples",
layout=go.Layout(
xaxis=dict(tickangle=45), yaxis=dict(title=y_axis_label)
),
),
**PLOTLY_PARAMS,
)
div["L50"] = offline.plot(
df[["L50", "L90"]].iplot(
asFigure=True,
kind="bar",
xTitle="Samples",
layout=go.Layout(xaxis=dict(tickangle=45), yaxis=(dict(title="Bases"))),
),
**PLOTLY_PARAMS,
)
report_str = """
.. raw:: html
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
=============================================================
ATLAS_ - Assembly Summary
=============================================================
.. _ATLAS: https://github.com/metagenome-atlas/atlas
.. contents::
:backlinks: none
Summary
-------
Fragmentation
*************
L50/L90 is a measure of how fractionated assemblies are:
50%/ 90% of the assembly is made up of contigs of Length L50/L90 or longer. Sometimes refered to as N50/N90.
.. raw:: html
{div[L50]}
Assembly Length
***************
.. raw:: html
{div[contig_bp]}
Number of Contigs
*****************
.. raw:: html
{div[n_contigs]}
Number of Predicted Genes
*************************
.. raw:: html
{div[N_Predicted_Genes]}
Percent of Assembled Reads
**************************
.. raw:: html
{div[Percent_Assembled_Reads]}
For more information see Table_1_
Downloads
---------
"""
report(report_str, report_out, Table_1=combined_stats, stylesheet=os.path.join(atlas_dir,'report', "report.css"))
if __name__ == "__main__":
try:
main(
samples=snakemake.params.samples,
contig_stats=snakemake.input.contig_stats,
gene_tables=snakemake.input.gene_tables,
mapping_logs=snakemake.input.mapping_logs,
report_out=snakemake.output.report,
combined_stats=snakemake.output.combined_contig_stats
)
except NameError:
import argparse
p = argparse.ArgumentParser()
p.add_argument("--samples", nargs="+")
p.add_argument("--contig-stats", nargs="+")
p.add_argument("--gene-tables", nargs="+")
p.add_argument("--mapping-logs", nargs="+")
p.add_argument("--report-out")
p.add_argument("--combined-stats")
args = p.parse_args()
main(
args.samples,
args.contig_stats,
args.gene_tables,
args.mapping_logs,
args.report_out,
args.combined_stats,
)
```
#### File: scripts/utils/tree.py
```python
import os
os.environ['QT_QPA_PLATFORM']='offscreen' # because we might not have a X server
import ete3
import pandas as pd
import warnings
def load_tree(netwik_file):
return ete3.Tree(netwik_file,quoted_node_names=True,format=1)
def root_tree_by_phyla(T,phyla):
""" Root the tree next to the phylum that is as far apart as possible from the other phyla
"""
phylum_LCA={}
for p in phyla.unique():
phylum_LCA[p]=T.get_common_ancestor(*tuple(phyla.index[phyla==p].values))
Dist= pd.DataFrame()
for p1,lca1 in phylum_LCA.items():
for p2,lca2 in phylum_LCA.items():
Dist.loc[p1,p2]=T.get_distance(lca1,lca2)
furthest_phylum= Dist.mean().idxmax()
outgroup=phylum_LCA[furthest_phylum]
if not outgroup== T:
T.set_outgroup(outgroup)
def layout_black_circles(node):
# If node is a leaf
if node.is_leaf():
node.img_style["fgcolor"]='k'
else:
node.img_style["size"]=0
def render_tree(T,out):
from ete3 import TreeStyle
ts = TreeStyle()
ts.show_leaf_name= False
ts.mode = "c"
ts.scale=200
ts.show_scale=False
T.render(out,tree_style=ts,layout=layout_black_circles)
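# Minimal usage sketch (not from the original module; the file names are hypothetical
# and `phyla` is assumed to be a pandas Series mapping tree leaf names to phylum labels):
# T = load_tree('checkm/tree.nwk')
# root_tree_by_phyla(T, phyla)
# render_tree(T, 'tree.pdf')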
``` |
{
"source": "jmt-transloc/python_automation_frameworks",
"score": 3
} |
#### File: tests/step_definitions/test_steps__google.py
```python
from pytest_bdd import scenarios, given, when, then, parsers
from pages.search import GoogleSearch
from pages.results import GoogleResults
#
# Scenarios
#
scenarios('../features/google.feature')
#
# Background:
#
@given('we are viewing the Google search page')
def navigate_to_page(browser):
search_page = GoogleSearch(browser)
search_page.load()
#
# Test: Page Title Matches the Page
#
@then('the title should feature the Google name')
def compare_title(browser):
search_page = GoogleSearch(browser)
assert search_page.get_title() == 'Google'
#
# Test: Process a Google Search
#
@when(parsers.parse('we search for "{phrase}"'))
def search(browser, phrase):
search_page = GoogleSearch(browser)
search_page.search(phrase)
@then(parsers.parse('"{phrase}" should appear in the results field'))
def verify_search_results(browser, phrase):
results_page = GoogleResults(browser)
assert results_page.result_div_count() > 0
assert results_page.search_input_value(results_page.search_field) == phrase
``` |
{
"source": "jmtyszka/fMRI_Utilities",
"score": 2
} |
#### File: jmtyszka/fMRI_Utilities/dropout.py
```python
import sys
import os
import numpy as np
import nibabel as nib
USAGE = """
USAGE :
dropout.py <fmap mag nifti> <fmap nifti> <TE>
ARGS :
<fmap mag nifti> - Nifti-1 fieldmap magnitude image filename
<fmap nifti> - Nifti-1 fieldmap (in rad/s) image filename
<TE> - new effective TE in ms for dropout calculation [30 ms]
"""
# Main function
def main():
# Command line args
if len(sys.argv) < 3:
print(USAGE)
sys.exit()
else:
fmap_mag_file = sys.argv[1]
fmap_file = sys.argv[2]
# Simulated echo time in milliseconds (default 30 ms, matching the USAGE text)
if len(sys.argv) < 4:
TE = 30.0
else:
TE = float(sys.argv[3])
# Isolate file stub in presence of .nii.gz or .nii extensions
if '.nii.gz' in fmap_mag_file:
dropout_file = fmap_mag_file[:-7]
else:
if '.nii' in fmap_mag_file:
dropout_file = fmap_mag_file[:-4]
else:
dropout_file = fmap_mag_file
# Complete output filename
dropout_file = dropout_file + '_dropout.nii.gz'
# Report arguments
print('')
print('Gradient Echo Dropout Simulator')
print('-------------------------------')
print('Fieldmap magnitude : ' + fmap_mag_file)
print('Fieldmap phase : ' + fmap_file)
print('Simulated TE : ' + str(TE) + ' ms')
print('Adjusted mag image : ' + dropout_file)
print('')
print('Simulating intravoxel dephasing')
# Load fmap_mag and fmap volumes
# See http://niftilib.sourceforge.net/pynifti/examples.html for examples
print(' Loading phase image from ' + fmap_file)
try:
nim_phi = nib.load(fmap_file)
except:
print('* Problem loading ' + fmap_file + ' - exiting')
sys.exit()
# Get phase data from Nifti object
phi = nim_phi.get_data()
# Calculate grad(phi). Returns numpy array
print(' Calculating grad(phi)')
Gz, Gy, Gx = np.gradient(phi)
# Calculate absolute gradient magnitude and scale to Hz/voxel
print(' Calculating scaled absolute gradient')
aG = np.sqrt(Gx*Gx + Gy*Gy + Gz*Gz) / (2 * np.pi)
# Weighting function estimates additional signal loss from intravoxel
# dephasing assuming spherical voxel and local linear gradient.
# TODO: account for imaging x susceptibility gradient interactions
print(' Calculating intravoxel dephasing weight')
w = np.abs(np.sinc(TE / 1000.0 * aG))
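# e.g. TE = 30 ms with a local gradient of 20 Hz/voxel gives w = |sinc(0.030 * 20)| ~= 0.50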
print(' Loading magnitude image from ' + fmap_mag_file)
try:
nim_M = nib.load(fmap_mag_file)
except:
print('* Problem loading ' + fmap_mag_file + ' - exiting')
sys.exit()
# Get mag data from Nifti object
M = nim_M.get_data()
# Create signal mask from mag data
# Use 10% of 99th percentile as threshold
p99 = np.percentile(M, 99)
M_mask = (M > p99 * 0.1).astype(int)
# Adjust TE of magnitude image
print(' Applying weight to magnitude image')
M_dropout = M * w
# Create signal masked dropout weight image
w_mask = w * M_mask
# Construct TE-adjusted mag output image - same affine transform as original mag image
nim_M_dropout = nib.Nifti1Image(M_dropout, nim_M.get_affine())
# Save TE-adjusted mag image
print(' Saving TE adjusted magnitude image to ' + dropout_file)
nim_M_dropout.to_filename(dropout_file)
print('Done')
print('')
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
``` |
{
"source": "jmtyszka/freesurfer-editing-utils",
"score": 2
} |
#### File: jmtyszka/freesurfer-editing-utils/fs_merge_control_points.py
```python
import os
import sys
import argparse
import numpy as np
def load_cps(fname):
"""
Load contents of FS7 control point file
Example control.dat file:
58.4497 -6.64394 -14.5253
60.4497 -7.64394 -16.5253
58.4497 -5.64394 -13.5253
58.4497 -2.64394 -11.5253
58.4497 -9.64394 -16.5253
57.4497 -7.64394 -14.5253
60.4497 -6.64394 -16.5253
-49.5503 -7.64394 -19.5253
-46.5503 -5.64394 -17.5253
info
numpoints 9
useRealRAS 1
"""
cps = []
try:
with open(fname, 'r') as fd:
for line in fd:
# Split line into space-separated values
tmp = line.strip()
vals = tmp.split(' ')
if 'numpoints' in vals[0]:
npnts = int(vals[1])
elif 'useRealRAS' in vals[0]:
use_real_ras = int(vals[1])
elif 'info' in vals[0]:
pass
elif len(vals) == 3:
cps.append(vals)
else:
pass
except IOError:
print('* Problem loading {}'.format(fname))
except UnicodeDecodeError:
print('* Problem decoding {}'.format(fname))
return np.array(cps, dtype=float), npnts, use_real_ras
def save_cps(fname, cps, ras_flag):
with open(fname, 'w') as fd:
for pnt in cps:
fd.write(' '.join(['{:f}'.format(x) for x in pnt]))
fd.write('\n')
fd.write('info\n')
fd.write('numpoints {:d}\n'.format(cps.shape[0]))
fd.write('useRealRAS {:d}\n'.format(ras_flag))
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='Merge Freesurfer control point files')
parser.add_argument('-i1', '--infile1', required=True, help="Freesurfer control point text file #1")
parser.add_argument('-i2', '--infile2', required=True, help="Freesurfer control point text file #2")
parser.add_argument('-o', '--outfile', help="Merged control point output text file ['control_merge.dat']")
# Parse command line arguments
args = parser.parse_args()
cp1_fname = args.infile1
if not os.path.isfile(cp1_fname):
print('* {} does not exist - exiting'.format(cp1_fname))
sys.exit(1)
cp2_fname = args.infile2
if not os.path.isfile(cp2_fname):
print('* {} does not exist - exiting'.format(cp2_fname))
sys.exit(1)
if args.outfile:
merge_fname = args.outfile
else:
merge_fname = 'control_merge.dat'
print('Loading {}'.format(cp1_fname))
cp1, npnts1, ras_flag1 = load_cps(cp1_fname)
print('Loading {}'.format(cp2_fname))
cp2, npnts2, ras_flag2 = load_cps(cp2_fname)
if not ras_flag1 == ras_flag2:
print('useRealRAS flags differ between files - exiting')
sys.exit(2)
# Minimum separation for points to be considered distinct (mm)
d_tol = 0.01
# Loop over points in second set, checking for distance to points in first set
cps = cp1.copy()
for p2 in cp2:
# Init closest distance and point in set 1
d_min = 1e30
p1_closest = np.array([-1, -1, -1])
for p1 in cp1:
d = np.linalg.norm(p1-p2)
if d < d_min:
d_min = d
p1_closest = p1
if d_min > d_tol:
cps = np.vstack([cps, p2])
else:
print('1:[{:6.1f}, {:6.1f}, {:6.1f}] close to 2:[{:6.1f}, {:6.1f}, {:6.1f}] (d = {:0.2f} mm)'.format(
p1_closest[0], p1_closest[1], p1_closest[2],
p2[0], p2[1], p2[2],
d_min)
)
print('Merge summary')
print(' {} points in {}'.format(cp1.shape[0], cp1_fname))
print(' {} points in {}'.format(cp2.shape[0], cp2_fname))
print(' {} points in {}'.format(cps.shape[0], merge_fname))
# Save merged point set
print('Writing merged point set to {}'.format(merge_fname))
save_cps(merge_fname, cps, ras_flag1)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
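# Example invocation (the file names below are hypothetical):
#   python fs_merge_control_points.py -i1 control_first_pass.dat -i2 control_second_pass.dat -o control_merge.dat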
``` |
{
"source": "jmtyszka/mrgaze",
"score": 2
} |
#### File: lib/mrgaze/pupilometry.py
```python
import os
import time
import getpass
import cv2
from mrgaze import media, utils, config, calibrate, report, engine
def LivePupilometry(data_dir, live_eyetracking=False):
"""
Perform pupil boundary ellipse fitting on camera feed
Arguments
----
data_dir : string
Root data directory path.
cfg :
Analysis configuration parameters
Returns
----
pupils : boolean
Completion status (True = successful)
"""
# If user did not provide a root data directory, we use HOME/mrgaze
if data_dir == '':
data_dir = os.path.join(os.getenv("HOME"), 'mrgaze')
# Full video file paths
hostname = os.uname()[1]
username = getpass.getuser()
ss_dir = os.path.join(data_dir, "%s_%s_%s" % (hostname, username, int(time.time())))
else:
ss_dir = data_dir
# Load Configuration
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Output flags
verbose = cfg.getboolean('OUTPUT', 'verbose')
overwrite = cfg.getboolean('OUTPUT', 'overwrite')
# Video information
# vin_ext = cfg.get('VIDEO', 'inputextension')
vout_ext = cfg.get('VIDEO' ,'outputextension')
# vin_fps = cfg.getfloat('VIDEO', 'inputfps')
# Flag for freeze frame
freeze_frame = False
vid_dir = os.path.join(ss_dir, 'videos')
res_dir = os.path.join(ss_dir, 'results')
vout_path = os.path.join(vid_dir, 'gaze' + vout_ext)
cal_vout_path = os.path.join(vid_dir, 'cal' + vout_ext)
# if we are not doing live eye-tracking, read back in what would otherwise be the output of live eye-tracking
if not live_eyetracking:
vin_path = vout_path
cal_vin_path = cal_vout_path
else:
vin_path = 0
# Raw and filtered pupilometry CSV file paths
cal_pupils_csv = os.path.join(res_dir, 'cal_pupils.csv')
pupils_csv = os.path.join(res_dir, 'gaze_pupils.csv')
# Check that output directory exists
if not os.path.isdir(res_dir):
os.makedirs(res_dir)
print('* %s does not exist - creating' % res_dir)
if not os.path.isdir(vid_dir):
os.makedirs(vid_dir)
print('* %s does not exist - creating' % vid_dir)
# Set up the LBP cascade classifier
LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
print(' Loading LBP cascade')
cascade = cv2.CascadeClassifier(LBP_path)
if cascade.empty():
print('* LBP cascade is empty - mrgaze installation problem')
return False
# Check for output CSV existence and overwrite flag
if os.path.isfile(pupils_csv):
print('+ Pupilometry output already exists - checking overwrite flag')
if overwrite:
print('+ Overwrite allowed - continuing')
else:
print('+ Overwrite forbidden - skipping pupilometry')
return True
#
# Camera Input
#
print(' Opening camera stream')
try:
if not live_eyetracking:
vin_stream = cv2.VideoCapture(vin_path)
cal_vin_stream = cv2.VideoCapture(cal_vin_path)
else:
vin_stream = cv2.VideoCapture(vin_path)
cal_vin_stream = vin_stream
except:
print('* Problem opening input video stream - skipping pupilometry')
return False
while not vin_stream.isOpened():
print("Waiting for Camera.")
key = utils._waitKey(500)
if key == 'ESC':
print("User Abort.")
break
if not vin_stream.isOpened():
print('* Video input stream not opened - skipping pupilometry')
return False
if not cal_vin_stream.isOpened():
print('* Calibration video input stream not opened - skipping pupilometry')
return False
# Video FPS from metadata
# TODO: may not work with Quicktime videos
# fps = vin_stream.get(cv2.cv.CV_CAP_PROP_FPS)
# fps = cfg.getfloat('CAMERA', 'fps')
# Desired time between frames in milliseconds
# time_bw_frames = 1000.0 / fps
vin_stream.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
vin_stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
vin_stream.set(cv2.CAP_PROP_FPS, 30)
# Total number of frames in video file
# nf = vin_stream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# print(' Video has %d frames at %0.3f fps' % (nf, vin_fps))
# Read first preprocessed video frame from stream
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Get size of preprocessed frame for output video setup
nx, ny = frame.shape[1], frame.shape[0]
# By default we start in non-calibration mode
# switch between gaze/cal modes by pressing key "c"
do_cal = False
while keep_going:
if do_cal == False:
#
# Output video
#
if live_eyetracking:
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
# TODO : Find a better multiplatform codec
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
try:
vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
pupils_stream = open(pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s' % (
'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
t = t0
while keep_going:
# check whether config file has been updated, reload if that is the case
if fc % 30 == 0:
cfg_mtime = os.path.getmtime(os.path.join(data_dir, 'mrgaze.cfg'))
if cfg_mtime > cfg_ts:
print("Updating Configuration")
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Current video time in seconds
t = time.time()
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
# b4_engine = time.time()
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# print "Enging took %s ms" % (time.time() - b4_engine)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
pupils_stream.write(
'%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
if live_eyetracking:
# Write output video frame
vout_stream.write(frame_orig)
# Read next frame, unless we want to figure out the correct settings for this frame
if not freeze_frame:
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10d %10.3f %10.1f' % (
t, area, blink, art_power, pfps))
t0 = time.time()
fc = 0
# check whether the user pressed a key (ESC exits the experiment)
key = utils._waitKey(1)
if key == 'ESC':
# Clean up
if live_eyetracking:
vout_stream.release()
pupils_stream.close()
keep_going = False
elif key == 'c':
# Clean up
if live_eyetracking:
vout_stream.release()
pupils_stream.close()
do_cal = True
print("Starting calibration.")
break
elif key == 'f':
freeze_frame = not freeze_frame
else: # do calibration
#
# Output video
#
if live_eyetracking:
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
# TODO : Find a better multiplatform codec
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
try:
cal_vout_stream = cv2.VideoWriter(cal_vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not cal_vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
cal_pupils_stream = open(cal_pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s' % (
'Time (s)', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
t = t0
while keep_going:
# check whether config file has been updated, reload if that is the case
if fc % 30 == 0:
cfg_mtime = os.path.getmtime(os.path.join(data_dir, 'mrgaze.cfg'))
if cfg_mtime > cfg_ts:
print("Updating Configuration")
cfg = config.LoadConfig(data_dir)
cfg_ts = time.time()
# Current video time in seconds
t = time.time()
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
# b4_engine = time.time()
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# print "Engine took %s ms" % (time.time() - b4_engine)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
cal_pupils_stream.write(
'%0.4f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
# Write output video frame
if live_eyetracking:
cal_vout_stream.write(frame_orig)
# Read next frame (if available)
# if verbose:
# b4_frame = time.time()
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
#if verbose:
# print "Time to load frame: %s" % (time.time() - b4_frame)
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10d %10.3f %10.1f' % (
t, area, blink, art_power, pfps))
t0 = time.time()
fc = 0
# check whether the user pressed a key (ESC exits the experiment)
key = utils._waitKey(1)
if key == 'ESC':
keep_going = False
# Clean up
if live_eyetracking:
cal_vout_stream.release()
cal_pupils_stream.close()
elif key == 'v' or not keep_going:
do_cal = False
print("Stopping calibration.")
# Clean up
if live_eyetracking:
cal_vout_stream.release()
cal_pupils_stream.close()
break
print(' Create calibration model')
C, central_fix = calibrate.AutoCalibrate(res_dir, cfg)
if not C.any():
print('* Empty calibration matrix detected - skipping')
try:
print(' Calibrate pupilometry')
calibrate.ApplyCalibration(ss_dir, C, central_fix, cfg)
except UnboundLocalError:
print(' No calibration data found')
cv2.destroyAllWindows()
vin_stream.release()
print('')
print(' Generate Report')
print(' ---------------')
report.WriteReport(ss_dir, cfg)
# Return pupilometry timeseries
return t, px, py, area, blink, art_power
def VideoPupilometry(data_dir, subj_sess, v_stub, cfg):
"""
Perform pupil boundary ellipse fitting on entire video
Arguments
----
data_dir : string
Root data directory path.
subj_sess : string
Subject/Session name used for subdirectory within data_dir
v_stub : string
Video filename stub, eg 'cal' or 'gaze'
cfg :
Analysis configuration parameters
Returns
----
pupils : boolean
Completion status (True = successful)
"""
# Output flags
verbose = cfg.getboolean('OUTPUT', 'verbose')
overwrite = cfg.getboolean('OUTPUT','overwrite')
# Video information
vin_ext = cfg.get('VIDEO', 'inputextension')
vout_ext = cfg.get('VIDEO' ,'outputextension')
vin_fps = cfg.getfloat('VIDEO', 'inputfps')
# Full video file paths
ss_dir = os.path.join(data_dir, subj_sess)
vid_dir = os.path.join(ss_dir, 'videos')
res_dir = os.path.join(ss_dir, 'results')
vin_path = os.path.join(vid_dir, v_stub + vin_ext)
vout_path = os.path.join(res_dir, v_stub + '_pupils' + vout_ext)
# Raw and filtered pupilometry CSV file paths
pupils_csv = os.path.join(res_dir, v_stub + '_pupils.csv')
# Check that input video file exists
if not os.path.isfile(vin_path):
print('* %s does not exist - returning' % vin_path)
return False
# Set up the LBP cascade classifier
LBP_path = os.path.join(utils._package_root(), 'Cascade/cascade.xml')
print(' Loading LBP cascade')
cascade = cv2.CascadeClassifier(LBP_path)
if cascade.empty():
print('* LBP cascade is empty - mrgaze installation problem')
return False
# Check for output CSV existence and overwrite flag
if os.path.isfile(pupils_csv):
print('+ Pupilometry output already exists - checking overwrite flag')
if overwrite:
print('+ Overwrite allowed - continuing')
else:
print('+ Overwrite forbidden - skipping pupilometry')
return True
#
# Input video
#
print(' Opening input video stream')
try:
vin_stream = cv2.VideoCapture(vin_path)
except:
print('* Problem opening input video stream - skipping pupilometry')
return False
if not vin_stream.isOpened():
print('* Video input stream not opened - skipping pupilometry')
return False
# Video FPS from metadata
# TODO: may not work with Quicktime videos
# fps = vin_stream.get(cv2.cv.CV_CAP_PROP_FPS)
# Total number of frames in video file
nf = vin_stream.get(cv2.CAP_PROP_FRAME_COUNT)
print(' Video has %d frames at %0.3f fps' % (nf, vin_fps))
# Read first preprocessed video frame from stream
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Get size of preprocessed frame for output video setup
nx, ny = frame.shape[1], frame.shape[0]
#
# Output video
#
print(' Opening output video stream')
# Output video codec (MP4V - poor quality compression)
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
try:
vout_stream = cv2.VideoWriter(vout_path, fourcc, 30, (nx, ny), True)
except:
print('* Problem creating output video stream - skipping pupilometry')
return False
if not vout_stream.isOpened():
print('* Output video not opened - skipping pupilometry')
return False
# Open pupilometry CSV file to write
try:
pupils_stream = open(pupils_csv, 'w')
except:
print('* Problem opening pupilometry CSV file - skipping pupilometry')
return False
#
# Main Video Frame Loop
#
# Print verbose column headers
if verbose:
print('')
print(' %10s %10s %10s %10s %10s %10s' % (
'Time (s)', '% Done', 'Area', 'Blink', 'Artifact', 'FPS'))
# Init frame counter
fc = 0
# Init processing timer
t0 = time.time()
while keep_going:
# Current video time in seconds
t = fc / vin_fps
# -------------------------------------
# Pass this frame to pupilometry engine
# -------------------------------------
pupil_ellipse, roi_rect, blink, glint, frame_rgb = engine.PupilometryEngine(frame, cascade, cfg)
# Derive pupilometry parameters
px, py, area = engine.PupilometryPars(pupil_ellipse, glint, cfg)
# Write data line to pupilometry CSV file
pupils_stream.write(
'%0.3f,%0.3f,%0.3f,%0.3f,%d,%0.3f,\n' %
(t, area, px, py, blink, art_power)
)
# Write output video frame
vout_stream.write(frame_rgb)
# Read next frame (if available)
keep_going, frame_orig = media.LoadVideoFrame(vin_stream, cfg)
if keep_going:
frame, art_power = media.Preproc(frame_orig, cfg)
else:
art_power = 0.0
# Increment frame counter
fc = fc + 1
# Report processing FPS
if verbose:
if fc % 100 == 0:
perc_done = fc / float(nf) * 100.0
pfps = fc / (time.time() - t0)
print(' %10.1f %10.1f %10.1f %10d %10.3f %10.1f' % (
t, perc_done, area, blink, art_power, pfps))
# Clean up
cv2.destroyAllWindows()
vin_stream.release()
vout_stream.release()
pupils_stream.close()
# Return pupilometry timeseries
return t, px, py, area, blink, art_power
```
#### File: build/scripts-3.4/mrgaze_batch.py
```python
__version__ = '0.7.2'
import os
import sys
import datetime as dt
from mrgaze import pipeline
def main():
# Get single session directory from command line
if len(sys.argv) > 1:
data_dir = sys.argv[1]
else:
data_dir = os.getcwd()
# Text splash
print('')
print('--------------------------------------------------')
print('mrgaze Batch Gaze Tracking Video Analysis')
print('--------------------------------------------------')
print('Version : %s' % __version__)
print('Date : %s' % dt.datetime.now())
print('Data dir : %s' % data_dir)
print('')
print('Starting batch analysis')
pipeline.RunBatch(data_dir)
print('')
print('Completed batch analysis')
# Clean exit
sys.exit(0)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
```
#### File: mrgaze/mrgaze/fitellipse.py
```python
import numpy as np
import random
import cv2
#---------------------------------------------
# Ellipse Fitting Functions
#---------------------------------------------
def FitEllipse_RANSAC_Support(pnts, roi, cfg, max_itts=5, max_refines=3, max_perc_inliers=95.0):
'''
Robust ellipse fitting to segmented boundary with image support
Parameters
----
pnts : n x 2 array of integers
Candidate pupil-iris boundary points from edge detection
roi : 2D scalar array
Grayscale image of pupil-iris region for support calculation.
max_itts : integer
Maximum RANSAC ellipse candidate iterations
max_refines : integer
Maximum RANSAC ellipse inlier refinements
max_perc_inliers : float
Maximum inlier percentage of total points for convergence
Returns
----
best_ellipse : tuple of tuples
Best fitted ellipse parameters ((x0, y0), (a,b), theta)
'''
# Debug flag
DEBUG = False
# Output flags
graphics = cfg.getboolean('OUTPUT', 'graphics')
# Suppress invalid values
np.seterr(invalid='ignore')
# Maximum normalized error squared for inliers
max_norm_err_sq = 4.0
# Tiny circle init
best_ellipse = ((0,0),(1e-6,1e-6),0)
# High support is better, so init with -Infinity
best_support = -np.inf
# Create display window and init overlay image
if graphics:
cv2.namedWindow('RANSAC', cv2.WINDOW_AUTOSIZE)
# Count pnts (n x 2)
n_pnts = pnts.shape[0]
# Break if too few points to fit ellipse (RARE)
if n_pnts < 5:
return best_ellipse
# Precalculate roi intensity gradients
dIdx = cv2.Sobel(roi, cv2.CV_32F, 1, 0)
dIdy = cv2.Sobel(roi, cv2.CV_32F, 0, 1)
# Ransac iterations
for itt in range(0,max_itts):
# Select 5 points at random
sample_pnts = np.asarray(random.sample(list(pnts), 5))
# Fit ellipse to points
ellipse = cv2.fitEllipse(sample_pnts)
# Dot product of ellipse and image gradients for support calculation
grad_dot = EllipseImageGradDot(sample_pnts, ellipse, dIdx, dIdy)
# Skip this iteration if one or more dot products are <= 0
# implying that the ellipse is unlikely to bound the pupil
if all(grad_dot > 0):
# Refine inliers iteratively
for refine in range(0,max_refines):
# Calculate normalized errors for all points
norm_err = EllipseNormError(pnts, ellipse)
# Identify inliers
inliers = np.nonzero(norm_err**2 < max_norm_err_sq)[0]
# Update inliers set
inlier_pnts = pnts[inliers]
# Protect ellipse fitting from too few points
if inliers.size < 5:
if DEBUG: print('Break < 5 Inliers (During Refine)')
break
# Fit ellipse to refined inlier set
ellipse = cv2.fitEllipse(inlier_pnts)
# End refinement
# Count inliers (n x 2)
n_inliers = inliers.size
perc_inliers = (n_inliers * 100.0) / n_pnts
# Calculate support for the refined inliers
support = EllipseSupport(inlier_pnts, ellipse, dIdx, dIdy)
# Report on RANSAC progress
if DEBUG:
print('RANSAC %d,%d : %0.3f (%0.1f)' % (itt, refine, support, best_support))
# Update overlay image and display
if graphics:
overlay = cv2.cvtColor(roi/2,cv2.COLOR_GRAY2RGB)
OverlayRANSACFit(overlay, pnts, inlier_pnts, ellipse)
cv2.imshow('RANSAC', overlay)
cv2.waitKey(5)
# Update best ellipse
if support > best_support:
best_support = support
best_ellipse = ellipse
else:
# Ellipse gradients did not match image gradients
support = 0.0
perc_inliers = 0.0
if perc_inliers > max_perc_inliers:
if DEBUG: print('Break Max Perc Inliers')
break
return best_ellipse
def FitEllipse_RANSAC(pnts, roi, cfg, max_itts=5, max_refines=3, max_perc_inliers=95.0):
'''
Robust ellipse fitting to segmented boundary points
Parameters
----
pnts : n x 2 array of integers
Candidate pupil-iris boundary points from edge detection
roi : 2D scalar array
Grayscale image of pupil-iris region for display only
max_itts : integer
Maximum RANSAC ellipse candidate iterations
max_refines : integer
Maximum RANSAC ellipse inlier refinements
max_perc_inliers : float
Maximum inlier percentage of total points for convergence
Returns
----
best_ellipse : tuple of tuples
Best fitted ellipse parameters ((x0, y0), (a,b), theta)
'''
# Debug flag
DEBUG = False
# Output flags
graphics = cfg.getboolean('OUTPUT', 'graphics')
# Suppress invalid values
np.seterr(invalid='ignore')
# Maximum normalized error squared for inliers
max_norm_err_sq = 4.0
# Tiny circle init
best_ellipse = ((0,0),(1e-6,1e-6),0)
# Create display window and init overlay image
if graphics:
cv2.namedWindow('RANSAC', cv2.WINDOW_AUTOSIZE)
# Count pnts (n x 2)
n_pnts = pnts.shape[0]
# Break if too few points to fit ellipse (RARE)
if n_pnts < 5:
return best_ellipse
# Ransac iterations
for itt in range(0,max_itts):
# Select 5 points at random
sample_pnts = np.asarray(random.sample(list(pnts), 5))
# Fit ellipse to points
ellipse = cv2.fitEllipse(sample_pnts)
# Refine inliers iteratively
for refine in range(0,max_refines):
# Calculate normalized errors for all points
norm_err = EllipseNormError(pnts, ellipse)
# Identify inliers
inliers = np.nonzero(norm_err**2 < max_norm_err_sq)[0]
# Update inliers set
inlier_pnts = pnts[inliers]
# Protect ellipse fitting from too few points
if inliers.size < 5:
if DEBUG: print('Break < 5 Inliers (During Refine)')
break
# Fit ellipse to refined inlier set
ellipse = cv2.fitEllipse(inlier_pnts)
# End refinement
# Count inliers (n x 2)
n_inliers = inliers.size
perc_inliers = (n_inliers * 100.0) / n_pnts
# Update overlay image and display
if graphics:
overlay = cv2.cvtColor(roi/2,cv2.COLOR_GRAY2RGB)
OverlayRANSACFit(overlay, pnts, inlier_pnts, ellipse)
cv2.imshow('RANSAC', overlay)
cv2.waitKey(5)
# Update best ellipse
best_ellipse = ellipse
if perc_inliers > max_perc_inliers:
if DEBUG: print('Break Max Perc Inliers')
break
return best_ellipse
def FitEllipse_RobustLSQ(pnts, roi, cfg, max_refines=5, max_perc_inliers=95.0):
'''
Iterate ellipse fit on inliers
Parameters
----
pnts : n x 2 array of integers
Candidate pupil-iris boundary points from edge detection
roi : 2D scalar array
Grayscale image of pupil-iris region for display only
cfg : configuration structure
Configuration parameters
max_refines : integer
Maximum number of inlier refinements
max_perc_inliers : float
Maximum inlier percentage of total points for convergence
Returns
----
best_ellipse : tuple of tuples
Best fitted ellipse parameters ((x0, y0), (a,b), theta)
'''
# Debug flag
DEBUG = False
# Suppress invalid values
np.seterr(invalid='ignore')
# Maximum normalized error squared for inliers
max_norm_err_sq = 4.0
# Tiny circle init
best_ellipse = ((0,0),(1e-6,1e-6),0)
# Count edge points
n_pnts = pnts.shape[0]
# Break if too few points to fit ellipse (RARE)
if n_pnts < 5:
return best_ellipse
# Fit ellipse to points
ellipse = cv2.fitEllipse(pnts)
# Refine inliers iteratively
for refine in range(0, max_refines):
# Calculate normalized errors for all points
norm_err = EllipseNormError(pnts, ellipse)
# Identify inliers
inliers = np.nonzero(norm_err**2 < max_norm_err_sq)[0]
# Update inliers set
inlier_pnts = pnts[inliers]
# Protect ellipse fitting from too few points
if inliers.size < 5:
if DEBUG: print('Break < 5 Inliers (During Refine)')
break
# Fit ellipse to refined inlier set
ellipse = cv2.fitEllipse(inlier_pnts)
# Count inliers (n x 2)
n_inliers = inliers.size
perc_inliers = (n_inliers * 100.0) / n_pnts
# Update best ellipse
best_ellipse = ellipse
if perc_inliers > max_perc_inliers:
if DEBUG: print('Break > maximum inlier percentage')
break
return best_ellipse
def FitEllipse_LeastSquares(pnts, roi, cfg):
'''
Simple least-squares ellipse fit to boundary points
Parameters
----
pnts : n x 2 array of integers
Candidate pupil-iris boundary points from edge detection
roi : 2D scalar array
Grayscale image of pupil-iris region for display only
cfg : configuration structure
Configuration parameters
Returns
----
best_ellipse : tuple of tuples
Best fitted ellipse parameters ((x0, y0), (a,b), theta)
'''
# Tiny circle init
best_ellipse = ((0,0),(1e-6,1e-6),0)
# Break if too few points to fit ellipse (RARE)
if pnts.shape[0] < 5:
return best_ellipse
# Call OpenCV ellipse fitting
best_ellipse = cv2.fitEllipse(pnts)
return best_ellipse
def EllipseError(pnts, ellipse):
"""
Ellipse fit error function
"""
# Suppress divide-by-zero warnings
np.seterr(divide='ignore')
# Calculate algebraic distances and gradients of all points from fitted ellipse
distance, grad, absgrad, normgrad = ConicFunctions(pnts, ellipse)
# Calculate error from distance and gradient
# See Swirski et al 2012
# TODO : May have to use distance / |grad|^0.45 - see Swirski source
# Gradient array has x and y components in rows (see ConicFunctions)
err = distance / absgrad
return err
def EllipseNormError(pnts, ellipse):
"""
Error normalization factor, alpha
Normalizes cost to 1.0 at point 1 pixel out from minor vertex along minor axis
"""
# Ellipse tuple has form ( ( x0, y0), (bb, aa), phi_b_deg) )
# Where aa and bb are the major and minor axes, and phi_b_deg
# is the CW x to minor axis rotation in degrees
(x0,y0), (bb,aa), phi_b_deg = ellipse
# Semiminor axis
b = bb/2
# Convert phi_b from deg to rad
phi_b_rad = phi_b_deg * np.pi / 180.0
# Minor axis vector
bx, by = np.cos(phi_b_rad), np.sin(phi_b_rad)
# Point one pixel out from ellipse on minor axis
p1 = np.array( (x0 + (b + 1) * bx, y0 + (b + 1) * by) ).reshape(1,2)
# Error at this point
err_p1 = EllipseError(p1, ellipse)
# Errors at provided points
err_pnts = EllipseError(pnts, ellipse)
return err_pnts / err_p1
def EllipseSupport(pnts, ellipse, dIdx, dIdy):
"""
Ellipse support function
"""
if pnts.size < 5:
return -np.inf
# Return sum of (grad Q . grad image) over point set
return EllipseImageGradDot(pnts, ellipse, dIdx, dIdy).sum()
def EllipseImageGradDot(pnts, ellipse, dIdx, dIdy):
# Calculate normalized grad Q at inlier pnts
distance, grad, absgrad, normgrad = ConicFunctions(pnts, ellipse)
# Extract vectors of x and y values
x, y = pnts[:,0], pnts[:,1]
# Extract image gradient at inlier points
dIdx_pnts = dIdx[y,x]
dIdy_pnts = dIdy[y,x]
# Construct intensity gradient array (2 x N)
gradI = np.array( (dIdx_pnts, dIdy_pnts) )
# Calculate the sum of the column-wise dot product of normgrad and gradI
# http://stackoverflow.com/questions/6229519/numpy-column-wise-dot-product
return np.einsum('ij,ij->j', normgrad, gradI)
#---------------------------------------------
# Ellipse Math
#---------------------------------------------
def Geometric2Conic(ellipse):
"""
Geometric to conic parameter conversion
References
----
Adapted from Swirski's ConicSection.h
https://bitbucket.org/Leszek/pupil-tracker/
"""
# Ellipse tuple has form ( ( x0, y0), (bb, aa), phi_b_deg) )
# Where aa and bb are the major and minor axes, and phi_b_deg
# is the CW x to minor axis rotation in degrees
(x0,y0), (bb, aa), phi_b_deg = ellipse
# Semimajor and semiminor axes
a, b = aa/2, bb/2
# Convert phi_b from deg to rad
phi_b_rad = phi_b_deg * np.pi / 180.0
# Major axis unit vector
ax, ay = -np.sin(phi_b_rad), np.cos(phi_b_rad)
# Useful intermediates
a2 = a*a
b2 = b*b
#
# Conic parameters
#
if a2 > 0 and b2 > 0:
A = ax*ax / a2 + ay*ay / b2;
B = 2*ax*ay / a2 - 2*ax*ay / b2;
C = ay*ay / a2 + ax*ax / b2;
D = (-2*ax*ay*y0 - 2*ax*ax*x0) / a2 + (2*ax*ay*y0 - 2*ay*ay*x0) / b2;
E = (-2*ax*ay*x0 - 2*ay*ay*y0) / a2 + (2*ax*ay*x0 - 2*ax*ax*y0) / b2;
F = (2*ax*ay*x0*y0 + ax*ax*x0*x0 + ay*ay*y0*y0) / a2 + (-2*ax*ay*x0*y0 + ay*ay*x0*x0 + ax*ax*y0*y0) / b2 - 1;
else:
# Tiny dummy circle - response to a2 or b2 == 0 overflow warnings
A,B,C,D,E,F = (1,0,1,0,0,-1e-6)
# Compose conic parameter array
conic = np.array((A,B,C,D,E,F))
return conic
def Conic2Geometric(conic):
"""
Merge geometric parameter functions from van Foreest code
References
----
http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
# Extract modified conic parameters
A,B,C,D,E,F = conic[0], conic[1]/2, conic[2], conic[3]/2, conic[4]/2, conic[5]
# Useful intermediates
dAC = A-C
Z = np.sqrt( 1 + 4*B*B/(dAC*dAC) )
# Center
num = B * B - A * C
x0 = (C * D - B * E) / num
y0 = (A * E - B * D) / num
# Axis lengths
up = 2 * (A*E*E + C*D*D + F*B*B - 2*B*D*E - A*C*F)
down1 = (B*B-A*C) * ( -dAC*Z - (C+A) )
down2 = (B*B-A*C) * ( dAC*Z - (C+A) )
b, a = np.sqrt(up/down1), np.sqrt(up/down2)
# Minor axis rotation angle in degrees (CW from x axis, origin upper left)
phi_b_deg = 0.5 * np.arctan(2 * B / dAC) * 180.0 / np.pi
# Note OpenCV ellipse parameter format (full axes)
return (x0,y0), (2*b, 2*a), phi_b_deg
def ConicFunctions(pnts, ellipse):
"""
Calculate various conic quadratic curve support functions
General 2D quadratic curve (biquadratic)
Q = Ax^2 + Bxy + Cy^2 + Dx + Ey + F
For point on ellipse, Q = 0, with appropriate coefficients
Parameters
----
pnts : n x 2 array of floats
ellipse : tuple of tuples
Returns
----
distance : array of floats
grad : array of floats
absgrad : array of floats
normgrad : array of floats
References
----
Adapted from Swirski's ConicSection.h
https://bitbucket.org/Leszek/pupil-tracker/
"""
# Suppress invalid values
np.seterr(invalid='ignore')
# Convert from geometric to conic ellipse parameters
conic = Geometric2Conic(ellipse)
# Row vector of conic parameters (Axx, Axy, Ayy, Ax, Ay, A1) (1 x 6)
C = np.array(conic)
# Extract vectors of x and y values
x, y = pnts[:,0], pnts[:,1]
# Construct polynomial array (6 x n)
X = np.array( ( x*x, x*y, y*y, x, y, np.ones_like(x) ) )
# Calculate Q/distance for all points (1 x n)
distance = C.dot(X)
# Quadratic curve gradient at (x,y)
# Analytical grad of Q = Ax^2 + Bxy + Cy^2 + Dx + Ey + F
# (dQ/dx, dQ/dy) = (2Ax + By + D, Bx + 2Cy + E)
# Construct conic gradient coefficients vector (2 x 3)
Cg = np.array( ( (2*C[0], C[1], C[3]), (C[1], 2*C[2], C[4]) ) )
# Construct polynomial array (3 x n)
Xg = np.array( (x, y, np.ones_like(x) ) )
# Gradient array (2 x n)
grad = Cg.dot(Xg)
# Normalize gradient -> unit gradient vector
# absgrad = np.apply_along_axis(np.linalg.norm, 0, grad)
absgrad = np.sqrt(np.sqrt(grad[0,:]**2 + grad[1,:]**2))
normgrad = grad / absgrad
return distance, grad, absgrad, normgrad
def Eccentricity(ellipse):
'''
Calculate eccentricity of an ellipse
'''
# Ellipse tuple has form ( ( x0, y0), (bb, aa), phi_b_deg) )
# Where aa and bb are the major and minor axes, and phi_b_deg
# is the CW x to minor axis rotation in degrees
(x0,y0), (bb, aa), phi_b_deg = ellipse
return np.sqrt(1 - (bb/aa)**2)
def OverlayRANSACFit(img, all_pnts, inlier_pnts, ellipse):
"""
NOTE
----
All points are (x,y) pairs, but arrays are (row, col) so swap
coordinate ordering for correct positioning in array
"""
# Overlay all pnts in red
for col,row in all_pnts:
img[row,col] = [0,0,255]
# Overlay inliers in green
for col,row in inlier_pnts:
img[row,col] = [0,255,0]
# Overlay inlier fitted ellipse in yellow
cv2.ellipse(img, ellipse, (0,255,255), 1)
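# Minimal synthetic smoke test (not part of the original module): sample points from a
# known ellipse and recover it with the simple least-squares fit above.
if __name__ == '__main__':
    theta = np.linspace(0, 2 * np.pi, 100)
    pnts = np.stack((64 + 20 * np.cos(theta), 64 + 12 * np.sin(theta)), axis=1).astype(np.int32)
    roi = np.zeros((128, 128), dtype=np.uint8)
    # cfg is unused by FitEllipse_LeastSquares, so None is safe for this quick check
    print(FitEllipse_LeastSquares(pnts, roi, None))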
```
#### File: mrgaze/mrgaze/pipeline.py
```python
import os
import sys
from mrgaze import utils, pupilometry, calibrate, report, config
def RunBatch(data_dir=[]):
"""
Run the gaze tracking pipeline over all sessions within a data directory
"""
# Default data directory
if not data_dir:
print('* No data directory provided - exiting')
return False
# Check for missing directories
if not os.path.isdir(data_dir):
print('* Data directory does not exist - exiting')
sys.exit(1)
# Loop over all subject subdirectories of the data directory
for subj_sess in next(os.walk(data_dir))[1]:
# Run single-session pipeline
RunSingle(data_dir, subj_sess)
# Clean exit
return True
def RunSingle(data_dir, subj_sess):
"""
Run the gaze tracking pipeline on a single gaze tracking session
"""
print('')
print('Running single-session pipeline : ' + subj_sess)
if not data_dir or not subj_sess:
print('* Data or subject/session directory not provided - returning')
return False
# Subject/session directory name
ss_dir = os.path.join(data_dir, subj_sess)
# Video and results directory names for this subject/session
ss_vid_dir = os.path.join(ss_dir, 'videos')
ss_res_dir = os.path.join(ss_dir, 'results')
# Load configuration from root directory or subj/sess video dir
# If no config file exists, a default root config is created
cfg = config.LoadConfig(data_dir, subj_sess)
if not cfg:
print('* Configuration file missing - returning')
return False
# Extract operational flags from config
do_cal = cfg.getboolean('CALIBRATION', 'calibrate')
# Run pipeline if video directory present
if os.path.isdir(ss_vid_dir):
# Create results subj/sess dir
utils._mkdir(ss_res_dir)
print('')
print(' Calibration Pupilometry')
print(' -----------------------')
pupilometry.VideoPupilometry(data_dir, subj_sess, 'cal', cfg)
if do_cal:
print(' Create calibration model')
C, central_fix = calibrate.AutoCalibrate(ss_res_dir, cfg)
if not C.any():
print('* Empty calibration matrix detected - skipping')
return False
print('')
print(' Gaze Pupilometry')
print(' -----------------------')
pupilometry.VideoPupilometry(data_dir, subj_sess, 'gaze', cfg)
if do_cal:
print(' Calibrate pupilometry')
calibrate.ApplyCalibration(ss_dir, C, central_fix, cfg)
print('')
print(' Generate Report')
print(' ---------------')
report.WriteReport(ss_dir, cfg)
else:
print('%s does not exist - skipping' % ss_vid_dir)
print('')
print('Completed single-session pipeline')
return True
```
#### File: mrgaze/testing/test_fitellipse.py
```python
import cv2
import numpy as np
import configparser
from mrgaze import pupilometry, media, config
def main():
# Setup default config structure
print('Initializing configuration')
cfg = configparser.ConfigParser()
cfg = config.InitConfig(cfg)
# Update defaults
cfg.set('VIDEO','downsampling','1')
cfg.set('PUPILSEG','method','otsu')
cfg.set('PUPILSEG','thresholdperc','50.0')
cfg.set('PUPILSEG','pupildiameterperc','15.0')
cfg.set('PUPILSEG','sigma','0.0')
cfg.set('PUPILFIT','method','ROBUST_LSQ')
cfg.set('PUPILFIT','maxrefinements','5')
# Load test eye tracking frame
print('Loading test frame')
test_frame = '/Users/jmt/GitHub/mrgaze/testing/CBIC_Example_2.png'
frame = media.LoadImage(test_frame, cfg)
# Init ROI to whole frame
# Note (col, row) = (x, y) for shape
x0, x1, y0, y1 = 0, frame.shape[1], 0, frame.shape[0]
# Define ROI rect
roi_rect = (x0,y0),(x1,y1)
# Extract pupil ROI (note row,col indexing of image array)
roi = frame[y0:y1,x0:x1]
# Find glint(s) in frame
glints, glints_mask, roi_noglints = pupilometry.FindGlints(roi, cfg)
# Segment pupil intelligently - also return glint mask
print('Segmenting pupil')
pupil_bw, roi_rescaled = pupilometry.SegmentPupil(roi, cfg)
# Create composite image of various stages of segmentation
strip_bw = np.hstack((roi, pupil_bw * 255, glints_mask * 255, roi_rescaled))
# Init montage
montage_rgb = np.array([])
# Fit ellipse to pupil boundary - returns ellipse ROI
for method in ('RANSAC_SUPPORT','RANSAC','ROBUST_LSQ','LSQ'):
print('Fitting pupil ellipse : %s' % method)
cfg.set('PUPILFIT','method',method)
eroi = pupilometry.FitPupil(pupil_bw, roi, cfg)
# Construct pupil ellipse tuple
pupil_ellipse = (eroi[0][0], eroi[0][1]), eroi[1], eroi[2]
# TODO: find best glint candidate in glint mask
glint = pupilometry.FindBestGlint(glints_mask, pupil_ellipse)
# RGB version of preprocessed frame for output video
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
# Create RGB overlay of pupilometry on ROI
frame_rgb = pupilometry.OverlayPupil(frame_rgb, pupil_ellipse, roi_rect, glint)
if montage_rgb.size == 0:
montage_rgb = frame_rgb
else:
montage_rgb = np.hstack((montage_rgb, frame_rgb))
cv2.imshow('Segmentation', strip_bw)
cv2.imshow('Pupilometry', montage_rgb)
cv2.waitKey()
print('Done')
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
``` |
{
"source": "jmtyszka/mrikit",
"score": 2
} |
#### File: jmtyszka/mrikit/mp2rage_T1w.py
```python
__version__ = '0.1.0'
import sys
import argparse
import numpy as np
import nibabel as nb
from skimage.filters import threshold_otsu, gaussian
from numpy import logical_or
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='Reconstruct T1w image from MP2RAGE data')
parser.add_argument('-i1', '--inv1', help='MP2RAGE INV1 image filename')
parser.add_argument('-i2', '--inv2', help='MP2RAGE INV2 image filename')
parser.add_argument('-u', '--unified', help='MP2RAGE UNI image filename')
parser.add_argument('-o', '--outname', help='Output T1w image filename')
# Parse command line arguments
args = parser.parse_args()
uni_fname = args.unified
inv1_fname = args.inv1
inv2_fname = args.inv2
t1w_fname = args.outname
print('Loading UNI image (%s)' % uni_fname)
try:
uni_nii = nb.load(uni_fname)
uni = uni_nii.get_data()
except:
print('* Problem loading %s - exiting' % uni_fname)
sys.exit(1)
print('Loading INV1 image (%s)' % inv1_fname)
try:
inv1_nii = nb.load(inv1_fname)
inv1 = inv1_nii.get_data()
except:
print('* Problem loading %s - exiting' % inv1_fname)
sys.exit(1)
print('Loading INV2 image (%s)' % inv2_fname)
try:
inv2_nii = nb.load(inv2_fname)
inv2 = inv2_nii.get_data()
except:
print('* Problem loading %s - exiting' % inv2_fname)
sys.exit(1)
print('')
print('Starting T1w image recon')
# Hardwired Otsu threshold scale factor
otsu_sf = 0.33
# Otsu threshold INV1 and INV2 images
inv1_th = threshold_otsu(inv1) * otsu_sf
print(' INV1 Otsu threshold : %0.1f' % inv1_th)
inv1_mask = inv1 > inv1_th
# Otsu threshold INV1 and INV2 images
inv2_th = threshold_otsu(inv2) * otsu_sf
print(' INV2 Otsu threshold : %0.1f' % inv2_th)
inv2_mask = inv2 > inv2_th
# Combine INV1 and INV2 masks
print(' Combining INV masks')
inv12_mask = logical_or(inv1_mask, inv2_mask)
# Feather combined mask by one pixel (Gaussian blur)
print(' Feathering mask')
inv12_mask = gaussian(inv12_mask, 1.0)
# Multiply UNI image by feathered mask
print(' Applying mask to UNI image')
t1w = uni * inv12_mask
# Save T1w image
print('')
print('Saving T1w image to %s' % t1w_fname)
t1w_nii = nb.Nifti1Image(t1w, uni_nii.affine)
t1w_nii.to_filename(t1w_fname)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
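# Example invocation (the file names below are hypothetical):
#   python mp2rage_T1w.py -i1 inv1.nii.gz -i2 inv2.nii.gz -u uni.nii.gz -o T1w.nii.gz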
``` |
{
"source": "jmtyszka/pydeface",
"score": 2
} |
#### File: jmtyszka/pydeface/setup.py
```python
descr = """pydeface: deidentify structural MRI data by voxelizing the face"""
import os
from setuptools import setup
import glob
DISTNAME="pydeface"
DESCRIPTION=descr
MAINTAINER='<NAME>'
MAINTAINER_EMAIL='<EMAIL>'
LICENSE='MIT'
URL='http://evendim.sites.caltech.edu'
DOWNLOAD_URL='https://github.com/jmtyszka/pydeface/'
VERSION='2020.4.3'
def check_dependencies():
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
needed_deps = ["numpy", "nibabel", "nipype"]
missing_deps = []
for dep in needed_deps:
try:
__import__(dep)
except ImportError:
missing_deps.append(dep)
if missing_deps:
raise ImportError("Missing dependencies: %s" % missing_deps)
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
import sys
if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'egg_info',
'clean'))):
check_dependencies()
datafiles = {'pydeface': ['data/ConteCore2_50_T1w_2mm.nii.gz',
'data/ConteCore2_50_T1w_2mm_deface_mask.nii.gz',
'ident.mat']}
setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
url=URL,
download_url=DOWNLOAD_URL,
packages=['pydeface'],
package_data = datafiles,
scripts=['scripts/pydeface.py'],
classifiers=['Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
install_requires=['nipype',
'numpy'],
)
``` |
{
"source": "JMU2021/DESRGAN",
"score": 3
} |
#### File: codes/models/gradient_loss.py
```python
import torch
import torch.nn as nn
from kornia.filters import laplacian
class GradientLoss(nn.Module):
def __init__(self, gradient_type, horizontal, vertical, loss_type, device):
super(GradientLoss, self).__init__()
self.gradient_type = gradient_type
self.horizontal = horizontal
self.vertical = vertical
self.device = device
if loss_type == 'l1':
self.loss = torch.nn.L1Loss(reduction='sum').to(device)
elif loss_type == 'l2':
self.loss = torch.nn.MSELoss(reduction='sum').to(device)
else:
raise NotImplementedError('Loss type [{:s}] not recognized.'.format(loss_type))
return
# When horizontal = 3, vertical = 3:
# +────+────+────+
# | 00 | 10 | 20 |
# +────+────+────+
# | 01 | 11 | 21 |
# +────+────+────+
# | 02 | 12 | 22 |
# +────+────+────+
# allow_lost_pixels: when the pixel count does not divide evenly by the grid, controls whether the leftover pixels are dropped or folded into the last block (False folds them into the last block)
def crop_image_to_grid(self, image, horizontal, vertical, allow_lost_pixels=False):
# Input image: [Channel * Height * Width]
image_height = image.shape[1]
image_width = image.shape[2]
image_whc = image.permute(1, 2, 0)  # CHW -> HWC, preserving spatial layout
horizontal_block_pixels = image_width // horizontal # width of each cropped block
vertical_block_pixels = image_height // vertical # height of each cropped block
is_horizontal_average = horizontal_block_pixels * horizontal == image_width
is_vertical_average = vertical_block_pixels * vertical == image_height
cropped_images = {}
for i in range(horizontal):
for j in range(vertical):
left = i * horizontal_block_pixels
up = j * vertical_block_pixels
if (not allow_lost_pixels) and (not is_horizontal_average) and i == horizontal - 1:
right = image_width
else:
right = left + horizontal_block_pixels
if (not allow_lost_pixels) and (not is_vertical_average) and j == vertical - 1:
bottom = image_height
else:
bottom = up + vertical_block_pixels
cropped_block = image_whc[up:bottom, left:right]
cropped_images[str(i) + '|' + str(j)] = cropped_block.permute(2, 0, 1)  # HWC -> CHW
return cropped_images
def forward(self, ground_truth_batch, segmented_print_batch):
# Input image: [Batch * Channel * Height * Width]
if self.gradient_type == 'laplace':
ground_truth_gradient_batch = laplacian(ground_truth_batch, kernel_size=3)
segmented_print_gradient_batch = laplacian(segmented_print_batch, kernel_size=3)
# elif self.gradient_type == 'sobel_x':
# pass
# elif self.gradient_type == 'sobel_y':
# pass
else:
raise NotImplementedError('Gradient type [{:s}] not recognized.'.format(self.gradient_type))
loss = torch.tensor(.0).to(self.device)
for i in range(len(ground_truth_batch)):
ground_truth_gradient = ground_truth_gradient_batch[i]
segmented_print_gradient = segmented_print_gradient_batch[i]
ground_truth_gradient_cropped = self.crop_image_to_grid(ground_truth_gradient, self.horizontal,
self.vertical)
segmented_print_gradient_cropped = self.crop_image_to_grid(segmented_print_gradient, self.horizontal,
self.vertical)
max_key = list(ground_truth_gradient_cropped.keys())[0]
max_loss = self.loss(ground_truth_gradient_cropped[max_key], segmented_print_gradient_cropped[max_key])
for key in ground_truth_gradient_cropped.keys():
ground_truth_image_gradient_cropped_tensor = ground_truth_gradient_cropped[key]
segmented_print_image_gradient_cropped_tensor = segmented_print_gradient_cropped[key]
loss_value = self.loss(ground_truth_image_gradient_cropped_tensor,
segmented_print_image_gradient_cropped_tensor)
if loss_value > max_loss:
max_key = key
max_loss = loss_value
loss += self.loss(ground_truth_gradient_cropped[max_key].to(self.device),
segmented_print_gradient_cropped[max_key].to(self.device)).to(self.device)
return loss
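# Minimal smoke test (not part of the original module): random tensors stand in for a
# ground-truth batch and a super-resolved batch, run on CPU.
if __name__ == '__main__':
    _device = torch.device('cpu')
    _criterion = GradientLoss('laplace', 3, 3, 'l1', _device)
    _gt = torch.rand(2, 3, 48, 48)
    _sr = torch.rand(2, 3, 48, 48)
    print(_criterion(_gt, _sr))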
``` |
{
"source": "jmucha90/Stock-Analyzer",
"score": 3
} |
#### File: jmucha90/Stock-Analyzer/linearregressmodel.py
```python
import yfinance as yf
import pandas as pd
from sklearn.linear_model import LinearRegression
import plotly.graph_objects as go
class lrmodel:
'''
Building class to develop the linear regression model to be used in the application.
'''
def __init__(self, ticker):
self.ticker = ticker
self.info = yf.Ticker(str(ticker)).info
# Gets the stock historical data and ensures that no non-numerical data exists
def history(self, ticker):
ticker = ticker
stock_history = yf.Ticker(str(ticker)).history(period='max')
if stock_history.isnull().values.any():
issues = stock_history[stock_history.isnull().any(axis=1)]
issue_index = []
for issue in issues.index:
if issue not in issue_index:
issue_index.append(issue)
stock_history.drop([issue], inplace = True)
return stock_history
else:
return stock_history
# Builds the linear regression model
def linearregression(self, ticker):
stockdata = self.history(ticker)
num_train_vals = round(len(stockdata) * .80)
model = LinearRegression()
X_train = stockdata[:num_train_vals]
X_train = X_train.drop('Close', axis=1)
X_test = stockdata[num_train_vals:]
X_test = X_test.drop('Close', axis=1)
y_train = stockdata[:num_train_vals]
y_train = y_train[['Close']]
y_test = stockdata[num_train_vals:]
y_test = y_test[['Close']]
model.fit(X_train, y_train)
trainingpredictions = pd.DataFrame()
trainingpredictions['Predictions'] = model.predict(X_train).flatten()
trainingpredictions.index = X_train.index
testpredictions = pd.DataFrame()
testpredictions['Predictions'] = model.predict(X_test).flatten()
testpredictions.index = X_test.index
return testpredictions
# Graphs the results of the linear regression on a Plotly graph
def graphlrresults(self, ticker):
stockdata = self.history(ticker)
num_train_vals = round(len(stockdata) * .80)
actualdatafortrain = stockdata[:num_train_vals]
actualdatafortest = stockdata[num_train_vals:]
testpredictions = self.linearregression(ticker)
predfig = go.Figure()
# Create and style traces
predfig.add_trace(go.Scatter(x=actualdatafortrain.index, y=actualdatafortrain['Close'], name='Actual Training Set Price',
line=dict(color='#003300', width=2)))
predfig.add_trace(go.Scatter(x=actualdatafortest.index, y=actualdatafortest['Close'], name='Actual Test Set Price',
line=dict(color='#1A0099', width=2)))
predfig.add_trace(go.Scatter(x=testpredictions.index, y=testpredictions['Predictions'], name='Test Predictions',
line=dict(color='#CC0000', width=2)))
predfig.update_layout(title=str(ticker) + '\'s Stock Price Predicted by Linear Regression',
xaxis_title='Date',
yaxis_title='USD')
fig = predfig.show()
return fig
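# Example usage (not part of the original class): requires network access for yfinance;
# 'AAPL' is an arbitrary example ticker.
if __name__ == '__main__':
    analyzer = lrmodel('AAPL')
    print(analyzer.linearregression('AAPL').tail())
    analyzer.graphlrresults('AAPL')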
``` |
{
"source": "jmuchovej/paperpile-notion",
"score": 2
} |
#### File: paperpile_notion/utils/notion.py
```python
from typing import List, Dict
from functools import partial
import click
from notion_client import Client
from paperpile_notion import models
def retrieve_dbs(ctx: click.Context) -> None:
notion = ctx.obj["notion"]
config = ctx.obj["config"]
errmsg = partial(click.secho, fg="red", bold=True)
try:
assert len(notion.databases.list()["results"]) > 0
except AssertionError:
errmsg("Please follow the steps to add your Integration to the Database.")
click.echo("Retrieving database(s) ... ", nl=False)
dbs = [
[db["id"], db["title"][0]["plain_text"]]
for db in notion.databases.list()["results"]
]
try:
# fmt: off
authors = next(filter(lambda db: config["db"]["authors" ] in db, dbs))[0]
articles = next(filter(lambda db: config["db"]["articles"] in db, dbs))[0]
ctx.obj["articles-cls"] = models.RelationalArticle
# fmt: on
except KeyError:
authors = None
articles = next(filter(lambda db: config["db"]["articles"] in db, dbs))[0]
ctx.obj["articles-cls"] = models.MultiSelectArticle
except (IndexError, StopIteration):
errmsg("Your Integration must have access to your database(s).")
exit(2)
# fmt: off
ctx.obj["authors" ] = authors
ctx.obj["articles"] = articles
ctx.obj["authors-id"] = authors
ctx.obj["articles-id"] = articles
# fmt: on
click.secho("done.", fg="green", bold=True)
return ctx
def paginate_db(notion: Client, id: str) -> List[Dict]:
"""Paginates the database, since Notion returns <= 100 records per query."""
query = partial(notion.databases.query, id)
db = query()
index = db["results"]
while db["has_more"]:
db = query(start_cursor=db["next_cursor"])
index += db["results"]
return index
def db_to_dict(cls, key: str, items: List, uniq: str) -> Dict:
"""Converts a paginated database into a dictionary with keys for Notion's internal
ID and the record itself.
"""
attrs = [
{"notionID": item["id"], key: models.from_props(cls, item["properties"])}
for item in items
]
return {getattr(item[key], uniq).string: item for item in attrs}
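# Minimal usage sketch (not from the original module; the token, database id, and the
# "Name" title property are placeholder assumptions):
# client = Client(auth="<integration-token>")
# records = paginate_db(client, "<database-id>")
# articles = db_to_dict(models.MultiSelectArticle, "article", records, "Name")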
``` |
{
"source": "JMU-CIME/CPR-Music-Backend",
"score": 2
} |
#### File: assignments/tests/test_drf_views.py
```python
import pytest
from django.test import RequestFactory
from teleband.assignments.api.views import AssignmentViewSet
from teleband.courses.models import Enrollment
pytestmark = pytest.mark.django_db
class TestAssignmentViewSet:
def test_get_queryset_student(self, enrollment: Enrollment, rf: RequestFactory):
view = AssignmentViewSet()
enrollment.role.name = "Student"
enrollment.role.save()
request = rf.get("/fake-url/")
request.user = enrollment.user
view.request = request
setattr(view, "kwargs", {"course_slug_slug": enrollment.course.slug})
queryset = view.get_queryset()
# actually there is nothing in the queryset, need
# to populate it with some assignments for this student
# and some other students to actually check this
# Make sure every assignment is assigned to me and only me
for assignment in queryset:
assert enrollment.user == assignment.enrollment.user
```
#### File: courses/migrations/0005_data_migration_demo_course.py
```python
from django.db import migrations
from datetime import date
def demo_course(apps, schema_editor):
User = apps.get_model("users", "User")
# how to import this from the other migrations file?
michael = User.objects.filter(email="<EMAIL>")[0]
alden = User.objects.filter(email="<EMAIL>")[0]
dave = User.objects.filter(email="<EMAIL>")[0]
Course = apps.get_model("courses", "course")
sixth_grade = Course.objects.update_or_create(
name="6th Grade Band",
owner=dave,
start_date=date(2022, 1, 9),
end_date=date(2022, 6, 9),
slug="6th-grade-band",
)[0]
Instrument = apps.get_model("instruments", "Instrument")
trombone = Instrument.objects.filter(name="Trombone")[0]
Role = apps.get_model("users", "Role")
teacher = Role.objects.filter(name="Teacher")[0]
student = Role.objects.filter(name="Student")[0]
Enrollment = apps.get_model("courses", "enrollment")
Enrollment.objects.update_or_create(user=dave, course=sixth_grade, role=teacher)
Enrollment.objects.update_or_create(
user=michael, course=sixth_grade, instrument=trombone, role=student
)
Enrollment.objects.update_or_create(
user=alden, course=sixth_grade, instrument=trombone, role=student
)
class Migration(migrations.Migration):
dependencies = [
("courses", "0004_alter_enrollment_unique_together"),
("users", "0004_data_migration_demo_users"),
]
operations = [migrations.RunPython(demo_course, migrations.RunPython.noop)]
```
#### File: instruments/migrations/0002_data_migration_seed_transpositions.py
```python
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Transposition = apps.get_model("instruments", "Transposition")
Transposition.objects.update_or_create(
name="Alto Clef",
)
Transposition.objects.update_or_create(
name="Bb",
)
Transposition.objects.update_or_create(
name="Concert Pitch BC",
)
Transposition.objects.update_or_create(
name="Concert Pitch BC 8vb",
)
Transposition.objects.update_or_create(
name="Concert Pitch TC",
)
Transposition.objects.update_or_create(
name="Concert Pitch TC 8va",
)
Transposition.objects.update_or_create(
name="Eb",
)
Transposition.objects.update_or_create(
name="F",
)
class Migration(migrations.Migration):
dependencies = [
("instruments", "0001_initial"),
]
operations = [migrations.RunPython(update_site_forward, migrations.RunPython.noop)]
```
#### File: instruments/migrations/0003_data_migration_seed_instruments.py
```python
from django.db import migrations
def initial_instruments(apps, schema_editor):
Transposition = apps.get_model("instruments", "Transposition")
transpositions = {elem.name: elem for elem in Transposition.objects.all()}
Instrument = apps.get_model("instruments", "Instrument")
Instrument.objects.update_or_create(
name="Viola",
transposition=transpositions["Alto Clef"],
)
Instrument.objects.update_or_create(
name="Baritone TC",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bass Clarinet",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bb Clarinet",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bb Cornet",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bb Soprano Saxophone",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bb Tenor Saxophone",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bb Trumpet",
transposition=transpositions["Bb"],
)
Instrument.objects.update_or_create(
name="Bassoon",
transposition=transpositions["Concert Pitch BC"],
)
Instrument.objects.update_or_create(
name="Cello",
transposition=transpositions["Concert Pitch BC"],
)
Instrument.objects.update_or_create(
name="Euphonium BC",
transposition=transpositions["Concert Pitch BC"],
)
Instrument.objects.update_or_create(
name="Trombone",
transposition=transpositions["Concert Pitch BC"],
)
Instrument.objects.update_or_create(
name="Bass",
transposition=transpositions["Concert Pitch BC 8vb"],
)
Instrument.objects.update_or_create(
name="Bass Trombone",
transposition=transpositions["Concert Pitch BC 8vb"],
)
Instrument.objects.update_or_create(
name="Contrabassoon",
transposition=transpositions["Concert Pitch BC 8vb"],
)
Instrument.objects.update_or_create(
name="String Bass",
transposition=transpositions["Concert Pitch BC 8vb"],
)
Instrument.objects.update_or_create(
name="Tuba",
transposition=transpositions["Concert Pitch BC 8vb"],
)
Instrument.objects.update_or_create(
name="Oboe",
transposition=transpositions["Concert Pitch TC"],
)
Instrument.objects.update_or_create(
name="Percussion",
transposition=transpositions["Concert Pitch TC"],
)
Instrument.objects.update_or_create(
name="Piano",
transposition=transpositions["Concert Pitch TC"],
)
Instrument.objects.update_or_create(
name="Flute",
transposition=transpositions["Concert Pitch TC 8va"],
)
Instrument.objects.update_or_create(
name="Piccolo",
transposition=transpositions["Concert Pitch TC 8va"],
)
Instrument.objects.update_or_create(
name="Violin",
transposition=transpositions["Concert Pitch TC 8va"],
)
Instrument.objects.update_or_create(
name="Contrabass Clarinet",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="Eb Alto Clarinet",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="Eb Alto Horn",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="Eb Alto Saxophone",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="Eb Baritone Saxophone",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="Eb Soprano Clarinet",
transposition=transpositions["Eb"],
)
Instrument.objects.update_or_create(
name="F Horn",
transposition=transpositions["F"],
)
class Migration(migrations.Migration):
dependencies = [
("instruments", "0002_data_migration_seed_transpositions"),
]
operations = [migrations.RunPython(initial_instruments, migrations.RunPython.noop)]
```
#### File: musics/migrations/0002_seed_ensemble_type.py
```python
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
EnsembleType = apps.get_model("musics", "EnsembleType")
for name in ["Band", "Orchestra"]:
EnsembleType.objects.update_or_create(name=name)
class Migration(migrations.Migration):
dependencies = [
("musics", "0001_initial"),
]
operations = [migrations.RunPython(update_site_forward, migrations.RunPython.noop)]
```
#### File: musics/migrations/0006_legitimize_part_type.py
```python
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Part = apps.get_model("musics", "Part")
PartType = apps.get_model("musics", "PartType")
pt = PartType.objects.all().first()
for p in Part.objects.all():
p.part_type = pt
p.save()
class Migration(migrations.Migration):
dependencies = [
("musics", "0005_seed_part_types"),
]
operations = [migrations.RunPython(update_site_forward, migrations.RunPython.noop)]
```
#### File: musics/migrations/0013_assign_piece_slugs.py
```python
from django.db import migrations
from teleband.utils.fields import generate_slug_from_name
def update_site_forward(apps, schema_editor):
"""Compute slugs from names."""
Piece = apps.get_model("musics", "Piece")
for piece in Piece.objects.all():
generate_slug_from_name(piece, Piece)
piece.save()
class Migration(migrations.Migration):
dependencies = [
('musics', '0012_piece_slug'),
]
operations = [migrations.RunPython(update_site_forward, migrations.RunPython.noop)]
```
#### File: musics/migrations/0016_seed_pieces_later.py
```python
from django.db import migrations
import json
from teleband.musics.api.serializers import *
data = {
"name": "Air for Band",
"ensemble_type": "Band",
"parts": [
{
"name": "Air for Band Melody",
"part_type": "Melody",
"transpositions": [
{"transposition": "Bb"},
{"transposition": "Concert Pitch BC 8vb"},
{"transposition": "Concert Pitch BC"},
{"transposition": "Concert Pitch TC 8va"},
{"transposition": "Concert Pitch TC"},
{"transposition": "Eb"},
{"transposition": "F"},
],
},
{
"name": "Air for Band Bassline",
"part_type": "Bassline",
"transpositions": [
{"transposition": "Bb"},
{"transposition": "Concert Pitch BC 8vb"},
{"transposition": "Concert Pitch BC"},
{"transposition": "Concert Pitch TC 8va"},
{"transposition": "Concert Pitch TC"},
{"transposition": "Eb"},
{"transposition": "F"},
],
},
],
}
flatios = {
"Air for Band Melody": {
"Bb": {
"scoreId": "61e09029dffcd50014571a80",
"sharingKey": "<KEY>",
},
"Concert Pitch BC 8vb": {
"scoreId": "61e09029f7c4ec0013a88255",
"sharingKey": "da2261d04292fb3ec8562a5fd1ac167e70937ee5c531bf51ba01a49e7629e4df47c838c488e5b2e978d72a2885879787c1f2357b46b35abecc6f8308c943c35f",
},
"Concert Pitch BC": {
"scoreId": "61e0902a29718e0012080b97",
"sharingKey": "7399632fbfbc793448182049dead3b88d921d7fab4328b5b9f5c589e654374d1d12a86d2e5567df1723b33e517b392552371129a9302bda49930f3e8a28dd857",
},
"Concert Pitch TC 8va": {
"scoreId": "61e0902a58d51b001256f80c",
"sharingKey": "<KEY>",
},
"Concert Pitch TC": {
"scoreId": "61e0902a32669f0013f6f91d",
"sharingKey": "<KEY>",
},
"Eb": {
"scoreId": "61e0902a74bfb70013c4ea3e",
"sharingKey": "a88d609cd8c2224ccef9c043b03622ee11cd02390343eb9bbf3830b17929602ce138f7e623c99d9ebbbb291d9a415d62519ba5321b870be84c5fef49008f6d6e",
},
"F": {
"scoreId": "61e0902a1ffc3c00126dc83a",
"sharingKey": "7a9c959174769c7998d654218ba1d5c39054cf889f888e5b8eae871f2f4ccdca0d603a42266f45c875274263eb214cd517698bde86960dbdb088d4975bf33764",
},
},
"Air for Band Bassline": {
"Bb": {
"scoreId": "61e0902832669f0013f6f8cf",
"sharingKey": "176abdfc4283560ad234533e5a392e0ced4ea08f9f799a165dd950d66debf48daa293211ee11a56e98f1e62a6b6b12dd2d5d50085a866ea3910f9b721bf0cb11",
},
"Concert Pitch BC 8vb": {
"scoreId": "61e0902874bfb70013c4e9f0",
"sharingKey": "<KEY>",
},
"Concert Pitch BC": {
"scoreId": "61e090281ffc3c00126dc7d6",
"sharingKey": "b6a819a7636cfc5e7842417ce68d598725ca98b184b7997321ae15dd3b2a14f82622d249175d9fd308f00636cf3c7f28ad0a5dde6a309ca7ab7c37f01170404b",
},
"Concert Pitch TC 8va": {
"scoreId": "61e09028c9707c0013966a6d",
"sharingKey": "1bf46c7b7cf16bf5d06b9dbb9d822c54299ed092bc3f3a0d3209d0f776b23fe1dbb49a49d7d7b97f9fad460c2c0ea42cd1c572eced32e734c28f26f8f064d989",
},
"Concert Pitch TC": {
"scoreId": "61e090281cad9a0012110fa6",
"sharingKey": "<KEY>",
},
"Eb": {
"scoreId": "61e090290f63e200125c8da7",
"sharingKey": "6609d33e30426aeaf95279321291501cc8296da784b6c96fc2bdd6fdad968ed94faa3508e1b3bb609a537d9be3246a63b8d6c38273814a228728f6765de7208f",
},
"F": {
"scoreId": "61e090290bd92f0012185daa",
"sharingKey": "224061dc8f008b68e3f1ffb1d9de357a88f655557fd26b735f6f67761521195c51368775ab30b9e82d1f03f08d23611ff84796d198e4ba5b09dc1d458202de26",
},
},
}
def update_site_forward(apps, schema_editor):
Piece = apps.get_model("musics", "Piece")
if Piece.objects.filter(name="Air for Band").exists():
return
for part in data["parts"]:
for t in part["transpositions"]:
t["flatio"] = json.dumps(flatios[part["name"]][t["transposition"]])
serializer = PieceCreateSerializer(data=data)
serializer.is_valid()
serializer.create(serializer.validated_data)
class Migration(migrations.Migration):
dependencies = [
('musics', '0015_auto_20220206_2027'),
]
operations = [migrations.RunPython(update_site_forward, migrations.RunPython.noop)]
```
#### File: submissions/api/serializers.py
```python
from rest_framework import serializers
from teleband.submissions.models import Grade, Submission, SubmissionAttachment
from teleband.assignments.api.serializers import AssignmentSerializer
class SubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = Submission
fields = ["id", "submitted", "content"]
# extra_kwargs = {
# "assignment": {"view_name": "api:assignment-detail", "lookup_field": "id"},
# }
class AttachmentSerializer(serializers.ModelSerializer):
class Meta:
model = SubmissionAttachment
fields = ["id", "file", "submitted"]
class GradeSerializer(serializers.ModelSerializer):
class Meta:
model = Grade
fields = ["id", "rhythm", "tone", "expression", "created_at", "grader", "submission"]
class TeacherSubmissionSerializer(serializers.ModelSerializer):
attachments = AttachmentSerializer(read_only=True, many=True)
assignment = AssignmentSerializer()
grades = GradeSerializer(many=True)
def get_attachments(self, queryset):
print(queryset)
return None
class Meta:
model = Submission
fields = ["id", "assignment", "submitted", "content", "attachments", "grades"]
# extra_kwargs = {
# "assignment": {"view_name": "api:assignment-detail", "lookup_field": "id"},
# }
```
#### File: users/api/views.py
```python
import collections
import csv
from io import StringIO
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from rest_framework import permissions
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, UpdateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from invitations.utils import get_invitation_model
from invitations.exceptions import AlreadyAccepted, AlreadyInvited, UserRegisteredEmail
from invitations.forms import CleanEmailMixin
from .serializers import UserSerializer, UserInstrumentSerializer
from teleband.courses.models import Enrollment, Course
User = get_user_model()
Invitation = get_invitation_model()
class IsRelevantTeacherUpdate(permissions.IsAuthenticated):
def has_object_permission(self, request, view, obj):
if view.action not in ["update", "partial_update"]:
return True
# only permissible if request.user is a teacher of obj in any existing class
return Enrollment.objects.filter(
user=obj,
course__in=Course.objects.filter(
enrollment__user=request.user, enrollment__role__name="Teacher"
),
role__name="Student",
).exists()
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
lookup_field = "username"
permission_classes = [IsRelevantTeacherUpdate & permissions.IsAuthenticated]
def get_queryset(self, *args, **kwargs):
if self.action in ["update", "partial_update"]:
return self.queryset.filter(
enrollment__course__in=[
e.course
for e in Enrollment.objects.filter(
user__username="admin", role__name="Teacher"
)
]
)
assert isinstance(self.request.user.id, int)
return self.queryset.filter(id=self.request.user.id)
def get_serializer_class(self):
if self.action in ["update", "partial_update"]:
return UserInstrumentSerializer
return self.serializer_class
@action(detail=False)
def me(self, request):
serializer = UserSerializer(request.user, context={"request": request})
return Response(status=status.HTTP_200_OK, data=serializer.data)
@action(
detail=False, methods=["post"], permission_classes=[permissions.IsAdminUser]
)
def bulk_create_teachers(self, request):
users_file = request.FILES["file"]
contents = "".join([line.decode("utf-8") for line in users_file.readlines()])
reader = csv.reader(StringIO(contents))
teacher_group = Group.objects.get(name="Teacher")
response = collections.defaultdict(list)
for row in reader:
# based on https://github.com/bee-keeper/django-invitations/blob/9069002f1a0572ae37ffec21ea72f66345a8276f/invitations/views.py#L63
invitee = row[0]
try:
validate_email(invitee)
CleanEmailMixin().validate_invitation(invitee)
invite = Invitation.create(invitee, group=teacher_group)
except (ValidationError):
response["invalid"].append({invitee: "invalid email"})
except (AlreadyAccepted):
response["invalid"].append({invitee: "already accepted"})
except (AlreadyInvited):
response["invalid"].append({invitee: "pending invite"})
except (UserRegisteredEmail):
response["invalid"].append({invitee: "user registered email"})
else:
invite.send_invitation(request)
response["valid"].append({invitee: "invited"})
return Response(status=status.HTTP_200_OK, data=response)
class IsAuthForDelete(permissions.IsAuthenticated):
def has_permission(self, request, view):
if request.method == "DELETE":
return super().has_permission(request, view)
return True
class ObtainDeleteAuthToken(ObtainAuthToken):
permission_classes = [IsAuthForDelete]
def delete(self, request, *args, **kwargs):
try:
Token.objects.get(user=request.user).delete()
return Response(status=status.HTTP_200_OK)
except Token.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
obtain_delete_auth_token = ObtainDeleteAuthToken.as_view()
```
#### File: teleband/users/models.py
```python
from django.contrib.auth.models import AbstractUser, Group
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from invitations.models import Invitation
import reversion
from teleband.instruments.models import Instrument
@reversion.register()
class User(AbstractUser):
"""Default user for TeleBand."""
#: First and last name do not cover name patterns around the globe
name = models.CharField(_("Name of User"), blank=True, max_length=255)
grade = models.CharField(blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
instrument = models.ForeignKey(Instrument, null=True, on_delete=models.DO_NOTHING)
external_id = models.CharField(
_("External ID"),
blank=True,
max_length=255,
help_text=_(
"Any string that is meaningful to the user's educational institution."
),
)
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
class Role(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class GroupInvitation(Invitation):
group = models.ForeignKey(Group, on_delete=models.DO_NOTHING)
```
#### File: teleband/utils/fields.py
```python
import itertools
from django.utils.text import slugify
def generate_slug_from_name(instance, model_cls):
# based on https://simpleit.rocks/python/django/generating-slugs-automatically-in-django-easy-solid-approaches/
max_length = instance._meta.get_field("slug").max_length
value = instance.name
slug_candidate = slug_original = slugify(value, allow_unicode=True)
for i in itertools.count(1):
if not model_cls.objects.filter(slug=slug_candidate).exists():
break
slug_candidate = "{}-{}".format(slug_original, i)
instance.slug = slug_candidate
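# Behavior sketch: an instance named "Air for Band" receives the slug
# "air-for-band"; if that slug is already taken for the model, the counter
# appends "-1", "-2", ... until an unused slug is found.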
```
#### File: teleband/utils/serializers.py
```python
from rest_framework import serializers
class GenericNameSerializer(serializers.Serializer):
name = serializers.CharField(max_length=255)
def __init__(self, *args, **kwargs):
self.model_cls = kwargs.pop("model_cls", None)
super().__init__(*args, **kwargs)
def to_representation(self, instance):
return instance.name
def to_internal_value(self, data):
return self.model_cls.objects.get(name=data)
``` |
{
"source": "JMU-CS354/move_base_354",
"score": 3
} |
#### File: move_base_354/scripts/move_base.py
```python
from collections import deque
import rospy
from nav_msgs.srv import GetPlan
from nav_msgs.msg import Path
from move_base_msgs.msg import MoveBaseAction, MoveBaseFeedback
from geometry_msgs.msg import PoseStamped
import tf2_ros
import actionlib
import util
class MoveBase(object):
"""
Class handling navigation logic for a simple-action-server-based
navigation system.
"""
def __init__(self):
"""
Set up the node and read the goal parameters.
"""
rospy.init_node('move_base')
self.xy_goal_tolerance = rospy.get_param('~xy_goal_tolerance', .15)
self.stuck_delay = rospy.get_param('~stuck_delay', 10.0)
self.stuck_threshold = .2
self.action_s = actionlib.SimpleActionServer('move_base',
MoveBaseAction,
execute_cb=self.execute_cb,
auto_start=False)
self.tf_buffer = tf2_ros.Buffer()
tf2_ros.TransformListener(self.tf_buffer)
rospy.sleep(1.0) # accumulate some transforms
rospy.Subscriber('move_base_simple/goal', PoseStamped,
self.goal_callback)
self.path_pub = rospy.Publisher('/path', Path, latch=True,
queue_size=10)
rospy.loginfo("Waiting for create_global_plan service...")
rospy.wait_for_service('create_global_plan')
rospy.loginfo("OK")
self.action_s.start()
rospy.spin()
def _check_stuck(self, poses, pose):
"""Return true if we haven't moved for a while.
"""
max_delay = rospy.Duration.from_sec(self.stuck_delay)
poses.append(pose)
if (poses[-1].header.stamp - poses[0].header.stamp) > max_delay:
dist = util.distance(poses[-1], poses[0])
if dist < self.stuck_threshold:
return True
poses.popleft()
return False
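# With the defaults read in __init__ (stuck_delay=10.0 s, stuck_threshold=0.2 m),
# the robot is considered stuck when the oldest and newest poses in the window
# are more than 10 s apart yet less than 0.2 m from each other.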
def execute_cb(self, goal):
"""This is the callback that will occur when a new goal arrives
through the simple action client. goal will have a
.target_pose field
"""
# Store the goal pose in the global frame so we can
# monitor progress.
goal_pose_world = util.pose_transform(self.tf_buffer,
goal.target_pose, 'map')
path = self.request_plan(goal.target_pose)
if path is None:
msg = "Global planner couldn't find plan. Giving up."
self.action_s.set_aborted(text=msg)
return
# After this happens the local planner should be working.
self.path_pub.publish(path.plan)
at_goal = False
stuck = False
rate = rospy.Rate(10)
poses = deque() # store recent poses to see if we are stuck.
while not at_goal and not stuck and not rospy.is_shutdown():
if self.action_s.is_preempt_requested():
rospy.loginfo('Navigation Preempted')
self.path_pub.publish(Path())
self.action_s.set_preempted()
break
robot_pose = self.get_current_pose()
distance = util.distance(robot_pose, goal_pose_world)
if distance < self.xy_goal_tolerance:
at_goal = True
rospy.loginfo("Goal reached.")
self.action_s.set_succeeded()
elif self._check_stuck(poses, robot_pose):
stuck = True
msg = "Not making progress. Giving up."
self.path_pub.publish(Path())
self.action_s.set_aborted(text=msg)
else:
feedback = MoveBaseFeedback()
feedback.base_position = robot_pose
self.action_s.publish_feedback(feedback)
rate.sleep()
def goal_callback(self, goal_pose):
""" This callback handles non-action-client goal requests."""
path = self.request_plan(goal_pose)
if path is None:
rospy.loginfo("Global planner couldn't find plan.")
else:
self.path_pub.publish(path.plan)
def get_current_pose(self):
""" Return the current robot pose in the world frame. """
pose_base = util.create_pose_stamped(0, 0, frame_id='base_link')
return util.pose_transform(self.tf_buffer, pose_base, 'map')
def request_plan(self, goal_pose):
"""
Create a service proxy and use it to request a plan from the
global planner. Returns the plan, or None if no plan
is available.
"""
rospy.wait_for_service('create_global_plan')
try:
get_plan = rospy.ServiceProxy('create_global_plan',
GetPlan)
robot_pose = self.get_current_pose()
rospy.loginfo("Requesting plan...")
resp = get_plan(robot_pose, goal_pose, self.xy_goal_tolerance)
rospy.loginfo("Plan received.")
if len(resp.plan.poses) == 0:
resp = None
except rospy.ServiceException:
resp = None
return resp
if __name__ == "__main__":
MoveBase()
``` |
{
"source": "JMU-CS/jmu_python_gradescope_utils",
"score": 3
} |
#### File: jmu_python_gradescope_utils/jmu_gradescope_utils/jmu_test_case.py
```python
import types
import unittest
import tempfile
import subprocess
import os
import shutil
import re
from functools import wraps
from . import utils
import sys
from importlib import import_module
_TEST_ORDER = {}
def _order(f):
global _TEST_ORDER
_TEST_ORDER[f.__name__] = len(_TEST_ORDER)
return f
def test_compare(a, b):
if a in _TEST_ORDER and b in _TEST_ORDER:
return [1, -1][_TEST_ORDER[a] < _TEST_ORDER[b]]
elif a in _TEST_ORDER:
return -1
elif b in _TEST_ORDER:
return 1
else:
return [1, -1][a < b]
def _check_required(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if hasattr(self.__class__, '_FAILED_REQUIRED_TEST'):
self.fail("Failed required test: {}".format(self._FAILED_REQUIRED_TEST))
result = None
else:
result = func(self, *args, **kwargs)
return result
return wrapper
def required():
"""Used to decorate required test method. If a required method is
failed then all of the following methods will fail as well.
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
result = func(self, *args, **kwargs)
except Exception as e:
self.__class__._FAILED_REQUIRED_TEST = func.__doc__
result = None
raise e.__class__(str(e) + "\n This test was required. All of the following tests will fail automatically.")
return result
return wrapper
return decorator
# https://stackoverflow.com/questions/8245135/python-decorate-all-methods-of-subclass-and-provide-means-to-override
class OrderAllTestsMeta(type):
"""Decorate every method so that they will be ordered during testing,
and they will respect the 'required' decorator.
"""
def __new__(cls, name, bases, local):
for attr in local:
value = local[attr]
if isinstance(value, types.FunctionType):
local[attr] = _check_required(_order(value))
return type.__new__(cls, name, bases, local)
class _JmuTestCase(unittest.TestCase):
""" Additional useful assertions for grading. """
# counts the number of dynamic modules created
module_count = 0
def assertScriptOutputEqual(self, filename, string_in, expected,
variables=None, args="", msg=None,
processor=None):
tmpdir = None
try:
tmpdir, new_file_name = utils.replace_variables(filename,
variables)
# Make a clean backup of the original submission:
shutil.copy(utils.full_submission_path(filename),
os.path.join(tmpdir, "__tmp_backup.py"))
# Replace the original submission in source:
shutil.copy(new_file_name, utils.full_source_path(filename))
command = ['python3', utils.full_source_path(filename)]
command.extend(args.split())
proc = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
actual, stderr = proc.communicate(input=string_in.encode())
actual_text = actual.decode()
if processor:
actual_text = processor(actual_text)
stderr_text = stderr.decode()
if len(stderr) > 0:
stderr_text = stderr_text.replace(utils.full_source_path() + "/" , '')
err_msg = "Error during script execution:\n{}".format(stderr_text)
out_msg = "\nOutput before failure:\n{}".format(actual_text)
self.fail(err_msg + out_msg)
show_in = string_in.encode('unicode_escape').decode()
message = "Input was: '{}'".format(show_in)
if len(args) > 0:
message += "\nCommand line arguments: {}".format(args)
if msg is not None:
message += "\n" + msg
self.assertEqual(actual_text, expected, message)
finally:
if tmpdir is not None:
# Restore the original submission in source:
shutil.copy(os.path.join(tmpdir, "__tmp_backup.py"),
utils.full_source_path(filename))
shutil.rmtree(tmpdir)
def assertNoLoops(self, filename, msg=None):
loop_regex = "(^|(\r\n?|\n))\s*(for|while).*:\s*(#.*)*($|(\r\n?|\n))"
count = utils.count_regex_matches(loop_regex, filename)
message = f"It looks like the file {filename} contains at least one loop."
if msg is not None:
message += f"\n{msg}"
if count > 0:
self.fail(message)
def assertNoForLoops(self, filename, msg=None):
loop_regex = "(^|(\r\n?|\n))\s*(for).*:\s*(#.*)*($|(\r\n?|\n))"
count = utils.count_regex_matches(loop_regex, filename)
message = f"It looks like the file {filename} contains at least one for loop."
if msg is not None:
message += f"\n{msg}"
if count > 0:
self.fail(message)
def assertNoWhileLoops(self, filename, msg=None):
loop_regex = "(^|(\r\n?|\n))\s*(while).*:\s*(#.*)*($|(\r\n?|\n))"
count = utils.count_regex_matches(loop_regex, filename)
message = f"It looks like the file {filename} contains at least one while loop."
if msg is not None:
message += f"\n{msg}"
if count > 0:
self.fail(message)
def assertNoConditionals(self, filename, msg=None):
if_regex = "(^|(\r\n?|\n))\s*if.*:\s*(#.*)*($|(\r\n?|\n))"
main_regex = "(^|(\r\n?|\n))\s*if\s*__name__.*:\s*(#.*)*($|(\r\n?|\n))"
count = utils.count_regex_matches(if_regex, filename)
main_count = utils.count_regex_matches(main_regex, filename)
message = f"It looks like the file {filename} contains at least one if statement."
if msg is not None:
message += f"\n{msg}"
if count > main_count:
self.fail(message)
def assertPassesPep8(self, filename):
output = utils.run_flake8(filename)
if len(output) != 0:
self.fail("Submission does not pass pep8 checks:\n" + output)
print('Submission passes all formatting checks!')
def assertDocstringsCorrect(self, filename):
output = utils.run_flake8_docstring(filename)
if len(output) != 0:
self.fail("Submission does not pass docstring checks:\n" + output)
print('Submission passes all docstring checks!')
def assertRequiredFilesPresent(self, required_files):
missing_files = utils.check_submitted_files(required_files)
for path in missing_files:
print('Missing {0}'.format(path))
self.assertEqual(len(missing_files), 0, 'Missing some required files!')
print('All required files submitted!')
def assertOutputCorrect(self, filename, string_in, expected,
variables=None, processor=None):
self.assertScriptOutputEqual(filename, string_in, expected,
variables=variables, processor=processor)
print('Correct output:\n' + expected)
def run_with_substitution(self, filename, variables, func):
"""substitute variable values, then load a module and execute the given function `func`"""
_JmuTestCase.module_count = _JmuTestCase.module_count + 1
short_filename = filename
if filename[-3:] == '.py':
short_filename = filename[0:-3]
new_module_name = short_filename + "_" + str(_JmuTestCase.module_count)
(tmpdir, new_file_name) = utils.replace_variables(filename, variables=variables, new_name=new_module_name + ".py")
# insert the new temporary directory into the system module load path
sys.path.insert(1, tmpdir)
# load the module
dynamic_module = import_module(new_module_name)
func(dynamic_module)
def assertMatchCount(self, filename, regex, num_matches, msg=None):
count = utils.count_regex_matches(regex, filename)
self.assertEqual(num_matches, count, msg=msg)
class JmuTestCase(_JmuTestCase, metaclass=OrderAllTestsMeta):
"""Test methods declared within subclasses will be executed in the
order they are declared as long as the sortTestMethodsUsing attribute
of the defaultTestLoader has been set::
unittest.defaultTestLoader.sortTestMethodsUsing = test_compare
They will also respect the @required decorator.
"""
pass
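# A minimal usage sketch (hypothetical grading module; assumes these names are
# imported from this module):
#
#   import unittest
#   from jmu_gradescope_utils.jmu_test_case import JmuTestCase, required, test_compare
#
#   unittest.defaultTestLoader.sortTestMethodsUsing = test_compare
#
#   class TestSubmission(JmuTestCase):
#       @required()
#       def test_submitted_files(self):
#           self.assertRequiredFilesPresent(['hello.py'])
#
#       def test_style(self):
#           self.assertPassesPep8('hello.py')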
``` |
{
"source": "JMU-CS/Upload-Canvas-Grades",
"score": 3
} |
#### File: JMU-CS/Upload-Canvas-Grades/generate_form_msp.py
```python
import argparse
import generate_form
import re
def autolab_assignment_name(filename):
"""Extract the autolab assessment name for an autolab csv grade
file name.
"""
m = re.search("(.*)_(.*)_[0-9]{12}.csv", filename)
if m:
return m.group(2)
return None
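# Example (hypothetical file name): for "cs149_lab3_202201311200.csv" the
# regex above yields "lab3" as the assessment name.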
class AutoLabForm(generate_form.CanvasForm):
def __init__(self, canvas_csv, assignment_id, autolab_csvs):
super(AutoLabForm, self).__init__(canvas_csv, assignment_id)
self.student_dicts = [] # one dictionary per exercise/csv file
self.table_headers = []
self.ex_names = []
for autolab_csv in autolab_csvs:
self.ex_names.append(autolab_assignment_name(autolab_csv))
autolab_table = generate_form.csv_to_numpy(autolab_csv)
cur_entries = {}
for i in range(1, autolab_table.shape[0], 1):
eid = autolab_table[i, 0].split('@')[0]
cur_entries[eid] = list(autolab_table[i, :])
self.student_dicts.append(cur_entries)
cur_headers = {}
for col in range(autolab_table.shape[1]):
cur_headers[autolab_table[0, col].strip(':')] = col
self.table_headers.append(cur_headers)
def generate_comments(self, eid):
comments = ""
tool_score = 0
try:
table_data = zip(self.student_dicts, self.ex_names, self.table_headers)
for student_data, ex_name, table_headers in table_data:
cur_score = student_data[eid][table_headers['Total']]
comments += ex_name + ": "
if cur_score == "not_submitted" or \
cur_score == "not_yet_submitted":
comments += "No Autolab submission.\n"
cur_score = "0"
else:
comments += cur_score + "\n"
tool_score += float(cur_score)
except KeyError as e:
# This may happen if a student is in Canvas, but not autolab
pass
return comments, tool_score
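# Example (hypothetical grade rows): for exercises "lab1" (Total "8.0") and
# "lab2" (Total "not_submitted"), the generated comments are
#   lab1: 8.0
#   lab2: No Autolab submission.
# and tool_score is 8.0.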
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("canvas_csv", metavar="CANVAS_CSV")
parser.add_argument("canvas_assignment_id", metavar="CANVAS_ASSIGNMENT_ID")
parser.add_argument("autolab_csv", metavar="AUTOLAB_CSV", nargs='*')
args = parser.parse_args()
wf = AutoLabForm(args.canvas_csv, args.canvas_assignment_id,
args.autolab_csv)
wf.generate_form()
``` |
{
"source": "jmuddappa/DeepClassificationBot",
"score": 3
} |
#### File: DeepClassificationBot/deepanimebot/bot.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import logging
import os
import random
import time
import tweepy
import deploy
import gceutil
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot import messages
INPUT_SHAPE = 128 # change it to your input image size
TWEET_MAX_LENGTH = 140
logging.basicConfig()
logger = logging.getLogger('bot')
logger.setLevel(logging.INFO)
def wait_like_a_human(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
start = time.time()
rv = f(*args, **kwargs)
if not rv:
return
api, action, args, kwargs = rv
end = start + random.randint(1, 5)
sleep = end - time.time()
if sleep > 0:
time.sleep(sleep)
return getattr(api, action)(*args, **kwargs)
return wrapper
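# The decorated handlers below return an (api, action, args, kwargs) tuple;
# this wrapper sleeps out the remainder of a random 1-5 second window before
# dispatching getattr(api, action)(*args, **kwargs), so replies are not sent
# instantly.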
class ReplyToTweet(tweepy.StreamListener):
def __init__(self, screen_name, classifier, api=None, silent=False):
super(ReplyToTweet, self).__init__(api)
self.screen_name = screen_name
self.classifier = classifier
self.silent = silent
@wait_like_a_human
def on_direct_message(self, data):
status = data.direct_message
sender_name = status['sender']['screen_name']
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming dm {1}".format(status['id'], status['text']))
reply = self.get_reply(status['id'], status['entities'], TWEET_MAX_LENGTH - len('d {} '.format(sender_name)), messages.DMMessages)
if self.silent:
return
return self.api, 'send_direct_message', tuple(), dict(user_id=status['sender']['id'], text=reply)
@wait_like_a_human
def on_status(self, status):
sender_name = status.author.screen_name
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming status {1}".format(status.id, status.text))
if retweets_me(status, self.screen_name):
logger.debug("{0} is a retweet".format(status.id))
return
if not status_mentions(status, self.screen_name):
logger.debug("{0} doesn't mention {1}".format(status.id, self.screen_name))
return
prefix = '@{0} '.format(sender_name)
reply = self.get_reply(status.id, status.entities, TWEET_MAX_LENGTH - len(prefix), messages.StatusMessages)
status_text = prefix + reply
if self.silent:
return
return self.api, 'update_status', (status_text,), dict(in_reply_to_status_id=status.id)
def get_reply(self, status_id, entities, max_length, messages):
maybe_image_url = url_from_entities(entities)
if not maybe_image_url:
logger.debug("{0} doesn't have a URL".format(status_id))
return messages.give_me_an_image()
try:
y = self.classifier.classify(url=maybe_image_url)
except exc.TimeoutError:
logger.debug("{0} timed out while classifying {1}".format(status_id, maybe_image_url))
return messages.took_too_long()
except exc.NotImage:
logger.debug("{0} no image found at {1}".format(status_id, maybe_image_url))
return messages.not_an_image()
except exc.RemoteError as e:
logger.debug("{0} remote error {1}".format(status_id, e))
return e.message
except Exception as e:
logger.error("{0} error while classifying {1}: {2}".format(status_id, maybe_image_url, e))
return messages.something_went_wrong()
reply = messages.my_guess(y, max_length=max_length)
logger.debug("{0} reply: {1}".format(status_id, reply))
return reply
def on_error(self, status):
if status == 420:
# we are rate-limited.
# returning False disconnects the stream
return False
def retweets_me(status, screen_name):
retweeted_status = getattr(status, 'retweeted_status', None)
if retweeted_status is None:
return False
return retweeted_status.author.screen_name == screen_name
def status_mentions(status, screen_name):
for mention in status.entities.get('user_mentions', []):
if mention['screen_name'] == screen_name:
return True
return False
def url_from_entities(entities):
for media in entities.get('media', []):
if media['type'] == 'photo':
return media['media_url']
for url in entities.get('urls', []):
return url['expanded_url']
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
auth = tweepy.OAuthHandler(args.consumer_key, args.consumer_secret)
auth.set_access_token(args.access_token, args.access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
screen_name = api.me().screen_name
if args.classifier == 'mock':
classifier = classifiers.MockClassifier()
elif args.classifier == 'local':
classifier = classifiers.URLClassifier(classifiers.ImageClassifier(args.dataset_path, INPUT_SHAPE))
elif args.classifier == 'remote':
classifier = classifiers.RemoteClassifier(args.remote_endpoint)
stream = tweepy.Stream(auth=auth, listener=ReplyToTweet(screen_name, classifier, api, args.silent))
logger.info('Listening as {}'.format(screen_name))
stream.userstream(track=[screen_name])
if __name__ == '__main__':
import configargparse
parser = configargparse.getArgumentParser()
parser.add('-c', '--config', required=False, is_config_file=True, help='Config file path. See bot.ini.example')
parser.add('--consumer-key', required=True, env_var='CONSUMER_KEY', help='Twitter app consumer key')
parser.add('--consumer-secret', required=True, env_var='CONSUMER_SECRET', help='Twitter app consumer secret')
parser.add('--access-token', required=True, env_var='ACCESS_TOKEN', help='Twitter access token')
parser.add('--access-token-secret', required=True, env_var='ACCESS_TOKEN_SECRET', help='Twitter access token secret')
parser.add('--classifier', choices=['mock', 'local', 'remote'], default='mock', help='Which classifier to use')
parser.add('--dataset-path', default='data/data.hdf5', help='Path to dataset when using a local classifier')
parser.add('--remote-endpoint', default=None, help='API endpoint to call when using a remote classifier')
parser.add('--silent', action='store_true', default=False, help='Run bot without actually replying')
parser.add('--debug', action='store_true', default=False, help='Set log level to debug')
try:
args = parser.parse_args()
except SystemExit as e:
if gceutil.detect_gce_environment(logger):
attrname_env_varnames = {action.dest.replace('_', '-'): action.env_var
for action in parser._actions if action.env_var}
metadata = gceutil.get_metadata(attrname_env_varnames.keys())
environ = dict(os.environ)
environ.update({attrname_env_varnames[attr]: value for attr, value in metadata.items()})
args = parser.parse_args(env_vars=environ)
else:
raise
main(args)
```
#### File: DeepClassificationBot/deepanimebot/messages.py
```python
from __future__ import absolute_import
import math
import deploy
from deepanimebot.shortcuts import at_random
class Messages(object):
'''Each method is expected to return a message of length under TWEET_MAX_LENGTH.
'''
@staticmethod
def took_too_long():
return at_random(
"It took too long to get the image. Try again?",
)
@staticmethod
def something_went_wrong():
return at_random(
"Something went wrong. Try again later?",
)
@staticmethod
def not_an_image():
return at_random(
"That doesn't look like an image",
"Are you sure it's an image?",
)
@staticmethod
def unknown_image():
return at_random(
'I have no clue!',
'Unknown',
)
@classmethod
def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
if not len(y):
return cls.unknown_image()
pred_lines = []
max_category_length = 0
max_category_length_index = 0
for i, pred in enumerate(y[:top_n]):
pred_lines.append(deploy.Prediction(
"{}.".format(pred.rank),
pred.category,
"{:.2%}".format(pred.probability),
))
if max_category_length < len(pred.category):
max_category_length_index = i
max_category_length = len(pred.category)
newline_count = len(pred_lines)
pred_length = sum(sum(map(len, pred)) + len(pred) - 1 for pred in pred_lines)
current_length = len(preface) + newline_count + pred_length
# truncate category name(s) if needed
if max_length is not None and current_length > max_length:
lengthy_pred = pred_lines[max_category_length_index]
excess_length = current_length - max_length
# don't penalize the longest category if it's going to be truncated too much
if len(lengthy_pred.category) * 0.5 < excess_length:
subtract_from_everyone_length = int(math.ceil(excess_length / len(pred_lines)))
pred_lines = [
deploy.Prediction(
pred.rank, pred.category[:-subtract_from_everyone_length], pred.probability)
for pred in pred_lines]
else:
shortened_pred = deploy.Prediction(
lengthy_pred.rank, lengthy_pred.category[:-excess_length], lengthy_pred.probability)
pred_lines[max_category_length_index] = shortened_pred
reply = "{}\n{}".format(preface, "\n".join(" ".join(pred) for pred in pred_lines))
return reply[:max_length] if max_length is not None else reply
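# Example output (hypothetical predictions, top_n=3, no truncation needed):
#
#   Probable Anime:
#   1. Noein 97.10%
#   2. Clannad 1.20%
#   3. K-On! 0.80%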
class StatusMessages(Messages):
@staticmethod
def give_me_an_image():
return at_random(
'Give me a direct image URL or attach it to your tweet',
"I don't see an image. Tweet a direct image URL or attach it please",
)
class DMMessages(Messages):
@staticmethod
def give_me_an_image():
return at_random(
'Give me a direct image URL',
"I don't see an image. Message me a direct image URL please",
)
```
#### File: DeepClassificationBot/examples/anime_names.py
```python
from __future__ import absolute_import
import sys
import codecs
import re
from backports import csv
import xml.etree.ElementTree as ElementTree
import requests
ANN_REPORTS_URL = 'http://www.animenewsnetwork.com/encyclopedia/reports.xml'
ANN_DETAILS_URL = 'http://cdn.animenewsnetwork.com/encyclopedia/api.xml'
ANN_ANIME_RATINGS_REPORT_ID = 172
TRAILING_KIND_RE = re.compile(r'\s+\([^)]+\)$')
def get_top_n_shows(n):
"""Returns top n shows from Anime News Network"""
assert n <= 1000
params = {'id': ANN_ANIME_RATINGS_REPORT_ID, 'nlist': n, 'nskip': 0}
response = requests.get(ANN_REPORTS_URL, params=params)
root = ElementTree.fromstring(response.content)
return {
'fields': ['id', 'name'],
'items': [{'id': item.get('id'), 'name': _extract_item_name(item)} for item in root.iter('item')]
}
def _extract_item_name(item):
return TRAILING_KIND_RE.sub('', item.find('anime').text)
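# Example (hypothetical report entry): an <anime> element whose text is
# "Cowboy Bebop (TV)" is reduced to "Cowboy Bebop" by stripping the trailing
# kind marker.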
def list_characters(shows):
ids = [show['id'] for show in shows['items']]
params = {'anime': ids}
response = requests.get(ANN_DETAILS_URL, params=params)
root = ElementTree.fromstring(response.content)
return {
'fields': ['show_id', 'show_name', 'character_name'],
'items': _extract_anime_characters(root),
}
def _extract_anime_characters(root):
for anime in root.iter('anime'):
anime_id = anime.get('id')
anime_name = anime.get('name')
seen_names = set()
for role in anime.findall("cast[@lang='JA']/role"):
name = role.text
if name not in seen_names:
yield {
'anime_id': anime_id,
'anime_name': anime_name,
'name': role.text,
}
seen_names.add(name)
def print_csv(field_items, fileobj=sys.stdout, fields=None):
writer = csv.writer(codecs.getwriter('utf8')(fileobj))
fields = field_items['fields'] if fields is None else fields
writer.writerow(fields)
for row in field_items['items']:
writer.writerow([row[field] for field in fields])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('what', choices=['shows', 'characters'])
parser.add_argument('-n', type=int, default=50)
args = parser.parse_args()
if args.what == 'shows':
print_csv(get_top_n_shows(args.n), fields=['name'])
elif args.what == 'characters':
shows = get_top_n_shows(args.n)
print_csv(list_characters(shows), fields=['anime_name', 'name'])
```
#### File: jmuddappa/DeepClassificationBot/model.py
```python
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.layers.normalization import BatchNormalization
def get_model(n_outputs=1000, input_size=256):
'''Builds a Deep Convolutional Neural Network of architecture VGG-Net as described in
paper http://arxiv.org/pdf/1409.1556.pdf and adapted with batch_norm and dropout regularization
Returns the model ready for compilation and training or predictions
We have commented out dropout in between the conv layers because it was not needed for our use cases. However, if
you find that your models overfit, you can uncomment them to add dropout back into your architecture.
'''
conv = Sequential()
conv.add(Convolution2D(64, 3, 3, activation='relu', input_shape=(3, input_size, input_size)))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(64, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(128, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(128, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
conv.add(Flatten())
conv.add(Dropout(0.5))
conv.add(Dense(4096))
conv.add(BatchNormalization())
conv.add(Dropout(0.5))
conv.add(Dense(4096))
conv.add(BatchNormalization())
conv.add(Dropout(0.5))
conv.add(Dense(n_outputs))
conv.add(Activation('softmax'))
print(conv.summary())
return conv
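# Usage sketch (channel-first input of shape (3, input_size, input_size), as
# declared above; 95 outputs is just an example category count):
#   model = get_model(n_outputs=95, input_size=128)
#   model.compile(optimizer='adam', loss='categorical_crossentropy',
#                 metrics=['accuracy'])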
def get_deep_anime_model(n_outputs=1000, input_size=128):
'''The deep neural network used for deep anime bot'''
conv = Sequential()
conv.add(Convolution2D(64, 3, 3, activation='relu', input_shape=(3, input_size, input_size)))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(64, 3, 3, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(128, 3, 3, activation='relu'))
# conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(128, 1, 1, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 3, 3, activation='relu'))
# conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(256, 1, 1, activation='relu'))
conv.add(MaxPooling2D((2, 2), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 3, 3, activation='relu'))
# conv.add(ZeroPadding2D((1, 1)))
conv.add(Convolution2D(512, 1, 1, activation='relu'))
conv.add(AveragePooling2D((8, 8), strides=(2, 2)))
conv.add(BatchNormalization())
# conv.add(Dropout(0.5))
# conv.add(ZeroPadding2D((1, 1)))
# conv.add(Convolution2D(512, 3, 3, activation='relu'))
# conv.add(ZeroPadding2D((1, 1)))
# conv.add(Convolution2D(512, 3, 3, activation='relu'))
# #conv.add(ZeroPadding2D((1, 1)))
# conv.add(Convolution2D(512, 1, 1, activation='relu'))
# conv.add(AveragePooling2D((4, 4)))
# conv.add(BatchNormalization())
conv.add(Flatten())
conv.add(Dropout(0.5))
conv.add(Dense(2048))
conv.add(BatchNormalization())
conv.add(Dropout(0.7))
conv.add(Dense(2048))
conv.add(BatchNormalization())
conv.add(Dropout(0.7))
conv.add(Dense(n_outputs))
conv.add(Activation('softmax'))
print(conv.summary())
return conv
```
#### File: jmuddappa/DeepClassificationBot/tasks.py
```python
from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import urllib
from configargparse import ConfigFileParser
from temporary import temp_dir
import click
DEFAULT_INSTANCE_NAME = 'bot-standalone'
DEFAULT_ZONE = 'us-central1-a'
DEFAULT_MACHINE_TYPE = 'f1-micro'
LOGGING_AGENT_INSTALL_SCRIPT = 'https://dl.google.com/cloudagents/install-logging-agent.sh'
@click.group()
def bot():
pass
@bot.command()
@click.argument('version')
def build_and_push_images(version):
images = ['deploy-base', 'bot-remote', 'webapp']
for image_name in images:
version_tag = 'classificationbot/{}:{}'.format(image_name, version)
latest_tag = 'classificationbot/{}:latest'.format(image_name)
args = [
'docker',
'build',
'-t',
version_tag,
'-t',
latest_tag,
'-f',
'dockerfiles/{}/Dockerfile'.format(image_name),
'.']
subprocess.call(args)
for tag in [version_tag, latest_tag]:
args = [
'docker',
'push',
tag
]
subprocess.call(args)
@bot.command()
@click.option('--name', default=DEFAULT_INSTANCE_NAME)
@click.option('--zone', default=DEFAULT_ZONE)
@click.option('--machine-type', default=DEFAULT_MACHINE_TYPE)
@click.option('--address', default=None)
@click.option('--bot-config', default='bot.ini')
@click.option('--stackdriver-logging/--no-stackdriver-logging', default=False, help='Install logging agent and add config to collect logs')
@click.pass_context
def create_standalone_instance(ctx, name, zone, machine_type, address, bot_config, stackdriver_logging):
args = [
'gcloud', 'compute', 'instances', 'create', name,
'--image', 'container-vm',
'--zone', zone,
'--machine-type', machine_type,
]
if address:
args.append('--address')
args.append(address)
with open(bot_config) as f:
bot_config_content = ConfigFileParser().parse(f)
if len(bot_config_content):
secret_args = [
'--metadata',
','.join('='.join(item) for item in bot_config_content.items()),
]
with temp_dir() as d:
# add metadata from file
args.append('--metadata-from-file')
metadata_files = ['google-container-manifest=etc/standalone-bot-containers.yaml']
startup_script_path = os.path.join(d, 'startup-script.sh')
if stackdriver_logging:
urllib.urlretrieve(LOGGING_AGENT_INSTALL_SCRIPT, startup_script_path)
with open(startup_script_path, 'a') as f:
f.write('\nmkdir -p /var/log/bot\n')
metadata_files.append('startup-script={}'.format(startup_script_path))
args.append(','.join(metadata_files))
confirm = "Create the following instance? (+{num} metadata from {config})\n{command}".format(
num=len(bot_config_content),
config=bot_config,
command=' '.join(args))
click.confirm(confirm, abort=True)
subprocess.call(args + secret_args)
if stackdriver_logging:
ctx.invoke(copy_fluentd_conf, name=name, zone=zone)
@bot.command()
@click.option('--name', default=DEFAULT_INSTANCE_NAME)
@click.option('--zone', default=DEFAULT_ZONE)
def copy_fluentd_conf(name, zone):
args = [
'gcloud', 'compute', 'copy-files',
'etc/fluentd.conf', '{}:~/bot.conf'.format(name),
'--zone', zone,
]
print(' '.join(args))
subprocess.call(args)
args = [
'gcloud', 'compute', 'ssh',
name,
'--zone', zone,
'--command', 'sudo mkdir -p /etc/google-fluentd/config.d && sudo mv bot.conf /etc/google-fluentd/config.d/ && sudo service google-fluentd restart || true'
]
print(' '.join(args))
subprocess.call(args)
@bot.command()
@click.option('--name', default=DEFAULT_INSTANCE_NAME)
@click.option('--zone', default=DEFAULT_ZONE)
def delete_standalone_instance(name, zone):
args = [
'gcloud', 'compute', 'instances', 'delete', name,
'--zone', zone,
]
subprocess.call(args)
if __name__ == '__main__':
bot()
```
#### File: DeepClassificationBot/tests/test_deepanimebot_classifiers.py
```python
import os
import time
from multiprocessing import TimeoutError
import json
import h5py
import cv2
import pytest
import requests
import six
import deploy
import data
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
import mocks
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'fixtures', '1920x1080.png')
def test_fetch_cvimage_from_url(monkeypatch):
with open(TEST_IMAGE_PATH, 'rb') as f:
image = f.read()
monkeypatch.setattr(requests, 'get', mocks.mock_get(image))
image = classifiers.fetch_cvimage_from_url('this url is ignored')
assert image is not None
def test_fetch_cvimage_from_url_non_image(monkeypatch):
monkeypatch.setattr(requests, 'get', mocks.mock_get('non-image string'))
image = classifiers.fetch_cvimage_from_url('this url is ignored')
assert image is None
def test_fetch_cvimage_from_url_timeout(monkeypatch):
def long_func(*args, **kwargs):
time.sleep(3)
monkeypatch.setattr(requests, 'get', long_func)
with pytest.raises(TimeoutError):
classifiers.fetch_cvimage_from_url('this url is ignored', timeout_max_timeout=1)
def test_fetch_cvimage_from_url_too_large(monkeypatch):
monkeypatch.setattr(requests, 'get', mocks.mock_get('12'))
with pytest.raises(ValueError):
classifiers.fetch_cvimage_from_url('this url is ignored', maxsize=1)
def test_mock_classifier_classify():
classifier = classifiers.MockClassifier()
y = classifier.classify()
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def test_image_classifier_classify(monkeypatch):
# TODO: add fixture for categories and mean. (95 is a magic number corresponding to the deployed model)
monkeypatch.setattr(data, 'get_categories', lambda: dict((str(n), n) for n in range(95)))
monkeypatch.setattr(data, 'get_mean', lambda path: None)
cvimage = cv2.imread(TEST_IMAGE_PATH)
# TODO: add fixture for weights and refactor so that model is loaded from a workspace directory
classifier = classifiers.ImageClassifier('ignored path', 128, 'deep_anime_model')
y = classifier.classify(cvimage)
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def create_url_classifier(monkeypatch):
# TODO: add fixture for categories and mean. (95 is a magic number corresponding to the deployed model)
monkeypatch.setattr(data, 'get_categories', lambda: dict((str(n), n) for n in range(95)))
monkeypatch.setattr(data, 'get_mean', lambda path: None)
# TODO: add fixture for weights and refactor so that model is loaded from a workspace directory
image_classifier = classifiers.ImageClassifier('ignored path', 128, 'deep_anime_model')
return classifiers.URLClassifier(image_classifier)
def test_url_classifier_classify(monkeypatch):
with open(TEST_IMAGE_PATH, 'rb') as f:
image = f.read()
monkeypatch.setattr(requests, 'get', mocks.mock_get(image))
url_classifier = create_url_classifier(monkeypatch)
y = url_classifier.classify(TEST_IMAGE_PATH)
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def test_url_classifier_classify_none(monkeypatch):
monkeypatch.setattr(classifiers, 'fetch_cvimage_from_url', lambda url: None)
url_classifier = create_url_classifier(monkeypatch)
with pytest.raises(exc.NotImage):
url_classifier.classify(TEST_IMAGE_PATH)
def test_remote_classifier_classify(monkeypatch):
response = json.dumps(dict(y=[dict(rank=1, category='noein', probability=1.0)]))
monkeypatch.setattr(requests, 'get', mocks.mock_get(response))
classifier = classifiers.RemoteClassifier('base url')
y = classifier.classify(url='param')
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def test_remote_classifier_classify_error(monkeypatch):
response = json.dumps(dict(error='something went wrong expectedly'))
monkeypatch.setattr(requests, 'get', mocks.mock_get(response))
classifier = classifiers.RemoteClassifier('base url')
with pytest.raises(exc.RemoteError):
classifier.classify(url='param')
```
#### File: jmuddappa/DeepClassificationBot/train.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import argparse
import h5py
import numpy as np
import data
import model as m
def get_top_n_error(preds, y, n):
index_of_true = np.argmax(y, axis=1)
index_of_preds = np.argsort(preds, axis=1)
correct = 0
for i in range(len(index_of_true)):
for j in range(1, n + 1):
if index_of_true[i] == index_of_preds[i, -j]:
correct = correct + 1
break
total = len(y)
accuracy = correct / total
return accuracy
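# Note: despite its name, this helper returns top-n *accuracy*; the caller
# below reports it as an "error". Worked example with hypothetical arrays and
# n=1:
#   preds = np.array([[0.1, 0.9], [0.8, 0.2]])
#   y     = np.array([[0.0, 1.0], [0.0, 1.0]])
#   get_top_n_error(preds, y, 1)  # -> 0.5 (first sample correct, second not)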
def run(epochs=500, training_percentage=0.4, validation_percentage=0.1, extract=True, cont=True, size=256, top_k=5):
'''Does the routine required to get the data, put it in the needed format and start training the model.
Saves weights whenever the model produces a better validation result and keeps track of the best loss.'''
if extract:
print("Extracting data..")
X, y = data.extract_data(size=size)
print("Preprocessing data..")
X, y, nb_samples, num_categories = data.preprocess_data(X, y, save=True, subtract_mean=True)
else:
print("Loading data..")
h5f = h5py.File('data.hdf5', 'r')
nb_samples = h5f['nb_samples'].value
num_categories = h5f['n_categories'].value
h5f.close()
print("Number of categories: {}".format(num_categories))
print("Number of samples {}".format(nb_samples))
data_ids = np.arange(start=0, stop=nb_samples)
val_ids = data.produce_validation_indices(data_ids, nb_samples * validation_percentage)
train_ids = data.produce_train_indices(dataset_indx=data_ids, number_of_samples=nb_samples * training_percentage,
val_indx=val_ids)
# X_train, y_train, X_test, y_test = data.split_data(X, y, split_ratio=split)
X_train, y_train, X_val, y_val = data.load_dataset_bit_from_hdf5(train_ids, val_ids, only_train=False)
X_val = X_val / 255
print("Building and Compiling model..")
model = m.get_model(n_outputs=num_categories, input_size=size)
if cont:
# model.load_weights_until_layer("pre_trained_weights/latest_model_weights.hdf5", 26)
model.load_weights("pre_trained_weights/latest_model_weights.hdf5")
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])
print("Training..")
best_performance = np.inf
for i in range(epochs):
train_ids = data.produce_train_indices(dataset_indx=data_ids, number_of_samples=15000, val_indx=val_ids)
X_train, y_train = data.load_dataset_bit_from_hdf5(train_ids, val_ids, only_train=True)
X_train = X_train / 255
X_train = data.augment_data(X_train)
# fit the model on the batches generated by datagen.flow()
metadata = model.fit(X_train, y_train, validation_data=[X_val, y_val], batch_size=64,
nb_epoch=1, verbose=1, shuffle=True, class_weight=None,
sample_weight=None)
current_loss = metadata.history['loss'][-1]
current_val_loss = metadata.history['val_loss'][-1]
preds = model.predict_proba(X_val, batch_size=64)
print("Loss: {}".format(current_loss))
print("Val_loss: {}".format(current_val_loss))
        top_k_error = get_top_n_error(preds, y_val, top_k)
        print("Top {} error: {}".format(top_k, top_k_error))
if current_val_loss < best_performance:
model.save_weights("pre_trained_weights/model_weights.hdf5", overwrite=True)
best_performance = current_val_loss
print("Saving weights..")
model.save_weights("pre_trained_weights/latest_model_weights.hdf5", overwrite=True)
def extract_data(size=256):
print("Extracting data..")
    X, y = data.extract_data(size=size)
print("Preprocessing data..")
X, y, nb_samples, num_categories = data.preprocess_data(X, y, save=True, subtract_mean=True)
return X, y, nb_samples, num_categories
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--extract_data', dest='extract_data', action='store_const', const=True, default=False)
parser.add_argument('--run', dest='run', action='store_const', const=True, default=False)
parser.add_argument('--continue', dest='continue_training', action='store_const', const=True, default=False)
args = parser.parse_args()
extract_mode = args.extract_data
run_mode = args.run
continue_ = args.continue_training
if run_mode:
run(extract=extract_mode, cont=continue_)
elif extract_mode:
extract_data()
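# Hedged usage examples, based only on the flags defined above:
#   python train.py --extract_data                    # extract and preprocess the data only
#   python train.py --run                             # train on previously extracted data (data.hdf5)
#   python train.py --run --extract_data --continue   # re-extract, then resume from the saved weights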
``` |
{
"source": "jmuddappa/tattoo-gan-erator",
"score": 3
} |
#### File: jmuddappa/tattoo-gan-erator/app.py
```python
from flask import Flask, request, send_from_directory
from flask import request
import os
from flask import render_template
#from .attnGAN.gen_art import gen_example_from_text
#from user import about
from attnGAN.gen_art import *
project_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = os.path.join(project_dir, 'images')
app = Flask(__name__, static_url_path='/static')
# No caching at all for API endpoints.
@app.after_request
def add_header(response):
# response.cache_control.no_store = True
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
@app.route("/")
def hello():
return "Hello World!"
@app.route("/input")
def input():
return render_template('input.html')
@app.route("/output")
def test():
input_text = request.args.get('input')
run(input_text=input_text)
return render_template('output.html', value=input_text)
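# Hedged example request against the /output route above (Flask's dev server defaults to port 5000):
#   GET http://localhost:5000/output?input=the+red+bird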
"""
python2 gen_art.py
--gpu 0
--input_text "the red bird"
--data_dir data/birds
--model_path models/bird_AttnGAN2.pth
--textencoder_path DAMSMencoders/bird/text_encoder200.pth
--output_dir output
"""
def run(input_text = "", gpu_id=0, data_dir="attnGAN/data/birds",
model_path = "attnGAN/models/bird_AttnGAN2.pth",
textencoder_path="attnGAN/DAMSMencoders/bird/text_encoder200.pth",
output_directory="static"):
print(input_text)
# args = parse_args()
#if args.cfg_file is not None:
cfg_from_file("attnGAN/cfg/eval_bird.yml")
cfg.GPU_ID = gpu_id
#else:
# cfg.CUDA = False
cfg.DATA_DIR = data_dir
cfg.TRAIN.NET_G = model_path
cfg.TRAIN.NET_E = textencoder_path
#if not cfg.TRAIN.FLAG:
# args.manualSeed = 100
#elif args.manualSeed is None:
manualSeed = 100
random.seed(manualSeed)
np.random.seed(manualSeed)
torch.manual_seed(manualSeed)
if cfg.CUDA:
torch.cuda.manual_seed_all(manualSeed)
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
output_dir = './output/%s_%s_%s' % \
(cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
split_dir, bshuffle = 'train', True
#if not cfg.TRAIN.FLAG:
# bshuffle = False
# split_dir = 'test'
# Get data loader
imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM - 1))
image_transform = transforms.Compose([
transforms.Scale(int(imsize * 76 / 64)),
transforms.RandomCrop(imsize),
transforms.RandomHorizontalFlip()])
dataset = TextDataset(cfg.DATA_DIR, split_dir,
base_size=cfg.TREE.BASE_SIZE,
transform=image_transform)
assert dataset
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))
# Define models and go to train/evaluate
algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword)
start_t = time.time()
if cfg.TRAIN.FLAG:
algo.train()
else:
'''
Generate images from pre-extracted embeddings
'''
if cfg.B_VALIDATION:
# generate images for the whole valid dataset
algo.sampling(split_dir)
else:
# generate images for customized captions
gen_example_from_text(
input_text, output_directory, dataset.wordtoix, algo)
end_t = time.time()
print('Total time for training:', end_t - start_t)
if __name__ == '__main__':
app.run(debug=True)
```
#### File: neural-style-docker/neuralstyle/algorithms.py
```python
from subprocess import call
from itertools import product
from tempfile import TemporaryDirectory, NamedTemporaryFile
from shutil import copyfile
import logging
from math import ceil
import numpy as np
import json
import GPUtil
from neuralstyle.utils import filename, fileext
from neuralstyle.imagemagick import (convert, resize, shape, assertshape, choptiles, feather, smush, composite,
extractalpha, mergealpha)
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# Folders and commands for style-transfer algorithms
ALGORITHMS = {
"gatys": {
"folder": "/app/neural-style",
"command": "th neural_style.lua",
"defaultpars": [
"-backend", "cudnn",
"-cudnn_autotune",
"-normalize_gradients",
"-init", "image",
"-content_weight", "100",
"-save_iter", "10000",
"-proto_file", "/app/neural-style/models/VGG_ILSVRC_19_layers_deploy.prototxt",
"-model_file", "/app/neural-style/models/VGG_ILSVRC_19_layers.caffemodel",
"-num_iterations", "500"
]
},
"gatys-multiresolution": {},
"chen-schmidt": {
"folder": "/app/style-swap",
"command": "th style-swap.lua",
"defaultpars": [
"--patchSize", "7",
"--patchStride", "3"
]
},
"chen-schmidt-inverse": {
"folder": "/app/style-swap",
"command": "th style-swap.lua",
"defaultpars": [
"--decoder", "models/dec-tconv-sigmoid.t7"
]
}
}
# Load file with GPU configuration
with open("gpuconfig.json", "r") as f:
GPUCONFIG = json.load(f)
def styletransfer(contents, styles, savefolder, size=None, alg="gatys", weights=None, stylescales=None,
tileoverlap=100, algparams=None):
"""General style transfer routine over multiple sets of options"""
# Check arguments
if alg not in ALGORITHMS.keys():
raise ValueError("Unrecognized algorithm %s, must be one of %s" % (alg, str(list(ALGORITHMS.keys()))))
# Plug default options
if alg != "gatys" and alg != "gatys-multiresolution":
if weights is not None:
LOGGER.warning("Only gatys algorithm accepts style weights. Ignoring style weight parameters")
weights = [None]
else:
if weights is None:
weights = [5.0]
if stylescales is None:
stylescales = [1.0]
if tileoverlap is None:
tileoverlap = 100
if algparams is None:
algparams = []
# Iterate through all combinations
for content, style, weight, scale in product(contents, styles, weights, stylescales):
outfile = outname(savefolder, content, style, alg, scale, weight)
# If the desired size is smaller than the maximum tile size, use a direct neural style
if fitsingletile(targetshape(content, size), alg):
styletransfer_single(content=content, style=style, outfile=outfile, size=size, alg=alg, weight=weight,
stylescale=scale, algparams=algparams)
# Else use a tiling strategy
else:
neuraltile(content=content, style=style, outfile=outfile, size=size, overlap=tileoverlap, alg=alg,
weight=weight, stylescale=scale, algparams=algparams)
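# Minimal usage sketch for styletransfer (paths and values are illustrative, not from the project):
#   styletransfer(contents=["photo.jpg"], styles=["cubism.jpg"], savefolder="out",
#                 size=512, alg="gatys", weights=[5.0], stylescales=[1.0])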
def styletransfer_single(content, style, outfile, size=None, alg="gatys", weight=5.0, stylescale=1.0, algparams=None):
"""General style transfer routine over a single set of options"""
workdir = TemporaryDirectory()
# Cut out alpha channel from content
rgbfile = workdir.name + "/" + "rgb.png"
alphafile = workdir.name + "/" + "alpha.png"
extractalpha(content, rgbfile, alphafile)
# Transform style to png, as some algorithms don't understand other formats
stylepng = workdir.name + "/" + "style.png"
convert(style, stylepng)
# Call style transfer algorithm
algfile = workdir.name + "/" + "algoutput.png"
if alg == "gatys":
gatys(rgbfile, stylepng, algfile, size, weight, stylescale, algparams)
elif alg == "gatys-multiresolution":
gatys_multiresolution(rgbfile, stylepng, algfile, size, weight, stylescale, algparams)
elif alg in ["chen-schmidt", "chen-schmidt-inverse"]:
chenschmidt(alg, rgbfile, stylepng, algfile, size, stylescale, algparams)
# Enforce correct size
correctshape(algfile, content, size)
# Recover alpha channel
correctshape(alphafile, content, size)
mergealpha(algfile, alphafile, outfile)
def neuraltile(content, style, outfile, size=None, overlap=100, alg="gatys", weight=5.0, stylescale=1.0,
algparams=None):
"""Strategy to generate a high resolution image by running style transfer on overlapping image tiles"""
LOGGER.info("Starting tiling strategy")
if algparams is None:
algparams = []
workdir = TemporaryDirectory()
# Gather size info from original image
fullshape = targetshape(content, size)
# Compute number of tiles required to map all the image
xtiles, ytiles = tilegeometry(fullshape, alg, overlap)
# First scale image to target resolution
firstpass = workdir.name + "/" + "lowres.png"
convert(content, firstpass)
resize(firstpass, fullshape)
# Chop the styled image into tiles with the specified overlap value.
lowrestiles = choptiles(firstpass, xtiles=xtiles, ytiles=ytiles, overlap=overlap,
outname=workdir.name + "/" + "lowres_tiles")
# High resolution pass over each tile
highrestiles = []
for i, tile in enumerate(lowrestiles):
name = workdir.name + "/" + "highres_tiles_" + str(i) + ".png"
styletransfer_single(tile, style, name, size=None, alg=alg, weight=weight, stylescale=stylescale,
algparams=algparams)
highrestiles.append(name)
# Feather tiles
featheredtiles = []
for i, tile in enumerate(highrestiles):
name = workdir.name + "/" + "feathered_tiles_" + str(i) + ".png"
feather(tile, name)
featheredtiles.append(name)
# Smush the feathered tiles together
smushedfeathered = workdir.name + "/" + "feathered_smushed.png"
smush(featheredtiles, xtiles, ytiles, overlap, overlap, smushedfeathered)
# Smush also the non-feathered tiles
smushedhighres = workdir.name + "/" + "highres_smushed.png"
smush(highrestiles, xtiles, ytiles, overlap, overlap, smushedhighres)
# Combine feathered and un-feathered output images to disguise feathering
composite([smushedfeathered, smushedhighres], outfile)
# Adjust back to desired size
assertshape(outfile, fullshape)
def gatys(content, style, outfile, size, weight, stylescale, algparams):
"""Runs Gatys et al style-transfer algorithm
References:
* https://arxiv.org/abs/1508.06576
* https://github.com/jcjohnson/neural-style
"""
# Gatys can only process one combination of content, style, weight and scale at a time, so we need to iterate
tmpout = NamedTemporaryFile(suffix=".png")
runalgorithm("gatys", [
"-content_image", content,
"-style_image", style,
"-style_weight", weight * 100, # Because content weight is 100
"-style_scale", stylescale,
"-output_image", tmpout.name,
"-image_size", size if size is not None else shape(content)[0],
*algparams
])
# Transform to original file format
convert(tmpout.name, outfile)
tmpout.close()
def gatys_multiresolution(content, style, outfile, size, weight, stylescale, algparams, startres=256):
"""Runs a multiresolution version of Gatys et al method
The multiresolution strategy starts by generating a small image, then using that image as initializer
for higher resolution images. This procedure is repeated up to the tilesize.
Once the maximum tile size attainable by L-BFGS is reached, more iterations are run by using Adam. This allows
to produce larger images using this method than the basic Gatys.
References:
* Gatys et al - Controlling Perceptual Factors in Neural Style Transfer (https://arxiv.org/abs/1611.07865)
* https://gist.github.com/jcjohnson/ca1f29057a187bc7721a3a8c418cc7db
"""
# Multiresolution strategy: list of rounds, each round composed of a optimization method and a number of
# upresolution steps.
# Using "adam" as optimizer means that Adam will be used when necessary to attain higher resolutions
strategy = [
["lbfgs", 7],
["lbfgs", 7],
["lbfgs", 7],
["lbfgs", 7],
["lbfgs", 7]
]
LOGGER.info("Starting gatys-multiresolution with strategy " + str(strategy))
# Initialization
workdir = TemporaryDirectory()
maxres = targetshape(content, size)[0]
if maxres < startres:
LOGGER.warning("Target resolution (%d) might too small for the multiresolution method to work well" % maxres)
startres = maxres / 2.0
seed = None
tmpout = workdir.name + "/tmpout.png"
# Iterate over rounds
for roundnumber, (optimizer, steps) in enumerate(strategy):
LOGGER.info("gatys-multiresolution round %d with %s optimizer and %d steps" % (roundnumber, optimizer, steps))
roundmax = min(maxtile("gatys"), maxres) if optimizer == "lbfgs" else maxres
resolutions = np.linspace(startres, roundmax, steps, dtype=int)
iters = 1000
for stepnumber, res in enumerate(resolutions):
stepopt = "adam" if res > maxtile("gatys") else "lbfgs"
LOGGER.info("Step %d, resolution %d, optimizer %s" % (stepnumber, res, stepopt))
passparams = algparams[:]
passparams.extend([
"-num_iterations", iters,
"-tv_weight", "0",
"-print_iter", "0",
"-optimizer", stepopt
])
if seed is not None:
passparams.extend([
"-init", "image",
"-init_image", seed
])
gatys(content, style, tmpout, res, weight, stylescale, passparams)
seed = workdir.name + "/seed.png"
copyfile(tmpout, seed)
iters = max(iters/2.0, 100)
convert(tmpout, outfile)
def chenschmidt(alg, content, style, outfile, size, stylescale, algparams):
"""Runs Chen and Schmidt fast style-transfer algorithm
References:
* https://arxiv.org/pdf/1612.04337.pdf
* https://github.com/rtqichen/style-swap
"""
if alg not in ["chen-schmidt", "chen-schmidt-inverse"]:
raise ValueError("Unnaceptable subalgorithm %s for Chen-Schmidt family")
# Rescale style as requested
instyle = NamedTemporaryFile()
copyfile(style, instyle.name)
resize(instyle.name, int(stylescale * shape(style)[0]))
# Run algorithm
outdir = TemporaryDirectory()
runalgorithm(alg, [
"--save", outdir.name,
"--content", content,
"--style", instyle.name,
"--maxContentSize", size if size is not None else shape(content)[0],
"--maxStyleSize", size if size is not None else shape(content)[0],
*algparams
])
# Gather output results
output = outdir.name + "/" + filename(content) + "_stylized" + fileext(content)
convert(output, outfile)
instyle.close()
def runalgorithm(alg, params):
"""Run a style transfer algorithm with given parameters"""
# Move to algorithm folder
command = "cd " + ALGORITHMS[alg]["folder"] + "; "
# Algorithm command with default parameters
command += ALGORITHMS[alg]["command"] + " " + " ".join(ALGORITHMS[alg]["defaultpars"])
# Add provided parameters, if any
command += " " + " ".join([str(p) for p in params])
LOGGER.info("Running command: %s" % command)
call(command, shell=True)
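# Illustrative example of the composed shell command (default parameters abbreviated with "..."):
#   runalgorithm("gatys", ["-content_image", "c.png", "-output_image", "out.png"])
#   # runs: cd /app/neural-style; th neural_style.lua -backend cudnn ... -content_image c.png -output_image out.png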
def outname(savefolder, content, style, alg, scale, weight=None, ext=None):
"""Creates an output filename that reflects the style transfer parameters"""
return (
savefolder + "/" +
filename(content) +
"_" + filename(style) +
"_" + alg +
"_ss" + str(scale) +
("_sw" + str(weight) if weight is not None else "") +
(ext if ext is not None else fileext(content))
)
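# For example, assuming filename() strips the extension and fileext() returns it with the leading dot:
#   outname("out", "photo.jpg", "cubism.jpg", "gatys", 1.0, 5.0)
#   # -> "out/photo_cubism_gatys_ss1.0_sw5.0.jpg"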
def correctshape(result, original, size=None):
"""Corrects the result of style transfer to ensure shape is coherent with original image and desired output size
If output size is not specified, the result image is corrected to have the same shape as the original.
"""
assertshape(result, targetshape(original, size))
def tilegeometry(imshape, alg, overlap=50):
"""Given the shape of an image, computes the number of X and Y tiles to cover it"""
maxtilesize = maxtile(alg)
xtiles = ceil(float(imshape[0] - maxtilesize) / float(maxtilesize - overlap) + 1)
ytiles = ceil(float(imshape[1] - maxtilesize) / float(maxtilesize - overlap) + 1)
return xtiles, ytiles
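# Worked example, assuming maxtile(alg) == 512: a 1000x800 target with overlap=50 needs
#   xtiles = ceil((1000 - 512) / (512 - 50) + 1) = 3
#   ytiles = ceil((800 - 512) / (512 - 50) + 1) = 2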
def fitsingletile(imshape, alg):
"""Returns whether a given image shape will fit in a single tile or not.
This depends on the algorithm used and the GPU available in the system"""
mx = maxtile(alg)
return mx*mx >= np.prod(imshape)
def targetshape(content, size=None):
"""Computes the shape the resultant image will have after a reshape of the size given
If size is None, return original shape.
"""
contentshape = shape(content)
if size is None:
return contentshape
else:
return [size, int(size * contentshape[1] / contentshape[0])]
def gpuname():
"""Returns the model name of the first available GPU"""
try:
gpus = GPUtil.getGPUs()
except:
LOGGER.warning("Unable to detect GPU model. Is your GPU configured? Are you running with nvidia-docker?")
return "UNKNOWN"
if len(gpus) == 0:
raise ValueError("No GPUs detected in the system")
return gpus[0].name
def maxtile(alg="gatys"):
"""Returns the recommended configuration maximum tile size, based on the available GPU and algorithm to be run
The size returned should be understood as the maximum tile size for a square tile. If non-square tiles are used,
a maximum tile of the same number of pixels should be used.
"""
gname = gpuname()
if gname not in GPUCONFIG:
LOGGER.warning(f"Unknown GPU model {gname}, will use default tiling parameters")
gname = "default"
return GPUCONFIG[gname][alg]
```
#### File: neural-style-docker/tests/algorithms_tests.py
```python
from tempfile import TemporaryDirectory
from glob import glob
from neuralstyle.algorithms import styletransfer, neuraltile, ALGORITHMS
from neuralstyle.imagemagick import shape, equalimages
from neuralstyle.utils import filename
CONTENTS = "/app/entrypoint/tests/contents/"
STYLES = "/app/entrypoint/tests/styles/"
def assertalldifferent(pattern, expected=None):
"""Asserts that all images that follow a given glob pattern have different contents
An expected number of images can also be provided to be checked.
"""
files = glob(pattern)
if expected is not None:
assert len(files) == expected
for f1, f2 in zip(files, files[1:]):
assert not equalimages(f1, f2)
def test_styletransfer_gatys():
"""Style transfer works without error for the Gatys algorithm"""
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="gatys")
assert len(glob(tmpdir.name + "/dockersmall*cubism*")) == 1
def test_styletransfer_gatys_parameters():
"""Algorithm parameters can be passed to the Gatys method"""
tmpdir = TemporaryDirectory()
algparams = ("-num_iterations", "50")
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, algparams=algparams)
assert len(glob(tmpdir.name + "/dockersmall*cubism*")) == 1
def test_styletransfer_gatysmultiresolution():
"""Style transfer works without error for the Gatys algorithm with multiresolution"""
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + "docker.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="gatys-multiresolution",
size=600)
assert len(glob(tmpdir.name + "/docker*cubism*")) == 1
def test_styletransfer_chenschmidt():
"""Style transfer method works without error for the Chend-Schmidt algorithm"""
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt")
assert len(glob(tmpdir.name + "/dockersmall*cubism*")) == 1
def test_styletransfer_chenschmidtinverse():
"""Style transfer method works without error for the Chend-Schmidt Inverse algorithm"""
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse")
assert len(glob(tmpdir.name + "/dockersmall*cubism*")) == 1
def test_styletransfer_keepsize():
"""Style transfer keeps the original image size if no size paramenter is given"""
for alg in ALGORITHMS.keys():
tmpdir = TemporaryDirectory()
img = CONTENTS + "dockersmall.png"
styletransfer([img], [STYLES + "cubism.jpg"], tmpdir.name, alg=alg)
files = glob(tmpdir.name + "/" + filename(img) + "*cubism*")
print("Expected size", shape(img))
print("Actual shape", shape(files[0]))
assert len(files) == 1
assert shape(files[0]) == shape(img)
def test_styletransfer_size():
"""Style transfer works for varying image sizes, producing correctly scaled images"""
for alg in ALGORITHMS.keys():
for size in [50, 100, 200]:
for img in ["docker.png", "obama.jpg"]:
originalshape = shape(CONTENTS + img)
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + img], [STYLES + "cubism.jpg"], tmpdir.name, alg=alg, size=size)
files = glob(tmpdir.name + "/" + filename(img) + "*cubism*")
resultshape = shape(files[0])
rescalefactor = size / originalshape[0]
expectedshape = [size, int(rescalefactor * originalshape[1])]
print("Expected shape", expectedshape)
print("Actual shape", resultshape)
assert len(files) == 1
assert expectedshape == resultshape
def test_styletransfer_ss():
"""Style transfer works for varying style scales"""
stylescales = [0.75, 1, 1.25]
for alg in ALGORITHMS.keys():
img = "docker.png"
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + img], [STYLES + "cubism.jpg"], tmpdir.name, alg=alg, size=100,
stylescales=stylescales)
assertalldifferent(tmpdir.name + "/" + filename(img) + "*cubism*", len(stylescales))
def test_styletransfer_sw():
"""Style transfer works for varying style weights"""
styleweights = [1, 5, 10]
alg = "gatys"
img = "docker.png"
tmpdir = TemporaryDirectory()
styletransfer([CONTENTS + img], [STYLES + "cubism.jpg"], tmpdir.name, alg=alg, size=100,
weights=styleweights)
assertalldifferent(tmpdir.name + "/" + filename(img) + "*cubism*", len(styleweights))
def test_neuraltile():
"""The neural tiling procedure can be run without issues"""
tmpdir = TemporaryDirectory()
content = CONTENTS + "avila-walls.jpg"
outfile = tmpdir.name + "/tiled.png"
neuraltile(content, STYLES + "cubism.jpg", outfile, alg="chen-schmidt-inverse", overlap=100)
assert shape(outfile) == shape(content)
def test_formattga():
"""TGA format images can be processed correctly"""
contents = [CONTENTS + f for f in ["tgasample.tga", "marbles.tga"]]
tmpdir = TemporaryDirectory()
styletransfer(contents, [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse")
assert len(glob(tmpdir.name + "/*cubism*")) == 2
def test_formatpsd():
"""PSD format images can be processed correctly"""
contents = [CONTENTS + f for f in ["oldtelephone.psd"]]
tmpdir = TemporaryDirectory()
styletransfer(contents, [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse")
assert len(glob(tmpdir.name + "/*cubism*")) == 1
def test_alpha():
"""Transformation of images with an alpha channel preserve transparency"""
tmpdir = TemporaryDirectory()
# Transform image with alpha
styletransfer([CONTENTS + "dockersmallalpha.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse")
assert len(glob(tmpdir.name + "/*dockersmallalpha_cubism*")) == 1
# Transform image without alpha
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse")
assert len(glob(tmpdir.name + "/*dockersmall_cubism*")) == 1
    # Check that the generated images are different
assertalldifferent(tmpdir.name + "/*cubism*")
def test_alpha_tiling():
"""Transformation of images with an alpha channel preserve transparency, even when a tiling strategy is used"""
tmpdir = TemporaryDirectory()
# Transform image with alpha
styletransfer([CONTENTS + "dockersmallalpha.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse",
size=500)
assert len(glob(tmpdir.name + "/*dockersmallalpha_cubism*")) == 1
# Transform image without alpha
styletransfer([CONTENTS + "dockersmall.png"], [STYLES + "cubism.jpg"], tmpdir.name, alg="chen-schmidt-inverse",
size=500)
assert len(glob(tmpdir.name + "/*dockersmall_cubism*")) == 1
    # Check that the generated images are different
assertalldifferent(tmpdir.name + "/*cubism*")
``` |
{
"source": "jmudge14/whackastreamdeck",
"score": 3
} |
#### File: whackastreamdeck/whackastreamdeck/app.py
```python
import whackastreamdeck.Deck as Deck
from random import randint
from time import time
from time import sleep
import threading
import os
class MoleGame():
def __init__(this, deck, numMoles=3, minDelay=500, maxDelay=1000, explosionDisplayTime=500, gameTime=60000): # Initial State
this.hill = []
this.deck = deck
this.numMoles = numMoles
this.minDelay = minDelay
this.maxDelay = maxDelay
this.gameTime = gameTime
this.score = 0
this.startTime = -1
this.nextUpdateTime = -1
this.gameOverTime = -1
# Mark where the last mole was whacked, to show an explosion
this.explosions = []
this.explosionUpdateTime = -1
this.explosionDisplayTime = explosionDisplayTime
# Game assets
this.moleImage = Deck.getAsset(deck,"Mole.jpeg")
this.blankImage = Deck.getAsset(deck, "Blank.jpeg")
this.explosionImage = Deck.getAsset(deck, "Explosion.jpeg")
# Process management
this.updateLock = threading.Lock()
this.drawLock = threading.Lock()
def keyCallback(deck, key, state):
this.keyCallback(key,state)
this.deck.set_key_callback(keyCallback)
# Storyboard Management
this.storyboard = "notstarted"
this.redraw()
def tick(this):
this.update()
this.redraw()
if this.storyboard != "gameover":
threading.Timer((randint(this.minDelay, this.maxDelay)/1000.0), this.tick).start()
def randbutton(this):
return randint(0, this.deck.key_count()-1)
def start(this):
# Mole positions are randomized in update()
this.storyboard = "started"
this.nextUpdateTime = -1 # Force initial update
this.startTime = time()
this.gameOverTime = this.startTime+(this.gameTime/1000)
this.tick() # set everything in motion!
def duration(this):
return time()-this.startTime
def update(this):
if this.storyboard != "started":
return
t = time()
# Check if any further updates are required
if t>this.gameOverTime:
this.storyboard = "gameover"
return
#elif t>this.nextUpdateTime:
# this.nextUpdateTime = t+(randint(this.minDelay, this.maxDelay)/1000.0)
#else:
# return
# Populate the mole hill with new moles
this.updateLock.acquire()
nextHill = []
for _ in range(this.numMoles):
nextMole = this.randbutton()
# Force the mole to *look* like it has moved: It can't overlap in the new
# hill, and it should be different from any occupied spot in the old hill.
while nextMole in nextHill or nextMole in this.hill:
nextMole = this.randbutton()
nextHill.append(nextMole)
# Remove any covered explosions from the list
try:
this.explosions.remove(nextMole)
except ValueError:
pass
this.hill = nextHill
this.updateLock.release()
this.redraw()
def redraw(this):
with this.drawLock:
if this.storyboard == "notstarted":
Deck.renderString(this.deck, " PRESS KEY TO START", background="black", color="white")
elif this.storyboard == "gameover":
Deck.renderString(this.deck, "GAME XOVER. SCORE: {}".format(this.score))
else:
for key in range(this.deck.key_count()):
if key in this.hill:
image = this.moleImage
elif key in this.explosions:
image = this.explosionImage
else:
image = this.blankImage
this.deck.set_key_image(key, image)
def removeExplosion(this, key):
this.updateLock.acquire()
try:
this.explosions.remove(key)
except ValueError:
pass
this.updateLock.release()
this.redraw()
def keyCallback(this, key, state):
if not state:
return
if this.storyboard in ("gameover", "quit"):
if key == 7:
this.storyboard = "quit"
                this.deck.reset()
                this.deck.close()
this.redraw()
return
elif this.storyboard == "notstarted":
this.start()
elif key in this.hill:
this.updateLock.acquire()
this.score += 1
# Move the mole that was just whacked
nextMole = this.randbutton()
while nextMole in this.hill:
nextMole = this.randbutton()
this.hill.remove(key)
this.hill.append(nextMole)
# Display an explosion icon for some time
this.explosions.append(key)
this.updateLock.release()
threading.Timer(this.explosionDisplayTime/1000.0, this.removeExplosion, args=(key,)).start()
this.redraw()
def printAndSaveHighScores(score):
print("Score: " + str(game.score))
HIGH_SCORE_FILE = os.path.join(os.path.dirname(__file__), "highscores")
if os.path.exists(HIGH_SCORE_FILE):
with open(HIGH_SCORE_FILE,"r") as highScoreFile:
high_scores = eval(highScoreFile.read())
else:
high_scores = []
    high_scores.append(score)
high_scores.sort(reverse=True)
high_scores = high_scores[:10] # keep ten scores
print("High Scores:")
print("\n".join([str(s) for s in high_scores]))
with open(HIGH_SCORE_FILE, "w") as highScoreFile:
highScoreFile.write(repr(high_scores))
def run():
deck = Deck.getInitializedDeck(background="green");
game = MoleGame(deck)
print("Good luck!")
while game.storyboard != "quit":
#game.update()
sleep(0.1)
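# Hedged entry-point sketch for launching the game directly:
#   if __name__ == "__main__":
#       run()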
``` |
{
"source": "jmueller95/deepgrind",
"score": 3
} |
#### File: deepgrind/scripts/preprocess_for_prediction.py
```python
import pandas as pd
import utils
def check_msms_model_name(converter):
def wrapper(*args, **kwargs):
if kwargs['style'] not in ["pdeep", "prosit"]:
raise Exception("MSMS model must be 'pdeep' or 'prosit'")
converter(*args, **kwargs)
return wrapper
@check_msms_model_name
def _convert_for_msms(comet_df, style, output):
if style == "prosit":
res = pd.DataFrame(
{"modified_sequence": comet_df.Peptide.apply(
lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values,
"collision_energy": snakemake.params['collision_energy'],
"precursor_charge": comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)})
res.dropna(inplace=True)
res.to_csv(output, sep=",", header=True, index=False)
else:
res = pd.DataFrame(
comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="pdeep")).to_list(),
columns=["peptide", "modification"])
# The charge is one-hot encoded in the comet df, so we can resolve this into 1,2 or 3 by multiplying 1,2 and 3
# with the entries of Charge1, Charge2 and Charge3
res["charge"] = comet_df.apply(lambda row: row.Charge1 + 2 * row.Charge2 + 3 * row.Charge3, axis=1)
res.dropna(inplace=True)
res.to_csv(output, sep="\t", header=True, index=False)
@check_msms_model_name
def _convert_for_rt(comet_df, style, output):
if style == "prosit":
res = pd.DataFrame(
{"modified_sequence": comet_df.Peptide.apply(lambda pep: utils.find_modifications(pep[2:-2], style="prosit")).values})
res.dropna(inplace=True)
res.to_csv(output, sep=",", header=True, index=False)
else:
raise Exception("Not implemented. Right now, the only accepted RT Model is 'prosit'.")
def main():
# Parse the input file:
comet_df = pd.read_csv(snakemake.input[0], sep="\t", header=0,
usecols=["Peptide", "Charge1", "Charge2", "Charge3"],
index_col=False)
# Determine if MSMS and RT prediction will be performed jointly or separately
if "msms_model" in dict(snakemake.params) and "rt_model" in dict(snakemake.params):
_convert_for_msms(comet_df, style=snakemake.params['msms_model'].lower(),
output=snakemake.output['msms_prediction_input'])
_convert_for_rt(comet_df, style=snakemake.params['rt_model'].lower(),
output=snakemake.output['rt_prediction_input'])
else:
# If only one model was supplied, the prediction will be joint
# Only convert the input for msms in that case
_convert_for_msms(comet_df, style=snakemake.params['model'].lower(),
output=snakemake.output['prediction_input'])
if __name__ == '__main__':
main()
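# Note: this script is intended to be run from a Snakemake rule via the `script:` directive,
# which injects the global `snakemake` object (snakemake.input, snakemake.output, snakemake.params) used above.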
``` |
{
"source": "jmueller95/prosit",
"score": 3
} |
#### File: prosit/converters/rtlist.py
```python
from .. import utils
import pandas as pd
import numpy as np
class Converter:
def __init__(self, data, out_path):
self.data = data
self.out_path = out_path
def convert(self):
res = pd.DataFrame({
"modified_sequence" : [utils.get_sequence(seq_int) for seq_int in self.data["sequence_integer"]],
"iRT" : np.hstack(self.data["iRT"])
})
res[['modified_sequence', 'iRT']].to_csv(self.out_path, sep="\t", index=False)
```
#### File: prosit/prosit/server.py
```python
import time
import os
import tempfile
import warnings
import flask
from flask import after_this_request
import pandas as pd
import tensorflow as tf
import argparse
import zipfile
import io
from . import model
from . import io_local
from . import constants
from . import tensorize
from . import prediction
from . import alignment
from . import converters
app = flask.Flask(__name__)
@app.route("/")
def hello():
return "prosit!\n"
def predict(df, mode, fragmentation_mode=None, nlosses=None):
""" nlosses and fragmentation mode are only required if MSMS prediction should be included
fragmentation_mode must be one of "CID", "HCD"
mode: String, one of 'rt', 'msms', 'both'
"""
#If msms prediction is included, convert and predict the input df. Else only convert the input for rt prediction
data = prediction.predict(tensorize.csv(df, nlosses), d_spectra[fragmentation_mode], nlosses) if mode != "rt" else tensorize.csv_only_seq(df)
#Perform rt prediction if requested
if mode != "msms":
data = prediction.predict(data, d_irt)
return data
@app.route("/predict/<output_format>", methods=["POST"])
@app.route("/predict/<output_format>/<fragmentation_mode>", methods=["POST"])
def run_prosit(output_format=None, fragmentation_mode=None):
df = pd.read_csv(flask.request.files['peptides'])
result = predict(df,
mode="rt" if output_format == "rt" else "both", #Isolated msms prediction is currently not possible, but there's no reason to do that right now.
fragmentation_mode = fragmentation_mode.upper() if fragmentation_mode else None,
nlosses = 3) ###Neutral Losses currently are always predicted by default.
if output_format not in ["speclib", "msms"]:
tmp_f = tempfile.NamedTemporaryFile(delete=True)
if output_format == "rt":
c = converters.rtlist.Converter(result, tmp_f.name)
elif output_format == "mgf":
c = converters.mgf.Converter(result, tmp_f.name)
elif output_format == "generic":
c = converters.generic.Converter(result, tmp_f.name)
elif output_format == "msp":
print("Warning: msp output is not yet implemented!")
c = converters.msp.Converter(result, tmp_f.name)
c.convert()
elif output_format == "msms":
df_pred = converters.maxquant.convert_prediction(result)
tmp_f = tempfile.NamedTemporaryFile(delete=True)
converters.maxquant.write(df_pred, tmp_f.name)
if output_format != "speclib":
@after_this_request
def cleanup(response):
tmp_f.close()
return response
return flask.send_file(tmp_f.name)
else:
peptides_filename = ".".join(flask.request.files["peptides"].filename.split("/")[-1].split(".")[:-1])
zipdata = io.BytesIO()
with zipfile.ZipFile(zipdata, 'w', zipfile.ZIP_DEFLATED) as zipf:
mgf_file = "{}.mgf".format(peptides_filename)
c_mgf = converters.mgf.Converter(result, mgf_file)
c_mgf.convert(as_speclib=True)
zipf.write(mgf_file)
# SSL only needs the input data, not the predictions
ssl_file = "{}.ssl".format(peptides_filename)
c_ssl = converters.ssl.Converter(df, ssl_file)
c_ssl.convert()
zipf.write(ssl_file)
@after_this_request
def cleanup(response):
os.remove("{}.mgf".format(peptides_filename))
os.remove("{}.ssl".format(peptides_filename))
return response
zipdata.seek(0)
return flask.send_file(zipdata, mimetype='zip')
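# Hedged client example (assumes the server was started with `-p 5000` and is reachable on localhost):
#   curl -F "peptides=@peptides.csv" http://localhost:5000/predict/generic/hcd -o predictions.txt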
if __name__ == "__main__":
###################################
# Have Keras allocate memory only when needed
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#This allows the user to customize the port the server uses
parser = argparse.ArgumentParser()
parser.add_argument("-p" "--port", action="store", dest="port")
args = parser.parse_args()
###################################
warnings.filterwarnings("ignore")
global d_spectra
global d_irt
#Load CID MSMS Model
d_spectra_cid = {}
d_spectra_cid["graph"] = tf.Graph()
with d_spectra_cid["graph"].as_default():
d_spectra_cid["session"] = tf.Session(config=config)
with d_spectra_cid["session"].as_default():
d_spectra_cid["model"], d_spectra_cid["config"] = model.load(
constants.MODEL_CONFIG_SPECTRA,
constants.WEIGHTS_CID,
trained=True
)
d_spectra_cid["model"].compile(optimizer="adam", loss="mse")
#Load HCD MSMS Model
d_spectra_hcd = {}
d_spectra_hcd["graph"] = tf.Graph()
with d_spectra_hcd["graph"].as_default():
d_spectra_hcd["session"] = tf.Session(config=config)
with d_spectra_hcd["session"].as_default():
d_spectra_hcd["model"], d_spectra_hcd["config"] = model.load(
constants.MODEL_CONFIG_SPECTRA,
constants.WEIGHTS_HCD,
trained=True
)
d_spectra_hcd["model"].compile(optimizer="adam", loss="mse")
d_spectra = {"CID": d_spectra_cid, "HCD": d_spectra_hcd}
#Load RT Model
d_irt = {}
d_irt["graph"] = tf.Graph()
with d_irt["graph"].as_default():
d_irt["session"] = tf.Session(config=config)
with d_irt["session"].as_default():
d_irt["model"], d_irt["config"] = model.load(constants.MODEL_CONFIG_RT,
constants.WEIGHTS_RT,
trained=True)
d_irt["model"].compile(optimizer="adam", loss="mse")
app.run(host="0.0.0.0", port=args.port)
```
#### File: prosit/prosit/train_prosit.py
```python
import os
import numpy as np
import pandas as pd
import yaml
from . import model as model_lib
from . import training, tensorize, io_local
def main():
#Turn off warnings:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
###Load training data - Put the path to your own data here
training_data_path = "/root/training/training_preprocessed.csv"
training_df = pd.read_csv(training_data_path)
###Dump all Peptides containing selenocystein
training_df = training_df.loc[~training_df.modified_sequence.str.contains("U")]
print("CSV Loaded, shape is {}.".format(training_df.shape))
###Load Untrained Retention Time Model and prepare its training data
iRT_model_dir = "/root/training/iRT/"
iRT_model, iRT_config = model_lib.load(iRT_model_dir, trained=False)
iRT_callbacks = training.get_callbacks(iRT_model_dir)
iRT_raw_mean = training_df.uRT.mean()
iRT_raw_var = training_df.uRT.var()
iRT_config['iRT_rescaling_mean'] = float(iRT_raw_mean)
iRT_config['iRT_rescaling_var'] = float(iRT_raw_var)
with open(iRT_model_dir + "config_new.yml", "w") as config_outfile:
yaml.dump(iRT_config, config_outfile)
###Load Untrained Fragmentation Model and prepare its training data
msms_model_dir = "/root/training/msms/"
msms_model, msms_config = model_lib.load(msms_model_dir, trained=False)
msms_callbacks = training.get_callbacks(msms_model_dir)
#The intensity lists are already in proper order, but might have some missing values and need to be padded to the correct length
#(Only a peptide of the maximal length 29 will have 522 values, but all lists need to be of this length)
intensities_length = 522
print("iRT and Fragmentation Intensity Models Loaded.")
#Compile the models once, and then call fit separately - useful if you lack memory or space and have to partition your training data
training.compile_model(iRT_model, iRT_config)
training.compile_model(msms_model, msms_config)
training_tensorized = tensorize.csv(training_df[['modified_sequence', 'collision_energy', 'precursor_charge']], nlosses=3)
print("CSV Tensorized.")
training_tensorized['prediction'] = np.reshape(
np.asarray((training_df.uRT - iRT_raw_mean) / np.sqrt(iRT_raw_var)),
(-1,1))
training_df.relative_intensities = training_df.relative_intensities.apply(eval)
training_df.relative_intensities = training_df.relative_intensities.apply(
lambda ls: np.nan_to_num(np.pad(ls, pad_width=(0,intensities_length-len(ls)),constant_values=-1, mode="constant"),-1))
training_tensorized['intensities_raw'] = np.stack(training_df.relative_intensities)
###Write and reload training data in hdf5 format
hdf5_path = "/root/training/training_data.hdf5"
io_local.to_hdf5(training_tensorized,hdf5_path)
print("Training Data Written to HDF5 File.")
#Load the hdf5 again
training_loaded = io_local.from_hdf5(hdf5_path)
print("Training Data Reloaded from HDF5 File.\nCommencing Training of iRT Model...")
###Train both models
iRT_history = training.train_model(training_loaded, iRT_model, iRT_config, iRT_callbacks)
iRT_epochs = len(iRT_history.history['val_loss'])
iRT_val_loss = iRT_history.history['val_loss'][-1]
iRT_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(iRT_model_dir, iRT_epochs, iRT_val_loss)
iRT_model.save_weights(iRT_weights_filename)
print("Training of iRT Model Complete.\nCommencing Training of Fragmentation Intensity Model...")
msms_history = training.train_model(training_loaded, msms_model, msms_config, msms_callbacks)
#Save the weights to a file named by the val_loss and the epochs
msms_epochs = len(msms_history.history['val_loss'])
msms_val_loss = msms_history.history['val_loss'][-1]
msms_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(msms_model_dir, msms_epochs, msms_val_loss)
msms_model.save_weights(msms_weights_filename)
print("Training of Fragmentation Intensity Model Complete.")
print("Done! You may now use these models for your predictions.")
if __name__ == '__main__':
main()
``` |
{
"source": "jmueller95/pyteomics",
"score": 3
} |
#### File: pyteomics/openms/idxml.py
```python
import warnings
from .. import auxiliary as aux
from .. import xml, _schema_defaults
class IDXML(xml.IndexedXML):
"""Parser class for idXML files."""
file_format = 'idXML'
_root_element = 'IdXML'
_default_schema = _schema_defaults._idxml_schema_defaults
_default_version = '1.5'
_default_iter_tag = 'PeptideIdentification'
_structures_to_flatten = {}
_indexed_tags = {'ProteinHit'}
_schema_location_param = 'noNamespaceSchemaLocation'
def __init__(self, *args, **kwargs):
kwargs.setdefault('retrieve_refs', True)
super(IDXML, self).__init__(*args, **kwargs)
def _get_info_smart(self, element, **kwargs):
"""Extract the info in a smart way depending on the element type"""
name = xml._local_name(element)
kwargs = dict(kwargs)
rec = kwargs.pop("recursive", None)
# Try not to recursively unpack the root element
# unless the user really wants to.
if name == self._root_element:
info = self._get_info(element, recursive=(rec if rec is not None else False), **kwargs)
else:
info = self._get_info(element, recursive=(rec if rec is not None else True), **kwargs)
for k in ['start', 'end']:
v = info.get(k)
if isinstance(v, list) and len(v) == 2:
info[k] = [int(x) for x in v[0].split()]
for k in ['aa_before', 'aa_after']:
if k in info:
info[k] = info[k].split()
return info
def _retrieve_refs(self, info, **kwargs):
"""Retrieves and embeds the data for each attribute in `info` that
ends in _ref. Removes the id attribute from `info`"""
for k, v in dict(info).items():
if k[-5:] == '_refs':
try:
by_id = [self.get_by_id(x, retrieve_refs=True) for x in v.split()]
except KeyError:
warnings.warn('Ignoring unresolved reference: ' + v)
else:
for x in by_id:
x.pop('id', None)
info[k[:-5]] = by_id
del info[k]
def read(source, **kwargs):
"""Parse `source` and iterate through peptide-spectrum matches.
.. note:: This function is provided for backward compatibility only.
It simply creates an :py:class:`IDXML` instance using
provided arguments and returns it.
Parameters
----------
source : str or file
A path to a target IDXML file or the file object itself.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
retrieve_refs : bool, optional
If :py:const:`True`, additional information from references will be
automatically added to the results. The file processing time will
increase. Default is :py:const:`True`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the IDXML header (default). Otherwise, use default
parameters. Disable this to avoid waiting on slow network connections or
if you don't like to get the related warnings.
build_id_cache : bool, optional
Defines whether a cache of element IDs should be built and stored on the
created :py:class:`IDXML` instance. Default value is the value of
`retrieve_refs`.
.. note:: This parameter is ignored when ``use_index`` is ``True`` (default).
use_index : bool, optional
Defines whether an index of byte offsets needs to be created for
the indexed elements. If :py:const:`True` (default), `build_id_cache` is ignored.
indexed_tags : container of bytes, optional
Defines which elements need to be indexed. Empty set by default.
Returns
-------
out : IDXML
An iterator over the dicts with PSM properties.
"""
kwargs = kwargs.copy()
kwargs.setdefault('retrieve_refs', True)
kwargs['build_id_cache'] = kwargs.get('build_id_cache', kwargs.get('retrieve_refs'))
return IDXML(source, **kwargs)
def iterfind(source, path, **kwargs):
"""Parse `source` and yield info on elements with specified local
name or by specified "XPath".
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`iterfind` calls on one file, you should
create an :py:class:`IDXML` object and use its
:py:meth:`!iterfind` method.
Parameters
----------
source : str or file
File name or file-like object.
path : str
Element name or XPath-like expression. Only local names separated
with slashes are accepted. An asterisk (`*`) means any element.
You can specify a single condition in the end, such as:
``"/path/to/element[some_value>1.5]"``
Note: you can do much more powerful filtering using plain Python.
The path can be absolute or "free". Please don't specify
namespaces.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
retrieve_refs : bool, optional
If :py:const:`True`, additional information from references will be
automatically added to the results. The file processing time will
increase. Default is :py:const:`False`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the IDXML header (default). Otherwise, use default
parameters. Disable this to avoid waiting on slow network connections or
if you don't like to get the related warnings.
build_id_cache : bool, optional
Defines whether a cache of element IDs should be built and stored on the
created :py:class:`IDXML` instance. Default value is the value of
`retrieve_refs`.
Returns
-------
out : iterator
"""
kwargs = kwargs.copy()
kwargs['build_id_cache'] = kwargs.get('build_id_cache', kwargs.get('retrieve_refs'))
return IDXML(source, **kwargs).iterfind(path, **kwargs)
version_info = xml._make_version_info(IDXML)
def get_by_id(source, elem_id, **kwargs):
"""Parse `source` and return the element with `id` attribute equal
to `elem_id`. Returns :py:const:`None` if no such element is found.
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`get_by_id` calls on one file, you should
create an :py:class:`IDXML` object and use its
:py:meth:`!get_by_id` method.
Parameters
----------
source : str or file
        A path to a target idXML file or the file object itself.
elem_id : str
The value of the `id` attribute to match.
Returns
-------
out : :py:class:`dict` or :py:const:`None`
"""
return IDXML(source, **kwargs).get_by_id(elem_id, **kwargs)
chain = aux.ChainBase._make_chain(IDXML)
def is_decoy(psm, prefix=None):
"""Given a PSM dict, return :py:const:`True` if it is marked as decoy,
and :py:const:`False` otherwise.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : ignored
Returns
-------
out : bool
"""
return psm['PeptideHit'][0]['target_decoy'] == 'decoy'
def DataFrame(*args, **kwargs):
"""Read idXML files into a :py:class:`pandas.DataFrame`.
Requires :py:mod:`pandas`.
.. warning :: Only the first 'PeptideHit' element is considered in every 'PeptideIdentification'.
Parameters
----------
*args, **kwargs : passed to :py:func:`chain`
sep : str or None, optional
Some values related to PSMs (such as protein information) are variable-length
lists. If `sep` is a :py:class:`str`, they will be packed into single string using
this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
:py:const:`None`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
data = []
sep = kwargs.pop('sep', None)
with chain(*args, **kwargs) as f:
for item in f:
info = {}
for k, v in item.items():
if isinstance(v, (str, int, float)):
info[k] = v
peptide_hit = item.get('PeptideHit', [None])[0]
if peptide_hit is not None:
info.update((k, v) for k, v in peptide_hit.items() if isinstance(v, (str, int, float)))
protein = peptide_hit.get('protein')
if protein:
accessions, isd, starts, ends, scores, aa_bs, aa_as = [], [], [], [], [], [], []
for d, start, end, aab, aaa in zip(protein, peptide_hit['start'], peptide_hit['end'], peptide_hit['aa_before'], peptide_hit['aa_after']):
accessions.append(d.get('accession'))
isd.append(d.get('target_decoy'))
scores.append(d.get('score'))
starts.append(start)
ends.append(end)
aa_bs.append(aab)
aa_as.append(aaa)
isd = all(x == 'decoy' for x in isd)
if sep is not None:
if all(isinstance(acc, str) for acc in accessions):
accessions = sep.join(accessions)
if all(isinstance(aaa, str) for aaa in aa_as):
aa_as = sep.join(aa_as)
if all(isinstance(aab, str) for aab in aa_bs):
aa_bs = sep.join(aa_bs)
if all(acc is None for acc in accessions):
accessions = None
info.update((k, v) for k, v in protein[0].items() if isinstance(v, (str, int, float, list)))
info['accession'] = accessions
info['is decoy'] = isd
info['start'] = starts
info['end'] = ends
info['aa_before'] = aa_bs
info['aa_after'] = aa_as
data.append(info)
df = pd.DataFrame(data)
return df
def filter_df(*args, **kwargs):
"""Read idXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
Positional arguments can be idXML files or DataFrames.
Requires :py:mod:`pandas`.
.. warning :: Only the first 'PeptideHit' element is considered in every 'PeptideIdentification'.
Parameters
----------
key : str / iterable / callable, optional
Default is 'score'. You will probably need to change it.
is_decoy : str / iterable / callable, optional
Default is 'is decoy'.
*args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
kwargs.setdefault('key', 'score')
if all(isinstance(arg, pd.DataFrame) for arg in args):
df = pd.concat(args)
else:
df = DataFrame(*args, **kwargs)
if 'is_decoy' not in kwargs:
kwargs['is_decoy'] = 'is decoy'
return aux.filter(df, **kwargs)
fdr = aux._make_fdr(is_decoy, None)
_key = lambda x: x['PeptideHit'][0]['score']
qvalues = aux._make_qvalues(chain, is_decoy, None, _key)
filter = aux._make_filter(chain, is_decoy, None, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
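# Hedged usage sketch: iterate PSMs from an idXML file and count the decoy hits.
#   with IDXML('results.idXML') as reader:
#       n_decoys = sum(is_decoy(psm) for psm in reader)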
```
#### File: pyteomics/pyteomics/parser.py
```python
import re
from collections import deque
import itertools as it
from .auxiliary import PyteomicsError, memoize, BasicComposition
std_amino_acids = ['Q', 'W', 'E', 'R', 'T', 'Y', 'I', 'P', 'A', 'S',
'D', 'F', 'G', 'H', 'K', 'L', 'C', 'V', 'N', 'M']
"""modX labels for the 20 standard amino acids."""
std_nterm = 'H-'
"""modX label for the unmodified N-terminus."""
std_cterm = '-OH'
"""modX label for the unmodified C-terminus."""
std_labels = std_amino_acids + [std_nterm, std_cterm]
"""modX labels for the standard amino acids and unmodified termini."""
_nterm_mod = r'[^-]+-$'
_cterm_mod = r'-[^-]+$'
def is_term_mod(label):
"""Check if `label` corresponds to a terminal modification.
Parameters
----------
label : str
Returns
-------
out : bool
Examples
--------
>>> is_term_mod('A')
False
>>> is_term_mod('Ac-')
True
>>> is_term_mod('-customGroup')
True
>>> is_term_mod('this-group-')
False
>>> is_term_mod('-')
False
"""
return (re.match(_nterm_mod, label) or re.match(_cterm_mod, label)) is not None
def match_modX(label):
"""Check if `label` is a valid 'modX' label.
Parameters
----------
label : str
Returns
-------
out : re.match or None
"""
return re.match(_modX_single, label)
def is_modX(label):
"""Check if `label` is a valid 'modX' label.
Parameters
----------
label : str
Returns
-------
out : bool
Examples
--------
>>> is_modX('M')
True
>>> is_modX('oxM')
True
>>> is_modX('oxMet')
False
>>> is_modX('160C')
True
"""
return bool(match_modX(label))
def length(sequence, **kwargs):
"""Calculate the number of amino acid residues in a polypeptide
written in modX notation.
Parameters
----------
sequence : str or list or dict
A string with a polypeptide sequence, a list with a parsed sequence or
a dict of amino acid composition.
labels : list, optional
A list of allowed labels for amino acids and terminal modifications.
Returns
-------
out : int
Examples
--------
>>> length('PEPTIDE')
7
>>> length('H-PEPTIDE-OH')
7
"""
if not sequence: return 0
if isinstance(sequence, str) or isinstance(sequence, list):
if isinstance(sequence, str):
parsed_sequence = parse(sequence, **kwargs)
else:
parsed_sequence = sequence
num_term_groups = 0
if is_term_mod(parsed_sequence[0]):
num_term_groups += 1
if is_term_mod(parsed_sequence[-1]):
num_term_groups += 1
return len(parsed_sequence) - num_term_groups
elif isinstance(sequence, dict):
return sum(amount for aa, amount in sequence.items()
if not is_term_mod(aa))
raise PyteomicsError('Unsupported type of sequence.')
def _split_label(label):
try:
mod, X = match_modX(label).groups()
except AttributeError:
raise PyteomicsError('Cannot split a non-modX label: %s' % label)
if not mod:
return (X,)
else:
return mod, X
_modX_sequence = re.compile(r'^([^-]+-)?((?:[^A-Z-]*[A-Z])+)(-[^-]+)?$')
_modX_group = re.compile(r'[^A-Z-]*[A-Z]')
_modX_split = re.compile(r'([^A-Z-]*)([A-Z])')
_modX_single = re.compile(r'^([^A-Z-]*)([A-Z])$')
def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_modifications=False, **kwargs):
"""Parse a sequence string written in modX notation into a list of
labels or (if `split` argument is :py:const:`True`) into a list of
tuples representing amino acid residues and their modifications.
Parameters
----------
sequence : str
The sequence of a polypeptide.
show_unmodified_termini : bool, optional
If :py:const:`True` then the unmodified N- and C-termini are explicitly
shown in the returned list. Default value is :py:const:`False`.
split : bool, optional
If :py:const:`True` then the result will be a list of tuples with 1 to 4
elements: terminal modification, modification, residue. Default value is
:py:const:`False`.
allow_unknown_modifications : bool, optional
If :py:const:`True` then do not raise an exception when an unknown
modification of a known amino acid residue is found in the sequence.
This also includes terminal groups.
Default value is :py:const:`False`.
.. note::
Since version 2.5, this parameter has effect only if `labels`
are provided.
labels : container, optional
A container of allowed labels for amino acids,
modifications and terminal modifications.
If not provided, no checks will be done.
Separate labels for modifications (such as 'p' or 'ox')
can be supplied, which means they are applicable to all residues.
.. warning::
If `show_unmodified_termini` is set to :py:const:`True`, standard
terminal groups need to be present in `labels`.
.. warning::
Avoid using sequences with only one terminal group, as they are
ambiguous. If you provide one, `labels` (or :py:const:`std_labels`)
will be used to resolve the ambiguity.
Returns
-------
out : list
List of tuples with labels of modifications and amino acid residues.
Examples
--------
>>> parse('PEPTIDE', split=True)
[('P',), ('E',), ('P',), ('T',), ('I',), ('D',), ('E',)]
>>> parse('H-PEPTIDE')
['P', 'E', 'P', 'T', 'I', 'D', 'E']
>>> parse('PEPTIDE', show_unmodified_termini=True)
['H-', 'P', 'E', 'P', 'T', 'I', 'D', 'E', '-OH']
>>> parse('TEpSToxM', labels=std_labels + ['pS', 'oxM'])
['T', 'E', 'pS', 'T', 'oxM']
>>> parse('zPEPzTIDzE', True, True, labels=std_labels+['z'])
[('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',), ('z', 'E', '-OH')]
>>> parse('Pmod1EPTIDE')
['P', 'mod1E', 'P', 'T', 'I', 'D', 'E']
"""
sequence = str(sequence)
try:
n, body, c = re.match(_modX_sequence, sequence).groups()
except AttributeError:
raise PyteomicsError('Not a valid modX sequence: ' + sequence)
# Check for allowed labels, if they were explicitly given
labels = kwargs.get('labels')
# labels help save the day when only one terminal group is given
if c is None and n is not None:
if labels is None:
labels = std_labels
# we can try to resolve the ambiguity
if n != std_nterm and n not in labels:
# n is the body then
c = '-' + body
body = n[:-1]
n = None
# Actual parsing
if split:
parsed_sequence = [g if g[0] else (g[1],) for g in re.findall(
_modX_split, body)]
else:
parsed_sequence = re.findall(_modX_group, body)
nterm, cterm = (n or std_nterm), (c or std_cterm)
# Check against `labels` if given
if labels is not None:
labels = set(labels)
for term, std_term in zip([n, c], [std_nterm, std_cterm]):
if term and term not in labels and not allow_unknown_modifications:
raise PyteomicsError(
'Unknown label: {}'.format(term))
for group in parsed_sequence:
if split:
mod, X = group if len(group) == 2 else ('', group[0])
else:
mod, X = re.match(_modX_split, group).groups()
if ((not mod) and X not in labels) or not ((mod+X in labels) or (
X in labels and (
mod in labels or allow_unknown_modifications))):
raise PyteomicsError(
'Unknown label: {}'.format(group))
# Append terminal labels
if show_unmodified_termini or nterm != std_nterm:
if split:
parsed_sequence[0] = (nterm,) + parsed_sequence[0]
else:
parsed_sequence.insert(0, nterm)
if show_unmodified_termini or cterm != std_cterm:
if split:
parsed_sequence[-1] = parsed_sequence[-1] + (cterm,)
else:
parsed_sequence.append(cterm)
return parsed_sequence
def valid(*args, **kwargs):
"""Try to parse sequence and catch the exceptions.
All parameters are passed to :py:func:`parse`.
Returns
-------
out : bool
:py:const:`True` if the sequence was parsed successfully, and
:py:const:`False` otherwise.
"""
try:
parse(*args, **kwargs)
except PyteomicsError:
return False
return True
def fast_valid(sequence, labels=set(std_labels)):
"""Iterate over `sequence` and check if all items are in `labels`.
With strings, this only works as expected on sequences without
modifications or terminal groups.
Parameters
----------
sequence : iterable (expectedly, str)
The sequence to check. A valid sequence would be a string of
labels, all present in `labels`.
labels : iterable, optional
An iterable of known labels.
Returns
-------
out : bool
"""
return set(sequence).issubset(labels)
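# Illustrative sketch (not in the original source): `valid` fully parses the
# sequence and therefore understands modX modifications, while `fast_valid`
# only performs a per-character set-membership test. The 'oxM' label below is
# an example label, not something defined by this module.
def _validity_demo():
    print(valid('PEPTIDE', labels=std_labels))               # True
    print(fast_valid('PEPTIDE'))                             # True
    print(fast_valid('oxMPEPTIDE'))                          # False: 'o', 'x' are not labels
    print(valid('oxMPEPTIDE', labels=std_labels + ['oxM']))  # True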
def tostring(parsed_sequence, show_unmodified_termini=True):
"""Create a string from a parsed sequence.
Parameters
----------
parsed_sequence : iterable
Expected to be in one of the formats returned by
:py:func:`parse`, i.e. list of labels or list of tuples.
show_unmodified_termini : bool, optional
Defines the behavior towards standard terminal groups in the input.
:py:const:`True` means that they will be preserved if present (default).
:py:const:`False` means that they will be removed. Standard terminal
groups will not be added if not shown in `parsed_sequence`,
regardless of this setting.
Returns
-------
sequence : str
"""
parsed_sequence = list(parsed_sequence)
labels = []
nterm = parsed_sequence[0]
cterm = parsed_sequence[-1]
if isinstance(nterm, str):
if nterm != std_nterm or show_unmodified_termini:
labels.append(nterm)
labels.extend(parsed_sequence[1:-1])
if len(parsed_sequence) > 1 and (cterm != std_cterm or show_unmodified_termini):
labels.append(cterm)
else:
if len(parsed_sequence) == 1:
g = nterm
if nterm[0] == std_nterm and not show_unmodified_termini:
g = g[1:]
if nterm[-1] == std_cterm and not show_unmodified_termini:
g = g[:-1]
return ''.join(g)
if nterm[0] != std_nterm or show_unmodified_termini:
labels.append(''.join(nterm))
else:
labels.append(''.join(nterm[1:]))
labels.extend(''.join(g) for g in parsed_sequence[1:-1])
if len(parsed_sequence) > 1:
if cterm[-1] != std_cterm or show_unmodified_termini:
labels.append(''.join(cterm))
else:
labels.append(''.join(cterm[:-1]))
return ''.join(labels)
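# Illustrative sketch (not in the original source): `tostring` inverts `parse`.
# 'pS' is an example modification label, not something defined by this module.
def _tostring_demo():
    parsed = parse('H-PEpSTIDE-OH', show_unmodified_termini=True, split=True,
                   labels=std_labels + ['pS'])
    print(tostring(parsed, show_unmodified_termini=True))   # 'H-PEpSTIDE-OH'
    print(tostring(parsed, show_unmodified_termini=False))  # 'PEpSTIDE'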
def amino_acid_composition(sequence, show_unmodified_termini=False, term_aa=False, allow_unknown_modifications=False, **kwargs):
"""Calculate amino acid composition of a polypeptide.
Parameters
----------
sequence : str or list
The sequence of a polypeptide or a list with a parsed sequence.
show_unmodified_termini : bool, optional
If :py:const:`True` then the unmodified N- and C-terminus are explicitly
shown in the returned dict. Default value is :py:const:`False`.
term_aa : bool, optional
If :py:const:`True` then the terminal amino acid residues are
artificially modified with `nterm` or `cterm` modification.
Default value is :py:const:`False`.
allow_unknown_modifications : bool, optional
If :py:const:`True` then do not raise an exception when an unknown
modification of a known amino acid residue is found in the sequence.
Default value is :py:const:`False`.
labels : list, optional
A list of allowed labels for amino acids and terminal modifications.
Returns
-------
out : dict
A dictionary of amino acid composition.
Examples
--------
>>> amino_acid_composition('PEPTIDE') == \
{'I': 1, 'P': 2, 'E': 2, 'T': 1, 'D': 1}
True
>>> amino_acid_composition('PEPTDE', term_aa=True) == \
{'ctermE': 1, 'E': 1, 'D': 1, 'P': 1, 'T': 1, 'ntermP': 1}
True
>>> amino_acid_composition('PEPpTIDE', labels=std_labels+['pT']) == \
{'I': 1, 'P': 2, 'E': 2, 'D': 1, 'pT': 1}
True
"""
labels = kwargs.get('labels')
if isinstance(sequence, str):
parsed_sequence = parse(sequence, show_unmodified_termini,
allow_unknown_modifications=allow_unknown_modifications,
labels=labels)
elif isinstance(sequence, list):
if sequence and isinstance(sequence[0], tuple):
parsed_sequence = parse(tostring(sequence, True),
show_unmodified_termini,
allow_unknown_modifications=allow_unknown_modifications,
labels=labels)
else:
parsed_sequence = sequence
else:
        raise PyteomicsError('Unsupported type of a sequence. '
                             'Must be str or list, not %s' % type(sequence))
aa_dict = BasicComposition()
# Process terminal amino acids.
if term_aa:
nterm_aa_position = 1 if is_term_mod(parsed_sequence[0]) else 0
cterm_aa_position = (
len(parsed_sequence) - 2 if is_term_mod(parsed_sequence[-1])
else len(parsed_sequence) - 1)
if len(parsed_sequence) > 1:
aa_dict['cterm' + parsed_sequence.pop(cterm_aa_position)] = 1
aa_dict['nterm' + parsed_sequence.pop(nterm_aa_position)] = 1
# Process core amino acids.
for aa in parsed_sequence:
aa_dict[aa] += 1
return aa_dict
@memoize()
def cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):
"""Cleaves a polypeptide sequence using a given rule.
Parameters
----------
sequence : str
The sequence of a polypeptide.
.. note::
The sequence is expected to be in one-letter uppercase notation.
Otherwise, some of the cleavage rules in :py:data:`expasy_rules`
will not work as expected.
rule : str or compiled regex
A key present in :py:const:`expasy_rules` or a
`regular expression <https://docs.python.org/library/re.html#regular-expression-syntax>`_
describing the site of cleavage. It is recommended
to design the regex so that it matches only the residue whose C-terminal
bond is to be cleaved. All additional requirements should be specified
using `lookaround assertions
<http://www.regular-expressions.info/lookaround.html>`_.
:py:data:`expasy_rules` contains cleavage rules for popular cleavage agents.
missed_cleavages : int, optional
Maximum number of allowed missed cleavages. Defaults to 0.
min_length : int or None, optional
Minimum peptide length. Defaults to :py:const:`None`.
.. note ::
This checks for string length, which is only correct for one-letter
notation and not for full *modX*. Use :py:func:`length` manually if
you know what you are doing and apply :py:func:`cleave` to *modX*
sequences.
semi : bool, optional
Include products of semi-specific cleavage. Default is :py:const:`False`.
This effectively cuts every peptide at every position and adds results to the output.
exception : str or compiled RE or None, optional
Exceptions to the cleavage rule. If specified, should be a key present in :py:const:`expasy_rules`
or regular expression. Cleavage sites matching `rule` will be checked against `exception` and omitted
if they match.
Returns
-------
out : set
A set of unique (!) peptides.
Examples
--------
>>> cleave('AKAKBK', expasy_rules['trypsin'], 0) == {'AK', 'BK'}
True
>>> cleave('AKAKBK', 'trypsin', 0) == {'AK', 'BK'}
True
>>> cleave('GKGKYKCK', expasy_rules['trypsin'], 2) == \
{'CK', 'GKYK', 'YKCK', 'GKGK', 'GKYKCK', 'GK', 'GKGKYK', 'YK'}
True
"""
return set(_cleave(sequence, rule, missed_cleavages, min_length, semi, exception))
def _cleave(sequence, rule, missed_cleavages=0, min_length=None, semi=False, exception=None):
"""Like :py:func:`cleave`, but the result is a list. Refer to
:py:func:`cleave` for explanation of parameters.
"""
rule = expasy_rules.get(rule, rule)
exception = expasy_rules.get(exception, exception)
peptides = []
ml = missed_cleavages+2
trange = range(ml)
cleavage_sites = deque([0], maxlen=ml)
if min_length is None:
min_length = 1
cl = 1
if exception is not None:
exceptions = {x.end() for x in re.finditer(exception, sequence)}
for i in it.chain([x.end() for x in re.finditer(rule, sequence)],
[None]):
if exception is not None and i in exceptions:
continue
cleavage_sites.append(i)
if cl < ml:
cl += 1
for j in trange[:cl-1]:
seq = sequence[cleavage_sites[j]:cleavage_sites[-1]]
if seq and len(seq) >= min_length:
peptides.append(seq)
if semi:
for k in range(min_length, len(seq)-1):
peptides.append(seq[:k])
for k in range(1, len(seq)-min_length+1):
peptides.append(seq[k:])
return peptides
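# Illustrative sketch (not in the original source): in-silico digestion via the
# public `cleave` wrapper; the peptide string is arbitrary example data.
def _cleave_demo():
    peptides = cleave('AKRGKSTELK', expasy_rules['trypsin'],
                      missed_cleavages=1, min_length=2)
    print(sorted(peptides))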
def num_sites(sequence, rule, **kwargs):
"""Count the number of sites where `sequence` can be cleaved using
the given `rule` (e.g. number of miscleavages for a peptide).
Parameters
----------
sequence : str
The sequence of a polypeptide.
rule : str or compiled regex
A regular expression describing the site of cleavage. It is recommended
to design the regex so that it matches only the residue whose C-terminal
bond is to be cleaved. All additional requirements should be specified
using `lookaround assertions
<http://www.regular-expressions.info/lookaround.html>`_.
labels : list, optional
A list of allowed labels for amino acids and terminal modifications.
exception : str or compiled RE or None, optional
Exceptions to the cleavage rule. If specified, should be a regular expression.
Cleavage sites matching `rule` will be checked against `exception` and omitted
if they match.
Returns
-------
out : int
Number of cleavage sites.
"""
return len(_cleave(sequence, rule, **kwargs)) - 1
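# Illustrative sketch (not in the original source): counting remaining tryptic
# cleavage sites, i.e. the number of missed cleavages in a peptide.
def _num_sites_demo():
    # -> 2 (the C-terminal K is not counted because nothing follows it)
    print(num_sites('GKGKYK', expasy_rules['trypsin']))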
expasy_rules = {
'arg-c': r'R',
'asp-n': r'\w(?=D)',
'bnps-skatole' : r'W',
'caspase 1': r'(?<=[FWYL]\w[HAT])D(?=[^PEDQKR])',
'caspase 2': r'(?<=DVA)D(?=[^PEDQKR])',
'caspase 3': r'(?<=DMQ)D(?=[^PEDQKR])',
'caspase 4': r'(?<=LEV)D(?=[^PEDQKR])',
'caspase 5': r'(?<=[LW]EH)D',
'caspase 6': r'(?<=VE[HI])D(?=[^PEDQKR])',
'caspase 7': r'(?<=DEV)D(?=[^PEDQKR])',
'caspase 8': r'(?<=[IL]ET)D(?=[^PEDQKR])',
'caspase 9': r'(?<=LEH)D',
'caspase 10': r'(?<=IEA)D',
'chymotrypsin high specificity' : r'([FY](?=[^P]))|(W(?=[^MP]))',
'chymotrypsin low specificity':
r'([FLY](?=[^P]))|(W(?=[^MP]))|(M(?=[^PY]))|(H(?=[^DMPW]))',
'clostripain': r'R',
'cnbr': r'M',
'enterokinase': r'(?<=[DE]{3})K',
'factor xa': r'(?<=[AFGILTVM][DE]G)R',
'formic acid': r'D',
'glutamyl endopeptidase': r'E',
'granzyme b': r'(?<=IEP)D',
'hydroxylamine': r'N(?=G)',
'iodosobenzoic acid': r'W',
'lysc': r'K',
'ntcb': r'\w(?=C)',
'pepsin ph1.3': r'((?<=[^HKR][^P])[^R](?=[FL][^P]))|'
r'((?<=[^HKR][^P])[FL](?=\w[^P]))',
'pepsin ph2.0': r'((?<=[^HKR][^P])[^R](?=[FLWY][^P]))|'
r'((?<=[^HKR][^P])[FLWY](?=\w[^P]))',
'proline endopeptidase': r'(?<=[HKR])P(?=[^P])',
'proteinase k': r'[AEFILTVWY]',
'staphylococcal peptidase i': r'(?<=[^E])E',
'thermolysin': r'[^DE](?=[AFILMV])',
'thrombin': r'((?<=G)R(?=G))|'
r'((?<=[AFGILTVM][AFGILTVWA]P)R(?=[^DE][^DE]))',
'trypsin': r'([KR](?=[^P]))|((?<=W)K(?=P))|((?<=M)R(?=P))',
'trypsin_exception': r'((?<=[CD])K(?=D))|((?<=C)K(?=[HY]))|((?<=C)R(?=K))|((?<=R)R(?=[HR]))',
}
"""
This dict contains regular expressions for cleavage rules of the most
popular proteolytic enzymes. The rules were taken from the
`PeptideCutter tool
<http://ca.expasy.org/tools/peptidecutter/peptidecutter_enzymes.html>`_
at Expasy.
.. note::
'trypsin_exception' can be used as `exception` argument when calling
:py:func:`cleave` with 'trypsin' `rule`::
>>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'])
{'DE', 'PEPTIDK'}
>>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'], \
exception=parser.expasy_rules['trypsin_exception'])
{'PEPTIDKDE'}
"""
def isoforms(sequence, **kwargs):
"""
Apply variable and fixed modifications to the polypeptide and yield
the unique modified sequences.
Parameters
----------
sequence : str
Peptide sequence to modify.
variable_mods : dict, optional
A dict of variable modifications in the following format:
:py:const:`{'label1': ['X', 'Y', ...], 'label2': ['X', 'A', 'B', ...]}`
Keys in the dict are modification labels (terminal modifications allowed).
Values are iterables of residue labels (one letter each) or
:py:const:`True`. If a value for a modification is :py:const:`True`,
it is applicable to any residue (useful for terminal modifications).
You can use values such as 'ntermX' or 'ctermY' to specify that a
        modification only occurs when the residue is in the terminal position.
This is *not needed* for terminal modifications.
.. note:: Several variable modifications can occur on amino acids of the
same type, but in the output each amino acid residue will be
modified at most once (apart from terminal modifications).
fixed_mods : dict, optional
A dict of fixed modifications in the same format.
**Note**: if a residue is affected by a fixed modification, no variable
modifications will be applied to it (apart from terminal modifications).
labels : list, optional
A list of amino acid labels containing all the labels present in
`sequence`. Modified entries will be added automatically.
Defaults to :py:data:`std_labels`.
Not required since version 2.5.
max_mods : int or None, optional
Number of modifications that can occur simultaneously on a peptide,
excluding fixed modifications. If :py:const:`None` or if ``max_mods``
is greater than the number of modification sites, all possible
isoforms are generated. Default is :py:const:`None`.
override : bool, optional
Defines how to handle the residues that are modified in the input.
:py:const:`False` means that they will be preserved (default).
:py:const:`True` means they will be treated as unmodified.
show_unmodified_termini : bool, optional
If :py:const:`True` then the unmodified N- and C-termini are explicitly
shown in the returned sequences. Default value is :py:const:`False`.
format : str, optional
If :py:const:`'str'` (default), an iterator over sequences is returned.
If :py:const:`'split'`, the iterator will yield results in the same
format as :py:func:`parse` with the 'split' option, with unmodified
terminal groups shown.
Returns
-------
out : iterator over strings or lists
All possible unique polypeptide sequences resulting from
        the specified modifications are yielded one by one.
"""
def main(group): # index of the residue (capital letter) in `group`
if group[-1][0] == '-':
i = -2
else:
i = -1
return len(group) + i, group[i]
def apply_mod(label, mod):
# `label` is assumed to be a tuple (see split option of parse)
# unmodified termini are assumed shown
# if the modification is not applicable, `label` is returned
group = list(label)
m = main(group)[0]
if m == 0 and not is_term_mod(mod):
group.insert(0, mod)
elif mod[0] == '-' and (group[-1] == std_cterm or (
group[-1][0] == '-' and override)):
group[-1] = mod
elif mod[-1] == '-' and (group[0] == std_nterm or (
group[0][-1] == '-' and override)):
group[0] = mod
elif not is_term_mod(mod):
if m and not group[m-1][-1] == '-':
if override:
group[m-1] = mod
else:
group.insert(m, mod)
return tuple(group)
variable_mods = kwargs.get('variable_mods', {})
fixed_mods = kwargs.get('fixed_mods', {})
parse_kw = {}
if 'labels' in kwargs:
parse_kw['labels'] = list(kwargs['labels']) + list(fixed_mods)
parsed = parse(sequence, True, True, **parse_kw)
override = kwargs.get('override', False)
show_unmodified_termini = kwargs.get('show_unmodified_termini', False)
max_mods = kwargs.get('max_mods')
format_ = kwargs.get('format', 'str')
# Apply fixed modifications
for cmod in fixed_mods:
for i, group in enumerate(parsed):
if fixed_mods[cmod] == True or main(group)[1] in fixed_mods[cmod]:
parsed[i] = apply_mod(group, cmod)
# Create a list of possible states for each group
# Start with N-terminal mods and regular mods on the N-terminal residue
second = set(apply_mod(parsed[0], m) for m, r in variable_mods.items()
if (r == True or
main(parsed[0])[1] in r or
'nterm' + main(parsed[0])[1] in r or
(len(parsed) == 1 and 'cterm' + main(parsed[0])[1] in r))
and not is_term_mod(m)
).union([parsed[0]])
first = it.chain((apply_mod(group, mod) for group in second
for mod, res in variable_mods.items()
if (mod.endswith('-') or (mod.startswith('-') and len(parsed) == 1))
and (res == True or main(group)[1] in res)), second)
states = [[parsed[0]] + list(set(first).difference({parsed[0]}))]
# Continue with regular mods
states.extend([group] + list(set(apply_mod(group, mod)
for mod in variable_mods if (
variable_mods[mod] == True or
group[-1] in variable_mods[mod]) and not is_term_mod(mod)
).difference({group}))
for group in parsed[1:-1])
# Finally add C-terminal mods and regular mods on the C-terminal residue
if len(parsed) > 1:
second = set(apply_mod(parsed[-1], m) for m, r in variable_mods.items()
if (r == True or
main(parsed[-1])[1] in r or
'cterm' + main(parsed[-1])[1] in r)
and not is_term_mod(m)
).union((parsed[-1],))
first = it.chain((apply_mod(group, mod) for group in second
for mod, res in variable_mods.items()
if mod.startswith('-') and (
res == True or main(group)[1] in res)), second)
states.append([parsed[-1]] + list(set(first).difference({parsed[-1]})))
sites = [s for s in enumerate(states) if len(s[1]) > 1]
if max_mods is None or max_mods > len(sites):
possible_states = it.product(*states)
else:
def state_lists():
for m in range(max_mods+1):
for comb in it.combinations(sites, m):
skel = [[s[0]] for s in states]
for i, e in comb:
skel[i] = e[1:]
yield skel
possible_states = it.chain.from_iterable(
it.product(*skel) for skel in state_lists())
if format_ == 'split':
def strip_std_terms():
for ps in possible_states:
ps = list(ps)
if not show_unmodified_termini:
if ps[0][0] == std_nterm:
ps[0] = ps[0][1:]
if ps[-1][-1] == std_cterm:
ps[-1] = ps[-1][:-1]
yield ps
return strip_std_terms()
elif format_ == 'str':
return (tostring(form, show_unmodified_termini)
for form in possible_states)
else:
raise PyteomicsError('Unsupported value of "format": {}'.format(format_))
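# Illustrative sketch (not in the original source): enumerating modified forms
# of a peptide. The labels 'p' (on S/T) and 'ox' (on M) are example choices,
# not anything defined by this module.
def _isoforms_demo():
    forms = isoforms('ASMTK',
                     variable_mods={'p': ['S', 'T']},
                     fixed_mods={'ox': ['M']},
                     max_mods=1)
    for form in sorted(forms):
        print(form)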
def coverage(protein, peptides):
"""Calculate how much of `protein` is covered by `peptides`.
Peptides can overlap. If a peptide is found multiple times in `protein`,
it contributes more to the overall coverage.
Requires :py:mod:`numpy`.
.. note::
Modifications and terminal groups are discarded.
Parameters
----------
protein : str
A protein sequence.
peptides : iterable
An iterable of peptide sequences.
Returns
-------
out : float
The sequence coverage, between 0 and 1.
Examples
--------
>>> coverage('PEPTIDES'*100, ['PEP', 'EPT'])
0.5
"""
import numpy as np
protein = re.sub(r'[^A-Z]', '', protein)
mask = np.zeros(len(protein), dtype=np.int8)
for peptide in peptides:
indices = [m.start() for m in re.finditer(
'(?={})'.format(re.sub(r'[^A-Z]', '', peptide)), protein)]
for i in indices:
mask[i:i+len(peptide)] = 1
return mask.sum(dtype=float) / mask.size
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: pyteomics/pyteomics/pepxml.py
```python
from lxml import etree
from . import xml, auxiliary as aux, _schema_defaults
class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):
"""Parser class for pepXML files."""
file_format = 'pepXML'
_root_element = 'msms_pipeline_analysis'
_default_schema = _schema_defaults._pepxml_schema_defaults
_default_version = '1.15'
_default_iter_tag = 'spectrum_query'
_indexed_tags = {'spectrum_query'}
_indexed_tag_keys = {'spectrum_query': 'spectrum'}
_default_id_attr = 'spectrum'
_structures_to_flatten = {'search_score_summary', 'modification_info'}
# attributes which contain unconverted values
_convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff',
'probability', 'variable', 'static'},
'int': {'start_scan', 'end_scan', 'index', 'num_matched_peptides'},
'bool': {'is_rejected'},
'floatarray': {'all_ntt_prob'}}.items()
def _get_info_smart(self, element, **kwargs):
"""Extract the info in a smart way depending on the element type"""
try:
name = kwargs.pop('ename')
except KeyError:
name = xml._local_name(element)
rec = kwargs.pop('recursive', None)
if name == self._root_element:
info = self._get_info(element, ename=name,
recursive=(rec if rec is not None else False),
**kwargs)
else:
info = self._get_info(element, ename=name,
recursive=(rec if rec is not None else True),
**kwargs)
def safe_float(s):
try:
return float(s)
except ValueError:
if s.startswith('+-0'):
return 0
return None
converters = {'float': safe_float, 'int': int,
'bool': lambda x: x.lower() in {'1', 'true'},
'floatarray': lambda x: list(map(float, x[1:-1].split(',')))}
for k, v in dict(info).items():
for t, s in self._convert_items:
if k in s:
del info[k]
info[k] = converters[t](v)
for k in {'search_score', 'parameter'}:
if k in info and isinstance(info[k], list) and all(
isinstance(x, dict) and len(x) == 1 for x in info[k]):
scores = {}
for score in info[k]:
name, value = score.popitem()
try:
scores[name] = float(value)
except ValueError:
scores[name] = value
info[k] = scores
if 'search_result' in info and len(info['search_result']) == 1:
info.update(info['search_result'][0])
del info['search_result']
if 'protein' in info and 'peptide' in info:
info['proteins'] = [{'protein': info.pop('protein'),
'protein_descr': info.pop('protein_descr', None)}]
for add_key in {'peptide_prev_aa', 'peptide_next_aa', 'protein_mw'}:
if add_key in info:
info['proteins'][0][add_key] = info.pop(add_key)
info['proteins'][0]['num_tol_term'] = info.pop('num_tol_term', 0)
if 'alternative_protein' in info:
info['proteins'].extend(info['alternative_protein'])
del info['alternative_protein']
if 'peptide' in info and not 'modified_peptide' in info:
info['modified_peptide'] = info['peptide']
if 'peptide' in info:
info['modifications'] = info.pop('mod_aminoacid_mass', [])
if 'mod_nterm_mass' in info:
info['modifications'].insert(0, {'position': 0,
'mass': float(info.pop('mod_nterm_mass'))})
if 'mod_cterm_mass' in info:
info['modifications'].append({'position': 1 + len(info['peptide']),
'mass': float(info.pop('mod_cterm_mass'))})
if 'modified_peptide' in info and info['modified_peptide'] == info.get(
'peptide'):
if not info.get('modifications'):
info['modifications'] = []
else:
mp = info['modified_peptide']
for mod in sorted(info['modifications'],
key=lambda m: m['position'],
reverse=True):
if mod['position'] not in {0, 1+len(info['peptide'])}:
p = mod['position']
mp = mp[:p] + '[{}]'.format(int(mod['mass'])) + mp[p:]
info['modified_peptide'] = mp
if 'search_hit' in info:
info['search_hit'].sort(key=lambda x: x['hit_rank'])
return info
def read(source, read_schema=False, iterative=True, **kwargs):
"""Parse `source` and iterate through peptide-spectrum matches.
Parameters
----------
source : str or file
A path to a target pepXML file or the file object itself.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the pepXML header. Otherwise, use default parameters.
        Not recommended without an Internet connection or
        if you want to avoid the related warnings.
iterative : bool, optional
Defines whether iterative parsing should be used. It helps reduce
memory usage at almost the same parsing speed. Default is
:py:const:`True`.
Returns
-------
out : PepXML
An iterator over dicts with PSM properties.
"""
return PepXML(source, read_schema=read_schema, iterative=iterative)
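# Illustrative sketch (not in the original source): iterating over PSMs with
# the reader above. 'results.pep.xml' is a placeholder path, and the keys used
# here assume a typical search-engine pepXML output.
def _read_demo():
    with read('results.pep.xml') as psms:
        for psm in psms:
            hit = psm['search_hit'][0]
            print(psm['spectrum'], hit['peptide'], hit['search_score'].get('expect'))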
def iterfind(source, path, **kwargs):
"""Parse `source` and yield info on elements with specified local
name or by specified "XPath".
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`iterfind` calls on one file, you should
create an :py:class:`PepXML` object and use its
:py:meth:`!iterfind` method.
Parameters
----------
source : str or file
File name or file-like object.
path : str
Element name or XPath-like expression. Only local names separated
with slashes are accepted. An asterisk (`*`) means any element.
You can specify a single condition in the end, such as:
``"/path/to/element[some_value>1.5]"``
Note: you can do much more powerful filtering using plain Python.
The path can be absolute or "free". Please don't specify
namespaces.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the pepXML header. Otherwise, use default parameters.
        Not recommended without an Internet connection or
        if you want to avoid the related warnings.
Returns
-------
out : iterator
"""
return PepXML(source, **kwargs).iterfind(path, **kwargs)
version_info = xml._make_version_info(PepXML)
def roc_curve(source):
"""Parse source and return a ROC curve for peptideprophet analysis.
Parameters
----------
source : str or file
A path to a target pepXML file or the file object itself.
Returns
-------
out : list
A list of ROC points.
"""
parser = etree.XMLParser(remove_comments=True, ns_clean=True)
tree = etree.parse(source, parser=parser)
roc_curve = []
for roc_error_data in tree.xpath(
"/*[local-name()='msms_pipeline_analysis'] \
//*[local-name()='analysis_summary' and @analysis='peptideprophet'] \
//*[local-name()='peptideprophet_summary'] \
//*[local-name()='roc_error_data']"):
for element in roc_error_data.xpath("*[local-name()='roc_data_point' or local-name()='error_point']"):
data_point = dict(element.attrib)
for key in data_point:
data_point[key] = float(data_point[key])
data_point["charge"] = roc_error_data.attrib["charge"]
data_point["tag"] = etree.QName(element).localname
roc_curve.append(data_point)
return roc_curve
# chain = aux._make_chain(read, 'read')
chain = aux.ChainBase._make_chain(read)
def _is_decoy_prefix(psm, prefix='DECOY_'):
"""Given a PSM dict, return :py:const:`True` if all protein names for
the PSM start with ``prefix``, and :py:const:`False` otherwise. This
function might not work for some pepXML flavours. Use the source to get the
idea and suit it to your needs.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(protein['protein'].startswith(prefix)
for protein in psm['search_hit'][0]['proteins'])
def _is_decoy_suffix(psm, suffix='_DECOY'):
return all(protein['protein'].endswith(suffix)
for protein in psm['search_hit'][0]['proteins'])
is_decoy = _is_decoy_prefix
fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)
_key = lambda x: min(
sh['search_score']['expect'] for sh in x['search_hit'])
qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)
filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
def DataFrame(*args, **kwargs):
"""Read pepXML output files into a :py:class:`pandas.DataFrame`.
Requires :py:mod:`pandas`.
Parameters
----------
*args, **kwargs : passed to :py:func:`chain`
sep : str or None, optional
Some values related to PSMs (such as protein information) are variable-length
lists. If `sep` is a :py:class:`str`, they will be packed into single string using
this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
:py:const:`None`.
pd_kwargs : dict, optional
Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
kwargs = kwargs.copy()
sep = kwargs.pop('sep', None)
pd_kwargs = kwargs.pop('pd_kwargs', {})
def gen_items():
with chain(*args, **kwargs) as f:
for item in f:
info = {}
for k, v in item.items():
if isinstance(v, (str, int, float)):
info[k] = v
if 'search_hit' in item:
sh = item['search_hit'][0]
proteins = sh.pop('proteins')
prot_dict = {}
for p in proteins:
for k in p:
prot_dict[k] = []
for p in proteins:
for k, v in prot_dict.items():
v.append(p.get(k))
if sep is None:
info.update(prot_dict)
else:
for k, v in prot_dict.items():
info[k] = sep.join(str(val) if val is not None else '' for val in v)
info.update(sh.pop('search_score'))
mods = sh.pop('modifications', [])
formatted_mods = ['{0[mass]:.3f}@{0[position]}'.format(x) for x in mods]
if sep is not None:
info['modifications'] = sep.join(formatted_mods)
else:
info['modifications'] = formatted_mods
for k, v in sh.items():
if isinstance(v, (str, int, float)):
info[k] = v
if 'analysis_result' in sh:
for ar in sh['analysis_result']:
if ar['analysis'] == 'peptideprophet':
try:
info.update(ar['peptideprophet_result']['parameter'])
except KeyError:
pass
info['peptideprophet_probability'] = ar['peptideprophet_result']['probability']
info['peptideprophet_ntt_prob'] = ar['peptideprophet_result']['all_ntt_prob']
elif ar['analysis'] == 'interprophet':
info.update(ar['interprophet_result']['parameter'])
info['interprophet_probability'] = ar['interprophet_result']['probability']
info['interprophet_ntt_prob'] = ar['interprophet_result']['all_ntt_prob']
yield info
return pd.DataFrame(gen_items(), **pd_kwargs)
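# Illustrative sketch (not in the original source): loading PSMs into pandas.
# 'results.pep.xml' is a placeholder path; the available score columns depend
# on the search engine that produced the file.
def _dataframe_demo():
    df = DataFrame('results.pep.xml', sep=';')
    # protein accessions are joined with ';' because of `sep`
    print(df[['peptide', 'protein']].head())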
def filter_df(*args, **kwargs):
"""Read pepXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
Positional arguments can be pepXML files or DataFrames.
Requires :py:mod:`pandas`.
Parameters
----------
key : str / iterable / callable, optional
Default is 'expect'.
is_decoy : str / iterable / callable, optional
Default is to check if all strings in the "protein" column start with `'DECOY_'`
*args, **kwargs : passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
sep = kwargs.get('sep')
kwargs.setdefault('key', 'expect')
if all(isinstance(arg, pd.DataFrame) for arg in args):
if len(args) > 1:
df = pd.concat(args)
else:
df = args[0]
else:
read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}
df = DataFrame(*args, **read_kw)
if 'is_decoy' not in kwargs:
if sep is not None:
if 'decoy_suffix' in kwargs:
kwargs['is_decoy'] = df['protein'].str.split(';').apply(
lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
else:
kwargs['is_decoy'] = df['protein'].str.split(';').apply(
lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
else:
if 'decoy_suffix' in kwargs:
kwargs['is_decoy'] = df['protein'].apply(
lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
else:
kwargs['is_decoy'] = df['protein'].apply(
lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
return aux.filter(df, **kwargs)
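# Illustrative sketch (not in the original source): FDR filtering of PSMs read
# from a placeholder file; the `fdr` keyword is handled by `auxiliary.filter`.
def _filter_df_demo():
    filtered = filter_df('results.pep.xml', fdr=0.01)
    print(len(filtered), 'PSMs at 1% FDR')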
``` |
{
"source": "jmuelmen/scmlib",
"score": 2
} |
#### File: scmlib/SCM_diagnostics/SCM_diagnostics_functions.py
```python
from netCDF4 import Dataset
import tarfile
import numpy as np
import os
def makevarlist(datadir,filelist,dimtest,varstoplot):
numfiles=len(filelist)
for f in range(0,numfiles):
filename=filelist[f]
file=datadir+filename
print(file)
fh=Dataset(file,mode='r')
        varsinfile=list(fh.variables.keys())
numvars=len(varsinfile)
dimtest_rev=dimtest
        dimsinfile=list(fh.dimensions.keys())
if (dimsinfile[0] == "ncol"):
dimtest_rev=dimtest-1
for v in range(0,numvars):
varname=varsinfile[v]
if (varname not in varstoplot):
vartotest=fh.variables[varname][:]
if (vartotest.ndim == dimtest_rev):
theshape=np.shape(vartotest)
if (theshape[vartotest.ndim - 1] == 1):
varstoplot.append(varname)
return varstoplot
def make_tarfile(output_filename,source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
return make_tarfile
def replace_string(infile, outfile, replacements):
with open(infile) as fin:
with open(outfile, 'w') as fout:
for line in fin:
                for src, target in replacements.items():
line = line.replace(src,target)
fout.write(line)
return outfile
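# Illustrative sketch (not in the original source): typical use of the helpers
# above. All file and directory names are placeholders.
def _scm_demo():
    replace_string('namelist.template', 'namelist.input',
                   {'CASENAME': 'test_case', 'NDAYS': '10'})
    make_tarfile('diagnostics.tar.gz', 'diagnostic_plots/')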
``` |
{
"source": "jmuema/instaclone254",
"score": 2
} |
#### File: instaclone254/gram254/email.py
```python
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from .token import account_activation_token
def activation_email(user, current_site, receiver):
subject = 'Account Activation'
message = render_to_string('registration/confirm.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user)
})
email = EmailMessage(subject, message, to=[receiver])
email.send()
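# Illustrative sketch (not part of this module): a registration view might call
# `activation_email` after saving an inactive user. The view below is a
# hypothetical outline, not code from this project.
#
#   from django.contrib.sites.shortcuts import get_current_site
#
#   def signup(request):
#       ...
#       user.is_active = False
#       user.save()
#       activation_email(user, get_current_site(request), user.email)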
``` |
{
"source": "jmuema/password-locker",
"score": 4
} |
#### File: jmuema/password-locker/credential_test.py
```python
import unittest # Importing the unittest module
from credential import Credential # Importing the credential class
class TestCredential(unittest.TestCase):
'''
Test class that defines test cases for the credential class behaviours.
Args:
unittest.TestCase: TestCase class that helps in creating test cases
'''
def setUp(self):
'''
Set up method to run before each test cases.
'''
self.new_credential = Credential("instagram", "ironman", "ironman20") # create credential object
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_credential.myaccountname,"instagram")
self.assertEqual(self.new_credential.myusername,"ironman")
self.assertEqual(self.new_credential.mypassword,"<PASSWORD>")
def test_save_credential(self):
'''
test_save_credential test case to test if the credential object is saved into
the credential list
'''
self.new_credential.save_credential() # saving the new credential
self.assertEqual(len(Credential.credential_list),1)
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
Credential.credential_list = []
def test_save_multiple_credential(self):
'''
        test to check if we can save multiple credential objects within the credentials list
'''
self.new_credential.save_credential()
test_credential = Credential("instagram","ironman","ironman20") # new credential
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_delete_credential(self):
'''
test to remove a credential from our credential list
'''
self.new_credential.save_credential()
test_credential = Credential("instagram","ironman","ironman20") # new credential
test_credential.save_credential()
self.new_credential.delete_credential()# Deleting a credential object
self.assertEqual(len(Credential.credential_list),1)
def test_find_credential_by_username(self):
'''
test to check if we can find a credential by username and display information
'''
self.new_credential.save_credential()
test_credential = Credential("instagram","ironman","ironman20") # new credential
test_credential.save_credential()
found_credential = Credential.find_by_username("ironman")
self.assertEqual(found_credential.mypassword,test_credential.mypassword)
def test_credential_exists(self):
'''
test to check if we can return a Boolean if we cannot find the credential.
'''
self.new_credential.save_credential()
test_credential = Credential("instagram","ironman","ironman20") # new credential
test_credential.save_credential()
credential_exists = Credential.credential_exist("ironman")
self.assertTrue(credential_exists)
def test_display_all_credentials(self):
'''
method that returns a list of all credentials saved
'''
self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JMuff22/Scripts",
"score": 3
} |
#### File: casino/positron_utils/compute_e_twist.py
```python
import numpy as np
from scipy.optimize import curve_fit
import argparse
import sys
def get_args():
"""Define the task arguments with the default values.
Returns:
experiment parameters
"""
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
'--qmc_ks',
help='Filename of the QMC twisted energies in HA.',
type=str,
default='E_qmc_ks.txt'
)
args_parser.add_argument(
'--dft_ks',
help='Filename of the DFT twisted energies in Ry.',
type=str,
default='E_dft_ks.txt'
)
args_parser.add_argument(
'--dft_dense',
help='Value of the dft energy with dense k-grid in Ry',
type=float,
default=0.0
)
args_parser.add_argument(
'--atsup_dense',
help='Difference of the atsup energies epsilon_ks - epsilon_dense in Ha.',
type=float,
default=0.0
)
return args_parser.parse_args()
def Eks(Edft, E_ta, b):
return E_ta+b*Edft
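# Illustrative note (not in the original script): Eks models the twist-corrected
# QMC energy as a linear function of the DFT twist energy,
#   E_QMC(k_s) ~ E_TA + b * E_DFT(k_s),
# so the curve_fit call in main() returns the twist-averaged estimate E_TA as
# popt[0] and the slope b as popt[1].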
def main():
args=get_args()
# Inputs: Energies of QMC, DFT, and DFT dense. In addition, positron energy
    # is given as the difference E_atsup_loose-E_atsup_dense.
Eqmc=np.loadtxt(args.qmc_ks)
Edft=0.5*np.loadtxt(args.dft_ks)
if(Eqmc.shape[0]!=Edft.shape[0]):
sys.exit("QMC and DFT have different number of twists.")
Edft_dense=0.5*args.dft_dense
Epositron=args.atsup_dense
ydata=Eqmc[:,0]
xdata=Edft-Edft_dense+Epositron
popt,pcov=curve_fit(Eks,xdata,ydata)
print(popt)
print("-")
print(pcov)
if __name__=='__main__':
main()
```
#### File: casino/positron_utils/connect.py
```python
import numpy as np
import matplotlib.pyplot as plt
import argparse
import sys
def get_args():
"""Define the task arguments with the default values.
Returns:
experiment parameters
"""
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
'--files',
help='lineplot.dat-files.',
type=str,
nargs='+',
default=['lineplot.dat']
)
# Data files arguments
args_parser.add_argument(
'--dft',
help='Filename of the DFT result.',
type=str,
default='acar1d_100_ave'
)
args_parser.add_argument(
'--weight',
help=
'''
value by which the APMD coefficients are to be weighted.
Hint: in si-units, S=1.9014321124319818e-21
''',
type=float,
default=1
)
args_parser.add_argument(
'--ylog',
help='log scale (1) or not (0).',
type=int,
default=0
)
args_parser.add_argument(
'--ylim',
help='ylim1,ylim2',
type=str,
default="not"
)
args_parser.add_argument(
'--xlim',
help='xlim1,xlim2',
type=str,
default="not"
)
return args_parser.parse_args()
def main():
args=get_args()
if(args.ylim!="not"):
temp=args.ylim.split(',')
if(len(temp)!=2):
sys.exit("Wrong ylim input")
ylim1=float(temp[0])
ylim2=float(temp[1])
if(args.xlim!="not"):
temp=args.xlim.split(',')
if(len(temp)!=2):
sys.exit("Wrong ylim input")
xlim1=float(temp[0])
xlim2=float(temp[1])
data=[]
for fname in args.files:
a=np.loadtxt(fname)
print(a.shape)
data.append(a[:,1])
x=a[:,0]
y=np.array(data)*args.weight
print(y.shape)
ymean=np.mean(y,axis=0)
ystd=np.std(y,axis=0)
plt.plot(x,ymean,'b-')
plt.plot(x,ymean+ystd,'r--')
plt.plot(x,ymean-ystd,'r--')
plt.grid(True)
plt.legend(["Mean data","60% confidence interval"])
plt.xlabel("G-vector length (a.u.)")
plt.ylabel("APMD value")
if(args.ylim!="not"):
plt.ylim([ylim1,ylim2])
if(args.xlim!="not"):
plt.xlim([xlim1,xlim2])
if(not(args.ylog==0)):
plt.yscale('log')
plt.show()
if __name__ == '__main__':
main()
```
#### File: casino/positron_utils/errorbar_on_expval.py
```python
import numpy as np
import argparse
import matplotlib.pyplot as plt
import sys
'''
This script searches for g-vectors along a given line, takes APMD values
corresponding to the vectors from one or multiple expval.data-files, and
plots the values as a function of the g-vector lentghts. If multiple
expval.data-files are fed in to the script, the plto will also contain
errorbars of the APMD values.
'''
def get_args():
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
'--files',
help='Filenames.',
nargs='+',
type=str,
default=['expval.data']
)
args_parser.add_argument(
'--angle',
        help='3 integers: i,j,k, so that we look for points in the direction i*b1+j*b2+k*b3.',
type=str,
default='1,0,0'
)
args_parser.add_argument(
'--ntwist',
help='Number of twists at each lattice vector direction, given as i,j,k.',
type=str,
default='1,1,1'
)
args_parser.add_argument(
'--weight',
help='''
value by which the APMD coefficients are to be weighted.
Hint: in si-units, S=1.9014321124319818e-21
''',
type=float,
default=1
)
return args_parser.parse_args()
def parse(fn,nt):
'''
Parses expval.data-file and returns the reciprocal lattice vectors,
Number of g-vectors, list of g-vectors and the APMD values. Because
we also want to parse merged expval.data-files, where multiple twisted
g-vector-grids are merged, we give as an argument to this function the
array nt, that describes the number of twists at each lattice vector
direction. Then the i:th reciprocal lattice vector is divided by nt[i],
so that by integer linear combinations of the newly generated reciprocal
    lattice vectors we can find ALL of the g-vectors, including the twisted
ones.
'''
with open(fn) as f:
lines=f.readlines()
for i in range(len(lines)):
if(lines[i]=="Supercell reciprocal lattice vectors (au)\n"):
pbmat=np.zeros((3,3))
for j in range(3):
words=lines[i+j+1].split()
for k in range(3):
pbmat[j,k]=float(words[k])/nt[j]
elif(lines[i]=="Number of G-vectors in set\n"):
Ng=int(lines[i+1])
elif(lines[i]=="G-vector components Gx, Gy, Gz (au)\n"):
gvec=np.zeros((Ng,3))
for j in range(Ng):
words=lines[i+j+1].split()
for k in range(3):
gvec[j,k]=float(words[k])
elif(lines[i]=="Complex pair-density coefficients (real part, imaginary part)\n"):
pmd=np.zeros((Ng,2))
for j in range(Ng):
words=lines[i+j+1].split()
for k in range(2):
pmd[j,k]=float(words[k])
return pbmat,Ng,gvec,pmd
def testint(r):
tol=0.0000001
if(abs(round(r[0])-r[0])>tol):
return -1
elif(abs(round(r[1])-r[1])>tol):
return -1
elif(abs(round(r[2])-r[2])>tol):
return -1
else:
return 1
def linepoints(gvec,pmd,A,ia,ib,ic):
g=[]; p=[]
g.append(np.linalg.norm(gvec[0,:]))
p.append(pmd[0,0])
for i in range(1,gvec.shape[0]):
gv=gvec[i,:]
intvec=np.matmul(A,gv)
if(testint(intvec)<0):
sys.exit("Non-integer linear combination of reciprocal lattice vectors:[{} {} {}]".format(intvec[0],intvec[1],intvec[2]))
ivec=np.array((round(intvec[0]),round(intvec[1]),round(intvec[2])))
absivec=np.absolute(ivec)
if(np.nonzero(absivec)[0].shape[0]>1):
ii=np.min(absivec[np.nonzero(absivec)])
else:
ii=absivec[np.nonzero(absivec)]
intvec=intvec/ii
if(testint(intvec)<0):
continue
if((round(intvec[0])==ia and round(intvec[1])==ib and round(intvec[2])==ic)):
g.append(np.sqrt(gv[0]**2+gv[1]**2+gv[2]**2))
p.append(pmd[i,0])
g=np.array(g)
p=np.array(p)
isort=np.argsort(g)
return g[isort],p[isort]
def main():
args=get_args()
print(" ")
print("====== Plot APMD values along a given line =====")
print(" ")
print("CASINO expval files to be parsed:")
for file in args.files:
print(file)
print(" ")
temp=args.angle.split(',')
ia=int(temp[0]); ib=int(temp[1]); ic=int(temp[2])
temp=args.ntwist.split(',')
nt1=int(temp[0]); nt2=int(temp[1]); nt3=int(temp[2])
nt=np.array([nt1,nt2,nt3])
dict={}
ind=0
plt.figure(1)
pointlists=[]
for fname in args.files:
print("Parsing {}...".format(fname))
pbmat,Ng,gvec,pmd=parse(fname,nt)
A=np.transpose(pbmat)
AtA=np.matmul(np.transpose(A),A)
mpinv=np.matmul(np.linalg.inv(AtA),np.transpose(A))
g,p=linepoints(gvec,pmd,mpinv,ia,ib,ic)
p=p*args.weight
pointlists.append(p)
ind+=1
for p in pointlists[1:]:
if p.shape[0]!=pointlists[0].shape[0]:
sys.exit("Error: Different number of points along a line found from expval.data-files. ")
YX=np.array(pointlists)
yval=np.mean(YX,axis=0)
yvar=np.std(YX,axis=0)
print(" ")
print("Obtained statistics: ")
print("g-vec length, mean value, error")
for i in range(yval.shape[0]):
print("{}, {}, {}".format(g[i],yval[i],yvar[i]))
plt.errorbar(g,yval,yvar)
plt.title("Points found in direction "+args.angle)
plt.grid()
plt.legend(args.files)
plt.show()
if __name__=='__main__':
main()
```
#### File: casino/positron_utils/merge_expval.py
```python
import argparse
import sys
import numpy as np
def get_args():
"""Define the task arguments with the default values.
Returns:
experiment parameters
"""
args_parser = argparse.ArgumentParser()
# Data files arguments
args_parser.add_argument(
'--files',
help='Filenames.',
nargs='+',
type=str,
default=['expval.data1','expval.data2']
)
return args_parser.parse_args()
def sort_expval_files(l):
d=[]
for f in l:
ind=f.find('expval.data')
if(ind==-1):
sys.exit("Filenames are not including the string 'expval.data'.")
d.append(int(f[ind+11:]))
d=np.array(d)
perm=np.argsort(d)
newl=[]
for i in range(d.shape[0]):
newl.append(l[perm[i]])
return newl
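# Illustrative sketch (not in the original script): the numeric suffix after
# 'expval.data' is used for ordering, so e.g.
#   sort_expval_files(['expval.data10', 'expval.data2', 'expval.data1'])
#   -> ['expval.data1', 'expval.data2', 'expval.data10']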
def main():
def _read_expval(filename):
gvec=[]
pmd=[]
pmd_sqr=[]
with open(filename) as f:
read_pmd2=False
lines=f.readlines()
for i in range(len(lines)):
if lines[i][:39]=='START POSITRON MOMENTUM DENSITY SQUARED':
read_pmd2=True
if lines[i][:19]=='Number of G-vectors':
Ng=int(lines[i+1])
if lines[i][:19] == 'G-vector components':
for j in range(1,Ng+1):
components=lines[i+j].split()
vec=[]
vec.append(float(components[0]))
vec.append(float(components[1]))
vec.append(float(components[2]))
gvec.append(np.array(vec))
if ((lines[i][:17] == 'APMD coefficients')):
if(read_pmd2): # Read the squared component array
for j in range(1,Ng+1):
components=lines[i+j].split()
vec=[]
vec.append(float(components[0]))
vec.append(float(components[1]))
pmd_sqr.append(np.array(vec))
else: # Read the non-squared mom den
for j in range(1,Ng+1):
components=lines[i+j].split()
vec=[]
vec.append(float(components[0]))
vec.append(float(components[1]))
pmd.append(np.array(vec))
return Ng,np.array(gvec),np.array(pmd),np.array(pmd_sqr)
def _read_twists(filename='k_offsets'):
twists=[]
with open(filename) as f:
lines=f.readlines()
Nconf=int(lines[0])
for line in lines[1:]:
words=line.split()
vec=np.array([float(words[0]),float(words[1]),float(words[2])])
twists.append(vec)
return Nconf,twists
def _merge_expvals(files,file_to):
Ng,gvec,pmd,pmd_sqr=_read_expval(files[0])
Ng0=Ng
Nconf,twists=_read_twists()
i=1
for file in files[1:]:
Ng2,gvec2,pmd2,pmd_sqr_2=_read_expval(file)
gvec2=gvec2+twists[i]; i+=1
Ng=Ng+Ng2
gvec=np.concatenate((gvec,gvec2))
pmd=np.concatenate((pmd,pmd2))
pmd_sqr=np.concatenate((pmd_sqr,pmd_sqr_2))
with open(files[0]) as f:
lines=f.readlines()
with open(file_to,'w') as f:
write_line=True
i=0
write_pmd_sqr=False
while(write_line):
if lines[i][:29] == 'END POSITRON MOMENTUM DENSITY':
write_pmd_sqr=True
if lines[i][:19]=='G-vector components':
f.write(lines[i]); i+=1
for j in range(Ng):
if j<Ng0:
i+=1
f.write(str(gvec[j,0])+" "+str(gvec[j,1])+" "+str(gvec[j,2])+'\n')
elif((lines[i][:17] == 'APMD coefficients')):
if(write_pmd_sqr): # Write squared momentum density
f.write(lines[i]); i+=1
for j in range(Ng):
if j<Ng0:
i+=1
f.write(str(pmd_sqr[j,0])+" "+str(pmd_sqr[j,1])+'\n')
else: # Non-squared
f.write(lines[i]); i+=1
for j in range(Ng):
if j<Ng0:
i+=1
f.write(str(pmd[j,0])+" "+str(pmd[j,1])+'\n')
elif(lines[i][:19]=='Number of G-vectors'):
f.write(lines[i]); i+=1
f.write(str(Ng)+'\n'); i+=1
elif(lines[i][:10]=='END EXPVAL'): # Terminate
f.write(lines[i])
write_line=False
else:
f.write(lines[i]); i+=1
args=get_args()
if len(args.files)<2:
sys.exit("Error: Less than 2 files to merge found.")
for file in args.files:
if file[:11] != "expval.data":
sys.exit("Wrong filenames provided: "+file)
files=sort_expval_files(args.files)
_merge_expvals(files,'expval.data')
if __name__ == '__main__':
main()
```
#### File: JMuff22/Scripts/twitter-delete-script.py
```python
import json
import tweepy
import threading
import time
# Extract your archive and name the folder something you can remember
# Paste your folder's absolute path below
# For example it can be–
# ARCHIVE_PATH = "/Users/mayur57/Desktop/archive"
ARCHIVE_PATH = ""
JS_FILE = ARCHIVE_PATH + "/data/tweet.js"
JSON_FILE = ARCHIVE_PATH + "/data/deleter.json"
DELETED_TWEETS = ARCHIVE_PATH + "/data/deleted_tweets.txt"
SKIPPED_TWEETS = ARCHIVE_PATH + "/data/skipped_tweets.txt"
# These are optional filters for the tweets you want to delete.
# DO NOT edit these if you want to delete all of your tweets without
# any filters. Otherwise, for example, if you want to skip tweets
# having likes more 30 and retweets more than 5, you change the below
# values as:
# -> LIKES=30, RETWEETS=5
RETWEETS=0
LIKES=40
# Generate your own Twitter API keys and edit them here
consumer_key = 'xxxxxxxx'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'
skipped_tweets = []
tweets_to_delete = []
# Number of tweets to be deleted by each thread.
# Smaller batch size means faster deletion but may strain the CPU.
# Recommended value is <500> tweets per thread.
batch_size = 500
def process_tweet_json(JS_FILE):
print("\n\n>> Processing your tweet.js file and converting it to a JSON.")
print("\n>> Processing tweet.js file")
js_file = open(JS_FILE, "r")
js_data = js_file.read()
js_file.close()
print("\n>> Converting to processable JSON")
json_data = js_data[25:]
json_file = open(JSON_FILE, "w")
json_file.write(json_data)
json_file.close()
def parse_json(JSON_FILE, LIKES, RETWEETS=0):
with open(JSON_FILE) as jfd:
data = json.load(jfd)
for tweet in data:
if(int(tweet["tweet"]["favorite_count"]) > LIKES and int(tweet["tweet"]["retweet_count"]) > RETWEETS):
            f = open(SKIPPED_TWEETS, "a")  # append so earlier entries are not overwritten
f.write("Tweets skipped:\n\n"
+ "ID: "+tweet["tweet"]["id"] + "\n"
+ "Tweet: "+tweet["tweet"]["full_text"]+"\n"
+ "Likes: "+tweet["tweet"]["favorite_count"]+"\n"
+ "Retweets: "+tweet["tweet"]["retweet_count"]+"\n\n")
f.close()
skipped_tweets.append(str(tweet["tweet"]["id"]))
else:
            f = open(DELETED_TWEETS, "a")  # append so earlier entries are not overwritten
f.write("Tweets deleted:\n\n"
+ "ID: "+tweet["tweet"]["id"] + "\n"
+ "Tweet: "+tweet["tweet"]["full_text"]+"\n"
+ "Likes: "+tweet["tweet"]["favorite_count"]+"\n"
+ "Retweets: "+tweet["tweet"]["retweet_count"]+"\n\n")
f.close()
tweets_to_delete.append(str(tweet["tweet"]["id"]))
print("\n>> You have selected " + str(len(tweets_to_delete)) + " tweets to be deleted.")
print(">> You have selected " + str(len(skipped_tweets)) + " tweets to be deleted.")
print("\n>> Your choices for filters were: ")
print(f">> Likes more than {LIKES} and retweets more than {RETWEETS}.\n")
def delete_tweets(batch_start, api):
for i in range(batch_start, batch_start+batch_size):
try:
api.destroy_status(int(tweets_to_delete[i]))
print("T - Deleted: " + tweets_to_delete[i])
except tweepy.TweepError as e:
error_code = str(e.reason[10:13])
if error_code == "327":
print(" D ---Skipping: " + tweets_to_delete[i])
elif error_code == "139":
print(" D ---Already liked: " + tweets_to_delete[i])
elif error_code == "185":
print("ERROR: Rate limit reached. Exiting.")
break
else:
print("ERROR: ---" + e.reason + tweets_to_delete[i])
def make_threads():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print('''
    >> Deletion of tweets takes a long time due to the sluggish Twitter API. The script uses multithreading to delete tweets faster. However, it might still take a few minutes to delete your entire history of tweets if it exceeds a couple of thousand tweets.''')
i = 0
thread_count = 0
while(i < int(len(tweets_to_delete))):
thread_count = thread_count + 1
t1 = threading.Thread(target=delete_tweets, args=(i, api))
t1.start()
print(f"Thread started for deletion. Thread #{thread_count}")
i = i + batch_size
start_time = time.time()
process_tweet_json(JS_FILE)
parse_json(JSON_FILE, LIKES, RETWEETS)
make_threads()
print(f"--> Successfully deleted {len(tweets_to_delete)} tweets.")
print(f"--> Successfully skipped {len(skipped_tweets)} tweets from deletion.")
print(f"--> Number of threads used: {(len(tweets_to_delete) + batch_size - 1) // batch_size} of batch size {batch_size} each.")
print("\n\n--> Process finished in %s seconds." % (time.time() - start_time))
``` |
{
"source": "jmuhlich/cycif_registration",
"score": 2
} |
#### File: ashlar/scripts/ashlar.py
```python
import warnings
import sys
import re
import argparse
import pathlib
import blessed
from .. import __version__ as VERSION
from .. import reg
from ..reg import PlateReader, BioformatsReader
from ..filepattern import FilePatternReader
from ..fileseries import FileSeriesReader
from ..zen import ZenReader
def main(argv=sys.argv):
parser = argparse.ArgumentParser(
description='Stitch and align multi-tile cyclic microscope images',
formatter_class=HelpFormatter,
)
parser.add_argument(
'filepaths', metavar='FILE', nargs='+',
help='Image file(s) to be processed, one per cycle',
)
parser.add_argument(
'-o', '--output', dest='output', default='ashlar_output.ome.tif',
metavar='PATH',
help=("Output file. If PATH ends in .ome.tif a pyramidal OME-TIFF will"
" be written. If PATH ends in just .tif and includes {cycle} and"
" {channel} placeholders, a series of single-channel plain TIFF"
" files will be written. If PATH starts with a relative or"
" absolute path to another directory, that directory must already"
" exist."),
)
parser.add_argument(
'-c', '--align-channel', dest='align_channel', type=int,
default='0', metavar='CHANNEL',
help=('Reference channel number for image alignment. Numbering starts'
' at 0.'),
)
parser.add_argument(
'--flip-x', default=False, action='store_true',
help='Flip tile positions left-to-right',
)
parser.add_argument(
'--flip-y', default=False, action='store_true',
help='Flip tile positions top-to-bottom',
)
parser.add_argument(
'--flip-mosaic-x', default=False, action='store_true',
help='Flip output image left-to-right',
)
parser.add_argument(
'--flip-mosaic-y', default=False, action='store_true',
help='Flip output image top-to-bottom',
)
parser.add_argument(
'--output-channels', nargs='+', type=int, metavar='CHANNEL',
help=('Output only specified channels for each cycle. Numbering starts'
' at 0. (default: all channels)'),
)
parser.add_argument(
'-m', '--maximum-shift', type=float, default=15, metavar='SHIFT',
help='Maximum allowed per-tile corrective shift in microns',
)
parser.add_argument(
'--filter-sigma', type=float, default=0, metavar='SIGMA',
help=('Filter images before alignment using a Gaussian kernel with s.d.'
' of SIGMA pixels (default: no filtering)'),
)
parser.add_argument(
'-f', '--filename-format', dest='filename_format',
default='cycle_{cycle}_channel_{channel}.tif', help=argparse.SUPPRESS,
)
parser.add_argument(
'--pyramid', default=False, action='store_true', help=argparse.SUPPRESS
)
parser.add_argument(
'--tile-size', type=int, default=1024, metavar='PIXELS',
help='Pyramid tile size for OME-TIFF output',
)
parser.add_argument(
'--ffp', metavar='FILE', nargs='+',
help=("Perform flat field illumination correction using the given"
" profile image. Specify one common file for all cycles or one"
" file for every cycle. Channel counts must match input files."
" (default: no flat field correction)"),
)
parser.add_argument(
'--dfp', metavar='FILE', nargs='+',
help=("Perform dark field illumination correction using the given"
" profile image. Specify one common file for all cycles or one"
" file for every cycle. Channel counts must match input files."
" (default: no dark field correction)"),
)
parser.add_argument(
'--plates', default=False, action='store_true',
help='Enable plate mode for HTS data',
)
parser.add_argument(
'-q', '--quiet', dest='quiet', default=False,
action='store_true', help='Suppress progress display',
)
parser.add_argument(
'--version', action='version', version=f"ashlar {VERSION}"
)
args = parser.parse_args(argv[1:])
configure_terminal()
configure_warning_format()
filepaths = args.filepaths
output_path = pathlib.Path(args.output)
op_tiff = bool(re.search(r"\.tiff?$", output_path.name, re.IGNORECASE))
ff_default = args.filename_format == parser.get_default("filename_format")
if op_tiff and ff_default:
# Standard usage: -o includes a .tif filename, -f not included.
args.filename_format = output_path.name
output_path = output_path.parent
else:
# Old, deprecated usage: -o is a directory and/or -f was specified.
if ff_default:
warnings.warn(
"The output path must include a filename with a .tif or .tiff"
" suffix. Specifying only a directory path with -o/--output has"
" been deprecated and will be disabled in a future version. See"
" the -o documentation for details.",
reg.Warning,
)
else:
warnings.warn(
"The -f/--filename-format argument has been deprecated and its"
" functionality merged into the -o argument. See the -o"
" documentation for details.",
reg.Warning,
)
if op_tiff and not output_path.is_dir():
# Checking is_dir() avoids erroring out in the strange but legal
# situation where output_path is a DIRECTORY that ends in .tif !
print_error(
"Filename may be appended to the output path specified by"
" -o/--output, or specified separately with"
" -f/--filename-format, but not both at the same time"
)
return 1
if not re.search(r"\.tiff?$", args.filename_format, re.IGNORECASE):
print_error(
f"Filename format does not end in .tif: {args.filename_format}"
)
return 1
if not output_path.is_dir():
print_error(
"Output location does not exist or is not a directory:"
f" {output_path}"
)
return 1
if re.search(r"\.ome\.tiff?$", args.filename_format, re.IGNORECASE):
args.pyramid = True
if args.tile_size != parser.get_default("tile_size") and not args.pyramid:
print_error("--tile-size can only be used with OME-TIFF output")
return 1
ffp_paths = args.ffp
if ffp_paths:
if len(ffp_paths) not in (0, 1, len(filepaths)):
print_error(
"Wrong number of flat-field profiles. Must be 1, or {}"
" (number of input files)".format(len(filepaths))
)
return 1
if len(ffp_paths) == 1:
ffp_paths = ffp_paths * len(filepaths)
dfp_paths = args.dfp
if dfp_paths:
if len(dfp_paths) not in (0, 1, len(filepaths)):
print_error(
"Wrong number of dark-field profiles. Must be 1, or {}"
" (number of input files)".format(len(filepaths))
)
return 1
if len(dfp_paths) == 1:
dfp_paths = dfp_paths * len(filepaths)
aligner_args = {}
aligner_args['channel'] = args.align_channel
aligner_args['verbose'] = not args.quiet
aligner_args['max_shift'] = args.maximum_shift
aligner_args['filter_sigma'] = args.filter_sigma
mosaic_args = {}
if args.output_channels:
mosaic_args['channels'] = args.output_channels
if args.pyramid:
mosaic_args['tile_size'] = args.tile_size
if args.quiet is False:
mosaic_args['verbose'] = True
mosaic_args['flip_mosaic_x'] = args.flip_mosaic_x
mosaic_args['flip_mosaic_y'] = args.flip_mosaic_y
try:
if args.plates:
return process_plates(
filepaths, output_path, args.filename_format, args.flip_x,
args.flip_y, ffp_paths, dfp_paths, aligner_args, mosaic_args,
args.pyramid, args.quiet
)
else:
mosaic_path_format = str(output_path / args.filename_format)
return process_single(
filepaths, mosaic_path_format, args.flip_x, args.flip_y,
ffp_paths, dfp_paths, aligner_args, mosaic_args, args.pyramid,
args.quiet
)
except ProcessingError as e:
print_error(str(e))
return 1
def process_single(
filepaths, output_path_format, flip_x, flip_y, ffp_paths, dfp_paths,
aligner_args, mosaic_args, pyramid, quiet, plate_well=None
):
mosaic_args = mosaic_args.copy()
writer_args = {}
if pyramid:
writer_args["tile_size"] = mosaic_args.pop("tile_size", None)
mosaics = []
if not quiet:
print("Stitching and registering input images")
print('Cycle 0:')
print(' reading %s' % filepaths[0])
reader = build_reader(filepaths[0], plate_well=plate_well)
process_axis_flip(reader, flip_x, flip_y)
ea_args = aligner_args.copy()
if len(filepaths) == 1:
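        # The thumbnail is only used to coarsely position later cycles against
        # the first one, so it can be skipped when there is a single input file
        # (assumption based on how the layer alignment step consumes it).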
ea_args['do_make_thumbnail'] = False
edge_aligner = reg.EdgeAligner(reader, **ea_args)
edge_aligner.run()
mshape = edge_aligner.mosaic_shape
mosaic_args_final = mosaic_args.copy()
if ffp_paths:
mosaic_args_final['ffp_path'] = ffp_paths[0]
if dfp_paths:
mosaic_args_final['dfp_path'] = dfp_paths[0]
mosaics.append(reg.Mosaic(edge_aligner, mshape, **mosaic_args_final))
for cycle, filepath in enumerate(filepaths[1:], 1):
if not quiet:
print('Cycle %d:' % cycle)
print(' reading %s' % filepath)
reader = build_reader(filepath, plate_well=plate_well)
process_axis_flip(reader, flip_x, flip_y)
layer_aligner = reg.LayerAligner(reader, edge_aligner, **aligner_args)
layer_aligner.run()
mosaic_args_final = mosaic_args.copy()
if ffp_paths:
mosaic_args_final['ffp_path'] = ffp_paths[cycle]
if dfp_paths:
mosaic_args_final['dfp_path'] = dfp_paths[cycle]
mosaics.append(reg.Mosaic(layer_aligner, mshape, **mosaic_args_final))
# Disable reader caching to save memory during mosaicing and writing.
edge_aligner.reader = edge_aligner.reader.reader
if not quiet:
print()
print(f"Merging tiles and writing to {output_path_format}")
writer_class = reg.PyramidWriter if pyramid else reg.TiffListWriter
writer = writer_class(
mosaics, output_path_format, verbose=not quiet, **writer_args
)
writer.run()
return 0
def process_plates(
filepaths, output_path, filename_format, flip_x, flip_y, ffp_paths,
dfp_paths, aligner_args, mosaic_args, pyramid, quiet
):
temp_reader = build_reader(filepaths[0])
metadata = temp_reader.metadata
if metadata.num_plates == 0:
# FIXME raise ProcessingError here instead?
print("Dataset does not contain plate information.")
return 1
for p, plate_name in enumerate(metadata.plate_names):
print("Plate {} ({})\n==========\n".format(p, plate_name))
for w, well_name in enumerate(metadata.well_names[p]):
print("Well {}\n-----".format(well_name))
if len(metadata.plate_well_series[p][w]) > 0:
well_path = output_path / plate_name / well_name
well_path.mkdir(parents=True, exist_ok=True)
mosaic_path_format = str(well_path / filename_format)
process_single(
filepaths, mosaic_path_format, flip_x, flip_y,
ffp_paths, dfp_paths, aligner_args, mosaic_args, pyramid,
quiet, plate_well=(p, w)
)
else:
print("Skipping -- No images found.")
print()
print()
return 0
def process_axis_flip(reader, flip_x, flip_y):
metadata = reader.metadata
# Trigger lazy initialization.
_ = metadata.positions
sx = -1 if flip_x else 1
sy = -1 if flip_y else 1
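    # Tile positions appear to be stored as (y, x) pairs, hence the y scale
    # factor comes first in the multiplier below.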
metadata._positions *= [sy, sx]
readers = {
'filepattern': FilePatternReader,
'fileseries': FileSeriesReader,
'bioformats': BioformatsReader,
'zen': ZenReader,
}
# This is a short-term hack to provide a way to specify alternate reader
# classes and pass specific args to them.
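# The spec format is "reader|path" or "reader|path|key1=val1|key2=val2", where
# reader is one of the keys in `readers` above. Hypothetical example (the
# kwargs shown are illustrative, not a documented set):
#   "fileseries|/data/cycle1|pattern=img_{series}_{channel}.tif|overlap=0.1"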
def build_reader(path, plate_well=None):
# Default to BioformatsReader if name not specified.
reader_class = BioformatsReader
kwargs = {}
match = re.match(
r'(?P<reader>\w+)\|(?P<path>.*?)(\|(?P<kwargs>.*))?$', path
)
if match:
path = match.group('path')
reader_name = match.group('reader')
reader_class = readers.get(reader_name)
if reader_class is None:
raise ProcessingError("Unknown reader: {}".format(reader_name))
kwargs.update(parse_kwargs_string(match.group('kwargs')))
if plate_well is not None:
if not issubclass(reader_class, PlateReader):
raise ProcessingError(
"The %s reader does not support plate/well processing"
% reader_class.__name__
)
kwargs.update(plate=plate_well[0], well=plate_well[1])
reader = reader_class(path, **kwargs)
return reader
def parse_kwargs_string(string):
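    # e.g. "overlap=0.1|pixel_size=0.65" -> {'overlap': 0.1, 'pixel_size': 0.65}
    # (illustrative option names; any value that parses as a float is converted)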
kwargs = {}
if string is not None:
for piece in string.split('|'):
name, value = piece.split('=')
# Optimistically parse as float.
try:
value = float(value)
except ValueError:
pass
kwargs[name] = value
return kwargs
def configure_terminal():
global terminal
terminal = blessed.Terminal()
def print_error(message):
print(terminal.bright_red("ERROR:"), message)
def warning_formatter(message, category, filename, lineno, line=None):
if issubclass(category, reg.Warning):
return terminal.bright_yellow("WARNING:") + f" {message}\n"
else:
return _old_formatwarning(message, category, filename, lineno, line)
def configure_warning_format():
global _old_formatwarning
_old_formatwarning = warnings.formatwarning
warnings.formatwarning = warning_formatter
class HelpFormatter(argparse.HelpFormatter):
"""Help message formatter which adds default values to argument help.
Based on argparse.ArgumentDefaultsHelpFormatter.
"""
def _get_help_string(self, action):
help = action.help
if isinstance(action, (argparse._HelpAction, argparse._VersionAction)):
help = help.capitalize()
elif (
not isinstance(action, argparse._StoreTrueAction)
and "%(default)" not in help
and "(default:" not in help
and action.default is not argparse.SUPPRESS
):
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += " (default: %(default)s)"
return help
class ProcessingError(RuntimeError):
pass
if __name__ == '__main__':
sys.exit(main())
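# Hypothetical command lines, assuming the package installs this module as an
# `ashlar` console script (otherwise substitute `python -m ashlar.scripts.ashlar`)
# and that cycle1.ome.tif / cycle2.ome.tif are your per-cycle input images:
#   ashlar cycle1.ome.tif cycle2.ome.tif -o stitched.ome.tif
#   ashlar cycle1.ome.tif cycle2.ome.tif -o stitched.ome.tif \
#       --align-channel 0 --maximum-shift 30 --filter-sigma 1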
``` |
{
"source": "jmuhlich/gr_metrics",
"score": 3
} |
#### File: python/scripts/add_gr_column.py
```python
import sys
import textwrap
import fileinput
import csv
import collections
import decimal
import gr50
# ------------------------------------------------------------------------------
def print_augmented_row(row, last_col):
    print('%s\t%s' % ('\t'.join(row), last_col))
# ------------------------------------------------------------------------------
def main():
'''
Usage: add_gr_column.py input.tsv > output.tsv
Compute Growth Response value from cell count data in TSV format.
The input tsv file must meet the following requirements:
- The first row contains column names.
- The 'cell_count' column contains measured cell counts under a given
perturbation at some time.
- The 'cell_count__time0' column contains cell counts for the corresponding
time=0 controls.
- The 'cell_count__ctrl' column contains cell counts for the corresponding
no-perturbation controls.
All other columns are ignored and passed through untouched.
The output tsv will have all columns from the input, as well as a new
'GRvalue' column at the end containing the GR values.
'''
if '-h' in sys.argv or '--help' in sys.argv:
print textwrap.dedent(main.__doc__)
return
reader = csv.reader(fileinput.input(mode='rb'), delimiter='\t')
headers = next(reader)
print_augmented_row(row=headers, last_col='GRvalue')
record = collections.namedtuple(typename='record',
field_names=headers,
rename=True)
for r in (record(*row) for row in reader):
gr = gr50.compute_gr_single(r)
print_augmented_row(row=r, last_col=gr)
if __name__ == '__main__':
main()
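# Hypothetical invocation: add_gr_column.py counts.tsv > counts_gr.tsv, where
# counts.tsv contains at least cell_count, cell_count__ctrl and
# cell_count__time0 columns. Per the GR metrics method, the appended value for
# each row should correspond to
#   GR = 2 ** (log2(cell_count / cell_count__time0) /
#              log2(cell_count__ctrl / cell_count__time0)) - 1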
``` |