Columns:
  code          string   22 to 1.05M characters   (Python source for one file or snippet)
  apis          list     1 to 3.31k items         (fully qualified names of the APIs the code calls)
  extract_api   string   75 to 3.25M characters   (per-call extraction records: character spans, call arguments, and the originating import)
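Each row pairs a code sample with the APIs extracted from it, as in the examples below. The following is a minimal sketch of how one row might be consumed, assuming rows behave like dicts with the three keys above and that extract_api holds a Python-literal list of call records (as the sample values suggest); the read_row helper and the use of ast.literal_eval are assumptions for illustration, not part of the dataset's own tooling.

import ast

def read_row(row):
    """Split one dataset row into source code, API names, and call records.

    Assumes `row` is dict-like with 'code', 'apis', and 'extract_api' keys,
    and that 'extract_api' is a Python literal (a list of tuples), as in
    the sample rows below.
    """
    source = row["code"]           # raw Python source
    api_names = row["apis"]        # fully qualified API names
    call_records = ast.literal_eval(row["extract_api"])  # per-call tuples
    return source, api_names, call_records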
import os

from discord.ext.commands import Bot
from Flare import Flare

bot = Bot("~~")          # "~~" is the command prefix
bot.add_cog(Flare(bot))

@bot.command("ping")
async def ping_pong(ctx):
    await ctx.send("pong")  # ctx.send is a coroutine, so it must be awaited

bot.run(os.environ.get("BOT_TOKEN"))
[ "discord.ext.commands.Bot", "os.environ.get", "Flare.Flare" ]
[((79, 88), 'discord.ext.commands.Bot', 'Bot', (['"""~~"""'], {}), "('~~')\n", (82, 88), False, 'from discord.ext.commands import Bot\n'), ((102, 112), 'Flare.Flare', 'Flare', (['bot'], {}), '(bot)\n', (107, 112), False, 'from Flare import Flare\n'), ((194, 221), 'os.environ.get', 'os.environ.get', (['"""BOT_TOKEN"""'], {}), "('BOT_TOKEN')\n", (208, 221), False, 'import os\n')]
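The extract_api records above appear to lead with a (start, end) character span into the code string followed by the qualified API name. Below is a hedged sketch of mapping records back onto the source under that assumption; the field layout beyond the first two elements is not relied on, and the offsets are assumed to index the original, newline-preserving source rather than any re-wrapped display of it.

def spans_to_calls(source, call_records):
    """Print the source text covered by each extraction record.

    Assumes each record begins with a (start, end) span into `source`
    and that its second element is the qualified API name, as the
    sample records above suggest.
    """
    for record in call_records:
        (start, end), qualified_name = record[0], record[1]
        print(f"{qualified_name}: {source[start:end]!r}")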
import setuptools

ver = {}
with open('OpenControl/_version.py') as fd:
    exec(fd.read(), ver)
version = ver.get('__version__')

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="OpenControl",
    version=version,
    author="VNOpenAI",
    author_email="<EMAIL>",
    description="A python control systems package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://opencontrol.readthedocs.io/en/latest/",
    project_urls={
        "Bug Tracker": "https://github.com/VNOpenAI/OpenControl/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.7",
)
[ "setuptools.find_packages" ]
[((815, 841), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (839, 841), False, 'import setuptools\n')]
'''See the shared Google Drive documentation for an inheritance diagram that shows the relationships between the classes defined in this file. ''' import numpy as np import socket import time from riglib import source from ismore import settings, udp_feedback_client import ismore_bmi_lib from utils.constants import * #import armassist #import rehand from riglib.filter import Filter from riglib.plants import Plant import os class BasePlantUDP(Plant): ''' Common UDP interface for the ArmAssist/ReHand ''' debug = 0 sensor_data_timeout = 1 # seconds. if this number of seconds has passed since sensor data was received, velocity commands will not be sent lpf_vel = 0 # define in subclasses! ssm_cls = None addr = None feedback_data_cls = None data_source_name = None n_dof = None blocking_joints = None safety_grid = None feedback_str = '' def __init__(self, *args, **kwargs): self.source = source.DataSource(self.feedback_data_cls, bufferlen=5, name=self.data_source_name) self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # used only for sending ssm = self.ssm_cls() self.pos_state_names = [s.name for s in ssm.states if s.order == 0] self.vel_state_names = [s.name for s in ssm.states if s.order == 1] self.aa_xy_ix = [i for i, j in enumerate(ssm.states) if j.name in ['aa_px', 'aa_py']] self.aa_psi_ix = [i for i, j in enumerate(ssm.states) if j.name == 'aa_ppsi'] self.rh_pron_ix = [i for i, j in enumerate(ssm.states) if j.name == 'rh_pprono'] self.rh_pfings = [(i, j.name) for i, j in enumerate(ssm.states) if j.name in ['rh_pthumb', 'rh_pindex', 'rh_pfing3']] self.drive_velocity_raw = np.zeros((len(self.vel_state_names),)) self.drive_velocity_raw_fb_gain = np.zeros((len(self.vel_state_names),)) self.drive_velocity_sent = np.zeros((len(self.vel_state_names),)) self.drive_velocity_sent_pre_safety = np.zeros((len(self.vel_state_names),)) self.pre_drive_state = np.zeros((len(self.vel_state_names), )) # low-pass filters to smooth out command velocities # from scipy.signal import butter # b, a = butter(5, 0.1) # fifth order, 2 Hz bandpass (assuming 10 Hz update rate) #omega, H = signal.freqz(b, a) #plt.figure() #plt.plot(omega/np.pi, np.abs(H)) # self.vel_command_lpfs = [None] * self.n_dof # for k in range(self.n_dof): # self.vel_command_lpfs[k] = Filter(b=b, a=a) # self.last_sent_vel = np.ones(self.n_dof) * np.nan # calculate coefficients for a 4th-order Butterworth LPF at 1.5 Hz for kinematic data received from the exo # fs_synch = 20 #Frequency at which emg and kin data are synchronized # nyq = 0.5 * fs_synch # cuttoff_freq = 1.5 / nyq # bpf_kin_coeffs = butter(4, cuttoff_freq, btype='low') # self.pos_filt = [None] * self.n_dof # for k in range(self.n_dof): # self.pos_filt[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1]) def init(self): from riglib import sink sink.sinks.register(self.source) def start(self): # only start this DataSource after it has been registered with # the SinkManager singleton (sink.sinks) in the call to init() self.source.start() self.ts_start_data = time.time() def stop(self): # send a zero-velocity command self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(np.zeros(self.n_dof)))) self.source.stop() self.feedback_file.close() def last_data_ts_arrival(self): return self.source.read(n_pts=1)['ts_arrival'][0] def _send_command(self, command): self.sock.sendto(command, self.addr) def pack_vel(self, vel): format_str = "%f " * self.n_dof return format_str % tuple(vel) def send_vel(self, vel): assert len(vel) == self.n_dof vel = vel.copy() vel *= 
self.vel_gain # change the units of the velocity, if necessary self.last_sent_vel = vel #command_vel is already fitlered at the task level, no need to filter it again. #self.last_sent_vel = filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() if all(v <= 0.00000001 for v in abs(self.last_sent_vel)): print ('last sent vel') print (self.last_sent_vel) if (self.last_data_ts_arrival() == 0) or ((self.last_data_ts_arrival() - time.time()) > self.sensor_data_timeout): print ("sensor data not received for %s recently enough, not sending velocity command!" % self.plant_type) return # squash any velocities which would take joints outside of the rectangular bounding box current_pos = self.get_pos() * self.vel_gain projected_pos = current_pos + vel * 0.1 max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0)) min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0)) vel[max_reached] = 0 vel[min_reached] = 0 self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel))) # set max speed limts faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed) vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed]) #if we wanna define some limit values for the rehand use the filt_vel. Otherwise use vel #self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(filt_vel))) self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel))) # set max speed limts faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed) vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed]) if self.debug: print ("input vel") print (vel) print ("vel sent to %s" % self.plant_type) print (vel) print ("current_pos") print (current_pos) print ("projected_pos") print (projected_pos) print ("actual velocity") print (self.get_vel()) if self.lpf_vel: # squash any velocities which would take joints outside of the rectangular bounding box current_pos = self.get_pos() * self.vel_gain projected_pos = current_pos + vel * (1.0/20) max_reached, = np.nonzero((projected_pos > self.max_pos_vals) * (vel > 0)) min_reached, = np.nonzero((projected_pos < self.min_pos_vals) * (vel < 0)) vel[max_reached] = 0 vel[min_reached] = 0 # set max speed limts faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed) vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed]) if faster_than_max_speed > 0: print ('faster_than_max_speed') print (faster_than_max_speed) if self.debug: print ("input vel") print (vel) print ("vel sent to %s" % self.plant_type) print (vel) #print "current_pos" #print current_pos #print "projected_pos" #print projected_pos #print "actual velocity" #print self.get_vel() self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel))) else: self._send_command('SetSpeed %s %s\r' % (self.plant_type, self.pack_vel(vel))) # def get_pos(self): # # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF # return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0])) def drive(self, decoder): vel = decoder['qdot'] vel_bl = vel.copy() feedback_str = '' if self.blocking_joints is not None: vel_bl[self.blocking_joints] = 0 if self.safety_grid is not None: #If the next position is outside of safety then damp velocity to only go to limit: pos_pred = decoder['q'] + 0.1*vel_bl #Make sure predicted AA PX, 
AA PY within bounds: xy_change = True if len(self.aa_xy_ix) > 0: if self.safety_grid.is_valid_pos(pos_pred[self.aa_xy_ix]) is False: #If not, make their velocity zero: vel_bl[self.aa_xy_ix] = 0 xy_change = False feedback_str = feedback_str+ ' stopping xy from moving' else: xy_change = False # Make sure AA Psi within bounds: if len(self.aa_psi_ix) > 0: # If X/Y ok if xy_change: mn, mx = self.safety_grid.get_minmax_psi(pos_pred[self.aa_xy_ix]) # If x/y not ok: else: mn, mx = self.safety_grid.get_minmax_psi(decoder['q'][self.aa_xy_ix]) # Set psi velocity : if np.logical_and(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx): pass else: vel_bl[self.aa_psi_ix] = 0 feedback_str = feedback_str+ 'stopping psi' # Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY) if len(self.rh_pron_ix) > 0: # If X/Y ok if xy_change: mn, mx = self.safety_grid.get_minmax_prono(pos_pred[self.aa_xy_ix]) # If x/y not ok or not moving bc not part of state pace : else: if len(self.aa_xy_ix) > 0: mn, mx = self.safety_grid.get_minmax_prono(decoder['q'][self.aa_xy_ix]) else: mn, mx = self.safety_grid.get_minmax_prono(settings.starting_pos['aa_px'], settings.starting_pos['aa_py']) # Set prono velocity : if np.logical_and(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx): pass else: vel_bl[self.rh_pron_ix] = 0 feedback_str = feedback_str+ 'stopping prono' # Assure RH fingers are within range: if len(self.rh_pfings) > 0: for i, (ix, nm) in enumerate(self.rh_pfings): mn, mx = self.safety_grid.get_rh_minmax(nm) if np.logical_and(pos_pred[ix] >= mn, pos_pred[ix] <= mx): pass else: vel_bl[ix] = 0 feedback_str = feedback_str+ 'stopping rh fings' self.feedback_str = feedback_str self.drive_velocity = vel_bl self.send_vel(vel_bl) decoder['q'] = self.get_pos() def write_feedback(self): pos_vel = [str(i) for i in np.hstack(( self.get_pos(), self.get_vel() )) ] #self.feedback_file.write(','.join(pos_vel)+'\n') if self.feedback_str != '': self.feedback_file.write(self.feedback_str+ time.ctime() + '\n') class ArmAssistPlantUDP(BasePlantUDP): '''Sends velocity commands and receives feedback over UDP. Can be used with either the real or simulated ArmAssist. 
''' ssm_cls = ismore_bmi_lib.StateSpaceArmAssist addr = settings.ARMASSIST_UDP_SERVER_ADDR feedback_data_cls = udp_feedback_client.ArmAssistData data_source_name = 'armassist' n_dof = 3 plant_type = 'ArmAssist' vel_gain = np.array([cm_to_mm, cm_to_mm, rad_to_deg]) # convert units to: [mm/s, mm/s, deg/s] max_pos_vals = np.array([np.inf, np.inf, np.inf]) min_pos_vals = np.array([-np.inf, -np.inf, -np.inf]) max_speed = np.array([np.inf, np.inf, np.inf]) feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'w') #max_speed = np.array([40, 60, 20]) # in mm/s and deg/s #max_speed = np.array([60, 80, 50]) # in mm/s and deg/s #parameters for kinematics low-pass filtering from scipy.signal import butter, lfilter from ismore.filter import Filter fs_synch = 25 #Frequency at which emg and kin data are synchronized nyq = 0.5 * fs_synch cuttoff_freq = 1.5 / nyq bpf_kin_coeffs = butter(2, cuttoff_freq, btype='low') n_dof = 3 vel_filter = [None] * n_dof for k in range(n_dof): vel_filter[k] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1]) n_getpos_iter= 0 def __init__(self, *args, **kwargs): super(ArmAssistPlantUDP, self).__init__(*args, **kwargs) def set_pos_control(self): # position control with global reference system self._send_command('SetControlMode ArmAssist Position') def set_global_control(self): #velocity control with global reference system self._send_command('SetControlMode ArmAssist Global') def set_trajectory_control(self): #trajectory control with global reference system self._send_command('SetControlMode ArmAssist Trajectory') def send_vel(self, vel): vel = vel.copy() # units of vel should be: [cm/s, cm/s, rad/s] assert len(vel) == self.n_dof # convert units to: [mm/s, mm/s, deg/s] to send them through UDP to the ArmAssist application vel[0] *= cm_to_mm vel[1] *= cm_to_mm vel[2] *= rad_to_deg # set max speed limts faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed) vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed]) self.debug = True if self.debug: # print "vel sent to armassist" # print vel if faster_than_max_speed.any() > 0: print ('faster_than_max_speed') print (faster_than_max_speed) print ("speed set to: ") print (vel) self._send_command('SetSpeed ArmAssist %f %f %f\r' % tuple(vel)) # get raw position def get_pos_raw(self): # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF #get the last poitns of data of the armassist and low-pass filter return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0])) # get filtered position def get_pos(self): return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0])) # calculate vel from raw position def get_vel_raw(self): recent_pos_data = self.source.read(n_pts=2) pos = recent_pos_data['data'][self.pos_state_names] ts = recent_pos_data['ts'] delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0])) delta_ts = ts[1] - ts[0] vel = delta_pos / delta_ts #filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() #nerea --> to test! 
if ts[0] != 0 and any(np.isnan(v) for v in vel): print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel) for i in range(3): if np.isnan(vel[i]): vel[i] = 0 return vel #calculate vel from raw position and filter def get_vel(self): recent_pos_data = self.source.read(n_pts=2) pos = recent_pos_data['data'][self.pos_state_names] ts = recent_pos_data['ts'] delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0])) delta_ts = ts[1] - ts[0] vel = delta_pos / delta_ts if ts[0] != 0 and any(np.isnan(v) for v in vel): print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel) for i in range(3): if np.isnan(vel[i]): vel[i] = 0 # the first value of the pos because it is always NaN and if a NaN is introduced in the filter, all the following filtered values will be also NaNs if np.any(np.isnan(vel)): self.n_getpos_iter = self.n_getpos_iter +1 vel_filt = vel else: vel_filt = np.array([self.vel_filter[k](vel[k]) for k in range(self.n_dof)]).ravel() return vel_filt def send_pos(self, pos, time): pos = pos.copy() # units of vel should be: [cm/s, cm/s, rad/s] assert len(pos) == 3 # convert units to: [mm/s, mm/s, deg/s] pos[0] *= cm_to_mm pos[1] *= cm_to_mm pos[2] *= rad_to_deg # mode 1: the forearm angle (psi) stays the same as it is. mode 2: psi will move according to the determined value mode = 2 pos_command = np.zeros(5) pos_command[0] = pos[0] pos_command[1] = pos[1] pos_command[2] = pos[2] pos_command[3] = time pos_command[4] = mode print ("pos") print (pos) print ("time") print (time) self._send_command('SetPosition ArmAssist %f %f %f %f %f\r' % tuple(pos_command)) def enable(self): self._send_command('SetControlMode ArmAssist Global\r') def disable(self): self._send_command('SetControlMode ArmAssist Disable\r') def enable_watchdog(self, timeout_ms): print ('ArmAssist watchdog not enabled, doing nothing') def send_traj(self, pos_vel): pos_vel = pos_vel.copy() # units of vel should be: [cm/s, cm/s, rad/s] assert len(pos_vel) == 6 # units to are alread in [mm/s, mm/s, rad/s] # convert values to integers to reduce noise #pos_vel_int = np.rint(pos_vel) pos_vel_int = pos_vel print ("trajectory sent to AA") print ("x y psi vx vy vpsi") print (pos_vel_int) traj_command = np.zeros(6) traj_command[0] = pos_vel_int[0] traj_command[1] = pos_vel_int[1] traj_command[2] = pos_vel_int[2] traj_command[3] = pos_vel_int[3] traj_command[4] = pos_vel_int[4] traj_command[5] = pos_vel_int[5] self._send_command('SetTrajectory ArmAssist %d %d %d %d %d %d\r' % tuple(traj_command)) class DummyPlantUDP(object): drive_velocity_raw = np.array([0,0,0]) drive_velocity_sent = np.array([0,0,0]) drive_velocity_sent_pre_safety = np.array([0,0,0]) pre_drive_state = np.array([0, 0, 0]) def init(self): pass def enable(self): pass def start(self): pass def stop(self): pass def write_feedback(self): pass def get_pos_raw(self): return np.array([0,0,0]) def get_pos(self): return np.array([0,0,0]) def get_vel_raw(self): return np.array([0,0,0]) def get_vel(self): return np.array([0,0,0]) class ReHandPlantUDP(BasePlantUDP): '''Sends velocity commands and receives feedback over UDP. Can be used with either the real or simulated ReHand. 
''' ssm_cls = ismore_bmi_lib.StateSpaceReHand addr = settings.REHAND_UDP_SERVER_ADDR feedback_data_cls = udp_feedback_client.ReHandData data_source_name = 'rehand' n_dof = 4 plant_type = 'ReHand' vel_gain = np.array([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg]) max_pos_vals = np.array([60, 60, 60, 90], dtype=np.float64) # degrees min_pos_vals = np.array([25, 25, 25, 25], dtype=np.float64) # degrees max_speed = np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float64) # degrees/sec #max_speed = np.array([15., 15., 15., 15.], dtype=np.float64) # degrees/sec feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'w') def send_vel(self, vel): vel = vel.copy() # units of vel should be: [rad/s, rad/s, rad/s, rad/s] assert len(vel) == self.n_dof # convert units to: [deg/s, deg/s, deg/s, deg/s] vel *= rad_to_deg #filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() # set max speed limts faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed) vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed]) self.debug = True if self.debug: # print 'filt_vel in plants in degrees' # print filt_vel #*np.array([deg_to_rad, deg_to_rad, deg_to_rad, deg_to_rad]) if faster_than_max_speed.any() > 0: print ('faster_than_max_speed') print (faster_than_max_speed) print ("speed set to: ") print (vel) # self.plant.enable() #when we send vel commands always enable the rehand motors # self._send_command('SystemEnable ReHand\r') self._send_command('SetSpeed ReHand %f %f %f %f\r' % tuple(vel)) def get_vel_raw(self): return np.array(tuple(self.source.read(n_pts=1)['data'][self.vel_state_names][0])) def get_vel(self): return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.vel_state_names][0])) def enable(self): self._send_command('SystemEnable ReHand\r') def disable(self): self._send_command('SystemDisable ReHand\r') def diff_enable(self,DoFs): self._send_command('DiffEnable ReHand %i %i %i %i\r' % tuple(DoFs)) def get_enable_state(self): self._send_command('GetEnableState ReHand\r') def enable_watchdog(self, timeout_ms): self._send_command('WatchDogEnable ReHand %d\r' % timeout_ms) def get_pos_raw(self): # udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0])) #get pos filtered def get_pos(self): return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0])) ################################################ class BasePlantIsMore(Plant): # define in subclasses! 
aa_plant_cls = None rh_plant_cls = None safety_grid = None both_feedback_str = '' def __init__(self, *args, **kwargs): self.aa_plant = self.aa_plant_cls() self.rh_plant = self.rh_plant_cls() self.drive_velocity_raw = np.zeros((7,)) self.drive_velocity_sent= np.zeros((7,)) self.drive_velocity_sent_pre_safety = np.zeros((7, )) self.pre_drive_state = np.zeros((7, )) self.prev_vel_bl_aa = np.zeros((3, ))*np.NaN self.prev_vel_bl_rh = np.zeros((4, ))*np.NaN self.accel_lim_armassist = np.inf #0.8 self.accel_lim_psi = np.inf #0.16 self.accel_lim_rehand = np.inf #0.16 def init(self): self.aa_plant.init() self.rh_plant.init() def start(self): self.aa_plant.start() self.rh_plant.start() self.ts_start_data = time.time() def stop(self): self.aa_plant.stop() self.rh_plant.stop() def last_data_ts_arrival(self): return { 'ArmAssist': self.aa_plant.last_data_ts_arrival(), 'ReHand': self.rh_plant.last_data_ts_arrival(), } def send_vel(self, vel): self.aa_plant.send_vel(vel[0:3]) self.rh_plant.send_vel(vel[3:7]) def get_pos_raw(self): aa_pos = self.aa_plant.get_pos_raw() rh_pos = self.rh_plant.get_pos_raw() return np.hstack([aa_pos, rh_pos]) def get_pos(self): aa_pos = self.aa_plant.get_pos() rh_pos = self.rh_plant.get_pos() return np.hstack([aa_pos, rh_pos]) def get_vel_raw(self): aa_vel = self.aa_plant.get_vel_raw() rh_vel = self.rh_plant.get_vel_raw() return np.hstack([aa_vel, rh_vel]) def get_vel(self): aa_vel = self.aa_plant.get_vel() rh_vel = self.rh_plant.get_vel() return np.hstack([aa_vel, rh_vel]) def enable(self): self.aa_plant.enable() self.rh_plant.enable() def disable(self): self.aa_plant.disable() self.rh_plant.disable() def drive(self, decoder): # print self.aa_plant.aa_xy_ix: [0, 1] # print self.aa_plant.aa_psi_ix: [2] # print self.rh_plant.rh_pfings: [0, 1, 2] # print self.rh_plant.rh_pron_ix: [3] vel = decoder['qdot'] vel_bl = vel.copy() current_state = self.get_pos() self.pre_drive_state = current_state.copy() self.drive_velocity_raw = vel_bl.copy() if self.blocking_joints is not None: vel_bl[self.blocking_joints] = 0 vel_bl_aa0 = vel_bl[0:3].copy() vel_bl_rh0 = vel_bl[3:7].copy() ### Accel Limit Velocitites ### # if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))): # aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa # rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh # ### AA XY ### # for i in np.arange(2): # if aa_output_accel[i] > self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist # elif aa_output_accel[i] < -1*self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist # ### AA PSI ### # if aa_output_accel[2] > self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi # elif aa_output_accel[2] < -1*self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi # ### RH All ### # for i in np.arange(4): # if rh_output_accel[i] > self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand # elif rh_output_accel[i] < -1*self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand ### Add Attractor ### if self.safety_grid is not None: attractor_point_aa = self.safety_grid.attractor_point[:3] attractor_point_rh = self.safety_grid.attractor_point[3:] vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05 vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05 vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy() vel_bl_rh = vel_bl_rh0 + 
vel_bl_rh_pull.copy() else: vel_bl_aa = vel_bl_aa0 vel_bl_rh = vel_bl_rh0 ### LPF Filter Velocities ### for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']): vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s]) if np.isnan(vel_bl_aa[s]): vel_bl_aa[s] = 0 for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']): vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s]) if np.isnan(vel_bl_rh[s]): vel_bl_rh[s] = 0 self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) #If the next position is outside of safety then damp velocity to only go to limit: pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh)) pos_pred_aa = pos_pred[0:3] pos_pred_rh = pos_pred[3:7] both_feedback_str = '' if self.safety_grid is not None: if len(self.aa_plant.aa_xy_ix) > 0: x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix]) if x_tmp == False: current_pos = current_state[self.aa_plant.aa_xy_ix] pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix] #d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos) vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05 pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix] #print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix] xy_change = True # Make sure AA Psi within bounds: if len(self.aa_plant.aa_psi_ix) > 0: mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix]) predx, predy= pos_pred_aa[[0, 1]] # Set psi velocity : psi_ok = False if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx): # Test if globally ok: global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0 if global_ok: psi_ok = True if psi_ok == False: # Move psi back to attractor pos: psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix] vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05 # Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY) if len(self.rh_plant.rh_pron_ix) > 0: mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix]) # Set prono velocity : if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx): pass else: tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix] prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix] vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05 # Assure RH fingers are within range: if len(self.rh_plant.rh_pfings) > 0: for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings): mn, mx = self.safety_grid.get_rh_minmax(nm) if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx): pass else: tmp_ = pos_pred_rh[ix] neutral = attractor_point_rh[ix] vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05 # If in the rest state -- block the arm: if self.task_state in ['rest', 'prep', 'baseline_check']: vel_bl_aa[:] = 0 vel_bl_rh[:] = 0 elif self.task_state == 'emg_rest': scaling = self.rest_emg_output if scaling <= 0.5: scaling = 0 else: scaling = 0.5*scaling vel_bl_aa = scaling*vel_bl_aa vel_bl_rh = scaling*vel_bl_rh max_vel_xy = 10. vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy max_vel_ang = 2. 
if vel_bl_aa[2] > max_vel_ang: vel_bl_aa[2] = max_vel_ang elif vel_bl_aa[2] < -1*max_vel_ang: vel_bl_aa[2] = -1*max_vel_ang vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang if self.blocking_joints is not None: for j in [0, 1, 2]: if j in self.blocking_joints: vel_bl_aa[j] = 0 #print 'blocking vel_bl_aa: ', j for j in [3, 4, 5, 6]: if j in self.blocking_joints: vel_bl_rh[j-3] = 0 #print 'blocking vel_bl_rh: ', j-3 self.both_feedback_str = both_feedback_str self.aa_plant.send_vel(vel_bl_aa) self.rh_plant.send_vel(vel_bl_rh) self.prev_vel_bl_aa = vel_bl_aa.copy() self.prev_vel_bl_rh = vel_bl_rh.copy() self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) decoder['q'] = self.get_pos() class IsMorePlantUDP(BasePlantIsMore): '''Sends velocity commands and receives feedback over UDP. Can be used with either the real or simulated ArmAssist+ReHand. ''' aa_plant_cls = ArmAssistPlantUDP rh_plant_cls = ReHandPlantUDP def write_feedback(self): self.aa_plant.feedback_str = self.both_feedback_str self.aa_plant.write_feedback() #self.rh_plant.write_feedback() class IsMorePlantEMGControl(IsMorePlantUDP): # Plant used for the pure EMG control task def drive(self): vel_bl = self.drive_velocity_raw current_state = self.get_pos() self.pre_drive_state = current_state.copy() if self.blocking_joints is not None: vel_bl[self.blocking_joints] = 0 vel_bl_aa0 = vel_bl[0:3].copy() vel_bl_rh0 = vel_bl[3:7].copy() ### Accel Limit Velocitites ### # if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))): # aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa # rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh # ### AA XY ### # for i in np.arange(2): # if aa_output_accel[i] > self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist # elif aa_output_accel[i] < -1*self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist # ### AA PSI ### # if aa_output_accel[2] > self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi # elif aa_output_accel[2] < -1*self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi # ### RH All ### # for i in np.arange(4): # if rh_output_accel[i] > self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand # elif rh_output_accel[i] < -1*self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand ### Add Attractor ### attractor_point_aa = self.safety_grid.attractor_point[:3] attractor_point_rh = self.safety_grid.attractor_point[3:] vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05 vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05 vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy() vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy() ### LPF Filter Velocities ### for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']): vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s]) if np.isnan(vel_bl_aa[s]): vel_bl_aa[s] = 0 for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']): vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s]) if np.isnan(vel_bl_rh[s]): vel_bl_rh[s] = 0 self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) #If the next position is outside of safety then damp velocity to only go to limit: pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh)) pos_pred_aa = pos_pred[0:3] pos_pred_rh = 
pos_pred[3:7] both_feedback_str = '' if self.safety_grid is not None: if len(self.aa_plant.aa_xy_ix) > 0: x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix]) if x_tmp == False: current_pos = current_state[self.aa_plant.aa_xy_ix] pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix] #d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos) vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05 pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix] #print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix] xy_change = True # Make sure AA Psi within bounds: if len(self.aa_plant.aa_psi_ix) > 0: mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix]) predx, predy= pos_pred_aa[[0, 1]] # Set psi velocity : psi_ok = False if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx): # Test if globally ok: global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0 if global_ok: psi_ok = True if psi_ok == False: # Move psi back to attractor pos: psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix] vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05 # Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY) if len(self.rh_plant.rh_pron_ix) > 0: mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix]) # Set prono velocity : if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx): pass else: tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix] prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix] vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05 # Assure RH fingers are within range: if len(self.rh_plant.rh_pfings) > 0: for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings): mn, mx = self.safety_grid.get_rh_minmax(nm) if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx): pass else: tmp_ = pos_pred_rh[ix] neutral = attractor_point_rh[ix] vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05 # If in the rest state -- block the arm: if self.task_state in ['rest', 'prep']: vel_bl_aa[:] = 0 vel_bl_rh[:] = 0 elif self.task_state == 'emg_rest': scaling = self.rest_emg_output if scaling <= 0.5: scaling = 0 else: scaling = 0.5*scaling vel_bl_aa = scaling*vel_bl_aa vel_bl_rh = scaling*vel_bl_rh max_vel_xy = 10. vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy max_vel_ang = 2. 
if vel_bl_aa[2] > max_vel_ang: vel_bl_aa[2] = max_vel_ang elif vel_bl_aa[2] < -1*max_vel_ang: vel_bl_aa[2] = -1*max_vel_ang vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang if self.blocking_joints is not None: for j in [0, 1, 2]: if j in self.blocking_joints: vel_bl_aa[j] = 0 #print 'blocking vel_bl_aa: ', j for j in [3, 4, 5, 6]: if j in self.blocking_joints: vel_bl_rh[j-3] = 0 #print 'blocking vel_bl_rh: ', j-3 self.both_feedback_str = both_feedback_str self.aa_plant.send_vel(vel_bl_aa) self.rh_plant.send_vel(vel_bl_rh) self.prev_vel_bl_aa = vel_bl_aa.copy() self.prev_vel_bl_rh = vel_bl_rh.copy() self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) class IsMorePlantHybridBMI(IsMorePlantUDP): # Plant used for the hybrid (EMG + brain) BMI task. def __init__(self, *args, **kwargs): self.drive_velocity_raw_brain = np.zeros((7,)) self.emg_vel_raw_scaled = np.zeros((7,)) super(IsMorePlantHybridBMI, self).__init__(*args, **kwargs) def drive(self, decoder): vel = decoder['qdot'] vel_brain = vel.copy() vel_brain_aa = vel_brain[[0, 1, 2]] vel_brain_fingers = vel_brain[[3, 4, 5]] vel_brain_prono = vel_brain[[6]] self.drive_velocity_raw_brain = vel_brain.copy() # Use EMG scaled array to scale the output: vel_emg = self.emg_vel.copy() vel_emg_scaled = [] for i in range(7): vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i]) vel_emg_scaled = np.hstack((vel_emg_scaled)) self.emg_vel_raw_scaled = vel_emg_scaled.copy() vel_emg_aa = vel_emg_scaled[[0, 1, 2]] vel_emg_fingers = vel_emg_scaled[[3, 4, 5]] vel_emg_prono = vel_emg_scaled[[6]] vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa) vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers) vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono) vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono)) # Fuse velocities from EMG and neural decoders #vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight) self.drive_velocity_raw = vel_bl.copy() vel_bl_fb_gain = [] for i in range(7): vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i]) vel_bl_fb_gain = np.hstack((vel_bl_fb_gain)) self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy() current_state = self.get_pos() self.pre_drive_state = current_state.copy() if self.blocking_joints is not None: print ('self.blocking_joints --> ', self.blocking_joints) vel_bl_fb_gain[self.blocking_joints] = 0 vel_bl_aa0 = vel_bl_fb_gain[0:3].copy() vel_bl_rh0 = vel_bl_fb_gain[3:7].copy() ### Accel Limit Velocitites ### # if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))): # aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa # rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh # ### AA XY ### # for i in np.arange(2): # if aa_output_accel[i] > self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist # elif aa_output_accel[i] < -1*self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist # ### AA PSI ### # if aa_output_accel[2] > self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi # elif aa_output_accel[2] < -1*self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi # ### RH All ### # for i in np.arange(4): # if rh_output_accel[i] > self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand # elif rh_output_accel[i] < 
-1*self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand ### Add Attractor ### attractor_point_aa = self.safety_grid.attractor_point[:3] attractor_point_rh = self.safety_grid.attractor_point[3:] vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05 vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05 vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy() vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy() ### LPF Filter Velocities ### for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']): vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s]) if np.isnan(vel_bl_aa[s]): vel_bl_aa[s] = 0 for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']): vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s]) if np.isnan(vel_bl_rh[s]): vel_bl_rh[s] = 0 self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) #If the next position is outside of safety then damp velocity to only go to limit: pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh)) pos_pred_aa = pos_pred[0:3] pos_pred_rh = pos_pred[3:7] both_feedback_str = '' if self.safety_grid is not None: if len(self.aa_plant.aa_xy_ix) > 0: x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix]) if x_tmp == False: print ('false position') current_pos = current_state[self.aa_plant.aa_xy_ix] pos_valid = attractor_point_aa[self.aa_plant.aa_xy_ix] #d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos) vel_bl_aa[self.aa_plant.aa_xy_ix] = self.attractor_speed*(pos_valid - current_pos)/0.05 pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix] #print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix] xy_change = True # Make sure AA Psi within bounds: if len(self.aa_plant.aa_psi_ix) > 0: mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix]) predx, predy= pos_pred_aa[[0, 1]] # Set psi velocity : psi_ok = False if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx): # Test if globally ok: #global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0 global_ok = True if global_ok: psi_ok = True if psi_ok == False: # Move psi back to attractor pos: psi_neutral = attractor_point_aa[self.aa_plant.aa_psi_ix] vel_bl_aa[self.aa_plant.aa_psi_ix] = self.attractor_speed*(psi_neutral-current_state[self.aa_plant.aa_psi_ix])/0.05 # Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY) if len(self.rh_plant.rh_pron_ix) > 0: mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix]) # Set prono velocity : if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx): pass else: tmp_pos = pos_pred_rh[self.rh_plant.rh_pron_ix] prono_neutral = attractor_point_rh[self.rh_plant.rh_pron_ix] vel_bl_rh[self.rh_plant.rh_pron_ix] = self.attractor_speed*(prono_neutral-tmp_pos)/0.05 # Assure RH fingers are within range: if len(self.rh_plant.rh_pfings) > 0: for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings): mn, mx = self.safety_grid.get_rh_minmax(nm) if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx): pass else: tmp_ = pos_pred_rh[ix] neutral = attractor_point_rh[ix] vel_bl_rh[ix] = self.attractor_speed*(neutral - tmp_)/0.05 # print 'safely adjusting fingers! 
', nm, 'min: ', mn, ' max: ', mx, ' pred: ', pos_pred_rh[ix] # If in the rest state -- block the arm: if self.task_state in ['rest', 'prep', 'baseline_check']: vel_bl_aa[:] = 0 vel_bl_rh[:] = 0 elif self.task_state == 'emg_rest': scaling = self.rest_emg_output if scaling <= 0.5: scaling = 0 else: scaling = 0.5*scaling vel_bl_aa = scaling*vel_bl_aa vel_bl_rh = scaling*vel_bl_rh elif self.task_state == 'rest_back': vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed elif self.task_state in ['drive_to_start', 'drive_to_rest']: vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05 vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05 max_vel_xy = 10. vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy max_vel_ang = 2. if vel_bl_aa[2] > max_vel_ang: vel_bl_aa[2] = max_vel_ang elif vel_bl_aa[2] < -1*max_vel_ang: vel_bl_aa[2] = -1*max_vel_ang vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang if self.blocking_joints is not None: for j in [0, 1, 2]: if j in self.blocking_joints: vel_bl_aa[j] = 0 #print 'blocking vel_bl_aa: ', j for j in [3, 4, 5, 6]: if j in self.blocking_joints: vel_bl_rh[j-3] = 0 #print 'blocking vel_bl_rh: ', j-3 self.both_feedback_str = both_feedback_str self.aa_plant.send_vel(vel_bl_aa) self.rh_plant.send_vel(vel_bl_rh) self.prev_vel_bl_aa = vel_bl_aa.copy() self.prev_vel_bl_rh = vel_bl_rh.copy() self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) decoder['q'] = self.get_pos() class IsMorePlantHybridBMISoftSafety(IsMorePlantHybridBMI): def drive(self, decoder): vel = decoder['qdot'] vel_brain = vel.copy() vel_brain_aa = vel_brain[[0, 1, 2]] vel_brain_fingers = vel_brain[[3, 4, 5]] vel_brain_prono = vel_brain[[6]] self.drive_velocity_raw_brain = vel_brain.copy() # Use EMG scaled array to scale the output: vel_emg = self.emg_vel.copy() vel_emg_scaled = [] for i in range(7): vel_emg_scaled.append(vel_emg[i]*self.scale_emg_pred_arr[i]) vel_emg_scaled = np.hstack((vel_emg_scaled)) self.emg_vel_raw_scaled = vel_emg_scaled.copy() vel_emg_aa = vel_emg_scaled[[0, 1, 2]] vel_emg_fingers = vel_emg_scaled[[3, 4, 5]] vel_emg_prono = vel_emg_scaled[[6]] vel_bl_aa = vel_emg_aa*self.emg_weight_aa + vel_brain_aa*(1-self.emg_weight_aa) vel_bl_fingers = vel_emg_fingers*self.emg_weight_fingers + vel_brain_fingers*(1-self.emg_weight_fingers) vel_bl_prono = vel_emg_prono*self.emg_weight_prono + vel_brain_prono*(1-self.emg_weight_prono) vel_bl = np.hstack((vel_bl_aa, vel_bl_fingers, vel_bl_prono)) # Fuse velocities from EMG and neural decoders #vel_bl = vel_emg*self.emg_weight + vel_brain*(1-self.emg_weight) self.drive_velocity_raw = vel_bl.copy() vel_bl_fb_gain = [] for i in range(7): vel_bl_fb_gain.append(vel_bl[i]*self.fb_vel_gain[i]) vel_bl_fb_gain = np.hstack((vel_bl_fb_gain)) self.drive_velocity_raw_fb_gain = vel_bl_fb_gain.copy() current_state = self.get_pos() self.pre_drive_state = current_state.copy() if self.blocking_joints is not None: vel_bl_fb_gain[self.blocking_joints] = 0 vel_bl_aa0 = vel_bl_fb_gain[0:3].copy() vel_bl_rh0 = vel_bl_fb_gain[3:7].copy() ### Accel Limit Velocitites ### # if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))): # aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa # rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh # ### 
AA XY ### # for i in np.arange(2): # if aa_output_accel[i] > self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist # elif aa_output_accel[i] < -1*self.accel_lim_armassist: # vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist # ### AA PSI ### # if aa_output_accel[2] > self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi # elif aa_output_accel[2] < -1*self.accel_lim_psi: # vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi # ### RH All ### # for i in np.arange(4): # if rh_output_accel[i] > self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand # elif rh_output_accel[i] < -1*self.accel_lim_rehand: # vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand ### Add Attractor ### attractor_point_aa = self.safety_grid.attractor_point[:3] attractor_point_rh = self.safety_grid.attractor_point[3:] vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05 vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05 vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy() vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy() ### LPF Filter Velocities ### for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']): vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s]) if np.isnan(vel_bl_aa[s]): vel_bl_aa[s] = 0 for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']): vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s]) if np.isnan(vel_bl_rh[s]): vel_bl_rh[s] = 0 self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) #If the next position is outside of safety then damp velocity to only go to limit: pos_pred = current_state + 0.05*np.hstack((vel_bl_aa, vel_bl_rh)) pos_pred_aa = pos_pred[0:3] pos_pred_rh = pos_pred[3:7] both_feedback_str = '' if self.safety_grid is not None: if len(self.aa_plant.aa_xy_ix) > 0: x_tmp = self.safety_grid.is_valid_pos(pos_pred_aa[self.aa_plant.aa_xy_ix]) if x_tmp == False: # Find the closest point on the boundary of the safety grid and set velocity in same # direction, but at 90% of way to get to the edge of the safety grid: current_pos = current_state[self.aa_plant.aa_xy_ix] ### loop through percentages of velocity and check validity of point: valid_scale = False scale = 1.0 while valid_scale is False: scale -= 0.05 pos_pred_xy = current_pos + 0.05*(vel_bl_aa[self.aa_plant.aa_xy_ix]*scale) valid_scale = self.safety_grid.is_valid_pos(pos_pred_xy) if scale < -1.0: scale = 0.0 break #d_to_valid, pos_valid = self.safety_grid.dist_to_valid_point(current_pos) vel_bl_aa[self.aa_plant.aa_xy_ix] = vel_bl_aa[self.aa_plant.aa_xy_ix]*scale pos_pred_aa[self.aa_plant.aa_xy_ix] = current_pos + 0.05*vel_bl_aa[self.aa_plant.aa_xy_ix] #print 'plant adjust: ', vel_bl_aa[self.aa_plant.aa_xy_ix], pos_pred_aa[self.aa_plant.aa_xy_ix] xy_change = True # Make sure AA Psi within bounds: if len(self.aa_plant.aa_psi_ix) > 0: mn, mx = self.safety_grid.get_minmax_psi(pos_pred_aa[self.aa_plant.aa_xy_ix]) predx, predy= pos_pred_aa[[0, 1]] # Set psi velocity : psi_ok = False if np.logical_and(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx): # Test if globally ok: #global_ok = self.safety_grid.global_hull.hull3d.find_simplex(np.array([predx, predy, pos_pred_aa[2]])) >=0 global_ok = True if global_ok: psi_ok = True if psi_ok == False: valid_scale_psi = False scale = 1.0 while valid_scale_psi is False: scale -= 0.05 psi_pred = 
current_state[self.aa_plant.aa_psi_ix] + 0.05*(scale*vel_bl_aa[self.aa_plant.aa_psi_ix]) if np.logical_and(psi_pred >= mn, psi_pred <= mx): valid_scale_psi = True if scale < -1.0: scale = 0.0 break vel_bl_aa[self.aa_plant.aa_psi_ix] = scale*vel_bl_aa[self.aa_plant.aa_psi_ix] # Make sure RH Prono within bounds (if SSM is only RH, use settings.starting_pos for AAPX, AAPY) if len(self.rh_plant.rh_pron_ix) > 0: mn, mx = self.safety_grid.get_minmax_prono(pos_pred_aa[self.aa_plant.aa_xy_ix]) # Set prono velocity : if np.logical_and(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx): pass else: valid_scale_prono = False scale = 1.0 while valid_scale_prono is False: scale -= 0.05 pron_pred = pos_pred_rh[self.rh_plant.rh_pron_ix] + 0.05*(scale*vel_bl_rh[self.rh_plant.rh_pron_ix]) if np.logical_and(pron_pred >= mn, pron_pred <= mx): valid_scale_prono = True if scale < -1.0: scale = 0. break vel_bl_rh[self.rh_plant.rh_pron_ix] = scale*vel_bl_rh[self.rh_plant.rh_pron_ix] # Assure RH fingers are within range: if len(self.rh_plant.rh_pfings) > 0: for i, (ix, nm) in enumerate(self.rh_plant.rh_pfings): mn, mx = self.safety_grid.get_rh_minmax(nm) if np.logical_and(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx): pass else: finger_scale = False scale = 1.0 while finger_scale is False: scale -= 0.05 fing_pred = pos_pred_rh[ix] + 0.05*(scale*vel_bl_rh[ix]) if np.logical_and(fing_pred >= mn, fing_pred<= mx): finger_scale = True if scale < -1.0: scale = 0.0 break vel_bl_rh[ix] = scale*vel_bl_rh[ix] # If in the rest state -- block the arm: if self.task_state in ['rest', 'prep', 'baseline_check', 'wait']: vel_bl_aa[:] = 0 vel_bl_rh[:] = 0 elif self.task_state == 'emg_rest': scaling = self.rest_emg_output if scaling <= 0.5: scaling = 0 else: scaling = 0.5*scaling vel_bl_aa = scaling*vel_bl_aa vel_bl_rh = scaling*vel_bl_rh elif self.task_state == 'rest_back': vel_bl_aa = vel_bl_aa_pull/self.attractor_speed_const*self.rest_back_attractor_speed vel_bl_rh = vel_bl_rh_pull/self.attractor_speed_const*self.rest_back_attractor_speed elif self.task_state in ['drive_to_start', 'drive_to_rest']: vel_bl_aa = self.back_to_target_speed*(self.drive_to_start_target[:3] - current_state[:3])/0.05 vel_bl_rh = self.back_to_target_speed*(self.drive_to_start_target[3:] - current_state[3:])/0.05 max_vel_xy = 10. vel_bl_aa[vel_bl_aa>max_vel_xy] = max_vel_xy vel_bl_aa[vel_bl_aa<-1*max_vel_xy] = -1*max_vel_xy max_vel_ang = 2. 
if vel_bl_aa[2] > max_vel_ang: vel_bl_aa[2] = max_vel_ang elif vel_bl_aa[2] < -1*max_vel_ang: vel_bl_aa[2] = -1*max_vel_ang vel_bl_rh[vel_bl_rh>max_vel_ang] = max_vel_ang vel_bl_rh[vel_bl_rh<-1*max_vel_ang] = -1*max_vel_ang if self.blocking_joints is not None: for j in [0, 1, 2]: if j in self.blocking_joints: vel_bl_aa[j] = 0 #print 'blocking vel_bl_aa: ', j for j in [3, 4, 5, 6]: if j in self.blocking_joints: vel_bl_rh[j-3] = 0 #print 'blocking vel_bl_rh: ', j-3 self.both_feedback_str = both_feedback_str self.aa_plant.send_vel(vel_bl_aa) self.rh_plant.send_vel(vel_bl_rh) self.prev_vel_bl_aa = vel_bl_aa.copy() self.prev_vel_bl_rh = vel_bl_rh.copy() self.drive_velocity_sent = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy())) decoder['q'] = self.get_pos() UDP_PLANT_CLS_DICT = { 'ArmAssist': ArmAssistPlantUDP, 'ReHand': ReHandPlantUDP, 'IsMore': IsMorePlantUDP, 'IsMoreEMGControl': IsMorePlantEMGControl, 'IsMoreHybridControl': IsMorePlantHybridBMI, 'IsMorePlantHybridBMISoftSafety': IsMorePlantHybridBMISoftSafety, 'DummyPlant': DummyPlantUDP, } ########################### ##### Deprecated code ##### ########################### class BasePlant(object): def __init__(self, *args, **kwargs): raise NotImplementedError('Implement in subclasses!') def init(self): raise NotImplementedError('Implement in subclasses!') def start(self): raise NotImplementedError('Implement in subclasses!') def stop(self): raise NotImplementedError('Implement in subclasses!') def last_data_ts_arrival(self): raise NotImplementedError('Implement in subclasses!') def send_vel(self, vel): raise NotImplementedError('Implement in subclasses!') def get_pos(self): raise NotImplementedError('Implement in subclasses!') def get_vel(self): raise NotImplementedError('Implement in subclasses!') def enable(self): '''Disable the device's motor drivers.''' raise NotImplementedError('Implement in subclasses!') def disable(self): '''Disable the device's motor drivers.''' raise NotImplementedError('Implement in subclasses!') def enable_watchdog(self, timeout_ms): raise NotImplementedError('Implement in subclasses!') def get_intrinsic_coordinates(self): return self.get_pos()
[ "numpy.abs", "time.ctime", "socket.socket", "numpy.hstack", "os.path.expandvars", "ismore.filter.Filter", "numpy.logical_and", "scipy.signal.butter", "riglib.sink.sinks.register", "numpy.array", "numpy.zeros", "numpy.isnan", "numpy.sign", "numpy.nonzero", "time.time", "riglib.source.DataSource" ]
[((12073, 12115), 'numpy.array', 'np.array', (['[cm_to_mm, cm_to_mm, rad_to_deg]'], {}), '([cm_to_mm, cm_to_mm, rad_to_deg])\n', (12081, 12115), True, 'import numpy as np\n'), ((12180, 12214), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (12188, 12214), True, 'import numpy as np\n'), ((12239, 12276), 'numpy.array', 'np.array', (['[-np.inf, -np.inf, -np.inf]'], {}), '([-np.inf, -np.inf, -np.inf])\n', (12247, 12276), True, 'import numpy as np\n'), ((12301, 12335), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (12309, 12335), True, 'import numpy as np\n'), ((12847, 12883), 'scipy.signal.butter', 'butter', (['(2)', 'cuttoff_freq'], {'btype': '"""low"""'}), "(2, cuttoff_freq, btype='low')\n", (12853, 12883), False, 'from scipy.signal import butter, lfilter\n'), ((18816, 18835), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18824, 18835), True, 'import numpy as np\n'), ((18860, 18879), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18868, 18879), True, 'import numpy as np\n'), ((18915, 18934), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18923, 18934), True, 'import numpy as np\n'), ((18955, 18974), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18963, 18974), True, 'import numpy as np\n'), ((19843, 19901), 'numpy.array', 'np.array', (['[rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg]'], {}), '([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])\n', (19851, 19901), True, 'import numpy as np\n'), ((19926, 19970), 'numpy.array', 'np.array', (['[60, 60, 60, 90]'], {'dtype': 'np.float64'}), '([60, 60, 60, 90], dtype=np.float64)\n', (19934, 19970), True, 'import numpy as np\n'), ((20005, 20049), 'numpy.array', 'np.array', (['[25, 25, 25, 25]'], {'dtype': 'np.float64'}), '([25, 25, 25, 25], dtype=np.float64)\n', (20013, 20049), True, 'import numpy as np\n'), ((20084, 20144), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf, np.inf]'], {'dtype': 'np.float64'}), '([np.inf, np.inf, np.inf, np.inf], dtype=np.float64)\n', (20092, 20144), True, 'import numpy as np\n'), ((1022, 1109), 'riglib.source.DataSource', 'source.DataSource', (['self.feedback_data_cls'], {'bufferlen': '(5)', 'name': 'self.data_source_name'}), '(self.feedback_data_cls, bufferlen=5, name=self.\n data_source_name)\n', (1039, 1109), False, 'from riglib import source\n'), ((1125, 1173), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1138, 1173), False, 'import socket\n'), ((3234, 3266), 'riglib.sink.sinks.register', 'sink.sinks.register', (['self.source'], {}), '(self.source)\n', (3253, 3266), False, 'from riglib import sink\n'), ((3489, 3500), 'time.time', 'time.time', ([], {}), '()\n', (3498, 3500), False, 'import time\n'), ((5076, 5135), 'numpy.nonzero', 'np.nonzero', (['((projected_pos > self.max_pos_vals) * (vel > 0))'], {}), '((projected_pos > self.max_pos_vals) * (vel > 0))\n', (5086, 5135), True, 'import numpy as np\n'), ((5159, 5218), 'numpy.nonzero', 'np.nonzero', (['((projected_pos < self.min_pos_vals) * (vel < 0))'], {}), '((projected_pos < self.min_pos_vals) * (vel < 0))\n', (5169, 5218), True, 'import numpy as np\n'), ((12365, 12421), 'os.path.expandvars', 'os.path.expandvars', (['"""$HOME/code/bmi3d/log/armassist.txt"""'], {}), "('$HOME/code/bmi3d/log/armassist.txt')\n", (12383, 12421), False, 'import os\n'), ((12993, 13037), 'ismore.filter.Filter', 'Filter', 
(['bpf_kin_coeffs[0]', 'bpf_kin_coeffs[1]'], {}), '(bpf_kin_coeffs[0], bpf_kin_coeffs[1])\n', (12999, 13037), False, 'from ismore.filter import Filter\n'), ((17267, 17278), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (17275, 17278), True, 'import numpy as np\n'), ((18404, 18415), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (18412, 18415), True, 'import numpy as np\n'), ((19200, 19219), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19208, 19219), True, 'import numpy as np\n'), ((19257, 19276), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19265, 19276), True, 'import numpy as np\n'), ((19318, 19337), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19326, 19337), True, 'import numpy as np\n'), ((19375, 19394), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (19383, 19394), True, 'import numpy as np\n'), ((20276, 20329), 'os.path.expandvars', 'os.path.expandvars', (['"""$HOME/code/bmi3d/log/rehand.txt"""'], {}), "('$HOME/code/bmi3d/log/rehand.txt')\n", (20294, 20329), False, 'import os\n'), ((23006, 23020), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23014, 23020), True, 'import numpy as np\n'), ((23055, 23069), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23063, 23069), True, 'import numpy as np\n'), ((23116, 23130), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23124, 23130), True, 'import numpy as np\n'), ((23163, 23177), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (23171, 23177), True, 'import numpy as np\n'), ((23613, 23624), 'time.time', 'time.time', ([], {}), '()\n', (23622, 23624), False, 'import time\n'), ((24140, 24167), 'numpy.hstack', 'np.hstack', (['[aa_pos, rh_pos]'], {}), '([aa_pos, rh_pos])\n', (24149, 24167), True, 'import numpy as np\n'), ((24289, 24316), 'numpy.hstack', 'np.hstack', (['[aa_pos, rh_pos]'], {}), '([aa_pos, rh_pos])\n', (24298, 24316), True, 'import numpy as np\n'), ((24450, 24477), 'numpy.hstack', 'np.hstack', (['[aa_vel, rh_vel]'], {}), '([aa_vel, rh_vel])\n', (24459, 24477), True, 'import numpy as np\n'), ((24599, 24626), 'numpy.hstack', 'np.hstack', (['[aa_vel, rh_vel]'], {}), '([aa_vel, rh_vel])\n', (24608, 24626), True, 'import numpy as np\n'), ((41559, 41573), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (41567, 41573), True, 'import numpy as np\n'), ((41608, 41622), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (41616, 41622), True, 'import numpy as np\n'), ((42229, 42254), 'numpy.hstack', 'np.hstack', (['vel_emg_scaled'], {}), '(vel_emg_scaled)\n', (42238, 42254), True, 'import numpy as np\n'), ((42779, 42831), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_fingers, vel_bl_prono)'], {}), '((vel_bl_aa, vel_bl_fingers, vel_bl_prono))\n', (42788, 42831), True, 'import numpy as np\n'), ((43155, 43180), 'numpy.hstack', 'np.hstack', (['vel_bl_fb_gain'], {}), '(vel_bl_fb_gain)\n', (43164, 43180), True, 'import numpy as np\n'), ((52437, 52462), 'numpy.hstack', 'np.hstack', (['vel_emg_scaled'], {}), '(vel_emg_scaled)\n', (52446, 52462), True, 'import numpy as np\n'), ((52987, 53039), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_fingers, vel_bl_prono)'], {}), '((vel_bl_aa, vel_bl_fingers, vel_bl_prono))\n', (52996, 53039), True, 'import numpy as np\n'), ((53364, 53389), 'numpy.hstack', 'np.hstack', (['vel_bl_fb_gain'], {}), '(vel_bl_fb_gain)\n', (53373, 53389), True, 'import numpy as np\n'), ((5557, 5592), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', 
(5564, 5592), True, 'import numpy as np\n'), ((6052, 6087), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (6059, 6087), True, 'import numpy as np\n'), ((6726, 6785), 'numpy.nonzero', 'np.nonzero', (['((projected_pos > self.max_pos_vals) * (vel > 0))'], {}), '((projected_pos > self.max_pos_vals) * (vel > 0))\n', (6736, 6785), True, 'import numpy as np\n'), ((6813, 6872), 'numpy.nonzero', 'np.nonzero', (['((projected_pos < self.min_pos_vals) * (vel < 0))'], {}), '((projected_pos < self.min_pos_vals) * (vel < 0))\n', (6823, 6872), True, 'import numpy as np\n'), ((14128, 14163), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (14135, 14163), True, 'import numpy as np\n'), ((16568, 16581), 'numpy.isnan', 'np.isnan', (['vel'], {}), '(vel)\n', (16576, 16581), True, 'import numpy as np\n'), ((20874, 20909), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (20881, 20909), True, 'import numpy as np\n'), ((23209, 23223), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (23217, 23223), True, 'import numpy as np\n'), ((23262, 23276), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (23270, 23276), True, 'import numpy as np\n'), ((27532, 27554), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (27540, 27554), True, 'import numpy as np\n'), ((27760, 27782), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (27768, 27782), True, 'import numpy as np\n'), ((35907, 35929), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (35915, 35929), True, 'import numpy as np\n'), ((36135, 36157), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (36143, 36157), True, 'import numpy as np\n'), ((45598, 45620), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (45606, 45620), True, 'import numpy as np\n'), ((45826, 45848), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (45834, 45848), True, 'import numpy as np\n'), ((55736, 55758), 'numpy.isnan', 'np.isnan', (['vel_bl_aa[s]'], {}), '(vel_bl_aa[s])\n', (55744, 55758), True, 'import numpy as np\n'), ((55964, 55986), 'numpy.isnan', 'np.isnan', (['vel_bl_rh[s]'], {}), '(vel_bl_rh[s])\n', (55972, 55986), True, 'import numpy as np\n'), ((5450, 5461), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (5456, 5461), True, 'import numpy as np\n'), ((5945, 5956), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (5951, 5956), True, 'import numpy as np\n'), ((7146, 7181), 'numpy.sign', 'np.sign', (['vel[faster_than_max_speed]'], {}), '(vel[faster_than_max_speed])\n', (7153, 7181), True, 'import numpy as np\n'), ((9469, 9547), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[self.aa_psi_ix] >= mn)', '(pos_pred[self.aa_psi_ix] <= mx)'], {}), '(pos_pred[self.aa_psi_ix] >= mn, pos_pred[self.aa_psi_ix] <= mx)\n', (9483, 9547), True, 'import numpy as np\n'), ((10463, 10548), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[self.rh_pron_ix] >= mn)', '(pos_pred[self.rh_pron_ix] <= mx)'], {}), '(pos_pred[self.rh_pron_ix] >= mn, pos_pred[self.rh_pron_ix] <= mx\n )\n', (10477, 10548), True, 'import numpy as np\n'), ((14021, 14032), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (14027, 14032), True, 'import numpy as np\n'), ((15725, 15741), 'numpy.isnan', 'np.isnan', (['vel[i]'], {}), '(vel[i])\n', (15733, 15741), True, 'import numpy as np\n'), ((16344, 16360), 'numpy.isnan', 'np.isnan', (['vel[i]'], {}), 
'(vel[i])\n', (16352, 16360), True, 'import numpy as np\n'), ((20767, 20778), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (20773, 20778), True, 'import numpy as np\n'), ((28046, 28079), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (28055, 28079), True, 'import numpy as np\n'), ((29389, 29496), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (29403, 29496), True, 'import numpy as np\n'), ((30359, 30468), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (30373, 30468), True, 'import numpy as np\n'), ((36421, 36454), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (36430, 36454), True, 'import numpy as np\n'), ((37764, 37871), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (37778, 37871), True, 'import numpy as np\n'), ((38734, 38843), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (38748, 38843), True, 'import numpy as np\n'), ((46112, 46145), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (46121, 46145), True, 'import numpy as np\n'), ((47478, 47585), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (47492, 47585), True, 'import numpy as np\n'), ((48486, 48595), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (48500, 48595), True, 'import numpy as np\n'), ((56250, 56283), 'numpy.hstack', 'np.hstack', (['(vel_bl_aa, vel_bl_rh)'], {}), '((vel_bl_aa, vel_bl_rh))\n', (56259, 56283), True, 'import numpy as np\n'), ((58245, 58352), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn)', '(pos_pred_aa[self.aa_plant.aa_psi_ix] <= mx)'], {}), '(pos_pred_aa[self.aa_plant.aa_psi_ix] >= mn, pos_pred_aa[self\n .aa_plant.aa_psi_ix] <= mx)\n', (58259, 58352), True, 'import numpy as np\n'), ((59616, 59725), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn)', '(pos_pred_rh[self.rh_plant.rh_pron_ix] <= mx)'], {}), '(pos_pred_rh[self.rh_plant.rh_pron_ix] >= mn, pos_pred_rh[\n self.rh_plant.rh_pron_ix] <= mx)\n', (59630, 59725), True, 'import numpy as np\n'), ((4665, 4676), 'time.time', 'time.time', ([], {}), '()\n', (4674, 4676), False, 'import time\n'), ((7035, 7046), 'numpy.abs', 'np.abs', (['vel'], {}), '(vel)\n', (7041, 7046), True, 'import numpy as np\n'), ((10947, 11001), 'numpy.logical_and', 'np.logical_and', (['(pos_pred[ix] >= mn)', '(pos_pred[ix] 
<= mx)'], {}), '(pos_pred[ix] >= mn, pos_pred[ix] <= mx)\n', (10961, 11001), True, 'import numpy as np\n'), ((15574, 15585), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (15582, 15585), True, 'import numpy as np\n'), ((16193, 16204), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (16201, 16204), True, 'import numpy as np\n'), ((31045, 31105), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (31059, 31105), True, 'import numpy as np\n'), ((39420, 39480), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (39434, 39480), True, 'import numpy as np\n'), ((49172, 49232), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (49186, 49232), True, 'import numpy as np\n'), ((60686, 60746), 'numpy.logical_and', 'np.logical_and', (['(pos_pred_rh[ix] >= mn)', '(pos_pred_rh[ix] <= mx)'], {}), '(pos_pred_rh[ix] >= mn, pos_pred_rh[ix] <= mx)\n', (60700, 60746), True, 'import numpy as np\n'), ((3641, 3661), 'numpy.zeros', 'np.zeros', (['self.n_dof'], {}), '(self.n_dof)\n', (3649, 3661), True, 'import numpy as np\n'), ((11581, 11593), 'time.ctime', 'time.ctime', ([], {}), '()\n', (11591, 11593), False, 'import time\n'), ((58985, 59031), 'numpy.logical_and', 'np.logical_and', (['(psi_pred >= mn)', '(psi_pred <= mx)'], {}), '(psi_pred >= mn, psi_pred <= mx)\n', (58999, 59031), True, 'import numpy as np\n'), ((60108, 60156), 'numpy.logical_and', 'np.logical_and', (['(pron_pred >= mn)', '(pron_pred <= mx)'], {}), '(pron_pred >= mn, pron_pred <= mx)\n', (60122, 60156), True, 'import numpy as np\n'), ((29618, 29658), 'numpy.array', 'np.array', (['[predx, predy, pos_pred_aa[2]]'], {}), '([predx, predy, pos_pred_aa[2]])\n', (29626, 29658), True, 'import numpy as np\n'), ((37993, 38033), 'numpy.array', 'np.array', (['[predx, predy, pos_pred_aa[2]]'], {}), '([predx, predy, pos_pred_aa[2]])\n', (38001, 38033), True, 'import numpy as np\n'), ((61095, 61143), 'numpy.logical_and', 'np.logical_and', (['(fing_pred >= mn)', '(fing_pred <= mx)'], {}), '(fing_pred >= mn, fing_pred <= mx)\n', (61109, 61143), True, 'import numpy as np\n')]
import logging import pytest from ocs_ci.framework.testlib import tier1, skipif_ui_not_support, ui from ocs_ci.ocs.ui.pvc_ui import PvcUI from ocs_ci.framework.testlib import skipif_ocs_version from ocs_ci.framework.pytest_customization.marks import green_squad from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs from ocs_ci.ocs import constants from ocs_ci.helpers import helpers from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name from ocs_ci.utility.utils import get_ocp_version from ocs_ci.ocs.ui.views import locators from ocs_ci.ocs.resources.pod import get_fio_rw_iops logger = logging.getLogger(__name__) @ui @skipif_ocs_version("<4.6") @skipif_ui_not_support("pvc") @green_squad class TestPvcUserInterface(object): """ Test PVC User Interface """ @tier1 @pytest.mark.parametrize( argnames=["sc_name", "access_mode", "pvc_size", "vol_mode"], argvalues=[ pytest.param( "ocs-storagecluster-cephfs", "ReadWriteMany", "2", "Filesystem", ), pytest.param( "ocs-storagecluster-ceph-rbd", "ReadWriteMany", "3", "Block", ), pytest.param( "ocs-storagecluster-cephfs", "ReadWriteOnce", "10", "Filesystem", ), pytest.param( "ocs-storagecluster-ceph-rbd", "ReadWriteOnce", "11", "Block", ), pytest.param( "ocs-storagecluster-ceph-rbd", "ReadWriteOnce", "13", "Filesystem", ), ], ) def test_create_resize_delete_pvc( self, project_factory, teardown_factory, setup_ui, sc_name, access_mode, pvc_size, vol_mode, ): """ Test create, resize and delete pvc via UI """ # Creating a test project via CLI pro_obj = project_factory() project_name = pro_obj.namespace pvc_ui_obj = PvcUI(setup_ui) # Creating PVC via UI pvc_name = create_unique_resource_name("test", "pvc") pvc_ui_obj.create_pvc_ui( project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode ) pvc_objs = get_all_pvc_objs(namespace=project_name) pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name] assert pvc[0].size == int(pvc_size), ( f"size error| expected size:{pvc_size} \n " f"actual size:{str(pvc[0].size)}" ) assert pvc[0].get_pvc_access_mode == access_mode, ( f"access mode error| expected access mode:{access_mode} " f"\n actual access mode:{pvc[0].get_pvc_access_mode}" ) assert pvc[0].backed_sc == sc_name, ( f"storage class error| expected storage class:{sc_name} " f"\n actual storage class:{pvc[0].backed_sc}" ) assert pvc[0].get_pvc_vol_mode == vol_mode, ( f"volume mode error| expected volume mode:{vol_mode} " f"\n actual volume mode:{pvc[0].get_pvc_vol_mode}" ) # Verifying PVC via UI logger.info("Verifying PVC Details via UI") pvc_ui_obj.verify_pvc_ui( pvc_size=pvc_size, access_mode=access_mode, vol_mode=vol_mode, sc_name=sc_name, pvc_name=pvc_name, project_name=project_name, ) logger.info("PVC Details Verified via UI..!!") # Creating Pod via CLI logger.info("Creating Pod") if sc_name in (constants.DEFAULT_STORAGECLASS_RBD,): interface_type = constants.CEPHBLOCKPOOL else: interface_type = constants.CEPHFILESYSTEM new_pod = helpers.create_pod( interface_type=interface_type, pvc_name=pvc_name, namespace=project_name, raw_block_pv=vol_mode == constants.VOLUME_MODE_BLOCK, ) logger.info(f"Waiting for Pod: state= {constants.STATUS_RUNNING}") wait_for_resource_state(resource=new_pod, state=constants.STATUS_RUNNING) # Calling the Teardown Factory Method to make sure Pod is deleted teardown_factory(new_pod) # Expanding the PVC logger.info("Pvc Resizing") new_size = int(pvc_size) + 3 pvc_ui_obj.pvc_resize_ui( pvc_name=pvc_name, new_size=new_size, project_name=project_name ) assert new_size > int( 
pvc_size ), f"New size of the PVC cannot be less than existing size: new size is {new_size})" ocp_version = get_ocp_version() self.pvc_loc = locators[ocp_version]["pvc"] # Verifying PVC expansion logger.info("Verifying PVC resize") expected_capacity = f"{new_size} GiB" pvc_resize = pvc_ui_obj.verify_pvc_resize_ui( project_name=project_name, pvc_name=pvc_name, expected_capacity=expected_capacity, ) assert pvc_resize, "PVC resize failed" logger.info( "Pvc resize verified..!!" f"New Capacity after PVC resize is {expected_capacity}" ) # Running FIO logger.info("Execute FIO on a Pod") if vol_mode == constants.VOLUME_MODE_BLOCK: storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK else: storage_type = constants.WORKLOAD_STORAGE_TYPE_FS new_pod.run_io(storage_type, size=(new_size - 1), invalidate=0, rate="1000m") get_fio_rw_iops(new_pod) logger.info("FIO execution on Pod successfully completed..!!") # Checking if the Pod is deleted or not new_pod.delete(wait=True) new_pod.ocp.wait_for_delete(resource_name=new_pod.name) # Deleting the PVC via UI logger.info(f"Delete {pvc_name} pvc") pvc_ui_obj.delete_pvc_ui(pvc_name, project_name) pvc[0].ocp.wait_for_delete(pvc_name, timeout=120) pvc_objs = get_all_pvc_objs(namespace=project_name) pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name] if len(pvcs) > 0: assert f"PVC {pvcs[0].name} does not deleted" @tier1 @pytest.mark.parametrize( argnames=["sc_name", "access_mode", "clone_access_mode"], argvalues=[ pytest.param( "ocs-storagecluster-ceph-rbd", constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWO, ), pytest.param( "ocs-storagecluster-cephfs", constants.ACCESS_MODE_RWX, constants.ACCESS_MODE_RWO, ), ], ) def test_clone_pvc( self, project_factory, teardown_factory, setup_ui, sc_name, access_mode, clone_access_mode, ): """ Test to verify PVC clone from UI """ pvc_size = "1" vol_mode = constants.VOLUME_MODE_FILESYSTEM # Creating a project from CLI pro_obj = project_factory() project_name = pro_obj.namespace pvc_ui_obj = PvcUI(setup_ui) # Creating PVC from UI pvc_name = create_unique_resource_name("test", "pvc") pvc_ui_obj.create_pvc_ui( project_name, sc_name, pvc_name, access_mode, pvc_size, vol_mode ) teardown_factory(get_pvc_objs(pvc_names=[pvc_name], namespace=project_name)[0]) # Verifying PVC details in UI logger.info("Verifying PVC details in UI") pvc_ui_obj.verify_pvc_ui( pvc_size=pvc_size, access_mode=access_mode, vol_mode=vol_mode, sc_name=sc_name, pvc_name=pvc_name, project_name=project_name, ) logger.info("Verified PVC details in UI") # Clone PVC from UI clone_pvc_name = f"{pvc_name}-clone" pvc_ui_obj.pvc_clone_ui( project_name=project_name, pvc_name=pvc_name, cloned_pvc_access_mode=clone_access_mode, cloned_pvc_name=clone_pvc_name, ) teardown_factory( get_pvc_objs(pvc_names=[clone_pvc_name], namespace=project_name)[0] ) # Verifying cloned PVC details in UI logger.info("Verifying cloned PVC details in UI") pvc_ui_obj.verify_pvc_ui( pvc_size=pvc_size, access_mode=clone_access_mode, vol_mode=vol_mode, sc_name=sc_name, pvc_name=clone_pvc_name, project_name=project_name, ) logger.info("Verified cloned PVC details in UI")
[ "logging.getLogger", "ocs_ci.helpers.helpers.create_pod", "ocs_ci.ocs.resources.pod.get_fio_rw_iops", "ocs_ci.ocs.ui.pvc_ui.PvcUI", "ocs_ci.ocs.resources.pvc.get_all_pvc_objs", "pytest.param", "ocs_ci.framework.testlib.skipif_ui_not_support", "ocs_ci.framework.testlib.skipif_ocs_version", "ocs_ci.helpers.helpers.create_unique_resource_name", "ocs_ci.ocs.resources.pvc.get_pvc_objs", "ocs_ci.helpers.helpers.wait_for_resource_state", "ocs_ci.utility.utils.get_ocp_version" ]
[((640, 667), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (657, 667), False, 'import logging\n'), ((675, 701), 'ocs_ci.framework.testlib.skipif_ocs_version', 'skipif_ocs_version', (['"""<4.6"""'], {}), "('<4.6')\n", (693, 701), False, 'from ocs_ci.framework.testlib import skipif_ocs_version\n'), ((703, 731), 'ocs_ci.framework.testlib.skipif_ui_not_support', 'skipif_ui_not_support', (['"""pvc"""'], {}), "('pvc')\n", (724, 731), False, 'from ocs_ci.framework.testlib import tier1, skipif_ui_not_support, ui\n'), ((2242, 2257), 'ocs_ci.ocs.ui.pvc_ui.PvcUI', 'PvcUI', (['setup_ui'], {}), '(setup_ui)\n', (2247, 2257), False, 'from ocs_ci.ocs.ui.pvc_ui import PvcUI\n'), ((2308, 2350), 'ocs_ci.helpers.helpers.create_unique_resource_name', 'create_unique_resource_name', (['"""test"""', '"""pvc"""'], {}), "('test', 'pvc')\n", (2335, 2350), False, 'from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name\n'), ((2492, 2532), 'ocs_ci.ocs.resources.pvc.get_all_pvc_objs', 'get_all_pvc_objs', ([], {'namespace': 'project_name'}), '(namespace=project_name)\n', (2508, 2532), False, 'from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs\n'), ((4006, 4161), 'ocs_ci.helpers.helpers.create_pod', 'helpers.create_pod', ([], {'interface_type': 'interface_type', 'pvc_name': 'pvc_name', 'namespace': 'project_name', 'raw_block_pv': '(vol_mode == constants.VOLUME_MODE_BLOCK)'}), '(interface_type=interface_type, pvc_name=pvc_name,\n namespace=project_name, raw_block_pv=vol_mode == constants.\n VOLUME_MODE_BLOCK)\n', (4024, 4161), False, 'from ocs_ci.helpers import helpers\n'), ((4296, 4369), 'ocs_ci.helpers.helpers.wait_for_resource_state', 'wait_for_resource_state', ([], {'resource': 'new_pod', 'state': 'constants.STATUS_RUNNING'}), '(resource=new_pod, state=constants.STATUS_RUNNING)\n', (4319, 4369), False, 'from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name\n'), ((4870, 4887), 'ocs_ci.utility.utils.get_ocp_version', 'get_ocp_version', ([], {}), '()\n', (4885, 4887), False, 'from ocs_ci.utility.utils import get_ocp_version\n'), ((5789, 5813), 'ocs_ci.ocs.resources.pod.get_fio_rw_iops', 'get_fio_rw_iops', (['new_pod'], {}), '(new_pod)\n', (5804, 5813), False, 'from ocs_ci.ocs.resources.pod import get_fio_rw_iops\n'), ((6249, 6289), 'ocs_ci.ocs.resources.pvc.get_all_pvc_objs', 'get_all_pvc_objs', ([], {'namespace': 'project_name'}), '(namespace=project_name)\n', (6265, 6289), False, 'from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs\n'), ((7400, 7415), 'ocs_ci.ocs.ui.pvc_ui.PvcUI', 'PvcUI', (['setup_ui'], {}), '(setup_ui)\n', (7405, 7415), False, 'from ocs_ci.ocs.ui.pvc_ui import PvcUI\n'), ((7467, 7509), 'ocs_ci.helpers.helpers.create_unique_resource_name', 'create_unique_resource_name', (['"""test"""', '"""pvc"""'], {}), "('test', 'pvc')\n", (7494, 7509), False, 'from ocs_ci.helpers.helpers import wait_for_resource_state, create_unique_resource_name\n'), ((969, 1046), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-cephfs"""', '"""ReadWriteMany"""', '"""2"""', '"""Filesystem"""'], {}), "('ocs-storagecluster-cephfs', 'ReadWriteMany', '2', 'Filesystem')\n", (981, 1046), False, 'import pytest\n'), ((1139, 1213), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-ceph-rbd"""', '"""ReadWriteMany"""', '"""3"""', '"""Block"""'], {}), "('ocs-storagecluster-ceph-rbd', 'ReadWriteMany', '3', 'Block')\n", (1151, 1213), False, 'import pytest\n'), ((1306, 1384), 'pytest.param', 
'pytest.param', (['"""ocs-storagecluster-cephfs"""', '"""ReadWriteOnce"""', '"""10"""', '"""Filesystem"""'], {}), "('ocs-storagecluster-cephfs', 'ReadWriteOnce', '10', 'Filesystem')\n", (1318, 1384), False, 'import pytest\n'), ((1477, 1552), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-ceph-rbd"""', '"""ReadWriteOnce"""', '"""11"""', '"""Block"""'], {}), "('ocs-storagecluster-ceph-rbd', 'ReadWriteOnce', '11', 'Block')\n", (1489, 1552), False, 'import pytest\n'), ((1645, 1730), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-ceph-rbd"""', '"""ReadWriteOnce"""', '"""13"""', '"""Filesystem"""'], {}), "('ocs-storagecluster-ceph-rbd', 'ReadWriteOnce', '13', 'Filesystem'\n )\n", (1657, 1730), False, 'import pytest\n'), ((7657, 7715), 'ocs_ci.ocs.resources.pvc.get_pvc_objs', 'get_pvc_objs', ([], {'pvc_names': '[pvc_name]', 'namespace': 'project_name'}), '(pvc_names=[pvc_name], namespace=project_name)\n', (7669, 7715), False, 'from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs\n'), ((8426, 8490), 'ocs_ci.ocs.resources.pvc.get_pvc_objs', 'get_pvc_objs', ([], {'pvc_names': '[clone_pvc_name]', 'namespace': 'project_name'}), '(pvc_names=[clone_pvc_name], namespace=project_name)\n', (8438, 8490), False, 'from ocs_ci.ocs.resources.pvc import get_all_pvc_objs, get_pvc_objs\n'), ((6591, 6692), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-ceph-rbd"""', 'constants.ACCESS_MODE_RWO', 'constants.ACCESS_MODE_RWO'], {}), "('ocs-storagecluster-ceph-rbd', constants.ACCESS_MODE_RWO,\n constants.ACCESS_MODE_RWO)\n", (6603, 6692), False, 'import pytest\n'), ((6765, 6864), 'pytest.param', 'pytest.param', (['"""ocs-storagecluster-cephfs"""', 'constants.ACCESS_MODE_RWX', 'constants.ACCESS_MODE_RWO'], {}), "('ocs-storagecluster-cephfs', constants.ACCESS_MODE_RWX,\n constants.ACCESS_MODE_RWO)\n", (6777, 6864), False, 'import pytest\n')]
from sys import exit from colour import * from rs.reaction_system import ReactionSystem class ReactionSystemWithConcentrations(ReactionSystem): def __init__(self): self.reactions = [] self.meta_reactions = dict() self.permanent_entities = dict() self.background_set = [] self.context_entities = [] # legacy. to be removed self.reactions_by_prod = None self.max_concentration = 0 self.max_conc_per_ent = dict() def add_bg_set_entity(self, e): name = "" def_max_conc = -1 if type(e) is tuple and len(e) == 2: name, def_max_conc = e elif type(e) is str: name = e print("\nWARNING: no maximal concentration level specified for:", e, "\n") else: raise RuntimeError( "Bad entity type when adding background set element") self.assume_not_in_bgset(name) self.background_set.append(name) if def_max_conc != -1: ent_id = self.get_entity_id(name) self.max_conc_per_ent.setdefault(ent_id, 0) if self.max_conc_per_ent[ent_id] < def_max_conc: self.max_conc_per_ent[ent_id] = def_max_conc if self.max_concentration < def_max_conc: self.max_concentration = def_max_conc def get_max_concentration_level(self, e): if e in self.max_conc_per_ent: return self.max_conc_per_ent[e] else: return self.max_concentration def is_valid_entity_with_concentration(self, e): """Sanity check for entities with concentration""" if type(e) is tuple: if len(e) == 2 and type(e[1]) is int: return True if type(e) is list: if len(e) == 2 and type(e[1]) is int: return True print("FATAL. Invalid entity+concentration: {:s}".format(e)) exit(1) return False def get_state_ids(self, state): """Returns entities of the given state without levels""" return [e for e, c in state] def has_non_zero_concentration(self, elem): if elem[1] < 1: raise RuntimeError( "Unexpected concentration level in state: " + str(elem)) def process_rip(self, R, I, P, ignore_empty_R=False): """Chcecks concentration levels and converts entities names into their ids""" if R == [] and not ignore_empty_R: raise RuntimeError("No reactants defined") reactants = [] for r in R: self.is_valid_entity_with_concentration(r) self.has_non_zero_concentration(r) entity, level = r reactants.append((self.get_entity_id(entity), level)) if self.max_concentration < level: self.max_concentration = level inhibitors = [] for i in I: self.is_valid_entity_with_concentration(i) self.has_non_zero_concentration(i) entity, level = i inhibitors.append((self.get_entity_id(entity), level)) if self.max_concentration < level: self.max_concentration = level products = [] for p in P: self.is_valid_entity_with_concentration(p) self.has_non_zero_concentration(p) entity, level = p products.append((self.get_entity_id(entity), level)) return reactants, inhibitors, products def add_reaction(self, R, I, P): """Adds a reaction""" if P == []: raise RuntimeError("No products defined") reaction = self.process_rip(R, I, P) self.reactions.append(reaction) def add_reaction_without_reactants(self, R, I, P): """Adds a reaction""" if P == []: raise RuntimeError("No products defined") reaction = self.process_rip(R, I, P, ignore_empty_R=True) self.reactions.append(reaction) def add_reaction_inc(self, incr_entity, incrementer, R, I): """Adds a macro/meta reaction for increasing the value of incr_entity""" reactants, inhibitors, products = self.process_rip( R, I, [], ignore_empty_R=True) incr_entity_id = self.get_entity_id(incr_entity) self.meta_reactions.setdefault(incr_entity_id, []) self.meta_reactions[incr_entity_id].append( ("inc", self.get_entity_id(incrementer), reactants, inhibitors)) def add_reaction_dec(self, decr_entity, 
decrementer, R, I): """Adds a macro/meta reaction for decreasing the value of incr_entity""" reactants, inhibitors, products = self.process_rip( R, I, [], ignore_empty_R=True) decr_entity_id = self.get_entity_id(decr_entity) self.meta_reactions.setdefault(decr_entity_id, []) self.meta_reactions[decr_entity_id].append( ("dec", self.get_entity_id(decrementer), reactants, inhibitors)) def add_permanency(self, ent, I): """Sets entity to be permanent unless it is inhibited""" ent_id = self.get_entity_id(ent) if ent_id in self.permanent_entities: raise RuntimeError( "Permanency for {0} already defined.".format(ent)) inhibitors = self.process_rip([], I, [], ignore_empty_R=True)[1] self.permanent_entities[ent_id] = inhibitors def set_context_entities(self, entities): raise NotImplementedError def entities_names_set_to_str(self, entities): s = "" for entity in entities: s += entity + ", " s = s[:-2] return s def entities_ids_set_to_str(self, entities): s = "" for entity in entities: s += self.get_entity_name(entity) + ", " s = s[:-2] return s def state_to_str(self, state): s = "" for ent, level in state: s += self.get_entity_name(ent) + "=" + str(level) + ", " s = s[:-2] return s def show_background_set(self): print( C_MARK_INFO + " Background set: {" + self.entities_names_set_to_str(self.background_set) + "}") def show_meta_reactions(self): print(C_MARK_INFO + " Meta reactions:") for param_ent, reactions in self.meta_reactions.items(): for r_type, command, reactants, inhibitors in reactions: if r_type == "inc" or r_type == "dec": print(" - [ Type=" + repr(r_type) + " Operand=( " + self.get_entity_name(param_ent) + " ) Command=( " + self.get_entity_name( command) + " ) ] -- ( R={" + self.state_to_str(reactants) + "}, I={" + self.state_to_str(inhibitors) + "} )") else: raise RuntimeError( "Unknown meta-reaction type: " + repr(r_type)) def show_max_concentrations(self): print( C_MARK_INFO + " Maximal allowed concentration levels (for optimized translation to RS):") for e, max_conc in self.max_conc_per_ent.items(): print(" - {0:^20} = {1:<6}".format(self.get_entity_name(e), max_conc)) def show_permanent_entities(self): print(C_MARK_INFO + " Permanent entities:") for e, inhibitors in self.permanent_entities.items(): print(" - {0:^20}{1:<6}".format(self.get_entity_name(e) + ": ", "I={" + self.state_to_str(inhibitors) + "}")) def show(self, soft=False): self.show_background_set() self.show_reactions(soft) self.show_permanent_entities() self.show_meta_reactions() self.show_max_concentrations() def get_reactions_by_product(self): """Sorts reactions by their products and returns a dictionary of products""" if self.reactions_by_prod != None: return self.reactions_by_prod producible_entities = set() for reaction in self.reactions: product_entities = [e for e, c in reaction[2]] producible_entities = producible_entities.union( set(product_entities)) reactions_by_prod = {} for p_e in producible_entities: reactions_by_prod[p_e] = [] rcts_for_p_e = reactions_by_prod[p_e] for r in self.reactions: product_entities = [e for e, c in r[2]] if p_e in product_entities: reactants = r[0] inhibitors = r[1] products = [(e, c) for e, c in r[2] if e == p_e] prod_conc = products[0][1] insert_place = None # we need to order the reactions w.r.t. 
the concentration levels produced (increasing order) for i in range(0, len(rcts_for_p_e)): checked_conc = rcts_for_p_e[i][2][0][1] if prod_conc <= checked_conc: insert_place = i break if insert_place == None: # empty or the is only one element which is smaller than the element being added # we append (to the end) rcts_for_p_e.append((reactants, inhibitors, products)) else: rcts_for_p_e.insert( insert_place, (reactants, inhibitors, products)) # save in cache self.reactions_by_prod = reactions_by_prod return reactions_by_prod def get_reaction_system(self): rs = ReactionSystem() for reactants, inhibitors, products in self.reactions: new_reactants = [] new_inhibitors = [] new_products = [] for ent, conc in reactants: n = self.get_entity_name(ent) + "#" + str(conc) rs.ensure_bg_set_entity(n) new_reactants.append(n) for ent, conc in inhibitors: n = self.get_entity_name(ent) + "#" + str(conc) rs.ensure_bg_set_entity(n) new_inhibitors.append(n) for ent, conc in products: for i in range(1, conc+1): n = self.get_entity_name(ent) + "#" + str(i) rs.ensure_bg_set_entity(n) new_products.append(n) rs.add_reaction(new_reactants, new_inhibitors, new_products) for param_ent, reactions in self.meta_reactions.items(): for r_type, command, reactants, inhibitors in reactions: param_ent_name = self.get_entity_name(param_ent) new_reactants = [] new_inhibitors = [] for ent, conc in reactants: n = self.get_entity_name(ent) + "#" + str(conc) rs.ensure_bg_set_entity(n) new_reactants.append(n) for ent, conc in inhibitors: n = self.get_entity_name(ent) + "#" + str(conc) rs.ensure_bg_set_entity(n) new_inhibitors.append(n) max_cmd_c = self.max_concentration if command in self.max_conc_per_ent: max_cmd_c = self.max_conc_per_ent[command] else: print( "WARNING:\n\tThere is no maximal concentration level defined for " + self.get_entity_name(command)) print("\tThis is a very bad idea -- expect degraded performance\n") for l in range(1, max_cmd_c+1): cmd_ent = self.get_entity_name(command) + "#" + str(l) rs.ensure_bg_set_entity(cmd_ent) if r_type == "inc": # pre_conc -- predecessor concentration # succ_conc -- successor concentration concentration for i in range(1, self.max_concentration): pre_conc = param_ent_name + "#" + str(i) rs.ensure_bg_set_entity(pre_conc) new_products = [] succ_value = i+l for j in range(1, succ_value+1): if j > self.max_concentration: break new_p = param_ent_name + "#" + str(j) rs.ensure_bg_set_entity(new_p) new_products.append(new_p) if new_products != []: rs.add_reaction( set(new_reactants + [pre_conc, cmd_ent]), set(new_inhibitors), set(new_products)) elif r_type == "dec": for i in range(1, self.max_concentration+1): pre_conc = param_ent_name + "#" + str(i) rs.ensure_bg_set_entity(pre_conc) new_products = [] succ_value = i-l for j in range(1, succ_value+1): if j > self.max_concentration: break new_p = param_ent_name + "#" + str(j) rs.ensure_bg_set_entity(new_p) new_products.append(new_p) if new_products != []: rs.add_reaction( set(new_reactants + [pre_conc, cmd_ent]), set(new_inhibitors), set(new_products)) else: raise RuntimeError( "Unknown meta-reaction type: " + repr(r_type)) for ent, inhibitors in self.permanent_entities.items(): max_c = self.max_concentration if ent in self.max_conc_per_ent: max_c = self.max_conc_per_ent[ent] else: print( "WARNING:\n\tThere is no maximal concentration level defined for " + self.get_entity_name(ent)) print("\tThis is a very bad idea -- expect degraded performance\n") def e_value(i): return self.get_entity_name(ent) + "#" + str(i) for value in range(1, max_c+1): 
new_reactants = [] new_inhibitors = [] new_products = [] new_reactants = [e_value(value)] for e_inh, conc in inhibitors: n = self.get_entity_name(e_inh) + "#" + str(conc) rs.ensure_bg_set_entity(n) new_inhibitors.append(n) for i in range(1, value+1): new_products.append(e_value(i)) rs.add_reaction(new_reactants, new_inhibitors, new_products) return rs class ReactionSystemWithAutomaton(object): def __init__(self, reaction_system, context_automaton): self.rs = reaction_system self.ca = context_automaton def show(self, soft=False): self.rs.show(soft) self.ca.show() def is_with_concentrations(self): if not isinstance(self.rs, ReactionSystemWithConcentrations): return False if not isinstance(self.ca, ContextAutomatonWithConcentrations): return False return True def sanity_check(self): pass def get_ordinary_reaction_system_with_automaton(self): if not self.is_with_concentrations(): raise RuntimeError("Not RS/CA with concentrations") ors = self.rs.get_reaction_system() oca = self.ca.get_automaton_with_flat_contexts(ors) return ReactionSystemWithAutomaton(ors, oca) # EOF
[ "rs.reaction_system.ReactionSystem", "sys.exit" ]
[((1936, 1943), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (1940, 1943), False, 'from sys import exit\n'), ((9620, 9636), 'rs.reaction_system.ReactionSystem', 'ReactionSystem', ([], {}), '()\n', (9634, 9636), False, 'from rs.reaction_system import ReactionSystem\n')]
# Copyright <NAME> 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) from __future__ import print_function ''' >>> from iterator_ext import * >>> from input_iterator import * >>> x = list_int() >>> x.push_back(1) >>> x.back() 1 >>> x.push_back(3) >>> x.push_back(5) >>> for y in x: ... print(y) 1 3 5 >>> z = range(x) >>> for y in z: ... print(y) 1 3 5 Range2 wraps a transform_iterator which doubles the elements it traverses. This proves we can wrap input iterators >>> z2 = range2(x) >>> for y in z2: ... print(y) 2 6 10 >>> l2 = two_lists() >>> for y in l2.primes: ... print(y) 2 3 5 7 11 13 >>> for y in l2.evens: ... print(y) 2 4 6 8 10 12 >>> ll = list_list() >>> ll.push_back(x) >>> x.push_back(7) >>> ll.push_back(x) >>> for a in ll: #doctest: +NORMALIZE_WHITESPACE ... for b in a: ... print(b, end='') ... print('') ... 1 3 5 1 3 5 7 ''' def run(args = None): import sys import doctest if args is not None: sys.argv = args return doctest.testmod(sys.modules.get(__name__)) if __name__ == '__main__': print("running...") import sys status = run()[0] if (status == 0): print("Done.") sys.exit(status)
[ "sys.modules.get", "sys.exit" ]
[((1297, 1313), 'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (1305, 1313), False, 'import sys\n'), ((1136, 1161), 'sys.modules.get', 'sys.modules.get', (['__name__'], {}), '(__name__)\n', (1151, 1161), False, 'import sys\n')]
# -*- Python -*- # license # license. # ====================================================================== """Looks name up in the [geonames database](http://www.geonames.org/). [GeoNames Search Webservice API](http://www.geonames.org/export/geonames-search.html) """ import sys, os, urllib.request, json, time from pathlib import Path import logging; module_logger = logging.getLogger(__name__) from .utilities import is_chinese # ====================================================================== def geonames(name): if not name: return name if is_chinese(name): r = _lookup_chinese(name=name) else: r = _lookup("search", isNameRequired="true", name=name) return r # ---------------------------------------------------------------------- def _lookup(feature, **args): def make(entry): if entry.get("fcl") in ["A", "P"]: return { # "local_name": entry[], "name": entry["toponymName"], "province": entry["adminName1"], "country": entry["countryName"], "latitude": entry["lat"], "longitude": entry["lng"], } else: return None return _get(feature, make, args) # ---------------------------------------------------------------------- def _get(feature, result_maker, args): args.update({"username": "acorg", "type": "json"}) url = "http://api.geonames.org/{}?{}".format(feature, urllib.parse.urlencode(args)) # module_logger.debug('_lookup {!r}'.format(url)) while True: rj = json.loads(urllib.request.urlopen(url=url).read().decode("utf-8")) try: return [e2 for e2 in (result_maker(e1) for e1 in rj["geonames"]) if e2] except KeyError: if "the hourly limit of" in rj.get("status", {}).get("message"): print(f"WARNING: {rj['status']['message']}", file=sys.stderr) seconds_to_wait = 120 print(f"WARNING: about to wait {seconds_to_wait} seconds", file=sys.stderr) time.sleep(seconds_to_wait) else: print(f"ERROR: {rj}", file=sys.stderr) raise RuntimeError(str(rj)) except Exception as err: print(f"ERROR: {rj}: {err}", file=sys.stderr) raise RuntimeError(f"{rj}: {err}") # ---------------------------------------------------------------------- def _lookup_chinese(name): if len(name) > 3: r = [] if provinces := _find_chinese_province(name): province = provinces[0] county = _find_chinese_county(name, province); if county: r = [{ "local_name": name, "name": _make_chinese_name(province, county), "province": _make_province_name(province), "country": province["countryName"], "latitude": county["lat"], "longitude": county["lng"], }] else: def make(entry): province_name = _make_province_name(entry) return { "local_name": name, "name": province_name, "province": province_name, "country": entry["countryName"], "latitude": entry["lat"], "longitude": entry["lng"], } r = [make(e) for e in _find_chinese_province(name)] return r # ---------------------------------------------------------------------- def _find_chinese_province(name): r = _get("search", lambda e: e if e["name"] == name[:2] else None, {"isNameRequired": "true", "name_startsWith": name[:2], "fclass": "A", "fcode": "ADM1", "lang": "cn"}) # module_logger.debug('name: {!r} : {!r}'.format(name[:2], r)) if not r: # Inner Mongolia is written using 3 Hanzi r = _get("search", lambda e: e if e["name"] == name[:3] else None, {"isNameRequired": "true", "name_startsWith": name[:3], "fclass": "A", "fcode": "ADM1", "lang": "cn"}) return r # ---------------------------------------------------------------------- def _make_province_name(entry): r = entry["toponymName"].upper() space_pos = r.find(' ', 6 if r[:6] == "INNER " else 0) if space_pos >= 0: r = r[:space_pos] return r; # 
---------------------------------------------------------------------- def _find_chinese_county(full_name, province): name = full_name[len(province["name"]):] r = _get("search", lambda e: e, {"isNameRequired": "true", "name_startsWith": name, "fclass": "A", "fcode": "ADM3", "adminCode1": province["adminCode1"], "lang": "cn"}) if not r: r = _get("search", lambda e: e, {"isNameRequired": "true", "name_startsWith": name, "adminCode1": province["adminCode1"], "lang": "cn"}) # module_logger.debug('_find_chinese_county {}'.format(r)) return r[0] if r else None # ---------------------------------------------------------------------- def _make_chinese_name(province, county): return _make_province_name(province) + " " + _make_county_name(county) # ---------------------------------------------------------------------- def _make_county_name(county): def remove_suffix(source, suffix): if source[-len(suffix):] == suffix: source = source[:-len(suffix)] return source def remove_apostrophe(source): return source.replace("’", "") r = county["toponymName"].upper() r1 = remove_suffix(r, " ZIZHIXIAN") if r1 != r: r = remove_suffix(r1, "ZU") else: for s in [" QU", " XIAN", " SHI"]: r2 = remove_suffix(r, s) if r2 != r: r = r2 break r = remove_apostrophe(r) return r # ====================================================================== ### Local Variables: ### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer)) ### End:
[ "logging.getLogger", "time.sleep" ]
[((374, 401), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (391, 401), False, 'import logging\n'), ((2104, 2131), 'time.sleep', 'time.sleep', (['seconds_to_wait'], {}), '(seconds_to_wait)\n', (2114, 2131), False, 'import sys, os, urllib.request, json, time\n')]
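A brief, hypothetical usage sketch for the lookup module above; the place name is an arbitrary example, and the helper is assumed to have been imported from its package so that geonames() is in scope. The dictionary keys follow the entries built in _lookup().

# Illustrative only: querying the GeoNames-backed lookup defined above.
for entry in geonames("heidelberg"):
    # each entry is a dict produced by make() inside _lookup():
    # name, province, country, latitude, longitude
    print(entry["name"], entry["province"], entry["country"],
          entry["latitude"], entry["longitude"])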
# Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.tools.mo.front.common.partial_infer.utils import mo_array from openvino.tools.mo.ops.proposal import ProposalOp from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs from openvino.tools.mo.front.extractor import FrontExtractorOp class ProposalFrontExtractor(FrontExtractorOp): op = 'Proposal' enabled = True @classmethod def extract(cls, node): proto_layer = node.pb param = proto_layer.proposal_param update_attrs = { 'feat_stride': param.feat_stride, 'base_size': param.base_size, 'min_size': param.min_size, 'ratio': mo_array(param.ratio), 'scale': mo_array(param.scale), 'pre_nms_topn': param.pre_nms_topn, 'post_nms_topn': param.post_nms_topn, 'nms_thresh': param.nms_thresh } mapping_rule = merge_attrs(param, update_attrs) # update the attributes of the node ProposalOp.update_node_stat(node, mapping_rule) return cls.enabled
[ "openvino.tools.mo.ops.proposal.ProposalOp.update_node_stat", "openvino.tools.mo.front.caffe.collect_attributes.merge_attrs", "openvino.tools.mo.front.common.partial_infer.utils.mo_array" ]
[((969, 1001), 'openvino.tools.mo.front.caffe.collect_attributes.merge_attrs', 'merge_attrs', (['param', 'update_attrs'], {}), '(param, update_attrs)\n', (980, 1001), False, 'from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs\n'), ((1054, 1101), 'openvino.tools.mo.ops.proposal.ProposalOp.update_node_stat', 'ProposalOp.update_node_stat', (['node', 'mapping_rule'], {}), '(node, mapping_rule)\n', (1081, 1101), False, 'from openvino.tools.mo.ops.proposal import ProposalOp\n'), ((727, 748), 'openvino.tools.mo.front.common.partial_infer.utils.mo_array', 'mo_array', (['param.ratio'], {}), '(param.ratio)\n', (735, 748), False, 'from openvino.tools.mo.front.common.partial_infer.utils import mo_array\n'), ((771, 792), 'openvino.tools.mo.front.common.partial_infer.utils.mo_array', 'mo_array', (['param.scale'], {}), '(param.scale)\n', (779, 792), False, 'from openvino.tools.mo.front.common.partial_infer.utils import mo_array\n')]
# -*- coding: utf-8 -*- # mk42 # mk42/apps/users/migrations/0003_auto_20170614_0038.py # Generated by Django 1.11.2 on 2017-06-14 00:38 from __future__ import unicode_literals from django.db import ( migrations, models, ) class Migration(migrations.Migration): dependencies = [ ("users", "0002_auto_20170613_2124"), ] operations = [ migrations.AlterField( model_name="user", name="language", field=models.CharField(choices=[("en", "English"), ("uk", "\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")], default="en", max_length=5, verbose_name="language"), ), ]
[ "django.db.models.CharField" ]
[((478, 603), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('en', 'English'), ('uk', 'Українська')]", 'default': '"""en"""', 'max_length': '(5)', 'verbose_name': '"""language"""'}), "(choices=[('en', 'English'), ('uk', 'Українська')], default\n ='en', max_length=5, verbose_name='language')\n", (494, 603), False, 'from django.db import migrations, models\n')]
import pickle import pandas as pd import torch import torch.nn as nn import torchvision.transforms as T from torch.utils import data from torch.utils.data import random_split from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from datasets.celeba import CelebA1000 from datasets.facescrub import FaceScrub from datasets.stanford_dogs import StanfordDogs def get_normalization(): normalization = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) return normalization def get_train_val_split(data, split_ratio, seed=0): validation_set_length = int(split_ratio * len(data)) training_set_length = len(data) - validation_set_length torch.manual_seed(seed) training_set, validation_set = random_split( data, [training_set_length, validation_set_length]) return training_set, validation_set def get_subsampled_dataset(dataset, dataset_size=None, proportion=None, seed=0): if dataset_size > len(dataset): raise ValueError( 'Dataset size is smaller than specified subsample size') if dataset_size is None: if proportion is None: raise ValueError('Neither dataset_size nor proportion specified') else: dataset_size = int(proportion * len(dataset)) torch.manual_seed(seed) subsample, _ = random_split( dataset, [dataset_size, len(dataset) - dataset_size]) return subsample def get_facescrub_idx_to_class(): with open('utils/files/facescrub_idx_to_class.pkl', 'rb') as f: idx_to_class = pickle.load(f) return idx_to_class def get_facescrub_class_to_idx(): with open('utils/files/facescrub_class_to_idx.pkl', 'rb') as f: class_to_idx = pickle.load(f) return class_to_idx def get_celeba_idx_to_attr(list_attr_file='data/celeba/list_attr_celeba.txt'): file = pd.read_csv(list_attr_file) attributes = file.iloc[0].tolist()[0].split(' ')[:-1] attr_dict = {idx: attributes[idx] for idx in range(len(attributes))} return attr_dict def get_celeba_attr_to_idx(list_attr_file='data/celeba/list_attr_celeba.txt'): file = pd.read_csv(list_attr_file) attributes = file.iloc[0].tolist()[0].split(' ')[:-1] attr_dict = {attributes[idx]: idx for idx in range(len(attributes))} return attr_dict def get_stanford_dogs_idx_to_class(): with open('utils/files/stanford_dogs_idx_to_class.pkl', 'rb') as f: idx_to_class = pickle.load(f) return idx_to_class def get_stanford_dogs_class_to_idx(): with open('utils/files/stanford_dogs_class_to_idx.pkl', 'rb') as f: class_to_idx = pickle.load(f) return class_to_idx def create_target_dataset(dataset_name, transform): if dataset_name.lower() == 'facescrub': return FaceScrub(group='all', train=True, transform=transform) elif dataset_name.lower() == 'celeba_identities': return CelebA1000(train=True, transform=transform) elif 'stanford_dogs' in dataset_name.lower(): return StanfordDogs(train=True, cropped=True, transform=transform) else: print(f'{dataset_name} is no valid dataset.')
[ "torch.manual_seed", "pandas.read_csv", "torch.utils.data.random_split", "datasets.facescrub.FaceScrub", "pickle.load", "torchvision.transforms.Normalize", "datasets.celeba.CelebA1000", "datasets.stanford_dogs.StanfordDogs" ]
[((448, 502), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (459, 502), True, 'import torchvision.transforms as T\n'), ((703, 726), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (720, 726), False, 'import torch\n'), ((762, 826), 'torch.utils.data.random_split', 'random_split', (['data', '[training_set_length, validation_set_length]'], {}), '(data, [training_set_length, validation_set_length])\n', (774, 826), False, 'from torch.utils.data import random_split\n'), ((1386, 1409), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1403, 1409), False, 'import torch\n'), ((1950, 1977), 'pandas.read_csv', 'pd.read_csv', (['list_attr_file'], {}), '(list_attr_file)\n', (1961, 1977), True, 'import pandas as pd\n'), ((2222, 2249), 'pandas.read_csv', 'pd.read_csv', (['list_attr_file'], {}), '(list_attr_file)\n', (2233, 2249), True, 'import pandas as pd\n'), ((1653, 1667), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1664, 1667), False, 'import pickle\n'), ((1819, 1833), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1830, 1833), False, 'import pickle\n'), ((2537, 2551), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2548, 2551), False, 'import pickle\n'), ((2711, 2725), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2722, 2725), False, 'import pickle\n'), ((2863, 2918), 'datasets.facescrub.FaceScrub', 'FaceScrub', ([], {'group': '"""all"""', 'train': '(True)', 'transform': 'transform'}), "(group='all', train=True, transform=transform)\n", (2872, 2918), False, 'from datasets.facescrub import FaceScrub\n'), ((3038, 3081), 'datasets.celeba.CelebA1000', 'CelebA1000', ([], {'train': '(True)', 'transform': 'transform'}), '(train=True, transform=transform)\n', (3048, 3081), False, 'from datasets.celeba import CelebA1000\n'), ((3147, 3206), 'datasets.stanford_dogs.StanfordDogs', 'StanfordDogs', ([], {'train': '(True)', 'cropped': '(True)', 'transform': 'transform'}), '(train=True, cropped=True, transform=transform)\n', (3159, 3206), False, 'from datasets.stanford_dogs import StanfordDogs\n')]
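A small, hypothetical usage sketch of the split helpers defined above, on a toy TensorDataset; the sizes, ratio and seed are arbitrary, and the helpers are assumed to be importable from the module shown.

# Illustrative only: exercising get_train_val_split / get_subsampled_dataset on dummy data.
import torch
from torch.utils.data import TensorDataset

toy = TensorDataset(torch.randn(100, 3), torch.zeros(100, dtype=torch.long))
train_set, val_set = get_train_val_split(toy, split_ratio=0.2, seed=42)   # 80 / 20 split
subset = get_subsampled_dataset(train_set, dataset_size=32, seed=42)      # reproducible subsample
print(len(train_set), len(val_set), len(subset))                          # -> 80 20 32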
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7 from KratosMultiphysics import * from KratosMultiphysics.IncompressibleFluidApplication import * from KratosMultiphysics.FluidDynamicsApplication import * from KratosMultiphysics.ExternalSolversApplication import * from KratosMultiphysics.MeshingApplication import * import KratosMultiphysics.MappingApplication as KratosMapping # In this example two domains are solved, a coarse background mesh and a fine mesh around # an obstacle. The fine domain receives the values from the coarse domain as input on it's boundary ###################################################################################### ###################################################################################### ###################################################################################### ##PARSING THE PARAMETERS #import define_output parameter_file_background = open("ProjectParameters_Background.json",'r') Projectparameters_BG = Parameters( parameter_file_background.read()) parameter_file_bodyfitted = open("ProjectParameters_BodyFitted.json",'r') Projectparameters_BF = Parameters( parameter_file_bodyfitted.read()) ## Fluid model part definition main_model_part_bg = ModelPart(Projectparameters_BG["problem_data"]["model_part_name"].GetString()) main_model_part_bg.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BG["problem_data"]["domain_size"].GetInt()) main_model_part_bf = ModelPart(Projectparameters_BF["problem_data"]["model_part_name"].GetString()) main_model_part_bf.ProcessInfo.SetValue(DOMAIN_SIZE, Projectparameters_BF["problem_data"]["domain_size"].GetInt()) ###TODO replace this "model" for real one once available Model_BG = {Projectparameters_BG["problem_data"]["model_part_name"].GetString() : main_model_part_bg} Model_BF = {Projectparameters_BF["problem_data"]["model_part_name"].GetString() : main_model_part_bf} ## Solver construction solver_module = __import__(Projectparameters_BG["solver_settings"]["solver_type"].GetString()) solver_bg = solver_module.CreateSolver(main_model_part_bg, Projectparameters_BG["solver_settings"]) solver_bg.AddVariables() solver_module = __import__(Projectparameters_BF["solver_settings"]["solver_type"].GetString()) solver_bf = solver_module.CreateSolver(main_model_part_bf, Projectparameters_BF["solver_settings"]) solver_bf.AddVariables() ## Read the model - note that SetBufferSize is done here solver_bg.ImportModelPart() solver_bf.ImportModelPart() ## Add AddDofs solver_bg.AddDofs() solver_bf.AddDofs() ## Initialize GiD I/O from gid_output_process import GiDOutputProcess gid_output_bg = GiDOutputProcess(solver_bg.GetComputingModelPart(), Projectparameters_BG["problem_data"]["problem_name"].GetString() , Projectparameters_BG["output_configuration"]) gid_output_bg.ExecuteInitialize() gid_output_bf = GiDOutputProcess(solver_bf.GetComputingModelPart(), Projectparameters_BF["problem_data"]["problem_name"].GetString() , Projectparameters_BF["output_configuration"]) gid_output_bf.ExecuteInitialize() ##here all of the allocation of the strategies etc is done solver_bg.Initialize() solver_bf.Initialize() ##TODO: replace MODEL for the Kratos one ASAP ## Get the list of the skin submodel parts in the object Model for i in range(Projectparameters_BG["solver_settings"]["skin_parts"].size()): skin_part_name = Projectparameters_BG["solver_settings"]["skin_parts"][i].GetString() Model_BG.update({skin_part_name: 
main_model_part_bg.GetSubModelPart(skin_part_name)}) for i in range(Projectparameters_BF["solver_settings"]["skin_parts"].size()): skin_part_name = Projectparameters_BF["solver_settings"]["skin_parts"][i].GetString() Model_BF.update({skin_part_name: main_model_part_bf.GetSubModelPart(skin_part_name)}) ## Get the list of the initial conditions submodel parts in the object Model for i in range(Projectparameters_BF["initial_conditions_process_list"].size()): initial_cond_part_name = Projectparameters_BF["initial_conditions_process_list"][i]["Parameters"]["model_part_name"].GetString() Model_BF.update({initial_cond_part_name: main_model_part_bf.GetSubModelPart(initial_cond_part_name)}) ## Processes construction import process_factory # "list_of_processes_bg" contains all the processes already constructed (boundary conditions, initial conditions and gravity) # Note that the conditions are firstly constructed. Otherwise, they may overwrite the BCs information. list_of_processes_bg = process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["initial_conditions_process_list"] ) list_of_processes_bg += process_factory.KratosProcessFactory(Model_BG).ConstructListOfProcesses( Projectparameters_BG["boundary_conditions_process_list"] ) list_of_processes_bf = process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["initial_conditions_process_list"] ) list_of_processes_bf += process_factory.KratosProcessFactory(Model_BF).ConstructListOfProcesses( Projectparameters_BF["boundary_conditions_process_list"] ) ## Processes initialization for process in list_of_processes_bg: process.ExecuteInitialize() for process in list_of_processes_bf: process.ExecuteInitialize() # Mapper initialization mapper_settings_file = open("MapperSettings.json",'r') Projectparameters_Mapper = Parameters( mapper_settings_file.read())["mapper_settings"] inlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg, main_model_part_bf, Projectparameters_Mapper[0]) sides_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg, main_model_part_bf, Projectparameters_Mapper[1]) outlet_mapper = KratosMapping.MapperFactory.CreateMapper(main_model_part_bg, main_model_part_bf, Projectparameters_Mapper[2]) ## Stepping and time settings Dt = Projectparameters_BG["problem_data"]["time_step"].GetDouble() end_time = Projectparameters_BG["problem_data"]["end_time"].GetDouble() time = 0.0 step = 0 out = 0.0 gid_output_bg.ExecuteBeforeSolutionLoop() gid_output_bf.ExecuteBeforeSolutionLoop() for process in list_of_processes_bg: process.ExecuteBeforeSolutionLoop() for process in list_of_processes_bf: process.ExecuteBeforeSolutionLoop() while(time <= end_time): time = time + Dt step = step + 1 main_model_part_bg.CloneTimeStep(time) main_model_part_bf.CloneTimeStep(time) print("STEP = ", step) print("TIME = ", time) if(step >= 3): for process in list_of_processes_bg: process.ExecuteInitializeSolutionStep() for process in list_of_processes_bf: process.ExecuteInitializeSolutionStep() gid_output_bg.ExecuteInitializeSolutionStep() gid_output_bf.ExecuteInitializeSolutionStep() solver_bg.Solve() inlet_mapper.Map(VELOCITY, VELOCITY) sides_mapper.Map(VELOCITY, VELOCITY) outlet_mapper.Map(VELOCITY, VELOCITY) solver_bf.Solve() for process in list_of_processes_bg: process.ExecuteFinalizeSolutionStep() for process in list_of_processes_bf: process.ExecuteFinalizeSolutionStep() gid_output_bg.ExecuteFinalizeSolutionStep() gid_output_bf.ExecuteFinalizeSolutionStep() #TODO: 
decide if it shall be done only when output is processed or not for process in list_of_processes_bg: process.ExecuteBeforeOutputStep() for process in list_of_processes_bf: process.ExecuteBeforeOutputStep() if gid_output_bg.IsOutputStep(): gid_output_bg.PrintOutput() gid_output_bf.PrintOutput() for process in list_of_processes_bg: process.ExecuteAfterOutputStep() for process in list_of_processes_bf: process.ExecuteAfterOutputStep() out = out + Dt for process in list_of_processes_bg: process.ExecuteFinalize() for process in list_of_processes_bf: process.ExecuteFinalize() gid_output_bg.ExecuteFinalize() gid_output_bf.ExecuteFinalize()
[ "KratosMultiphysics.MappingApplication.MapperFactory.CreateMapper", "process_factory.KratosProcessFactory" ]
[((5636, 5749), 'KratosMultiphysics.MappingApplication.MapperFactory.CreateMapper', 'KratosMapping.MapperFactory.CreateMapper', (['main_model_part_bg', 'main_model_part_bf', 'Projectparameters_Mapper[0]'], {}), '(main_model_part_bg,\n main_model_part_bf, Projectparameters_Mapper[0])\n', (5676, 5749), True, 'import KratosMultiphysics.MappingApplication as KratosMapping\n'), ((5850, 5963), 'KratosMultiphysics.MappingApplication.MapperFactory.CreateMapper', 'KratosMapping.MapperFactory.CreateMapper', (['main_model_part_bg', 'main_model_part_bf', 'Projectparameters_Mapper[1]'], {}), '(main_model_part_bg,\n main_model_part_bf, Projectparameters_Mapper[1])\n', (5890, 5963), True, 'import KratosMultiphysics.MappingApplication as KratosMapping\n'), ((6065, 6178), 'KratosMultiphysics.MappingApplication.MapperFactory.CreateMapper', 'KratosMapping.MapperFactory.CreateMapper', (['main_model_part_bg', 'main_model_part_bf', 'Projectparameters_Mapper[2]'], {}), '(main_model_part_bg,\n main_model_part_bf, Projectparameters_Mapper[2])\n', (6105, 6178), True, 'import KratosMultiphysics.MappingApplication as KratosMapping\n'), ((4683, 4729), 'process_factory.KratosProcessFactory', 'process_factory.KratosProcessFactory', (['Model_BG'], {}), '(Model_BG)\n', (4719, 4729), False, 'import process_factory\n'), ((4838, 4884), 'process_factory.KratosProcessFactory', 'process_factory.KratosProcessFactory', (['Model_BG'], {}), '(Model_BG)\n', (4874, 4884), False, 'import process_factory\n'), ((4994, 5040), 'process_factory.KratosProcessFactory', 'process_factory.KratosProcessFactory', (['Model_BF'], {}), '(Model_BF)\n', (5030, 5040), False, 'import process_factory\n'), ((5149, 5195), 'process_factory.KratosProcessFactory', 'process_factory.KratosProcessFactory', (['Model_BF'], {}), '(Model_BF)\n', (5185, 5195), False, 'import process_factory\n')]
""" 提供几种常用的控制器。 这些验证器通常需要提供一些参数进行一次调用,返回的结果才是真正的验证器,其中的技巧在于通过闭包使要控制的对象能够被内部函数访问。 版本: 1.3.0+ """ import re from nonebot import CommandSession from nonebot.helpers import render_expression def handle_cancellation(session: CommandSession): """ 在用户发送 `算了`、`不用了`、`取消吧`、`停` 之类的话的时候,结束当前传入的命令会话(调用 `session.finish()`),并发送配置项 `SESSION_CANCEL_EXPRESSION` 所填的内容。 如果不是上述取消指令,则将输入原样输出。 参数: session: 要控制的命令会话 """ def control(value): if _is_cancellation(value) is True: session.finish( render_expression(session.bot.config.SESSION_CANCEL_EXPRESSION)) return value return control def _is_cancellation(sentence: str) -> bool: for kw in ('算', '别', '不', '停', '取消'): if kw in sentence: # a keyword matches break else: # no keyword matches return False if re.match(r'^那?[算别不停]\w{0,3}了?吧?$', sentence) or \ re.match(r'^那?(?:[给帮]我)?取消了?吧?$', sentence): return True return False __all__ = [ 'handle_cancellation', ]
[ "re.match", "nonebot.helpers.render_expression" ]
[((888, 932), 're.match', 're.match', (['"""^那?[算别不停]\\\\w{0,3}了?吧?$"""', 'sentence'], {}), "('^那?[算别不停]\\\\w{0,3}了?吧?$', sentence)\n", (896, 932), False, 'import re\n'), ((950, 992), 're.match', 're.match', (['"""^那?(?:[给帮]我)?取消了?吧?$"""', 'sentence'], {}), "('^那?(?:[给帮]我)?取消了?吧?$', sentence)\n", (958, 992), False, 'import re\n'), ((548, 611), 'nonebot.helpers.render_expression', 'render_expression', (['session.bot.config.SESSION_CANCEL_EXPRESSION'], {}), '(session.bot.config.SESSION_CANCEL_EXPRESSION)\n', (565, 611), False, 'from nonebot.helpers import render_expression\n')]
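The record above only defines the controller factory; it does not show how the returned closure is wired into a command. Below is a minimal, hypothetical usage sketch. It assumes nonebot 1.3+ where `CommandSession.get` accepts an `arg_filters` argument and the module above is importable as `nonebot.command.argfilter.controllers`; the `weather` command and the `city` key are illustrative inventions, not part of the original source.

from nonebot import on_command, CommandSession
from nonebot.command.argfilter import controllers  # the module shown in the record above


@on_command('weather')
async def weather(session: CommandSession):
    # handle_cancellation(session) returns the real filter via a closure, so the
    # filter can call session.finish() when the user asks to cancel mid-dialog.
    city = session.get(
        'city',
        prompt='Which city?',
        arg_filters=[controllers.handle_cancellation(session)],
    )
    await session.send('Weather for {}...'.format(city))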
import argparse import numpy as np import glob import re from log import print_to_file from scipy.fftpack import fftn, ifftn from skimage.feature import peak_local_max, canny from skimage.transform import hough_circle import pickle as pickle from paths import TRAIN_DATA_PATH, LOGS_PATH, PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH from paths import TEST_DATA_PATH def orthogonal_projection_on_slice(percentual_coordinate, source_metadata, target_metadata): point = np.array([[percentual_coordinate[0]], [percentual_coordinate[1]], [0], [1]]) image_size = [source_metadata["Rows"], source_metadata["Columns"]] point = np.dot(np.array( [[image_size[0],0,0,0], [0,image_size[1],0,0], [0,0,0,0], [0,0,0,1]]), point) pixel_spacing = source_metadata["PixelSpacing"] point = np.dot(np.array( [[pixel_spacing[0],0,0,0], [0,pixel_spacing[1],0,0], [0,0,0,0], [0,0,0,1]]), point) Fa = np.array(source_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:] posa = source_metadata["ImagePositionPatient"] point = np.dot(np.array( [[Fa[0,0],Fa[1,0],0,posa[0]], [Fa[0,1],Fa[1,1],0,posa[1]], [Fa[0,2],Fa[1,2],0,posa[2]], [0,0,0,1]]), point) posb = target_metadata["ImagePositionPatient"] point = np.dot(np.array( [[1,0,0,-posb[0]], [0,1,0,-posb[1]], [0,0,1,-posb[2]], [0,0,0,1]]), point) Fb = np.array(target_metadata["ImageOrientationPatient"]).reshape( (2,3) )[::-1,:] ff0 = np.sqrt(np.sum(Fb[0,:]*Fb[0,:])) ff1 = np.sqrt(np.sum(Fb[1,:]*Fb[1,:])) point = np.dot(np.array( [[Fb[0,0]/ff0,Fb[0,1]/ff0,Fb[0,2]/ff0,0], [Fb[1,0]/ff1,Fb[1,1]/ff1,Fb[1,2]/ff1,0], [0,0,0,0], [0,0,0,1]]), point) pixel_spacing = target_metadata["PixelSpacing"] point = np.dot(np.array( [[1./pixel_spacing[0],0,0,0], [0,1./pixel_spacing[1],0,0], [0,0,0,0], [0,0,0,1]]), point) image_size = [target_metadata["Rows"], target_metadata["Columns"]] point = np.dot(np.array( [[1./image_size[0],0,0,0], [0,1./image_size[1],0,0], [0,0,0,0], [0,0,0,1]]), point) return point[:2,0] # percentual coordinate as well #joni minradius = 15 maxradius = 65 kernel_width = 5 center_margin = 8 num_peaks = 10 num_circles = 10 # 20 radstep = 2 #ira minradius_mm=25 maxradius_mm=45 kernel_width=5 center_margin=8 num_peaks=10 num_circles=20 radstep=2 def extract_roi(data, pixel_spacing, minradius_mm=15, maxradius_mm=65, kernel_width=5, center_margin=8, num_peaks=10, num_circles=10, radstep=2): """ Returns center and radii of ROI region in (i,j) format """ # radius of the smallest and largest circles in mm estimated from the train set # convert to pixel counts minradius = int(minradius_mm / pixel_spacing) maxradius = int(maxradius_mm / pixel_spacing) ximagesize = data[0]['data'].shape[1] yimagesize = data[0]['data'].shape[2] xsurface = np.tile(list(range(ximagesize)), (yimagesize, 1)).T ysurface = np.tile(list(range(yimagesize)), (ximagesize, 1)) lsurface = np.zeros((ximagesize, yimagesize)) allcenters = [] allaccums = [] allradii = [] for dslice in data: ff1 = fftn(dslice['data']) fh = np.absolute(ifftn(ff1[1, :, :])) fh[fh < 0.1 * np.max(fh)] = 0.0 image = 1. 
* fh / np.max(fh) # find hough circles and detect two radii edges = canny(image, sigma=3) hough_radii = np.arange(minradius, maxradius, radstep) hough_res = hough_circle(edges, hough_radii) if hough_res.any(): centers = [] accums = [] radii = [] for radius, h in zip(hough_radii, hough_res): # For each radius, extract num_peaks circles peaks = peak_local_max(h, num_peaks=num_peaks) centers.extend(peaks) accums.extend(h[peaks[:, 0], peaks[:, 1]]) radii.extend([radius] * num_peaks) # Keep the most prominent num_circles circles sorted_circles_idxs = np.argsort(accums)[::-1][:num_circles] for idx in sorted_circles_idxs: center_x, center_y = centers[idx] allcenters.append(centers[idx]) allradii.append(radii[idx]) allaccums.append(accums[idx]) brightness = accums[idx] lsurface = lsurface + brightness * np.exp( -((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2) lsurface = lsurface / lsurface.max() # select most likely ROI center roi_center = np.unravel_index(lsurface.argmax(), lsurface.shape) # determine ROI radius roi_x_radius = 0 roi_y_radius = 0 for idx in range(len(allcenters)): xshift = np.abs(allcenters[idx][0] - roi_center[0]) yshift = np.abs(allcenters[idx][1] - roi_center[1]) if (xshift <= center_margin) & (yshift <= center_margin): roi_x_radius = np.max((roi_x_radius, allradii[idx] + xshift)) roi_y_radius = np.max((roi_y_radius, allradii[idx] + yshift)) if roi_x_radius > 0 and roi_y_radius > 0: roi_radii = roi_x_radius, roi_y_radius else: roi_radii = None return roi_center, roi_radii def read_slice(path): return pickle.load(open(path))['data'] def read_metadata(path): d = pickle.load(open(path))['metadata'][0] metadata = {k: d[k] for k in ['PixelSpacing', 'ImageOrientationPatient', 'ImagePositionPatient', 'SliceLocation', 'PatientSex', 'PatientAge', 'Rows', 'Columns']} metadata['PixelSpacing'] = np.float32(metadata['PixelSpacing']) metadata['ImageOrientationPatient'] = np.float32(metadata['ImageOrientationPatient']) metadata['SliceLocation'] = np.float32(metadata['SliceLocation']) metadata['ImagePositionPatient'] = np.float32(metadata['ImagePositionPatient']) metadata['PatientSex'] = 1 if metadata['PatientSex'] == 'F' else 0 metadata['PatientAge'] = int(metadata['PatientAge'][1:3]) metadata['Rows'] = int(metadata['Rows']) metadata['Columns'] = int(metadata['Columns']) return metadata def get_patient_data(patient_data_path): patient_data = [] spaths = sorted(glob.glob(patient_data_path + '/sax_*.pkl'), key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1))) pid = re.search(r'/(\d+)/study$', patient_data_path).group(1) for s in spaths: slice_id = re.search(r'/(sax_\d+\.pkl)$', s).group(1) metadata = read_metadata(s) d = read_slice(s) patient_data.append({'data': d, 'metadata': metadata, 'slice_id': slice_id, 'patient_id': pid}) return patient_data def get_patient_ch_data(patient_data_path): patient_data = [] spaths = sorted(glob.glob(patient_data_path + '/*ch_*.pkl'), key=lambda x: int(re.search(r'/\w*_(\d+)*\.pkl$', x).group(1))) pid = re.search(r'/(\d+)/study$', patient_data_path).group(1) for s in spaths: slice_id = re.search(r'/(\d+ch_\d+\.pkl)$', s).group(1) metadata = read_metadata(s) d = read_slice(s) patient_data.append({'data': d, 'metadata': metadata, 'slice_id': slice_id, 'patient_id': pid}) return patient_data def sort_slices(slices): nslices = len(slices) positions = np.zeros((nslices,)) for i in range(nslices): positions[i] = slices[i]['metadata']['SliceLocation'] sorted_slices = [s for pos, s in sorted(zip(positions.tolist(), slices), 
key=lambda x: x[0], reverse=True)] return sorted_slices def group_slices(slice_stack): """ Groups slices into stacks with the same image orientation :param slice_stack: :return: list of slice stacks """ img_orientations = [] for s in slice_stack: img_orientations.append(tuple(s['metadata']['ImageOrientationPatient'])) img_orientations = list(set(img_orientations)) if len(img_orientations) == 1: return [slice_stack] else: slice_groups = [[] for _ in range(len(img_orientations))] for s in slice_stack: group = img_orientations.index(tuple(s['metadata']['ImageOrientationPatient'])) slice_groups[group].append(s) return slice_groups def plot_roi(slice_group, roi_center, roi_radii): x_roi_center, y_roi_center = roi_center[0], roi_center[1] x_roi_radius, y_roi_radius = roi_radii[0], roi_radii[1] print('nslices', len(slice_group)) for dslice in [slice_group[len(slice_group) / 2]]: outdata = dslice['data'] # print dslice['slice_id'] # print dslice['metadata']['SliceLocation'] # print dslice['metadata']['ImageOrientationPatient'] # print dslice['metadata']['PixelSpacing'] # print dslice['data'].shape # print '--------------------------------------' roi_mask = np.zeros_like(outdata[0]) roi_mask[x_roi_center - x_roi_radius:x_roi_center + x_roi_radius, y_roi_center - y_roi_radius:y_roi_center + y_roi_radius] = 1 outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5] outdata[:, roi_mask > 0.5] = 0.4 * outdata[:, roi_mask > 0.5] fig = plt.figure(1) fig.canvas.set_window_title(dslice['patient_id'] + dslice['slice_id']) def init_out(): im.set_data(outdata[0]) def animate_out(i): im.set_data(outdata[i]) return im im = fig.gca().imshow(outdata[0], cmap='gist_gray_r', vmin=0, vmax=255) anim = animation.FuncAnimation(fig, animate_out, init_func=init_out, frames=30, interval=50) plt.show() def get_slice2roi(data_path, plot=False): patient_paths = sorted(glob.glob(data_path + '*/study')) slice2roi = {} for p in patient_paths: patient_data = get_patient_data(p) sorted_slices = sort_slices(patient_data) grouped_slices = group_slices(sorted_slices) ch_data = get_patient_ch_data(p) ch4, ch2 = None,None for data in ch_data: if data['slice_id'].startswith("4"): ch4 = data elif data['slice_id'].startswith("2"): ch2 = data # init patient dict pid = sorted_slices[0]['patient_id'] print("processing patient %s" % pid) # print pid slice2roi[pid] = {} # pixel spacing doesn't change within one patient pixel_spacing = sorted_slices[0]['metadata']['PixelSpacing'][0] for slice_group in grouped_slices: try: roi_center, roi_radii = extract_roi(slice_group, pixel_spacing) except: print('Could not find ROI') roi_center, roi_radii = None, None print(roi_center, roi_radii) if plot and roi_center and roi_radii: pass #plot_roi(slice_group, roi_center, roi_radii) for s in slice_group: sid = s['slice_id'] slice2roi[pid][sid] = {'roi_center': roi_center, 'roi_radii': roi_radii} # project found roi_centers on the 4ch and 2ch slice ch4_centers = [] ch2_centers = [] for slice in sorted_slices: sid = slice['slice_id'] roi_center = slice2roi[pid][sid]['roi_center'] metadata_source = slice['metadata'] hough_roi_center = (float(roi_center[0]) / metadata_source['Rows'], float(roi_center[1]) / metadata_source['Columns']) if ch4 is not None: metadata_target = ch4['metadata'] result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target) ch_roi_center = [float(result[0]) * metadata_target['Rows'], float(result[1]) * metadata_target['Columns']] ch4_centers.append(ch_roi_center) if ch2 is not None: metadata_target = 
ch2['metadata'] result = orthogonal_projection_on_slice(hough_roi_center, metadata_source, metadata_target) ch_roi_center = [float(result[0]) * metadata_target['Rows'], float(result[1]) * metadata_target['Columns']] ch2_centers.append(ch_roi_center) if ch4 is not None: centers = np.array(ch4_centers) ch4_result_center = np.mean(centers, axis=0) ch4_result_radius = np.max(np.sqrt((centers - ch4_result_center)**2)) sid = ch4['slice_id'] slice2roi[pid][sid] = {'roi_center': tuple(ch4_result_center), 'roi_radii': (ch4_result_radius, ch4_result_radius)} if ch2 is not None: centers = np.array(ch2_centers) ch2_result_center = np.mean(centers, axis=0) ch2_result_radius = np.max(np.sqrt((centers - ch2_result_center)**2)) sid = ch2['slice_id'] slice2roi[pid][sid] = {'roi_center': tuple(ch2_result_center), 'roi_radii': (ch2_result_radius, ch2_result_radius)} filename = data_path.split('/')[-1] + '_slice2roi_joni.pkl' with open(filename, 'w') as f: pickle.dump(slice2roi, f) print('saved to ', filename) return slice2roi if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__) required = parser.add_argument_group('required arguments') #required.add_argument('-c', '--config', # help='configuration to run', # required=True) args = parser.parse_args() data_paths = [PKL_TRAIN_DATA_PATH, PKL_TEST_DATA_PATH] log_path = LOGS_PATH + "generate_roi.log" with print_to_file(log_path): for d in data_paths: get_slice2roi(d, plot=True) print("log saved to '%s'" % log_path)
[ "numpy.sqrt", "scipy.fftpack.fftn", "numpy.array", "numpy.argsort", "numpy.arange", "re.search", "numpy.mean", "argparse.ArgumentParser", "numpy.max", "numpy.exp", "log.print_to_file", "glob.glob", "numpy.abs", "numpy.float32", "pickle.dump", "numpy.sum", "numpy.zeros", "skimage.feature.canny", "skimage.transform.hough_circle", "scipy.fftpack.ifftn", "numpy.zeros_like", "skimage.feature.peak_local_max" ]
[((482, 558), 'numpy.array', 'np.array', (['[[percentual_coordinate[0]], [percentual_coordinate[1]], [0], [1]]'], {}), '([[percentual_coordinate[0]], [percentual_coordinate[1]], [0], [1]])\n', (490, 558), True, 'import numpy as np\n'), ((3780, 3814), 'numpy.zeros', 'np.zeros', (['(ximagesize, yimagesize)'], {}), '((ximagesize, yimagesize))\n', (3788, 3814), True, 'import numpy as np\n'), ((6458, 6494), 'numpy.float32', 'np.float32', (["metadata['PixelSpacing']"], {}), "(metadata['PixelSpacing'])\n", (6468, 6494), True, 'import numpy as np\n'), ((6538, 6585), 'numpy.float32', 'np.float32', (["metadata['ImageOrientationPatient']"], {}), "(metadata['ImageOrientationPatient'])\n", (6548, 6585), True, 'import numpy as np\n'), ((6619, 6656), 'numpy.float32', 'np.float32', (["metadata['SliceLocation']"], {}), "(metadata['SliceLocation'])\n", (6629, 6656), True, 'import numpy as np\n'), ((6697, 6741), 'numpy.float32', 'np.float32', (["metadata['ImagePositionPatient']"], {}), "(metadata['ImagePositionPatient'])\n", (6707, 6741), True, 'import numpy as np\n'), ((8271, 8291), 'numpy.zeros', 'np.zeros', (['(nslices,)'], {}), '((nslices,))\n', (8279, 8291), True, 'import numpy as np\n'), ((14462, 14506), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (14485, 14506), False, 'import argparse\n'), ((720, 814), 'numpy.array', 'np.array', (['[[image_size[0], 0, 0, 0], [0, image_size[1], 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]\n ]'], {}), '([[image_size[0], 0, 0, 0], [0, image_size[1], 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 1]])\n', (728, 814), True, 'import numpy as np\n'), ((978, 1079), 'numpy.array', 'np.array', (['[[pixel_spacing[0], 0, 0, 0], [0, pixel_spacing[1], 0, 0], [0, 0, 0, 0], [0,\n 0, 0, 1]]'], {}), '([[pixel_spacing[0], 0, 0, 0], [0, pixel_spacing[1], 0, 0], [0, 0, \n 0, 0], [0, 0, 0, 1]])\n', (986, 1079), True, 'import numpy as np\n'), ((1329, 1460), 'numpy.array', 'np.array', (['[[Fa[0, 0], Fa[1, 0], 0, posa[0]], [Fa[0, 1], Fa[1, 1], 0, posa[1]], [Fa[0,\n 2], Fa[1, 2], 0, posa[2]], [0, 0, 0, 1]]'], {}), '([[Fa[0, 0], Fa[1, 0], 0, posa[0]], [Fa[0, 1], Fa[1, 1], 0, posa[1]\n ], [Fa[0, 2], Fa[1, 2], 0, posa[2]], [0, 0, 0, 1]])\n', (1337, 1460), True, 'import numpy as np\n'), ((1616, 1707), 'numpy.array', 'np.array', (['[[1, 0, 0, -posb[0]], [0, 1, 0, -posb[1]], [0, 0, 1, -posb[2]], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, -posb[0]], [0, 1, 0, -posb[1]], [0, 0, 1, -posb[2]], [0,\n 0, 0, 1]])\n', (1624, 1707), True, 'import numpy as np\n'), ((1905, 1932), 'numpy.sum', 'np.sum', (['(Fb[0, :] * Fb[0, :])'], {}), '(Fb[0, :] * Fb[0, :])\n', (1911, 1932), True, 'import numpy as np\n'), ((1949, 1976), 'numpy.sum', 'np.sum', (['(Fb[1, :] * Fb[1, :])'], {}), '(Fb[1, :] * Fb[1, :])\n', (1955, 1976), True, 'import numpy as np\n'), ((1996, 2144), 'numpy.array', 'np.array', (['[[Fb[0, 0] / ff0, Fb[0, 1] / ff0, Fb[0, 2] / ff0, 0], [Fb[1, 0] / ff1, Fb[1,\n 1] / ff1, Fb[1, 2] / ff1, 0], [0, 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[Fb[0, 0] / ff0, Fb[0, 1] / ff0, Fb[0, 2] / ff0, 0], [Fb[1, 0] /\n ff1, Fb[1, 1] / ff1, Fb[1, 2] / ff1, 0], [0, 0, 0, 0], [0, 0, 0, 1]])\n', (2004, 2144), True, 'import numpy as np\n'), ((2290, 2402), 'numpy.array', 'np.array', (['[[1.0 / pixel_spacing[0], 0, 0, 0], [0, 1.0 / pixel_spacing[1], 0, 0], [0, \n 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[1.0 / pixel_spacing[0], 0, 0, 0], [0, 1.0 / pixel_spacing[1], 0,\n 0], [0, 0, 0, 0], [0, 0, 0, 1]])\n', (2298, 2402), True, 'import numpy as np\n'), ((2579, 2686), 'numpy.array', 'np.array', (['[[1.0 / 
image_size[0], 0, 0, 0], [0, 1.0 / image_size[1], 0, 0], [0, 0, 0, \n 0], [0, 0, 0, 1]]'], {}), '([[1.0 / image_size[0], 0, 0, 0], [0, 1.0 / image_size[1], 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 1]])\n', (2587, 2686), True, 'import numpy as np\n'), ((3919, 3939), 'scipy.fftpack.fftn', 'fftn', (["dslice['data']"], {}), "(dslice['data'])\n", (3923, 3939), False, 'from scipy.fftpack import fftn, ifftn\n'), ((4138, 4159), 'skimage.feature.canny', 'canny', (['image'], {'sigma': '(3)'}), '(image, sigma=3)\n', (4143, 4159), False, 'from skimage.feature import peak_local_max, canny\n'), ((4183, 4223), 'numpy.arange', 'np.arange', (['minradius', 'maxradius', 'radstep'], {}), '(minradius, maxradius, radstep)\n', (4192, 4223), True, 'import numpy as np\n'), ((4245, 4277), 'skimage.transform.hough_circle', 'hough_circle', (['edges', 'hough_radii'], {}), '(edges, hough_radii)\n', (4257, 4277), False, 'from skimage.transform import hough_circle\n'), ((5584, 5626), 'numpy.abs', 'np.abs', (['(allcenters[idx][0] - roi_center[0])'], {}), '(allcenters[idx][0] - roi_center[0])\n', (5590, 5626), True, 'import numpy as np\n'), ((5645, 5687), 'numpy.abs', 'np.abs', (['(allcenters[idx][1] - roi_center[1])'], {}), '(allcenters[idx][1] - roi_center[1])\n', (5651, 5687), True, 'import numpy as np\n'), ((7086, 7129), 'glob.glob', 'glob.glob', (["(patient_data_path + '/sax_*.pkl')"], {}), "(patient_data_path + '/sax_*.pkl')\n", (7095, 7129), False, 'import glob\n'), ((7685, 7728), 'glob.glob', 'glob.glob', (["(patient_data_path + '/*ch_*.pkl')"], {}), "(patient_data_path + '/*ch_*.pkl')\n", (7694, 7728), False, 'import glob\n'), ((9907, 9932), 'numpy.zeros_like', 'np.zeros_like', (['outdata[0]'], {}), '(outdata[0])\n', (9920, 9932), True, 'import numpy as np\n'), ((10768, 10800), 'glob.glob', 'glob.glob', (["(data_path + '*/study')"], {}), "(data_path + '*/study')\n", (10777, 10800), False, 'import glob\n'), ((14334, 14359), 'pickle.dump', 'pickle.dump', (['slice2roi', 'f'], {}), '(slice2roi, f)\n', (14345, 14359), True, 'import pickle as pickle\n'), ((14868, 14891), 'log.print_to_file', 'print_to_file', (['log_path'], {}), '(log_path)\n', (14881, 14891), False, 'from log import print_to_file\n'), ((3966, 3985), 'scipy.fftpack.ifftn', 'ifftn', (['ff1[1, :, :]'], {}), '(ff1[1, :, :])\n', (3971, 3985), False, 'from scipy.fftpack import fftn, ifftn\n'), ((4055, 4065), 'numpy.max', 'np.max', (['fh'], {}), '(fh)\n', (4061, 4065), True, 'import numpy as np\n'), ((5783, 5829), 'numpy.max', 'np.max', (['(roi_x_radius, allradii[idx] + xshift)'], {}), '((roi_x_radius, allradii[idx] + xshift))\n', (5789, 5829), True, 'import numpy as np\n'), ((5858, 5904), 'numpy.max', 'np.max', (['(roi_y_radius, allradii[idx] + yshift)'], {}), '((roi_y_radius, allradii[idx] + yshift))\n', (5864, 5904), True, 'import numpy as np\n'), ((7227, 7273), 're.search', 're.search', (['"""/(\\\\d+)/study$"""', 'patient_data_path'], {}), "('/(\\\\d+)/study$', patient_data_path)\n", (7236, 7273), False, 'import re\n'), ((7826, 7872), 're.search', 're.search', (['"""/(\\\\d+)/study$"""', 'patient_data_path'], {}), "('/(\\\\d+)/study$', patient_data_path)\n", (7835, 7872), False, 'import re\n'), ((13512, 13533), 'numpy.array', 'np.array', (['ch4_centers'], {}), '(ch4_centers)\n', (13520, 13533), True, 'import numpy as np\n'), ((13567, 13591), 'numpy.mean', 'np.mean', (['centers'], {'axis': '(0)'}), '(centers, axis=0)\n', (13574, 13591), True, 'import numpy as np\n'), ((13893, 13914), 'numpy.array', 'np.array', (['ch2_centers'], {}), '(ch2_centers)\n', (13901, 
13914), True, 'import numpy as np\n'), ((13948, 13972), 'numpy.mean', 'np.mean', (['centers'], {'axis': '(0)'}), '(centers, axis=0)\n', (13955, 13972), True, 'import numpy as np\n'), ((1179, 1231), 'numpy.array', 'np.array', (["source_metadata['ImageOrientationPatient']"], {}), "(source_metadata['ImageOrientationPatient'])\n", (1187, 1231), True, 'import numpy as np\n'), ((1808, 1860), 'numpy.array', 'np.array', (["target_metadata['ImageOrientationPatient']"], {}), "(target_metadata['ImageOrientationPatient'])\n", (1816, 1860), True, 'import numpy as np\n'), ((4532, 4570), 'skimage.feature.peak_local_max', 'peak_local_max', (['h'], {'num_peaks': 'num_peaks'}), '(h, num_peaks=num_peaks)\n', (4546, 4570), False, 'from skimage.feature import peak_local_max, canny\n'), ((7325, 7359), 're.search', 're.search', (['"""/(sax_\\\\d+\\\\.pkl)$"""', 's'], {}), "('/(sax_\\\\d+\\\\.pkl)$', s)\n", (7334, 7359), False, 'import re\n'), ((7924, 7961), 're.search', 're.search', (['"""/(\\\\d+ch_\\\\d+\\\\.pkl)$"""', 's'], {}), "('/(\\\\d+ch_\\\\d+\\\\.pkl)$', s)\n", (7933, 7961), False, 'import re\n'), ((13632, 13675), 'numpy.sqrt', 'np.sqrt', (['((centers - ch4_result_center) ** 2)'], {}), '((centers - ch4_result_center) ** 2)\n', (13639, 13675), True, 'import numpy as np\n'), ((14013, 14056), 'numpy.sqrt', 'np.sqrt', (['((centers - ch2_result_center) ** 2)'], {}), '((centers - ch2_result_center) ** 2)\n', (14020, 14056), True, 'import numpy as np\n'), ((4010, 4020), 'numpy.max', 'np.max', (['fh'], {}), '(fh)\n', (4016, 4020), True, 'import numpy as np\n'), ((4818, 4836), 'numpy.argsort', 'np.argsort', (['accums'], {}), '(accums)\n', (4828, 4836), True, 'import numpy as np\n'), ((5190, 5281), 'numpy.exp', 'np.exp', (['(-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / kernel_width ** 2\n )'], {}), '(-((xsurface - center_x) ** 2 + (ysurface - center_y) ** 2) / \n kernel_width ** 2)\n', (5196, 5281), True, 'import numpy as np\n'), ((7170, 7206), 're.search', 're.search', (['"""/\\\\w*_(\\\\d+)*\\\\.pkl$"""', 'x'], {}), "('/\\\\w*_(\\\\d+)*\\\\.pkl$', x)\n", (7179, 7206), False, 'import re\n'), ((7769, 7805), 're.search', 're.search', (['"""/\\\\w*_(\\\\d+)*\\\\.pkl$"""', 'x'], {}), "('/\\\\w*_(\\\\d+)*\\\\.pkl$', x)\n", (7778, 7805), False, 'import re\n')]
# -*- coding: utf-8 -*- """Main module.""" import os from google.cloud import bigquery from pbq.query import Query from google.cloud import bigquery_storage_v1beta1 from google.cloud.exceptions import NotFound from google.api_core.exceptions import BadRequest import pandas as pd import datetime class PBQ(object): """ bigquery driver using the google official API Attributes ------ query : str the query query_obj : Query pbq.Query object client : Client the client object for bigquery bqstorage_client : BigQueryStorageClient the google storage client object Methods ------ to_dataframe(save_query=False, **params) return the query results as data frame to_csv(filename, sep=',', save_query=False, **params) save the query results to a csv file save_to_table(table, dataset, project=None, replace=True, partition=None) save query to table run_query() simply execute your query table_details(table, dataset, project) get the information about the table Static Methods ------ save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0, replace=True, partition=None) save file to table, it can be partitioned and it can append to existing table. the supported formats are CSV or PARQUET save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True, partition=None) same as save file just with pandas dataframe table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference) check if table exists - if True - table exists else not exists Examples ------ getting query to dataframe >>> from pbq import Query, PBQ >>> query = Query("select * from table") >>> print("the query price:", query.price) >>> if not query.validate(): >>> raise RuntimeError("table not valid") >>> pbq = PBQ(query) >>> pbq.to_dataframe() saving query to csv >>> from pbq import Query, PBQ >>> query = Query("select * from table") >>> pbq = PBQ(query) >>> pbq.to_csv() saving dataframe to table >>> import pandas as pd >>> from pbq import Query, PBQ >>> df = pd.DataFrame() >>> PBQ.save_dataframe_to_table(df, 'table', 'dataset', 'project_id', partition='20191013', replace=False) """ def __init__(self, query: Query, project=None): """ bigquery driver using the google official API :param query: Query object :param project: str the BQ project """ self.query = query.query self.query_obj = query self.project = project if project: self.client = bigquery.Client(project=project) else: self.client = bigquery.Client() self.bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient() def to_dataframe(self, save_query=False, **params): """ return the query results as data frame in order to save the query to a table as well as getting the dataframe, send a dict as params with: - table - dataset it will save to the same project :param save_query: boolean if to save the query to a table also :param params: dict when `save_query` flag is on you need to give the relevant params :return: pd.DataFrame the query results """ job_config = bigquery.QueryJobConfig() if save_query: table_ref = self.client.dataset(params['dataset']).table(params['table']) job_config.destination = table_ref query_job = self.client.query(query=self.query, job_config=job_config) query_job_res = query_job.result() df = query_job_res.to_dataframe(bqstorage_client=self.bqstorage_client) return df def to_csv(self, filename, sep=',', save_query=False, **params): """ save the query results to a csv file in order to save the query to a table as well as getting the dataframe, send a dict as 
params with: - table - dataset it will save to the same project :param filename: str with the path to save the file :param sep: str separator to the csv file :param save_query: boolean if to save the query to a table also :param params: dict when `save_query` flag is on you need to give the relevant params """ df = self.to_dataframe(save_query, **params) df.to_csv(filename, sep=sep, index=False) def run_query(self): """ execute your query """ # Set the destination table client = self.client query_job = client.query(self.query) query_job.result() print('Done running your amazing query') def save_to_table(self, table, dataset, project=None, replace=True, partition=None): """ save query to table :param table: str table name :param dataset: str data set name :param project: str project name :param replace: boolean if set as true - it will replace the table, else append to table (default: True) :param partition: str partition format DDMMYYY (default: None) """ job_config = bigquery.QueryJobConfig() # Set the destination table client = self.client if partition: table = '{0}${1}'.format(table, partition) table_ref = client.dataset(dataset).table(table.split('$')[0]) exists_ok = PBQ._writing_disposition(job_config, replace) if project: table_ref = client.dataset(dataset, project=project).table(table) PBQ._create_table(client, exists_ok, partition, replace, table_ref) job_config.destination = table_ref query_job = client.query(self.query, job_config=job_config) query_job.result() print('Query results loaded to table {}'.format(table_ref.path)) @staticmethod def _writing_disposition(job_config: bigquery.QueryJobConfig, replace): exists_ok = False if replace: exists_ok = True job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE else: job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND return exists_ok @staticmethod def _create_table(client: bigquery.Client, exists_ok, partition, replace, table_ref): if (partition and not PBQ.table_exists(client, table_ref)) or (not partition and replace): bq_table = bigquery.Table(table_ref) if partition: time_partitioning = bigquery.TimePartitioning() bq_table.time_partitioning = time_partitioning client.create_table(bq_table, exists_ok=exists_ok) @staticmethod def save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0, replace=True, partition=None): """ save file to table, it can be partitioned and it can append to existing table. 
the supported formats are CSV or PARQUET :param filename: str with the path to save the file :param table: str table name :param dataset: str data set name :param project: str project name :param file_format: str possible file format (CSV, PARQUET) (default: CSV) :param max_bad_records: int number of bad records allowed in file (default: 0) :param replace: boolean if set as trueit will replace the table, else append to table (default: True) :param partition: str partition format DDMMYYY (default: None) """ client = bigquery.Client(project=project) dataset_ref = client.dataset(dataset) table_ref = dataset_ref.table(table) job_config = bigquery.LoadJobConfig() job_config.max_bad_records = max_bad_records job_config.source_format = file_format exists_ok = PBQ._writing_disposition(job_config, replace) if file_format == bigquery.SourceFormat.CSV: job_config.skip_leading_rows = 1 job_config.autodetect = True PBQ._create_table(client, exists_ok, partition, replace, table_ref) if not partition: with open(filename, "rb") as source_file: job = client.load_table_from_file(source_file, table_ref, job_config=job_config) job.result() # Waits for table load to complete. print("Loaded {} rows into {}:{}.".format(job.output_rows, dataset, table)) else: print('fallback loading by CMD command due to missing api feature for partition') table = '{0}${1}'.format(table, partition) cmd = "bq load" if replace: cmd = "{} --replace".format(cmd) cmd = "{cmd} --source_format={file_format} '{project}:{dataset}.{tbl_name}' {filename}". \ format(cmd=cmd, tbl_name=table, filename=filename, project=project, dataset=dataset, file_format=file_format) os.system(cmd) @staticmethod def save_dataframe_to_table(df: pd.DataFrame, table, dataset, project, max_bad_records=0, replace=True, partition=None, validate_params=False): """ save pd.DataFrame object to table :param df: pd.DataFrame the dataframe you want to save :param table: str table name :param dataset: str data set name :param project: str project name :param max_bad_records: int number of bad records allowed in file (default: 0) :param replace: boolean if set as true - it will replace the table, else append to table (default: True) :param partition: str partition format DDMMYYY (default: None) :param validate_params: boolean validate the schema of the table to the dataframe object (default: False) """ now = datetime.datetime.now() random_string = '{}'.format(now.strftime('%y%m%d%H%M%S')) input_path = "/tmp/tmp-{}.parquet".format(random_string) schema = None if validate_params: # because of the fallback it need to change to be as the schema table_details = PBQ.table_details(table, dataset, project) if 'schema' in table_details: schema = table_details['schema'] PBQ._save_df_to_parquet(df, input_path, schema=schema) PBQ.save_file_to_table(input_path, table, dataset, project, file_format=bigquery.SourceFormat.PARQUET, max_bad_records=max_bad_records, replace=replace, partition=partition) @staticmethod def _save_df_to_parquet(df, input_path, index=False, schema=None): if schema: for s in schema: if s['field_type'] == 'STRING': s['field_type'] = 'str' if s['field_type'] == 'INTEGER': s['field_type'] = 'int' if s['field_type'] == 'TIMESTAMP': df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce') continue if s['field_type'] == 'DATE': df[s['column']] = pd.to_datetime(df[s['column']], errors='coerce') df[s['column']] = df[s['column']].dt.date continue df.columns = ["{}".format(col) for col in df.columns] df.to_parquet(input_path, index=index) @staticmethod def 
table_details(table, dataset, project):
        """
        return a dict object with some details about the table

        :param table: str
            table name
        :param dataset: str
            data set name
        :param project: str
            project name
        :return: dict
            with table information such as last_modified_time, num_bytes, num_rows, creation_time and schema
        """
        client = bigquery.Client(project=project)
        dataset_ref = client.dataset(dataset, project=project)
        table_ref = dataset_ref.table(table)
        try:
            table = client.get_table(table_ref)
        except NotFound:
            # no such table - nothing to report
            return {}

        schema = []
        for s in table.schema:
            schema.append({'column': s.name, 'field_type': s.field_type})

        res = {'last_modified_time': table.modified, 'num_bytes': table.num_bytes, 'num_rows': table.num_rows,
               'creation_time': table.created, 'schema': schema}
        return res

    @staticmethod
    def table_exists(client: bigquery.Client, table_ref: bigquery.table.TableReference):
        """
        check whether the table exists - returns True if it exists, False otherwise

        :param client: bigquery.Client
            object
        :param table_ref: bigquery.table.TableReference
            object with the table name and dataset
        :return: boolean
            True if the table exists
            False if the table does not exist
        """
        try:
            table = client.get_table(table_ref)
            if table:
                return True
        except NotFound:
            return False
        except BadRequest:
            # a BadRequest is treated as the table existing
            return True
[ "google.cloud.bigquery.QueryJobConfig", "datetime.datetime.now", "google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient", "google.cloud.bigquery.LoadJobConfig", "google.cloud.bigquery.Table", "google.cloud.bigquery.TimePartitioning", "os.system", "google.cloud.bigquery.Client", "pandas.to_datetime" ]
[((3002, 3050), 'google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient', 'bigquery_storage_v1beta1.BigQueryStorageClient', ([], {}), '()\n', (3048, 3050), False, 'from google.cloud import bigquery_storage_v1beta1\n'), ((3634, 3659), 'google.cloud.bigquery.QueryJobConfig', 'bigquery.QueryJobConfig', ([], {}), '()\n', (3657, 3659), False, 'from google.cloud import bigquery\n'), ((5590, 5615), 'google.cloud.bigquery.QueryJobConfig', 'bigquery.QueryJobConfig', ([], {}), '()\n', (5613, 5615), False, 'from google.cloud import bigquery\n'), ((8141, 8173), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {'project': 'project'}), '(project=project)\n', (8156, 8173), False, 'from google.cloud import bigquery\n'), ((8287, 8311), 'google.cloud.bigquery.LoadJobConfig', 'bigquery.LoadJobConfig', ([], {}), '()\n', (8309, 8311), False, 'from google.cloud import bigquery\n'), ((10515, 10538), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10536, 10538), False, 'import datetime\n'), ((12544, 12576), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {'project': 'project'}), '(project=project)\n', (12559, 12576), False, 'from google.cloud import bigquery\n'), ((2879, 2911), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {'project': 'project'}), '(project=project)\n', (2894, 2911), False, 'from google.cloud import bigquery\n'), ((2952, 2969), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (2967, 2969), False, 'from google.cloud import bigquery\n'), ((6890, 6915), 'google.cloud.bigquery.Table', 'bigquery.Table', (['table_ref'], {}), '(table_ref)\n', (6904, 6915), False, 'from google.cloud import bigquery\n'), ((9548, 9562), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (9557, 9562), False, 'import os\n'), ((6978, 7005), 'google.cloud.bigquery.TimePartitioning', 'bigquery.TimePartitioning', ([], {}), '()\n', (7003, 7005), False, 'from google.cloud import bigquery\n'), ((11641, 11689), 'pandas.to_datetime', 'pd.to_datetime', (["df[s['column']]"], {'errors': '"""coerce"""'}), "(df[s['column']], errors='coerce')\n", (11655, 11689), True, 'import pandas as pd\n'), ((11820, 11868), 'pandas.to_datetime', 'pd.to_datetime', (["df[s['column']]"], {'errors': '"""coerce"""'}), "(df[s['column']], errors='coerce')\n", (11834, 11868), True, 'import pandas as pd\n')]
import re import types from functools import partial LITERAL_TYPE = types.StringTypes + (int, float, long, bool, ) class Spec(object): """ This object, when overridden with an object that implements a file format specification, will perform validation on a given parsed version of the format input. SPEC Node Documentation: ======================== expected_type: A type object whose type the object should match. required_nodes: A list of nodes that are required for the current node. required_nodes_when: A dict of node name/lambda pairs. If the lambda evaluates to True, a node whose name corresponds to the node name is required. The current node is passed as a parameter to the lambda as the only argument. disallowed_nodes: A list of nodes that explicitly are disallowed in the current node. allowed_once_nodes: A list of nodes that are allowed only once. allowed_nodes: A list of nodes that are allowed multiple times. unknown_node_level: The message type to return when an unknown node is encountered. child_nodes: A dict of node definitions for nodes that can exist within this node. max_length: For sequence values only. An integer describing the maximum length of the string. not_empty: A boolean value describing whether the string/list/dict can be empty. values: A list of possible values for the node. Only applies to lists and literal nodes. value_matches: If `values` is not set, the value must match this regex. Only applies to string nodes. process: A lambda function that returns a function to process the node. The lambda accepts one parameter (self) and should return a function that accepts two parameters (self, node). child_process: A lambda function (similar to `process` that returns a function to process a child node. The lambda accepts one parameter (self) and should return a function that accepts three parameters (self, node_name, node). If this is set, no further testing will take place on child nodes. """ SPEC_NAME = "Specification" MORE_INFO = "You can find more info online." SPEC = None def __init__(self, data, err): self.data = self.parse(data) self.err = err self.error = partial(self._err_message, self.err.error) self.warning = partial(self._err_message, self.err.warning) self.notice = partial(self._err_message, self.err.notice) self.err_map = {"error": self.error, "warning": self.warning, "notice": self.notice} self.path = [] def _err_message(self, func, *args, **kwargs): if self.path: nodepath = "Node: %s" % self._get_path() if isinstance(kwargs["description"], list): kwargs["description"].append(nodepath) else: kwargs["description"] = [ kwargs["description"], nodepath] func(*args, **kwargs) def _message(self, type_, *args, **kwargs): kwargs[type_] = kwargs.pop("message") self.err_map[type_](*args, **kwargs) def validate(self): # Validate the root node. root_name, root_node = self.get_root_node(self.data) root_val_result = self.validate_root_node(root_node) if root_val_result == False: return # Iterate the tree and validate as we go. self.iterate(root_name, root_node, self.SPEC) def parse(self, data): pass def validate_root_node(self, node): pass def get_root_node(self, data): """ We expect this function to return a tuple: ("Root Node Name", root_node) """ def has_attribute(self, node, key): pass def get_attribute(self, node, key): pass def has_child(self, node, child_name): pass def get_children(self, node): """ This function should return a list of (child_name, child)-form tuples. 
""" def iterate(self, branch_name, branch, spec_branch): self.path.append(branch_name) self._iterate(branch_name, branch, spec_branch) self.path.pop() def _get_path(self): return ' > '.join(self.path) def _iterate(self, branch_name, branch, spec_branch): """Iterate the tree of nodes and validate as we go.""" # Check that the node is of the proper type. If it isn't, then we need # to stop iterating at this point. exp_type = spec_branch.get("expected_type") if (exp_type and not isinstance(branch, exp_type) or # Handle `isinstance(True, int) == True` :( (isinstance(branch, bool) and (exp_type == int if isinstance(exp_type, type) else bool not in exp_type))): self.error( err_id=("spec", "iterate", "bad_type"), error="%s's `%s` was of an unexpected type." % (self.SPEC_NAME, branch_name), description=["While validating a %s, a `%s` was encountered " "which is of an improper type." % (self.SPEC_NAME, branch_name), "Found: %s" % repr(branch), self.MORE_INFO]) return # Handle any generic processing. if "process" in spec_branch: # Let the spec processor resolve the processor and then run the # processor. spec_branch["process"](self)(branch) if "not_empty" in spec_branch and not branch: self.error( err_id=("spec", "iterate", "empty"), error="`%s` is empty." % branch_name, description=["A value was expected for `%s`, but one wasn't " "found." % branch_name, self.MORE_INFO]) # If the node isn't an object... if not isinstance(branch, dict): if "values" in spec_branch and branch not in spec_branch["values"]: self.error( err_id=("spec", "iterate", "bad_value"), error="`%s` contains an invalid value in %s" % (branch_name, self.SPEC_NAME), description=["A `%s` was encountered while validating a " "`%s` containing the value '%s'. This value " "is not appropriate for this type of " "element." % (branch_name, self.SPEC_NAME, branch), self.MORE_INFO]) elif ("value_matches" in spec_branch and isinstance(branch, types.StringTypes)): raw_pattern = spec_branch["value_matches"] if not re.match(raw_pattern, branch): self.error( err_id=("spec", "iterate", "value_pattern_fail"), error="`%s` contains an invalid value in %s" % (branch_name, self.SPEC_NAME), description=["A `%s` was encountered while validating " "a `%s`. Its value does not match the " "pattern required for `%s`s." % (branch_name, self.SPEC_NAME, branch_name), "Found value: %s" % branch, "Pattern: %s" % raw_pattern, self.MORE_INFO]) if ("max_length" in spec_branch and len(branch) > spec_branch["max_length"]): self.error( err_id=("spec", "iterate", "max_length"), error="`%s` has exceeded its maximum length." % branch_name, description=["`%s` has a maximum length (%d), which has " "been exceeded (%d)." % (branch_name, spec_branch["max_length"], len(branch)), self.MORE_INFO]) # The rest of the tests are for child items. if not isinstance(branch, (list, tuple)): return if "child_nodes" in spec_branch: for child in branch: self.iterate(branch_name + " descendant", child, spec_branch["child_nodes"]) # We've got nothing else to do with lists. return # If we need to process the child nodes individually, do that now. if "child_process" in spec_branch: processor = spec_branch["child_process"](self) for child_name, child in self.get_children(branch): processor(child_name, child) # If there's nothing else to do, don't go down that path. if ("required_nodes" not in spec_branch and "required_nodes_when" not in spec_branch and "disallowed_nodes" not in spec_branch): return considered_nodes = set() # Check that all required node as present. 
if "required_nodes" in spec_branch: considered_nodes.update(spec_branch["required_nodes"]) for req_node in [n for n in spec_branch["required_nodes"] if not self.has_child(branch, n)]: self.error( err_id=("spec", "iterate", "missing_req"), error="%s expecting `%s`" % (self.SPEC_NAME, req_node), description=["The '%s' node of the %s expects a `%s` " "element, which was not found." % (branch_name, self.SPEC_NAME, req_node), self.MORE_INFO]) # Check that conditionally required nodes are present. if "required_nodes_when" in spec_branch: considered_nodes.update(spec_branch["required_nodes_when"].keys()) for req_node in [name for name, cond in spec_branch["required_nodes_when"].items() if cond(branch) and not self.has_child(branch, name)]: self.error( err_id=("spec", "iterate", "missing_req_cond"), error="%s expecting `%s`" % (self.SPEC_NAME, req_node), description=["The '%s' node, under the current " "circumstances, is missing a `%s` element. " "This is a required condition of a %s." % (branch_name, req_node, self.SPEC_NAME), self.MORE_INFO]) # Check that there are no disallowed nodes. if "disallowed_nodes" in spec_branch: disallowed_nodes = spec_branch["disallowed_nodes"] considered_nodes.update(disallowed_nodes) for dnode in [n for n in disallowed_nodes if self.has_child(branch, n)]: self.error( err_id=("spec", "iterate", "disallowed"), error="%s found `%s`, which is not allowed." % (self.SPEC_NAME, dnode), description=["The '%s' node contains `%s`, which is a " "disallowed element. It should be removed." % (branch_name, dnode), self.MORE_INFO]) if ("allowed_nodes" not in spec_branch and "allowed_once_nodes" not in spec_branch): return # Check that allowed nodes are obeyed. allowed_nodes = set(spec_branch.setdefault("allowed_nodes", [])) allowed_once_nodes = spec_branch.setdefault("allowed_once_nodes", []) allowed_nodes.update(allowed_once_nodes) child_node_specs = spec_branch.setdefault("child_nodes", {}) seen_nodes = set() warned_nodes = set() for child_name, child in self.get_children(branch): cspec_branch = None # Process the node first. if child_name in child_node_specs: cspec_branch = child_node_specs[child_name] elif "*" in child_node_specs: cspec_branch = child_node_specs["*"] if cspec_branch is not None: # If it's a lazily evaluated branch, evaluate it now. if isinstance(cspec_branch, types.LambdaType): cspec_branch = cspec_branch(self) # Iterate the node. self.iterate(child_name, child, cspec_branch) # If we've seen a node before that's only supposed to be seen a # single time, warn about it. if child_name in allowed_once_nodes and child_name in seen_nodes: # Don't warn about the same node multiple times. if child_name in warned_nodes: continue self.error( err_id=("spec", "iterate", "allow_once_multiple"), error="%s found `%s` more than once." % (self.SPEC_NAME, child_name), description=["%ss may only contain a single `%s` element, " "however, it was encountered multiple times." % (self.SPEC_NAME, child_name), self.MORE_INFO]) continue # Remember that we've seen this node. seen_nodes.add(child_name) if child_name in considered_nodes: continue # If the child isn't allowed, throw an error. if child_name not in allowed_nodes and "*" not in allowed_nodes: self._message( spec_branch.get("unknown_node_level", "warning"), err_id=("spec", "iterate", "not_allowed"), message="`%s` is not a recognized element within a %s" % (child_name, self.SPEC_NAME), description=["While iterating a %s, a `%s` was found " "within a %s, which is not valid." 
% (self.SPEC_NAME, child_name, branch_name), self.MORE_INFO])
[ "functools.partial", "re.match" ]
[((2456, 2498), 'functools.partial', 'partial', (['self._err_message', 'self.err.error'], {}), '(self._err_message, self.err.error)\n', (2463, 2498), False, 'from functools import partial\n'), ((2522, 2566), 'functools.partial', 'partial', (['self._err_message', 'self.err.warning'], {}), '(self._err_message, self.err.warning)\n', (2529, 2566), False, 'from functools import partial\n'), ((2589, 2632), 'functools.partial', 'partial', (['self._err_message', 'self.err.notice'], {}), '(self._err_message, self.err.notice)\n', (2596, 2632), False, 'from functools import partial\n'), ((7062, 7091), 're.match', 're.match', (['raw_pattern', 'branch'], {}), '(raw_pattern, branch)\n', (7070, 7091), False, 'import re\n')]
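The Spec record above documents the SPEC node attributes (expected_type, required_nodes, child_nodes, and so on) but never shows a concrete specification. The following is a minimal, hypothetical sketch of a subclass for an already-parsed dict format; the class name `ManifestSpec`, the SPEC contents and the trivial hook implementations are illustrative assumptions, not part of the original source, and `err` is assumed to be any object exposing `error`, `warning` and `notice` methods as `Spec.__init__` expects.

import types  # the Spec record is Python 2 code, so types.StringTypes is available

# `Spec` is assumed to be importable from the module shown in the record above.


class ManifestSpec(Spec):
    """Illustrative subclass that validates an already-parsed dict manifest."""

    SPEC_NAME = "example manifest"
    MORE_INFO = "This is only a sketch."
    SPEC = {
        "expected_type": dict,
        "required_nodes": ["name"],
        "allowed_once_nodes": ["name", "version"],
        "allowed_nodes": ["entries"],
        "child_nodes": {
            "name": {"expected_type": types.StringTypes, "not_empty": True},
            "version": {"expected_type": types.StringTypes,
                        "value_matches": r"^\d+(\.\d+)*$"},
            # For list nodes, `child_nodes` is a single spec applied to every element.
            "entries": {"expected_type": (list, tuple),
                        "child_nodes": {"expected_type": dict,
                                        "required_nodes": ["id"],
                                        "allowed_nodes": ["id"],
                                        "child_nodes": {"id": {"expected_type": int}}}},
        },
    }

    def parse(self, data):
        return data  # the data is assumed to be parsed already

    def validate_root_node(self, node):
        return None  # no extra checks beyond SPEC

    def get_root_node(self, data):
        return "manifest", data

    def has_child(self, node, child_name):
        return child_name in node

    def get_children(self, node):
        return node.items()


# err is assumed to provide error/warning/notice methods:
# ManifestSpec({"name": "demo", "entries": [{"id": 1}]}, err).validate()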
from setuptools import setup setup( name='lsearch', version='1.0', description='The Head First Python Search Tools', author='<NAME>', author_email='<EMAIL>', url='headfirstlabs.com', py_modules=['lsearch'], )
[ "setuptools.setup" ]
[((30, 216), 'setuptools.setup', 'setup', ([], {'name': '"""lsearch"""', 'version': '"""1.0"""', 'description': '"""The Head First Python Search Tools"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""headfirstlabs.com"""', 'py_modules': "['lsearch']"}), "(name='lsearch', version='1.0', description=\n 'The Head First Python Search Tools', author='<NAME>', author_email=\n '<EMAIL>', url='headfirstlabs.com', py_modules=['lsearch'])\n", (35, 216), False, 'from setuptools import setup\n')]
import json import logging from http import HTTPStatus from typing import Any, Dict, List, Optional, Tuple, Type, Union import werkzeug from flask import Blueprint, Flask, Response, abort, jsonify from flask.views import MethodView from flask_cors import CORS from gevent.pywsgi import WSGIServer from geventwebsocket import Resource as WebsocketResource, WebSocketServer from marshmallow import Schema from marshmallow.exceptions import ValidationError from webargs.flaskparser import parser from werkzeug.exceptions import NotFound from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser from rotkehlchen.api.v1.resources import ( AaveBalancesResource, AaveHistoryResource, AccountingReportDataResource, AccountingReportsResource, AdexBalancesResource, AdexHistoryResource, AllAssetsResource, AllBalancesResource, AssetIconsResource, AssetMovementsResource, AssetsReplaceResource, AssetsTypesResource, AssetUpdatesResource, AssociatedLocations, AsyncTasksResource, AvalancheTransactionsResource, BalancerBalancesResource, BalancerEventsHistoryResource, BalancerTradesHistoryResource, BinanceAvailableMarkets, BinanceUserMarkets, BlockchainBalancesResource, BlockchainsAccountsResource, BTCXpubResource, CompoundBalancesResource, CompoundHistoryResource, CounterpartiesResource, CurrentAssetsPriceResource, DatabaseBackupsResource, DatabaseInfoResource, DataImportResource, DBSnapshotDeletingResource, DBSnapshotDownloadingResource, DBSnapshotExportingResource, DBSnapshotImportingResource, DefiBalancesResource, ERC20TokenInfo, ERC20TokenInfoAVAX, Eth2DailyStatsResource, Eth2StakeDepositsResource, Eth2StakeDetailsResource, Eth2ValidatorsResource, EthereumAirdropsResource, EthereumAssetsResource, EthereumModuleDataResource, EthereumModuleResource, EthereumTransactionsResource, ExchangeBalancesResource, ExchangeRatesResource, ExchangesDataResource, ExchangesResource, ExternalServicesResource, HistoricalAssetsPriceResource, HistoryActionableItemsResource, HistoryBaseEntryResource, HistoryDownloadingResource, HistoryExportingResource, HistoryProcessingResource, HistoryStatusResource, IgnoredActionsResource, IgnoredAssetsResource, InfoResource, LedgerActionsResource, LiquityStakingHistoryResource, LiquityStakingResource, LiquityTrovesHistoryResource, LiquityTrovesResource, LoopringBalancesResource, MakerdaoDSRBalanceResource, MakerdaoDSRHistoryResource, MakerdaoVaultDetailsResource, MakerdaoVaultsResource, ManuallyTrackedBalancesResource, MessagesResource, NamedEthereumModuleDataResource, NamedOracleCacheResource, NFTSBalanceResource, NFTSResource, OraclesResource, OwnedAssetsResource, PeriodicDataResource, PickleDillResource, PingResource, QueriedAddressesResource, ReverseEnsResource, SettingsResource, StakingResource, StatisticsAssetBalanceResource, StatisticsNetvalueResource, StatisticsRendererResource, StatisticsValueDistributionResource, SushiswapBalancesResource, SushiswapEventsHistoryResource, SushiswapTradesHistoryResource, TagsResource, TradesResource, UniswapBalancesResource, UniswapEventsHistoryResource, UniswapTradesHistoryResource, UserAssetsResource, UserPasswordChangeResource, UserPremiumKeyResource, UserPremiumSyncResource, UsersByNameResource, UsersResource, WatchersResource, YearnVaultsBalancesResource, YearnVaultsHistoryResource, YearnVaultsV2BalancesResource, YearnVaultsV2HistoryResource, create_blueprint, ) from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp from rotkehlchen.logging 
import RotkehlchenLogsAdapter URLS = List[ Union[ Tuple[str, Type[MethodView]], Tuple[str, Type[MethodView], str], ] ] URLS_V1: URLS = [ ('/users', UsersResource), ('/watchers', WatchersResource), ('/users/<string:name>', UsersByNameResource), ('/users/<string:name>/password', UserPasswordChangeResource), ('/premium', UserPremiumKeyResource), ('/premium/sync', UserPremiumSyncResource), ('/settings', SettingsResource), ('/tasks/', AsyncTasksResource), ('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'), ('/exchange_rates', ExchangeRatesResource), ('/external_services/', ExternalServicesResource), ('/oracles', OraclesResource), ('/oracles/<string:oracle>/cache', NamedOracleCacheResource), ('/exchanges', ExchangesResource), ('/exchanges/balances', ExchangeBalancesResource), ( '/exchanges/balances/<string:location>', ExchangeBalancesResource, 'named_exchanges_balances_resource', ), ('/assets/<string:asset>/icon', AssetIconsResource), ('/trades', TradesResource), ('/ledgeractions', LedgerActionsResource), ('/asset_movements', AssetMovementsResource), ('/tags', TagsResource), ('/exchanges/binance/pairs', BinanceAvailableMarkets), ('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets), ('/exchanges/data/', ExchangesDataResource), ('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'), ('/balances/blockchains', BlockchainBalancesResource), ( '/balances/blockchains/<string:blockchain>', BlockchainBalancesResource, 'named_blockchain_balances_resource', ), ('/balances/', AllBalancesResource), ('/balances/manual', ManuallyTrackedBalancesResource), ('/statistics/netvalue', StatisticsNetvalueResource), ('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource), ('/statistics/value_distribution', StatisticsValueDistributionResource), ('/statistics/renderer', StatisticsRendererResource), ('/messages/', MessagesResource), ('/periodic/', PeriodicDataResource), ('/history/', HistoryProcessingResource), ('/history/status', HistoryStatusResource), ('/history/export/', HistoryExportingResource), ('/history/download/', HistoryDownloadingResource), ('/history/events', HistoryBaseEntryResource), ('/history/actionable_items', HistoryActionableItemsResource), ('/reports/', AccountingReportsResource), ( '/reports/<int:report_id>', AccountingReportsResource, 'per_report_resource', ), ( '/reports/<int:report_id>/data', AccountingReportDataResource, 'per_report_data_resource', ), ('/queried_addresses', QueriedAddressesResource), ('/blockchains/ETH/transactions', EthereumTransactionsResource), ( '/blockchains/ETH/transactions/<string:address>', EthereumTransactionsResource, 'per_address_ethereum_transactions_resource', ), ('/blockchains/ETH2/validators', Eth2ValidatorsResource), ('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource), ('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource), ('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource), ('/blockchains/ETH/defi', DefiBalancesResource), ('/blockchains/ETH/airdrops', EthereumAirdropsResource), ('/blockchains/ETH/erc20details/', ERC20TokenInfo), ('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource), ('/blockchains/ETH/modules/data', EthereumModuleDataResource), ('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource), ('/blockchains/ETH/modules/', EthereumModuleResource), ('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource), ('/blockchains/ETH/modules/makerdao/dsrhistory', 
MakerdaoDSRHistoryResource),
    ('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
    ('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
    ('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
    ('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
    ('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
    ('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
    ('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
    ('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
    ('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
    ('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
    ('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
    ('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
    ('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
    ('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
    ('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
    ('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
    ('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
    ('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
    ('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
    ('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
    ('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
    ('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
    ('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
    ('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
    ('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
    ('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
    ('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
    ('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
    ('/blockchains/BTC/xpub', BTCXpubResource),
    ('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
    (
        '/blockchains/AVAX/transactions/<string:address>',
        AvalancheTransactionsResource,
        'per_address_avalanche_transactions_resource',
    ),
    ('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
    ('/assets', OwnedAssetsResource),
    ('/assets/types', AssetsTypesResource),
    ('/assets/replace', AssetsReplaceResource),
    ('/assets/all', AllAssetsResource),
    ('/assets/ethereum', EthereumAssetsResource),
    ('/assets/prices/current', CurrentAssetsPriceResource),
    ('/assets/prices/historical', HistoricalAssetsPriceResource),
    ('/assets/ignored', IgnoredAssetsResource),
    ('/assets/updates', AssetUpdatesResource),
    ('/assets/user', UserAssetsResource),
    ('/actions/ignored', IgnoredActionsResource),
    ('/info', InfoResource),
    ('/ping', PingResource),
    ('/import', DataImportResource),
    ('/nfts', NFTSResource),
    ('/nfts/balances', NFTSBalanceResource),
    ('/database/info', DatabaseInfoResource),
    ('/database/backups', DatabaseBackupsResource),
    ('/locations/associated', AssociatedLocations),
    ('/staking/kraken', StakingResource),
    ('/snapshot/download', DBSnapshotDownloadingResource),
    ('/snapshot/export', DBSnapshotExportingResource),
    ('/snapshot/import', DBSnapshotImportingResource),
    ('/snapshot/delete', DBSnapshotDeletingResource),
    ('/ens/reverse', ReverseEnsResource),
]

logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)


def setup_urls(
        rest_api: RestAPI,
        blueprint: Blueprint,
        urls: URLS,
) -> None:
    for url_tuple in urls:
        if len(url_tuple) == 2:
            route, resource_cls = url_tuple  # type: ignore
            endpoint = resource_cls.__name__.lower()
        elif len(url_tuple) == 3:
            route, resource_cls, endpoint = url_tuple  # type: ignore
        else:
            raise ValueError(f"Invalid URL format: {url_tuple!r}")
        blueprint.add_url_rule(
            route,
            view_func=resource_cls.as_view(endpoint, rest_api_object=rest_api),
        )


def endpoint_not_found(e: NotFound) -> Response:
    msg = 'invalid endpoint'
    # The isinstance check is because I am not sure if `e` is always going to
    # be a "NotFound" error here
    if isinstance(e, NotFound):
        msg = e.description
    return api_response(wrap_in_fail_result(msg), HTTPStatus.NOT_FOUND)


@parser.error_handler  # type: ignore
@resource_parser.error_handler
@ignore_kwarg_parser.error_handler
def handle_request_parsing_error(
        err: ValidationError,
        _request: werkzeug.local.LocalProxy,
        _schema: Schema,
        error_status_code: Optional[int],  # pylint: disable=unused-argument
        error_headers: Optional[Dict],  # pylint: disable=unused-argument
) -> None:
    """This handles request parsing errors generated for example by schema
    field validation failing."""
    msg = str(err)
    if isinstance(err.messages, dict):
        # first key is just the location. Ignore
        key = list(err.messages.keys())[0]
        msg = json.dumps(err.messages[key])
    elif isinstance(err.messages, list):
        msg = ','.join(err.messages)

    err_response = jsonify(result=None, message=msg)
    err_response.status_code = HTTPStatus.BAD_REQUEST
    abort(err_response)


class APIServer():

    _api_prefix = '/api/1'

    def __init__(
            self,
            rest_api: RestAPI,
            ws_notifier: RotkiNotifier,
            cors_domain_list: List[str] = None,
    ) -> None:
        flask_app = Flask(__name__)
        if cors_domain_list:
            CORS(flask_app, origins=cors_domain_list)

        blueprint = create_blueprint(self._api_prefix)
        setup_urls(
            blueprint=blueprint,
            rest_api=rest_api,
            urls=URLS_V1,
        )

        self.rest_api = rest_api
        self.rotki_notifier = ws_notifier
        self.flask_app = flask_app
        self.blueprint = blueprint

        self.wsgiserver: Optional[WSGIServer] = None
        self.flask_app.register_blueprint(self.blueprint)
        self.ws_server: Optional[WebSocketServer] = None

        self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)
        self.flask_app.register_error_handler(Exception, self.unhandled_exception)

    @staticmethod
    def unhandled_exception(exception: Exception) -> Response:
        """Flask.errorhandler when an exception wasn't correctly handled"""
        log.critical(
            'Unhandled exception when processing endpoint request',
            exc_info=True,
            exception=str(exception),
        )
        return api_response(wrap_in_fail_result(str(exception)), HTTPStatus.INTERNAL_SERVER_ERROR)

    def run(self, host: str = '127.0.0.1', port: int = 5042, **kwargs: Any) -> None:
        """This is only used for the data faker and not used in production"""
        self.flask_app.run(host=host, port=port, **kwargs)

    def start(
            self,
            host: str = '127.0.0.1',
            rest_port: int = 5042,
            websockets_port: int = 5043,
    ) -> None:
        """This is used to start the API server in production"""
        wsgi_logger = logging.getLogger(__name__ + '.pywsgi')
        self.wsgiserver = WSGIServer(
            listener=(host, rest_port),
            application=self.flask_app,
            log=wsgi_logger,
            error_log=wsgi_logger,
        )
        msg = f'rotki REST API server is running at: {host}:{rest_port}'
        print(msg)
        log.info(msg)
        self.wsgiserver.start()

        self.ws_server = WebSocketServer(
            listener=(host, websockets_port),
            application=WebsocketResource([
                ('^/', RotkiWSApp),
            ]),
            debug=False,
            environ={'rotki_notifier': self.rotki_notifier},
        )
        msg = f'rotki Websockets API server is running at: {host}:{websockets_port}'
        print(msg)
        log.info(msg)
        self.ws_server.start()

    def stop(self, timeout: int = 5) -> None:
        """Stops the API server. If handlers are running after timeout they are killed"""
        if self.wsgiserver is not None:
            self.wsgiserver.stop(timeout)
            self.wsgiserver = None
        if self.ws_server is not None:
            self.ws_server.stop(timeout)
            self.ws_server = None

        self.rest_api.stop()
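A minimal wiring sketch for the server class above. It is an illustration only: it assumes an already-configured RestAPI instance and a RotkiNotifier are passed in from elsewhere (their construction is not part of this sample), and the CORS origin is made up.

def serve(rest_api, notifier):
    # Hypothetical helper, not part of the original sample.
    server = APIServer(
        rest_api=rest_api,
        ws_notifier=notifier,
        cors_domain_list=['http://localhost:8080'],  # assumed origin for local development
    )
    server.start(host='127.0.0.1', rest_port=5042, websockets_port=5043)
    return server  # caller is expected to invoke server.stop(timeout=5) on shutdown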
[ "logging.getLogger", "rotkehlchen.api.v1.resources.create_blueprint", "rotkehlchen.logging.RotkehlchenLogsAdapter", "flask_cors.CORS", "flask.Flask", "json.dumps", "rotkehlchen.api.rest.wrap_in_fail_result", "gevent.pywsgi.WSGIServer", "flask.abort", "geventwebsocket.Resource", "flask.jsonify" ]
[((11731, 11758), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (11748, 11758), False, 'import logging\n'), ((11765, 11795), 'rotkehlchen.logging.RotkehlchenLogsAdapter', 'RotkehlchenLogsAdapter', (['logger'], {}), '(logger)\n', (11787, 11795), False, 'from rotkehlchen.logging import RotkehlchenLogsAdapter\n'), ((13526, 13559), 'flask.jsonify', 'jsonify', ([], {'result': 'None', 'message': 'msg'}), '(result=None, message=msg)\n', (13533, 13559), False, 'from flask import Blueprint, Flask, Response, abort, jsonify\n'), ((13618, 13637), 'flask.abort', 'abort', (['err_response'], {}), '(err_response)\n', (13623, 13637), False, 'from flask import Blueprint, Flask, Response, abort, jsonify\n'), ((12675, 12699), 'rotkehlchen.api.rest.wrap_in_fail_result', 'wrap_in_fail_result', (['msg'], {}), '(msg)\n', (12694, 12699), False, 'from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result\n'), ((13398, 13427), 'json.dumps', 'json.dumps', (['err.messages[key]'], {}), '(err.messages[key])\n', (13408, 13427), False, 'import json\n'), ((13878, 13893), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (13883, 13893), False, 'from flask import Blueprint, Flask, Response, abort, jsonify\n'), ((13997, 14031), 'rotkehlchen.api.v1.resources.create_blueprint', 'create_blueprint', (['self._api_prefix'], {}), '(self._api_prefix)\n', (14013, 14031), False, 'from rotkehlchen.api.v1.resources import AaveBalancesResource, AaveHistoryResource, AccountingReportDataResource, AccountingReportsResource, AdexBalancesResource, AdexHistoryResource, AllAssetsResource, AllBalancesResource, AssetIconsResource, AssetMovementsResource, AssetsReplaceResource, AssetsTypesResource, AssetUpdatesResource, AssociatedLocations, AsyncTasksResource, AvalancheTransactionsResource, BalancerBalancesResource, BalancerEventsHistoryResource, BalancerTradesHistoryResource, BinanceAvailableMarkets, BinanceUserMarkets, BlockchainBalancesResource, BlockchainsAccountsResource, BTCXpubResource, CompoundBalancesResource, CompoundHistoryResource, CounterpartiesResource, CurrentAssetsPriceResource, DatabaseBackupsResource, DatabaseInfoResource, DataImportResource, DBSnapshotDeletingResource, DBSnapshotDownloadingResource, DBSnapshotExportingResource, DBSnapshotImportingResource, DefiBalancesResource, ERC20TokenInfo, ERC20TokenInfoAVAX, Eth2DailyStatsResource, Eth2StakeDepositsResource, Eth2StakeDetailsResource, Eth2ValidatorsResource, EthereumAirdropsResource, EthereumAssetsResource, EthereumModuleDataResource, EthereumModuleResource, EthereumTransactionsResource, ExchangeBalancesResource, ExchangeRatesResource, ExchangesDataResource, ExchangesResource, ExternalServicesResource, HistoricalAssetsPriceResource, HistoryActionableItemsResource, HistoryBaseEntryResource, HistoryDownloadingResource, HistoryExportingResource, HistoryProcessingResource, HistoryStatusResource, IgnoredActionsResource, IgnoredAssetsResource, InfoResource, LedgerActionsResource, LiquityStakingHistoryResource, LiquityStakingResource, LiquityTrovesHistoryResource, LiquityTrovesResource, LoopringBalancesResource, MakerdaoDSRBalanceResource, MakerdaoDSRHistoryResource, MakerdaoVaultDetailsResource, MakerdaoVaultsResource, ManuallyTrackedBalancesResource, MessagesResource, NamedEthereumModuleDataResource, NamedOracleCacheResource, NFTSBalanceResource, NFTSResource, OraclesResource, OwnedAssetsResource, PeriodicDataResource, PickleDillResource, PingResource, QueriedAddressesResource, ReverseEnsResource, SettingsResource, 
StakingResource, StatisticsAssetBalanceResource, StatisticsNetvalueResource, StatisticsRendererResource, StatisticsValueDistributionResource, SushiswapBalancesResource, SushiswapEventsHistoryResource, SushiswapTradesHistoryResource, TagsResource, TradesResource, UniswapBalancesResource, UniswapEventsHistoryResource, UniswapTradesHistoryResource, UserAssetsResource, UserPasswordChangeResource, UserPremiumKeyResource, UserPremiumSyncResource, UsersByNameResource, UsersResource, WatchersResource, YearnVaultsBalancesResource, YearnVaultsHistoryResource, YearnVaultsV2BalancesResource, YearnVaultsV2HistoryResource, create_blueprint\n'), ((15525, 15564), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.pywsgi')"], {}), "(__name__ + '.pywsgi')\n", (15542, 15564), False, 'import logging\n'), ((15591, 15702), 'gevent.pywsgi.WSGIServer', 'WSGIServer', ([], {'listener': '(host, rest_port)', 'application': 'self.flask_app', 'log': 'wsgi_logger', 'error_log': 'wsgi_logger'}), '(listener=(host, rest_port), application=self.flask_app, log=\n wsgi_logger, error_log=wsgi_logger)\n', (15601, 15702), False, 'from gevent.pywsgi import WSGIServer\n'), ((13935, 13976), 'flask_cors.CORS', 'CORS', (['flask_app'], {'origins': 'cors_domain_list'}), '(flask_app, origins=cors_domain_list)\n', (13939, 13976), False, 'from flask_cors import CORS\n'), ((16015, 16054), 'geventwebsocket.Resource', 'WebsocketResource', (["[('^/', RotkiWSApp)]"], {}), "([('^/', RotkiWSApp)])\n", (16032, 16054), True, 'from geventwebsocket import Resource as WebsocketResource, WebSocketServer\n')]
import numpy as np

pos = []
normals = []

p = [[-0.4722227, -0.24517583, -0.6370031]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)

p = [[-0.2549828, -0.24587737, -0.63704705]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)

p = [[-0.25787751, -0.38255749, -0.63705089]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)

p = [[-0.47206733, -0.38317576, -0.6370076]]
n = [[2.02215104e-04, -3.23903880e-05, 9.99999979e-01]]
pos.append(p)
normals.append(n)

# Contact lgripper/handrail
# Left
p = [[0.3651077, 0.33419711, 0.63609439]]
n = [[-3.39491173e-05, 9.99999875e-01, 4.99472000e-04]]
pos.append(p)
normals.append(n)

# Right
#p = [[0.36510907, 0.29419711, 0.63607441]]
#p = [[0.3651077, 0.33419711, 0.63609439]]
#n = [[3.44761855e-05, -9.99999874e-01, -5.00077386e-04]]
#pos.append(p)
#normals.append(n)

# Bottom
#p = [[0.34212609, 0.31418314, 0.66248165]]
#n = [[-6.56636734e-01, -3.99160434e-04, 7.54206895e-01]]
#pos.append(p)
#normals.append(n)

# Top
p = [[0.38480749, 0.31420908, 0.61345819]]
n = [[6.56636734e-01, 4.00439950e-04, -7.54206894e-01]]
pos.append(p)
normals.append(n)

pos = [np.array(px).T for px in pos]
#for p in pos:
#    p[2, 0] = 0.0
normals = [np.array(nx).T for nx in normals]
[ "numpy.array" ]
[((1222, 1234), 'numpy.array', 'np.array', (['px'], {}), '(px)\n', (1230, 1234), True, 'import numpy as np\n'), ((1295, 1307), 'numpy.array', 'np.array', (['nx'], {}), '(nx)\n', (1303, 1307), True, 'import numpy as np\n')]
import setuptools

setuptools.setup(
    name="qualityforward",
    version="1.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Python library for QualityForward API",
    long_description="This is a Python library for the QualityForward API. QualityForward is a cloud-based test management service.",
    long_description_content_type="text/markdown",
    url="https://cloud.veriserve.co.jp/",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
[ "setuptools.find_packages" ]
[((422, 448), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (446, 448), False, 'import setuptools\n')]
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, AbstractUser
from django.utils import timezone
from django.utils.translation import gettext as _
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
from datetime import datetime, timedelta


class CarTrip(models.Model):
    class Meta:
        verbose_name = _('carTrip')
        verbose_name_plural = _('cartrips')

    def __str__(self):
        return f'{self.driver_name} Car Trip'

    driver_name = models.CharField(max_length=200)
    destination = models.CharField(max_length=200)
    number_of_seats = models.IntegerField('number of seats')
    trip_date = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    @classmethod
    def create(cls, driver_name, destination, number_of_seats, trip_date):
        trip = cls(
            driver_name=driver_name,
            destination=destination,
            number_of_seats=number_of_seats,
            trip_date=trip_date,
            pub_date=datetime.now(),
        )
        return trip

    def was_published_recently(self):
        now = timezone.now()
        # timedelta must come from the datetime module; datetime.timedelta would raise AttributeError
        return now - timedelta(days=1) <= self.pub_date <= now


class Relation(models.Model):
    class Meta:
        verbose_name = _('relation')
        verbose_name_plural = _('relation')

    trip_number = models.IntegerField('trip_number')
    hiker_name = models.CharField(max_length=200)

    def __str__(self):
        return f'{self.hiker_name} going on trip id = {self.trip_number}'

    @classmethod
    def create(cls, trip_number, hiker_name):
        rel = cls(
            trip_number=trip_number,
            hiker_name=hiker_name,
        )
        return rel
[ "django.utils.translation.gettext", "django.db.models.IntegerField", "django.utils.timezone.now", "datetime.datetime.now", "django.db.models.DateTimeField", "datetime.datetime.timedelta", "django.db.models.CharField" ]
[((675, 707), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (691, 707), False, 'from django.db import models\n'), ((726, 758), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (742, 758), False, 'from django.db import models\n'), ((781, 819), 'django.db.models.IntegerField', 'models.IntegerField', (['"""number of seats"""'], {}), "('number of seats')\n", (800, 819), False, 'from django.db import models\n'), ((836, 868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (852, 868), False, 'from django.db import models\n'), ((884, 922), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date published"""'], {}), "('date published')\n", (904, 922), False, 'from django.db import models\n'), ((1571, 1605), 'django.db.models.IntegerField', 'models.IntegerField', (['"""trip_number"""'], {}), "('trip_number')\n", (1590, 1605), False, 'from django.db import models\n'), ((1623, 1655), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1639, 1655), False, 'from django.db import models\n'), ((529, 541), 'django.utils.translation.gettext', '_', (['"""carTrip"""'], {}), "('carTrip')\n", (530, 541), True, 'from django.utils.translation import gettext as _\n'), ((572, 585), 'django.utils.translation.gettext', '_', (['"""cartrips"""'], {}), "('cartrips')\n", (573, 585), True, 'from django.utils.translation import gettext as _\n'), ((1336, 1350), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1348, 1350), False, 'from django.utils import timezone\n'), ((1494, 1507), 'django.utils.translation.gettext', '_', (['"""relation"""'], {}), "('relation')\n", (1495, 1507), True, 'from django.utils.translation import gettext as _\n'), ((1538, 1551), 'django.utils.translation.gettext', '_', (['"""relation"""'], {}), "('relation')\n", (1539, 1551), True, 'from django.utils.translation import gettext as _\n'), ((1226, 1240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1238, 1240), False, 'from datetime import datetime\n'), ((1372, 1398), 'datetime.datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1390, 1398), False, 'from datetime import datetime\n')]
import numpy

# (low, high, label): write label when low < value <= high.
# The ranges reproduce the original chain of elif checks unchanged.
x_buckets = [(92, 113, 1)]
x_buckets += [(113 + 20 * i, 133 + 20 * i, i + 2) for i in range(34)]   # labels 2..35, 20-wide bins up to 793
x_buckets += [(4, 15, 36)]
x_buckets += [(15 + 10 * i, 25 + 10 * i, 37 + i) for i in range(7)]     # labels 37..43, 10-wide bins
x_buckets += [(85, 92, 44)]

y_buckets = [
    (0, 0.1, 80), (0.1, 0.2, 81), (0.2, 0.3, 82), (0.3, 0.4, 83),
    (0.4, 0.5, 84), (0.5, 0.6, 85), (0.6, 0.7, 86), (0.7, 0.8, 87),
    (0.8, 0.9, 88), (0.9, 1, 89), (1, 1.1, 90), (1.1, 1.2, 91),
    (1.2, 1.3, 92), (1.3, 1.4, 93), (1.4, 1.5, 94), (1.5, 1.6, 95),
    (1.6, 1.7, 96),
]


def write_entry(handle, value, buckets):
    # Write '"value":"label",' for the first bucket whose (low, high] range contains value;
    # values outside every range are skipped, as in the original code.
    for low, high, label in buckets:
        if low < value <= high:
            handle.write('"' + str(value) + '"' + ":" + '"' + str(label) + '",')
            return


with open("dic.txt", "w", encoding="utf-8") as dic:
    for x in range(5, 790, 1):
        write_entry(dic, x, x_buckets)

with open("time.txt", "w", encoding="utf-8") as duree:
    for y in numpy.arange(0, 1.7, 0.01):
        write_entry(duree, y, y_buckets)
[ "numpy.arange" ]
[((4023, 4049), 'numpy.arange', 'numpy.arange', (['(0)', '(1.7)', '(0.01)'], {}), '(0, 1.7, 0.01)\n', (4035, 4049), False, 'import numpy\n')]
# -*- coding: utf-8 -*-
'''
=============
scrim.globals
=============
Defines variables passed into the python script via Environment Variables by
scrim scripts. If SCRIM_SHELL is None, then the python script was not executed
by a scrim script.

SHELLS (list): list of available shells
SCRIM_SHELL (str): Parent shell, one of the above SHELLS
SCRIM_PATH (str): Path to output shell script
SCRIM_AUTO_WRITE (bool): Write to SCRIM_PATH when python exits?
SCRIM_SCRIPT (str): Path to the scrim script that invoked python
SCRIM_DEBUG (bool): Is scrim script running in debug mode?
'''
from __future__ import absolute_import
import os

__all__ = [
    'SHELLS', 'SCRIM_SHELL', 'SCRIM_PATH', 'SCRIM_AUTO_WRITE',
    'SCRIM_SCRIPT', 'SCRIM_DEBUG'
]

SHELLS = [
    'powershell.exe',
    'cmd.exe',
    'bash'
]

SCRIM_SHELL = os.environ.get('SCRIM_SHELL', None)
SCRIM_PATH = os.environ.get('SCRIM_PATH', None)
SCRIM_AUTO_WRITE = bool(os.environ.get('SCRIM_AUTO_WRITE', False))
SCRIM_SCRIPT = os.environ.get('SCRIM_SCRIPT', None)
SCRIM_DEBUG = bool(os.environ.get('SCRIM_DEBUG', False))
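A short, hypothetical consumer of the module above, sketching how a script might use these globals (the guard, the import path and the written command are assumptions based only on the docstring):

# Hypothetical usage sketch, not part of the original sample.
from scrim.globals import SCRIM_SHELL, SCRIM_PATH, SCRIM_DEBUG

if SCRIM_SHELL is not None and SCRIM_PATH is not None:
    # Only emit shell commands when python was invoked through a scrim script.
    if SCRIM_DEBUG:
        print('Invoked from shell: %s' % SCRIM_SHELL)
    with open(SCRIM_PATH, 'w') as f:
        f.write('echo scrim finished\n')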
[ "os.environ.get" ]
[((841, 876), 'os.environ.get', 'os.environ.get', (['"""SCRIM_SHELL"""', 'None'], {}), "('SCRIM_SHELL', None)\n", (855, 876), False, 'import os\n'), ((890, 924), 'os.environ.get', 'os.environ.get', (['"""SCRIM_PATH"""', 'None'], {}), "('SCRIM_PATH', None)\n", (904, 924), False, 'import os\n'), ((1007, 1043), 'os.environ.get', 'os.environ.get', (['"""SCRIM_SCRIPT"""', 'None'], {}), "('SCRIM_SCRIPT', None)\n", (1021, 1043), False, 'import os\n'), ((949, 990), 'os.environ.get', 'os.environ.get', (['"""SCRIM_AUTO_WRITE"""', '(False)'], {}), "('SCRIM_AUTO_WRITE', False)\n", (963, 990), False, 'import os\n'), ((1063, 1099), 'os.environ.get', 'os.environ.get', (['"""SCRIM_DEBUG"""', '(False)'], {}), "('SCRIM_DEBUG', False)\n", (1077, 1099), False, 'import os\n')]
import csnd6


class Control:

    def __init__(self, volume, frequency):
        engine = csnd6.Csound()
        engine.SetOption("-odac")
        engine.Compile("osc.csd")

        thread = csnd6.CsoundPerformanceThread(engine)
        thread.Play()

        self.engine = engine
        self.thread = thread

        self.set_volume(volume)
        self.set_frequency(frequency)

    def set_volume(self, volume):
        self.engine.SetChannel("volume", volume)

    def set_frequency(self, frequency):
        self.engine.SetChannel("frequency", frequency)

    def close(self):
        self.thread.Stop()
        self.thread.Join()
[ "csnd6.Csound", "csnd6.CsoundPerformanceThread" ]
[((89, 103), 'csnd6.Csound', 'csnd6.Csound', ([], {}), '()\n', (101, 103), False, 'import csnd6\n'), ((191, 228), 'csnd6.CsoundPerformanceThread', 'csnd6.CsoundPerformanceThread', (['engine'], {}), '(engine)\n', (220, 228), False, 'import csnd6\n')]
# [LICENSE]
# Copyright (c) 2020, Alliance for Sustainable Energy.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or
# promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# [/LICENSE]

import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--no-clean-up",
        action="store_true",
        default=False,
        help="Pass this option to leave test outputs in place"
    )


@pytest.fixture(scope="session", autouse=True)
def clean_up(request):
    return (not request.config.getoption('--no-clean-up'))
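A hypothetical companion test sketching how the session fixture above might gate cleanup of generated files; the test name and the scratch file are made up and are not part of the sample:

# Hypothetical test, for illustration only.
import os

def test_writes_scratch_file(clean_up, tmp_path):
    scratch = tmp_path / "output.csv"
    scratch.write_text("a,b\n1,2\n")
    assert scratch.exists()
    if clean_up:  # True unless pytest was invoked with --no-clean-up
        os.remove(scratch)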
[ "pytest.fixture" ]
[((1818, 1863), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (1832, 1863), False, 'import pytest\n')]
from django.db import models


# Create your models here.
class Tipo(models.Model):
    descrizione = models.CharField(blank=False, null=False, max_length=128)

    def __unicode__(self):
        return "{}".format(self.descrizione)

    class Meta:
        verbose_name_plural = 'Tipi'


class Marca(models.Model):
    descrizione = models.CharField(blank=False, null=False, max_length=128)

    def __unicode__(self):
        return "{}".format(self.descrizione)

    class Meta:
        verbose_name_plural = 'Marche'


class Modello(models.Model):
    descrizione = models.CharField(blank=False, null=False, max_length=128)
    marca = models.ForeignKey(Marca, null=False, blank=False)
    tipo = models.ForeignKey(Tipo, null=False, blank=False)

    def __unicode__(self):
        return "{}".format(self.descrizione)

    class Meta:
        verbose_name_plural = 'Modelli'


class Alimentazione(models.Model):
    descrizione = models.CharField(blank=False, null=False, max_length=128)

    def __unicode__(self):
        return "{}".format(self.descrizione)

    class Meta:
        verbose_name_plural = 'Alimentazioni'


class Mezzo(models.Model):
    telaio = models.CharField(blank=False, null=False, max_length=128)
    colore = models.CharField(blank=False, null=False, max_length=128)
    alimentazione = models.ForeignKey(Alimentazione, null=False, blank=False)
    modello = models.ForeignKey(Modello, null=False, blank=False)

    def __unicode__(self):
        return "{} {}".format(self.telaio, self.modello)

    class Meta:
        verbose_name_plural = 'Mezzi'


class Targa(models.Model):
    numero = models.CharField(null=False, blank=False, max_length=16)
    dal = models.DateField()
    al = models.DateField()
    mezzo = models.ForeignKey(Mezzo, null=False, blank=False)

    def __unicode__(self):
        return "{}".format(self.numero)

    class Meta:
        verbose_name_plural = 'Targhe'
[ "django.db.models.DateField", "django.db.models.CharField", "django.db.models.ForeignKey" ]
[((104, 161), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (120, 161), False, 'from django.db import models\n'), ((336, 393), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (352, 393), False, 'from django.db import models\n'), ((572, 629), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (588, 629), False, 'from django.db import models\n'), ((642, 691), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Marca'], {'null': '(False)', 'blank': '(False)'}), '(Marca, null=False, blank=False)\n', (659, 691), False, 'from django.db import models\n'), ((703, 751), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tipo'], {'null': '(False)', 'blank': '(False)'}), '(Tipo, null=False, blank=False)\n', (720, 751), False, 'from django.db import models\n'), ((937, 994), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (953, 994), False, 'from django.db import models\n'), ((1173, 1230), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (1189, 1230), False, 'from django.db import models\n'), ((1244, 1301), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'null': '(False)', 'max_length': '(128)'}), '(blank=False, null=False, max_length=128)\n', (1260, 1301), False, 'from django.db import models\n'), ((1322, 1379), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Alimentazione'], {'null': '(False)', 'blank': '(False)'}), '(Alimentazione, null=False, blank=False)\n', (1339, 1379), False, 'from django.db import models\n'), ((1394, 1445), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Modello'], {'null': '(False)', 'blank': '(False)'}), '(Modello, null=False, blank=False)\n', (1411, 1445), False, 'from django.db import models\n'), ((1628, 1684), 'django.db.models.CharField', 'models.CharField', ([], {'null': '(False)', 'blank': '(False)', 'max_length': '(16)'}), '(null=False, blank=False, max_length=16)\n', (1644, 1684), False, 'from django.db import models\n'), ((1695, 1713), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1711, 1713), False, 'from django.db import models\n'), ((1723, 1741), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1739, 1741), False, 'from django.db import models\n'), ((1754, 1803), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Mezzo'], {'null': '(False)', 'blank': '(False)'}), '(Mezzo, null=False, blank=False)\n', (1771, 1803), False, 'from django.db import models\n')]
import pickle
from time import sleep

import googleapiclient.errors
from transliterate import translit

from logs.logging import get_logger
from api_google.google_api_sheets import get_sheets_service, get_multiple_ranges
from api_google.google_api_directory import get_directory_service, get_users_for_domain, \
    get_groups_for_domain, create_group, add_user_to_group
from api_google.google_api_groupsettings import get_groupsettings_service, \
    get_group_settings, update_group_settings
from config.config import sync_sheets_and_groups, path_data_directory


def main():
    logger = get_logger('sync_sheets_and_groups', sync_sheets_and_groups['logging_level'])

    data_path = path_data_directory / 'sync_sheets_and_groups'
    data_path.mkdir(parents=True, exist_ok=True)
    synced_users_path = data_path / 'synced_users.pickle'

    while True:
        # number_of_registered_users = 0
        # synced_users_dictionary_creation = False
        #
        # # Getting a list of users who have already been synced
        # if synced_users_path.exists():
        #     logger.debug('Reading synced users from: %s', synced_users_path)
        #     with open(synced_users_path, 'rb') as f:
        #         synced_users = pickle.load(f)
        # else:
        #     logger.info('Creating synced users dictionary')
        #     synced_users = dict()
        #     synced_users_dictionary_creation = True

        try:
            service_directory = get_directory_service()
            service_sheets = get_sheets_service()

            # ranges = get_multiple_ranges(
            #     service_sheets,
            #     sync_sheets_and_groups['spreadsheet_id'],
            #     sync_sheets_and_groups['range_names']
            # )
            #
            # with open(data_path / 'ranges.pickle', 'wb') as file:
            #     pickle.dump(ranges, file)
            with open(data_path / 'ranges.pickle', 'rb') as file:
                ranges = pickle.load(file)
            #
            # [logger.debug(x) for x in ranges]

            # group_results = []
            # for group in ranges[0]['values']:
            #     group_name = group[0].split(" ", 1)[0]
            #
            #     email = (translit(group_name, "ru", reversed=True)).lower() \
            #             + "@" \
            #             + sync_sheets_and_groups['google_domain']
            #
            #     try:
            #         group_results.append(create_group(service_directory, email, group_name, ""))
            #     except googleapiclient.errors.HttpError as exception:
            #         # If group already exists among other things
            #         logger.error(exception, exc_info=False)
            #
            #     logger.debug(group_name, email)
            #
            # group_results.sort(key=lambda x: x['name'])
            # with open(data_path / 'group_results.pickle', 'wb') as file:
            #     pickle.dump(group_results, file)
            with open(data_path / 'group_results.pickle', 'rb') as file:
                group_results = pickle.load(file)
            #
            # [logger.debug(x) for x in group_results]

            created_group_names = [x['name'] for x in group_results]
            [logger.debug(x) for x in created_group_names]

            # # A client should wait 1 minute before adding users or sending messages to a new group
            # sleep(60)

            students = dict(zip(
                [i[0] if i else "" for i in ranges[1]['values']],
                [i[0] if i else "" for i in ranges[2]['values']]
            ))
            logger.debug(students.items())

            leaders = dict(zip(
                [i[0] if i else "" for i in ranges[3]['values']],
                [i[0] if i else "" for i in ranges[4]['values']]
            ))
            logger.debug(leaders.items())

            group_users = {}
            for group in ranges[0]['values']:
                id = group[0].split(" ", 1)[0]

                if id not in created_group_names:
                    logger.debug("Skipping group: %s", id)
                    continue
                else:
                    logger.debug("Adding users to group: %s", id)

                group_users[id] = []

                # Leader email
                group_users[id].append(
                    [leaders[group[1]], 'MEMBER']
                )

                # Member emails
                for i in range(2, len(group)):
                    group_users[id].append(
                        [students[group[i]], 'MEMBER']
                    )

                # Mandatory user
                group_users[id] += sync_sheets_and_groups['mandatory_members']

            with open(data_path / 'group_users.pickle', 'wb') as file:
                pickle.dump(group_users, file)
            with open(data_path / 'group_users.pickle', 'rb') as file:
                group_users = pickle.load(file)

            [logger.debug(x) for x in group_users]

            # # Add users to groups
            # user_results = []
            # for group in group_users:
            #     for group_user in group_users[group]:
            #         user_results.append(
            #             add_user_to_group(service, group, group_user[0], group_user[1])
            #         )
            #
            # with open(data_path / 'user_results.pickle', 'wb') as file:
            #     pickle.dump(user_results, file)
            # with open(data_path / 'user_results.pickle', 'rb') as file:
            #     user_results = pickle.load(file)
            #
            # [logger.debug(x) for x in user_results]

            # students = dict(zip(
            #     [i[0] if i else "" for i in ranges[1]['values']],
            #     [i[0] if i else "" for i in ranges[2]['values']]
            # ))
            #
            # leaders = dict(zip(
            #     [i[0] if i else "" for i in ranges[3]['values']],
            #     [i[0] if i else "" for i in ranges[4]['values']]
            # ))

            # if id not in synced_users:
            #     synced_users[id] = set()
            #
            # member_emails = set()
            #
            # # Leader email
            # member_emails.add(
            #     leaders[group[1]]
            # )
            #
            # # Member emails
            # for i in range(2, len(group)):
            #     member_emails.add(
            #         students[group[i]]
            #     )
            #
            # # Mandatory emails
            # member_emails |= set(sync_sheets_and_groups['mandatory_members'])
            #
            # # Synced users
            # member_emails -= synced_users[id]
            # synced_users[id] |= member_emails
            #
            # member_emails = list(member_emails)
            #
            # logger.debug('Name: %s - Description: %s - Users: %s',
            #              name, description, member_emails)
            #
            # if not synced_users_dictionary_creation:
            #     # TODO
            #     number_of_registered_users += len(member_emails)
            #
            # logger.debug('Result: %s', result)

            # -----
            #
            # Might need rework
            #
            # -----
            #
            # service = get_groupsettings_service()
            #
            # group_emails = []
            # for group_name in group_names:
            #     group_emails.append(
            #         (translit(group_name, "ru", reversed=True)).lower() \
            #         + "@" \
            #         + create_google_groups['google_domain']
            #     )
            #
            # with open(data_path / 'group_emails.pickle', 'wb') as file:
            #     pickle.dump(group_emails, file)
            # with open(data_path / 'group_emails.pickle', 'rb') as file:
            #     group_emails = pickle.load(file)
            #
            # [logger.debug(x) for x in group_emails]
            #
            # settings_results = []
            # for group_email in group_emails:
            #     settings_results.append(
            #         update_group_settings(
            #             service,
            #             group_email,
            #             {
            #                 "whoCanJoin": "INVITED_CAN_JOIN",
            #                 "whoCanViewMembership": "ALL_IN_DOMAIN_CAN_VIEW",
            #                 "whoCanViewGroup": "ALL_IN_DOMAIN_CAN_VIEW",
            #                 "whoCanPostMessage": "ALL_IN_DOMAIN_CAN_POST",
            #                 "isArchived": "true"
            #             }
            #         )
            #     )
            #
            # with open(data_path / 'settings_results.pickle', 'wb') as file:
            #     pickle.dump(settings_results, file)
            # with open(data_path / 'settings_results.pickle', 'rb') as file:
            #     settings_results = pickle.load(file)
            #
            # [logger.debug(x) for x in settings_results]

        except Exception as exception:
            logger.error(exception, exc_info=True)

        # logger.debug('Writing synced users to: %s', synced_users_path)
        # with open(synced_users_path, 'wb') as f:
        #     pickle.dump(synced_users, f)
        #
        # logger.info('Update finished. Registered %s users. Sleeping for %s seconds.',
        #             number_of_registered_users, sync_sheets_and_groups['sleep_time'])

        sleep(sync_sheets_and_groups['sleep_time'])


if __name__ == '__main__':
    main()
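A small, hedged illustration of the group-email derivation the comments above describe (the group name and domain are made up; it assumes translit from the transliterate package romanizes Cyrillic with the default "ru" mapping):

# Illustration only, not part of the original sample.
from transliterate import translit

group_name = "Физика"  # hypothetical value read from the spreadsheet
google_domain = "example.com"  # hypothetical domain
email = (translit(group_name, "ru", reversed=True)).lower() + "@" + google_domain
# expected result, assuming the default mapping: "[email protected]"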
[ "pickle.dump", "pickle.load", "logs.logging.get_logger", "time.sleep", "api_google.google_api_sheets.get_sheets_service", "api_google.google_api_directory.get_directory_service" ]
[((591, 668), 'logs.logging.get_logger', 'get_logger', (['"""sync_sheets_and_groups"""', "sync_sheets_and_groups['logging_level']"], {}), "('sync_sheets_and_groups', sync_sheets_and_groups['logging_level'])\n", (601, 668), False, 'from logs.logging import get_logger\n'), ((9521, 9564), 'time.sleep', 'sleep', (["sync_sheets_and_groups['sleep_time']"], {}), "(sync_sheets_and_groups['sleep_time'])\n", (9526, 9564), False, 'from time import sleep\n'), ((1462, 1485), 'api_google.google_api_directory.get_directory_service', 'get_directory_service', ([], {}), '()\n', (1483, 1485), False, 'from api_google.google_api_directory import get_directory_service, get_users_for_domain, get_groups_for_domain, create_group, add_user_to_group\n'), ((1515, 1535), 'api_google.google_api_sheets.get_sheets_service', 'get_sheets_service', ([], {}), '()\n', (1533, 1535), False, 'from api_google.google_api_sheets import get_sheets_service, get_multiple_ranges\n'), ((1964, 1981), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1975, 1981), False, 'import pickle\n'), ((3084, 3101), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3095, 3101), False, 'import pickle\n'), ((4799, 4829), 'pickle.dump', 'pickle.dump', (['group_users', 'file'], {}), '(group_users, file)\n', (4810, 4829), False, 'import pickle\n'), ((4931, 4948), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (4942, 4948), False, 'import pickle\n')]
#!/usr/bin/env python

# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""
Lidar/BB check for CARLA

This script obtains the LiDAR's point cloud corresponding to all the vehicles
of the scene and makes sure that they are inside the bounding box of the
corresponding actor.
This is done in a predefined route in Town03 with a high speed and several
aggressive turns.

In a nutshell, the script has a queue that is filled in each frame with a lidar
point cloud and a structure for storing the Bounding Boxes. This last one is
emulated as a sensor filling the queue in the on_tick callback of the
carla.world. In this way, we make sure that we are correctly synchronizing the
lidar point cloud and BB/actor transformations.
Then, we select the points corresponding to each actor (car) in the scene and
check they are inside the bounding boxes of that actor, all in each vehicle
frame of reference.

Important Data structure description:
+ Lidar data structure: four element tuple with:
  - [0] Frame
  - [1] Sensor name: 'semlidar'
  - [2] Point cloud in the form of a numpy dictionary with all semantic lidar information
  - [3] Global transformation of the sensor
+ Bounding box data structure: four element tuple with:
  - [0] Frame
  - [1] Sensor name: 'bb'
  - [2] List of actor information: each a tuple with:
    - [0] Actor id
    - [1] Actor type (blueprint's name)
    - [2] Actor's global transformation
    - [3] Actor's bounding box
+ ActorTrace class: takes the Lidar data structure and one actor information and
  checks if all the data points related with this actor are inside its BB.
  This is done in the local coordinate frame of the actor and should be done like:
    trace = ActorTrace(actor_info, lidar_data)
    trace.process()
    trace.check_lidar_data()
"""

import glob
import os
import sys

import numpy as np

from queue import Queue
from queue import Empty

try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass

import carla


class ActorTrace(object):
    """Class that store and process information about an actor at certain moment."""
    def __init__(self, actor, lidar):
        self.set_lidar(lidar)
        self.set_actor(actor)
        self._lidar_pc_local = np.array([])
        self._bb_vertices = np.array([])
        self._bb_minlimits = [0, 0, 0]
        self._bb_maxlimits = [0, 0, 0]

    def set_lidar(self, lidar):
        self._frame = lidar[0]
        self._lidar_data = lidar[2]
        self._lidar_transf = lidar[3]

    def set_actor(self, actor):
        self._actor_id = actor[0]
        self._actor_type = actor[1]
        self._actor_transf = actor[2]
        self._actor_bb = actor[3]

    def process(self):
        # Filter lidar points that correspond to my actor id
        data_actor = self._lidar_data[self._lidar_data['ObjIdx'] == self._actor_id]

        # Take the xyz point cloud data and transform it to actor's frame
        points = np.array([data_actor['x'], data_actor['y'], data_actor['z']]).T
        points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
        points = np.dot(self._lidar_transf.get_matrix(), points.T).T          # sensor -> world
        points = np.dot(self._actor_transf.get_inverse_matrix(), points.T).T  # world -> actor
        points = points[:, :-1]

        # Saving the points in 'local' coordinates
        self._lidar_pc_local = points

        # We compute the limits in the local frame of reference using the
        # vertices of the bounding box
        vertices = self._actor_bb.get_local_vertices()
        ver_py = []
        for v in vertices:
            ver_py.append([v.x, v.y, v.z])
        ver_np = np.array(ver_py)
        self._bb_vertices = ver_np

        self._bb_minlimits = ver_np.min(axis=0) - 0.001
        self._bb_maxlimits = ver_np.max(axis=0) + 0.001

    def print(self, print_if_empty=False):
        if self._lidar_pc_local.shape[0] > 0 or print_if_empty:
            np.savetxt("veh_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._lidar_pc_local)
            np.savetxt("bb_data_%d_%s_%d.out" % (self._frame, self._actor_type, self._actor_id), self._bb_vertices)

    def lidar_is_outside_bb(self, check_axis=[True, True, True]):
        lidar_pc = self._lidar_pc_local

        if check_axis[0]:
            xmin = self._bb_minlimits[0]
            xmax = self._bb_maxlimits[0]
            out = np.any((lidar_pc[:, 0] > xmax) | (lidar_pc[:, 0] < xmin))
            if out:
                print("Problem with x axis")
                return True

        if check_axis[1]:
            ymin = self._bb_minlimits[1]
            ymax = self._bb_maxlimits[1]
            out = np.any((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))
            if out:
                print("Problem with y axis")
                return True

        if check_axis[2]:
            zmin = self._bb_minlimits[2]
            zmax = self._bb_maxlimits[2]
            out = np.any((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))
            if out:
                print("Problem with z axis")
                return True

        return False

    def check_lidar_data(self):
        if self.lidar_is_outside_bb():
            print("Error!!! Points of lidar point cloud are outside its BB for car %d: %s " % (self._actor_id, self._actor_type))
            self.print()
            return False
        else:
            return True


def wait(world, frames=100, queue=None, slist=None):
    for i in range(0, frames):
        world.tick()

        if queue != None and slist != None:
            try:
                for _i in range(0, len(slist)):
                    s_frame = queue.get(True, 1.0)
            except Empty:
                print("    Some of the sensor information is missed")


# Sensor callback.
# This is where you receive the sensor data and
# process it as you like; the important part is that,
# at the end, it should add an element into the sensor queue.
def lidar_callback(sensor_data, sensor_queue, sensor_name):
    sensor_pc_local = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
        ('x', np.float32), ('y', np.float32), ('z', np.float32),
        ('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
    sensor_transf = sensor_data.transform
    sensor_queue.put((sensor_data.frame, sensor_name, sensor_pc_local, sensor_transf))


def bb_callback(snapshot, world, sensor_queue, sensor_name):
    data_array = []

    vehicles = world.get_actors().filter('vehicle.*')
    for actor in vehicles:
        data_array.append((actor.id, actor.type_id, actor.get_transform(), actor.bounding_box))

    sensor_queue.put((snapshot.frame, sensor_name, data_array))


def move_spectator(world, actor):
    actor_tr = actor.get_transform()
    spectator_transform = carla.Transform(actor_tr.location, actor_tr.rotation)
    spectator_transform.location -= actor_tr.get_forward_vector() * 5
    spectator_transform.location -= actor_tr.get_up_vector() * 3
    spectator = world.get_spectator()
    spectator.set_transform(spectator_transform)


def world_callback(snapshot, world, sensor_queue, sensor_name, actor):
    move_spectator(world, actor)
    bb_callback(snapshot, world, sensor_queue, sensor_name)


def process_sensors(w_frame, sensor_queue, sensor_number):
    if sensor_number != 2:
        print("Error!!! Sensor number should be two")

    sl_data = None
    bb_data = None
    try:
        for i in range(0, sensor_number):
            s_frame = sensor_queue.get(True, 1.0)
            while s_frame[0] != w_frame:
                print("Warning! Mismatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))
                print("This could be due to accumulated data for previous steps")
                s_frame = sensor_queue.get(True, 1.0)

            if s_frame[1] == "semlidar":
                sl_data = s_frame
            elif s_frame[1] == "bb":
                bb_data = s_frame
            #print("    Frame: %d   Sensor: %s   Len: %d " % (s_frame[0], s_frame[1], len(s_frame[2])))
    except Empty:
        print("Error!!! The needed information is not here!!!")
        return

    if sl_data == None or bb_data == None:
        print("Error!!! Mismatch for sensor %s in the frame timestamp (w: %d, s: %d)" % (s_frame[1], w_frame, s_frame[0]))

    for actor_data in bb_data[2]:
        trace_vehicle = ActorTrace(actor_data, sl_data)
        trace_vehicle.process()
        trace_vehicle.check_lidar_data()


class SpawnCar(object):
    def __init__(self, location, rotation, filter="vehicle.*", autopilot=False, velocity=None):
        self._filter = filter
        self._transform = carla.Transform(location, rotation)
        self._autopilot = autopilot
        self._velocity = velocity
        self._actor = None
        self._world = None

    def spawn(self, world):
        self._world = world
        actor_BP = world.get_blueprint_library().filter(self._filter)[0]
        self._actor = world.spawn_actor(actor_BP, self._transform)
        self._actor.set_autopilot(True)

        return self._actor

    def destroy(self):
        if self._actor != None:
            self._actor.destroy()


CarPropList = [
    SpawnCar(carla.Location(x=83, y=-40, z=5), carla.Rotation(yaw=-90), filter="*lincoln*", autopilot=True),
    SpawnCar(carla.Location(x=83, y=-30, z=3), carla.Rotation(yaw=-90), filter="*ambulance*", autopilot=True),
    SpawnCar(carla.Location(x=83, y=-20, z=3), carla.Rotation(yaw=-90), filter="*etron*", autopilot=True),
    SpawnCar(carla.Location(x=120, y=-3.5, z=2), carla.Rotation(yaw=+180), filter="*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=100, y=-3.5, z=2), carla.Rotation(yaw=+180), filter="*etron*", autopilot=True),
    SpawnCar(carla.Location(x=140, y=-3.5, z=2), carla.Rotation(yaw=+180), filter="*model3*", autopilot=True),
    SpawnCar(carla.Location(x=160, y=-3.5, z=2), carla.Rotation(yaw=+180), filter="*impala*", autopilot=False),
    SpawnCar(carla.Location(x=180, y=-3.5, z=2), carla.Rotation(yaw=+180), filter="*a2*", autopilot=True),
    SpawnCar(carla.Location(x=60, y=+6, z=2), carla.Rotation(yaw=+00), filter="*model3*", autopilot=True),
    SpawnCar(carla.Location(x=80, y=+6, z=2), carla.Rotation(yaw=+00), filter="*etron*", autopilot=True),
    SpawnCar(carla.Location(x=100, y=+6, z=2), carla.Rotation(yaw=+00), filter="*mustan*", autopilot=True),
    SpawnCar(carla.Location(x=120, y=+6, z=2), carla.Rotation(yaw=+00), filter="*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=140, y=+6, z=2), carla.Rotation(yaw=+00), filter="*impala*", autopilot=True),
    SpawnCar(carla.Location(x=160, y=+6, z=2), carla.Rotation(yaw=+00), filter="*prius*", autopilot=True),
    SpawnCar(carla.Location(x=234, y=+20, z=2), carla.Rotation(yaw=+90), filter="*dodge*", autopilot=True),
    SpawnCar(carla.Location(x=234, y=+40, z=2), carla.Rotation(yaw=+90), filter="*isetta*", autopilot=True),
    SpawnCar(carla.Location(x=234, y=+80, z=2), carla.Rotation(yaw=+90), filter="*tt*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=-40, z=2), carla.Rotation(yaw=-90), filter="*etron*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=-20, z=2), carla.Rotation(yaw=-90), filter="*mkz2017*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+00, z=2), carla.Rotation(yaw=-90), filter="*mustan*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+20, z=2), carla.Rotation(yaw=-90), filter="*dodge*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+40, z=2), carla.Rotation(yaw=-90), filter="*charger2020*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+60, z=2), carla.Rotation(yaw=-90), filter="*lincoln2020*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+80, z=2), carla.Rotation(yaw=-90), filter="*tt*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+100, z=2), carla.Rotation(yaw=-90), filter="*a2*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+120, z=2), carla.Rotation(yaw=-90), filter="*wrangler_rubicon*", autopilot=True),
    SpawnCar(carla.Location(x=243, y=+140, z=2), carla.Rotation(yaw=-90), filter="*c3*", autopilot=True)
]


def spawn_prop_vehicles(world):
    for car in CarPropList:
        car.spawn(world)


def destroy_prop_vehicles():
    for car in CarPropList:
        car.destroy()


def main():
    # We start creating the client
    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)
    world = client.get_world()

    try:
        # We need to save the settings to be able to recover them at the end
        # of the script to leave the server in the same state that we found it.
        original_settings = world.get_settings()
        settings = world.get_settings()

        # We set CARLA synchronous mode
        settings.fixed_delta_seconds = 0.05
        settings.synchronous_mode = True
        world.apply_settings(settings)

        traffic_manager = client.get_trafficmanager(8000)
        traffic_manager.set_synchronous_mode(True)

        # We create the sensor queue in which we keep track of the information
        # already received. This structure is thread safe and can be
        # accessed by all the sensors callback concurrently without problem.
        sensor_queue = Queue()

        # Spawning ego vehicle
        actor_BP = world.get_blueprint_library().filter("vehicle.lincoln.mkz2017")[0]
        car_tr = carla.Transform(carla.Location(x=239, y=125, z=0.9), carla.Rotation(yaw=-88.5))
        actor = world.spawn_actor(actor_BP, car_tr)

        world.tick()
        move_spectator(world, actor)

        spawn_prop_vehicles(world)

        wait(world, 10)

        # We create all the sensors and keep them in a list for convenience.
        sensor_list = []

        lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic')
        lidar_bp.set_attribute('channels', '64')
        lidar_bp.set_attribute('points_per_second', '500000')
        lidar_bp.set_attribute('range', '300')
        lidar_bp.set_attribute('upper_fov', '10.0')
        lidar_bp.set_attribute('lower_fov', '-90.0')
        lidar_tr = carla.Transform(carla.Location(z=3), carla.Rotation(yaw=0))
        lidar = world.spawn_actor(lidar_bp, lidar_tr, attach_to=actor)
        lidar.listen(lambda data: lidar_callback(data, sensor_queue, "semlidar"))
        world.on_tick(lambda snapshot: world_callback(snapshot, world, sensor_queue, "bb", actor))
        sensor_list.append(lidar)
        sensor_list.append(actor)  # actor acts as a 'sensor' to simplify bb-lidar data comparison

        # Set autopilot for main vehicle
        actor.enable_constant_velocity(carla.Vector3D(20, 0, 0))

        for _i in range(0, 100):
            # Tick the server
            world.tick()

            w_frame = world.get_snapshot().frame
            process_sensors(w_frame, sensor_queue, len(sensor_list))

        actor.disable_constant_velocity()

    finally:
        world.apply_settings(original_settings)

        # Destroy all the actors
        destroy_prop_vehicles()
        for sensor in sensor_list:
            sensor.destroy()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
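A tiny, self-contained illustration of the containment test that the docstring above describes (the points and box limits are made up and it runs independently of CARLA):

# Illustration only: points already expressed in the actor's local frame, rows are (x, y, z).
import numpy as np

points_local = np.array([[0.5, 0.2, 0.1],
                         [2.4, 0.0, 0.3]])  # the second point deliberately falls outside the box
bb_min = np.array([-2.3, -1.0, -0.8]) - 0.001
bb_max = np.array([2.3, 1.0, 0.8]) + 0.001

outside = np.any((points_local > bb_max) | (points_local < bb_min), axis=1)
print(outside)  # [False  True] -> the second point violates the box along the x axis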
[ "numpy.dtype", "numpy.ones", "carla.Transform", "carla.Vector3D", "carla.Location", "numpy.any", "numpy.array", "carla.Client", "numpy.savetxt", "queue.Queue", "carla.Rotation", "glob.glob" ]
[((7225, 7278), 'carla.Transform', 'carla.Transform', (['actor_tr.location', 'actor_tr.rotation'], {}), '(actor_tr.location, actor_tr.rotation)\n', (7240, 7278), False, 'import carla\n'), ((12931, 12962), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (12943, 12962), False, 'import carla\n'), ((2629, 2641), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2637, 2641), True, 'import numpy as np\n'), ((2670, 2682), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2678, 2682), True, 'import numpy as np\n'), ((4060, 4076), 'numpy.array', 'np.array', (['ver_py'], {}), '(ver_py)\n', (4068, 4076), True, 'import numpy as np\n'), ((9115, 9150), 'carla.Transform', 'carla.Transform', (['location', 'rotation'], {}), '(location, rotation)\n', (9130, 9150), False, 'import carla\n'), ((9665, 9697), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-40)', 'z': '(5)'}), '(x=83, y=-40, z=5)\n', (9679, 9697), False, 'import carla\n'), ((9702, 9725), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9716, 9725), False, 'import carla\n'), ((9779, 9811), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-30)', 'z': '(3)'}), '(x=83, y=-30, z=3)\n', (9793, 9811), False, 'import carla\n'), ((9816, 9839), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9830, 9839), False, 'import carla\n'), ((9895, 9927), 'carla.Location', 'carla.Location', ([], {'x': '(83)', 'y': '(-20)', 'z': '(3)'}), '(x=83, y=-20, z=3)\n', (9909, 9927), False, 'import carla\n'), ((9932, 9955), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (9946, 9955), False, 'import carla\n'), ((10007, 10041), 'carla.Location', 'carla.Location', ([], {'x': '(120)', 'y': '(-3.5)', 'z': '(2)'}), '(x=120, y=-3.5, z=2)\n', (10021, 10041), False, 'import carla\n'), ((10044, 10068), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10058, 10068), False, 'import carla\n'), ((10120, 10154), 'carla.Location', 'carla.Location', ([], {'x': '(100)', 'y': '(-3.5)', 'z': '(2)'}), '(x=100, y=-3.5, z=2)\n', (10134, 10154), False, 'import carla\n'), ((10157, 10181), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10171, 10181), False, 'import carla\n'), ((10232, 10266), 'carla.Location', 'carla.Location', ([], {'x': '(140)', 'y': '(-3.5)', 'z': '(2)'}), '(x=140, y=-3.5, z=2)\n', (10246, 10266), False, 'import carla\n'), ((10269, 10293), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10283, 10293), False, 'import carla\n'), ((10345, 10379), 'carla.Location', 'carla.Location', ([], {'x': '(160)', 'y': '(-3.5)', 'z': '(2)'}), '(x=160, y=-3.5, z=2)\n', (10359, 10379), False, 'import carla\n'), ((10382, 10406), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10396, 10406), False, 'import carla\n'), ((10459, 10493), 'carla.Location', 'carla.Location', ([], {'x': '(180)', 'y': '(-3.5)', 'z': '(2)'}), '(x=180, y=-3.5, z=2)\n', (10473, 10493), False, 'import carla\n'), ((10496, 10520), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+180)'}), '(yaw=+180)\n', (10510, 10520), False, 'import carla\n'), ((10568, 10599), 'carla.Location', 'carla.Location', ([], {'x': '(60)', 'y': '(+6)', 'z': '(2)'}), '(x=60, y=+6, z=2)\n', (10582, 10599), False, 'import carla\n'), ((10605, 10627), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10619, 10627), False, 'import carla\n'), ((10681, 10712), 
'carla.Location', 'carla.Location', ([], {'x': '(80)', 'y': '(+6)', 'z': '(2)'}), '(x=80, y=+6, z=2)\n', (10695, 10712), False, 'import carla\n'), ((10718, 10740), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10732, 10740), False, 'import carla\n'), ((10793, 10825), 'carla.Location', 'carla.Location', ([], {'x': '(100)', 'y': '(+6)', 'z': '(2)'}), '(x=100, y=+6, z=2)\n', (10807, 10825), False, 'import carla\n'), ((10830, 10852), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10844, 10852), False, 'import carla\n'), ((10906, 10938), 'carla.Location', 'carla.Location', ([], {'x': '(120)', 'y': '(+6)', 'z': '(2)'}), '(x=120, y=+6, z=2)\n', (10920, 10938), False, 'import carla\n'), ((10943, 10965), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (10957, 10965), False, 'import carla\n'), ((11019, 11051), 'carla.Location', 'carla.Location', ([], {'x': '(140)', 'y': '(+6)', 'z': '(2)'}), '(x=140, y=+6, z=2)\n', (11033, 11051), False, 'import carla\n'), ((11056, 11078), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (11070, 11078), False, 'import carla\n'), ((11132, 11164), 'carla.Location', 'carla.Location', ([], {'x': '(160)', 'y': '(+6)', 'z': '(2)'}), '(x=160, y=+6, z=2)\n', (11146, 11164), False, 'import carla\n'), ((11169, 11191), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+0)'}), '(yaw=+0)\n', (11183, 11191), False, 'import carla\n'), ((11244, 11277), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+20)', 'z': '(2)'}), '(x=234, y=+20, z=2)\n', (11258, 11277), False, 'import carla\n'), ((11281, 11304), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11295, 11304), False, 'import carla\n'), ((11356, 11389), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+40)', 'z': '(2)'}), '(x=234, y=+40, z=2)\n', (11370, 11389), False, 'import carla\n'), ((11393, 11416), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11407, 11416), False, 'import carla\n'), ((11469, 11502), 'carla.Location', 'carla.Location', ([], {'x': '(234)', 'y': '(+80)', 'z': '(2)'}), '(x=234, y=+80, z=2)\n', (11483, 11502), False, 'import carla\n'), ((11506, 11529), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(+90)'}), '(yaw=+90)\n', (11520, 11529), False, 'import carla\n'), ((11578, 11611), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(-40)', 'z': '(2)'}), '(x=243, y=-40, z=2)\n', (11592, 11611), False, 'import carla\n'), ((11615, 11638), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11629, 11638), False, 'import carla\n'), ((11690, 11723), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(-20)', 'z': '(2)'}), '(x=243, y=-20, z=2)\n', (11704, 11723), False, 'import carla\n'), ((11727, 11750), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11741, 11750), False, 'import carla\n'), ((11804, 11836), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+0)', 'z': '(2)'}), '(x=243, y=+0, z=2)\n', (11818, 11836), False, 'import carla\n'), ((11841, 11864), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11855, 11864), False, 'import carla\n'), ((11917, 11950), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+20)', 'z': '(2)'}), '(x=243, y=+20, z=2)\n', (11931, 11950), False, 'import carla\n'), ((11954, 11977), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (11968, 
11977), False, 'import carla\n'), ((12029, 12062), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+40)', 'z': '(2)'}), '(x=243, y=+40, z=2)\n', (12043, 12062), False, 'import carla\n'), ((12066, 12089), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12080, 12089), False, 'import carla\n'), ((12147, 12180), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+60)', 'z': '(2)'}), '(x=243, y=+60, z=2)\n', (12161, 12180), False, 'import carla\n'), ((12184, 12207), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12198, 12207), False, 'import carla\n'), ((12265, 12298), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+80)', 'z': '(2)'}), '(x=243, y=+80, z=2)\n', (12279, 12298), False, 'import carla\n'), ((12302, 12325), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12316, 12325), False, 'import carla\n'), ((12374, 12408), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+100)', 'z': '(2)'}), '(x=243, y=+100, z=2)\n', (12388, 12408), False, 'import carla\n'), ((12411, 12434), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12425, 12434), False, 'import carla\n'), ((12483, 12517), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+120)', 'z': '(2)'}), '(x=243, y=+120, z=2)\n', (12497, 12517), False, 'import carla\n'), ((12520, 12543), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12534, 12543), False, 'import carla\n'), ((12606, 12640), 'carla.Location', 'carla.Location', ([], {'x': '(243)', 'y': '(+140)', 'z': '(2)'}), '(x=243, y=+140, z=2)\n', (12620, 12640), False, 'import carla\n'), ((12643, 12666), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-90)'}), '(yaw=-90)\n', (12657, 12666), False, 'import carla\n'), ((13801, 13808), 'queue.Queue', 'Queue', ([], {}), '()\n', (13806, 13808), False, 'from queue import Queue\n'), ((2166, 2324), 'glob.glob', 'glob.glob', (["('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major, sys.\n version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major,\n sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64')\n )\n", (2175, 2324), False, 'import glob\n'), ((3335, 3396), 'numpy.array', 'np.array', (["[data_actor['x'], data_actor['y'], data_actor['z']]"], {}), "([data_actor['x'], data_actor['y'], data_actor['z']])\n", (3343, 3396), True, 'import numpy as np\n'), ((3434, 3463), 'numpy.ones', 'np.ones', (['(points.shape[0], 1)'], {}), '((points.shape[0], 1))\n', (3441, 3463), True, 'import numpy as np\n'), ((4348, 4460), 'numpy.savetxt', 'np.savetxt', (["('veh_data_%d_%s_%d.out' % (self._frame, self._actor_type, self._actor_id))", 'self._lidar_pc_local'], {}), "('veh_data_%d_%s_%d.out' % (self._frame, self._actor_type, self.\n _actor_id), self._lidar_pc_local)\n", (4358, 4460), True, 'import numpy as np\n'), ((4468, 4576), 'numpy.savetxt', 'np.savetxt', (["('bb_data_%d_%s_%d.out' % (self._frame, self._actor_type, self._actor_id))", 'self._bb_vertices'], {}), "('bb_data_%d_%s_%d.out' % (self._frame, self._actor_type, self.\n _actor_id), self._bb_vertices)\n", (4478, 4576), True, 'import numpy as np\n'), ((4809, 4866), 'numpy.any', 'np.any', (['((lidar_pc[:, 0] > xmax) | (lidar_pc[:, 0] < xmin))'], {}), '((lidar_pc[:, 0] > xmax) | (lidar_pc[:, 0] < xmin))\n', (4815, 4866), True, 'import numpy as np\n'), ((5085, 5142), 'numpy.any', 'np.any', 
(['((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))'], {}), '((lidar_pc[:, 1] > ymax) | (lidar_pc[:, 1] < ymin))\n', (5091, 5142), True, 'import numpy as np\n'), ((5363, 5420), 'numpy.any', 'np.any', (['((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))'], {}), '((lidar_pc[:, 2] > zmax) | (lidar_pc[:, 2] < zmin))\n', (5369, 5420), True, 'import numpy as np\n'), ((6515, 6659), 'numpy.dtype', 'np.dtype', (["[('x', np.float32), ('y', np.float32), ('z', np.float32), ('CosAngle', np.\n float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]"], {}), "([('x', np.float32), ('y', np.float32), ('z', np.float32), (\n 'CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)])\n", (6523, 6659), True, 'import numpy as np\n'), ((13960, 13995), 'carla.Location', 'carla.Location', ([], {'x': '(239)', 'y': '(125)', 'z': '(0.9)'}), '(x=239, y=125, z=0.9)\n', (13974, 13995), False, 'import carla\n'), ((13997, 14022), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(-88.5)'}), '(yaw=-88.5)\n', (14011, 14022), False, 'import carla\n'), ((14686, 14705), 'carla.Location', 'carla.Location', ([], {'z': '(3)'}), '(z=3)\n', (14700, 14705), False, 'import carla\n'), ((14707, 14728), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(0)'}), '(yaw=0)\n', (14721, 14728), False, 'import carla\n'), ((15204, 15228), 'carla.Vector3D', 'carla.Vector3D', (['(20)', '(0)', '(0)'], {}), '(20, 0, 0)\n', (15218, 15228), False, 'import carla\n')]
# <NAME>
# Software Engineering 001
# jumble_solver.py
# 2/17/2021

# Assignment:
# Consider the game "Jumble"
# https://www.sandiegouniontribune.com/sd-jumble-daily-htmlstory.html
# Create a Python program to find the individual words in Jumble puzzles such
# that INJURE prints after entering the following: solve("JNUIER")

from PyDictionary import PyDictionary  # Installation: pip install PyDictionary
from math import factorial
from random import shuffle

def solve(jumble):
    combos = []
    chars = list(jumble.upper())
    dictionary = PyDictionary()  # renamed from `dict` so the builtin is not shadowed

    # Maximum number of unique arrangements of chars: n! divided by the factorial of
    # each repeated letter's count, so jumbles with repeated letters also terminate
    limit = factorial(len(chars))
    for ch in set(chars):
        limit //= factorial(chars.count(ch))

    while len(combos) < limit:
        # Generates a random string from chars
        shuffle(chars)
        tmp = "".join(chars)

        # Appends tmp to combos list only if it is unique
        if tmp not in combos:
            combos.append(tmp)

            # Prints tmp only if it returns an English meaning
            if dictionary.meaning(tmp, disable_errors=True):
                print(tmp)
                break
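# Usage sketch (added for illustration, not part of the original assignment):
# running the module directly tries the example given in the prompt above.
if __name__ == "__main__":
    solve("JNUIER")  # expected to print INJURE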
[ "PyDictionary.PyDictionary", "random.shuffle" ]
[((540, 554), 'PyDictionary.PyDictionary', 'PyDictionary', ([], {}), '()\n', (552, 554), False, 'from PyDictionary import PyDictionary\n'), ((727, 741), 'random.shuffle', 'shuffle', (['chars'], {}), '(chars)\n', (734, 741), False, 'from random import shuffle\n')]
# Copyright (c) 2018 <NAME>, Inc. # # This file is part of Mycroft Skills Manager # (see https://github.com/MycroftAI/mycroft-skills-manager). # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import logging import os import shutil import subprocess import yaml from contextlib import contextmanager from difflib import SequenceMatcher from functools import wraps from git import Repo, GitError from git.exc import GitCommandError from lazy import lazy from os.path import exists, join, basename, dirname, isfile from shutil import rmtree, move from subprocess import PIPE, Popen from tempfile import mktemp, gettempdir from threading import Lock from typing import Callable from pako import PakoManager from msm import SkillRequirementsException, git_to_msm_exceptions from msm.exceptions import PipRequirementsException, \ SystemRequirementsException, AlreadyInstalled, SkillModified, \ AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException from msm.util import cached_property, Git LOG = logging.getLogger(__name__) # Branches which can be switched from when updating # TODO Make this configurable SWITCHABLE_BRANCHES = ['master'] # default constraints to use if no are given DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt' FIVE_MINUTES = 300 @contextmanager def work_dir(directory): old_dir = os.getcwd() os.chdir(directory) try: yield finally: os.chdir(old_dir) def _backup_previous_version(func: Callable = None): """Private decorator to back up previous skill folder""" @wraps(func) def wrapper(self, *args, **kwargs): self.old_path = None if self.is_local: self.old_path = join(gettempdir(), self.name) if exists(self.old_path): rmtree(self.old_path) shutil.copytree(self.path, self.old_path) try: func(self, *args, **kwargs) # Modified skill or GitError should not restore working copy except (SkillModified, GitError, GitException): raise except Exception: LOG.info('Problem performing action. 
Restoring skill to ' 'previous state...') if exists(self.path): rmtree(self.path) if self.old_path and exists(self.old_path): shutil.copytree(self.old_path, self.path) self.is_local = exists(self.path) raise finally: # Remove temporary path if needed if self.old_path and exists(self.old_path): rmtree(self.old_path) return wrapper class SkillEntry(object): pip_lock = Lock() manifest_yml_format = { 'dependencies': { 'system': {}, 'exes': [], 'skill': [], 'python': [] } } def __init__(self, name, path, url='', sha='', msm=None): url = url.rstrip('/') url = url[:-len('.git')] if url.endswith('.git') else url self.path = path self.url = url self.sha = sha self.msm = msm if msm: u = url.lower() self.meta_info = msm.repo.skills_meta_info.get(u, {}) else: self.meta_info = {} if name is not None: self.name = name elif 'name' in self.meta_info: self.name = self.meta_info['name'] else: self.name = basename(path) # TODO: Handle git:// urls as well from_github = False if url.startswith('https://'): url_tokens = url.rstrip("/").split("/") from_github = url_tokens[-3] == 'github.com' if url else False self.author = self.extract_author(url) if from_github else '' self.id = self.extract_repo_id(url) if from_github else self.name self.is_local = exists(path) self.old_path = None # Path of previous version while upgrading @property def is_beta(self): return not self.sha or self.sha == 'HEAD' @property def is_dirty(self): """True if different from the version in the mycroft-skills repo. Considers a skill dirty if - the checkout sha doesn't match the mycroft-skills repo - the skill doesn't exist in the mycroft-skills repo - the skill is not a git repo - has local modifications """ if not exists(self.path): return False try: checkout = Git(self.path) mod = checkout.status(porcelain=True, untracked_files='no') != '' current_sha = checkout.rev_parse('HEAD') except GitCommandError: # Not a git checkout return True skill_shas = {d[0]: d[3] for d in self.msm.repo.get_skill_data()} return (self.name not in skill_shas or current_sha != skill_shas[self.name] or mod) @cached_property(ttl=FIVE_MINUTES) def skill_gid(self): """Format skill gid for the skill. This property does some Git gymnastics to determine its return value. When a device boots, each skill accesses this property several times. To reduce the amount of boot time, cache the value returned by this property. Cache expires five minutes after it is generated. """ LOG.debug('Generating skill_gid for ' + self.name) gid = '' if self.is_dirty: gid += '@|' if self.meta_info != {}: gid += self.meta_info['skill_gid'] else: name = self.name.split('.')[0] gid += name return gid def __str__(self): return self.name def attach(self, remote_entry): """Attach a remote entry to a local entry""" self.name = remote_entry.name self.sha = remote_entry.sha self.url = remote_entry.url self.author = remote_entry.author return self @classmethod def from_folder(cls, path, msm=None, use_cache=True): """Find or create skill entry from folder path. Arguments: path: path of skill folder msm: msm instance to use for caching and extended information retrieval. use_cache: Enable/Disable cache usage. 
defaults to True """ if msm and use_cache: skills = {skill.path: skill for skill in msm.local_skills.values()} if path in skills: return skills[path] return cls(None, path, cls.find_git_url(path), msm=msm) @classmethod def create_path(cls, folder, url, name=''): return join(folder, '{}.{}'.format( name or cls.extract_repo_name(url), cls.extract_author(url) ).lower()) @staticmethod def extract_repo_name(url): s = url.rstrip('/').split("/")[-1] a, b, c = s.rpartition('.git') if not c: return a return s @staticmethod def extract_author(url): return url.rstrip('/').split("/")[-2].split(':')[-1] @classmethod def extract_repo_id(cls, url): return '{}:{}'.format(cls.extract_author(url).lower(), cls.extract_repo_name(url)).lower() @staticmethod def _tokenize(x): return x.replace('-', ' ').split() @staticmethod def _extract_tokens(s, tokens): s = s.lower().replace('-', ' ') extracted = [] for token in tokens: extracted += [token] * s.count(token) s = s.replace(token, '') s = ' '.join(i for i in s.split(' ') if i) tokens = [i for i in s.split(' ') if i] return s, tokens, extracted @classmethod def _compare(cls, a, b): return SequenceMatcher(a=a, b=b).ratio() def match(self, query, author=None): search, search_tokens, search_common = self._extract_tokens( query, ['skill', 'fallback', 'mycroft'] ) name, name_tokens, name_common = self._extract_tokens( self.name, ['skill', 'fallback', 'mycroft'] ) weights = [ (9, self._compare(name, search)), (9, self._compare(name.split(' '), search_tokens)), (2, self._compare(name_common, search_common)), ] if author: author_weight = self._compare(self.author, author) weights.append((5, author_weight)) author_weight = author_weight else: author_weight = 1.0 return author_weight * ( sum(weight * val for weight, val in weights) / sum(weight for weight, val in weights) ) def run_pip(self, constraints=None): if not self.dependent_python_packages: return False # Use constraints to limit the installed versions if constraints and not exists(constraints): LOG.error('Couldn\'t find the constraints file') return False elif exists(DEFAULT_CONSTRAINTS): constraints = DEFAULT_CONSTRAINTS LOG.info('Installing requirements.txt for ' + self.name) can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK) pip_args = [sys.executable, '-m', 'pip', 'install'] if constraints: pip_args += ['-c', constraints] if not can_pip: pip_args = ['sudo', '-n'] + pip_args with self.pip_lock: """ Iterate over the individual Python packages and install them one by one to enforce the order specified in the manifest. """ for dependent_python_package in self.dependent_python_packages: pip_command = pip_args + [dependent_python_package] proc = Popen(pip_command, stdout=PIPE, stderr=PIPE) pip_code = proc.wait() if pip_code != 0: stderr = proc.stderr.read().decode() if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo': raise PipRequirementsException( 2, '', 'Permission denied while installing pip ' 'dependencies. 
Please run in virtualenv or use sudo' ) raise PipRequirementsException( pip_code, proc.stdout.read().decode(), stderr ) return True def install_system_deps(self): self.run_requirements_sh() system_packages = { exe: (packages or '').split() for exe, packages in self.dependent_system_packages.items() } LOG.info('Installing system requirements...') all_deps = system_packages.pop('all', []) try: manager = PakoManager() success = manager.install(all_deps, overrides=system_packages) except RuntimeError as e: LOG.warning('Failed to launch package manager: {}'.format(e)) success = False missing_exes = [ exe for exe in self.dependencies.get('exes') or [] if not shutil.which(exe) ] if missing_exes: if not success: LOG.warning('Failed to install dependencies.') if all_deps: LOG.warning('Please install manually: {}'.format( ' '.join(all_deps) )) raise SkillRequirementsException('Could not find exes: {}'.format( ', '.join(missing_exes) )) return success def run_requirements_sh(self): setup_script = join(self.path, "requirements.sh") if not exists(setup_script): return False with work_dir(self.path): rc = subprocess.call(["bash", setup_script]) if rc != 0: LOG.error("Requirements.sh failed with error code: " + str(rc)) raise SystemRequirementsException(rc) LOG.info("Successfully ran requirements.sh for " + self.name) return True def run_skill_requirements(self): if not self.msm: raise ValueError('Pass msm to SkillEntry to install skill deps') try: for skill_dep in self.dependent_skills: LOG.info("Installing skill dependency: {}".format(skill_dep)) try: self.msm.install(skill_dep) except AlreadyInstalled: pass except Exception as e: raise SkillRequirementsException(e) def verify_info(self, info, fmt): if not info: return if not isinstance(info, type(fmt)): LOG.warning('Invalid value type manifest.yml for {}: {}'.format( self.name, type(info) )) return if not isinstance(info, dict) or not fmt: return for key in info: if key not in fmt: LOG.warning('Unknown key in manifest.yml for {}: {}'.format( self.name, key )) continue self.verify_info(info[key], fmt[key]) @lazy def skill_info(self): yml_path = join(self.path, 'manifest.yml') if exists(yml_path): LOG.info('Reading from manifest.yml') with open(yml_path) as f: info = yaml.safe_load(f) self.verify_info(info, self.manifest_yml_format) return info or {} return {} @lazy def dependencies(self): return self.skill_info.get('dependencies') or {} @lazy def dependent_skills(self): skills = set() reqs = join(self.path, "skill_requirements.txt") if exists(reqs): with open(reqs, "r") as f: for i in f.readlines(): skill = i.strip() if skill: skills.add(skill) for i in self.dependencies.get('skill') or []: skills.add(i) return list(skills) @lazy def dependent_python_packages(self): reqs = join(self.path, "requirements.txt") req_lines = [] if exists(reqs): with open(reqs, "r") as f: req_lines += f.readlines() req_lines += self.dependencies.get('python') or [] # Strip comments req_lines = [l.split('#')[0].strip() for l in req_lines] return [i for i in req_lines if i] # Strip empty lines @lazy def dependent_system_packages(self): return self.dependencies.get('system') or {} def remove(self): if not self.is_local: raise AlreadyRemoved(self.name) try: rmtree(self.path) self.is_local = False except OSError as e: raise RemoveException(str(e)) LOG.info('Successfully removed ' + self.name) @_backup_previous_version def install(self, constraints=None): if self.is_local: raise AlreadyInstalled(self.name) LOG.info("Downloading skill: " + self.url) try: tmp_location = mktemp() 
Repo.clone_from(self.url, tmp_location) self.is_local = True Git(tmp_location).reset(self.sha or 'HEAD', hard=True) except GitCommandError as e: raise CloneException(e.stderr) if isfile(join(tmp_location, '__init__.py')): move(join(tmp_location, '__init__.py'), join(tmp_location, '__init__')) try: move(tmp_location, self.path) if self.msm: self.run_skill_requirements() self.install_system_deps() self.run_pip(constraints) finally: if isfile(join(self.path, '__init__')): move(join(self.path, '__init__'), join(self.path, '__init__.py')) LOG.info('Successfully installed ' + self.name) def update_deps(self, constraints=None): if self.msm: self.run_skill_requirements() self.install_system_deps() self.run_pip(constraints) def _find_sha_branch(self): git = Git(self.path) sha_branches = git.branch( contains=self.sha, all=True ).split('\n') sha_branch = [b for b in sha_branches if ' -> ' not in b][0] sha_branch = sha_branch.strip('* \n').replace('remotes/', '') for remote in git.remote().split('\n'): sha_branch = sha_branch.replace(remote + '/', '') return sha_branch @_backup_previous_version def update(self): if not self.is_local: raise NotInstalled('{} is not installed'.format(self.name)) git = Git(self.path) with git_to_msm_exceptions(): sha_before = git.rev_parse('HEAD') modified_files = git.status(porcelain=True, untracked='no') if modified_files != '': raise SkillModified('Uncommitted changes:\n' + modified_files) git.fetch() current_branch = git.rev_parse('--abbrev-ref', 'HEAD').strip() if self.sha and current_branch in SWITCHABLE_BRANCHES: # Check out correct branch git.checkout(self._find_sha_branch()) git.merge(self.sha or 'origin/HEAD', ff_only=True) sha_after = git.rev_parse('HEAD') if sha_before != sha_after: self.update_deps() LOG.info('Updated ' + self.name) # Trigger reload by modifying the timestamp os.utime(join(self.path, '__init__.py')) return True else: LOG.info('Nothing new for ' + self.name) return False @staticmethod def find_git_url(path): """Get the git url from a folder""" try: LOG.debug( 'Attempting to retrieve the remote origin URL config for ' 'skill in path ' + path ) return Git(path).config('remote.origin.url') except GitError: return '' def __repr__(self): return '<SkillEntry {}>'.format(' '.join( '{}={}'.format(attr, self.__dict__[attr]) for attr in ['name', 'author', 'is_local'] ))
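# Illustrative usage sketch (added; the path and URL below are placeholders, not
# part of the original module). MSM itself normally drives these calls.
if __name__ == '__main__':
    entry = SkillEntry('example-skill',
                       '/opt/mycroft/skills/example-skill.example-author',
                       url='https://github.com/example-author/example-skill')
    if not entry.is_local:
        entry.install()  # clone the repo, then run system/pip/skill dependency installs
    else:
        entry.update()   # fetch and fast-forward, reinstalling deps only if the sha moved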
[ "logging.getLogger", "pako.PakoManager", "git.Repo.clone_from", "msm.exceptions.PipRequirementsException", "msm.exceptions.AlreadyInstalled", "os.path.exists", "shutil.move", "threading.Lock", "msm.git_to_msm_exceptions", "subprocess.Popen", "functools.wraps", "msm.util.Git", "subprocess.call", "msm.exceptions.SystemRequirementsException", "msm.exceptions.CloneException", "msm.exceptions.AlreadyRemoved", "msm.SkillRequirementsException", "shutil.which", "os.path.dirname", "msm.util.cached_property", "difflib.SequenceMatcher", "os.path.join", "msm.exceptions.SkillModified", "os.getcwd", "os.chdir", "shutil.copytree", "tempfile.mktemp", "yaml.safe_load", "tempfile.gettempdir", "os.path.basename", "shutil.rmtree" ]
[((1772, 1799), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1789, 1799), False, 'import logging\n'), ((2091, 2102), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2100, 2102), False, 'import os\n'), ((2107, 2126), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (2115, 2126), False, 'import os\n'), ((2311, 2322), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2316, 2322), False, 'from functools import wraps\n'), ((3407, 3413), 'threading.Lock', 'Lock', ([], {}), '()\n', (3411, 3413), False, 'from threading import Lock\n'), ((5653, 5686), 'msm.util.cached_property', 'cached_property', ([], {'ttl': 'FIVE_MINUTES'}), '(ttl=FIVE_MINUTES)\n', (5668, 5686), False, 'from msm.util import cached_property, Git\n'), ((2171, 2188), 'os.chdir', 'os.chdir', (['old_dir'], {}), '(old_dir)\n', (2179, 2188), False, 'import os\n'), ((4596, 4608), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (4602, 4608), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((12448, 12482), 'os.path.join', 'join', (['self.path', '"""requirements.sh"""'], {}), "(self.path, 'requirements.sh')\n", (12452, 12482), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((14031, 14062), 'os.path.join', 'join', (['self.path', '"""manifest.yml"""'], {}), "(self.path, 'manifest.yml')\n", (14035, 14062), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((14074, 14090), 'os.path.exists', 'exists', (['yml_path'], {}), '(yml_path)\n', (14080, 14090), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((14515, 14556), 'os.path.join', 'join', (['self.path', '"""skill_requirements.txt"""'], {}), "(self.path, 'skill_requirements.txt')\n", (14519, 14556), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((14568, 14580), 'os.path.exists', 'exists', (['reqs'], {}), '(reqs)\n', (14574, 14580), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((14947, 14982), 'os.path.join', 'join', (['self.path', '"""requirements.txt"""'], {}), "(self.path, 'requirements.txt')\n", (14951, 14982), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((15017, 15029), 'os.path.exists', 'exists', (['reqs'], {}), '(reqs)\n', (15023, 15029), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((17023, 17037), 'msm.util.Git', 'Git', (['self.path'], {}), '(self.path)\n', (17026, 17037), False, 'from msm.util import cached_property, Git\n'), ((17579, 17593), 'msm.util.Git', 'Git', (['self.path'], {}), '(self.path)\n', (17582, 17593), False, 'from msm.util import cached_property, Git\n'), ((2491, 2512), 'os.path.exists', 'exists', (['self.old_path'], {}), '(self.old_path)\n', (2497, 2512), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((2564, 2605), 'shutil.copytree', 'shutil.copytree', (['self.path', 'self.old_path'], {}), '(self.path, self.old_path)\n', (2579, 2605), False, 'import shutil\n'), ((5144, 5161), 'os.path.exists', 'exists', (['self.path'], {}), '(self.path)\n', (5150, 5161), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((5224, 5238), 'msm.util.Git', 'Git', (['self.path'], {}), '(self.path)\n', (5227, 5238), False, 'from msm.util import cached_property, Git\n'), ((9757, 9784), 'os.path.exists', 'exists', (['DEFAULT_CONSTRAINTS'], {}), '(DEFAULT_CONSTRAINTS)\n', (9763, 9784), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((9926, 9949), 
'os.path.dirname', 'dirname', (['sys.executable'], {}), '(sys.executable)\n', (9933, 9949), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((11591, 11604), 'pako.PakoManager', 'PakoManager', ([], {}), '()\n', (11602, 11604), False, 'from pako import PakoManager\n'), ((12498, 12518), 'os.path.exists', 'exists', (['setup_script'], {}), '(setup_script)\n', (12504, 12518), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((12597, 12636), 'subprocess.call', 'subprocess.call', (["['bash', setup_script]"], {}), "(['bash', setup_script])\n", (12612, 12636), False, 'import subprocess\n'), ((12752, 12783), 'msm.exceptions.SystemRequirementsException', 'SystemRequirementsException', (['rc'], {}), '(rc)\n', (12779, 12783), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n'), ((15502, 15527), 'msm.exceptions.AlreadyRemoved', 'AlreadyRemoved', (['self.name'], {}), '(self.name)\n', (15516, 15527), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n'), ((15553, 15570), 'shutil.rmtree', 'rmtree', (['self.path'], {}), '(self.path)\n', (15559, 15570), False, 'from shutil import rmtree, move\n'), ((15847, 15874), 'msm.exceptions.AlreadyInstalled', 'AlreadyInstalled', (['self.name'], {}), '(self.name)\n', (15863, 15874), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n'), ((15967, 15975), 'tempfile.mktemp', 'mktemp', ([], {}), '()\n', (15973, 15975), False, 'from tempfile import mktemp, gettempdir\n'), ((15988, 16027), 'git.Repo.clone_from', 'Repo.clone_from', (['self.url', 'tmp_location'], {}), '(self.url, tmp_location)\n', (16003, 16027), False, 'from git import Repo, GitError\n'), ((16227, 16260), 'os.path.join', 'join', (['tmp_location', '"""__init__.py"""'], {}), "(tmp_location, '__init__.py')\n", (16231, 16260), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((16390, 16419), 'shutil.move', 'move', (['tmp_location', 'self.path'], {}), '(tmp_location, self.path)\n', (16394, 16419), False, 'from shutil import rmtree, move\n'), ((17608, 17631), 'msm.git_to_msm_exceptions', 'git_to_msm_exceptions', ([], {}), '()\n', (17629, 17631), False, 'from msm import SkillRequirementsException, git_to_msm_exceptions\n'), ((2451, 2463), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (2461, 2463), False, 'from tempfile import mktemp, gettempdir\n'), ((2530, 2551), 'shutil.rmtree', 'rmtree', (['self.old_path'], {}), '(self.old_path)\n', (2536, 2551), False, 'from shutil import rmtree, move\n'), ((2956, 2973), 'os.path.exists', 'exists', (['self.path'], {}), '(self.path)\n', (2962, 2973), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((3151, 3168), 'os.path.exists', 'exists', (['self.path'], {}), '(self.path)\n', (3157, 3168), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((3283, 3304), 'os.path.exists', 'exists', (['self.old_path'], {}), '(self.old_path)\n', (3289, 3304), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((3322, 3343), 'shutil.rmtree', 'rmtree', (['self.old_path'], {}), '(self.old_path)\n', (3328, 3343), 
False, 'from shutil import rmtree, move\n'), ((4175, 4189), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (4183, 4189), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((8517, 8542), 'difflib.SequenceMatcher', 'SequenceMatcher', ([], {'a': 'a', 'b': 'b'}), '(a=a, b=b)\n', (8532, 8542), False, 'from difflib import SequenceMatcher\n'), ((9637, 9656), 'os.path.exists', 'exists', (['constraints'], {}), '(constraints)\n', (9643, 9656), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((10556, 10600), 'subprocess.Popen', 'Popen', (['pip_command'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(pip_command, stdout=PIPE, stderr=PIPE)\n', (10561, 10600), False, 'from subprocess import PIPE, Popen\n'), ((13342, 13371), 'msm.SkillRequirementsException', 'SkillRequirementsException', (['e'], {}), '(e)\n', (13368, 13371), False, 'from msm import SkillRequirementsException, git_to_msm_exceptions\n'), ((14203, 14220), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (14217, 14220), False, 'import yaml\n'), ((16183, 16207), 'msm.exceptions.CloneException', 'CloneException', (['e.stderr'], {}), '(e.stderr)\n', (16197, 16207), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n'), ((16280, 16313), 'os.path.join', 'join', (['tmp_location', '"""__init__.py"""'], {}), "(tmp_location, '__init__.py')\n", (16284, 16313), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((16332, 16362), 'os.path.join', 'join', (['tmp_location', '"""__init__"""'], {}), "(tmp_location, '__init__')\n", (16336, 16362), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((16608, 16635), 'os.path.join', 'join', (['self.path', '"""__init__"""'], {}), "(self.path, '__init__')\n", (16612, 16635), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((17812, 17868), 'msm.exceptions.SkillModified', 'SkillModified', (["('Uncommitted changes:\\n' + modified_files)"], {}), "('Uncommitted changes:\\n' + modified_files)\n", (17825, 17868), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n'), ((18430, 18460), 'os.path.join', 'join', (['self.path', '"""__init__.py"""'], {}), "(self.path, '__init__.py')\n", (18434, 18460), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((2991, 3008), 'shutil.rmtree', 'rmtree', (['self.path'], {}), '(self.path)\n', (2997, 3008), False, 'from shutil import rmtree, move\n'), ((3042, 3063), 'os.path.exists', 'exists', (['self.old_path'], {}), '(self.old_path)\n', (3048, 3063), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((3081, 3122), 'shutil.copytree', 'shutil.copytree', (['self.old_path', 'self.path'], {}), '(self.old_path, self.path)\n', (3096, 3122), False, 'import shutil\n'), ((11923, 11940), 'shutil.which', 'shutil.which', (['exe'], {}), '(exe)\n', (11935, 11940), False, 'import shutil\n'), ((16073, 16090), 'msm.util.Git', 'Git', (['tmp_location'], {}), '(tmp_location)\n', (16076, 16090), False, 'from msm.util import cached_property, Git\n'), ((16659, 16686), 'os.path.join', 'join', (['self.path', '"""__init__"""'], {}), "(self.path, '__init__')\n", (16663, 16686), False, 'from os.path import exists, join, basename, dirname, isfile\n'), 
((16709, 16739), 'os.path.join', 'join', (['self.path', '"""__init__.py"""'], {}), "(self.path, '__init__.py')\n", (16713, 16739), False, 'from os.path import exists, join, basename, dirname, isfile\n'), ((18853, 18862), 'msm.util.Git', 'Git', (['path'], {}), '(path)\n', (18856, 18862), False, 'from msm.util import cached_property, Git\n'), ((10847, 10980), 'msm.exceptions.PipRequirementsException', 'PipRequirementsException', (['(2)', '""""""', '"""Permission denied while installing pip dependencies. Please run in virtualenv or use sudo"""'], {}), "(2, '',\n 'Permission denied while installing pip dependencies. Please run in virtualenv or use sudo'\n )\n", (10871, 10980), False, 'from msm.exceptions import PipRequirementsException, SystemRequirementsException, AlreadyInstalled, SkillModified, AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException\n')]
# tshark -r input.pcap -qz "follow,tcp,raw,0"
# Re-serializes the first TCP stream of a capture into a simple binary format on stdout.
import struct
import sys
import binascii
import subprocess

# Ask tshark to dump TCP stream 0 of the capture given on the command line as raw hex.
result = subprocess.Popen(
    ["tshark", "-r", sys.argv[1], "-qz", "follow,tcp,raw,0"],
    stdout=subprocess.PIPE)

# Output header.
sys.stdout.buffer.write(b"FPC\x80")

# Skip the banner lines tshark prints before the two "Node N: addr:port" lines.
for i in range(4):
    result.stdout.readline()

# The next two lines name the endpoints; keep only the port field of each.
dp = result.stdout.readline().split(b":")[2]
sp = result.stdout.readline().split(b":")[2]

# Both ports as big-endian unsigned 16-bit integers.
sys.stdout.buffer.write(struct.pack('>H', int(sp)))
sys.stdout.buffer.write(struct.pack('>H', int(dp)))

# Each remaining line is one chunk of hex-encoded payload; a leading tab (byte 9)
# marks data flowing in the opposite direction.
for l in result.stdout.readlines():
    s2c = 0
    if l[0] == 9:
        l = l[1:]
        s2c = 1
    try:
        r = binascii.unhexlify(l[:-1])
    except binascii.Error:
        # Non-hex lines (e.g. the closing "===" banner) are skipped.
        continue
    # One direction byte followed by the decoded payload bytes.
    sys.stdout.buffer.write(struct.pack('>B', int(s2c)))
    sys.stdout.buffer.write(r)

# Output trailer.
sys.stdout.buffer.write(b"FPC0")
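# Example invocation (illustrative; the script and capture names are placeholders):
#
#   python follow_tcp_to_fpc.py input.pcap > stream0.fpc
#
# tshark must be on PATH; the serialized stream is written to stdout, so redirect
# it into a file.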
[ "subprocess.Popen", "sys.stdout.buffer.write", "binascii.unhexlify" ]
[((115, 217), 'subprocess.Popen', 'subprocess.Popen', (["['tshark', '-r', sys.argv[1], '-qz', 'follow,tcp,raw,0']"], {'stdout': 'subprocess.PIPE'}), "(['tshark', '-r', sys.argv[1], '-qz', 'follow,tcp,raw,0'],\n stdout=subprocess.PIPE)\n", (131, 217), False, 'import subprocess\n'), ((244, 279), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (["b'FPC\\x80'"], {}), "(b'FPC\\x80')\n", (267, 279), False, 'import sys\n'), ((757, 783), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['r'], {}), '(r)\n', (780, 783), False, 'import sys\n'), ((788, 820), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (["b'FPC0'"], {}), "(b'FPC0')\n", (811, 820), False, 'import sys\n'), ((640, 666), 'binascii.unhexlify', 'binascii.unhexlify', (['l[:-1]'], {}), '(l[:-1])\n', (658, 666), False, 'import binascii\n')]
""" Created: November 11, 2020 Author: <NAME> Python Version 3.9 This program is meant to make the process of collecting the different filters from AIJ excel spreadsheets faster. The user enters however many nights they have and the program goes through and checks those text files for the different columns for,HJD, Amag, and Amag error for the B and V filters. The program will also calculate the R magnitude from the rel flux of T1. There are error catching statements within the program so if the user mistypes, the program will not crash and close on them. """ import pandas as pd from os import path def main(c): # warning prompts for the user to read to make sure this program works correctly if c == 0: # warning prompts for the user to read to make sure this program works correctly print() print("Make sure you have turned the output xls files from AIJ into tab delimited text files. " "Since these xls files are corrupt for reading directly from.") print("You will also need to go into each night and filter and " "make the HJD column 6 decimals instead of the output of 3 within Excel.") print() else: print() while True: # checks to see whether you have entered a number and a correct filter letter try: num = int(input("Number of nights you have: ")) filter_name = input("Which filter are these nights in (B, V, R): ") if filter_name.upper() == "B" or filter_name.upper() == "V" or filter_name.upper() == "R": break else: print("Please enter B, V, or R for your filter.") print() continue except ValueError: print("You have entered an invalid number for your number of nights. Please enter a number.") print("") get_filters(num) def get_filters(n): """ Takes a number of nights for a given filter and takes out the HJD, either A_Mag1 or T1_flux, and error for mag or flux :param n: Number of observation nights :param f: The filter letter being used :return: the output text files for each night in a given filter """ total_hjd = [] total_amag = [] total_error = [] # checks for either the b, v, r filter as either upper or lowercase will work for i in range(n): while True: # makes sure the file pathway is real and points to some file # (does not check if that file is the correct one though) try: # an example pathway for the files # E:\Research\Data\NSVS_254037\2018.10.12-reduced\Check\V\2018.10.12.APASS.V_measurements.txt file = input("Enter night %d file path: " % (i+1)) if path.exists(file): break else: continue except FileNotFoundError: print("Please enter a correct file path") # noinspection PyUnboundLocalVariable df = pd.read_csv(file, delimiter="\t") # set parameters to lists from the file by the column header hjd = [] amag = [] amag_error = [] try: hjd = list(df["HJD"]) amag = list(df["Source_AMag_T1"]) amag_error = list(df["Source_AMag_Err_T1"]) except KeyError: print("The file you entered does not have the columns of HJD, Source_AMag_T1, or Source_AMag_Err_T1. 
" "Please re-enter the file path and make sure its the correct file.") c = 1 main(c) total_hjd.append(hjd) total_amag.append(amag) total_error.append(amag_error) # converts the Dataframe embedded lists into a normal flat list new_hjd = [item for elem in total_hjd for item in elem] new_amag = [item for elem in total_amag for item in elem] new_error = [item for elem in total_error for item in elem] # outputs the new file to dataframe and then into a text file for use in Peranso or PHOEBE data = pd.DataFrame({ "HJD": new_hjd, "AMag": new_amag, "AMag Error": new_error }) print("") output = input("What is the file output name (with file extension .txt): ") data.to_csv(output, index=False, header=False, sep='\t') print("") print("Fished saving the file to the same location as this program.") # Press the green button in the gutter to run the script. if __name__ == '__main__': count = 0 main(count)
[ "pandas.DataFrame", "os.path.exists", "pandas.read_csv" ]
[((4117, 4190), 'pandas.DataFrame', 'pd.DataFrame', (["{'HJD': new_hjd, 'AMag': new_amag, 'AMag Error': new_error}"], {}), "({'HJD': new_hjd, 'AMag': new_amag, 'AMag Error': new_error})\n", (4129, 4190), True, 'import pandas as pd\n'), ((3072, 3105), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delimiter': '"""\t"""'}), "(file, delimiter='\\t')\n", (3083, 3105), True, 'import pandas as pd\n'), ((2820, 2837), 'os.path.exists', 'path.exists', (['file'], {}), '(file)\n', (2831, 2837), False, 'from os import path\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-09-02 05:23 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('songwriter', '0005_auto_20170824_1726'), ] operations = [ migrations.AlterModelOptions( name='author', options={'ordering': ['lastname', 'firstname']}, ), migrations.AlterModelOptions( name='chord', options={'ordering': ['note']}, ), migrations.AlterModelOptions( name='editor', options={'ordering': ['name']}, ), migrations.AlterModelOptions( name='theme', options={'ordering': ['name']}, ), ]
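# For reference, these AlterModelOptions operations correspond to Meta options such
# as the following on the songwriter models (sketch only; field definitions omitted,
# and the remaining models follow the same pattern):
#
#   class Author(models.Model):
#       ...
#       class Meta:
#           ordering = ['lastname', 'firstname']
#
#   class Chord(models.Model):
#       ...
#       class Meta:
#           ordering = ['note']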
[ "django.db.migrations.AlterModelOptions" ]
[((295, 392), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""author"""', 'options': "{'ordering': ['lastname', 'firstname']}"}), "(name='author', options={'ordering': [\n 'lastname', 'firstname']})\n", (323, 392), False, 'from django.db import migrations\n'), ((432, 506), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""chord"""', 'options': "{'ordering': ['note']}"}), "(name='chord', options={'ordering': ['note']})\n", (460, 506), False, 'from django.db import migrations\n'), ((551, 626), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""editor"""', 'options': "{'ordering': ['name']}"}), "(name='editor', options={'ordering': ['name']})\n", (579, 626), False, 'from django.db import migrations\n'), ((671, 745), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""theme"""', 'options': "{'ordering': ['name']}"}), "(name='theme', options={'ordering': ['name']})\n", (699, 745), False, 'from django.db import migrations\n')]
"""Loading MNIST dataset. """ import struct import numpy as np class MNIST: """ Loading MNIST dataset. In the directory of MNIST dataset, there should be the following files: - Training set: - train-images-idx3-ubyte - train-labels-idx1-ubyte - Test set: - t10k-images-idx3-ubyte - t10k-labels-idx1-ubyte Functions --------- next_batch() image_pair(index: int) sample_batch(batch_index: int) to_ndarray() Attributes ---------- data_type: Can be either `"test"` or `"train"`. path: Path for MNIST data. data_size: Size of the dataset. Default value `None` means using all data in MNIST. batch_size: Size of the mini-batch. Default value `None` means using the whole dataset as a mini-batch. binarize: Whether to binarize the images (using 0 and 1 values). Default value is True. reshape: Whether to reshape the images into 2D arrays. Default value is False. one_hot: whether to use one-hot encoding for labels (e.g. using vector `[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]` for 0). Default value is False. """ IMAGE_SIZE = 784 LABEL_SIZE = 1 _IMAGE_SIZE_FMT = ">784B" _LABEL_SIZE_FMT = ">B" IMAGE_SHAPE = (28, 28) batch_index = 0 def __init__(self, data_type: str, path: str, data_size: int = None, batch_size: int = None, binarize=True, reshape=False, one_hot=False): self.data_type = data_type self.path = path # Options self.binarize = binarize self.reshape = reshape self.one_hot = one_hot # Data buffer # `data_size` will be updated according to the actual data image_buf, label_buf = self._read_file() # Size if data_size is None: # `len(image_buf)` may not be exactly divided by 784 self.data_size = len(image_buf) // self.IMAGE_SIZE else: self.data_size = data_size if batch_size is None: self.batch_size = self.data_size else: if batch_size <= self.data_size: self.batch_size = batch_size else: raise ValueError("batch size larger than data size") self.batch_num = self.data_size // self.batch_size # Data self._images = self._get_image(image_buf) self._labels = self._get_label(label_buf) def _read_file(self): if self.data_type == "test": image_file_name = self.path + "t10k-images-idx3-ubyte" label_file_name = self.path + "t10k-labels-idx1-ubyte" elif self.data_type == "train": image_file_name = self.path + "train-images-idx3-ubyte" label_file_name = self.path + "train-labels-idx1-ubyte" else: raise ValueError("only type \"test\" and \"train\" are available") # "rb" means reading + binary mode with open(image_file_name, "rb") as image_file: image_buf = image_file.read() with open(label_file_name, "rb") as label_file: label_buf = label_file.read() return image_buf, label_buf def _get_image(self, image_buf): """Get an image array from `image_buf`. This is the structure of the image file (training set): [offset] [type] [value] [description] 0000 32 bit integer 0x00000803(2051) magic number 0004 32 bit integer 60000 number of images 0008 32 bit integer 28 number of rows 0012 32 bit integer 28 number of columns 0016 unsigned byte ?? pixel 0017 unsigned byte ?? pixel ........ xxxx unsigned byte ?? pixel """ image_buf_len = self.data_size * self.IMAGE_SIZE + 16 image_offset = 16 image_arr = [] while image_offset < image_buf_len: temp = struct.unpack_from(self._IMAGE_SIZE_FMT, image_buf, image_offset) if self.binarize: temp = np.vectorize(lambda x: 0 if x <= 127 else 1)(temp) if self.reshape: temp = np.reshape(temp, self.IMAGE_SHAPE) image_arr.append(temp) image_offset += self.IMAGE_SIZE return image_arr def _get_label(self, label_buf): """Get an label array from `label_buf`. 
This is the structure of the label file (training set): [offset] [type] [value] [description] 0000 32 bit integer 0x00000801(2049) magic number (MSB first) 0004 32 bit integer 60000 number of items 0008 unsigned byte ?? label 0009 unsigned byte ?? label ........ xxxx unsigned byte ?? label """ label_buf_len = self.data_size * self.LABEL_SIZE + 8 label_offset = 8 label_arr = [] while label_offset < label_buf_len: temp = struct.unpack_from(self._LABEL_SIZE_FMT, label_buf, label_offset)[0] if self.one_hot: vec = np.zeros(10) vec[temp] = 1 label_arr.append(vec) else: label_arr.append(temp) label_offset += self.LABEL_SIZE return label_arr def next_batch(self): """Increase `batch_index` by 1, then return a mini-batch of (image, label) tuples.""" this_batch = self.batch(self.batch_index) self.batch_index = (self.batch_index + 1) % self.batch_num return this_batch def image_pair(self, index: int): """Return a (image, label) tuple at `index`.""" if index < self.data_size: return self._images[index], self._labels[index] raise IndexError("image index out of range") def batch(self, batch_index: int): """Return a mini-batch of (image, label) tuples at `batch_index`.""" if batch_index < self.batch_num: begin = batch_index * self.batch_size end = (batch_index + 1) * self.batch_size return self._images[begin:end], self._labels[begin:end] raise IndexError("batch index out of range") def to_ndarray(self): """Return the raw data tuple `(images, labels)` as `np.ndarray`. """ images = [] labels = [] for i in range(self.batch_num): image, label = self.batch(i) images.append(image) labels.append(label) return np.asarray(images), np.asarray(labels) def _test(): data = MNIST("train", MNIST_PATH, data_size=200, batch_size=8, reshape=True, one_hot=False, binarize=False) print("Meta-data:") print("\tDataset size:", data.data_size) print("\tBatch size:", data.batch_size) col_num = 4 row_num = data.batch_size // col_num + 1 _test_random_images(data, col_num, row_num) _test_random_batch(data, col_num, row_num) _test_next_batch(data, col_num, row_num) def _test_random_images(data, col_num, row_num): images = [] labels = [] for _ in range(10): index = random.randrange(data.data_size) image, label = data.image_pair(index) images.append(image) labels.append(label) _plot(images, labels, col_num=col_num, row_num=row_num) def _test_random_batch(data, col_num, row_num): index = random.randrange(data.batch_num) images, labels = data.batch(index) _plot(images, labels, col_num=col_num, row_num=row_num) def _test_next_batch(data, col_num, row_num): for _ in range(3): images, labels = data.next_batch() _plot(images, labels, col_num=col_num, row_num=row_num) def _plot(images, labels, col_num, row_num): for i, (image, label) in enumerate(zip(images, labels)): plt.subplot(row_num, col_num, i + 1) plt.imshow(image, cmap="gray") plt.axis('off') plt.title(str(label)) plt.show() def _test_numpy(): images, labels = MNIST("train", MNIST_PATH, data_size=200, batch_size=8, reshape=False, one_hot=False, binarize=False).to_ndarray() print(images.shape) # shape = (num_batches, batch_size, num_visible) print(np.moveaxis(images, 0, -1).shape) # shape = (batch_size, num_visible, num_batches) print(labels.shape) # shape = (num_batches, batch_size) if __name__ == "__main__": import random import matplotlib.pyplot as plt # Local MNIST data MNIST_PATH = "../../machine-learning/data/mnist/" _test() _test_numpy()
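# Minimal usage sketch (the path below is a placeholder; the four idx files must
# sit in that directory and the path must end with a slash):
#
#   data = MNIST("train", "./data/mnist/", batch_size=64)
#   images, labels = data.next_batch()           # next mini-batch, wraps around at the end
#   image, label = data.image_pair(0)            # a single (image, label) example
#   all_images, all_labels = data.to_ndarray()   # full set as numpy arrays, batched along axis 0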
[ "matplotlib.pyplot.imshow", "numpy.reshape", "random.randrange", "struct.unpack_from", "numpy.asarray", "numpy.zeros", "numpy.vectorize", "numpy.moveaxis", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ]
[((7734, 7766), 'random.randrange', 'random.randrange', (['data.batch_num'], {}), '(data.batch_num)\n', (7750, 7766), False, 'import random\n'), ((8294, 8304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8302, 8304), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7507), 'random.randrange', 'random.randrange', (['data.data_size'], {}), '(data.data_size)\n', (7491, 7507), False, 'import random\n'), ((8160, 8196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row_num', 'col_num', '(i + 1)'], {}), '(row_num, col_num, i + 1)\n', (8171, 8196), True, 'import matplotlib.pyplot as plt\n'), ((8205, 8235), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (8215, 8235), True, 'import matplotlib.pyplot as plt\n'), ((8244, 8259), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8252, 8259), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4255), 'struct.unpack_from', 'struct.unpack_from', (['self._IMAGE_SIZE_FMT', 'image_buf', 'image_offset'], {}), '(self._IMAGE_SIZE_FMT, image_buf, image_offset)\n', (4208, 4255), False, 'import struct\n'), ((6836, 6854), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (6846, 6854), True, 'import numpy as np\n'), ((6856, 6874), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (6866, 6874), True, 'import numpy as np\n'), ((8620, 8646), 'numpy.moveaxis', 'np.moveaxis', (['images', '(0)', '(-1)'], {}), '(images, 0, -1)\n', (8631, 8646), True, 'import numpy as np\n'), ((4412, 4446), 'numpy.reshape', 'np.reshape', (['temp', 'self.IMAGE_SHAPE'], {}), '(temp, self.IMAGE_SHAPE)\n', (4422, 4446), True, 'import numpy as np\n'), ((5305, 5370), 'struct.unpack_from', 'struct.unpack_from', (['self._LABEL_SIZE_FMT', 'label_buf', 'label_offset'], {}), '(self._LABEL_SIZE_FMT, label_buf, label_offset)\n', (5323, 5370), False, 'import struct\n'), ((5425, 5437), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5433, 5437), True, 'import numpy as np\n'), ((4309, 4353), 'numpy.vectorize', 'np.vectorize', (['(lambda x: 0 if x <= 127 else 1)'], {}), '(lambda x: 0 if x <= 127 else 1)\n', (4321, 4353), True, 'import numpy as np\n')]
import threading, queue, time, os, pickle # from queue import Queue import numpy as np import tensorflow as tf import sarnet_td3.common.tf_util as U from tensorflow.python.keras.backend import set_session lock = threading.Lock() class MultiTrainTD3(threading.Thread): def __init__(self, input_queue, output_queue, args=(), kwargs=None): threading.Thread.__init__(self, args=(), kwargs=None) self.input_queue = input_queue self.output_queue = output_queue self.daemon = True self.trainers = args[0] self.args = args[1] self.buffer_op = args[2] self.num_env = args[3] self.sess = args[4] self.num_agents = args[5] self.num_adversaries = args[6] self.ep_rewards = [[0.0] for _ in range(self.num_env)] self.ep_end_rewards = [[0.0] for _ in range(self.num_env)] self.ep_success = [[0.0] for _ in range(self.num_env)] self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)] self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)] # self.agent_info = [[[[]]] for _ in range(self.num_env)] self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve self.final_ep_end_rewards = [] self.final_ep_ag_rewards = [] # agent rewards for training curve self.save_rate = self.args.max_episode_len * 100 self.save_n_ep = self.num_env * 10 self.print_step = -int(self.save_n_ep / self.num_env) self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units)) self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units)) self.time_prev = time.time() def run(self): # print(threading.currentThread().getName(), self.receive_messages) with self.sess.as_default(): # Freeze graph to avoid memory leaks # self.sess.graph.finalize() while True: try: action, p_index, data = self.input_queue.get() if action is "None": # If you send `None`, the thread will exit. 
return elif action is "get_action": out = self.get_action(data, p_index) self.output_queue.put(out) elif action is "get_qdebug": out = self.get_qdebug(data, p_index) self.output_queue.put(out) elif action is "get_loss": out = self.get_loss(data, p_index) self.output_queue.put(out) elif action is "write_tboard": self.write_tboard(data) elif action is "add_to_buffer": self.buffer_op.collect_exp(data) elif action is "save_rew_info": self.save_rew_info(data) elif action is "save_benchmark": out = self.save_benchmark(data) self.output_queue.put(out) elif action is "reset_rew_info": self.reset_rew_info() elif action is "save_model_rew": if not (self.args.benchmark or self.args.display): self.save_model(data) self.plot_rewards(data) except queue.Empty: continue def get_action(self, data, p_index): with lock: agent = self.trainers[p_index] obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim] obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim] p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train) # print(np.shape(obs_n_t)) act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train) if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG": c_j_t1, h_j_t1 = state_j_t1 else: h_j_t1 = state_j_t1 c_j_t1 = state_j_t1 if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}: mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units)) return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t def get_qdebug(self, data, p_index): with lock: # with sess.as_default(): agent = self.trainers[p_index] obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim] obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim] q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index]) _, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input)) if self.args.td3: q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index]) _, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input)) else: q2_h_j_t1 = [] return q1_h_j_t1, q2_h_j_t1 def get_loss(self, data, p_index): with lock: # with sess.as_default(): agent = self.trainers[p_index] train_step = data loss = agent.update(self.trainers, self.buffer_op, train_step) return loss def write_tboard(self, data): with lock: loss, train_step, writer, summary_ops, summary_vars, num_agents = data # Tensorboard episode_b_rewards = [] for j in range(self.num_env): if self.args.env_type == "mpe": episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:])) else: episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:])) episode_b_rewards = np.mean(np.array(episode_b_rewards)) num_steps = train_step * self.num_env # Add to tensorboard only when actor agent is updated if loss[0][1] is not None: fd = {} for i, key in enumerate(summary_vars): if i == 0: fd[key] = episode_b_rewards else: agnt_idx = int((i - 1) / 5) if agnt_idx == num_agents: agnt_idx -= 1 if loss[agnt_idx] is not None: fd[key] = loss[agnt_idx][int((i - 1) % 5)] summary_str = U.get_session().run(summary_ops, feed_dict=fd) writer.add_summary(summary_str, num_steps) writer.flush() def save_rew_info(self, data): with lock: rew_n, info_n, ep_step = data # rew_n (num_env, num_agents) if self.args.env_type == "mpe": for j in range(self.num_env): for i, rew in enumerate(rew_n[j]): if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step 
rewards self.ep_end_rewards[j][-1] += rew self.ep_rewards[j][-1] += rew self.agent_rewards[j][i][-1] += rew elif self.args.env_type == "ic3net": for j in range(self.num_env): self.ep_success[j][-1] += info_n[j] if self.args.benchmark and self.args.env_type == "mpe": for j in range(self.num_env): for i, info in enumerate(info_n[j]): self.agent_info[j][i][-1].append(info) def reset_rew_info(self): with lock: for j in range(self.num_env): self.ep_rewards[j].append(0) self.ep_success[j].append(0) self.ep_end_rewards[j].append(0) for i in range(self.num_agents): self.agent_rewards[j][i].append(0) if self.args.benchmark: for j in range(self.num_env): for i in range(self.num_agents): self.agent_info[j][i].append([[]]) def save_benchmark(self, data): with lock: exp_name, exp_itr = data benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir) if not os.path.exists(benchmark_dir): os.mkdir(benchmark_dir) file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl' print('Finished benchmarking, now saving...') # pickle_info = [self.agent_info[j] for j in range(self.num_env)] with open(file_name, 'wb') as fp: # Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]] pickle.dump(self.agent_info, fp) return "bench_saved" def save_model(self, data): with lock: # train_step = t_step * num_env train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data # Policy File if num_episodes % (self.save_n_ep) == 0: save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step) U.save_state(save_dir, self.sess, saver=saver) # episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards if self.args.env_type == "mpe": # print statement depends on whether or not there are adversaries if self.num_adversaries == 0: episode_b_rewards = [] ep_end_b_rewards = [] ep_ag_b_rewards = [] for j in range(self.num_env): episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:])) ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:])) episode_b_rewards = np.mean(np.array(episode_b_rewards)) ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10. 
for i in range(self.num_agents): temp_ag_reward = [] for j in range(self.num_env): temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:])) ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward))) print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format( train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3))) with open(data_file, "a+") as f: f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format( train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n") else: episode_b_rewards = [] ep_end_b_rewards = [] ep_ag_b_rewards = [] for j in range(self.num_env): episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:])) ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:])) episode_b_rewards = np.mean(np.array(episode_b_rewards)) ep_end_b_rewards = np.mean(ep_end_b_rewards) for i in range(self.num_agents): temp_ag_reward = [] for j in range(self.num_env): temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:])) ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward))) print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format( train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards], round(time.time() - self.time_prev, 3)) + "\n") with open(data_file, "a+") as f: f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format( train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards], round(time.time() - self.time_prev, 3)) + "\n") # Keep track of final episode reward self.final_ep_rewards.append(episode_b_rewards) self.final_ep_end_rewards.append(ep_end_b_rewards) for rew in ep_ag_b_rewards: self.final_ep_ag_rewards.append(rew) self.time_prev = time.time() def plot_rewards(self, data): with lock: train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir) if not os.path.exists(plot_dir): os.mkdir(plot_dir) rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl' with open(rew_file_name, 'wb') as fp: pickle.dump(self.final_ep_rewards, fp) rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl' with open(rew_ep_end_file_name, 'wb') as fp: pickle.dump(self.final_ep_end_rewards, fp) agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl' with open(agrew_file_name, 'wb') as fp: pickle.dump(self.final_ep_ag_rewards, fp) """ REINFORCE Threads """ class MultiTrainVPG(threading.Thread): def __init__(self, input_queue, output_queue, args=(), kwargs=None): threading.Thread.__init__(self, args=(), kwargs=None) self.input_queue = input_queue self.output_queue = output_queue self.daemon = True self.trainers = args[0] self.args = args[1] self.buffer_op = args[2] self.num_env = args[3] self.sess = args[4] self.num_agents = args[5] self.num_adversaries = args[6] self.ep_rewards = [[0.0] for _ in range(self.num_env)] self.ep_success = [[0.0] for _ in range(self.num_env)] self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)] 
self.agent_info = [[[[]]] for _ in range(self.num_env)] self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve self.final_ep_ag_rewards = [] # agent rewards for training curve self.save_rate = self.args.max_episode_len * 100 if self.args.env_type == "mpe": self.print_step = -int(self.save_rate / self.num_env) else: # print for episode end only (success rate) self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len)) self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units)) self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units)) self.time_prev = time.time() def run(self): # print(threading.currentThread().getName(), self.receive_messages) with self.sess.as_default(): # Freeze graph to avoid memory leaks # self.sess.graph.finalize() while True: try: action, p_index, data = self.input_queue.get() if action is "None": # If you send `None`, the thread will exit. return elif action is "get_action": out = self.get_action(data, p_index) self.output_queue.put(out) elif action is "get_loss": out = self.get_loss(data, p_index) self.output_queue.put(out) elif action is "write_tboard": self.write_tboard(data) elif action is "add_to_buffer": self.buffer_op.collect_exp(data) elif action is "add_to_buffer_reinforce": self.buffer_op.collect_exp(data) elif action is "save_rew_info": self.save_rew_info(data) elif action is "save_benchmark": out = self.save_benchmark(data) self.output_queue.put(out) elif action is "reset_rew_info": self.reset_rew_info() elif action is "save_model_rew": if not (self.args.benchmark or self.args.display): self.save_model(data) self.plot_rewards(data) except queue.Empty: continue def get_action(self, data, p_index): with lock: agent = self.trainers[p_index] obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data obs_n_t = np.stack(obs_n_t, axis=-2) obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim] p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train) act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train) if self.args.encoder_model == "LSTM": c_j_t1, h_j_t1 = state_j_t1 else: h_j_t1 = state_j_t1 c_j_t1 = state_j_t1 if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}: mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units)) return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t def get_loss(self, data, p_index): with lock: # with sess.as_default(): train_step, buffer_data = data agent = self.trainers[p_index] loss = agent.update(self.trainers, buffer_data, train_step) return loss def write_tboard(self, data): with lock: loss, train_step, writer, summary_ops, summary_vars, num_agents = data # Tensorboard episode_b_rewards = [] for j in range(self.num_env): if self.args.env_type == "mpe": episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:])) else: episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:])) episode_b_rewards = np.mean(np.array(episode_b_rewards)) num_steps = train_step * self.num_env # Add to tensorboard only when actor agent is updated if loss[0][1] is not None: fd = {} for i, key in enumerate(summary_vars): if i == 0: fd[key] = episode_b_rewards else: agnt_idx = int((i - 1) / 5) if agnt_idx == num_agents: agnt_idx -= 1 if loss[agnt_idx] is not None: fd[key] = loss[agnt_idx][int((i - 1) % 5)] summary_str = U.get_session().run(summary_ops, feed_dict=fd) writer.add_summary(summary_str, num_steps) writer.flush() def save_rew_info(self, data): with 
lock: rew_n, info_n, terminal = data if self.args.env_type == "mpe": for j in range(self.num_env): for i, rew in enumerate(rew_n[j]): self.ep_rewards[j][-1] += rew self.agent_rewards[j][i][-1] += rew elif self.args.env_type == "ic3net": for j in range(self.num_env): self.ep_success[j][-1] += info_n[j] if self.args.benchmark and self.args.env_type == "mpe": for j in range(self.num_env): for i, info in enumerate(info_n[j]): self.agent_info[-1][i].append(info_n[0]['n']) def reset_rew_info(self): with lock: for j in range(self.num_env): self.ep_rewards[j].append(0) self.ep_success[j].append(0) for i in range(self.num_agents): self.agent_rewards[j][i].append(0) if self.args.benchmark: for j in range(self.num_env): self.agent_info[j].append([[]]) def save_benchmark(self, data): with lock: exp_name, exp_itr = data benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir) if not os.path.exists(benchmark_dir): os.mkdir(benchmark_dir) file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl' print('Finished benchmarking, now saving...') with open(file_name, 'wb') as fp: pickle.dump(self.ep_success, fp) return "bench_saved" def save_model(self, data): with lock: # train_step = t_step * num_env train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data # Policy File save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step) U.save_state(save_dir, self.sess, saver=saver) episode_b_success = [] for j in range(self.num_env): episode_b_success.append(np.mean(self.ep_success[j][self.print_step:])) episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format( train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n") with open(data_file, "a+") as f: f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format( train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n") self.final_ep_rewards.append(episode_b_success) def plot_rewards(self, data): with lock: train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir) if not os.path.exists(plot_dir): os.mkdir(plot_dir) rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl' with open(rew_file_name, 'wb') as fp: pickle.dump(self.final_ep_rewards, fp) def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv): threads = [] sess = tf.compat.v1.get_default_session() for t in range(args.num_gpu_threads): input_q = queue.Queue() output_q = queue.Queue() if args.policy_grad == "maddpg": threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv))) elif args.policy_grad == "reinforce": threads.append( MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv))) threads[t].start() time.sleep(1) return threads def close_gputhreads(threads): for t in threads: t.input_queue.put(("None", None, None)) for t in threads: t.join() print('GPU trainers cancelled') return
[ "threading.Thread.__init__", "os.path.exists", "numpy.mean", "pickle.dump", "sarnet_td3.common.tf_util.save_state", "threading.Lock", "tensorflow.compat.v1.get_default_session", "os.path.join", "time.sleep", "numpy.stack", "numpy.zeros", "numpy.array", "os.mkdir", "numpy.expand_dims", "queue.Queue", "time.time", "sarnet_td3.common.tf_util.get_session" ]
[((212, 228), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (226, 228), False, 'import threading, queue, time, os, pickle\n'), ((23871, 23905), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (23903, 23905), True, 'import tensorflow as tf\n'), ((351, 404), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'args': '()', 'kwargs': 'None'}), '(self, args=(), kwargs=None)\n', (376, 404), False, 'import threading, queue, time, os, pickle\n'), ((1582, 1636), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.critic_units)'}), '(shape=(self.num_env, self.args.critic_units))\n', (1590, 1636), True, 'import numpy as np\n'), ((1661, 1714), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (1669, 1714), True, 'import numpy as np\n'), ((1740, 1751), 'time.time', 'time.time', ([], {}), '()\n', (1749, 1751), False, 'import threading, queue, time, os, pickle\n'), ((14661, 14714), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'args': '()', 'kwargs': 'None'}), '(self, args=(), kwargs=None)\n', (14686, 14714), False, 'import threading, queue, time, os, pickle\n'), ((15844, 15898), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.critic_units)'}), '(shape=(self.num_env, self.args.critic_units))\n', (15852, 15898), True, 'import numpy as np\n'), ((15923, 15976), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (15931, 15976), True, 'import numpy as np\n'), ((16003, 16014), 'time.time', 'time.time', ([], {}), '()\n', (16012, 16014), False, 'import threading, queue, time, os, pickle\n'), ((23966, 23979), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (23977, 23979), False, 'import threading, queue, time, os, pickle\n'), ((23999, 24012), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (24010, 24012), False, 'import threading, queue, time, os, pickle\n'), ((24414, 24427), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (24424, 24427), False, 'import threading, queue, time, os, pickle\n'), ((3740, 3766), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (3748, 3766), True, 'import numpy as np\n'), ((3825, 3856), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': '(1)'}), '(obs_n_t, axis=1)\n', (3839, 3856), True, 'import numpy as np\n'), ((4779, 4805), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (4787, 4805), True, 'import numpy as np\n'), ((4864, 4895), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': '(1)'}), '(obs_n_t, axis=1)\n', (4878, 4895), True, 'import numpy as np\n'), ((8566, 8636), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.benchmark_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)\n", (8578, 8636), False, 'import threading, queue, time, os, pickle\n'), ((13622, 13688), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.plots_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.plots_dir)\n", (13634, 13688), False, 'import threading, queue, time, os, pickle\n'), ((17953, 17979), 'numpy.stack', 'np.stack', (['obs_n_t'], {'axis': '(-2)'}), '(obs_n_t, axis=-2)\n', (17961, 17979), True, 'import numpy as np\n'), ((18002, 18033), 'numpy.expand_dims', 'np.expand_dims', (['obs_n_t'], {'axis': 
'(1)'}), '(obs_n_t, axis=1)\n', (18016, 18033), True, 'import numpy as np\n'), ((21579, 21649), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.benchmark_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)\n", (21591, 21649), False, 'import threading, queue, time, os, pickle\n'), ((22385, 22431), 'sarnet_td3.common.tf_util.save_state', 'U.save_state', (['save_dir', 'self.sess'], {'saver': 'saver'}), '(save_dir, self.sess, saver=saver)\n', (22397, 22431), True, 'import sarnet_td3.common.tf_util as U\n'), ((23381, 23447), 'os.path.join', 'os.path.join', (['"""./exp_data"""', 'exp_name', 'exp_itr', 'self.args.plots_dir'], {}), "('./exp_data', exp_name, exp_itr, self.args.plots_dir)\n", (23393, 23447), False, 'import threading, queue, time, os, pickle\n'), ((4438, 4491), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (4446, 4491), True, 'import numpy as np\n'), ((6170, 6197), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (6178, 6197), True, 'import numpy as np\n'), ((8656, 8685), 'os.path.exists', 'os.path.exists', (['benchmark_dir'], {}), '(benchmark_dir)\n', (8670, 8685), False, 'import threading, queue, time, os, pickle\n'), ((8703, 8726), 'os.mkdir', 'os.mkdir', (['benchmark_dir'], {}), '(benchmark_dir)\n', (8711, 8726), False, 'import threading, queue, time, os, pickle\n'), ((9124, 9156), 'pickle.dump', 'pickle.dump', (['self.agent_info', 'fp'], {}), '(self.agent_info, fp)\n', (9135, 9156), False, 'import threading, queue, time, os, pickle\n'), ((9587, 9633), 'sarnet_td3.common.tf_util.save_state', 'U.save_state', (['save_dir', 'self.sess'], {'saver': 'saver'}), '(save_dir, self.sess, saver=saver)\n', (9599, 9633), True, 'import sarnet_td3.common.tf_util as U\n'), ((13443, 13454), 'time.time', 'time.time', ([], {}), '()\n', (13452, 13454), False, 'import threading, queue, time, os, pickle\n'), ((13708, 13732), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (13722, 13732), False, 'import threading, queue, time, os, pickle\n'), ((13750, 13768), 'os.mkdir', 'os.mkdir', (['plot_dir'], {}), '(plot_dir)\n', (13758, 13768), False, 'import threading, queue, time, os, pickle\n'), ((13966, 14004), 'pickle.dump', 'pickle.dump', (['self.final_ep_rewards', 'fp'], {}), '(self.final_ep_rewards, fp)\n', (13977, 14004), False, 'import threading, queue, time, os, pickle\n'), ((14223, 14265), 'pickle.dump', 'pickle.dump', (['self.final_ep_end_rewards', 'fp'], {}), '(self.final_ep_end_rewards, fp)\n', (14234, 14265), False, 'import threading, queue, time, os, pickle\n'), ((14469, 14510), 'pickle.dump', 'pickle.dump', (['self.final_ep_ag_rewards', 'fp'], {}), '(self.final_ep_ag_rewards, fp)\n', (14480, 14510), False, 'import threading, queue, time, os, pickle\n'), ((18547, 18600), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_env, self.args.value_units)'}), '(shape=(self.num_env, self.args.value_units))\n', (18555, 18600), True, 'import numpy as np\n'), ((19504, 19531), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (19512, 19531), True, 'import numpy as np\n'), ((21669, 21698), 'os.path.exists', 'os.path.exists', (['benchmark_dir'], {}), '(benchmark_dir)\n', (21683, 21698), False, 'import threading, queue, time, os, pickle\n'), ((21716, 21739), 'os.mkdir', 'os.mkdir', (['benchmark_dir'], {}), '(benchmark_dir)\n', (21724, 21739), False, 'import threading, queue, 
time, os, pickle\n'), ((21983, 22015), 'pickle.dump', 'pickle.dump', (['self.ep_success', 'fp'], {}), '(self.ep_success, fp)\n', (21994, 22015), False, 'import threading, queue, time, os, pickle\n'), ((23467, 23491), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (23481, 23491), False, 'import threading, queue, time, os, pickle\n'), ((23509, 23527), 'os.mkdir', 'os.mkdir', (['plot_dir'], {}), '(plot_dir)\n', (23517, 23527), False, 'import threading, queue, time, os, pickle\n'), ((23725, 23763), 'pickle.dump', 'pickle.dump', (['self.final_ep_rewards', 'fp'], {}), '(self.final_ep_rewards, fp)\n', (23736, 23763), False, 'import threading, queue, time, os, pickle\n'), ((22550, 22595), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (22557, 22595), True, 'import numpy as np\n'), ((22637, 22664), 'numpy.array', 'np.array', (['episode_b_success'], {}), '(episode_b_success)\n', (22645, 22664), True, 'import numpy as np\n'), ((5969, 6014), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (5976, 6014), True, 'import numpy as np\n'), ((6083, 6128), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (6090, 6128), True, 'import numpy as np\n'), ((6817, 6832), 'sarnet_td3.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (6830, 6832), True, 'import sarnet_td3.common.tf_util as U\n'), ((11951, 11976), 'numpy.mean', 'np.mean', (['ep_end_b_rewards'], {}), '(ep_end_b_rewards)\n', (11958, 11976), True, 'import numpy as np\n'), ((19303, 19348), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (19310, 19348), True, 'import numpy as np\n'), ((19417, 19462), 'numpy.mean', 'np.mean', (['self.ep_success[j][self.print_step:]'], {}), '(self.ep_success[j][self.print_step:])\n', (19424, 19462), True, 'import numpy as np\n'), ((20151, 20166), 'sarnet_td3.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (20164, 20166), True, 'import sarnet_td3.common.tf_util as U\n'), ((10363, 10390), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (10371, 10390), True, 'import numpy as np\n'), ((10435, 10460), 'numpy.mean', 'np.mean', (['ep_end_b_rewards'], {}), '(ep_end_b_rewards)\n', (10442, 10460), True, 'import numpy as np\n'), ((11879, 11906), 'numpy.array', 'np.array', (['episode_b_rewards'], {}), '(episode_b_rewards)\n', (11887, 11906), True, 'import numpy as np\n'), ((10161, 10206), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (10168, 10206), True, 'import numpy as np\n'), ((10260, 10309), 'numpy.mean', 'np.mean', (['self.ep_end_rewards[j][self.print_step:]'], {}), '(self.ep_end_rewards[j][self.print_step:])\n', (10267, 10309), True, 'import numpy as np\n'), ((11677, 11722), 'numpy.mean', 'np.mean', (['self.ep_rewards[j][self.print_step:]'], {}), '(self.ep_rewards[j][self.print_step:])\n', (11684, 11722), True, 'import numpy as np\n'), ((11776, 11825), 'numpy.mean', 'np.mean', (['self.ep_end_rewards[j][self.print_step:]'], {}), '(self.ep_end_rewards[j][self.print_step:])\n', (11783, 11825), True, 'import numpy as np\n'), ((22849, 22860), 'time.time', 'time.time', ([], {}), '()\n', (22858, 22860), False, 'import threading, queue, time, os, pickle\n'), ((10684, 10735), 'numpy.mean', 'np.mean', 
(['self.agent_rewards[j][i][self.print_step:]'], {}), '(self.agent_rewards[j][i][self.print_step:])\n', (10691, 10735), True, 'import numpy as np\n'), ((10796, 10820), 'numpy.array', 'np.array', (['temp_ag_reward'], {}), '(temp_ag_reward)\n', (10804, 10820), True, 'import numpy as np\n'), ((12194, 12245), 'numpy.mean', 'np.mean', (['self.agent_rewards[j][i][self.print_step:]'], {}), '(self.agent_rewards[j][i][self.print_step:])\n', (12201, 12245), True, 'import numpy as np\n'), ((12306, 12330), 'numpy.array', 'np.array', (['temp_ag_reward'], {}), '(temp_ag_reward)\n', (12314, 12330), True, 'import numpy as np\n'), ((11041, 11052), 'time.time', 'time.time', ([], {}), '()\n', (11050, 11052), False, 'import threading, queue, time, os, pickle\n'), ((23108, 23119), 'time.time', 'time.time', ([], {}), '()\n', (23117, 23119), False, 'import threading, queue, time, os, pickle\n'), ((12640, 12651), 'time.time', 'time.time', ([], {}), '()\n', (12649, 12651), False, 'import threading, queue, time, os, pickle\n'), ((11364, 11375), 'time.time', 'time.time', ([], {}), '()\n', (11373, 11375), False, 'import threading, queue, time, os, pickle\n'), ((13058, 13069), 'time.time', 'time.time', ([], {}), '()\n', (13067, 13069), False, 'import threading, queue, time, os, pickle\n')]
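# Note on the trainer threads above: the input-queue dispatch compares strings
# with identity checks such as `action is "get_action"` and `action is "None"`.
# That only works when CPython happens to intern both string objects; the
# robust form is an equality check with `==`. Below is a minimal,
# self-contained sketch of the same queue-dispatch pattern; the names
# `worker` and `handlers` are illustrative and not taken from the original code.
import queue
import threading

def worker(input_queue, output_queue, handlers):
    while True:
        try:
            action, payload = input_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        if action == "stop":  # value comparison, not `is`
            return
        if action in handlers:
            output_queue.put(handlers[action](payload))

in_q, out_q = queue.Queue(), queue.Queue()
t = threading.Thread(target=worker, args=(in_q, out_q, {"echo": lambda x: x}))
t.daemon = True
t.start()
in_q.put(("echo", 42))
print(out_q.get())  # -> 42
in_q.put(("stop", None))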
from flask import Flask, request, render_template
from sklearn.externals import joblib
from feature import *

pipeline = joblib.load('pipeline.sav')

app = Flask(__name__)


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/api', methods=['POST'])
def get_delay():
    result = request.form
    query_title = result['title']
    query_author = result['author']
    query_text = result['maintext']
    print(query_text)
    query = get_all_query(query_title, query_author, query_text)
    ##user_input = {'query':query}
    pred = pipeline.predict(query)
    print(pred)
    dic = {1: 'real', 0: 'fake'}
    return f'<html><body><h1>{dic[pred[0]]}</h1> <form action="/"> <button type="submit">back </button> </form></body></html>'


if __name__ == '__main__':
    app.run(port=8080, debug=True)
[ "flask.render_template", "sklearn.externals.joblib.load", "flask.Flask" ]
[((122, 149), 'sklearn.externals.joblib.load', 'joblib.load', (['"""pipeline.sav"""'], {}), "('pipeline.sav')\n", (133, 149), False, 'from sklearn.externals import joblib\n'), ((157, 172), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'from flask import Flask, request, render_template\n'), ((213, 242), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (228, 242), False, 'from flask import Flask, request, render_template\n')]
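# Note on the Flask app above: `sklearn.externals.joblib` was deprecated in
# scikit-learn 0.21 and removed in 0.23, so the import fails on current
# scikit-learn versions; the usual replacement (assuming the pickled pipeline
# is otherwise compatible) is the standalone joblib package:
#
#     import joblib
#     pipeline = joblib.load('pipeline.sav')
#
# A request against the /api endpoint would be posted as form data matching the
# fields read from `request.form`; the URL follows the port in app.run() and
# the field values below are placeholders:
import requests

resp = requests.post(
    "http://localhost:8080/api",
    data={"title": "some headline", "author": "some author", "maintext": "article text"},
)
print(resp.status_code, resp.text)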
from plaid import Client
from backend.link_token import LinkToken
from general_falcon_webserver import WebApp

client = Client(client_id='5e2e3527dd6924001167e8e8', secret='<KEY>', environment='sandbox')

app = WebApp()

app.add_route('link', LinkToken(client))

app.launch_webserver()
[ "plaid.Client", "general_falcon_webserver.WebApp", "backend.link_token.LinkToken" ]
[((121, 209), 'plaid.Client', 'Client', ([], {'client_id': '"""5e2e3527dd6924001167e8e8"""', 'secret': '"""<KEY>"""', 'environment': '"""sandbox"""'}), "(client_id='5e2e3527dd6924001167e8e8', secret='<KEY>', environment=\n 'sandbox')\n", (127, 209), False, 'from plaid import Client\n'), ((212, 220), 'general_falcon_webserver.WebApp', 'WebApp', ([], {}), '()\n', (218, 220), False, 'from general_falcon_webserver import WebApp\n'), ((244, 261), 'backend.link_token.LinkToken', 'LinkToken', (['client'], {}), '(client)\n', (253, 261), False, 'from backend.link_token import LinkToken\n')]
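# Note on the Plaid snippet above: the client id is hardcoded and the secret is
# a redacted placeholder. A more typical setup reads both from the environment
# so credentials stay out of source control; a small sketch using the same
# Client(...) call as above (the environment-variable names are assumptions):
import os
from plaid import Client

client = Client(
    client_id=os.environ["PLAID_CLIENT_ID"],
    secret=os.environ["PLAID_SECRET"],
    environment="sandbox",
)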
# # Copyright 2018, 2020 <NAME> # 2019-2020 <NAME> # 2015-2016 <NAME> # # ### MIT license # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ Tests to understand the difficulties in extracting hurst from noisy data """ import numpy as np import scipy import matplotlib.pyplot as plt import PyCo.Tools as Tools import SurfaceTopography as Surf def plot_naive(surface, lam_max): fig = plt.figure() ax=fig.add_subplot(111) ax.set_yscale('log') ax.set_xscale('log') surf = Tools.CharacterisePeriodicSurface(surface) q = surf.q C = surf.C H, alpha = surf.estimate_hurst_naive(lambda_max=lam_max, full_output=True) print("H = {}, alpha = {}".format(H, alpha)) ax.loglog(q, C, alpha=.1) mean, err, q_g = surf.grouped_stats(100) mask = np.isfinite(mean) mean = mean[mask] err = err[:, mask] q_g = q_g[mask] ax.errorbar(q_g, mean, yerr=err) ax.set_title("Naive: H={:.2f}, h_rms={:.2e}".format(H, np.sqrt((surface.heights() ** 2).mean()))) a, b = np.polyfit(np.log(q), np.log(C), 1) ax.plot(q, q**(-2-2*H)*alpha, label="{}, H={:.2f}".format('fit', H)) ax.legend(loc='best') def plot_grad_C0(surface, H_in, lam_max): surf = Tools.CharacterisePeriodicSurface(surface) q_min = 2*np.pi/lam_max sl = surf.q > q_min q = surf.q[sl] C = surf.C[sl] dim = 2 def C0_of_H(H): return ((q**(-3-2*H)).sum() / (q**(-5-4*H)/C).sum()) def objective(H, C0): return ((1 - C0*q**(-2*H-2)/C)**2 / q**(dim-1)).sum() C0 = C0_of_H(H_in) O0 = objective(H_in, C0) c_s = np.linspace(0, 2*C0, 51) o_s = np.zeros_like(c_s) for i, c in enumerate(c_s): o_s[i] = objective(H_in, c) fig = plt.figure() ax=fig.add_subplot(111) fig.suptitle('grad(C0)') ax.plot(c_s, o_s, marker= '+') ax.scatter(C0, O0, marker='x', label = 'root', c='r') ax.grid(True) print("C0 = {}, obj0 = {}".format(C0, O0)) return C0 def plot_grad_H(surface, lam_max): surf = Tools.CharacterisePeriodicSurface(surface) q_min = 2*np.pi/lam_max sl = surf.q > q_min q = surf.q[sl]# np.array(surf.q[sl][0], surf.q[sl][-1]) C = surf.C[sl]# np.array(surf.C[sl][0], surf.C[sl][-1]) dim = 2 def C0_of_H(H): return ((C**2/q**(-5-dim-4*H)).sum() / (C/q**(-3-dim-2*H)).sum()) def grad_h(H, C0): return (4*C0/C*np.log(q)*q**(-1-2*H-dim)*(1 - C0*q**(-2-2*H)/C)).sum() def objective(H, C0): return ((c/q**(-2*H-2) - C0)**2 / q**(dim-1)).sum() def full_obj(H): C0 = C0_of_H(H) return ((1 - C0/C*q**(-2*H-2))**2 / q**(dim-1)).sum() h_s = np.linspace(.0, 2., 51) o_s = np.zeros_like(h_s) g_s = np.zeros_like(h_s) for i, h in enumerate(h_s): c = C0_of_H(h) o_s[i] = objective(h, c) g_s[i] = grad_h(h, c) H_opt, obj_opt, err, nfeq = 
scipy.optimize.fminbound(full_obj, 0, 2, full_output=True) if err != 0: raise Exception() fig = plt.figure() ax=fig.add_subplot(211) ax.set_xlim(h_s[0], h_s[-1]) fig.suptitle('grad(H)') ax.plot(h_s, o_s, marker= '+') ax.grid(True) ax.scatter(H_opt, obj_opt, marker='x', label = 'root', c='r') ax=fig.add_subplot(212) ax.set_xlim(h_s[0], h_s[-1]) ax.plot(h_s, g_s, marker= '+') grad_opt = grad_h(H_opt, C0_of_H(H_opt)) ax.scatter(H_opt, grad_opt, marker='x', label = 'root', c='r') #res = scipy.optimize.fmin #print("H_out = {}, obj0 = {}".format(C0, O0)) ax.grid(True) return H_opt, C0_of_H(H_opt) def compare_to_PyPy(surface, lam_max, H_ref, C0_ref): fig = plt.figure() ax=fig.add_subplot(111) ax.set_yscale('log') ax.set_xscale('log') surf = Tools.CharacterisePeriodicSurface(surface) q_min = 2*np.pi/lam_max sl = surf.q > q_min q = surf.q C = surf.C H, alpha, res = surf.estimate_hurst_alt(lambda_max=lam_max, full_output=True) print("H = {}, alpha = {}".format(H, alpha)) ax.loglog(q, C, alpha=.1) mean, err, q_g = surf.grouped_stats(100) mask = np.isfinite(mean) mean = mean[mask] err = err[:, mask] q_g = q_g[mask] ax.errorbar(q_g, mean, yerr=err) ax.set_title("New: H_pypy={:.2f}, H_ref = {:.2f}, h_rms={:.2e}".format(H, H_ref, np.sqrt((surface.heights() ** 2).mean()))) ax.plot(q[sl], q[sl]**(-2-2*H)*alpha, label="{}, H={:.4f}".format('fit', H), lw = 3) ax.plot(q[sl], q[sl]**(-2-2*H_ref)*C0_ref, label="{}, H={:.4f}".format('ref_fit', H_ref), lw = 3) ax.legend(loc='best') fig = plt.figure() ax = fig.add_subplot(111) ax.loglog(q[sl], C[sl]/(q[sl]**(-2-2*H_ref)*C0_ref), alpha=.1) ax.errorbar(q_g, mean/(q_g**(-2-2*H_ref)*C0_ref), yerr=err/(q_g**(-2-2*H_ref)*C0_ref)) def main(): siz = 2000e-9 lam_max = .2*siz size = (siz, siz) hurst = .75 h_rms = 3.24e-8 res = 128 nb_grid_pts = (res, res) seed = 2 surface = Tools.RandomSurfaceGaussian( nb_grid_pts, size, hurst, h_rms, lambda_max=lam_max, seed=seed).get_surface() plot_naive(surface, lam_max) plot_grad_C0(surface, hurst, lam_max) H, C0 = plot_grad_H(surface, lam_max) print("H_ref = {}, C0_ref = {}".format(H, C0)) compare_to_PyPy(surface, lam_max, H, C0) if __name__ == "__main__": main() plt.show()
[ "PyCo.Tools.CharacterisePeriodicSurface", "scipy.optimize.fminbound", "numpy.log", "PyCo.Tools.RandomSurfaceGaussian", "matplotlib.pyplot.figure", "numpy.linspace", "numpy.isfinite", "numpy.zeros_like", "matplotlib.pyplot.show" ]
[((1421, 1433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1431, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1566), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (1557, 1566), True, 'import PyCo.Tools as Tools\n'), ((1811, 1828), 'numpy.isfinite', 'np.isfinite', (['mean'], {}), '(mean)\n', (1822, 1828), True, 'import numpy as np\n'), ((2234, 2276), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (2267, 2276), True, 'import PyCo.Tools as Tools\n'), ((2641, 2667), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * C0)', '(51)'], {}), '(0, 2 * C0, 51)\n', (2652, 2667), True, 'import numpy as np\n'), ((2676, 2694), 'numpy.zeros_like', 'np.zeros_like', (['c_s'], {}), '(c_s)\n', (2689, 2694), True, 'import numpy as np\n'), ((2775, 2787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2785, 2787), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3106), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (3097, 3106), True, 'import PyCo.Tools as Tools\n'), ((3739, 3764), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0)', '(51)'], {}), '(0.0, 2.0, 51)\n', (3750, 3764), True, 'import numpy as np\n'), ((3773, 3791), 'numpy.zeros_like', 'np.zeros_like', (['h_s'], {}), '(h_s)\n', (3786, 3791), True, 'import numpy as np\n'), ((3802, 3820), 'numpy.zeros_like', 'np.zeros_like', (['h_s'], {}), '(h_s)\n', (3815, 3820), True, 'import numpy as np\n'), ((3973, 4031), 'scipy.optimize.fminbound', 'scipy.optimize.fminbound', (['full_obj', '(0)', '(2)'], {'full_output': '(True)'}), '(full_obj, 0, 2, full_output=True)\n', (3997, 4031), False, 'import scipy\n'), ((4086, 4098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4096, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4715, 4727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4725, 4727), True, 'import matplotlib.pyplot as plt\n'), ((4818, 4860), 'PyCo.Tools.CharacterisePeriodicSurface', 'Tools.CharacterisePeriodicSurface', (['surface'], {}), '(surface)\n', (4851, 4860), True, 'import PyCo.Tools as Tools\n'), ((5160, 5177), 'numpy.isfinite', 'np.isfinite', (['mean'], {}), '(mean)\n', (5171, 5177), True, 'import numpy as np\n'), ((5642, 5654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5652, 5654), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6409, 6411), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2065), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (2062, 2065), True, 'import numpy as np\n'), ((2067, 2076), 'numpy.log', 'np.log', (['C'], {}), '(C)\n', (2073, 2076), True, 'import numpy as np\n'), ((6026, 6122), 'PyCo.Tools.RandomSurfaceGaussian', 'Tools.RandomSurfaceGaussian', (['nb_grid_pts', 'size', 'hurst', 'h_rms'], {'lambda_max': 'lam_max', 'seed': 'seed'}), '(nb_grid_pts, size, hurst, h_rms, lambda_max=\n lam_max, seed=seed)\n', (6053, 6122), True, 'import PyCo.Tools as Tools\n'), ((3445, 3454), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (3451, 3454), True, 'import numpy as np\n')]
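# Note on the spectrum-characterisation code above: the fitting routines assume
# an isotropic self-affine power spectrum of the form C(q) = C0 * q**(-2 - 2*H),
# where H is the Hurst exponent. A quick numerical sanity check of that
# relation, mirroring the naive log-log polyfit used in plot_naive (the values
# below are made up for illustration):
import numpy as np

H_true, C0_true = 0.75, 1e-38
q = np.logspace(6, 8, 200)                  # wavevectors, 1/m
C = C0_true * q**(-2 - 2 * H_true)            # ideal self-affine PSD
slope, _ = np.polyfit(np.log(q), np.log(C), 1)
H_est = -(slope + 2) / 2                      # invert slope = -2 - 2H
print(H_est)                                   # ~0.75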
import threading
import requests
import json
import os

from nose.tools import *

from server import Httpd

app_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")


class TestServerlessMock(object):

    def test_ok(self):
        ok_(True)

    def setUp(self):
        self.httpd = Httpd(app_path, 0)
        thread = threading.Thread(target=self.httpd.serve, args=())
        thread.daemon = True
        thread.start()
        self.prefix = "http://localhost:%d" % self.httpd.port

    def tearDown(self):
        self.httpd.shutdown()

    def test_return_hello_world(self):
        response = requests.get(self.url(""))
        eq_("Hello World", response.text)

    def test_simple_get(self):
        response = requests.get(self.url("/simple_get"))
        eq_(200, response.status_code)
        data = response.json()
        eq_(200, data.get("statusCode"))
        body = json.loads(data.get("body"))
        eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))

    def test_simple_get_and_ignore_query_string(self):
        response = requests.get(self.url("/simple_get?status=unknown"))
        eq_(200, response.status_code)
        data = response.json()
        eq_(200, data.get("statusCode"))
        body = json.loads(data.get("body"))
        eq_("Go Serverless v1.0! Your function executed successfully!", body.get("message"))

    def test_simple_post(self):
        response = requests.post(self.url("/simple_post"))
        eq_(200, response.status_code)
        data = response.json()
        eq_(201, data.get("statusCode"))

    def test_post_with_payload(self):
        response = requests.post(self.url("/post_with_payload"), data=json.dumps({"id" : 123}))
        eq_(200, response.status_code)
        data = response.json()
        eq_(200, data.get("statusCode"))
        eq_({"id" : 123}, data.get("body"))

    def test_post_with_payload_and_template(self):
        response = requests.post(self.url("/post_with_payload_and_template"), data=json.dumps({"id" : 123}))
        eq_(200, response.status_code)
        data = response.json()
        eq_(200, data.get("statusCode"))
        eq_({"body" : {"id" : 123}}, data.get("body"))

    def test_post_with_payload_and_template_without_any_function(self):
        response = requests.post(self.url("/post_with_payload_and_template_without_any_function"), data=json.dumps({"id" : 123}))
        eq_(200, response.status_code)
        data = response.json()
        eq_(200, data.get("statusCode"))
        eq_({"action" : "trigger"}, data.get("body"))

    def url(self, path):
        return "%s%s" % (self.prefix, path)
[ "os.path.realpath", "threading.Thread", "json.dumps", "server.Httpd" ]
[((146, 172), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (162, 172), False, 'import os\n'), ((302, 320), 'server.Httpd', 'Httpd', (['app_path', '(0)'], {}), '(app_path, 0)\n', (307, 320), False, 'from server import Httpd\n'), ((338, 388), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.httpd.serve', 'args': '()'}), '(target=self.httpd.serve, args=())\n', (354, 388), False, 'import threading\n'), ((1719, 1742), 'json.dumps', 'json.dumps', (["{'id': 123}"], {}), "({'id': 123})\n", (1729, 1742), False, 'import json\n'), ((2036, 2059), 'json.dumps', 'json.dumps', (["{'id': 123}"], {}), "({'id': 123})\n", (2046, 2059), False, 'import json\n'), ((2406, 2429), 'json.dumps', 'json.dumps', (["{'id': 123}"], {}), "({'id': 123})\n", (2416, 2429), False, 'import json\n')]
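# Note on the tests above: each mocked endpoint is expected to return a
# Lambda-style payload, i.e. a JSON object with a numeric "statusCode" and a
# JSON-encoded "body" string. A handler that would satisfy the /simple_get
# assertions could look like the sketch below (the function name and module
# layout are assumptions, not taken from the app directory the test server loads):
import json

def simple_get(event, context):
    return {
        "statusCode": 200,
        "body": json.dumps({
            "message": "Go Serverless v1.0! Your function executed successfully!",
        }),
    }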
# Generated by Django 3.1.12 on 2021-06-24 18:12

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('trip', '0006_remove_travelmodel_driver'),
    ]

    operations = [
        migrations.AddField(
            model_name='tripmodel',
            name='tickets_sold',
            field=models.PositiveSmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='travelmodel',
            name='trip',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='travel_trip_set', related_query_name='travel_trip_set', to='trip.tripmodel'),
        ),
    ]
[ "django.db.models.PositiveSmallIntegerField", "django.db.models.ForeignKey" ]
[((381, 424), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (413, 424), False, 'from django.db import migrations, models\n'), ((549, 712), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""travel_trip_set"""', 'related_query_name': '"""travel_trip_set"""', 'to': '"""trip.tripmodel"""'}), "(on_delete=django.db.models.deletion.PROTECT, related_name\n ='travel_trip_set', related_query_name='travel_trip_set', to=\n 'trip.tripmodel')\n", (566, 712), False, 'from django.db import migrations, models\n')]
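# Note on the migration above: it corresponds to model changes roughly like the
# following sketch. The class names and module layout are inferred from the
# migration rather than taken from the project's models.py, and the snippet
# assumes a configured Django project with a 'trip' app; it is not a standalone
# script:
from django.db import models

class TripModel(models.Model):
    tickets_sold = models.PositiveSmallIntegerField(default=0)

class TravelModel(models.Model):
    trip = models.ForeignKey(
        'trip.TripModel',
        on_delete=models.PROTECT,
        related_name='travel_trip_set',
        related_query_name='travel_trip_set',
    )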
#!/usr/bin/python import sys import os import shutil from glob import glob from PyQt5.QtCore import (Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl) from PyQt5.QtGui import QIcon, QDesktopServices from PyQt5.QtWidgets import (QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog) from P13pt.spectrumfitter.dataloader import DataLoader from P13pt.spectrumfitter.navigator import Navigator from P13pt.spectrumfitter.fitter import Fitter from P13pt.spectrumfitter.plotter import Plotter from P13pt.spectrumfitter.load_fitresults import load_fitresults from P13pt.params_from_filename import params_from_filename class MainWindow(QMainWindow): session_file = None def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.settings = QSettings("Mercury", "SpectrumFitter") # set up data loading area self.dock_loader = QDockWidget('Data loading', self) self.dock_loader.setObjectName('loader') self.loader = DataLoader() self.dock_loader.setWidget(self.loader) # set up data navigator self.dock_navigator = QDockWidget('Data navigation', self) self.dock_navigator.setObjectName('navigator') self.navigator = Navigator() self.dock_navigator.setWidget(self.navigator) # set up plotter self.plotter = Plotter() self.setCentralWidget(self.plotter) # set up fitter self.dock_fitter = QDockWidget('Fitting', self) self.dock_fitter.setObjectName('fitter') self.fitter = Fitter() self.dock_fitter.setWidget(self.fitter) # set up the dock positions self.addDockWidget(Qt.TopDockWidgetArea, self.dock_loader) self.addDockWidget(Qt.LeftDockWidgetArea, self.dock_navigator) self.addDockWidget(Qt.RightDockWidgetArea, self.dock_fitter) # set up menus fileMenu = self.menuBar().addMenu('File') self.act_new_session = QAction('New session', self) self.act_load_session = QAction('Load session', self) self.act_save_session = QAction('Save session', self) self.act_save_session_as = QAction('Save session as...', self) for a in [self.act_new_session, self.act_load_session, self.act_save_session, self.act_save_session_as]: fileMenu.addAction(a) self.recent_menu = fileMenu.addMenu('Recent sessions') self.update_recent_list() fileMenu.addSeparator() self.act_save_image = QAction('Save spectrum as image', self) self.act_save_allimages = QAction('Save all spectra as images', self) for a in [self.act_save_image, self.act_save_allimages]: fileMenu.addAction(a) viewMenu = self.menuBar().addMenu('View') for w in [self.dock_loader, self.dock_navigator, self.dock_fitter]: viewMenu.addAction(w.toggleViewAction()) self.act_restore_default_view = QAction('Restore default', self) viewMenu.addAction(self.act_restore_default_view) self.act_toggle_display_style = QAction('Toggle display style', self) self.act_toggle_display_style.setShortcut(Qt.Key_F8) viewMenu.addAction(self.act_toggle_display_style) toolsMenu = self.menuBar().addMenu('Tools') self.act_install_builtin_models = QAction('Install built-in models', self) toolsMenu.addAction(self.act_install_builtin_models) self.act_open_model_folder = QAction('Open model folder', self) toolsMenu.addAction(self.act_open_model_folder) # make connections self.loader.dataset_changed.connect(self.dataset_changed) self.loader.new_file_in_dataset.connect(self.navigator.new_file_in_dataset) self.loader.deembedding_changed.connect(self.deembedding_changed) self.navigator.selection_changed.connect(self.selection_changed) self.fitter.fit_changed.connect(lambda: 
self.plotter.plot_fit(self.fitter.model)) self.fitter.fitted_param_changed.connect(self.plotter.fitted_param_changed) self.fitter.btn_fitall.clicked.connect(self.fit_all) self.act_new_session.triggered.connect(self.new_session) self.act_load_session.triggered.connect(self.load_session) self.act_save_session.triggered.connect(self.save_session) self.act_save_session_as.triggered.connect(self.save_session_as) self.act_save_image.triggered.connect(self.save_image) self.act_save_allimages.triggered.connect(self.save_all_images) self.act_restore_default_view.triggered.connect(lambda: self.restoreState(self.default_state)) self.act_toggle_display_style.triggered.connect(self.toggle_display_style) self.act_install_builtin_models.triggered.connect(self.install_builtin_models) self.act_open_model_folder.triggered.connect(self.open_model_folder) # set up fitted parameter (this has to be done after making connections, so that fitter and plotter sync) self.fitter.fitted_param = '-Y12' # default value # create new session self.new_session() # show window self.show() self.default_state = self.saveState() # restore layout from config (this has to be done AFTER self.show()) if self.settings.contains('geometry'): self.restoreGeometry(self.settings.value("geometry")) if self.settings.contains('windowState'): self.restoreState(self.settings.value("windowState")) def closeEvent(self, event): self.settings.setValue("geometry", self.saveGeometry()) self.settings.setValue("windowState", self.saveState()) super(MainWindow, self).closeEvent(event) def dataset_changed(self): self.fitter.empty_cache() self.navigator.update_file_list(self.loader.dut_files) for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]: a.setEnabled(True) def toggle_display_style(self): if self.plotter.display_style == 'MP': self.plotter.display_style = 'RI' else: self.plotter.display_style = 'MP' self.deembedding_changed() # TODO: rename and/or remove redundancies, c.f. 
deembedding_changed() def deembedding_changed(self): # TODO: reduce redundancy with selection_changed() i = self.navigator.file_list.currentRow() spectrum = self.loader.get_spectrum(i) if spectrum is not None: #TODO: show parameters on plot self.plotter.plot(spectrum, {}) else: self.plotter.clear() self.fitter.update_network(spectrum, self.loader.dut_files[i]) def selection_changed(self, i): if i < 0: # when file_list is cleared: return QApplication.setOverrideCursor(Qt.WaitCursor) # TODO: the argument here should be a filename, not the index spectrum = self.loader.get_spectrum(i) if spectrum is not None: self.plotter.plot(spectrum, params_from_filename(self.loader.dut_files[i])) else: self.plotter.clear() self.fitter.update_network(spectrum, self.loader.dut_files[i]) QApplication.restoreOverrideCursor() def new_session(self): self.session_file = None self.setWindowTitle('Spectrum Fitter - New session') self.fitter.unload_model() self.loader.clear() self.navigator.clear() self.plotter.clear() for a in [self.act_save_session, self.act_save_session_as, self.act_save_image, self.act_save_allimages]: a.setEnabled(False) @pyqtSlot() def save_session_as(self, res_file=None): if not res_file: res_file, filter = QFileDialog.getSaveFileName(self, 'Fit results file', filter='*.txt') if not res_file: return res_folder = os.path.dirname(res_file) try: with open(res_file, 'w') as f: # write the header f.write('# fitting results generated by P13pt spectrum fitter\n') if len(self.loader.dut_files) == 1: f.write('# dut: ' + os.path.join( os.path.relpath(self.loader.dut_folder, res_folder), self.loader.dut_files[0] ).replace('\\', '/') + '\n') else: f.write('# dut: ' + os.path.relpath(self.loader.dut_folder, res_folder).replace('\\', '/') + '\n') if self.loader.thru and self.loader.thru_toggle_status: f.write('# thru: ' + os.path.relpath(self.loader.thru_file, res_folder).replace('\\', '/') + '\n') if self.loader.dummy and self.loader.dummy_toggle_status: f.write('# dummy: ' + os.path.relpath(self.loader.dummy_file, res_folder).replace('\\', '/') + '\n') f.write('# fitted_param: ' + self.plotter.fitted_param + '\n') try: ra = float(self.loader.txt_ra.text()) except: ra = 0. if not ra == 0: f.write('# ra: ' + str(ra) + '\n') if self.fitter.model: f.write('# model: ' + os.path.basename(self.fitter.model_file).replace('\\', '/') + '\n') f.write('# model_func: ' + self.fitter.cmb_modelfunc.currentText() + '\n') # TODO: this all could clearly be done in a more elegant way if self.fitter.cmb_fitmethod.currentText() != 'No fit methods found': f.write('# fit_method: ' + self.fitter.cmb_fitmethod.currentText() + '\n') # determine columns f.write('# filename\t') for p in params_from_filename(self.loader.dut_files[0]): f.write(p + '\t') f.write('\t'.join([p for p in self.fitter.model.params])) f.write('\n') # write data filelist = sorted([filename for filename in self.fitter.model_params]) for filename in filelist: f.write(filename + '\t') # TODO: what if some filenames do not contain all parameters? 
should catch exceptions for p in params_from_filename(self.loader.dut_files[0]): f.write(str(params_from_filename(filename)[p]) + '\t') f.write('\t'.join([str(self.fitter.model_params[filename][p]) for p in self.fitter.model.params])) f.write('\n') except EnvironmentError as e: QMessageBox.critical(self, 'Error', 'Could not save session: '+str(e)) return self.update_recent_list(res_file) self.setWindowTitle('Spectrum Fitter - '+res_file) self.session_file = res_file def save_session(self): self.save_session_as(self.session_file) @pyqtSlot() def load_session(self, res_file=None): if not res_file: res_file, filter = QFileDialog.getOpenFileName(self, 'Fit results file', filter='*.txt') if not res_file: return res_folder = os.path.dirname(res_file) self.new_session() # read the data try: data, dut, thru, dummy, ra, fitter_info = load_fitresults(res_file, readfilenameparams=False, extrainfo=True) except IOError as e: QMessageBox.warning(self, 'Error', 'Could not load data: '+str(e)) return # using os.path.realpath to get rid of relative path remainders ("..") self.loader.load_dataset(dut=os.path.realpath(os.path.join(res_folder, dut)) if dut else None, thru=os.path.realpath(os.path.join(res_folder, thru)) if thru else None, dummy=os.path.realpath(os.path.join(res_folder, dummy)) if dummy else None, ra=ra if ra else None) # if a fitted_param was provided in the session file, set it up if 'fitted_param' in fitter_info: self.fitter.fitted_param = fitter_info['fitted_param'] # if a model was provided in the session file, load this model and the provided data if 'model' in fitter_info: self.fitter.load_model(filename=fitter_info['model'], info=fitter_info, data=data if data else None) # update the fitter with the first spectrum in the list self.fitter.update_network(self.loader.get_spectrum(0), self.loader.dut_files[0]) self.update_recent_list(res_file) self.setWindowTitle('Spectrum Fitter - '+res_file) self.session_file = res_file #TODO: this is not really in the right place @pyqtSlot() def fit_all(self): totalnum = len(self.loader.dut_files) progressdialog = QProgressDialog('Fitting all spectra...', 'Cancel', 0, totalnum-1, self) progressdialog.setWindowTitle('Progress') progressdialog.setModal(True) progressdialog.setAutoClose(True) progressdialog.show() for i in range(totalnum): QApplication.processEvents() if progressdialog.wasCanceled(): break self.navigator.file_list.setCurrentRow(i) self.fitter.fit_model() progressdialog.setValue(i) def save_image(self): basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()]) filename, filter = QFileDialog.getSaveFileName(self, 'Choose file', os.path.join(self.loader.dut_folder, basename+'.png'), filter='*.png;;*.jpg;;*.eps') if filename: self.plotter.save_fig(filename) def save_all_images(self): foldername = QFileDialog.getExistingDirectory(self, 'Choose folder', self.loader.dut_folder) totalnum = len(self.loader.dut_files) progressdialog = QProgressDialog('Saving all images...', 'Cancel', 0, totalnum - 1, self) progressdialog.setWindowTitle('Progress') progressdialog.setModal(True) progressdialog.setAutoClose(True) progressdialog.show() for i in range(totalnum): QApplication.processEvents() if progressdialog.wasCanceled(): break self.navigator.file_list.setCurrentRow(i) basename, ext = os.path.splitext(self.loader.dut_files[self.navigator.file_list.currentRow()]) self.plotter.save_fig(os.path.join(foldername, basename+'.png')) progressdialog.setValue(i) def load_recent(self): action = self.sender() 
self.load_session(action.text()) def update_recent_list(self, filename=None): recentlist = list(self.settings.value('recentSessions')) if self.settings.contains('recentSessions') \ else [] if filename: if filename in recentlist: recentlist.remove(filename) recentlist.insert(0, filename) recentlist = recentlist[0:5] self.settings.setValue('recentSessions', recentlist) self.recent_menu.clear() for r in recentlist: a = QAction(r, self) self.recent_menu.addAction(a) a.triggered.connect(self.load_recent) def install_builtin_models(self): builtin_folder = os.path.join(os.path.dirname(__file__), 'models') for filename in sorted(glob(os.path.join(builtin_folder, '*.py'))): # check if the file already exists in the models folder if os.path.exists(os.path.join(self.fitter.models_dir, os.path.basename(filename))): answer = QMessageBox.question(self, 'File already exists', 'The file: '+os.path.basename(filename)+ 'already exists in your models folder. Would you like to replace it?') if answer != QMessageBox.Yes: continue # if file does not exist or user does not mind replacing it, let's copy: shutil.copyfile(filename, os.path.join(self.fitter.models_dir, os.path.basename(filename))) def open_model_folder(self): QDesktopServices.openUrl(QUrl.fromLocalFile(self.fitter.models_dir)) def msghandler(type, context, message): if type == QtInfoMsg: QMessageBox.information(None, 'Info', message) elif type == QtDebugMsg: QMessageBox.information(None, 'Debug', message) elif type == QtCriticalMsg: QMessageBox.critical(None, 'Critical', message) elif type == QtWarningMsg: QMessageBox.warning(None, 'Warning', message) elif type == QtFatalMsg: QMessageBox.critical(None, 'Fatal error', message) def main(): qInstallMessageHandler(msghandler) # CD into directory where this script is saved d = os.path.dirname(__file__) if d != '': os.chdir(d) app = QApplication(sys.argv) app.setWindowIcon(QIcon('audacity.png')) mainwindow = MainWindow() # Start the main loop. ret = app.exec_() sys.exit(ret) if __name__ == '__main__': main()
[ "PyQt5.QtGui.QIcon", "PyQt5.QtCore.QUrl.fromLocalFile", "P13pt.spectrumfitter.plotter.Plotter", "PyQt5.QtWidgets.QProgressDialog", "PyQt5.QtWidgets.QApplication.processEvents", "PyQt5.QtWidgets.QApplication", "sys.exit", "PyQt5.QtWidgets.QDockWidget", "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "P13pt.params_from_filename.params_from_filename", "PyQt5.QtCore.qInstallMessageHandler", "P13pt.spectrumfitter.load_fitresults.load_fitresults", "os.path.relpath", "P13pt.spectrumfitter.navigator.Navigator", "PyQt5.QtWidgets.QMessageBox.information", "os.path.dirname", "PyQt5.QtWidgets.QApplication.restoreOverrideCursor", "PyQt5.QtWidgets.QApplication.setOverrideCursor", "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "P13pt.spectrumfitter.fitter.Fitter", "os.path.join", "PyQt5.QtWidgets.QMessageBox.critical", "PyQt5.QtCore.pyqtSlot", "PyQt5.QtWidgets.QAction", "os.chdir", "P13pt.spectrumfitter.dataloader.DataLoader", "PyQt5.QtCore.QSettings", "os.path.basename", "PyQt5.QtWidgets.QMessageBox.warning", "PyQt5.QtWidgets.QFileDialog.getSaveFileName" ]
[((7840, 7850), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (7848, 7850), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((11290, 11300), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (11298, 11300), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((13173, 13183), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (13181, 13183), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((17347, 17381), 'PyQt5.QtCore.qInstallMessageHandler', 'qInstallMessageHandler', (['msghandler'], {}), '(msghandler)\n', (17369, 17381), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((17442, 17467), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (17457, 17467), False, 'import os\n'), ((17507, 17529), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (17519, 17529), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((17661, 17674), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (17669, 17674), False, 'import sys\n'), ((959, 997), 'PyQt5.QtCore.QSettings', 'QSettings', (['"""Mercury"""', '"""SpectrumFitter"""'], {}), "('Mercury', 'SpectrumFitter')\n", (968, 997), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((1061, 1094), 'PyQt5.QtWidgets.QDockWidget', 'QDockWidget', (['"""Data loading"""', 'self'], {}), "('Data loading', self)\n", (1072, 1094), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((1166, 1178), 'P13pt.spectrumfitter.dataloader.DataLoader', 'DataLoader', ([], {}), '()\n', (1176, 1178), False, 'from P13pt.spectrumfitter.dataloader import DataLoader\n'), ((1290, 1326), 'PyQt5.QtWidgets.QDockWidget', 'QDockWidget', (['"""Data navigation"""', 'self'], {}), "('Data navigation', self)\n", (1301, 1326), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((1407, 1418), 'P13pt.spectrumfitter.navigator.Navigator', 'Navigator', ([], {}), '()\n', (1416, 1418), False, 'from P13pt.spectrumfitter.navigator import Navigator\n'), ((1522, 1531), 'P13pt.spectrumfitter.plotter.Plotter', 'Plotter', ([], {}), '()\n', (1529, 1531), False, 'from P13pt.spectrumfitter.plotter import Plotter\n'), ((1628, 1656), 'PyQt5.QtWidgets.QDockWidget', 'QDockWidget', (['"""Fitting"""', 'self'], {}), "('Fitting', self)\n", (1639, 1656), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((1728, 1736), 'P13pt.spectrumfitter.fitter.Fitter', 'Fitter', ([], {}), '()\n', (1734, 1736), False, 'from P13pt.spectrumfitter.fitter import Fitter\n'), ((2134, 2162), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""New session"""', 'self'], {}), "('New session', self)\n", (2141, 2162), False, 'from PyQt5.QtWidgets import QApplication, 
QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((2195, 2224), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Load session"""', 'self'], {}), "('Load session', self)\n", (2202, 2224), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((2257, 2286), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Save session"""', 'self'], {}), "('Save session', self)\n", (2264, 2286), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((2322, 2357), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Save session as..."""', 'self'], {}), "('Save session as...', self)\n", (2329, 2357), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((2664, 2703), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Save spectrum as image"""', 'self'], {}), "('Save spectrum as image', self)\n", (2671, 2703), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((2738, 2781), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Save all spectra as images"""', 'self'], {}), "('Save all spectra as images', self)\n", (2745, 2781), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((3101, 3133), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Restore default"""', 'self'], {}), "('Restore default', self)\n", (3108, 3133), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((3232, 3269), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Toggle display style"""', 'self'], {}), "('Toggle display style', self)\n", (3239, 3269), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((3484, 3524), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Install built-in models"""', 'self'], {}), "('Install built-in models', self)\n", (3491, 3524), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((3623, 3657), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""Open model folder"""', 'self'], {}), "('Open model folder', self)\n", (3630, 3657), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((6994, 7039), 'PyQt5.QtWidgets.QApplication.setOverrideCursor', 'QApplication.setOverrideCursor', (['Qt.WaitCursor'], {}), '(Qt.WaitCursor)\n', (7024, 7039), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((7406, 7442), 'PyQt5.QtWidgets.QApplication.restoreOverrideCursor', 'QApplication.restoreOverrideCursor', ([], {}), '()\n', (7440, 7442), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((8088, 8113), 'os.path.dirname', 'os.path.dirname', (['res_file'], {}), '(res_file)\n', (8103, 8113), False, 'import os\n'), ((11535, 11560), 'os.path.dirname', 'os.path.dirname', (['res_file'], {}), '(res_file)\n', (11550, 11560), False, 'import os\n'), ((13281, 13355), 'PyQt5.QtWidgets.QProgressDialog', 'QProgressDialog', (['"""Fitting all spectra..."""', '"""Cancel"""', '(0)', '(totalnum - 1)', 'self'], 
{}), "('Fitting all spectra...', 'Cancel', 0, totalnum - 1, self)\n", (13296, 13355), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((14316, 14395), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['self', '"""Choose folder"""', 'self.loader.dut_folder'], {}), "(self, 'Choose folder', self.loader.dut_folder)\n", (14348, 14395), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((14523, 14595), 'PyQt5.QtWidgets.QProgressDialog', 'QProgressDialog', (['"""Saving all images..."""', '"""Cancel"""', '(0)', '(totalnum - 1)', 'self'], {}), "('Saving all images...', 'Cancel', 0, totalnum - 1, self)\n", (14538, 14595), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((16937, 16983), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['None', '"""Info"""', 'message'], {}), "(None, 'Info', message)\n", (16960, 16983), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((17484, 17495), 'os.chdir', 'os.chdir', (['d'], {}), '(d)\n', (17492, 17495), False, 'import os\n'), ((17552, 17573), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""audacity.png"""'], {}), "('audacity.png')\n", (17557, 17573), False, 'from PyQt5.QtGui import QIcon, QDesktopServices\n'), ((7953, 8022), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', (['self', '"""Fit results file"""'], {'filter': '"""*.txt"""'}), "(self, 'Fit results file', filter='*.txt')\n", (7980, 8022), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((11400, 11469), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Fit results file"""'], {'filter': '"""*.txt"""'}), "(self, 'Fit results file', filter='*.txt')\n", (11427, 11469), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((11681, 11748), 'P13pt.spectrumfitter.load_fitresults.load_fitresults', 'load_fitresults', (['res_file'], {'readfilenameparams': '(False)', 'extrainfo': '(True)'}), '(res_file, readfilenameparams=False, extrainfo=True)\n', (11696, 11748), False, 'from P13pt.spectrumfitter.load_fitresults import load_fitresults\n'), ((13567, 13595), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', ([], {}), '()\n', (13593, 13595), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((14058, 14113), 'os.path.join', 'os.path.join', (['self.loader.dut_folder', "(basename + '.png')"], {}), "(self.loader.dut_folder, basename + '.png')\n", (14070, 14113), False, 'import os\n'), ((14803, 14831), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', ([], {}), '()\n', (14829, 14831), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((15788, 15804), 'PyQt5.QtWidgets.QAction', 'QAction', (['r', 'self'], {}), '(r, self)\n', (15795, 15804), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((15974, 15999), 'os.path.dirname', 
'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15989, 15999), False, 'import os\n'), ((16818, 16860), 'PyQt5.QtCore.QUrl.fromLocalFile', 'QUrl.fromLocalFile', (['self.fitter.models_dir'], {}), '(self.fitter.models_dir)\n', (16836, 16860), False, 'from PyQt5.QtCore import Qt, qInstallMessageHandler, QtInfoMsg, QtCriticalMsg, QtDebugMsg, QtWarningMsg, QtFatalMsg, QSettings, pyqtSlot, QStandardPaths, QUrl\n'), ((17021, 17068), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['None', '"""Debug"""', 'message'], {}), "(None, 'Debug', message)\n", (17044, 17068), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((7231, 7277), 'P13pt.params_from_filename.params_from_filename', 'params_from_filename', (['self.loader.dut_files[i]'], {}), '(self.loader.dut_files[i])\n', (7251, 7277), False, 'from P13pt.params_from_filename import params_from_filename\n'), ((15094, 15137), 'os.path.join', 'os.path.join', (['foldername', "(basename + '.png')"], {}), "(foldername, basename + '.png')\n", (15106, 15137), False, 'import os\n'), ((16048, 16084), 'os.path.join', 'os.path.join', (['builtin_folder', '"""*.py"""'], {}), "(builtin_folder, '*.py')\n", (16060, 16084), False, 'import os\n'), ((17109, 17156), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['None', '"""Critical"""', 'message'], {}), "(None, 'Critical', message)\n", (17129, 17156), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((10071, 10117), 'P13pt.params_from_filename.params_from_filename', 'params_from_filename', (['self.loader.dut_files[0]'], {}), '(self.loader.dut_files[0])\n', (10091, 10117), False, 'from P13pt.params_from_filename import params_from_filename\n'), ((16223, 16249), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (16239, 16249), False, 'import os\n'), ((16722, 16748), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (16738, 16748), False, 'import os\n'), ((17196, 17241), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['None', '"""Warning"""', 'message'], {}), "(None, 'Warning', message)\n", (17215, 17241), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((10636, 10682), 'P13pt.params_from_filename.params_from_filename', 'params_from_filename', (['self.loader.dut_files[0]'], {}), '(self.loader.dut_files[0])\n', (10656, 10682), False, 'from P13pt.params_from_filename import params_from_filename\n'), ((12010, 12039), 'os.path.join', 'os.path.join', (['res_folder', 'dut'], {}), '(res_folder, dut)\n', (12022, 12039), False, 'import os\n'), ((12114, 12144), 'os.path.join', 'os.path.join', (['res_folder', 'thru'], {}), '(res_folder, thru)\n', (12126, 12144), False, 'import os\n'), ((12221, 12252), 'os.path.join', 'os.path.join', (['res_folder', 'dummy'], {}), '(res_folder, dummy)\n', (12233, 12252), False, 'import os\n'), ((17279, 17329), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['None', '"""Fatal error"""', 'message'], {}), "(None, 'Fatal error', message)\n", (17299, 17329), False, 'from PyQt5.QtWidgets import QApplication, QMessageBox, QMainWindow, QDockWidget, QAction, QFileDialog, QProgressDialog\n'), ((16341, 16367), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (16357, 16367), False, 'import os\n'), ((8683, 
8734), 'os.path.relpath', 'os.path.relpath', (['self.loader.dut_folder', 'res_folder'], {}), '(self.loader.dut_folder, res_folder)\n', (8698, 8734), False, 'import os\n'), ((8875, 8925), 'os.path.relpath', 'os.path.relpath', (['self.loader.thru_file', 'res_folder'], {}), '(self.loader.thru_file, res_folder)\n', (8890, 8925), False, 'import os\n'), ((9069, 9120), 'os.path.relpath', 'os.path.relpath', (['self.loader.dummy_file', 'res_folder'], {}), '(self.loader.dummy_file, res_folder)\n', (9084, 9120), False, 'import os\n'), ((9525, 9565), 'os.path.basename', 'os.path.basename', (['self.fitter.model_file'], {}), '(self.fitter.model_file)\n', (9541, 9565), False, 'import os\n'), ((8454, 8505), 'os.path.relpath', 'os.path.relpath', (['self.loader.dut_folder', 'res_folder'], {}), '(self.loader.dut_folder, res_folder)\n', (8469, 8505), False, 'import os\n'), ((10724, 10754), 'P13pt.params_from_filename.params_from_filename', 'params_from_filename', (['filename'], {}), '(filename)\n', (10744, 10754), False, 'from P13pt.params_from_filename import params_from_filename\n')]
from pycket.error import SchemeException from rpython.tool.pairtype import extendabletype from rpython.rlib import jit, objectmodel class W_ProtoObject(object): """ abstract base class of both actual values (W_Objects) and multiple return values (Values)""" _attrs_ = [] _settled_ = True def as_real_value(self): raise NotImplementedError("not a real value!") def num_values(val): raise NotImplementedError("not a real value!") def get_value(val, index): raise NotImplementedError("not a real value!") def get_all_values(self): raise NotImplementedError("not a real value!") class W_Object(W_ProtoObject): __metaclass__ = extendabletype _attrs_ = [] errorname = "%%%%unreachable%%%%" def __init__(self): raise NotImplementedError("abstract base class") def num_values(self): return 1 def get_value(self, index): assert index == 0 return self def get_all_values(self): return [self] def iscallable(self): return False def call(self, args, env, cont): raise SchemeException("%s is not callable" % self.tostring()) def call_with_extra_info(self, args, env, cont, calling_app): return self.call(args, env, cont) def enable_jitting(self): pass # need to override in callables that are based on an AST # an arity is a pair of a list of numbers and either -1 or a non-negative integer def get_arity(self): from pycket.interpreter import Arity if self.iscallable(): return Arity.unknown else: raise SchemeException("%s does not have arity" % self.tostring()) def is_proper_list(self): return False def is_impersonator(self): return self.is_chaperone() def is_chaperone(self): return False def is_proxy(self): return self.is_chaperone() or self.is_impersonator() def get_proxied(self): return self def get_properties(self): return {} def is_non_interposing_chaperone(self): return False def immutable(self): return False def equal(self, other): return self is other # default implementation def eqv(self, other): return self is other # default implementation def hash_equal(self): return objectmodel.compute_hash(self) # default implementation hash_eqv = hash_equal def tostring(self): return str(self) # for expose @classmethod def make_unwrapper(cls): if cls is W_Object: return lambda x: x, '' def unwrap(w_object): if isinstance(w_object, cls): return w_object return None return unwrap, cls.errorname class SingletonMeta(type): def __new__(cls, name, bases, dct): result = type.__new__(cls, name, bases, dct) result.singleton = result() return result
[ "rpython.rlib.objectmodel.compute_hash" ]
[((2382, 2412), 'rpython.rlib.objectmodel.compute_hash', 'objectmodel.compute_hash', (['self'], {}), '(self)\n', (2406, 2412), False, 'from rpython.rlib import jit, objectmodel\n')]
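The class hierarchy above only becomes useful once concrete value types subclass W_Object, so here is a minimal sketch of that pattern; the class name W_Fixnum and its value field are illustrative assumptions, not taken from the source above.

class W_Fixnum(W_Object):          # illustrative name and field, not from the source above
    errorname = "fixnum"
    _attrs_ = ["value"]

    def __init__(self, value):
        self.value = value

    def tostring(self):
        return str(self.value)

# make_unwrapper() yields an isinstance-based checker plus the errorname,
# which calling code can use to validate arguments and report failures.
unwrap, errname = W_Fixnum.make_unwrapper()
assert unwrap(W_Fixnum(42)) is not None
assert unwrap(object()) is None and errname == "fixnum"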
import numpy as np from stable_baselines import PPO2 from stable_baselines.common.policies import CnnPolicy from stable_baselines.a2c.utils import conv, linear, conv_to_fc from src.envs import CMDP, FrozenLakeEnvCustomMap from src.envs.frozen_lake.frozen_maps import MAPS from src.students import LagrangianStudent, identity_transfer from src.online_learning import ExponetiatedGradient from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \ create_intervention, SmallFrozenTeacherEnv from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits from src.envs.frozen_lake.utils import create_intervention_from_map, \ OptimalAgent, add_teacher import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) __all__ = ['create_teacher_env', 'small_base_cenv_fn'] def constraint(info=None, **kwargs): return {'g': float(info['next_state_type'] in 'H')} def small_base_env_fn(): # Base MDP world_map = MAPS['small'] not_slipping_prob = 0.8 env_kwargs = dict(desc=world_map, not_slipping_prob=not_slipping_prob, base_r_mapping=None, timeout=200) return FrozenLakeEnvCustomMap(**env_kwargs) # Base CMDP def small_base_cenv_fn(): return CMDP(small_base_env_fn(), constraint, constraints_values=[0], n_constraints=1, avg_constraint=True) def make_base_small_cenvs(): # Base MDP world_map = MAPS['small'] # # 2 interventions # dist = [1, 1] # tau = [0.1, 0] # buff_size = [1, 0] # avg_constraint = [True, True] # 3 Interventions dist = [2, 1, 1] tau = [0.1, 0.1, 0] buff_size = [1, 1, 0] avg_constraint = [True, True, True] interventions = [] for d, t, b, avg in zip(dist, tau, buff_size, avg_constraint): interventions.append( create_intervention( small_base_cenv_fn, create_intervention_from_map(add_teacher(world_map, d)), [t], b, use_vec=True, avg_constraint=avg) ) assert callable(interventions[0]) test_env = create_intervention( small_base_cenv_fn(), create_intervention_from_map(add_teacher( world_map)), [0.0], 0, avg_constraint=True) return interventions, test_env ############################## TEACHER ENV ################################### def my_small_cnn(scaled_images, **kwargs): activ = tf.nn.relu layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3, stride=1, **kwargs)) layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3, stride=1, **kwargs)) layer_3 = conv_to_fc(layer_2) return activ( linear(layer_3, 'fc1', n_hidden=32, init_scale=np.sqrt(2))) def create_teacher_env(new_br_kwargs={}, new_online_kwargs={}, original=False, obs_from_training=False, non_stationary_bandit=False): # Student definition br_kwargs = dict(policy=CnnPolicy, verbose=0, n_steps=128, ent_coef=0.05, cliprange=0.2, learning_rate=1e-3, noptepochs=9, policy_kwargs={'cnn_extractor': my_small_cnn}) br_kwargs.update(new_br_kwargs) # Define online kwargs online_kwargs = dict(B=0.5, eta=1.0) online_kwargs.update(new_online_kwargs) student_cls = LagrangianStudent n_envs = 4 use_sub_proc_env = False student_default_kwargs = {'env': None, 'br_algo': PPO2, 'online_algo': ExponetiatedGradient, 'br_kwargs': br_kwargs, 'online_kwargs': online_kwargs, 'lagrangian_ronuds': 2, 'curriculum_transfer': identity_transfer, 'br_uses_vec_env': True, 'use_sub_proc_env': use_sub_proc_env, 'n_envs': n_envs, } student_ranges_dict = {} # Teacher interventions if original: # To preserve the teacher env interface while training in the # original environment, we introduce a dummy intervention # condition that is always False. 
def dummy_intervention(**kwargs): return 0 _, test_env = make_base_small_cenvs() intervention = create_intervention( base_cenv=small_base_cenv_fn, interventions=[dummy_intervention], taus=[0], buf_size=0, use_vec=True, avg_constraint=True) interventions = [intervention] else: interventions, test_env = make_base_small_cenvs() learning_steps = 4800 * 2 time_steps_lim = learning_steps * 10 test_episode_timeout = 200 test_episode_number = 5 if obs_from_training: env_cls = SmallFrozenTrainingObservation elif non_stationary_bandit: env_cls = SmallFrozenNonStationaryBandits else: env_cls = SmallFrozenTeacherEnv return env_cls(student_cls=student_cls, student_default_kwargs=student_default_kwargs, interventions=interventions, final_env=test_env, logger_cls=FrozenLakeEvaluationLogger, student_ranges_dict=student_ranges_dict, learning_steps=learning_steps, test_episode_number=test_episode_number, test_episode_timeout=test_episode_timeout, time_steps_lim=time_steps_lim, normalize_obs=False)
[ "stable_baselines.a2c.utils.conv_to_fc", "numpy.sqrt", "src.envs.FrozenLakeEnvCustomMap", "stable_baselines.a2c.utils.conv", "src.teacher.create_intervention", "tensorflow.compat.v1.logging.set_verbosity", "src.envs.frozen_lake.utils.add_teacher" ]
[((741, 803), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (775, 803), True, 'import tensorflow as tf\n'), ((1242, 1278), 'src.envs.FrozenLakeEnvCustomMap', 'FrozenLakeEnvCustomMap', ([], {}), '(**env_kwargs)\n', (1264, 1278), False, 'from src.envs import CMDP, FrozenLakeEnvCustomMap\n'), ((2792, 2811), 'stable_baselines.a2c.utils.conv_to_fc', 'conv_to_fc', (['layer_2'], {}), '(layer_2)\n', (2802, 2811), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((2562, 2636), 'stable_baselines.a2c.utils.conv', 'conv', (['scaled_images', '"""c1"""'], {'n_filters': '(32)', 'filter_size': '(3)', 'stride': '(1)'}), "(scaled_images, 'c1', n_filters=32, filter_size=3, stride=1, **kwargs)\n", (2566, 2636), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((2683, 2751), 'stable_baselines.a2c.utils.conv', 'conv', (['layer_1', '"""c2"""'], {'n_filters': '(64)', 'filter_size': '(3)', 'stride': '(1)'}), "(layer_1, 'c2', n_filters=64, filter_size=3, stride=1, **kwargs)\n", (2687, 2751), False, 'from stable_baselines.a2c.utils import conv, linear, conv_to_fc\n'), ((4559, 4711), 'src.teacher.create_intervention', 'create_intervention', ([], {'base_cenv': 'small_base_cenv_fn', 'interventions': '[dummy_intervention]', 'taus': '[0]', 'buf_size': '(0)', 'use_vec': '(True)', 'avg_constraint': '(True)'}), '(base_cenv=small_base_cenv_fn, interventions=[\n dummy_intervention], taus=[0], buf_size=0, use_vec=True, avg_constraint\n =True)\n', (4578, 4711), False, 'from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, create_intervention, SmallFrozenTeacherEnv\n'), ((2280, 2302), 'src.envs.frozen_lake.utils.add_teacher', 'add_teacher', (['world_map'], {}), '(world_map)\n', (2291, 2302), False, 'from src.envs.frozen_lake.utils import create_intervention_from_map, OptimalAgent, add_teacher\n'), ((2885, 2895), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2892, 2895), True, 'import numpy as np\n'), ((2050, 2075), 'src.envs.frozen_lake.utils.add_teacher', 'add_teacher', (['world_map', 'd'], {}), '(world_map, d)\n', (2061, 2075), False, 'from src.envs.frozen_lake.utils import create_intervention_from_map, OptimalAgent, add_teacher\n')]
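A hedged construction sketch for the factories defined above; it assumes the repository's src package (maps, student and teacher classes) is importable, and building the intervention environments may take a moment.

base_cenv = small_base_cenv_fn()                   # single constrained FrozenLake (hole constraint)
interventions, test_env = make_base_small_cenvs()  # 3 intervention CMDPs plus the final evaluation env
teacher_env = create_teacher_env()                 # teacher MDP wrapping a PPO2 Lagrangian student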
name = "scanapi"

import click
import logging

from scanapi.tree.api_tree import APITree
from scanapi.reporter import Reporter
from scanapi.requests_maker import RequestsMaker
from scanapi.settings import SETTINGS
from scanapi.yaml_loader import load_yaml


@click.command()
@click.option(
    "-s",
    "--spec-path",
    "spec_path",
    type=click.Path(exists=True),
    default=SETTINGS["spec_path"],
)
@click.option("-o", "--output-path", "output_path")
@click.option(
    "-r",
    "--reporter",
    "reporter",
    type=click.Choice(["console", "markdown", "html"]),
    default=SETTINGS["reporter"],
)
@click.option("-t", "--template", "template")
@click.option(
    "--log-level",
    "log_level",
    type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
    default="INFO",
)
def scan(spec_path, output_path, reporter, template, log_level):
    """Automated Testing and Documentation for your REST API."""
    logging.basicConfig(level=log_level)
    logger = logging.getLogger(__name__)

    SETTINGS.update({"spec_path": spec_path, "output_path": output_path})

    # custom templates to be implemented later
    if template is not None:
        logger.warn("Custom templates are not supported yet. Soon to be. Hang tight.")

    spec_path = SETTINGS["spec_path"]

    try:
        api_spec = load_yaml(spec_path)
    except FileNotFoundError as e:
        error_message = f"Could not find spec file: {spec_path}. {str(e)}"
        logger.error(error_message)
        return

    try:
        api_tree = APITree(api_spec)
    except Exception as e:
        error_message = "Error loading API spec."
        error_message = "{} {}".format(error_message, str(e))
        logger.error(error_message)
        return

    RequestsMaker(api_tree).make_all()

    Reporter(output_path, reporter, template).write(api_tree.responses.values())
[ "logging.getLogger", "logging.basicConfig", "click.Choice", "click.option", "scanapi.tree.api_tree.APITree", "scanapi.yaml_loader.load_yaml", "scanapi.requests_maker.RequestsMaker", "click.Path", "scanapi.reporter.Reporter", "click.command", "scanapi.settings.SETTINGS.update" ]
[((259, 274), 'click.command', 'click.command', ([], {}), '()\n', (272, 274), False, 'import click\n'), ((408, 458), 'click.option', 'click.option', (['"""-o"""', '"""--output-path"""', '"""output_path"""'], {}), "('-o', '--output-path', 'output_path')\n", (420, 458), False, 'import click\n'), ((611, 655), 'click.option', 'click.option', (['"""-t"""', '"""--template"""', '"""template"""'], {}), "('-t', '--template', 'template')\n", (623, 655), False, 'import click\n'), ((938, 974), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'log_level'}), '(level=log_level)\n', (957, 974), False, 'import logging\n'), ((988, 1015), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1005, 1015), False, 'import logging\n'), ((1020, 1089), 'scanapi.settings.SETTINGS.update', 'SETTINGS.update', (["{'spec_path': spec_path, 'output_path': output_path}"], {}), "({'spec_path': spec_path, 'output_path': output_path})\n", (1035, 1089), False, 'from scanapi.settings import SETTINGS\n'), ((1321, 1341), 'scanapi.yaml_loader.load_yaml', 'load_yaml', (['spec_path'], {}), '(spec_path)\n', (1330, 1341), False, 'from scanapi.yaml_loader import load_yaml\n'), ((1532, 1549), 'scanapi.tree.api_tree.APITree', 'APITree', (['api_spec'], {}), '(api_spec)\n', (1539, 1549), False, 'from scanapi.tree.api_tree import APITree\n'), ((345, 368), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (355, 368), False, 'import click\n'), ((527, 572), 'click.Choice', 'click.Choice', (["['console', 'markdown', 'html']"], {}), "(['console', 'markdown', 'html'])\n", (539, 572), False, 'import click\n'), ((716, 779), 'click.Choice', 'click.Choice', (["['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']"], {}), "(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n", (728, 779), False, 'import click\n'), ((1745, 1768), 'scanapi.requests_maker.RequestsMaker', 'RequestsMaker', (['api_tree'], {}), '(api_tree)\n', (1758, 1768), False, 'from scanapi.requests_maker import RequestsMaker\n'), ((1784, 1825), 'scanapi.reporter.Reporter', 'Reporter', (['output_path', 'reporter', 'template'], {}), '(output_path, reporter, template)\n', (1792, 1825), False, 'from scanapi.reporter import Reporter\n')]
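One way to exercise the click command above without a shell is click's test runner; the import path for scan and the spec/report file names are assumptions, and the spec file must exist because of the click.Path(exists=True) option.

from click.testing import CliRunner
from scanapi import scan  # assumption: adjust to the module that defines scan() above

runner = CliRunner()
result = runner.invoke(scan, ["-s", "api.yaml", "-o", "report.md", "-r", "markdown"])
print(result.exit_code, result.output)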
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import re import logging import subprocess import errno, os, pty import shlex from subprocess import Popen, PIPE from ConfigReader import configuration import mysql.connector from mysql.connector import errorcode from common.Singleton import Singleton from DBImportConfig import import_config from DBImportOperation import common_operations from datetime import datetime, timedelta import pandas as pd import numpy as np import time class operation(object, metaclass=Singleton): def __init__(self, Hive_DB=None, Hive_Table=None): logging.debug("Executing etl_operations.__init__()") self.Hive_DB = None self.Hive_Table = None self.mysql_conn = None self.mysql_cursor = None self.startDate = None self.common_operations = common_operations.operation(Hive_DB, Hive_Table) self.import_config = import_config.config(Hive_DB, Hive_Table) if Hive_DB != None and Hive_Table != None: self.setHiveTable(Hive_DB, Hive_Table) else: # If the class already is initialized, we just pull the parameters and set them here self.Hive_DB = self.common_operations.Hive_DB self.Hive_Table = self.common_operations.Hive_Table self.startDate = self.import_config.startDate logging.debug("Executing etl_operations.__init__() - Finished") def setHiveTable(self, Hive_DB, Hive_Table): """ Sets the parameters to work against a new Hive database and table """ self.Hive_DB = Hive_DB.lower() self.Hive_Table = Hive_Table.lower() self.common_operations.setHiveTable(self.Hive_DB, self.Hive_Table) self.import_config.setHiveTable(self.Hive_DB, self.Hive_Table) try: self.import_config.getImportConfig() self.startDate = self.import_config.startDate self.import_config.lookupConnectionAlias() except: self.import_config.remove_temporary_files() sys.exit(1) def remove_temporary_files(self): self.import_config.remove_temporary_files() def connectToHive(self,): logging.debug("Executing etl_operations.connectToHive()") try: self.common_operations.connectToHive() except Exception as ex: logging.error(ex) self.import_config.remove_temporary_files() sys.exit(1) logging.debug("Executing etl_operations.connectToHive() - Finished") def mergeHiveTables(self, sourceDB, sourceTable, targetDB, targetTable, historyDB = None, historyTable=None, targetDeleteDB = None, targetDeleteTable=None, createHistoryAudit=False, sourceIsIncremental=False, sourceIsImportTable=False, softDelete=False, mergeTime=datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), datalakeSource=None, PKColumns=None, hiveMergeJavaHeap=None, oracleFlashbackSource=False, mssqlChangeTrackingSource=False, deleteNotUpdatedRows=False, oracleFlashbackImportTable=None, mssqlChangeTrackingImportTable=None): """ Merge source table into Target table. 
Also populate a History Audit table if selected """ logging.debug("Executing etl_operations.mergeHiveTables()") targetColumns = self.common_operations.getHiveColumns(hiveDB=targetDB, hiveTable=targetTable, includeType=False, includeComment=False) columnMerge = self.common_operations.getHiveColumnNameDiff(sourceDB=sourceDB, sourceTable=sourceTable, targetDB=targetDB, targetTable=targetTable, importTool = self.import_config.importTool, sourceIsImportTable=True) if PKColumns == None: PKColumns = self.common_operations.getPKfromTable(hiveDB=targetDB, hiveTable=targetTable, quotedColumns=False) datalakeIUDExists = False datalakeInsertExists = False datalakeUpdateExists = False datalakeDeleteExists = False datalakeSourceExists = False for index, row in targetColumns.iterrows(): if row['name'] == "datalake_iud": datalakeIUDExists = True if row['name'] == "datalake_insert": datalakeInsertExists = True if row['name'] == "datalake_update": datalakeUpdateExists = True if row['name'] == "datalake_delete": datalakeDeleteExists = True if row['name'] == "datalake_source": datalakeSourceExists = True if hiveMergeJavaHeap != None: query = "set hive.tez.container.size=%s"%(hiveMergeJavaHeap) self.common_operations.executeHiveQuery(query) query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable) query += "using `%s`.`%s` as S \n"%(sourceDB, sourceTable) query += "on \n" for i, targetColumn in enumerate(PKColumns.split(",")): try: sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName'] except IndexError: logging.error("Primary Key cant be found in the source target table. Please check PK override") self.import_config.remove_temporary_files() sys.exit(1) if sourceColumn == None: logging.error("ERROR: Problem determine column name in source table for primary key column '%s'"%(targetColumn)) self.import_config.remove_temporary_files() sys.exit(1) if i == 0: query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn) else: query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn) query += "\n" query += "when matched " if sourceIsIncremental == False: # If the source is not incremental, it means that we need to check all the values in # all columns as we dont know if the row have changed or not query += "and (\n" firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): foundPKcolumn = False for column in PKColumns.split(","): if row['targetName'] == column: foundPKcolumn = True if foundPKcolumn == False: if firstIteration == True: query += " " firstIteration = False else: query += " or " query += "T.`%s` != S.`%s` "%(row['targetName'], row['sourceName']) query += "or ( T.`%s` is null and S.`%s` is not null ) "%(row['targetName'], row['sourceName']) query += "or ( T.`%s` is not null and S.`%s` is null ) "%(row['targetName'], row['sourceName']) query += "\n" if softDelete == True and datalakeIUDExists == True: # If a row is deleted and then inserted again with the same values in all fields, this will still trigger an update query += " or T.datalake_iud = 'D' \n" query += ") \n" if oracleFlashbackSource == True: query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n" if mssqlChangeTrackingSource == True: query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n" query += "then update set " firstIteration = True nonPKcolumnFound = False for index, row in columnMerge.loc[columnMerge['Exist'] 
== 'both'].iterrows(): foundPKcolumn = False for column in PKColumns.split(","): if row['targetName'] == column: foundPKcolumn = True if foundPKcolumn == False: if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s` = S.`%s`"%(row['targetName'], row['sourceName']) nonPKcolumnFound = True if nonPKcolumnFound == False: # This will happen if there are only columns that is part of the PK in the table. Impossible to merge it with full history logging.error("This table only have columns that is part of the PrimaryKey. Merge operations cant be used") self.import_config.remove_temporary_files() sys.exit(1) if datalakeIUDExists == True: query += ", \n `datalake_iud` = 'U'" if datalakeUpdateExists == True: query += ", \n `datalake_update` = '%s'"%(mergeTime) if datalakeSourceExists == True and datalakeSource != None: query += ", \n `datalake_source` = '%s'"%(datalakeSource) query += " \n" if oracleFlashbackSource == True: query += "when matched and S.datalake_flashback_operation = 'D' then delete \n" if mssqlChangeTrackingSource == True: query += "when matched and S.datalake_mssql_changetrack_operation = 'D' then delete \n" query += "when not matched " if oracleFlashbackSource == True: query += "and ( S.datalake_flashback_operation is null or S.datalake_flashback_operation != 'D' ) \n" if mssqlChangeTrackingSource == True: query += "and ( S.datalake_mssql_changetrack_operation is null or S.datalake_mssql_changetrack_operation != 'D' ) \n" query += "then insert values ( " firstIteration = True for index, row in targetColumns.iterrows(): ColumnName = row['name'] sourceColumnName = columnMerge.loc[columnMerge['targetName'] == ColumnName]['sourceName'].fillna('').iloc[0] if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" if sourceColumnName != "": query += " S.`%s`"%(sourceColumnName) elif ColumnName == "datalake_iud": query += " 'I'" elif ColumnName == "datalake_insert": query += " '%s'"%(mergeTime) elif ColumnName == "datalake_update": query += " '%s'"%(mergeTime) elif ColumnName == "datalake_source": query += " '%s'"%(datalakeSource) else: query += " NULL" query += " \n) \n" # print("==============================================================") # print(query) # self.import_config.remove_temporary_files() # sys.exit(1) ## query = query.replace('\n', '') self.common_operations.executeHiveQuery(query) if deleteNotUpdatedRows == True: # This is used by Oracle Flashback and MSSQL Change Tracking imports when doing a reinitialization of the data and we need to # remove the rows that was not updated query = "delete from `%s`.`%s` where datalake_update != '%s' "%(targetDB, targetTable, mergeTime) self.common_operations.executeHiveQuery(query) # If a row was previously deleted and now inserted again and we are using Soft Delete, # then the information in the datalake_iud, datalake_insert and datalake_delete is wrong. 
if softDelete == True: query = "update `%s`.`%s` set "%(targetDB, targetTable) query += " datalake_iud = 'I', " query += " datalake_insert = datalake_update, " query += " datalake_delete = null " query += "where " query += " datalake_iud = 'U' and " query += " datalake_delete is not null" # print("==============================================================") # print(query) # query = query.replace('\n', '') self.common_operations.executeHiveQuery(query) # Statement to select all rows that was changed in the Target table and insert them to the History table if createHistoryAudit == True and historyDB != None and historyTable != None and oracleFlashbackSource == False: query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable) query += "( " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n `datalake_source`" query += ",\n `datalake_iud`" query += ",\n `datalake_timestamp`" query += "\n) \n" query += "select " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n '%s'"%(datalakeSource) query += ",\n `datalake_iud`" query += ",\n `datalake_update`" query += "\nfrom `%s`.`%s` \n"%(targetDB, targetTable) query += "where datalake_update = '%s'"%(mergeTime) self.common_operations.executeHiveQuery(query) if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None: # Start with truncating the History Delete table as we need to rebuild this one from scratch to determine what rows are deleted query = "truncate table `%s`.`%s`"%(targetDeleteDB, targetDeleteTable) self.common_operations.executeHiveQuery(query) # Insert all rows (PK columns only) that exists in the Target Table but dont exists in the Import table (the ones that was deleted) query = "insert into table `%s`.`%s` \n(`"%(targetDeleteDB, targetDeleteTable) query += "`, `".join(PKColumns.split(",")) query += "`) \nselect T.`" query += "`, T.`".join(PKColumns.split(",")) query += "` \nfrom `%s`.`%s` as T \n"%(targetDB, targetTable) query += "left outer join `%s`.`%s` as S \n"%(sourceDB, sourceTable) query += "on \n" for i, targetColumn in enumerate(PKColumns.split(",")): sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName'] if i == 0: query += " T.`%s` = S.`%s` "%(targetColumn, sourceColumn) else: query += "and\n T.`%s` = S.`%s` "%(targetColumn, sourceColumn) query += "\nwhere \n" for i, targetColumn in enumerate(PKColumns.split(",")): sourceColumn = columnMerge.loc[columnMerge['Exist'] == 'both'].loc[columnMerge['targetName'] == targetColumn].iloc[0]['sourceName'] if i == 0: query += " S.`%s` is null "%(sourceColumn) else: query += "and\n S.`%s` is null "%(sourceColumn) self.common_operations.executeHiveQuery(query) if oracleFlashbackSource == True and createHistoryAudit == True: # If it is a history merge with Oracle Flashback, we need to handle the deletes separatly query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable) query += "( " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False 
query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n `datalake_source`" query += ",\n `datalake_iud`" query += ",\n `datalake_timestamp`" query += "\n) \n" query += "select " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n '%s'"%(datalakeSource) query += ",\n `datalake_flashback_operation` as `datalake_iud`" query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime) query += "\nfrom `%s`.`%s`"%(sourceDB, oracleFlashbackImportTable) self.common_operations.executeHiveQuery(query) if mssqlChangeTrackingSource == True and createHistoryAudit == True: # If it is a history merge with MSSQL Change Traging, we need to handle the deletes separatly query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable) query += "( " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n `datalake_source`" query += ",\n `datalake_iud`" query += ",\n `datalake_timestamp`" query += "\n) \n" query += "select " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n '%s'"%(datalakeSource) query += ",\n `datalake_mssql_changetrack_operation` as `datalake_iud`" query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime) query += "\nfrom `%s`.`%s`"%(sourceDB, mssqlChangeTrackingImportTable) self.common_operations.executeHiveQuery(query) # Insert the deleted rows into the History table. 
Without this, it's impossible to see what values the column had before the delete if sourceIsIncremental == False and createHistoryAudit == True and historyDB != None and historyTable != None and targetDeleteDB != None and targetDeleteTable != None: query = "insert into table `%s`.`%s` \n"%(historyDB, historyTable) query += "( " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " `%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n `datalake_source`" query += ",\n `datalake_iud`" query += ",\n `datalake_timestamp`" query += "\n) \n" query += "select " firstIteration = True for index, row in columnMerge.loc[columnMerge['Exist'] == 'both'].iterrows(): if firstIteration == True: firstIteration = False query += " \n" else: query += ", \n" query += " T.`%s`"%(row['targetName']) if datalakeSourceExists == True: query += ",\n '%s' as `datalake_source`"%(datalakeSource) query += ",\n 'D' as `datalake_iud`" query += ",\n timestamp('%s') as `datalake_timestamp`"%(mergeTime) query += "\nfrom `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable) query += "left join `%s`.`%s` as T \n"%(targetDB, targetTable) query += "on \n" for i, column in enumerate(PKColumns.split(",")): if i == 0: query += " T.`%s` = D.`%s` "%(column, column) else: query += "and\n T.`%s` = D.`%s` "%(column, column) # print("==============================================================") # print(query) # query = query.replace('\n', '') self.common_operations.executeHiveQuery(query) if sourceIsIncremental == False and targetDeleteDB != None and targetDeleteTable != None: # Use the merge command to delete found rows between the Delete Table and the History Table query = "merge into `%s`.`%s` as T \n"%(targetDB, targetTable) query += "using `%s`.`%s` as D \n"%(targetDeleteDB, targetDeleteTable) query += "on \n" for i, column in enumerate(PKColumns.split(",")): if i == 0: query += " T.`%s` = D.`%s` "%(column, column) else: query += "and\n T.`%s` = D.`%s` "%(column, column) if softDelete == True: query += "and\n T.`datalake_delete` != 'D' " query += "\n" if softDelete == False: query += "when matched then delete \n" else: query += "when matched then update set \n" query += "datalake_iud = 'D', \n" query += "datalake_update = timestamp('%s'), \n"%(mergeTime) query += "datalake_delete = timestamp('%s') "%(mergeTime) # print("==============================================================") # print(query) # query = query.replace('\n', '') self.common_operations.executeHiveQuery(query) logging.debug("Executing etl_operations.mergeHiveTables() - Finished")
[ "logging.debug", "DBImportOperation.common_operations.operation", "datetime.datetime.now", "sys.exit", "logging.error", "DBImportConfig.import_config.config" ]
[((1331, 1383), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.__init__()"""'], {}), "('Executing etl_operations.__init__()')\n", (1344, 1383), False, 'import logging\n'), ((1535, 1583), 'DBImportOperation.common_operations.operation', 'common_operations.operation', (['Hive_DB', 'Hive_Table'], {}), '(Hive_DB, Hive_Table)\n', (1562, 1583), False, 'from DBImportOperation import common_operations\n'), ((1607, 1648), 'DBImportConfig.import_config.config', 'import_config.config', (['Hive_DB', 'Hive_Table'], {}), '(Hive_DB, Hive_Table)\n', (1627, 1648), False, 'from DBImportConfig import import_config\n'), ((1989, 2052), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.__init__() - Finished"""'], {}), "('Executing etl_operations.__init__() - Finished')\n", (2002, 2052), False, 'import logging\n'), ((2710, 2767), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.connectToHive()"""'], {}), "('Executing etl_operations.connectToHive()')\n", (2723, 2767), False, 'import logging\n'), ((2930, 2998), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.connectToHive() - Finished"""'], {}), "('Executing etl_operations.connectToHive() - Finished')\n", (2943, 2998), False, 'import logging\n'), ((3634, 3693), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.mergeHiveTables()"""'], {}), "('Executing etl_operations.mergeHiveTables()')\n", (3647, 3693), False, 'import logging\n'), ((19630, 19700), 'logging.debug', 'logging.debug', (['"""Executing etl_operations.mergeHiveTables() - Finished"""'], {}), "('Executing etl_operations.mergeHiveTables() - Finished')\n", (19643, 19700), False, 'import logging\n'), ((7952, 8069), 'logging.error', 'logging.error', (['"""This table only have columns that is part of the PrimaryKey. Merge operations cant be used"""'], {}), "(\n 'This table only have columns that is part of the PrimaryKey. Merge operations cant be used'\n )\n", (7965, 8069), False, 'import logging\n'), ((8110, 8121), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8118, 8121), False, 'import sys\n'), ((2586, 2597), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2594, 2597), False, 'import sys\n'), ((2847, 2864), 'logging.error', 'logging.error', (['ex'], {}), '(ex)\n', (2860, 2864), False, 'import logging\n'), ((2915, 2926), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2923, 2926), False, 'import sys\n'), ((3265, 3279), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3277, 3279), False, 'from datetime import datetime, timedelta\n'), ((5449, 5571), 'logging.error', 'logging.error', (['("ERROR: Problem determine column name in source table for primary key column \'%s\'"\n % targetColumn)'], {}), '(\n "ERROR: Problem determine column name in source table for primary key column \'%s\'"\n % targetColumn)\n', (5462, 5571), False, 'import logging\n'), ((5614, 5625), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5622, 5625), False, 'import sys\n'), ((5256, 5361), 'logging.error', 'logging.error', (['"""Primary Key cant be found in the source target table. Please check PK override"""'], {}), "(\n 'Primary Key cant be found in the source target table. Please check PK override'\n )\n", (5269, 5361), False, 'import logging\n'), ((5404, 5415), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5412, 5415), False, 'import sys\n')]
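A hedged driver sketch for the merge class above; it assumes a working DBImport configuration with Hive and MySQL connectivity, and every database, table, and column name here is a placeholder.

etl = operation(Hive_DB="analytics", Hive_Table="customers")  # placeholder names
etl.connectToHive()
etl.mergeHiveTables(
    sourceDB="import_db", sourceTable="customers_import",
    targetDB="analytics", targetTable="customers",
    historyDB="analytics_history", historyTable="customers_audit",
    createHistoryAudit=True, softDelete=True, PKColumns="customer_id",
)
etl.remove_temporary_files()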
import numpy as np import os from scanorama import * from scipy.sparse import vstack from process import load_names from experiments import * from utils import * NAMESPACE = 'zeisel' METHOD = 'svd' DIMRED = 100 data_names = [ 'data/mouse_brain/zeisel/amygdala', 'data/mouse_brain/zeisel/cerebellum', 'data/mouse_brain/zeisel/cortex1', 'data/mouse_brain/zeisel/cortex2', 'data/mouse_brain/zeisel/cortex3', 'data/mouse_brain/zeisel/enteric', 'data/mouse_brain/zeisel/hippocampus', 'data/mouse_brain/zeisel/hypothalamus', 'data/mouse_brain/zeisel/medulla', 'data/mouse_brain/zeisel/midbraindorsal', 'data/mouse_brain/zeisel/midbrainventral', 'data/mouse_brain/zeisel/olfactory', 'data/mouse_brain/zeisel/pons', 'data/mouse_brain/zeisel/spinalcord', 'data/mouse_brain/zeisel/striatumdorsal', 'data/mouse_brain/zeisel/striatumventral', 'data/mouse_brain/zeisel/sympathetic', 'data/mouse_brain/zeisel/thalamus', ] if __name__ == '__main__': datasets, genes_list, n_cells = load_names(data_names, norm=False) datasets, genes = merge_datasets(datasets, genes_list) X = vstack(datasets) if not os.path.isfile('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)): log('Dimension reduction with {}...'.format(METHOD)) X_dimred = reduce_dimensionality( normalize(X), method=METHOD, dimred=DIMRED ) log('Dimensionality = {}'.format(X_dimred.shape[1])) np.savetxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE), X_dimred) else: X_dimred = np.loadtxt('data/dimred/{}_{}.txt'.format(METHOD, NAMESPACE)) from ample import gs, uniform, srs #samp_idx = gs(X_dimred, 20000, replace=False) #samp_idx = uniform(X_dimred, 20000, replace=False) samp_idx = srs(X_dimred, 20000, replace=False) #from anndata import AnnData #import scanpy.api as sc #adata = AnnData(X=X_dimred[samp_idx, :]) #sc.pp.neighbors(adata, use_rep='X') #sc.tl.louvain(adata, resolution=1.5, key_added='louvain') # #louv_labels = np.array(adata.obs['louvain'].tolist()) #le = LabelEncoder().fit(louv_labels) #cell_labels = le.transform(louv_labels) # #np.savetxt('data/cell_labels/zeisel_louvain.txt', cell_labels) labels = ( open('data/cell_labels/zeisel_cluster.txt') .read().rstrip().split('\n') ) le = LabelEncoder().fit(labels) cell_labels = le.transform(labels) experiments( X_dimred, NAMESPACE, n_seeds=2, cell_labels=cell_labels, kmeans_ami=True, louvain_ami=True, rare=True, rare_label=le.transform(['Ependymal'])[0], ) exit() embedding = visualize( [ X_dimred[samp_idx, :] ], cell_labels[samp_idx], NAMESPACE + '_srs{}'.format(len(samp_idx)), [ str(ct) for ct in sorted(set(cell_labels)) ], perplexity=100, n_iter=500, image_suffix='.png', viz_cluster=True ) exit() cell_labels = ( open('data/cell_labels/zeisel_louvain.txt') .read().rstrip().split('\n') ) le = LabelEncoder().fit(cell_labels) cell_labels = le.transform(cell_labels) astro = set([ 32, 38, 40, ]) oligo = set([ 2, 5, 12, 20, 23, 33, 37, ]) focus = set([ 15, 36, 41 ]) labels = [] aob_labels = [] for cl in cell_labels: if cl in focus: labels.append(0) aob_labels.append('both') elif cl in astro or cl in oligo: labels.append(1) if cl in astro: aob_labels.append('astro') else: aob_labels.append('oligo') else: labels.append(2) aob_labels.append('none') labels = np.array(labels) aob_labels = np.array(aob_labels) X = np.log1p(normalize(X[samp_idx, :])) from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin #astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'astro', NAMESPACE) #astro_oligo_joint(X, genes, 'GJA1', 'MBP', aob_labels, 'oligo', NAMESPACE) #astro_oligo_joint(X, genes, 'GJA1', 'MBP', 
aob_labels, 'both', NAMESPACE) #astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'astro', NAMESPACE) #astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'oligo', NAMESPACE) #astro_oligo_joint(X, genes, 'GJA1', 'PLP1', aob_labels, 'both', NAMESPACE) astro_oligo_violin(X, genes, 'GJA1', aob_labels, NAMESPACE) astro_oligo_violin(X, genes, 'MBP', aob_labels, NAMESPACE) astro_oligo_violin(X, genes, 'PLP1', aob_labels, NAMESPACE) viz_genes = [ #'GJA1', 'MBP', 'PLP1', 'TRF', #'CST3', 'CPE', 'FTH1', 'APOE', 'MT1', 'NDRG2', 'TSPAN7', #'PLP1', 'MAL', 'PTGDS', 'CLDN11', 'APOD', 'QDPR', 'MAG', 'ERMN', #'PLP1', 'MAL', 'PTGDS', 'MAG', 'CLDN11', 'APOD', 'FTH1', #'ERMN', 'MBP', 'ENPP2', 'QDPR', 'MOBP', 'TRF', #'CST3', 'SPARCL1', 'PTN', 'CD81', 'APOE', 'ATP1A2', 'ITM2B' ] cell_labels = ( open('data/cell_labels/zeisel_cluster.txt') .read().rstrip().split('\n') ) le = LabelEncoder().fit(cell_labels) cell_labels = le.transform(cell_labels) embedding = visualize( [ X_dimred[samp_idx, :] ], cell_labels[samp_idx], NAMESPACE + '_astro{}'.format(len(samp_idx)), [ str(ct) for ct in sorted(set(cell_labels)) ], gene_names=viz_genes, gene_expr=X, genes=genes, perplexity=100, n_iter=500, image_suffix='.png', viz_cluster=True ) #visualize_dropout(X, embedding, image_suffix='.png', # viz_prefix=NAMESPACE + '_dropout') from differential_entropies import differential_entropies differential_entropies(X_dimred, labels)
[ "scipy.sparse.vstack", "ample.srs", "process.load_names", "numpy.array", "differential_entropies.differential_entropies", "mouse_brain_astrocyte.astro_oligo_violin" ]
[((1047, 1081), 'process.load_names', 'load_names', (['data_names'], {'norm': '(False)'}), '(data_names, norm=False)\n', (1057, 1081), False, 'from process import load_names\n'), ((1149, 1165), 'scipy.sparse.vstack', 'vstack', (['datasets'], {}), '(datasets)\n', (1155, 1165), False, 'from scipy.sparse import vstack\n'), ((1811, 1846), 'ample.srs', 'srs', (['X_dimred', '(20000)'], {'replace': '(False)'}), '(X_dimred, 20000, replace=False)\n', (1814, 1846), False, 'from ample import gs, uniform, srs\n'), ((3771, 3787), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3779, 3787), True, 'import numpy as np\n'), ((3805, 3825), 'numpy.array', 'np.array', (['aob_labels'], {}), '(aob_labels)\n', (3813, 3825), True, 'import numpy as np\n'), ((4438, 4497), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""GJA1"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'GJA1', aob_labels, NAMESPACE)\n", (4456, 4497), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((4502, 4560), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""MBP"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'MBP', aob_labels, NAMESPACE)\n", (4520, 4560), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((4565, 4624), 'mouse_brain_astrocyte.astro_oligo_violin', 'astro_oligo_violin', (['X', 'genes', '"""PLP1"""', 'aob_labels', 'NAMESPACE'], {}), "(X, genes, 'PLP1', aob_labels, NAMESPACE)\n", (4583, 4624), False, 'from mouse_brain_astrocyte import astro_oligo_joint, astro_oligo_violin\n'), ((5765, 5805), 'differential_entropies.differential_entropies', 'differential_entropies', (['X_dimred', 'labels'], {}), '(X_dimred, labels)\n', (5787, 5805), False, 'from differential_entropies import differential_entropies\n')]
import fcntl import os import socket import struct import warnings import subprocess import logging import base64 logger = logging.getLogger(__name__) # Dummy socket used for fcntl functions _socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) class AddrMeta(type): @property def maxvalue(cls): return (0x1 << (cls.bytelen * 8)) - 1 class Addr(metaclass=AddrMeta): bytelen = 0 def __init__(self, addr): self._str = None self._int = None self._bytes = None if isinstance(addr, type(self)): self._str = addr._str self._bytes = addr._bytes self._int = addr._int elif isinstance(addr, str): self._str = addr elif isinstance(addr, int): self._int = addr elif isinstance(addr, bytes): if len(addr) == self.bytelen: self._bytes = addr else: self._str = addr.decode('utf-8') else: raise ValueError('Cannot create {!s} from {!s}'.format(type(self), type(addr))) # Operations def __and__(self, other): return type(self)(int(self) & int(other)) def __or__(self, other): return type(self)(int(self) | int(other)) def __xor__(self, other): return type(self)(int(self) ^ int(other)) def __invert__(self): return type(self)(int(self) ^ self.maxvalue) # Conversions def __str__(self): if self._str is None: self._str = self.bytes_to_str(bytes(self)) return self._str def __int__(self): return int.from_bytes(bytes(self), byteorder='big') def __bytes__(self): if self._bytes is None: if self._str is not None: self._bytes = self.str_to_bytes(self._str) elif self._int is not None: self._bytes = self._int.to_bytes(self.bytelen, byteorder='big') return self._bytes def __repr__(self): return '<{0}.{1} {2!s}>'.format(__name__, type(self).__name__, self) class Ip(Addr): bytelen = 4 @staticmethod def bytes_to_str(b): return socket.inet_ntoa(b) @staticmethod def str_to_bytes(s): return socket.inet_aton(s) def slash(self): x, i = int(self), 0 while x & 0x1 == 0: x >>= 1 i += 1 return 32 - i class Mac(Addr): bytelen = 6 @staticmethod def bytes_to_str(b): return ':'.join('%02x' % byte for byte in b) @staticmethod def str_to_bytes(s): return bytes.fromhex(s.replace(':', '')) def _ifctl(ifname, code): if isinstance(ifname, str): ifname = ifname.encode('utf-8') return fcntl.ioctl( _socket.fileno(), code, struct.pack('256s', ifname[:15]) ) def ifaddr(ifname): return Ip(_ifctl(ifname, 0x8915)[20:24]) # SIOCGIFADDR def ifmask(ifname): return Ip(_ifctl(ifname, 0x891b)[20:24]) # SIOCGIFNETMASK def ifhwaddr(ifname): return Mac(_ifctl(ifname, 0x8927)[18:24]) # SIOCGIFHWADDR def cidr(ip, mask): return "{!s}/{:d}".format(ip, mask.slash()) def parsecidr(ipnet): ipstr, maskstr = ipnet.split('/') ip = Ip(ipstr) mask = Ip(0xffffffff ^ ((0x00000001 << (32-int(maskstr)))-1)) return ip, mask def ifcidr(ifname): return cidr(ifaddr(ifname), ifmask(ifname)) class OpenVpnError(Exception): def __init__(self, instance, msg): self.instance = instance super().__init__(msg) class OpenVpn: exe = 'openvpn' initmsg = b'Initialization Sequence Completed' def __init__(self, **kwargs): if 'daemonize' in kwargs: warnings.warn("This class will not be able to close a daemonized tunnel", warnings.Warning) self.options = kwargs self.initialized = False self._process = None def args(self): result = [] for name, value in self.options.items(): result.append('--{!s}'.format(name)) # None is special to indicate the option have no value if value is not None: result.append(str(value)) return result def check(self): if self._process is not None: self._process.poll() code = self._process.returncode if code is not None and code != 0: raise OpenVpnError(self, "`openvpn {:s}` exited 
with error code: {:d}".format(" ".join(self.args()), code)) def running(self): return self._process is not None and self._process.poll() is None @staticmethod def maketun(): os.makedirs('/dev/net', exist_ok=True) subprocess.run(['mknod', '/dev/net/tun', 'c', '10', '200'], check=True) def connect(self): if not os.path.exists('/dev/net/tun'): self.maketun() if not self.running(): self.initialized = False self._process = subprocess.Popen( [self.exe] + self.args(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) self.check() def disconnect(self): if self.running(): self._process.terminate() os.waitpid(self._process.pid, 0) def waitforinit(self): if not self.initialized: for line in self._process.stdout: logger.debug("openvpn: %s", line.decode('utf-8').strip()) if self.initmsg in line: self.initialized = True break else: self.check() raise OpenVpnError(self, "OpenVPN exited with code 0, but did not display init msg") def __enter__(self): self.connect() return self def __exit__(self, *args, **kwargs): self.disconnect()
[ "logging.getLogger", "os.path.exists", "socket.socket", "os.makedirs", "os.waitpid", "subprocess.run", "struct.pack", "socket.inet_aton", "warnings.warn", "socket.inet_ntoa" ]
[((124, 151), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'import logging\n'), ((203, 251), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (216, 251), False, 'import socket\n'), ((2152, 2171), 'socket.inet_ntoa', 'socket.inet_ntoa', (['b'], {}), '(b)\n', (2168, 2171), False, 'import socket\n'), ((2231, 2250), 'socket.inet_aton', 'socket.inet_aton', (['s'], {}), '(s)\n', (2247, 2250), False, 'import socket\n'), ((2786, 2818), 'struct.pack', 'struct.pack', (['"""256s"""', 'ifname[:15]'], {}), "('256s', ifname[:15])\n", (2797, 2818), False, 'import struct\n'), ((4621, 4659), 'os.makedirs', 'os.makedirs', (['"""/dev/net"""'], {'exist_ok': '(True)'}), "('/dev/net', exist_ok=True)\n", (4632, 4659), False, 'import os\n'), ((4668, 4739), 'subprocess.run', 'subprocess.run', (["['mknod', '/dev/net/tun', 'c', '10', '200']"], {'check': '(True)'}), "(['mknod', '/dev/net/tun', 'c', '10', '200'], check=True)\n", (4682, 4739), False, 'import subprocess\n'), ((3679, 3774), 'warnings.warn', 'warnings.warn', (['"""This class will not be able to close a daemonized tunnel"""', 'warnings.Warning'], {}), "('This class will not be able to close a daemonized tunnel',\n warnings.Warning)\n", (3692, 3774), False, 'import warnings\n'), ((4779, 4809), 'os.path.exists', 'os.path.exists', (['"""/dev/net/tun"""'], {}), "('/dev/net/tun')\n", (4793, 4809), False, 'import os\n'), ((5256, 5288), 'os.waitpid', 'os.waitpid', (['self._process.pid', '(0)'], {}), '(self._process.pid, 0)\n', (5266, 5288), False, 'import os\n')]
from collections import defaultdict def return_default(): return 0 REAL=open("18.txt").readlines() SAMPLE=open("18.sample").readlines() OPEN="." TREE="|" LUMBERYARD="#" import copy def safe_grid_get(grid, x, y, missing=None): if x < 0 or y < 0: return missing if y >= len(grid): return missing if x >= len(grid[y]): return missing return grid[y][x] def parse_lines(lines): return list(map(lambda l: list(l.strip()), lines)) def next_sq(grid, x, y): around = defaultdict(return_default) for dy in [-1, 0, 1]: for dx in [-1, 0, 1]: if dx == 0 and dy == 0: continue a = safe_grid_get(grid, x + dx, y + dy) if a is not None: around[a] += 1 here = grid[y][x] if here == OPEN: if around[TREE] >= 3: return TREE else: return OPEN elif here == TREE: if around[LUMBERYARD] >= 3: return LUMBERYARD else: return TREE else: assert here == LUMBERYARD if around[LUMBERYARD] >= 1 and around[TREE] >= 1: return LUMBERYARD else: return OPEN def resource_value(board): lands = defaultdict(return_default) for y in range(len(board)): for x in range(len(board[0])): lands[board[y][x]] += 1 return lands[TREE] * lands[LUMBERYARD] def solve(lines, minutes): cache = {} old_board = parse_lines(lines) for minute in range(minutes): board = copy.deepcopy(old_board) for y in range(len(board)): for x in range(len(board[0])): board[y][x] = next_sq(old_board, x, y) old_board = board key = "\n".join(map(lambda r: "".join(r), board)) # print(key) if key in cache: print(minute, cache[key]) else: cache[key] = (minute, resource_value(board)) return resource_value(board) sample = solve(SAMPLE, 10) assert sample == 1147 print("*** SAMPLE PASSED ***") # print(solve(REAL, 10000)) loop = """598 570 191420 599 571 189168 600 572 185082 601 573 185227 602 574 185320 603 575 185790 604 576 186120 605 577 189956 606 578 190068 607 579 191080 608 580 190405 # too low 609 581 193795 610 582 190950 611 583 193569 612 584 194350 613 585 196308 614 586 195364 615 587 197911 616 588 199755 617 589 201144 618 590 201607 619 591 203580 620 592 201260 621 593 201950 622 594 200675 # TOO HIGH 623 595 202208 624 596 200151 625 597 198948 626 570 191420 627 571 189168 628 572 185082 629 573 185227 630 574 185320 631 575 185790 632 576 186120 633 577 189956 634 578 190068 635 579 191080 636 580 190405 637 581 193795""" num = 1000000000 nmod = 28 for num in range(570, 638): print(num, (num - 570) % nmod + 570) num = 1000000000 - 1 print(num, (num - 570) % nmod + 570 + nmod)
[ "collections.defaultdict", "copy.deepcopy" ]
[((519, 546), 'collections.defaultdict', 'defaultdict', (['return_default'], {}), '(return_default)\n', (530, 546), False, 'from collections import defaultdict\n'), ((1250, 1277), 'collections.defaultdict', 'defaultdict', (['return_default'], {}), '(return_default)\n', (1261, 1277), False, 'from collections import defaultdict\n'), ((1558, 1582), 'copy.deepcopy', 'copy.deepcopy', (['old_board'], {}), '(old_board)\n', (1571, 1582), False, 'import copy\n')]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('activity_log', '0003_activitylog_extra_data'), ] operations = [ migrations.AlterField( model_name='activitylog', name='datetime', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='datetime', db_index=True), ), migrations.AlterField( model_name='activitylog', name='ip_address', field=models.GenericIPAddressField(blank=True, null=True, verbose_name='user IP', db_index=True), ), migrations.AlterField( model_name='activitylog', name='request_url', field=models.CharField(db_index=True, verbose_name='url', max_length=256), ), ]
[ "django.db.models.DateTimeField", "django.db.models.GenericIPAddressField", "django.db.models.CharField" ]
[((397, 497), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""datetime"""', 'db_index': '(True)'}), "(default=django.utils.timezone.now, verbose_name=\n 'datetime', db_index=True)\n", (417, 497), False, 'from django.db import models, migrations\n'), ((623, 717), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""user IP"""', 'db_index': '(True)'}), "(blank=True, null=True, verbose_name='user IP',\n db_index=True)\n", (651, 717), False, 'from django.db import models, migrations\n'), ((845, 912), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'verbose_name': '"""url"""', 'max_length': '(256)'}), "(db_index=True, verbose_name='url', max_length=256)\n", (861, 912), False, 'from django.db import models, migrations\n')]
import numpy import pytest import os from shutil import rmtree from numpy.testing import assert_allclose import scipy.stats import scipy.integrate import scipy.special from fgivenx.mass import PMF, compute_pmf def gaussian_pmf(y, mu=0, sigma=1): return scipy.special.erfc(numpy.abs(y-mu)/numpy.sqrt(2)/sigma) def test_gaussian(): numpy.random.seed(0) nsamp = 5000 samples = numpy.random.randn(nsamp) y = numpy.random.uniform(-3, 3, 10) m = PMF(samples, y) m_ = gaussian_pmf(y) assert_allclose(m, m_, rtol=3e-1) def test_PMF(): # Compute samples numpy.random.seed(0) nsamp = 100 samples = numpy.concatenate((-5+numpy.random.randn(nsamp//2), 5+numpy.random.randn(nsamp//2))) # Compute PMF y = numpy.random.uniform(-10, 10, 10) m = PMF(samples, y) # Compute PMF via monte carlo N = 100000 kernel = scipy.stats.gaussian_kde(samples) s = kernel.resample(N)[0] m_ = [sum(kernel(s) <= kernel(y_i))/float(N) for y_i in y] assert_allclose(m, m_, atol=3*N**-0.5) # Compute PMF via quadrature m_ = [scipy.integrate.quad(lambda x: kernel(x)*(kernel(x) <= kernel(y_i)), -numpy.inf, numpy.inf, limit=500)[0] for y_i in y] assert_allclose(m, m_, atol=1e-4) assert_allclose([0, 0], PMF(samples, [-1e3, 1e3])) samples = [0, 0] m = PMF(samples, y) assert_allclose(m, numpy.zeros_like(y)) def test_compute_pmf(): with pytest.raises(TypeError): compute_pmf(None, None, wrong_argument=None) cache = '.test_cache/test' numpy.random.seed(0) nsamp = 5000 a, b, e, f = 0, 1, 0, 1 m = numpy.random.normal(a, b, nsamp) c = numpy.random.normal(e, f, nsamp) nx = 100 x = numpy.linspace(-1, 1, nx) fsamps = (numpy.outer(x, m) + c) ny = 100 y = numpy.linspace(-3, 3, ny) assert(not os.path.isfile(cache + '_masses.pkl')) m = compute_pmf(fsamps, y, cache=cache) assert(os.path.isfile(cache + '_masses.pkl')) m_ = [gaussian_pmf(y, a*xi+e, numpy.sqrt(b**2*xi**2+f**2)) for xi in x] assert_allclose(m.transpose(), m_, atol=3e-1) m = compute_pmf(fsamps, y, cache=cache) assert_allclose(m.transpose(), m_, atol=3e-1) rmtree('.test_cache')
[ "numpy.random.normal", "numpy.abs", "fgivenx.mass.compute_pmf", "fgivenx.mass.PMF", "numpy.sqrt", "numpy.testing.assert_allclose", "numpy.zeros_like", "shutil.rmtree", "os.path.isfile", "numpy.linspace", "numpy.outer", "pytest.raises", "numpy.random.seed", "numpy.random.uniform", "numpy.random.randn" ]
[((342, 362), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (359, 362), False, 'import numpy\n'), ((394, 419), 'numpy.random.randn', 'numpy.random.randn', (['nsamp'], {}), '(nsamp)\n', (412, 419), False, 'import numpy\n'), ((428, 459), 'numpy.random.uniform', 'numpy.random.uniform', (['(-3)', '(3)', '(10)'], {}), '(-3, 3, 10)\n', (448, 459), False, 'import numpy\n'), ((468, 483), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (471, 483), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((513, 545), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'rtol': '(0.3)'}), '(m, m_, rtol=0.3)\n', (528, 545), False, 'from numpy.testing import assert_allclose\n'), ((591, 611), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (608, 611), False, 'import numpy\n'), ((787, 820), 'numpy.random.uniform', 'numpy.random.uniform', (['(-10)', '(10)', '(10)'], {}), '(-10, 10, 10)\n', (807, 820), False, 'import numpy\n'), ((829, 844), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (832, 844), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1039, 1081), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'atol': '(3 * N ** -0.5)'}), '(m, m_, atol=3 * N ** -0.5)\n', (1054, 1081), False, 'from numpy.testing import assert_allclose\n'), ((1287, 1322), 'numpy.testing.assert_allclose', 'assert_allclose', (['m', 'm_'], {'atol': '(0.0001)'}), '(m, m_, atol=0.0001)\n', (1302, 1322), False, 'from numpy.testing import assert_allclose\n'), ((1407, 1422), 'fgivenx.mass.PMF', 'PMF', (['samples', 'y'], {}), '(samples, y)\n', (1410, 1422), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1618, 1638), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (1635, 1638), False, 'import numpy\n'), ((1692, 1724), 'numpy.random.normal', 'numpy.random.normal', (['a', 'b', 'nsamp'], {}), '(a, b, nsamp)\n', (1711, 1724), False, 'import numpy\n'), ((1733, 1765), 'numpy.random.normal', 'numpy.random.normal', (['e', 'f', 'nsamp'], {}), '(e, f, nsamp)\n', (1752, 1765), False, 'import numpy\n'), ((1787, 1812), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', 'nx'], {}), '(-1, 1, nx)\n', (1801, 1812), False, 'import numpy\n'), ((1871, 1896), 'numpy.linspace', 'numpy.linspace', (['(-3)', '(3)', 'ny'], {}), '(-3, 3, ny)\n', (1885, 1896), False, 'import numpy\n'), ((1960, 1995), 'fgivenx.mass.compute_pmf', 'compute_pmf', (['fsamps', 'y'], {'cache': 'cache'}), '(fsamps, y, cache=cache)\n', (1971, 1995), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((2007, 2044), 'os.path.isfile', 'os.path.isfile', (["(cache + '_masses.pkl')"], {}), "(cache + '_masses.pkl')\n", (2021, 2044), False, 'import os\n'), ((2182, 2217), 'fgivenx.mass.compute_pmf', 'compute_pmf', (['fsamps', 'y'], {'cache': 'cache'}), '(fsamps, y, cache=cache)\n', (2193, 2217), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((2273, 2294), 'shutil.rmtree', 'rmtree', (['""".test_cache"""'], {}), "('.test_cache')\n", (2279, 2294), False, 'from shutil import rmtree\n'), ((1350, 1381), 'fgivenx.mass.PMF', 'PMF', (['samples', '[-1000.0, 1000.0]'], {}), '(samples, [-1000.0, 1000.0])\n', (1353, 1381), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1446, 1465), 'numpy.zeros_like', 'numpy.zeros_like', (['y'], {}), '(y)\n', (1462, 1465), False, 'import numpy\n'), ((1503, 1527), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1516, 1527), False, 'import pytest\n'), ((1537, 1581), 
'fgivenx.mass.compute_pmf', 'compute_pmf', (['None', 'None'], {'wrong_argument': 'None'}), '(None, None, wrong_argument=None)\n', (1548, 1581), False, 'from fgivenx.mass import PMF, compute_pmf\n'), ((1827, 1844), 'numpy.outer', 'numpy.outer', (['x', 'm'], {}), '(x, m)\n', (1838, 1844), False, 'import numpy\n'), ((1913, 1950), 'os.path.isfile', 'os.path.isfile', (["(cache + '_masses.pkl')"], {}), "(cache + '_masses.pkl')\n", (1927, 1950), False, 'import os\n'), ((2081, 2118), 'numpy.sqrt', 'numpy.sqrt', (['(b ** 2 * xi ** 2 + f ** 2)'], {}), '(b ** 2 * xi ** 2 + f ** 2)\n', (2091, 2118), False, 'import numpy\n'), ((278, 295), 'numpy.abs', 'numpy.abs', (['(y - mu)'], {}), '(y - mu)\n', (287, 295), False, 'import numpy\n'), ((294, 307), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (304, 307), False, 'import numpy\n'), ((664, 694), 'numpy.random.randn', 'numpy.random.randn', (['(nsamp // 2)'], {}), '(nsamp // 2)\n', (682, 694), False, 'import numpy\n'), ((729, 759), 'numpy.random.randn', 'numpy.random.randn', (['(nsamp // 2)'], {}), '(nsamp // 2)\n', (747, 759), False, 'import numpy\n')]
"""Config flow for Vera.""" from __future__ import annotations from collections.abc import Mapping import logging import re from typing import Any import pyvera as pv from requests.exceptions import RequestException import voluptuous as vol from homeassistant import config_entries from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_EXCLUDE, CONF_LIGHTS, CONF_SOURCE from homeassistant.core import callback from homeassistant.helpers import entity_registry as er from .const import CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN LIST_REGEX = re.compile("[^0-9]+") _LOGGER = logging.getLogger(__name__) def fix_device_id_list(data: list[Any]) -> list[int]: """Fix the id list by converting it to a supported int list.""" return str_to_int_list(list_to_str(data)) def str_to_int_list(data: str) -> list[int]: """Convert a string to an int list.""" return [int(s) for s in LIST_REGEX.split(data) if len(s) > 0] def list_to_str(data: list[Any]) -> str: """Convert an int list to a string.""" return " ".join([str(i) for i in data]) def new_options(lights: list[int], exclude: list[int]) -> dict: """Create a standard options object.""" return {CONF_LIGHTS: lights, CONF_EXCLUDE: exclude} def options_schema(options: Mapping[str, Any] = None) -> dict: """Return options schema.""" options = options or {} return { vol.Optional( CONF_LIGHTS, default=list_to_str(options.get(CONF_LIGHTS, [])), ): str, vol.Optional( CONF_EXCLUDE, default=list_to_str(options.get(CONF_EXCLUDE, [])), ): str, } def options_data(user_input: dict) -> dict: """Return options dict.""" return new_options( str_to_int_list(user_input.get(CONF_LIGHTS, "")), str_to_int_list(user_input.get(CONF_EXCLUDE, "")), ) class OptionsFlowHandler(config_entries.OptionsFlow): """Options for the component.""" def __init__(self, config_entry: ConfigEntry) -> None: """Init object.""" self.config_entry = config_entry async def async_step_init(self, user_input: dict = None): """Manage the options.""" if user_input is not None: return self.async_create_entry( title="", data=options_data(user_input), ) return self.async_show_form( step_id="init", data_schema=vol.Schema(options_schema(self.config_entry.options)), ) class VeraFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Vera config flow.""" @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlowHandler: """Get the options flow.""" return OptionsFlowHandler(config_entry) async def async_step_user(self, user_input: dict = None): """Handle user initiated flow.""" if user_input is not None: return await self.async_step_finish( { **user_input, **options_data(user_input), **{CONF_SOURCE: config_entries.SOURCE_USER}, **{CONF_LEGACY_UNIQUE_ID: False}, } ) return self.async_show_form( step_id="user", data_schema=vol.Schema( {**{vol.Required(CONF_CONTROLLER): str}, **options_schema()} ), ) async def async_step_import(self, config: dict): """Handle a flow initialized by import.""" # If there are entities with the legacy unique_id, then this imported config # should also use the legacy unique_id for entity creation. 
entity_registry = er.async_get(self.hass) use_legacy_unique_id = ( len( [ entry for entry in entity_registry.entities.values() if entry.platform == DOMAIN and entry.unique_id.isdigit() ] ) > 0 ) return await self.async_step_finish( { **config, **{CONF_SOURCE: config_entries.SOURCE_IMPORT}, **{CONF_LEGACY_UNIQUE_ID: use_legacy_unique_id}, } ) async def async_step_finish(self, config: dict): """Validate and create config entry.""" base_url = config[CONF_CONTROLLER] = config[CONF_CONTROLLER].rstrip("/") controller = pv.VeraController(base_url) # Verify the controller is online and get the serial number. try: await self.hass.async_add_executor_job(controller.refresh_data) except RequestException: _LOGGER.error("Failed to connect to vera controller %s", base_url) return self.async_abort( reason="cannot_connect", description_placeholders={"base_url": base_url} ) await self.async_set_unique_id(controller.serial_number) self._abort_if_unique_id_configured(config) return self.async_create_entry(title=base_url, data=config)
[ "logging.getLogger", "voluptuous.Required", "pyvera.VeraController", "re.compile", "homeassistant.helpers.entity_registry.async_get" ]
[((586, 607), 're.compile', 're.compile', (['"""[^0-9]+"""'], {}), "('[^0-9]+')\n", (596, 607), False, 'import re\n'), ((618, 645), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (635, 645), False, 'import logging\n'), ((3752, 3775), 'homeassistant.helpers.entity_registry.async_get', 'er.async_get', (['self.hass'], {}), '(self.hass)\n', (3764, 3775), True, 'from homeassistant.helpers import entity_registry as er\n'), ((4515, 4542), 'pyvera.VeraController', 'pv.VeraController', (['base_url'], {}), '(base_url)\n', (4532, 4542), True, 'import pyvera as pv\n'), ((3385, 3414), 'voluptuous.Required', 'vol.Required', (['CONF_CONTROLLER'], {}), '(CONF_CONTROLLER)\n', (3397, 3414), True, 'import voluptuous as vol\n')]
from pprint import pprint import SCons.Builder from SCons.Script import * import json import os import copy import collections import xml.etree.ElementTree as ET from mplabx import MPLABXProperties MAKEFILE_TEXT = ''' MKDIR=mkdir CP=cp CCADMIN=CCadmin RANLIB=ranlib build: .build-post .build-pre: .build-post: .build-impl clean: .clean-post .clean-pre: .clean-post: .clean-impl clobber: .clobber-post .clobber-pre: .clobber-post: .clobber-impl all: .all-post .all-pre: .all-post: .all-impl help: .help-post .help-pre: .help-post: .help-impl include nbproject/Makefile-impl.mk include nbproject/Makefile-variables.mk ''' PROJECT_XML_TEXT = ''' <project> <type>com.microchip.mplab.nbide.embedded.makeproject</type> <configuration> <data> <name /> <sourceRootList /> <confList /> </data> </configuration> </project> ''' CONFIGURATIONS_XML_TEXT = ''' <configurationDescriptor version="65"> <logicalFolder name="root" displayName="root" projectFiles="true" /> <sourceRootList /> <projectmakefile>Makefile</projectmakefile> <confs /> </configurationDescriptor> ''' CONFIGURATION_ELEMENT_TEXT = ''' <conf type="2"> <toolsSet> <targetDevice /> <languageToolchain /> <languageToolchainVersion /> </toolsSet> <HI-TECH-COMP /> <HI-TECH-LINK /> <XC8-config-global /> </conf> ''' def nested_dict(): return collections.defaultdict(nested_dict) def merge(destination, source): for key, value in source.items(): if isinstance(value, dict): # get node or create one node = destination.setdefault(key, {}) merge(node, value) else: destination[key] = value return destination def build_mplabx_nbproject_configuration( env, name: str, properties: MPLABXProperties, additional_compiler_properties: dict[str, str] = {}, additional_linker_properties: dict[str, str] = {}, additional_xc8_properties: dict[str, str] = {}, ): defines_str = ';'.join(env['CPPDEFINES']) includes_str = ';'.join([env.Dir(path).abspath for path in env['CPPPATH']]) default_compiler_properties = { 'define-macros': f'{defines_str}', 'extra-include-directories': f'{includes_str}', } root = ET.fromstring(CONFIGURATION_ELEMENT_TEXT) root.set('name', name) root.find('./toolsSet/targetDevice').text = properties.device root.find('./toolsSet/languageToolchain').text = properties.toolchain root.find('./toolsSet/languageToolchainVersion').text = properties.toolchain_version group_properties_mapping = { 'HI-TECH-COMP': default_compiler_properties | properties.compiler_properties | additional_compiler_properties, 'HI-TECH-LINK': properties.linker_properties | additional_linker_properties, 'XC8-config-global': properties.xc8_properties | additional_xc8_properties, } for group_name, group_properties in group_properties_mapping.items(): for key, value in group_properties.items(): root.find(group_name).append(ET.Element('property', key=key, value=value)) # ET.dump(root) return env.Value(root) def _create_file_hierarchy(source_relpaths: list[str]): hierarchy = nested_dict() # Put all entries into the hierarchy, keyed from dirname to basename for source_relpath in sorted(source_relpaths): dirname, basename = os.path.split(source_relpath) hierarchy[dirname][basename] = source_relpath # Split all directory keys further while True: found_nested = False modified_hierarchy = nested_dict() for parent_key, entries in hierarchy.items(): dirname, basename = os.path.split(parent_key) if dirname: merge(modified_hierarchy[dirname][basename], entries) found_nested = True else: merge(modified_hierarchy[parent_key], entries) hierarchy = modified_hierarchy if not found_nested: break return hierarchy def 
_build_xml_files(project_name: str, project_dir, confs: list, source_files: list[str]): # Create the `configurations.xml` and `project.xml` file configurations_xml_root = ET.fromstring(CONFIGURATIONS_XML_TEXT) project_xml_root = ET.fromstring(PROJECT_XML_TEXT) project_xml_root.set('xmlns', 'http://www.netbeans.org/ns/project/1') project_xml_root.find('./configuration/data').set('xmlns', 'http://www.netbeans.org/ns/make-project/1') project_xml_root.find('./configuration/data/name').text = project_name # Add each configuration to the two XML files for configuration_node in confs: # Modify each configuration to make absolute paths relative to the project directory modified_node = copy.deepcopy(configuration_node.read()) for includes_element in modified_node.findall('.//property[@key="extra-include-directories"]'): includes_value = includes_element.get('value') includes_relative = ';'.join([os.path.relpath(abspath, project_dir.abspath) for abspath in includes_value.split(';')]) includes_element.set('value', includes_relative) configurations_xml_root.find('./confs').append(modified_node) # Update the `project.xml` configuration list project_conf_list_element = project_xml_root.find('./configuration/data/confList') project_conf_elem_element = ET.SubElement(project_conf_list_element, 'confElem') project_conf_name_element = ET.SubElement(project_conf_elem_element, 'name') project_conf_name_element.text = configuration_node.read().get('name') project_conf_text_element = ET.SubElement(project_conf_elem_element, 'text') project_conf_text_element.text = '2' # Generate the source root list, which will have a single root (common path for all sources) common_root_path = os.path.commonpath([os.path.abspath(path) for path in source_files]) source_root_relpath = os.path.relpath(common_root_path, project_dir.abspath) configurations_source_root_element = ET.Element('Elem') configurations_source_root_element.text = source_root_relpath configurations_xml_root.find('./sourceRootList').append(configurations_source_root_element) project_source_root_element = ET.Element('sourceRootElem') project_source_root_element.text = os.path.relpath(common_root_path, project_dir.abspath) project_xml_root.find('./configuration/data/sourceRootList').append(project_source_root_element) # Generate all logical folders and private files root_logical_folder = configurations_xml_root.find('./logicalFolder[@name="root"]') source_relpaths = [os.path.relpath(source_path, common_root_path) for source_path in source_files] source_hierarchy = _create_file_hierarchy(source_relpaths) def _walk_tree(parent_element: ET.Element, tree: dict): for key, data in tree.items(): if isinstance(data, dict): folder_element = ET.SubElement(parent_element, 'logicalFolder', name=key, displayName=key, projectFiles="true") _walk_tree(folder_element, data) elif isinstance(data, str): item_element = ET.SubElement(parent_element, 'itemPath') item_element.text = os.path.relpath(data, project_dir.abspath) _walk_tree(root_logical_folder, source_hierarchy) # Generate an item for the build Makefile ET.SubElement(root_logical_folder, 'itemPath').text = 'Makefile' return (configurations_xml_root, project_xml_root) def build_mplabx_nbproject(target, source, env): ''' target - (singleton list) - Directory node to the project folder source - (list) - XML value nodes for each project configuration ''' project_dir = target[0] nbproject_dir = project_dir.Dir('nbproject') configurations_xml_file = nbproject_dir.File('configurations.xml') project_xml_file = 
nbproject_dir.File('project.xml') makefile_file = project_dir.File('Makefile') # Make the directories env.Execute(Mkdir(project_dir)) env.Execute(Mkdir(nbproject_dir)) # Generate the XML files confs = source configurations_xml_root, project_xml_root = _build_xml_files( project_name=os.path.basename(str(project_dir)), project_dir=project_dir, confs=confs, source_files=env['source_files']) with open(str(configurations_xml_file), 'w') as f: ET.indent(configurations_xml_root, space=' ') ET.ElementTree(configurations_xml_root).write(f, encoding='unicode') with open(str(project_xml_file), 'w') as f: ET.indent(project_xml_root, space=' ') ET.ElementTree(project_xml_root).write(f, encoding='unicode') with open(str(makefile_file), 'w') as f: f.write(MAKEFILE_TEXT) _mplabx_nbproject_builder = SCons.Builder.Builder(action=build_mplabx_nbproject) def generate(env): env.AddMethod(build_mplabx_nbproject_configuration, 'MplabxNbprojectConfiguration') env['BUILDERS']['MplabxNbproject'] = _mplabx_nbproject_builder def exists(env): return 1
[ "xml.etree.ElementTree.indent", "os.path.split", "xml.etree.ElementTree.Element", "xml.etree.ElementTree.SubElement", "xml.etree.ElementTree.ElementTree", "collections.defaultdict", "os.path.abspath", "xml.etree.ElementTree.fromstring", "os.path.relpath" ]
[((1424, 1460), 'collections.defaultdict', 'collections.defaultdict', (['nested_dict'], {}), '(nested_dict)\n', (1447, 1460), False, 'import collections\n'), ((2310, 2351), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['CONFIGURATION_ELEMENT_TEXT'], {}), '(CONFIGURATION_ELEMENT_TEXT)\n', (2323, 2351), True, 'import xml.etree.ElementTree as ET\n'), ((4268, 4306), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['CONFIGURATIONS_XML_TEXT'], {}), '(CONFIGURATIONS_XML_TEXT)\n', (4281, 4306), True, 'import xml.etree.ElementTree as ET\n'), ((4335, 4366), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['PROJECT_XML_TEXT'], {}), '(PROJECT_XML_TEXT)\n', (4348, 4366), True, 'import xml.etree.ElementTree as ET\n'), ((6040, 6094), 'os.path.relpath', 'os.path.relpath', (['common_root_path', 'project_dir.abspath'], {}), '(common_root_path, project_dir.abspath)\n', (6055, 6094), False, 'import os\n'), ((6137, 6155), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""Elem"""'], {}), "('Elem')\n", (6147, 6155), True, 'import xml.etree.ElementTree as ET\n'), ((6353, 6381), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""sourceRootElem"""'], {}), "('sourceRootElem')\n", (6363, 6381), True, 'import xml.etree.ElementTree as ET\n'), ((6421, 6475), 'os.path.relpath', 'os.path.relpath', (['common_root_path', 'project_dir.abspath'], {}), '(common_root_path, project_dir.abspath)\n', (6436, 6475), False, 'import os\n'), ((3439, 3468), 'os.path.split', 'os.path.split', (['source_relpath'], {}), '(source_relpath)\n', (3452, 3468), False, 'import os\n'), ((5477, 5529), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['project_conf_list_element', '"""confElem"""'], {}), "(project_conf_list_element, 'confElem')\n", (5490, 5529), True, 'import xml.etree.ElementTree as ET\n'), ((5566, 5614), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['project_conf_elem_element', '"""name"""'], {}), "(project_conf_elem_element, 'name')\n", (5579, 5614), True, 'import xml.etree.ElementTree as ET\n'), ((5730, 5778), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['project_conf_elem_element', '"""text"""'], {}), "(project_conf_elem_element, 'text')\n", (5743, 5778), True, 'import xml.etree.ElementTree as ET\n'), ((6742, 6788), 'os.path.relpath', 'os.path.relpath', (['source_path', 'common_root_path'], {}), '(source_path, common_root_path)\n', (6757, 6788), False, 'import os\n'), ((7503, 7549), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root_logical_folder', '"""itemPath"""'], {}), "(root_logical_folder, 'itemPath')\n", (7516, 7549), True, 'import xml.etree.ElementTree as ET\n'), ((8524, 8570), 'xml.etree.ElementTree.indent', 'ET.indent', (['configurations_xml_root'], {'space': '""" """'}), "(configurations_xml_root, space=' ')\n", (8533, 8570), True, 'import xml.etree.ElementTree as ET\n'), ((8705, 8744), 'xml.etree.ElementTree.indent', 'ET.indent', (['project_xml_root'], {'space': '""" """'}), "(project_xml_root, space=' ')\n", (8714, 8744), True, 'import xml.etree.ElementTree as ET\n'), ((3737, 3762), 'os.path.split', 'os.path.split', (['parent_key'], {}), '(parent_key)\n', (3750, 3762), False, 'import os\n'), ((5965, 5986), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (5980, 5986), False, 'import os\n'), ((3105, 3149), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""property"""'], {'key': 'key', 'value': 'value'}), "('property', key=key, value=value)\n", (3115, 3149), True, 'import xml.etree.ElementTree as ET\n'), ((7057, 7155), 
'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['parent_element', '"""logicalFolder"""'], {'name': 'key', 'displayName': 'key', 'projectFiles': '"""true"""'}), "(parent_element, 'logicalFolder', name=key, displayName=key,\n projectFiles='true')\n", (7070, 7155), True, 'import xml.etree.ElementTree as ET\n'), ((8579, 8618), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['configurations_xml_root'], {}), '(configurations_xml_root)\n', (8593, 8618), True, 'import xml.etree.ElementTree as ET\n'), ((8753, 8785), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['project_xml_root'], {}), '(project_xml_root)\n', (8767, 8785), True, 'import xml.etree.ElementTree as ET\n'), ((5075, 5120), 'os.path.relpath', 'os.path.relpath', (['abspath', 'project_dir.abspath'], {}), '(abspath, project_dir.abspath)\n', (5090, 5120), False, 'import os\n'), ((7272, 7313), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['parent_element', '"""itemPath"""'], {}), "(parent_element, 'itemPath')\n", (7285, 7313), True, 'import xml.etree.ElementTree as ET\n'), ((7350, 7392), 'os.path.relpath', 'os.path.relpath', (['data', 'project_dir.abspath'], {}), '(data, project_dir.abspath)\n', (7365, 7392), False, 'import os\n')]
#! /usr/bin/env python # Author: <NAME> # Contact: <EMAIL> # Revision: $Revision: 3915 $ # Date: $Date: 2005-10-02 03:06:42 +0200 (Sun, 02 Oct 2005) $ # Copyright: This module has been placed in the public domain. """ Tests for docutils.transforms.peps. """ from __init__ import DocutilsTestSupport from docutils.transforms.peps import TargetNotes from docutils.parsers.rst import Parser def suite(): parser = Parser() s = DocutilsTestSupport.TransformTestSuite(parser) s.generateTests(totest) return s totest = {} totest['target_notes'] = ((TargetNotes,), [ ["""\ No references or targets exist, therefore no "References" section should be generated. """, """\ <document source="test data"> <paragraph> No references or targets exist, therefore no "References" section should be generated. """], ["""\ A target exists, here's the reference_. A "References" section should be generated. .. _reference: http://www.example.org """, """\ <document source="test data"> <paragraph> A target exists, here's the \n\ <reference name="reference" refname="reference"> reference \n\ <footnote_reference auto="1" ids="id3" refname="TARGET_NOTE: id2"> . A "References" section should be generated. <target ids="reference" names="reference" refuri="http://www.example.org"> <section ids="id1"> <title> References <footnote auto="1" ids="id2" names="TARGET_NOTE:\ id2"> <paragraph> <reference refuri="http://www.example.org"> http://www.example.org """], ]) if __name__ == '__main__': import unittest unittest.main(defaultTest='suite')
[ "unittest.main", "docutils.parsers.rst.Parser", "__init__.DocutilsTestSupport.TransformTestSuite" ]
[((419, 427), 'docutils.parsers.rst.Parser', 'Parser', ([], {}), '()\n', (425, 427), False, 'from docutils.parsers.rst import Parser\n'), ((436, 482), '__init__.DocutilsTestSupport.TransformTestSuite', 'DocutilsTestSupport.TransformTestSuite', (['parser'], {}), '(parser)\n', (474, 482), False, 'from __init__ import DocutilsTestSupport\n'), ((1690, 1724), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (1703, 1724), False, 'import unittest\n')]
# Generated by Django 4.0.2 on 2022-03-15 22:43 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('rental_property', '0010_alter_rentalunit_status'), ] operations = [ migrations.AlterModelOptions( name='rentalunit', options={'verbose_name_plural': 'Rental Houses'}, ), ]
[ "django.db.migrations.AlterModelOptions" ]
[((240, 342), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""rentalunit"""', 'options': "{'verbose_name_plural': 'Rental Houses'}"}), "(name='rentalunit', options={\n 'verbose_name_plural': 'Rental Houses'})\n", (268, 342), False, 'from django.db import migrations\n')]
from datetime import datetime import base64 import os import re import requests import sys import urllib.parse import xmltodict import xbmc import xbmcgui import xbmcplugin import xbmcaddon import xbmcvfs __PLUGIN_ID__ = "plugin.audio.podcasts" # see https://forum.kodi.tv/showthread.php?tid=112916 _MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] GPODDER_API = { "login": "%s/api/2/auth/%s/login.json", "subscriptions": "%s/subscriptions/%s.%s" } settings = xbmcaddon.Addon(id=__PLUGIN_ID__) addon_dir = xbmcvfs.translatePath(settings.getAddonInfo('path')) class HttpStatusError(Exception): message = "" def __init__(self, msg): self.message = msg class Mediathek: _GROUPS = 10 _ENTRIES = 10 addon_handle = None def __init__(self): pass def _parse_outlines_from_opml(self, outline): if type(outline) is not list: outline = [outline] entries = [] for i, o in enumerate(outline): name = o["@title"] if "@title" in o else o["@text"] if not name and "@xmlUrl" in o: m = re.match( "^https?:\/\/([^\/]+).*\/?.*\/([^\/]+)\/?$", o["@xmlUrl"]) if m: name = "%s %s...%s" % (settings.getLocalizedString( 32053), m.groups()[0][:20], m.groups()[1][-40:]) entry = { "path": str(i), "name": name, "node": [] } if "@type" in o and o["@type"] == "rss" and "@xmlUrl" in o: entry["params"] = [{ "rss": o["@xmlUrl"] }] entries.append(entry) elif "outline" in o: entry["node"] = self._parse_outlines_from_opml( o["outline"]) entries.append(entry) return entries def _play_latest(self, url): try: title, description, image, items = self._load_rss(url) item = items[0] li = self._create_list_item(item) xbmcplugin.setResolvedUrl(self.addon_handle, True, li) except HttpStatusError as error: xbmcgui.Dialog().notification(settings.getLocalizedString(32090), error.message) def _create_list_item(self, item): li = xbmcgui.ListItem(label=item["name"]) if "description" in item: li.setProperty("label2", item["description"]) if "stream_url" in item: li.setPath(item["stream_url"]) if "type" in item: if item["type"] == "video": li.setInfo(item["type"], { "title": item["name"], "plot": item["description"] if "description" in item else "" }) elif item["type"] == "music": li.setInfo(item["type"], { "title": item["name"] }) if "icon" in item and item["icon"]: li.setArt({"icon": item["icon"]}) else: li.setArt({"icon": os.path.join( addon_dir, "resources", "assets", "icon.png")} ) if "date" in item and item["date"]: if "setDateTime" in dir(li): # available since Kodi v20 li.setDateTime(item["date"].strftime("%Y-%m-%dT%H:%M:%SZ")) else: pass if "specialsort" in item: li.setProperty("SpecialSort", item["specialsort"]) if "duration" in item and item["duration"] >= 0: li.setInfo("music", {"duration": item["duration"]}) li.setInfo("video", {"duration": item["duration"]}) return li def _add_list_item(self, entry, path): def _build_param_string(params, current=""): if params == None: return current for obj in params: for name in obj: enc_value = base64.urlsafe_b64encode( obj[name].encode("utf-8")) current += "?" 
if len(current) == 0 else "&" current += name + "=" + str(enc_value, "utf-8") return current if path == "/": path = "" item_path = path + "/" + entry["path"] param_string = "" if "params" in entry: param_string = _build_param_string(entry["params"], current=param_string) li = self._create_list_item(entry) if "stream_url" in entry: url = entry["stream_url"] else: url = "".join( ["plugin://", __PLUGIN_ID__, item_path, param_string]) is_folder = "node" in entry li.setProperty("IsPlayable", "false" if is_folder else "true") xbmcplugin.addDirectoryItem(handle=self.addon_handle, listitem=li, url=url, isFolder=is_folder) def _http_request(self, url, headers={}, method="GET"): useragent = f"{settings.getAddonInfo('id')}/{settings.getAddonInfo('version')} (Kodi/{xbmc.getInfoLabel('System.BuildVersionShort')})" headers["User-Agent"] = useragent if method == "GET": req = requests.get elif method == "POST": req = requests.post else: raise HttpStatusError(settings.getLocalizedString(32091) % method) try: res = req(url, headers=headers) except requests.exceptions.RequestException as error: xbmc.log("Request Exception: %s" % str(error), xbmc.LOGERROR) raise HttpStatusError(settings.getLocalizedString(32092)) if res.status_code == 200: return res.text, res.cookies else: raise HttpStatusError(settings.getLocalizedString( 32093) % (res.status_code, url)) def _load_rss(self, url): def _parse_item(_ci): if "enclosure" in _ci and "@url" in _ci["enclosure"]: stream_url = _ci["enclosure"]["@url"] if _ci["enclosure"]["@type"].split("/")[0] == "video": _type = "video" else: _type = "music" elif "guid" in _ci and _ci["guid"]: # not supported yet return None else: return None if "itunes:image" in _ci and "@href" in _ci["itunes:image"]: item_image = _ci["itunes:image"]["@href"] else: item_image = image if "pubDate" in _ci: _f = re.findall( "(\d{1,2}) (\w{3}) (\d{4}) (\d{2}):(\d{2}):(\d{2})", _ci["pubDate"]) if _f: _m = _MONTHS.index(_f[0][1]) + 1 pubDate = datetime(year=int(_f[0][2]), month=_m, day=int(_f[0][0]), hour=int( _f[0][3]), minute=int(_f[0][4]), second=int(_f[0][5])) else: pubDate = None if "itunes:duration" in _ci: try: duration = int(_ci["itunes:duration"]) #if duration is already in seconds except: try: #try converting HH:MM:SS or MM:SS string to integer seconds durationList = _ci["itunes:duration"].split(":") if len(durationList) == 3: #HH:MM:SS duration = int(durationList[0]) * 3600 + int(durationList[1]) * 60 + int(durationList[2]) elif len(durationList) == 2: #MM:SS duration = int(durationList[0]) * 60 + int(durationList[1]) else: duration = -1 except: duration = -1 else: duration = -1 return { "name": _ci["title"], "description": _ci["description"] if "description" in _ci else "", "date": pubDate, "icon": item_image, "stream_url": stream_url, "type": _type, "duration": duration } res, cookies = self._http_request(url) if not res.startswith("<?xml"): raise HttpStatusError("%s %s" % ( settings.getLocalizedString(32094), url)) else: rss_feed = xmltodict.parse(res) channel = rss_feed["rss"]["channel"] title = channel["title"] if "title" in channel else "" description = channel["description"] if "description" in channel else "" if "image" in channel and "url" in channel["image"]: image = channel["image"]["url"] elif "itunes:image" in channel: image = channel["itunes:image"]["@href"] else: image = None items = [] if type(channel["item"]) is list: for _ci in channel["item"]: item = _parse_item(_ci) if item is not None: items += [item] else: item = _parse_item(channel["item"]) if item is not None: items += 
[item] return title, description, image, items def _render_rss(self, path, url): def _update_Image(path, image): if path.startswith("/pod-"): _p = path[5:].split("/") settings.setSetting("group_%i_rss_%i_icon" % (int(_p[0]), int(_p[1])), image) try: title, description, image, items = self._load_rss(url) if image: _update_Image(path, image) except HttpStatusError as error: xbmc.log("HTTP Status Error: %s, path=%s" % (error.message, path), xbmc.LOGERROR) xbmcgui.Dialog().notification(settings.getLocalizedString(32090), error.message) else: if len(items) > 0 and settings.getSetting("anchor") == "true": entry = { "path": "latest", "name": "%s (%s)" % (title, settings.getLocalizedString(32052)), "description": description, "icon": image, "date": datetime.now(), "specialsort": "top", "type": items[0]["type"], "params": [ { "play_latest": url } ] } self._add_list_item(entry, path) for item in items: li = self._create_list_item(item) xbmcplugin.addDirectoryItem(handle=self.addon_handle, listitem=li, url=item["stream_url"], isFolder=False) if "setDateTime" in dir(li): # available since Kodi v20 xbmcplugin.addSortMethod( self.addon_handle, xbmcplugin.SORT_METHOD_DATE) xbmcplugin.endOfDirectory(self.addon_handle) def _browse(self, dir_structure, path, updateListing=False): def _get_node_by_path(path): if path == "/": return dir_structure[0] tokens = path.split("/")[1:] node = dir_structure[0] while len(tokens) > 0: path = tokens.pop(0) for n in node["node"]: if n["path"] == path: node = n break return node node = _get_node_by_path(path) for entry in node["node"]: self._add_list_item(entry, path) xbmcplugin.addSortMethod( self.addon_handle, xbmcplugin.SORT_METHOD_FULLPATH) xbmcplugin.addSortMethod( self.addon_handle, xbmcplugin.SORT_METHOD_LABEL) xbmcplugin.endOfDirectory( self.addon_handle, updateListing=updateListing) def _parse_opml(self, data): opml_data = xmltodict.parse(data) entries = self._parse_outlines_from_opml( opml_data["opml"]["body"]["outline"]) return opml_data["opml"]["head"]["title"], entries def _open_opml_file(self, path): with open(path) as _opml_file: return _opml_file.read() def _build_dir_structure(self): groups = [] # opml files / podcasts lists for g in range(self._GROUPS): if settings.getSetting("opml_file_%i" % g) == "": continue path = os.path.join( addon_dir, settings.getSetting("opml_file_%i" % g)) try: name, nodes = self._parse_opml(self._open_opml_file(path)) groups.append({ "path": "opml-%i" % g, "name": name, "node": nodes }) except: xbmc.log("Cannot read opml file %s" % path, xbmc.LOGERROR) # rss feeds from settings for g in range(self._GROUPS): if settings.getSetting("group_%i_enable" % g) == "false": continue entries = [] for e in range(self._ENTRIES): if settings.getSetting("group_%i_rss_%i_enable" % (g, e)) == "false": continue icon = settings.getSetting("group_%i_rss_%i_icon" % (g, e)) entries += [{ "path": "%i" % e, "name": settings.getSetting("group_%i_rss_%i_name" % (g, e)), "params": [ { "rss": settings.getSetting("group_%i_rss_%i_url" % (g, e)) } ], "icon": icon, "node": [] }] groups += [{ "path": "pod-%i" % g, "name": settings.getSetting("group_%i_name" % g), "node": entries }] return [ { # root "path": "", "node": groups } ] def handle(self, argv): def decode_param(encoded_param): return base64.urlsafe_b64decode(encoded_param).decode("utf-8") self.addon_handle = int(argv[1]) path = urllib.parse.urlparse(argv[0]).path.replace("//", "/") url_params = urllib.parse.parse_qs(argv[2][1:]) if "rss" in url_params: url = decode_param(url_params["rss"][0]) 
self._render_rss(path, url) elif "play_latest" in url_params: url = decode_param(url_params["play_latest"][0]) self._play_latest(url) else: _dir_structure = self._build_dir_structure() self._browse(dir_structure=_dir_structure, path=path) def _login_at_gpodder(self): auth_string = "%s:%s" % (settings.getSetting( "gpodder_username"), settings.getSetting("gpodder_password")) b64auth = { "Authorization": "Basic %s" % base64.urlsafe_b64encode(auth_string.encode("utf-8")).decode("utf-8") } response, cookies = self._http_request( GPODDER_API["login"] % (settings.getSetting("gpodder_hostname"), settings.getSetting("gpodder_username")), b64auth, "POST") if "sessionid" not in cookies: raise HttpStatusError(settings.getLocalizedString(32095)) return cookies["sessionid"] def _load_gpodder_subscriptions(self, sessionid): session_cookie = { "Cookie": "%s=%s" % ("sessionid", sessionid) } response, cookies = self._http_request( GPODDER_API["subscriptions"] % (settings.getSetting("gpodder_hostname"), settings.getSetting( "gpodder_username"), "opml"), session_cookie) return response def _select_opml_file(self): path = xbmcgui.Dialog().browse( type=1, heading=settings.getLocalizedString(32070), shares="", mask=".xml|.opml") if path == "": return None, None try: return self._parse_opml(self._open_opml_file(path)) except: xbmc.log("Cannot read opml file %s" % path, xbmc.LOGERROR) return None, None def _select_feeds(self, name, entries, freeslots): selection = [e["name"] for e in entries if "params" in e and len(e["params"]) == 1 and "rss" in e["params"][0]] ok = False while not ok: feeds = xbmcgui.Dialog().multiselect( settings.getLocalizedString(32071), selection) if feeds == None: ok = True elif len(feeds) == 0: xbmcgui.Dialog().ok(settings.getLocalizedString(32072), settings.getLocalizedString(32073)) elif len(feeds) > freeslots: xbmcgui.Dialog().ok(settings.getLocalizedString(32074), settings.getLocalizedString(32075) % freeslots) else: ok = True return feeds def _select_target_group(self): names = list() freeslots = list() for g in range(self._GROUPS): free = sum("false" == settings.getSetting( "group_%i_rss_%i_enable" % (g, r)) for r in range(self._ENTRIES)) freeslots.append(free) names.append("%s %i: %s (%i %s)" % ( settings.getLocalizedString(32000), g + 1, settings.getSetting("group_%i_name" % g), free, settings.getLocalizedString(32077) )) selected = xbmcgui.Dialog().select(settings.getLocalizedString(32076), names) if selected > -1 and freeslots[selected] == 0: xbmcgui.Dialog().ok(heading=settings.getLocalizedString(32078), message=settings.getLocalizedString(32084)) return -1, 0 elif selected == -1: return -1, 0 else: return selected, freeslots[selected] def _apply_to_group(self, entries, group, feeds): settings.setSetting("group_%i_enable" % group, "True") i, j = 0, 0 while(i < self._ENTRIES): if j < len(feeds) and "false" == settings.getSetting("group_%i_rss_%i_enable" % (group, i)): settings.setSetting("group_%i_rss_%i_enable" % (group, i), "True") settings.setSetting("group_%i_rss_%i_name" % (group, i), entries[feeds[j]]["name"]) settings.setSetting("group_%i_rss_%i_url" % ( group, i), entries[feeds[j]]["params"][0]["rss"]) settings.setSetting("group_%i_rss_%i_icon" % (group, i), "") j += 1 i += 1 def _save_opml_file(self, data): opml = xmltodict.parse(data) filename = "%s.opml" % re.sub( "[^A-Za-z0-9']", " ", opml["opml"]["head"]["title"]) path = xbmcgui.Dialog().browse( type=3, heading=settings.getLocalizedString(32080), shares="") if not path: return None, None try: fullpath = "%s%s" % 
(path, filename) with open(fullpath, "w") as _file: _file.write(data) return fullpath, filename except: xbmcgui.Dialog().ok(heading=settings.getLocalizedString( 32081), message=settings.getLocalizedString(32082)) return None, None def _select_target_opml_slot(self, heading, multi=False): selection = list() for g in range(self._GROUPS): filename = settings.getSetting("opml_file_%i" % g) selection.append("%s %i%s" % (settings.getLocalizedString( 32023), g + 1, ": %s" % filename if filename else "")) dialog = xbmcgui.Dialog().multiselect if multi else xbmcgui.Dialog().select return dialog(heading, selection) def import_opml(self): # Step 1: Select target group group, freeslots = self._select_target_group() if group == -1: return # Step 2: Select file name, entries = self._select_opml_file() if name == None: return # Step 3: Select feeds feeds = self._select_feeds(name, entries, freeslots) if feeds == None: return # Step 4: Confirm self._apply_to_group(entries, group, feeds) # Success xbmcgui.Dialog().notification(settings.getLocalizedString( 32085), settings.getLocalizedString(32086)) def import_gpodder_subscriptions(self): # Step 1: Select target group group, freeslots = self._select_target_group() if group == -1: return # Step 2: query subscriptions from gPodder try: sessionid = self._login_at_gpodder() name, entries = self._parse_opml( self._load_gpodder_subscriptions(sessionid)) except HttpStatusError as error: xbmcgui.Dialog().ok(settings.getLocalizedString(32090), error.message) return # Step 3: Select feeds feeds = self._select_feeds(name, entries, freeslots) if feeds == None: return # Step 4: Apply to group self._apply_to_group(entries, group, feeds) # Success xbmcgui.Dialog().notification(settings.getLocalizedString( 32085), settings.getLocalizedString(32086)) def download_gpodder_subscriptions(self): # Step 1: download subscriptions from gPodder try: sessionid = self._login_at_gpodder() opml_data = self._load_gpodder_subscriptions(sessionid) except HttpStatusError as error: xbmcgui.Dialog().ok(settings.getLocalizedString(32090), error.message) return # Step 2: Save file in folder path, filename = self._save_opml_file(opml_data) if not path: return # Success xbmcgui.Dialog().notification(settings.getLocalizedString( 32085), "%s %s" % (settings.getLocalizedString(32083), filename)) # Step 3: Select target opml slot slot = self._select_target_opml_slot( settings.getLocalizedString(32079)) if slot == -1: return settings.setSetting("opml_file_%i" % slot, path) # Success xbmcgui.Dialog().notification(settings.getLocalizedString( 32085), settings.getLocalizedString(32086)) def unassign_opml(self): # Step 1: Select slots slots = self._select_target_opml_slot( settings.getLocalizedString(32087), multi=True) if slots == None or len(slots) == 0: return # Step 2: empty slots for slot in slots: settings.setSetting("opml_file_%i" % slot, " ") # Success xbmcgui.Dialog().notification(settings.getLocalizedString( 32085), settings.getLocalizedString(32086)) if __name__ == '__main__': mediathek = Mediathek() if sys.argv[1] == "import_gpodder_subscriptions": mediathek.import_gpodder_subscriptions() elif sys.argv[1] == "import_opml": mediathek.import_opml() elif sys.argv[1] == "download_gpodder_subscriptions": mediathek.download_gpodder_subscriptions() elif sys.argv[1] == "unassign_opml": mediathek.unassign_opml() else: mediathek.handle(sys.argv)
[ "xmltodict.parse", "base64.urlsafe_b64decode", "xbmcplugin.setResolvedUrl", "xbmc.log", "re.match", "xbmc.getInfoLabel", "os.path.join", "re.findall", "datetime.datetime.now", "xbmcaddon.Addon", "xbmcgui.ListItem", "xbmcgui.Dialog", "re.sub", "xbmcplugin.endOfDirectory", "xbmcplugin.addSortMethod", "xbmcplugin.addDirectoryItem" ]
[((529, 562), 'xbmcaddon.Addon', 'xbmcaddon.Addon', ([], {'id': '__PLUGIN_ID__'}), '(id=__PLUGIN_ID__)\n', (544, 562), False, 'import xbmcaddon\n'), ((2400, 2436), 'xbmcgui.ListItem', 'xbmcgui.ListItem', ([], {'label': "item['name']"}), "(label=item['name'])\n", (2416, 2436), False, 'import xbmcgui\n'), ((4894, 4993), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', ([], {'handle': 'self.addon_handle', 'listitem': 'li', 'url': 'url', 'isFolder': 'is_folder'}), '(handle=self.addon_handle, listitem=li, url=url,\n isFolder=is_folder)\n', (4921, 4993), False, 'import xbmcplugin\n'), ((12009, 12085), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['self.addon_handle', 'xbmcplugin.SORT_METHOD_FULLPATH'], {}), '(self.addon_handle, xbmcplugin.SORT_METHOD_FULLPATH)\n', (12033, 12085), False, 'import xbmcplugin\n'), ((12107, 12180), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['self.addon_handle', 'xbmcplugin.SORT_METHOD_LABEL'], {}), '(self.addon_handle, xbmcplugin.SORT_METHOD_LABEL)\n', (12131, 12180), False, 'import xbmcplugin\n'), ((12203, 12276), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['self.addon_handle'], {'updateListing': 'updateListing'}), '(self.addon_handle, updateListing=updateListing)\n', (12228, 12276), False, 'import xbmcplugin\n'), ((12345, 12366), 'xmltodict.parse', 'xmltodict.parse', (['data'], {}), '(data)\n', (12360, 12366), False, 'import xmltodict\n'), ((19694, 19715), 'xmltodict.parse', 'xmltodict.parse', (['data'], {}), '(data)\n', (19709, 19715), False, 'import xmltodict\n'), ((2155, 2209), 'xbmcplugin.setResolvedUrl', 'xbmcplugin.setResolvedUrl', (['self.addon_handle', '(True)', 'li'], {}), '(self.addon_handle, True, li)\n', (2180, 2209), False, 'import xbmcplugin\n'), ((8608, 8628), 'xmltodict.parse', 'xmltodict.parse', (['res'], {}), '(res)\n', (8623, 8628), False, 'import xmltodict\n'), ((11342, 11386), 'xbmcplugin.endOfDirectory', 'xbmcplugin.endOfDirectory', (['self.addon_handle'], {}), '(self.addon_handle)\n', (11367, 11386), False, 'import xbmcplugin\n'), ((19747, 19806), 're.sub', 're.sub', (['"""[^A-Za-z0-9\']"""', '""" """', "opml['opml']['head']['title']"], {}), '("[^A-Za-z0-9\']", \' \', opml[\'opml\'][\'head\'][\'title\'])\n', (19753, 19806), False, 'import re\n'), ((1171, 1245), 're.match', 're.match', (['"""^https?:\\\\/\\\\/([^\\\\/]+).*\\\\/?.*\\\\/([^\\\\/]+)\\\\/?$"""', "o['@xmlUrl']"], {}), "('^https?:\\\\/\\\\/([^\\\\/]+).*\\\\/?.*\\\\/([^\\\\/]+)\\\\/?$', o['@xmlUrl'])\n", (1179, 1245), False, 'import re\n'), ((5254, 5299), 'xbmc.getInfoLabel', 'xbmc.getInfoLabel', (['"""System.BuildVersionShort"""'], {}), "('System.BuildVersionShort')\n", (5271, 5299), False, 'import xbmc\n'), ((6775, 6865), 're.findall', 're.findall', (['"""(\\\\d{1,2}) (\\\\w{3}) (\\\\d{4}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2})"""', "_ci['pubDate']"], {}), "('(\\\\d{1,2}) (\\\\w{3}) (\\\\d{4}) (\\\\d{2}):(\\\\d{2}):(\\\\d{2})', _ci[\n 'pubDate'])\n", (6785, 6865), False, 'import re\n'), ((9943, 10029), 'xbmc.log', 'xbmc.log', (["('HTTP Status Error: %s, path=%s' % (error.message, path))", 'xbmc.LOGERROR'], {}), "('HTTP Status Error: %s, path=%s' % (error.message, path), xbmc.\n LOGERROR)\n", (9951, 10029), False, 'import xbmc\n'), ((10911, 11022), 'xbmcplugin.addDirectoryItem', 'xbmcplugin.addDirectoryItem', ([], {'handle': 'self.addon_handle', 'listitem': 'li', 'url': "item['stream_url']", 'isFolder': '(False)'}), "(handle=self.addon_handle, listitem=li, url=item\n ['stream_url'], isFolder=False)\n", (10938, 11022), False, 'import 
xbmcplugin\n'), ((11236, 11308), 'xbmcplugin.addSortMethod', 'xbmcplugin.addSortMethod', (['self.addon_handle', 'xbmcplugin.SORT_METHOD_DATE'], {}), '(self.addon_handle, xbmcplugin.SORT_METHOD_DATE)\n', (11260, 11308), False, 'import xbmcplugin\n'), ((16524, 16540), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (16538, 16540), False, 'import xbmcgui\n'), ((16803, 16861), 'xbmc.log', 'xbmc.log', (["('Cannot read opml file %s' % path)", 'xbmc.LOGERROR'], {}), "('Cannot read opml file %s' % path, xbmc.LOGERROR)\n", (16811, 16861), False, 'import xbmc\n'), ((18435, 18451), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (18449, 18451), False, 'import xbmcgui\n'), ((19836, 19852), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (19850, 19852), False, 'import xbmcgui\n'), ((20708, 20724), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (20722, 20724), False, 'import xbmcgui\n'), ((20751, 20767), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (20765, 20767), False, 'import xbmcgui\n'), ((21350, 21366), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (21364, 21366), False, 'import xbmcgui\n'), ((22263, 22279), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (22277, 22279), False, 'import xbmcgui\n'), ((22917, 22933), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (22931, 22933), False, 'import xbmcgui\n'), ((23318, 23334), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (23332, 23334), False, 'import xbmcgui\n'), ((23811, 23827), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (23825, 23827), False, 'import xbmcgui\n'), ((3143, 3201), 'os.path.join', 'os.path.join', (['addon_dir', '"""resources"""', '"""assets"""', '"""icon.png"""'], {}), "(addon_dir, 'resources', 'assets', 'icon.png')\n", (3155, 3201), False, 'import os\n'), ((10489, 10503), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10501, 10503), False, 'from datetime import datetime\n'), ((13260, 13318), 'xbmc.log', 'xbmc.log', (["('Cannot read opml file %s' % path)", 'xbmc.LOGERROR'], {}), "('Cannot read opml file %s' % path, xbmc.LOGERROR)\n", (13268, 13318), False, 'import xbmc\n'), ((14662, 14701), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (['encoded_param'], {}), '(encoded_param)\n', (14686, 14701), False, 'import base64\n'), ((17152, 17168), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (17166, 17168), False, 'import xbmcgui\n'), ((18569, 18585), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (18583, 18585), False, 'import xbmcgui\n'), ((2265, 2281), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (2279, 2281), False, 'import xbmcgui\n'), ((10058, 10074), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (10072, 10074), False, 'import xbmcgui\n'), ((20200, 20216), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (20214, 20216), False, 'import xbmcgui\n'), ((21922, 21938), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (21936, 21938), False, 'import xbmcgui\n'), ((22664, 22680), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (22678, 22680), False, 'import xbmcgui\n'), ((17351, 17367), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (17365, 17367), False, 'import xbmcgui\n'), ((17536, 17552), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (17550, 17552), False, 'import xbmcgui\n')]
import re
import time

from lemoncheesecake.events import TestSessionSetupEndEvent, TestSessionTeardownEndEvent, \
    TestEndEvent, SuiteSetupEndEvent, SuiteTeardownEndEvent, SuiteEndEvent, SteppedEvent
from lemoncheesecake.reporting.report import ReportLocation

DEFAULT_REPORT_SAVING_STRATEGY = "at_each_failed_test"


def _is_end_of_result_event(event):
    if isinstance(event, TestEndEvent):
        return ReportLocation.in_test(event.test)

    if isinstance(event, SuiteSetupEndEvent):
        return ReportLocation.in_suite_setup(event.suite)

    if isinstance(event, SuiteTeardownEndEvent):
        return ReportLocation.in_suite_teardown(event.suite)

    if isinstance(event, TestSessionSetupEndEvent):
        return ReportLocation.in_test_session_setup()

    if isinstance(event, TestSessionTeardownEndEvent):
        return ReportLocation.in_test_session_teardown()

    return None


def save_at_each_suite_strategy(event, _):
    return isinstance(event, SuiteEndEvent)


def save_at_each_test_strategy(event, _):
    return _is_end_of_result_event(event) is not None


def save_at_each_failed_test_strategy(event, report):
    location = _is_end_of_result_event(event)
    if location:
        result = report.get(location)
        return result and result.status == "failed"
    else:
        return False


def save_at_each_log_strategy(event, _):
    return isinstance(event, SteppedEvent)


class SaveAtInterval(object):
    def __init__(self, interval):
        self.interval = interval
        self.last_saving = None

    def __call__(self, event, report):
        now = time.time()
        if self.last_saving:
            must_be_saved = now > self.last_saving + self.interval
            if must_be_saved:
                self.last_saving = now
            return must_be_saved
        else:
            self.last_saving = now  # not a saving but an initialization
            return False


def make_report_saving_strategy(expression):
    # first, try with a static expression
    static_expressions = {
        "at_end_of_tests": None,  # no need to an intermediate report saving in this case
        "at_each_suite": save_at_each_suite_strategy,
        "at_each_test": save_at_each_test_strategy,
        "at_each_failed_test": save_at_each_failed_test_strategy,
        "at_each_log": save_at_each_log_strategy,
        "at_each_event": save_at_each_log_strategy  # deprecated since 1.4.5, "at_each_log" must be used instead
    }
    try:
        return static_expressions[expression]
    except KeyError:
        pass

    # second, try with "every_Ns"
    m = re.compile(r"^every[_ ](\d+)s$").match(expression)
    if m:
        return SaveAtInterval(int(m.group(1)))

    # ok... nothing we know about
    raise ValueError("Invalid expression '%s' for report saving strategy" % expression)
[ "lemoncheesecake.reporting.report.ReportLocation.in_suite_teardown", "re.compile", "lemoncheesecake.reporting.report.ReportLocation.in_test_session_setup", "time.time", "lemoncheesecake.reporting.report.ReportLocation.in_test", "lemoncheesecake.reporting.report.ReportLocation.in_test_session_teardown", "lemoncheesecake.reporting.report.ReportLocation.in_suite_setup" ]
[((413, 447), 'lemoncheesecake.reporting.report.ReportLocation.in_test', 'ReportLocation.in_test', (['event.test'], {}), '(event.test)\n', (435, 447), False, 'from lemoncheesecake.reporting.report import ReportLocation\n'), ((510, 552), 'lemoncheesecake.reporting.report.ReportLocation.in_suite_setup', 'ReportLocation.in_suite_setup', (['event.suite'], {}), '(event.suite)\n', (539, 552), False, 'from lemoncheesecake.reporting.report import ReportLocation\n'), ((618, 663), 'lemoncheesecake.reporting.report.ReportLocation.in_suite_teardown', 'ReportLocation.in_suite_teardown', (['event.suite'], {}), '(event.suite)\n', (650, 663), False, 'from lemoncheesecake.reporting.report import ReportLocation\n'), ((732, 770), 'lemoncheesecake.reporting.report.ReportLocation.in_test_session_setup', 'ReportLocation.in_test_session_setup', ([], {}), '()\n', (768, 770), False, 'from lemoncheesecake.reporting.report import ReportLocation\n'), ((842, 883), 'lemoncheesecake.reporting.report.ReportLocation.in_test_session_teardown', 'ReportLocation.in_test_session_teardown', ([], {}), '()\n', (881, 883), False, 'from lemoncheesecake.reporting.report import ReportLocation\n'), ((1599, 1610), 'time.time', 'time.time', ([], {}), '()\n', (1608, 1610), False, 'import time\n'), ((2601, 2633), 're.compile', 're.compile', (['"""^every[_ ](\\\\d+)s$"""'], {}), "('^every[_ ](\\\\d+)s$')\n", (2611, 2633), False, 'import re\n')]
"""Genshin chronicle notes.""" import datetime import typing import pydantic from genshin.models.genshin import character from genshin.models.model import Aliased, APIModel __all__ = ["Expedition", "ExpeditionCharacter", "Notes"] def _process_timedelta(time: typing.Union[int, datetime.timedelta, datetime.datetime]) -> datetime.datetime: if isinstance(time, int): time = datetime.datetime.fromtimestamp(time).astimezone() if isinstance(time, datetime.timedelta): time = datetime.datetime.now().astimezone() + time if time < datetime.datetime(2000, 1, 1).astimezone(): delta = datetime.timedelta(seconds=int(time.timestamp())) time = datetime.datetime.now().astimezone() + delta time = time.replace(second=0, microsecond=0) return time class ExpeditionCharacter(character.BaseCharacter): """Expedition character.""" class Expedition(APIModel): """Real-Time note expedition.""" character: ExpeditionCharacter = Aliased("avatar_side_icon") status: typing.Literal["Ongoing", "Finished"] remaining_time: datetime.timedelta = Aliased("remained_time") @property def finished(self) -> bool: """Whether the expedition has finished.""" return self.remaining_time <= datetime.timedelta(0) @property def completion_time(self) -> datetime.datetime: return datetime.datetime.now().astimezone() + self.remaining_time @pydantic.validator("character", pre=True) def __complete_character(cls, v: typing.Any) -> ExpeditionCharacter: if isinstance(v, str): return ExpeditionCharacter(icon=v) # type: ignore return v class TransformerTimedelta(datetime.timedelta): """Transformer recovery time.""" @property def timedata(self) -> typing.Tuple[int, int, int, int]: seconds: int = super().seconds days: int = super().days hour, second = divmod(seconds, 3600) minute, second = divmod(second, 60) return days, hour, minute, second @property def hours(self) -> int: return self.timedata[1] @property def minutes(self) -> int: return self.timedata[2] @property def seconds(self) -> int: return self.timedata[3] class Notes(APIModel): """Real-Time notes.""" current_resin: int max_resin: int remaining_resin_recovery_time: datetime.timedelta = Aliased("resin_recovery_time") current_realm_currency: int = Aliased("current_home_coin") max_realm_currency: int = Aliased("max_home_coin") remaining_realm_currency_recovery_time: datetime.timedelta = Aliased("home_coin_recovery_time") completed_commissions: int = Aliased("finished_task_num") max_commissions: int = Aliased("total_task_num") claimed_commission_reward: bool = Aliased("is_extra_task_reward_received") remaining_resin_discounts: int = Aliased("remain_resin_discount_num") max_resin_discounts: int = Aliased("resin_discount_num_limit") remaining_transformer_recovery_time: typing.Optional[TransformerTimedelta] expeditions: typing.Sequence[Expedition] max_expeditions: int = Aliased("max_expedition_num") @property def resin_recovery_time(self) -> datetime.datetime: """The remaining time until resin recovery in seconds.""" return datetime.datetime.now().astimezone() + self.remaining_resin_recovery_time @property def realm_currency_recovery_time(self) -> datetime.datetime: """The remaining time until realm currency recovery in seconds.""" return datetime.datetime.now().astimezone() + self.remaining_realm_currency_recovery_time @property def transformer_recovery_time(self) -> typing.Optional[datetime.datetime]: """The remaining time until realm currency recovery in seconds.""" if self.remaining_transformer_recovery_time is None: return None remaining = datetime.datetime.now().astimezone() + 
self.remaining_transformer_recovery_time return remaining @pydantic.root_validator(pre=True) def __flatten_transformer(cls, values: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]: if "transformer_recovery_time" in values: return values if values.get("transformer") and values["transformer"]["obtained"]: t = values["transformer"]["recovery_time"] delta = TransformerTimedelta(days=t["Day"], hours=t["Hour"], minutes=t["Minute"], seconds=t["Second"]) values["remaining_transformer_recovery_time"] = delta else: values["remaining_transformer_recovery_time"] = None return values
[ "datetime.datetime", "genshin.models.model.Aliased", "datetime.datetime.fromtimestamp", "pydantic.root_validator", "pydantic.validator", "datetime.datetime.now", "datetime.timedelta" ]
[((988, 1015), 'genshin.models.model.Aliased', 'Aliased', (['"""avatar_side_icon"""'], {}), "('avatar_side_icon')\n", (995, 1015), False, 'from genshin.models.model import Aliased, APIModel\n'), ((1107, 1131), 'genshin.models.model.Aliased', 'Aliased', (['"""remained_time"""'], {}), "('remained_time')\n", (1114, 1131), False, 'from genshin.models.model import Aliased, APIModel\n'), ((1437, 1478), 'pydantic.validator', 'pydantic.validator', (['"""character"""'], {'pre': '(True)'}), "('character', pre=True)\n", (1455, 1478), False, 'import pydantic\n'), ((2410, 2440), 'genshin.models.model.Aliased', 'Aliased', (['"""resin_recovery_time"""'], {}), "('resin_recovery_time')\n", (2417, 2440), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2476, 2504), 'genshin.models.model.Aliased', 'Aliased', (['"""current_home_coin"""'], {}), "('current_home_coin')\n", (2483, 2504), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2535, 2559), 'genshin.models.model.Aliased', 'Aliased', (['"""max_home_coin"""'], {}), "('max_home_coin')\n", (2542, 2559), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2625, 2659), 'genshin.models.model.Aliased', 'Aliased', (['"""home_coin_recovery_time"""'], {}), "('home_coin_recovery_time')\n", (2632, 2659), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2694, 2722), 'genshin.models.model.Aliased', 'Aliased', (['"""finished_task_num"""'], {}), "('finished_task_num')\n", (2701, 2722), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2750, 2775), 'genshin.models.model.Aliased', 'Aliased', (['"""total_task_num"""'], {}), "('total_task_num')\n", (2757, 2775), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2814, 2854), 'genshin.models.model.Aliased', 'Aliased', (['"""is_extra_task_reward_received"""'], {}), "('is_extra_task_reward_received')\n", (2821, 2854), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2893, 2929), 'genshin.models.model.Aliased', 'Aliased', (['"""remain_resin_discount_num"""'], {}), "('remain_resin_discount_num')\n", (2900, 2929), False, 'from genshin.models.model import Aliased, APIModel\n'), ((2961, 2996), 'genshin.models.model.Aliased', 'Aliased', (['"""resin_discount_num_limit"""'], {}), "('resin_discount_num_limit')\n", (2968, 2996), False, 'from genshin.models.model import Aliased, APIModel\n'), ((3150, 3179), 'genshin.models.model.Aliased', 'Aliased', (['"""max_expedition_num"""'], {}), "('max_expedition_num')\n", (3157, 3179), False, 'from genshin.models.model import Aliased, APIModel\n'), ((4045, 4078), 'pydantic.root_validator', 'pydantic.root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4068, 4078), False, 'import pydantic\n'), ((1268, 1289), 'datetime.timedelta', 'datetime.timedelta', (['(0)'], {}), '(0)\n', (1286, 1289), False, 'import datetime\n'), ((389, 426), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['time'], {}), '(time)\n', (420, 426), False, 'import datetime\n'), ((560, 589), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (577, 589), False, 'import datetime\n'), ((501, 524), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (522, 524), False, 'import datetime\n'), ((685, 708), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (706, 708), False, 'import datetime\n'), ((1372, 1395), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1393, 1395), False, 'import datetime\n'), ((3332, 
3355), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3353, 3355), False, 'import datetime\n'), ((3576, 3599), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3597, 3599), False, 'import datetime\n'), ((3934, 3957), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3955, 3957), False, 'import datetime\n')]
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-docstring import tvm from tvm import meta_schedule as ms from tvm.ir import IRModule from tvm.meta_schedule.testing.conv2d_winograd_cpu import conv2d_winograd_cpu from tvm.target import Target from tvm.tir.schedule import Schedule, Trace def _get_mod(): # pylint: disable=invalid-name def inline(sch: Schedule): b1 = sch.get_block(name="A") b2 = sch.get_block(name="B") sch.compute_inline(block=b1) sch.compute_inline(block=b2) def input_tile_data_pad(sch: Schedule): b78 = sch.get_block(name="input_tile") l80 = sch.sample_compute_location(block=b78, decision=4) sch.compute_at(block=b78, loop=l80, preserve_unit_loops=True) b81 = sch.get_block(name="data_pad") l83 = sch.sample_compute_location(block=b81, decision=-2) sch.compute_at(block=b81, loop=l83, preserve_unit_loops=True) def data_pack(sch: Schedule): b18 = sch.get_block(name="data_pack") l19, l20, l21, l22, l23, l24 = sch.get_loops(block=b18) sch.unroll(loop=l19) sch.unroll(loop=l20) v25, v26 = sch.sample_perfect_tile( n=2, loop=l21, max_innermost_factor=64, decision=[9, 1], ) l27, l28 = sch.split(loop=l21, factors=[v25, v26]) v29, v30 = sch.sample_perfect_tile( n=2, loop=l22, max_innermost_factor=64, decision=[32, 4], ) l31, l32 = sch.split(loop=l22, factors=[v29, v30]) sch.unroll(loop=l23) sch.unroll(loop=l24) sch.reorder(l27, l31, l28, l32, l19, l20, l23, l24) def bgemm(sch: Schedule): bgemm = sch.get_block(name="bgemm") write_cache = sch.cache_write( block=bgemm, write_buffer_index=0, storage_scope="global", ) sch.annotate( block_or_loop=bgemm, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS", ) # b33, b34 = b34, b33 l35, l36, l37, l38, l39 = sch.get_loops(block=bgemm) v40, v41, v42, v43 = sch.sample_perfect_tile( n=4, loop=l35, max_innermost_factor=64, decision=[1, 2, 3, 1], ) l44, l45, l46, l47 = sch.split(loop=l35, factors=[v40, v41, v42, v43]) v48, v49, v50, v51 = sch.sample_perfect_tile( n=4, loop=l36, max_innermost_factor=64, decision=[1, 1, 1, 6], ) l52, l53, l54, l55 = sch.split(loop=l36, factors=[v48, v49, v50, v51]) v56, v57, v58, v59 = sch.sample_perfect_tile( n=4, loop=l37, max_innermost_factor=64, decision=[1, 1, 1, 9], ) l60, l61, l62, l63 = sch.split(loop=l37, factors=[v56, v57, v58, v59]) v64, v65, v66, v67 = sch.sample_perfect_tile( n=4, loop=l38, max_innermost_factor=64, decision=[2, 1, 16, 4], ) l68, l69, l70, l71 = sch.split(loop=l38, factors=[v64, v65, v66, v67]) v72, v73 = sch.sample_perfect_tile( n=2, loop=l39, max_innermost_factor=64, decision=[16, 8], ) l74, l75 = sch.split(loop=l39, factors=[v72, v73]) sch.reorder( # fmt: off l44, l52, l60, l68, l45, l53, l61, l69, l74, l46, l54, l62, l70, l75, l47, l55, l63, l71, # fmt: on ) sch.reverse_compute_at(block=write_cache, loop=l69, preserve_unit_loops=True) def 
inverse(sch: Schedule): b3 = sch.get_block(name="inverse") l4, l5, l6, l7, l8, l9 = sch.get_loops(block=b3) sch.unroll(loop=l4) sch.unroll(loop=l5) v10, v11 = sch.sample_perfect_tile( n=2, loop=l6, max_innermost_factor=64, decision=[1, 9], ) l12, l13 = sch.split(loop=l6, factors=[v10, v11]) v14, v15 = sch.sample_perfect_tile( n=2, loop=l7, max_innermost_factor=64, decision=[2, 64], ) l16, l17 = sch.split(loop=l7, factors=[v14, v15]) sch.unroll(loop=l8) sch.unroll(loop=l9) sch.reorder(l12, l16, l13, l17, l4, l5, l8, l9) # pylint: enable=invalid-name sch = Schedule(mod=conv2d_winograd_cpu) inline(sch) data_pack(sch) input_tile_data_pad(sch) bgemm(sch) inverse(sch) return sch.mod def test_conv2d_winograd_cpu(): mod = conv2d_winograd_cpu mod = IRModule({"main": mod}) target = Target("llvm --num-cores=16") context = ms.TuneContext( mod=mod, target=target, task_name="Custom Search Space Task", space_generator=ms.space_generator.PostOrderApply(), sch_rules=ms.default_config.schedule_rules( None, target, ), ) context.initialize() post_order_apply = context.space_generator (sch,) = post_order_apply.generate_design_space(mod) decisions = dict( zip( [i for i in sch.trace.insts[:-4] if i.kind.name.startswith("Sample")], [ # data_pack [9, 1], [32, 4], # input_tile 4, # data_pad -2, # inverse [1, 9], [2, 64], # bgemm [1, 2, 3, 1], [1, 1, 1, 6], [1, 1, 1, 9], [2, 1, 16, 4], [16, 8], ], ) ) trace = Trace(sch.trace.insts[:-4], decisions=decisions) sch = Schedule(mod=mod) trace.apply_to_schedule(sch, remove_postproc=False) answer = sch.mod expected = _get_mod() tvm.ir.assert_structural_equal(answer, expected) if __name__ == "__main__": test_conv2d_winograd_cpu()
[ "tvm.tir.schedule.Schedule", "tvm.meta_schedule.default_config.schedule_rules", "tvm.tir.schedule.Trace", "tvm.ir.assert_structural_equal", "tvm.target.Target", "tvm.ir.IRModule", "tvm.meta_schedule.space_generator.PostOrderApply" ]
[((5272, 5305), 'tvm.tir.schedule.Schedule', 'Schedule', ([], {'mod': 'conv2d_winograd_cpu'}), '(mod=conv2d_winograd_cpu)\n', (5280, 5305), False, 'from tvm.tir.schedule import Schedule, Trace\n'), ((5495, 5518), 'tvm.ir.IRModule', 'IRModule', (["{'main': mod}"], {}), "({'main': mod})\n", (5503, 5518), False, 'from tvm.ir import IRModule\n'), ((5532, 5561), 'tvm.target.Target', 'Target', (['"""llvm --num-cores=16"""'], {}), "('llvm --num-cores=16')\n", (5538, 5561), False, 'from tvm.target import Target\n'), ((6567, 6615), 'tvm.tir.schedule.Trace', 'Trace', (['sch.trace.insts[:-4]'], {'decisions': 'decisions'}), '(sch.trace.insts[:-4], decisions=decisions)\n', (6572, 6615), False, 'from tvm.tir.schedule import Schedule, Trace\n'), ((6626, 6643), 'tvm.tir.schedule.Schedule', 'Schedule', ([], {'mod': 'mod'}), '(mod=mod)\n', (6634, 6643), False, 'from tvm.tir.schedule import Schedule, Trace\n'), ((6751, 6799), 'tvm.ir.assert_structural_equal', 'tvm.ir.assert_structural_equal', (['answer', 'expected'], {}), '(answer, expected)\n', (6781, 6799), False, 'import tvm\n'), ((5702, 5737), 'tvm.meta_schedule.space_generator.PostOrderApply', 'ms.space_generator.PostOrderApply', ([], {}), '()\n', (5735, 5737), True, 'from tvm import meta_schedule as ms\n'), ((5757, 5803), 'tvm.meta_schedule.default_config.schedule_rules', 'ms.default_config.schedule_rules', (['None', 'target'], {}), '(None, target)\n', (5789, 5803), True, 'from tvm import meta_schedule as ms\n')]
#!/usr/bin/env python
import argparse
from igf_data.task_tracking.igf_slack import IGF_slack
from igf_data.process.data_transfer.sync_seqrun_data_on_remote import Sync_seqrun_data_from_remote

parser = argparse.ArgumentParser()
parser.add_argument('-r','--remote_server', required=True, help='Remote server address')
parser.add_argument('-p','--remote_base_path', required=True, help='Seqrun directory path in remote dir')
parser.add_argument('-d','--dbconfig', required=True, help='Database configuration file path')
parser.add_argument('-o','--output_dir', required=True, help='Local output directory path')
parser.add_argument('-n','--slack_config', required=True, help='Slack configuration file path')
args = parser.parse_args()

remote_server = args.remote_server
remote_base_path = args.remote_base_path
dbconfig = args.dbconfig
output_dir = args.output_dir
slack_config = args.slack_config

if __name__=='__main__':
  try:
    slack_obj=IGF_slack(slack_config=slack_config)  ## FIX ME
  except Exception as e:
    message = 'Error while syncing sequencing run directory from remote server: {0}'.format(e)
    slack_obj.post_message_to_channel(message,reaction='fail')
    raise ValueError(message)
[ "argparse.ArgumentParser", "igf_data.task_tracking.igf_slack.IGF_slack" ]
[((202, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (225, 227), False, 'import argparse\n'), ((944, 980), 'igf_data.task_tracking.igf_slack.IGF_slack', 'IGF_slack', ([], {'slack_config': 'slack_config'}), '(slack_config=slack_config)\n', (953, 980), False, 'from igf_data.task_tracking.igf_slack import IGF_slack\n')]
from typing import Callable, Iterable, TypeVar

T = TypeVar('T')
Num = TypeVar('Num', int, float)


def sumBy(array: Iterable[T], iteratee: Callable[[T], Num] = None, start: Num = 0) -> Num:
    if iteratee is None:
        return sum([y for y in array], start)
    return sum([iteratee(y) for y in array], start)
[ "typing.TypeVar" ]
[((52, 64), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (59, 64), False, 'from typing import Callable, Iterable, TypeVar\n'), ((71, 97), 'typing.TypeVar', 'TypeVar', (['"""Num"""', 'int', 'float'], {}), "('Num', int, float)\n", (78, 97), False, 'from typing import Callable, Iterable, TypeVar\n')]
import grpc

from consts import PORT, SERVER_CERT
from grpc_generated_files import api_pb2, api_pb2_grpc


def main(stub):
    request = api_pb2.ApiRequest(
        name="Shivam",
        message="Hey there!"
    )
    response = stub.ApiEndpoint(request)
    print(response)


if __name__ == "__main__":
    with open(SERVER_CERT, 'rb') as f:
        server_cert = f.read()
    creds = grpc.ssl_channel_credentials(server_cert)
    # the server IP should be in the common name of the certificate
    channel = grpc.secure_channel(f'localhost:{PORT}', creds)
    stub = api_pb2_grpc.ApiStub(channel)
    main(stub)
[ "grpc.ssl_channel_credentials", "grpc_generated_files.api_pb2.ApiRequest", "grpc.secure_channel", "grpc_generated_files.api_pb2_grpc.ApiStub" ]
[((137, 192), 'grpc_generated_files.api_pb2.ApiRequest', 'api_pb2.ApiRequest', ([], {'name': '"""Shivam"""', 'message': '"""Hey there!"""'}), "(name='Shivam', message='Hey there!')\n", (155, 192), False, 'from grpc_generated_files import api_pb2, api_pb2_grpc\n'), ((387, 428), 'grpc.ssl_channel_credentials', 'grpc.ssl_channel_credentials', (['server_cert'], {}), '(server_cert)\n', (415, 428), False, 'import grpc\n'), ((511, 558), 'grpc.secure_channel', 'grpc.secure_channel', (['f"""localhost:{PORT}"""', 'creds'], {}), "(f'localhost:{PORT}', creds)\n", (530, 558), False, 'import grpc\n'), ((570, 599), 'grpc_generated_files.api_pb2_grpc.ApiStub', 'api_pb2_grpc.ApiStub', (['channel'], {}), '(channel)\n', (590, 599), False, 'from grpc_generated_files import api_pb2, api_pb2_grpc\n')]
import pyblish.api
import avalon.api

from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data


class IncrementCurrentFile(pyblish.api.InstancePlugin):
    """Increment the current file.

    Saves the current scene with an increased version number.
    """

    label = "Increment current file"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["houdini"]
    families = ["colorbleed.usdrender", "redshift_rop"]
    targets = ["local"]

    def process(self, instance):

        # This should be a ContextPlugin, but this is a workaround
        # for a bug in pyblish to run once for a family: issue #250
        context = instance.context
        key = "__hasRun{}".format(self.__class__.__name__)
        if context.data.get(key, False):
            return
        else:
            context.data[key] = True

        context = instance.context
        errored_plugins = get_errored_plugins_from_data(context)
        if any(
            plugin.__name__ == "HoudiniSubmitPublishDeadline"
            for plugin in errored_plugins
        ):
            raise RuntimeError(
                "Skipping incrementing current file because "
                "submission to deadline failed."
            )

        # Filename must not have changed since collecting
        host = avalon.api.registered_host()
        current_file = host.current_file()
        assert (
            context.data["currentFile"] == current_file
        ), "Collected filename from current scene name."

        new_filepath = version_up(current_file)
        host.save(new_filepath)
[ "openpype.action.get_errored_plugins_from_data", "openpype.api.version_up" ]
[((922, 960), 'openpype.action.get_errored_plugins_from_data', 'get_errored_plugins_from_data', (['context'], {}), '(context)\n', (951, 960), False, 'from openpype.action import get_errored_plugins_from_data\n'), ((1549, 1573), 'openpype.api.version_up', 'version_up', (['current_file'], {}), '(current_file)\n', (1559, 1573), False, 'from openpype.api import version_up\n')]
from builtins import str from builtins import range from robust.simulations.simulate import filter_gamma_result_dict from SimPleAC_save import load_obj import pickle as pickle import numpy as np import matplotlib.pyplot as plt from SimPleAC_pof_simulate import pof_parameters if __name__ == "__main__": # Retrieving pof parameters [model, methods, gammas, number_of_iterations, min_num_of_linear_sections, max_num_of_linear_sections, verbosity, linearization_tolerance, number_of_time_average_solves, uncertainty_sets, nominal_solution, directly_uncertain_vars_subs, parallel, nominal_number_of_constraints, nominal_solve_time] = pof_parameters() method = methods[0] # only care about Best Pairs # Loading results margin = {} nGammas = nmargins = len(gammas) margins = gammas margin['solutions'] = {} for i in range(nmargins): margin['solutions'][margins[i]] = pickle.load(open("marginResults/" + str(margins[i]), 'rb')) margin['number_of_constraints'] = load_obj('marginnumber_of_constraints', 'marginResults') margin['simulation_results'] = load_obj('marginsimulation_results', 'marginResults') gamma = {} gamma['solutions'] = {} for i in range(nGammas): for j in range(len(methods)): for k in range((len(uncertainty_sets))): gamma['solutions'][gammas[i], methods[j]['name'], uncertainty_sets[k]] = pickle.load(open( "gammaResults\\" + str((gammas[i], methods[j]['name'], uncertainty_sets[k])), 'rb')) gamma['solve_times'] = load_obj('gammasolve_times', 'gammaResults') gamma['simulation_results'] = load_obj('gammasimulation_results', 'gammaResults') gamma['number_of_constraints'] = load_obj('gammanumber_of_constraints', 'gammaResults') # Plotting of cost and probability of failure objective_name = 'Total fuel weight' objective_units = 'N' title = '' filteredResults = [margin['solutions'], filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'box'), filter_gamma_result_dict(gamma['solutions'], 1, method['name'], 2, 'ellipsoidal')] filteredSimulations = [margin['simulation_results'], filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'box'), filter_gamma_result_dict(gamma['simulation_results'], 1, method['name'], 2, 'ellipsoidal')] objective_varkey = 'W_{f_m}' legend_keys = ['margins', 'box', 'ellipsoidal'] edgecolors = ['#FFBF00', '#CC0000', '#008000'] facecolors = ['#FFE135','#FF2052', '#8DB600'] fig, ax1 = plt.subplots() ax2 = ax1.twinx() lines = [] mincost = 1e10 maxcost = 0 for i in range(len(legend_keys)): sims = list(filteredSimulations[i].items()) pofs = [] objective_costs = [] objective_stddev = [] for j in sims: pofs.append(j[1][0]) objective_costs.append(j[1][1]) objective_stddev.append(j[1][2]) mincost = np.min([mincost] + objective_costs) maxcost = np.max([maxcost] + objective_costs) lines.append(ax1.plot(gammas, objective_costs, color=edgecolors[i], label=legend_keys[i] + ', cost')) inds = np.nonzero(np.ones(len(gammas)) - pofs)[0] uppers = [objective_costs[ind] + objective_stddev[ind] for ind in inds] lowers = [objective_costs[ind] - objective_stddev[ind] for ind in inds] x = [gammas[ind] for ind in inds] ax1.fill_between(x, lowers, uppers, alpha=0.5, edgecolor = edgecolors[i], facecolor = facecolors[i]) lines.append(ax2.plot(gammas, pofs, color=edgecolors[i], label=legend_keys[i] + ', PoF')) ax1.set_xlabel(r'Uncertainty Set Scaling Factor $\Gamma$', fontsize=12) ax1.set_ylabel('Cost [' + objective_name + ' (' + objective_units.capitalize() + ')]', fontsize=12) ax2.set_ylabel("Probability of Failure", fontsize=12) ax1.set_ylim([mincost, maxcost]) 
ax2.set_ylim([0, 1]) plt.title(title, fontsize=12) labs = [lines[l][0].get_label() for l in [1,3,5,0,2,4]] ax1.legend(labs, loc="lower right", fontsize=9, numpoints=1) # ax1.legend(loc="lower right", fontsize=10, numpoints=1) # fig.legend(loc="lower right", fontsize=10, numpoints=1) plt.show()
[ "matplotlib.pyplot.show", "SimPleAC_pof_simulate.pof_parameters", "builtins.str", "numpy.max", "builtins.range", "numpy.min", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "robust.simulations.simulate.filter_gamma_result_dict", "SimPleAC_save.load_obj" ]
[((659, 675), 'SimPleAC_pof_simulate.pof_parameters', 'pof_parameters', ([], {}), '()\n', (673, 675), False, 'from SimPleAC_pof_simulate import pof_parameters\n'), ((868, 883), 'builtins.range', 'range', (['nmargins'], {}), '(nmargins)\n', (873, 883), False, 'from builtins import range\n'), ((1093, 1149), 'SimPleAC_save.load_obj', 'load_obj', (['"""marginnumber_of_constraints"""', '"""marginResults"""'], {}), "('marginnumber_of_constraints', 'marginResults')\n", (1101, 1149), False, 'from SimPleAC_save import load_obj\n'), ((1185, 1238), 'SimPleAC_save.load_obj', 'load_obj', (['"""marginsimulation_results"""', '"""marginResults"""'], {}), "('marginsimulation_results', 'marginResults')\n", (1193, 1238), False, 'from SimPleAC_save import load_obj\n'), ((1296, 1310), 'builtins.range', 'range', (['nGammas'], {}), '(nGammas)\n', (1301, 1310), False, 'from builtins import range\n'), ((1662, 1706), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammasolve_times"""', '"""gammaResults"""'], {}), "('gammasolve_times', 'gammaResults')\n", (1670, 1706), False, 'from SimPleAC_save import load_obj\n'), ((1741, 1792), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammasimulation_results"""', '"""gammaResults"""'], {}), "('gammasimulation_results', 'gammaResults')\n", (1749, 1792), False, 'from SimPleAC_save import load_obj\n'), ((1830, 1884), 'SimPleAC_save.load_obj', 'load_obj', (['"""gammanumber_of_constraints"""', '"""gammaResults"""'], {}), "('gammanumber_of_constraints', 'gammaResults')\n", (1838, 1884), False, 'from SimPleAC_save import load_obj\n'), ((2756, 2770), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2768, 2770), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4199), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(12)'}), '(title, fontsize=12)\n', (4179, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4461, 4463), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2158), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['solutions']", '(1)', "method['name']", '(2)', '"""box"""'], {}), "(gamma['solutions'], 1, method['name'], 2, 'box')\n", (2109, 2158), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2183, 2268), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['solutions']", '(1)', "method['name']", '(2)', '"""ellipsoidal"""'], {}), "(gamma['solutions'], 1, method['name'], 2,\n 'ellipsoidal')\n", (2207, 2268), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2350, 2436), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['simulation_results']", '(1)', "method['name']", '(2)', '"""box"""'], {}), "(gamma['simulation_results'], 1, method['name'], 2,\n 'box')\n", (2374, 2436), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((2462, 2556), 'robust.simulations.simulate.filter_gamma_result_dict', 'filter_gamma_result_dict', (["gamma['simulation_results']", '(1)', "method['name']", '(2)', '"""ellipsoidal"""'], {}), "(gamma['simulation_results'], 1, method['name'], 2,\n 'ellipsoidal')\n", (2486, 2556), False, 'from robust.simulations.simulate import filter_gamma_result_dict\n'), ((3173, 3208), 'numpy.min', 'np.min', (['([mincost] + objective_costs)'], {}), '([mincost] + objective_costs)\n', (3179, 3208), True, 'import numpy as np\n'), ((3227, 3262), 'numpy.max', 
'np.max', (['([maxcost] + objective_costs)'], {}), '([maxcost] + objective_costs)\n', (3233, 3262), True, 'import numpy as np\n'), ((1031, 1046), 'builtins.str', 'str', (['margins[i]'], {}), '(margins[i])\n', (1034, 1046), False, 'from builtins import str\n'), ((1569, 1626), 'builtins.str', 'str', (["(gammas[i], methods[j]['name'], uncertainty_sets[k])"], {}), "((gammas[i], methods[j]['name'], uncertainty_sets[k]))\n", (1572, 1626), False, 'from builtins import str\n')]
#!/usr/bin/env python from __future__ import absolute_import import numpy as np import os import pytest import tempfile import training_data class TestTrainingData(): def test_add(self): td = training_data.training_data() assert np.array_equal(td.get_x(), np.empty([0, 4, 4], dtype=np.int)) assert np.array_equal(td.get_y_digit(), np.empty([0, 1], dtype=np.int)) assert np.allclose(td.get_reward(), np.empty([0, 1], dtype=np.float)) assert np.array_equal(td.get_next_x(), np.empty([0, 4, 4], dtype=np.int)) assert np.array_equal(td.get_done(), np.empty([0, 1], dtype=np.bool)) td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), True) assert np.array_equal(td.get_x(), np.ones([1, 4, 4], dtype=np.int)) assert np.array_equal(td.get_y_digit(), np.array([[1]], dtype=np.int)) assert np.allclose(td.get_reward(), np.array([[4]], dtype=np.float)) assert np.array_equal(td.get_next_x(), np.zeros([1, 4, 4], dtype=np.int)) assert np.array_equal(td.get_done(), np.array([[1]], dtype=np.bool)) def test_get_x_stacked(self): td = training_data.training_data() td.add(np.full([4, 4], 2), 0, 4, np.zeros([4, 4])) td.add(np.full([4, 4], 8), 1, 8, np.ones([4, 4])) td.add(np.full([4, 4], 2048), 1, 8, np.ones([4, 4])) expected_x_stacked = np.array([ [ [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ], [ [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ], [ [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]] ] ], dtype=np.int) assert np.array_equal(td.get_x_stacked(), expected_x_stacked) def test_get_y_one_hot(self): td = training_data.training_data() td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4])) td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4])) td.add(np.zeros([4, 4]), 3, 8, np.ones([4, 4])) td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4])) expected_y_one_hot = np.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0] ], dtype=np.int) assert np.array_equal(td.get_y_one_hot(), expected_y_one_hot) def test_get_total_reward(self): td = training_data.training_data() td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4])) td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4])) td.add(np.zeros([4, 4]), 3, 16, np.ones([4, 4])) td.add(np.zeros([4, 4]), 2, 32, np.ones([4, 4])) assert td.get_total_reward() == 60 def test_get_highest_tile(self): td = training_data.training_data() td.add(np.full((4, 4), 1), 0, 4, np.full((4, 4), 2)) td.add(np.full((4, 4), 2), 0, 4, np.full((4, 4), 4)) assert td.get_highest_tile() == 4 def test_get_n(self): td = training_data.training_data() td.add(np.ones([4, 4]), 1, 4, np.zeros([4, 4])) td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4])) (state, action, reward, next_state, done) = td.get_n(1) assert np.array_equal(state, np.zeros([4, 4], dtype=np.int)) assert action == 2 assert reward == pytest.approx(8.) assert np.array_equal(next_state, np.ones([4, 4], dtype=np.int)) def test_hflip(self): td = training_data.training_data() board1 = np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) board2 = np.array([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) td.add(board1, 1, 2, board2) td.add(board2, 2, 0, board1) td.hflip() expected_x = np.array([ [[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [3], [2] ], dtype=np.int) expected_reward = np.array([ [2], [0], ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) assert np.array_equal(td.get_x(), expected_x) assert np.array_equal(td.get_y_digit(), expected_y_digit) assert np.allclose(td.get_reward(), expected_reward) assert np.allclose(td.get_next_x(), expected_next_x) def test_rotate(self): td = training_data.training_data() board1 = np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) board2 = np.array([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) td.add(board1, 1, 2, board2) td.add(board2, 2, 0, board1) td.rotate(3) expected_x = np.array([ [[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [0], [1] ], dtype=np.int) expected_reward = np.array([ [2], [0], ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]] ], dtype=np.int) assert np.array_equal(td.get_x(), expected_x) assert np.array_equal(td.get_y_digit(), expected_y_digit) assert np.allclose(td.get_reward(), expected_reward) assert np.array_equal(td.get_next_x(), expected_next_x) def test_augment(self): td = training_data.training_data() initial_board = np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) next_board = np.array([[0, 0, 0, 2], [0, 2, 0, 0], 
[0, 0, 0, 0], [0, 0, 0, 0]]) td.add(initial_board, 1, 4, next_board) td.augment() assert td.size() == 8 expected_x = np.array([ [[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [1], [3], [2], [0], [3], [1], [0], [2] ], dtype=np.int) expected_reward = np.array([ [4], [4], [4], [4], [4], [4], [4], [4] ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Original [[2, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Hflip'd [[0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], # Original, rotated 90 degrees [[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]], # Hflip, rotated 90 degrees [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]], # Original, rotated 180 degrees [[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], # Hflip, rotated 180 degrees [[2, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], # Original, rotate 270 degrees [[0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]] # Hflip, rotated 270 degrees ], dtype=np.int) assert np.array_equal(td.get_x(), expected_x) assert np.array_equal(td.get_y_digit(), expected_y_digit) assert np.allclose(td.get_reward(), expected_reward) assert np.array_equal(td.get_next_x(), expected_next_x) def test_merge(self): td = training_data.training_data() td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4])) td2 = training_data.training_data() td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4])) td.merge(td2) expected_x = np.array([ [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [1], [2] ], dtype=np.int) expected_reward = np.array([ [16], [0] ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]] ], dtype=np.int) assert np.array_equal(td.get_x(), expected_x) assert np.array_equal(td.get_y_digit(), expected_y_digit) assert np.allclose(td.get_reward(), expected_reward) assert np.array_equal(td.get_next_x(), expected_next_x) def test_split(self): td = training_data.training_data() td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4])) td2 = training_data.training_data() td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4])) td.merge(td2) a, b = td.split() assert np.array_equal(a.get_x(), np.ones([1, 4, 4])) assert np.array_equal(a.get_y_digit(), [[1]]) assert np.array_equal(a.get_reward(), [[16]]) assert np.array_equal(a.get_next_x(), np.zeros([1, 4, 4])) assert np.array_equal(b.get_x(), np.zeros([1, 4, 4])) assert np.array_equal(b.get_y_digit(), [[2]]) assert np.array_equal(b.get_reward(), [[0]]) assert np.array_equal(b.get_next_x(), np.ones([1, 4, 4])) def test_sample(self): td = training_data.training_data() td.add(np.zeros([1, 4, 4]), 0, 0, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 1, 1, np.ones([1, 4, 4])) sample = td.sample([1]) assert sample.size() == 1 assert sample.get_y_digit() in [[[0]], [[1]]] if sample.get_y_digit() == 0: assert np.array_equal(sample.get_x(), np.zeros([1, 4, 
4])) if sample.get_y_digit() == 1: assert np.array_equal(sample.get_x(), np.ones([1, 4, 4])) def test_size(self): td = training_data.training_data() assert td.size() == 0 td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4])) assert td.size() == 1 def test_log2_rewards(self): # Set up training data td = training_data.training_data() td.add(np.ones([1, 4, 4]), 0, 0, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 3, 16, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 0, 75, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 1, 2048, np.zeros([1, 4, 4])) td.log2_rewards() expected_reward = np.array([ [0], [1], [2], [4], [6.2288], [11] ], dtype=np.float) assert np.allclose(td.get_reward(), expected_reward) expected_action = np.array([ [0], [1], [2], [3], [0], [1] ], dtype=np.int) assert np.allclose(td.get_y_digit(), expected_action) def test_get_discounted_return(self): # Set up training data td = training_data.training_data() td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4])) # Test using default gamma value of 0.9 td2 = td.copy() discounted_return = td2.get_discounted_return() expected_return = np.array([ [20.218], [18.02], [17.8], [2.0] ], dtype=np.float) assert np.allclose(discounted_return, expected_return) # Test using gamma value of 0, should have no effect on rewards td2 = td.copy() discounted_return = td2.get_discounted_return(gamma=0.0) expected_return = np.array([ [4], [2], [16], [2] ], dtype=np.float) assert np.allclose(discounted_return, expected_return) # Test end of episode td3 = training_data.training_data() td3.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]), False) td3.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]), True) td3.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]), False) td3.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]), True) discounted_return = td3.get_discounted_return() expected_return = np.array([ [5.8], [2.0], [17.8], [2.0] ], dtype=np.float) assert np.allclose(discounted_return, expected_return) def test_normalize_rewards(self): # Test calculating mean and standard deviation td = training_data.training_data() td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4])) td.normalize_rewards() expected_reward = np.array([ [-0.8165], [-0.8165], [0.], [1.633], ], dtype=np.float) assert np.allclose(td.get_reward(), expected_reward) # Test specifying mean and standard deviation td = training_data.training_data() td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4])) td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4])) td.normalize_rewards(mean=8, sd=1) expected_reward = np.array([ [-4.], [-4.], [0.], [8.], ], dtype=np.float) assert np.allclose(td.get_reward(), expected_reward) def test_normalize_boards(self): # Test calculating mean and standard deviation td = training_data.training_data() td.add(np.full((1, 4, 4), 4), 1, 4, np.full((1, 4, 4), 8)) td.add(np.full((1, 4, 4), 8), 2, 4, np.full((1, 4, 4), 16)) td.add(np.full((1, 4, 4), 16), 3, 4, np.full((1, 4, 4), 32)) td.add(np.full((1, 4, 4), 32), 4, 4, np.full((1, 4, 4), 64)) 
td.normalize_boards() mean = 15. sd = 10.7238052947636 a = (4. - mean) / sd b = (8. - mean) / sd c = (16. - mean) / sd d = (32. - mean) / sd e = (64. - mean) / sd expected_x = np.array([ [[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]], [[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]] ], dtype=np.float) assert np.allclose(td.get_x(), expected_x) expected_next_x = np.array([ [[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]], [[e, e, e, e], [e, e, e, e], [e, e, e, e], [e, e, e, e]] ], dtype=np.float) assert np.allclose(td.get_next_x(), expected_next_x) def test_save_restore(self): # Set up training data td = training_data.training_data() td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4])) td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4])) td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4])) temp_dir = tempfile.mkdtemp() temp_filename = os.path.join(temp_dir, 'data.csv') td.export_csv(temp_filename) td2 = training_data.training_data() td2.import_csv(temp_filename) expected_x = np.array([ [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [0], [1], [2], [3] ], dtype=np.int) expected_reward = np.array([ [4], [2], [16], [2] ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]] ], dtype=np.int) assert np.array_equal(td2.get_x(), expected_x) assert np.array_equal(td2.get_y_digit(), expected_y_digit) assert np.allclose(td2.get_reward(), expected_reward) assert np.array_equal(td2.get_next_x(), expected_next_x) os.remove(temp_filename) os.rmdir(temp_dir) def test_shuffle(self): td = training_data.training_data() n = 5 for i in range(n): # Use "is odd" for done td.add(np.full((1, 4, 4), i), i, i, np.full((1, 4, 4), i), (i % 2) == 1) td.shuffle() for i in range(n): # Find where this has been shuffled too index_of_val = np.where(td.get_y_digit() == i)[0].item(0) # Check that all parts of this equal i arrays = td.get_n(index_of_val) for a in arrays: if a.dtype is np.dtype(np.bool): assert((a == ((i % 2) == 1)).all()) else: assert((a == i).all()) def test_make_boards_unique(self): td = training_data.training_data() td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4])) td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4])) td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4])) td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4])) td.make_boards_unique() expected_x = np.array([ [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] ], dtype=np.int) expected_y_digit = np.array([ [0], [1] ], dtype=np.int) expected_reward = np.array([ [4], [2] ], dtype=np.float) expected_next_x = np.array([ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]] ], dtype=np.int) assert np.array_equal(td.get_x(), expected_x) assert 
np.array_equal(td.get_y_digit(), expected_y_digit) assert np.allclose(td.get_reward(), expected_reward) assert np.array_equal(td.get_next_x(), expected_next_x) if __name__ == '__main__': import pytest pytest.main()
[ "pytest.approx", "numpy.allclose", "numpy.ones", "os.path.join", "pytest.main", "training_data.training_data", "numpy.array", "os.rmdir", "tempfile.mkdtemp", "numpy.empty", "numpy.zeros", "numpy.full", "numpy.dtype", "os.remove" ]
[((22956, 22969), 'pytest.main', 'pytest.main', ([], {}), '()\n', (22967, 22969), False, 'import pytest\n'), ((208, 237), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (235, 237), False, 'import training_data\n'), ((1140, 1169), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (1167, 1169), False, 'import training_data\n'), ((1377, 3983), 'numpy.array', 'np.array', (['[[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0]]], [[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, \n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, \n 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]]]'], {'dtype': 'np.int'}), '([[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
[1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [[0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], [[[0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,\n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0]], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]]], dtype\n =np.int)\n', (1385, 3983), True, 'import numpy as np\n'), ((4654, 4683), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (4681, 4683), False, 'import training_data\n'), ((4937, 5022), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {'dtype': 'np.int'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype=np.int\n )\n', (4945, 5022), True, 'import numpy as np\n'), ((5201, 5230), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5228, 5230), False, 'import training_data\n'), ((5551, 5580), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5578, 5580), False, 'import training_data\n'), ((5785, 5814), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (5812, 5814), False, 'import training_data\n'), ((6243, 6272), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (6270, 6272), False, 'import training_data\n'), ((6290, 6356), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (6298, 6356), True, 'import numpy as np\n'), ((6455, 6521), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (6463, 6521), True, 'import numpy as np\n'), ((6717, 6861), 'numpy.array', 'np.array', (['[[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 4, 2], [0, 0, 0, 
0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (6725, 6861), True, 'import numpy as np\n'), ((6923, 6957), 'numpy.array', 'np.array', (['[[3], [2]]'], {'dtype': 'np.int'}), '([[3], [2]], dtype=np.int)\n', (6931, 6957), True, 'import numpy as np\n'), ((7022, 7058), 'numpy.array', 'np.array', (['[[2], [0]]'], {'dtype': 'np.float'}), '([[2], [0]], dtype=np.float)\n', (7030, 7058), True, 'import numpy as np\n'), ((7124, 7268), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (7132, 7268), True, 'import numpy as np\n'), ((7586, 7615), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (7613, 7615), False, 'import training_data\n'), ((7633, 7699), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (7641, 7699), True, 'import numpy as np\n'), ((7798, 7864), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [2, 4, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (7806, 7864), True, 'import numpy as np\n'), ((8062, 8206), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]], dtype=np.int)\n', (8070, 8206), True, 'import numpy as np\n'), ((8268, 8302), 'numpy.array', 'np.array', (['[[0], [1]]'], {'dtype': 'np.int'}), '([[0], [1]], dtype=np.int)\n', (8276, 8302), True, 'import numpy as np\n'), ((8367, 8403), 'numpy.array', 'np.array', (['[[2], [0]]'], {'dtype': 'np.float'}), '([[2], [0]], dtype=np.float)\n', (8375, 8403), True, 'import numpy as np\n'), ((8469, 8613), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]], dtype=np.int)\n', (8477, 8613), True, 'import numpy as np\n'), ((8935, 8964), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (8962, 8964), False, 'import training_data\n'), ((8989, 9055), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (8997, 9055), True, 'import numpy as np\n'), ((9179, 9245), 'numpy.array', 'np.array', (['[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (9187, 9245), True, 'import numpy as np\n'), ((9459, 9972), 'numpy.array', 'np.array', (['[[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0, 0, 1],\n [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1],\n [0, 0, 0, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 
0]], [[0, 0, 0, 0],\n [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [1, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0,\n 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0,\n 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 0, 1], [0, 0, 0, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]], [[0, \n 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]], [[1, 0, 0, 0], [1,\n 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (9467, 9972), True, 'import numpy as np\n'), ((10085, 10149), 'numpy.array', 'np.array', (['[[1], [3], [2], [0], [3], [1], [0], [2]]'], {'dtype': 'np.int'}), '([[1], [3], [2], [0], [3], [1], [0], [2]], dtype=np.int)\n', (10093, 10149), True, 'import numpy as np\n'), ((10286, 10352), 'numpy.array', 'np.array', (['[[4], [4], [4], [4], [4], [4], [4], [4]]'], {'dtype': 'np.float'}), '([[4], [4], [4], [4], [4], [4], [4], [4]], dtype=np.float)\n', (10294, 10352), True, 'import numpy as np\n'), ((10489, 11002), 'numpy.array', 'np.array', (['[[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[2, 0, 0, 0], [\n 0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 2, 0],\n [0, 0, 0, 0], [0, 0, 0, 2]], [[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0],\n [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], [[2, 0, 0, 0],\n [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 2, 0, 0],\n [0, 0, 0, 0], [2, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[2, 0,\n 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0,\n 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], [[0, 0, 0, 2], [0, 0, 0, 0], [0, 0,\n 2, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0,\n 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], [[2, \n 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0,\n 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]]], dtype=np.int)\n', (10497, 11002), True, 'import numpy as np\n'), ((11575, 11604), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (11602, 11604), False, 'import training_data\n'), ((11682, 11711), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (11709, 11711), False, 'import training_data\n'), ((11818, 11962), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (11826, 11962), True, 'import numpy as np\n'), ((12024, 12058), 'numpy.array', 'np.array', (['[[1], [2]]'], {'dtype': 'np.int'}), '([[1], [2]], dtype=np.int)\n', (12032, 12058), True, 'import numpy as np\n'), ((12123, 12160), 'numpy.array', 'np.array', (['[[16], [0]]'], {'dtype': 'np.float'}), '([[16], [0]], dtype=np.float)\n', (12131, 12160), True, 'import numpy as np\n'), ((12225, 12369), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], 
dtype=np.int)\n', (12233, 12369), True, 'import numpy as np\n'), ((12689, 12718), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (12716, 12718), False, 'import training_data\n'), ((12796, 12825), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (12823, 12825), False, 'import training_data\n'), ((13449, 13478), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (13476, 13478), False, 'import training_data\n'), ((13979, 14008), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (14006, 14008), False, 'import training_data\n'), ((14209, 14238), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (14236, 14238), False, 'import training_data\n'), ((14668, 14730), 'numpy.array', 'np.array', (['[[0], [1], [2], [4], [6.2288], [11]]'], {'dtype': 'np.float'}), '([[0], [1], [2], [4], [6.2288], [11]], dtype=np.float)\n', (14676, 14730), True, 'import numpy as np\n'), ((14844, 14898), 'numpy.array', 'np.array', (['[[0], [1], [2], [3], [0], [1]]'], {'dtype': 'np.int'}), '([[0], [1], [2], [3], [0], [1]], dtype=np.int)\n', (14852, 14898), True, 'import numpy as np\n'), ((15074, 15103), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (15101, 15103), False, 'import training_data\n'), ((15508, 15568), 'numpy.array', 'np.array', (['[[20.218], [18.02], [17.8], [2.0]]'], {'dtype': 'np.float'}), '([[20.218], [18.02], [17.8], [2.0]], dtype=np.float)\n', (15516, 15568), True, 'import numpy as np\n'), ((15610, 15657), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (15621, 15657), True, 'import numpy as np\n'), ((15846, 15893), 'numpy.array', 'np.array', (['[[4], [2], [16], [2]]'], {'dtype': 'np.float'}), '([[4], [2], [16], [2]], dtype=np.float)\n', (15854, 15893), True, 'import numpy as np\n'), ((15935, 15982), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (15946, 15982), True, 'import numpy as np\n'), ((16028, 16057), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (16055, 16057), False, 'import training_data\n'), ((16419, 16474), 'numpy.array', 'np.array', (['[[5.8], [2.0], [17.8], [2.0]]'], {'dtype': 'np.float'}), '([[5.8], [2.0], [17.8], [2.0]], dtype=np.float)\n', (16427, 16474), True, 'import numpy as np\n'), ((16516, 16563), 'numpy.allclose', 'np.allclose', (['discounted_return', 'expected_return'], {}), '(discounted_return, expected_return)\n', (16527, 16563), True, 'import numpy as np\n'), ((16671, 16700), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (16698, 16700), False, 'import training_data\n'), ((17007, 17071), 'numpy.array', 'np.array', (['[[-0.8165], [-0.8165], [0.0], [1.633]]'], {'dtype': 'np.float'}), '([[-0.8165], [-0.8165], [0.0], [1.633]], dtype=np.float)\n', (17015, 17071), True, 'import numpy as np\n'), ((17226, 17255), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (17253, 17255), False, 'import training_data\n'), ((17574, 17630), 'numpy.array', 'np.array', (['[[-4.0], [-4.0], [0.0], [8.0]]'], {'dtype': 'np.float'}), '([[-4.0], [-4.0], [0.0], [8.0]], dtype=np.float)\n', (17582, 17630), True, 'import numpy as np\n'), ((17821, 17850), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (17848, 17850), False, 'import training_data\n'), 
((18372, 18642), 'numpy.array', 'np.array', (['[[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]], [[b, b, b, b], [\n b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c, c, c],\n [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d, d, d],\n [d, d, d, d]]]'], {'dtype': 'np.float'}), '([[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]], [[b, b,\n b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [c, c,\n c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d], [d, d,\n d, d], [d, d, d, d]]], dtype=np.float)\n', (18380, 18642), True, 'import numpy as np\n'), ((18770, 19040), 'numpy.array', 'np.array', (['[[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c, c, c], [\n c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d, d, d],\n [d, d, d, d], [d, d, d, d]], [[e, e, e, e], [e, e, e, e], [e, e, e, e],\n [e, e, e, e]]]'], {'dtype': 'np.float'}), '([[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]], [[c, c,\n c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]], [[d, d, d, d], [d, d,\n d, d], [d, d, d, d], [d, d, d, d]], [[e, e, e, e], [e, e, e, e], [e, e,\n e, e], [e, e, e, e]]], dtype=np.float)\n', (18778, 19040), True, 'import numpy as np\n'), ((19230, 19259), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (19257, 19259), False, 'import training_data\n'), ((19529, 19547), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (19545, 19547), False, 'import tempfile\n'), ((19572, 19606), 'os.path.join', 'os.path.join', (['temp_dir', '"""data.csv"""'], {}), "(temp_dir, 'data.csv')\n", (19584, 19606), False, 'import os\n'), ((19659, 19688), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (19686, 19688), False, 'import training_data\n'), ((19749, 20017), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1],\n [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0,\n 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (19757, 20017), True, 'import numpy as np\n'), ((20095, 20139), 'numpy.array', 'np.array', (['[[0], [1], [2], [3]]'], {'dtype': 'np.int'}), '([[0], [1], [2], [3]], dtype=np.int)\n', (20103, 20139), True, 'import numpy as np\n'), ((20228, 20275), 'numpy.array', 'np.array', (['[[4], [2], [16], [2]]'], {'dtype': 'np.float'}), '([[4], [2], [16], [2]], dtype=np.float)\n', (20236, 20275), True, 'import numpy as np\n'), ((20364, 20632), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0, 0, 0],\n [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1],\n [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1,\n 1, 1], [1, 1, 1, 1]]], dtype=np.int)\n', (20372, 20632), True, 'import numpy as np\n'), ((20940, 20964), 'os.remove', 'os.remove', (['temp_filename'], {}), '(temp_filename)\n', (20949, 20964), False, 'import os\n'), 
((20973, 20991), 'os.rmdir', 'os.rmdir', (['temp_dir'], {}), '(temp_dir)\n', (20981, 20991), False, 'import os\n'), ((21034, 21063), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (21061, 21063), False, 'import training_data\n'), ((21744, 21773), 'training_data.training_data', 'training_data.training_data', ([], {}), '()\n', (21771, 21773), False, 'import training_data\n'), ((22076, 22220), 'numpy.array', 'np.array', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, 0, 0], [\n 0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.int'}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0,\n 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.int)\n', (22084, 22220), True, 'import numpy as np\n'), ((22282, 22316), 'numpy.array', 'np.array', (['[[0], [1]]'], {'dtype': 'np.int'}), '([[0], [1]], dtype=np.int)\n', (22290, 22316), True, 'import numpy as np\n'), ((22381, 22417), 'numpy.array', 'np.array', (['[[4], [2]]'], {'dtype': 'np.float'}), '([[4], [2]], dtype=np.float)\n', (22389, 22417), True, 'import numpy as np\n'), ((22482, 22626), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1, 1, 1], [\n 1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]'], {'dtype': 'np.int'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[1, 1,\n 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=np.int)\n', (22490, 22626), True, 'import numpy as np\n'), ((280, 313), 'numpy.empty', 'np.empty', (['[0, 4, 4]'], {'dtype': 'np.int'}), '([0, 4, 4], dtype=np.int)\n', (288, 313), True, 'import numpy as np\n'), ((363, 393), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (371, 393), True, 'import numpy as np\n'), ((439, 471), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.float'}), '([0, 1], dtype=np.float)\n', (447, 471), True, 'import numpy as np\n'), ((520, 553), 'numpy.empty', 'np.empty', (['[0, 4, 4]'], {'dtype': 'np.int'}), '([0, 4, 4], dtype=np.int)\n', (528, 553), True, 'import numpy as np\n'), ((600, 631), 'numpy.empty', 'np.empty', (['[0, 1]'], {'dtype': 'np.bool'}), '([0, 1], dtype=np.bool)\n', (608, 631), True, 'import numpy as np\n'), ((648, 666), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (655, 666), True, 'import numpy as np\n'), ((674, 693), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (682, 693), True, 'import numpy as np\n'), ((743, 775), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {'dtype': 'np.int'}), '([1, 4, 4], dtype=np.int)\n', (750, 775), True, 'import numpy as np\n'), ((825, 854), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.int'}), '([[1]], dtype=np.int)\n', (833, 854), True, 'import numpy as np\n'), ((900, 931), 'numpy.array', 'np.array', (['[[4]]'], {'dtype': 'np.float'}), '([[4]], dtype=np.float)\n', (908, 931), True, 'import numpy as np\n'), ((980, 1013), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {'dtype': 'np.int'}), '([1, 4, 4], dtype=np.int)\n', (988, 1013), True, 'import numpy as np\n'), ((1060, 1090), 'numpy.array', 'np.array', (['[[1]]'], {'dtype': 'np.bool'}), '([[1]], dtype=np.bool)\n', (1068, 1090), True, 'import numpy as np\n'), ((1185, 1203), 'numpy.full', 'np.full', (['[4, 4]', '(2)'], {}), '([4, 4], 2)\n', (1192, 1203), True, 'import numpy as np\n'), ((1211, 1227), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (1219, 1227), True, 'import numpy as np\n'), ((1244, 1262), 'numpy.full', 'np.full', (['[4, 4]', '(8)'], {}), '([4, 4], 
8)\n', (1251, 1262), True, 'import numpy as np\n'), ((1270, 1285), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (1277, 1285), True, 'import numpy as np\n'), ((1302, 1323), 'numpy.full', 'np.full', (['[4, 4]', '(2048)'], {}), '([4, 4], 2048)\n', (1309, 1323), True, 'import numpy as np\n'), ((1331, 1346), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (1338, 1346), True, 'import numpy as np\n'), ((4699, 4714), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4706, 4714), True, 'import numpy as np\n'), ((4722, 4738), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4730, 4738), True, 'import numpy as np\n'), ((4755, 4771), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4763, 4771), True, 'import numpy as np\n'), ((4779, 4794), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4786, 4794), True, 'import numpy as np\n'), ((4811, 4827), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4819, 4827), True, 'import numpy as np\n'), ((4835, 4850), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4842, 4850), True, 'import numpy as np\n'), ((4867, 4883), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4875, 4883), True, 'import numpy as np\n'), ((4891, 4906), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (4898, 4906), True, 'import numpy as np\n'), ((5246, 5261), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5253, 5261), True, 'import numpy as np\n'), ((5269, 5285), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5277, 5285), True, 'import numpy as np\n'), ((5302, 5318), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5310, 5318), True, 'import numpy as np\n'), ((5326, 5341), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5333, 5341), True, 'import numpy as np\n'), ((5358, 5374), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5366, 5374), True, 'import numpy as np\n'), ((5383, 5398), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5390, 5398), True, 'import numpy as np\n'), ((5415, 5431), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5423, 5431), True, 'import numpy as np\n'), ((5440, 5455), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5447, 5455), True, 'import numpy as np\n'), ((5596, 5614), 'numpy.full', 'np.full', (['(4, 4)', '(1)'], {}), '((4, 4), 1)\n', (5603, 5614), True, 'import numpy as np\n'), ((5622, 5640), 'numpy.full', 'np.full', (['(4, 4)', '(2)'], {}), '((4, 4), 2)\n', (5629, 5640), True, 'import numpy as np\n'), ((5657, 5675), 'numpy.full', 'np.full', (['(4, 4)', '(2)'], {}), '((4, 4), 2)\n', (5664, 5675), True, 'import numpy as np\n'), ((5683, 5701), 'numpy.full', 'np.full', (['(4, 4)', '(4)'], {}), '((4, 4), 4)\n', (5690, 5701), True, 'import numpy as np\n'), ((5830, 5845), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5837, 5845), True, 'import numpy as np\n'), ((5853, 5869), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5861, 5869), True, 'import numpy as np\n'), ((5886, 5902), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (5894, 5902), True, 'import numpy as np\n'), ((5910, 5925), 'numpy.ones', 'np.ones', (['[4, 4]'], {}), '([4, 4])\n', (5917, 5925), True, 'import numpy as np\n'), ((6028, 6058), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {'dtype': 'np.int'}), '([4, 4], dtype=np.int)\n', (6036, 6058), True, 'import numpy as np\n'), ((6112, 6130), 'pytest.approx', 'pytest.approx', (['(8.0)'], {}), '(8.0)\n', (6125, 6130), False, 'import pytest\n'), 
((6172, 6201), 'numpy.ones', 'np.ones', (['[4, 4]'], {'dtype': 'np.int'}), '([4, 4], dtype=np.int)\n', (6179, 6201), True, 'import numpy as np\n'), ((11620, 11638), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11627, 11638), True, 'import numpy as np\n'), ((11647, 11666), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11655, 11666), True, 'import numpy as np\n'), ((11728, 11747), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11736, 11747), True, 'import numpy as np\n'), ((11755, 11773), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (11762, 11773), True, 'import numpy as np\n'), ((12734, 12752), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12741, 12752), True, 'import numpy as np\n'), ((12761, 12780), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12769, 12780), True, 'import numpy as np\n'), ((12842, 12861), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12850, 12861), True, 'import numpy as np\n'), ((12869, 12887), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12876, 12887), True, 'import numpy as np\n'), ((12978, 12996), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (12985, 12996), True, 'import numpy as np\n'), ((13152, 13171), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13160, 13171), True, 'import numpy as np\n'), ((13214, 13233), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13222, 13233), True, 'import numpy as np\n'), ((13388, 13406), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13395, 13406), True, 'import numpy as np\n'), ((13494, 13513), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13502, 13513), True, 'import numpy as np\n'), ((13521, 13540), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13529, 13540), True, 'import numpy as np\n'), ((13557, 13575), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13564, 13575), True, 'import numpy as np\n'), ((13583, 13601), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13590, 13601), True, 'import numpy as np\n'), ((14054, 14072), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14061, 14072), True, 'import numpy as np\n'), ((14080, 14099), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14088, 14099), True, 'import numpy as np\n'), ((14254, 14272), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14261, 14272), True, 'import numpy as np\n'), ((14280, 14299), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14288, 14299), True, 'import numpy as np\n'), ((14316, 14334), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14323, 14334), True, 'import numpy as np\n'), ((14342, 14361), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14350, 14361), True, 'import numpy as np\n'), ((14378, 14396), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14385, 14396), True, 'import numpy as np\n'), ((14404, 14423), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14412, 14423), True, 'import numpy as np\n'), ((14440, 14458), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14447, 14458), True, 'import numpy as np\n'), ((14467, 14486), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14475, 14486), True, 'import numpy as np\n'), ((14503, 14521), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 
4])\n', (14510, 14521), True, 'import numpy as np\n'), ((14530, 14549), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14538, 14549), True, 'import numpy as np\n'), ((14566, 14584), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14573, 14584), True, 'import numpy as np\n'), ((14595, 14614), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (14603, 14614), True, 'import numpy as np\n'), ((15119, 15137), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15126, 15137), True, 'import numpy as np\n'), ((15145, 15164), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15153, 15164), True, 'import numpy as np\n'), ((15181, 15199), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15188, 15199), True, 'import numpy as np\n'), ((15207, 15226), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15215, 15226), True, 'import numpy as np\n'), ((15243, 15261), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15250, 15261), True, 'import numpy as np\n'), ((15270, 15289), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15278, 15289), True, 'import numpy as np\n'), ((15306, 15324), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15313, 15324), True, 'import numpy as np\n'), ((15332, 15351), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (15340, 15351), True, 'import numpy as np\n'), ((16074, 16092), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16081, 16092), True, 'import numpy as np\n'), ((16100, 16119), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16108, 16119), True, 'import numpy as np\n'), ((16144, 16162), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16151, 16162), True, 'import numpy as np\n'), ((16170, 16189), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16178, 16189), True, 'import numpy as np\n'), ((16213, 16231), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16220, 16231), True, 'import numpy as np\n'), ((16240, 16259), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16248, 16259), True, 'import numpy as np\n'), ((16284, 16302), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16291, 16302), True, 'import numpy as np\n'), ((16310, 16329), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16318, 16329), True, 'import numpy as np\n'), ((16716, 16734), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16723, 16734), True, 'import numpy as np\n'), ((16742, 16761), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16750, 16761), True, 'import numpy as np\n'), ((16778, 16796), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16785, 16796), True, 'import numpy as np\n'), ((16804, 16823), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16812, 16823), True, 'import numpy as np\n'), ((16840, 16858), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16847, 16858), True, 'import numpy as np\n'), ((16866, 16885), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16874, 16885), True, 'import numpy as np\n'), ((16902, 16920), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16909, 16920), True, 'import numpy as np\n'), ((16929, 16948), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (16937, 16948), True, 'import numpy as np\n'), ((17271, 17289), 'numpy.ones', 'np.ones', 
(['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17278, 17289), True, 'import numpy as np\n'), ((17297, 17316), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17305, 17316), True, 'import numpy as np\n'), ((17333, 17351), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17340, 17351), True, 'import numpy as np\n'), ((17359, 17378), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17367, 17378), True, 'import numpy as np\n'), ((17395, 17413), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17402, 17413), True, 'import numpy as np\n'), ((17421, 17440), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17429, 17440), True, 'import numpy as np\n'), ((17457, 17475), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17464, 17475), True, 'import numpy as np\n'), ((17484, 17503), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (17492, 17503), True, 'import numpy as np\n'), ((17866, 17887), 'numpy.full', 'np.full', (['(1, 4, 4)', '(4)'], {}), '((1, 4, 4), 4)\n', (17873, 17887), True, 'import numpy as np\n'), ((17895, 17916), 'numpy.full', 'np.full', (['(1, 4, 4)', '(8)'], {}), '((1, 4, 4), 8)\n', (17902, 17916), True, 'import numpy as np\n'), ((17933, 17954), 'numpy.full', 'np.full', (['(1, 4, 4)', '(8)'], {}), '((1, 4, 4), 8)\n', (17940, 17954), True, 'import numpy as np\n'), ((17962, 17984), 'numpy.full', 'np.full', (['(1, 4, 4)', '(16)'], {}), '((1, 4, 4), 16)\n', (17969, 17984), True, 'import numpy as np\n'), ((18001, 18023), 'numpy.full', 'np.full', (['(1, 4, 4)', '(16)'], {}), '((1, 4, 4), 16)\n', (18008, 18023), True, 'import numpy as np\n'), ((18031, 18053), 'numpy.full', 'np.full', (['(1, 4, 4)', '(32)'], {}), '((1, 4, 4), 32)\n', (18038, 18053), True, 'import numpy as np\n'), ((18070, 18092), 'numpy.full', 'np.full', (['(1, 4, 4)', '(32)'], {}), '((1, 4, 4), 32)\n', (18077, 18092), True, 'import numpy as np\n'), ((18100, 18122), 'numpy.full', 'np.full', (['(1, 4, 4)', '(64)'], {}), '((1, 4, 4), 64)\n', (18107, 18122), True, 'import numpy as np\n'), ((19275, 19293), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19282, 19293), True, 'import numpy as np\n'), ((19301, 19320), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19309, 19320), True, 'import numpy as np\n'), ((19337, 19356), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19345, 19356), True, 'import numpy as np\n'), ((19364, 19382), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19371, 19382), True, 'import numpy as np\n'), ((19399, 19417), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19406, 19417), True, 'import numpy as np\n'), ((19426, 19445), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19434, 19445), True, 'import numpy as np\n'), ((19462, 19481), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19470, 19481), True, 'import numpy as np\n'), ((19489, 19507), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (19496, 19507), True, 'import numpy as np\n'), ((21789, 21807), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21796, 21807), True, 'import numpy as np\n'), ((21815, 21834), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21823, 21834), True, 'import numpy as np\n'), ((21851, 21870), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21859, 21870), True, 'import numpy as np\n'), ((21878, 21896), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], 
{}), '([1, 4, 4])\n', (21885, 21896), True, 'import numpy as np\n'), ((21913, 21931), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21920, 21931), True, 'import numpy as np\n'), ((21940, 21959), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21948, 21959), True, 'import numpy as np\n'), ((21976, 21995), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (21984, 21995), True, 'import numpy as np\n'), ((22003, 22021), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (22010, 22021), True, 'import numpy as np\n'), ((13811, 13830), 'numpy.zeros', 'np.zeros', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13819, 13830), True, 'import numpy as np\n'), ((13920, 13938), 'numpy.ones', 'np.ones', (['[1, 4, 4]'], {}), '([1, 4, 4])\n', (13927, 13938), True, 'import numpy as np\n'), ((21160, 21181), 'numpy.full', 'np.full', (['(1, 4, 4)', 'i'], {}), '((1, 4, 4), i)\n', (21167, 21181), True, 'import numpy as np\n'), ((21189, 21210), 'numpy.full', 'np.full', (['(1, 4, 4)', 'i'], {}), '((1, 4, 4), i)\n', (21196, 21210), True, 'import numpy as np\n'), ((21551, 21568), 'numpy.dtype', 'np.dtype', (['np.bool'], {}), '(np.bool)\n', (21559, 21568), True, 'import numpy as np\n')]
import json
import os
from os.path import join
from random import shuffle

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, normalize
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score

from transformers import BertTokenizer, BertConfig, BartTokenizer


def make_vector(text, tokenizer):
    token_ids = tokenizer.encode(text)[1:-1]
    count_vector = np.zeros(tokenizer.vocab_size, dtype=np.int16)
    for ID in token_ids:
        count_vector[ID] += 1
    return count_vector


def dataloader(data_dir, batch_size=5000):
    names = [x[:-6] for x in os.listdir(data_dir) if x[-5:] == '3.txt']
    index = 0
    while index < len(names):
        cur_names = names[index:index+batch_size]
        tuples = []
        for name in cur_names:
            hard = open(join(data_dir, f'{name}.0.txt')).read()
            simple = open(join(data_dir, f'{name}.3.txt')).read()
            tuples.append((hard, simple))
        yield tuples
        index += batch_size


def construct_dataset(tuples, tokenizer):
    X = np.empty((2*len(tuples), tokenizer.vocab_size), dtype=np.int16)
    y = np.empty(2*len(tuples), dtype=np.int16)
    index = 0
    for s,t in tuples:
        X[index] = make_vector(s, tokenizer)
        X[index+1] = make_vector(t, tokenizer)
        y[index] = 0
        y[index+1] = 1
        index += 2
    return X, y


def get_vocab(tokenizer):
    tokens = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(tokenizer.vocab_size)]
    return tokens


def simple_term_counts(data_dir='data/newsela/articles'):
    tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-xsum')
    model = LogisticRegression(max_iter=100)

    for batch in dataloader(data_dir):
        X, y = construct_dataset(batch, tokenizer)
        #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

        #apply feature scaling
        #X_train = normalize(X_train)
        #X_test = normalize(X_test)

        #model.fit(X_train, y_train)
        #predictions = model.predict(X_test)
        #print(accuracy_score(y_test, predictions))

        X = normalize(X)
        model.fit(X, y)

    vocab = get_vocab(tokenizer)
    weights = np.squeeze(model.coef_, axis=0).tolist()

    sorted_weights = filter(lambda x: len(x[1].strip()) > 0, zip(range(tokenizer.vocab_size), vocab, weights))
    sorted_weights = list(sorted(sorted_weights, key=lambda x: x[2]))

    with open('data/logr_weights/bart_freq_newsela_ids.txt', 'w') as f:
        for ID, word, weight in sorted_weights:
            f.write(f'{ID} {weight}\n')

    with open('data/logr_weights/bart_freq_newsela_tokens.txt', 'w') as f:
        for ID, word, weight in sorted_weights:
            f.write(f'{word} {weight}\n')


print(simple_term_counts())
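The script above fits a bag-of-tokens logistic regression that separates original (level 0) from simplified (level 3) Newsela articles and exports the learned per-token weights, sorted from most "complex" to most "simple". As a minimal, hedged sketch (the helper names and the mean-weight heuristic below are illustrative, not part of the original script), the exported ids file could be reused to score new text:

# Illustrative sketch only: assumes the ids/weights file written by
# simple_term_counts() above exists; the helpers here are hypothetical.
from transformers import BartTokenizer

def load_token_weights(path='data/logr_weights/bart_freq_newsela_ids.txt'):
    weights = {}
    with open(path) as f:
        for line in f:
            token_id, weight = line.split()
            weights[int(token_id)] = float(weight)
    return weights

def simplicity_score(text, tokenizer, weights):
    # Mean learned weight of the text's tokens: positive leans towards the
    # simplified class (label 1), negative towards the original class (label 0).
    ids = tokenizer.encode(text)[1:-1]
    return sum(weights.get(i, 0.0) for i in ids) / max(len(ids), 1)

# Example usage:
# tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-xsum')
# print(simplicity_score('The cat sat on the mat.', tokenizer, load_token_weights()))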
[ "os.listdir", "os.path.join", "sklearn.linear_model.LogisticRegression", "numpy.squeeze", "numpy.zeros", "transformers.BartTokenizer.from_pretrained", "sklearn.preprocessing.normalize" ]
[((576, 622), 'numpy.zeros', 'np.zeros', (['tokenizer.vocab_size'], {'dtype': 'np.int16'}), '(tokenizer.vocab_size, dtype=np.int16)\n', (584, 622), True, 'import numpy as np\n'), ((1803, 1860), 'transformers.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['"""facebook/bart-large-xsum"""'], {}), "('facebook/bart-large-xsum')\n", (1832, 1860), False, 'from transformers import BertTokenizer, BertConfig, BartTokenizer\n'), ((1873, 1905), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (1891, 1905), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2334, 2346), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {}), '(X)\n', (2343, 2346), False, 'from sklearn.preprocessing import MinMaxScaler, normalize\n'), ((777, 797), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (787, 797), False, 'import os\n'), ((2419, 2450), 'numpy.squeeze', 'np.squeeze', (['model.coef_'], {'axis': '(0)'}), '(model.coef_, axis=0)\n', (2429, 2450), True, 'import numpy as np\n'), ((999, 1030), 'os.path.join', 'join', (['data_dir', 'f"""{name}.0.txt"""'], {}), "(data_dir, f'{name}.0.txt')\n", (1003, 1030), False, 'from os.path import join\n'), ((1065, 1096), 'os.path.join', 'join', (['data_dir', 'f"""{name}.3.txt"""'], {}), "(data_dir, f'{name}.3.txt')\n", (1069, 1096), False, 'from os.path import join\n')]
import unittest, tempfile

from pygromos.simulations.hpc_queuing.job_scheduling.schedulers import simulation_scheduler
from pygromos.data.simulation_parameters_templates import template_md
from pygromos.data.topology_templates import blank_topo_template
from pygromos.simulations.hpc_queuing.submission_systems import DUMMY
from pygromos.files.gromos_system.gromos_system import Gromos_System
from pygromos.tests.in_testfiles import in_test_file_path
from pygromos.tests.test_files import out_test_root_dir


class test_MD_scheduler(unittest.TestCase):
    submissionSystem = DUMMY

    def setUp(self) -> None:
        self.tmp_test_dir = tempfile.mkdtemp(dir=out_test_root_dir, prefix="scheduling_Dummy_")

    def test_do(self):
        in_cnf = in_test_file_path+"/cnf/in_cnf1.cnf"
        out_dir_path = self.tmp_test_dir

        in_simSystem = Gromos_System(system_name="test_do", work_folder=out_dir_path,
                                      in_top_path=blank_topo_template, in_cnf_path=in_cnf,
                                      in_imd_path=template_md, in_gromosXX_bin_dir=None,
                                      in_gromosPP_bin_dir=None)
        submission_system = self.submissionSystem()

        simulation_scheduler.do(in_simSystem=in_simSystem, out_dir_path=out_dir_path,
                                submission_system=submission_system,
                                simulation_run_num=2, verbose= True)
[ "pygromos.simulations.hpc_queuing.job_scheduling.schedulers.simulation_scheduler.do", "tempfile.mkdtemp", "pygromos.files.gromos_system.gromos_system.Gromos_System" ]
[((640, 707), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': 'out_test_root_dir', 'prefix': '"""scheduling_Dummy_"""'}), "(dir=out_test_root_dir, prefix='scheduling_Dummy_')\n", (656, 707), False, 'import unittest, tempfile\n'), ((848, 1049), 'pygromos.files.gromos_system.gromos_system.Gromos_System', 'Gromos_System', ([], {'system_name': '"""test_do"""', 'work_folder': 'out_dir_path', 'in_top_path': 'blank_topo_template', 'in_cnf_path': 'in_cnf', 'in_imd_path': 'template_md', 'in_gromosXX_bin_dir': 'None', 'in_gromosPP_bin_dir': 'None'}), "(system_name='test_do', work_folder=out_dir_path, in_top_path=\n blank_topo_template, in_cnf_path=in_cnf, in_imd_path=template_md,\n in_gromosXX_bin_dir=None, in_gromosPP_bin_dir=None)\n", (861, 1049), False, 'from pygromos.files.gromos_system.gromos_system import Gromos_System\n'), ((1173, 1332), 'pygromos.simulations.hpc_queuing.job_scheduling.schedulers.simulation_scheduler.do', 'simulation_scheduler.do', ([], {'in_simSystem': 'in_simSystem', 'out_dir_path': 'out_dir_path', 'submission_system': 'submission_system', 'simulation_run_num': '(2)', 'verbose': '(True)'}), '(in_simSystem=in_simSystem, out_dir_path=\n out_dir_path, submission_system=submission_system, simulation_run_num=2,\n verbose=True)\n', (1196, 1332), False, 'from pygromos.simulations.hpc_queuing.job_scheduling.schedulers import simulation_scheduler\n')]
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 30 17:03:01 2017

@author: misakawa
"""

from pattern_matching import Match, when, var, T, t, _, overwrite
from numpy.random import randint


@overwrite(var[(t == int) | (t == float)], var[(t == int) | (t == float)])
def add(a, b):
    return a + b


@when(var[t == str], var[t == str])
def add(a, b):
    return a + b


class Bound1:
    pass


class Bound2:
    pass


class Bound3(Bound1, Bound2):
    def __repr__(self):
        return "bound3"


class Bound4(Bound3):
    pass


@when(_[(t != Bound3) & (t < Bound4)])
def add():
    return 2


@when(_)
def add():
    return 3


assert add(1, 1) == 2
assert add(Bound2()) == 2
assert add(Bound3()) == 3


@when(_[int], _[Bound1], var)
def add(u):
    return u


assert add(1, Bound1(), 'last') == 'last'


def is_type(x):
    return isinstance(x, type)


m = Match(1, 2, (3, int))
[a, b, c] = m.case(var[int], var, *var[tuple]).get
assert a == 1 and b == 2 and c == ((3, int), )

[c2] = m.case((_, _, (_, var.when(is_type)))).get
assert c2 == int


@overwrite(_ == None)
def summary():
    return 0


@when([var[int], *(_ == [])], var)
def summary(head, res):
    return head + res


@when([var[int], *var[list]], var)
def summary(head, tail, res):
    return summary(tail, res + head)


@when(var[list])
def summary(lst):
    return summary(lst, 0)


assert summary(list(range(100))) == 4950


@overwrite([var, *var])
def qsort(head, tail):
    lowers = [i for i in tail if i < head]
    highers = [i for i in tail if i >= head]
    return qsort(lowers) + [head] + qsort(highers)


@when(var)
def qsort(lst):
    return lst


qsort(randint(0, 500, size=(1200, )))


@when(_[t.when(lambda _: _ == int)])
def trait_test():
    return 1


assert trait_test(1) == 1


class Population:
    num: int = 1000


@when(var[t.when(lambda _: hasattr(_, 'num'))])
def trait_test(x):
    return x.num


assert trait_test(Population()) == 1000
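A hedged extra example of the same dispatch style (not part of the original demo): the `overwrite`/`when` pair used for `summary` above applies equally to other recursive definitions, assuming the imports from the demo are in scope.

# Assumes `overwrite`, `when`, `var` and `_` imported from pattern_matching as above.
@overwrite(_ == 0)
def fact():
    return 1

@when(var[int])
def fact(n):
    return n * fact(n - 1)

assert fact(5) == 120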
[ "pattern_matching.t.when", "pattern_matching.overwrite", "pattern_matching.when", "numpy.random.randint", "pattern_matching.var.when", "pattern_matching.Match" ]
[((190, 263), 'pattern_matching.overwrite', 'overwrite', (['var[(t == int) | (t == float)]', 'var[(t == int) | (t == float)]'], {}), '(var[(t == int) | (t == float)], var[(t == int) | (t == float)])\n', (199, 263), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((299, 333), 'pattern_matching.when', 'when', (['var[t == str]', 'var[t == str]'], {}), '(var[t == str], var[t == str])\n', (303, 333), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((533, 570), 'pattern_matching.when', 'when', (['_[(t != Bound3) & (t < Bound4)]'], {}), '(_[(t != Bound3) & (t < Bound4)])\n', (537, 570), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((598, 605), 'pattern_matching.when', 'when', (['_'], {}), '(_)\n', (602, 605), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((709, 737), 'pattern_matching.when', 'when', (['_[int]', '_[Bound1]', 'var'], {}), '(_[int], _[Bound1], var)\n', (713, 737), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((862, 883), 'pattern_matching.Match', 'Match', (['(1)', '(2)', '(3, int)'], {}), '(1, 2, (3, int))\n', (867, 883), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1053, 1073), 'pattern_matching.overwrite', 'overwrite', (['(_ == None)'], {}), '(_ == None)\n', (1062, 1073), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1105, 1138), 'pattern_matching.when', 'when', (['[var[int], *(_ == [])]', 'var'], {}), '([var[int], *(_ == [])], var)\n', (1109, 1138), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1188, 1221), 'pattern_matching.when', 'when', (['[var[int], *var[list]]', 'var'], {}), '([var[int], *var[list]], var)\n', (1192, 1221), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1292, 1307), 'pattern_matching.when', 'when', (['var[list]'], {}), '(var[list])\n', (1296, 1307), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1399, 1421), 'pattern_matching.overwrite', 'overwrite', (['[var, *var]'], {}), '([var, *var])\n', (1408, 1421), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1587, 1596), 'pattern_matching.when', 'when', (['var'], {}), '(var)\n', (1591, 1596), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1636, 1665), 'numpy.random.randint', 'randint', (['(0)', '(500)'], {'size': '(1200,)'}), '(0, 500, size=(1200,))\n', (1643, 1665), False, 'from numpy.random import randint\n'), ((1678, 1704), 'pattern_matching.t.when', 't.when', (['(lambda _: _ == int)'], {}), '(lambda _: _ == int)\n', (1684, 1704), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n'), ((1008, 1025), 'pattern_matching.var.when', 'var.when', (['is_type'], {}), '(is_type)\n', (1016, 1025), False, 'from pattern_matching import Match, when, var, T, t, _, overwrite\n')]
# ----------------------------------------------------------------------
# |
# |  EnvironmentDiffs.py
# |
# |  <NAME> <<EMAIL>>
# |      2018-06-02 22:19:34
# |
# ----------------------------------------------------------------------
# |
# |  Copyright <NAME> 2018-22.
# |  Distributed under the Boost Software License, Version 1.0.
# |  (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Displays changes made by an environment during activation."""

import json
import os
import sys
import textwrap

import six

import CommonEnvironment
from CommonEnvironment import CommandLine
from CommonEnvironment.Shell.All import CurrentShell

from RepositoryBootstrap import Constants

# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------

# ----------------------------------------------------------------------
@CommandLine.EntryPoint
@CommandLine.Constraints(
    output_stream=None,
)
def Before(
    decorate=False,
    output_stream=sys.stdout,
):
    _Display(GetOriginalEnvironment(), output_stream, decorate)
    return 0


# ----------------------------------------------------------------------
@CommandLine.EntryPoint
@CommandLine.Constraints(
    output_stream=None,
)
def After(
    decorate=False,
    output_stream=sys.stdout,
):
    original_env = GetOriginalEnvironment()

    # Compare to the current environment
    this_env = dict(os.environ)

    differences = {}

    for k, v in six.iteritems(this_env):
        if (
            k not in original_env
            or original_env[k] != v
        ):
            differences[k] = v

    _Display(differences, output_stream, decorate)
    return 0


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetOriginalEnvironment():
    # Get the original environment
    generated_dir = os.getenv(Constants.DE_REPO_GENERATED_NAME)
    assert os.path.isdir(generated_dir), generated_dir

    original_environment_filename = os.path.join(generated_dir, Constants.GENERATED_ACTIVATION_ORIGINAL_ENVIRONMENT_FILENAME)
    assert os.path.isfile(original_environment_filename), original_environment_filename

    with open(original_environment_filename) as f:
        return json.load(f)


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _Display(content, output_stream, decorate):
    if not isinstance(content, six.string_types):
        content = json.dumps(content)

    if decorate:
        output_stream.write(textwrap.dedent(
            """\
            //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//
            {}
            //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//
            """).format(content))
    else:
        output_stream.write(content)


# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
    try:
        sys.exit(CommandLine.Main())
    except KeyboardInterrupt:
        pass
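The module above reads the environment snapshot written out at activation time and prints what changed. A standard-library-only sketch of the same before/after idea (hedged: `environment_diff` is an illustrative helper, not part of the module) looks like this:

import json
import os

def environment_diff(original_env):
    # Variables that were added or changed relative to the snapshot.
    return {k: v for k, v in os.environ.items()
            if k not in original_env or original_env[k] != v}

# Example: take a snapshot, let an activation script run, then diff.
# snapshot = dict(os.environ)
# ... activation happens here ...
# print(json.dumps(environment_diff(snapshot), indent=2))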
[ "CommonEnvironment.CommandLine.Main", "textwrap.dedent", "os.getenv", "json.dumps", "os.path.join", "os.path.split", "os.path.isfile", "CommonEnvironment.CommandLine.Constraints", "os.path.isdir", "json.load", "CommonEnvironment.ThisFullpath", "six.iteritems" ]
[((917, 949), 'CommonEnvironment.ThisFullpath', 'CommonEnvironment.ThisFullpath', ([], {}), '()\n', (947, 949), False, 'import CommonEnvironment\n'), ((979, 1010), 'os.path.split', 'os.path.split', (['_script_fullpath'], {}), '(_script_fullpath)\n', (992, 1010), False, 'import os\n'), ((1188, 1231), 'CommonEnvironment.CommandLine.Constraints', 'CommandLine.Constraints', ([], {'output_stream': 'None'}), '(output_stream=None)\n', (1211, 1231), False, 'from CommonEnvironment import CommandLine\n'), ((1524, 1567), 'CommonEnvironment.CommandLine.Constraints', 'CommandLine.Constraints', ([], {'output_stream': 'None'}), '(output_stream=None)\n', (1547, 1567), False, 'from CommonEnvironment import CommandLine\n'), ((1840, 1863), 'six.iteritems', 'six.iteritems', (['this_env'], {}), '(this_env)\n', (1853, 1863), False, 'import six\n'), ((2366, 2409), 'os.getenv', 'os.getenv', (['Constants.DE_REPO_GENERATED_NAME'], {}), '(Constants.DE_REPO_GENERATED_NAME)\n', (2375, 2409), False, 'import os\n'), ((2422, 2450), 'os.path.isdir', 'os.path.isdir', (['generated_dir'], {}), '(generated_dir)\n', (2435, 2450), False, 'import os\n'), ((2505, 2599), 'os.path.join', 'os.path.join', (['generated_dir', 'Constants.GENERATED_ACTIVATION_ORIGINAL_ENVIRONMENT_FILENAME'], {}), '(generated_dir, Constants.\n GENERATED_ACTIVATION_ORIGINAL_ENVIRONMENT_FILENAME)\n', (2517, 2599), False, 'import os\n'), ((2607, 2652), 'os.path.isfile', 'os.path.isfile', (['original_environment_filename'], {}), '(original_environment_filename)\n', (2621, 2652), False, 'import os\n'), ((2754, 2766), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2763, 2766), False, 'import json\n'), ((3110, 3129), 'json.dumps', 'json.dumps', (['content'], {}), '(content)\n', (3120, 3129), False, 'import json\n'), ((3737, 3755), 'CommonEnvironment.CommandLine.Main', 'CommandLine.Main', ([], {}), '()\n', (3753, 3755), False, 'from CommonEnvironment import CommandLine\n'), ((3179, 3389), 'textwrap.dedent', 'textwrap.dedent', (['""" //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//\n {}\n //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//\n """'], {}), '(\n """ //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//\n {}\n //--//--//--//--//--//--//--//--//--//--//--//--//--//--//--//\n """\n )\n', (3194, 3389), False, 'import textwrap\n')]
from datetime import datetime
from os.path import join

from tests.base import TestCase, main, assets, copy_of_directory

from ocrd_utils import (
    initLogging,
    VERSION,
    MIMETYPE_PAGE
)
from ocrd_models import OcrdMets

# pylint: disable=protected-access,deprecated-method,too-many-public-methods

class TestOcrdMets(TestCase):

    def setUp(self):
        self.mets = OcrdMets(filename=assets.url_of('SBB0000F29300010000/data/mets.xml'))
        initLogging()

    def test_unique_identifier(self):
        self.assertEqual(self.mets.unique_identifier, 'http://resolver.staatsbibliothek-berlin.de/SBB0000F29300010000', 'Right identifier')
        self.mets.unique_identifier = 'foo'
        self.assertEqual(self.mets.unique_identifier, 'foo', 'Right identifier after change')

    def test_unique_identifier_from_nothing(self):
        mets = OcrdMets.empty_mets()
        self.assertEqual(mets.unique_identifier, None, 'no identifier')
        mets.unique_identifier = 'foo'
        self.assertEqual(mets.unique_identifier, 'foo', 'Right identifier after change')
        as_string = mets.to_xml().decode('utf-8')
        self.assertIn('ocrd/core v%s' % VERSION, as_string)
        self.assertIn('CREATEDATE="%d-%d-%02dT' % (
            datetime.now().year,
            datetime.now().month,
            datetime.now().day,
        ), as_string)

    def test_str(self):
        mets = OcrdMets(content='<mets/>')
        self.assertEqual(str(mets), 'OcrdMets[fileGrps=[],files=[]]')

    def test_override_constructor_args(self):
        id2file = {'foo': {}}
        mets = OcrdMets(id2file, content='<mets/>')
        self.assertEqual(mets._file_by_id, id2file)

    def test_file_groups(self):
        self.assertEqual(len(self.mets.file_groups), 17, '17 file groups')

    def test_find_files(self):
        self.assertEqual(len(self.mets.find_files()), 35, '35 files total')
        self.assertEqual(len(self.mets.find_files(fileGrp='OCR-D-IMG')), 3, '3 files in "OCR-D-IMG"')
        self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001')), 17, '17 files for page "PHYS_0001"')
        self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001-NOTEXIST')), 0, '0 pages for "PHYS_0001-NOTEXIST"')
        self.assertEqual(len(self.mets.find_files(mimetype='image/tiff')), 13, '13 image/tiff')
        self.assertEqual(len(self.mets.find_files(mimetype=MIMETYPE_PAGE)), 20, '20 ' + MIMETYPE_PAGE)
        self.assertEqual(len(self.mets.find_files(url='OCR-D-IMG/FILE_0005_IMAGE.tif')), 1, '1 xlink:href="OCR-D-IMG/FILE_0005_IMAGE.tif"')

    def test_find_files_local_only(self):
        self.assertEqual(len(self.mets.find_files(pageId='PHYS_0001', local_only=True)), 3, '3 local files for page "PHYS_0001"')

    def test_physical_pages(self):
        self.assertEqual(len(self.mets.physical_pages), 3, '3 physical pages')

    def test_physical_pages_from_empty_mets(self):
        mets = OcrdMets(content="<mets></mets>")
        self.assertEqual(len(mets.physical_pages), 0, 'no physical page')
        mets.add_file('OUTPUT', ID="foo123", pageId="foobar")
        self.assertEqual(len(mets.physical_pages), 1, '1 physical page')

    def test_add_group(self):
        mets = OcrdMets.empty_mets()
        self.assertEqual(len(mets.file_groups), 0, '0 file groups')
        mets.add_file_group('TEST')
        self.assertEqual(len(mets.file_groups), 1, '1 file groups')
        mets.add_file_group('TEST')
        self.assertEqual(len(mets.file_groups), 1, '1 file groups')

    def test_add_file(self):
        mets = OcrdMets.empty_mets()
        self.assertEqual(len(mets.file_groups), 0, '0 file groups')
        self.assertEqual(len(mets.find_files(fileGrp='OUTPUT')), 0, '0 files in "OUTPUT"')
        f = mets.add_file('OUTPUT', ID="foo123", mimetype="bla/quux", pageId="foobar")
        f2 = mets.add_file('OUTPUT', ID="foo1232", mimetype="bla/quux", pageId="foobar")
        self.assertEqual(f.pageId, 'foobar', 'pageId set')
        self.assertEqual(len(mets.file_groups), 1, '1 file groups')
        self.assertEqual(len(mets.find_files(fileGrp='OUTPUT')), 2, '2 files in "OUTPUT"')
        mets.set_physical_page_for_file('barfoo', f, order='300', orderlabel="page 300")
        self.assertEqual(f.pageId, 'barfoo', 'pageId changed')
        mets.set_physical_page_for_file('quux', f2, order='302', orderlabel="page 302")
        self.assertEqual(f2.pageId, 'quux', 'pageId changed')
        mets.set_physical_page_for_file('barfoo', f2, order='301', orderlabel="page 301")
        self.assertEqual(f2.pageId, 'barfoo', 'pageId changed')
        self.assertEqual(len(mets.file_groups), 1, '1 file group')

    def test_add_file_ID_fail(self):
        f = self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="beep/boop")
        self.assertEqual(f.ID, 'best-id-ever', "ID kept")
        with self.assertRaises(Exception) as cm:
            self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="boop/beep")
        self.assertEqual(str(cm.exception), "File with ID='best-id-ever' already exists")
        f2 = self.mets.add_file('OUTPUT', ID='best-id-ever', mimetype="boop/beep", force=True)
        self.assertEqual(f._el, f2._el)

    def test_filegrp_from_file(self):
        f = self.mets.find_files(fileGrp='OCR-D-IMG')[0]
        self.assertEqual(f.fileGrp, 'OCR-D-IMG')

    def test_add_file_no_id(self):
        with self.assertRaisesRegex(Exception, "Must set ID of the mets:file"):
            self.mets.add_file('FOO')

    def test_add_file_no_pageid(self):
        f = self.mets.add_file('OUTPUT', mimetype="bla/quux", ID="foo3")
        self.assertEqual(f.pageId, None, 'No pageId')

    def test_file_pageid(self):
        f = self.mets.find_files()[0]
        self.assertEqual(f.pageId, 'PHYS_0001')
        f.pageId = 'foo'
        self.assertEqual(f.pageId, 'foo')

    def test_agent(self):
        # Processor(workspace=self.workspace)
        mets = self.mets
        beforelen = len(mets.agents)
        mets.add_agent('foo bar v0.0.1', 'OTHER', 'OTHER', 'YETOTHERSTILL')
        # print(['%s'%x for x in mets.agents])
        self.assertEqual(len(mets.agents), beforelen + 1)

    def test_metshdr(self):
        """
        Test whether metsHdr is created on-demand
        """
        mets = OcrdMets(content="<mets></mets>")
        self.assertFalse(mets._tree.getroot().getchildren())
        mets.add_agent()
        self.assertEqual(len(mets._tree.getroot().getchildren()), 1)

    def test_nocontent_nofilename(self):
        with self.assertRaisesRegex(Exception, "Must pass 'filename' or 'content' to"):
            OcrdMets()

    def test_encoding_entities(self):
        mets = OcrdMets(content="""
        <mets>
          <metsHdr>
            <agent>
              <name>Őh śéé Áŕ</name>
              <note>OCR-D</note>
            </agent>
          </metsHdr>
        </mets>
        """)
        self.assertIn('Őh śéé Áŕ', mets.to_xml().decode('utf-8'))

    def test_remove_file_group(self):
        """
        Test removal of filegrp
        """
        with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:
            mets = OcrdMets(filename=join(tempdir, 'mets.xml'))
            self.assertEqual(len(mets.file_groups), 17)
            self.assertEqual(len(mets.find_files()), 35)
            # print()
            # before = sorted([x.ID for x in mets.find_files()])
            with self.assertRaisesRegex(Exception, "not empty"):
                mets.remove_file_group('OCR-D-GT-ALTO')
            mets.remove_file_group('OCR-D-GT-PAGE', recursive=True)
            # print([x for x in before if x not in sorted([x.ID for x in mets.find_files()])])
            self.assertEqual(len(mets.file_groups), 16)
            self.assertEqual(len(mets.find_files()), 33)


if __name__ == '__main__':
    main()
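Outside the test harness, the same OcrdMets calls exercised above compose into a small end-to-end usage sketch (hedged: the identifier, group and file names below are made up for illustration):

# Hedged usage sketch based only on calls that appear in the tests above.
from ocrd_models import OcrdMets

mets = OcrdMets.empty_mets()
mets.unique_identifier = 'urn:example:demo'
mets.add_file_group('OCR-D-IMG')
mets.add_file('OCR-D-IMG', ID='FILE_0001', mimetype='image/tiff', pageId='PHYS_0001')
print(mets.to_xml().decode('utf-8'))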
[ "tests.base.assets.path_to", "tests.base.main", "ocrd_utils.initLogging", "os.path.join", "ocrd_models.OcrdMets.empty_mets", "datetime.datetime.now", "ocrd_models.OcrdMets", "tests.base.assets.url_of" ]
[((7830, 7836), 'tests.base.main', 'main', ([], {}), '()\n', (7834, 7836), False, 'from tests.base import TestCase, main, assets, copy_of_directory\n'), ((457, 470), 'ocrd_utils.initLogging', 'initLogging', ([], {}), '()\n', (468, 470), False, 'from ocrd_utils import initLogging, VERSION, MIMETYPE_PAGE\n'), ((855, 876), 'ocrd_models.OcrdMets.empty_mets', 'OcrdMets.empty_mets', ([], {}), '()\n', (874, 876), False, 'from ocrd_models import OcrdMets\n'), ((1400, 1427), 'ocrd_models.OcrdMets', 'OcrdMets', ([], {'content': '"""<mets/>"""'}), "(content='<mets/>')\n", (1408, 1427), False, 'from ocrd_models import OcrdMets\n'), ((1590, 1626), 'ocrd_models.OcrdMets', 'OcrdMets', (['id2file'], {'content': '"""<mets/>"""'}), "(id2file, content='<mets/>')\n", (1598, 1626), False, 'from ocrd_models import OcrdMets\n'), ((2920, 2953), 'ocrd_models.OcrdMets', 'OcrdMets', ([], {'content': '"""<mets></mets>"""'}), "(content='<mets></mets>')\n", (2928, 2953), False, 'from ocrd_models import OcrdMets\n'), ((3209, 3230), 'ocrd_models.OcrdMets.empty_mets', 'OcrdMets.empty_mets', ([], {}), '()\n', (3228, 3230), False, 'from ocrd_models import OcrdMets\n'), ((3552, 3573), 'ocrd_models.OcrdMets.empty_mets', 'OcrdMets.empty_mets', ([], {}), '()\n', (3571, 3573), False, 'from ocrd_models import OcrdMets\n'), ((6272, 6305), 'ocrd_models.OcrdMets', 'OcrdMets', ([], {'content': '"""<mets></mets>"""'}), "(content='<mets></mets>')\n", (6280, 6305), False, 'from ocrd_models import OcrdMets\n'), ((6668, 6894), 'ocrd_models.OcrdMets', 'OcrdMets', ([], {'content': '"""\n <mets>\n <metsHdr>\n <agent>\n <name>Őh śéé Áŕ</name>\n <note>OCR-D</note>\n </agent>\n </metsHdr>\n </mets>\n """'}), '(content=\n """\n <mets>\n <metsHdr>\n <agent>\n <name>Őh śéé Áŕ</name>\n <note>OCR-D</note>\n </agent>\n </metsHdr>\n </mets>\n """\n )\n', (6676, 6894), False, 'from ocrd_models import OcrdMets\n'), ((6603, 6613), 'ocrd_models.OcrdMets', 'OcrdMets', ([], {}), '()\n', (6611, 6613), False, 'from ocrd_models import OcrdMets\n'), ((397, 447), 'tests.base.assets.url_of', 'assets.url_of', (['"""SBB0000F29300010000/data/mets.xml"""'], {}), "('SBB0000F29300010000/data/mets.xml')\n", (410, 447), False, 'from tests.base import TestCase, main, assets, copy_of_directory\n'), ((7078, 7120), 'tests.base.assets.path_to', 'assets.path_to', (['"""SBB0000F29300010000/data"""'], {}), "('SBB0000F29300010000/data')\n", (7092, 7120), False, 'from tests.base import TestCase, main, assets, copy_of_directory\n'), ((7171, 7196), 'os.path.join', 'join', (['tempdir', '"""mets.xml"""'], {}), "(tempdir, 'mets.xml')\n", (7175, 7196), False, 'from os.path import join\n'), ((1251, 1265), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1263, 1265), False, 'from datetime import datetime\n'), ((1284, 1298), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1296, 1298), False, 'from datetime import datetime\n'), ((1318, 1332), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1330, 1332), False, 'from datetime import datetime\n')]
""" Environment for basic obstacle avoidance controlling a robotic arm from UR. In this environment the obstacle is only moving up and down in a vertical line in front of the robot. The goal is for the robot to stay within a predefined minimum distance to the moving obstacle. When feasible the robot should continue to the original configuration, otherwise wait for the obstacle to move away before proceeding """ import numpy as np from typing import Tuple from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2 from robo_gym.envs.simulation_wrapper import Simulation from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv # base, shoulder, elbow, wrist_1, wrist_2, wrist_3 JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0] DEBUG = True MINIMUM_DISTANCE = 0.3 # the distance [cm] the robot should keep to the obstacle class BasicAvoidanceUR(URBaseAvoidanceEnv): """Universal Robots UR basic obstacle avoidance environment. Args: rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None. fix_base (bool): Wether or not the base joint stays fixed or is moveable. Defaults to False. fix_shoulder (bool): Wether or not the shoulder joint stays fixed or is moveable. Defaults to False. fix_elbow (bool): Wether or not the elbow joint stays fixed or is moveable. Defaults to False. fix_wrist_1 (bool): Wether or not the wrist 1 joint stays fixed or is moveable. Defaults to False. fix_wrist_2 (bool): Wether or not the wrist 2 joint stays fixed or is moveable. Defaults to False. fix_wrist_3 (bool): Wether or not the wrist 3 joint stays fixed or is moveable. Defaults to True. ur_model (str): determines which ur model will be used in the environment. Defaults to 'ur5'. include_polar_to_elbow (bool): determines wether or not the polar coordinates to the elbow joint are included in the state. Defaults to False. Attributes: ur (:obj:): Robot utilities object. client (:obj:str): Robot Server client. real_robot (bool): True if the environment is controlling a real robot. """ max_episode_steps = 1000 def _set_initial_robot_server_state(self, rs_state, fixed_object_position = None) -> robot_server_pb2.State: if fixed_object_position: state_msg = super()._set_initial_robot_server_state(rs_state=rs_state, fixed_object_position=fixed_object_position) return state_msg z_amplitude = np.random.default_rng().uniform(low=0.09, high=0.35) z_frequency = 0.125 z_offset = np.random.default_rng().uniform(low=0.2, high=0.6) string_params = {"object_0_function": "triangle_wave"} float_params = {"object_0_x": 0.12, "object_0_y": 0.34, "object_0_z_amplitude": z_amplitude, "object_0_z_frequency": z_frequency, "object_0_z_offset": z_offset} state = {} state_msg = robot_server_pb2.State(state = state, float_params = float_params, string_params = string_params, state_dict = rs_state) return state_msg def reset(self, joint_positions = JOINT_POSITIONS, fixed_object_position = None) -> np.array: """Environment reset. Args: joint_positions (list[6] or np.array[6]): robot joint positions in radians. 
fixed_object_position (list[3]): x,y,z fixed position of object """ self.prev_action = np.zeros(6) state = super().reset(joint_positions = joint_positions, fixed_object_position = fixed_object_position) return state def reward(self, rs_state, action) -> Tuple[float, bool, dict]: env_state = self._robot_server_state_to_env_state(rs_state) reward = 0 done = False info = {} # Reward weights close_distance_weight = -2 delta_joint_weight = 1 action_usage_weight = 1 rapid_action_weight = -0.2 # Difference in joint position current vs. starting position delta_joint_pos = env_state[9:15] # Calculate distance to the obstacle obstacle_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']]) ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']]) forearm_coord = np.array([rs_state['forearm_to_ref_translation_x'], rs_state['forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']]) distance_to_ee = np.linalg.norm(obstacle_coord - ee_coord) distance_to_forearm = np.linalg.norm(obstacle_coord - forearm_coord) distance_to_target = np.min([distance_to_ee, distance_to_forearm]) # Reward staying close to the predefined joint position if abs(env_state[-6:]).sum() < 0.1 * action.size: reward += delta_joint_weight * (1 - (abs(delta_joint_pos).sum()/(0.1 * action.size))) * (1/1000) # Reward for not acting if abs(action).sum() <= action.size: reward += action_usage_weight * (1 - (np.square(action).sum()/action.size)) * (1/1000) # Negative reward if actions change to rapidly between steps for i in range(len(action)): if abs(action[i] - self.prev_action[i]) > 0.5: reward += rapid_action_weight * (1/1000) # Negative reward if the obstacle is close than the predefined minimum distance if distance_to_target < MINIMUM_DISTANCE: reward += close_distance_weight * (1/self.max_episode_steps) # Check if there is a collision collision = True if rs_state['in_collision'] == 1 else False if collision: done = True info['final_status'] = 'collision' info['target_coord'] = obstacle_coord self.last_position_on_success = [] if self.elapsed_steps >= self.max_episode_steps: done = True info['final_status'] = 'success' info['target_coord'] = obstacle_coord self.last_position_on_success = [] return reward, done, info def step(self, action) -> Tuple[np.array, float, bool, dict]: if type(action) == list: action = np.array(action) state, reward, done, info = super().step(action) self.prev_action = self.add_fixed_joints(action) return state, reward, done, info class BasicAvoidanceURSim(BasicAvoidanceUR, Simulation): cmd = "roslaunch ur_robot_server ur_robot_server.launch \ world_name:=tabletop_sphere50.world \ reference_frame:=base_link \ max_velocity_scale_factor:=0.2 \ action_cycle_rate:=20 \ rviz_gui:=false \ gazebo_gui:=true \ objects_controller:=true \ rs_mode:=1moving2points \ n_objects:=1.0 \ object_0_model_name:=sphere50 \ object_0_frame:=target" def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs): self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs) BasicAvoidanceUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs) class BasicAvoidanceURRob(BasicAvoidanceUR): real_robot = True # roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true 
gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving
[ "numpy.random.default_rng", "numpy.min", "robo_gym_server_modules.robot_server.grpc_msgs.python.robot_server_pb2.State", "numpy.square", "numpy.array", "numpy.zeros", "numpy.linalg.norm", "robo_gym.envs.simulation_wrapper.Simulation.__init__" ]
[((3080, 3196), 'robo_gym_server_modules.robot_server.grpc_msgs.python.robot_server_pb2.State', 'robot_server_pb2.State', ([], {'state': 'state', 'float_params': 'float_params', 'string_params': 'string_params', 'state_dict': 'rs_state'}), '(state=state, float_params=float_params,\n string_params=string_params, state_dict=rs_state)\n', (3102, 3196), False, 'from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2\n'), ((3619, 3630), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3627, 3630), True, 'import numpy as np\n'), ((4315, 4464), 'numpy.array', 'np.array', (["[rs_state['object_0_to_ref_translation_x'], rs_state[\n 'object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']\n ]"], {}), "([rs_state['object_0_to_ref_translation_x'], rs_state[\n 'object_0_to_ref_translation_y'], rs_state[\n 'object_0_to_ref_translation_z']])\n", (4323, 4464), True, 'import numpy as np\n'), ((4474, 4600), 'numpy.array', 'np.array', (["[rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'],\n rs_state['ee_to_ref_translation_z']]"], {}), "([rs_state['ee_to_ref_translation_x'], rs_state[\n 'ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])\n", (4482, 4600), True, 'import numpy as np\n'), ((4620, 4761), 'numpy.array', 'np.array', (["[rs_state['forearm_to_ref_translation_x'], rs_state[\n 'forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']]"], {}), "([rs_state['forearm_to_ref_translation_x'], rs_state[\n 'forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])\n", (4628, 4761), True, 'import numpy as np\n'), ((4782, 4823), 'numpy.linalg.norm', 'np.linalg.norm', (['(obstacle_coord - ee_coord)'], {}), '(obstacle_coord - ee_coord)\n', (4796, 4823), True, 'import numpy as np\n'), ((4855, 4901), 'numpy.linalg.norm', 'np.linalg.norm', (['(obstacle_coord - forearm_coord)'], {}), '(obstacle_coord - forearm_coord)\n', (4869, 4901), True, 'import numpy as np\n'), ((4932, 4977), 'numpy.min', 'np.min', (['[distance_to_ee, distance_to_forearm]'], {}), '([distance_to_ee, distance_to_forearm])\n', (4938, 4977), True, 'import numpy as np\n'), ((7402, 7496), 'robo_gym.envs.simulation_wrapper.Simulation.__init__', 'Simulation.__init__', (['self', 'self.cmd', 'ip', 'lower_bound_port', 'upper_bound_port', 'gui'], {}), '(self, self.cmd, ip, lower_bound_port, upper_bound_port,\n gui, **kwargs)\n', (7421, 7496), False, 'from robo_gym.envs.simulation_wrapper import Simulation\n'), ((6536, 6552), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (6544, 6552), True, 'import numpy as np\n'), ((2549, 2572), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2570, 2572), True, 'import numpy as np\n'), ((2649, 2672), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (2670, 2672), True, 'import numpy as np\n'), ((5362, 5379), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (5371, 5379), True, 'import numpy as np\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import pyqtgraph as pg import numpy as np class CustomWidget(pg.GraphicsWindow): pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') def __init__(self, parent=None, **kargs): pg.GraphicsWindow.__init__(self, **kargs) self.setParent(parent) self.setWindowTitle('pyqtgraph example: Scrolling Plots') self.p = self.addPlot(labels = {'left':'Position', 'bottom':'Time'}) self.data = np.zeros(10) self.curve = self.p.plot(self.data, pen='b') if __name__ == '__main__': w = CustomWidget() w.show()
[ "pyqtgraph.setConfigOption", "numpy.zeros", "pyqtgraph.GraphicsWindow.__init__" ]
[((133, 170), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""w"""'], {}), "('background', 'w')\n", (151, 170), True, 'import pyqtgraph as pg\n'), ((175, 212), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""foreground"""', '"""k"""'], {}), "('foreground', 'k')\n", (193, 212), True, 'import pyqtgraph as pg\n'), ((267, 308), 'pyqtgraph.GraphicsWindow.__init__', 'pg.GraphicsWindow.__init__', (['self'], {}), '(self, **kargs)\n', (293, 308), True, 'import pyqtgraph as pg\n'), ((504, 516), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (512, 516), True, 'import numpy as np\n')]
"""Tests for collapsible definition lists. When the option ``html_collapsible_definitions`` is ``True``, some HTML classes should be added to some definition lists but not all of them. """ from pathlib import Path import pytest from sphinx.application import Sphinx from .util import parse_html @pytest.mark.sphinx( "html", testroot="collapsible", confoverrides={"html_theme": "sphinxawesome_theme"}, freshenv=True, ) def test_no_permalinks(app: Sphinx) -> None: """It tests that there are no permalinks.""" app.config.html_permalinks = False # type: ignore[attr-defined] app.build() tree = parse_html(Path(app.outdir) / "index.html") dl = tree("dl") assert len(dl) == 2 headerlinks = tree("a", class_="headerlink") assert len(headerlinks) == 0 @pytest.mark.sphinx( "html", testroot="collapsible", confoverrides={"html_theme": "sphinxawesome_theme"}, freshenv=True, ) def test_no_collapsible_definitions(app: Sphinx) -> None: """By default, no classes should be added.""" app.build() tree = parse_html(Path(app.outdir) / "index.html") dl = tree("dl") assert len(dl) == 2 assert str(dl[0]).replace("\n", "") == ( '<dl class="simple"><dt>term</dt><dd><p>definition</p></dd></dl>' ) assert dl[1]["class"] == ["std", "option", "code-definition"] dt, dd = (c for c in dl[1].children if c.strip is None) assert dt.name == "dt" assert "accordion" not in dt["class"] assert dd.name == "dd" assert "class" not in dd expand_more_button = dt("button", class_="expand-more") assert len(expand_more_button) == 0 @pytest.mark.sphinx( "html", testroot="collapsible", confoverrides={"html_theme": "sphinxawesome_theme"}, freshenv=True, ) def test_collapsible_definitions(app: Sphinx) -> None: """It tests the correct classes being added to the definition lists. It should not add the classes to normal definition lists. """ # if specified in 'confoverrides', this returns a warning app.config.html_collapsible_definitions = True # type: ignore[attr-defined] app.build() tree = parse_html(Path(app.outdir) / "index.html") dl = tree("dl") assert len(dl) == 2 assert str(dl[0]).replace("\n", "") == ( '<dl class="simple"><dt>term</dt><dd><p>definition</p></dd></dl>' ) assert "code-definition" in dl[1]["class"] dt, dd = (c for c in dl[1].children if c.strip is None) assert dt.name == "dt" assert dt["class"] == ["sig", "sig-object", "std", "accordion"] assert dd.name == "dd" assert dd["class"] == ["panel"] expand_more_button = dt("button", class_="expand-more") assert len(expand_more_button) == 1
[ "pytest.mark.sphinx", "pathlib.Path" ]
[((302, 425), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (['"""html"""'], {'testroot': '"""collapsible"""', 'confoverrides': "{'html_theme': 'sphinxawesome_theme'}", 'freshenv': '(True)'}), "('html', testroot='collapsible', confoverrides={\n 'html_theme': 'sphinxawesome_theme'}, freshenv=True)\n", (320, 425), False, 'import pytest\n'), ((803, 926), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (['"""html"""'], {'testroot': '"""collapsible"""', 'confoverrides': "{'html_theme': 'sphinxawesome_theme'}", 'freshenv': '(True)'}), "('html', testroot='collapsible', confoverrides={\n 'html_theme': 'sphinxawesome_theme'}, freshenv=True)\n", (821, 926), False, 'import pytest\n'), ((1646, 1769), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (['"""html"""'], {'testroot': '"""collapsible"""', 'confoverrides': "{'html_theme': 'sphinxawesome_theme'}", 'freshenv': '(True)'}), "('html', testroot='collapsible', confoverrides={\n 'html_theme': 'sphinxawesome_theme'}, freshenv=True)\n", (1664, 1769), False, 'import pytest\n'), ((641, 657), 'pathlib.Path', 'Path', (['app.outdir'], {}), '(app.outdir)\n', (645, 657), False, 'from pathlib import Path\n'), ((1087, 1103), 'pathlib.Path', 'Path', (['app.outdir'], {}), '(app.outdir)\n', (1091, 1103), False, 'from pathlib import Path\n'), ((2164, 2180), 'pathlib.Path', 'Path', (['app.outdir'], {}), '(app.outdir)\n', (2168, 2180), False, 'from pathlib import Path\n')]
import numpy as np import pandas as pd import matplotlib.pyplot as plt from network import NN from evaluate import accuracy def read_data(fpath): iris = pd.read_csv(fpath) iris.loc[iris['species'] == 'virginica', 'species'] = 0 iris.loc[iris['species'] == 'versicolor', 'species'] = 1 iris.loc[iris['species'] == 'setosa', 'species'] = 2 iris = iris[iris['species'] != 2] return iris[['petal_length', 'petal_width']].values, iris[['species']].values.astype('uint8') def plot_data(X, y): plt.scatter(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral) plt.title("IRIS DATA | Blue - Versicolor, Red - Virginica ") plt.xlabel('Petal Length') plt.ylabel('Petal Width') plt.show() def train_test_split(X, y, ratio=0.8): indices = np.arange(X.shape[0]) np.random.shuffle(indices) train_len = int(X.shape[0] * ratio) return X[indices[:train_len]], y[indices[:train_len]], X[indices[train_len:]], y[indices[train_len:]] if __name__ == '__main__': X, y = read_data('iris.csv') # comment the following line if you don't need the plot anymore plot_data(X, y) X_train, y_train, X_test, y_test = train_test_split(X, y, 0.7) nn = NN(len(X[0]), 5, 1) output = nn.feedforward(X_train) print(output) print(f'w1 before backward propagation: \n{nn.w1} \nw2 before backward propagation:\n{nn.w2}') nn.backward(X_train, y_train, output) print(f'w1 after backward propagation: \n{nn.w1} \nw2 after backward propagation:\n{nn.w2}') nn.train(X_train, y_train) print("Accuracy:") print(accuracy(nn, X_test, y_test))
[ "numpy.random.shuffle", "pandas.read_csv", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "evaluate.accuracy", "numpy.arange", "matplotlib.pyplot.show" ]
[((159, 177), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (170, 177), True, 'import pandas as pd\n'), ((519, 587), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'y[:, 0]', 's': '(40)', 'cmap': 'plt.cm.Spectral'}), '(X[:, 0], X[:, 1], c=y[:, 0], s=40, cmap=plt.cm.Spectral)\n', (530, 587), True, 'import matplotlib.pyplot as plt\n'), ((592, 652), 'matplotlib.pyplot.title', 'plt.title', (['"""IRIS DATA | Blue - Versicolor, Red - Virginica """'], {}), "('IRIS DATA | Blue - Versicolor, Red - Virginica ')\n", (601, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal Length"""'], {}), "('Petal Length')\n", (667, 683), True, 'import matplotlib.pyplot as plt\n'), ((688, 713), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal Width"""'], {}), "('Petal Width')\n", (698, 713), True, 'import matplotlib.pyplot as plt\n'), ((718, 728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (726, 728), True, 'import matplotlib.pyplot as plt\n'), ((784, 805), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (793, 805), True, 'import numpy as np\n'), ((810, 836), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (827, 836), True, 'import numpy as np\n'), ((1586, 1614), 'evaluate.accuracy', 'accuracy', (['nn', 'X_test', 'y_test'], {}), '(nn, X_test, y_test)\n', (1594, 1614), False, 'from evaluate import accuracy\n')]
import unittest import unittest.mock from programy.storage.entities.nodes import NodesStore class NodesStoreTest(unittest.TestCase): def test_load(self): store = NodesStore() with self.assertRaises(NotImplementedError): collector = unittest.mock.Mock() store.load(collector)
[ "programy.storage.entities.nodes.NodesStore", "unittest.mock.Mock" ]
[((178, 190), 'programy.storage.entities.nodes.NodesStore', 'NodesStore', ([], {}), '()\n', (188, 190), False, 'from programy.storage.entities.nodes import NodesStore\n'), ((268, 288), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (286, 288), False, 'import unittest\n')]
import graphviz def convert_to_visualize(graph_ir, vgraph): for name, graph in graph_ir.items(): if name == '_training_config': continue with vgraph.subgraph(name='cluster'+name) as subgraph: subgraph.attr(color='blue') cell_node = {} ioput = {'_inputs': '{}-{}'.format(name, '_'.join(graph['inputs'])), '_outputs': '{}-{}'.format(name, '_'.join(graph['outputs']))} subgraph.node(ioput['_inputs']) subgraph.node(ioput['_outputs']) for node_name, node_value in graph['nodes'].items(): value = node_value['operation'] if value['type'] == '_cell': cell_input_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['inputs'])) cell_output_name = '{}-{}'.format(value['cell_name'], '_'.join(graph_ir[value['cell_name']]['outputs'])) cell_node[node_name] = (cell_input_name, cell_output_name) print('cell: ', node_name, cell_input_name, cell_output_name) else: subgraph.node(node_name) for edge in graph['edges']: src = edge['head'][0] if src == '_inputs': src = ioput['_inputs'] elif src in cell_node: src = cell_node[src][1] dst = edge['tail'][0] if dst == '_outputs': dst = ioput['_outputs'] elif dst in cell_node: dst = cell_node[dst][0] subgraph.edge(src, dst) def visualize_model(graph_ir): vgraph = graphviz.Digraph('G', filename='vgraph', format='jpg') convert_to_visualize(graph_ir, vgraph) vgraph.render()
[ "graphviz.Digraph" ]
[((1710, 1764), 'graphviz.Digraph', 'graphviz.Digraph', (['"""G"""'], {'filename': '"""vgraph"""', 'format': '"""jpg"""'}), "('G', filename='vgraph', format='jpg')\n", (1726, 1764), False, 'import graphviz\n')]
import unittest class PrefixNotIncluded(unittest.TestCase): def test_not_included(self): pass if __name__ == '__main__': unittest.main()
[ "unittest.main" ]
[((139, 154), 'unittest.main', 'unittest.main', ([], {}), '()\n', (152, 154), False, 'import unittest\n')]
''' This is the class to create a scrolling background. Because the background was so large, it was made to be a .jpg. ''' import pygame, os class Background(pygame.sprite.Sprite): # Initialize the sprite. def __init__(self,disp): pygame.sprite.Sprite.__init__(self) self.image = pygame.image.load(os.path.join("images", "spacebackground.jpg")) self.image = self.image.convert() self.rect = self.image.get_rect() self.dx = 10 self.reset() # Constantly have the sprite move to the left. # If the right side of the image moves beyond the right side of the screen, reset the image. def update(self): self.rect.left -= self.dx if self.rect.right <= 800: self.reset() # Reset the image's left side to the left side of the screen. def reset(self): self.rect.left = 0
[ "pygame.sprite.Sprite.__init__", "os.path.join" ]
[((257, 292), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (286, 292), False, 'import pygame, os\n'), ((332, 377), 'os.path.join', 'os.path.join', (['"""images"""', '"""spacebackground.jpg"""'], {}), "('images', 'spacebackground.jpg')\n", (344, 377), False, 'import pygame, os\n')]
# -*- coding: utf-8 -*- """Linear module for dqn algorithms - Author: <NAME> - Contact: <EMAIL> """ import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from rl_algorithms.common.helper_functions import numpy2floattensor device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class NoisyLinear(nn.Module): """Noisy linear module for NoisyNet. References: https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb https://github.com/Kaixhin/Rainbow/blob/master/model.py Attributes: in_features (int): input size of linear module out_features (int): output size of linear module std_init (float): initial std value weight_mu (nn.Parameter): mean value weight parameter weight_sigma (nn.Parameter): std value weight parameter bias_mu (nn.Parameter): mean value bias parameter bias_sigma (nn.Parameter): std value bias parameter """ def __init__(self, in_features: int, out_features: int, std_init: float = 0.5): """Initialize.""" super(NoisyLinear, self).__init__() self.in_features = in_features self.out_features = out_features self.std_init = std_init self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features)) self.weight_sigma = nn.Parameter(torch.Tensor(out_features, in_features)) self.register_buffer("weight_epsilon", torch.Tensor(out_features, in_features)) self.bias_mu = nn.Parameter(torch.Tensor(out_features)) self.bias_sigma = nn.Parameter(torch.Tensor(out_features)) self.register_buffer("bias_epsilon", torch.Tensor(out_features)) self.reset_parameters() self.reset_noise() def reset_parameters(self): """Reset trainable network parameters (factorized gaussian noise).""" mu_range = 1 / math.sqrt(self.in_features) self.weight_mu.data.uniform_(-mu_range, mu_range) self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features)) self.bias_mu.data.uniform_(-mu_range, mu_range) self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features)) @staticmethod def scale_noise(size: int) -> torch.Tensor: """Set scale to make noise (factorized gaussian noise).""" x = numpy2floattensor(np.random.normal(loc=0.0, scale=1.0, size=size), device) return x.sign().mul(x.abs().sqrt()) def reset_noise(self): """Make new noise.""" epsilon_in = self.scale_noise(self.in_features) epsilon_out = self.scale_noise(self.out_features) # outer product self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in)) self.bias_epsilon.copy_(epsilon_out) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward method implementation. We don't use separate statements on train / eval mode. It doesn't show remarkable difference of performance. """ return F.linear( x, self.weight_mu + self.weight_sigma * self.weight_epsilon, self.bias_mu + self.bias_sigma * self.bias_epsilon, ) class NoisyLinearConstructor: """Constructor class for changing hyper parameters of NoisyLinear. Attributes: std_init (float): initial std value """ def __init__(self, std_init: float = 0.5): """Initialize.""" self.std_init = std_init def __call__(self, in_features: int, out_features: int) -> NoisyLinear: """Return NoisyLinear instance set hyper parameters""" return NoisyLinear(in_features, out_features, self.std_init) class NoisyMLPHandler: """Includes methods to handle noisy linear.""" def reset_noise(self): """Re-sample noise""" for _, module in self.named_children(): module.reset_noise()
[ "torch.nn.functional.linear", "numpy.random.normal", "math.sqrt", "torch.Tensor", "torch.cuda.is_available" ]
[((305, 330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (328, 330), False, 'import torch\n'), ((3051, 3177), 'torch.nn.functional.linear', 'F.linear', (['x', '(self.weight_mu + self.weight_sigma * self.weight_epsilon)', '(self.bias_mu + self.bias_sigma * self.bias_epsilon)'], {}), '(x, self.weight_mu + self.weight_sigma * self.weight_epsilon, self.\n bias_mu + self.bias_sigma * self.bias_epsilon)\n', (3059, 3177), True, 'import torch.nn.functional as F\n'), ((1313, 1352), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1325, 1352), False, 'import torch\n'), ((1395, 1434), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1407, 1434), False, 'import torch\n'), ((1483, 1522), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (1495, 1522), False, 'import torch\n'), ((1561, 1587), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1573, 1587), False, 'import torch\n'), ((1628, 1654), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1640, 1654), False, 'import torch\n'), ((1701, 1727), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), '(out_features)\n', (1713, 1727), False, 'import torch\n'), ((1923, 1950), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (1932, 1950), False, 'import math\n'), ((2392, 2439), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': 'size'}), '(loc=0.0, scale=1.0, size=size)\n', (2408, 2439), True, 'import numpy as np\n'), ((2062, 2089), 'math.sqrt', 'math.sqrt', (['self.in_features'], {}), '(self.in_features)\n', (2071, 2089), False, 'import math\n'), ((2198, 2226), 'math.sqrt', 'math.sqrt', (['self.out_features'], {}), '(self.out_features)\n', (2207, 2226), False, 'import math\n')]
# Advent of Code - 2015 - Day 7 # --- Day 7: Some Assembly Required --- # This year, Santa brought little Bobby Tables a set of wires and bitwise logic gates! Unfortunately, little Bobby is a little under the recommended age range, and he needs help assembling the circuit. # Each wire has an identifier (some lowercase letters) and can carry a 16-bit signal (a number from 0 to 65535). A signal is provided to each wire by a gate, another wire, or some specific value. Each wire can only get a signal from one source, but can provide its signal to multiple destinations. A gate provides no signal until all of its inputs have a signal. # The included instructions booklet describes how to connect the parts together: x AND y -> z means to connect wires x and y to an AND gate, and then connect its output to wire z. # For example: # 123 -> x means that the signal 123 is provided to wire x. # x AND y -> z means that the bitwise AND of wire x and wire y is provided to wire z. # p LSHIFT 2 -> q means that the value from wire p is left-shifted by 2 and then provided to wire q. # NOT e -> f means that the bitwise complement of the value from wire e is provided to wire f. # Other possible gates include OR (bitwise OR) and RSHIFT (right-shift). If, for some reason, you'd like to emulate the circuit instead, almost all programming languages (for example, C, JavaScript, or Python) provide operators for these gates. # For example, here is a simple circuit: # 123 -> x # 456 -> y # x AND y -> d # x OR y -> e # x LSHIFT 2 -> f # y RSHIFT 2 -> g # NOT x -> h # NOT y -> i # After it is run, these are the signals on the wires: # d: 72 # e: 507 # f: 492 # g: 114 # h: 65412 # i: 65079 # x: 123 # y: 456 # In little Bobby's kit's instructions booklet (provided as your puzzle input), what signal is ultimately provided to wire a? import time, math def createCircuitDict(): global circuitStrings global circuitDict # this function takes the string as input (circuitStrings) and converts them (parses them) into a dictionary (circuitDict) for circuitLine in circuitStrings: # the string "->" is the delimeter (sp?) between the left side (input) and the wire name (dictionary key) leftSide = circuitLine[0 : circuitLine.find("->") - 1] # if debug: # print("leftSide:", leftSide) rightSide = circuitLine[circuitLine.find("->") + 3 : ] # if debug: # print("rightSide:", rightSide) # we set the outputValue to nan (not a number) as a way of checking if we have successfully evaluated the wires inputs or not: default = nan, not evaluated outputValue = math.nan # check for numeric input string -- this is easy, just make it the output if leftSide.isnumeric(): leftSide = int(leftSide) outputValue = leftSide # simple -- the input to this wire is also it's output # check for duplicate wire names (dictionary keys) in the input string if circuitDict.get(rightSide) != None: print("Weird... dictionary key ", rightSide, "already exists. 
This shouldn't happen.") circuitDict[rightSide] = {"input" : leftSide, "output" : outputValue} def evaluateInput(circuit, operator): global circuitDict # if debug: # print(circuit, operator) # check left argument for circuit name or number inputWire1 = circuitDict[circuit]["input"][: circuitDict[circuit]["input"].find(operator) - 1] inputWire2 = circuitDict[circuit]["input"][circuitDict[circuit]["input"].find(operator) + len(operator) + 1 : ] # if debug: # print(circuit, "=", inputWire1, operator, inputWire2) # look up the output of the inputWire if inputWire1.isnumeric(): input1 = int(inputWire1) else: input1 = circuitDict[inputWire1]["output"] if inputWire2.isnumeric(): input2 = int(inputWire2) else: input2 = circuitDict[inputWire2]["output"] if math.isnan(input1): # print("input wire 1 isn't calculated yet") pass elif math.isnan(input2): # print("input wire 2 isn't calculated yet") pass else: # do the bitwise complement on the input number and assign it to the output of this wire if operator == "AND": circuitDict[circuit]["output"] = input1 & input2 elif operator == "OR": circuitDict[circuit]["output"] = input1 | input2 elif operator == "LSHIFT": circuitDict[circuit]["output"] = input1 << input2 elif operator == "RSHIFT": circuitDict[circuit]["output"] = input1 >> input2 else: print("Unknown operator", operator) # check for rollunder 0 # this occurs because we are using a signed integer for what should be an unsigned 16-bit integer # TODO figure out if Python has an unsigned 16-bit integer type if circuitDict[circuit]["output"] < 0: # if debug: # print("result under zero, fix it") circuitDict[circuit]["output"] = 65535 + circuitDict[circuit]["output"] def doConnection(): global circuitDict unfinishedCount = len(circuitDict) lowCount = unfinishedCount while unfinishedCount: unfinishedCount = len(circuitDict) if debug: print("lowCount", lowCount) for circuit in circuitDict: # if the output is not a number, evaluate the input if math.isnan(circuitDict[circuit]["output"]): # parse the left side # we can have NOT, AND, OR, LSHIFT, and RSHIFT as possible commands if "NOT" in circuitDict[circuit]["input"]: # operation is logical NOT, invert the input line to be the output inputWire1 = circuitDict[circuit]["input"][circuitDict[circuit]["input"].find("NOT")+4 : ] # if debug: # print(circuit, "= NOT", inputWire1) # look up the output of the inputWire if inputWire1.isnumeric(): input1 = int(inputWire1) else: input1 = circuitDict[inputWire1]["output"] if math.isnan(input1): # print("input wire isn't calculated yet") pass else: # do the bitwise complement on the input number and assign it to the output of this wire circuitDict[circuit]["output"] = ~input1 # check for rollunder 0 if circuitDict[circuit]["output"] < 0: # if debug: # print("result under zero, fix it") circuitDict[circuit]["output"] = 65536 + circuitDict[circuit]["output"] elif "AND" in circuitDict[circuit]["input"]: evaluateInput(circuit, "AND") elif "OR" in circuitDict[circuit]["input"]: evaluateInput(circuit, "OR") elif "LSHIFT" in circuitDict[circuit]["input"]: evaluateInput(circuit, "LSHIFT") elif "RSHIFT" in circuitDict[circuit]["input"]: evaluateInput(circuit, "RSHIFT") else: # simplest case -- one input only! 
# copy the input wire # this could be improved by doing it only if the inputWire is resolved inputWire1 = circuitDict[circuit]["input"] if debug: print("simplest case circuit", circuit, " inputWire", inputWire1) circuitDict[circuit]["output"] = circuitDict[inputWire1]["output"] else: # this circuit is done, move on # if debug: # print("circuit",circuit,"is done with output ", circuitDict[circuit]["output"], "Break.") pass if math.isnan(circuitDict[circuit]["output"]) is False: # this output is calculated, decrement the unfinished counter unfinishedCount -= 1 if unfinishedCount < lowCount: lowCount = unfinishedCount # if debug: # print("unfinishedCount", unfinishedCount) startTime = time.perf_counter() # time in seconds (float) debug = False timing = True unitTesting = False # maybe a dictionary again? # circuitStrings = {"a" : {"input" : 1, "output" : NaN}} # parse the input text file to set up the circuitStrings inputs, then just roll through the dictionary to calculate the outputs # how will I be sure that the output has been calculated to be the input for the next circuitStrings? # can I assume the input file is "in order"? Probably not. # does this mean some sort of recursion algorithm? # maybe if I populate the outputs with 'NaN' (or Python equivalent) then check that it's not that before using it's output # I can make it recurse through the inputs, calculating any that have fully realized inputs? circuitStrings = [] circuitDict = {} # unit tests, kind of if unitTesting: print("Unit Testing") circuitStrings = ["123 -> x","456 -> y", "x AND y -> d", "x OR y -> e", "x LSHIFT 2 -> f", "y RSHIFT 2 -> g", "NOT x -> h", "NOT y -> i"] else: # read the input text file into a variable called presents with open("2015/day7/input-part2.txt","r") as inputString: circuitStrings = inputString.readlines() # remove newlines for i in range(0, len(circuitStrings)): circuitStrings[i] = circuitStrings[i].rstrip() # parse the input to create the dictionary createCircuitDict() doConnection() # show the circuits if debug: for circuit in circuitDict: print(circuit,":",circuitDict[circuit]) if unitTesting: testPass = False testPassOutput = {"d": {"output" : 72}, "e": {"output" : 507}, "f": {"output" : 492}, "g": {"output" : 114}, "h": {"output" : 65412}, "i": {"output" : 65079}, "x": {"output" : 123}, "y": {"output" : 456}} for wire in testPassOutput: testPassWire = testPassOutput[wire]["output"] circuitWire = circuitDict[wire]["output"] if debug: print("wire", wire, "test:", testPassWire, "calc:", circuitWire) testPass = testPassWire == circuitWire if testPass is False: break print("testPass:", testPass) else: print(circuitDict["a"]["output"]) # this answer for my input is 46065 (part 1), 14134 (part 2) endTime = time.perf_counter() # time in seconds (float) if timing: print("Execution took ", endTime - startTime, " seconds.")
[ "time.perf_counter", "math.isnan" ]
[((8497, 8516), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8514, 8516), False, 'import time, math\n'), ((10704, 10723), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10721, 10723), False, 'import time, math\n'), ((4004, 4022), 'math.isnan', 'math.isnan', (['input1'], {}), '(input1)\n', (4014, 4022), False, 'import time, math\n'), ((4099, 4117), 'math.isnan', 'math.isnan', (['input2'], {}), '(input2)\n', (4109, 4117), False, 'import time, math\n'), ((5513, 5555), 'math.isnan', 'math.isnan', (["circuitDict[circuit]['output']"], {}), "(circuitDict[circuit]['output'])\n", (5523, 5555), False, 'import time, math\n'), ((8130, 8172), 'math.isnan', 'math.isnan', (["circuitDict[circuit]['output']"], {}), "(circuitDict[circuit]['output'])\n", (8140, 8172), False, 'import time, math\n'), ((6313, 6331), 'math.isnan', 'math.isnan', (['input1'], {}), '(input1)\n', (6323, 6331), False, 'import time, math\n')]
import logging import pytest from . import auth from hydroengine_service import dgds_functions logger = logging.getLogger(__name__) class TestDGDSFunctions: @pytest.mark.parametrize('source, start_date, end_date, limit', [ ('projects/dgds-gee/bathymetry/gebco/2019', None, None, 10), ('projects/dgds-gee/glossis/currents', None, None, None), ('projects/dgds-gee/glossis/waterlevel', '2020-11-01', '2020-12-01', None), ('projects/dgds-gee/glossis/wind', '2020-11-01', '2020-11-10', 10), ('projects/dgds-gee/glossis/waveheight', None, None, None), ('projects/dgds-gee/gloffis/weather', None, None, 5), ('projects/dgds-gee/gloffis/hydro', None, None, 5), ('projects/dgds-gee/metocean/waves/percentiles', None, None, 5), ('projects/dgds-gee/chasm/waves', None, None, None), ('projects/dgds-gee/chasm/wind', None, None, None), ('projects/dgds-gee/crucial/evaporation_deficit', None, None, None), ('projects/dgds-gee/crucial/groundwater_declining_trend', None, None, None), ('projects/dgds-gee/msfd/chlorophyll', None, None, None) ]) def test_get_image_collection_info(self, source, start_date, end_date, limit): image_date_list = dgds_functions.get_image_collection_info(source, start_date, end_date, limit) assert len(image_date_list) >= 1 assert "imageId" in image_date_list[0] assert "date" in image_date_list[0]
[ "logging.getLogger", "pytest.mark.parametrize", "hydroengine_service.dgds_functions.get_image_collection_info" ]
[((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging\n'), ((167, 1097), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""source, start_date, end_date, limit"""', "[('projects/dgds-gee/bathymetry/gebco/2019', None, None, 10), (\n 'projects/dgds-gee/glossis/currents', None, None, None), (\n 'projects/dgds-gee/glossis/waterlevel', '2020-11-01', '2020-12-01',\n None), ('projects/dgds-gee/glossis/wind', '2020-11-01', '2020-11-10', \n 10), ('projects/dgds-gee/glossis/waveheight', None, None, None), (\n 'projects/dgds-gee/gloffis/weather', None, None, 5), (\n 'projects/dgds-gee/gloffis/hydro', None, None, 5), (\n 'projects/dgds-gee/metocean/waves/percentiles', None, None, 5), (\n 'projects/dgds-gee/chasm/waves', None, None, None), (\n 'projects/dgds-gee/chasm/wind', None, None, None), (\n 'projects/dgds-gee/crucial/evaporation_deficit', None, None, None), (\n 'projects/dgds-gee/crucial/groundwater_declining_trend', None, None,\n None), ('projects/dgds-gee/msfd/chlorophyll', None, None, None)]"], {}), "('source, start_date, end_date, limit', [(\n 'projects/dgds-gee/bathymetry/gebco/2019', None, None, 10), (\n 'projects/dgds-gee/glossis/currents', None, None, None), (\n 'projects/dgds-gee/glossis/waterlevel', '2020-11-01', '2020-12-01',\n None), ('projects/dgds-gee/glossis/wind', '2020-11-01', '2020-11-10', \n 10), ('projects/dgds-gee/glossis/waveheight', None, None, None), (\n 'projects/dgds-gee/gloffis/weather', None, None, 5), (\n 'projects/dgds-gee/gloffis/hydro', None, None, 5), (\n 'projects/dgds-gee/metocean/waves/percentiles', None, None, 5), (\n 'projects/dgds-gee/chasm/waves', None, None, None), (\n 'projects/dgds-gee/chasm/wind', None, None, None), (\n 'projects/dgds-gee/crucial/evaporation_deficit', None, None, None), (\n 'projects/dgds-gee/crucial/groundwater_declining_trend', None, None,\n None), ('projects/dgds-gee/msfd/chlorophyll', None, None, None)])\n", (190, 1097), False, 'import pytest\n'), ((1633, 1710), 'hydroengine_service.dgds_functions.get_image_collection_info', 'dgds_functions.get_image_collection_info', (['source', 'start_date', 'end_date', 'limit'], {}), '(source, start_date, end_date, limit)\n', (1673, 1710), False, 'from hydroengine_service import dgds_functions\n')]
from tweepy import StreamListener, OAuthHandler, Stream from configs import Configs import sys class StdOutListener(StreamListener): def __init__(self, kafka_producer, topic): super().__init__() self.kafka_producer = kafka_producer self.topic = topic """ A listener handles tweets that are received from the stream. """ def on_data(self, data): self.kafka_producer.produce(topic=self.topic, value=data) print(data) return True def on_error(self, status): print(status) def exit_gracefully(kafka_producer): if kafka_producer is not None: kafka_producer.flush(30) print('kafka producer flushed') sys.exit(0) def create_twitter_client(kafka_producer, configs): listener = StdOutListener(kafka_producer, configs.kafka_topic) auth = OAuthHandler(configs.consumer_key, configs.consumer_secret) auth.set_access_token(configs.access_token_key, configs.access_token_secret) return Stream(auth, listener) def create_kafka_producer(): # https://www.confluent.io/blog/introduction-to-apache-kafka-for-python-programmers/ from confluent_kafka import Producer p = Producer({'bootstrap.servers': 'localhost:9092', 'acks': 'all', 'enable.idempotence': 'true', 'compression.type': 'snappy'}) return p configs = Configs() producer = None try: producer = create_kafka_producer() client = create_twitter_client(producer, configs) client.filter(track=configs.twitter_topics) finally: exit_gracefully(producer)
[ "tweepy.Stream", "configs.Configs", "confluent_kafka.Producer", "sys.exit", "tweepy.OAuthHandler" ]
[((1395, 1404), 'configs.Configs', 'Configs', ([], {}), '()\n', (1402, 1404), False, 'from configs import Configs\n'), ((701, 712), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (709, 712), False, 'import sys\n'), ((845, 904), 'tweepy.OAuthHandler', 'OAuthHandler', (['configs.consumer_key', 'configs.consumer_secret'], {}), '(configs.consumer_key, configs.consumer_secret)\n', (857, 904), False, 'from tweepy import StreamListener, OAuthHandler, Stream\n'), ((998, 1020), 'tweepy.Stream', 'Stream', (['auth', 'listener'], {}), '(auth, listener)\n', (1004, 1020), False, 'from tweepy import StreamListener, OAuthHandler, Stream\n'), ((1191, 1319), 'confluent_kafka.Producer', 'Producer', (["{'bootstrap.servers': 'localhost:9092', 'acks': 'all', 'enable.idempotence':\n 'true', 'compression.type': 'snappy'}"], {}), "({'bootstrap.servers': 'localhost:9092', 'acks': 'all',\n 'enable.idempotence': 'true', 'compression.type': 'snappy'})\n", (1199, 1319), False, 'from confluent_kafka import Producer\n')]
""" Stability analysis of the D2Q4 solver for the advection equation d_t(u) + c_x d_x(u) + c_y d_y(u) = 0 """ import sympy as sp import pylbm # pylint: disable=invalid-name # symbolic variables U, X, Y = sp.symbols('U, X, Y') # symbolic parameters LA, CX, CY = sp.symbols('lambda, cx, cy', constants=True) S_1, S_2 = sp.symbols('s1, s2', constants=True) # numerical parameters la = 1. # velocity of the scheme s_1, s_2 = 2., 1. # relaxation parameters c_x, c_y = 0.5, 0.25 # velocity of the advection equation dico = { 'dim': 2, 'scheme_velocity': LA, 'schemes': [ { 'velocities': [1, 2, 3, 4], 'conserved_moments': U, 'polynomials': [1, X, Y, X**2-Y**2], 'relaxation_parameters': [0, S_1, S_1, S_2], 'equilibrium': [ U, CX*U, CY*U, (CX**2-CY**2)*U ], }, ], 'parameters': { LA: la, S_1: s_1, S_2: s_2, CX: c_x, CY: c_y, }, 'relative_velocity': [CX, CY], } scheme = pylbm.Scheme(dico) stab = pylbm.Stability(scheme) stab.visualize({ 'parameters': { CX: { 'range': [0, 1], 'init': c_x, 'step': 0.01, }, CY: { 'range': [0, 1], 'init': c_y, 'step': 0.01, }, S_1: { 'name': r"$s_1$", 'range': [0, 2], 'init': s_1, 'step': 0.01, }, S_2: { 'name': r"$s_2$", 'range': [0, 2], 'init': s_2, 'step': 0.01, }, }, 'number_of_wave_vectors': 4096, })
[ "sympy.symbols", "pylbm.Stability", "pylbm.Scheme" ]
[((215, 236), 'sympy.symbols', 'sp.symbols', (['"""U, X, Y"""'], {}), "('U, X, Y')\n", (225, 236), True, 'import sympy as sp\n'), ((273, 317), 'sympy.symbols', 'sp.symbols', (['"""lambda, cx, cy"""'], {'constants': '(True)'}), "('lambda, cx, cy', constants=True)\n", (283, 317), True, 'import sympy as sp\n'), ((329, 365), 'sympy.symbols', 'sp.symbols', (['"""s1, s2"""'], {'constants': '(True)'}), "('s1, s2', constants=True)\n", (339, 365), True, 'import sympy as sp\n'), ((1102, 1120), 'pylbm.Scheme', 'pylbm.Scheme', (['dico'], {}), '(dico)\n', (1114, 1120), False, 'import pylbm\n'), ((1128, 1151), 'pylbm.Stability', 'pylbm.Stability', (['scheme'], {}), '(scheme)\n', (1143, 1151), False, 'import pylbm\n')]
from numpy import reshape def vec(x): return reshape(x, (-1,) + x.shape[2:], order="F") def unvec(x, shape): return reshape(x, shape, order="F")
[ "numpy.reshape" ]
[((51, 93), 'numpy.reshape', 'reshape', (['x', '((-1,) + x.shape[2:])'], {'order': '"""F"""'}), "(x, (-1,) + x.shape[2:], order='F')\n", (58, 93), False, 'from numpy import reshape\n'), ((128, 156), 'numpy.reshape', 'reshape', (['x', 'shape'], {'order': '"""F"""'}), "(x, shape, order='F')\n", (135, 156), False, 'from numpy import reshape\n')]
# -*- coding: utf-8 -*- # ******************************************************* # Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved. # SPDX-License-Identifier: MIT # ******************************************************* # * # * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT # * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN, # * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED # * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, # * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE. """Unit tests for the analysis engine""" import pytest from cbc_binary_toolkit import InitializationError from cbc_binary_toolkit.config import Config from cbc_binary_toolkit.engine import LocalEngineManager from cbc_binary_toolkit.schemas import EngineResponseSchema from tests.component.engine_fixtures.mock_engine import MockLocalEngine from tests.component.schema_fixtures.mock_data import VALID_BINARY_METADATA, MISSING_FIELDS_BINARY_METADATA ENGINE_NAME = "MockEngine" @pytest.fixture(scope="session") def config(): """Configuration for all the test cases in this module.""" return Config.load(f""" id: cbc_binary_toolkit version: 0.0.1 engine: name: {ENGINE_NAME} type: local _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory Test: TestPassed """) # ==================================== Unit TESTS BELOW ==================================== def test_create_engine(config): """Test successful creation of MockLocalEngine""" manager = LocalEngineManager(config) assert isinstance(manager.create_engine(), MockLocalEngine) def test_analyze(config): """Test analyze pass through""" manager = LocalEngineManager(config) assert EngineResponseSchema.validate(manager.analyze(VALID_BINARY_METADATA)) @pytest.mark.parametrize("input", [ MISSING_FIELDS_BINARY_METADATA, {} ]) def test_analyze_invalid_schema(config, input): """Test analyze pass through""" manager = LocalEngineManager(config) result = manager.analyze(input) if result["binary_hash"] is not None: result = EngineResponseSchema.validate(result) assert not result["success"] @pytest.mark.parametrize("engine_config, exception", [ [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: unknown num_threads: 1 _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory """, InitializationError], [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: local _provider: INVALID.INVALID """, ImportError], [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: local _provider: cbc_binary_toolkit.engine.LocalEngineFactory """, NotImplementedError], [f""" id: cbc_binary_toolkit version: 0.0.1 engine: name: {ENGINE_NAME} type: local _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory """, AssertionError] ]) def test_failed_init(engine_config, exception): """Test raised exceptions on init of LocalEngineManager""" config = Config.load(engine_config) with pytest.raises(exception): LocalEngineManager(config)
[ "cbc_binary_toolkit.schemas.EngineResponseSchema.validate", "pytest.mark.parametrize", "pytest.raises", "cbc_binary_toolkit.engine.LocalEngineManager", "pytest.fixture", "cbc_binary_toolkit.config.Config.load" ]
[((1051, 1082), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1065, 1082), False, 'import pytest\n'), ((1884, 1954), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input"""', '[MISSING_FIELDS_BINARY_METADATA, {}]'], {}), "('input', [MISSING_FIELDS_BINARY_METADATA, {}])\n", (1907, 1954), False, 'import pytest\n'), ((2260, 3186), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""engine_config, exception"""', '[[\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: unknown\n num_threads: 1\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n """\n , InitializationError], [\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: INVALID.INVALID\n """\n , ImportError], [\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: cbc_binary_toolkit.engine.LocalEngineFactory\n """\n , NotImplementedError], [\n f"""\n id: cbc_binary_toolkit\n version: 0.0.1\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n """\n , AssertionError]]'], {}), '(\'engine_config, exception\', [[\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: unknown\n num_threads: 1\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n """\n , InitializationError], [\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: INVALID.INVALID\n """\n , ImportError], [\n """\n id: cbc_binary_toolkit\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: cbc_binary_toolkit.engine.LocalEngineFactory\n """\n , NotImplementedError], [\n f"""\n id: cbc_binary_toolkit\n version: 0.0.1\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n """\n , AssertionError]])\n', (2283, 3186), False, 'import pytest\n'), ((1171, 1415), 'cbc_binary_toolkit.config.Config.load', 'Config.load', (['f"""\n id: cbc_binary_toolkit\n version: 0.0.1\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n Test: TestPassed\n """'], {}), '(\n f"""\n id: cbc_binary_toolkit\n version: 0.0.1\n engine:\n name: {ENGINE_NAME}\n type: local\n _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory\n Test: TestPassed\n """\n )\n', (1182, 1415), False, 'from cbc_binary_toolkit.config import Config\n'), ((1602, 1628), 'cbc_binary_toolkit.engine.LocalEngineManager', 'LocalEngineManager', (['config'], {}), '(config)\n', (1620, 1628), False, 'from cbc_binary_toolkit.engine import LocalEngineManager\n'), ((1772, 1798), 'cbc_binary_toolkit.engine.LocalEngineManager', 'LocalEngineManager', (['config'], {}), '(config)\n', (1790, 1798), False, 'from cbc_binary_toolkit.engine import LocalEngineManager\n'), ((2063, 2089), 'cbc_binary_toolkit.engine.LocalEngineManager', 'LocalEngineManager', (['config'], {}), '(config)\n', (2081, 2089), False, 'from cbc_binary_toolkit.engine import LocalEngineManager\n'), ((3289, 3315), 'cbc_binary_toolkit.config.Config.load', 'Config.load', (['engine_config'], {}), '(engine_config)\n', (3300, 3315), False, 'from cbc_binary_toolkit.config import Config\n'), ((2186, 2223), 'cbc_binary_toolkit.schemas.EngineResponseSchema.validate', 'EngineResponseSchema.validate', (['result'], {}), '(result)\n', (2215, 2223), False, 'from cbc_binary_toolkit.schemas 
import EngineResponseSchema\n'), ((3325, 3349), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (3338, 3349), False, 'import pytest\n'), ((3359, 3385), 'cbc_binary_toolkit.engine.LocalEngineManager', 'LocalEngineManager', (['config'], {}), '(config)\n', (3377, 3385), False, 'from cbc_binary_toolkit.engine import LocalEngineManager\n')]
from flask import Flask, render_template, request app = Flask(__name__) @app.route("/") def hello(): return render_template("index.html", name="WORLD!") @app.route("/about") def about(): return render_template("about.html")
[ "flask.render_template", "flask.Flask" ]
[((57, 72), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (62, 72), False, 'from flask import Flask, render_template, request\n'), ((112, 156), 'flask.render_template', 'render_template', (['"""index.html"""'], {'name': '"""WORLD!"""'}), "('index.html', name='WORLD!')\n", (127, 156), False, 'from flask import Flask, render_template, request\n'), ((201, 230), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (216, 230), False, 'from flask import Flask, render_template, request\n')]
import os
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split

__all__ = ['CatDogDataset', 'fetch_dataloader']


class CatDogDataset(Dataset):
    def __init__(self, file_paths, labels, transform=None):
        self.file_paths = file_paths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        label = self.labels[idx]
        file_path = self.file_paths[idx]
        image = cv2.imread(file_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.transform:
            transformed = self.transform(image=image)
            image = transformed["image"]
        return image, label


def fetch_dataloader(types, data_dir, batch_size, num_workers):
    dataloaders = {}
    train_dir = os.path.join(data_dir, "train")
    train_files = sorted(os.listdir(train_dir))
    train_labels = []
    for file in train_files:
        if "cat" in file:
            train_labels.append(0)
        else:
            train_labels.append(1)

    train_file_paths = [os.path.join(train_dir, path) for path in train_files]

    train_file_paths, val_file_paths, train_labels, val_labels = train_test_split(
        train_file_paths, train_labels, stratify=train_labels, random_state=42
    )

    train_transform = A.Compose([
        A.SmallestMaxSize(max_size=256),
        A.HorizontalFlip(p=0.5),
        A.RandomCrop(224, 224),
        A.Normalize(),
        ToTensorV2()
    ])
    eval_transform = A.Compose([
        A.SmallestMaxSize(max_size=256),
        A.CenterCrop(224, 224),
        A.Normalize(),
        ToTensorV2()
    ])

    for split in ['train', 'val', 'test']:
        if split in types:
            if split == 'train':
                dl = DataLoader(CatDogDataset(train_file_paths, train_labels, train_transform), batch_size, shuffle=True, num_workers=num_workers)
            elif split == "val":
                dl = DataLoader(CatDogDataset(val_file_paths, val_labels, eval_transform), batch_size, shuffle=False, num_workers=num_workers)
            else:
                # only the labelled training folder is available here, so no 'test'
                # loader can be built; skip unsupported splits instead of reusing a
                # stale (or unbound) DataLoader
                continue
            dataloaders[split] = dl

    return dataloaders
[ "albumentations.pytorch.ToTensorV2", "os.listdir", "sklearn.model_selection.train_test_split", "os.path.join", "albumentations.RandomCrop", "albumentations.Normalize", "cv2.cvtColor", "albumentations.CenterCrop", "albumentations.HorizontalFlip", "cv2.imread", "albumentations.SmallestMaxSize" ]
[((1004, 1035), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (1016, 1035), False, 'import os\n'), ((1391, 1483), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_file_paths', 'train_labels'], {'stratify': 'train_labels', 'random_state': '(42)'}), '(train_file_paths, train_labels, stratify=train_labels,\n random_state=42)\n', (1407, 1483), False, 'from sklearn.model_selection import train_test_split\n'), ((651, 672), 'cv2.imread', 'cv2.imread', (['file_path'], {}), '(file_path)\n', (661, 672), False, 'import cv2\n'), ((689, 727), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (701, 727), False, 'import cv2\n'), ((1061, 1082), 'os.listdir', 'os.listdir', (['train_dir'], {}), '(train_dir)\n', (1071, 1082), False, 'import os\n'), ((1270, 1299), 'os.path.join', 'os.path.join', (['train_dir', 'path'], {}), '(train_dir, path)\n', (1282, 1299), False, 'import os\n'), ((1537, 1568), 'albumentations.SmallestMaxSize', 'A.SmallestMaxSize', ([], {'max_size': '(256)'}), '(max_size=256)\n', (1554, 1568), True, 'import albumentations as A\n'), ((1578, 1601), 'albumentations.HorizontalFlip', 'A.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1594, 1601), True, 'import albumentations as A\n'), ((1611, 1633), 'albumentations.RandomCrop', 'A.RandomCrop', (['(224)', '(224)'], {}), '(224, 224)\n', (1623, 1633), True, 'import albumentations as A\n'), ((1643, 1656), 'albumentations.Normalize', 'A.Normalize', ([], {}), '()\n', (1654, 1656), True, 'import albumentations as A\n'), ((1666, 1678), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (1676, 1678), False, 'from albumentations.pytorch import ToTensorV2\n'), ((1728, 1759), 'albumentations.SmallestMaxSize', 'A.SmallestMaxSize', ([], {'max_size': '(256)'}), '(max_size=256)\n', (1745, 1759), True, 'import albumentations as A\n'), ((1769, 1791), 'albumentations.CenterCrop', 'A.CenterCrop', (['(224)', '(224)'], {}), '(224, 224)\n', (1781, 1791), True, 'import albumentations as A\n'), ((1801, 1814), 'albumentations.Normalize', 'A.Normalize', ([], {}), '()\n', (1812, 1814), True, 'import albumentations as A\n'), ((1824, 1836), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (1834, 1836), False, 'from albumentations.pytorch import ToTensorV2\n')]
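# Illustrative usage sketch for the fetch_dataloader helper defined above. The data
# directory layout ("<data_dir>/train" with "cat"/"dog" file names), batch size and
# worker count are assumptions for the example, not values from the original project.
# Note that only 'train' and 'val' loaders are produced from the labelled folder.
if __name__ == "__main__":
    loaders = fetch_dataloader(
        types=["train", "val"],   # requested splits
        data_dir="./data",        # assumed dataset root containing a "train" folder
        batch_size=32,
        num_workers=4,
    )
    images, labels = next(iter(loaders["train"]))
    print(images.shape, labels[:8])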
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 17 16:12:56 2020 @author: dylanroyston """ # import/configure packages import numpy as np import pandas as pd #import pyarrow as pa import librosa import librosa.display from pathlib import Path #import Ipython.display as ipd #import matplotlib.pyplot as plt from pyspark.sql import * import pyspark.sql.functions as f from pyspark import SparkConf, SparkContext, SQLContext import boto3 from tinytag import TinyTag as tt import soundfile as sf import audioread from pydub import AudioSegment from io import BytesIO #from io import BytesIO import os import sys import time import struct sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib") #import config time_seq = [] ##### # create local Spark instance (for non-cluster dev) sc = SparkContext('local') spark = SparkSession (sc) spark.conf.set("spark.sql.execution.arrow.enabled", "true") # define Spark config def spark_conf(): conf = SparkConf().setAppName("decompress_audio_files") sc = SparkContext(conf=conf) spark = SparkSession.builder.getOrCreate() return spark spark = spark_conf() spark.conf.set("spark.sql.execution.arrow.enabled", "true") ##### # Function to write spark-dataframe to mySQL def write_df_to_psql(df, tablename): psql_user = os.environ.get('PSQL_USR') psql_pwd = os.environ.get('PSQL_PWD') df.write.format('jdbc').options( url='jdbc:postgresql://10.0.0.6:5432/spectralize', dbtable=tablename, user=psql_user, #password=<PASSWORD>).mode('append').save() password=psql_pwd).save() ##### # function to read audio files from S3 bucket and extract tags def read_audio_files(): # basic initialization time_seq.append(['start-read-audio', time.time()]) # DataFrame schema File_Tags = Row("s3_key", "song_id", "album", "albumartist", "artist", "audio_offset", "bitrate", "channels", "comment", "composer", "disc", "disc_total", "duration", "filesize", "genre", "samplerate", "title", "track", "track_total", "year") spec_labels = [] for sn in range(0,128): spec_labels.append('spec' + str(sn+1)) spec_df_labels = ['song_id','timeseries'] + spec_labels Spec_Tags = Row(spec_df_labels) # configure S3 access s3_bucket = 'mdp-spectralize-pal' number_of_files = 0 s3 = boto3.resource('s3') boto_client = boto3.client('s3') bucket = s3.Bucket(s3_bucket) number_of_files=0 file_limit=100 #local_path = './local_file.' 
known_ext = [".mp3", ".wav", ".m4a"] #read each file from S3 bucket for obj in bucket.objects.all(): s3_key = obj.key audio_obj_stream = boto_client.get_object(Bucket=s3_bucket, Key=s3_key) audio_obj = BytesIO(audio_obj_stream['Body'].read()) song = bytes(audio_obj) song = sf.SoundFile(audio_obj) song = open(audio_obj, 'rb').read() song = audioread.audio_open(audio_obj) # extract tags from mp3 files #if "mp3" in s3_key: #if any(ext in s3_key for ext in known_ext): #print(number_of_files) #ext = s3_key[-4:] #local_path = './localfile' + ext number_of_files+=1 #bucket.download_file(s3_key, local_path) local_path = '/home/dylanroyston/Music/spectralize_data/01 Konoha Densetsu.mp3' song = open(local_path, 'rb').read() ##### tags tags = tt.get(local_path) tags = tt.get(audio_obj) # extract tags from tinytag object indiv_tags = (s3_key, number_of_files, tags.album, tags.albumartist, tags.artist, tags.audio_offset, tags.bitrate, tags.channels, tags.comment, tags.composer, tags.disc, tags.disc_total, tags.duration, tags.filesize, tags.genre, tags.samplerate, tags.title, tags.track, tags.track_total, tags.year) # convert tuple object to list indiv_tag_list = list(indiv_tags) indiv_tag_list = [str(i) for i in indiv_tag_list] tag_seq=[] tag_seq.append(indiv_tag_list) tags_pdf = pd.DataFrame(data=tag_seq) tag_df = spark.createDataFrame(tags_pdf, schema=File_Tags) ##### audio # load audio file with Librosa #y, sr = librosa.load(str(Path(local_path)), sr=None) y, sr = librosa.load(local_path, sr=None) # create indexing variables (song_id, timestamp) # song_id defined as "repeat(number_of_files)" song_num = pd.Series([number_of_files]) num_points = len(y) song_id = song_num.repeat(num_points) song_id = song_id.to_numpy() # timeseries defined as "1 : length(audio_data)" timeseries = np.arange(num_points) timeseries = timeseries.transpose() full_audio = {'song_id': song_id, 'timeseries': timeseries, 'intensity': y} # create combined dataframe audio_pdf = pd.DataFrame(data = full_audio) audio_df = spark.createDataFrame(audio_pdf) ##### spectral S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128, fmax=10000) log_S = librosa.power_to_db(S, ref=np.max) log_S = log_S.transpose() # song_id defined as "repeat(number_of_files)" song_num = pd.Series([number_of_files]) num_points = len(S.transpose()) song_id = song_num.repeat(num_points) song_id = song_id.to_numpy() # timeseries defined as "1 : length(audio_data)" timeseries = np.arange(num_points) timeseries = timeseries.transpose() full_index = {'song_id': song_id, 'timeseries': timeseries} index_pdf = pd.DataFrame(full_index) spec_pdf = pd.DataFrame(data=log_S, columns=spec_labels) full_spec = pd.concat([index_pdf, spec_pdf], axis=1) spec_df = spark.createDataFrame(full_spec) ##### write dataframes to psql write_df_to_psql(tag_df, 'clean_metadata') write_df_to_psql(audio_df, 'clean_audio') write_df_to_psql(spec_df, 'clean_spec') # stop process when file_limit is crossed (small batches) if (number_of_files >= file_limit): break ##### time_seq.append(['end read-file', time.time()]) #df_tags = spark.createDataFrame(tag_seq, schema=File_Tags) #df_audio = spark.createDataFrame(audio_seq) #df_spec = spark.createDataFrame(audio_seq, schema=Spec_Tags) # Additional run to #df_audio_data = spark.createDataFrame(file_audio_data) #process_df(df_audio_data) ##### if __name__ == '__main__': time_seq.append(['start', time.time()]) read_audio_files()
[ "pandas.Series", "librosa.feature.melspectrogram", "boto3.client", "audioread.audio_open", "os.environ.get", "os.path.abspath", "pyspark.SparkConf", "librosa.power_to_db", "boto3.resource", "tinytag.TinyTag.get", "pandas.concat", "time.time", "pandas.DataFrame", "pyspark.SparkContext", "soundfile.SoundFile", "numpy.arange", "librosa.load" ]
[((833, 854), 'pyspark.SparkContext', 'SparkContext', (['"""local"""'], {}), "('local')\n", (845, 854), False, 'from pyspark import SparkConf, SparkContext, SQLContext\n'), ((1048, 1071), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (1060, 1071), False, 'from pyspark import SparkConf, SparkContext, SQLContext\n'), ((1331, 1357), 'os.environ.get', 'os.environ.get', (['"""PSQL_USR"""'], {}), "('PSQL_USR')\n", (1345, 1357), False, 'import os\n'), ((1377, 1403), 'os.environ.get', 'os.environ.get', (['"""PSQL_PWD"""'], {}), "('PSQL_PWD')\n", (1391, 1403), False, 'import os\n'), ((2473, 2493), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (2487, 2493), False, 'import boto3\n'), ((2517, 2535), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2529, 2535), False, 'import boto3\n'), ((3060, 3083), 'soundfile.SoundFile', 'sf.SoundFile', (['audio_obj'], {}), '(audio_obj)\n', (3072, 3083), True, 'import soundfile as sf\n'), ((3162, 3193), 'audioread.audio_open', 'audioread.audio_open', (['audio_obj'], {}), '(audio_obj)\n', (3182, 3193), False, 'import audioread\n'), ((3770, 3788), 'tinytag.TinyTag.get', 'tt.get', (['local_path'], {}), '(local_path)\n', (3776, 3788), True, 'from tinytag import TinyTag as tt\n'), ((3804, 3821), 'tinytag.TinyTag.get', 'tt.get', (['audio_obj'], {}), '(audio_obj)\n', (3810, 3821), True, 'from tinytag import TinyTag as tt\n'), ((4539, 4565), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'tag_seq'}), '(data=tag_seq)\n', (4551, 4565), True, 'import pandas as pd\n'), ((4818, 4851), 'librosa.load', 'librosa.load', (['local_path'], {'sr': 'None'}), '(local_path, sr=None)\n', (4830, 4851), False, 'import librosa\n'), ((5002, 5030), 'pandas.Series', 'pd.Series', (['[number_of_files]'], {}), '([number_of_files])\n', (5011, 5030), True, 'import pandas as pd\n'), ((5229, 5250), 'numpy.arange', 'np.arange', (['num_points'], {}), '(num_points)\n', (5238, 5250), True, 'import numpy as np\n'), ((5501, 5530), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'full_audio'}), '(data=full_audio)\n', (5513, 5530), True, 'import pandas as pd\n'), ((5686, 5750), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {'sr': 'sr', 'n_mels': '(128)', 'fmax': '(10000)'}), '(y, sr=sr, n_mels=128, fmax=10000)\n', (5716, 5750), False, 'import librosa\n'), ((5767, 5801), 'librosa.power_to_db', 'librosa.power_to_db', (['S'], {'ref': 'np.max'}), '(S, ref=np.max)\n', (5786, 5801), False, 'import librosa\n'), ((5919, 5947), 'pandas.Series', 'pd.Series', (['[number_of_files]'], {}), '([number_of_files])\n', (5928, 5947), True, 'import pandas as pd\n'), ((6158, 6179), 'numpy.arange', 'np.arange', (['num_points'], {}), '(num_points)\n', (6167, 6179), True, 'import numpy as np\n'), ((6313, 6337), 'pandas.DataFrame', 'pd.DataFrame', (['full_index'], {}), '(full_index)\n', (6325, 6337), True, 'import pandas as pd\n'), ((6366, 6411), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'log_S', 'columns': 'spec_labels'}), '(data=log_S, columns=spec_labels)\n', (6378, 6411), True, 'import pandas as pd\n'), ((6441, 6481), 'pandas.concat', 'pd.concat', (['[index_pdf, spec_pdf]'], {'axis': '(1)'}), '([index_pdf, spec_pdf], axis=1)\n', (6450, 6481), True, 'import pandas as pd\n'), ((702, 727), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (717, 727), False, 'import os\n'), ((992, 1003), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (1001, 1003), False, 'from pyspark import SparkConf, 
SparkContext, SQLContext\n'), ((1816, 1827), 'time.time', 'time.time', ([], {}), '()\n', (1825, 1827), False, 'import time\n'), ((6953, 6964), 'time.time', 'time.time', ([], {}), '()\n', (6962, 6964), False, 'import time\n'), ((7337, 7348), 'time.time', 'time.time', ([], {}), '()\n', (7346, 7348), False, 'import time\n')]
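# Sketch of one self-contained way to fetch an audio object from S3 and decode it,
# similar to the download-and-decode step inside read_audio_files() above. The
# bucket/key names are placeholders, and spooling to a temporary file is an assumed
# approach (librosa/audioread decode most reliably from a real file path); it is not
# taken from the original script.
import os
import tempfile

import boto3
import librosa


def load_audio_from_s3(bucket_name, key):
    client = boto3.client("s3")
    body = client.get_object(Bucket=bucket_name, Key=key)["Body"].read()
    # keep the original extension so the decoder can pick the right backend
    suffix = os.path.splitext(key)[1] or ".mp3"
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
        tmp.write(body)
        tmp_path = tmp.name
    try:
        y, sr = librosa.load(tmp_path, sr=None)  # native sample rate
    finally:
        os.remove(tmp_path)
    return y, sr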
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


"""
The script prints the list of IP addresses of government organizations.

"""


import ipaddress
import sys

import requests


rs = requests.get('https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')

# Check that the request succeeded and returned the expected data
if not rs or not rs.json() or 'ranges' not in rs.json():
    print('Failed to get the list of IP addresses of government organizations')
    sys.exit()

# Get the items and sort them by organization name
items = sorted(rs.json()['ranges'].items(), key=lambda x: x[0])

ip_counter = 0

for i, (name, ip_network_list) in enumerate(items, 1):
    print(f'{i}. {name}')

    # IP ranges of the organization, with subnet masks
    for ip_network in ip_network_list:
        print(f'    {ip_network}:')

        # Build the IP network for the subnet
        net4 = ipaddress.ip_network(ip_network)

        # Iterate over the IP addresses of this organization
        for ip in net4.hosts():
            print(f'        {ip}')
            ip_counter += 1

    print()

print('Total IPs:', ip_counter)
[ "ipaddress.ip_network", "requests.get", "sys.exit" ]
[((191, 281), 'requests.get', 'requests.get', (['"""https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json"""'], {}), "(\n 'https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')\n", (203, 281), False, 'import requests\n'), ((461, 471), 'sys.exit', 'sys.exit', ([], {}), '()\n', (469, 471), False, 'import sys\n'), ((853, 885), 'ipaddress.ip_network', 'ipaddress.ip_network', (['ip_network'], {}), '(ip_network)\n', (873, 885), False, 'import ipaddress\n')]
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Misc helpers""" import math import random import re import signal import typing as t from datetime import datetime from enum import Enum from functools import reduce from inspect import isabstract from string import ascii_letters from subprocess import list2cmdline as _list2cmdline from typing import Mapping as Map import numpy as np from exot.exceptions import * __all__ = ( "call_with_leaves", "dict_depth", "dict_diff", "find_attributes", "flatten_dict", "get_concrete_subclasses", "get_subclasses", "get_valid_access_paths", "getitem", "has_method", "has_property", "has_type", "has_variable", "is_abstract", "is_scalar_numeric", "leaves", "list2cmdline", "map_to_leaves", "mro_getattr", "mro_hasattr", "random_string", "safe_eval", "sanitise_ansi", "setgetattr", "setitem", "stub_recursively", "unpack__all__", "validate_helper", "get_cores_and_schedules", ) """ Signatures ---------- call_with_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> None dict_depth :: (obj: Any, level: int = 0) -> int dict_diff :: (left: Mapping, right: Mapping) -> List[Dict] find_attributes :: (attr: str, klass: Any) -> List flatten_dict :: (obj: Mapping, sep: str = '.') -> Mapping get_concrete_subclasses :: (klass, recursive=True, derived=True) -> List get_subclasses :: (klass, recursive=True, derived=True) -> List get_valid_access_paths :: (obj: Mapping, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True) -> Generator getitem :: (obj: Mapping, query: Union[str, Tuple], *args: Any, sep: str = '/') -> Any has_method :: (klass: Union[type, object], name: str) -> bool has_property :: (klass: Union[type, object], name: str) -> bool has_type :: (klass: Union[type, object]) -> bool has_variable :: (klass: Union[type, object], name: str) -> bool is_abstract :: (klass: Union[type, object]) -> bool is_scalar_numeric :: (value: t.Any) -> bool map_to_leaves :: (function: Callable[[Any], Any], obj: ~T, 
_seq: bool = True) -> Any mro_getattr :: (cls: type, attr: str, *args: Any) -> Any mro_hasattr :: (cls: type, attr: str) -> bool random_string :: (length: int) -> str safe_eval :: (to_eval: str, expect: Tuple[type], timeout: int) -> object sanitise_ansi :: (value Union[List[str], str]) -> Union[List[str], str] setgetattr :: (klass: Union[type, object], attr: str, default: Any) -> None setitem :: (obj: MutableMapping, query: Tuple, value: Any) -> None stub_recursively :: (obj: ~T, stub: Any = None, _stub_list_elements: bool = True) -> Optional[~T] unpack__all__ :: (*imports: Collection[str]) -> Tuple[str] validate_helper :: (what: Mapping, key: Any, *types: type, msg: str = '') -> NoReturn """ def call_with_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> None: """Calls a function on leaves of an object A leaf is considered to be an object that is not a Mapping (or, when _seq is set, also not a Sequence except a string, which is also a Sequence). Args: function (t.Callable[[t.Any], t.Any]): The callable obj (t.T): The tree-like or sequence-like object _seq (bool, optional): Should sequences be considered?. Defaults to True. """ def inner(obj: t.T) -> t.Any: if isinstance(obj, Map): for v in obj.values(): inner(v) elif _seq and isinstance(obj, (t.List, t.Set)): for v in obj: inner(v) else: return function(obj) inner(obj) def dict_depth(obj: t.Any, level: int = 0) -> int: """Get maximum depth of a dict-like object Args: obj (t.Any): The dict-like object level (int): For internal use only. Defaults to 0. .. note:: The depth of a non-dict-like object is considered to be 0. An empty dict increases the depth if `_empty_increments` is True. Examples: >>> dict_depth(1) # returns 0 >>> dict_depth([1,2,3]) # returns 0 >>> dict_depth({1: 1, 2: 2}) # returns 1 >>> dict_depth({1: {2: {3: 3}}}) # returns 3 >>> dict_depth({1: {2: {3: {}}}}) # returns 4 """ if not isinstance(obj, Map) or not obj: return level return max(dict_depth(v, level + 1) for k, v in obj.items()) def dict_diff(left: Map, right: Map) -> t.List[t.Dict]: """Get the difference between 2 dict-like objects Args: left (Map): The left dict-like object right (Map): The right dict-like object The value returned is a list of dictionaries with keys ["path", "left", "right"] which contain the query path and the differences between the left and right mapping. If a key is missing in either mapping, it will be indicated as a "None". `math.nan` (not-a-number) is used for default values in the comparison because of the property: `math.nan != math.nan`. Simple None cannot be used, since it would not handle keys that both have a value of None. In general, this function might report false-positives for keys that contain the math.nan (or np.nan) value simply due to this property. There is no workaround available. 
""" left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False)) right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False)) return list( { "path": path, "left": getitem(left, path, math.nan), "right": getitem(right, path, math.nan), } for path in left_paths.union(right_paths) if getitem(left, path, math.nan) != getitem(right, path, math.nan) ) def find_attributes(klass: t.Any, attr: str) -> t.List: """Find attributes in any of a class'es bases Args: klass (t.Any): The type object attr (str): The attribute Returns: t.List: List of found instances of the attribute in the class hierarchy """ if not isinstance(attr, str): raise TypeError(attr) mro = klass.__mro__ if hasattr(klass, "__mro__") else type(klass).mro() return [attr for base in mro if hasattr(base, attr)] def flatten_dict(obj: Map, sep: str = ".") -> Map: """Flatten a dict to a 1-level dict combining keys with a separator Args: obj (Map): The dict-like object sep (str): The separator used when combining keys. Defaults to ".". Returns: Map: A flattened object of same type as 'obj'. .. warning:: Flattening will enforce all keys to be string-types! `reducer` is a function accepted by the functools.reduce function, which is of form: f(a, b) where _a_ is the accumulated value, and _b_ is the updated value from the iterable. The .items() function produces key-value tuple-pairs. These can be expanded with *, e.g. `*("a", "b")` will expand to `"a", "b"`. This property is used to expand the `kv_pair` below. Example walkthrough on `flatten_dict({'a': 1, 'b': {'c': {'d': 2}}})`: :: `outer` <- obj: {'a': 1, 'b': {'c': {'d': 2}}}, prefix: '' `reducer` <- key: 'a', value: 1 `inner` <- acc: {}, key: 'a', value: 1, prefix: '' `inner` -> {'a': 1} `reducer` -> {'a': 1} `reducer` <- key: 'b', value: {'c': {'d': 2}} `inner` <- acc: {'a': 1}, key: 'b', value: {'c': {'d': 2}}, prefix: '' `outer` <- obj: {'c': {'d': 2}}, prefix: 'b.' `reducer` <- key: 'c', value: {'d': 2} `inner` <- acc: {}, key: 'c', value: {'d': 2}, prefix: 'b.' `outer` <- obj: {'d': 2}, prefix: 'b.c.' `reducer` <- key: 'd', value: 2 `inner` <- acc: {}, key: 'd', value: 2, prefix: 'b.c.' `inner` -> {'b.c.d': 2} `reducer` -> {'b.c.d': 2} `outer` -> {'b.c.d': 2} `inner` -> {'b.c.d': 2} `reducer` -> {'b.c.d': 2} `outer` -> {'b.c.d': 2} `inner` -> {'a': 1, 'b.c.d': 2} `reducer` -> {'a': 1, 'b.c.d': 2} `outer` -> {'a': 1, 'b.c.d': 2} """ if not isinstance(obj, Map): raise TypeError("flatten_dict works only on dict-like types", type(obj)) _t = type(obj) def outer(obj: Map, prefix: str) -> Map: def reducer(accumulator: Map, kv_pair: t.Tuple): return inner(accumulator, *kv_pair, prefix) return reduce(reducer, obj.items(), _t()) def inner(accumulator: Map, key: str, value: t.Any, prefix: str) -> Map: if isinstance(value, Map): return _t(**accumulator, **outer(value, prefix + key + sep)) else: return _t(**accumulator, **_t({prefix + key: value})) return outer(obj, "") def expand_dict(obj: Map, sep: str = ".") -> Map: """Expands a flattened mapping by splitting keys with the given separator Args: obj (Map): The flattened dict-like object to unflatten sep (str, optional): The key separator Raises: TypeError: If wrong type is supplied ValueError: If a non-flat dict is supplied Returns: Map: The expanded mapping object of same type as 'obj'. 
Example: >>> d = {'a': 1, 'b': 2, 'c.ca': 1, 'c.cb': 2} >>> expand_dict(d) {'a': 1, 'b': 2, 'c': {'ca': 1, 'cb': 2}} """ if not isinstance(obj, Map): raise TypeError("expand_dict works only on dict-like types", type(obj)) if dict_depth(obj) != 1: raise ValueError( "expand_dict works only on flat dict-like types, " "got a mapping of depth: {}".format(dict_depth(obj)) ) def inner(obj): accumulator = type(obj)() for k, v in obj.items(): *head, last = k.split(sep) _ = accumulator # Create missing paths for part in head: if part not in _: _[part] = type(obj)() _ = _[part] _[last] = v return accumulator return inner(obj) def get_concrete_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List: """Get a list of non-abstract subclasses of a type Args: klass (t.Type): The type object recursive (bool): Should the classes be extracted recursively? Defaults to True. derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True] Returns: t.List: A list of concrete subclasses of the type """ from exot.util.mixins import _SubclassTracker as __ if derived and hasattr(klass, __.concrete): return list(getattr(klass, __.concrete)) subclasses = get_subclasses(klass, recursive=recursive) return [k for k in subclasses if not isabstract(k)] def get_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List: """Get a list of subclasses of a type Args: klass (t.Type): The type object recursive (bool): Should the classes be extracted recursively? Defaults to True. derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True] Returns: t.List: A list of concrete subclasses of the type """ from exot.util.mixins import _SubclassTracker as __ if not (hasattr(klass, "__subclasses__") or hasattr(klass, __.derived)): raise TypeError(f"__subclasses__ or {__.derived} attribute missing", klass) if derived: return list(getattr(klass, __.derived)) subclasses = klass.__subclasses__() def walker(k): first, *rest = k if len(rest): walker(rest) if first not in subclasses: subclasses.append(first) if hasattr(first, "__subclasses__"): _ = first.__subclasses__() if len(_): walker(_) if recursive: walker(subclasses) return subclasses def get_valid_access_paths( obj: Map, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True, ) -> t.Generator[t.Tuple, None, None]: """Generate valid key sequences in a dict, optionally including lists Args: obj (Map): The dict-like object _limit (int): Maximum number of paths that can be created with list-like elements. _leaf_only (bool): Provide paths for only the leaves of the mapping. Defaults to True. _use_lists (bool): Provide paths for list-like elements in the mapping. Defaults to True. _fallthrough_empty (bool): Discard empty list- or dict-like elements? Defaults to True. Details: If `_leaf_only` is set, only paths to leaves will be produced, a leaf being a value that is not a mapping (or list). If `_use_lists` is set, lists will also be *recursively* checked for valid paths. if `_fallthrough_empty` is set, an empty dict or list will yield an empty tuple, rendering a parent path. Returns: t.Generator[t.Tuple,None,None]: A generator that yields the access paths (tuples). 
Examples: >>> # Only leaves: >>> d = {'a1': {'a2': None}, 'b2': None} >>> list(get_valid_access_paths(d, _leaf_only=True)) [('a1', 'a2'), ('b2',)] >>> # All paths: >>> list(get_valid_access_paths(d, _leaf_only=False)) [('a1',), ('a1', 'a2'), ('b2',)] """ def thrower(o: object, t: type, n: str) -> t.NoReturn: if not isinstance(o, t): raise TypeError( f"get_valid_access_paths expected {t!r} for {n!r}, got: {type(o)!r}" ) thrower(obj, Map, "obj") thrower(_limit, int, "_limit") thrower(_leaf_only, bool, "_leaf_only") thrower(_use_lists, bool, "_use_lists") thrower(_fallthrough_empty, bool, "_fallthrough_empty") def inner(obj: t.Union[Map, t.List, t.Set]) -> t.Generator: if _fallthrough_empty and not obj: yield tuple() # if obj is a mapping if isinstance(obj, Map): for k, v in obj.items(): # if the value in obj is also a mapping... if isinstance(v, Map): if not _leaf_only: yield (k,) # ... make a recursive call for vv in inner(v): yield (k,) + vv # if the value in obj is a list... elif _use_lists and isinstance(v, (t.List, t.Set)): # ... first yield the valid path to the key containing the list if v and not _leaf_only: yield (k,) elif not v and _fallthrough_empty: yield (k,) # ... loop through elements, and keep track of indexes for idx, vv in enumerate(v): # if an element is also a mapping or list... if isinstance(vv, (Map, (t.List, t.Set))): # ... make a recursive call for vvv in inner(vv): yield (k,) + (idx,) + vvv else: # ... otherwise yield keypath + idx yield (k,) + (idx,) # if the value is neither a mapping nor a list, yield the key else: yield (k,) # if obj is a list-like sequence if _use_lists and isinstance(obj, (t.List, t.Set)): # might be tricky to generate valid sequences for large lists! if _limit and len(obj) >= _limit: raise ValueError( f"get_key_sequences list limit of {_limit} exceeded: {len(obj)}" ) for idx, v in enumerate(obj): if isinstance(v, (Map, (t.List, t.Set))): for vv in inner(v): yield (idx,) + vv else: yield (idx,) return inner(obj) def getitem(obj: Map, query: t.Union[str, t.Tuple], *args: t.Any, sep: str = "/") -> t.Any: """Get a value from a dict-like object using an XPath-like query, or a tuple-path Accesses an object that provides a dict-like interface using a query: either a tuple representing the path, or a string where consecutive keys are separated with a separator, e.g. "key1/key2". Returns the value of the object at the given key-sequence. Returns a default value if provided, or throws a LookupError. Args: obj (Map): a mapping query (t.Union[str, t.Tuple]): a query path using a separated string or a tuple *args (t.Any): an optional default value, similar to `getattr` sep (str, optional): a separator string used to split a string query path Returns: t.Any: the value stored in obj for the given query, or the default value Raises: LookupError: if query not found and no default value is provided TypeError: if obj is not a mapping, or query is not a str or tuple """ if not isinstance(obj, Map): raise TypeError("'obj' must be an instance of Mapping, e.g. 
dict", type(obj)) if not isinstance(query, (str, t.Tuple)): raise TypeError("'query' must be a str or a tuple", type(query)) if len(args) > 1: raise TypeError(f"getitem accepts at most 3 positional args, got {len(args)}") _obj = obj # handler for tuple queries if isinstance(query, t.Tuple): _valid = get_valid_access_paths(obj) if query not in _valid: if args: return args[0] else: raise LookupError(f"query {query!r} not found") else: for node in query: _obj = _obj[node] return _obj # handler for string queries else: try: # loop through components in the query, consecutively accessing the mapping for node in query.split(sep): # handle empty nodes in the query, e.g. when query="a///b" -> "a/b" if not node: continue if isinstance(_obj, Map): for k in _obj.keys(): node = type(k)(node) if str(k) == node else node elif isinstance(_obj, (t.List, t.Set)): try: node = int(node) except TypeError: raise LookupError( f"{node} not convertible to int when attempting to access " f"a list {_obj!r}" ) _obj = _obj[node] return _obj except LookupError as Error: if args: return args[0] else: Error.args += (query,) raise def has_method(klass: t.Union[type, object], name: str) -> bool: """Check if a method exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the method Returns: bool: True if has a method with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_callable(c): return isinstance(getattr(klass, str(c), None), t.Callable) return all(is_callable(f) for f in candidates) def has_property(klass: t.Union[type, object], name: str) -> bool: """Check if a variable exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the property Returns: bool: True if has a property with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_property(c): return not isinstance(getattr(klass, str(c), None), property) return all(is_property(f) for f in candidates) def has_type(klass: t.Union[type, object]) -> bool: """Check if a type or instance has a Type member type that derives from Enum Args: klass (t.Union[type, object]): The type or object Returns: bool: True if has the "Type" attribute. """ if not isinstance(klass, (type, object)): raise TypeError(klass) return issubclass(getattr(klass, "Type", type(None)), Enum) def has_variable(klass: t.Union[type, object], name: str) -> bool: """Check if a variable exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the variable Returns: bool: True if has a variable with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_not_callable(c): return not isinstance(getattr(klass, str(c), None), t.Callable) return all(is_not_callable(f) for f in candidates) def is_abstract(klass: t.Union[type, object]) -> bool: """Check if a type or instance is abstract Args: klass (t.Union[type, object]): The type or object Returns: bool: True if the type/instance is abstract. """ if not isinstance(klass, (type, object)): raise TypeError(klass) if hasattr(klass, "__abstractmethods__"): return 0 != len(getattr(klass, "__abstractmethods__")) else: from inspect import isabstract return isabstract(klass) def is_scalar_numeric(value: t.Any) -> bool: """Check if is an int, a float, or a NumPy variant thereof Args: value (t.Any): The value to inspect Returns: bool: True if scalar and numeric. 
""" return isinstance(value, (float, int, np.integer, np.floating)) def leaves(obj: Map) -> t.Generator: """Get leaves of a mapping Args: obj (Map): The dict-like object Returns: t.Generator: A generator that yields the leaf elements of the mapping. """ paths = get_valid_access_paths(obj, _leaf_only=True, _use_lists=False) return (getitem(obj, path) for path in paths) def list2cmdline(seq: t.Iterable) -> str: """Translates a sequence of arguments into a command line string with "None" removal Args: seq (t.Iterable): The sequence of arguments Returns: str: The command-line string """ seq = [_ for _ in seq if _ is not None] return _list2cmdline(seq) def map_to_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> t.Any: """Map a function to leaves of an object A leaf is considered to be an object that is not a Mapping (or, when _seq is set, also not a Sequence except a string, which is also a Sequence). Args: function (t.Callable[[t.Any], t.Any]): a function or signatude "a -> a" obj (t.T): a dict-like, list-like, or plain object _seq (bool, optional): map on elements of lists? Returns: t.T: the obj with transformed elements """ def inner(obj: t.T) -> t.Any: if isinstance(obj, Map): return type(obj)({k: inner(v) for k, v in obj.items()}) elif _seq and isinstance(obj, (t.List, t.Set)): return type(obj)(inner(v) for v in obj) else: return function(obj) return inner(obj) def mro_getattr(cls: type, attr: str, *args: t.Any) -> t.Any: """Get an attribute from a type's class hierarchy Args: cls (type): The type attr (str): The attribute *args (t.Any): The default value (like in Python's default getattr) Returns: t.Any: The attribute, or when not found the default value (if provided) Raises: TypeError: Not called on a type TypeError: Wrong number of arguments AttributeError: Attribute not found and no default value provided """ if not isinstance(cls, type): raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}") if len(args) > 1: raise TypeError(f"mro_getattr expected at most 3 arguments, got {2 + len(args)}") for klass in cls.mro()[1:]: if hasattr(klass, attr): # return first matching attribute return getattr(klass, attr) if args: # if provided, return args[0], i.e. the a default value return args[0] else: raise AttributeError(f"type object {cls.__name__!r} has not attribute {attr!r}") def mro_hasattr(cls: type, attr: str) -> bool: """Check if an attribute exists in a type's class hierarchy Args: cls (type): The type attr (str): The attribute Returns: bool: True if has the attribute. 
Raises: TypeError: Not called on a type """ if not isinstance(cls, type): raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}") for klass in cls.mro()[1:]: if hasattr(klass, attr): return True return False def random_string(length: int) -> str: """Make a random string of specified length Args: length (int): The desired random string length Returns: str: The random string """ assert isinstance(length, int), f"'length' must be an int, got: {type(length)}" return "".join(random.choices(ascii_letters, k=length)) def timestamp() -> str: """Make a timestamp with current time Returns: str: The timestamp in ISO format """ return datetime.now().isoformat("_", timespec="seconds").replace(":", "-") def safe_eval( to_eval: str, *, expect: t.Tuple[type] = (list, np.ndarray), timeout: int = 10 ) -> object: """Evaluate a restricted subset of Python (and numpy) from a string Args: to_eval (str): The string to evaluate expect (t.Tuple[type]): The list of expected resulting types. Defaults to list, ndarray. timeout (int): The timeout after which the call fails in seconds. Defaults to 10. The `safe_eval` function allows using a subset of commands, listed in `_globals` and `_locals`, which includes a few numpy functions: linspace, arange, array, rand, and randint. Examples: >>> safe_eval("linspace(1, 10, 10, dtype=int).tolist()") [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> safe_eval("__import__('os').getcwd()") NameError Traceback (most recent call last) ... NameError: name '__import__' is not defined >>> safe_eval("range(5)") TypeError Traceback (most recent call last) ... TypeError: eval produced a <class 'range'>, expected: (<class 'list'>, <class 'numpy.ndarray'>) >>> safe_eval("list(round(rand(), 2) for _ in range(5))") [0.96, 0.41, 0.9, 0.98, 0.02] """ assert isinstance(to_eval, str), "'to_eval' must be a str" assert isinstance(expect, tuple), "'expect' must be a tuple" assert all(isinstance(_, type) for _ in expect), "'expect' must contain only types" _locals = {} _globals = { "__builtins__": {}, "list": list, "range": range, "len": len, "int": int, "float": float, "min": min, "max": max, "round": round, "linspace": np.linspace, "geomspace": np.geomspace, "logspace": np.logspace, "hstack": np.hstack, "vstack": np.vstack, "split": np.split, "arange": np.arange, "array": np.array, "rand": np.random.rand, "randint": np.random.randint, } class AlarmException(Exception): pass def signal_handler(number: int, frame): assert number == signal.SIGALRM.value raise AlarmException() signal.signal(signal.SIGALRM, signal_handler) signal.alarm(timeout) try: _ = eval(to_eval, _globals, _locals) except AlarmException: raise TimeoutError(f"safe_eval took longer than {timeout} seconds") else: signal.signal(signal.SIGALRM, signal.SIG_IGN) signal.alarm(0) if not isinstance(_, expect): raise EvalTypeError(f"eval produced a {type(_)}, expected: {expect}") return _ def sanitise_ansi(value: t.Union[t.List[str], str]) -> t.Union[t.List[str], str]: """Remove all ANSI escape characters from a str or a list of str Args: value (t.Union[t.List[str], str]): The string or list of strings Returns: t.Union[t.List[str], str]: The sanitised string or a list of sanitised strings """ _ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]") if isinstance(value, str): return _ansi_escape.sub("", value) elif isinstance(value, t.List): return list(map(lambda x: _ansi_escape.sub("", x).strip(), value)) else: raise TypeError("sanitise_ansi accepts only str or lists of str") def setgetattr(klass: t.Union[type, object], attr: str, 
default: t.Any) -> None: """Combines `setattr` and `getattr` to set attributes Args: klass (t.Union[type, object]): The type or object attr (str): The attribute default (t.Any): The default value """ if not any([isinstance(klass, type), isinstance(klass, object)]): raise TypeError("'klass' should be a type or an object", klass) if not isinstance(attr, str): raise TypeError("'attr' should be a str") if not attr: raise ValueError("'attr' should not be empty") setattr(klass, attr, getattr(klass, attr, default)) def setitem(obj: t.MutableMapping, query: t.Tuple, value: t.Any, force: bool = False) -> None: """Set a value in a dict-like object using a tuple-path query Args: obj (t.MutableMapping): a mutable mapping query (t.Tuple): a query path as a tuple value (t.Any): value to set Raises: TypeError: if obj is not a mutable mapping """ if not isinstance(obj, t.MutableMapping): raise TypeError("'obj' needs to be a mutable mapping", type(obj)) _obj = obj _valid = get_valid_access_paths(obj) if query not in _valid: if not force: raise KeyError(f"query-path {query!r} not found") else: for node in query[:-1]: if node not in _obj: _obj = dict() _obj = _obj[node] else: for node in query[:-1]: _obj = _obj[node] _obj[query[-1]] = value def stub_recursively( obj: t.T, stub: t.Any = None, _stub_list_elements: bool = True ) -> t.Optional[t.T]: """Produce a copy with all leaf values recursively set to a 'stub' value Args: obj (t.T): the object to stub stub (t.Any, optional): the value to set the leaf elements to _stub_list_elements (bool, optional): stub individual elements in collections? Returns: (t.T, optional): the stubbed object """ def inner(obj): if isinstance(obj, Map): return type(obj)((k, inner(v)) for k, v in obj.items()) elif _stub_list_elements and isinstance(obj, (t.List, t.Set)): return type(obj)(inner(v) for v in obj) else: return stub return inner(obj) def unpack__all__(*imports: t.Collection[str]) -> t.Tuple[str]: """Upacks a list of lists/tuples into a 1-dimensional list Args: *imports (t.Collection[str]): The collections of strings in "__all__" Returns: t.Tuple[str]: The flattened imports as a tuple of strings. """ from itertools import chain _name = f"{__name__}.unpack__all__" if not all(isinstance(e, (t.List, t.Tuple)) for e in imports): raise TypeError(f"{_name}: arguments should be lists or tuples") _ = chain(*imports) assert all( issubclass(type(e), str) for e in _ ), f"{_name}: values in unpacked containers were not scalar or 'str'" return tuple(_) def validate_helper(what: t.Mapping, key: t.Any, *types: type, msg: str = "") -> t.NoReturn: """Validate types of key in a mapping using key-paths Args: what (t.Mapping): The mapping key (t.Any): The key *types (type): The valid types msg (str): An additional error message. Defaults to "". """ if not isinstance(what, t.Mapping): raise TypeError(f"validate_helper works only on mappings, got {type(what)}") if not types: raise TypeError(f"validate helper expects at least 1 'types' argument") if isinstance(key, str) or not isinstance(key, t.Iterable): key = tuple([key]) elif not isinstance(key, tuple): key = tuple(key) # The `config` property setter guarantees that `config` is a fully # mutated AttributeDict, therefore :meth:`getattr` can be used. 
if not isinstance(getitem(what, key, None), types): raise MisconfiguredError( "{0}config key: '{1!s}' should be of type {2!r}, got {3!s}".format( f"{msg} " if msg else "", key, types, type(getitem(what, key, None)) ) ) def get_cores_and_schedules(environments_apps_zones: t.Mapping) -> set: e_a_z = environments_apps_zones _cores_and_schedules = set() for env in e_a_z: for app in e_a_z[env]: if app != "src": continue _path_to_cores = ("app_config", "generator", "cores") _path_to_schedule_tag = ("zone_config", "schedule_tag") access_paths = list(get_valid_access_paths(e_a_z[env][app])) if _path_to_cores not in access_paths: raise LayerMisconfigured( f"{env!r}->{app!r} must have a 'generator.cores' config key" ) if _path_to_schedule_tag not in access_paths: _ = e_a_z[env][app]["zone"] raise LayerMisconfigured( f"{env!r}.{_!r} of app {app!r} must have a schedule_tag" ) _cores_and_schedules.add( ( len(getitem(e_a_z[env][app], _path_to_cores)), getitem(e_a_z[env][app], _path_to_schedule_tag), ) ) return _cores_and_schedules
[ "itertools.chain", "signal.signal", "inspect.isabstract", "subprocess.list2cmdline", "re.compile", "datetime.datetime.now", "random.choices", "signal.alarm" ]
[((24516, 24534), 'subprocess.list2cmdline', '_list2cmdline', (['seq'], {}), '(seq)\n', (24529, 24534), True, 'from subprocess import list2cmdline as _list2cmdline\n'), ((29757, 29802), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'signal_handler'], {}), '(signal.SIGALRM, signal_handler)\n', (29770, 29802), False, 'import signal\n'), ((29807, 29828), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (29819, 29828), False, 'import signal\n'), ((30567, 30616), 're.compile', 're.compile', (['"""(\\\\x9B|\\\\x1B\\\\[)[0-?]*[ -\\\\/]*[@-~]"""'], {}), "('(\\\\x9B|\\\\x1B\\\\[)[0-?]*[ -\\\\/]*[@-~]')\n", (30577, 30616), False, 'import re\n'), ((33760, 33775), 'itertools.chain', 'chain', (['*imports'], {}), '(*imports)\n', (33765, 33775), False, 'from itertools import chain\n'), ((23543, 23560), 'inspect.isabstract', 'isabstract', (['klass'], {}), '(klass)\n', (23553, 23560), False, 'from inspect import isabstract\n'), ((27386, 27425), 'random.choices', 'random.choices', (['ascii_letters'], {'k': 'length'}), '(ascii_letters, k=length)\n', (27400, 27425), False, 'import random\n'), ((30005, 30050), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'signal.SIG_IGN'], {}), '(signal.SIGALRM, signal.SIG_IGN)\n', (30018, 30050), False, 'import signal\n'), ((30059, 30074), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (30071, 30074), False, 'import signal\n'), ((12778, 12791), 'inspect.isabstract', 'isabstract', (['k'], {}), '(k)\n', (12788, 12791), False, 'from inspect import isabstract\n'), ((27569, 27583), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27581, 27583), False, 'from datetime import datetime\n')]
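# Small, self-contained examples for the mapping helpers defined above
# (flatten_dict / expand_dict / getitem / setitem). The dictionaries are invented for
# illustration; expected results are shown in comments.
config = {"engine": {"name": "local", "threads": 4}, "debug": False}

flat = flatten_dict(config)    # {'engine.name': 'local', 'engine.threads': 4, 'debug': False}
nested = expand_dict(flat)     # round-trips back to the original nested structure

threads = getitem(config, "engine/threads")        # 4, XPath-like string query
name = getitem(config, ("engine", "name"))         # 'local', tuple query
missing = getitem(config, "engine/workers", None)  # None, default instead of LookupError

setitem(config, ("engine", "threads"), 8)          # in-place update of an existing path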
from django.contrib.auth import get_user_model from rest_auth.registration.serializers import ( RegisterSerializer as BaseRegisterSerializer, ) from rest_auth.registration.serializers import ( SocialLoginSerializer as BaseSocialLoginSerializer, ) from rest_auth.serializers import LoginSerializer as BaseLoginSerializer from rest_auth.serializers import ( PasswordResetConfirmSerializer as BasePasswordResetConfirmSerializer, ) from rest_auth.serializers import UserDetailsSerializer as BaseUserDetailsSerializer from rest_framework import serializers from rest_framework.exceptions import ValidationError from core.models import Profile # noinspection PyAbstractClass class LoginSerializer(BaseLoginSerializer): """ Extends the default LoginSerializer in order to return custom error messages """ def validate(self, attrs): try: return super().validate(attrs) except serializers.ValidationError as ex: ex.detail = "The email or password you entered is incorrect!" raise ex # noinspection PyAbstractClass class PasswordResetConfirmSerializer(BasePasswordResetConfirmSerializer): """ Extends the default PasswordResetConfirmSerializer in order to return custom error messages """ def validate(self, attrs): try: return super().validate(attrs) except serializers.ValidationError as ex: if "new_password2" in ex.detail: ex.detail = ex.detail["new_password2"][0] else: ex.detail = "Could not reset password. Reset token expired or invalid." raise ex # noinspection PyAbstractClass class CustomSocialLoginSerializer(BaseSocialLoginSerializer): """ Extends default SocialLoginSerializer to add additional details to some failed login attempts """ def validate(self, attrs): try: res = super().validate(attrs) return res except ValidationError as ex: if "User is already registered with this e-mail address." in ex.detail: ex.detail[0] = ( "User is already registered with this e-mail address. " "Please login using the form above." 
) raise ex # noinspection PyAbstractClass class RegisterSerializer(BaseRegisterSerializer): email = serializers.EmailField(required=True) password = serializers.CharField(write_only=True) first_name = serializers.CharField(write_only=True) last_name = serializers.CharField(write_only=True) # legacy compat zip = serializers.CharField(write_only=True, required=False) zipcode = serializers.CharField(write_only=True, required=False) # Overrides the default required password fields password1 = None password2 = None def get_cleaned_data(self): return { "username": self.validated_data.get("email", ""), "email": self.validated_data.get("email", ""), # allauth uses password1 internally for creation "password1": self.validated_data.get("password", ""), "first_name": self.validated_data.get("first_name", ""), "last_name": self.validated_data.get("last_name", ""), "zipcode": self.validated_data.get("zipcode", ""), } def validate(self, data): return data UserModel = get_user_model() class ProfileSerializer(serializers.ModelSerializer): class Meta: model = Profile fields = "__all__" class UserDetailsSerializer(BaseUserDetailsSerializer): profile = ProfileSerializer() class Meta: model = UserModel fields = ("username", "email", "first_name", "last_name", "profile") read_only_fields = ("email",) def to_representation(self, instance: UserModel) -> dict: """Move fields from Profile to user representation.""" representation = super().to_representation(instance) profile = representation.pop("profile") representation["zipcode"] = profile["zipcode"] representation["is_mentor"] = profile["is_mentor"] return representation class UserSerializer(BaseUserDetailsSerializer): profile = ProfileSerializer() class Meta: model = UserModel fields = ("username", "email", "first_name", "last_name", "profile") read_only_fields = ("email",) def to_representation(self, instance: UserModel) -> dict: """Move fields from Profile to user representation.""" representation = super().to_representation(instance) profile = representation.pop("profile") profile.pop("user") for key, val in profile.items(): representation[key] = val return representation
[ "django.contrib.auth.get_user_model", "rest_framework.serializers.EmailField", "rest_framework.serializers.CharField" ]
[((3424, 3440), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3438, 3440), False, 'from django.contrib.auth import get_user_model\n'), ((2399, 2436), 'rest_framework.serializers.EmailField', 'serializers.EmailField', ([], {'required': '(True)'}), '(required=True)\n', (2421, 2436), False, 'from rest_framework import serializers\n'), ((2452, 2490), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'write_only': '(True)'}), '(write_only=True)\n', (2473, 2490), False, 'from rest_framework import serializers\n'), ((2508, 2546), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'write_only': '(True)'}), '(write_only=True)\n', (2529, 2546), False, 'from rest_framework import serializers\n'), ((2563, 2601), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'write_only': '(True)'}), '(write_only=True)\n', (2584, 2601), False, 'from rest_framework import serializers\n'), ((2632, 2686), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'write_only': '(True)', 'required': '(False)'}), '(write_only=True, required=False)\n', (2653, 2686), False, 'from rest_framework import serializers\n'), ((2701, 2755), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'write_only': '(True)', 'required': '(False)'}), '(write_only=True, required=False)\n', (2722, 2755), False, 'from rest_framework import serializers\n')]
import os import shutil import sys import tarfile def include_package(envoy_api_protos, rst_file_path, prefix): # `envoy_api_rst_files` is a list of file paths for .proto.rst files # generated by protodoc # # we are only interested in the proto files generated for envoy protos, # not for non-envoy dependencies if ("pkg/" + prefix) not in rst_file_path: return None # derive the "canonical" path from the filepath canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}" # we are only interested in the actual v3 protos, not their dependencies if (prefix + canonical) not in envoy_api_protos: return None return canonical def main(): proto_srcs = sys.argv[1] envoy_api_rst_files = sys.argv[1:-1] output_filename = sys.argv[-1] with open(proto_srcs) as f: # the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # # @envoy_api//envoy/watchdog/v3:abort_action.proto # # this transforms them to a list with a "canonical" form of: # # envoy/watchdog/v3/abort_action.proto.rst # envoy_api_protos = [ f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src ] for rst_file_path in envoy_api_rst_files: canonical = include_package(envoy_api_protos, rst_file_path, "envoy/") if canonical is None: canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/") if canonical is None: continue target = os.path.join("rst-out/api-v3", canonical) if not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) shutil.copy(rst_file_path, target) # output the generated rst files to a tarfile for consumption # by other bazel rules with tarfile.open(output_filename, "w") as tar: tar.add("rst-out", arcname=".") if __name__ == "__main__": main()
[ "os.path.dirname", "tarfile.open", "os.path.join", "shutil.copy" ]
[((1634, 1675), 'os.path.join', 'os.path.join', (['"""rst-out/api-v3"""', 'canonical'], {}), "('rst-out/api-v3', canonical)\n", (1646, 1675), False, 'import os\n'), ((1789, 1823), 'shutil.copy', 'shutil.copy', (['rst_file_path', 'target'], {}), '(rst_file_path, target)\n', (1800, 1823), False, 'import shutil\n'), ((1927, 1961), 'tarfile.open', 'tarfile.open', (['output_filename', '"""w"""'], {}), "(output_filename, 'w')\n", (1939, 1961), False, 'import tarfile\n'), ((1706, 1729), 'os.path.dirname', 'os.path.dirname', (['target'], {}), '(target)\n', (1721, 1729), False, 'import os\n'), ((1756, 1779), 'os.path.dirname', 'os.path.dirname', (['target'], {}), '(target)\n', (1771, 1779), False, 'import os\n')]
import os from collections import OrderedDict, defaultdict from conans.model.ref import PackageReference from conans.util.files import save class RowResult(object): def __init__(self, remote, reference, data): self.remote = remote self.reference = reference self._data = data @property def recipe(self): return self.reference @property def package_id(self): return self._data['id'] @property def outdated(self): return self._data['outdated'] def row(self, headers): """ Returns package data according to headers """ assert isinstance(headers, Headers), "Wrong type: {}".format(type(headers)) for it in headers.keys: try: yield getattr(self, it) except AttributeError: yield self._data[it] for it in headers.settings: yield self._data['settings'].get(it, None) for it in headers.options: yield self._data['options'].get(it, None) if headers.requires: prefs = [PackageReference.loads(it) for it in self._data['requires']] yield ', '.join(map(str, [it.ref for it in prefs])) class Headers(object): _preferred_ordering = ['os', 'arch', 'compiler', 'build_type'] def __init__(self, settings, options, requires, keys): # Keys: columns to classify self.keys = keys self.options = options self.requires = requires # - Order settings _settings = defaultdict(list) for it in settings: try: category, _ = it.split('.', 1) except ValueError: _settings[it].append(it) else: _settings[category].append(it) self.settings = [] for it in self._preferred_ordering: if it in _settings: self.settings.extend(sorted(_settings[it])) for it, values in _settings.items(): if it not in self._preferred_ordering: self.settings.extend(sorted(values)) def row(self, n_rows=2): """ Retrieve list of headers as a single list (1-row) or as a list of tuples with settings organized by categories (2-row). Example output: 1-row: ['os', 'arch', 'compiler', 'compiler.version', 'compiler.libcxx', 'build_type'] 2-row: [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']),] """ headers = list(self.keys) if n_rows == 1: headers.extend(self.settings + self.options) if self.requires: headers.append('requires') return headers elif n_rows == 2: headers = [(it, ['']) for it in headers] settings = self._group_settings(self.settings) headers.extend(settings) headers.append(('options', self.options)) if self.requires: headers.append(('requires', [''])) return headers else: raise NotImplementedError("not yet") @staticmethod def _group_settings(settings): """ From one row to two-rows using '.' 
as separator """ ret = OrderedDict() for setting in settings: try: category, value = setting.split(".", 1) except ValueError: ret.setdefault(setting, []).append('') else: ret.setdefault(category, []).append(value) return [(key, values) for key, values in ret.items()] class Results(object): def __init__(self, results): self._results = results # Collect data inspecting the packages _settings = set() _options = set() _remotes = set() self.requires = False for it in results: _remotes.add(it['remote']) for p in it['items'][0]['packages']: _settings = _settings.union(list(p['settings'].keys())) _options = _options.union(list(p['options'].keys())) if len(p['requires']): self.requires = True self.settings = list(_settings) self.options = list(_options) self.remotes = list(_remotes) def get_headers(self, keys=('remote', 'reference', 'outdated', 'package_id')): return Headers(self.settings, self.options, self.requires, keys=keys) def packages(self): for it in self._results: remote = it['remote'] reference = it['items'][0]['recipe']['id'] for p in it['items'][0]['packages']: r = RowResult(remote, reference, p) yield r def html_binary_graph(search_info, reference, table_filename, template): # Adapt data to the template (think twice about the format before documenting) search = {'reference': str(reference)} results = Results(search_info) # Render and save template_folder = os.path.dirname(template.filename) content = template.render(search=search, results=results, base_template_path=template_folder) save(table_filename, content)
[ "conans.model.ref.PackageReference.loads", "collections.OrderedDict", "os.path.dirname", "collections.defaultdict", "conans.util.files.save" ]
[((5024, 5058), 'os.path.dirname', 'os.path.dirname', (['template.filename'], {}), '(template.filename)\n', (5039, 5058), False, 'import os\n'), ((5161, 5190), 'conans.util.files.save', 'save', (['table_filename', 'content'], {}), '(table_filename, content)\n', (5165, 5190), False, 'from conans.util.files import save\n'), ((1537, 1554), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1548, 1554), False, 'from collections import OrderedDict, defaultdict\n'), ((3266, 3279), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3277, 3279), False, 'from collections import OrderedDict, defaultdict\n'), ((1087, 1113), 'conans.model.ref.PackageReference.loads', 'PackageReference.loads', (['it'], {}), '(it)\n', (1109, 1113), False, 'from conans.model.ref import PackageReference\n')]
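A quick illustration of the two-row header grouping implemented by Headers._group_settings above; the setting names are arbitrary and the import path is an assumption, not part of the source.

from binary_html_table import Headers  # hypothetical module name for the code above

grouped = Headers._group_settings(["os", "arch", "compiler.version", "compiler.libcxx"])
print(grouped)  # [('os', ['']), ('arch', ['']), ('compiler', ['version', 'libcxx'])]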
from typing import Dict import numpy as np import tensorflow as tf import verres as V class ConstantSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, learning_rate: float): super().__init__() self.learning_rate = float(learning_rate) def __call__(self, step): return self.learning_rate def get_config(self): return dict(learning_rate=self.learning_rate) class LinearLRSchedule(tf.keras.callbacks.Callback): def __init__(self, cycle_length: int, steps_per_epoch: int, lr_map: Dict[int, float], initial_lr: float = None): super().__init__() self.schedule = None self.pointer = 0 self.cycle_length = None self.make_schedule(cycle_length, steps_per_epoch, lr_map, initial_lr) def make_schedule(self, cycle_length: int, steps_per_epoch: int, lr_map: Dict[int, float], initial_lr: float = None): self.cycle_length = cycle_length schedule = np.empty(self.cycle_length * steps_per_epoch, dtype="float32") if 0 not in lr_map: if initial_lr is None: raise RuntimeError("Either pass the initial learning rate in the lr_map or as a dedicated parameter!") else: lr_map = lr_map.copy() initial_lr = lr_map.pop(0) start_step = 0 current_lr = initial_lr for end_epoch, next_lr in sorted(lr_map.items(), key=lambda it: it[0]): steps = end_epoch * steps_per_epoch - start_step schedule[start_step:start_step+steps] = np.linspace( current_lr, next_lr, num=steps, endpoint=False, dtype="float32") start_step += steps current_lr = next_lr schedule[start_step:] = current_lr self.schedule = schedule def on_batch_end(self, batch, logs=None): self.model.optimizer.lr = self.schedule[self.pointer] self.pointer += 1 self.pointer %= self.cycle_length def on_epoch_end(self, epoch, logs=None): logs["lr"] = self.schedule[self.pointer] def factory(spec: dict) -> tf.optimizers.schedules.LearningRateSchedule: name = spec.pop("name", "default") if name.lower() in {"default", "constant"}: scheduler = ConstantSchedule(float(spec["learning_rate"])) else: scheduler_type = getattr(tf.optimizers.schedules, name, None) if scheduler_type is None: raise KeyError(f"No such scheduler: {name}") scheduler = scheduler_type(**spec) print(f" [Verres.schedule] - Factory built: {name}") return scheduler
[ "numpy.linspace", "numpy.empty" ]
[((1143, 1205), 'numpy.empty', 'np.empty', (['(self.cycle_length * steps_per_epoch)'], {'dtype': '"""float32"""'}), "(self.cycle_length * steps_per_epoch, dtype='float32')\n", (1151, 1205), True, 'import numpy as np\n'), ((1725, 1801), 'numpy.linspace', 'np.linspace', (['current_lr', 'next_lr'], {'num': 'steps', 'endpoint': '(False)', 'dtype': '"""float32"""'}), "(current_lr, next_lr, num=steps, endpoint=False, dtype='float32')\n", (1736, 1801), True, 'import numpy as np\n')]
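A short usage sketch for the schedule helpers above, assuming they are importable from the surrounding verres package; all hyper-parameter values are invented.

import tensorflow as tf

# 1) Constant learning rate built through the factory helper and handed to an optimizer.
schedule = factory({"name": "constant", "learning_rate": 1e-3})
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)

# 2) Piecewise-linear decay applied as a Keras callback: 1e-3 at epoch 0,
#    reaching 1e-4 by epoch 5 and 1e-5 by epoch 10, with 100 steps per epoch.
lr_callback = LinearLRSchedule(cycle_length=10, steps_per_epoch=100,
                                 lr_map={5: 1e-4, 10: 1e-5}, initial_lr=1e-3)
# model.fit(x, y, epochs=10, callbacks=[lr_callback])  # x, y: training data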
import json import string from datetime import datetime import deap import numpy as np import hmm from discriminator import Discriminator from ea import EA import random_search DEFAULT_PARAMS = { # Discriminator CNN model "model": "CNNModel3", # Algorithm Parameters "states": 5, "symbols": 5, "epochs": 10, "epoch_size": 500, "batch_size": 200, "seq_len": 20, "pop_size": 25, "gens": 50, "offspring_prop": 1.0, "cx_prob": 0.0, "mut_fn": "uniform", "mut_prob": 1.0, "mut_rate": None, # None - default to 1/N where N is number of genes # Implementation Parameters "_pool_size": 4, "_random_search": True, # Also run an elitist random search over #gens to compare performance } def param_assert(params): assert params["states"] > 0 assert 0 < params["symbols"] <= 26 assert 0.0 <= params["offspring_prop"] <= 1.0 assert 0.0 <= params["cx_prob"] <= 1.0 assert 0.0 <= params["mut_prob"] <= 1.0 assert (params["mut_rate"] is None) or (0.0 <= params["mut_rate"] <= 1.0) def run(param_subset): # Overwrite the default values of the provided parameters params = {**DEFAULT_PARAMS, **param_subset} print(params) param_assert(params) x = params["states"] y = string.ascii_lowercase[: params["symbols"]] s = [1.0] + [0.0] * (x - 1) # Random HMM that will act as the 'true' underlying distribution real_hmm = hmm.random_hmm(x, y, s) # Different random HMM that will be used to benchmark the best solution we find rand_hmm = hmm.random_hmm(x, y, s) d = Discriminator( real_hmm, params["epoch_size"], params["batch_size"], params["seq_len"], model=params["model"], pool_size=params["_pool_size"], ) print("Pre-training discriminator...") accs, losses = d.initial_train(params["epochs"]) acc = accs[-1] loss = losses[-1] print(f"Pre-trained discriminiator accuracy: {acc}, loss: {loss}") g = EA( discriminator=d, pop_size=params["pop_size"], states=x, symbols=len(y), offpr=params["offspring_prop"], cxpb=params["cx_prob"], mut_fn=params["mut_fn"], mutpb=params["mut_prob"], mut_rate=params["mut_rate"], ) print("Running generator...") final_pop, _, logbook = g.run(params["gens"]) best_ind = deap.tools.selBest(final_pop, 1)[0] best_hmm = hmm.HMM(x, np.array(list(y)), best_ind[0], best_ind[1], np.array(s)) if params["_random_search"]: print("Running random search benchmark...") rs_best_hmm, rs_best_acc = random_search.run( d, params["states"], params["symbols"], params["gens"] ) else: rs_best_hmm, rs_best_acc = None, None return real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook def experiment(params, runs): all_params = {**DEFAULT_PARAMS, **params} do_rand_search = all_params["_random_search"] mean_fitnesses = [] best_l2s = [] rand_l2s = [] if do_rand_search: rs_l2s = [] for i in range(runs): print(f"Run {i+1}") real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook = run(params) best_l2 = hmm.total_l2_diff(real_hmm, best_hmm) rand_l2 = hmm.total_l2_diff(real_hmm, rand_hmm) if do_rand_search: rs_l2 = hmm.total_l2_diff(real_hmm, rs_best_hmm) mean_fitnesses.append(logbook.select("mean")) best_l2s.append(best_l2) rand_l2s.append(rand_l2) extra_msg = "" if do_rand_search: rs_l2s.append(rs_l2) extra_msg = f", RandSearch L2: {rs_l2}" print(f"Best L2: {best_l2}, Rand L2: {rand_l2}{extra_msg}") exp_data = { "params": all_params, "mean_fitnesses": mean_fitnesses, "best_l2s": best_l2s, "rand_l2s": rand_l2s, } if do_rand_search: exp_data["rs_l2s"] = rs_l2s exp_file = f'experiments/exp_{datetime.now().strftime("%y%m%d-%H%M%S%f")}.json' with open(exp_file, "w") as f: json.dump(exp_data, f, indent=4) return exp_data def main(): real_hmm, best_hmm, best_l2 = 
(lambda r: (r[0], r[1], hmm.total_l2_diff(r[0], r[1])))(run(DEFAULT_PARAMS)) print( f""" Real HMM: {real_hmm} Best HMM: {best_hmm} Best L2: {best_l2} """ ) if __name__ == "__main__": main()
[ "discriminator.Discriminator", "random_search.run", "hmm.total_l2_diff", "numpy.array", "datetime.datetime.now", "deap.tools.selBest", "hmm.random_hmm", "json.dump" ]
[((1444, 1467), 'hmm.random_hmm', 'hmm.random_hmm', (['x', 'y', 's'], {}), '(x, y, s)\n', (1458, 1467), False, 'import hmm\n'), ((1567, 1590), 'hmm.random_hmm', 'hmm.random_hmm', (['x', 'y', 's'], {}), '(x, y, s)\n', (1581, 1590), False, 'import hmm\n'), ((1600, 1746), 'discriminator.Discriminator', 'Discriminator', (['real_hmm', "params['epoch_size']", "params['batch_size']", "params['seq_len']"], {'model': "params['model']", 'pool_size': "params['_pool_size']"}), "(real_hmm, params['epoch_size'], params['batch_size'], params[\n 'seq_len'], model=params['model'], pool_size=params['_pool_size'])\n", (1613, 1746), False, 'from discriminator import Discriminator\n'), ((2406, 2438), 'deap.tools.selBest', 'deap.tools.selBest', (['final_pop', '(1)'], {}), '(final_pop, 1)\n', (2424, 2438), False, 'import deap\n'), ((2513, 2524), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2521, 2524), True, 'import numpy as np\n'), ((2647, 2720), 'random_search.run', 'random_search.run', (['d', "params['states']", "params['symbols']", "params['gens']"], {}), "(d, params['states'], params['symbols'], params['gens'])\n", (2664, 2720), False, 'import random_search\n'), ((3241, 3278), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'best_hmm'], {}), '(real_hmm, best_hmm)\n', (3258, 3278), False, 'import hmm\n'), ((3297, 3334), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'rand_hmm'], {}), '(real_hmm, rand_hmm)\n', (3314, 3334), False, 'import hmm\n'), ((4091, 4123), 'json.dump', 'json.dump', (['exp_data', 'f'], {'indent': '(4)'}), '(exp_data, f, indent=4)\n', (4100, 4123), False, 'import json\n'), ((3382, 3422), 'hmm.total_l2_diff', 'hmm.total_l2_diff', (['real_hmm', 'rs_best_hmm'], {}), '(real_hmm, rs_best_hmm)\n', (3399, 3422), False, 'import hmm\n'), ((3998, 4012), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4010, 4012), False, 'from datetime import datetime\n')]
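A sketch of how the experiment driver above might be invoked; the parameter overrides are arbitrary, and the call assumes the local hmm, discriminator, ea and random_search modules plus an existing experiments/ directory for the JSON summary it writes.

# Three short repetitions with the random-search baseline disabled.
results = experiment({"states": 3, "symbols": 4, "gens": 10, "_random_search": False}, runs=3)
print(results["best_l2s"])  # L2 distance of each run's best HMM from the true HMM
print(results["rand_l2s"])  # baseline: L2 distance of an unrelated random HMM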
#!/usr/bin/env python3 ''' This code traverses a directories of evaluation log files and record evaluation scores as well as plotting the results. ''' import os import argparse import json import copy from shutil import copyfile import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from utils import * MAX_ALIGN_STEPS = 75000 - 1 # This depends on the evaluation code used to generate the logs def generate_csv(log_dir, csv_file): ''' Traverse and read log files, and then output csv file from the eval data. - file to be generated: 'eval_scores.csv' - columns: state_machine_id, timesteps, rot_error ''' df = pd.DataFrame(columns=['state_machine_id', 'state_machine_name', 'timesteps', 'rot_error']) model_names = extract_model_names(log_dir) # Traverse all episodes and add each entry to data frame for state_machine_id, episode_idx, episode_dir in traverse_all_episodes(log_dir): json_util = JsonUtil(os.path.join(episode_dir, 'goal.json')) entry = { 'state_machine_id': state_machine_id, 'state_machine_name': model_names[state_machine_id], **json_util.load() } # Handling the timesteps==-1 case if entry['reachfinish'] == -1: entry['reachfinish'] = MAX_ALIGN_STEPS if entry['reachstart'] == -1: raise ValueError('\'reachstart\' in {episode_dir}/goal.json does not contain a valid value.') # Rename dict keys entry['timesteps'] = entry.pop('reachfinish') - entry.pop('reachstart') entry['rot_error'] = entry.pop('align_obj_error') entry['init_rot_error'] = entry.pop('init_align_obj_error', None) # Add a new entry entry['rot_error_diff'] = entry['init_rot_error'] - entry['rot_error'] df = df.append(entry, ignore_index=True) # df.append works differently from python since it is stupid df.to_csv(csv_file, index=False) def generate_plot(input_csv_file, plot_file): data = pd.read_csv(input_csv_file) sns.scatterplot(data=data, x="timesteps", y="rot_error", hue="state_machine_name", alpha=0.8) plt.savefig(plot_file)
[ "matplotlib.pyplot.savefig", "pandas.read_csv", "os.path.join", "seaborn.scatterplot", "pandas.DataFrame" ]
[((659, 753), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['state_machine_id', 'state_machine_name', 'timesteps', 'rot_error']"}), "(columns=['state_machine_id', 'state_machine_name', 'timesteps',\n 'rot_error'])\n", (671, 753), True, 'import pandas as pd\n'), ((2020, 2047), 'pandas.read_csv', 'pd.read_csv', (['input_csv_file'], {}), '(input_csv_file)\n', (2031, 2047), True, 'import pandas as pd\n'), ((2052, 2150), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'data', 'x': '"""timesteps"""', 'y': '"""rot_error"""', 'hue': '"""state_machine_name"""', 'alpha': '(0.8)'}), "(data=data, x='timesteps', y='rot_error', hue=\n 'state_machine_name', alpha=0.8)\n", (2067, 2150), True, 'import seaborn as sns\n'), ((2150, 2172), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_file'], {}), '(plot_file)\n', (2161, 2172), True, 'import matplotlib.pyplot as plt\n'), ((974, 1012), 'os.path.join', 'os.path.join', (['episode_dir', '"""goal.json"""'], {}), "(episode_dir, 'goal.json')\n", (986, 1012), False, 'import os\n')]
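A small driver sketch for the two helpers above; the paths are invented, and traverse_all_episodes, JsonUtil and extract_model_names are expected to come from the local utils module the script imports.

log_dir = "logs/eval_run_01"        # directory tree with one goal.json per episode (invented path)
csv_file = "eval_scores.csv"
plot_file = "rot_error_vs_timesteps.png"

generate_csv(log_dir, csv_file)     # flatten every episode's goal.json into one table
generate_plot(csv_file, plot_file)  # scatter of rot_error vs. timesteps, colored by state machine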
from os import path from typing import Union from datetime import datetime from flask import Flask, request, redirect, render_template from flask_wtf import CSRFProtect from werkzeug.utils import secure_filename from data import db_session from data.posts import Posts from forms.edit_post_form import EditPostForm app = Flask(__name__) app.config['SECRET_KEY'] = 'SECRET_KEY' csrf_protect = CSRFProtect(app) UPLOAD_FOLDER = 'static/posts_img/' app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER DATA_BASE = 'db/blog.sqlite' app.config['DATA_BASE'] = DATA_BASE def edit_post_in_data_base(form: EditPostForm, post: Union[Posts, None]): db_sess = db_session.create_session() post_title = form.title.data post_text = form.text.data post_author = form.author.data post_image = form.image.data # --- Фотография --- if not post_image: post_image_name = '' # Картинки нет else: current_id = db_sess.query(Posts).order_by(Posts.id.desc()).first() current_id = current_id.id + 1 if current_id else 1 real_image_name = secure_filename(post_image.filename) post_image_name = f'{current_id}{real_image_name[real_image_name.rfind("."):]}' post_image.save(path.join(app.config['UPLOAD_FOLDER'], post_image_name)) # --- Фотография --- if not post: # Добавление поста post = Posts() post.title = post_title post.image_name = post_image_name post.text = post_text post.author = post_author post.date = datetime.now() db_sess.add(post) else: # редактирование post.title = post_title post.image_name = post_image_name post.text = post_text post.author = post_author post.date = datetime.now() db_sess.merge(post) db_sess.commit() db_sess.close() return redirect('/') @app.route('/') def index(): params = {'title': 'Blog', 'UPLOAD_FOLDER': app.config['UPLOAD_FOLDER']} db_sess = db_session.create_session() posts = db_sess.query(Posts).order_by(Posts.id.desc()).all() view = render_template('blog.html', **params, posts=posts) db_sess.close() return view @app.route('/add_post', methods=['GET', 'POST']) def add_post(): params = {'title': 'Добавление поста', 'action_type': 'Добавление поста', 'submit_text': 'Добавить'} form = EditPostForm() params['form'] = form if form.validate_on_submit(): return edit_post_in_data_base(form, None) return render_template('edit_post.html', **params) @app.route('/edit_post/<int:post_id>', methods=['GET', 'POST']) def edit_post(post_id: int): params = {'title': 'Редактирование поста', 'action_type': 'Редактирование поста', 'submit_text': 'Редактировать'} form = EditPostForm() params['form'] = form db_sess = db_session.create_session() post: Posts = db_sess.query(Posts).filter(Posts.id == post_id).first() db_sess.close() if not post: return redirect('/') if request.method == 'GET': form.title.data = post.title form.text.data = post.text form.author.data = post.author elif form.validate_on_submit(): return edit_post_in_data_base(form, post) return render_template('edit_post.html', **params) @app.route('/delete_post/<int:post_id>') def delete_post(post_id: int): db_sess = db_session.create_session() post = db_sess.query(Posts).filter(Posts.id == post_id).first() if post: db_sess.delete(post) db_sess.commit() db_sess.close() return redirect('/') def main(): db_session.global_init(app.config['DATA_BASE']) app.run('127.0.0.1', 8080) if __name__ == '__main__': main()
[ "flask.render_template", "flask.Flask", "forms.edit_post_form.EditPostForm", "data.posts.Posts", "os.path.join", "flask.redirect", "data.db_session.create_session", "datetime.datetime.now", "data.posts.Posts.id.desc", "werkzeug.utils.secure_filename", "flask_wtf.CSRFProtect", "data.db_session.global_init" ]
[((327, 342), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'from flask import Flask, request, redirect, render_template\n'), ((399, 415), 'flask_wtf.CSRFProtect', 'CSRFProtect', (['app'], {}), '(app)\n', (410, 415), False, 'from flask_wtf import CSRFProtect\n'), ((653, 680), 'data.db_session.create_session', 'db_session.create_session', ([], {}), '()\n', (678, 680), False, 'from data import db_session\n'), ((1857, 1870), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1865, 1870), False, 'from flask import Flask, request, redirect, render_template\n'), ((1994, 2021), 'data.db_session.create_session', 'db_session.create_session', ([], {}), '()\n', (2019, 2021), False, 'from data import db_session\n'), ((2099, 2150), 'flask.render_template', 'render_template', (['"""blog.html"""'], {'posts': 'posts'}), "('blog.html', **params, posts=posts)\n", (2114, 2150), False, 'from flask import Flask, request, redirect, render_template\n'), ((2373, 2387), 'forms.edit_post_form.EditPostForm', 'EditPostForm', ([], {}), '()\n', (2385, 2387), False, 'from forms.edit_post_form import EditPostForm\n'), ((2511, 2554), 'flask.render_template', 'render_template', (['"""edit_post.html"""'], {}), "('edit_post.html', **params)\n", (2526, 2554), False, 'from flask import Flask, request, redirect, render_template\n'), ((2780, 2794), 'forms.edit_post_form.EditPostForm', 'EditPostForm', ([], {}), '()\n', (2792, 2794), False, 'from forms.edit_post_form import EditPostForm\n'), ((2836, 2863), 'data.db_session.create_session', 'db_session.create_session', ([], {}), '()\n', (2861, 2863), False, 'from data import db_session\n'), ((3248, 3291), 'flask.render_template', 'render_template', (['"""edit_post.html"""'], {}), "('edit_post.html', **params)\n", (3263, 3291), False, 'from flask import Flask, request, redirect, render_template\n'), ((3380, 3407), 'data.db_session.create_session', 'db_session.create_session', ([], {}), '()\n', (3405, 3407), False, 'from data import db_session\n'), ((3577, 3590), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (3585, 3590), False, 'from flask import Flask, request, redirect, render_template\n'), ((3609, 3656), 'data.db_session.global_init', 'db_session.global_init', (["app.config['DATA_BASE']"], {}), "(app.config['DATA_BASE'])\n", (3631, 3656), False, 'from data import db_session\n'), ((1081, 1117), 'werkzeug.utils.secure_filename', 'secure_filename', (['post_image.filename'], {}), '(post_image.filename)\n', (1096, 1117), False, 'from werkzeug.utils import secure_filename\n'), ((1365, 1372), 'data.posts.Posts', 'Posts', ([], {}), '()\n', (1370, 1372), False, 'from data.posts import Posts\n'), ((1532, 1546), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1544, 1546), False, 'from datetime import datetime\n'), ((1760, 1774), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1772, 1774), False, 'from datetime import datetime\n'), ((2992, 3005), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (3000, 3005), False, 'from flask import Flask, request, redirect, render_template\n'), ((1230, 1285), 'os.path.join', 'path.join', (["app.config['UPLOAD_FOLDER']", 'post_image_name'], {}), "(app.config['UPLOAD_FOLDER'], post_image_name)\n", (1239, 1285), False, 'from os import path\n'), ((2065, 2080), 'data.posts.Posts.id.desc', 'Posts.id.desc', ([], {}), '()\n', (2078, 2080), False, 'from data.posts import Posts\n'), ((969, 984), 'data.posts.Posts.id.desc', 'Posts.id.desc', ([], {}), '()\n', (982, 984), False, 
'from data.posts import Posts\n')]
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Neon backend wrapper for the NervanaGPU library. Most functions are thin wrappers around functions from the NervanaGPU class, the GPUTensor is taken directly from NervanaGPU as well. NervanaGPU is available at `<https://github.com/NervanaSystems/nervanagpu>` """ import logging from neon.backends.backend import Backend from nervanagpu import NervanaGPU from neon.diagnostics.timing_decorators import FlopsDecorator import pycuda.driver as drv import numpy as np logger = logging.getLogger(__name__) class GPU(Backend): """ Sets up a NervanaGPU based backend for matrix operations. Note that some functions defined in the generic Backend class such as cross-map pooling and normalization and are not implemented for this backend. """ default_dtype = np.float32 def __init__(self, rng_seed, stochastic_round=False, device_id=0): import pycuda.driver as drv drv.init() global ctx ctx = drv.Device(device_id).make_context() import atexit atexit.register(ctx.pop) self.ng = NervanaGPU(stochastic_round=stochastic_round) logger.info("Initialized NervanaGPU with stochastic_round=%s", stochastic_round) self.rng_seed = rng_seed self.rng_init() self.device_id = device_id if device_id is not None else 0 def __getstate__(self): """ Defines what and how we go about serializing an instance of this class. Returns: self.__dict__: The full contents of the backend class instance, except for the mem_pool which is on device and cannot be serialized. """ if hasattr(self, 'mem_pool') and self.mem_pool is not None: self.mem_pool_pickle = {'shape': self.mem_pool.shape, 'dtype': np.float32} self.mem_pool = None return self.__dict__ def __setstate__(self, state): """ Defines how we go about deserializing into an instance of this class. Arguments: self.__dict__: The full contents of the backend class instance, except for the mem_pool which is on device and cannot be serialized. """ self.__dict__.update(state) self.mem_pool = self.ng.empty(self.mem_pool_pickle['shape'], dtype=self.mem_pool_pickle['dtype']) def init_mempool(self, shape, dtype=default_dtype): """ Allocates a memory pool for temporary storage """ self.mem_pool = self.ng.empty(shape, dtype=dtype) def alloc_host_mem(self, shape, dtype=default_dtype): return drv.pagelocked_empty(shape, dtype, order="C", mem_flags=0) def create_stream(self): return drv.Stream() def synchronize(self): pass def async_copy(self, dest, src, stream=None): drv.memcpy_htod_async(dest.gpudata, src, stream) def rng_init(self): """ Initialize and seed the pseudo random number genrator. Random numbers are generated on the host using numpy, then transfered to device. 
""" seed = None if 'rng_seed' in self.__dict__: seed = self.rng_seed logger.info("Seeding random number generator with: %s", str(seed)) np.random.seed(seed) def flop_timing_init(self, decorate_fc, decorate_conv, decorate_ew): """ Initialize FLOP timing. Wraps the specified MOP calls via a decorator to record elapsed time and number of operations. Arguments: decorate_fc (list): string giving the function names of fully connected layer forward/backward/update calls to time. decorate_conv (list): string giving the function names of convolutional layer forward/backward/update calls to time. decorate_ew (list): string giving the function names of element-wise calls to time. Notes: Must be called prior to first flop_timing_start call """ self.start = drv.Event() self.end = drv.Event() self.flop_timer = FlopsDecorator(self) self.flop_timer.decorate(decorate_fc=decorate_fc, decorate_conv=decorate_conv, decorate_ew=decorate_ew) def flop_timinig_start(self): """ Start a new FLOP timer. Returns: None: dummy value (not used) """ return self.start.record() def flop_timing_finish(self, start_time): """ Complete current FLOP timing. Arguments: start_time (unused): ignored. Returns: float: elapsed time in seconds since prior flop_timing_start call. """ self.end.record() self.end.synchronize() return self.end.time_since(self.start) def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype, persist_values=True, name=None): """ generate numpy random number and convert to a GPUTensor. If called with dype=None it will probably explode """ ary = np.random.uniform(low, high, size) return self.ng.array(ary, dtype=dtype, name=name) def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype, persist_values=True, name=None): """ Gaussian/Normal random number sample generation """ ary = np.random.normal(loc, scale, size) return self.ng.array(ary, dtype=dtype, name=name) def fprop_fc(self, out, inputs, weights, layer=None): """ Forward propagate the inputs of a fully connected network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. layer (Layer): The layer object. """ self.ng.dot(weights, inputs, out) def bprop_fc(self, out, weights, deltas, layer=None): """ Backward propagate the error through a fully connected network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer layer (Layer): The layer object. """ self.ng.dot(weights.T, deltas, out) def update_fc(self, out, inputs, deltas, layer=None): """ Compute the updated gradient for a fully connected network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. deltas (GPUTensor): The error values for this layer layer (Layer): The layer object. """ self.ng.dot(deltas, inputs.T, out) def update_fc_bias(self, err, out): """ Compute the updated bias gradient for a fully connected network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. 
err (GPUTensor): backpropagated error """ self.ng.sum(err, axis=1, out=out) def add_fc_bias(self, inputs, bias): """ Add the bias for a fully connected network layer. Arguments: inputs (GPUTensor): the input to update. bias (GPUTensor): the amount to increment """ self.ng.add(inputs, bias, out=inputs) def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs, ifmshape, links, nifm, padding, stride, ngroups, fpropbuf, local=False): """ Forward propagate the inputs of a convolutional network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. ofmshape (tuple): Dimensions of each output feature map (typically number of height and width neurons). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically number of height and width neurons). For this backend we expect these values to be square. links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. fpropbuf (GPUTensor): Temporary storage buffer used to hold the convolved outputs for a single receptive field. Not used for this backend. local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) """ ''' N: Number of images in mini-batch C: Number of input feature maps K: Number of output feature maps D: Depth of input image H: Height of input image W: Width of input image T: Depth of filter kernel R: Height of filter kernel S: Width of filter kernel ''' self.ng.fprop_conv(layer=fpropbuf, I=inputs, F=weights, O=out, alpha=1.0, repeat=1) def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs, ifmshape, links, padding, stride, nifm, ngroups, bpropbuf, local=False): """ Backward propagate the error through a convolutional network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. 
bpropbuf (GPUTensor): Temporary storage buffer used to hold the backpropagated error for a single receptive field local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) """ self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out, alpha=1.0, repeat=1) def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize, ofmlocs, ifmshape, links, nifm, padding, stride, ngroups, fwidth, updatebuf, local=False, layer=None): """ Compute the updated gradient for a convolutional network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. fwidth (int): Filter width. updatebuf (GPUTensor): Temporary storage buffer used to hold the updated gradient for a single receptive field local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) layer (Layer): The layer object. """ self.ng.update_conv(layer=updatebuf, I=inputs, E=deltas, grad_F=out, alpha=1.0, repeat=1) def fprop_pool(self, out, inputs, op, ofmshape, ofmsize, ofmlocs, fshape, ifmshape, links, nifm, padding, stride, fpropbuf): """ Forward propagate the inputs of a Pooling network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. op (string): The type of pooling operation to apply. We support "max", "avg", "l2" currently. ofmshape (tuple): Dimensions of each output feature map (typically number of height and width neurons). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. fshape (tuple): Dimensions of each filter (typically height and width). ifmshape (tuple): Dimensions of each input feature map (typically number of height and width neurons). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the pooling operation. stride (int): Number of neurons to shift the filter at each step. fpropbuf (GPUTensor): Temporary storage buffer used to hold the pooled outputs for a single receptive field. 
""" op = op.lower() if op == "max": self.ng.fprop_pool(layer=fpropbuf, I=inputs, O=out, repeat=1) else: raise AttributeError("unexpected pooling op type: %s", op) def bprop_pool(self, out, fouts, inputs, deltas, op, ofmshape, ofmsize, ofmlocs, fshape, fpsize, ifmshape, links, nifm, padding, stride, bpropbuf): """ Backward propagate the error through a pooling network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. fouts (GPUTensor): Forward propagated outputs from the previous layer. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. deltas (GPUTensor): The error values for this layer op (string): The type of pooling operation to apply. We support "max", "avg", "l2" currently. ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. fshape (tuple): Dimensions of each filter (typically height and width). fpsize (int): The size of each filter. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the pooling operation. stride (int): Number of neurons to shift the filter at each step. bpropbuf (GPUTensor): Temporary storage buffer used to hold the backpropagated error for a single receptive field """ op = op.lower() if op == "max": self.ng.bprop_pool(layer=bpropbuf, I=inputs, E=deltas, grad_I=out, repeat=1) else: raise AttributeError("unexpected pooling op type: %s", op) def logistic(self, x, out): """ Logistic sigmoid nonlinearity, 1/(1+exp(-x)) Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.sig(x, out=out) return out def transpose(self, untransposed, transposed): transposed[:] = untransposed.T def crossent(self, y, t, partial, out, epsilon, doscale, ismulti=False): """ Computes cross entropy cost. Arguments: y (GPUTensor): Model outputs t (GPUTensor): Targets partial (GPUTensor): temporary buffer used for 2D reduction out (GPUTensor): Storage for the cross entropy output epsilon (float): constant for numerical stability doscale (boolean): If True, cross_entropy is scaled by batch size ismulti (boolean): If True, compute multi class cross_entropy """ sumbuf = partial.reshape((partial.size, 1))[:partial.shape[0]] if ismulti: self.ng.sum(-t * self.ng.log(y + epsilon), axis=None, partial=sumbuf, out=out) else: self.ng.sum((t - 1) * self.ng.log(1 - y + epsilon) - t * self.ng.log(y + epsilon), axis=None, partial=sumbuf, out=out) if doscale: out[:] = out / y.shape[1] return out def logistic_compound(self, inputs, outputs): """ Applies logistic function and its derivative to the dataset passed. Arguments: inputs (GPUTensor): Input data to be transformed. This also acts as storage for the output of the derivative function. outputs (GPUTensor): Storage for the transformed output. """ # Apply the logistic function. 
outputs[:] = self.ng.sig(inputs) inputs[:] = (1.0 - outputs) * inputs def rectlin(self, x, out): """ Rectified Linear nonlinearity Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.maximum(x, 0., out=out) return out def rectlin_derivative(self, x, out): """ Rectified linear nonlinearity derivative Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.greater(x, 0, out=out) return out def rectleaky(self, x, slope, out): """ Leaky rectified linear nonlinearity Arguments: x (GPUTensor): Input tensor slope (float): amount of gradient to apply when unit is not active out (GPUTensor): Output tensor """ out[:] = self.ng.maximum(x, x*slope) def rectleaky_derivative(self, x, slope, out): """ Leaky rectified linear nonlinearity derivative Arguments: x (GPUTensor): Input tensor slope (float): amount of gradient to apply when unit is not active out (GPUTensor): Output tensor """ out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope def sum(self, tsr, axes, out): """ Sum Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.sum(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.sum(tsr, axis=axes, out=out) return out def norm(self, tsr, order=None, axis=None, out=None): """ Calculates and returns the vector p-norms of the GPUTensor along the specified axis. The p-norm is defined on a vector A as :math:`||A||_p = \sum_i(|A_i|^p)^{1/p}`. Arguments: tsr (GPUTensor): the GPUTensor on which to find the norms order (int): The order or p upon which the norm is calculated. Valid values include: None, inf, -inf, 0, 1, -1, 2, -2, ... axis (int): The axis along which to compute vector norms. out (GPUTensor): where to write the results to. Must be of the expected result shape. Returns: GPUTensor: p-norm of tsr along the specified axis. Raises: IndexError if invalid axis specified AttributeError if invalid order specified See Also: `numpy.linalg.norm` """ if not isinstance(axis, int) or axis < 0 or axis >= len(tsr.shape): raise IndexError("invalid axis value: %s", axis) if not isinstance(order, (int, float)): raise AttributeError("invalid order value: %s", order) if out is None: raise AttributeError("No output tensor speficied", order) if order == float('Inf'): self.ng.max(self.fabs(tsr), axis, out) elif order == float('-Inf'): self.ng.min(self.fabs(tsr), axis, out) elif order == 0: tmp = self.zeros(tsr.shape) self.ng.not_equal(tsr, tmp, tmp) self.ng.sum(tmp, axis, out) else: tmp = self.empty(tsr.shape) self.ng.power(self.fabs(tsr), order, tmp) self.ng.sum(tmp, axis, out) self.ng.power(out, (1.0 / order), out) return out def mean(self, tsr, axes, out): """ Calculates the arithmetic mean of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.mean(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.mean(tsr, axis=axes, out=out) return out def min(self, tsr, axes, out): """ Calculates the minimum of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. 
If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.min(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.min(tsr, axis=axes, out=out) return out def max(self, tsr, axes, out): """ Calculates the maximum of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.max(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.max(tsr, axis=axes, out=out) return out def variance(self, tsr, axes, out, mean=None): """ Calculates the variance of the elements along the specified axes. Arguments: tsr (GPUTensor): the tensor on which to compute the variance axes (int, list, optional): the dimension(s) along which to variance. If set to None, we will variance over all dimensions. out (GPUTensor): where the result will be stored. mean (GPUTensor): the tensor containing mean of tsr Returns: GPUTensor: reference to out """ if mean is None: logger.error("GPUTensor requires mean to be specified.") raise ValueError("mean not specified") self.ng.mean(self.ng.square(tsr-mean), axis=axes, out=out) return out def fabs(self, x, out): """ Calculates absolute value of the elements in a tensor Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor Returns: GPUTensor: reference to out """ self.ng.fabs(x, out=out) return out def sqrt(self, x, out): """ Calculates square root of the elements in a tensor Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor Returns: GPUTensor: reference to out """ self.ng.sqrt(x, out=out) return out def zeros(self, shape, dtype=default_dtype, persist_values=True): """ Allocate a new GPUTensor and fill it with zeros. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.zeros(shape, dtype=dtype) def ones(self, shape, dtype=default_dtype, persist_values=True): """ Allocate a new GPUTensor and fill it with ones. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.ones(shape, dtype=dtype) def zeros_like(self, ary, dtype=default_dtype, persist_values=True, name=None): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary and populating each element with a value of 0. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.backend.Backend.empty`, :py:func:`~neon.backends.backend.Backend.ones`, :py:func:`~neon.backends.backend.Backend.array` """ return self.zeros(ary.shape, dtype=dtype, persist_values=persist_values) def empty_like(self, ary, dtype=default_dtype, persist_values=True, name=None): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.backend.Backend.empty`, :py:func:`~neon.backends.backend.Backend.ones`, :py:func:`~neon.backends.backend.Backend.array` """ return self.empty(ary.shape, dtype=dtype, persist_values=persist_values, name=name) def empty(self, shape, dtype=default_dtype, persist_values=True, name=None): """ Allocate a new GPUTensor. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.empty(shape, dtype=dtype) def copy(self, ary): """ returns a copy of ary """ res = self.empty_like(ary) res.copy(ary) return res def array(self, ary, dtype=default_dtype, persist_values=True, name=None, allocator=drv.mem_alloc): """ Allocate a new GPUTensor and fill it with supplied numpy array. Arguments: ary (ndarray): Numpy array with source data dtype (dtype, optional): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls name (string): Name for the GPUTensor allocator (pycuda): Pycuda memory allocator Returns: GPUTensor: output """ return self.ng.array(ary, dtype=dtype, name=name) def add(self, left, right, out): """ Elementwise addition Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.add(left, right, out=out) return out def subtract(self, left, right, out): """ Elementwise subtraction Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.subtract(left, right, out=out) return out def multiply(self, left, right, out): """ Elementwise multiplication Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. 
Returns: GPUTensor: reference to out """ self.ng.multiply(left, right, out=out) return out def divide(self, left, right, out): """ Elementwise division Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.divide(left, right, out=out) return out def greater(self, left, right, out): """ Elementwise greater than testing Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.greater(left, right, out=out) return out def equal(self, left, right, out): """ Performs element-wise equality testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.equal(left, right, out=out) return out def not_equal(self, left, right, out): """ Elementwise not equal testing Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.not_equal(left, right, out=out) return out def clip(self, a, a_min, a_max, out): """ Elementwise clipping between a range of specified values Arguments: a (GPUTensor): input tensor. a_min (float): floor value. a_max (float): ceiling value. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.clip(a, a_min, a_max, out=out) return out def log(self, a, out): """ Elementwise base-e logarithm Arguments: a (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.log(a, out=out) return out def tanh(self, a, out): """ Elementwise tanh Arguments: a (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.tanh(a, out=out) return out def argmax(self, a, out, axis=0): """ Calculates the indices of the maximal element value along the specified axis. If multiple elements contain the maximum, only the elements of the first are returned. Arguments: tsr (GPUTensor): The GPUTensor on which to find the maximum indices axis (int): The dimension along which to find the maximum. If set to None, find the overall maximum index of a flattened representation of tsr. out (GPUTensor): Where to store the result. Should be of the appropriate type and expected shape Returns: GPUTensor: reference to out """ self.ng.argmax(a, out=out, axis=axis) return out def softmax(self, x, out): """ Softmax nonlinearity. Computes exp(x-max(x)) / sum_i exp(x_i-max(x_i)) Arguments: x (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ out[:] = (self.ng.reciprocal(self.ng.sum( self.ng.exp(x - self.ng.max(x, axis=0)), axis=0)) * self.ng.exp(x - self.ng.max(x, axis=0))) return out def softmax_gradient(self, y, err, out): """ Gradient of the softmax nonlinearity. Arguments: y (GPUTensor): input tensor. err (GPUTensor): backpropagated error. out (GPUTensor): where the result will be stored. 
Returns: GPUTensor: reference to out """ raise NotImplementedError("Softmax gradient should use shortcut") return out def make_binary_mask(self, tsr, keepthresh=0.5, dtype=default_dtype): """ Create a binary mask for dropout layers. Arguments: tsr (GPUTensor): Output tensor keepthresh (float): fraction of ones """ self.ng.dropout(keep=keepthresh, out=tsr) def gdm_compound(self, ps_item, us_item, vs_item, momentum_coef, learning_rate, epoch): """ Perform gradient descent update with momentum. Arguments: ps_item (GPUTensor): parameter tensor (e.g. a weight matrix) us_item (GPUTensor): update tensor, contains gradient wrt. weights vs_item (GPUTensor): velocity tensor. momentum_coef (float): momentum coefficient. learning_rate (float): learning rate. epoch (int): epoch (used in conjunction with diagnostics). Outputs are written to vs_item (updated velocity) and ps_item (updated weights) """ vs_item[:] = vs_item * momentum_coef - us_item * learning_rate ps_item[:] = ps_item + vs_item def gdmwd_compound(self, ps_item, us_item, vs_item, momentum_coef, learning_rate, wd, epoch): """ Perform gradient descent update with momentum and weight decay. Arguments: ps_item (GPUTensor): parameter tensor (e.g. a weight matrix) us_item (GPUTensor): update tensor, contains gradient wrt. weights vs_item (GPUTensor): velocity tensor. momentum_coef (float): momentum coefficient. learning_rate (float): learning rate. wd (float): weight decay parameter. epoch (int): epoch (used in conjunction with diagnostics). Outputs: ps_item, the updated weights. vs_item, the updated velocity. us_item, used as a temp buffer. """ vs_item[:] = (vs_item * momentum_coef - us_item * learning_rate - ps_item * learning_rate * wd) ps_item[:] = ps_item + vs_item def exp_mavg(self, mavg, newval, rho): """ Calculate the exponential moving average Arguments: mavg: The running value of the moving average newval: New sample to be added to the moving average rho: Interpolation value """ mavg[:] = rho * mavg + (1.0 - rho) * newval def ada_update(self, ps_item, us_item, gs_item, ds_item, ls_item, ss_item, rho, epsilon): """ Update rule for AdaDelta (Zeiler, http://arxiv.org/abs/1212.5701) Arguments: ps_item: weight / parameter (will be updated) us_item: update gs_item: expected value of Gradient Squared (will be updated) ds_item: expected value of Delta Squared (will be updated) ls_item: learning rate (will be updated) ss_item: Scratch Space rho: decay constant (determines window size) epsilon: small positive constant for numerical stability """ # Accumulate E[Grad^2] gs_item[:] = gs_item * rho + (1.0 - rho) * us_item * us_item # Calculate Updates ls_item[:] = self.ng.sqrt((ds_item + epsilon) / (gs_item + epsilon)) * (-1.0) * us_item # Accumulate E[Delt^2] ds_item[:] = ds_item * rho + (1.0 - rho) * ls_item * ls_item # Final update to the params ps_item[:] = ps_item + ls_item def rms_update(self, params, updates, run_squares, velocity, scratch_space, gamma, epsilon, learning_rate, momentum_coef): # Update running squares run_squares[:] = gamma * run_squares + (1. 
- gamma) * updates * updates # Now scale the gradient by lr / rms(grad) (with a epsilon term for # stability) and use it to update the params if momentum_coef == 0: params[:] = params - learning_rate * updates * self.ng.reciprocal( self.ng.sqrt(run_squares) + epsilon) else: velocity[:] = velocity * momentum_coef - \ learning_rate * updates * \ self.ng.reciprocal(self.ng.sqrt(run_squares) + epsilon) params[:] = params + velocity def fprop_bn_compound(self, inputs, beta, gamma, eps, xhat, xmean, xvar, gmean, gvar, rho, out): """ Batch normalization forward pass, compounded to run in 3 kernel calls. Arguments: inputs: input data to be normalized beta: location parameter gamma: scale parameter eps: small constant for numerical stability xvar: variance (updated) xhat: normalized input (updated) out: normalized and rescaled input (updated) """ xvar[:] = self.ng.var(inputs, axis=1) xmean[:] = self.ng.mean(inputs, axis=1) gmean[:] = gmean * rho + (1.0 - rho) * xmean gvar[:] = gvar * rho + (1.0 - rho) * xvar xvar[:] = self.ng.reciprocal(self.ng.sqrt(xvar + eps)) xhat[:] = xvar * (inputs - xmean) out[:] = xhat * gamma + beta return out def bprop_bn_compound(self, xhat, error, xvar, gamma, beta_updates, gamma_updates): """ Batch normalization backward pass, compounded to run with 4 kernel calls. Arguments: xhat: normalized input data (updated) error: backpropagated deltas (updated) xvar: precomputed variance gamma: scale parameter beta_updates: gradient update for beta (updated) gamma_updates: gradient update for gamma (updated) """ gamma_updates[:] = self.ng.sum(xhat * error, axis=1) beta_updates[:] = self.ng.sum(error, axis=1) xhat[:] = (xhat * gamma_updates + beta_updates) / float(xhat.shape[1]) error[:] = xvar * gamma * (error - xhat)
[ "logging.getLogger", "nervanagpu.NervanaGPU", "numpy.random.normal", "pycuda.driver.pagelocked_empty", "pycuda.driver.Stream", "pycuda.driver.Device", "pycuda.driver.memcpy_htod_async", "pycuda.driver.init", "neon.diagnostics.timing_decorators.FlopsDecorator", "numpy.random.seed", "numpy.random.uniform", "pycuda.driver.Event", "atexit.register" ]
[((1219, 1246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1236, 1246), False, 'import logging\n'), ((1654, 1664), 'pycuda.driver.init', 'drv.init', ([], {}), '()\n', (1662, 1664), True, 'import pycuda.driver as drv\n'), ((1765, 1789), 'atexit.register', 'atexit.register', (['ctx.pop'], {}), '(ctx.pop)\n', (1780, 1789), False, 'import atexit\n'), ((1808, 1853), 'nervanagpu.NervanaGPU', 'NervanaGPU', ([], {'stochastic_round': 'stochastic_round'}), '(stochastic_round=stochastic_round)\n', (1818, 1853), False, 'from nervanagpu import NervanaGPU\n'), ((3495, 3553), 'pycuda.driver.pagelocked_empty', 'drv.pagelocked_empty', (['shape', 'dtype'], {'order': '"""C"""', 'mem_flags': '(0)'}), "(shape, dtype, order='C', mem_flags=0)\n", (3515, 3553), True, 'import pycuda.driver as drv\n'), ((3599, 3611), 'pycuda.driver.Stream', 'drv.Stream', ([], {}), '()\n', (3609, 3611), True, 'import pycuda.driver as drv\n'), ((3712, 3760), 'pycuda.driver.memcpy_htod_async', 'drv.memcpy_htod_async', (['dest.gpudata', 'src', 'stream'], {}), '(dest.gpudata, src, stream)\n', (3733, 3760), True, 'import pycuda.driver as drv\n'), ((4142, 4162), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4156, 4162), True, 'import numpy as np\n'), ((5029, 5040), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (5038, 5040), True, 'import pycuda.driver as drv\n'), ((5060, 5071), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (5069, 5071), True, 'import pycuda.driver as drv\n'), ((5098, 5118), 'neon.diagnostics.timing_decorators.FlopsDecorator', 'FlopsDecorator', (['self'], {}), '(self)\n', (5112, 5118), False, 'from neon.diagnostics.timing_decorators import FlopsDecorator\n'), ((6134, 6168), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', 'size'], {}), '(low, high, size)\n', (6151, 6168), True, 'import numpy as np\n'), ((6440, 6474), 'numpy.random.normal', 'np.random.normal', (['loc', 'scale', 'size'], {}), '(loc, scale, size)\n', (6456, 6474), True, 'import numpy as np\n'), ((1698, 1719), 'pycuda.driver.Device', 'drv.Device', (['device_id'], {}), '(device_id)\n', (1708, 1719), True, 'import pycuda.driver as drv\n')]
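The compound optimizer methods in the backend code above spell out their update rules directly in the docstrings and bodies (momentum, momentum with weight decay, AdaDelta, RMSProp). As a quick sanity check, the same AdaDelta arithmetic from ada_update can be reproduced in plain NumPy outside the GPU backend; the array shapes, seed, and hyperparameter values below are illustrative assumptions, not taken from the entry above.

import numpy as np

rng = np.random.default_rng(0)

ps = rng.normal(size=(4, 3))   # parameters (weights)
us = rng.normal(size=(4, 3))   # gradient w.r.t. the parameters
gs = np.zeros_like(ps)          # running E[grad^2]
ds = np.zeros_like(ps)          # running E[delta^2]
rho, epsilon = 0.95, 1e-6

# Accumulate E[Grad^2]
gs = gs * rho + (1.0 - rho) * us * us
# Calculate the update (negative sign so we descend the gradient)
ls = np.sqrt((ds + epsilon) / (gs + epsilon)) * (-1.0) * us
# Accumulate E[Delt^2]
ds = ds * rho + (1.0 - rho) * ls * ls
# Final update to the params
ps = ps + ls

print(ps)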
# -*- encoding: utf-8 -*- import os import pickle import sys import time import glob import unittest import unittest.mock import numpy as np import pandas as pd import sklearn.datasets from smac.scenario.scenario import Scenario from smac.facade.roar_facade import ROAR from autosklearn.util.backend import Backend from autosklearn.automl import AutoML import autosklearn.automl from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.metrics import accuracy, log_loss, balanced_accuracy import autosklearn.pipeline.util as putil from autosklearn.util.logging_ import setup_logger, get_logger from autosklearn.constants import MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION, REGRESSION from smac.tae.execute_ta_run import StatusType sys.path.append(os.path.dirname(__file__)) from base import Base # noqa (E402: module level import not at top of file) class AutoMLStub(AutoML): def __init__(self): self.__class__ = AutoML self._task = None class AutoMLTest(Base, unittest.TestCase): _multiprocess_can_split_ = True def setUp(self): super().setUp() self.automl = AutoMLStub() self.automl._shared_mode = False self.automl._seed = 42 self.automl._backend = unittest.mock.Mock(spec=Backend) self.automl._delete_output_directories = lambda: 0 def test_refit_shuffle_on_fail(self): backend_api = self._create_backend('test_refit_shuffle_on_fail') failing_model = unittest.mock.Mock() failing_model.fit.side_effect = [ValueError(), ValueError(), None] failing_model.fit_transformer.side_effect = [ ValueError(), ValueError(), (None, {})] failing_model.get_max_iter.return_value = 100 auto = AutoML(backend_api, 20, 5) ensemble_mock = unittest.mock.Mock() ensemble_mock.get_selected_model_identifiers.return_value = [(1, 1, 50.0)] auto.ensemble_ = ensemble_mock for budget_type in [None, 'iterations']: auto._budget_type = budget_type auto.models_ = {(1, 1, 50.0): failing_model} # Make sure a valid 2D array is given to automl X = np.array([1, 2, 3]).reshape(-1, 1) y = np.array([1, 2, 3]) auto.refit(X, y) self.assertEqual(failing_model.fit.call_count, 3) self.assertEqual(failing_model.fit_transformer.call_count, 3) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_only_loads_ensemble_models(self): def side_effect(ids, *args, **kwargs): return models if ids is identifiers else {} # Add a resampling strategy as this is required by load_models self.automl._resampling_strategy = 'holdout' identifiers = [(1, 2), (3, 4)] models = [42] load_ensemble_mock = unittest.mock.Mock() load_ensemble_mock.get_selected_model_identifiers.return_value = identifiers self.automl._backend.load_ensemble.return_value = load_ensemble_mock self.automl._backend.load_models_by_identifiers.side_effect = side_effect self.automl._load_models() self.assertEqual(models, self.automl.models_) self.assertIsNone(self.automl.cv_models_) self.automl._resampling_strategy = 'cv' models = [42] self.automl._backend.load_cv_models_by_identifiers.side_effect = side_effect self.automl._load_models() self.assertEqual(models, self.automl.cv_models_) def test_check_for_models_if_no_ensemble(self): models = [42] self.automl._backend.load_ensemble.return_value = None self.automl._backend.list_all_models.return_value = models self.automl._disable_evaluator_output = False self.automl._load_models() def test_raises_if_no_models(self): self.automl._backend.load_ensemble.return_value = None self.automl._backend.list_all_models.return_value = [] self.automl._resampling_strategy = 'holdout' 
self.automl._disable_evaluator_output = False self.assertRaises(ValueError, self.automl._load_models) self.automl._disable_evaluator_output = True self.automl._load_models() def test_fit(self): backend_api = self._create_backend('test_fit') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.8) self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_delete_non_candidate_models(self): backend_api = self._create_backend( 'test_delete', delete_tmp_folder_after_terminate=False) seed = 555 X, Y, _, _ = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend_api, time_left_for_this_task=30, per_run_time_limit=5, ensemble_nbest=3, seed=seed, initial_configurations_via_metalearning=0, resampling_strategy='holdout', include_estimators=['sgd'], include_preprocessors=['no_preprocessing'], metric=accuracy, ) automl.fit(X, Y, task=MULTICLASS_CLASSIFICATION, X_test=X, y_test=Y) # Assert at least one model file has been deleted and that there were no # deletion errors log_file_path = glob.glob(os.path.join( backend_api.temporary_directory, 'AutoML(' + str(seed) + '):*.log')) with open(log_file_path[0]) as log_file: log_content = log_file.read() self.assertIn('Deleted files of non-candidate model', log_content) self.assertNotIn('Failed to delete files of non-candidate model', log_content) self.assertNotIn('Failed to lock model', log_content) # Assert that the files of the models used by the ensemble weren't deleted model_files = backend_api.list_all_models(seed=seed) model_files_idx = set() for m_file in model_files: # Extract the model identifiers from the filename m_file = os.path.split(m_file)[1].replace('.model', '').split('.', 2) model_files_idx.add((int(m_file[0]), int(m_file[1]), float(m_file[2]))) ensemble_members_idx = set(automl.ensemble_.identifiers_) self.assertTrue(ensemble_members_idx.issubset(model_files_idx)) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fit_roar(self): def get_roar_object_callback( scenario_dict, seed, ta, ta_kwargs, **kwargs ): """Random online adaptive racing. 
http://ml.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf""" scenario = Scenario(scenario_dict) return ROAR( scenario=scenario, rng=seed, tae_runner=ta, tae_runner_kwargs=ta_kwargs, ) backend_api = self._create_backend('test_fit_roar') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, initial_configurations_via_metalearning=0, get_smac_object_callback=get_roar_object_callback, metric=accuracy, ) setup_logger() automl._logger = get_logger('test_fit_roar') automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.8) self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_binary_score_and_include(self): """ Test fix for binary classification prediction taking the index 1 of second dimension in prediction matrix """ backend_api = self._create_backend('test_binary_score_and_include') data = sklearn.datasets.make_classification( n_samples=400, n_features=10, n_redundant=1, n_informative=3, n_repeated=1, n_clusters_per_class=2, random_state=1) X_train = data[0][:200] Y_train = data[1][:200] X_test = data[0][200:] Y_test = data[1][200:] automl = autosklearn.automl.AutoML( backend_api, 20, 5, include_estimators=['sgd'], include_preprocessors=['no_preprocessing'], metric=accuracy, ) automl.fit(X_train, Y_train, task=BINARY_CLASSIFICATION) self.assertEqual(automl._task, BINARY_CLASSIFICATION) # TODO, the assumption from above is not really tested here # Also, the score method should be removed, it only makes little sense score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.4) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_automl_outputs(self): backend_api = self._create_backend('test_automl_outputs') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') name = 'iris' data_manager_file = os.path.join( backend_api.temporary_directory, '.auto-sklearn', 'datamanager.pkl' ) auto = autosklearn.automl.AutoML( backend_api, 20, 5, initial_configurations_via_metalearning=0, seed=100, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_automl_outputs') auto.fit( X=X_train, y=Y_train, X_test=X_test, y_test=Y_test, dataset_name=name, task=MULTICLASS_CLASSIFICATION, ) # pickled data manager (without one hot encoding!) 
with open(data_manager_file, 'rb') as fh: D = pickle.load(fh) self.assertTrue(np.allclose(D.data['X_train'], X_train)) # Check that all directories are there fixture = ['cv_models', 'true_targets_ensemble.npy', 'start_time_100', 'datamanager.pkl', 'predictions_ensemble', 'ensembles', 'predictions_test', 'models'] self.assertEqual(sorted(os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn'))), sorted(fixture)) # At least one ensemble, one validation, one test prediction and one # model and one ensemble fixture = os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble')) self.assertGreater(len(fixture), 0) fixture = glob.glob(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'models', '100.*.model')) self.assertGreater(len(fixture), 0) fixture = os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'ensembles')) self.assertIn('100.0000000001.ensemble', fixture) # Start time start_time_file_path = os.path.join(backend_api.temporary_directory, '.auto-sklearn', "start_time_100") with open(start_time_file_path, 'r') as fh: start_time = float(fh.read()) self.assertGreaterEqual(time.time() - start_time, 10) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_do_dummy_prediction(self): datasets = { 'breast_cancer': BINARY_CLASSIFICATION, 'wine': MULTICLASS_CLASSIFICATION, 'diabetes': REGRESSION, } for name, task in datasets.items(): backend_api = self._create_backend('test_do_dummy_prediction') X_train, Y_train, X_test, Y_test = putil.get_dataset(name) datamanager = XYDataManager( X_train, Y_train, X_test, Y_test, task=task, dataset_name=name, feat_type=None, ) auto = autosklearn.automl.AutoML( backend_api, 20, 5, initial_configurations_via_metalearning=25, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_do_dummy_predictions') auto._backend.save_datamanager(datamanager) D = backend_api.load_datamanager() # Check if data manager is correcly loaded self.assertEqual(D.info['task'], datamanager.info['task']) auto._do_dummy_prediction(D, 1) # Ensure that the dummy predictions are not in the current working # directory, but in the temporary directory. self.assertFalse(os.path.exists(os.path.join(os.getcwd(), '.auto-sklearn'))) self.assertTrue(os.path.exists(os.path.join( backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble', 'predictions_ensemble_1_1_0.0.npy'))) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) @unittest.mock.patch('autosklearn.evaluation.ExecuteTaFuncWithQueue.run') def test_fail_if_dummy_prediction_fails(self, ta_run_mock): backend_api = self._create_backend('test_fail_if_dummy_prediction_fails') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') datamanager = XYDataManager( X_train, Y_train, X_test, Y_test, task=2, feat_type=['Numerical' for i in range(X_train.shape[1])], dataset_name='iris', ) time_for_this_task = 30 per_run_time = 10 auto = autosklearn.automl.AutoML(backend_api, time_for_this_task, per_run_time, initial_configurations_via_metalearning=25, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_fail_if_dummy_prediction_fails') auto._backend._make_internals_directory() auto._backend.save_datamanager(datamanager) # First of all, check that ta.run() is actually called. 
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test" auto._do_dummy_prediction(datamanager, 1) ta_run_mock.assert_called_once_with(1, cutoff=time_for_this_task) # Case 1. Check that function raises no error when statustype == success. # ta.run() returns status, cost, runtime, and additional info. ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test" raised = False try: auto._do_dummy_prediction(datamanager, 1) except ValueError: raised = True self.assertFalse(raised, 'Exception raised') # Case 2. Check that if statustype returned by ta.run() != success, # the function raises error. ta_run_mock.return_value = StatusType.CRASHED, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.CRASHED ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.ABORT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.ABORT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.TIMEOUT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.TIMEOUT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.MEMOUT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.MEMOUT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.CAPPED, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.CAPPED ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) @unittest.mock.patch('autosklearn.smbo.AutoMLSMBO.run_smbo') def test_exceptions_inside_log_in_smbo(self, smbo_run_mock): # Make sure that any exception during the AutoML fit due to # SMAC are properly captured in a log file backend_api = self._create_backend('test_exceptions_inside_log') self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) automl = autosklearn.automl.AutoML( backend_api, 20, 5, metric=accuracy, ) output_file = 'test_exceptions_inside_log.log' setup_logger(output_file=output_file) logger = get_logger('test_exceptions_inside_log') # Create a custom exception to prevent other errors to slip in class MyException(Exception): pass X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') # The first call is on dummy predictor failure message = str(np.random.randint(100)) + '_run_smbo' smbo_run_mock.side_effect = MyException(message) with unittest.mock.patch('autosklearn.automl.AutoML._get_logger') as mock: mock.return_value = logger with self.assertRaises(MyException): automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) with open(output_file) as f: self.assertTrue(message in f.read()) # Cleanup os.unlink(output_file) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_load_best_individual_model(self): backend_api = self._create_backend('test_fit') for metric in [log_loss, balanced_accuracy]: X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=metric, ) with unittest.mock.patch( 
'autosklearn.ensemble_builder.EnsembleBuilder.run' ) as mock_ensemble_run: mock_ensemble_run.side_effect = MemoryError automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) # A memory error occurs in the ensemble construction self.assertIsNone(automl._backend.load_ensemble(automl._seed)) # The load model is robust to this and loads the best model automl._load_models() self.assertIsNotNone(automl.ensemble_) # Just 1 model is there for ensemble and all weight must be on it get_models_with_weights = automl.get_models_with_weights() self.assertEqual(len(get_models_with_weights), 1) self.assertEqual(get_models_with_weights[0][0], 1.0) # Match a toy dataset if metric._sign < 0: self.assertLessEqual(automl.score(X_test, Y_test), 0.2) else: self.assertGreaterEqual(automl.score(X_test, Y_test), 0.8) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fail_if_feat_type_on_pandas_input(self): """We do not support feat type when pandas is provided as an input """ backend_api = self._create_backend('test_fail_feat_pandas') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]}) y_train = [1, 0] with self.assertRaisesRegex(ValueError, "feat_type cannot be provided when using pandas"): automl.fit( X_train, y_train, task=BINARY_CLASSIFICATION, feat_type=['Categorical', 'Numerical'], ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fail_if_dtype_changes_automl(self): """We do not support changes in the input type. Once a estimator is fitted, it should not change data type """ backend_api = self._create_backend('test_fail_feat_typechange') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]}) y_train = [1, 0] automl.InputValidator.validate(X_train, y_train, is_classification=True) with self.assertRaisesRegex(ValueError, "Auto-sklearn previously received features of type"): automl.fit( X_train.to_numpy(), y_train, task=BINARY_CLASSIFICATION, ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) if __name__ == "__main__": unittest.main()
[ "numpy.array", "autosklearn.util.logging_.get_logger", "unittest.main", "unittest.mock.patch", "autosklearn.pipeline.util.get_dataset", "os.path.split", "smac.facade.roar_facade.ROAR", "os.unlink", "pandas.DataFrame", "autosklearn.data.xy_data_manager.XYDataManager", "numpy.allclose", "unittest.mock.Mock", "pickle.load", "os.path.dirname", "time.time", "os.path.join", "os.getcwd", "autosklearn.automl.AutoML", "numpy.random.randint", "smac.scenario.scenario.Scenario", "autosklearn.util.logging_.setup_logger" ]
[((774, 799), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (789, 799), False, 'import os\n'), ((14263, 14335), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.evaluation.ExecuteTaFuncWithQueue.run"""'], {}), "('autosklearn.evaluation.ExecuteTaFuncWithQueue.run')\n", (14282, 14335), False, 'import unittest\n'), ((18352, 18411), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.smbo.AutoMLSMBO.run_smbo"""'], {}), "('autosklearn.smbo.AutoMLSMBO.run_smbo')\n", (18371, 18411), False, 'import unittest\n'), ((23669, 23684), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23682, 23684), False, 'import unittest\n'), ((1255, 1287), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {'spec': 'Backend'}), '(spec=Backend)\n', (1273, 1287), False, 'import unittest\n'), ((1488, 1508), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (1506, 1508), False, 'import unittest\n'), ((1760, 1786), 'autosklearn.automl.AutoML', 'AutoML', (['backend_api', '(20)', '(5)'], {}), '(backend_api, 20, 5)\n', (1766, 1786), False, 'from autosklearn.automl import AutoML\n'), ((1811, 1831), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (1829, 1831), False, 'import unittest\n'), ((2910, 2930), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (2928, 2930), False, 'import unittest\n'), ((4412, 4437), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (4429, 4437), True, 'import autosklearn.pipeline.util as putil\n'), ((5206, 5231), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (5223, 5231), True, 'import autosklearn.pipeline.util as putil\n'), ((7685, 7710), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (7702, 7710), True, 'import autosklearn.pipeline.util as putil\n'), ((8027, 8041), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (8039, 8041), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((8067, 8094), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_fit_roar"""'], {}), "('test_fit_roar')\n", (8077, 8094), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((9910, 9935), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (9927, 9935), True, 'import autosklearn.pipeline.util as putil\n'), ((9986, 10071), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""datamanager.pkl"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'datamanager.pkl')\n", (9998, 10071), False, 'import os\n'), ((10313, 10327), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (10325, 10327), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((10351, 10384), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_automl_outputs"""'], {}), "('test_automl_outputs')\n", (10361, 10384), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((12053, 12138), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""start_time_100"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'start_time_100'\n )\n", (12065, 12138), False, 'import os\n'), ((14526, 14551), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (14543, 14551), True, 
'import autosklearn.pipeline.util as putil\n'), ((15203, 15217), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (15215, 15217), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((15241, 15290), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_fail_if_dummy_prediction_fails"""'], {}), "('test_fail_if_dummy_prediction_fails')\n", (15251, 15290), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((18983, 19020), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {'output_file': 'output_file'}), '(output_file=output_file)\n', (18995, 19020), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19038, 19078), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_exceptions_inside_log"""'], {}), "('test_exceptions_inside_log')\n", (19048, 19078), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19250, 19275), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (19267, 19275), True, 'import autosklearn.pipeline.util as putil\n'), ((19897, 19919), 'os.unlink', 'os.unlink', (['output_file'], {}), '(output_file)\n', (19906, 19919), False, 'import os\n'), ((22167, 22207), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1], 'c': [1, 2]}"], {}), "({'a': [1, 1], 'c': [1, 2]})\n", (22179, 22207), True, 'import pandas as pd\n'), ((23115, 23155), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 1], 'c': [1, 2]}"], {}), "({'a': [1, 1], 'c': [1, 2]})\n", (23127, 23155), True, 'import pandas as pd\n'), ((2233, 2252), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2241, 2252), True, 'import numpy as np\n'), ((7380, 7403), 'smac.scenario.scenario.Scenario', 'Scenario', (['scenario_dict'], {}), '(scenario_dict)\n', (7388, 7403), False, 'from smac.scenario.scenario import Scenario\n'), ((7423, 7500), 'smac.facade.roar_facade.ROAR', 'ROAR', ([], {'scenario': 'scenario', 'rng': 'seed', 'tae_runner': 'ta', 'tae_runner_kwargs': 'ta_kwargs'}), '(scenario=scenario, rng=seed, tae_runner=ta, tae_runner_kwargs=ta_kwargs)\n', (7427, 7500), False, 'from smac.facade.roar_facade import ROAR\n'), ((10714, 10729), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (10725, 10729), False, 'import pickle\n'), ((11416, 11506), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""predictions_ensemble"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'predictions_ensemble')\n", (11428, 11506), False, 'import os\n'), ((11619, 11710), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""models"""', '"""100.*.model"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'models',\n '100.*.model')\n", (11631, 11710), False, 'import os\n'), ((11823, 11898), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""ensembles"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn', 'ensembles')\n", (11835, 11898), False, 'import os\n'), ((12836, 12859), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['name'], {}), '(name)\n', (12853, 12859), True, 'import autosklearn.pipeline.util as putil\n'), ((12886, 12984), 'autosklearn.data.xy_data_manager.XYDataManager', 'XYDataManager', (['X_train', 'Y_train', 'X_test', 'Y_test'], {'task': 'task', 'dataset_name': 'name', 'feat_type': 'None'}), '(X_train, Y_train, X_test, Y_test, task=task, 
dataset_name=\n name, feat_type=None)\n', (12899, 12984), False, 'from autosklearn.data.xy_data_manager import XYDataManager\n'), ((13277, 13291), 'autosklearn.util.logging_.setup_logger', 'setup_logger', ([], {}), '()\n', (13289, 13291), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((13319, 13358), 'autosklearn.util.logging_.get_logger', 'get_logger', (['"""test_do_dummy_predictions"""'], {}), "('test_do_dummy_predictions')\n", (13329, 13358), False, 'from autosklearn.util.logging_ import setup_logger, get_logger\n'), ((19462, 19522), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.automl.AutoML._get_logger"""'], {}), "('autosklearn.automl.AutoML._get_logger')\n", (19481, 19522), False, 'import unittest\n'), ((20234, 20259), 'autosklearn.pipeline.util.get_dataset', 'putil.get_dataset', (['"""iris"""'], {}), "('iris')\n", (20251, 20259), True, 'import autosklearn.pipeline.util as putil\n'), ((10758, 10797), 'numpy.allclose', 'np.allclose', (["D.data['X_train']", 'X_train'], {}), "(D.data['X_train'], X_train)\n", (10769, 10797), True, 'import numpy as np\n'), ((12304, 12315), 'time.time', 'time.time', ([], {}), '()\n', (12313, 12315), False, 'import time\n'), ((19353, 19375), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (19370, 19375), True, 'import numpy as np\n'), ((20490, 20561), 'unittest.mock.patch', 'unittest.mock.patch', (['"""autosklearn.ensemble_builder.EnsembleBuilder.run"""'], {}), "('autosklearn.ensemble_builder.EnsembleBuilder.run')\n", (20509, 20561), False, 'import unittest\n'), ((2182, 2201), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2190, 2201), True, 'import numpy as np\n'), ((11112, 11174), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn')\n", (11124, 11174), False, 'import os\n'), ((13960, 14086), 'os.path.join', 'os.path.join', (['backend_api.temporary_directory', '""".auto-sklearn"""', '"""predictions_ensemble"""', '"""predictions_ensemble_1_1_0.0.npy"""'], {}), "(backend_api.temporary_directory, '.auto-sklearn',\n 'predictions_ensemble', 'predictions_ensemble_1_1_0.0.npy')\n", (13972, 14086), False, 'import os\n'), ((13828, 13839), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13837, 13839), False, 'import os\n'), ((6617, 6638), 'os.path.split', 'os.path.split', (['m_file'], {}), '(m_file)\n', (6630, 6638), False, 'import os\n')]
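The auto-sklearn test suite above leans heavily on one pattern: patch a target with unittest.mock, force a side_effect or return_value, then assert on the raised error and on how the mock was called. The stub below illustrates that pattern with only the standard library; the patched target (os.path.exists) and the error message are placeholders chosen for the illustration, not part of auto-sklearn.

import os
import unittest
import unittest.mock


class MockPatternExample(unittest.TestCase):
    def test_patched_call_raises(self):
        # Replace a callable, make it fail, and check both the exception
        # and the recorded call, mirroring how the suite drives
        # ExecuteTaFuncWithQueue.run / AutoMLSMBO.run_smbo into error paths.
        with unittest.mock.patch("os.path.exists") as mocked:
            mocked.side_effect = ValueError("dummy prediction failed")
            with self.assertRaisesRegex(ValueError, "dummy prediction failed"):
                os.path.exists("/tmp")
            mocked.assert_called_once_with("/tmp")


if __name__ == "__main__":
    unittest.main()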
""" This implements an abstrace base class Ring . Rationale: Goal is to separate the datatype specification from the algorithms and containers for the following reasons: 1) It allows to directly use the algorithms *without* overhead. E.g. calling mul(z.data, x.data, y.data) has much less overhead than z = x.__mul__(y). data is to be kept as close as possible to machine primitives. E.g. data is array or tuple of arrays. 2) Potential reuse of an algorithm in several datatypes. 3) Relatively easy to connect high performance algorithms with a very highlevel abstract description. For instance, most programming languages allow calling C-functions. Therefore, the algorithms should be given as void fcn(int A, double B, ...) For instance, the datatype is a truncated Taylor polynomial R[t]/<t^D> of the class Foo. The underlying container is a simple array of doubles. """ import numpy class Ring(object): """ An abstract base class in an attempt to follow the DRY principle. It implements the algebraic class of a ring as defined on http://en.wikipedia.org/wiki/Ring_%28mathematics%29 The idea is that the set is described in data and the operations +,* etc. are implemented as functions that operate on the data. E.g. the factor ring of natural numbers modulo 4, x.data = 3 y.data = 2 then z = add(x,y) is implemented as def add(x,y): return self.__class__((x.data*y.data)%4) and one obtains z.data = 1 Warning: Since this class is only of little value it may be deprecated in the future. """ data = NotImplementedError() def totype(self, x): """ tries to convert x to an object of the class works for : scalar x, numpy.ndarray x Remark: at the moment, scalar x expanded as Ring with the same degree as self though. The reason is a missing implementation that works for graded rings of different degree. Once such implementations exist, this function should be adapted. """ if numpy.isscalar(x): xdata = self.__class__.__zeros_like__(self.data) self.__class__.__scalar_to_data__(xdata, x) return self.__class__(xdata) elif isinstance(x, numpy.ndarray): raise NotImplementedError('sorry, not implemented just yet') elif not isinstance(x, self.__class__): raise NotImplementedError('Cannot convert x\n type(x) = %s but expected type(x) = %s'%(str(type(x)))) else: return x def __add__(self, rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.add(retval.data, self.data, rhs.data) return retval def __sub__(self, rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.sub(retval.data, self.data, rhs.data) return retval def __mul__(self,rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.mul(retval.data, self.data, rhs.data) return retval def __truediv__(self,rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.div(retval.data, self.data, rhs.data) return retval def __radd__(self, lhs): return self + lhs def __rmul__(self, lhs): return self * lhs def zeros_like(self): return self.__class__(self.__class__.__zeros_like__(self.data)) def __str__(self): return str(self.data)
[ "numpy.isscalar" ]
[((2205, 2222), 'numpy.isscalar', 'numpy.isscalar', (['x'], {}), '(x)\n', (2219, 2222), False, 'import numpy\n')]
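A minimal concrete subclass makes the Ring protocol above easier to see: the subclass supplies __zeros_like__, __scalar_to_data__ and the in-place add/sub/mul/div hooks, and the base class's operator overloads do the dispatch. The sketch below implements the factor ring Z/4Z mentioned in the docstring; it assumes the Ring class defined above is in scope, and it stores data in a length-1 NumPy array (an implementation choice for this illustration) so the hooks can update it in place.

import numpy


class ZMod4(Ring):
    """Toy ring Z/4Z; data is a length-1 numpy array so it can be updated in place."""

    def __init__(self, data):
        self.data = data

    @classmethod
    def __zeros_like__(cls, data):
        return numpy.zeros_like(data)

    @classmethod
    def __scalar_to_data__(cls, data, x):
        data[...] = x % 4

    @classmethod
    def add(cls, z, x, y):
        z[...] = (x + y) % 4

    @classmethod
    def sub(cls, z, x, y):
        z[...] = (x - y) % 4

    @classmethod
    def mul(cls, z, x, y):
        z[...] = (x * y) % 4

    @classmethod
    def div(cls, z, x, y):
        # division is not defined for arbitrary elements of Z/4Z
        raise NotImplementedError


x = ZMod4(numpy.array([3]))
y = ZMod4(numpy.array([2]))
print(x + y)   # [1]  -> (3 + 2) % 4
print(x * y)   # [2]  -> (3 * 2) % 4
print(x + 1)   # [0]  -> scalar promoted through totype, then (3 + 1) % 4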
####################################
# author: <NAME>
# course: 2020 Complete Python Bootcamps: From Zero to Hero in Python
# purpose: lecture notes
# description: Section 15 - Web Scraping
# other: N/A
####################################

# RULES
# 1. always try to get permission before scraping, otherwise I might be blocked
# 2. check the laws of whatever country we are operating in (for legal issues)

# LIMITATIONS
# each website is unique -> so for each website there must exist a Python script
# an update to a website might break my script

import requests
import bs4

# Grabbing a title
result = requests.get("http://example.com")
type(result)
result.text

# bs with lxml transforms the previous raw html into the following
soup = bs4.BeautifulSoup(result.text, 'lxml')
soup

# returns the tag we specified as a list (i.e., there might be more than one)
soup.select('title')
soup.select('title')[0].getText()

soup.select('p')
site_paragraphs = soup.select('p')
type(site_paragraphs[0])  # not a string, instead is a specialized bs object,
                          # which is why we can do something like call .getText()

# Grabbing a class (from CSS) using soup.select()
# 'div'         : all elements with 'div' tag
# '#some_id'    : elements containing id='some_id'
# '.some_class' : elements containing class='some_class'
# 'div span'    : any element named span within a div element
# 'div > span'  : any element named span directly within a div element, with
#                 nothing in between
res = requests.get("https://en.wikipedia.org/wiki/Jonas_Salk")
soup = bs4.BeautifulSoup(res.text, 'lxml')
soup.select('.toctext')[0].text
soup.select('.toctext')[0].getText()

for item in soup.select('.toctext'):
    print(item.text)

# Grabbing an image
# soup.select('img')  # can return more than what is needed (it will depend on
#                     # the website)
soup.select('.thumbimage')
jonas_salk = soup.select('.thumbimage')[0]
jonas_salk['src']  # we can treat it as a dictionary

image_link = requests.get('http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg')
# image_link.content  # raw content of the image which is a binary file

# make sure to use the same format that the image has
f = open('my_image_image.jpg', 'wb')  # wb means write binary
f.write(image_link.content)
f.close()

# Multiple elements across multiple pages
# GOAL: get title of every book with a 2 star rating
# Check that this also works with page 1
# http://books.toscrape.com/catalogue/page-2.html
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'

req = requests.get(base_url.format(1))
soup = bs4.BeautifulSoup(req.text, 'lxml')
products = soup.select(".product_pod")  # always check the length, in this case should be 20
example = products[0]

# one way (not useful every time)
'star-rating Two' in str(example)

# another way (checking for the presence of a class)
# if there is a space in a class we should add a dot
example.select('.star-rating.Three')
example.select('.star-rating.Two')  # nothing
example.select('a')[1]['title']

two_star_titles = []
for n in range(1, 51):
    scrape_url = base_url.format(n)
    req = requests.get(scrape_url)  # fetch page n, not always page 1
    soup = bs4.BeautifulSoup(req.text, 'lxml')
    books = soup.select(".product_pod")

    for book in books:
        if len(book.select('.star-rating.Two')) != 0:
            two_star_titles.append(book.select('a')[1]['title'])

two_star_titles
[ "bs4.BeautifulSoup", "requests.get" ]
[((608, 642), 'requests.get', 'requests.get', (['"""http://example.com"""'], {}), "('http://example.com')\n", (620, 642), False, 'import requests\n'), ((743, 781), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['result.text', '"""lxml"""'], {}), "(result.text, 'lxml')\n", (760, 781), False, 'import bs4\n'), ((1520, 1576), 'requests.get', 'requests.get', (['"""https://en.wikipedia.org/wiki/Jonas_Salk"""'], {}), "('https://en.wikipedia.org/wiki/Jonas_Salk')\n", (1532, 1576), False, 'import requests\n'), ((1584, 1619), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (1601, 1619), False, 'import bs4\n'), ((2021, 2153), 'requests.get', 'requests.get', (['"""http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg"""'], {}), "(\n 'http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg'\n )\n", (2033, 2153), False, 'import requests\n'), ((2664, 2699), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['req.text', '"""lxml"""'], {}), "(req.text, 'lxml')\n", (2681, 2699), False, 'import bs4\n'), ((3243, 3278), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['req.text', '"""lxml"""'], {}), "(req.text, 'lxml')\n", (3260, 3278), False, 'import bs4\n')]
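The CSS selector rules listed in the notes above ('div', '#some_id', '.some_class', 'div span', 'div > span') can be checked without any network access by parsing a small inline HTML snippet. The sketch below is such a self-contained check; it uses the stdlib 'html.parser' backend instead of lxml so it runs with only beautifulsoup4 installed, and the HTML is made up for the example.

import bs4

html = """
<div id="intro">
  <p class="lead">Hello</p>
  <span>direct child</span>
  <p><span>nested span</span></p>
</div>
"""

soup = bs4.BeautifulSoup(html, "html.parser")
print(soup.select("#intro"))      # the element with id="intro"
print(soup.select(".lead"))       # elements with class="lead"
print(soup.select("div span"))    # any span inside a div (both spans)
print(soup.select("div > span"))  # only spans that are direct children of a div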
#!/usr/bin/env python3 import sys import os import re import json import traceback import pkg_resources import tarfile from collections import OrderedDict import anchore_engine.analyzers.utils, anchore_engine.utils def get_python_evidence(tfl, member, memberhash, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) el = {} el.update(binary_package_el) patt_bin = re.match("^python([0-9]+\.[0-9]+)$", filename) patt_lib = re.match("^libpython([0-9]+\.[0-9]+).so.*$", filename) if (patt_bin or patt_lib) and member.isreg(): f_vers = "" if patt_bin: f_vers = patt_bin.group(1) elif patt_lib: f_vers = patt_lib.group(1) with tfl.extractfile(member) as FH: for line in FH.readlines(): subline = line try: the_re = ".*{}\.([0-9]+[-_a-zA-Z0-9]*).*".format(f_vers) patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline) if patt and f_vers: b_vers = "{}.{}".format(f_vers, anchore_engine.utils.ensure_str(patt.group(1))) if b_vers.startswith(f_vers): el['name'] = 'python' el['version'] = b_vers el['location'] = fullpath evidence['python']['binary'].append( el ) break except Exception as err: raise err elif filename == "patchlevel.h" and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): line = line.strip() patt = re.match(b".*#define +PY_VERSION +\"*([0-9\.\-_a-zA-Z]+)\"*", line) if patt: h_vers = anchore_engine.utils.ensure_str(patt.group(1)) el['name'] = 'python' el['version'] = h_vers el['location'] = fullpath evidence['python']['devel'].append(el) break def get_golang_evidence(tfl, member, memberhash, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) el = {} el.update(binary_package_el) if filename in ['go'] and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): subline = line try: the_re = ".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*" patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline) if patt: vers = anchore_engine.utils.ensure_str(patt.group(1)) el['name'] = 'go' el['version'] = vers el['location'] = fullpath evidence['go']['binary'].append( el ) break except Exception as err: raise err elif filename == "VERSION" and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): line = line.strip() patt = re.match(b".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*", line) if patt: vers = anchore_engine.utils.ensure_str(patt.group(1)) final_loc = fullpath if memberhash.get(os.path.join(os.path.dirname(member.name), 'bin', 'go'), None): final_loc = os.path.join("/", os.path.dirname(member.name), 'bin', 'go') el['name'] = 'go' el['version'] = vers el['location'] = final_loc evidence['go']['devel'].append( el ) break def get_busybox_evidence(tfl, member, memberhash, distrodict, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) if filename == "busybox" and (member.isreg() or member.islnk()): # Perform any specific checks using prior metadata if distrodict.get('flavor', "") == 'BUSYB': patt = re.match(".*([0-9]+\.[0-9]+\.[0-9]+).*", distrodict.get('fullversion', "")) if patt: version = anchore_engine.utils.ensure_str(patt.group(1)) el = {} el.update(binary_package_el) el['name'] = 'busybox' el['version'] = version el['location'] = fullpath evidence['busybox']['binary'].append(el) analyzer_name = "package_list" try: config = anchore_engine.analyzers.utils.init_analyzer_cmdline(sys.argv, 
analyzer_name) except Exception as err: print(str(err)) sys.exit(1) imgname = config['imgid'] imgid = config['imgid_full'] outputdir = config['dirs']['outputdir'] unpackdir = config['dirs']['unpackdir'] squashtar = os.path.join(unpackdir, "squashed.tar") resultlist = {} version_found_map = {} binary_package_el = { 'name': None, 'version': None, 'location': None, 'type': 'binary', 'files': [], 'license': 'N/A', 'origin': 'N/A', 'metadata': json.dumps({}) } try: allfiles = {} if os.path.exists(unpackdir + "/anchore_allfiles.json"): with open(unpackdir + "/anchore_allfiles.json", 'r') as FH: allfiles = json.loads(FH.read()) else: fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(os.path.join(unpackdir, "squashed.tar")) with open(unpackdir + "/anchore_allfiles.json", 'w') as OFH: OFH.write(json.dumps(allfiles)) # read in previous analyzer output for helping to increase accuracy of findings fname = os.path.join(outputdir, 'pkgfiles.all') pkgfilesall = anchore_engine.analyzers.utils.read_kvfile_todict(fname) meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir) distrodict = anchore_engine.analyzers.utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO']) # set up ordered dictionary structure for the runtimes and evidence types evidence = OrderedDict() for runtime in ['python', 'go', 'busybox']: evidence[runtime] = OrderedDict() for etype in ['binary', 'devel']: evidence[runtime][etype] = [] # Perform a per file routine to evaluate files for gathering binary package version evidence with tarfile.open(os.path.join(unpackdir, "squashed.tar"), mode='r', format=tarfile.PAX_FORMAT) as tfl: alltnames = tfl.getnames() alltfiles = {} for name in alltnames: alltfiles[name] = True memberhash = anchore_engine.analyzers.utils.get_memberhash(tfl) for member in list(memberhash.values()): try: get_python_evidence(tfl, member, memberhash, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for python runtime evidence: {}".format(member.name, str(err))) try: get_golang_evidence(tfl, member, memberhash, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for golang runtime evidence: {}".format(member.name, str(err))) try: get_busybox_evidence(tfl, member, memberhash, distrodict, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for busybox runtime evidence: {}".format(member.name, str(err))) resultlist = {} for runtime in evidence.keys(): #['python', 'go']: for e in evidence[runtime].keys(): #['binary', 'devel']: for t in evidence[runtime][e]: version = t.get('version') location = t.get('location') if location in pkgfilesall: print ("INFO: Skipping evidence {} - file is owned by OS package".format(location)) else: key = "{}-{}".format(runtime, version) if key not in version_found_map: result = {} result.update(binary_package_el) result.update(t) result['metadata'] = json.dumps({"evidence_type": e}) resultlist[location] = json.dumps(result) version_found_map[key] = True try: squashtar = os.path.join(unpackdir, "squashed.tar") hints = anchore_engine.analyzers.utils.get_hintsfile(unpackdir, squashtar) for pkg in hints.get('packages', []): pkg_type = pkg.get('type', "").lower() if pkg_type == 'binary': try: pkg_key, el = anchore_engine.analyzers.utils._hints_to_binary(pkg) try: resultlist[pkg_key] = json.dumps(el) except Exception as err: print ("WARN: unable to add binary package ({}) 
from hints - excpetion: {}".format(pkg_key, err)) except Exception as err: print ("WARN: bad hints record encountered - exception: {}".format(err)) except Exception as err: print ("WARN: problem honoring hints file - exception: {}".format(err)) except Exception as err: import traceback traceback.print_exc() print("WARN: analyzer unable to complete - exception: " + str(err)) if resultlist: ofile = os.path.join(outputdir, 'pkgs.binary') anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, resultlist) #print ("RESULT: {}".format(resultlist)) sys.exit(0)
[ "os.path.exists", "collections.OrderedDict", "json.dumps", "os.path.join", "re.match", "os.path.dirname", "os.path.basename", "sys.exit", "traceback.print_exc" ]
[((5244, 5283), 'os.path.join', 'os.path.join', (['unpackdir', '"""squashed.tar"""'], {}), "(unpackdir, 'squashed.tar')\n", (5256, 5283), False, 'import os\n'), ((10208, 10219), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10216, 10219), False, 'import sys\n'), ((372, 398), 'os.path.basename', 'os.path.basename', (['fullpath'], {}), '(fullpath)\n', (388, 398), False, 'import os\n'), ((469, 516), 're.match', 're.match', (['"""^python([0-9]+\\\\.[0-9]+)$"""', 'filename'], {}), "('^python([0-9]+\\\\.[0-9]+)$', filename)\n", (477, 516), False, 'import re\n'), ((531, 586), 're.match', 're.match', (['"""^libpython([0-9]+\\\\.[0-9]+).so.*$"""', 'filename'], {}), "('^libpython([0-9]+\\\\.[0-9]+).so.*$', filename)\n", (539, 586), False, 'import re\n'), ((2364, 2390), 'os.path.basename', 'os.path.basename', (['fullpath'], {}), '(fullpath)\n', (2380, 2390), False, 'import os\n'), ((4228, 4254), 'os.path.basename', 'os.path.basename', (['fullpath'], {}), '(fullpath)\n', (4244, 4254), False, 'import os\n'), ((5505, 5519), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (5515, 5519), False, 'import json\n'), ((5552, 5604), 'os.path.exists', 'os.path.exists', (["(unpackdir + '/anchore_allfiles.json')"], {}), "(unpackdir + '/anchore_allfiles.json')\n", (5566, 5604), False, 'import os\n'), ((6061, 6100), 'os.path.join', 'os.path.join', (['outputdir', '"""pkgfiles.all"""'], {}), "(outputdir, 'pkgfiles.all')\n", (6073, 6100), False, 'import os\n'), ((6542, 6555), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6553, 6555), False, 'from collections import OrderedDict\n'), ((10047, 10085), 'os.path.join', 'os.path.join', (['outputdir', '"""pkgs.binary"""'], {}), "(outputdir, 'pkgs.binary')\n", (10059, 10085), False, 'import os\n'), ((5084, 5095), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5092, 5095), False, 'import sys\n'), ((6245, 6284), 'os.path.join', 'os.path.join', (['unpackdir', '"""squashed.tar"""'], {}), "(unpackdir, 'squashed.tar')\n", (6257, 6284), False, 'import os\n'), ((6632, 6645), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6643, 6645), False, 'from collections import OrderedDict\n'), ((8949, 8988), 'os.path.join', 'os.path.join', (['unpackdir', '"""squashed.tar"""'], {}), "(unpackdir, 'squashed.tar')\n", (8961, 8988), False, 'import os\n'), ((9925, 9946), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (9944, 9946), False, 'import traceback\n'), ((5810, 5849), 'os.path.join', 'os.path.join', (['unpackdir', '"""squashed.tar"""'], {}), "(unpackdir, 'squashed.tar')\n", (5822, 5849), False, 'import os\n'), ((6862, 6901), 'os.path.join', 'os.path.join', (['unpackdir', '"""squashed.tar"""'], {}), "(unpackdir, 'squashed.tar')\n", (6874, 6901), False, 'import os\n'), ((5942, 5962), 'json.dumps', 'json.dumps', (['allfiles'], {}), '(allfiles)\n', (5952, 5962), False, 'import json\n'), ((1828, 1895), 're.match', 're.match', (['b\'.*#define +PY_VERSION +"*([0-9\\\\.\\\\-_a-zA-Z]+)"*\'', 'line'], {}), '(b\'.*#define +PY_VERSION +"*([0-9\\\\.\\\\-_a-zA-Z]+)"*\', line)\n', (1836, 1895), False, 'import re\n'), ((3430, 3521), 're.match', 're.match', (["b'.*go([0-9]+\\\\.[0-9]+(\\\\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*'", 'line'], {}), "(b'.*go([0-9]+\\\\.[0-9]+(\\\\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*'\n , line)\n", (3438, 3521), False, 'import re\n'), ((8765, 8797), 'json.dumps', 'json.dumps', (["{'evidence_type': e}"], {}), "({'evidence_type': e})\n", (8775, 8797), False, 'import json\n'), ((8845, 8863), 'json.dumps', 'json.dumps', 
(['result'], {}), '(result)\n', (8855, 8863), False, 'import json\n'), ((9386, 9400), 'json.dumps', 'json.dumps', (['el'], {}), '(el)\n', (9396, 9400), False, 'import json\n'), ((3706, 3734), 'os.path.dirname', 'os.path.dirname', (['member.name'], {}), '(member.name)\n', (3721, 3734), False, 'import os\n'), ((3811, 3839), 'os.path.dirname', 'os.path.dirname', (['member.name'], {}), '(member.name)\n', (3826, 3839), False, 'import os\n')]
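The analyzer above recognises runtimes by regex-matching version strings in the bytes it reads out of the squashed tar. The snippet below applies the same Go and CPython patchlevel patterns to two made-up byte strings; only the regular expressions come from the code, the sample inputs are assumptions for illustration.

import re

# Hypothetical byte strings like those read from files inside the squashed tar
samples = [
    b"prefix go1.13.5 suffix",
    b'#define PY_VERSION "3.7.4"',
]

go_re = rb".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*"
py_re = rb'.*#define +PY_VERSION +"*([0-9\.\-_a-zA-Z]+)"*'

m = re.match(go_re, samples[0])
print(m.group(1).decode())   # 1.13.5

m = re.match(py_re, samples[1])
print(m.group(1).decode())   # 3.7.4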