1]
combs = combinations(hydrogen, 2)
for comb in combs:
if (comb[0], comb[1], 1) in bond_can_form:
bond_can_form.remove((comb[0], comb[1], 1))
# Remove the C-O triple bond
elif (self.atoms[bonds[0]] == 6 and self.atoms[bonds[1]] == 8) or (self.atoms[bonds[1]] == 8 and self.atoms[bonds[0]] == 6):
if bonds[2] == 2:
bond_can_form.remove((bonds[0], bonds[1], 1))
# Remove the original O-H bond (prevent an O-H bond order greater than 1, i.e. O=H)
elif (self.atoms[bonds[0]] == 1 and self.atoms[bonds[1]] == 8) or (self.atoms[bonds[1]] == 8 and self.atoms[bonds[0]] == 1):
if (bonds[0], bonds[1], 1) in bond_can_form:
bond_can_form.remove((bonds[0], bonds[1], 1))
elif (bonds[1], bonds[0], 1) in bond_can_form:
bond_can_form.remove((bonds[1], bonds[0], 1))
# Remove the C-C quadruple bond
elif self.atoms[bonds[0]] == 6 and self.atoms[bonds[1]] == 6:
if bonds[2] == 3:
bond_can_form.remove((bonds[0], bonds[1], 1))
# Create bond can form list
if bonds[0] not in self.fixed_atoms or bonds[1] not in self.fixed_atoms:
order = bonds[2]
while order > 1:
bond_can_break.append((bonds[0], bonds[1], order - 1))
order -= 1
bond_can_break.append(bonds)
bond_can_form = sorted(bond_can_form, key = itemgetter(0))
bond_can_break = sorted(bond_can_break, key = itemgetter(0))
else:
bond_can_form = bonds_form_all[:]
# Remove the original C-H bond
for bonds in self.reactant_bonds:
order = bonds[2]
if (self.atoms[bonds[0]] == 6 and self.atoms[bonds[1]] == 1) or (self.atoms[bonds[1]] == 6 and self.atoms[bonds[0]] == 1):
bond_can_form.remove(bonds)
# Remove the C-O triple bond
elif (self.atoms[bonds[0]] == 6 and self.atoms[bonds[1]] == 8) or (self.atoms[bonds[1]] == 8 and self.atoms[bonds[0]] == 6):
if bonds[2] > 2:
bond_can_form.remove((bonds[0], bonds[1], 1))
# Remove the C-C quadruple bond
elif self.atoms[bonds[0]] == 6 and self.atoms[bonds[1]] == 6:
if bonds[2] > 3:
bond_can_form.remove((bonds[0], bonds[1], 1))
while order > 1:
bond_can_break.append((bonds[0], bonds[1], order - 1))
order -= 1
bond_can_break.append(bonds)
bond_can_form = sorted(bond_can_form, key = itemgetter(0))
bond_can_break = sorted(bond_can_break, key = itemgetter(0))
return bond_can_form, bond_can_break
def generateProducts(self):
"""
Generate all possible products from the reactant under the constraints
of breaking a maximum of `nbreak` and forming a maximum of `nform`
bonds.
"""
if self.nbreak > 3 or self.nform > 3:
raise Exception('Breaking/forming bonds is limited to a maximum of 3')
# Extract valences as a mutable sequence
reactant_valences = [atom.OBAtom.GetExplicitValence() for atom in self.reac_mol]
# Initialize set for storing bonds of products
# A set is used to ensure that no duplicate products are added
products_bonds = set()
# Generate all possibilities for forming bonds
natoms = len(self.atoms)
bonds_form_all = [(atom1_idx, atom2_idx, 1)
for atom1_idx in range(natoms - 1)
for atom2_idx in range(atom1_idx + 1, natoms)]
# Generate bond can form and bond can break
bond_can_form, bond_can_break = self.gen_bond_can_form_and_bond_can_break(bonds_form_all)
# Generate products
bf_combinations = ((0, 1), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3))
for bf in bf_combinations:
if bf[0] <= self.nbreak and bf[1] <= self.nform:
self._generateProductsHelper(
bf[0],
bf[1],
products_bonds,
self.reactant_bonds,
reactant_valences,
bond_can_form,
bond_can_break
)
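# Illustrative note: the loop above only invokes the helper for (break, form)
# combinations within the user limits; e.g. with nbreak = 2 and nform = 2 it
# runs (0, 1), (1, 0), (1, 1), (1, 2), (2, 1) and (2, 2).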
if products_bonds:
for bonds in products_bonds:
# for SSM calculation
break_bonds = [i for i in list(set(bonds) ^ set(self.reactant_bonds)) if i not in bonds]
form_bonds = [i for i in list(set(bonds) ^ set(self.reactant_bonds)) if i in bonds]
break_bonds_copy = break_bonds[:]
form_bonds_copy = form_bonds[:]
# deal with double bonds
for i in form_bonds_copy:
if i[2] > 1 and i in bonds:
for j in break_bonds_copy:
if i[0] == j[0] and i[1] == j[1]:
break_bonds.remove(j)
# deal with double bonds
for i in break_bonds_copy:
if i[2] > 1 and i in self.reactant_bonds:
for j in form_bonds_copy:
if i[0] == j[0] and i[1] == j[1]:
form_bonds.remove(j)
if self.check_bond_type(bonds):
mol = gen3D.makeMolFromAtomsAndBonds(self.atoms, bonds, spin=self.reac_mol.spin)
mol.setCoordsFromMol(self.reac_mol)
if self.check_bond_dissociation_energy_and_isomorphic_and_rings(bonds, break_bonds):
if self.use_inchi_key and mol.write('inchiKey').strip() not in self.reactant_inchikey:
# Remove double bonds from the forming/breaking lists.
# In SSM or GSM a bond-order change (e.g. single to double) is only a small distance change,
# so it cannot serve as a driving coordinate; SSM handles that small change automatically.
for i in form_bonds_copy:
for j in self.reactant_bonds:
if i[0] == j[0] and i[1] == j[1]:
if i in form_bonds:
form_bonds.remove(i)
for i in break_bonds_copy:
for j in bonds:
if i[0] == j[0] and i[1] == j[1]:
if i in break_bonds:
break_bonds.remove(i)
self.add_bonds.append(form_bonds)
self.break_bonds.append(break_bonds)
self.prod_mols.append(mol)
def check_bond_type(self, bonds):
bond_type = {}
for i in range(len(self.atoms)):
num = 0
for j in bonds:
if j[0] == i or j[1] == i:
num += j[2]
bond_type[i] = num
if 0 in bond_type.values():
return False
else:
for idx, i in enumerate(self.atoms):
# For catalyst (SnBEA)
if self.fixed_atoms:
if idx not in self.fixed_atoms:
if i == 6 and bond_type[idx] != 4:
return False
# elif i == 8 and bond_type[idx] != 2:
# return False
elif i == 8 and bond_type[idx] < 2:
return False
elif i == 8 and bond_type[idx] > 2:
if (idx, self.active_site_metal[0], 1) not in bonds and (self.active_site_metal[0], idx, 1) not in bonds:
return False
elif i == 14 and bond_type[idx] != 4:
return False
else:
# If the Bronsted acid site already has a proton on the active site, abort.
if i == 8 and bond_type[idx] > 3:
return False
# Normal case (organic reaction)
else:
if i == 6 and bond_type[idx] != 4:
return False
elif i == 8 and bond_type[idx] != 2:
return False
elif i == 14 and bond_type[idx] != 4:
return False
elif i == 1 and bond_type[idx] != 1:
return False
return True
def check_bond_dissociation_energy_and_isomorphic_and_rings(self, bond_list, bbond_list):
energy = 0.0
reactant_graph = self.reac_mol_graph
atoms = [Atom(atomic_symbol=reactant_graph.atoms[i].label) for i in range(reactant_graph.n_atoms)]
product = Species(atoms)
graph = nx.Graph()
for i in range(reactant_graph.n_atoms):
graph.add_node(i, atom_label=reactant_graph.atoms[i].label, stereo=False)
if bond_list is not None:
[graph.add_edge(bond[0], bond[1], pi=False, active=False) for bond in bond_list]
product.graph = graph
# Filter out products isomorphic to the reactant
if is_isomorphic(reactant_graph.graph, product.graph):
return False
elif self.check_four_and_three_membered_rings(product.graph):
return False
else:
# Filter the bond dissociation energy
num = sum([bb[2] for bb in bbond_list])
for break_bond in bbond_list:
first_atom = reactant_graph.atoms[break_bond[0]].label
second_atom = reactant_graph.atoms[break_bond[1]].label
bond_type = break_bond[2]
supported_element = ['C', 'N', 'H', 'O', 'S', 'Cl', 'Si']
if first_atom not in supported_element or second_atom not in supported_element:
# use 100 instead
energy += 100
else:
# If a double bond is only reduced (2 --> 1), not fully broken, use (double-bond BDE - single-bond BDE)
if num >= 3 and bond_type >= 2:
try:
energy += props.bond_dissociation_energy[first_atom, second_atom, bond_type]
energy -= props.bond_dissociation_energy[first_atom, second_atom, bond_type - 1]
except:
energy += props.bond_dissociation_energy[second_atom, first_atom, bond_type]
energy -= props.bond_dissociation_energy[second_atom, first_atom, bond_type - 1]
else:
try:
energy += props.bond_dissociation_energy[first_atom, second_atom, bond_type]
except:
energy += props.bond_dissociation_energy[second_atom, first_atom, bond_type]
if energy / _constants.CAL2J > self.bond_dissociation_cutoff:
return False
else:
return True
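# Worked example for the BDE accounting above (symbolic, no assumed values): with
# bbond_list = [(i, j, 2), (k, l, 1)], assuming (i, j) is a C=C bond and (k, l) an O-H bond,
# the total broken order num is 3, so the C=C entry contributes BDE(C, C, 2) - BDE(C, C, 1)
# (the bond is reduced, not fully broken) while the O-H entry contributes BDE(O, H, 1);
# the sum is then converted with CAL2J and compared against bond_dissociation_cutoff.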
def check_four_and_three_membered_rings(self, product):
# Filter the 3&4 membered ring
rings = nx.cycle_basis(product)
for ring in rings:
if len(ring) < 5 and set(self.reactant_carbon) >= set(ring):
return True
return False
def get_prods(self):
return self.prod_mols
def get_add_bonds(self):
return self.add_bonds
def get_break_bonds(self):
return self.break_bonds
def _generateProductsHelper(self, nbreak, nform, products, bonds, valences, bond_can_form, bond_can_break, bonds_broken=None):
"""
Generate products recursively given the number of bonds that should be
broken and formed, a set for storing the products, a sequence of atoms,
of bonds, and of valences. `bond_can_form` should contain a tuple of
tuples of bonds that contains all possibilities for forming bonds.
Nothing is returned, but formed products are added to `products`.
"""
if bonds_broken is None:
bonds_broken = []
if nbreak == 0 and nform == 0:
products.add((tuple(sorted(bonds))))
# if all(bonds_broken[num][0] not in self.fixed_atoms or bonds_broken[num][1] not in self.fixed_atoms for num in range(len(bonds_broken))):
# products.add((tuple(sorted(bonds))))
if nbreak > 0:
# Break bond
for bond_break in bond_can_break:
if bond_break not in bonds:
continue
bond_break_idx = bonds.index(bond_break)
valences_break = self.changeValences(valences, bond_break, -1)
bonds_break = self.breakBond(bonds, bond_break_idx)
# Keep track of bonds that have been broken
if bond_break_idx == 0:
bonds_broken.append(bond_break)
else:
bonds_broken[-1] = bond_break
# Call function recursively to break next bond
self._generateProductsHelper(
nbreak - 1,
nform,
products,
bonds_break,
valences_break,
bond_can_form,
bond_can_break,
bonds_broken
)
# Remove last bond that has been broken after loop terminates
del bonds_broken[-1]
elif nform > 0:
# Form bond
for bond_form in bond_can_form:
# Do not add bond if it has
mean_val, sumsqf_val)
# WIRE_ORDER_CUR_VAL_IDX, WIRE_ORDER_MEAN_VAL_IDX, WIRE_ORDER_SUMSQF_VAL_IDX
assert len(pt) == (WIRE_ORDER_SUMSQF_VAL_IDX+1)
points.append( pt )
return points
def _service_ros(self, framenumber, hw_roi_frame, chainbuf):
now = time.time()
if (now - self.pub_rate_lasttime) > 2.0:
self.pub_rate_lasttime = now
fps = (framenumber - self.pub_rate_lastframe) / 2.0
self.pub_rate_lastframe = framenumber
self.pub_rate.publish(fps)
#maybe this is racy, but it's only for debugging. Don't serialize images
#if no one is subscribed
if self.pub_img_n_subscribed <= 0:
return
if self.pub_img_rate <= 0:
return
if now-self.pub_img_lasttime+0.005 > 1./(self.pub_img_rate):
msg = sensor_msgs.msg.Image()
msg.header.seq=framenumber
msg.header.stamp=rospy.Time.from_sec(now) # XXX TODO: once camera trigger is ROS node, get accurate timestamp
msg.header.frame_id = "0"
npbuf = np.array(hw_roi_frame)
(height,width) = npbuf.shape
msg.height = height
msg.width = width
msg.encoding = chainbuf.image_coding
pixel_format = chainbuf.image_coding
if pixel_format == 'MONO8':
msg.encoding = 'mono8'
elif pixel_format in ('RAW8:RGGB','MONO8:RGGB'):
msg.encoding = 'bayer_rggb8'
elif pixel_format in ('RAW8:BGGR','MONO8:BGGR'):
msg.encoding = 'bayer_bggr8'
elif pixel_format in ('RAW8:GBRG','MONO8:GBRG'):
msg.encoding = 'bayer_gbrg8'
elif pixel_format in ('RAW8:GRBG','MONO8:GRBG'):
msg.encoding = 'bayer_grbg8'
else:
raise ValueError('unknown pixel format "%s"'%pixel_format)
msg.step = width
msg.data = npbuf.tostring() # let numpy convert to string
self.pub_img.publish(msg)
self.pub_img_lasttime = now
def mainloop(self):
disable_ifi_warning = self.options.disable_ifi_warning
globals = self.globals
self._globals = globals
# questionable optimization: speed up by eliminating namespace lookups
bg_frame_number = -1
clear_background_isSet = globals['clear_background'].isSet
clear_background_clear = globals['clear_background'].clear
take_background_isSet = globals['take_background'].isSet
take_background_clear = globals['take_background'].clear
collecting_background_isSet = globals['collecting_background'].isSet
max_frame_size = FastImage.Size(self.max_width, self.max_height)
lbrt = self.realtime_analyzer.roi
l,b,r,t=lbrt
hw_roi_w = r-l+1
hw_roi_h = t-b+1
cur_roi_l = l
cur_roi_b = b
#cur_roi_l, cur_roi_b,hw_roi_w, hw_roi_h = self.cam.get_frame_roi()
cur_fisize = FastImage.Size(hw_roi_w, hw_roi_h)
bg_changed = True
use_roi2 = True
fi8ufactory = FastImage.FastImage8u
use_cmp_isSet = globals['use_cmp'].isSet
# hw_roi_frame = fi8ufactory( cur_fisize )
# self._hw_roi_frame = hw_roi_frame # make accessible to other code
old_ts = time.time()
old_fn = None
points = []
if self.posix_scheduler!='':
import posix_sched
mode_str, priority = self.posix_scheduler
mode = getattr(posix_sched,mode_str)
try:
sched_params = posix_sched.SchedParam(priority)
posix_sched.setscheduler(0, mode, sched_params)
msg = 'grab thread running with priority %s' % self.posix_scheduler
except Exception as x:
msg = 'could not adjust priority (PID %d): %s'%(os.getpid(),str(x))
else:
msg = 'grab thread running with default priority'
self.log_message_queue.put((self.cam_id,time.time(),msg))
LOG.info(msg)
#FastImage.set_debug(3) # let us see any images malloced, should only happen on hardware ROI size change
#################### initialize images ############
running_mean8u_im_full = self.realtime_analyzer.get_image_view('mean') # this is a view we write into
absdiff8u_im_full = self.realtime_analyzer.get_image_view('absdiff') # this is a view we write into
mask_im = self.realtime_analyzer.get_image_view('mask') # this is a view we write into
newmask_fi = FastImage.asfastimage( self.mask_image )
newmask_fi.get_8u_copy_put(mask_im, max_frame_size)
# allocate images and initialize if necessary
running_mean_im_full = FastImage.FastImage32f(max_frame_size)
self._running_mean_im_full = running_mean_im_full # make accessible to other code
fastframef32_tmp_full = FastImage.FastImage32f(max_frame_size)
mean2_full = FastImage.FastImage32f(max_frame_size)
self._mean2_full = mean2_full # make accessible to other code
std2_full = FastImage.FastImage32f(max_frame_size)
self._std2_full = std2_full # make accessible to other code
running_stdframe_full = FastImage.FastImage32f(max_frame_size)
self._running_stdframe_full = running_stdframe_full # make accessible to other code
compareframe_full = FastImage.FastImage32f(max_frame_size)
compareframe8u_full = self.realtime_analyzer.get_image_view('cmp') # this is a view we write into
self._compareframe8u_full = compareframe8u_full
running_sumsqf_full = FastImage.FastImage32f(max_frame_size)
running_sumsqf_full.set_val(0,max_frame_size)
self._running_sumsqf_full = running_sumsqf_full # make accessible to other code
noisy_pixels_mask_full = FastImage.FastImage8u(max_frame_size)
# set ROI views of full-frame images
running_mean8u_im = running_mean8u_im_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
running_mean_im = running_mean_im_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
fastframef32_tmp = fastframef32_tmp_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
mean2 = mean2_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
std2 = std2_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
running_stdframe = running_stdframe_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
compareframe = compareframe_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
compareframe8u = compareframe8u_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
running_sumsqf = running_sumsqf_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
noisy_pixels_mask = noisy_pixels_mask_full.roi(cur_roi_l, cur_roi_b, cur_fisize) # set ROI view
if self._initial_image_dict is not None:
# If we have initial values, load them.
# implicit conversion to float32
numpy.asarray(running_mean_im_full)[:,:] = self._initial_image_dict['mean']
numpy.asarray(running_sumsqf)[:,:] = self._initial_image_dict['sumsqf']
LOG.warn('WARNING: ignoring initial images and taking new background')
globals['take_background'].set()
else:
globals['take_background'].set()
running_mean_im.get_8u_copy_put( running_mean8u_im, cur_fisize )
#################### done initializing images ############
incoming_raw_frames_queue = globals['incoming_raw_frames']
incoming_raw_frames_queue_put = incoming_raw_frames_queue.put
initial_take_bg_state = None
while 1:
with camnode_utils.use_buffer_from_chain(self._chain) as chainbuf:
if chainbuf.quit_now:
break
chainbuf.updated_running_mean_image = None
chainbuf.updated_running_sumsqf_image = None
hw_roi_frame = chainbuf.get_buf()
cam_received_time = chainbuf.cam_received_time
if self.red_only_shared.get_nowait():
color_range_1 = self.color_range_1_shared.get_nowait()
color_range_2 = self.color_range_2_shared.get_nowait()
color_range_3 = self.color_range_3_shared.get_nowait()
if color_range_1 < color_range_2:
camnode_colors.replace_with_red_image( hw_roi_frame,
chainbuf.image_coding,
#camnode_colors.RED_CHANNEL)
camnode_colors.RED_COLOR,
color_range_1,
color_range_2,
color_range_3,
self.sat_thresh_shared.get_nowait())
else:
LOG.error('ERROR: color_range_1 >= color_range_2 -- skipping')
# get best guess as to when image was taken
timestamp=chainbuf.timestamp
framenumber=chainbuf.framenumber
# publish on ROS network
if not self.benchmark:
self._service_ros(framenumber, hw_roi_frame, chainbuf)
if 1:
if old_fn is None:
# no old frame
old_fn = framenumber-1
if framenumber-old_fn > 1:
n_frames_skipped = framenumber-old_fn-1
msg = ' %s frames apparently skipped: %d (%d vs %d)'%(self.cam_id, n_frames_skipped, framenumber, old_fn)
self.log_message_queue.put((self.cam_id,time.time(),msg))
LOG.warn(msg)
else:
n_frames_skipped = 0
diff = timestamp-old_ts
time_per_frame = diff/(n_frames_skipped+1)
if not disable_ifi_warning:
if time_per_frame > 2*self.shortest_IFI:
msg = 'Warning: IFI is %f on %s at %s (frame skipped?)'%(time_per_frame,self.cam_id,time.asctime())
self.log_message_queue.put((self.cam_id,time.time(),msg))
LOG.warn(msg)
old_ts = timestamp
old_fn = framenumber
xpoints = self.realtime_analyzer.do_work(hw_roi_frame,
timestamp, framenumber, use_roi2,
use_cmp_isSet(),
#max_duration_sec=0.010, # maximum 10 msec in here
max_duration_sec=self.shortest_IFI-0.0005, # give .5 msec for other processing
return_debug_values=1,
)
## if len(xpoints)>=self.max_num_points:
## msg = 'Warning: cannot save acquire points this frame because maximum number already achieved'
## LOG.warn(msg)
chainbuf.processed_points = xpoints
if NAUGHTY_BUT_FAST:
chainbuf.absdiff8u_im_full = absdiff8u_im_full
chainbuf.mean8u_im_full = running_mean8u_im_full
chainbuf.compareframe8u_full = compareframe8u_full
else:
chainbuf.absdiff8u_im_full = numpy.array(absdiff8u_im_full,copy=True)
chainbuf.mean8u_im_full = numpy.array(running_mean8u_im_full,copy=True)
chainbuf.compareframe8u_full = numpy.array(compareframe8u_full,copy=True)
points = self._convert_to_wire_order( xpoints, hw_roi_frame, running_mean_im, running_sumsqf)
# allow other thread to see images
imname = globals['export_image_name'] # figure out what is wanted # XXX theoretically could have threading issue
if imname == 'raw':
export_image = hw_roi_frame
else:
export_image = self.realtime_analyzer.get_image_view(imname) # get image
globals['most_recent_frame_potentially_corrupt'] = (0,0), export_image # give view of image, receiver must be careful
if 1:
# allow other thread to see raw image always (for saving)
if incoming_raw_frames_queue.qsize() >1000:
# chop off some old frames to prevent memory explosion
LOG.warn('ERROR: deleting old frames to make room for new ones! (and sleeping)')
for i in range(100):
incoming_raw_frames_queue.get_nowait()
incoming_raw_frames_queue_put(
(hw_roi_frame.get_8u_copy(hw_roi_frame.size), # save a copy
timestamp,
framenumber,
points,
self.realtime_analyzer.roi,
cam_received_time,
) )
do_bg_maint = False
if initial_take_bg_state is not None:
assert initial_take_bg_state == 'gather'
n_initial_take = 20
if 1:
initial_take_frames.append( numpy.array(hw_roi_frame,copy=True) )
if len( initial_take_frames ) >= n_initial_take:
initial_take_frames = numpy.array( initial_take_frames, dtype=numpy.float32 )
mean_frame = numpy.mean( initial_take_frames, axis=0)
sumsqf_frame = numpy.sum(initial_take_frames**2, axis=0)/len( initial_take_frames )
numpy.asarray(running_mean_im)[:,:] = mean_frame
numpy.asarray(running_sumsqf)[:,:] = sumsqf_frame
LOG.info('using slow method, calculated mean and sumsqf frames from first %d frames' % n_initial_take)
# we're done with initial transient, set stuff
do_bg_maint = True
initial_take_bg_state = None
del initial_take_frames
elif 0:
# faster approach (currently seems broken)
# accumulate sum
# I could re-write this to use IPP instead of
# numpy, but would that really matter much?
npy_view = numpy.asarray(hw_roi_frame)
numpy.asarray(running_mean_im)[:,:] = numpy.asarray(running_mean_im) + npy_view
numpy.asarray(running_sumsqf)[:,:] = numpy.asarray(running_sumsqf) + npy_view.astype(numpy.float32)**2
initial_take_frames_done += 1
del npy_view
if initial_take_frames_done >= n_initial_take:
# now divide to take average
numpy.asarray(running_mean_im)[:,:] = numpy.asarray(running_mean_im) / initial_take_frames_done
numpy.asarray(running_sumsqf)[:,:] = numpy.asarray(running_sumsqf) / initial_take_frames_done
# we're done with initial transient, set stuff
do_bg_maint = True
initial_take_bg_state = None
del initial_take_frames_done
if take_background_isSet():
LOG.info('taking new bg')
# reset background image with current frame as mean and 0 STD
if cur_fisize != max_frame_size:
LOG.warn('ERROR: can only take background image if not using ROI')
else:
if 0:
# old way
hw_roi_frame.get_32f_copy_put(running_sumsqf,max_frame_size)
running_sumsqf.toself_square(max_frame_size)
hw_roi_frame.get_32f_copy_put(running_mean_im,cur_fisize)
running_mean_im.get_8u_copy_put( running_mean8u_im, max_frame_size )
do_bg_maint = True
else:
initial_take_bg_state = 'gather'
if 1:
initial_take_frames = [ numpy.array(hw_roi_frame,copy=True) ] # for slow approach
elif 0:
initial_take_frames_done = 1 # for faster approach
# set running_mean_im
hw_roi_frame.get_32f_copy_put(running_mean_im,cur_fisize)
running_mean_im.get_8u_copy_put( running_mean8u_im, max_frame_size )
# set running_sumsqf
hw_roi_frame.get_32f_copy_put(running_sumsqf,max_frame_size)
running_sumsqf.toself_square(max_frame_size)
take_background_clear()
if collecting_background_isSet():
bg_frame_number += 1
if (bg_frame_number % self.bg_frame_interval == 0):
do_bg_maint = True
if do_bg_maint:
realtime_image_analysis.do_bg_maint(
#print 'doing slow bg maint, frame', chainbuf.framenumber
#tmpresult = motmot.realtime_image_analysis.slow.do_bg_maint(
running_mean_im,#in
hw_roi_frame,#in
cur_fisize,#in
self.bg_frame_alpha, #in
running_mean8u_im,
fastframef32_tmp,
running_sumsqf, #in
mean2,
std2,
running_stdframe,
self.n_sigma_shared.get_nowait(),#in
compareframe8u,
bright_non_gaussian_cutoff,#in
noisy_pixels_mask,#in
bright_non_gaussian_replacement,#in
bench=0 )
#debug=0)
#chainbuf.real_std_est= tmpresult
bg_changed = True
bg_frame_number = 0
if self.options.debug_std:
if framenumber % 200 == 0:
mean_std = numpy.mean( numpy.mean( numpy.array(running_stdframe,dtype=numpy.float32 )))
LOG.info('%s mean STD %.2f'%(self.cam_id,mean_std))
if clear_background_isSet():
# reset background image with 0
# Repository: FabrizioCoder/Pyot
from typing import Dict, List, Mapping, Any, Set, Type, Union, get_type_hints
import inspect
import re
from pyot.conf.pipeline import pipelines
from pyot.pipeline.core import Pipeline
from pyot.pipeline.token import PipelineToken
from pyot.utils.copy import fast_copy
from .functional import lazy_property, laziable, parse_camelcase
class PyotLazy:
def __init__(self, clas: Union[Type["PyotStaticBase"], Type["PyotCoreBase"]], obj: Any, root: "PyotCoreBase"):
self.clas = clas
self.root = root
self.obj = obj
def __call__(self):
try:
if issubclass(self.clas, PyotCoreBase):
if isinstance(self.obj, list):
return [self.load_core(obj) for obj in self.obj]
return self.load_core(self.obj)
if isinstance(self.obj, list):
return [self.load_static(obj) for obj in self.obj]
return self.load_static(self.obj)
except Exception as e:
raise RuntimeError(f"Failed to lazy load '{self.clas.__name__}' object due to: ({type(e)}) {e}") from e
def load_static(self, obj):
instance: "PyotStaticBase" = self.clas(obj)
instance._meta.root = self.root
return instance.fill()
def load_core(self, obj):
kwargs = {}
if "version" in self.clas.Meta.arg_names:
try: kwargs["version"] = self.root.version
except AttributeError: pass
if "locale" in self.clas.Meta.arg_names:
try: kwargs["locale"] = self.root.locale
except AttributeError: pass
if "platform" in self.clas.Meta.arg_names and "platform" in self.root.Meta.arg_names:
try: kwargs["platform"] = self.root.platform
except AttributeError: pass
if "region" in self.clas.Meta.arg_names and "region" in self.root.Meta.arg_names:
try: kwargs["region"] = self.root.region
except AttributeError: pass
instance: "PyotCoreBase" = self.clas(**kwargs)
instance._meta.root = self.root
instance._meta.data = instance.transform(obj)
return instance.fill()
class PyotRoutingBase:
class Meta:
root: "PyotCoreBase"
pipeline: Pipeline
_meta: Meta
_region: str = None
_platform: str = None
_locale: str = None
_version: str = None
_regions: Set[str] = set()
_platforms: Set[str] = set()
_platform2regions: Dict[str, str] = {None: None}
@property
def region(self) -> str:
val = self._region or self._meta.root._region or \
self._platform2regions.get(self._platform or self._meta.root._platform, None)
if val is None:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute 'region'")
return val
@region.setter
def region(self, val: str):
val = val.lower()
if val not in self._regions:
raise ValueError(f"'{val}' is not a valid region")
self._region = val
@property
def platform(self) -> str:
val = self._platform or self._meta.root._platform
if val is None:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute 'platform'")
return val
@platform.setter
def platform(self, val: str):
val = val.lower()
if val not in self._platforms:
raise ValueError(f"'{val}' is not a valid platform")
self._platform = val
@property
def locale(self) -> str:
val = self._locale or self._meta.root._locale
if val is None:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute 'locale'")
return val
@locale.setter
def locale(self, val: str):
self._locale = val
@property
def version(self) -> str:
val = self._version or self._meta.root._version
if val is None:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute 'version'")
return val
@version.setter
def version(self, val: str):
self._version = val
@property
def metaroot(self) -> "PyotCoreBase":
return self._meta.root
@property
def metapipeline(self) -> Pipeline:
return self._meta.pipeline
class PyotMetaClass(type):
def __new__(cls, name, bases, attrs):
if 'Meta' not in attrs and cls.is_static_core(bases):
attrs['Meta'] = type('Meta', (cls.get_static_core(bases).Meta,), {'__module__': attrs['__module__'] + f".{name}"})
clas: "PyotStaticBase" = super().__new__(cls, name, bases, attrs)
clas.Meta.types = cls.get_types(clas)
clas.Meta.nomcltrs = {}
if cls.is_static_core(bases):
if issubclass(clas, PyotCoreBase):
try:
arg_names = set(inspect.getfullargspec(clas.__init__).args)
cls.set_server_type(clas, arg_names)
clas.Meta.arg_names = arg_names
except TypeError:
pass
clas.Meta.lazy_props = [prop.split(".") for prop in cls.get_lazy_props(clas, [])]
return clas
@staticmethod
def is_static_core(bases):
base_names = set()
for base in bases:
base_names |= {cl.__name__ for cl in inspect.getmro(base)}
return 'PyotStatic' in base_names or 'PyotCore' in base_names
@staticmethod
def get_static_core(bases):
deep_bases = set()
for base in bases:
deep_bases |= set(inspect.getmro(base))
try:
return next(base for base in deep_bases if base.__name__ == 'PyotCore')
except StopIteration:
return next(base for base in deep_bases if base.__name__ == 'PyotStatic')
@staticmethod
def get_types(clas: "PyotStaticBase"):
types = get_type_hints(clas)
for typ, clas in types.items():
try:
if clas.__origin__ is list:
types[typ] = clas.__args__[0]
except Exception:
pass
return types
@staticmethod
def get_lazy_props(clas: "PyotStaticBase", props, prefix=""):
props += [prefix + p for p in dir(clas) if isinstance(getattr(clas, p), lazy_property)]
types = {attr:cl for attr, cl in clas.Meta.types.items() if inspect.isclass(cl) and issubclass(cl, PyotStaticBase)}
for typ, cl in types.items():
PyotMetaClass.get_lazy_props(cl, props, prefix + typ + ".")
return props
@staticmethod
def set_server_type(clas: "PyotCoreBase", args):
for server in clas.Meta.server_type_names:
if server in args:
clas.Meta.server_type = server
return
class PyotStaticBase(PyotRoutingBase, metaclass=PyotMetaClass):
class Meta(PyotRoutingBase.Meta):
# Mutable objects should be overridden on inheritance
server_type: str = None
lazy_props: List[str]
nomcltrs: Dict[str, Any]
types: Dict[str, Any]
data: Dict[str, Any]
raws: Set[str] = set()
renamed: Dict[str, str] = {}
_meta: Meta
def __init__(self, data):
# Instantiate Meta class, isolating data dict
self._meta = self.Meta()
self._meta.data = data
def __getattr__(self, name):
try:
lazy = self.__dict__.pop('_lazy__' + name)
obj = lazy()
setattr(self, name, obj)
return obj
except (KeyError, AttributeError) as e:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") from e
def __getitem__(self, item):
return self._meta.data[item]
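# Sketch of what qualkey below does (illustrative field names): it converts an API
# camelCase key to snake_case and then applies any rename from Meta.renamed,
# e.g. 'profileIconId' -> 'profile_icon_id' and 'summonerLevel' -> 'summoner_level'.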
def qualkey(self, key: str) -> str:
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
newkey = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
if newkey in self._meta.renamed:
newkey = self._meta.renamed[newkey]
return newkey
def fill(self):
mapping = self._meta.nomcltrs
for key, val in self._meta.data.items():
try:
attr = mapping[key]
except KeyError:
attr = self.qualkey(key)
mapping[key] = attr
if laziable(val):
if attr in self._meta.raws:
setattr(self, attr, val)
continue
try:
setattr(self, '_lazy__' + attr, PyotLazy(self._meta.types[attr], val, self._meta.root))
except KeyError:
setattr(self, attr, val)
else:
setattr(self, attr, val)
return self
def load_lazy_properties(self, instance, prop, ind=0):
if ind == len(prop):
return
try:
attr = getattr(instance, prop[ind])
except AttributeError:
return
if isinstance(attr, list):
for val in attr:
self.load_lazy_properties(val, prop, ind + 1)
else:
self.load_lazy_properties(attr, prop, ind + 1)
def rdict(self):
dic = {}
for key, val in self._meta.types.items():
if key.startswith("_"):
continue
try:
obj = getattr(self, key)
except AttributeError:
continue
try:
if issubclass(val, PyotStaticBase):
try:
if isinstance(obj, list):
dic[key] = [ob.rdict() for ob in obj]
else:
dic[key] = obj.rdict()
except AttributeError:
pass
else:
dic[key] = obj
except TypeError:
pass
return dic
def dict(self, deepcopy=False, lazy_props=False, recursive=False):
if lazy_props:
for prop in self._meta.lazy_props:
self.load_lazy_properties(self, prop)
dic = self.rdict() if recursive else self._meta.data
return fast_copy(dic) if deepcopy else dic.copy()
class PyotCoreBase(PyotStaticBase):
class Meta(PyotStaticBase.Meta):
# Mutable objects should be overridden on inheritance
key: str
server: str
load: Mapping[str, Any]
query: Mapping[str, Any]
body: Mapping[str, Any]
arg_names: Set[str]
server_type: str
server_type_names = {"platform", "region"}
allow_query: bool = False
rules: Mapping[str, List[str]] = {}
raw_data: Any
_meta: Meta
def initialize(self, kwargs: Dict):
# Instantiate meta class and fill kwargs
self._meta = self.Meta()
self._meta.root = self
self._meta.query = {}
self._meta.data = {}
self._meta.body = {}
kwargs.pop("self")
for name, val in kwargs.items():
if val is not None:
self._meta.data[name] = val
setattr(self, name, val)
return self
def match_rule(self):
if len(self._meta.rules) == 0:
raise TypeError("This Pyot object is static")
for key, attr in self._meta.rules.items():
load = {}
for a in attr:
try:
checkonly = a.startswith('?')
if checkonly:
getattr(self, a[1:])
continue
load[a] = getattr(self, a)
except AttributeError:
break
else:
self._meta.key = key
self._meta.load = load
return self
raise TypeError("Incomplete values for pipeline token creation")
def match_server(self):
server_type = self._meta.server_type
if server_type:
server: str = getattr(self, server_type)
self._meta.server = server.lower()
else:
self._meta.server = None
return self
def pre_request(self, pipeline: str = None):
if pipeline:
self.pipeline(pipeline)
def post_request(self, data, deepcopy: bool):
data = self.filter(data)
self._meta.raw_data = fast_copy(data) if deepcopy else data
self._meta.data = self.transform(data)
self.fill()
async def token(self) -> PipelineToken:
'''Coroutine. Create a pipeline token that identifies this object (its parameters).'''
await self.setup()
self.match_rule()
self.match_server()
self.clean()
try:
return PipelineToken(self._meta.pipeline.model, self._meta.server, self._meta.key, self._meta.load, self._meta.query)
except AttributeError as e:
raise ValueError("Token creation failed, please ensure a pipeline is activated or provided") from e
async def get(self, pipeline: str = None, deepcopy: bool = False):
'''Coroutine. Make a GET request to the pipeline.'''
self.pre_request(pipeline)
data = await self._meta.pipeline.get(await self.token())
self.post_request(data, deepcopy)
return self
async def post(self, pipeline: str = None, deepcopy: bool = False):
'''Coroutine. Make a POST request to the pipeline.'''
self.pre_request(pipeline)
data = await self._meta.pipeline.post(await self.token(), self._meta.body)
self.post_request(data, deepcopy)
return self
async def put(self, pipeline: str = None, deepcopy: bool = False):
'''Coroutine. Make a PUT request to the pipeline.'''
self.pre_request(pipeline)
data = await self._meta.pipeline.put(await self.token(), self._meta.body)
self.post_request(data, deepcopy)
return self
def pipeline(self, name: str = None):
try:
self._meta.pipeline = pipelines[name]
except KeyError as e:
raise ValueError(f"Pipeline '{name}' does not exist, inactive or dead") from e
return self
def query(self, **kwargs):
'''Query parameters setter.'''
self._meta.query = parse_camelcase(locals())
return self
def body(self, **kwargs):
'''Body parameters setter.'''
self._meta.body = parse_camelcase(locals())
return self
def raw(self):
"""Return the raw response of the request, only available for Core objects"""
return self._meta.raw_data
@classmethod
def load(cls, raw_data: Any):
o
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2018 <NAME>
#
# Authors : <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Contact email: <EMAIL>
# =============================================================================
"""
Abstract
--------
The wps module of the OWSlib package provides client-side functionality for executing invocations to a remote
Web Processing Server.
Disclaimer
----------
.. warning:: The owslib wps module should be considered in beta state: it has been tested versus only a handful of
WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests versus a remote WPS endpoint:
# "GetCapabilities"
* use the method wps.getcapabilities(xml=None)
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS capabilities document from a cached XML file
# "DescribeProcess"
* use the method wps.describeprocess(identifier, xml=None)
* identifier is the process identifier, retrieved from the list obtained from a previous
"GetCapabilities" invocation
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS process description document from a cached XML file
# "Execute"
* use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically
check the job status until completion (or error)
* the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments,
and instead submit a request from a pre-made XML file
* alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs"
and "output" arguments to the execute() method.
* "identifier" is the mandatory process identifier
* "inputs" is a dictionary of (key,value) pairs where:
* key is a named input parameter
* value is either a string, or any python object that supports a getXml() method
In particular, a few classes are included in the package to support a FeatureCollection input:
* "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION
retrieved from a live WFS server.
* "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons
of (latitude, longitude) points.
* "output" is an optional output identifier to be included in the ResponseForm section of the request.
* the optional keyword argument "response" mey be used to avoid submitting a real live request, and instead
reading the WPS execution response document from a cached XML file (for debugging or testing purposes)
* the convenience module function monitorExecution() can be used to periodically check the status of a remote
running job, and eventually download the output either to a named file, or to a file specified by the server.
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain
real-world usage examples that submits a "GetCapabilities", "DescribeProcess" and "Execute" requests to
the live USGS and PML servers. To run:
* cd examples
* python wps-usgs-script.py
* python wps-pml-script-1.py
* python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities",
"DescribeProcess" or "Execute" request to an arbitrary WPS server. For example, you can run it as follows:
* cd examples
* To print out usage and example invocations: wps-client -help
* To execute a (fake) WPS invocation::
$ wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml # noqa
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the
response of cached XML response documents. To run:
* cd tests
* python -m doctest wps_*.txt
``(or python -m doctest -v wps_*.txt for verbose output)``
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
* The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
* The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from owslib.etree import etree
from owslib.ows import DEFAULT_OWS_NAMESPACE, XLINK_NAMESPACE
from owslib.ows import ServiceIdentification, ServiceProvider, OperationsMetadata, BoundingBox
from time import sleep
from owslib.util import (testXMLValue, testXMLAttribute, build_get_url, clean_ows_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log, Authentication)
from xml.dom.minidom import parseString
from owslib.namespaces import Namespaces
from urllib.parse import urlparse
import warnings
# namespace definition
n = Namespaces()
# These static namespaces are DEPRECATED. Please don't use them.
# No great way of printing a message since they are at the file level
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WFS_SCHEMA_LOCATION = 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'
# WPS execution modes
AUTO = 'auto'
SYNC = 'sync'
ASYNC = 'async'
def get_namespaces():
ns = n.get_namespaces(["ogc", "wfs", "wps", "gml", "xsi", "xlink"])
ns[None] = n.get_namespace("wps")
ns["ows"] = DEFAULT_OWS_NAMESPACE
return ns
namespaces = get_namespaces()
def is_reference(val):
"""
Checks if the provided value is a reference (URL).
"""
try:
parsed = urlparse(val)
is_ref = bool(parsed.scheme)
except Exception:
is_ref = False
return is_ref
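# For illustration: is_reference('http://example.org/data.xml') returns True, while
# is_reference('just a plain string') returns False because it has no URL scheme.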
def is_literaldata(val):
"""
Checks if the provided value is a string (includes unicode).
"""
return isinstance(val, str)
def is_boundingboxdata(val):
"""
Checks if the provided value is an implementation of ``BoundingBoxDataInput``.
"""
return isinstance(val, BoundingBoxDataInput)
def is_complexdata(val):
"""
Checks if the provided value is an implementation of ``IComplexDataInput``.
"""
return isinstance(val, IComplexDataInput)
def _fix_auth(auth, username=None, password=None, verify=None, cert=None):
"""Updates auth from deprecated parameters username, password, verify and cert."""
if any(p is not None for p in (username, password, verify, cert)):
message = 'The use of "username", "password", "verify", and "cert" is deprecated. ' + \
'Please use the "auth" keyword during class instantiation. ' + \
'These keywords will be removed in a future release.'
warnings.warn(message, DeprecationWarning)
if username is not None:
auth.username = username
if password is not None:
auth.password = password
if verify is not None:
auth.verify = verify
if cert is not None:
auth.cert = cert
return auth
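# Preferred pattern suggested by the deprecation handling above (sketch only;
# assumes the Authentication helper imported from owslib.util accepts these keywords):
#
#     auth = Authentication(username='user', password='secret', verify=True)
#     wps = WebProcessingService('http://example.org/wps', auth=auth)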
class IComplexDataInput(object):
"""
Abstract interface representing complex input object for a WPS request.
"""
def getXml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
raise NotImplementedError
class WebProcessingService(object):
"""
Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).
Implements IWebProcessingService.
"""
def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=<PASSWORD>, verbose=False, skip_caps=False,
headers=None, verify=None, cert=None, timeout=None, auth=None, language=None):
"""
Initialization method resets the object status.
By default it will execute a GetCapabilities invocation to the remote service,
which can be skipped by using skip_caps=True.
Parameters username, password, verify and cert are deprecated. Please use auth parameter.
"""
self.auth = auth or Authentication()
_fix_auth(self.auth, username, password, verify, cert)
# fields passed in from object initializer
self.url = clean_ows_url(url)
self.version = version
self.verbose = verbose
self.headers = headers
self.timeout = timeout
self.language = language
# fields populated by method invocations
self._capabilities = None
self.identification = None
self.provider = None
self.operations = []
self.processes = []
self.languages = None
if not skip_caps:
self.getcapabilities()
def getcapabilities(self, xml=None):
"""
Method that requests a capabilities document from the remote WPS server and populates this object's metadata.
keyword argument xml: local XML GetCapabilities document, prevents actual HTTP invocation.
"""
# read capabilities document
reader = WPSCapabilitiesReader(
version=self.version,
verbose=self.verbose,
auth=self.auth,
language=self.language,
timeout=self.timeout,
)
if xml:
# read from stored XML file
self._capabilities = reader.readFromString(xml)
else:
self._capabilities = reader.readFromUrl(
self.url, headers=self.headers)
log.debug(element_to_string(self._capabilities))
# populate the capabilities metadata objects from the XML tree
self._parseCapabilitiesMetadata(self._capabilities)
def describeprocess(self, identifier, xml=None):
"""
Requests a process document from a WPS service and populates the process metadata.
Returns the process object or a list of process objects.
:param str identifier: The process id. If `all`, return a list of all processes available.
"""
# read capabilities document
reader = WPSDescribeProcessReader(
version=self.version,
verbose=self.verbose,
auth=self.auth,
language=self.language,
timeout=self.timeout,
)
if xml:
# read from stored XML file
rootElement = reader.readFromString(xml)
else:
# read from server
rootElement = reader.readFromUrl(
self.url, identifier, headers=self.headers)
log.info(element_to_string(rootElement))
# build metadata objects
processes = self._parseProcessMetadata(rootElement)
if identifier == 'all':
return processes
# return process with given identifier
for process in processes:
if process.identifier == identifier:
return process
raise ValueError('process with identifier {} not found'.format(identifier))
def execute(self, identifier, inputs, output=None, mode=ASYNC, lineage=False, request=None, response=None):
"""
attr),
colorize(Color.CODE, name)
))
variables[name] = var
# is it an array of floating point numbers?
elif (match_obj := re.fullmatch(float_array_pattern, line)) is not None:
name = match_obj[4]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
var = NumberArray()
var.element = Number()
var.name = name
var.id = len(variables)
var.element.float_digits = int(match_obj[2])
var.is_unique = False
var.is_increasing = False
var.is_decreasing = False
var.is_printed_horizontally = (match_obj[1] == 'row')
var.size_expr = process_expr(match_obj[3])
var.element.low_expr, var.element.high_expr = map(process_expr, split_two_expr(match_obj[6]))
if match_obj[5] == '(':
var.element.low_expr = ['('] + var.element.low_expr + [')', '+', '10', '**',
'(', '-', f'{var.element.float_digits}', ')', '/', '2']
if match_obj[7] == ')':
var.element.high_expr = ['('] + var.element.high_expr + [')', '-', '10', '**',
'(', '-', f'{var.element.float_digits}', ')', '/', '2']
for attr in match_obj[8].split():
if attr == 'unique':
if var.is_unique:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'unique'),
colorize(Color.CODE, name)
))
var.is_unique = True
elif attr == 'inc':
if var.is_increasing:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'inc'),
colorize(Color.CODE, name)
))
if var.is_decreasing:
error('{} and {} cannot be set at the same time (check for the attributes of {}).'.format(
colorize(Color.CODE, 'inc'),
colorize(Color.CODE, 'dec'),
colorize(Color.CODE, name)
))
exit_failure()
var.is_increasing = True
elif attr == 'dec':
if var.is_decreasing:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'dec'),
colorize(Color.CODE, name)
))
if var.is_increasing:
error('{} and {} cannot be set at the same time (check for the attributes of {}).'.format(
colorize(Color.CODE, 'inc'),
colorize(Color.CODE, 'dec'),
colorize(Color.CODE, name)
))
exit_failure()
var.is_decreasing = True
else:
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, attr),
colorize(Color.CODE, name)
))
variables[name] = var
# is it an array of strings?
elif (match_obj := re.fullmatch(str_array_pattern, line)) is not None:
name = match_obj[4]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
var = StringArray()
var.element = String()
var.name = name
var.id = len(variables)
var.element.character_set = parse_character_set(name, match_obj[2])
var.element.is_characterwise_unique = False
var.is_elementwise_unique = False
var.is_printed_horizontally = (match_obj[1] == 'row')
var.size_expr = process_expr(match_obj[3])
var.element.length_low_expr, var.element.length_high_expr = map(process_expr, split_two_expr(match_obj[6]))
if match_obj[5] == '(':
var.element.length_low_expr = ['('] + var.element.length_low_expr + [')', '+', '1']
if match_obj[7] == ')':
var.element.length_high_expr = ['('] + var.element.length_high_expr + [')', '-', '1']
for attr in match_obj[8].split():
if attr == 'unique':
if var.is_elementwise_unique:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'unique'),
colorize(Color.CODE, name)
))
var.is_elementwise_unique = True
elif attr == 'distinct':
if var.element.is_characterwise_unique:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'distinct'),
colorize(Color.CODE, name)
))
var.element.is_characterwise_unique = True
else:
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, attr),
colorize(Color.CODE, name)
))
variables[name] = var
# is it a matrix of integers?
elif (match_obj := re.fullmatch(int_matrix_pattern, line)) is not None:
name = match_obj[2]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
if match_obj[6] not in ('', 'unique'):
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, match_obj[6]),
colorize(Color.CODE, name)
))
var = NumberMatrix()
var.element = Number()
var.name = name
var.id = len(variables)
var.element.float_digits = 0
var.is_unique = (match_obj[6] == 'unique')
var.size_r_expr, var.size_c_expr = map(process_expr, split_two_expr(match_obj[1]))
var.element.low_expr, var.element.high_expr = map(process_expr, split_two_expr(match_obj[4]))
if match_obj[3] == '(':
var.element.low_expr = ['('] + var.element.low_expr + [')', '+', '1']
if match_obj[5] == ')':
var.element.high_expr = ['('] + var.element.high_expr + [')', '-', '1']
variables[name] = var
# is it a matrix of floating point numbers?
elif (match_obj := re.fullmatch(float_matrix_pattern, line)) is not None:
name = match_obj[3]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
if match_obj[7] not in ('', 'unique'):
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, match_obj[7]),
colorize(Color.CODE, name)
))
var = NumberMatrix()
var.element = Number()
var.name = name
var.id = len(variables)
var.element.float_digits = int(match_obj[1])
var.is_unique = (match_obj[7] == 'unique')
var.size_r_expr, var.size_c_expr = map(process_expr, split_two_expr(match_obj[2]))
var.element.low_expr, var.element.high_expr = map(process_expr, split_two_expr(match_obj[5]))
if match_obj[4] == '(':
var.element.low_expr = ['('] + var.element.low_expr + [')', '+', '1']
if match_obj[6] == ')':
var.element.high_expr = ['('] + var.element.high_expr + [')', '-', '1']
variables[name] = var
# is it a tree?
elif (match_obj := re.fullmatch(tree_pattern, line)) is not None:
name = match_obj[2]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
var = Graph()
var.name = name
var.id = len(variables)
var.number_of_vertices_expr = process_expr(match_obj[1])
var.is_printed_horizontally = True
var.is_tree = True
var.is_directed = False
var.is_connected = True
var.is_acyclic = True
var.no_multiple_edge = True
var.no_self_loop = True
var.n_indexed = 1
for attr in match_obj[3].split():
if attr == 'directed':
if var.is_directed:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'directed'),
colorize(Color.CODE, name)
))
var.is_directed = True
elif attr.endswith('_indexed') and (attr[0] != '_'):
if var.n_indexed != 1:
warning(f'The index attribute of {colorize(Color.CODE, name)} is set multiple times.')
try:
var.n_indexed = int(attr[: attr.index('_')])
except:
error('{} (in the attribute of {}) does not seem to be an integer.'.format(
colorize(Color.CODE, attr[: attr.index('_')]),
colorize(Color.CODE, name)
))
exit_failure()
else:
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, attr),
colorize(Color.CODE, name)
))
variables[name] = var
continue
# is it a graph?
elif (match_obj := re.fullmatch(graph_pattern, line)) is not None:
name = match_obj[2]
if name in variables:
error(f'Variable called {colorize(Color.CODE, name)} has already been declared.')
exit_failure()
elif re.fullmatch(name_pattern, name) is None:
error(f'{colorize(Color.CODE, name)} is not a valid name.')
exit_failure()
elif name in reserved_words:
error(f'{colorize(Color.CODE, name)} is one of the reserved words.')
exit_failure()
var = Graph()
var.name = name
var.id = len(variables)
var.number_of_vertices_expr, var.number_of_edges_expr = map(process_expr, split_two_expr(match_obj[1]))
var.is_printed_horizontally = True
var.is_tree = False
var.is_directed = False
var.is_connected = False
var.is_acyclic = False
var.no_multiple_edge = False
var.no_self_loop = False
var.n_indexed = 1
for attr in match_obj[3].split():
if attr == 'simple':
var.no_multiple_edge = True
var.no_self_loop = True
elif attr == 'connected':
if var.is_connected:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'connected'),
colorize(Color.CODE, name)
))
var.is_connected = True
elif attr == 'directed':
if var.is_directed:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'directed'),
colorize(Color.CODE, name)
))
var.is_directed = True
elif attr == 'acyclic':
if var.is_acyclic:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'acyclic'),
colorize(Color.CODE, name)
))
var.is_acyclic = True
elif attr == 'no_self_loop':
if var.no_self_loop:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'no_self_loop'),
colorize(Color.CODE, name)
))
var.no_self_loop = True
elif attr == 'no_multiple_edge':
if var.no_multiple_edge:
warning('{} is set multiple times as an attribute of {}.'.format(
colorize(Color.CODE, 'no_multiple_edge'),
colorize(Color.CODE, name)
))
var.no_multiple_edge = True
elif attr.endswith('_indexed') and (attr[0] != '_'):
if var.n_indexed != 1:
warning(f'The index attribute of {colorize(Color.CODE, name)} is set multiple times.')
try:
var.n_indexed = int(attr[: attr.index('_')])
except:
error('{} (in the attribute of {}) does not seem to be an integer.'.format(
colorize(Color.CODE, attr[: attr.index('_')]),
colorize(Color.CODE, name)
))
exit_failure()
else:
warning('The attribute {} (for {}) is ignored since it is unknown.'.format(
colorize(Color.CODE, attr),
colorize(Color.CODE, name)
))
variables[name] = var
# fallback
else:
error(f'Failed to interpret {colorize(Color.CODE, line)}.')
exit_failure()
if (source == sys.stdin) and (sys.stdin.isatty()):
prompt()
override_statement = ''
if has_override_statement:
if (source == sys.stdin) and (sys.stdin.isatty()):
prompt()
for line in source:
inside_single_quote = False
inside_double_quote = False
for i in range(len(line)):
| |
else:
self.converter.generate_throw_expression(
self.builder.load(exception_slot)
)
else:
assert isinstance(controlFlowSwitch, llvmlite.ir.Value)
isReturn = self.builder.icmp_signed(">=", controlFlowSwitch, llvmI64(0))
with self.builder.if_else(isReturn) as (then, otherwise):
with then:
if return_slot is None:
self.builder.ret_void()
else:
self.builder.ret(self.builder.load(return_slot))
with otherwise:
self.converter.generate_throw_expression(
self.builder.load(exception_slot)
)
self.builder.unreachable()
def generate_trycatch_unwind(self, target_resume_block, generator):
if not self.incomingControlFlowSwitch:
assert self._block is None
return
controlFlowSwitch = self._generateControlFlowSwitchExpr()
with self.builder.goto_block(self._block):
tags = self.generate_tags()
if isinstance(controlFlowSwitch, int):
if controlFlowSwitch == 0:
raise Exception("Please don't jump to a try-catch")
if controlFlowSwitch >= 0:
block = self.parent_scope.acceptIncoming(
self.builder.block,
tags,
controlFlowSwitch - 1
)
self.builder.branch(block)
return
generator(tags, target_resume_block)
return
assert isinstance(controlFlowSwitch, llvmlite.ir.Value)
isReturn = self.builder.icmp_signed(
">",
controlFlowSwitch,
llvmI64(0)
)
with self.builder.if_then(isReturn):
block = self.parent_scope.acceptIncoming(
self.builder.block,
tags,
self.builder.sub(
controlFlowSwitch,
llvmI64(1)
)
)
self.builder.branch(block)
generator(tags, target_resume_block)
class FunctionConverter:
def __init__(self,
module,
globalDefinitions,
globalDefinitionLlvmValues,
function,
converter,
builder,
arg_assignments,
output_type,
external_function_references
):
self.function = function
# dict from name to GlobalVariableDefinition
self.globalDefinitions = globalDefinitions
self.globalDefinitionLlvmValues = globalDefinitionLlvmValues
self.module = module
self.converter = converter
self.builder = builder
self.arg_assignments = arg_assignments
self.output_type = output_type
self.external_function_references = external_function_references
self.tags_initialized = {}
self.stack_slots = {}
def tags_as(self, new_tags):
class scoper():
def __init__(scoper_self):
scoper_self.old_tags = None
def __enter__(scoper_self, *args):
scoper_self.old_tags = self.tags_initialized
self.tags_initialized = new_tags
def __exit__(scoper_self, *args):
self.tags_initialized = scoper_self.old_tags
return scoper()
def setup(self):
builder = self.builder
if self.output_type.matches.Void:
self.return_slot = None
else:
self.return_slot = builder.alloca(type_to_llvm_type(self.output_type))
self.exception_slot = builder.alloca(llvm_i8ptr, name="exception_slot")
# if populated, we are expected to write our return value to 'return_slot' and jump here
# on return
self.teardown_handler = TeardownHandler(self, None)
def finalize(self):
self.teardown_handler.generate_teardown(lambda tags: None, self.return_slot, self.exception_slot)
def generate_exception_landing_pad(self, block):
with self.builder.goto_block(block):
res = self.builder.landingpad(exception_type_llvm)
res.add_clause(
llvmlite.ir.CatchClause(
llvmlite.ir.Constant(llvm_i8ptr, None)
)
)
actual_exception = self.builder.call(
self.external_function_references["__cxa_begin_catch"],
[self.builder.extract_value(res, 0)]
)
result = self.builder.load(
self.builder.bitcast(
actual_exception,
llvm_i8ptr.as_pointer()
)
)
self.builder.store(result, self.exception_slot)
self.builder.call(
self.external_function_references["__cxa_end_catch"],
[self.builder.extract_value(res, 0)]
)
block = self.teardown_handler.acceptIncoming(
self.builder.block,
self.tags_initialized,
self.teardown_handler.controlFlowSwitchForException()
)
self.builder.branch(block)
def convert_teardown(self, teardown, justClearTags=False):
orig_tags = dict(self.tags_initialized)
if teardown.matches.Always:
if not justClearTags:
self.convert(teardown.expr)
else:
assert teardown.matches.ByTag
if teardown.tag in self.tags_initialized:
tagVal = self.tags_initialized[teardown.tag]
# mark that the tag is no longer active
del self.tags_initialized[teardown.tag]
del orig_tags[teardown.tag]
if not justClearTags:
if tagVal is True:
self.convert(teardown.expr)
else:
with self.builder.if_then(tagVal):
self.convert(teardown.expr)
assertTagDictsSame(self.tags_initialized, orig_tags)
def generate_exception_and_store_value(self, llvm_pointer_val):
exception_ptr = self.builder.bitcast(
self.builder.call(
self.external_function_references["__cxa_allocate_exception"],
[llvmI64(pointer_size)],
name="alloc_e"
),
llvm_i8ptr.as_pointer()
)
self.builder.store(
self.builder.bitcast(llvm_pointer_val, llvm_i8ptr),
exception_ptr
)
return self.builder.bitcast(exception_ptr, llvm_i8ptr)
def namedCallTargetToLLVM(self, target):
if target.external:
if target.name not in self.external_function_references:
func_type = llvmlite.ir.FunctionType(
type_to_llvm_type(target.output_type),
[type_to_llvm_type(x) for x in target.arg_types],
var_arg=target.varargs
)
if target.intrinsic:
self.external_function_references[target.name] = \
self.module.declare_intrinsic(target.name, fnty=func_type)
else:
self.external_function_references[target.name] = \
llvmlite.ir.Function(self.module, func_type, target.name)
func = self.external_function_references[target.name]
elif target.name in self.converter._externallyDefinedFunctionTypes:
# this function is defined in a shared object that we've loaded from a prior
# invocation
if target.name not in self.external_function_references:
func_type = llvmlite.ir.FunctionType(
type_to_llvm_type(target.output_type),
[type_to_llvm_type(x) for x in target.arg_types],
var_arg=target.varargs
)
assert target.name not in self.converter._function_definitions, target.name
self.external_function_references[target.name] = (
llvmlite.ir.Function(self.module, func_type, target.name)
)
func = self.external_function_references[target.name]
else:
func = self.converter._functions_by_name[target.name]
if func.module is not self.module:
# first, see if we'd like to inline this module
if (
self.converter.totalFunctionComplexity(target.name) < CROSS_MODULE_INLINE_COMPLEXITY
and self.converter.canBeInlined(target.name)
):
func = self.converter.repeatFunctionInModule(target.name, self.module)
else:
if target.name not in self.external_function_references:
self.external_function_references[target.name] = \
llvmlite.ir.Function(self.module, func.function_type, func.name)
func = self.external_function_references[target.name]
return TypedLLVMValue(
func,
native_ast.Type.Function(
args=target.arg_types,
output=target.output_type,
varargs=target.varargs,
can_throw=target.can_throw
).pointer()
)
def generate_throw_expression(self, llvm_pointer_val):
exception_ptr = self.generate_exception_and_store_value(llvm_pointer_val)
self.builder.call(
self.external_function_references["__cxa_throw"],
[exception_ptr] + [llvmlite.ir.Constant(llvm_i8ptr, None)] * 2
)
self.builder.unreachable()
def convert(self, expr):
"""Convert 'expr' into underlying llvm instructions.
Also, verify that if we return a value, our control flow
block is not terminated, and that if we don't return a value,
we don't have a dangling block.
"""
res = self._convert(expr)
if res is not None:
assert not self.builder.block.is_terminated, expr
else:
assert self.builder.block.is_terminated, expr
return res
def _convert(self, expr):
"""Actually convert 'expr' into underlying llvm instructions."""
if expr.matches.ApplyIntermediates:
res = TypedLLVMValue(None, native_ast.Type.Void())
priorName = []
priorRes = []
for i in expr.intermediates:
if i.matches.Terminal or i.matches.Effect:
res = self.convert(i.expr)
if res is None:
break
elif i.matches.StackSlot:
res = self.convert(i.expr)
if res is None:
break
elif i.matches.Simple:
lhs = self.convert(i.expr)
prior = self.arg_assignments.get(i.name, None)
self.arg_assignments[i.name] = lhs
priorName.append(i.name)
priorRes.append(prior)
if res is not None:
res = self.convert(expr.base)
while priorName:
name = priorName.pop()
prior = priorRes.pop()
if prior is not None:
self.arg_assignments[name] = prior
else:
del self.arg_assignments[name]
return res
if expr.matches.Let:
lhs = self.convert(expr.val)
prior = self.arg_assignments.get(expr.var, None)
self.arg_assignments[expr.var] = lhs
res = self.convert(expr.within)
if prior is not None:
self.arg_assignments[expr.var] = prior
else:
del self.arg_assignments[expr.var]
return res
if expr.matches.StackSlot:
if expr.name not in self.stack_slots:
if expr.type.matches.Void:
llvm_type = type_to_llvm_type(native_ast.Type.Struct(element_types=(), name="void"))
else:
llvm_type = type_to_llvm_type(expr.type)
with self.builder.goto_entry_block():
self.stack_slots[expr.name] = \
TypedLLVMValue(
self.builder.alloca(llvm_type, name=expr.name),
native_ast.Type.Pointer(value_type=expr.type)
)
assert self.stack_slots[expr.name].native_type.value_type == expr.type, \
"StackSlot %s supposed to have value %s but got %s" % (
expr.name,
self.stack_slots[expr.name].native_type.value_type,
expr.type
)
return self.stack_slots[expr.name]
if expr.matches.GlobalVariable:
if expr.name in self.globalDefinitions:
assert expr.metadata == self.globalDefinitions[expr.name].metadata
assert expr.type == self.globalDefinitions[expr.name].type
else:
llvm_type = type_to_llvm_type(expr.type)
self.globalDefinitions[expr.name] = GlobalVariableDefinition(
expr.name,
expr.type,
expr.metadata
)
self.globalDefinitionLlvmValues[expr.name] = TypedLLVMValue(
llvmlite.ir.GlobalVariable(self.module, llvm_type, expr.name),
native_ast.Type.Pointer(value_type=expr.type)
)
self.globalDefinitionLlvmValues[expr.name].llvm_value.linkage = "private"
self.globalDefinitionLlvmValues[expr.name].llvm_value.initializer = (
constant_to_typed_llvm_value(
self.module,
self.builder,
expr.type.zero().val
).llvm_value
)
return self.globalDefinitionLlvmValues[expr.name]
if expr.matches.Alloca:
if expr.type.matches.Void:
llvm_type = type_to_llvm_type(native_ast.Type.Struct(()))
else:
llvm_type = type_to_llvm_type(expr.type)
return TypedLLVMValue(self.builder.alloca(llvm_type), native_ast.Type.Pointer(expr.type))
if expr.matches.MakeStruct:
names_and_args = [(a[0], self.convert(a[1])) for a in expr.args]
names_and_types = [(a[0], a[1].native_type) for a in names_and_args]
exprs = [a[1].llvm_value for a in names_and_args]
types = [a.type for a in exprs]
value = llvmlite.ir.Constant(llvmlite.ir.LiteralStructType(types), None)
for i in range(len(exprs)):
value = self.builder.insert_value(value, exprs[i], i)
return TypedLLVMValue(value, native_ast.Type.Struct(names_and_types))
if expr.matches.StructElementByIndex:
val = self.convert(expr.left)
if val.native_type.matches.Struct:
i = expr.index
return TypedLLVMValue(
self.builder.extract_value(val.llvm_value, i),
val.native_type.element_types[i][1]
)
if expr.matches.Store:
ptr = self.convert(expr.ptr)
val = self.convert(expr.val)
if not val.native_type.matches.Void:
self.builder.store(val.llvm_value, ptr.llvm_value)
return TypedLLVMValue(None, native_ast.Type.Void())
if expr.matches.AtomicAdd:
ptr = self.convert(expr.ptr)
val = self.convert(expr.val)
return TypedLLVMValue(
self.builder.atomic_rmw("add", ptr.llvm_value, val.llvm_value, "monotonic"),
val.native_type
)
if expr.matches.Load:
ptr = self.convert(expr.ptr)
assert ptr.native_type.matches.Pointer, ptr.native_type
if ptr.native_type.value_type.matches.Void:
return TypedLLVMValue(None, ptr.native_type.value_type)
return TypedLLVMValue(self.builder.load(ptr.llvm_value), ptr.native_type.value_type)
if expr.matches.Constant:
return constant_to_typed_llvm_value(self.module, self.builder, expr.val)
if expr.matches.Cast:
lhs = self.convert(expr.left)
if lhs is None:
return
target_type = type_to_llvm_type(expr.to_type)
if lhs.native_type == expr.to_type:
return lhs
if lhs.native_type.matches.Pointer and expr.to_type.matches.Pointer:
return TypedLLVMValue(self.builder.bitcast(lhs.llvm_value, target_type), expr.to_type)
if lhs.native_type.matches.Pointer and expr.to_type.matches.Int:
return TypedLLVMValue(self.builder.ptrtoint(lhs.llvm_value, target_type), expr.to_type)
if lhs.native_type.matches.Int and expr.to_type.matches.Pointer:
return TypedLLVMValue(self.builder.inttoptr(lhs.llvm_value, target_type), expr.to_type)
if lhs.native_type.matches.Float and expr.to_type.matches.Int:
if expr.to_type.signed:
return TypedLLVMValue(self.builder.fptosi(lhs.llvm_value, target_type), expr.to_type)
else:
return TypedLLVMValue(self.builder.fptoui(lhs.llvm_value, target_type), expr.to_type)
elif lhs.native_type.matches.Float and expr.to_type.matches.Float:
if lhs.native_type.bits > expr.to_type.bits:
return TypedLLVMValue(self.builder.fptrunc(lhs.llvm_value, target_type), expr.to_type)
else:
return TypedLLVMValue(self.builder.fpext(lhs.llvm_value, target_type), expr.to_type)
elif lhs.native_type.matches.Int and expr.to_type.matches.Int:
if lhs.native_type.bits < expr.to_type.bits:
if lhs.native_type.signed:
return TypedLLVMValue(self.builder.sext(lhs.llvm_value, target_type), expr.to_type)
else:
return TypedLLVMValue(self.builder.zext(lhs.llvm_value, target_type), expr.to_type)
else:
return TypedLLVMValue(self.builder.trunc(lhs.llvm_value, target_type), expr.to_type)
elif lhs.native_type.matches.Int and expr.to_type.matches.Float:
if lhs.native_type.signed:
return TypedLLVMValue(self.builder.sitofp(lhs.llvm_value, target_type), expr.to_type)
else:
return TypedLLVMValue(self.builder.uitofp(lhs.llvm_value, target_type), expr.to_type)
else:
raise Exception(f"Invalid cast: {lhs.native_type} to {expr.to_type}")
if expr.matches.Return:
if expr.blockName is not None:
# assert expr.arg is None, expr.arg
if expr.arg is not None:
# write the value into the return slot
arg = self.convert(expr.arg)
if arg is None:
# the expression threw an exception so we can't actually
# return
return
if not self.output_type.matches.Void:
assert self.return_slot is not None
self.builder.store(arg.llvm_value, self.return_slot)
controlFlowSwitch = self.teardown_handler.controlFlowSwitchForReturn(name=expr.blockName)
block = self.teardown_handler.acceptIncoming(
self.builder.block,
self.tags_initialized,
controlFlowSwitch
)
self.builder.branch(block)
return
else:
# this is a naked 'return'
if expr.arg is not None:
# write the value into the return slot
arg = self.convert(expr.arg)
if arg is None:
# the expression threw an exception so we can't actually
# return
return
if not self.output_type.matches.Void:
assert self.return_slot is not None
self.builder.store(arg.llvm_value, self.return_slot)
controlFlowSwitch = self.teardown_handler.controlFlowSwitchForReturn(name=None)
block = self.teardown_handler.acceptIncoming(
self.builder.block,
self.tags_initialized,
controlFlowSwitch
)
self.builder.branch(block)
return
if expr.matches.Branch:
cond = self.convert(expr.cond)
if cond is None:
return None
cond_llvm = cond.llvm_value
zero_like = llvmlite.ir.Constant(cond_llvm.type, 0)
if cond.native_type.matches.Pointer:
cond_llvm = self.builder.ptrtoint(cond_llvm, llvm_i64)
cond_llvm = self.builder.icmp_signed("!=", cond_llvm, zero_like)
elif cond.native_type.matches.Int:
if cond_llvm.type.width != 1:
cond_llvm = self.builder.icmp_signed("!=", cond_llvm, zero_like)
elif cond.native_type.matches.Float:
cond_llvm = self.builder.fcmp_unordered("!=", cond_llvm, zero_like)
else:
return self.convert(expr.false)
orig_tags = dict(self.tags_initialized)
true_tags = dict(orig_tags)
false_tags = dict(orig_tags)
with self.builder.if_else(cond_llvm) as (then, otherwise):
with then:
self.tags_initialized = true_tags
true = self.convert(expr.true)
true_tags = self.tags_initialized
true_block = self.builder.block
with otherwise:
self.tags_initialized = false_tags
false = self.convert(expr.false)
false_tags = self.tags_initialized
false_block = self.builder.block
if true is None and false is None:
self.tags_initialized = orig_tags
self.builder.unreachable()
return None
if true is
# Copyright (c) 2020 Blackwing Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from binaryninja.architecture import Architecture
from binaryninja.binaryview import BinaryView, BinaryReader, AnalysisCompletionEvent
from binaryninja.enums import SymbolType, SegmentFlag, MessageBoxButtonSet, MessageBoxIcon
from binaryninja.interaction import show_message_box
from binaryninja.types import Symbol
from binaryninja import Settings
import binascii
import json
import os
import struct
import traceback
use_default_loader_settings = True
class iBoot64View(BinaryView):
name = "iBoot64Binja"
long_name = "iBoot64 View"
load_address = 0x0
def __init__(self, data):
BinaryView.__init__(self, parent_view=data, file_metadata=data.file)
self.data = data
def init(self):
self.raw = self.data
self.add_analysis_completion_event(self.on_complete)
try:
load_settings = self.get_load_settings(self.name)
if load_settings is None:
print("Load Settings is None")
self.arch = Architecture['aarch64']
self.platform = self.arch.standalone_platform
# return True
self.load_address = self.find_reset(self.data)
if self.load_address is None:
print("Error: Could not find reset vector!")
self.load_address = 0
print("LOAD ADDRESS: " + hex(self.load_address))
# self.add_auto_segment(0, len(self.parent_view), 0, len(self.parent_view), SegmentFlag.SegmentReadable)
else:
print("Load Settings: ")
print(load_settings)
arch = load_settings.get_string("loader.architecture", self)
self.arch = Architecture[arch]
self.platform = self.arch.standalone_platform
# self.platform = Architecture['aarch64'].standalone_platform
self.load_address = int(load_settings.get_string("loader.imageBase", self))
self.add_auto_segment(self.load_address, len(self.parent_view), 0, len(self.parent_view),
SegmentFlag.SegmentReadable | SegmentFlag.SegmentExecutable)
self.add_entry_point(self.load_address)
self.define_auto_symbol(Symbol(SymbolType.FunctionSymbol, self.load_address, '_start'))
self.update_analysis()
# self.find_interesting()
return True
except:
print(traceback.format_exc())
return False
@classmethod
def is_valid_for_data(self, data):
try:
iBootVersionOffset = 0x280
iboot_version = data.get_ascii_string_at(iBootVersionOffset).value
if iboot_version.startswith("iBoot"):
# Save version to global for future ref?
# choice = show_message_box(
# "iBoot64Binja Loader",
# "This appears to be an iBoot binary - Load iBoot64Binja?",
# MessageBoxButtonSet.YesNoCancelButtonSet,
# MessageBoxIcon.InformationIcon)
# if choice == 1:
# return True
# else:
# return False
return True
return False
except AttributeError:
return False
@classmethod
def get_load_settings_for_data(self, data):
load_settings = Settings("mapped_load_settings")
if use_default_loader_settings:
load_settings = self.registered_view_type.get_default_load_settings_for_data(data)
# specify default load settings that can be overridden (from the UI)
overrides = ["loader.architecture", "loader.platform", "loader.entryPoint", "loader.imageBase",
"loader.segments", "loader.sections"]
for override in overrides:
if load_settings.contains(override):
load_settings.update_property(override, json.dumps({'readOnly': False}))
# override default setting value
load_settings.update_property("loader.imageBase", json.dumps({'default': 0}))
load_settings.update_property("loader.entryPoint", json.dumps({'default': 0}))
# # add custom arch setting
# load_settings.register_setting("loader.my_custom_arch.customLoadSetting",
# '{"title" : "My Custom Load Setting",\
# "type" : "boolean",\
# "default" : false,\
# "description" : "My custom load setting description."}')
return load_settings
def perform_get_entry_point(self):
return self.load_address
def perform_is_executable(self):
return True
def perform_is_relocatable(self):
return True
def perform_get_address_size(self):
return self.arch.address_size
# def find_reset_capstone(self, data):
# CODE = data[:1000]
# md = Cs(CS_ARCH_ARM64, CS_MODE_ARM)
# md.detail = True
# for i in md.disasm(CODE, 0x0):
# if i.mnemonic == 'ldr':
# offset = int(i.operands[1].value.imm)
# return struct.unpack("Q", CODE[offset:offset + 8])[0]
def load_defs(self):
cur_file_path = os.path.dirname(os.path.abspath(__file__))
symbol_file_path = os.path.join(cur_file_path, '..', 'data', 'defs.json')
print("Trying to load defs file at: {}".format(symbol_file_path))
with open(symbol_file_path, 'r') as f:
return json.load(f)
def on_complete(self, blah):
print("[+] Analysis complete. Finding interesting functions...")
self.find_interesting()
def resolve_string_refs(self, defs):
stringrefs = [sym for sym in defs['symbol'] if sym['heuristic'] == "stringref"]
for sym in stringrefs:
if self.define_func_from_stringref(sym['identifier'], sym['name']) is None:
print("[!] Can't find function {}".format(sym['name']))
def resolve_n_string_refs(self, defs):
stringrefs = [sym for sym in defs['symbol'] if sym['heuristic'] == "nstringrefs"]
for sym in stringrefs:
try:
refcount = sym['refcount']
if isinstance(refcount, int):
if self.define_func_from_n_stringrefs(sym['identifier'], sym['name'], sym['refcount']) is None:
print("[!] Can't find function {}".format(sym['name']))
except:
print("[!] Bad refcount for symbol {}: {}".format(sym['name'], sym['refcount']))
continue
def resolve_byte_sig_pattern(self, identifier):
pattern = []
for byte in identifier.split(' '):
if byte == '?':
pattern.append(byte)
elif byte != '':
pattern.append(int(byte, 16))
br = BinaryReader(self)
result = 0
length = len(pattern) - 1
for function in self.functions:
br.seek(function.start)
while self.get_functions_containing(br.offset + length) is not None and function in self.get_functions_containing(br.offset + length):
found = True
count = 0
for entry in pattern:
byte = br.read8()
count += 1
if entry != byte and entry != '?':
found = False
break
br.offset -= count
if found:
result = br.offset
break
instruction_length = self.get_instruction_length(br.offset)
#account for unknown or bad instruction
if instruction_length == 0:
break
br.offset += instruction_length
if result != 0:
break
if result == 0:
return None
else:
return self.get_functions_containing(result)[0].lowest_address
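# Usage sketch for the wildcard byte-signature matching above (the signature string is a
# made-up example, not one taken from the bundled defs.json): '?' entries match any byte, so
#   self.resolve_byte_sig_pattern("ff 43 00 d1 ? ? ? 94")
# scans each function for ff 43 00 d1, any three bytes, then 94, and returns the lowest
# address of the function containing the first match (or None if nothing matches).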
def resolve_byte_sigs(self, defs):
bytesigs = [sym for sym in defs['symbol'] if sym['heuristic'] == "bytesig"]
for sym in bytesigs:
if "?" in sym['identifier']:
addr = self.resolve_byte_sig_pattern(sym['identifier'])
if addr:
self.define_function_at_address(addr, sym['name'])
else:
print("[!] Can't find function {}".format(sym['name']))
else:
try:
signature = binascii.unhexlify(sym['identifier'])
except binascii.Error:
print("[!] Bad Signature for {}! Must be hex encoded string, got: {}.".format(sym['name'], sym['identifier']))
return
if self.define_func_from_bytesignature(signature, sym['name']) is None:
print("[!] Can't find function {}".format(sym['name']))
def resolve_constants(self, defs):
constants = [sym for sym in defs['symbol'] if sym['heuristic'] == "constant"]
for sym in constants:
const = self.convert_const(sym['identifier'])
if const is None:
print("[!] Bad constant definition for symbol {}: {}".format(sym['name'], sym['identifier']))
elif self.define_func_from_constant(const, sym['name']) is None:
print("[!] Can't find function {}".format(sym['name']))
def resolve_xrefs_to(self, defs):
xrefs = [sym for sym in defs['symbol'] if sym['heuristic'] == "xrefsto"]
for sym in xrefs:
if self.define_func_from_xref_to(sym['identifier'], sym['name']) is None:
print("[!] Can't find function {}".format(sym['name']))
def convert_const(self, const):
try:
if isinstance(const, int):
return const
bin_const = binascii.unhexlify(const.replace('0x', ''))
if len(bin_const) == 2:
fmt = ">H"
elif len(bin_const) == 4:
fmt = ">I"
elif len(bin_const) == 8:
fmt = ">Q"
else:
return None
return struct.unpack(fmt, bin_const)[0]
except:
return None
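# For example, convert_const("0xdead") unhexlifies to two bytes and unpacks them big-endian
# as 0xdead, convert_const(1234) is returned unchanged, and any value that is not 2, 4, or
# 8 bytes long (or fails to parse) comes back as None.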
def find_interesting(self):
defs = self.load_defs()
self.resolve_string_refs(defs)
self.resolve_byte_sigs(defs)
self.resolve_constants(defs)
self.resolve_n_string_refs(defs)
self.resolve_xrefs_to(defs)
def find_reset(self, data):
i = 0
end = data.find_next_data(0, 'iBoot for')
if end is None:
end = data.find_next_data(0, 'SecureROM for')
if end is None:
return None
while i < end:
# Have to hand disassemble bytes since analysis hasn't yet been performed.
instr, width = self.arch.get_instruction_text(data[i:], 0)
try:
if instr[0].text == 'ldr':
# Add current address to ldr argument for offset
offset = instr[4].value + i
return struct.unpack("Q", data[offset:offset + 8])[0]
i += width
except TypeError:
i += 1
continue
return None
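# In short, the heuristic above assumes the first 'ldr' literal load appearing before the
# "iBoot for" / "SecureROM for" banner loads the image base: the literal's offset is added to
# the current position i and the 8 bytes found there are unpacked as the 64-bit load address.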
# def find_panic(self):
# ptr = self.start
# while ptr < self.end:
# ptr = self.find_next_data(ptr, b'double panic in ')
# refs = self.get_code_refs(ptr)
# if refs:
# for i in refs:
# func_start = i.function.lowest_address
# # self.define_auto_symbol(Symbol(SymbolType.FunctionSymbol, func_start, '_panic'))
# self.define_user_symbol(Symbol(SymbolType.FunctionSymbol, func_start, '_panic'))
# # TODO: Improve - Currently breaks on first ref
# return func_start
# else:
# ptr = ptr + 1
# # Not sure the Binja idiomatic thing to return
# # return -1
# return None
def define_func_from_stringref(self, needle, func_name):
ptr = self.start
while ptr < self.end:
# using bv.find_next_data instead of bv.find_next_text here because it seems to be _way_ faster
# ptr = self.find_next_text(ptr, needle)
# ptr = self.find_next_data(ptr, bytes(needle.encode("utf-8")))
ptr = self.find_next_data(ptr, needle)
if not ptr:
break
refs = self.get_code_refs(ptr)
if refs:
func_start = refs[0].function.lowest_address
self.define_function_at_address(func_start, func_name)
# self.define_auto_symbol(Symbol(SymbolType.FunctionSymbol, func_start, func_name))
# print("[+] Added function {} at {}".format(func_name, hex(func_start)))
return func_start
else:
ptr = ptr + 1
return None
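# Example, mirroring the commented-out find_panic above (needle and symbol name come from
# that snippet): name the function that references the panic format string:
#   self.define_func_from_stringref('double panic in ', '_panic')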
def define_func_from_n_stringrefs(self, needle, func_name, refcount):
ptr = self.start
while ptr < self.end:
refs = []
ptr = self.find_next_data(ptr, needle)
if not ptr:
break
for ref in self.get_code_refs(ptr):
refs.append(ref.function.lowest_address)
for func_start in refs:
if refs.count(func_start) == refcount:
self.define_function_at_address(func_start, func_name)
return func_start
ptr = ptr + 1
return None
def define_func_from_bytesignature(self, signature, func_name):
ptr = self.start
while ptr < self.end:
# Have to convert the signature bytearray to a string since find_next_data can't handle bytes on stable
# fixed on dev in: https://github.com/Vector35/binaryninja-api/commit/c18b89e4cabfc28081a7893ccd4cf8956c9a797f
signature = "".join(chr(x) for x in signature) if isinstance(signature, (bytes, bytearray)) else signature
ptr = self.find_next_data(ptr, signature)
if not ptr:
break
# Only finds first occurrence of signature - might
the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
:returns: None
"""
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
self._instance = self.resize(size, resample, image=self._instance)
self.readonly = 0
self.pyaccess = None
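# Usage sketch for the in-place resize above (the method name is assumed to be thumbnail(),
# mirroring the PIL API this class reimplements; `im` is a hypothetical Image instance):
#   im.thumbnail((128, 128))   # shrinks im to fit within 128x128, preserving aspect ratio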
def transform(self, size, method, data=None, resample=NEAREST,
fill=1, fillcolor=None):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
object::
class Example(Image.ImageTransformHandler):
def transform(size, method, data, resample, fill=1):
# Return result
It may also be an object with a :py:meth:`~method.getdata` method
that returns a tuple supplying new **method** and **data** values::
class Example(object):
def getdata(self):
method = Image.EXTENT
data = (0, 0, 100, 100)
return method, data
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:param fill: If **method** is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
the arguments passed to it. Otherwise, it is unused.
:param fillcolor: Optional fill color for the area outside the
transform in the output image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if method == EXTENT:
x0, y0, x1, y1 = data
part = self._instance[y0:y1, x0:x1]
_im = cv2.resize(part, size)
elif method == AFFINE:
x0, y0, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5 = data
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2]])
pts2 = np.float32([[x3, y3], [x4, y4], [x5, y5]])
M = cv2.getAffineTransform(pts1,pts2)
_im = cv2.warpAffine(self._instance, M, size)
elif method == PERSPECTIVE or method == QUAD:
x0, y0, x1, y1, x2, y2, x3, y3 = data
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
pts2 = np.float32([[0,0],[size[0], 0], [0, size[1]], [size[0], size[1]]])
M = cv2.getPerspectiveTransform(pts1, pts2)
_im = cv2.warpPerspective(self._instance, M, size)
elif method == MESH:
_im = self._instance.copy()
for elem in data:
box, quad = elem
x0, y0, x1, y1, x2, y2, x3, y3 = quad
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
pts2 = np.float32([[box[0], box[1]],[box[2], box[1]], [box[0], box[3]], [box[2], box[3]]])
M = cv2.getPerspectiveTransform(pts1, pts2)
_im = cv2.warpPerspective(_im, M, size)
return Image(_im)
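# Usage sketch (`im` is a hypothetical Image instance): cut the 100x100 top-left region out
# and scale it into a 50x50 output with the EXTENT transform documented above:
#   out = im.transform((50, 50), EXTENT, data=(0, 0, 100, 100))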
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:returns: Returns a flipped or rotated copy of this image.
"""
w, h = self.size
if method == FLIP_LEFT_RIGHT:
_im = cv2.flip(self._instance, 1)
elif method == FLIP_TOP_BOTTOM:
_im = cv2.flip(self._instance, 0)
elif method == ROTATE_90:
_im = self.rotate_bound(270)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
elif method == ROTATE_180:
_im = self.rotate(180, self._instance)
elif method == ROTATE_270:
_im = self.rotate_bound(90)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
if isinstance(_im, Image):
return _im
elif isinstance(_im, np.ndarray):
return Image(_im)
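# Quick examples of the transpose() dispatch above (`im` is a hypothetical Image instance):
#   mirrored = im.transpose(FLIP_LEFT_RIGHT)
#   rotated = im.transpose(ROTATE_90)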
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
class FreeTypeFont(object):
"FreeType font wrapper (requires python library freetype-py)"
def __init__(self, font=None, size=10, index=0, encoding="",
layout_engine=None):
self.path = font
self.size = size
self.index = index
self.encoding = encoding
self.layout_engine = layout_engine
if os.path.isfile(self.path):
self.font = load(self.path, self.size+16)
else:
self.font = None
def getsize(text, ttf_font, scale=1.0, thickness=1):
if isinstance(ttf_font, freetype.Face):
slot = ttf_font.glyph
width, height, baseline = 0, 0, 0
previous = 0
for i,c in enumerate(text):
ttf_font.load_char(c)
bitmap = slot.bitmap
height = max(height, bitmap.rows + max(0,-(slot.bitmap_top-bitmap.rows)))
baseline = max(baseline, max(0,-(slot.bitmap_top-bitmap.rows)))
kerning = ttf_font.get_kerning(previous, c)
width += (slot.advance.x >> 6) + (kerning.x >> 6)
previous = c
else:
size = cv2.getTextSize(text, ttf_font, scale, thickness)
width = size[0][0]
height = size[0][1]
baseline = size[1]
return width, height, baseline
def getmask(text, ttf_font):
slot = ttf_font.glyph
width, height, baseline = getsize(text, ttf_font)
Z = np.zeros((height, width), dtype=np.ubyte)
x, y = 0, 0
previous = 0
for c in text:
ttf_font.load_char(c)
bitmap = slot.bitmap
top = slot.bitmap_top
left = slot.bitmap_left
w,h = bitmap.width, bitmap.rows
y = height-baseline-top
if y<=0: y=0
kerning = ttf_font.get_kerning(previous, c)
x += (kerning.x >> 6)
character = np.array(bitmap.buffer, dtype='uint8').reshape(h,w)
try:
Z[y:y+h,x:x+w] += character
except ValueError:
while x+w>Z.shape[1]:
x = x - 1
# print("new", x, y, w, h, character.shape, type(bitmap))
if x>0:
Z[:character.shape[0],x:x+w] += character
x += (slot.advance.x >> 6)
previous = c
return Z
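# Sketch of how load() (defined below) and getmask() combine; the font path is a placeholder:
#   face = load("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24)
#   mask = getmask("Hello", face)   # 2-D numpy uint8 array of rendered glyph coverage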
def grab(bbox=None):
if mss_installed:
fh, filepath = tempfile.mkstemp('.png')
with mss.mss() as sct:
# The screen part to capture
if bbox is None:
filepath = sct.shot(mon=-1, output=filepath)
else:
monitor = {"top": bbox[1], "left": bbox[0], "width": bbox[2]-bbox[0], "height": bbox[3]-bbox[1]}
# Grab the data
sct_img = sct.grab(monitor)
# Save to the picture file
mss.tools.to_png(sct_img.rgb, sct_img.size, output=filepath)
return open(filepath)
else:
NotImplementedError("mss is not installed so there is no grab method available, install it with: pip install mss")
def grabclipboard():
if mss_installed:
if bitmap_classes_ok:
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp('.jpg')
os.close(fh)
commands = [
"set theFile to (open for access POSIX file \""
+ filepath + "\" with write permission)",
"try",
" write (the clipboard as JPEG picture) to theFile",
"end try",
"close access theFile"
]
script = ["osascript"]
for command in commands:
script += ["-e", command]
subprocess.call(script)
im = None
if os.stat(filepath).st_size != 0:
im = open(filepath)
os.unlink(filepath)
return im
else:
fh, filepath = tempfile.mkstemp('.bmp')
import win32clipboard, builtins
win32clipboard.OpenClipboard()
try:
if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_DIB):
data = win32clipboard.GetClipboardData(win32clipboard.CF_DIB)
else:
data = None
finally:
win32clipboard.CloseClipboard()
if data is None: return None
bmih = BITMAPINFOHEADER()
ctypes.memmove(ctypes.pointer(bmih), data, SIZEOF_BITMAPINFOHEADER)
bmfh = BITMAPFILEHEADER()
ctypes.memset(ctypes.pointer(bmfh), 0, SIZEOF_BITMAPFILEHEADER) # zero structure
bmfh.bfType = ord('B') | (ord('M') << 8)
bmfh.bfSize = SIZEOF_BITMAPFILEHEADER + len(data) # file size
SIZEOF_COLORTABLE = 0
bmfh.bfOffBits = SIZEOF_BITMAPFILEHEADER + SIZEOF_BITMAPINFOHEADER + SIZEOF_COLORTABLE
with builtins.open(filepath, 'wb') as bmp_file:
bmp_file.write(bmfh)
bmp_file.write(data)
return open(filepath)
else:
raise NotImplementedError("grabclipboard is not available on your platform")
else:
NotImplementedError("mss is not installed so there is no grabclipboard method available, install it with: pip install mss")
def load(filename, size=12):
"""
Load a font file. This function loads a font object from the given
bitmap font file, and returns the corresponding font object.
:param filename: Name of font file.
:return: A font object.
:exception IOError: If the file could not be read.
"""
# face = Face('./VeraMono.ttf')
face = freetype.Face(filename)
face.set_char_size(size*size)
return face
def truetype(font=None, size=10, index=0, encoding="",
layout_engine=None):
"""
Load a TrueType or OpenType font from a file or file-like object,
and create a font object.
This function loads a font object from the given file or file-like
object, and creates a font object for a font of the given size.
This function requires the _imagingft service.
:param font: A filename or file-like
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import warnings
import requests
from verta.tracking._organization import Organization
from ._internal_utils._utils import check_unnecessary_params_warning
from ._protos.public.modeldb import CommonService_pb2 as _CommonService
from .external import six
from .external.six.moves.urllib.parse import urlparse # pylint: disable=import-error, no-name-in-module
from ._internal_utils import (
_config_utils,
_request_utils,
_utils,
)
from verta import credentials
from verta.credentials import EmailCredentials, JWTCredentials
from .tracking import _Context
from .tracking.entities import (
Project,
Projects,
Experiment,
Experiments,
ExperimentRun,
ExperimentRuns,
)
from .registry.entities import (
RegisteredModel,
RegisteredModels,
RegisteredModelVersion,
RegisteredModelVersions,
)
from .dataset.entities import (
Dataset,
Datasets,
DatasetVersion,
)
from .endpoint import Endpoint
from .endpoint import Endpoints
from .endpoint.update import DirectUpdateStrategy
from .visibility import _visibility
from .monitoring.client import Client as MonitoringClient
class Client(object):
"""
Object for interfacing with the Verta backend.
.. deprecated:: 0.12.0
The `port` parameter will be removed in an upcoming version; please combine `port` with the first parameter,
e.g. `Client("localhost:8080")`.
.. deprecated:: 0.13.3
The `expt_runs` attribute will be removed in an upcoming version; consider using `proj.expt_runs` and
`expt.expt_runs` instead.
This class provides functionality for starting/resuming Projects, Experiments, and Experiment Runs.
Parameters
----------
host : str, optional
Hostname of the Verta Web App.
email : str, optional
Authentication credentials for managed service. If this does not sound familiar, then there
is no need to set it.
dev_key : str, optional
Authentication credentials for managed service. If this does not sound familiar, then there
is no need to set it.
max_retries : int, default 5
Maximum number of times to retry a request on a connection failure. This only attempts retries
on HTTP codes {502, 503, 504} which commonly occur during back end connection lapses.
ignore_conn_err : bool, default False
Whether to ignore connection errors and instead return successes with empty contents.
use_git : bool, default True
Whether to use a local Git repository for certain operations such as Code Versioning.
debug : bool, default False
Whether to print extra verbose information to aid in debugging.
extra_auth_headers : dict, default {}
Extra headers to include on requests, like to permit traffic through a restrictive application load balancer
_connect : str, default True
Whether to connect to server (``False`` for unit tests).
Attributes
----------
max_retries : int
Maximum number of times to retry a request on a connection failure. Changes to this value
propagate to any objects that are/were created from this client.
ignore_conn_err : bool
Whether to ignore connection errors and instead return successes with empty contents. Changes
to this value propagate to any objects that are/were created from this client.
debug : bool
Whether to print extra verbose information to aid in debugging. Changes to this value propagate
to any objects that are/were created from this client.
monitoring : :class:`verta.monitoring.client.Client`
Monitoring sub-client
proj : :class:`~verta.tracking.entities.Project` or None
Currently active project.
projects : :class:`~verta.tracking.entities.Projects`
Projects in the current default workspace.
expt : :class:`~verta.tracking.entities.Experiment` or None
Currently active experiment.
experiments : :class:`~verta.tracking.entities.Experiments`
Experiments in the current default workspace.
expt_runs : :class:`~verta.tracking.entities.ExperimentRuns`
Experiment runs in the current default workspace.
registered_models : :class:`~verta.registry.entities.RegisteredModels`
Registered models in the current default workspace.
registered_model_versions : :class:`~verta.registry.entities.RegisteredModelVersions`
Registered model versions in the current default workspace.
endpoints : :class:`~verta.endpoint.Endpoints`
Endpoints in the current default workspace.
datasets : :class:`~verta.dataset.entities.Datasets`
Datasets in the current default workspace.
"""
def __init__(self, host=None, port=None, email=None, dev_key=None,
max_retries=5, ignore_conn_err=False, use_git=True, debug=False, extra_auth_headers={}, jwt_token=None, jwt_token_sig=None, _connect=True):
self._load_config()
host = self._get_with_fallback(host, env_var="VERTA_HOST", config_var="host")
if host is None:
raise ValueError("`host` must be provided")
email = self._get_with_fallback(email, env_var=EmailCredentials.EMAIL_ENV, config_var="email")
dev_key = self._get_with_fallback(dev_key, env_var=EmailCredentials.DEV_KEY_ENV, config_var="dev_key")
jwt_token = self._get_with_fallback(jwt_token, env_var=JWTCredentials.JWT_TOKEN_ENV, config_var="jwt_token")
jwt_token_sig = self._get_with_fallback(jwt_token_sig, env_var=JWTCredentials.JWT_TOKEN_SIG_ENV, config_var="jwt_token_sig")
self.auth_credentials = credentials._build(email=email, dev_key=dev_key, jwt_token=jwt_token, jwt_token_sig=jwt_token_sig)
self._workspace = self._get_with_fallback(None, env_var="VERTA_WORKSPACE")
if self.auth_credentials is None:
if debug:
print("[DEBUG] credentials not found; auth disabled")
else:
if debug:
print("[DEBUG] using credentials: {}".format(repr(self.auth_credentials)))
# save credentials to env for other Verta Client features
self.auth_credentials.export_env_vars_to_os()
# TODO: Perhaps these things should move into Connection as well?
back_end_url = urlparse(host)
socket = back_end_url.netloc + back_end_url.path.rstrip('/')
if port is not None:
warnings.warn("`port` (the second parameter) will be removed in a later version;"
" please combine it with the first parameter, e.g. \"localhost:8080\"",
category=FutureWarning)
socket = "{}:{}".format(socket, port)
scheme = back_end_url.scheme or ("https" if ".verta.ai" in socket else "http")
conn = _utils.Connection(scheme=scheme, socket=socket, max_retries=max_retries, ignore_conn_err=ignore_conn_err, credentials=self.auth_credentials, headers=extra_auth_headers)
# verify connection
if _connect:
conn.test()
self._conn = conn
self._conf = _utils.Configuration(use_git, debug)
self._ctx = _Context(self._conn, self._conf)
@property
def proj(self):
return self._ctx.proj
@property
def expt(self):
return self._ctx.expt
@property
def max_retries(self):
return self._conn.retry.total
@max_retries.setter
def max_retries(self, value):
self._conn.retry.total = value
@property
def ignore_conn_err(self):
return self._conn.ignore_conn_err
@ignore_conn_err.setter
def ignore_conn_err(self, value):
self._conn.ignore_conn_err = value
@property
def use_git(self):
return self._conf.use_git
@use_git.setter
def use_git(self, _):
"""This would mess with state in safe but unexpected ways."""
raise AttributeError("cannot set `use_git` after Client has been initialized")
@property
def debug(self):
return self._conf.debug
@debug.setter
def debug(self, value):
self._conf.debug = value
@property
def monitoring(self):
return MonitoringClient(self)
@property
def projects(self):
return Projects(self._conn, self._conf).with_workspace(self.get_workspace())
@property
def experiments(self):
return Experiments(self._conn, self._conf).with_workspace(self.get_workspace())
@property
def expt_runs(self):
return ExperimentRuns(self._conn, self._conf).with_workspace(self.get_workspace())
def _load_config(self):
with _config_utils.read_merged_config() as config:
self._config = config
def _set_from_config_if_none(self, var, resource_name):
if var is None:
var = self._config.get(resource_name)
if var:
print("setting {} from config file".format(resource_name))
return var or None
def _get_with_fallback(self, parameter, env_var=None, config_var=None):
if parameter:
return parameter
if env_var:
from_env = os.environ.get(env_var)
if from_env:
print("got {} from environment".format(env_var))
return from_env
if config_var:
from_config = self._config.get(config_var)
if from_config:
print("got {} from config file".format(config_var))
return from_config
else:
return None
@staticmethod
def _validate_visibility(visibility):
"""
Validates the value of `visibility`.
Parameters
----------
visibility : :mod:`~verta.visibility` or None
"""
# TODO: consider a decorator for create_*()s that validates common params
if (visibility is not None
and not isinstance(visibility, _visibility._Visibility)):
raise TypeError(
"`visibility` must be an object from `verta.visibility`,"
" not {}".format(type(visibility))
)
return visibility
def get_workspace(self):
"""
Gets the active workspace for this client instance.
.. versionadded:: 0.17.0
The active workspace is determined by this order of precedence:
1) value set in :meth:`~Client.set_workspace`
2) value set in client config file
3) default workspace set in web app
Returns
-------
workspace : str
Verta workspace.
"""
workspace = self._workspace
if not workspace:
workspace = self._config.get("workspace")
if not workspace:
workspace = self._conn.get_default_workspace()
return workspace
def set_workspace(self, workspace):
"""
Sets the active workspace for this client instance.
.. versionadded:: 0.17.0
Parameters
----------
workspace : str
Verta workspace.
"""
if not isinstance(workspace, six.string_types):
raise TypeError("`workspace` must be a string")
if not self._conn.is_workspace(workspace):
raise ValueError("workspace \"{}\" not found".format(workspace))
self._workspace = workspace
def get_project(self, name=None, workspace=None, id=None):
"""
Retrieves an already created Project. Only one of `name` or `id` can be provided.
Parameters
----------
name : str, optional
Name of the Project.
workspace : str, optional
Workspace under which the Project with name `name` exists. If not provided, the current
user's personal workspace will be used.
id : str, optional
ID of the Project. This parameter cannot be provided alongside `name`.
Returns
-------
:class:`~verta.tracking.entities.Project`
"""
if name is not None and id is not None:
raise ValueError("cannot specify both `name` and `id`")
name = self._set_from_config_if_none(name, "project")
if name is None and id is None:
raise ValueError("must specify either `name` or `id`")
if workspace is None:
workspace = self.get_workspace()
self._ctx = _Context(self._conn, self._conf)
self._ctx.workspace_name = workspace
if id is not None:
self._ctx.proj = Project._get_by_id(self._conn, self._conf, id)
self._ctx.populate()
else:
self._ctx.proj = Project._get_by_name(self._conn, self._conf, name, self._ctx.workspace_name)
if self._ctx.proj is None:
raise ValueError("Project not found")
return self._ctx.proj
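# For instance (names and id are placeholders):
#   proj = client.get_project("Fraud Detection", workspace="my-team")
#   proj = client.get_project(id="<project id>")   # lookup by id instead of name, never both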
def set_project(self, name=None, desc=None, tags=None, attrs=None, workspace=None, public_within_org=None, visibility=None, id=None):
"""
Attaches a Project to this Client.
If an accessible Project with name `name` does not already exist, it will be created
and initialized with specified metadata parameters. If such a Project does already exist,
it will be retrieved; specifying metadata parameters in this case will raise a warning.
If an Experiment is already attached to this Client, it will be detached.
Parameters
----------
name : str, optional
Name of the Project. If no name is provided, one will be generated.
desc : str, optional
Description of the Project.
tags : list of str, optional
Tags of the Project.
attrs : dict of str to {None, bool, float, int,
about.
:param id: The id of the round to retrieve."""
tournament_id = str(tournament_id)
id = str(id)
method = 'GET'
path = '/tournaments/{tournament_id}/rounds/{id}'
path_mapping = {
'tournament_id': tournament_id,
'id': id,
}
query_parameters = {
}
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return Round(**content)
def get_stages(self, tournament_id):
"""Retrieve all stages of a tournament.
Returns all stages of a tournament with basic information and settings.
:param tournament_id: The id of the tournament you want to retrieve data about."""
tournament_id = str(tournament_id)
method = 'GET'
path = '/tournaments/{tournament_id}/stages'
path_mapping = {
'tournament_id': tournament_id,
}
query_parameters = {
}
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Stage(**stage) for stage in content]
def get_stage(self, tournament_id, id):
"""Retrieve a single stage of a tournament.
Returns a stage with the given id with basic information and settings.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param id: The id of the stage to retrieve."""
tournament_id = str(tournament_id)
id = str(id)
method = 'GET'
path = '/tournaments/{tournament_id}/stages/{id}'
path_mapping = {
'tournament_id': tournament_id,
'id': id,
}
query_parameters = {
}
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return Stage(**content)
def get_standings(self, *, range: Range, tournament_ids: list, participant_ids: Optional[list] = None):
"""Retrieve a list of final standing items.
Returns a list of final standing items.
:param range: A range of requested items using the 'items' unit. The size of the range can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param tournament_ids: Only return tournaments for the given list of ids.
:param participant_ids: One or several participant ids involved in the standings to filter."""
tournament_ids = [str(e) for e in tournament_ids]
participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
method = 'GET'
path = '/standings'
path_mapping = {
}
query_parameters = {
'tournament_ids': tournament_ids,
}
if participant_ids:
query_parameters['participant_ids'] = participant_ids
if not range.unit:
range.unit = 'items'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [StandingItem(**item) for item in content]
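# Example call (the ids are placeholders, and Range is assumed to be constructed from
# start/end item indices, matching the get_header_value() usage above):
#   items = client.get_standings(range=Range(0, 49), tournament_ids=['1234567890'])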
def get_streams(self, tournament_id, *, range: Range, match_ids: Optional[list] = None):
"""Retrieves available streams.
Returns the streams of the given tournament.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param range: A range of requested items using the 'streams' unit. The size of the range can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param match_ids: A list of match ids to filter."""
tournament_id = str(tournament_id)
match_ids = [str(e) for e in match_ids] if match_ids else match_ids
method = 'GET'
path = '/tournaments/{tournament_id}/streams'
path_mapping = {
'tournament_id': tournament_id,
}
query_parameters = {
}
if match_ids:
query_parameters['match_ids'] = match_ids
if not range.unit:
range.unit = 'streams'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Stream(**stream) for stream in content]
def get_videos(self, tournament_id, *, range: Range, participant_ids: Optional[list] = None,
category: Optional[str] = None, sort: Optional[str] = None):
"""Retrieve videos of a tournament.
Returns the videos of the given tournament.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param range: A range of requested items using the 'videos' unit. The size of the range can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param participant_ids: One or several participant ids to filter.
:param category: The category of the videos.
:param sort: Sorts the collection in a particular order. "created_asc" sorts the videos from the oldest to the most recent one; "created_desc" sorts the videos from the most recent to the oldest one."""
tournament_id = str(tournament_id)
participant_ids = [str(e) for e in participant_ids] if participant_ids else participant_ids
method = 'GET'
path = '/tournaments/{tournament_id}/videos'
path_mapping = {
'tournament_id': tournament_id,
}
query_parameters = {
}
if participant_ids:
query_parameters['participant_ids'] = participant_ids
if category:
query_parameters['category'] = category
if sort:
query_parameters['sort'] = sort
if not range.unit:
range.unit = 'videos'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [VideoTournament(**video) for video in content]
def get_videos_by_match(self, tournament_id, match_id, *, category: Optional[str] = None,
sort: Optional[str] = None):
"""Retrieve videos of a match.
Returns the videos of the given match.
:param tournament_id: The id of the tournament you want to retrieve data about.
:param match_id: The id of the match to retrieve.
:param category: The category of the videos.
:param sort: Sorts the collection in a particular order. "created_asc" sorts the videos from the oldest to the most recent one; "created_desc" sorts the videos from the most recent to the oldest one."""
tournament_id = str(tournament_id)
match_id = str(match_id)
method = 'GET'
path = '/tournaments/{tournament_id}/matches/{match_id}/videos'
path_mapping = {
'tournament_id': tournament_id,
'match_id': match_id,
}
query_parameters = {
}
if category:
query_parameters['category'] = category
if sort:
query_parameters['sort'] = sort
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Video(**video) for video in content]
def get_tournaments_featured(self, *, range: Range, name: Optional[str] = None, disciplines: Optional[str] = None,
statuses: Optional[str] = None, scheduled_before: Optional[str] = None,
scheduled_after: Optional[str] = None, countries: Optional[str] = None,
platforms: Optional[str] = None, is_online: Optional[int] = None,
sort: Optional[str] = None):
"""Retrieve published featured tournaments.
Returns a collection of published featured tournaments.
:param range: A range of requested items using the 'tournaments' unit. The size of the range can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
:param name: The string to be looked for in the name or full name.
:param disciplines: One or several disciplines to filter.
:param statuses: One or several tournament statuses to filter.
:param scheduled_before: A date to include all tournaments scheduled to take place before or at the date, in ISO 8601 format (only the date part, with YYYY-MM-DD pattern).
:param scheduled_after: A date to include all tournaments scheduled to take place after or at the date, in ISO 8601 format (only the date part, with YYYY-MM-DD pattern).
:param countries: One or several countries to filter in ISO 3166-1 alpha-2 country codes format (some codes may not be supported)
:param platforms: One or several platforms to filter.
:param is_online: Whether the tournament is played online.
:param sort: Sorts the collection in a particular order. "scheduled_asc" sorts the tournaments by scheduled date from the oldest to the most recent one; "scheduled_desc" sorts the tournaments by scheduled date from the most recent to the oldest one."""
method = 'GET'
path = '/tournaments/featured'
path_mapping = {
}
query_parameters = {
}
if name:
query_parameters['name'] = name
if disciplines:
query_parameters['disciplines'] = disciplines
if statuses:
query_parameters['statuses'] = statuses
if scheduled_before:
query_parameters['scheduled_before'] = scheduled_before
if scheduled_after:
query_parameters['scheduled_after'] = scheduled_after
if countries:
query_parameters['countries'] = countries
if platforms:
query_parameters['platforms'] = platforms
if is_online is not None:
query_parameters['is_online'] = is_online
if sort:
query_parameters['sort'] = sort
if not range.unit:
range.unit = 'tournaments'
headers = {
'Range': range.get_header_value(),
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return [Tournament(**tour) for tour in content]
def get_tournament(self, id):
"""Retrieve a single tournament.
Returns a tournament identified with the given id.
:param id: The id of the tournament to retrieve."""
id = str(id)
method = 'GET'
path = '/tournaments/{id}'
path_mapping = {
'id': id,
}
query_parameters = {
}
headers = {
}
content = self._simple_access(method, path, path_parameters = path_mapping, query_parameters = query_parameters,
headers = headers)
return TournamentDetailed(**content)
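# Illustrative usage sketch (not part of the client; the client class name and
# the Range constructor arguments below are assumptions based on how the
# methods in this module use them):
#   client = ToornamentViewerClient(api_key)          # hypothetical name
#   featured = client.get_tournaments_featured(range=Range(0, 49), is_online=1)
#   detail = client.get_tournament(featured[0].id)
# When range.unit is left unset, the paginated methods above default it to the
# unit named in their docstring before building the 'Range' header.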
def get_tournaments_by_playlist(self, id, *, range: Range, name: Optional[str] = None,
disciplines: Optional[str] = None, statuses: Optional[str] = None,
scheduled_before: Optional[str] = None, scheduled_after: Optional[str] = None,
countries: Optional[str] = None, platforms: Optional[str] = None,
is_online: Optional[int] = None, sort: Optional[str] = None):
"""Retrieve the tournaments of a playlist.
Returns the tournaments of a playlist.
:param range: A range of requested items using the 'tournaments' unit. The size of the range can not exceed 50. (see [Pagination](https://developer.toornament.com/v2/overview/pagination))
1) % len(self.allowed_freqs)
scan_freq = list(sorted(self.allowed_freqs))[self.scan_idx]
self.Log('scanning %d MHz (%d/%d)', scan_freq, self.scan_idx + 1,
len(self.allowed_freqs))
RunProc(callback=self._ScanResults,
args=['wifi', 'scan', '-b', BandForFreq(scan_freq),
'--scan-freq', str(scan_freq), '--scan-ap-force',
'--scan-passive'])
chan_interval = opt.scan_interval / len(self.allowed_freqs)
# Randomly fiddle with the timing to avoid permanent alignment with
# other nodes also doing scans. If we're perfectly aligned with
# another node, they might never see us in their periodic scan.
chan_interval = random.uniform(chan_interval * 0.5, chan_interval * 1.5)
self.next_scan_time += chan_interval
if not self.scan_idx:
log.WriteEventFile('%s.scanned' % self.vdevname)
if not opt.scan_interval:
self.next_scan_time = None
def UpdateStationInfo(self):
# These change in the background, not as the result of a scan
if self.primary:
RunProc(callback=self._SurveyResults,
args=['iw', 'dev', self.vdevname, 'survey', 'dump'])
RunProc(callback=self._AssocResults,
args=['iw', 'dev', self.vdevname, 'station', 'dump'])
def WriteApListFile(self):
"""Write out a file of known APs."""
ap_list = []
for peer in self.peer_list.itervalues():
if peer.me.mac not in self.bss_list:
continue
bssid = helpers.DecodeMAC(peer.me.mac)
b = self.bss_list[peer.me.mac]
txt = 'bssid:%s|freq:%d|cap:0x%x|phy:%d|reg:%s|rssi:%s|last_seen:%d'
s = txt % (bssid, b.freq, b.cap, b.phy, b.reg, b.rssi, b.last_seen)
ap_list.append(s)
content = '\n'.join(ap_list)
if AP_LIST_FILE[0]:
filename = AP_LIST_FILE[0] + '.' + self.vdevname
helpers.WriteFileAtomic(filename, content)
def WritePeerApInfoFile(self, peer_data):
"""Writes files containing signal strength information.
The files contain other access points' data about their peers;
these are named PeerAPs.{interface}.
Args:
peer_data: dict mapping each peer MAC address to the list of BSS entries reported by that peer.
"""
peer_ap_list = []
for peer_mac_addr in peer_data:
for b in peer_data[peer_mac_addr]:
peer_ap = helpers.DecodeMAC(b.mac)
txt = ('peer:%s|bssid:%s|freq:%d|cap:0x%x|phy:%d|reg:%s|rssi:%s'
'|last_seen:%d|flags:0x%x')
if all(c in string.printable for c in b.reg):
reg = b.reg
else:
reg = ''
s = txt % (peer_mac_addr, peer_ap, b.freq, b.cap, b.phy, reg, b.rssi,
b.last_seen, b.flags)
peer_ap_list.append(s)
content = '\n'.join(peer_ap_list)
if PEER_AP_LIST_FILE[0]:
filename = PEER_AP_LIST_FILE[0] + '.' + self.vdevname
helpers.WriteFileAtomic(filename, content)
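# Example of a single line produced by the format string above (all values are
# made up for illustration):
#   peer:f4:f5:e8:00:00:01|bssid:f4:f5:e8:00:00:02|freq:5180|cap:0x431|phy:8|reg:US|rssi:-61.0|last_seen:1500000000|flags:0x3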
def WriteJsonSignals(self):
"""Writes set of files containing JSON formatted signal data.
The files are about the signal strength of other access points
as seen by this access point (ap_signals) and the signal strength
of this access point as seen by other access points (self_signals).
These two files are in the signals_json directory.
"""
signal_dir = os.path.join(opt.status_dir, 'signals_json')
self_signals_file = os.path.join(signal_dir, 'self_signals')
ap_signals_file = os.path.join(signal_dir, 'ap_signals')
try:
os.makedirs(signal_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.self_signals:
helpers.WriteFileAtomic(self_signals_file, json.dumps(self.self_signals))
if self.ap_signals:
helpers.WriteFileAtomic(ap_signals_file, json.dumps(self.ap_signals))
def ShouldAutoDisable(self):
"""Returns MAC address of high-powered peer if we should auto disable."""
if self.flags & wgdata.ApFlags.HighPower:
self.Debug('high-powered AP: never auto-disable')
return None
for peer in sorted(self.peer_list.values(), key=lambda p: p.me.mac):
self.Debug('considering auto disable: peer=%s',
helpers.DecodeMAC(peer.me.mac))
if peer.me.mac not in self.bss_list:
self.Debug('--> peer no match')
else:
bss = self.bss_list[peer.me.mac]
peer_age_secs = time.time() - peer.me.now
scan_age_secs = time.time() - bss.last_seen
peer_power = peer.me.flags & wgdata.ApFlags.HighPower
# TODO(apenwarr): overlap should consider only our *current* band.
# This isn't too important right away since high powered APs
# are on all bands simultaneously anyway.
overlap = self.flags & peer.me.flags & wgdata.ApFlags.Can_Mask
self.Debug('--> peer matches! p_age=%.3f s_age=%.3f power=0x%x '
'band_overlap=0x%02x', peer_age_secs, scan_age_secs,
peer_power, overlap)
if bss.rssi <= opt.auto_disable_threshold:
self.Debug('--> peer is far away, keep going.')
elif not peer_power:
self.Debug('--> peer is not high-power, keep going.')
elif not overlap:
self.Debug('--> peer does not overlap our band, keep going.')
elif (peer_age_secs > opt.tx_interval * 4 or
(opt.scan_interval and scan_age_secs > opt.scan_interval * 4)):
self.Debug('--> peer is too old, keep going.')
else:
self.Debug('--> peer overwhelms us, shut down.')
return peer.me.mac
return None
def MaybeAutoDisable(self):
"""Writes/removes the auto-disable file based on ShouldAutoDisable()."""
ad = self.ShouldAutoDisable()
if ad and self.auto_disabled != ad:
self.Log('auto-disabling because of %s', helpers.DecodeMAC(ad))
helpers.WriteFileAtomic(self.Filename('disabled'), helpers.DecodeMAC(ad))
elif self.auto_disabled and not ad:
self.Log('auto-enabling because %s disappeared',
helpers.DecodeMAC(self.auto_disabled))
helpers.Unlink(self.Filename('disabled'))
self.auto_disabled = ad
def _ChooseChannel(self, state, candidates, hysteresis_freq):
"""Recommend a wifi channel for a particular set of constraints."""
spreading = helpers.Experiment('WifiPrimarySpreading')
combos = autochannel.LegalCombos(self.allowed_freqs, candidates)
use_active_time = helpers.Experiment('WifiUseActiveTime')
cc = autochannel.SoloChooseChannel(state,
candidates=combos,
use_primary_spreading=spreading,
use_active_time=use_active_time,
hysteresis_freq=hysteresis_freq)
self.Log('%s', cc)
return cc.primary_freq
def ChooseChannel(self):
"""Recommend a wifi channel for this device."""
freqs = list(sorted(self.allowed_freqs))
self.Log('Freqs: %s', ' '.join(str(f) for f in freqs))
apc = ''
for freq in freqs:
apc += '%s ' % len([bss for bss in self.bss_list.values()
if bss.freq == freq])
self.Log('APcounts: %s', apc)
busy = ''
for freq in freqs:
cs = self.channel_survey_list.get(freq, None)
if cs:
frac = cs.busy_ms * 100 / (cs.observed_ms + 1)
busy += '%s%d ' % (
('*'
if cs.observed_ms < autochannel.AIRTIME_THRESHOLD_MS else ''), frac
)
else:
busy += '*0 '
self.Log('Busy%%: %s', busy)
state = self.GetState()
candidates_free = []
if self.flags & wgdata.ApFlags.Can2G:
if helpers.Experiment('WifiChannelsLimited2G'):
candidates2g = autochannel.C_24MAIN
else:
candidates2g = autochannel.C_24ANY
candidates_free += candidates2g
self.autochan_2g = self._ChooseChannel(
state, candidates2g, self.autochan_2g)
WriteFileIfMissing(self.Filename('autochan_2g.init'),
str(self.autochan_2g))
helpers.WriteFileAtomic(self.Filename('autochan_2g'),
str(self.autochan_2g))
if self.flags & wgdata.ApFlags.Can5G:
candidates5g = []
if helpers.Experiment('WifiLowIsHigh'):
# WifiLowIsHigh means to treat low-powered channels as part of the
# high-powered category. Newer FCC rules allow high power
# transmission on the previously low-powered channels, but not all
# devices support it.
candidates5g += autochannel.C_5LOW + autochannel.C_5HIGH
elif opt.high_power:
candidates5g += autochannel.C_5HIGH
else:
candidates5g += autochannel.C_5LOW
if helpers.Experiment('WifiUseDFS'):
candidates5g += autochannel.C_5DFS
candidates_free += candidates5g
self.autochan_5g = self._ChooseChannel(
state, candidates5g, self.autochan_5g)
WriteFileIfMissing(self.Filename('autochan_5g.init'),
str(self.autochan_5g))
helpers.WriteFileAtomic(self.Filename('autochan_5g'),
str(self.autochan_5g))
self.autochan_free = self._ChooseChannel(
state, candidates_free, self.autochan_free)
WriteFileIfMissing(self.Filename('autochan_free.init'),
str(self.autochan_free))
helpers.WriteFileAtomic(self.Filename('autochan_free'),
str(self.autochan_free))
self.Log('Recommended freqs: %d %d -> %d', self.autochan_2g,
self.autochan_5g, self.autochan_free)
log.WriteEventFile('autochan_done')
def _ReadArpTable(self):
"""Reads the kernel's ARP entries."""
now = time.time()
try:
f = open('/proc/net/arp', 'r', 64 * 1024)
except IOError as e:
self.Log('arp table missing: %s', e)
return
data = f.read(64 * 1024)
lines = data.split('\n')[1:] # skip header line
for line in lines:
g = re.match(r'(\d+\.\d+\.\d+\.\d+)\s+.*\s+'
r'(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})', line)
if g:
ip = helpers.EncodeIP(g.group(1))
mac = helpers.EncodeMAC(g.group(2))
self.arp_list[mac] = wgdata.ARP(ip=ip, mac=mac, last_seen=now)
self.Debug('arp %r', self.arp_list[mac])
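# For reference, a typical /proc/net/arp line looks like this (sample values
# only; the exact column spacing varies by kernel):
#   192.168.1.23     0x1         0x2         f4:f5:e8:00:00:01     *        wlan0
# The regex above only extracts the leading IPv4 address and the MAC address,
# which are stored as a wgdata.ARP entry keyed by the encoded MAC.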
def _PhyResults(self, errcode, stdout, stderr):
"""Callback for 'iw phy xxx info' results."""
self.Debug('phy %r err:%r stdout:%r stderr:%r', self.phyname, errcode,
stdout[:70], stderr)
if errcode: return
for line in stdout.split('\n'):
line = line.strip()
g = re.match(r'\* (\d+) MHz \[(\d+)\] \((.*)\)', line)
if g:
freq = int(g.group(1))
chan = int(g.group(2))
disabled = (g.group(3) == 'disabled')
self.Debug('phy freq=%d chan=%d disabled=%d', freq, chan, disabled)
if not disabled:
band = BandForFreq(freq)
if band == '2.4':
self.flags |= wgdata.ApFlags.Can2G
elif band == '5':
self.flags |= wgdata.ApFlags.Can5G
self.allowed_freqs.add(freq)
freq_to_chan[freq] = chan
chan_to_freq[chan] = freq
def _DevResults(self, errcode, stdout, stderr):
"""Callback for 'iw dev xxx info' results."""
self.Debug('dev err:%r stdout:%r stderr:%r', errcode, stdout[:70], stderr)
if errcode: return
for line in stdout.split('\n'):
line = line.strip()
g = re.match(r'addr (([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})', line)
if g:
self.mac = helpers.EncodeMAC(g.group(1))
continue
g = re.match(r'ssid (.*)', line)
if g:
self.ssid = g.group(1)
def _ScanResults(self, errcode, stdout, stderr):
"""Callback for 'iw scan' results."""
self.Debug('scan err:%r stdout:%r stderr:%r', errcode, stdout[:70], stderr)
if errcode: return
now = time.time()
mac = freq = rssi = last_seen = None
reg = ''
flags = cap = phy = 0
def AddEntry():
if mac:
is_ours = False # TODO(apenwarr): calc from received waveguide packets
bss = wgdata.BSS(is_ours=is_ours,
freq=freq,
mac=mac,
rssi=rssi,
flags=flags,
last_seen=last_seen,
cap=cap,
phy=phy,
reg=reg)
if mac not in self.bss_list:
self.Debug('Added: %r', bss)
self.bss_list[mac] = bss
for line in stdout.split('\n'):
line = line.strip()
g = re.match(r'BSS (([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})', line)
if g:
AddEntry()
mac = freq = rssi = last_seen = None
reg = ''
flags = cap = phy = 0
mac = helpers.EncodeMAC(g.group(1))
g = re.match(r'freq: (\d+)', line)
if g:
freq = int(g.group(1))
g = re.match(r'signal: ([-\d.]+) dBm', line)
if g:
rssi = float(g.group(1))
g = re.match(r'last seen: (\d+) ms ago', line)
if g:
last_seen = now - float(g.group(1)) / 1000
g = re.match(r'capability: .* \((\S+)\)', line)
if g:
cap = int(g.group(1), 0)
g = re.match(r'HT capabilities:', line)
if g:
phy = max(phy, 7) # dot11_phy_type_ht = 7
g = re.match(r'VHT capabilities:', line)
if g:
phy = max(phy, 8) # dot11_phy_type_vht = 8
g = re.match(r'Country: (\S\S) ', line)
if g:
reg = str(g.group(1))
AddEntry()
self.MaybeAutoDisable()
self.WriteApListFile()
def _SurveyResults(self, errcode, stdout, stderr):
"""Callback for 'iw survey dump' results."""
self.Debug('survey err:%r stdout:%r stderr:%r', errcode, stdout[:70],
stderr)
if errcode: return
freq = None
noise = active_ms = busy_ms = rx_ms = tx_ms = 0
def AddEntry():
if freq:
# TODO(apenwarr): ath9k: rx_ms includes all airtime, not just ours.
# tx_ms is only time *we* were transmitting, so
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 4/20/21-14:03
# @Author : TuringEmmy
# @Email : <EMAIL>
# @WeChat : superior_god
# @File : model.py
# @Project : 00PythonProjects
import os
import timeit
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
from model.BP import BPNeuralNetwork
from model.MLP import HiddenLayer
mm = MinMaxScaler()
def MaxMinNormalization(x):
mm_data = mm.fit_transform(x)
return mm_data
class RBM(object):
"""A Restricted Boltzmann Machines class"""
def __init__(self, inpt=None, n_visiable=784, n_hidden=500, W=None,
hbias=None, vbias=None):
"""
:param inpt: Tensor, the input tensor [None, n_visiable]
:param n_visiable: int, number of visible units
:param n_hidden: int, number of hidden units
:param W, hbias, vbias: Tensor, the parameters of RBM (tf.Variable)
"""
self.n_visiable = n_visiable
self.n_hidden = n_hidden
# Optionally initialize input
if inpt is None:
inpt = tf.placeholder(dtype=tf.float32, shape=[None, self.n_visiable])
self.input = inpt
# Initialize the parameters if not given
if W is None:
bounds = 4.0 * np.sqrt(6.0 / (self.n_visiable + self.n_hidden))
W = tf.Variable(tf.random_uniform([self.n_visiable, self.n_hidden], minval=-bounds,
maxval=bounds), dtype=tf.float32)
if hbias is None:
hbias = tf.Variable(tf.zeros([self.n_hidden, ]), dtype=tf.float32)
if vbias is None:
vbias = tf.Variable(tf.zeros([self.n_visiable, ]), dtype=tf.float32)
self.W = W
self.hbias = hbias
self.vbias = vbias
# keep track of parameters for training (DBN)
self.params = [self.W, self.hbias, self.vbias]
def propup(self, v):
"""Compute the sigmoid activation for hidden units given visible units"""
return tf.nn.sigmoid(tf.matmul(v, self.W) + self.hbias)
def propdown(self, h):
"""Compute the sigmoid activation for visible units given hidden units"""
return tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.W)) + self.vbias)
def sample_prob(self, prob):
"""Do sampling with the given probability (you can use binomial in Theano)"""
return tf.nn.relu(tf.sign(prob - tf.random_uniform(tf.shape(prob))))
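# Note: tf.sign(prob - u) with u ~ Uniform(0, 1) is +1 with probability `prob`
# and -1 otherwise, so tf.nn.relu turns it into a per-unit 0/1 Bernoulli
# sample. This mirrors the Theano binomial sampling mentioned in the docstring.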
def sample_h_given_v(self, v0_sample):
"""Sampling the hidden units given visiable sample"""
h1_mean = self.propup(v0_sample)
h1_sample = self.sample_prob(h1_mean)
return (h1_mean, h1_sample)
def sample_v_given_h(self, h0_sample):
"""Sampling the visiable units given hidden sample"""
v1_mean = self.propdown(h0_sample)
v1_sample = self.sample_prob(v1_mean)
return (v1_mean, v1_sample)
def gibbs_vhv(self, v0_sample):
"""Implement one step of Gibbs sampling from the visiable state"""
h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return (h1_mean, h1_sample, v1_mean, v1_sample)
def gibbs_hvh(self, h0_sample):
"""Implement one step of Gibbs sampling from the hidden state"""
v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return (v1_mean, v1_sample, h1_mean, h1_sample)
def free_energy(self, v_sample):
"""Compute the free energy"""
wx_b = tf.matmul(v_sample, self.W) + self.hbias
vbias_term = tf.squeeze(tf.matmul(v_sample, tf.expand_dims(self.vbias, axis=1)), axis=1)  # shape [batch], matches hidden_term
hidden_term = tf.reduce_sum(tf.log(1.0 + tf.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
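# The quantity computed here is the standard RBM free energy
#   F(v) = -v . vbias - sum_j log(1 + exp(W_j . v + hbias_j)),
# since log(1 + exp(x)) is softplus(x); lower free energy corresponds to a
# higher unnormalized probability of the visible configuration v.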
def get_train_ops(self, learning_rate=0.1, k=1, persistent=None):
"""
Get the training opts by CD-k
:params learning_rate: float
:params k: int, the number of Gibbs steps (note that k=1 has been shown to work surprisingly well)
:params persistent: Tensor, PCD-k (TO DO:)
"""
# Compute the positive phase
ph_mean, ph_sample = self.sample_h_given_v(self.input)
# The old state of the chain
if persistent is None:
chain_start = ph_sample
else:
chain_start = persistent
# Use tf.while_loop to do the CD-k
cond = lambda i, nv_mean, nv_sample, nh_mean, nh_sample: i < k
body = lambda i, nv_mean, nv_sample, nh_mean, nh_sample: (i + 1,) + self.gibbs_hvh(nh_sample)
i, nv_mean, nv_sample, nh_mean, nh_sample = tf.while_loop(cond, body, loop_vars=[tf.constant(0),
tf.zeros(tf.shape(self.input)),
tf.zeros(tf.shape(self.input)),
tf.zeros(
tf.shape(chain_start)),
chain_start])
"""
# Compute the update values for each parameter
update_W = self.W + learning_rate * (tf.matmul(tf.transpose(self.input), ph_mean) -
tf.matmul(tf.transpose(nv_sample), nh_mean)) / tf.to_float(tf.shape(self.input)[0]) # use probability
update_vbias = self.vbias + learning_rate * (tf.reduce_mean(self.input - nv_sample, axis=0)) # use binary value
update_hbias = self.hbias + learning_rate * (tf.reduce_mean(ph_mean - nh_mean, axis=0)) # use probability
# Assign the parameters new values
new_W = tf.assign(self.W, update_W)
new_vbias = tf.assign(self.vbias, update_vbias)
new_hbias = tf.assign(self.hbias, update_hbias)
"""
chain_end = tf.stop_gradient(nv_sample) # do not compute the gradients
cost = tf.reduce_mean(self.free_energy(self.input)) - tf.reduce_mean(self.free_energy(chain_end))
# Compute the gradients
gparamm = tf.gradients(ys=[cost], xs=self.params)
gparams = []
for i in range(len(gparamm)):
gparams.append(tf.clip_by_value(gparamm[i], clip_value_min=-1, clip_value_max=1))
new_params = []
for gparam, param in zip(gparams, self.params):
new_params.append(tf.assign(param, param - gparam * learning_rate))
if persistent is not None:
new_persistent = [tf.assign(persistent, nh_sample)]
else:
new_persistent = []
return new_params + new_persistent # use for training
def get_reconstruction_cost(self):
"""Compute the cross-entropy of the original input and the reconstruction"""
activation_h = self.propup(self.input)
activation_v = self.propdown(activation_h)
# Clip the activations so the logs below never produce NaN
activation_v_clip = tf.clip_by_value(activation_v, clip_value_min=1e-10, clip_value_max=1)
reduce_activation_v_clip = tf.clip_by_value(1.0 - activation_v, clip_value_min=1e-10, clip_value_max=1)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(
self.input * (tf.log(activation_v_clip)) + (1.0 - self.input) * (tf.log(reduce_activation_v_clip)), axis=1))
# cross_entropy = tf.reduce_mean(tf.reduce_sum(self.input * (tf.log(activation_v_clip)) + (1.0 - self.input) * (tf.log(reduce_activation_v_clip)), axis=1))
# cross_entropy = tf.sqrt(tf.reduce_mean(tf.square(self.input - activation_v)))
return cross_entropy
def reconstruct(self, v):
"""Reconstruct the original input by RBM"""
h = self.propup(v)
return self.propdown(h)
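# Minimal standalone training sketch for the RBM class above (assumptions: the
# TF1 graph/session API used throughout this file, and `data` being a float32
# numpy array scaled to [0, 1]):
#   rbm = RBM(n_visiable=data.shape[1], n_hidden=128)
#   train_ops = rbm.get_train_ops(learning_rate=0.1, k=1)
#   recon_cost = rbm.get_reconstruction_cost()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       for epoch in range(10):
#           sess.run(train_ops, feed_dict={rbm.input: data})
#           print(epoch, sess.run(recon_cost, feed_dict={rbm.input: data}))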
# Build the model
class DBN(object):
"""
An implementation of a deep belief network (DBN).
The hidden layers are first pretrained as RBMs; the DBN is then treated as a
normal MLP by adding an output layer.
"""
def __init__(self, n_in=784, n_out=10, hidden_layers_sizes=[500, 500]):
"""
:param n_in: int, the dimension of input
:param n_out: int, the dimension of output
:param hidden_layers_sizes: list or tuple, the hidden layer sizes
"""
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Number of layers
assert len(hidden_layers_sizes) > 0
self.n_layers = len(hidden_layers_sizes)
self.layers = [] # normal sigmoid layer
self.rbm_layers = [] # RBM layer
self.params = [] # keep track of params for training
# Define the input and output
self.x = tf.placeholder(tf.float32, shape=[None, n_in])
self.y = tf.placeholder(tf.float32, shape=[None, n_out])
# Construct the layers of the DBN
with tf.name_scope('DBN_layer'):
for i in range(self.n_layers):
if i == 0:
layer_input = self.x
input_size = n_in
else:
layer_input = self.layers[i - 1].output
input_size = hidden_layers_sizes[i - 1]
# Sigmoid layer
with tf.name_scope('internel_layer'):
sigmoid_layer = HiddenLayer(input=layer_input, n_in=input_size, n_out=hidden_layers_sizes[i],
activation=tf.nn.sigmoid)
self.layers.append(sigmoid_layer)
# Add the parameters for finetuning
self.params.extend(sigmoid_layer.params)
# Create the RBM layer
with tf.name_scope('rbm_layer'):
self.rbm_layers.append(RBM(inpt=layer_input, n_visiable=input_size, n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W, hbias=sigmoid_layer.b))
# We use the BP layer as the output layer
with tf.name_scope('output_layer'):
self.output_layer = BPNeuralNetwork(inpt=self.layers[-1].output, n_in=hidden_layers_sizes[-1],
n_out=n_out)
self.params.extend(self.output_layer.params)
# The finetuning cost
with tf.name_scope('output_loss'):
self.cost = self.output_layer.cost(self.y)
# The accuracy
self.accuracy = self.output_layer.accuarcy(self.y)
def pretrain(self, sess, train_x, batch_size=2, pretraining_epochs=20, lr=0.01, k=1,
display_step=10):
"""
Pretrain the layers (just train the RBM layers)
:param sess: tf.Session
:param train_x: the input of the training set (you might modify this function if you do not use the designed mnist pipeline)
:param batch_size: int
:param lr: float
:param k: int, use CD-k
:param pretraining_epochs: int
:param display_step: int
"""
print('Starting pretraining...\n')
start_time = timeit.default_timer()
# Pretrain layer by layer
for i in range(self.n_layers):
cost = self.rbm_layers[i].get_reconstruction_cost()
train_ops = self.rbm_layers[i].get_train_ops(learning_rate=lr, k=k, persistent=None)
batch_num = int(train_x.shape[0] / batch_size)
for epoch in range(pretraining_epochs):
avg_cost = 0.0
for step in range(batch_num - 1):
# Train
x_batch = train_x[step * batch_size:(step + 1) * batch_size]
sess.run(train_ops, feed_dict={self.x: x_batch})
# Compute the cost
avg_cost += sess.run(cost, feed_dict={self.x: x_batch, }) / batch_num
# print(avg_cost)
# Print progress
if epoch % display_step == 0:
print("\tPretraing layer {0} Epoch {1} cost: {2}".format(i, epoch, avg_cost))
end_time = timeit.default_timer()
print("\nThe pretraining process ran for {0} minutes".format((end_time - start_time) / 60))
def finetuning(self, sess, train_x, train_y, test_x, test_y, training_epochs=20, batch_size=50, lr=0.1,
display_step=1):
"""
Fine-tune the network
"""
accu = []
accuu = []
print("\nStart finetuning...\n")
start_time = timeit.default_timer()
train_op = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(self.cost)
batch_num = int(train_x.shape[0] / batch_size)
# merged = tf.summary.merge_all()
# writer = tf.summary.FileWriter("logs", sess.graph)
for epoch in range(training_epochs):
avg_cost = 0.0
for step in range(batch_num - 1):
x_batch = train_x[step * batch_size:(step + 1) * batch_size]
y_batch = train_y[step * batch_size:(step + 1) * batch_size]
# Train
sess.run(train_op, feed_dict={self.x: x_batch, self.y: y_batch})
# Compute the cost
avg_cost += sess.run(self.cost, feed_dict={self.x: x_batch, self.y: y_batch}) / batch_num
# Print progress
if epoch % display_step == 0:
val_acc = sess.run(self.accuracy, feed_dict={self.x: test_x, self.y: test_y})
# accu.append(val_acc)
# accuu.append(avg_cost)
print("\tEpoch {0} cost: {1} accuracy:{2}".format(epoch, avg_cost, val_acc))
# result = sess.run(merged, feed_dict={self.x: test_x, self.y: test_y}) # 输出
# writer.add_summary(result, epoch)
end_time = timeit.default_timer()
print("\nThe finetuning process ran for {0} minutes".format((end_time - start_time) / 60))
# y_aix = np.array(accu)
# y_aix1=np.array(accuu)
# x_aix = np.transpose(np.arange(1, 6))
# plt.plot(x_aix, y_aix,label="predict")
# plt.plot(x_aix,y_aix1,label="real")
# plt.savefig("E:\\高若涵计算机毕设\\DBN_predict_performance\\picture\\test_p30_f3.jpg")
# plt.show()
def predict(self, sess, x_test=None):
print("\nStart predict...\n")
# predict_model = theano.function(
# inputs=[self.params],
# outputs=self.output_layer.y_pre)
dbn_y_pre_temp = sess.run(self.output_layer.output, feed_dict={self.x: x_test})
# print(dbn_y_pre_temp)
dbn_y_pre = pd.DataFrame(mm.inverse_transform(dbn_y_pre_temp))
dbn_y_pre.to_csv('NSW_06.csv')
print("\nPredict over...\n")
# Output prediction results with SVR
def
# -*- coding:utf-8 -*-
__author__ = "<NAME>"
import re
import sys
import time
import pandas
import xlrd
import xlwt
from PySide2 import QtCore, QtWidgets
from xlutils.copy import copy
from pyautogui import alert
class MyWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.Step1Btn = QtWidgets.QPushButton("Generate OMS Sheet using oa.xlsx")
self.Step2Btn = QtWidgets.QPushButton("Generate OCH Sheet using ocp.xlsx")
self.Step3Btn = QtWidgets.QPushButton("Generate OPS Sheet using osc.xlsx")
self.genButton = QtWidgets.QPushButton("Generate!")
self.exitButton = QtWidgets.QPushButton("Exit")
self.text = QtWidgets.QLabel("自动生成烽火系统运维表格 V1.0\n\nby 张梓扬 <NAME>\n\n2021年8月 第一版\n\n请自行合并软件根目录下的三个文件")
self.text.setAlignment(QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.text)
self.layout.addWidget(self.Step1Btn)
self.layout.addWidget(self.Step2Btn)
self.layout.addWidget(self.Step3Btn)
self.layout.addWidget(self.genButton)
self.layout.addWidget(self.exitButton)
self.setLayout(self.layout)
self.Step1Btn.clicked.connect(genNewOA)
self.Step2Btn.clicked.connect(genNewNewOCP)
self.Step3Btn.clicked.connect(genNewNewOSC)
self.genButton.clicked.connect(wholeProcess)
self.exitButton.clicked.connect(app.exit)
def genNewOA():
time_start = time.time()
oa_Ori = pandas.read_excel("oa.xlsx", sheet_name=0, header=0)
newOA = xlwt.Workbook()
# ====================================== Run the regex matching and collapse redundant entries ====================================================
oaDirectSlotMatch = []
oa_nameReg = "\:"
oa_noReg_1 = '\['
oa_noReg_2 = "\:\:"
regNum = "[-+]?\d+.?\d*"
for i in range(0, len(oa_Ori)):
oaDirectSlotMatch.append(
re.split(oa_nameReg, oa_Ori[1][i])[-1] + ":" + re.split(oa_noReg_1, oa_Ori[2][i])[0] + ":" +
re.split(oa_noReg_2, oa_Ori[2][i])[1])
oaDirectSlotMatch = sorted(set(oaDirectSlotMatch), key=oaDirectSlotMatch.index)
sheet1 = newOA.add_sheet('烽火波分OMS检查(每月)', cell_overwrite_ok=True)
sheet1_title = ["网元名", "方向/槽位", "输入光功率", "输出光功率", "VOA", "处理建议", "备注"]
# =================================== Write the spreadsheet ==========================================
# Write the header row
for i in range(0, len(sheet1_title)):
sheet1.write(0, i, sheet1_title[i])
# Write the OA direction/slot info
for i in range(0, len(oaDirectSlotMatch)):
sheet1.write(i + 1, 1, oaDirectSlotMatch[i])
# Fill in the data by looking up the matching row
for i in range(0, len(oa_Ori)):
index = oaDirectSlotMatch.index(
re.split(oa_nameReg, oa_Ori[1][i])[-1] + ":" + re.split(oa_noReg_1, oa_Ori[2][i])[0] + ":" +
re.split(oa_noReg_2, oa_Ori[2][i])[1])
if oa_Ori[6][i] == "IOP":
sheet1.write(index + 1, 2, oa_Ori[7][i])
elif oa_Ori[6][i] == "OOP":
sheet1.write(index + 1, 3, oa_Ori[7][i])
elif oa_Ori[6][i] == "VOA_ATT":
sheet1.write(index + 1, 4, oa_Ori[7][i])
else:
continue
newOA.save("genNewOA.xls")
newOA = pandas.read_excel("genNewOA.xls", sheet_name="烽火波分OMS检查(每月)", header=0)
newOA.sort_values(by=['方向/槽位'], inplace=True)
newOA.reset_index(drop=True, inplace=True)
# If there are both a PA and an OA receiving from the same direction and the other fields match, drop the OA row
newOA_temp = newOA.copy(deep=True)
for i in range(0, len(newOA)-1):
if 'OA' in newOA.iat[i, 1] and 'OTM' in newOA.iat[i, 1]:
if re.findall("[收|发][\u4e00-\u9fa5]*", newOA.iat[i, 1]) == re.findall("[收|发][\u4e00-\u9fa5]*", newOA.iat[i+1, 1]):
if 'PA' in newOA.iat[i+1, 1]:
newOA_temp.drop([i, i], inplace=True)
else:
continue
else:
continue
else:
continue
newOA_temp.reset_index(drop=True, inplace=True)
# Flag directions/slots that need attention (VOA of 3 dB or less)
for i in range(0, len(newOA_temp)):
if str(newOA_temp['VOA'][i]) == 'nan':
continue
elif float(re.findall(regNum, newOA_temp['VOA'][i])[0]) <= 3:
newOA_temp.iat[i, 5] = '注意!'
else:
continue
print("genNewOA: 已去除其它字段相等时,有PA的OA内容")
warning = newOA_temp.copy(deep=True)
for i in range(0, len(newOA_temp)):
if newOA_temp.iat[i, 5] != '注意!':
warning.drop([i, i], inplace=True)
else:
continue
with pandas.ExcelWriter('./genNewOA.xls') as writer:
newOA_temp.to_excel(writer, encoding='utf-8', sheet_name='烽火波分OMS检查(每月)', index=False)
warning.to_excel(writer, encoding='utf-8', sheet_name='烽火波分OMS警告', index=False)
time_end = time.time()
print("genNewOA: DONE, Total Time Cost: ", time_end - time_start)
alert(text="Targeted Excel Generated!\n用时:" + str(time_end - time_start), title="处理结果", button="好的")
'''
def genNewOCP():
time_start = time.time()
ocp_Ori = pandas.read_excel("ocp.xlsx", sheet_name=0, header=0)
newOCP = xlwt.Workbook()
# ====================================== 执行正则匹配并化简多余项 ====================================================
ocpDirectSlotMatch = []
ocp_nameReg = "\:"
ocp_noReg_1 = '\['
ocp_noReg_2 = "\:\:"
for i in range(0, len(ocp_Ori)):
ocpDirectSlotMatch.append(
re.split(ocp_nameReg, ocp_Ori[1][i])[-1] + ":" + re.split(ocp_noReg_1, ocp_Ori[2][i])[0] + ":" +
re.split(ocp_noReg_2, ocp_Ori[2][i])[1])
ocpDirectSlotMatch = sorted(set(ocpDirectSlotMatch), key=ocpDirectSlotMatch.index)
sheet2 = newOCP.add_sheet('烽火波分OCH光功率检查(每月)', cell_overwrite_ok=True)
sheet2_title = ["烽火波分环", "方向/板卡/端口", "输入光功率", "处理建议", "备注", "差异"]
# =================================== 写入表格 ==========================================
# 写入表格标题
for i in range(0, len(sheet2_title)):
sheet2.write(0, i, sheet2_title[i])
# 写入OCP的方向/板卡/端口信息
for i in range(0, len(ocpDirectSlotMatch)):
sheet2.write(i + 1, 1, ocpDirectSlotMatch[i])
# 通过检索实现数据的对应输入
reg = "[-+]?\d+.?\d*" # Rule for Matching Numbers
for i in range(0, len(ocp_Ori)):
index = ocpDirectSlotMatch.index(
re.split(ocp_nameReg, ocp_Ori[1][i])[-1] + ":" + re.split(ocp_noReg_1, ocp_Ori[2][i])[0] + ":" +
re.split(ocp_noReg_2, ocp_Ori[2][i])[1])
if ocp_Ori[6][i] == "IOP":
if ocp_Ori[7][i] == '收无光':
sheet2.write(index + 1, 2, "收无光")
else:
sheet2.write(index + 1, 2, float(re.findall(reg, str(ocp_Ori[7][i]))[0]))
else:
continue
newOCP.save("genNewOCP.xls")
# 读入表格进行排序
newOCP = pandas.read_excel("genNewOCP.xls", sheet_name="烽火波分OCH光功率检查(每月)", header=0)
newOCP.sort_values(by=['方向/板卡/端口'], inplace=True)
newOCP.reset_index(drop=True, inplace=True)
# 创建一个化简的ocp_OriSimple,用空间换时间
ocp_OriSimple = ocp_Ori
for i in range(0, len(ocp_OriSimple)):
if (ocp_OriSimple[6][i] == 'IOP') or (ocp_OriSimple[6][i] == 'IOP_MAX'):
ocp_OriSimple.drop([i, i], inplace=True)
# 重置 DataFrame 'osc_Ori' 的索引
ocp_OriSimple.reset_index(drop=True, inplace=True)
ocp_Ori = pandas.read_excel("ocp.xlsx", sheet_name=0, header=0)
print("genNewOCP 已完成ocp_Ori简化,去除含IOP、IOP_MAX内容")
# 对于没有IOP的线路,选用IOP_MIN进行填充
for i in range(0, len(newOCP)):
if str(newOCP['输入光功率'][i]) == 'nan':
for j in range(0, len(ocp_OriSimple)):
dest = re.split(ocp_nameReg, ocp_OriSimple[1][j])[-1] + ":" + re.split(ocp_noReg_1, ocp_OriSimple[2][j])[0] + ":" + re.split(ocp_noReg_2, ocp_OriSimple[2][j])[1]
if newOCP.iat[i, 1] == dest and ocp_OriSimple[6][j] == "IOP_MIN":
if ocp_OriSimple[7][j] == '收无光':
newOCP.iat[i, 2] = '收无光'
else:
newOCP.iat[i, 2] = float(re.findall(reg, str(ocp_OriSimple[7][j]))[0])
else:
continue
with pandas.ExcelWriter('./genNewOCP.xls') as writer:
newOCP.to_excel(writer, encoding='utf-8', sheet_name='烽火波分OCH光功率检查(每月)', index=False)
# 合并单元格计算差异
r_xls = xlrd.open_workbook("genNewOCP.xls") # 读取excel文件
excelCopy = copy(r_xls) # 将xlrd的对象转化为xlwt的对象
sheet2 = excelCopy.get_sheet(0)
OCPData = pandas.read_excel("genNewOCP.xls", sheet_name="烽火波分OCH光功率检查(每月)", header=0, usecols=[2, 2])
OCPName = pandas.read_excel("genNewOCP.xls", sheet_name="烽火波分OCH光功率检查(每月)", header=0, usecols=[1, 1])
if len(OCPData) % 2 != 0:
OCPData.loc[len(OCPData)] = ['收无光']
OCPName.loc[len(OCPName)] = ['99-99-奇数提醒-ROADM:ODD_WRN:WRNB-1/WRN-1']
i = 0
while i <= len(OCPData) - 1:
# OCPData_Diff.append(abs(OCPData[i] - OCPData[i+1]))
# sheet2.write_merge(i+1, i+2, 5, 5, "")
if OCPData.iat[i, 0] == '收无光' or OCPData.iat[i + 1, 0] == '收无光':
sheet2.write_merge(i + 1, i + 2, 5, 5, "无法计算")
else:
sheet2.write_merge(i + 1, i + 2, 5, 5, float(abs(OCPData.iat[i, 0] - OCPData.iat[i + 1, 0])))
i = i + 2
# 把错误排序的节点标红
repeat_reg = "[T][R][X][AB]"
pattern = xlwt.Pattern() # Create the pattern
pattern.pattern = xlwt.Pattern.SOLID_PATTERN # May be: NO_PATTERN, SOLID_PATTERN, or 0x00 through 0x12
pattern.pattern_fore_colour = 5
style = xlwt.XFStyle() # Create the pattern
style.pattern = pattern # Add pattern to style
i = 0
while i <= len(OCPName) - 1:
if re.findall(repeat_reg, OCPName["方向/板卡/端口"][i]) == re.findall(repeat_reg, OCPName["方向/板卡/端口"][i + 1]):
sheet2.write(i + 1, 1, OCPName["方向/板卡/端口"][i], style=style)
i = i + 2
else:
i = i + 2
i = 0
while i <= len(OCPName) - 1:
if re.split('-', re.split('/', re.split(':', OCPName.iat[i, 0])[-1])[0])[-1] == '2' and re.split('-', re.split('/', re.split(':', OCPName.iat[i+1, 0])[-1])[0])[-1] == '1':
sheet2.write(i, 6, "以下发生错位!")
break
else:
i = i + 2
print('genNewOCP: 对错误节点的标注已完成')
excelCopy.save("genNewOCP.xls")
time_end = time.time()
print("genNewOCP: DONE, Total Time Cost: ", time_end - time_start)
alert(text="Targeted Excel Generated!\n用时:" + str(time_end - time_start), title="处理结果", button="好的")
'''
'''
def genNewOSC():
osc_Ori = pandas.read_excel("osc.xlsx", header=0, sheet_name=0)
newOSC = xlwt.Workbook()
sheet3 = newOSC.add_sheet('烽火波分OPS检查(每月)', cell_overwrite_ok=True)
sheet3_title = ["波分环", "A节点设备名称", "OCS-输出光功率(dBm)", "OCS-输入光功率(dBm)", "B节点设备名称", "A->B衰耗"]
oscSum = []
oscName = []
for i in range(0, len(osc_Ori)):
if str(osc_Ori[3][i]) == 'nan':
oscSum.append(osc_Ori[1][i] + ":" + osc_Ori[2][i] + ":" + "")
else:
oscSum.append(osc_Ori[1][i] + ":" + osc_Ori[2][i] + ":" + osc_Ori[3][i])
oscSum = sorted(set(oscSum), key=oscSum.index)
reg = "(\d{2}-\d*(-|)[\u4e00-\u9fa5]*(OA|OTM-[\u4e00-\u9fa5]*|OTM|-ROADM\(OA\)|ROADM\(OA\)|-ROADM))"
regOTMFX = "\d{2}\-\d*-[\u4e00-\u9fa5]*OTM-[\u4e00-\u9fa5]*方向|\d{2}\-\d*[\u4e00-\u9fa5]*OTM-[\u4e00-\u9fa5]*方向"
OTMFX = []
for i in range(0, len(oscSum)):
if re.findall(regOTMFX, oscSum[i]) == []:
oscName.append(re.findall(reg, oscSum[i])[0][0])
else:
OTMFX.append(re.findall(reg, oscSum[i])[0][0])
# OTM-XX方向都是只重复一次的,所以可以先提取出来然后去掉重复元素
OTMFX = sorted(set(OTMFX), key=OTMFX.index)
# =================================== 写入表格 ==========================================
# 写入表格标题
for i in range(0, len(sheet3_title)):
sheet3.write(0, i, sheet3_title[i])
# 写入OTM-XX方向这一类的线路名称
for i in range(0, len(OTMFX)):
sheet3.write(i + 1, 1, OTMFX[i])
# 检索和A节点对应的B节点
regNum = "[-+]?\d+.?\d*" # Rule for Matching Numbers
for i in range(0, len(OTMFX)):
knot_A = re.split("OTM-", re.split("惠州", OTMFX[i])[-1])[0]
knot_B = re.split("方向", re.split("OTM-", re.split("惠州", OTMFX[i])[-1])[-1])[0]
Num1 = re.split("-", OTMFX[79])[0]
searchKey = Num1 + "惠州" + knot_B + "OTM-" + knot_A + "方向"
searchResult = difflib.get_close_matches(searchKey, OTMFX, 1, cutoff=0.5)
sheet3.write(i + 1, 4, searchResult)
for j in range(0, len(osc_Ori)):
if re.findall(reg, osc_Ori[1][j])[0][0] == searchResult[0] and re.split("\:\:", osc_Ori[2][j])[-1] == 'OSC_W' and osc_Ori[6][j] == 'IOP_MIN':
# sheet3.write(i + 1, 3, osc_Ori[7][j])
if osc_Ori[7][j] == '无光':
sheet3.write(i + 1, 3, osc_Ori[7][j])
else:
sheet3.write(i + 1, 3, float(re.findall(regNum, osc_Ori[7][j])[0]))
for j in range(0, len(OTMFX)):
for i in range(0, len(osc_Ori)):
if re.findall(reg, osc_Ori[1][i])[0][0] == OTMFX[j] and re.split("\:\:", osc_Ori[2][i])[-1] == 'OSC_W' and osc_Ori[6][i] == 'OOP_MIN':
if osc_Ori[7][i] == '无光':
sheet3.write(i + 1, 3, osc_Ori[7][i])
else:
sheet3.write(j + 1, 2, float(re.findall(regNum, osc_Ori[7][i])[0]))
newOSC.save("genNewOSC.xls")
newOSC = pandas.read_excel("genNewOSC.xls", sheet_name="烽火波分OPS检查(每月)", header=0)
newOSC.sort_values(by=['A节点设备名称'], inplace=True)
with pandas.ExcelWriter('./genNewOSC.xls') as writer:
newOSC.to_excel(writer, encoding='utf-8', sheet_name='烽火波分OPS检查(每月)', index=False)
r_xls = xlrd.open_workbook("genNewOSC.xls") # 读取excel文件
excelCopy = copy(r_xls) # 将xlrd的对象转化为xlwt的对象
sheet_minus = excelCopy.get_sheet(0)
OSC_IOOP = pandas.read_excel("genNewOSC.xls", header=0, usecols=[2, 3])
for i in range(0, len(OSC_IOOP)):
if isinstance(OSC_IOOP["OCS-输出光功率(dBm)"][i], float) and isinstance(OSC_IOOP["OCS-输入光功率(dBm)"][i], float):
sheet_minus.write(i + 1, 5, OSC_IOOP["OCS-输出光功率(dBm)"][i] - OSC_IOOP["OCS-输入光功率(dBm)"][i])
else:
sheet_minus.write(i + 1, 5, "无法计算")
excelCopy.save("genNewOSC.xls")
print("DONE")
'''
def genNewNewOCP():
time_start = time.time()
ocp_Ori = pandas.read_excel("ocp.xlsx", sheet_name=0, header=0)
och_template = pandas.read_excel("och_template.xlsx", sheet_name=0, header=0)
regNum = "[-+]?\d+.?\d*" # Rule for Matching Numbers
ocp_OriSimple = ocp_Ori.copy(deep=True)
for i in range(0, len(ocp_Ori)):
if ('IOP_MAX' in ocp_Ori.iat[i, 6]) or ('IOP_MIN' in ocp_Ori.iat[i, 6]):
ocp_OriSimple.drop([i, i], inplace=True)
# Reset the index of DataFrame 'ocp_OriSimple'
ocp_OriSimple.reset_index(drop=True, inplace=True)
print("(genNewNewOCP) 已完成ocp_Ori简化,去除含IOP_MAX和IOP_MIN内容")
ocp_IOPMIN = ocp_Ori.copy(deep=True)
for i in range(0, len(ocp_Ori)):
if ('IOP_MAX' in ocp_Ori.iat[i, 6]) or (ocp_Ori.iat[i, 6] == 'IOP'):
ocp_IOPMIN.drop([i, i], inplace=True)
# Reset the index of DataFrame 'ocp_IOPMIN'
ocp_IOPMIN.reset_index(drop=True, inplace=True)
print("(genNewNewOCP) 已完成ocp_IOPMIN简化,去除含IOP_MAX和IOP内容")
for i in range(0, len(och_template)):
for j in range(0, len(ocp_OriSimple)):
if re.split(':', och_template.iat[i, 1])[0] in ocp_OriSimple.iat[j, 1]:
if re.split(':', och_template.iat[i, 1])[1] in ocp_OriSimple.iat[j, 2]:
if re.split(':', och_template.iat[i, 1])[-1] in ocp_OriSimple.iat[j, 2]:
if ocp_OriSimple.iat[j, 7] == '收无光':
och_template.iat[i, 2] = ocp_OriSimple.iat[j, 7]
print('\r(genNewNewOCP)Now Progress: i: ' + str(i) + '/' +
#!/usr/bin/python2
from __future__ import absolute_import, division, print_function
import gui_elements as gui
import thread
import wx
import evora.common.utils.fits as fits_utils
import evora.common.utils.logs as log_utils
class ScriptStatus(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Main Sizer
self.vertSizer = wx.BoxSizer(wx.VERTICAL)
# Widgets
self.statusBox = wx.StaticBox(self, label="Script Activity", size=(400, 150))
self.statusBoxSizer = wx.StaticBoxSizer(self.statusBox, wx.VERTICAL)
self.activityText = wx.TextCtrl(self, style=wx.TE_READONLY | wx.TE_MULTILINE,
size=(400, 150))
# adjust subsizers
self.statusBoxSizer.Add(self.activityText, flag=wx.ALIGN_CENTER)
# adjust main sizers
self.vertSizer.Add(self.statusBoxSizer, flag=wx.ALIGN_CENTER)
self.SetSizer(self.vertSizer)
self.vertSizer.Fit(self)
class ScriptCommands(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Global variables
self.parent = parent
self.protocol = None
# Main Sizer
self.vertSizer = wx.BoxSizer(wx.VERTICAL)
# subsizers
self.subVert = wx.BoxSizer(wx.VERTICAL)
self.buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
# Widgets
self.commandFrame = wx.StaticBox(self, id=3000, label="Command Prompt", size=(350, 50))
self.commandFrameSizer = wx.StaticBoxSizer(self.commandFrame, wx.VERTICAL)
self.button = wx.Button(self, id=3001, label="OK") # for entering command prompt
self.upButton = wx.Button(self, id=3002, label="Upload")
self.button.Enable(False)
self.upButton.Enable(False)
self.commandBox = wx.TextCtrl(self, id=3003, size=(350, -1), style=wx.TE_PROCESS_ENTER | wx.TE_READONLY)
# adjust subsizers
self.buttonSizer.Add(self.button, flag=wx.ALIGN_CENTER)
gui.AddLinearSpacer(self.buttonSizer, 10)
self.buttonSizer.Add(self.upButton, flag=wx.ALIGN_CENTER)
self.subVert.Add(self.commandBox, flag=wx.ALIGN_CENTER)
gui.AddLinearSpacer(self.subVert, 15)
self.subVert.Add(self.buttonSizer, flag=wx.ALIGN_CENTER)
self.commandFrameSizer.Add(self.subVert, flag=wx.ALIGN_CENTER)
# adjust main sizers
self.vertSizer.Add(self.commandFrameSizer, flag=wx.ALIGN_CENTER)
# Variables
self.command = ""
# Bindings
self.Bind(wx.EVT_TEXT, self.getCommand, id=3003)
# self.Bind(wx.EVT_TEXT_ENTER, self.onOk, id=3003)
self.Bind(wx.EVT_BUTTON, self.onOk, id=3001)
self.Bind(wx.EVT_BUTTON, self.onUpload, id=3002)
##
self.SetSizer(self.vertSizer)
self.vertSizer.Fit(self)
def getCommand(self, event):
self.command = self.commandBox.GetValue()
self.button.SetDefault()
def onOk(self, event):
"""
Handle a command once it has been entered at the command prompt.
"""
self.button.SetFocus()
if self.command == "":
print("No command")
else:
# print(self.command)
runList = self.parseCommand(self.command) # parses user command
self.executeCommand(runList) # executes user command
def onUpload(self, event):
print("Upload your script")
def sendToStatus(self, string):
send = log_utils.time_stamp()
send += " " + string
wx.CallAfter(self.threadSafeScriptingStatus, send)
def threadSafeScriptingStatus(self, string):
val = self.parent.scriptStatus.activityText.GetValue()
self.parent.scriptStatus.activityText.SetValue(val + string + "\n")
self.parent.scriptStatus.activityText.SetInsertionPointEnd()
def logScript(self, logmsg):
"""
Pre: Pass in a message to be logged.
Post: Sends log message to status box as well as logs to file.
"""
print("logging from scripting class")
self.sendToStatus(logmsg)
logInstance = self.parent.parent.parent.log.logInstance
wx.CallAfter(logInstance.threadSafeLogStatus, logmsg)
def executeCommand(self, runList):
"""
This will take the known order of runList from the command and then send it to the server
while also displaying pertanent information. It will essentially be reusing methods,
whenever possible from the acquisitionClasses.py file.
Returns: Nothing is returned through this method.
"""
val = self.parent.scriptStatus.activityText.GetValue()
print(val)
if type(runList) == list:
self.parent.scriptStatus.activityText.SetValue(val + str(runList) + "\n")
self.parent.scriptStatus.activityText.SetInsertionPointEnd()
print(runList)
print("sending command")
# surround in str to get rid of unicode, otherwise fails at sending
sendCommand = str(runList[0])
if sendCommand == 'series':
imtype = str(runList[1])
number = str(runList[2])
exposeClass = self.parent.parent.parent.takeImage.exposureInstance
exposeClass.seriesImageNumber = int(number)
exposeClass.logFunction = self.logScript # point to the correct log function that prints to log tab and script status
# example runList (['series', 'bias', int(number), 'basename'])
if imtype == 'bias':
basename = str(runList[3])
exposeClass.currentImage = basename
overwrite = None
if fits_utils.check_for_file("/data/copyfile/" + exposeClass.currentImage + "_001.fits"):
dialog = wx.MessageDialog(None, "Do you want to change temperature during exposure?", "", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
overwrite = dialog.ShowModal()
dialog.Destroy()
if overwrite is None or overwrite == wx.ID_OK:
d = self.protocol.addDeferred("seriesSent")
d.addCallback(exposeClass.displaySeriesImage_thread)
d = self.protocol.sendCommand(sendCommand + " " + imtype + " " + number + " 0 " + str(self.parent.parent.parent.binning))
d.addCallback(exposeClass.seriesCallback)
# start timer
thread.start_new_thread(exposeClass.exposeTimer, (0,))
if imtype in ['flat', 'object', 'dark']:
exposeClass.expButton.Enable(False)
exposeClass.stopExp.Enable(True)
exposeClass.abort = True
itime = str(runList[3])
basename = str(runList[4])
exposeClass.currentImage = basename
overwrite = None
if fits_utils.check_for_file("/data/copyfile/" + exposeClass.currentImage + "_001.fits"):
dialog = wx.MessageDialog(None, "Do you want to change temperature during exposure?", "", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
overwrite = dialog.ShowModal()
dialog.Destroy()
if overwrite is None or overwrite == wx.ID_OK:
for i in range(int(number)):
d = self.protocol.addDeferred("seriesSent" + str(i + 1))
d.addCallback(exposeClass.displaySeriesImage_thread)
d = self.protocol.sendCommand(sendCommand + " " + imtype + " " + number + " " + itime + " " + str(self.parent.parent.parent.binning))
d.addCallback(exposeClass.seriesCallback)
# start timer
thread.start_new_thread(exposeClass.exposeTimer, (float(itime),))
if sendCommand == 'abort':
exposeClass = self.parent.parent.parent.takeImage.exposureInstance
exposeClass.onStop(None)
if sendCommand == 'expose' and runList[1] == 'help':
# report on all the help options
helpString = ""
if runList[2] == 'abort':
helpString += "\"expose abort\" is used to stop the current exposure. This can be "\
+ "an exposure started through the imaging or scripting tab. Invoke with "\
+ "\"expose abort\"."
if runList[2] == 'bias':
helpString += "\"expose bias\" is used to take a number of biases in one command. Invoke this "\
+ "command with \"expose bias arg1 arg2\", where arg1 and arg2, in no particular "\
+ "order, are time=XX in seconds and basename=imagename."
if runList[2] == 'dark':
helpString += "\"expose dark\" is used to take a number of darks in one command. Invoke this "\
+ "command with \"expose dark arg1 arg2 arg3\", where arg1, arg2, and arg3, in no particular "\
+ "order, are time=XX in seconds, number=XX as an int, and basename=imagename."
if runList[2] == 'flat':
helpString += "\"expose flat\" is used to take a number of darks in one command. Invoke this "\
+ "command with \"expose flat arg1 arg2 arg3\", where arg1, arg2, and arg3, in no particular "\
+ "order, are time=XX in seconds, number=XX as an int, and basename=imagename."
if runList[2] == 'object':
helpString += "\"expose object\" is used to take a number of darks in one command. Invoke this "\
+ "command with \"expose object arg1 arg2 arg3\", where arg1, arg2, and arg3, in no particular "\
+ "order, are time=XX in seconds, number=XX as an int, and basename=imagename."
self.sendToStatus(helpString)
# Deal with set commands
# command: set temp XX
if sendCommand == 'setTEC':
temp = int(runList[1])
tempClass = self.parent.parent.parent.takeImage.tempInstance
tempClass.tempToSend = temp
tempClass.onCool(None)
# command: set temp warmup
if sendCommand == 'warmup':
tempClass = self.parent.parent.parent.takeImage.tempInstance
tempClass.onStopCooling(None)
# command: set filter X
# unused
# if sendCommand == 'set':
# if runList[1] == 'filter':
# pos = int(runList[2])
# command: set binning X
if sendCommand == 'set':
if runList[1] == 'binning':
topInstance = self.parent.parent.parent
bin = str(runList[2])
if bin == '1':
topInstance.on1x1(None)
file = topInstance.menuBar.GetMenu(0)
file.FindItemById(1120).Check(check=True)
else:
topInstance.on2x2(None)
file = topInstance.menuBar.GetMenu(0)
file.FindItemById(1121).Check(check=True)
# command: set help binning
# set help temp
# set help filter
if sendCommand == 'set':
if runList[1] == 'help':
if runList[2] == 'binning':
helpBinning = "\"set binning\" is used to set the binning type of the CCD. To invoke use the following "\
+ "command: \"set binning arg1\", where arg1 is the binning type of 1 or 2."
self.sendToStatus(helpBinning)
if runList[2] == 'temp':
helpTemp = "\"set temp\" is used to set the temperature of the CCD. To invoke use the following "\
+ "command: \"set temp arg1\", where arg1 is an int between -80 to -10 "\
+ "or warmup."
self.sendToStatus(helpTemp)
if runList[2] == 'filter':
helpFilter = "\"set filter\" is used to set the filter wheel position. To invoke use the following "\
+ "command: \"set filter arg1\", where arg1 is an int between 1 and 6."
self.sendToStatus(helpFilter)
# command: help expose
# help set
# help filter
if sendCommand == 'help':
if runList[1] == 'expose':
helpExpose = "\"expose\" command is explicitely for taking several images in one command. "
helpExpose += "This is invoked by typing \"expose imageType\" where imageType is either "
helpExpose += "bias, dark, flat, or object. Use \"expose help\" followed by image type to "
helpExpose += "see what arguments are needed (e.g. \"expose help bias\")."
self.sendToStatus(helpExpose)
if runList[1] == 'set':
helpSet = "\"set\" command is used to set the camera attributes of binning, temperature, and "
helpSet += "filter position. Use \"set help\" followed by one of the attributes (binning, temp, "
helpSet += "filter) to get info on the need arguements (e.g. \"set help temp\")."
self.sendToStatus(helpSet)
if runList[1] == 'filter':
helpFilter = "\"filter\" command is used to control the filter attributes."
self.sendToStatus(helpFilter)
else:
print("something went wrong")
print(runList)
dialog = wx.MessageDialog(None, runList, "", wx.OK | wx.ICON_ERROR)
dialog.ShowModal()
dialog.Destroy()
self.commandBox.SetFocus()
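# For reference, the runList shapes handled above (values are illustrative):
#   ['series', 'bias', 10, 'biasframe']        -> series of bias frames
#   ['series', 'object', 5, 30.0, 'm51']       -> series of timed exposures
#   ['abort']                                  -> stop the current exposure
#   ['setTEC', -40] / ['warmup']               -> cooler control
#   ['set', 'binning', '1']                    -> set 1x1 or 2x2 binning
#   plus the 'help', 'expose help', and 'set help' variants described above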
def parseCommand(self, command):
"""
This method parses the command inputed by the user from the command promp text control box.
If the command is good it will return a list with the command that is sent to the server
followed by the
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from textwrap import dedent
from ...nbgraderformat import read
from .conftest import _make_nbserver, _make_browser, _close_nbserver, _close_browser
from nbformat import current_nbformat
@pytest.fixture(scope="module")
def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
def fin():
_close_nbserver(server)
request.addfinalizer(fin)
return server
@pytest.fixture
def browser(request, tempdir, nbserver):
browser = _make_browser(tempdir)
def fin():
_close_browser(browser)
request.addfinalizer(fin)
return browser
def _wait(browser):
return WebDriverWait(browser, 30)
def _load_notebook(browser, port, retries=5, name="blank.ipynb"):
# go to the correct page
browser.get("http://localhost:{}/notebooks/{}".format(port, name))
def page_loaded(browser):
return browser.execute_script(
'return typeof Jupyter !== "undefined" && Jupyter.page !== undefined && Jupyter.notebook !== undefined;')
# wait for the page to load
try:
_wait(browser).until(page_loaded)
except TimeoutException:
if retries > 0:
print("Retrying page load...")
# page timeout, but sometimes this happens, so try refreshing?
_load_notebook(browser, port, retries=retries - 1)
else:
print("Failed to load the page too many times")
raise
def _activate_toolbar(browser, name="Create%20Assignment"):
def celltoolbar_exists(browser):
return browser.execute_script(
'return $("#view_menu #menu-cell-toolbar").find("[data-name=\'{}\']").length == 1;'.format(name))
# wait for the view menu to appear
_wait(browser).until(celltoolbar_exists)
# activate the Create Assignment toolbar
browser.execute_script(
"$('#view_menu #menu-cell-toolbar').find('[data-name=\"{}\"]').find('a').click();".format(name)
)
# make sure the toolbar appeared
if name == "Create%20Assignment":
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".celltoolbar select")))
elif name == "Edit%20Metadata":
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".celltoolbar button")))
def _select_none(browser, index=0):
select = Select(browser.find_elements_by_css_selector('.celltoolbar select')[index])
select.select_by_value('')
def _select_manual(browser, index=0):
select = Select(browser.find_elements_by_css_selector('.celltoolbar select')[index])
select.select_by_value('manual')
def _select_solution(browser, index=0):
select = Select(browser.find_elements_by_css_selector('.celltoolbar select')[index])
select.select_by_value('solution')
def _select_tests(browser, index=0):
select = Select(browser.find_elements_by_css_selector('.celltoolbar select')[index])
select.select_by_value('tests')
def _select_locked(browser, index=0):
select = Select(browser.find_elements_by_css_selector('.celltoolbar select')[index])
select.select_by_value('readonly')
def _set_points(browser, points=2, index=0):
elem = browser.find_elements_by_css_selector(".nbgrader-points-input")[index]
elem.clear()
elem.send_keys(points)
browser.find_elements_by_css_selector(".nbgrader-cell")[index].click()
def _set_id(browser, cell_id="foo", index=0):
elem = browser.find_elements_by_css_selector(".nbgrader-id-input")[index]
elem.clear()
elem.send_keys(cell_id)
browser.find_elements_by_css_selector(".nbgrader-cell")[index].click()
def _get_metadata(browser):
return browser.execute_script(
"""
var cell = Jupyter.notebook.get_cell(0);
return cell.metadata.nbgrader;
"""
)
def _get_total_points(browser):
element = browser.find_element_by_id("nbgrader-total-points")
return float(element.get_attribute("value"))
def _save(browser):
browser.execute_script(dedent(
"""
Jupyter._notebook_saved = false;
Jupyter.notebook.save_notebook().then(function () {
Jupyter._notebook_saved = true;
});
"""
))
def is_saved(browser):
return browser.execute_script(dedent(
"""
if (Jupyter._notebook_saved === true) {
Jupyter._notebook_saved = false;
return true;
} else {
return false;
}
"""
))
return is_saved
def _save_and_validate(browser):
_wait(browser).until(_save(browser))
read("blank.ipynb", current_nbformat)
def _wait_for_modal(browser):
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".modal-dialog")))
def _dismiss_modal(browser):
button = browser.find_element_by_css_selector(".modal-footer .btn-primary")
button.click()
def modal_gone(browser):
try:
browser.find_element_by_css_selector(".modal-dialog")
except NoSuchElementException:
return True
return False
_wait(browser).until(modal_gone)
@pytest.mark.nbextensions
def test_manual_cell(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# does the nbgrader metadata exist?
assert _get_metadata(browser) is None
# make it manually graded
_select_manual(browser)
assert _get_metadata(browser)['solution']
assert _get_metadata(browser)['grade']
assert not _get_metadata(browser)['locked']
# wait for the points and id fields to appear
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-points")))
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-id")))
# set the points
_set_points(browser)
assert 2 == _get_metadata(browser)['points']
# set the id
assert _get_metadata(browser)['grade_id'].startswith("cell-")
_set_id(browser)
assert "foo" == _get_metadata(browser)['grade_id']
# make sure the metadata is valid
_save_and_validate(browser)
# make it nothing
_select_none(browser)
assert not _get_metadata(browser)
_save_and_validate(browser)
@pytest.mark.nbextensions
def test_solution_cell(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# does the nbgrader metadata exist?
assert _get_metadata(browser) is None
# make it a solution cell
_select_solution(browser)
assert _get_metadata(browser)['solution']
assert not _get_metadata(browser)['grade']
assert not _get_metadata(browser)['locked']
# wait for the id field to appear
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-id")))
# set the id
assert _get_metadata(browser)['grade_id'].startswith("cell-")
_set_id(browser)
assert "foo" == _get_metadata(browser)['grade_id']
# make sure the metadata is valid
_save_and_validate(browser)
# make it nothing
_select_none(browser)
assert not _get_metadata(browser)
_save_and_validate(browser)
@pytest.mark.nbextensions
def test_tests_cell(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# does the nbgrader metadata exist?
assert _get_metadata(browser) is None
# make it autograder tests
_select_tests(browser)
assert not _get_metadata(browser)['solution']
assert _get_metadata(browser)['grade']
assert _get_metadata(browser)['locked']
# wait for the points and id fields to appear
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-points")))
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-id")))
WebDriverWait(browser, 30).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".lock-button")))
# set the points
_set_points(browser)
assert 2 == _get_metadata(browser)['points']
# set the id
assert _get_metadata(browser)['grade_id'].startswith("cell-")
_set_id(browser)
assert "foo" == _get_metadata(browser)['grade_id']
# make sure the metadata is valid
_save_and_validate(browser)
# make it nothing
_select_none(browser)
assert not _get_metadata(browser)
_save_and_validate(browser)
@pytest.mark.nbextensions
def test_tests_to_solution_cell(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# does the nbgrader metadata exist?
assert _get_metadata(browser) is None
# make it autograder tests
_select_tests(browser)
assert not _get_metadata(browser)['solution']
assert _get_metadata(browser)['grade']
assert _get_metadata(browser)['locked']
# wait for the points and id fields to appear
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-points")))
_wait(browser).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-id")))
WebDriverWait(browser, 30).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".lock-button")))
# set the points
_set_points(browser)
assert 2 == _get_metadata(browser)['points']
# set the id
assert _get_metadata(browser)['grade_id'].startswith("cell-")
_set_id(browser)
assert "foo" == _get_metadata(browser)['grade_id']
# make sure the metadata is valid
_save_and_validate(browser)
# make it a solution cell and make sure the points are gone
_select_solution(browser)
assert _get_metadata(browser)['solution']
assert not _get_metadata(browser)['grade']
assert not _get_metadata(browser)['locked']
assert 'points' not in _get_metadata(browser)
_save_and_validate(browser)
# make it nothing
_select_none(browser)
assert not _get_metadata(browser)
_save_and_validate(browser)
@pytest.mark.nbextensions
def test_locked_cell(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# does the nbgrader metadata exist?
assert _get_metadata(browser) is None
# make it locked
_select_locked(browser)
assert not _get_metadata(browser)['solution']
assert not _get_metadata(browser)['grade']
assert _get_metadata(browser)['locked']
# wait for the id and lock button to appear
WebDriverWait(browser, 30).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".nbgrader-id")))
WebDriverWait(browser, 30).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".lock-button")))
# set the id
assert _get_metadata(browser)['grade_id'].startswith("cell-")
_set_id(browser)
assert "foo" == _get_metadata(browser)['grade_id']
# make sure the metadata is valid
_save_and_validate(browser)
# make it nothing
_select_none(browser)
assert not _get_metadata(browser)
_save_and_validate(browser)
@pytest.mark.nbextensions
def test_grade_cell_css(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# make it manually graded
_select_manual(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 1
# make it nothing
_select_none(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 0
# make it a solution
_select_solution(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 1
# make it nothing
_select_none(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 0
# make it autograder tests
_select_tests(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 1
# make it nothing
_select_none(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 0
# make it autograder tests
_select_tests(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 1
# deactivate the toolbar
_activate_toolbar(browser, "None")
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 0
# activate the toolbar
_activate_toolbar(browser)
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 1
# switch to the Edit Metadata toolbar (deactivates the Create Assignment toolbar)
_activate_toolbar(browser, "Edit%20Metadata")
elements = browser.find_elements_by_css_selector(".nbgrader-cell")
assert len(elements) == 0
@pytest.mark.nbextensions
def test_tabbing(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# make it manually graded
_select_manual(browser)
# click the points field
element = browser.find_element_by_css_selector(".nbgrader-points-input")
element.click()
# get the active element
element = browser.execute_script("return document.activeElement")
assert "nbgrader-points-input" == element.get_attribute("class")
# press tab and check that the active element is correct
element.send_keys(Keys.TAB)
element = browser.execute_script("return document.activeElement")
assert "nbgrader-id-input" == element.get_attribute("class")
# make it autograder tests
_select_tests(browser)
# click the points field
element = browser.find_element_by_css_selector(".nbgrader-points-input")
element.click()
# get the active element
element = browser.execute_script("return document.activeElement")
assert "nbgrader-points-input" == element.get_attribute("class")
# press tab and check that the active element is correct
element.send_keys(Keys.TAB)
element = browser.execute_script("return document.activeElement")
assert "nbgrader-id-input" == element.get_attribute("class")
@pytest.mark.nbextensions
def test_total_points(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# make sure the total points is zero
assert _get_total_points(browser) == 0
# make it autograder tests and set the points to two
_select_tests(browser)
_set_points(browser)
_set_id(browser)
assert _get_total_points(browser) == 2
# make it manually graded
_select_manual(browser)
assert _get_total_points(browser) == 2
# make it a solution and make sure the total points is zero
_select_solution(browser)
assert _get_total_points(browser) == 0
# make it autograder tests
_select_tests(browser)
assert _get_total_points(browser) == 0
_set_points(browser)
assert _get_total_points(browser) == 2
# create a new cell
element = browser.find_element_by_tag_name("body")
element.send_keys(Keys.ESCAPE)
element.send_keys("b")
# make sure the toolbar appeared
def find_toolbar(browser):
try:
browser.find_elements_by_css_selector(".celltoolbar select")[1]
except IndexError:
return False
return True
_wait(browser).until(find_toolbar)
# make it a test cell
_select_tests(browser, index=1)
_set_points(browser, points=1, index=1)
_set_id(browser, cell_id="bar", index=1)
assert _get_total_points(browser) == 3
# delete the new cell
element = browser.find_elements_by_css_selector(".cell")[0]
element.click()
element.send_keys(Keys.ESCAPE)
element.send_keys("d")
element.send_keys("d")
assert _get_total_points(browser) == 1
# delete the first cell
element = browser.find_elements_by_css_selector(".cell")[0]
element.send_keys("d")
element.send_keys("d")
assert _get_total_points(browser) == 0
@pytest.mark.nbextensions
def test_cell_ids(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# turn it into a cell with an id
_select_solution(browser)
_set_id(browser, cell_id="")
# save and check for an error (blank id)
_save(browser)
_wait_for_modal(browser)
_dismiss_modal(browser)
# set the label
_set_id(browser)
# create a new cell
element = browser.find_element_by_tag_name("body")
element.send_keys(Keys.ESCAPE)
element.send_keys("b")
# make sure the toolbar appeared
def find_toolbar(browser):
try:
browser.find_elements_by_css_selector(".celltoolbar select")[1]
except IndexError:
return False
return True
_wait(browser).until(find_toolbar)
# make it a test cell and set the label
_select_tests(browser, index=1)
_set_id(browser, index=1)
# save and check for an error (duplicate id)
_save(browser)
_wait_for_modal(browser)
_dismiss_modal(browser)
@pytest.mark.nbextensions
def test_negative_points(browser, port):
_load_notebook(browser, port)
_activate_toolbar(browser)
# make sure the total points is zero
assert _get_total_points(browser) == 0
# make it autograder tests and set the points to two
_select_tests(browser)
_set_points(browser, points=2)
_set_id(browser)
assert _get_total_points(browser) == 2
assert 2 == _get_metadata(browser)['points']
# set the points to negative one
_set_points(browser, points=-1)
and r.status_code != 206) \
or (not ('method' in pars and pars['method'] == 'download') \
and url.find('method=download') == -1 \
and url.find('baidupcs.com/file/') == -1):
self.__print_error_json(r)
perr("Website returned: {}".format(rb(r.text)))
# child classes override this to customize error handling
def __handle_more_response_error(self, r, sc, ec, act, actargs):
return const.ERequestFailed
# TODO: the 'act' param is hacky
def __get_json(self, r, act, defaultec = const.ERequestFailed):
try:
j = r.json()
self.pd("Website returned JSON: {}".format(j))
if 'error_code' in j:
return j['error_code']
else:
return defaultec
except ValueError:
if hasattr(r, 'text'):
self.pd("Website Response: {}".format(rb(r.text)))
if act == self.__cdl_act:
return const.IETaskNotFound
return defaultec
def __request_work_die(self, ex, url, pars, r, act):
result = const.EFatal
self.__dump_exception(ex, url, pars, r, act)
perr("Fatal Exception, no way to continue.\nQuitting...\n")
perr("If the error is reproducible, run the program with `-dv` arguments again to get more info.\n")
quit(result)
# we eat the exception, and use the return code as the only
# error notification method; we don't want to mix the two
#raise # must notify the caller about the failure
def __request_work(self, url, pars, act, method, actargs = None, addtoken = True, dumpex = True, **kwargs):
result = const.ENoError
r = None
self.__extraupdate()
parsnew = pars.copy()
if addtoken:
parsnew['access_token'] = self.__access_token
try:
self.pd(method + ' ' + url)
self.pd("actargs: {}".format(actargs))
self.pd("Params: {}".format(pars))
r = self.__requester.request(method, url, params = parsnew, timeout = self.__timeout, verify = self.__checkssl, **kwargs)
self.response = r
sc = r.status_code
self.pd("Full URL: {}".format(r.url))
self.pd("HTTP Status Code: {}".format(sc))
# BUGFIX: DON'T do this; if we are downloading a big file,
# the program will eat A LOT of memory and potentially hang / get killed
#self.pd("Request Headers: {}".format(pprint.pformat(r.request.headers)), 2)
#self.pd("Response Header: {}".format(pprint.pformat(r.headers)), 2)
#self.pd("Response: {}".format(rb(r.text)), 3)
if sc == requests.codes.ok or sc == 206: # 206 Partial Content
if sc == requests.codes.ok:
# #162 https://github.com/houtianze/bypy/pull/162
# handle response like this: {"error_code":0,"error_msg":"no error","request_id":70768340515255385}
if not ('method' in pars and pars['method'] == 'download'):
try:
j = r.json()
if 'error_code' in j and j['error_code'] == 0 and 'error_msg' in j and j['error_msg'] == 'no error':
self.pd("Unexpected response: {}".format(j))
return const.ERequestFailed
except Exception as ex:
perr(formatex(ex))
# TODO: Shall i return this?
return const.ERequestFailed
self.pd("200 OK, processing action")
else:
self.pd("206 Partial Content (this is OK), processing action")
result = act(r, actargs)
if result == const.ENoError:
self.pd("Request all goes fine")
else:
ec = self.__get_json(r, act)
# 6 (sc: 403): No permission to access user data
# 110 (sc: 401): Access token invalid or no longer valid
# 111 (sc: 401): Access token expired
if ec == 111 or ec == 110 or ec == 6: # and sc == 401:
self.pd("ec = {}".format(ec))
self.pd("Need to refresh token, refreshing")
if const.ENoError == self.__refresh_token(): # refresh the token and re-request
# TODO: avoid infinite recursive loops
# TODO: properly pass retry
result = self.__request(url, pars, act, method, actargs, True, addtoken, dumpex, **kwargs)
else:
result = const.EFatal
perr("FATAL: Token refreshing failed, can't continue.\nQuitting...\n")
quit(result)
# File md5 not found, you should use upload API to upload the whole file.
elif ec == const.IEMD5NotFound: # and sc == 404:
self.pd("MD5 not found, rapidupload failed")
result = ec
# superfile create failed
elif ec == const.IESuperfileCreationFailed \
or ec == const.IEBlockMissInSuperFile2:
self.pd("Failed to combine files from MD5 slices")
result = ec
# errors that make retrying meaningless
elif (
ec == 31061 or # sc == 400 file already exists
ec == 31062 or # sc == 400 file name is invalid
ec == 31063 or # sc == 400 file parent path does not exist
ec == 31064 or # sc == 403 file is not authorized
ec == 31065 or # sc == 400 directory is full
ec == 31066 or # sc == 403 (indeed 404) file does not exist
ec == const.IETaskNotFound or # 36016 or # sc == 404 Task was not found
# the following was found by xslidian, but I have never encountered it before
ec == 31390): # sc == 404 # {"error_code":31390,"error_msg":"Illegal File"} # r.url.find('http://bcscdn.baidu.com/bcs-cdn/wenxintishi') == 0
result = ec
# TODO: Move this out to cdl_cancel() ?
#if ec == const.IETaskNotFound:
# pr(r.json())
if dumpex:
self.__dump_exception(None, url, pars, r, act)
else:
# gate for child classes to customize behaviors
# the function should return ERequestFailed if it doesn't handle the case
result = self.__handle_more_response_error(r, sc, ec, act, actargs)
if result == const.ERequestFailed and dumpex:
self.__dump_exception(None, url, pars, r, act)
except (requests.exceptions.RequestException,
socket.error,
ReadTimeoutError) as ex:
# If certificate check failed, no need to continue
# but prompt the user for work-around and quit
# why so kludgy? because requests' SSLError doesn't set
# the errno and strerror due to using **kwargs,
# so we are forced to use string matching
if isinstance(ex, requests.exceptions.SSLError) \
and re.match(r'^\[Errno 1\].*error:14090086.*:certificate verify failed$', str(ex), re.I):
# [Errno 1] _ssl.c:504: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
result = const.EFatal
self.__dump_exception(ex, url, pars, r, act)
perr("\n\n== Baidu's Certificate Verification Failure ==\n"
"We couldn't verify Baidu's SSL Certificate.\n"
"It's most likely that the system doesn't have "
"the corresponding CA certificate installed.\n"
"There are two ways of solving this:\n"
"Either) Run this prog with the '" + const.CaCertsOption + \
" <path to " + const.ByPyCertsFileName + "> argument "
"(" + const.ByPyCertsFileName + " comes along with this prog). "
"This is the secure way. "
"However, it won't work after 2020-02-08 when "
"the certificat expires.\n"
"Or) Run this prog with the '" + const.DisableSslCheckOption + \
"' argument. This supresses the CA cert check "
"and always works.\n")
quit(result)
# why so kludgy? because requests' SSLError doesn't set
# the errno and strerror due to using **kwargs,
# so we are forced to use string matching
if isinstance(ex, requests.exceptions.SSLError) \
and re.match(r'^\[Errno 1\].*error:14090086.*:certificate verify failed$', str(ex), re.I):
# [Errno 1] _ssl.c:504: error:14090086:SSL routines:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
perr("\n*** We probably don't have Baidu's CA Certificate ***\n" \
"This in fact doesn't matter most of the time.\n\n" \
"However, if you are _really_ concern about it, you can:\n" \
"Either) Run this prog with the '" + const.CaCertsOption + \
" <path to bypy.cacerts.pem>' " \
"argument. This is the secure way.\n" \
"Or) Run this prog with the '" + const.DisableSslCheckOption + \
"' argument. This suppresses the CA cert check.\n")
result = const.ERequestFailed
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
# TODO: put this check into the specific functions?
except ValueError as ex:
if ex.message == 'No JSON object could be decoded':
result = const.ERequestFailed
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
else:
result = const.EFatal
self.__request_work_die(ex, url, pars, r, act)
except Exception as ex:
# OpenSSL SysCallError
if ex.args == (10054, 'WSAECONNRESET') \
or ex.args == (10053, 'WSAECONNABORTED') \
or ex.args == (104, 'ECONNRESET') \
or ex.args == (110, 'ETIMEDOUT') \
or ex.args == (32, 'EPIPE'):
result = const.ERequestFailed
if dumpex:
self.__dump_exception(ex, url, pars, r, act)
else:
result = const.EFatal
self.__request_work_die(ex, url, pars, r, act)
return result
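# Summary of __request_work outcomes (descriptive comment, based on the code above):
# - HTTP 200/206: the action callback `act` is run and its result returned.
# - error codes 6/110/111: the access token is refreshed and the request re-issued.
# - permanent errors (31061-31066, 31390, task-not-found, MD5-not-found, superfile
#   failures): the error code is returned without retrying.
# - network / SSL / connection-reset exceptions: ERequestFailed (retryable) or
#   EFatal for unrecoverable cases such as certificate verification failure.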
def __request(self, url, pars, act, method, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
tries = 1
if retry:
tries = self.__retry
result = const.ERequestFailed
# Change the User-Agent to avoid server fuss
kwnew = kwargs.copy()
if 'headers' not in kwnew:
kwnew['headers'] = { 'User-Agent': const.UserAgent }
# Now, allow the User-Agent to be set by the caller, instead of always using the default UserAgent value.
if 'User-Agent' not in kwnew['headers']:
kwnew['headers']['User-Agent'] = const.UserAgent
i = 0
while True:
result = self.__request_work(url, pars, act, method, actargs, addtoken, dumpex, **kwnew)
i += 1
# only ERequestFailed warrants a retry; other errors are returned directly
if result == const.ERequestFailed:
if i < tries:
# algo changed: delay more after each failure
delay = const.RetryDelayInSec * i
perr("Waiting {} seconds before retrying...".format(delay))
time.sleep(delay)
perr("Request Try #{} / {}".format(i + 1, tries))
else:
result = const.EMaxRetry
perr("Maximum number ({}) of tries failed.".format(tries))
if self.__quit_when_fail:
quit(const.EMaxRetry)
break
else:
break
return result
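# Retry behaviour (a worked example; the numbers are illustrative, not from the
# source): with retry enabled the loop runs up to self.__retry times and sleeps
# const.RetryDelayInSec * i seconds after the i-th failure, i.e. a linear back-off.
# If RetryDelayInSec were 5 and __retry were 3, the waits between attempts would
# be 5s and then 10s before giving up with EMaxRetry.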
def __get(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'GET', actargs, retry, addtoken, dumpex, **kwargs)
def __post(self, url, pars, act, actargs = None, retry = True, addtoken = True, dumpex = True, **kwargs):
return self.__request(url, pars, act, 'POST', actargs, retry, addtoken, dumpex, **kwargs)
# direction: True - upload, False - download
def __shallinclude(self, lpath, rpath, direction):
arrow = '==>' if direction else '<=='
checkpath = lpath if direction else rpath
# TODO: bad practice, see the os.access() documentation for more info
if direction: # upload
if not os.path.exists(lpath):
perr("'{}' {} '{}' skipped since local path no longer exists".format(
lpath, arrow, rpath));
return False
else: # download
if os.path.exists(lpath) and (not os.access(lpath, os.R_OK)):
perr("'{}' {} '{}' skipped due to permission".format(
lpath, arrow, rpath));
return False
if '\\' in os.path.basename(checkpath):
perr("'{}' {} '{}' skipped due to problemic '\\' in the path".format(
lpath, arrow, rpath));
return False
include = (not self.__incregex) or self.__incregmo.match(checkpath)
if not include:
self.pv("'{}' {} '{}' skipped as it's not included in the regex pattern".format(
lpath, arrow, rpath));
return include
ListFormatDict = {
'$t' : (lambda json: ls_type(json['isdir'])),
'$f' : (lambda json: json['path'].split('/')[-1]),
'$c' : (lambda json: ls_time(json['ctime'])),
'$m' : (lambda json: ls_time(json['mtime'])),
'$d' : (lambda json: str(json['md5'] if 'md5' in json else '')),
'$s' : (lambda json: str(json['size'])),
'$i' : (lambda json: str(json['fs_id'])),
'$b' : (lambda json: str(json['block_list'] if 'block_list' in json else '')),
'$u' : (lambda json: 'HasSubDir' if 'ifhassubdir' in json and json['ifhassubdir'] else 'NoSubDir'),
'$$' : (lambda json: '$')
}
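# Illustration (assumed behaviour of __replace_list_format below, based on the
# mapping above): a format string such as "$t $f $s" would expand, per listed
# entry, to the type flag, the file name and the size, while "$$" yields a
# literal "$".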
def __replace_list_format(self, fmt,
# Repository: AaronFriel/pulumi-google-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeedArgs', 'Feed']
@pulumi.input_type
class FeedArgs:
def __init__(__self__, *,
feed_id: pulumi.Input[str],
feed_output_config: pulumi.Input['FeedOutputConfigArgs'],
name: pulumi.Input[str],
v1_id: pulumi.Input[str],
v1_id1: pulumi.Input[str],
asset_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asset_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
condition: Optional[pulumi.Input['ExprArgs']] = None,
content_type: Optional[pulumi.Input['FeedContentType']] = None,
relationship_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Feed resource.
:param pulumi.Input[str] feed_id: This is the client-assigned asset feed identifier and it needs to be unique under a specific parent project/folder/organization.
:param pulumi.Input['FeedOutputConfigArgs'] feed_output_config: Feed output configuration defining where the asset updates are published to.
:param pulumi.Input[str] name: The format will be projects/{project_number}/feeds/{client-assigned_feed_identifier} or folders/{folder_number}/feeds/{client-assigned_feed_identifier} or organizations/{organization_number}/feeds/{client-assigned_feed_identifier} The client-assigned feed identifier must be unique within the parent project/folder/organization.
:param pulumi.Input[Sequence[pulumi.Input[str]]] asset_names: A list of the full names of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`. See [Resource Names](https://cloud.google.com/apis/design/resource_names#full_resource_name) for more info.
:param pulumi.Input[Sequence[pulumi.Input[str]]] asset_types: A list of types of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `"compute.googleapis.com/Disk"` See [this topic](https://cloud.google.com/asset-inventory/docs/supported-asset-types) for a list of all supported asset types.
:param pulumi.Input['ExprArgs'] condition: A condition which determines whether an asset update should be published. If specified, an asset will be returned only when the expression evaluates to true. When set, `expression` field in the `Expr` must be a valid [CEL expression] (https://github.com/google/cel-spec) on a TemporalAsset with name `temporal_asset`. Example: a Feed with expression ("temporal_asset.deleted == true") will only publish Asset deletions. Other fields of `Expr` are optional. See our [user guide](https://cloud.google.com/asset-inventory/docs/monitoring-asset-changes-with-condition) for detailed instructions.
:param pulumi.Input['FeedContentType'] content_type: Asset content type. If not specified, no content but the asset name and type will be returned.
:param pulumi.Input[Sequence[pulumi.Input[str]]] relationship_types: A list of relationship types to output, for example: `INSTANCE_TO_INSTANCEGROUP`. This field should only be specified if content_type=RELATIONSHIP. * If specified: it outputs specified relationship updates on the [asset_names] or the [asset_types]. It returns an error if any of the [relationship_types] doesn't belong to the supported relationship types of the [asset_names] or [asset_types], or any of the [asset_names] or the [asset_types] doesn't belong to the source types of the [relationship_types]. * Otherwise: it outputs the supported relationships of the types of [asset_names] and [asset_types] or returns an error if any of the [asset_names] or the [asset_types] has no relationship support. See [Introduction to Cloud Asset Inventory](https://cloud.google.com/asset-inventory/docs/overview) for all supported asset types and relationship types.
"""
pulumi.set(__self__, "feed_id", feed_id)
pulumi.set(__self__, "feed_output_config", feed_output_config)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "v1_id", v1_id)
pulumi.set(__self__, "v1_id1", v1_id1)
if asset_names is not None:
pulumi.set(__self__, "asset_names", asset_names)
if asset_types is not None:
pulumi.set(__self__, "asset_types", asset_types)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if relationship_types is not None:
pulumi.set(__self__, "relationship_types", relationship_types)
@property
@pulumi.getter(name="feedId")
def feed_id(self) -> pulumi.Input[str]:
"""
This is the client-assigned asset feed identifier and it needs to be unique under a specific parent project/folder/organization.
"""
return pulumi.get(self, "feed_id")
@feed_id.setter
def feed_id(self, value: pulumi.Input[str]):
pulumi.set(self, "feed_id", value)
@property
@pulumi.getter(name="feedOutputConfig")
def feed_output_config(self) -> pulumi.Input['FeedOutputConfigArgs']:
"""
Feed output configuration defining where the asset updates are published to.
"""
return pulumi.get(self, "feed_output_config")
@feed_output_config.setter
def feed_output_config(self, value: pulumi.Input['FeedOutputConfigArgs']):
pulumi.set(self, "feed_output_config", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The format will be projects/{project_number}/feeds/{client-assigned_feed_identifier} or folders/{folder_number}/feeds/{client-assigned_feed_identifier} or organizations/{organization_number}/feeds/{client-assigned_feed_identifier} The client-assigned feed identifier must be unique within the parent project/folder/organization.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="v1Id")
def v1_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "v1_id")
@v1_id.setter
def v1_id(self, value: pulumi.Input[str]):
pulumi.set(self, "v1_id", value)
@property
@pulumi.getter(name="v1Id1")
def v1_id1(self) -> pulumi.Input[str]:
return pulumi.get(self, "v1_id1")
@v1_id1.setter
def v1_id1(self, value: pulumi.Input[str]):
pulumi.set(self, "v1_id1", value)
@property
@pulumi.getter(name="assetNames")
def asset_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of the full names of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`. See [Resource Names](https://cloud.google.com/apis/design/resource_names#full_resource_name) for more info.
"""
return pulumi.get(self, "asset_names")
@asset_names.setter
def asset_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "asset_names", value)
@property
@pulumi.getter(name="assetTypes")
def asset_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of types of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `"compute.googleapis.com/Disk"` See [this topic](https://cloud.google.com/asset-inventory/docs/supported-asset-types) for a list of all supported asset types.
"""
return pulumi.get(self, "asset_types")
@asset_types.setter
def asset_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "asset_types", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['ExprArgs']]:
"""
A condition which determines whether an asset update should be published. If specified, an asset will be returned only when the expression evaluates to true. When set, `expression` field in the `Expr` must be a valid [CEL expression] (https://github.com/google/cel-spec) on a TemporalAsset with name `temporal_asset`. Example: a Feed with expression ("temporal_asset.deleted == true") will only publish Asset deletions. Other fields of `Expr` are optional. See our [user guide](https://cloud.google.com/asset-inventory/docs/monitoring-asset-changes-with-condition) for detailed instructions.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['ExprArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input['FeedContentType']]:
"""
Asset content type. If not specified, no content but the asset name and type will be returned.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input['FeedContentType']]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter(name="relationshipTypes")
def relationship_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of relationship types to output, for example: `INSTANCE_TO_INSTANCEGROUP`. This field should only be specified if content_type=RELATIONSHIP. * If specified: it outputs specified relationship updates on the [asset_names] or the [asset_types]. It returns an error if any of the [relationship_types] doesn't belong to the supported relationship types of the [asset_names] or [asset_types], or any of the [asset_names] or the [asset_types] doesn't belong to the source types of the [relationship_types]. * Otherwise: it outputs the supported relationships of the types of [asset_names] and [asset_types] or returns an error if any of the [asset_names] or the [asset_types] has no relationship support. See [Introduction to Cloud Asset Inventory](https://cloud.google.com/asset-inventory/docs/overview) for all supported asset types and relationship types.
"""
return pulumi.get(self, "relationship_types")
@relationship_types.setter
def relationship_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "relationship_types", value)
class Feed(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
asset_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asset_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
condition: Optional[pulumi.Input[pulumi.InputType['ExprArgs']]] = None,
content_type: Optional[pulumi.Input['FeedContentType']] = None,
feed_id: Optional[pulumi.Input[str]] = None,
feed_output_config: Optional[pulumi.Input[pulumi.InputType['FeedOutputConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
relationship_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
v1_id: Optional[pulumi.Input[str]] = None,
v1_id1: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a feed in a parent project/folder/organization to listen to its asset updates.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] asset_names: A list of the full names of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1`. See [Resource Names](https://cloud.google.com/apis/design/resource_names#full_resource_name) for more info.
:param pulumi.Input[Sequence[pulumi.Input[str]]] asset_types: A list of types of the assets to receive updates. You must specify either or both of asset_names and asset_types. Only asset updates matching specified asset_names or asset_types are exported to the feed. Example: `"compute.googleapis.com/Disk"` See [this topic](https://cloud.google.com/asset-inventory/docs/supported-asset-types) for a list of all supported asset types.
:param pulumi.Input[pulumi.InputType['ExprArgs']] condition: A condition which determines whether an asset update should be published. If specified, an asset will be returned only when the expression evaluates to true. When set, `expression` field in the `Expr` must be a valid [CEL expression] (https://github.com/google/cel-spec) on a TemporalAsset with name `temporal_asset`. Example: a Feed
import json, hashlib, datetime, uuid, time
import logging
from ckan.logic import side_effect_free
import ckan.logic.schema as schema
from ckan.logic.action import get as core_get
import ckan.model as model
import ckan.plugins as p
from ckanext.geodatagov.plugins import change_resource_details, split_tags
from ckanext.geodatagov.harvesters.arcgis import _slugify
from ckanext.harvest.model import HarvestJob, HarvestObject
try:
from ckan.common import config
except ImportError: # CKAN 2.3
from pylons import config
log = logging.getLogger(__name__)
@side_effect_free
def location_search(context, data_dict):
'''
Basic bounding box geocoder for countries, US states, US counties
and US postal codes.
:param q: The search term. It must have at least 3 characters.
:type q: string
Returns an ordered list of locations matching the query, where the
order is defined by the entity type (countries > states > counties > postal
codes) and alphabetically.
Each result contains the following keys:
:param id: Location identifier
:type id: integer
:param text: Location display name
:type text: string
:param geom: GeoJSON-like representation of the bbox geometry
:type geom: dict
'''
term = data_dict.get('q')
if not term:
raise p.toolkit.ValidationError({'q': 'Missing parameter'})
if len(term) < 3:
raise p.toolkit.ValidationError({'q': 'Provide at least three characters'})
model = context['model']
sql = '''SELECT id, display_name, ST_AsGeoJSON(the_geom) AS geom
FROM locations
WHERE lower(name) LIKE :term
ORDER BY type_order, display_name'''
q = model.Session.execute(sql, {'term': '{0}%'.format(term.lower())})
out = []
for row in q:
out.append({'id': row['id'],
'text': row['display_name'],
'geom': json.loads(row['geom'])})
return out
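# Each entry in the returned list looks like (values illustrative):
#   {'id': 42, 'text': 'California', 'geom': {'type': 'Polygon', 'coordinates': [...]}}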
def group_show(context, data_dict):
context.update({'limits': {'packages': 2}})
return core_get.group_show(context, data_dict)
def package_show_rest(context, data_dict):
data_dict = core_get.package_show_rest(context, data_dict)
extras = data_dict.get('extras', {})
rollup = extras.pop('extras_rollup', None)
if rollup:
rollup = json.loads(rollup)
for key, value in rollup.items():
extras[key] = value
return data_dict
def organization_show(context, data_dict):
context.update({'limits': {'packages': 2}})
return core_get.organization_show(context, data_dict)
@side_effect_free
def organization_list(context, data_dict):
model = context['model']
results = core_get.organization_list(context, data_dict)
if not data_dict.get('all_fields'):
return results
query_results = model.Session.query(
model.GroupExtra.group_id,
model.GroupExtra.value
).filter_by(
key='organization_type'
).filter(
model.GroupExtra.group_id.in_([group['id'] for group in results])
).all()
lookup = dict((row[0], row[1]) for row in query_results)
for group in results:
organization_type = lookup.get(group['id'])
if organization_type:
group['organization_type'] = organization_type
return results
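# Note: when all_fields is requested, the organization_type group extra is fetched
# for all returned groups in a single query and attached to each result, avoiding
# one query per organization.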
def resource_show(context, data_dict):
resource = core_get.resource_show(context, data_dict)
change_resource_details(resource)
return resource
MAPPING = {"title": "title",
"theme": "extras__theme",
"accessLevel": "extras__access-level",
"identifier": "id",
"organizationId": "owner_org",
"organizationName": "owner_name",
"description": "notes",
"keyword" : "extras__tags",
"person": "extras__person",
"accrualPeriodicity": "extras__frequency-of-update",
"spatial": "extras__spatial-text",
"references": "extras__references",
"dataDictionary": "extras__data-dictiionary",
"temporal": "extras__dataset-reference-date",
"issued": "extras__issued",
"modified": "extras__metadata-date",
"mbox": "extras__contact-email",
"granularity": "extras__granularity",
"license": "extras__licence",
"dataQuality": "extras__data-quality"}
ORG_MAPPING = { 'national-park-service':'nps-gov',
'u-s-fish-and-wildlife-service':'fws-gov',
'u-s-geological-survey':'usgs-gov',
'bureau-of-land-management':'blm-gov',
'bureau-of-ocean-energy-management':'boem-gov',
'office-of-surface-mining':'osmre-gov',
'bureau-of-reclamation':'usbr-gov'}
def create_data_dict(record):
data_dict = {"extras":[{"key": "metadata-source", "value": "dms"},
{"key": "resource-type", "value": "Dataset"},
],
"resources": []}
extras = data_dict["extras"]
distributions = record['distribution']
for distribution in distributions:
data_dict['resources'].append({'url': distribution['accessURL'],
'format': distribution['format'],
'size_text': distribution.get('size') })
for key, value in record.items():
new_key = MAPPING.get(key)
if not new_key:
continue
if not value:
continue
if new_key.startswith('extras__'):
extras.append({"key": new_key[8:], "value": value})
else:
data_dict[new_key] = value
return data_dict
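# Sketch of the transformation (keys taken from MAPPING above): a data.json
# record's "title" becomes the dataset title, "description" becomes notes,
# "modified" lands in the "metadata-date" extra, "keyword" in the "tags" extra,
# and every entry of "distribution" becomes a resource with url/format/size_text.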
def group_catagory_tag_update(context, data_dict):
""" If data_dict include "categories" a new
extra will be added:
__category_tag_{group.id} = categories
"""
p.toolkit.check_access('group_catagory_tag_update', context)
categories = data_dict.get('categories', None)
if categories is None:
return data_dict
package_id = data_dict.get('id')
group_id = data_dict.get('group_id')
model = context['model']
group = model.Group.get(group_id)
if not group:
raise Exception('A group is required')
key = '__category_tag_%s' % group.id
pkg_dict = p.toolkit.get_action('package_show')(context, {'id': package_id})
extras = pkg_dict['extras']
new_extras = []
for extra in extras:
if extra.get('key') != key:
new_extras.append(extra)
if categories:
new_extras.append({'key': key, 'value': json.dumps(categories)})
pkg_dict['extras'] = new_extras
pkg_dict = p.toolkit.get_action('package_update')(context, pkg_dict)
return data_dict
def datajson_create(context, data_dict):
model = context['model']
new_package = create_data_dict(data_dict)
owner_org = model.Group.get(new_package['owner_org'])
group_name = new_package.pop('owner_name', None)
new_package['name'] = _slugify(new_package['title'])[:80]
existing_package = model.Package.get(new_package['name'])
if existing_package:
new_package['name'] = new_package['name'] + '-' + new_package['id'].lower()
if not owner_org:
p.toolkit.get_action('organization_create')(
context,
{'name': new_package['owner_org'], 'title': group_name,
'extras': [{'key': 'organization_type', 'value': "Federal Government"}]})
context['schema'] = schema.default_create_package_schema()
context['schema']['id'] = [p.toolkit.get_validator('not_empty')]
context['return_id_only'] = True
return p.toolkit.get_action('package_create')(context, new_package)
def datajson_update(context, data_dict):
new_package = create_data_dict(data_dict)
model = context['model']
owner_org = model.Group.get(new_package['owner_org'])
group_name = new_package.pop('owner_name', None)
old_package = p.toolkit.get_action('package_show')(
{'model': model, 'ignore_auth': True}, {"id":new_package['id']})
old_resources = old_package['resources']
if not owner_org:
p.toolkit.get_action('organization_create')(
context,
{'name': new_package['owner_org'], 'title': group_name,
'extras': [{'key': 'organization_type', 'value': "Federal Government"}]})
for num, resource in enumerate(new_package['resources']):
try:
old_id = old_resources[num]['id']
resource['id'] = old_id
except IndexError:
pass
context['return_id_only'] = True
p.toolkit.get_action('package_update')(context, new_package)
def doi_create(context, data_dict):
model = context['model']
new_package = data_dict
source_hash = hashlib.sha1(json.dumps(data_dict, sort_keys=True)).hexdigest()
new_package["extras"].append({"key": "source_hash", "value": source_hash})
new_package["extras"].append({"key": "metadata-source", "value": "doi"})
new_package["extras"].append({"key": "source_doi_import_identifier", "value": True})
owner_org = model.Group.get(ORG_MAPPING.get(new_package['organization']['name']))
if not owner_org:
print str(datetime.datetime.now()) + ' Failed to import doi id ' + new_package['id'] + '. Organization ' + new_package['organization']['name'] + ' does not exist.'
return
new_package['owner_org'] = owner_org.name
group_name = new_package.pop('owner_name', None)
new_package['name'] = _slugify(new_package['title'])[:80]
existing_package = model.Package.get(new_package['name'])
if existing_package:
new_package['name'] = new_package['name'] + '-' + str(int(time.time()))
resources = []
for resource in new_package['resources']:
resource.pop('resource_group_id', None)
resource.pop('revision_id', None)
resource.pop('id', None)
resources.append(resource)
new_package['resources'] = resources
obj = HarvestObject(
guid=uuid.uuid4().hex,
job=context['harvest_job'],
content=context['harvestobj'])
obj.save()
new_package["extras"].append({"key": "harvest_object_id", "value": obj.id})
context['schema'] = schema.default_create_package_schema()
context['schema']['id'] = [p.toolkit.get_validator('not_empty')]
context['return_id_only'] = True
p.toolkit.get_action('package_create')(context, new_package)
print str(datetime.datetime.now()) + ' Imported doi id ' + new_package['id']
def doi_update(context, data_dict):
model = context['model']
new_package = data_dict
source_hash = hashlib.sha1(json.dumps(data_dict, sort_keys=True)).hexdigest()
old_package = p.toolkit.get_action('package_show')(
{'model': model, 'ignore_auth': True}, {"id":new_package['id']})
for extra in old_package['extras']:
if extra['key'] == 'source_hash':
old_source_hash = extra['value']
break
else:
old_source_hash = None
if source_hash == old_source_hash and old_package.get('state') =='active':
print str(datetime.datetime.now()) + ' No change for doi id ' + new_package['id']
return
new_package["extras"].append({"key": "source_hash", "value": source_hash})
new_package["extras"].append({"key": "metadata-source", "value": "doi"})
new_package["extras"].append({"key": "source_doi_import_identifier", "value": True})
new_package.pop("name", None)
owner_org = model.Group.get(ORG_MAPPING.get(new_package['organization']['name']))
if not owner_org:
print str(datetime.datetime.now()) + ' Failed to update doi id ' + new_package['id'] + '. Organization ' + new_package['organization']['name'] + ' does not exist.'
return
new_package['owner_org'] = owner_org.name
group_name = new_package.pop('owner_name', None)
resources = []
for resource in new_package['resources']:
resource.pop('resource_group_id', None)
resource.pop('revision_id', None)
resource.pop('id', None)
resources.append(resource)
new_package['resources'] = resources
obj = HarvestObject(
guid=uuid.uuid4().hex,
job=context['harvest_job'],
content=context['harvestobj'])
obj.save()
new_package["extras"].append({"key": "harvest_object_id", "value": obj.id})
context['return_id_only'] = True
p.toolkit.get_action('package_update')(context, new_package)
print str(datetime.datetime.now()) + ' Updated doi id ' + new_package['id']
def preserve_category_tags(context, data_dict):
""" Look category tags in previous version before update dataset """
# get groups from previous dataset version
pkg_dict = p.toolkit.get_action('package_show')(context, {'id': data_dict['id']})
if 'groups' not in data_dict:
data_dict['groups'] = pkg_dict.get('groups', [])
# find __category_tag_* extras in the previous dataset version
cats = {}
for extra in pkg_dict.get('extras', []):
if extra['key'].startswith('__category_tag_'):
cats[extra['key']] = extra['value']
# check extras in the new dataset and append any missing category tags
extras = data_dict.get('extras', [])
for item in extras:
if item['key'] in cats:
del cats[item['key']]
for cat in cats:
extras.append({'key': cat, 'value': cats[cat]})
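# Net effect: any __category_tag_<group id> extra present on the stored dataset
# but absent from the incoming update is appended, so category tags survive
# package updates.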
# source ignored as it is queried directly
EXTRAS_ROLLUP_KEY_IGNORE = ["metadata-source", "tags"]
def rollup_save_action(context, data_dict):
""" to run before create actions """
extras_rollup = {}
new_extras = []
new_extras_rollup = {}
for extra in data_dict.get('extras', []):
if extra['key'] in EXTRAS_ROLLUP_KEY_IGNORE:
new_extras.append(extra)
elif extra['key'] == "extras_rollup":
new_extras_rollup = json.loads(extra['value'])
else:
extras_rollup[extra['key']] = extra['value']
# update new values
new_extras_rollup.update(extras_rollup)
## If we use SOLR, try to index valid spatial data (via ckanext-spatial)
if p.toolkit.check_ckan_version(min_version='2.8'):
search_backend = config.get('ckanext.spatial.search_backend', 'postgis')
log.debug('Search backend {}'.format(search_backend))
if search_backend == 'solr':
old_spatial = new_extras_rollup.get('spatial', None)
if old_spatial is not None:
log.info('Old Spatial found {}'.format(old_spatial))
# TODO look for more not-found location names
if old_spatial in ['National', 'US']:
old_spatial = 'United States'
new_spatial = translate_spatial(old_spatial)
if new_spatial is not None:
log.info('New Spatial transformed {}'.format(new_spatial))
# add the real spatial
new_extras.append({'key': 'spatial', 'value': new_spatial})
# move the rolled-up spatial to old-spatial so this process is skipped next time
new_extras_rollup['old-spatial'] = new_extras_rollup.pop('spatial')
if new_extras_rollup:
new_extras.append({'key': 'extras_rollup', 'value': json.dumps(new_extras_rollup)})
data_dict['extras'] = new_extras
def translate_spatial(old_spatial):
""" catalog-classic use a non-valid spatial extra.
Sometimes uses words (like "California") or raw coordinates (like "-96.8518,43.4659,-96.5944,43.6345")
catalog-next use ckan/spatial and require spatial to be valid geojson
When possible we need to transform this data so before_index at
ckanext-spatial could save in solr.
To save in solr we need a polygon like this:
{
"type":"Polygon",
"coordinates":[
[
[2.05827, 49.8625],
[2.05827, 55.7447],
[-6.41736, 55.7447],
[-6.41736, 49.8625],
[2.05827, 49.8625]
]
]
} """
# Check whether the value is already valid JSON
try:
geometry = json.loads(old_spatial)
# If we already have a good geometry, use it
return old_spatial
except:
pass
geojson_tpl = '{{"type": "Polygon", "coordinates": [[[{minx}, {miny}], [{minx}, {maxy}], [{maxx}, {maxy}], [{maxx}, {miny}], [{minx}, {miny}]]]}}'
# If we have 4 numbers separated by commas, transform them
please pass async_req=True
>>> thread = api.address_geocode_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateAddressRequest input: Input parse request (required)
:return: ValidateAddressResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method address_geocode" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `address_geocode`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/address/geocode', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ValidateAddressResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
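# Hypothetical usage sketch (the client/class names are assumptions, not shown in
# this excerpt):
#   api_instance = AddressApi(api_client)
#   request = ValidateAddressRequest(...)   # structured street address fields
#   response = api_instance.address_geocode(request)  # -> ValidateAddressResponse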
def address_get_country_currency(self, input, **kwargs): # noqa: E501
"""Get the currency of the input country # noqa: E501
Gets the currency information for the input country, including the ISO three-letter country code, currency symbol, and English currency name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_country_currency(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateCountryRequest input: Input request (required)
:return: ValidateCountryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.address_get_country_currency_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.address_get_country_currency_with_http_info(input, **kwargs) # noqa: E501
return data
def address_get_country_currency_with_http_info(self, input, **kwargs): # noqa: E501
"""Get the currency of the input country # noqa: E501
Gets the currency information for the input country, including the ISO three-letter country code, currency symbol, and English currency name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_country_currency_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateCountryRequest input: Input request (required)
:return: ValidateCountryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method address_get_country_currency" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `address_get_country_currency`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/address/country/get-currency', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ValidateCountryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def address_get_country_region(self, input, **kwargs): # noqa: E501
"""Get the region, subregion and continent of the country # noqa: E501
Gets the continent information including region and subregion for the input country. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_country_region(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateCountryRequest input: Input request (required)
:return: ValidateCountryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.address_get_country_region_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.address_get_country_region_with_http_info(input, **kwargs) # noqa: E501
return data
def address_get_country_region_with_http_info(self, input, **kwargs): # noqa: E501
"""Get the region, subregion and continent of the country # noqa: E501
Gets the continent information including region and subregion for the input country. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_country_region_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ValidateCountryRequest input: Input request (required)
:return: ValidateCountryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method address_get_country_region" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `address_get_country_region`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/address/country/get-region', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ValidateCountryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def address_get_timezone(self, input, **kwargs): # noqa: E501
"""Gets IANA/Olsen time zones for a country # noqa: E501
Gets the IANA/Olsen time zones for a country. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_timezone(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param GetTimezonesRequest input: Input request (required)
:return: GetTimezonesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.address_get_timezone_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.address_get_timezone_with_http_info(input, **kwargs) # noqa: E501
return data
def address_get_timezone_with_http_info(self, input, **kwargs): # noqa: E501
"""Gets IANA/Olsen time zones for a country # noqa: E501
Gets the IANA/Olsen time zones for a country. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.address_get_timezone_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param GetTimezonesRequest input: Input request (required)
:return: GetTimezonesResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method address_get_timezone" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `address_get_timezone`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/validate/address/country/get-timezones', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetTimezonesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
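# --- Illustrative usage (not part of the generated client) -------------------
# A minimal sketch of driving the wrapper above. The package name
# (`cloudmersive_validate_api_client`), the `AddressApi` class name and the
# `country_code_or_name` field are assumptions based on the usual
# swagger-codegen layout; only `GetTimezonesRequest` and the 'Apikey' auth
# setting come from the code above.
def _example_address_get_timezone(api_key):
    import cloudmersive_validate_api_client as cm
    configuration = cm.Configuration()
    configuration.api_key['Apikey'] = api_key  # matches auth_settings = ['Apikey']
    api = cm.AddressApi(cm.ApiClient(configuration))
    request = cm.GetTimezonesRequest(country_code_or_name='Germany')  # hypothetical field
    # Synchronous by default; pass async_req=True to get a thread back instead.
    return api.address_get_timezone(request)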
def address_normalize_address(self, input, **kwargs): # noqa: E501
"""Normalize a street address # noqa: E501
Normalizes an input structured street address. If the address is valid, also returns the latitude and longitude of the address. Supports all major international addresses. # noqa: E501
This method
return 1;
}
self_sKeys.sort();
other_sKeys.sort();
var c, sKey;
for (var idx = 0; idx < selfLen; idx++) {
c = @{{cmp}}(selfObj[sKey = self_sKeys[idx]][0], otherObj[other_sKeys[idx]][0]);
if (c != 0) {
return c;
}
c = @{{cmp}}(selfObj[sKey][1], otherObj[sKey][1]);
if (c != 0) {
return c;
}
}
return 0;""")
def __len__(self):
size = 0
JS("""
for (var i in @{{self}}.__object) @{{size}}++;
""")
return INT(size)
#def has_key(self, key):
# return self.__contains__(key)
#See monkey patch at the end of the dict class definition
def __delitem__(self, key):
JS("""
var sKey = (@{{key}}===null?null:(@{{key}}.hasOwnProperty("$H")?@{{key}}.$H:(typeof @{{key}} == 'string' ? '$s' + @{{key}} : (@{{key}}.__number__ ? '$n' + @{{key}}: @{{__hash}}(@{{key}})))));
delete @{{self}}.__object[sKey];
""")
def __contains__(self, key):
JS("""
var sKey = (@{{key}}===null?null:(@{{key}}.hasOwnProperty("$H")?@{{key}}.$H:(typeof @{{key}} == 'string' ? '$s' + @{{key}} : (@{{key}}.__number__ ? '$n' + @{{key}}: @{{__hash}}(@{{key}})))));
return typeof @{{self}}.__object[sKey] == 'undefined' ? false : true;
""")
has_key = __contains__
def keys(self):
JS("""
var keys=@{{list}}.__new__(@{{list}}),
selfObj = @{{self}}.__object,
__array = keys.__array,
i = 0;
for (var sKey in @{{self}}.__object) {
__array[i++] = selfObj[sKey][0];
}
return keys;
""")
@staticmethod
def fromkeys(iterable, v=None):
d = {}
for i in iterable:
d[i] = v
return d
def values(self):
JS("""
var values=@{{list}}.__new__(@{{list}});
var i = 0;
for (var key in @{{self}}.__object) {
values.__array[i++] = @{{self}}.__object[key][1];
}
return values;
""")
def items(self):
JS("""
var items = @{{list}}.__new__(@{{list}});
var i = 0;
for (var key in @{{self}}.__object) {
var kv = @{{self}}.__object[key];
items.__array[i++] = @{{list}}(kv);
}
return items;
""")
def __iter__(self):
JS("""
var keys = new Array();
var i = 0;
for (var key in @{{self}}.__object) {
keys[i++] = @{{self}}.__object[key][0];
}
return new $iter_array(keys);
""")
iterkeys = __iter__
def __enumerate__(self):
JS("""
var keys = new Array();
var i = 0;
for (var key in @{{self}}.__object) {
keys[i++] = @{{self}}.__object[key][0];
}
return new $enumerate_array(keys);
""")
#def iterkeys(self):
# return self.__iter__()
#See monkey patch at the end of the dict class definition
def itervalues(self):
return self.values().__iter__();
def iteritems(self):
return self.items().__iter__();
def setdefault(self, key, default_value):
JS("""
var sKey = (@{{key}}===null?null:(@{{key}}.hasOwnProperty("$H")?@{{key}}.$H:(typeof @{{key}} == 'string' ? '$s' + @{{key}} : (@{{key}}.__number__ ? '$n' + @{{key}}: @{{__hash}}(@{{key}})))));
return typeof @{{self}}.__object[sKey] == 'undefined' ? (@{{self}}.__object[sKey]=[@{{key}}, @{{default_value}}])[1] : @{{self}}.__object[sKey][1];
""")
def get(self, key, default_value=None):
JS("""
var empty = true;
for (var sKey in @{{self}}.__object) {
empty = false;
break;
}
if (empty) return @{{default_value}};
var sKey = (@{{key}}===null?null:(@{{key}}.hasOwnProperty("$H")?@{{key}}.$H:(typeof @{{key}} == 'string' ? '$s' + @{{key}} : (@{{key}}.__number__ ? '$n' + @{{key}}: @{{__hash}}(@{{key}})))));
return typeof @{{self}}.__object[sKey] == 'undefined' ? @{{default_value}} : @{{self}}.__object[sKey][1];
""")
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
d = args[0]
if hasattr(d, "iteritems"):
for k,v in d.iteritems():
self[k] = v
elif hasattr(d, "keys"):
for k in d:
self[k] = d[k]
else:
for k, v in d:
self[k] = v
if kwargs:
for k,v in kwargs.iteritems():
self[k] = v
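# Illustrative only: the three argument forms accepted by update() above,
# mirroring CPython's dict.update() contract (shown with a plain Python dict).
def _example_dict_update():
    d = {}
    d.update({'a': 1})               # mapping (has iteritems()/keys())
    d.update([('b', 2), ('c', 3)])   # iterable of key/value pairs
    d.update(e=4)                    # keyword arguments
    return d                         # {'a': 1, 'b': 2, 'c': 3, 'e': 4}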
def pop(self, key):
JS("""
if (arguments.length > 2) {
throw $pyce(@{{TypeError}}("pop expected at most 3 arguments, got "
+ (1 + arguments.length).toString()));
}
var default_value = arguments.length == 2 ? arguments[1] : undefined;
var sKey = (@{{key}}===null?null:(@{{key}}.hasOwnProperty("$H")?@{{key}}.$H:(typeof @{{key}} == 'string' ? '$s' + @{{key}} : (@{{key}}.__number__ ? '$n' + @{{key}}: @{{__hash}}(@{{key}})))));
var value = @{{self}}.__object[sKey];
if (typeof value == 'undefined') {
if (default_value === undefined) {
throw $pyce(@{{KeyError}}(@{{key}}));
}
return default_value;
}
delete @{{self}}.__object[sKey];
return value[1];
""")
def popitem(self):
JS("""
for (var sKey in @{{self}}.__object) {
var result = @{{self}}.__object[sKey];
delete @{{self}}.__object[sKey];
return result[1];
}
""")
raise KeyError('popitem(): dictionary is empty')
def getObject(self):
"""
Return the javascript Object which this class uses to store
dictionary keys and values
"""
return self.__object
def copy(self):
JS("""
var result = @{{:dict}}.__new__(@{{:dict}});
var data = @{{self}}.__object;
for (var sKey in data) {
result.__object[sKey] = data[sKey];
}
return result;
""")
def clear(self):
self.__object = JS("{}")
def __repr__(self):
if callable(self):
return "<type '%s'>" % self.__name__
JS("""
var keys = new Array();
for (var key in @{{self}}.__object)
keys.push(key);
var s = "{";
for (var i=0; i<keys.length; i++) {
var v = @{{self}}.__object[keys[i]];
s += @{{repr}}(v[0]) + ": " + @{{repr}}(v[1]);
if (i < keys.length-1)
s += ", ";
}
s += "}";
return s;
""")
__str__ = __repr__
JS("@{{dict}}.toString = function() { return this.__is_instance__ ? this.__repr__() : '<type dict>'; };")
_copy_dict_part = JS("""function (dict, index) {
var result = [];
var data = dict.__object;
for (var key in data) {
result.push(data[key][index]);
}
return result;
}""")
class BaseSet(object):
def __new__(cls):
# initialize memory
self = object.__new__(cls)
JS("""
@{{self}}.__object = {};
return @{{self}};
""")
JS('@{{__new__}}.$ignore__args__ = true;')
def __cmp__(self, other):
# We (mis)use cmp here for the missing __gt__/__ge__/...
# if self == other : return 0
# if self is subset of other: return -1
# if self is superset of other: return 1
# else return 2
if not isSet(other):
return 2
#other = frozenset(other)
JS("""
var selfObj = @{{self}}.__object,
otherObj = @{{other}}.__object,
selfMismatch = false,
otherMismatch = false;
if (selfObj === otherObj) {
throw $pyce(@{{TypeError}}("Set operations must use two sets."));
}
for (var sVal in selfObj) {
if (!(sVal in otherObj)) {
selfMismatch = true;
break;
}
}
for (var sVal in otherObj) {
if (!(sVal in selfObj)) {
otherMismatch = true;
break;
}
}
if (selfMismatch && otherMismatch) return 2;
if (selfMismatch) return 1;
if (otherMismatch) return -1;
return 0;
""")
def __nonzero__(self):
JS("""
for (var key in @{{self}}.__object) {
return true;
}
return false;
""")
def __contains__(self, value):
if isSet(value) == 1: # An instance of set
# Use frozenset hash
JS("""
var hashes = new Array(),
obj = @{{self}}.__object,
i = 0;
for (var v in obj) {
hashes[i++] = v;
}
hashes.sort();
var h = hashes.join("|");
return (h in obj);
""")
JS("""return @{{hash}}(@{{value}}) in @{{self}}.__object;""")
def __iter__(self):
JS("""
var items = [], i = 0, obj = @{{self}}.__object;
for (var key in obj) {
items[i++] = obj[key];
}
return new $iter_array(items);
""")
def __len__(self):
size=0.0
JS("""
for (var i in @{{self}}.__object) @{{size}}++;
""")
return INT(size)
def __repr__(self):
if callable(self):
return "<type '%s'>" % self.__name__
JS("""
var values = new Array();
var i = 0,
obj = @{{self}}.__object,
s = @{{self}}.__name__ + "([";
for (var sVal in obj) {
values[i++] = @{{repr}}(obj[sVal]);
}
s += values.join(", ");
s += "])";
return s;
""")
__str__ = __repr__
def __and__(self, other):
""" Return the intersection of two sets as a new set.
only available under --number-classes
"""
if not isSet(other):
return NotImplemented
return self.intersection(other)
def __or__(self, other):
""" Return the union of two sets as a new set..
only available under --number-classes
"""
if not isSet(other):
return NotImplemented
return self.union(other)
def __xor__(self, other):
""" Return the symmetric difference of two sets as a new set..
only available under --number-classes
"""
if not isSet(other):
return NotImplemented
return self.symmetric_difference(other)
def __sub__(self, other):
""" Return the difference of two sets as a new Set..
only available under --number-classes
"""
if not isSet(other):
return NotImplemented
return self.difference(other)
def copy(self):
new_set = set()
JS("""
var obj = @{{new_set}}.__object,
selfObj = @{{self}}.__object;
for (var sVal in selfObj) {
obj[sVal] = selfObj[sVal];
}
""")
return new_set
def difference(self, other):
""" Return the difference of two sets as a new set.
(i.e. all elements that are in this set but not the other.)
"""
if not isSet(other):
other = frozenset(other)
new_set = set()
JS("""
var obj = @{{new_set}}.__object,
selfObj = @{{self}}.__object,
otherObj = @{{other}}.__object;
for (var sVal in selfObj) {
if (!(sVal in otherObj)) {
obj[sVal] = selfObj[sVal];
}
}
""")
return new_set
def intersection(self, other):
""" Return the intersection of two sets as a new set.
(i.e. all elements that are in both sets.)
"""
if not isSet(other):
other = frozenset(other)
new_set = set()
JS("""
var obj = @{{new_set}}.__object,
selfObj = @{{self}}.__object,
otherObj = @{{other}}.__object;
for (var sVal in selfObj) {
if (sVal in otherObj) {
obj[sVal] = selfObj[sVal];
}
}
""")
return new_set
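# Illustrative only: the set algebra the methods above (and the __and__/__or__/
# __xor__/__sub__ wrappers) are meant to reproduce, shown with plain Python sets.
def _example_set_algebra():
    a, b = {1, 2, 3}, {2, 3, 4}
    assert a.intersection(b) == {2, 3}           # a & b
    assert a.difference(b) == {1}                # a - b
    assert a.union(b) == {1, 2, 3, 4}            # a | b
    assert a.symmetric_difference(b) == {1, 4}   # a ^ b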
def isdisjoint(self, other):
""" Return True if two sets have a null intersection.
"""
if
"""
Ibutsu API
A system to store and query test results # noqa: E501
The version of the OpenAPI document: 1.13.4
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ibutsu_client.api_client import ApiClient, Endpoint as _Endpoint
from ibutsu_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from ibutsu_client.model.dashboard import Dashboard
from ibutsu_client.model.dashboard_list import DashboardList
class DashboardApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.add_dashboard_endpoint = _Endpoint(
settings={
'response_type': (Dashboard,),
'auth': [
'jwt'
],
'endpoint_path': '/dashboard',
'operation_id': 'add_dashboard',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'dashboard',
],
'required': [
'dashboard',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'dashboard':
(Dashboard,),
},
'attribute_map': {
},
'location_map': {
'dashboard': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_dashboard_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'jwt'
],
'endpoint_path': '/dashboard/{id}',
'operation_id': 'delete_dashboard',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client
)
self.get_dashboard_endpoint = _Endpoint(
settings={
'response_type': (Dashboard,),
'auth': [
'jwt'
],
'endpoint_path': '/dashboard/{id}',
'operation_id': 'get_dashboard',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_dashboard_list_endpoint = _Endpoint(
settings={
'response_type': (DashboardList,),
'auth': [
'jwt'
],
'endpoint_path': '/dashboard',
'operation_id': 'get_dashboard_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'user_id',
'page',
'page_size',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'user_id':
(str,),
'page':
(int,),
'page_size':
(int,),
},
'attribute_map': {
'project_id': 'project_id',
'user_id': 'user_id',
'page': 'page',
'page_size': 'pageSize',
},
'location_map': {
'project_id': 'query',
'user_id': 'query',
'page': 'query',
'page_size': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_dashboard_endpoint = _Endpoint(
settings={
'response_type': (Dashboard,),
'auth': [
'jwt'
],
'endpoint_path': '/dashboard/{id}',
'operation_id': 'update_dashboard',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'id',
'dashboard',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'dashboard':
(Dashboard,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'dashboard': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def add_dashboard(
self,
dashboard,
**kwargs
):
"""Create a dashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_dashboard(dashboard, async_req=True)
>>> result = thread.get()
Args:
dashboard (Dashboard): Dashboard
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Dashboard
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['dashboard'] = \
dashboard
return self.add_dashboard_endpoint.call_with_http_info(**kwargs)
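# Illustrative only: a minimal sketch of calling the endpoint defined above.
# `Configuration`, `access_token` and the `Dashboard(title=...)` field are
# assumptions based on the usual openapi-generator layout, not this file.
def _example_add_dashboard(jwt_token):
    import ibutsu_client
    configuration = ibutsu_client.Configuration()
    configuration.access_token = jwt_token          # 'jwt' auth per the endpoint settings
    client = ibutsu_client.ApiClient(configuration)
    api = DashboardApi(client)
    dashboard = Dashboard(title="nightly-results")  # hypothetical field name
    return api.add_dashboard(dashboard)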
def delete_dashboard(
self,
id,
**kwargs
):
"""Delete a dashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dashboard(id, async_req=True)
>>> result = thread.get()
Args:
id (str): ID of dashboard to delete
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.delete_dashboard_endpoint.call_with_http_info(**kwargs)
def get_dashboard(
self,
id,
**kwargs
):
"""Get a single dashboard by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard(id, async_req=True)
>>> result = thread.get()
Args:
id (str): ID of test dashboard
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Dashboard
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.get_dashboard_endpoint.call_with_http_info(**kwargs)
def get_dashboard_list(
self,
**kwargs
):
"""Get a list of dashboards # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dashboard_list(async_req=True)
>>> result = thread.get()
Keyword Args:
project_id (str): Filter dashboards by project ID. [optional]
user_id (str): Filter dashboards by user ID. [optional]
page (int): Set the page of items to return, defaults to 1. [optional]
page_size (int): Set the number of items per page, defaults to 25. [optional]
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
by default None.
This is often the output for this spaxel's :func:`marginal_fits`.
Same spatial shape as `fitList` (nxm)
final_choices : array of int, optional
The fits chosen by the user, if any, by default None.
Same spatial shape as `fitList` (nxm)
onlyChecked : bool, optional
Whether to save only the fits flagged for manual checking, by default False
title : str, optional
The figure's suptitle, by default "". The function will prepend to `title`
the spaxel's coordinates, and append
"auto choice _", and if the user has checked the fits,
"user choice _" is also appended.
See Also
--------
plot_ModelResults_pixel : The plotting function used for each spaxel.
"""
# I'm sure there's a better way to do this but I can't think about it now.
# easy case: if onlyChecked is True but user_check is None or contains
# no True values, then exit function.
if onlyChecked:
if (user_check is None) or (not user_check.any()):
print("No user checked pixels. Skipping save user checked plots.")
return
# If the inputs are None, make it so that when looping later,
# it can be addressed correctly.
try:
emptyNone = np.full(fitList[0].shape, None)
except AttributeError:
emptyNone = None
if computer_choices is None:
computer_choices = emptyNone
if user_check is None:
user_check = emptyNone
if final_choices is None:
final_choices = emptyNone
print("Saving plots to {}, this may take a while.".format(filename))
total = np.size(computer_choices)
count = 0
# if there are no fits for a given pixel, record that pixel here
# to deal with later.
lst_no_fits = []
with PdfPages(filename) as pdf:
# loops over all spaxels:
for this_pix in np.ndindex(computer_choices.shape):
if onlyChecked:
if not user_check[this_pix]:
continue # skip this one.
this_fitList = [x[this_pix] for x in fitList]
no_fits = all([v is None for v in this_fitList])
if no_fits:
lst_no_fits += [str(this_pix)]
count += 1
if count % 100 == 0:
print("Saved {}/{}".format(count, total))
continue
# create the figure to save.
fig = plot_ModelResults_pixel(
this_fitList,
title=str(this_pix) + " " + title,
computer_choice=computer_choices[this_pix],
user_checked=user_check[this_pix],
user_choice=final_choices[this_pix],
)
# intelligent interrupt to allow you to view the pdf
try:
pdf.savefig(fig)
except KeyboardInterrupt:
raise
plt.close(fig)
# silly progress counter.
count += 1
if count % 100 == 0:
print("Saved {}/{}".format(count, total))
# # deal with the list of not-fit spaxels.
# # This section should add a pdf page plot which is just a list of
# # spaxel coordinates with no plot associated.
# # The intention here was you
# # could search for a spaxel coordinates, and still find it even if there
# # were no fits. My pdf viewer didn't work like that though.... so maybe
# # this can be eliminated.
# # add 1 more page if we looped over a spaxel that wasn't plotted.
# if len(lst_no_fits) > 0:
# pix_per_line = int(len(fitList) * 10 / 3.0) # 10 works well if 3 plots
# num_lines = int(2 + np.ceil(len(lst_no_fits) / pix_per_line))
# str_no_fits = "\n".join(
# [
# ", ".join(lst_no_fits[x : x + pix_per_line])
# for x in range(0, len(lst_no_fits), pix_per_line)
# ]
# )
# fontsize = 14
# fig_height = (num_lines * (1 / 72.0) * (fontsize + 2)) + 2 * 0.04167
# fig = plt.figure(figsize=(4 * len(fitList), fig_height), num="No Fits")
# plt.text(
# 0.5,
# 0.5,
# "No Fits\n{}\nEnd of List".format(str_no_fits),
# horizontalalignment="center",
# verticalalignment="center",
# transform=plt.gca().transAxes,
# fontsize=fontsize,
# )
# plt.gca().set_axis_off()
# pdf.savefig(fig)
# plt.close(fig)
print("Finished saving to {}".format(filename))
# def compile_spaxel_info_mc(mc_fits, keys_to_save):
# # handle special case inputs:
# if mc_fits is None:
# return None
# if len(mc_fits) == 0:
# # gives an entry for every output desired: median, std, and median_err for each key
# return [None] * 3 * len(keys_to_save)
# temp = save_fit_stats(None, mc_fits, fit_info=None, model_keys="all")
# names = temp[0][1:]
# info = np.ma.masked_equal([t[1:] for t in temp[1:]], None)
# # These next lines take care of None values for any parameter error (e.g. as when given when the)
# # fit converges at the parameter limit. If all have None, will enter 0.
# info.set_fill_value(0)
# info = info.astype("float")
# mc_medians = np.ma.median(info, axis=0).filled()
# mc_std = np.ma.std(info, axis=0).filled()
# data_row = []
# for key in keys_to_save:
# try:
# ix = names.index(key)
# # label_row += ["median_{}".format(key), "stdev_{}".format(key)]
# data_row += [mc_medians[ix], mc_std[ix]]
# # label_row += ["av_error_{}".format(key)]
# ix = names.index(key + "_err")
# data_row += [mc_medians[ix]]
# except ValueError:
# data_row += 3 * [None]
# return data_row
# def create_label_row_mc(keys_to_save):
# label_row = []
# for key in keys_to_save:
# label_row += ["median_{}".format(key), "stdev_{}".format(key)]
# label_row += ["av_error_{}".format(key)]
# return label_row
def extract_spaxel_info(
fit_results, fit_info=None, model_params=None, result_dict=None, names_only=False
):
"""Extract ModelResult attributes into numpy array.
This function broadcasts :meth:`threadcount.lmfit_ext.summary_array` to all ModelResult
in `fit_results` and handles None values.
If `names_only` is True, the output is a list containing the attribute names.
(simply `fit_info` + `model_params`.)
If `result_dict` is not None, the output will be appended to `result_dict`,
otherwise a new :class:`ResultDict` will be returned. The keys in the dict
correspond to the entries in `fit_info` and `model_params`.
Each entry of results[key] will have the same spatial shape as `fit_results`,
giving an easy way of viewing spatial maps of these parameters.
Parameters
----------
fit_results : Array of :class:`lmfit.model.ModelResult`
The set of ModelResults to extract the information from.
fit_info : list of string
Any attribute that can return a float value from :class:`lmfit.model.ModelResult`,
e.g. ["chisqr", "aic", "aic_real", "success"]
(aic_real is defined in :mod:`threadcount.lmfit_ext` module)
model_params : list of str
Options here are the param names, or the param names with "_err" appended.
As an example, lets say we have a param name = "height", and we wish to
extract the value of the "height" parameter and it's fit error, then
`model_params` = ["height", "height_err"]. The full list you can choose
from for a model is model.make_params().valerrsdict().keys().
result_dict : :class:`ResultDict`, optional
Append these results to `result_dict` or create a new one (if None),
by default None
names_only : bool, optional
Return a list of the attribute names this call generates, by default False.
It is essentially `fit_info` + `model_params`.
Returns
-------
:class:`ResultDict`
A ResultDict where the keys are the strings in `fit_info` and
`model_params`, and the values are the numpy arrays of same spatial shape
as `fit_results`. Essentially images of the parameters.
See Also
--------
:meth:`threadcount.lmfit_ext.valerrsdict`
:meth:`threadcount.lmfit_ext.aic_real`
"""
if fit_results is None:
fit_results = []
if fit_info is None:
fit_info = []
if model_params is None:
model_params = []
label_row = fit_info + model_params
if names_only is True:
return label_row
fit_results = np.array(fit_results)
spatial_shape = fit_results.shape
# create empty output
output = np.empty((len(label_row),) + spatial_shape)
for index in np.ndindex(spatial_shape):
this_fit = fit_results[index]
if this_fit is None:
save_info = np.array(np.broadcast_to(None, (len(label_row),)), dtype=float)
else:
save_info = this_fit.summary_array(fit_info, model_params)
output[(slice(None), *index)] = save_info
if result_dict is None:
result_dict = ResultDict(output, names=label_row)
else:
for i, label in enumerate(label_row):
result_dict[label] = output[i]
return result_dict
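# Illustrative only: calling extract_spaxel_info as described in its docstring.
# `fit_results` is any array of lmfit ModelResults (or None); the chosen
# fit_info/model_params entries are just examples.
def _example_extract_spaxel_info(fit_results):
    names = extract_spaxel_info(None, fit_info=["chisqr", "success"],
                                model_params=["height", "height_err"],
                                names_only=True)
    maps = extract_spaxel_info(fit_results, fit_info=["chisqr", "success"],
                               model_params=["height", "height_err"])
    # maps["height"] is a numpy array with the same spatial shape as fit_results
    return names, maps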
def extract_spaxel_info_mc(
mc_fits, fit_info, model_params, method="median", names_only=False
):
"""Compute the average and standard deviation of the information requested.
This function takes as input a list of monte carlo iterations of a ModelResult
and returns the average of the `fit_info`, and average and standard deviation
of the `model_params`, using method "median" or "mean", provided by `method`.
For models which have multiple gaussian components, this function also
incorporates a re-ordering of components
(see :meth:`threadcount.lmfit_ext.order_gauss`) with the idea that we would
like to average similar components together.
It is advisable to also have this function compute the names array for you,
using `names_only` =True, since the entries in `fit_info` and `model_params`
will have strings added to the beginning and end.
Parameters
----------
mc_fits : list of :class:`lmfit.model.ModelResult`
List of fits to extract and average parameters from.
fit_info : list of string
Options here include things like "chisqr","aic_real", "success",
any attribute that will return a float from ModelResult.attribute
model_params : list of string
The list of the model parameter names (don't include "_err", that will be
added for you.) For example, you could compute this from
model.make_params().keys()
method :
<filename>sdk/python/pulumi_alicloud/quotas/quota_alarm.py<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['QuotaAlarmArgs', 'QuotaAlarm']
@pulumi.input_type
class QuotaAlarmArgs:
def __init__(__self__, *,
product_code: pulumi.Input[str],
quota_action_code: pulumi.Input[str],
quota_alarm_name: pulumi.Input[str],
quota_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]] = None,
threshold: Optional[pulumi.Input[float]] = None,
threshold_percent: Optional[pulumi.Input[float]] = None,
web_hook: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a QuotaAlarm resource.
:param pulumi.Input[str] product_code: The Product Code.
:param pulumi.Input[str] quota_action_code: The Quota Action Code.
:param pulumi.Input[str] quota_alarm_name: The name of Quota Alarm.
:param pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]] quota_dimensions: The Quota Dimensions.
:param pulumi.Input[float] threshold: The threshold of Quota Alarm.
:param pulumi.Input[float] threshold_percent: The threshold percent of Quota Alarm.
:param pulumi.Input[str] web_hook: The WebHook of Quota Alarm.
"""
pulumi.set(__self__, "product_code", product_code)
pulumi.set(__self__, "quota_action_code", quota_action_code)
pulumi.set(__self__, "quota_alarm_name", quota_alarm_name)
if quota_dimensions is not None:
pulumi.set(__self__, "quota_dimensions", quota_dimensions)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if threshold_percent is not None:
pulumi.set(__self__, "threshold_percent", threshold_percent)
if web_hook is not None:
pulumi.set(__self__, "web_hook", web_hook)
@property
@pulumi.getter(name="productCode")
def product_code(self) -> pulumi.Input[str]:
"""
The Product Code.
"""
return pulumi.get(self, "product_code")
@product_code.setter
def product_code(self, value: pulumi.Input[str]):
pulumi.set(self, "product_code", value)
@property
@pulumi.getter(name="quotaActionCode")
def quota_action_code(self) -> pulumi.Input[str]:
"""
The Quota Action Code.
"""
return pulumi.get(self, "quota_action_code")
@quota_action_code.setter
def quota_action_code(self, value: pulumi.Input[str]):
pulumi.set(self, "quota_action_code", value)
@property
@pulumi.getter(name="quotaAlarmName")
def quota_alarm_name(self) -> pulumi.Input[str]:
"""
The name of Quota Alarm.
"""
return pulumi.get(self, "quota_alarm_name")
@quota_alarm_name.setter
def quota_alarm_name(self, value: pulumi.Input[str]):
pulumi.set(self, "quota_alarm_name", value)
@property
@pulumi.getter(name="quotaDimensions")
def quota_dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]]:
"""
The Quota Dimensions.
"""
return pulumi.get(self, "quota_dimensions")
@quota_dimensions.setter
def quota_dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]]):
pulumi.set(self, "quota_dimensions", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold of Quota Alarm.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="thresholdPercent")
def threshold_percent(self) -> Optional[pulumi.Input[float]]:
"""
The threshold percent of Quota Alarm.
"""
return pulumi.get(self, "threshold_percent")
@threshold_percent.setter
def threshold_percent(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold_percent", value)
@property
@pulumi.getter(name="webHook")
def web_hook(self) -> Optional[pulumi.Input[str]]:
"""
The WebHook of Quota Alarm.
"""
return pulumi.get(self, "web_hook")
@web_hook.setter
def web_hook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "web_hook", value)
@pulumi.input_type
class _QuotaAlarmState:
def __init__(__self__, *,
product_code: Optional[pulumi.Input[str]] = None,
quota_action_code: Optional[pulumi.Input[str]] = None,
quota_alarm_name: Optional[pulumi.Input[str]] = None,
quota_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]] = None,
threshold: Optional[pulumi.Input[float]] = None,
threshold_percent: Optional[pulumi.Input[float]] = None,
web_hook: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering QuotaAlarm resources.
:param pulumi.Input[str] product_code: The Product Code.
:param pulumi.Input[str] quota_action_code: The Quota Action Code.
:param pulumi.Input[str] quota_alarm_name: The name of Quota Alarm.
:param pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]] quota_dimensions: The Quota Dimensions.
:param pulumi.Input[float] threshold: The threshold of Quota Alarm.
:param pulumi.Input[float] threshold_percent: The threshold percent of Quota Alarm.
:param pulumi.Input[str] web_hook: The WebHook of Quota Alarm.
"""
if product_code is not None:
pulumi.set(__self__, "product_code", product_code)
if quota_action_code is not None:
pulumi.set(__self__, "quota_action_code", quota_action_code)
if quota_alarm_name is not None:
pulumi.set(__self__, "quota_alarm_name", quota_alarm_name)
if quota_dimensions is not None:
pulumi.set(__self__, "quota_dimensions", quota_dimensions)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if threshold_percent is not None:
pulumi.set(__self__, "threshold_percent", threshold_percent)
if web_hook is not None:
pulumi.set(__self__, "web_hook", web_hook)
@property
@pulumi.getter(name="productCode")
def product_code(self) -> Optional[pulumi.Input[str]]:
"""
The Product Code.
"""
return pulumi.get(self, "product_code")
@product_code.setter
def product_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_code", value)
@property
@pulumi.getter(name="quotaActionCode")
def quota_action_code(self) -> Optional[pulumi.Input[str]]:
"""
The Quota Action Code.
"""
return pulumi.get(self, "quota_action_code")
@quota_action_code.setter
def quota_action_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "quota_action_code", value)
@property
@pulumi.getter(name="quotaAlarmName")
def quota_alarm_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of Quota Alarm.
"""
return pulumi.get(self, "quota_alarm_name")
@quota_alarm_name.setter
def quota_alarm_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "quota_alarm_name", value)
@property
@pulumi.getter(name="quotaDimensions")
def quota_dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]]:
"""
The Quota Dimensions.
"""
return pulumi.get(self, "quota_dimensions")
@quota_dimensions.setter
def quota_dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['QuotaAlarmQuotaDimensionArgs']]]]):
pulumi.set(self, "quota_dimensions", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold of Quota Alarm.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="thresholdPercent")
def threshold_percent(self) -> Optional[pulumi.Input[float]]:
"""
The threshold percent of Quota Alarm.
"""
return pulumi.get(self, "threshold_percent")
@threshold_percent.setter
def threshold_percent(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold_percent", value)
@property
@pulumi.getter(name="webHook")
def web_hook(self) -> Optional[pulumi.Input[str]]:
"""
The WebHook of Quota Alarm.
"""
return pulumi.get(self, "web_hook")
@web_hook.setter
def web_hook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "web_hook", value)
class QuotaAlarm(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
product_code: Optional[pulumi.Input[str]] = None,
quota_action_code: Optional[pulumi.Input[str]] = None,
quota_alarm_name: Optional[pulumi.Input[str]] = None,
quota_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['QuotaAlarmQuotaDimensionArgs']]]]] = None,
threshold: Optional[pulumi.Input[float]] = None,
threshold_percent: Optional[pulumi.Input[float]] = None,
web_hook: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Quotas Quota Alarm resource.
For information about Quotas Quota Alarm and how to use it, see [What is Quota Alarm](https://help.aliyun.com/document_detail/184343.html).
> **NOTE:** Available in v1.116.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quotas.QuotaAlarm("example",
product_code="ecs",
quota_action_code="q_prepaid-instance-count-per-once-purchase",
quota_alarm_name="tf-testAcc",
quota_dimensions=[alicloud.quotas.QuotaAlarmQuotaDimensionArgs(
key="regionId",
value="cn-hangzhou",
)],
threshold=100)
```
## Import
Quotas Quota Alarm can be imported using the id, e.g.
```sh
$ pulumi import alicloud:quotas/quotaAlarm:QuotaAlarm example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] product_code: The Product Code.
:param pulumi.Input[str] quota_action_code: The Quota Action Code.
:param pulumi.Input[str] quota_alarm_name: The name of Quota Alarm.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['QuotaAlarmQuotaDimensionArgs']]]] quota_dimensions: The Quota Dimensions.
:param pulumi.Input[float] threshold: The threshold of Quota Alarm.
:param pulumi.Input[float] threshold_percent: The threshold percent of Quota Alarm.
:param pulumi.Input[str] web_hook: The WebHook of Quota Alarm.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: QuotaAlarmArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Quotas Quota Alarm resource.
For information about Quotas Quota Alarm and how to use it, see [What is Quota Alarm](https://help.aliyun.com/document_detail/184343.html).
> **NOTE:** Available in v1.116.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.quotas.QuotaAlarm("example",
product_code="ecs",
quota_action_code="q_prepaid-instance-count-per-once-purchase",
quota_alarm_name="tf-testAcc",
quota_dimensions=[alicloud.quotas.QuotaAlarmQuotaDimensionArgs(
key="regionId",
value="cn-hangzhou",
)],
threshold=100)
```
## Import
Quotas Quota Alarm can be imported using the id, e.g.
```sh
$ pulumi import alicloud:quotas/quotaAlarm:QuotaAlarm example <id>
```
:param str resource_name: The name of the resource.
:param QuotaAlarmArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(QuotaAlarmArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
product_code: Optional[pulumi.Input[str]] = None,
quota_action_code: Optional[pulumi.Input[str]] = None,
quota_alarm_name: Optional[pulumi.Input[str]] = None,
quota_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['QuotaAlarmQuotaDimensionArgs']]]]] = None,
threshold: Optional[pulumi.Input[float]] = None,
threshold_percent: Optional[pulumi.Input[float]] = None,
web_hook: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = QuotaAlarmArgs.__new__(QuotaAlarmArgs)
if product_code is None and not opts.urn:
raise TypeError("Missing required property 'product_code'")
__props__.__dict__["product_code"] = product_code
if quota_action_code is None and not opts.urn:
raise TypeError("Missing required property 'quota_action_code'")
__props__.__dict__["quota_action_code"] = quota_action_code
if quota_alarm_name is None and not opts.urn:
raise TypeError("Missing required property 'quota_alarm_name'")
__props__.__dict__["quota_alarm_name"] = quota_alarm_name
__props__.__dict__["quota_dimensions"] = quota_dimensions
__props__.__dict__["threshold"] = threshold
__props__.__dict__["threshold_percent"] = threshold_percent
__props__.__dict__["web_hook"] = web_hook
super(QuotaAlarm, __self__).__init__(
'alicloud:quotas/quotaAlarm:QuotaAlarm',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
product_code: Optional[pulumi.Input[str]] = None,
quota_action_code: Optional[pulumi.Input[str]] = None,
quota_alarm_name: Optional[pulumi.Input[str]] = None,
quota_dimensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['QuotaAlarmQuotaDimensionArgs']]]]] = None,
threshold: Optional[pulumi.Input[float]] = None,
threshold_percent: Optional[pulumi.Input[float]] = None,
web_hook: Optional[pulumi.Input[str]] = None) -> 'QuotaAlarm':
"""
Get an existing QuotaAlarm resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] product_code: The Product Code.
:param pulumi.Input[str] quota_action_code: The Quota Action Code.
:param pulumi.Input[str] quota_alarm_name: The name of Quota Alarm.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['QuotaAlarmQuotaDimensionArgs']]]] quota_dimensions: The Quota Dimensions.
:param pulumi.Input[float] threshold: The threshold of Quota Alarm.
:param pulumi.Input[float] threshold_percent: The threshold percent of Quota Alarm.
:param pulumi.Input[str] web_hook: The WebHook of Quota Alarm.
"ip6_peer",
"ip6_gw_addr", "ip6_gw_if", "ip6_gw_default",
"dns_server", "dhcp_server",
"wireless",
"numa_node", "local_cpulist", "local_cpu",
"enable", "current_link_speed", "current_link_width",
"driver", "bus_info",
"bus_info", "wireless_protocol",
"duplex", "carrier",
"operstate", "type",
"mtu", "tx_queue_len",
"ufo",
"broadcast", "debug","loopback",
"point_to_point","notrailers","running","noarp","promisc",
"allmulticast","lb_master","lb_slave","multicast_support",
"portselect","automedia","dynamic",
"tx-scatter-gather","tx-checksum-ipv4",
"tx-checksum-ip-generic","tx-checksum-ipv6",
"highdma","tx-scatter-gather-fraglist","tx-vlan-hw-insert",
"rx-vlan-hw-parse","rx-vlan-filter","vlan-challenged",
"tx-generic-segmentation","tx-lockless","netns-local",
"rx-gro","rx-lro","tx-tcp-segmentation","tx-gso-robust",
"tx-tcp-ecn-segmentation","tx-tcp-mangleid-segmentation",
"tx-tcp6-segmentation","tx-fcoe-segmentation","tx-gre-segmentation",
"tx-gre-csum-segmentation","tx-ipxip4-segmentation",
"tx-ipxip6-segmentation","tx-udp_tnl-segmentation",
"tx-udp_tnl-csum-segmentation","tx-gso-partial",
"tx-sctp-segmentation","tx-esp-segmentation","tx-udp-segmentation",
"tx-checksum-fcoe-crc","tx-checksum-sctp","fcoe-mtu",
"rx-ntuple-filter","rx-hashing","rx-checksum","tx-nocache-copy",
"rx-fcs","rx-all","tx-vlan-stag-hw-insert","rx-vlan-stag-hw-parse",
"rx-vlan-stag-filter","l2-fwd-offload","hw-tc-offload",
"esp-hw-offload","esp-tx-csum-hw-offload","rx-udp_tunnel-port-offload",
"tls-hw-tx-offload","tls-hw-rx-offload","rx-gro-hw","tls-hw-record",
"tx-udp-fragmentation", "rx-gro-list",
# counters
"carrier_down_count", "carrier_up_count", "carrier_changes",
] + attr_list_netdev
type_list = 37*[str] + 72*[int] + 27*[int]
counter_list = 109*[False] + 27*[True]
gws = netifaces.gateways()
active_ifs = []
# DNS
# before ~2018 dns are stored in /etc/resolv.conf
nameserver=""
nameservers={}
with open('/etc/resolv.conf') as f:
for l in f.readlines():
if l.startswith("nameserver"):
nameserver = l.split()[-1]
break
# post-2018 systems use systemd based resolution
# 127.0.0.53 indicates such behavior
if not nameserver or nameserver == "127.0.0.53":
try:
res=subprocess.run(["systemd-resolve","--no-pager","--status"],
capture_output=True)
this_if = "global"
for l in res.stdout.split(b'\n'):
if b"Link" in l:
this_if=l.split()[-1][1:-1].decode()
elif b"Current DNS Server" in l:
nameservers[this_if]=l.split()[-1].decode()
except:
self.info("systemd probe failed")
# DHCP
# parse dhcp lease files
dhcp_servers={}
prefix='/var/lib/dhcp/'
for suffix in os.listdir(prefix):
if not suffix.endswith("leases"):
continue
with open(prefix+suffix) as f:
for l in f.readlines():
if "interface" in l:
this_if = l.split()[-1][1:-2]
elif "dhcp-server-identifier" in l:
dhcp_servers[this_if] = l.split()[-1][:-1]
for if_name in netifaces.interfaces(): #os.listdir("/sys/class/net")
# create dict if interface was never observed
active_ifs.append(if_name)
self._data["net/dev"].setdefault(if_name, init_rb_dict(attr_list,
types=type_list, counters=counter_list))
if_dict = self._data["net/dev"][if_name]
# link
addrs = netifaces.ifaddresses(if_name)
if netifaces.AF_LINK in addrs:
# addresses
for item in addrs[netifaces.AF_LINK]:
if "addr" in item:
if_dict["link_addr"].append(item["addr"])
if "broadcast" in item:
if_dict["link_broadcast"].append(item["broadcast"])
if "peer" in item:
if_dict["link_peer"].append(item["peer"])
# gateways
if netifaces.AF_LINK in gws:
for item in gws[netifaces.AF_LINK]:
if item[1] != if_name:
continue
if_dict["link_gw_addr"].append(item[0])
if_dict["link_gw_if"].append(item[1])
if_dict["link_gw_default"].append(item[2])
# ip4
if netifaces.AF_INET in addrs:
# addr
for item in addrs[netifaces.AF_INET]:
if "addr" in item:
if_dict["ip4_addr"].append(item["addr"])
if "broadcast" in item:
if_dict["ip4_broadcast"].append(item["broadcast"])
if "netmask" in item:
if_dict["ip4_netmask"].append(item["netmask"])
if "peer" in item:
if_dict["ip4_peer"].append(item["peer"])
# gateways
if netifaces.AF_INET in gws:
for item in gws[netifaces.AF_INET]:
if item[1] != if_name:
continue
if_dict["ip4_gw_addr"].append(item[0])
if_dict["ip4_gw_if"].append(item[1])
if_dict["ip4_gw_default"].append(item[2])
# ip6 addr
if netifaces.AF_INET6 in addrs:
# addr
for item in addrs[netifaces.AF_INET6]:
if "addr" in item:
if_dict["ip6_addr"].append(item["addr"])
if "broadcast" in item:
if_dict["ip6_broadcast"].append(item["broadcast"])
if "netmask" in item:
if_dict["ip6_netmask"].append(item["netmask"])
if "peer" in item:
if_dict["ip6_peer"].append(item["peer"])
# gateways
if netifaces.AF_INET6 in gws:
for item in gws[netifaces.AF_INET6]:
if item[1] != if_name:
continue
if_dict["ip6_gw_addr"].append(item[0])
if_dict["ip6_gw_if"].append(item[1])
if_dict["ip6_gw_default"].append(item[2])
self.read_ethtool_info(if_name, if_dict)
#
# non-standard if attributes
# https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-net
#
path_prefix="/sys/class/net/{}/".format(if_name)
self._open_read_append(path_prefix+"carrier_down_count",
if_dict["carrier_down_count"])
self._open_read_append(path_prefix+"carrier_up_count",
if_dict["carrier_up_count"])
self._open_read_append(path_prefix+"carrier_changes",
if_dict["carrier_changes"])
self._open_read_append(path_prefix+"device/numa_node",
if_dict["numa_node"])
self._open_read_append(path_prefix+"device/local_cpulist",
if_dict["local_cpulist"])
self._open_read_append(path_prefix+"device/local_cpu",
if_dict["local_cpu"])
self._open_read_append(path_prefix+"device/enable",
if_dict["enable"])
self._open_read_append(path_prefix+"device/current_link_speed",
if_dict["current_link_speed"])
self._open_read_append(path_prefix+"device/current_link_width",
if_dict["current_link_width"])
self._open_read_append(path_prefix+"mtu",
if_dict["mtu"])
self._open_read_append(path_prefix+"tx_queue_len",
if_dict["tx_queue_len"])
self._open_read_append(path_prefix+"duplex",
if_dict["duplex"])
self._open_read_append(path_prefix+"carrier",
if_dict["carrier"])
self._open_read_append(path_prefix+"operstate",
if_dict["operstate"])
if_type = _linux_if_types.get(self._open_read(path_prefix+"type"),
"unknown")
if_dict["type"].append(if_type)
if_dict["wireless"].append(
int(os.path.exists(path_prefix+"wireless")))
if_dict["dns_server"].append(
nameservers.get(if_name, nameserver))
if_dict["dhcp_server"].append(
dhcp_servers.get(if_name, ""))
with open("/proc/net/dev", 'r') as f:
for l in f.readlines()[2:]:
attr_val = [e.rstrip(':') for e in l.rstrip().split()]
index = attr_val[0]
self._data["net/dev"].setdefault(index, init_rb_dict(attr_list,
types=type_list, counters=counter_list))
for i,e in enumerate(attr_val[1:]):
self._data["net/dev"][index][attr_list_netdev[i]].append(e)
active_ifs.append(index)
# cleanup expired ifs
for monitored_ifs in list(self._data["net/dev"].keys()):
if monitored_ifs not in active_ifs:
del self._data["net/dev"][monitored_ifs]
def _process_routes(self):
# https://man7.org/linux/man-pages/man7/rtnetlink.7.html
#
extra_attrs = ['RTA_PRIORITY', 'RTA_GATEWAY', 'RTA_OIF', 'RTA_DST',
'RTA_SRC', 'RTA_IIF', 'RTA_PREFSRC',]
base_attrs = [ 'dst_len', 'src_len', 'tos', 'proto', 'scope',
'type',
]
attrs = base_attrs+ extra_attrs
# atm, we only consider main table
for route in self._route.get_routes(table=254):
if route['event'] != 'RTM_NEWROUTE':
self.info("Unexpected route event: {}".format(route['event']))
route['proto'] = rt_proto[route['proto']]
route['scope'] = rt_scope[route['scope']]
route['type'] = rt_type[route['type']]
route_attrs = dict(route["attrs"])
if route["family"] == socket.AF_INET:
route_dict = self._data["routes4"]
elif route["family"] == socket.AF_INET6:
route_dict = self._data["routes6"]
if 'RTA_DST' in route_attrs:
key = "{}/{}".format(route_attrs["RTA_DST"], route['dst_len'])
else:
key = "default"
route_dict.setdefault(key, init_rb_dict(attrs, types=str))
for attr in base_attrs:
route_dict[key][attr].append(route[attr])
for attr in extra_attrs:
if attr in route_attrs:
route_dict[key][attr].append(route_attrs[attr])
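# Illustrative only: a standalone sketch of reading the main table (254) with
# pyroute2, which is what the `self._route` handle above is assumed to wrap.
def _example_list_main_routes():
    import socket
    from pyroute2 import IPRoute
    with IPRoute() as ipr:
        for route in ipr.get_routes(family=socket.AF_INET, table=254):
            dst = route.get_attr('RTA_DST')
            key = "{}/{}".format(dst, route['dst_len']) if dst else "default"
            print(key, route.get_attr('RTA_GATEWAY'), route.get_attr('RTA_OIF'))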
def _process_owamp(self):
attrs_to = [
'to_addr_from', 'to_addr_to', 'to_sid', 'to_first', 'to_last',
'to_pkts_sent', 'to_pkts_lost', 'to_pkts_dup',
'to_ow_del_min', 'to_ow_del_med', 'to_ow_del_max',
'to_ow_jitter', 'to_hops', 'to_reordering',]
attrs_from = [
'from_addr_from', 'from_addr_to', 'from_sid', 'from_first', 'from_last',
'from_pkts_sent', 'from_pkts_lost', 'from_pkts_dup',
'from_ow_del_min', 'from_ow_del_med', 'from_ow_del_max',
'from_ow_jitter', 'from_hops', 'from_reordering',]
attr_types = [str]
attr_types_dup= 5*[str] + 3*[int] + 4*[float] + [int] + [float]
attr_types += attr_types_dup + attr_types_dup
attrs = ['owamp_accessible'] + attrs_to + attrs_from
# get address list
address_list = self.parent.config["owamp-client"]["address_list"]
# no ping to realize
if not address_list:
return
address_list = address_list.split(",")
# For each address
for addr in address_list:
key = addr.split(":")[0]
owamp_info = self._read_owamp_info(key, attrs)
# not yet owamp ping done
if not owamp_info:
continue
# Init dict the first time
if "owamp" not in self._data:
self._data["owamp"] = {}
owamp_dict = self._data["owamp"]
owamp_dict.setdefault(key, init_rb_dict(attrs, types=attr_types))
# ping failed
if owamp_info["owamp_accessible"] == "no":
self.set_owamp_dict_ping_failed(owamp_dict[key], attrs, attr_types)
continue
# ping worked
for attr in attrs:
owamp_dict[key][attr].append(owamp_info[attr])
def set_owamp_dict_ping_failed(self, owamp_dict_addr, attrs, attr_types):
"""
Set the owamp dict when the ping failed
"""
for attr, attr_type in zip(attrs, attr_types):
if attr == 'owamp_accessible':
owamp_dict_addr[attr].append("no")
elif attr_type == str:
owamp_dict_addr[attr].append("unavailable")
elif attr_type == int:
owamp_dict_addr[attr].append(0)
elif attr_type == float:
owamp_dict_addr[attr].append(0.0)
def _read_owamp_info(self, addr, attrs):
"""
Fill and return a dictionary containing owping info
"""
# get file to read
stat_file_dir = self.parent.agent_dir + "/ping/outputs/owamp_outputs"
stat_file_to_read = stat_file_dir + "/" + addr + ".txt"
owamp_info = {}
# get info from file
if Path(stat_file_to_read).is_file():
with open(stat_file_to_read) as f:
lines = f.read().splitlines()
count = 0
# ping worked
if lines[0].startswith("yes"):
owamp_info["owamp_accessible"] = "yes"
owamp_info["err_msg"] = ""
for line in lines:
# check ping ended before end
if line == "\n" or line.startswith("nan") or line.startswith("reported"):
owamp_info["owamp_accessible"] = "no"
owamp_info["err_msg"] = "ping ended before end"
owamp_info[attrs[count]] = line
count += 1
# ping failed
elif lines[0].startswith("no"):
owamp_info["owamp_accessible"] = "no"
owamp_info["err_msg"] = lines[1] + lines[2]
else:
owamp_info["owamp_accessible"] = "no"
owamp_info["err_msg"] = "unknown error : shoulkd never happen"
return owamp_info
def _process_icmp_ping(self):
attrs = [
'icmp_accessible', 'time_ping_ended', 'pkts_sent',
'pkts_received', '%_pkts_lost',
'min_rtt', 'avg_rtt', 'max_rtt',]
attr_types = 2*[str] + 2*[int] + 4*[float]
# get address list
address_list = self.parent.config["ping"]["address_list"]
# no ping to realize
if not address_list:
return
address_list = address_list.split(",")
pings_info = self._read_icmp_ping_info(attrs)
# not yet ping done
if not pings_info:
return
# Init dict the first time
if "ping" not in self._data:
self._data["ping"] = {}
ping_dict = self._data["ping"]
# for each address
for addr in pings_info.keys():
# for each pingable address
if pings_info[addr]:
# init the dict of rb
ping_dict.setdefault(addr, init_rb_dict(attrs, types=attr_types))
for attr in attrs:
ping_dict[addr][attr].append(pings_info[addr][attr])
def _read_icmp_ping_info(self, attrs):
"""
Fill and return a dictionary containing ping info
"""
# get file to read
stat_file = self.parent.agent_dir + "/ping/outputs/icmp_outputs/ping_output.txt"
ping_info = {}
# get info from file
if Path(stat_file).is_file():
with open(stat_file) as f:
lines = f.read().splitlines()
time_read = False
time = None
for line in lines:
if not time_read:
time = line
time_read = True
continue
if line:
if line.startswith("ICMP"):
addr = line.split(" ")[10].strip()
ping_info[addr] = {}
ping_info[addr]["icmp_accessible"] = "no"
ping_info[addr]["min_rtt"] = 0
ping_info[addr]["avg_rtt"] = 0
ping_info[addr]["max_rtt"] = 0
else:
cut_line = line.split(":")
addr = cut_line[0].strip()
stats = cut_line[1].strip()
ping_info[addr] = {}
# address cannot be pinged
if not stats.startswith("xmt"):
continue
else:
ping_info[addr]["time_ping_ended"] = time
ping_info[addr]["icmp_accessible"] = "yes"
sub_stats = stats.split("=")
# pkts infos
pkts_info = sub_stats[1].split(",")[0].strip().split("/")
ping_info[addr]["pkts_sent"] = int(pkts_info[0])
ping_info[addr]["pkts_received"] = int(pkts_info[1])
ping_info[addr]["%_pkts_lost"] = float(pkts_info[2].strip("%"))
# rtt infos
# some packets were received
if ping_info[addr]["pkts_received"] > 0:
rtt_info = sub_stats[2].strip().split("/")
ping_info[addr]["min_rtt"] = float(rtt_info[0])
ping_info[addr]["avg_rtt"] = float(rtt_info[1])
ping_info[addr]["max_rtt"] = float(rtt_info[2])
# 100 % of loss
else:
ping_info[addr]["icmp_accessible"] = "no"
ping_info[addr]["min_rtt"] = 0
ping_info[addr]["avg_rtt"] = 0
ping_info[addr]["max_rtt"] = 0
return ping_info
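    # Sketch of the summary file this parser expects (an fping-style output
    # format is an assumption here; values are hypothetical):
    #
    #   2021-06-01 12:00:00
    #   ICMP Host Unreachable from 10.0.0.1 for ICMP Echo sent to 10.0.0.9
    #   8.8.8.8 : xmt/rcv/%loss = 5/5/0%, min/avg/max = 10.1/11.2/12.9
    #
    # The first line is the timestamp, "ICMP ..." lines flag unreachable
    # targets (the address is the 11th whitespace-separated token), and the
    # "addr : xmt/..." lines are split on '=' for packet and RTT statistics.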
def read_ethtool_info(self, if_name, if_dict):
"""
@see ethtool.c from python3-ethtool
"""
getters = [("driver", ethtool.get_module),
("bus_info", ethtool.get_businfo),
# ("ufo", ethtool.get_ufo),
("wireless_protocol", ethtool.get_wireless_protocol),
]
for attr, getter in getters:
try:
if_dict[attr].append(getter(if_name))
except:
pass
for feature in self._ethtool.get_features(if_name)[0].values():
if not feature.name or not feature.available:
continue
#self.info("if: {} feature: {}".format(if_name,feature.name))
# Crashes on my machine (<NAME>)
#if_dict[feature.name].append(int(feature.enable))
#self.info("inserted {} type {}".format(if_dict[attr]._top(),
# if_dict[attr].type))
#try:
# coalesce_settings = ethtool.get_coalesce(if_name)
#except:
# pass
# 'rx_max_pending': 0, 'rx_mini_max_pending': 0,
# 'rx_jumbo_max_pending': 0, 'tx_max_pending': 0,
# 'rx_pending': 0, 'rx_mini_pending': 0, 'rx_jumbo_pending': 0,
# 'tx_pending': 0
try:
for attr,val in ethtool.get_ringparam(if_name).items():
if_dict[attr].append(val)
except:
pass
try:
flags = ethtool.get_flags(if_name)
except:
return
if_dict["broadcast"].append((flags & ethtool.IFF_BROADCAST) | |
<filename>malchive/utilities/vtinspect.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2021 The MITRE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import json
import struct
import socket
import os.path
import logging
import argparse
import hashlib
import requests
import progressbar
__version__ = "1.4.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class VirusTotalAPI:
"""
    Interface for interacting with the VirusTotal v2 API.
:ivar dict params: Parameters to inform query to VT API.
:ivar str base: Base URL for queries to be made against.
"""
def __init__(self, apikey):
"""
Initialize VirusTotal interface.
:param str apikey: API key to use.
"""
self.params = {'apikey': apikey}
self.base = 'https://www.virustotal.com/vtapi/v2'
def search_file(self, resource, allinfo):
"""
Search for information based on a supplied hash.
:param str resource: Hash to search on.
:param bool allinfo: Show extra verbose information about the hash.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['allinfo'] = int(allinfo)
self.params['resource'] = resource
response = requests.get('%s/%s' % (self.base, 'file/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def get_submission_info(self, resource):
"""
Get submitter information for resource.
:param str resource: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['resource'] = resource
response = requests.get('%s/%s' % (self.base, 'file/submissions'),
params=self.params, verify=True)
response_json = response.json()
if len(response_json) == 0:
# Doesn't return anything if a match fails so
# we manufacture one here
return {"verbose_msg": "The requested resource is not among "
"the finished, queued or pending scans"}
return json.dumps(response_json, indent=4, sort_keys=False)
def download_file(self, hash):
"""
Download file corresponding to supplied hash value.
:param str hash: Hash of file to download.
        :return: raw HTTP response containing the file content
        :rtype: requests.Response
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/download'),
params=self.params, verify=True)
return response
def download_pcap(self, hash):
"""
Download PCAP information gathered corresponding to the supplied hash.
:param str hash: Hash to search on.
        :return: raw HTTP response containing the PCAP data
        :rtype: requests.Response
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/network-traffic'),
params=self.params, verify=True)
return response
def rescan(self, hash):
"""
Search for information based on a supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['resource'] = hash
response = requests.post('%s/%s' % (self.base, 'file/rescan'),
params=self.params)
response_json = response.json()
return response_json
def behaviour(self, hash):
"""
Search for sandbox information corresponding to the supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/behaviour'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def comments(self, hash):
"""
Search for user comments corresponding to the supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['resource'] = hash
response = requests.get('%s/%s' % (self.base, 'comments/get'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_url(self, resource, scan, allinfo):
"""
Search for reputation data corresponding to the supplied url.
:param str resource: URL to search on.
:param bool scan: Bool to force a scan for a URL not in the VirusTotal
database.
:param bool allinfo: Retrieve more verbose information about the
supplied URL.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['allinfo'] = int(allinfo)
self.params['resource'] = resource
self.params['scan'] = int(scan)
response = requests.post('%s/%s' % (self.base, 'url/report'),
params=self.params)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_domain(self, domain):
"""
Search for data corresponding to the submitted domain.
:param str domain: Domain to search on.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['domain'] = domain
response = requests.get('%s/%s' % (self.base, 'domain/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_ip(self, ip):
"""
Search for data corresponding to the submitted IP address.
:param str ip: IP address to search on.
:return: JSON dump of response data from VirusTotal
        :rtype: str
"""
self.params['ip'] = ip
response = requests.get('%s/%s' % (self.base, 'ip-address/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def query_submissions(self, query, offset=None):
"""
Execute a search modifier compliant file search query against
VirusTotal.
:param str query: Search modifier to use.
:param str offset: Search offset given from VT API when a query has
more than 300 results.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
if offset is not None:
self.params['offset'] = offset
self.params['query'] = query
response = requests.post('%s/%s' % (self.base, 'file/search'),
params=self.params, verify=True)
response_json = response.json()
return response_json
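# Minimal usage sketch (hypothetical API key and hash):
#
#   vt = VirusTotalAPI('0123456789abcdef0123456789abcdef')
#   print(vt.search_file('d41d8cd98f00b204e9800998ecf8427e', allinfo=False))
#
# The search_*, behaviour and comments helpers return pretty-printed JSON
# strings, rescan and query_submissions return the decoded dict, and the
# download_* helpers return the raw requests.Response.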
def process_hashes(opt, vt):
for h in opt.hashes:
# Verify hash is md5/sha1/sha256
if not (re.match(r'^[a-fA-F\d]{32}$', h) or
re.match(r'^[a-fA-F\d]{40}$', h) or
re.match(r'^[a-fA-F\d]{64}$', h)):
log.warning('Invalid hash supplied, skipping %s' % h)
continue
# Get detailed file information
if opt.info:
print(vt.search_file(h, opt.verbose))
# Get detailed info from submitters
if opt.submitter:
print(vt.get_submission_info(h))
# Download the specimen
if opt.download_file:
log.info('Attempting to retrieve file...')
result = vt.download_file(h)
if result.status_code == 404:
log.warning('404 returned, %s not found in database...' % h)
elif result.status_code == 200:
name = '%s.vt' % h.lower()
with open(name, 'wb') as f:
f.write(result.content)
print('Retrieved file from VT, saved locally as %s' % name)
check_payload_integrity(h, result.content)
else:
log.warning(
'Unable to retrieve file, response code was %s. '
'Try again in a few minutes...' % result.status_code)
# Download the pcap
if opt.download_pcap:
log.info('Attempting to retrieve pcap...')
result = vt.download_pcap(h)
if result.status_code == 404:
log.warning('404 returned, file not found in database...')
elif result.status_code == 200 and \
result.content.startswith(b'\xd4\xc3\xb2\xa1'):
name = '%s.pcap' % h.lower()
with open(name, 'wb') as f:
f.write(result.content)
print('Retrieved file from VT, saved locally as %s' % name)
else:
log.warning(
'Unable to retrieve PCAP for %s. PCAP data may not '
'exist for it. Response code: %s'
% (h, result.status_code))
# Rescan the supplied hash/file
if opt.rescan:
result = vt.rescan(h)
if result['response_code'] == 0:
log.error('There was an error rescanning. '
'The hash may not be in the database.')
elif result['response_code'] == 1:
print('Rescan request successful, please navigate to the '
'provided URL and await the results...\n%s' %
result['permalink'])
# Get behaviour of file
if opt.behaviour:
print(vt.behaviour(h))
# Get user submitted comments
if opt.comments:
print(vt.comments(h))
def check_payload_integrity(h, buff):
result_hash = ''
# Verify hash is md5/sha1/sha256
if re.match(r'^[a-fA-F\d]{32}$', h):
result_hash = hashlib.md5(buff).hexdigest()
if re.match(r'^[a-fA-F\d]{40}$', h):
result_hash = hashlib.sha1(buff).hexdigest()
if re.match(r'^[a-fA-F\d]{64}$', h):
result_hash = hashlib.sha256(buff).hexdigest()
if h.lower() != result_hash.lower():
log.error('The retrieved file does not match the provided '
'hash value! %s != %s' %
(h.lower(), result_hash.lower()))
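# Hash length selects the algorithm (32 hex chars -> MD5, 40 -> SHA-1,
# 64 -> SHA-256); for example
#
#   check_payload_integrity('d41d8cd98f00b204e9800998ecf8427e', b'')
#
# recomputes the MD5 of the empty buffer and, since it matches, logs nothing.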
def process_net_indicators(opt, vt):
if opt.url:
for url in opt.url:
# Verify url
if not re.match(r'^(http|https)://.*?\..*', url):
log.error(
'Invalid URL supplied, skipping %s.\nEnsure the '
'http:// or https:// prefix is at the beginning...'
% url)
continue
# Get detailed URL information
if opt.force_url:
# User coaching
while True:
ans = input(
'Forcing a URL scan will add a public record '
'to the VirusTotal database for the URL you '
'submitted if it\'s not found in the existing '
'dataset. \nAre you sure? (y/n) ')
if ans == 'y':
break
elif ans == 'n':
print('Aborting URL scan of %s...' % url)
continue
else:
print('Please provide either \'y\' or \'n\'')
print('Initiating a scan request on your behalf...')
print(vt.search_url(url, opt.force_url, opt.verbose))
if opt.domain_name:
for d in opt.domain_name:
# Verify domain
if len(d.split(".")) > 1 and d.startswith('.') is True:
log.warning(
'Invalid domain supplied, skipping %s.\nProvide a '
                    'valid domain, with a basename prefix '
                    '(ex: google.com)...' % d)
continue
# Get detailed domain name information
print(vt.search_domain(d))
if opt.ip:
for ip_addr in opt.ip:
            # Very basic IP validation; VT will do additional checks, but
# this serves as an initial gate
try:
struct.unpack("!L", socket.inet_aton(ip_addr))[0]
| |
"""Set new teletext page to retrieve.
        @param i_page: teletext page number requested
"""
e=VLCException()
return libvlc_video_set_teletext(self, i_page, e)
if hasattr(dll, 'libvlc_video_get_track_count'):
def video_get_track_count(self):
"""Get number of available video tracks.
@return: the number of available video tracks (int)
"""
e=VLCException()
return libvlc_video_get_track_count(self, e)
if hasattr(dll, 'libvlc_video_get_track_description'):
def video_get_track_description(self):
"""Get the description of available video tracks.
@return: list with description of available video tracks
"""
e=VLCException()
return libvlc_video_get_track_description(self, e)
if hasattr(dll, 'libvlc_video_get_track'):
def video_get_track(self):
"""Get current video track.
@return: the video track (int)
"""
e=VLCException()
return libvlc_video_get_track(self, e)
if hasattr(dll, 'libvlc_video_set_track'):
def video_set_track(self, i_track):
"""Set video track.
@param i_track: the track (int)
"""
e=VLCException()
return libvlc_video_set_track(self, i_track, e)
if hasattr(dll, 'libvlc_video_take_snapshot'):
def video_take_snapshot(self, psz_filepath, i_width, i_height):
"""Take a snapshot of the current video window.
If i_width AND i_height is 0, original size is used.
If i_width XOR i_height is 0, original aspect-ratio is preserved.
@param psz_filepath: the path where to save the screenshot to
@param i_width: the snapshot's width
@param i_height: the snapshot's height
"""
e=VLCException()
return libvlc_video_take_snapshot(self, psz_filepath, i_width, i_height, e)
if hasattr(dll, 'libvlc_audio_get_track_count'):
def audio_get_track_count(self):
"""Get number of available audio tracks.
@return: the number of available audio tracks (int)
"""
e=VLCException()
return libvlc_audio_get_track_count(self, e)
if hasattr(dll, 'libvlc_audio_get_track_description'):
def audio_get_track_description(self):
"""Get the description of available audio tracks.
@return: list with description of available audio tracks
"""
e=VLCException()
return libvlc_audio_get_track_description(self, e)
if hasattr(dll, 'libvlc_audio_get_track'):
def audio_get_track(self):
"""Get current audio track.
@return: the audio track (int)
"""
e=VLCException()
return libvlc_audio_get_track(self, e)
if hasattr(dll, 'libvlc_audio_set_track'):
def audio_set_track(self, i_track):
"""Set current audio track.
@param i_track: the track (int)
"""
e=VLCException()
return libvlc_audio_set_track(self, i_track, e)
class TrackDescription(object):
def __new__(cls, pointer=None):
        '''Internal method used for instantiating wrappers from ctypes.
'''
if pointer is None:
raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
if pointer == 0:
return None
else:
o=object.__new__(cls)
o._as_parameter_=ctypes.c_void_p(pointer)
return o
@staticmethod
def from_param(arg):
'''(INTERNAL) ctypes parameter conversion method.
'''
return arg._as_parameter_
if hasattr(dll, 'libvlc_track_description_release'):
def release(self):
"""Release (free) libvlc_track_description_t
"""
return libvlc_track_description_release(self)
if hasattr(dll, 'libvlc_exception_init'):
prototype=ctypes.CFUNCTYPE(None, ctypes.POINTER(VLCException))
paramflags=( (3, ), )
libvlc_exception_init = prototype( ("libvlc_exception_init", dll), paramflags )
libvlc_exception_init.errcheck = check_vlc_exception
libvlc_exception_init.__doc__ = """Initialize an exception structure. This can be called several times to
reuse an exception structure.
@param p_exception the exception to initialize
"""
if hasattr(dll, 'libvlc_exception_clear'):
prototype=ctypes.CFUNCTYPE(None, ctypes.POINTER(VLCException))
paramflags=( (3, ), )
libvlc_exception_clear = prototype( ("libvlc_exception_clear", dll), paramflags )
libvlc_exception_clear.errcheck = check_vlc_exception
libvlc_exception_clear.__doc__ = """Clear an exception object so it can be reused.
    The exception object must have been initialized.
@param p_exception the exception to clear
"""
if hasattr(dll, 'libvlc_new'):
prototype=ctypes.CFUNCTYPE(Instance, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.POINTER(VLCException))
paramflags=(1,), (1,), (3,)
libvlc_new = prototype( ("libvlc_new", dll), paramflags )
libvlc_new.errcheck = check_vlc_exception
libvlc_new.__doc__ = """Create and initialize a libvlc instance.
@param argc the number of arguments
@param argv command-line-type arguments. argv[0] must be the path of the
calling program.
@param p_e an initialized exception pointer
@return the libvlc instance
"""
if hasattr(dll, 'libvlc_get_vlc_id'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, Instance)
paramflags=( (1, ), )
libvlc_get_vlc_id = prototype( ("libvlc_get_vlc_id", dll), paramflags )
libvlc_get_vlc_id.__doc__ = """Return a libvlc instance identifier for legacy APIs. Use of this
function is discouraged, you should convert your program to use the
new API.
@param p_instance the instance
@return the instance identifier
"""
if hasattr(dll, 'libvlc_release'):
prototype=ctypes.CFUNCTYPE(None, Instance)
paramflags=( (1, ), )
libvlc_release = prototype( ("libvlc_release", dll), paramflags )
libvlc_release.__doc__ = """Decrement the reference count of a libvlc instance, and destroy it
if it reaches zero.
@param p_instance the instance to destroy
"""
if hasattr(dll, 'libvlc_retain'):
prototype=ctypes.CFUNCTYPE(None, Instance)
paramflags=( (1, ), )
libvlc_retain = prototype( ("libvlc_retain", dll), paramflags )
libvlc_retain.__doc__ = """Increments the reference count of a libvlc instance.
The initial reference count is 1 after libvlc_new() returns.
@param p_instance the instance to reference
"""
if hasattr(dll, 'libvlc_add_intf'):
prototype=ctypes.CFUNCTYPE(None, Instance, ctypes.c_char_p, ctypes.POINTER(VLCException))
paramflags=(1,), (1,), (3,)
libvlc_add_intf = prototype( ("libvlc_add_intf", dll), paramflags )
libvlc_add_intf.errcheck = check_vlc_exception
libvlc_add_intf.__doc__ = """Try to start a user interface for the libvlc instance.
@param p_instance the instance
@param name interface name, or NULL for default
@param p_exception an initialized exception pointer
"""
if hasattr(dll, 'libvlc_wait'):
prototype=ctypes.CFUNCTYPE(None, Instance)
paramflags=( (1, ), )
libvlc_wait = prototype( ("libvlc_wait", dll), paramflags )
libvlc_wait.__doc__ = """Waits until an interface causes the instance to exit.
You should start at least one interface first, using libvlc_add_intf().
@param p_instance the instance
"""
if hasattr(dll, 'libvlc_get_version'):
prototype=ctypes.CFUNCTYPE(ctypes.c_char_p)
paramflags= tuple()
libvlc_get_version = prototype( ("libvlc_get_version", dll), paramflags )
libvlc_get_version.__doc__ = """Retrieve libvlc version.
Example: "0.9.0-git Grishenko"
@return a string containing the libvlc version
"""
if hasattr(dll, 'libvlc_get_compiler'):
prototype=ctypes.CFUNCTYPE(ctypes.c_char_p)
paramflags= tuple()
libvlc_get_compiler = prototype( ("libvlc_get_compiler", dll), paramflags )
libvlc_get_compiler.__doc__ = """Retrieve libvlc compiler version.
Example: "gcc version 4.2.3 (Ubuntu 4.2.3-2ubuntu6)"
@return a string containing the libvlc compiler version
"""
if hasattr(dll, 'libvlc_get_changeset'):
prototype=ctypes.CFUNCTYPE(ctypes.c_char_p)
paramflags= tuple()
libvlc_get_changeset = prototype( ("libvlc_get_changeset", dll), paramflags )
libvlc_get_changeset.__doc__ = """Retrieve libvlc changeset.
Example: "aa9bce0bc4"
@return a string containing the libvlc changeset
"""
if hasattr(dll, 'libvlc_free'):
prototype=ctypes.CFUNCTYPE(None, ctypes.c_void_p)
paramflags=( (1, ), )
libvlc_free = prototype( ("libvlc_free", dll), paramflags )
libvlc_free.__doc__ = """Frees an heap allocation (char *) returned by a LibVLC API.
If you know you're using the same underlying C run-time as the LibVLC
implementation, then you can call ANSI C free() directly instead.
"""
if hasattr(dll, 'libvlc_event_attach'):
prototype=ctypes.CFUNCTYPE(None, EventManager, EventType, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(VLCException))
paramflags=(1,), (1,), (1,), (1,), (3,)
libvlc_event_attach = prototype( ("libvlc_event_attach", dll), paramflags )
libvlc_event_attach.errcheck = check_vlc_exception
libvlc_event_attach.__doc__ = """Register for an event notification.
@param p_event_manager the event manager to which you want to attach to.
Generally it is obtained by vlc_my_object_event_manager() where
my_object is the object you want to listen to.
@param i_event_type the desired event to which we want to listen
@param f_callback the function to call when i_event_type occurs
@param user_data user provided data to carry with the event
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_event_detach'):
prototype=ctypes.CFUNCTYPE(None, EventManager, EventType, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(VLCException))
paramflags=(1,), (1,), (1,), (1,), (3,)
libvlc_event_detach = prototype( ("libvlc_event_detach", dll), paramflags )
libvlc_event_detach.errcheck = check_vlc_exception
libvlc_event_detach.__doc__ = """Unregister an event notification.
@param p_event_manager the event manager
@param i_event_type the desired event to which we want to unregister
@param f_callback the function to call when i_event_type occurs
@param p_user_data user provided data to carry with the event
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_event_type_name'):
prototype=ctypes.CFUNCTYPE(ctypes.c_char_p, EventType)
paramflags=( (1, ), )
libvlc_event_type_name = prototype( ("libvlc_event_type_name", dll), paramflags )
libvlc_event_type_name.__doc__ = """Get an event's type name.
@param i_event_type the desired event
"""
if hasattr(dll, 'libvlc_get_log_verbosity'):
prototype=ctypes.CFUNCTYPE(ctypes.c_uint, Instance, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_get_log_verbosity = prototype( ("libvlc_get_log_verbosity", dll), paramflags )
libvlc_get_log_verbosity.errcheck = check_vlc_exception
libvlc_get_log_verbosity.__doc__ = """Return the VLC messaging verbosity level.
@param p_instance libvlc instance
@param p_e an initialized exception pointer
@return verbosity level for messages
"""
if hasattr(dll, 'libvlc_set_log_verbosity'):
prototype=ctypes.CFUNCTYPE(None, Instance, ctypes.c_uint, ctypes.POINTER(VLCException))
paramflags=(1,), (1,), (3,)
libvlc_set_log_verbosity = prototype( ("libvlc_set_log_verbosity", dll), paramflags )
libvlc_set_log_verbosity.errcheck = check_vlc_exception
libvlc_set_log_verbosity.__doc__ = """Set the VLC messaging verbosity level.
@param p_instance libvlc log instance
@param level log level
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_log_open'):
prototype=ctypes.CFUNCTYPE(Log, Instance, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_open = prototype( ("libvlc_log_open", dll), paramflags )
libvlc_log_open.errcheck = check_vlc_exception
libvlc_log_open.__doc__ = """Open a VLC message log instance.
@param p_instance libvlc instance
@param p_e an initialized exception pointer
@return log message instance
"""
if hasattr(dll, 'libvlc_log_close'):
prototype=ctypes.CFUNCTYPE(None, Log, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_close = prototype( ("libvlc_log_close", dll), paramflags )
libvlc_log_close.errcheck = check_vlc_exception
libvlc_log_close.__doc__ = """Close a VLC message log instance.
@param p_log libvlc log instance
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_log_count'):
prototype=ctypes.CFUNCTYPE(ctypes.c_uint, Log, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_count = prototype( ("libvlc_log_count", dll), paramflags )
libvlc_log_count.errcheck = check_vlc_exception
libvlc_log_count.__doc__ = """Returns the number of messages in a log instance.
@param p_log libvlc log instance
@param p_e an initialized exception pointer
@return number of log messages
"""
if hasattr(dll, 'libvlc_log_clear'):
prototype=ctypes.CFUNCTYPE(None, Log, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_clear = prototype( ("libvlc_log_clear", dll), paramflags )
libvlc_log_clear.errcheck = check_vlc_exception
libvlc_log_clear.__doc__ = """Clear a log instance.
All messages in the log are removed. The log should be cleared on a
regular basis to avoid clogging.
@param p_log libvlc log instance
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_log_get_iterator'):
prototype=ctypes.CFUNCTYPE(LogIterator, Log, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_get_iterator = prototype( ("libvlc_log_get_iterator", dll), paramflags )
libvlc_log_get_iterator.errcheck = check_vlc_exception
libvlc_log_get_iterator.__doc__ = """Allocate and returns a new iterator to messages in log.
@param p_log libvlc log instance
@param p_e an initialized exception pointer
@return log iterator object
"""
if hasattr(dll, 'libvlc_log_iterator_free'):
prototype=ctypes.CFUNCTYPE(None, LogIterator, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_iterator_free = prototype( ("libvlc_log_iterator_free", dll), paramflags )
libvlc_log_iterator_free.errcheck = check_vlc_exception
libvlc_log_iterator_free.__doc__ = """Release a previoulsy allocated iterator.
@param p_iter libvlc log iterator
@param p_e an initialized exception pointer
"""
if hasattr(dll, 'libvlc_log_iterator_has_next'):
prototype=ctypes.CFUNCTYPE(ctypes.c_int, LogIterator, ctypes.POINTER(VLCException))
paramflags=(1,), (3,)
libvlc_log_iterator_has_next = prototype( ("libvlc_log_iterator_has_next", dll), paramflags )
libvlc_log_iterator_has_next.errcheck = check_vlc_exception
libvlc_log_iterator_has_next.__doc__ = """Return whether log | |
#!/usr/bin/env python3
#
# Copyright 2021 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import glob
import hashlib
import logging
import os
import os.path as op
import pathlib
import platform
import shlex
import shutil
import sysconfig
import tarfile
import urllib.request
import venv
import zipfile
from multiprocessing import Pool
from subprocess import run
_ROOTDIR = op.abspath(op.dirname(__file__))
_SYSTEM = 'MinGW' if sysconfig.get_platform().startswith('mingw') else platform.system()
_RENDERDOC_ID = f'renderdoc_{_SYSTEM}'
_EXTERNAL_DEPS = dict(
sxplayer=dict(
version='9.10.0',
        url='https://github.com/Stupeflix/sxplayer/archive/v@VERSION@.tar.gz',
        dst_file='sxplayer-@VERSION@.tar.gz',
sha256='229cc06b4febac6d37ad596089edc1a9614b72383230fa546140c40c665e540d',
),
pkgconf=dict(
version='1.7.4',
        url='https://distfiles.dereferenced.org/pkgconf/pkgconf-@VERSION@.tar.xz',
sha256='d73f32c248a4591139a6b17777c80d4deab6b414ec2b3d21d0a24be348c476ab',
),
renderdoc_Windows=dict(
version='1.15',
url=f'https://renderdoc.org/stable/@VERSION@/RenderDoc_@VERSION@_64.zip',
sha256='9a437ddf2e7d687bb2d78327c27e06a52e5f8e9dd62f4e6e13bf036d2830bc3d',
),
renderdoc_Linux=dict(
version='1.15',
        url=f'https://renderdoc.org/stable/@VERSION@/renderdoc_@VERSION@.tar.gz',
sha256='ce0c7339359aa6c463055d7b92d32b442a6a6943d3c9009d50136127ee9c855a',
),
)
def _get_external_deps(args):
deps = ['sxplayer']
if _SYSTEM == 'Windows':
deps.append('pkgconf')
if 'gpu_capture' in args.debug_opts:
if _SYSTEM not in {'Windows', 'Linux'}:
raise Exception(f'Renderdoc is not supported on {_SYSTEM}')
deps.append(_RENDERDOC_ID)
return {dep: _EXTERNAL_DEPS[dep] for dep in deps}
def _guess_base_dir(dirs):
smallest_dir = sorted(dirs, key=lambda x: len(x))[0]
return pathlib.Path(smallest_dir).parts[0]
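# For example (hypothetical listing), _guess_base_dir({'sxplayer-9.10.0',
# 'sxplayer-9.10.0/src'}) returns 'sxplayer-9.10.0': the shortest member is
# taken and only its top-level path component is kept.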
def _file_chk(path, chksum_hexdigest):
chksum = hashlib.sha256()
with open(path, 'rb') as f:
while True:
buf = f.read(8196)
if not buf:
break
chksum.update(buf)
match = chksum.hexdigest() == chksum_hexdigest
if not match:
logging.warning('%s: mismatching check sum', path)
return match
def _download_extract(dep_item):
logging.basicConfig(level='INFO') # Needed for every process on Windows
name, dep = dep_item
version = dep['version']
url = dep['url'].replace('@VERSION@', version)
chksum = dep['sha256']
dst_file = dep.get('dst_file', op.basename(url)).replace('@VERSION@', version)
dst_base = op.join(_ROOTDIR, 'external')
dst_path = op.join(dst_base, dst_file)
os.makedirs(dst_base, exist_ok=True)
# Download
if not op.exists(dst_path) or not _file_chk(dst_path, dep['sha256']):
logging.info('downloading %s to %s', url, dst_file)
urllib.request.urlretrieve(url, dst_path)
# Extract
if tarfile.is_tarfile(dst_path):
with tarfile.open(dst_path) as tar:
dirs = {f.name for f in tar.getmembers() if f.isdir()}
extract_dir = op.join(dst_base, _guess_base_dir(dirs))
if not op.exists(extract_dir):
logging.info('extracting %s', dst_file)
tar.extractall(dst_base)
elif zipfile.is_zipfile(dst_path):
with zipfile.ZipFile(dst_path) as zip_:
dirs = {op.dirname(f) for f in zip_.namelist()}
extract_dir = op.join(dst_base, _guess_base_dir(dirs))
if not op.exists(extract_dir):
logging.info('extracting %s', dst_file)
zip_.extractall(dst_base)
else:
assert False
# Remove previous link if needed
target = op.join(dst_base, name)
rel_extract_dir = op.basename(extract_dir)
if op.islink(target) and os.readlink(target) != rel_extract_dir:
logging.info('unlink %s target', target)
os.unlink(target)
elif op.exists(target) and not op.islink(target):
logging.info('remove previous %s copy', target)
shutil.rmtree(target)
# Link (or copy)
if not op.exists(target):
logging.info('symlink %s -> %s', target, rel_extract_dir)
try:
os.symlink(rel_extract_dir, target)
except OSError:
# This typically happens on Windows when Developer Mode is not
# available/enabled
logging.info('unable to symlink, fallback on copy (%s -> %s)', extract_dir, target)
shutil.copytree(extract_dir, target)
return name, target
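# As a concrete example of the @VERSION@ substitution above (assuming the
# usual GitHub tarball naming), the sxplayer entry resolves to
# https://github.com/Stupeflix/sxplayer/archive/v9.10.0.tar.gz with dst_file
# sxplayer-9.10.0.tar.gz; the archive lands in external/, is extracted next to
# it, and is finally exposed through an external/sxplayer symlink (or a copy
# when symlinks are unavailable on Windows).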
def _fetch_externals(args):
dependencies = _get_external_deps(args)
with Pool() as p:
return dict(p.map(_download_extract, dependencies.items()))
def _block(name, prerequisites=None):
def real_decorator(block_func):
block_func.name = name
block_func.prerequisites = prerequisites if prerequisites else []
return block_func
return real_decorator
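# The decorator only tags each function with a name and its prerequisite
# blocks: after the definitions below, _sxplayer_install.name is
# 'sxplayer-install' and _sxplayer_install.prerequisites is [_sxplayer_setup],
# which _get_makefile_rec() later walks to emit one Makefile rule per block.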
def _meson_compile_install_cmd(component):
builddir = op.join('builddir', component)
return ['$(MESON) ' + _cmd_join(action, '-C', builddir) for action in ('compile', 'install')]
@_block('pkgconf-setup')
def _pkgconf_setup(cfg):
return ['$(MESON_SETUP) ' + _cmd_join('-Dtests=false', cfg.externals['pkgconf'], op.join('builddir', 'pkgconf'))]
@_block('pkgconf-install', [_pkgconf_setup])
def _pkgconf_install(cfg):
ret = _meson_compile_install_cmd('pkgconf')
pkgconf_exe = op.join(cfg.bin_path, 'pkgconf.exe')
pkgconfig_exe = op.join(cfg.bin_path, 'pkg-config.exe')
return ret + [f'copy {pkgconf_exe} {pkgconfig_exe}']
@_block('sxplayer-setup')
def _sxplayer_setup(cfg):
return ['$(MESON_SETUP) ' + _cmd_join(cfg.externals['sxplayer'], op.join('builddir', 'sxplayer'))]
@_block('sxplayer-install', [_sxplayer_setup])
def _sxplayer_install(cfg):
return _meson_compile_install_cmd('sxplayer')
@_block('renderdoc-install')
def _renderdoc_install(cfg):
renderdoc_dll = op.join(cfg.externals[_RENDERDOC_ID], 'renderdoc.dll')
return [f'copy {renderdoc_dll} {cfg.bin_path}']
@_block('nodegl-setup', [_sxplayer_install])
def _nodegl_setup(cfg):
nodegl_debug_opts = []
if cfg.args.debug_opts:
debug_opts = ','.join(cfg.args.debug_opts)
nodegl_debug_opts += [f'-Ddebug_opts={debug_opts}']
if 'gpu_capture' in cfg.args.debug_opts:
renderdoc_dir = cfg.externals[_RENDERDOC_ID]
nodegl_debug_opts += [f'-Drenderdoc_dir={renderdoc_dir}']
return ['$(MESON_SETUP) ' + _cmd_join(*nodegl_debug_opts, 'libnodegl', op.join('builddir', 'libnodegl'))]
@_block('nodegl-install', [_nodegl_setup])
def _nodegl_install(cfg):
return _meson_compile_install_cmd('libnodegl')
@_block('pynodegl-deps-install', [_nodegl_install])
def _pynodegl_deps_install(cfg):
return ['$(PIP) ' + _cmd_join('install', '-r', op.join('.', 'pynodegl', 'requirements.txt'))]
@_block('pynodegl-install', [_pynodegl_deps_install])
def _pynodegl_install(cfg):
ret = ['$(PIP) ' + _cmd_join('-v', 'install', '-e', op.join('.', 'pynodegl'))]
if _SYSTEM == 'Windows':
dlls = op.join(cfg.prefix, 'Scripts', '*.dll')
ret += [f'xcopy /Y {dlls} pynodegl\\.']
else:
rpath = op.join(cfg.prefix, 'lib')
ldflags = f'-Wl,-rpath,{rpath}'
ret[0] = f'LDFLAGS={ldflags} {ret[0]}'
return ret
@_block('pynodegl-utils-deps-install', [_pynodegl_install])
def _pynodegl_utils_deps_install(cfg):
#
# Requirements not installed on MinGW because:
# - PySide2 can't be pulled (required to be installed by the user outside the
# Python virtual env)
# - Pillow fails to find zlib (required to be installed by the user outside the
# Python virtual env)
#
if _SYSTEM == 'MinGW':
return ['@'] # noop
return ['$(PIP) ' + _cmd_join('install', '-r', op.join('.', 'pynodegl-utils', 'requirements.txt'))]
@_block('pynodegl-utils-install', [_pynodegl_utils_deps_install])
def _pynodegl_utils_install(cfg):
return ['$(PIP) ' + _cmd_join('-v', 'install', '-e', op.join('.', 'pynodegl-utils'))]
@_block('ngl-tools-setup', [_nodegl_install])
def _ngl_tools_setup(cfg):
return ['$(MESON_SETUP) ' + _cmd_join('ngl-tools', op.join('builddir', 'ngl-tools'))]
@_block('ngl-tools-install', [_ngl_tools_setup])
def _ngl_tools_install(cfg):
return _meson_compile_install_cmd('ngl-tools')
def _nodegl_run_target_cmd(cfg, target):
builddir = op.join('builddir', 'libnodegl')
return ['$(MESON) ' + _cmd_join('compile', '-C', builddir, target)]
@_block('nodegl-updatedoc', [_nodegl_install])
def _nodegl_updatedoc(cfg):
return _nodegl_run_target_cmd(cfg, 'updatedoc')
@_block('nodegl-updatespecs', [_nodegl_install])
def _nodegl_updatespecs(cfg):
return _nodegl_run_target_cmd(cfg, 'updatespecs')
@_block('nodegl-updateglwrappers', [_nodegl_install])
def _nodegl_updateglwrappers(cfg):
return _nodegl_run_target_cmd(cfg, 'updateglwrappers')
@_block('all', [_ngl_tools_install, _pynodegl_utils_install])
def _all(cfg):
echo = ['', 'Build completed.', '', 'You can now enter the venv with:']
if _SYSTEM == 'Windows':
echo.append(op.join(cfg.bin_path, 'Activate.ps1'))
return [f'@echo.{e}' for e in echo]
else:
echo.append(' ' * 4 + '. ' + op.join(cfg.bin_path, 'activate'))
return [f'@echo " {e}"' for e in echo]
@_block('tests-setup', [_ngl_tools_install, _pynodegl_utils_install])
def _tests_setup(cfg):
return ['$(MESON_SETUP_TESTS) ' + _cmd_join('tests', op.join('builddir', 'tests'))]
@_block('nodegl-tests', [_nodegl_install])
def _nodegl_tests(cfg):
return [_cmd_join('meson', 'test', '-C', op.join('builddir', 'libnodegl'))]
def _rm(f):
return f'(if exist {f} del /q {f})' if _SYSTEM == 'Windows' else f'$(RM) {f}'
def _rd(d):
return f'(if exist {d} rd /s /q {d})' if _SYSTEM == 'Windows' else f'$(RM) -r {d}'
@_block('clean-py')
def _clean_py(cfg):
return [
_rm(op.join('pynodegl', 'nodes_def.pyx')),
_rm(op.join('pynodegl', 'pynodegl.c')),
_rm(op.join('pynodegl', 'pynodegl.*.so')),
_rm(op.join('pynodegl', 'pynodegl.*.pyd')),
_rd(op.join('pynodegl', 'build')),
_rd(op.join('pynodegl', 'pynodegl.egg-info')),
_rd(op.join('pynodegl', '.eggs')),
_rd(op.join('pynodegl-utils', 'pynodegl_utils.egg-info')),
_rd(op.join('pynodegl-utils', '.eggs')),
]
@_block('clean', [_clean_py])
def _clean(cfg):
return [
_rd(op.join('builddir', 'sxplayer')),
_rd(op.join('builddir', 'libnodegl')),
_rd(op.join('builddir', 'ngl-tools')),
_rd(op.join('builddir', 'tests')),
]
def _coverage(cfg, output):
# We don't use `meson coverage` here because of
# https://github.com/mesonbuild/meson/issues/7895
return [_cmd_join('ninja', '-C', op.join('builddir', 'libnodegl'), f'coverage-{output}')]
@_block('coverage-html')
def _coverage_html(cfg):
return _coverage(cfg, 'html')
@_block('coverage-xml')
def _coverage_xml(cfg):
return _coverage(cfg, 'xml')
@_block('tests', [_nodegl_tests, _tests_setup])
def _tests(cfg):
return [_cmd_join('meson', 'test', '-C', op.join('builddir', 'tests'))]
def _quote(s):
if not s or ' ' in s:
return f'"{s}"'
assert "'" not in s
assert '"' not in s
return s
def _cmd_join(*cmds):
if _SYSTEM == 'Windows':
return ' '.join(_quote(cmd) for cmd in cmds)
return shlex.join(cmds)
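# Example of the platform split (hypothetical arguments): on POSIX,
# _cmd_join('meson', 'test', '-C', 'build dir') delegates to shlex.join and
# yields "meson test -C 'build dir'", while on Windows _quote() wraps the
# spaced argument in double quotes instead.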
def _get_make_vars(cfg):
    # We don't want Python to fall back on one found in the PATH, so we point
    # it explicitly at the one in the venv.
python = op.join(cfg.bin_path, 'python')
#
# MAKEFLAGS= is a workaround (not working on Windows due to incompatible Make
# syntax) for the issue described here:
# https://github.com/ninja-build/ninja/issues/1139#issuecomment-724061270
#
# Note: this will invoke the meson in the venv, unless we're on MinGW where
# it will fallback on the system one. This is due to the extended PATH
# mechanism.
#
meson = 'MAKEFLAGS= meson' if _SYSTEM != 'Windows' else 'meson'
buildtype = 'debugoptimized' if cfg.args.coverage or cfg.args.buildtype == 'debug' else 'release'
meson_setup = [
'setup',
'--prefix', cfg.prefix,
'-Drpath=true',
'--pkg-config-path', cfg.pkg_config_path,
'--buildtype', buildtype,
]
if cfg.args.coverage:
meson_setup += ['-Db_coverage=true']
if _SYSTEM != 'MinGW':
meson_setup += ['-Db_lto=true']
if _SYSTEM == 'Windows':
meson_setup += ['--bindir=Scripts', '--libdir=Lib', '--includedir=Include']
elif op.isfile('/etc/debian_version'):
# Workaround Debian/Ubuntu bug; see https://github.com/mesonbuild/meson/issues/5925
meson_setup += ['--libdir=lib']
ret = dict(
PIP=_cmd_join(python, '-m', 'pip'),
MESON=meson,
)
ret['MESON_SETUP'] = '$(MESON) ' + _cmd_join(*meson_setup, f'--backend={cfg.args.build_backend}')
# Our tests/meson.build logic is not well supported with the VS backend so
# we need to fallback on Ninja
ret['MESON_SETUP_TESTS'] = '$(MESON) ' + _cmd_join(*meson_setup, '--backend=ninja')
return ret
def _get_makefile_rec(cfg, blocks, declared):
ret = ''
for block in blocks:
if block.name in declared:
continue
declared |= {block.name}
req_names = ' '.join(r.name for r in block.prerequisites)
req = f' {req_names}' if req_names else ''
commands = '\n'.join('\t' + cmd for cmd in block(cfg))
ret += f'{block.name}:{req}\n{commands}\n'
ret += _get_makefile_rec(cfg, block.prerequisites, declared)
return ret
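# Each block therefore becomes a Makefile rule of the form (sketch, tabs shown
# as <TAB>):
#
#   sxplayer-install: sxplayer-setup
#   <TAB>$(MESON) compile -C builddir/sxplayer
#   <TAB>$(MESON) install -C builddir/sxplayer
#
# and the rules for its prerequisites are emitted recursively afterwards.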
def _get_makefile(cfg, blocks):
env = cfg.get_env()
env_vars = {k: _quote(v) for k, v in env.items()}
if _SYSTEM == 'Windows':
#
# Environment variables are altered if and only if they already exists
# in the environment. While this is (usually) true for PATH, it isn't
# for the others we're trying to declare. This "if [set...]" trick is
# to circumvent this issue.
#
# See https://stackoverflow.com/questions/38381422/how-do-i-set-an-environment-variables-with-nmake
#
vars_export = '\n'.join(f'{k} = {v}' for k, v in env_vars.items()) + '\n'
vars_export_cond = | |
raise
else:
logger.debug("Got following CR IDs: " + json.dumps(cr_id_list))
# Add Consent Record IDs to record_container
for cr_id in cr_id_list:
logger.debug("Looping through cr_id_list: " + json.dumps(cr_id_list))
try:
logger.info("Adding CR IDs")
record_id_container[slr_id]["consentRecords"][cr_id] = {"consentStatusRecords": {}}
except Exception as exp:
logger.error('Could not add cr_id: ' + str(cr_id) + ' to record_id_container: ' + repr(exp))
raise
else:
logger.debug("Added CR ID: " + str(cr_id))
# Get Consent Status Record IDs
try:
logger.info("Getting CSR IDs")
cursor, csr_id_list = get_csr_ids(cursor=cursor, cr_id=cr_id, table_name=table_names["csr"])
except Exception as exp:
logger.error('Could not get csr primary key list: ' + repr(exp))
raise
else:
logger.debug("Got following CSR IDs: " + json.dumps(csr_id_list))
# Add Consent Status Record IDs to record_container
for csr_id in csr_id_list:
logger.debug("Looping through csr_id_list: " + json.dumps(csr_id_list))
try:
logger.info("Adding CSR IDs")
record_id_container[slr_id]["consentRecords"][cr_id]["consentStatusRecords"][csr_id] = {}
except Exception as exp:
logger.error('Could not add csr_id: ' + str(csr_id) + ' to record_id_container: ' + repr(exp))
raise
else:
logger.debug("Added CSR ID: " + str(csr_id))
return record_id_container
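# The resulting record_id_container maps each service link record ID to its
# consent record IDs and, under those, their consent status record IDs, e.g.
# (hypothetical IDs):
#
#   {"slr1": {"consentRecords": {"cr1": {"consentStatusRecords": {"csr1": {}}}}}}
#
# get_records() below is meant to resolve these IDs into full records, but its
# lookup loop is still commented out and it currently returns an empty dict.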
def get_records(cursor=None, record_ids=None):
if cursor is None:
raise AttributeError("Provide cursor as parameter")
if record_ids is None:
raise AttributeError("Provide record_ids as parameter")
if not isinstance(record_ids, dict):
raise AttributeError("record_ids MUST be dict")
logger.debug("Type of record_ids: " + repr(type(record_ids)))
record_container = {}
logger.info("Getting Records")
logger.info("record_ids: " + repr(record_ids))
record_ids = dict(record_ids)
# logger.info("Get Service Link Records")
# for slr in record_ids.iteritems():
# logger.debug("slr: " + repr(slr))
# logger.info("Looping through Service Link Record with ID: " + json.dumps(slr))
return record_container
def get_slrs_and_subcomponents(account_id=None):
"""
Get all slr -entries with sub elements (slsr, cr, csr) related to account
:param account_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
# Containers
return_container = {}
record_id_container = {}
record_container = {}
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
try:
record_id_container = get_record_ids(cursor=cursor, account_id=account_id)
except Exception as exp:
logger.error('Could not get record id collection: ' + repr(exp))
raise
# TODO: Get Actual records from db
logger.info("################")
logger.info("################")
logger.info("################")
try:
record_container = get_records(cursor=cursor, record_ids=record_id_container)
except Exception as exp:
logger.error('Could not get record collection: ' + repr(exp))
raise
logger.info("################")
logger.info("################")
logger.info("################")
return_container["record_id_container"] = record_id_container
return_container["record_container"] = record_container
# Get slrs from database
# logger.info("Get slrs from database")
# db_entry_list = []
# for id in id_list:
# # TODO: try-except needed?
# logger.info("Getting slr with slr_id: " + str(id))
# db_entry_dict = get_slr(account_id=account_id, slr_id=id)
# db_entry_list.append(db_entry_dict)
# logger.info("slr object added to list: " + json.dumps(db_entry_dict))
return return_container
##################################
###################################
# Service Link Status Records
##################################
##################################
def get_slsr(account_id=None, slr_id=None, slsr_id=None, cursor=None):
"""
    Get one slsr entry from database by Account ID and slsr ID
:param slr_id:
:param slsr_id:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if slr_id is None:
raise AttributeError("Provide slr_id as parameter")
if slsr_id is None:
raise AttributeError("Provide slsr_id as parameter")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Check if slr can be found with account_id and slr_id
try:
slr = get_slr(account_id=account_id, slr_id=slr_id)
except StandardError as exp:
logger.error(repr(exp))
raise
except Exception as exp:
func_data = {'account_id': account_id, 'slr_id': slr_id}
title = "No SLR with: " + json.dumps(func_data)
logger.error(title)
raise StandardError(title + ": " + repr(exp))
else:
logger.info("Found SLR: " + repr(slr))
try:
db_entry_object = ServiceLinkStatusRecord(service_link_status_record_id=slsr_id, service_link_record_id=slr_id)
except Exception as exp:
error_title = "Failed to create slsr object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("slsr object created: " + db_entry_object.log_entry)
# Get slsr from DB
try:
logger.info("Get slsr from DB")
cursor = db_entry_object.from_db(cursor=cursor)
except Exception as exp:
error_title = "Failed to fetch slsr from DB"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.info("slsr fetched")
logger.info("slsr fetched from db: " + db_entry_object.log_entry)
return db_entry_object.to_record_dict
def get_slsrs(account_id=None, slr_id=None):
"""
    Get all slsr entries related to a service link record
:param account_id:
:param slr_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if slr_id is None:
raise AttributeError("Provide slr_id as parameter")
# Check if slr can be found with account_id and slr_id
try:
slr = get_slr(account_id=account_id, slr_id=slr_id)
except StandardError as exp:
logger.error(repr(exp))
raise
except Exception as exp:
func_data = {'account_id': account_id, 'slr_id': slr_id}
title = "No SLR with: " + json.dumps(func_data)
logger.error(title)
raise StandardError(title + ": " + repr(exp))
else:
logger.info("HEP")
logger.info("Found SLR: " + repr(slr))
# Get table name
logger.info("Create slsr")
db_entry_object = ServiceLinkStatusRecord()
logger.info(db_entry_object.log_entry)
logger.info("Get table name")
table_name = db_entry_object.table_name
logger.info("Got table name: " + str(table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Get primary keys for slsr
try:
cursor, id_list = get_slsr_ids(cursor=cursor, slr_id=slr_id, table_name=table_name)
except Exception as exp:
logger.error('Could not get primary key list: ' + repr(exp))
raise
# Get slsrs from database
logger.info("Get slsrs from database")
db_entry_list = []
for id in id_list:
# TODO: try-except needed?
logger.info("Getting slsr with account_id: " + str(account_id) + " slr_id: " + str(slr_id) + " slsr_id: " + str(id))
db_entry_dict = get_slsr(account_id=account_id, slr_id=slr_id, slsr_id=id)
db_entry_list.append(db_entry_dict)
logger.info("slsr object added to list: " + json.dumps(db_entry_dict))
return db_entry_list
##################################
###################################
# Consent Records
##################################
##################################
def get_cr(account_id=None, slr_id=None, cr_id=None, cursor=None):
"""
    Get one cr entry from database by Account ID and cr ID
:param slr_id:
:param cr_id:
:return: dict
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if slr_id is None:
raise AttributeError("Provide slr_id as parameter")
if cr_id is None:
raise AttributeError("Provide cr_id as parameter")
if cursor is None:
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Check if slr can be found with account_id and slr_id
try:
logger.info("Check if slr can be found with account_id and slr_id")
slr = get_slr(account_id=account_id, slr_id=slr_id)
except StandardError as exp:
logger.error(repr(exp))
raise
except Exception as exp:
func_data = {'account_id': account_id, 'slr_id': slr_id}
title = "No SLR with: " + json.dumps(func_data)
logger.error(title)
raise StandardError(title + ": " + repr(exp))
else:
logger.info("Found: " + repr(slr))
try:
db_entry_object = ConsentRecord(consent_id=cr_id, service_link_record_id=slr_id)
except Exception as exp:
error_title = "Failed to create cr object"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.debug("cr object created: " + db_entry_object.log_entry)
# Get cr from DB
try:
cursor = db_entry_object.from_db(cursor=cursor)
except Exception as exp:
error_title = "Failed to fetch cr from DB"
logger.error(error_title + ": " + repr(exp))
raise
else:
logger.info("cr fetched")
logger.info("cr fetched from db: " + db_entry_object.log_entry)
return db_entry_object.to_record_dict
def get_crs(account_id=None, slr_id=None):
"""
    Get all cr entries related to a service link record
:param account_id:
:param slr_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
if slr_id is None:
raise AttributeError("Provide slr_id as parameter")
# Check if slr can be found with account_id and slr_id
try:
slr = get_slr(account_id=account_id, slr_id=slr_id)
except StandardError as exp:
logger.error(repr(exp))
raise
except Exception as exp:
func_data = {'account_id': account_id, 'slr_id': slr_id}
title = "No SLR with: " + json.dumps(func_data)
logger.error(title)
raise StandardError(title + ": " + repr(exp))
else:
logger.info("Found SLR: " + repr(slr))
# Get table name
logger.info("Create cr")
db_entry_object = ConsentRecord()
logger.info(db_entry_object.log_entry)
logger.info("Get table name")
table_name = db_entry_object.table_name
logger.info("Got table name: " + str(table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
# Get primary keys for crs
try:
logger.info("Get primary keys for crs")
cursor, id_list = get_cr_ids(cursor=cursor, slr_id=slr_id, table_name=table_name)
except Exception as exp:
logger.error('Could not get primary key list: ' + repr(exp))
raise
else:
logger.info("primary keys for crs: " + repr(id_list))
# Get crs from database
logger.info("Get crs from database")
db_entry_list = []
for id in id_list:
# TODO: try-except needed?
logger.info("Getting cr with account_id: " + str(account_id) + " slr_id: " + str(slr_id) + " cr_id: " + str(id))
db_entry_dict = get_cr(account_id=account_id, slr_id=slr_id, cr_id=id)
db_entry_list.append(db_entry_dict)
logger.info("cr object added to list: " + json.dumps(db_entry_dict))
return db_entry_list
##################################
###################################
# Consent Status Records
##################################
##################################
def get_csr(account_id=None, slr_id=None, cr_id=None, csr_id=None, cursor=None):
"""
Get one csr entry from database by Account | |
= config_positive_int_value(
conf.get('per_container_ratelimit_buckets', 1000))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.report_interval = float(conf.get('report_interval', 300))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
self.stats = SweepStats()
self.max_deferred_updates = non_negative_int(
conf.get('max_deferred_updates', 10000))
self.begin = time.time()
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error('ERROR: Unable to access %(path)s: '
'%(error)s',
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin object update sweep')
self.begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device,
self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional
# unmounted drive is part of normal cluster operations,
# so a simple warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
while len(pids) >= self.updater_workers:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
eventlet_monkey_patch()
self.stats.reset()
forkbegin = time.time()
self.object_sweep(dev_path)
elapsed = time.time() - forkbegin
self.logger.info(
('Object update sweep of %(device)s '
'completed: %(elapsed).02fs, %(stats)s'),
{'device': device, 'elapsed': elapsed,
'stats': self.stats})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - self.begin
self.logger.info('Object update sweep completed: %.02fs',
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info('Begin object update single threaded sweep')
self.begin = time.time()
self.stats.reset()
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device, self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional unmounted
# drive is part of normal cluster operations, so a simple
# warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
self.object_sweep(dev_path)
elapsed = time.time() - self.begin
self.logger.info(
('Object update single-threaded sweep completed: '
'%(elapsed).02fs, %(stats)s'),
{'elapsed': elapsed, 'stats': self.stats})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def _load_update(self, device, update_path):
try:
return pickle.load(open(update_path, 'rb'))
except Exception as e:
if getattr(e, 'errno', None) == errno.ENOENT:
return
self.logger.exception(
'ERROR Pickle problem, quarantining %s', update_path)
self.stats.quarantines += 1
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
return
def _iter_async_pendings(self, device):
"""
Locate and yield an update context for all the async pending files on
the device. Each update context contains details of the async pending
file location, its timestamp and the un-pickled update data.
Async pending files that fail to load will be quarantined.
Only the most recent update for the same object is yielded; older
(stale) async pending files are unlinked as they are located.
The iterator tries to clean up empty directories as it goes.
"""
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not asyncdir.startswith(ASYNCDIR_BASE):
# skip stuff like "accounts", "containers", etc.
continue
if not os.path.isdir(async_pending):
continue
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
# This isn't an error, but a misconfiguration. Logging a
# warning should be sufficient.
self.logger.warning('Directory %(directory)r does not map '
'to a valid policy (%(error)s)', {
'directory': asyncdir, 'error': e})
continue
prefix_dirs = self._listdir(async_pending)
shuffle(prefix_dirs)
for prefix in prefix_dirs:
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update_file in sorted(self._listdir(prefix_path),
reverse=True):
update_path = os.path.join(prefix_path, update_file)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update_file.split('-')
except ValueError:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error(
'ERROR async pending file with unexpected '
'name %s', update_path)
continue
# Async pendings are stored on disk like this:
#
# <device>/async_pending/<suffix>/<obj_hash>-<timestamp>
#
# If there are multiple updates for a given object,
# they'll look like this:
#
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp1>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp2>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp3>
#
# Async updates also have the property that newer
# updates contain all the information in older updates.
# Since we sorted the directory listing in reverse
# order, we'll see timestamp3 first, yield it, and then
# unlink timestamp2 and timestamp1 since we know they
# are obsolete.
#
# This way, our caller only gets useful async_pendings.
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
try:
os.unlink(update_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
last_obj_hash = obj_hash
update = self._load_update(device, update_path)
if update is not None:
yield {'device': device,
'policy': policy,
'update_path': update_path,
'obj_hash': obj_hash,
'timestamp': timestamp,
'update': update}
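    # Illustrative sketch (not part of the updater): the async_pending file
    # name format assumed by the loop above, split into its object hash and
    # timestamp parts. The values are hypothetical.
    #
    #   >>> update_file = 'deadbeefcafe-0000001234.56789'
    #   >>> update_file.split('-')
    #   ['deadbeefcafe', '0000001234.56789']
    #
    # Because each suffix directory is listed in reverse-sorted order, the
    # newest file for an object hash is yielded first and any older files
    # with the same hash are unlinked as stale.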
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
last_status_update = start_time
start_stats = self.stats.copy()
my_pid = os.getpid()
self.logger.info("Object update sweep starting on %s (pid: %d)",
device, my_pid)
ap_iter = RateLimitedIterator(
self._iter_async_pendings(device),
elements_per_second=self.max_objects_per_second)
ap_iter = BucketizedUpdateSkippingLimiter(
ap_iter, self.logger, self.stats,
self.per_container_ratelimit_buckets,
self.max_objects_per_container_per_second,
max_deferred_elements=self.max_deferred_updates,
drain_until=self.begin + self.interval)
with ContextPool(self.concurrency) as pool:
for update_ctx in ap_iter:
pool.spawn(self.process_object_update, **update_ctx)
now = time.time()
if now - last_status_update >= self.report_interval:
this_sweep = self.stats.since(start_stats)
self.logger.info(
('Object update sweep progress on %(device)s: '
'%(elapsed).02fs, %(stats)s (pid: %(pid)d)'),
{'device': device,
'elapsed': now - start_time,
'pid': my_pid,
'stats': this_sweep})
last_status_update = now
pool.waitall()
self.logger.timing_since('timing', start_time)
sweep_totals = self.stats.since(start_stats)
self.logger.info(
('Object update sweep completed on %(device)s '
             'in %(elapsed).02f seconds: '
'%(successes)d successes, %(failures)d failures, '
'%(quarantines)d quarantines, '
'%(unlinks)d unlinks, %(errors)d errors, '
'%(redirects)d redirects, '
'%(skips)d skips, '
'%(deferrals)d deferrals, '
'%(drains)d drains '
'(pid: %(pid)d)'),
{'device': device,
'elapsed': time.time() - start_time,
'pid': my_pid,
'successes': sweep_totals.successes,
'failures': sweep_totals.failures,
'quarantines': sweep_totals.quarantines,
'unlinks': sweep_totals.unlinks,
'errors': sweep_totals.errors,
'redirects': sweep_totals.redirects,
'skips': sweep_totals.skips,
'deferrals': sweep_totals.deferrals,
'drains': sweep_totals.drains
})
def process_object_update(self, update_path, device, policy, update,
**kwargs):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy: storage policy of object update
:param update: the un-pickled update data
:param kwargs: un-used keys from update_ctx
"""
def do_update():
successes = update.get('successes', [])
headers_out = HeaderKeyDict(update['headers'].copy())
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(int(policy)))
headers_out.setdefault('X-Backend-Accept-Redirect', 'true')
headers_out.setdefault('X-Backend-Accept-Quoted-Location', 'true')
acct, cont = split_update_path(update)
part, nodes = self.get_container_ring().get_nodes(acct, cont)
obj = '/%s/%s/%s' % (acct, cont, update['obj'])
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = rewrite_pickle = False
redirect = None
redirects = set()
for event in events:
event_success, node_id, redirect = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if redirect:
redirects.add(redirect)
if success:
self.stats.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
elif redirects:
# erase any previous successes
update.pop('successes', None)
redirect = max(redirects, key=lambda x: x[-1])[0]
redirect_history = update.setdefault('redirect_history', [])
if redirect in redirect_history:
# force next update to be sent to root, reset history
update['container_path'] = None
update['redirect_history'] = []
else:
update['container_path'] = redirect
redirect_history.append(redirect)
self.stats.redirects += 1
self.logger.increment("redirects")
self.logger.debug(
'Update redirected for %(obj)s %(path)s to %(shard)s',
{'obj': obj, 'path': update_path,
'shard': update['container_path']})
rewrite_pickle = True
else:
self.stats.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
rewrite_pickle = True
return rewrite_pickle, redirect
rewrite_pickle, redirect = do_update()
if redirect:
# make one immediate retry to the redirect location
rewrite_pickle, redirect = do_update()
if rewrite_pickle:
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy)))
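    # Illustrative sketch (not part of the updater): how do_update() above
    # picks the most recent shard redirect. Each element of ``redirects`` is
    # a (container_path, timestamp) tuple; the values are hypothetical.
    #
    #   >>> redirects = {('.shards_a/c_1', '0000000001.00000'),
    #   ...              ('.shards_a/c_2', '0000000002.00000')}
    #   >>> max(redirects, key=lambda x: x[-1])[0]
    #   '.shards_a/c_2'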
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'PUT' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
        :return: a tuple of (``success``, ``node_id``, ``redirect``)
                # set color table and color interpretation
dswx_ctable = _get_interpreted_dswx_ctable()
gdal_band.SetRasterColorTable(dswx_ctable)
gdal_band.SetRasterColorInterpretation(
gdal.GCI_PaletteIndex)
if description is not None:
gdal_band.SetDescription(description)
else:
gdal_band.SetDescription(description_from_dict)
gdal_band.FlushCache()
gdal_band = None
if n_valid_bands == 1:
break
gdal_ds.FlushCache()
gdal_ds = None
save_as_cog(output_file, scratch_dir, logger)
if output_files_list is not None:
output_files_list.append(output_file)
logger.info(f'file saved: {output_file}')
def save_mask(mask, output_file, dswx_metadata_dict, geotransform, projection,
description = None, output_files_list = None):
"""Save DSWx-HLS cloud/cloud-mask layer
Parameters
----------
mask: numpy.ndarray
Cloud/cloud-shadow layer
output_file: str
Output filename
dswx_metadata_dict: dict
Metadata dictionary to be written into the output file
geotransform: numpy.ndarray
Geotransform describing the output file geolocation
projection: str
Output file's projection
description: str (optional)
Band description
output_files_list: list (optional)
Mutable list of output files
"""
_makedirs(output_file)
shape = mask.shape
driver = gdal.GetDriverByName("GTiff")
gdal_ds = driver.Create(output_file, shape[1], shape[0], 1, gdal.GDT_Byte)
gdal_ds.SetMetadata(dswx_metadata_dict)
gdal_ds.SetGeoTransform(geotransform)
gdal_ds.SetProjection(projection)
mask_band = gdal_ds.GetRasterBand(1)
mask_band.WriteArray(mask)
mask_band.SetNoDataValue(255)
# set color table and color interpretation
mask_ctable = _get_mask_ctable()
mask_band.SetRasterColorTable(mask_ctable)
mask_band.SetRasterColorInterpretation(
gdal.GCI_PaletteIndex)
if description is not None:
mask_band.SetDescription(description)
gdal_ds.FlushCache()
gdal_ds = None
if output_files_list is not None:
output_files_list.append(output_file)
logger.info(f'file saved: {output_file}')
def _save_binary_water(binary_water_layer, output_file, dswx_metadata_dict,
geotransform, projection, description = None,
output_files_list = None):
"""Save DSWx-HLS binary water layer
Parameters
----------
binary_water_layer: numpy.ndarray
Binary water layer
output_file: str
Output filename
dswx_metadata_dict: dict
Metadata dictionary to be written into the output file
geotransform: numpy.ndarray
Geotransform describing the output file geolocation
projection: str
Output file's projection
description: str (optional)
Band description
output_files_list: list (optional)
Mutable list of output files
"""
_makedirs(output_file)
shape = binary_water_layer.shape
driver = gdal.GetDriverByName("GTiff")
gdal_ds = driver.Create(output_file, shape[1], shape[0], 1, gdal.GDT_Byte)
gdal_ds.SetMetadata(dswx_metadata_dict)
gdal_ds.SetGeoTransform(geotransform)
gdal_ds.SetProjection(projection)
binary_water_band = gdal_ds.GetRasterBand(1)
binary_water_band.WriteArray(binary_water_layer)
binary_water_band.SetNoDataValue(255)
# set color table and color interpretation
binary_water_ctable = _get_binary_water_ctable()
binary_water_band.SetRasterColorTable(binary_water_ctable)
binary_water_band.SetRasterColorInterpretation(
gdal.GCI_PaletteIndex)
if description is not None:
binary_water_band.SetDescription(description)
gdal_ds.FlushCache()
gdal_ds = None
if output_files_list is not None:
output_files_list.append(output_file)
logger.info(f'file saved: {output_file}')
def _save_array(input_array, output_file, dswx_metadata_dict, geotransform,
projection, description = None, output_files_list = None):
"""Save a generic DSWx-HLS layer (e.g., diagnostic layer, shadow layer, etc.)
Parameters
----------
input_array: numpy.ndarray
DSWx-HLS layer to be saved
output_file: str
Output filename
dswx_metadata_dict: dict
Metadata dictionary to be written into the output file
geotransform: numpy.ndarray
Geotransform describing the output file geolocation
projection: str
Output file's projection
description: str (optional)
Band description
output_files_list: list (optional)
Mutable list of output files
"""
_makedirs(output_file)
shape = input_array.shape
driver = gdal.GetDriverByName("GTiff")
gdal_ds = driver.Create(output_file, shape[1], shape[0], 1, gdal.GDT_Byte)
gdal_ds.SetMetadata(dswx_metadata_dict)
gdal_ds.SetGeoTransform(geotransform)
gdal_ds.SetProjection(projection)
raster_band = gdal_ds.GetRasterBand(1)
raster_band.WriteArray(input_array)
if description is not None:
gdal_ds.SetDescription(description)
gdal_ds.FlushCache()
gdal_ds = None
if output_files_list is not None:
output_files_list.append(output_file)
logger.info(f'file saved: {output_file}')
def _makedirs(input_file):
output_dir = os.path.dirname(input_file)
if not output_dir:
return
os.makedirs(output_dir, exist_ok=True)
def _save_output_rgb_file(red, green, blue, output_file,
offset_dict, scale_dict,
flag_offset_and_scale_inputs,
geotransform, projection,
invalid_ind = None, output_files_list = None,
flag_infrared = False):
"""Save the a three-band reflectance-layer (RGB or infrared RGB) GeoTIFF
Parameters
----------
red: numpy.ndarray
Red reflectance layer
green: numpy.ndarray
Green reflectance layer
blue: numpy.ndarray
Blue reflectance layer
output_file: str
Output filename
offset_dict: dict
Offset dictionary that stores band offsets
scale_dict: dict
Scale dictionary that stores bands scaling factor
flag_offset_and_scale_inputs: bool
        Flag to indicate if the input bands have already been offset and scaled
geotransform: numpy.ndarray
Geotransform describing the output file geolocation
projection: str
Output file's projection
invalid_ind: list
List of invalid indices to be set to NaN
output_files_list: list (optional)
Mutable list of output files
flag_infrared: bool
Flag to indicate if layer represents infrared reflectance,
i.e., Red, NIR, and SWIR-1
"""
_makedirs(output_file)
shape = blue.shape
driver = gdal.GetDriverByName("GTiff")
    gdal_dtype = gdal.GDT_Float32
gdal_ds = driver.Create(output_file, shape[1], shape[0], 3, gdal_dtype)
gdal_ds.SetGeoTransform(geotransform)
gdal_ds.SetProjection(projection)
# HLS images were not yet corrected for offset and scale factor
if not flag_offset_and_scale_inputs:
if not flag_infrared:
red_key = 'red'
green_key = 'green'
blue_key = 'blue'
else:
red_key = 'swir1'
green_key = 'nir'
blue_key = 'red'
red = scale_dict[red_key] * (np.asarray(red, dtype=np.float32) -
offset_dict[red_key])
green = scale_dict[green_key] * (np.asarray(green, dtype=np.float32) -
offset_dict[green_key])
blue = scale_dict[blue_key] * (np.asarray(blue, dtype=np.float32) -
offset_dict[blue_key])
if invalid_ind is not None:
red[invalid_ind] = np.nan
green[invalid_ind] = np.nan
blue[invalid_ind] = np.nan
# Save red band
gdal_ds.GetRasterBand(1).WriteArray(red)
# Save green band
gdal_ds.GetRasterBand(2).WriteArray(green)
# Save blue band
gdal_ds.GetRasterBand(3).WriteArray(blue)
gdal_ds.FlushCache()
gdal_ds = None
if output_files_list is not None:
output_files_list.append(output_file)
logger.info(f'file saved: {output_file}')
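# Illustrative sketch (not part of the module): the offset/scale conversion
# applied above when the input bands are still raw digital numbers. The scale
# and offset values are hypothetical, not taken from any HLS product.
#
#   import numpy as np
#   scale, offset = 0.0001, 0.0
#   dn = np.array([5000, 2500], dtype=np.float32)
#   scale * (dn - offset)   # -> reflectance values [0.5, 0.25]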
def get_projection_proj4(projection):
"""Return projection in proj4 format
    Parameters
    ----------
    projection : str
        Projection
Returns
-------
projection_proj4 : str
Projection in proj4 format
"""
srs = osr.SpatialReference()
if projection.upper() == 'WGS84':
srs.SetWellKnownGeogCS(projection)
else:
srs.ImportFromProj4(projection)
projection_proj4 = srs.ExportToProj4()
projection_proj4 = projection_proj4.strip()
return projection_proj4
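# Illustrative sketch (not part of the module): typical use of
# get_projection_proj4() above. The exact string returned depends on the
# installed GDAL/PROJ version.
#
#   get_projection_proj4('WGS84')
#   # -> something like '+proj=longlat +datum=WGS84 +no_defs'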
def _relocate(input_file, geotransform, projection,
length, width,
resample_algorithm='nearest',
relocated_file=None):
"""Relocate/reproject a file (e.g., landcover or DEM) based on geolocation
defined by a geotransform, output dimensions (length and width)
and projection
Parameters
----------
input_file: str
Input filename
geotransform: numpy.ndarray
Geotransform describing the output file geolocation
projection: str
Output file's projection
length: int
Output length
width: int
Output width
resample_algorithm: str
Resample algorithm
relocated_file: str
Relocated file (output file)
Returns
-------
relocated_array : numpy.ndarray
Relocated array
"""
logger.info(f'relocating file: {input_file}')
dy = geotransform[5]
dx = geotransform[1]
y0 = geotransform[3]
x0 = geotransform[0]
xf = x0 + width * dx
yf = y0 + length * dy
dstSRS = get_projection_proj4(projection)
if relocated_file is None:
relocated_file = tempfile.NamedTemporaryFile(
dir='.', suffix='.tif').name
logger.info(f'temporary file: {relocated_file}')
else:
logger.info(f'relocated file: {relocated_file}')
_makedirs(relocated_file)
gdal.Warp(relocated_file, input_file, format='GTiff',
dstSRS=dstSRS,
outputBounds=[x0, yf, xf, y0], multithread=True,
xRes=dx, yRes=abs(dy), resampleAlg=resample_algorithm,
errorThreshold=0)
gdal_ds = gdal.Open(relocated_file)
relocated_array = gdal_ds.ReadAsArray()
del gdal_ds
return relocated_array
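# Illustrative sketch (not part of the module): how the warp bounds used by
# _relocate() above follow from a GDAL geotransform. The numbers are
# hypothetical (UTM-like 30 m grid).
#
#   geotransform = [500000.0, 30.0, 0.0, 4600020.0, 0.0, -30.0]
#   width, length = 3660, 3660
#   x0, dx = geotransform[0], geotransform[1]
#   y0, dy = geotransform[3], geotransform[5]
#   xf = x0 + width * dx    # -> 609800.0
#   yf = y0 + length * dy   # -> 4490220.0
#   # outputBounds = [x0, yf, xf, y0]
#   #              = [500000.0, 4490220.0, 609800.0, 4600020.0]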
def _deep_update(main_dict, update_dict):
"""Update input dictionary with a second (update) dictionary
https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
Parameters
----------
main_dict: dict
Input dictionary
update_dict: dict
Update dictionary
Returns
-------
updated_dict : dict
Updated dictionary
"""
for key, val in update_dict.items():
if isinstance(val, dict):
main_dict[key] = _deep_update(main_dict.get(key, {}), val)
else:
main_dict[key] = val
# return updated main_dict
return main_dict
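# Illustrative sketch (not part of the module): nested-update behaviour of
# _deep_update() above, using made-up keys.
#
#   >>> main = {'runconfig': {'groups': {'a': 1, 'b': 2}}}
#   >>> update = {'runconfig': {'groups': {'b': 3}}}
#   >>> _deep_update(main, update)
#   {'runconfig': {'groups': {'a': 1, 'b': 3}}}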
def parse_runconfig_file(user_runconfig_file = None, args = None):
"""
Parse run configuration file updating an argument
(argparse.Namespace) and an HlsThresholds object
Parameters
----------
user_runconfig_file: str (optional)
Run configuration (runconfig) filename
args: argparse.Namespace (optional)
Argument object
"""
bin_dirname = os.path.dirname(__file__)
source_dirname = os.path.split(bin_dirname)[0]
default_runconfig_file = f'{source_dirname}/proteus/defaults/dswx_hls.yaml'
logger.info(f'Default runconfig file: {default_runconfig_file}')
yaml_schema = f'{source_dirname}/proteus/schemas/dswx_hls.yaml'
logger.info(f'YAML schema: {yaml_schema}')
schema = yamale.make_schema(yaml_schema, parser='ruamel')
# parse default config
parser = ruamel_yaml(typ='safe')
with open(default_runconfig_file, 'r') as f:
default_runconfig = parser.load(f)
if user_runconfig_file is not None:
if not os.path.isfile(user_runconfig_file):
            logger.error(f'Invalid runconfig file: {user_runconfig_file}')
return
logger.info(f'Input runconfig file: {user_runconfig_file}')
data = yamale.make_data(user_runconfig_file, parser='ruamel')
logger.info(f'Validating runconfig file: {user_runconfig_file}')
yamale.validate(schema, data)
# parse user config
with open(user_runconfig_file) as f_yaml:
user_runconfig = parser.load(f_yaml)
        # copy user-supplied config into default config
runconfig = _deep_update(default_runconfig, user_runconfig)
else:
runconfig = default_runconfig
hls_thresholds = HlsThresholds()
hls_thresholds_user = runconfig['runconfig']['groups']['hls_thresholds']
# copy runconfig parameters from dictionary
if hls_thresholds_user is not None:
print('HLS thresholds:')
for key in hls_thresholds_user.keys():
print(f' {key}: {hls_thresholds_user[key]}')
hls_thresholds.__setattr__(key, hls_thresholds_user[key])
if args is None:
return hls_thresholds
input_file_path = runconfig['runconfig']['groups']['input_file_group'][
'input_file_path']
ancillary_ds_group = runconfig['runconfig']['groups'][
'dynamic_ancillary_file_group']
if 'dem_file' not in ancillary_ds_group:
dem_file = None
else:
dem_file = ancillary_ds_group['dem_file']
if 'landcover_file' not in ancillary_ds_group:
landcover_file = None
else:
landcover_file = ancillary_ds_group['landcover_file']
if 'built_up_cover_fraction_file' not in ancillary_ds_group:
built_up_cover_fraction_file = None
else:
built_up_cover_fraction_file = ancillary_ds_group[
'built_up_cover_fraction_file']
output_file = runconfig['runconfig']['groups']['product_path_group'][
'sas_output_file']
scratch_dir = runconfig['runconfig']['groups']['product_path_group'][
'scratch_path']
if (input_file_path is not None and len(input_file_path) == 1 and
os.path.isdir(input_file_path[0])):
logger.info(f'input HLS files directory: {input_file_path[0]}')
input_list = glob.glob(os.path.join(input_file_path[0], '*.tif'))
args.input_list = input_list
elif input_file_path is not None:
input_list = input_file_path
args.input_list = input_list
if args.output_file is not None and output_file is not None:
logger.warning(f'command line output file "{args.output_file}"'
f' has precedence over runconfig output file "{output_file}"')
elif args.output_file is None:
args.output_file = output_file
if args.dem_file is not None and dem_file is not None:
logger.warning(f'command line output file "{args.dem_file}"'
f' has precedence over runconfig output file "{dem_file}"')
elif args.dem_file is None:
args.dem_file = dem_file
if args.landcover_file is not None and landcover_file is not None:
logger.warning(f'command line output file "{args.landcover_file}"'
f' has precedence over runconfig output file "{landcover_file}"')
elif args.landcover_file is None:
args.landcover_file = landcover_file
if (args.built_up_cover_fraction_file is not None and
built_up_cover_fraction_file is not None):
        logger.warning('command line built-up cover fraction file'
                       f' "{args.built_up_cover_fraction_file}"'
                       ' has precedence over runconfig built-up cover'
                       f' fraction file "{built_up_cover_fraction_file}"')
elif args.built_up_cover_fraction_file is None:
args.built_up_cover_fraction_file = built_up_cover_fraction_file
if args.scratch_dir is not None and scratch_dir is not None:
logger.warning(f'command line output file "{args.scratch_dir}"'
f' has precedence over runconfig output | |
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import random
import sys
import unittest
import gym
from gym import Space, spaces
import rps3env.config
from rps3env.envs import RPS3GameEnv, RPS3GameMinMaxEnv
from rps3env.tests.utils import captured_output
__author__ = '<NAME>'
logger = logging.getLogger(__name__)
logger.setLevel(rps3env.config.ENV_LOG_LEVEL)
logger.addHandler(logging.StreamHandler(sys.stdout))
EMPTY_BOARD = """
..
.. .. ..
.. .. ..
.. .. ..
.. .. .. .. ..
.. .. ..
.. .. ..
.. .. ..
.. .. ..
..
..
.. .. ..
.. .. ..
.. .. ..
.. .. .. .. ..
.. .. ..
.. .. ..
.. .. ..
.. .. ..
..
Turns: 0
Player Counts: [0, 0, 0]
Player Reveals: [0, 0, 0]
Opponent Captures: [0, 0, 0]
Opponent Counts: [0, 0, 0, 0]
Probabilities: [0.0, 0.0, 0.0]"""
INIT_BOARD = """
OP
OR .. OS
OP .. OR
OS .. OS
OP .. .. .. OR
PS .. PR
PP .. PP
PR .. PS
PS .. PR
PP
PP
PR .. PS
PP .. PR
PS .. PS
PP .. .. .. PR
OU .. OU
OU .. OU
OU .. OU
OU .. OU
OU
Turns: 0
Player Counts: [3, 3, 3]
Player Reveals: [0, 0, 0]
Opponent Captures: [0, 0, 0]
Opponent Counts: [0, 0, 0, 9]
Probabilities: [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]"""
OBS_BEFORE_BOARD_INIT = {
'occupied': [False] * 28,
'player_owned': [False] * 28,
'piece_type': [-1] * 28,
'player_captures': [0, 0, 0],
'opponent_captures': [0, 0, 0],
}
OBS_AFTER_BOARD_INIT = {
'occupied': [True] * 18 + [False] * 10,
'player_owned': [True] * 9 + [False] * 19,
'piece_type': [1, 2, 3] * 3 + [0] * 9 + [-1] * 10,
'player_captures': [0, 0, 0],
'opponent_captures': [0, 0, 0],
}
OBS_AFTER_LEGAL_MOVE = {
'occupied': [False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, True,
True, True, False, False, False, False, False, False, True, False, False],
'player_owned': [False, True, True, True, True, True, True, True, True, False, False, False, False, False, False,
False, False, False, True, False, False, False, False, False, False, False, False, False],
'piece_type': [-1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, -1, -1, -1, -1, -1, -1, 0, -1, -1],
'player_captures': [0, 0, 0],
'opponent_captures': [0, 0, 0],
}
OBS_AFTER_CHALLENGE_WIN = {
'occupied': [True, True, True, True, True, True, True, True, False, True, True, True, True, True, True, True, False,
True, False, False, False, False, False, False, False, False, True, False],
'player_owned': [True, True, True, True, True, True, True, True, False, True, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False],
'piece_type': [1, 2, 3, 1, 2, 3, 1, 2, -1, 3, 0, 0, 0, 0, 0, 0, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1],
'player_captures': [0, 0, 0],
'opponent_captures': [0, 1, 0],
}
OBS_AFTER_CHALLENGE_TIE = {
'occupied': [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, True,
True, False, False, False, False, False, False, False, True, False, False],
'player_owned': [True, True, True, True, True, True, True, True, True, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False],
'piece_type': [1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0, 0, 0, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1],
'player_captures': [0, 0, 0],
'opponent_captures': [0, 0, 0],
}
OBS_AFTER_CHALLENGE_LOSS = {
'occupied': [True, True, True, True, True, True, True, True, False, True, True, True, True, True, True, True, True,
True, False, False, False, False, False, False, False, False, False, False],
'player_owned': [True, True, True, True, True, True, True, True, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False],
'piece_type': [1, 2, 3, 1, 2, 3, 1, 2, -1, 1, 0, 0, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
'player_captures': [0, 0, 1],
'opponent_captures': [0, 0, 0],
}
OBS_AFTER_FULL_GAME = {
'occupied': [True, True, True, True, True, True, False, True, False, False, False, False, False, True, False, False,
True, False, True, False, False, False, False, True, True, True, True, True],
'player_owned': [True, True, True, True, True, True, False, True, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False, False, False, True],
'piece_type': [1, 2, 3, 1, 2, 3, -1, 2, -1, -1, -1, -1, -1, 3, -1, -1, 0, -1, 0, -1, -1, -1, -1, 3, 0, 0, 1, 1],
'player_captures': [0, 0, 0],
'opponent_captures': [0, 3, 0],
}
OBS_AFTER_TAKE_ALL_PIECES = {
'occupied': [False, False, True, False, True, True, True, True, False, False, False, False, False, False, False,
False, False, True, False, False, False, False, False, False, True, False, False, False],
'piece_type': [-1, -1, 3, -1, 2, 3, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 2, -1, -1,
-1],
'player_owned': [False, False, True, False, True, True, True, True, False, False, False, False, False, False, False,
False, False, True, False, False, False, False, False, False, True, False, False, False],
'player_captures': [1, 0, 1],
'opponent_captures': [3, 3, 3],
}
OBS_AFTER_LOSE_ALL_PIECES = {
'occupied': [True, False, False, False, False, False, False, True, False, False, False, True, False, True, False,
False, True, True, False, False, False, False, False, True, False, False, False, False],
'piece_type': [1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 0, -1, 0, -1, -1, 0, 1, -1, -1, -1, -1, -1, 2, -1, -1, -1,
-1],
'player_owned': [False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False],
'player_captures': [3, 3, 3],
'opponent_captures': [0, 2, 0],
}
AVAILABLE_ACTIONS_AFTER_INIT = [
(0, 17), (0, 18), (1, 18), (2, 19), (3, 19), (4, 20), (5, 20), (6, 21), (7, 21), (8, 9), (8, 22)
]
class RPS3GameEnvTest(unittest.TestCase):
def setUp(self):
self.env = gym.make('RPS3Game-v0') # type: RPS3GameEnv
def tearDown(self):
self.env.close()
def init_board(self):
self.env.reset()
obs, reward, done, info = self.env.step([1, 2, 3] * 3)
return obs, reward, done, info
def step_assert(self, obs_actual, reward_actual, done_actual, info_actual,
obs_expected, reward_expected=None, done_expected=False, info_expected=None):
if reward_expected is None:
reward_expected = [0, 0]
self.assertEqual(obs_expected, obs_actual)
self.assertEqual(reward_expected, reward_actual)
self.assertEqual(done_expected, done_actual)
if info_expected is not None:
for k in info_expected.keys():
self.assertEqual(info_expected[k], info_actual[k])
def test_initializable(self):
self.assertIsNotNone(self.env, msg='gym.make() returned None.')
def test_action_space_pre_reset(self):
self.assertRaises(ValueError, lambda: self.env.action_space)
def test_action_space_pre_init(self):
self.env.reset()
self.assertIsInstance(self.env.action_space, Space)
self.assertEqual(9, self.env.action_space.shape[0])
self.assertEqual(3, max(self.env.action_space.nvec))
self.assertEqual(3, min(self.env.action_space.nvec))
def test_action_space_post_init(self):
self.init_board()
self.assertIsInstance(self.env.action_space, Space)
self.assertEqual(2, self.env.action_space.shape[0])
self.assertEqual(27, max(self.env.action_space.nvec))
self.assertEqual(27, min(self.env.action_space.nvec))
def test_observation_space(self):
self.assertIsInstance(self.env.observation_space, spaces.Dict)
obs_sp = self.env.observation_space.spaces
self.assertEqual(5, len(obs_sp))
self.assertEqual(28, obs_sp['occupied'].n)
self.assertEqual(28, obs_sp['player_owned'].n)
piece_type = obs_sp['piece_type']
self.assertEqual(28, piece_type.shape[0])
self.assertEqual(3, max(piece_type.nvec))
self.assertEqual(3, min(piece_type.nvec))
player_captures = obs_sp['player_captures']
self.assertEqual(3, player_captures.shape[0])
self.assertEqual(3, max(player_captures.nvec))
self.assertEqual(3, min(player_captures.nvec))
opponent_captures = obs_sp['opponent_captures']
self.assertEqual(3, opponent_captures.shape[0])
self.assertEqual(3, max(opponent_captures.nvec))
self.assertEqual(3, min(opponent_captures.nvec))
def test_reward_range(self):
self.assertEqual(-100, self.env.reward_range[0])
self.assertEqual(100, self.env.reward_range[1])
def test_step_before_reset(self):
self.assertRaises(ValueError, lambda: self.env.step([1, 2, 3] * 3))
def test_reset(self):
actual = self.env.reset()
expected = OBS_BEFORE_BOARD_INIT
self.assertEqual(expected, actual)
def test_render_empty_board(self):
self.env.reset()
actual = self.env.render(mode='ansi')
self.assertEqual(EMPTY_BOARD, actual)
def test_output_empty_board(self):
self.env.reset()
with captured_output() as (out, err):
self.env.render(mode='console')
self.assertEqual(EMPTY_BOARD, out.getvalue().rstrip())
def test_set_board(self):
self.env.seed()
obs, reward, done, info = self.init_board()
self.step_assert(obs, reward, done, info, OBS_AFTER_BOARD_INIT)
def test_render_set_board(self):
self.env.seed(0)
self.init_board()
actual = self.env.render(mode='ansi')
self.assertEqual(INIT_BOARD, actual)
def test_render_human(self):
self.env.seed(0)
self.init_board()
self.env.render(mode='human')
done = False
while not done:
self.env.render(mode='human')
action = random.choice(self.env.available_actions)
obs, reward, done, info = self.env.step(action)
self.env.render(mode='human')
def test_render_rgb_array(self):
self.env.seed(0)
self.init_board()
self.env.render(mode='rgb_array')
done = False
while not done:
self.env.render(mode='rgb_array')
action = random.choice(self.env.available_actions)
obs, reward, done, info = self.env.step(action)
self.env.render(mode='rgb_array')
def test_render_invalid_mode(self):
self.init_board()
| |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Class for manipulating all of the deliverable data."""
import collections
import copy
import functools
import glob
import os
import os.path
import weakref
from openstack_governance import governance
import pbr.version
from openstack_releases import series_status
from openstack_releases import yamlutils
def _safe_semver(v):
"""Get a SemanticVersion that closely represents the version string.
We can't always get a SemanticVersion instance because some of the
legacy tags don't comply with the parser. This method corrects
some of the more common mistakes in formatting to make it more
likely we can construct a SemanticVersion, even if the results
don't quite match the input.
"""
v = str(v)
# Remove "v" prefixes.
v = v.lstrip('v')
# Remove any stray "." at the start or end, after the other
# cleanups.
v = v.strip('.')
# If we have a version with 4 positions that are all integers,
# drop the fourth.
parts = v.split('.')
if len(parts) > 3:
try:
int(parts[3])
except ValueError:
pass
else:
parts = parts[:3]
v = '.'.join(parts)
return pbr.version.SemanticVersion.from_pip_string(v)
def _version_sort_key(release):
"""Return a value we can compare for sorting."""
# NOTE(dhellmann): We want EOL and EM tags to sort last. This assumes we
# won't have more than 1000 major releases of anything, and I
# surely hope that is a safe assumption.
version_string = release['version']
if version_string.endswith('-eol') or \
version_string.endswith('-em') or \
version_string.endswith('-last'):
return _safe_semver('1000.0.0')
return _safe_semver(version_string)
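# Illustrative sketch (not part of the module): how the helpers above
# normalize and order version strings. The inputs are made-up examples.
#
#   >>> _safe_semver('v1.2.3.4').brief_string()
#   '1.2.3'
#   >>> keys = [_version_sort_key({'version': v})
#   ...         for v in ('2.0.0', '1.0.0', '1.0.0-eol')]
#   >>> [k.brief_string() for k in sorted(keys)]
#   ['1.0.0', '2.0.0', '1000.0.0']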
def _collapse_deliverable_history(name, info):
"""Collapse pre-releases into their final release.
Edit the info dictionary in place.
"""
sorted_releases = sorted(
info.get('releases', []),
key=_version_sort_key,
)
# Collapse pre-releases into their final release.
releases = []
known_versions = set()
for r in reversed(sorted_releases):
try:
parsed_vers = pbr.version.SemanticVersion.from_pip_string(
str(r['version']))
vers_tuple = parsed_vers.version_tuple()
except Exception:
# If we can't parse the version, it must be some sort
# of made up legacy tag. Ignore the parse error
# and include the value in our output.
releases.append(r)
else:
if len(vers_tuple) != 3:
# This is not a normal release, so assume it
# is a pre-release.
final = parsed_vers.brief_string()
if final in known_versions:
continue
releases.append(r)
known_versions.add(r['version'])
info['releases'] = list(reversed(releases))
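# Illustrative sketch (not part of the module): the pre-release collapsing
# performed by _collapse_deliverable_history() above, with made-up release
# data (assuming pbr parses the '0rc1' pre-release suffix).
#
#   >>> info = {'releases': [{'version': '1.0.0.0rc1', 'projects': []},
#   ...                      {'version': '1.0.0', 'projects': []}]}
#   >>> _collapse_deliverable_history('example', info)
#   >>> [r['version'] for r in info['releases']]
#   ['1.0.0']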
class Deliverables(object):
def __init__(self, root_dir, collapse_history=True):
self._root_dir = root_dir
self._collapse_history = collapse_history
# Map team names to a list of all of their deliverables.
self._team_deliverables = collections.defaultdict(set)
# Map team names to a set of all the series in which they
# produced anything.
self._team_series = collections.defaultdict(set)
self._active_teams = set()
# Map team, series, and deliverable names to a list of the
# deliverable files.
self._by_team_and_series = collections.defaultdict(list)
self._by_series = collections.defaultdict(list)
self._by_deliverable_name = collections.defaultdict(list)
# Map filenames to parsed content.
self._by_filename = {}
self._load_deliverable_files(root_dir)
def _load_deliverable_files(self, root_dir):
deliverable_files = glob.glob(os.path.join(root_dir, '*/*.yaml'))
for filename in sorted(deliverable_files):
series = self._series_from_filename(filename)
deliverable = self._deliverable_from_filename(filename)
with open(filename, 'r', encoding='utf-8') as f:
d_info = yamlutils.loads(f.read())
if self._collapse_history:
_collapse_deliverable_history(deliverable, d_info)
team = d_info['team']
self._add_deliverable_file(
filename, series, team, deliverable, d_info,
)
@staticmethod
def _series_from_filename(filename):
return os.path.basename(os.path.dirname(filename)).lstrip('_')
@staticmethod
def _deliverable_from_filename(filename):
return os.path.splitext(os.path.basename(filename))[0]
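    # Illustrative sketch (not part of the class): how a deliverable file path
    # maps to a series and a deliverable name via the helpers above. The path
    # is a made-up example.
    #
    #   >>> filename = 'deliverables/_independent/example.yaml'
    #   >>> Deliverables._series_from_filename(filename)
    #   'independent'
    #   >>> Deliverables._deliverable_from_filename(filename)
    #   'example'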
def _add_deliverable_file(self, filename, series, team, deliverable,
d_info):
self._by_filename[filename] = d_info
self._by_team_and_series[(team, series)].append(filename)
self._by_series[series].append(filename)
self._team_deliverables[team].add(deliverable)
self._team_series[team].add(series)
d = Deliverable(team, series, deliverable, d_info)
if d.allows_releases:
self._active_teams.add(team)
deliv = self._deliverable_from_filename(filename)
self._by_deliverable_name[deliv].append(filename)
def get_team_deliverables(self, team):
"Returns a list of deliverable names produced by the team."
return list(sorted(self._team_deliverables[team]))
def get_team_series(self, team):
"Return the names of the series in which the team produced anything."
return self._team_series[team]
def get_teams(self):
"Return all of the names of all of the teams seen."
return list(self._team_series.keys())
def get_active_teams(self):
"Return the names of all teams which have releasable deliverables."
return self._active_teams
def get_deliverables(self, team, series):
"""Return a sequence of deliverable data for the team and series.
Return tuples containing team, series, deliverable, and parsed
deliverable file content.
If the team or series is None, treat that value as a wildcard.
"""
if team is None:
if series is None:
series = 'independent'
filenames = self._by_series[series]
else:
filenames = self._by_team_and_series[(team, series)]
for filename in filenames:
yield Deliverable(
team,
self._series_from_filename(filename),
self._deliverable_from_filename(filename),
self._by_filename.get(filename, {}),
)
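    # Illustrative sketch (not part of the class): typical iteration over
    # get_deliverables() above. The team and series names are hypothetical.
    #
    #   all_deliverables = Deliverables('deliverables')
    #   for deliv in all_deliverables.get_deliverables('Example Team', 'stein'):
    #       print(deliv.name, deliv.series, deliv.model)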
def get_deliverable_history(self, name):
"""Return info associated with a deliverable name."""
for filename in self._by_deliverable_name.get(name, []):
yield Deliverable(
None, # team will be taken from the data
self._series_from_filename(filename),
self._deliverable_from_filename(filename),
self._by_filename.get(filename, {}),
)
@functools.total_ordering
class Repo(object):
def __init__(self, name, data, deliv):
self.name = name
self._data = data
self.deliv = weakref.proxy(deliv)
@property
def flags(self):
return self._data.get('flags', [])
@property
def is_retired(self):
return 'retired' in self.flags
@property
def no_artifact_build_job(self):
return 'no-artifact-build-job' in self.flags
@property
def pypi_name(self):
return self._data.get('pypi-name')
@property
def base_name(self):
return self.name.rsplit('/')[-1]
@property
def tarball_base(self):
return self._data.get('tarball-base')
def __eq__(self, other):
return self.name == other.name
def __gt__(self, other):
return self.name > other.name
def __str__(self):
return self.name
@functools.total_ordering
class ReleaseProject(object):
def __init__(self, repo, hash, data, release=None):
self._repo = repo
self.repo = release.deliv.get_repo(repo)
self.hash = hash
self._data = data
self.release = weakref.proxy(release)
@property
def tarball_base(self):
if 'tarball-base' in self._data:
return self._data['tarball-base']
return self.repo.tarball_base
def guess_sdist_name(self):
return self.tarball_base or self.repo.base_name
def __eq__(self, other):
return self.repo == other.repo
def __gt__(self, other):
return self.repo > other.repo
class Release(object):
def __init__(self, version, projects, data, deliv):
self.version = version
if deliv:
self.deliv = weakref.proxy(deliv)
else:
self.deliv = deliv
self._data = data
self._projects = {
p['repo']: ReleaseProject(p['repo'], p['hash'], p, self)
for p in projects
}
@property
def was_forced(self):
return 'forced' in self._data.get('flags', set())
@property
def skipped_sig(self):
return 'skipped-sig' in self._data.get('flags', set())
@property
def projects(self):
return sorted(self._projects.values())
def project(self, repo):
if repo in self._projects:
return self._projects[repo]
return None
@property
def diff_start(self):
return self._data.get('diff-start')
@property
def is_release_candidate(self):
return 'rc' in self.version
@property
def is_pre_release_version(self):
return (
'rc' in self.version or
'a' in self.version or
'b' in self.version
)
@property
def is_eol(self):
return self.version.endswith('-eol')
@property
def eol_series(self):
if self.is_eol:
return self.version.rpartition('-')[0]
return ''
@property
def is_em(self):
return self.version.endswith('-em')
@property
def is_last(self):
return self.version.endswith('-last')
@property
def em_series(self):
if self.is_em:
return self.version.rpartition('-')[0]
return ''
def __eq__(self, other):
return self.version == other.version
class Branch(object):
def __init__(self, name, location, data, deliv):
self.name = name
self.location = location
self.deliv = weakref.proxy(deliv)
self._data = data
    def __eq__(self, other):
        return self.name == other.name
@property
def prefix(self):
return self.name.split('/')[0]
@property
def series(self):
return self.name.split('/')[1]
def get_repo_map(self):
"Return mapping between repo and hash."
if isinstance(self.location, dict):
return self.location
release = self.deliv.get_release(self.location)
return {
p.repo.name: p.hash
for p in release.projects
}
@functools.total_ordering
class Deliverable(object):
_gov_data = None
_series_status_data = None
def __init__(self, team, series, name, data):
self.team = team
if self.team is None:
self.team = data.get('team')
self.series = series
self.name = name
self._data = data
repos = set(self._data.get('repository-settings', {}).keys())
# NOTE(dhellmann): We do this next bit for legacy deliverable
# files without the repository-settings sections. We should be
# able to remove this after the T series is opened because at
# that point all actively validated deliverable files will
# have this data.
for r in data.get('releases') or []:
for p in r.get('projects') or []:
repos.add(p.get('repo'))
self._repos = {
r: Repo(
name=r,
data=self._data.get('repository-settings', {}).get(r, {}),
deliv=self,
)
for r in sorted(repos)
}
self._releases = [
Release(
version=r['version'],
projects=r['projects'],
data=r,
deliv=self,
)
for r in self._data.get('releases') or []
]
self._branches = [
Branch(
name=b['name'],
location=b['location'],
data=b,
deliv=self,
)
for b in self._data.get('branches') or []
]
@classmethod
def read_file(cls, filename):
with open(filename, 'r', encoding='utf-8') as f:
data = yamlutils.loads(f.read())
series_name = os.path.basename(
os.path.dirname(filename)
).lstrip('_')
deliverable_name = os.path.basename(filename)[:-5] # strip .yaml
return cls(
team=None, # extracted from the info automatically
series=series_name,
name=deliverable_name,
data=data or {}, # if the file is empty yaml returns None
)
@property
def launchpad_id(self):
return self._data.get('launchpad')
@property
def storyboard_id(self):
return self._data.get('storyboard')
@property
def repos(self):
return sorted(self._repos.values())
@property
def known_repo_names(self):
return set(self._data.get('repository-settings', {}).keys())
def get_repo(self, name):
return self._repos[name]
@property
def model(self):
model = self._data.get('release-model', '')
if self.is_independent and model != 'abandoned':
return 'independent'
return model
@property
def is_independent(self):
        return self.series == 'independent'
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# @formatter:off
#
# ,,
# db
# \
# _\,, `7MM `7MM `7MMpMMMb. `7MM ,p6"bo ,pW"Wq.`7Mb,od8 `7MMpMMMb.
# "-=\~ _ MM MM MM MM MM 6M' OO 6W' `Wb MM' "' MM MM
# \\~___( ~ MM MM MM MM MM 8M 8M M8 MM MM MM
# _|/---\\_ MM MM MM MM MM 8M 8M M8 MM MM MM
# \ \ MM MM MM MM MM YM. , YA. ,A9 MM MM MM
# `Mbod"YML..JMML JMML..JMML.YMbmd' `Ybmd9'.JMML. .JMML JMML.
#
#
# written with <3 by <NAME> using PyCharm
# https://github.com/michagrandel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# __
# _ww _a+"D
# y#, _r^ # _*^ y`
# q0 0 a" W*` F ____
# ; #^ Mw` __`. . 4-~~^^`
# _ _P ` /'^ `www=.
# , $ +F ` q
# K ] ^K`
# , §_ . ___ r ],
# _*.^ '.__dP^^~#, ,_ *,
# ^b / _ `` _F ] ]_
# '___ ' ~~^ ] [
# :` ]b_ ~k_ ,` yl
# §P `*a__ __a~ z~`
# §L _ ^------~^` ,
# ~-vww*"v_ _/`
# ^"q_ _x"
# __§my..___p/`mma____
# _awP",`,^"-_"^`._ L L #
# _#0w_^_^,^r___...._ t [],"w
# e^ ]b_x^_~^` __, .]Wy7` x`
# '=w__^9*§P-*MF` ^[_.=
# ^"y qw/"^_____^~9 t
# ]_l ,'^_`..=== x'
# ">.ak__awwwwWW
# #§WWWWWWWWWWWWWW
# _WWWWWWMM§WWWW_JP^"~-=w_
# .____awwmp_wNw#[w/` ^#, ~b___.
# ` ^^^~^"W___ ]Raaaamw~`^``^^~
# ^~"~---~~~~~~`#
# @formatter:on
"""
.. access user specific paths, initiate internationalization, manage configurations, generate argument parser
.. module:: Application
:platform: Windows, Linux, macOS, OS X
:synopsis: access user specific paths, initiate internationalization, manage configurations, generate argument parser
.. moduleauthor:: <NAME> <<EMAIL>>
.. sectionauthor:: <NAME> <<EMAIL>>
.. versionadded:: 0.1
The :mod:`Application`-module extends the `appdirs <https://pypi.python.org/pypi/appdirs>`_-module and provides some
basic functionality useful for an application. This includes functions for simple *logging*, *configuration management*,
*initialization*, *argument parsing* or simply *accessing platform specific paths* like paths for caches, logging files,
configuration- or data files.
This module inherits many methods and attributes from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_,
a module written by <NAME> and <NAME>!
.. seealso::
`appdirs <https://pypi.python.org/pypi/appdirs>`_
"""
from __future__ import print_function, division, unicode_literals
import argparse
import gettext
import locale
import os
import platform
import re
import shutil
import tarfile
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED, is_zipfile
import logging
from logging.handlers import *
import appdirs
from datetime import datetime
import six
from six.moves import configparser
try:
from Qt import __binding__
except ImportError:
__binding__ = 'console'
_ = unicode if six.PY2 else str
__version__ = '0.1'
__status__ = 'alpha'
# Translate: Pia Ballerstadt is a proper name
__author__ = _('<NAME>')
__contact__ = 'https://github.com/michagrandel'
# Translate: Pia Ballerstadt is a proper name; "<3" should symbolize a heart in ascii characters
__copyright__ = _('written with <3 by <NAME>')
# Translate: Apache is a company name; "Apache License" is the title of an open source license
__license__ = _('Apache License, Version 2.0')
class Application(appdirs.AppDirs):
"""
The :class:`Application`-class extends the `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_-class and
provides some basic functionality useful for an application. In addition to AppDirs platform specific path's,
the Application class includes functions for simple *logging*, *configuration management*, *internationalization*,
and *argument parsing*.
This class inherits many methods and attributes from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_,
a module written by <NAME> and <NAME>!
:param unicode appname: application name
:param unicode appauthor: application author
.. attribute:: user_config_dir
user's directory for configuration files
Inherited from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_.
.. attribute:: user_cache_dir
user's directory for caches
Inherited from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_.
.. attribute:: user_log_dir
user's directory for logfiles
Inherited from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_.
.. attribute:: user_data_dir
user's directory for application data
Inherited from `appdirs.AppDirs <https://pypi.python.org/pypi/appdirs>`_.
"""
def __init__(self, appname, appauthor):
"""
initialize Application
:param unicode appname: application name
:param unicode appauthor: application author
"""
super(Application, self).__init__(appname=appname, appauthor=appauthor, roaming=True)
global __author__
global __copyright__
__author__ = appauthor
__copyright__ = 'written with <3 by {author}'.format(author=appauthor)
locale.setlocale(locale.LC_ALL, '')
self._configuration = dict()
self.language_search_locations = []
self._locale_path = os.path.join(self.user_data_dir, 'languages')
self._language_filepack_pattern = re.compile(
r'^%(name)s-[a-z]{2}_[A-Z]{2}(\.mo|\.qm|\.tar\.gz|\.zip)?$' % {'name': self.appname.lower()})
        self._language_file_pattern = re.compile(
            r'^%(name)s-[a-z]{2}_[A-Z]{2}(\.mo|\.qm)?$' % {'name': self.appname.lower()})
global _
_ = self._ = self.ugettext = unicode if six.PY2 else str
self.languages()
def configuration(self, filename=None, default=None):
"""
read and write configuration files and returns configuration
If no file with *filename* exists, a file will created with the `default` values inside the configuration path.
If *filename* is `None`, a default file named after the application will be used. It will be stored in the
application's configuration path.
The configuration file has to be in the `configparser <https://docs.python.org/2/library/configparser.html>`_ Format.
:param unicode filename: configuration file
:param dict default: default values if no configuration file is found
:return: configuration
:rtype: `configparser <https://docs.python.org/2/library/configparser.html>`_
.. seealso::
Module `configparser <https://docs.python.org/2/library/configparser.html>`_:
Parse configuration data
"""
        if filename:
            filename = os.path.abspath(os.path.expanduser(filename))
        else:
            filename = os.path.join(self.user_config_dir,
                                    self.appname.lower() + '.conf')
default = default or {}
try:
return self._configuration[os.path.splitext(os.path.basename(filename))[0]]
except KeyError:
configuration = configparser.ConfigParser(allow_no_value=True)
# fixme: replace unicode()-call with code compatible with python2 AND python3
configuration.optionxform = lambda s: unicode(s) if six.PY2 else str(s)
for section, keyvalues in six.iteritems(default):
try:
configuration.add_section(section)
except configparser.DuplicateSectionError:
pass
for option, value in six.iteritems(keyvalues):
configuration.set(section, option, value)
            try:
                os.makedirs(os.path.dirname(os.path.abspath(filename)))
            except OSError:
                # the configuration directory may already exist
                pass
            try:
                with open(os.path.abspath(filename), 'w') as config_file:
                    configuration.write(config_file)
            except (IOError, OSError):
                pass
self._configuration[os.path.splitext(os.path.basename(filename))[0]] = configuration
return self._configuration[os.path.splitext(os.path.basename(filename))[0]]
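    # Illustrative sketch (not part of the class): typical use of
    # configuration() above. The application, file and option names are
    # hypothetical.
    #
    #   app = Application('ExampleApp', 'Example Author')
    #   config = app.configuration('~/.exampleapp.conf',
    #                              default={'general': {'verbose': 'no'}})
    #   verbose = config.get('general', 'verbose')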
def extract_languages_from_archive(self, archive_file):
"""
extract .qm- and .mo-files from *archive_file* and stores them in the directory for translation files.
:param unicode archive_file: filename of an archive file
:return: count of extracted files
:rtype: int
Supported archive formats include ZIP-Files (.zip), TAR-Files (.tar), Compressed TAR-Files (.tar.gz, .tgz).
Extracted .mo-files will be stored in *user_data_dir*/language/*languagecode*/LC_MESSAGES/\*.mo,
while .qm-files will be stored in *user_data_dir*/language/*languagecode*/\*.qm.
`user_data_dir` is replaced by self.user_data_dir.
`languagecode` is the language code of the target language of the translation files, e.g. en_US, de_DE etc.
.. seealso::
Module `appdirs <https://pypi.python.org/pypi/appdirs>`_:
A small Python module for determining appropriate platform-specific dirs, e.g. a user data dir
"""
archive_file = os.path.abspath(archive_file)
        # strip the archive extension; treat the two-part '.tar.gz' suffix
        # like '.tgz' so the language code is extracted correctly
        domain, language = os.path.splitext(
            os.path.basename(archive_file).replace('.tar.gz', '.tgz'))[0].split('-')
count_extracted_files = 0
logger = logging.getLogger(self.appname.lower())
if is_zipfile(archive_file):
try:
with ZipFile(archive_file, 'r', compression=ZIP_DEFLATED) as language_archive:
for name in language_archive.namelist():
if self._language_file_pattern.match(name) is not None:
target_path = os.path.join(self._locale_path, language)
if os.path.splitext(name)[1] == '.mo':
target_path = os.path.join(target_path, 'LC_MESSAGES')
                            if not os.path.isdir(target_path):
                                os.makedirs(target_path)
language_archive.extract(name, path=target_path)
count_extracted_files += 1
except (RuntimeError, OSError):
logger.warning('Can not extract files from {archive}.'.format(archive=os.path.basename(archive_file)))
return -1
elif tarfile.is_tarfile(archive_file):
try:
with tarfile.open(archive_file, mode='r:gz') as language_archive:
for name in language_archive.getnames():
if self._language_file_pattern.match(name) is not None:
target_path = os.path.join(self._locale_path, language)
if os.path.splitext(name)[1] == '.mo':
target_path = os.path.join(target_path, 'LC_MESSAGES')
                            if not os.path.isdir(target_path):
                                os.makedirs(target_path)
language_archive.extract(name, path=target_path)
count_extracted_files += 1
except (RuntimeError, OSError, tarfile.ReadError, tarfile.CompressionError):
logger.warning('Can not extract files from {archive}.'.format(archive=os.path.basename(archive_file)))
return -1
else:
logger.warning('{archive} is not a supported archive type.'.format(archive=os.path.basename(archive_file)))
return -1
return count_extracted_files
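    # Illustrative sketch (not part of the class): the archive naming scheme
    # and target layout assumed by extract_languages_from_archive() above.
    # The file names are hypothetical.
    #
    #   exampleapp-de_DE.zip containing exampleapp-de_DE.mo
    #       -> <user_data_dir>/languages/de_DE/LC_MESSAGES/exampleapp-de_DE.mo
    #   exampleapp-de_DE.zip containing exampleapp-de_DE.qm
    #       -> <user_data_dir>/languages/de_DE/exampleapp-de_DE.qm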
def languages(self):
"""
copy language files from search directories to language directory
:return: root directory for internationalization related files
Copy .mo-files to *user_data_dir*/language/*languagecode*/LC_MESSAGES/\*.mo,
and copy .qm-files to *user_data_dir*/language/*languagecode*/\*.qm.
`user_data_dir` is replaced by self.user_data_dir.
`languagecode` is the language code of the target language of the translation files, e.g. en_US, de_DE etc.
.. seealso::
Module `appdirs <https://pypi.python.org/pypi/appdirs>`_:
A small Python module for determining appropriate platform-specific dirs, e.g. a user data dir
"""
# @formatter:off
# paths to search for language files
self.language_search_locations.extend((
os.path.join('~/Projects', self.appname.lower()),
os.path.join('~/Projekte', self.appname.lower()),
os.path.join(self.user_data_dir, 'lang'),
os.path.join(self.user_data_dir, 'language'),
os.path.join(self.user_data_dir, 'translations'),
os.path.join(self.user_data_dir, 'translation'),
os.path.join(self.user_data_dir, 'locale')))
# @formatter:on
# search .qm- and .mo-files in language_search_directories
for language_search_directory in self.language_search_locations:
language_search_directory = os.path.abspath(os.path.expanduser(language_search_directory))
try:
for original_file in os.listdir(language_search_directory):
# handle translation files
if self._language_filepack_pattern.match(original_file) is not None:
if not os.path.exists(self._locale_path):
os.makedirs(self._locale_path)
domain, language = os.path.splitext(original_file)[0].split('-')
if os.path.splitext(original_file)[1] == '.zip':
self.extract_languages_from_archive(
os.path.join(language_search_directory, original_file))
elif original_file.endswith('.tar.gz') or os.path.splitext(original_file)[1] == '.tgz':
self.extract_languages_from_archive(
os.path.join(language_search_directory, original_file))
else:
original_file = os.path.join(language_search_directory, original_file)
target_path = os.path.join(self._locale_path, language)
if os.path.splitext(original_file)[1] == '.mo':
target_path = os.path.join(target_path, 'LC_MESSAGES')
shutil.copy2(original_file,
os.path.join(target_path, domain + os.path.splitext(original_file)[1]))
except (OSError, IOError):
pass
global _
_ = self._ = self.ugettext = gettext.translation(self.appname, self._locale_path, fallback=True).ugettext
return self._locale_path
def parser(self, csvdata, **kwargs):
"""
setup a argparser to parse command line arguments using *csvdata* (tab-separated).
:param unicode csvdata: CSV data (tab-separated)
:param kwargs: additonal keyword arguments as described in "additional options"
:return: parser or False
:rtype: argparse.parser or boolean
The csvdata must include the following columns: Argument Flags, Action, Group, Help, Other Options.
| |
        if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = GIS.update_location_tree(feature)
if path:
path_list = [int(item) for item in path.split("/")]
if len(path_list) == 1:
# No parents - path contains only this feature.
return None
# Get only ancestors
path_list = path_list[:-1]
# Get path in the desired -- reversed -- order.
path_list.reverse()
elif feature.parent:
path_list = [feature.parent]
else:
return None
# If only ids are wanted, stop here.
if ids_only:
return path_list
# Retrieve parents - order in which they're returned is arbitrary.
s3db = current.s3db
table = s3db.gis_location
query = (table.id.belongs(path_list))
fields = [table.id, table.name, table.level, table.lat, table.lon]
unordered_parents = current.db(query).select(cache=s3db.cache,
*fields)
# Reorder parents in order of reversed path.
unordered_ids = [row.id for row in unordered_parents]
parents = [unordered_parents[unordered_ids.index(path_id)]
for path_id in path_list if path_id in unordered_ids]
return parents
else:
return None
# -------------------------------------------------------------------------
def get_parent_per_level(self, results, feature_id,
feature=None,
ids=True,
names=True):
"""
Adds ancestor of requested feature for each level to supplied dict.
If the caller already has the location row, including path and
parent fields, they can supply it via feature to avoid a db lookup.
If a dict is not supplied in results, one is created. The results
dict is returned in either case.
If ids=True and names=False (used by old S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.id
If ids=False and names=True (used by address_onvalidation):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.name
If ids=True and names=True (used by new S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : {name : ancestor.name, id: ancestor.id}
"""
if not results:
results = {}
_id = feature_id
# if we don't have a feature or a feature ID return the dict as-is
if not feature_id and not feature:
return results
if not feature_id and "path" not in feature and "parent" in feature:
# gis_location_onvalidation on a Create => no ID yet
# Read the Parent's path instead
feature = self._lookup_parent_path(feature.parent)
_id = feature.id
elif not feature or "path" not in feature or "parent" not in feature:
feature = self._lookup_parent_path(feature_id)
if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = self.update_location_tree(feature)
# Get ids of ancestors at each level.
if feature.parent:
strict = self.get_strict_hierarchy(feature.parent)
else:
strict = self.get_strict_hierarchy(_id)
if path and strict and not names:
# No need to do a db lookup for parents in this case -- we
# know the levels of the parents from their position in path.
# Note ids returned from db are ints, not strings, so be
# consistent with that.
path_ids = [int(item) for item in path.split("/")]
# This skips the last path element, which is the supplied
# location.
for (i, _id) in enumerate(path_ids[:-1]):
results["L%i" % i] = _id
elif path:
ancestors = self.get_parents(_id, feature=feature)
if ancestors:
for ancestor in ancestors:
if ancestor.level and ancestor.level in self.hierarchy_level_keys:
if names and ids:
results[ancestor.level] = Storage()
results[ancestor.level].name = ancestor.name
results[ancestor.level].id = ancestor.id
elif names:
results[ancestor.level] = ancestor.name
else:
results[ancestor.level] = ancestor.id
if not feature_id:
# Add the Parent in (we only need the version required for gis_location onvalidation here)
results[feature.level] = feature.name
if names:
# We need to have entries for all levels
# (both for address onvalidation & new LocationSelector)
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if key not in results:
results[key] = None
return results
# -------------------------------------------------------------------------
def update_table_hierarchy_labels(self, tablename=None):
"""
Re-set table options that depend on location_hierarchy
Only update tables which are already defined
"""
levels = ("L1", "L2", "L3", "L4", "L5")
labels = self.get_location_hierarchy()
db = current.db
if tablename and tablename in db:
# Update the specific table which has just been defined
table = db[tablename]
if tablename == "gis_location":
labels["L0"] = current.messages.COUNTRY
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
else:
for level in levels:
table[level].label = labels[level]
else:
# Do all Tables which are already defined
# gis_location
if "gis_location" in db:
table = db.gis_location
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
# These tables store location hierarchy info for XSLT export.
# Labels are used for PDF & XLS Reports
tables = ["org_office",
#"pr_person",
"pr_address",
"cr_shelter",
"asset_asset",
#"hms_hospital",
]
for tablename in tables:
if tablename in db:
table = db[tablename]
for level in levels:
table[level].label = labels[level]
# -------------------------------------------------------------------------
@staticmethod
def set_config(config_id=None, force_update_cache=False):
"""
Reads the specified GIS config from the DB, caches it in response.
Passing in a false or non-existent id will cause the personal config,
if any, to be used, else the site config (uuid SITE_DEFAULT), else
their fallback values defined in this class.
If force_update_cache is true, the config will be read and cached in
response even if the specified config is the same as what's already
cached. Used when the config was just written.
The config itself will be available in response.s3.gis.config.
Scalar fields from the gis_config record and its linked
gis_projection record have the same names as the fields in their
tables and can be accessed as response.s3.gis.<fieldname>.
Returns the id of the config it actually used, if any.
Args:
:param: config_id. use '0' to set the SITE_DEFAULT
@ToDo: Merge configs for Event
"""
cache = Storage()
_gis = current.response.s3.gis
# If an id has been supplied, try it first. If it matches what's in
# response, there's no work to do.
if config_id and not force_update_cache and \
_gis.config and \
_gis.config.id == config_id:
return cache
db = current.db
s3db = current.s3db
ctable = s3db.gis_config
mtable = s3db.gis_marker
ptable = s3db.gis_projection
stable = s3db.gis_style
fields = (ctable.id,
ctable.default_location_id,
ctable.region_location_id,
ctable.geocoder,
ctable.lat_min,
ctable.lat_max,
ctable.lon_min,
ctable.lon_max,
ctable.zoom,
ctable.lat,
ctable.lon,
ctable.pe_id,
ctable.wmsbrowser_url,
ctable.wmsbrowser_name,
ctable.zoom_levels,
ctable.merge,
mtable.image,
mtable.height,
mtable.width,
ptable.epsg,
ptable.proj4js,
ptable.maxExtent,
ptable.units,
)
# May well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
row = None
rows = None
if config_id:
# Does config exist?
# Should we merge it?
row = db(ctable.id == config_id).select(ctable.merge,
ctable.uuid,
limitby= (0, 1)
).first()
if row:
if row.merge and row.uuid != "SITE_DEFAULT":
row = None
# Merge this one with the Site Default
query = (ctable.id == config_id) | \
(ctable.uuid == "SITE_DEFAULT")
rows = db(query).select(*fields,
left = left,
# We want SITE_DEFAULT to be last here (after urn:xxx)
orderby = ~ctable.uuid,
limitby = (0, 2)
)
else:
# Just use this config
row = db(ctable.id == config_id).select(*fields,
left = left,
limitby = (0, 1)
).first()
else:
# The requested config must be invalid, so just use site default
config_id = 0
if config_id == 0:
# Use site default
row = db(ctable.uuid == "SITE_DEFAULT").select(*fields,
left = left,
limitby = (0, 1)
).first()
if not row:
# No configs found at all
_gis.config = cache
return cache
# If no id supplied, extend the site config with any personal or OU configs
if not rows and not row:
auth = current.auth
if auth.is_logged_in():
# Read personalised config, if available.
user = auth.user
pe_id = user.get("pe_id")
if pe_id:
# Also look for OU configs
pes = []
if user.organisation_id:
# Add the user account's Org to the list
# (Will take lower-priority than Personal)
otable = s3db.org_organisation
org = db(otable.id == user.organisation_id).select(otable.pe_id,
limitby = (0, 1)
).first()
try:
pes.append(org.pe_id)
except:
current.log.warning("Unable to find Org %s" % user.organisation_id)
if current.deployment_settings.get_org_branches():
# Also look for Parent Orgs
ancestors = s3db.pr_get_ancestors(org.pe_id)
pes += ancestors
if user.site_id:
# Add the user account's Site to the list
# (Will take lower-priority than Org/Personal)
site_pe_id = s3db.pr_get_pe_id("org_site", user.site_id)
if site_pe_id:
pes.append(site_pe_id)
if user.org_group_id:
# Add the user account's Org Group to the list
# (Will take lower-priority than Site/Org/Personal)
ogtable = s3db.org_group
ogroup = db(ogtable.id == user.org_group_id).select(ogtable.pe_id,
limitby = (0, 1)
).first()
pes = list(pes)
try:
pes.append(ogroup.pe_id)
except:
current.log.warning("Unable to find Org Group %s" % user.org_group_id)
query = (ctable.uuid == "SITE_DEFAULT") | \
: self.train_acc,
'TestLoss' : self.test_loss,
'TestAcc' : self.test_acc}
# %%
gru = GRU(hidden_dim=128,learning_rate=1e-3,batch_size=32,mom_coeff=0.0)
# %%
gru.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer = 'RMSprop')
# %%
gru_history = gru.history()
# %%
# For figure 97:
plt.figure()
plt.plot(gru_history['TrainLoss'],'-o')
plt.plot(gru_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss over epochs')
plt.legend(['Train Loss','Test Loss'])
plt.show()
plt.figure()
plt.plot(gru_history['TrainAcc'],'-o')
plt.plot(gru_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')
plt.title('Accuracy over epochs')
plt.legend(['Train Acc','Test Acc'])
plt.show()
# %%
# For figure 98:
multi_layer_gru_history = multi_layer_gru.history()
plt.figure()
plt.plot(multi_layer_gru_history['TrainAcc'],'-o')
plt.plot(gru_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')
plt.title('Training Accuracy over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TestAcc'],'-o')
plt.plot(gru_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')
plt.title('Testing Accuracy over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TrainLoss'],'-o')
plt.plot(gru_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TestLoss'],'-o')
plt.plot(gru_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
# %%
# For figure 99:
three_layer_rnn_history = three_layer_rnn.history()
plt.figure()
plt.plot(gru_history['TrainAcc'],'-o')
plt.plot(lstm_history['TrainAcc'],'-o')
plt.plot(three_layer_rnn_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')
plt.title('Training Accuracy over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TestAcc'],'-o')
plt.plot(lstm_history['TestAcc'],'-o')
plt.plot(three_layer_rnn_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')
plt.title('Testing Accuracy over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TrainLoss'],'-o')
plt.plot(lstm_history['TrainLoss'],'-o')
plt.plot(three_layer_rnn_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TestLoss'],'-o')
plt.plot(lstm_history['TestLoss'],'-o')
plt.plot(three_layer_rnn_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
# %%
train_preds_gru = gru.predict(X_train)
test_preds_gru = gru.predict(X_test)
confusion_mat_train_gru = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_gru)
confusion_mat_test_gru = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_gru)
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
confusion_mat_train_gru.columns = body_movements
confusion_mat_train_gru.index = body_movements
confusion_mat_test_gru.columns = body_movements
confusion_mat_test_gru.index = body_movements
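# Note: if metrics.confusion_matrix returns a plain numpy array (as sklearn's does),
# the .columns/.index assignments above require wrapping in a pandas DataFrame first,
# e.g. (sketch, assuming pandas is imported as pd):
#   confusion_mat_train_gru = pd.DataFrame(confusion_mat_train_gru,
#                                          index=body_movements, columns=body_movements)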
sns.heatmap(confusion_mat_train_gru/np.sum(confusion_mat_train_gru), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
sns.heatmap(confusion_mat_test_gru/np.sum(confusion_mat_test_gru), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
class Multi_layer_GRU(object):
"""
Multi-layer gated recurrent unit (GRU); encapsulates all necessary logic for training and builds the hyperparameters and architecture of the network.
"""
def __init__(self,input_dim = 3,hidden_dim_1 = 128,hidden_dim_2 = 64,output_class = 6,seq_len = 150,batch_size = 32,learning_rate = 1e-1,mom_coeff = 0.85):
"""
Initialization of weights/biases and other configurable parameters.
"""
np.random.seed(150)
self.input_dim = input_dim
self.hidden_dim_1 = hidden_dim_1
self.hidden_dim_2 = hidden_dim_2
# Unfold case T = 150 :
self.seq_len = seq_len
self.output_class = output_class
self.learning_rate = learning_rate
self.batch_size = batch_size
self.mom_coeff = mom_coeff
# Xavier uniform scaler :
Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
lim1 = Xavier(self.input_dim,self.hidden_dim_1)
lim1_hid = Xavier(self.hidden_dim_1,self.hidden_dim_1)
self.W_z = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
self.U_z = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
self.B_z = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
self.W_r = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
self.U_r = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
self.B_r = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
self.W_h = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
self.U_h = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
self.B_h = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
lim2_hid = Xavier(self.hidden_dim_1,self.hidden_dim_2)
self.W_hid = np.random.uniform(-lim2_hid,lim2_hid,(self.hidden_dim_1,self.hidden_dim_2))
self.B_hid = np.random.uniform(-lim2_hid,lim2_hid,(1,self.hidden_dim_2))
lim2 = Xavier(self.hidden_dim_2,self.output_class)
self.W = np.random.uniform(-lim2,lim2,(self.hidden_dim_2,self.output_class))
self.B = np.random.uniform(-lim2,lim2,(1,self.output_class))
# To keep track of loss and accuracy scores:
self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
# To keep previous updates in momentum :
self.previous_updates = [0] * 13
# For AdaGrad:
self.cache = [0] * 13
self.cache_rmsprop = [0] * 13
self.m = [0] * 13
self.v = [0] * 13
self.t = 1
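# The cell below implements the standard GRU recurrence (z: update gate, r: reset gate):
#   z_t     = sigmoid(x_t W_z + h_{t-1} U_z + b_z)
#   r_t     = sigmoid(x_t W_r + h_{t-1} U_r + b_r)
#   h_hat_t = tanh(x_t W_h + (r_t * h_{t-1}) U_h + b_h)
#   h_t     = z_t * h_{t-1} + (1 - z_t) * h_hat_t
# followed by a ReLU dense layer and a softmax classifier on the hidden state.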
def cell_forward(self,X,h_prev):
# Update gate:
update_gate = activations.sigmoid(np.dot(X,self.W_z) + np.dot(h_prev,self.U_z) + self.B_z)
# Reset gate:
reset_gate = activations.sigmoid(np.dot(X,self.W_r) + np.dot(h_prev,self.U_r) + self.B_r)
# Current memory content:
h_hat = np.tanh(np.dot(X,self.W_h) + np.dot(np.multiply(reset_gate,h_prev),self.U_h) + self.B_h)
# Hidden state:
hidden_state = np.multiply(update_gate,h_prev) + np.multiply((1-update_gate),h_hat)
# Hidden MLP:
hid_dense = np.dot(hidden_state,self.W_hid) + self.B_hid
relu = activations.ReLU(hid_dense)
# Classifiers (Softmax) :
dense = np.dot(relu,self.W) + self.B
probs = activations.softmax(dense)
return (update_gate,reset_gate,h_hat,hidden_state,hid_dense,relu,dense,probs)
def forward(self,X,h_prev):
x_s,z_s,r_s,h_hat = {},{},{},{}
h_s = {}
hd_s,relu_s = {},{}
y_s,p_s = {},{}
h_s[-1] = np.copy(h_prev)
for t in range(self.seq_len):
x_s[t] = X[:,t,:]
z_s[t], r_s[t], h_hat[t], h_s[t],hd_s[t],relu_s[t], y_s[t], p_s[t] = self.cell_forward(x_s[t],h_s[t-1])
return (x_s,z_s, r_s, h_hat, h_s, hd_s,relu_s, y_s, p_s)
def BPTT(self,outs,Y):
x_s,z_s, r_s, h_hat, h_s, hd_s,relu_s, y_s, p_s = outs
dW_z, dW_r,dW_h, dW = np.zeros_like(self.W_z), np.zeros_like(self.W_r), np.zeros_like(self.W_h),np.zeros_like(self.W)
dW_hid = np.zeros_like(self.W_hid)
dU_z, dU_r,dU_h = np.zeros_like(self.U_z), np.zeros_like(self.U_r), np.zeros_like(self.U_h)
dB_z, dB_r,dB_h,dB = np.zeros_like(self.B_z), np.zeros_like(self.B_r),np.zeros_like(self.B_h),np.zeros_like(self.B)
dB_hid = np.zeros_like(self.B_hid)
dh_next = np.zeros_like(h_s[0])
# w.r.t. softmax input
ddense = np.copy(p_s[149])
ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
#ddense[np.argmax(Y,1)] -=1
#ddense = y_s[149] - Y
# Softmax classifier's :
dW = np.dot(relu_s[149].T,ddense)
dB = np.sum(ddense,axis = 0, keepdims = True)
ddense_hid = np.dot(ddense,self.W.T) * activations.dReLU(hd_s[149])
dW_hid = np.dot(h_s[149].T,ddense_hid)
dB_hid = np.sum(ddense_hid,axis = 0, keepdims = True)
# Backprop through time:
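# Per-step gradients as computed below (matching the forward relations above):
#   dh     = gradient flowing into h_t (dense-layer gradient plus dh_next)
#   dh_hat = dh * (1 - z_t) * (1 - h_hat_t**2)
#   dr     = (dh_hat @ U_h.T) * h_{t-1} * r_t * (1 - r_t)
#   dz     = dh * (h_{t-1} - h_hat_t) * z_t * (1 - z_t)
# dh_next accumulates the paths through z_t, r_t and the candidate state.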
for t in reversed(range(1,self.seq_len)):
# Current memory state:
dh = np.dot(ddense_hid,self.W_hid.T) + dh_next
dh_hat = dh * (1-z_s[t])
dh_hat = dh_hat * dtanh(h_hat[t])
dW_h += np.dot(x_s[t].T,dh_hat)
dU_h += np.dot((r_s[t] * h_s[t-1]).T,dh_hat)
dB_h += np.sum(dh_hat,axis = 0, keepdims = True)
# Reset gate:
dr_1 = np.dot(dh_hat,self.U_h.T)
dr = dr_1 * h_s[t-1]
dr = dr * dsigmoid(r_s[t])
dW_r += np.dot(x_s[t].T,dr)
dU_r += np.dot(h_s[t-1].T,dr)
dB_r += np.sum(dr,axis = 0, keepdims = True)
# Update gate:
dz = dh * (h_s[t-1] - h_hat[t])
dz = dz * dsigmoid(z_s[t])
dW_z += np.dot(x_s[t].T,dz)
dU_z += np.dot(h_s[t-1].T,dz)
dB_z += np.sum(dz,axis = 0, keepdims = True)
# Nexts:
dh_next = np.dot(dz,self.U_z.T) + (dh * z_s[t]) + (dr_1 * r_s[t]) + np.dot(dr,self.U_r.T)
# List of gradients :
grads = [dW,dB,dW_hid,dB_hid,dW_z,dU_z,dB_z,dW_r,dU_r,dB_r,dW_h,dU_h,dB_h]
# Clipping gradients anyway
for grad in grads:
np.clip(grad, -15, 15, out = grad)
return h_s[self.seq_len - 1],grads
def fit(self,X,Y,X_val,y_val,epochs = 50 ,optimizer = 'SGD',verbose = True, crossVal = False):
"""
Given the training dataset, its labels and the number of epochs,
fit the model and measure the performance
by evaluating on the validation dataset each epoch.
"""
for epoch in range(epochs):
print(f'Epoch : {epoch + 1}')
perm = np.random.permutation(3000)
# Equate 0 in every epoch:
h_prev = np.zeros((self.batch_size,self.hidden_dim_1))
for i in range(round(X.shape[0]/self.batch_size) - 1):
batch_start = i * self.batch_size
batch_finish = (i+1) * self.batch_size
index = perm[batch_start:batch_finish]
# Feeding random indexes:
X_feed = X[index]
y_feed = Y[index]
# Forward + BPTT + Optimization:
cache_train = self.forward(X_feed,h_prev)
h,grads = self.BPTT(cache_train,y_feed)
if optimizer == 'SGD':
self.SGD(grads)
elif optimizer == 'AdaGrad' :
self.AdaGrad(grads)
elif optimizer == 'RMSprop':
self.RMSprop(grads)
elif optimizer == 'VanillaAdam':
self.VanillaAdam(grads)
else:
self.Adam(grads)
# Hidden state -------> Previous hidden state
h_prev = h
# Training metrics calculations:
cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[8][149])
predictions_train = self.predict(X)
acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
# Validation metrics calculations:
test_prevs = np.zeros((X_val.shape[0],self.hidden_dim_1))
_,__,___,____,_____,______,_______,________,probs_test = self.forward(X_val,test_prevs)
cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
predictions_val = np.argmax(probs_test[149],1)
acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
if verbose:
print(f"[{epoch + 1}/{epochs}] ------> Training : Accuracy : {acc_train}")
print(f"[{epoch + 1}/{epochs}] ------> Training : Loss : {cross_loss_train}")
print('______________________________________________________________________________________\n')
print(f"[{epoch + 1}/{epochs}] ------> Testing : Accuracy : {acc_val}")
print(f"[{epoch + 1}/{epochs}] ------> Testing : Loss : {cross_loss_val}")
print('______________________________________________________________________________________\n')
self.train_loss.append(cross_loss_train)
self.test_loss.append(cross_loss_val)
self.train_acc.append(acc_train)
self.test_acc.append(acc_val)
def params(self):
"""
Return all weights/biases as a list, ordered from the output layer back through the hidden dense layer to the GRU gate parameters.
"""
return [self.W,self.B,self.W_hid,self.B_hid,self.W_z,self.U_z,self.B_z,self.W_r,self.U_r,self.B_r,self.W_h,self.U_h,self.B_h]
def SGD(self,grads):
"""
Stochastic gradient descent with momentum on mini-batches.
"""
prevs = []
for param,grad,prev_update in zip(self.params(),grads,self.previous_updates):
delta = self.learning_rate * grad + self.mom_coeff * prev_update
param -= delta
prevs.append(delta)
self.previous_updates = prevs
self.learning_rate *= 0.99999
def AdaGrad(self,grads):
"""
AdaGrad adaptive optimization algorithm.
"""
i = 0
for param,grad in zip(self.params(),grads):
self.cache[i] += grad **2
param += -self.learning_rate * grad / (np.sqrt(self.cache[i]) + 1e-6)
i += 1
def RMSprop(self,grads,decay_rate = 0.9):
"""
RMSprop adaptive optimization algorithm
"""
i = 0
for param,grad in zip(self.params(),grads):
self.cache_rmsprop[i] = decay_rate * self.cache_rmsprop[i] + (1-decay_rate) * grad **2
param += - self.learning_rate * grad / (np.sqrt(self.cache_rmsprop[i])+ 1e-6)
i += 1
def VanillaAdam(self,grads,beta1 = 0.9,beta2 = 0.999):
"""
Adam optimizer, but bias correction is not implemented
"""
i = 0
for param,grad in zip(self.params(),grads):
self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
param += -self.learning_rate * self.m[i] / (np.sqrt(self.v[i]) + 1e-8)
i += 1
def Adam(self,grads,beta1 = 0.9,beta2 = 0.999):
"""
Adam optimizer, bias correction is implemented.
"""
i = 0
for param,grad in zip(self.params(),grads):
self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
m_corrected = self.m[i] / (1-beta1**self.t)
v_corrected = self.v[i] / (1-beta2**self.t)
param += -self.learning_rate * m_corrected / (np.sqrt(v_corrected) + 1e-8)
i += 1
self.t +=1
def CategoricalCrossEntropy(self,labels,preds):
# utils/thredds.py
from __future__ import print_function, absolute_import
from future.standard_library import hooks
import os
import copy
from shutil import rmtree
from tempfile import mkdtemp
from datetime import datetime
from numpy import empty, float32, datetime64, timedelta64, argmin, abs, array
from rasterio import open as rasopen
from rasterio.crs import CRS
from rasterio.transform import Affine
from rasterio.mask import mask
from rasterio.warp import reproject, Resampling
from rasterio.warp import calculate_default_transform as cdt
from xlrd.xldate import xldate_from_date_tuple
from xarray import open_dataset
from pandas import date_range, DataFrame
from bounds import GeoBounds
with hooks():
from urllib.parse import urlunparse
class Thredds(object):
""" Unidata's Thematic Real-time Environmental Distributed Data Services (THREDDS)
"""
def __init__(self, start=None, end=None, date=None,
bounds=None, target_profile=None, lat=None, lon=None
):
self.start = start
self.end = end
self.date = date
self.src_bounds_wsen = None
self.target_profile = target_profile
self.bbox = bounds
self.lat = lat
self.lon = lon
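# conform() pushes a subset array through a fixed pipeline, writing an
# intermediate GeoTIFF into self.temp_dir at each stage:
#   _project -> _reproject -> _mask (clip to self.clip_feature) -> _resample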
def conform(self, subset, out_file=None):
if subset.dtype != float32:
subset = array(subset, dtype=float32)
self._project(subset)
self._reproject()
self._mask()
result = self._resample()
if out_file:
self.save_raster(result, self.target_profile, output_filename=out_file)
return result
def _project(self, subset):
proj_path = os.path.join(self.temp_dir, 'tiled_proj.tif')
setattr(self, 'projection', proj_path)
profile = copy.deepcopy(self.target_profile)
profile['dtype'] = float32
bb = self.bbox.as_tuple()
if self.src_bounds_wsen:
bounds = self.src_bounds_wsen
else:
bounds = (bb[0], bb[1],
bb[2], bb[3])
dst_affine, dst_width, dst_height = cdt(CRS({'init': 'epsg:4326'}),
CRS({'init': 'epsg:4326'}),
subset.shape[1],
subset.shape[2],
*bounds,
)
profile.update({'crs': CRS({'init': 'epsg:4326'}),
'transform': dst_affine,
'width': dst_width,
'height': dst_height})
with rasopen(proj_path, 'w', **profile) as dst:
dst.write(subset)
def _reproject(self):
reproj_path = os.path.join(self.temp_dir, 'reproj.tif')
setattr(self, 'reprojection', reproj_path)
with rasopen(self.projection, 'r') as src:
src_profile = src.profile
src_bounds = src.bounds
src_array = src.read(1)
dst_profile = copy.deepcopy(self.target_profile)
dst_profile['dtype'] = float32
bounds = src_bounds
dst_affine, dst_width, dst_height = cdt(src_profile['crs'],
dst_profile['crs'],
src_profile['width'],
src_profile['height'],
*bounds)
dst_profile.update({'crs': dst_profile['crs'],
'transform': dst_affine,
'width': dst_width,
'height': dst_height})
with rasopen(reproj_path, 'w', **dst_profile) as dst:
dst_array = empty((1, dst_height, dst_width), dtype=float32)
reproject(src_array, dst_array, src_transform=src_profile['transform'],
src_crs=src_profile['crs'], dst_crs=self.target_profile['crs'],
dst_transform=dst_affine, resampling=Resampling.nearest,
num_threads=2)
dst.write(dst_array.reshape(1, dst_array.shape[1], dst_array.shape[2]))
def _mask(self):
mask_path = os.path.join(self.temp_dir, 'masked.tif')
with rasopen(self.reprojection) as src:
out_arr, out_trans = mask(src, self.clip_feature, crop=True,
all_touched=True)
out_meta = src.meta.copy()
out_meta.update({'driver': 'GTiff',
'height': out_arr.shape[1],
'width': out_arr.shape[2],
'transform': out_trans})
with rasopen(mask_path, 'w', **out_meta) as dst:
dst.write(out_arr)
setattr(self, 'mask', mask_path)
delattr(self, 'reprojection')
def _resample(self):
# home = os.path.expanduser('~')
# resample_path = os.path.join(home, 'images', 'sandbox', 'thredds', 'resamp_twx_{}.tif'.format(var))
resample_path = os.path.join(self.temp_dir, 'resample.tif')
with rasopen(self.mask, 'r') as src:
array = src.read(1)
profile = src.profile
res = src.res
try:
target_affine = self.target_profile['affine']
except KeyError:
target_affine = self.target_profile['transform']
target_res = target_affine.a
res_coeff = res[0] / target_res
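# res_coeff > 1 means the target grid is finer than the source; the output
# array shape and the pixel size in the affine below are scaled by this factor.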
new_array = empty(shape=(1, round(array.shape[0] * res_coeff),
round(array.shape[1] * res_coeff)), dtype=float32)
aff = src.affine
new_affine = Affine(aff.a / res_coeff, aff.b, aff.c, aff.d, aff.e / res_coeff, aff.f)
profile['transform'] = self.target_profile['transform']
profile['width'] = self.target_profile['width']
profile['height'] = self.target_profile['height']
profile['dtype'] = str(new_array.dtype)
delattr(self, 'mask')
with rasopen(resample_path, 'w', **profile) as dst:
reproject(array, new_array, src_transform=aff, dst_transform=new_affine, src_crs=src.crs,
dst_crs=src.crs, resampling=Resampling.nearest)
dst.write(new_array)
with rasopen(resample_path, 'r') as src:
arr = src.read()
return arr
def _date_index(self):
date_ind = date_range(self.start, self.end, freq='d')
return date_ind
@staticmethod
def _dtime_to_dtime64(dtime):
dtnumpy = datetime64(dtime).astype(datetime64)
return dtnumpy
@staticmethod
def save_raster(arr, geometry, output_filename):
try:
arr = arr.reshape(1, arr.shape[1], arr.shape[2])
except IndexError:
arr = arr.reshape(1, arr.shape[0], arr.shape[1])
geometry['dtype'] = str(arr.dtype)
with rasopen(output_filename, 'w', **geometry) as dst:
dst.write(arr)
return None
class TopoWX(Thredds):
""" Twix
TopoWX Surface Temperature, return as numpy array in daily stack unless modified.
Available variables: ['tmin', 'tmax']
----------
Observation elements to access. Currently available elements:
- 'tmin' : daily minimum air temperature [C]
- 'tmax' : daily maximum air temperature [C]
:param start: datetime object start of period of data
:param end: datetime object end of period of data
:param variables: List of available variables. At least one.
:param date: single-day datetime date object
:param bounds: met.misc.BBox object representing spatial bounds, default to conterminous US
:return: numpy.ndarray """
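# Illustrative usage (sketch; the GeoBounds bbox, rasterio-style target profile
# dict and clip geometry are assumed to be prepared by the caller):
#   twx = TopoWX(date=datetime(2016, 7, 1), bbox=GeoBounds(...),
#                target_profile=profile, clip_feature=geom)
#   tmax = twx.get_data_subset(grid_conform=True, var='tmax')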
def __init__(self, **kwargs):
Thredds.__init__(self)
self.temp_dir = mkdtemp()
for key, val in kwargs.items():
setattr(self, key, val)
self.service = 'cida.usgs.gov'
self.scheme = 'https'
self.variables = ['tmin', 'tmax']
if self.date:
self.start = self.date
self.end = self.date
self.year = self.start.year
def get_data_subset(self, grid_conform=False, var='tmax',
out_file=None, temp_units_out='C'):
if var not in self.variables:
raise TypeError('Must choose from "tmax" or "tmin"..')
url = self._build_url(var)
xray = open_dataset(url)
start = self._dtime_to_dtime64(self.start)
end = self._dtime_to_dtime64(self.end)
if self.date:
end = end + timedelta64(1, 'D')
# find index and value of bounds
# 1/100 degree adds a small buffer for this 800 m res data
north_ind = argmin(abs(xray.lat.values - (self.bbox.north + 1.)))
south_ind = argmin(abs(xray.lat.values - (self.bbox.south - 1.)))
west_ind = argmin(abs(xray.lon.values - (self.bbox.west - 1.)))
east_ind = argmin(abs(xray.lon.values - (self.bbox.east + 1.)))
north_val = xray.lat.values[north_ind]
south_val = xray.lat.values[south_ind]
west_val = xray.lon.values[west_ind]
east_val = xray.lon.values[east_ind]
setattr(self, 'src_bounds_wsen', (west_val, south_val,
east_val, north_val))
subset = xray.loc[dict(time=slice(start, end),
lat=slice(north_val, south_val),
lon=slice(west_val, east_val))]
date_ind = self._date_index()
subset['time'] = date_ind
if not grid_conform:
setattr(self, var, subset)
else:
if var == 'tmin':
arr = subset.tmin.values
elif var == 'tmax':
arr = subset.tmax.values
else:
arr = None
if temp_units_out == 'K':
arr += 273.15
conformed_array = self.conform(arr, out_file=out_file)
return conformed_array
def _build_url(self, var):
# ParseResult('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
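# e.g. for var='tmax' this yields roughly (illustrative):
#   https://cida.usgs.gov/thredds/dodsC/topowx?crs,lat[0:1:3249],lon[0:1:6999],tmax,time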
url = urlunparse([self.scheme, self.service,
'/thredds/dodsC/topowx?crs,lat[0:1:3249],lon[0:1:6999],{},'
'time'.format(var),
'', '', ''])
return url
class GridMet(Thredds):
""" U of I Gridmet
Return as numpy array per met variable in daily stack unless modified.
Available variables: ['bi', 'elev', 'erc', 'fm100', fm1000', 'pdsi', 'pet', 'pr', 'rmax', 'rmin', 'sph', 'srad',
'th', 'tmmn', 'tmmx', 'vs']
----------
Observation elements to access. Currently available elements:
- 'bi' : burning index [-]
- 'elev' : elevation above sea level [m]
- 'erc' : energy release component [-]
- 'fm100' : 100-hour dead fuel moisture [%]
- 'fm1000' : 1000-hour dead fuel moisture [%]
- 'pdsi' : Palmer Drought Severity Index [-]
- 'pet' : daily reference potential evapotranspiration [mm]
- 'pr' : daily accumulated precipitation [mm]
- 'rmax' : daily maximum relative humidity [%]
- 'rmin' : daily minimum relative humidity [%]
- 'sph' : daily mean specific humidity [kg/kg]
- 'prcp' : daily total precipitation [mm]
- 'srad' : daily mean downward shortwave radiation at surface [W m-2]
- 'th' : daily mean wind direction clockwise from North [degrees]
- 'tmmn' : daily minimum air temperature [K]
- 'tmmx' : daily maximum air temperature [K]
- 'vs' : daily mean wind speed [m s-1]
:param start: datetime object start of period of data
:param end: datetime object end of period of data
:param variables: List of available variables. At least one.
:param date: single-day datetime date object
:param bbox: met.misc.BBox object representing spatial bounds, default to conterminous US
:return: numpy.ndarray
Must have either start and end, or date.
Must have at least one valid variable. Invalid variables will be excluded gracefully.
note: NetCDF dates are in xl '1900' format, i.e., number of days since 1899-12-31 23:59
xlrd.xldate handles this for the time being
"""
def __init__(self, variable=None, date=None, start=None, end=None, bbox=None,
target_profile=None, clip_feature=None):
Thredds.__init__(self)
self.date = date
self.start = start
self.end = end
self.bbox = bbox
self.target_profile = target_profile
self.service = 'thredds.northwestknowledge.net:8080'
self.scheme = 'http'
self.temp_dir = mkdtemp()
self.variable = variable
self.available = ['elev', 'pr', 'rmax', 'rmin', 'sph', 'srad',
'th', 'tmmn', 'tmmx', 'pet', 'vs', 'erc', 'bi',
'fm100', 'pdsi']
if self.variable not in self.available:
Warning('Variable {} is not available'.
format(self.variable))
self.kwords = {'bi': 'burning_index_g',
'elev': '',
'erc': 'energy_release_component-g',
'fm100': 'dead_fuel_moisture_100hr',
'fm1000': 'dead_fuel_moisture_1000hr',
'pdsi': 'palmer_drought_severity_index',
'etr': 'potential_evapotranspiration',
'pet': 'potential_evapotranspiration',
'pr': 'precipitation_amount',
'rmax': 'relative_humidity',
'rmin': 'relative_humidity',
'sph': 'specific_humidity',
'srad': 'surface_downwelling_shortwave_flux_in_air',
'th': 'wind_from_direction',
'tmmn': 'air_temperature',
'tmmx': 'air_temperature',
'vs': 'wind_speed', }
if self.date:
self.start = self.date
self.end = self.date
self.year = self.start.year
if not self.bbox and not self.lat:
self.bbox = GeoBounds()
def subset_daily_tif(self, out_filename=None):
url = self._build_url()
url = url + '#fillmismatch'
xray = open_dataset(url)
north_ind = argmin(abs(xray.lat.values - (self.bbox.north + 1.)))
south_ind = argmin(abs(xray.lat.values - (self.bbox.south - 1.)))
west_ind = argmin(abs(xray.lon.values - (self.bbox.west - 1.)))
east_ind = argmin(abs(xray.lon.values - (self.bbox.east + 1.)))
north_val = xray.lat.values[north_ind]
south_val = xray.lat.values[south_ind]
west_val = xray.lon.values[west_ind]
east_val = xray.lon.values[east_ind]
setattr(self, 'src_bounds_wsen', (west_val, south_val,
east_val, north_val))
if self.variable != 'elev':
xray = xray.rename({'day': 'time'})
subset = xray.loc[dict(time=slice(self.start, self.end),
lat=slice(north_val, south_val),
lon=slice(west_val, east_val))]
date_ind = self._date_index()
subset['time'] = date_ind
setattr(self, 'width', subset.dims['lon'])
setattr(self, 'height', subset.dims['lat'])
arr = subset[self.kwords[self.variable]].values
arr = arr.reshape(arr.shape[1], arr.shape[2]).transpose()
arr = arr.reshape(1, arr.shape[0], arr.shape[1])
arr = self.conform(arr, out_file=out_filename)
rmtree(self.temp_dir)
return arr
else:
subset =
<= 0)
m.c1309 = Constraint(expr= - m.b130 + m.b131 - m.b155 <= 0)
m.c1310 = Constraint(expr= - m.b130 + m.b132 - m.b156 <= 0)
m.c1311 = Constraint(expr= - m.b130 + m.b133 - m.b157 <= 0)
m.c1312 = Constraint(expr= - m.b130 + m.b134 - m.b158 <= 0)
m.c1313 = Constraint(expr= - m.b130 + m.b135 - m.b159 <= 0)
m.c1314 = Constraint(expr= - m.b130 + m.b136 - m.b160 <= 0)
m.c1315 = Constraint(expr= - m.b130 + m.b137 - m.b161 <= 0)
m.c1316 = Constraint(expr= - m.b131 + m.b132 - m.b162 <= 0)
m.c1317 = Constraint(expr= - m.b131 + m.b133 - m.b163 <= 0)
m.c1318 = Constraint(expr= - m.b131 + m.b134 - m.b164 <= 0)
m.c1319 = Constraint(expr= - m.b131 + m.b135 - m.b165 <= 0)
m.c1320 = Constraint(expr= - m.b131 + m.b136 - m.b166 <= 0)
m.c1321 = Constraint(expr= - m.b131 + m.b137 - m.b167 <= 0)
m.c1322 = Constraint(expr= - m.b132 + m.b133 - m.b168 <= 0)
m.c1323 = Constraint(expr= - m.b132 + m.b134 - m.b169 <= 0)
m.c1324 = Constraint(expr= - m.b132 + m.b135 - m.b170 <= 0)
m.c1325 = Constraint(expr= - m.b132 + m.b136 - m.b171 <= 0)
m.c1326 = Constraint(expr= - m.b132 + m.b137 - m.b172 <= 0)
m.c1327 = Constraint(expr= - m.b133 + m.b134 - m.b173 <= 0)
m.c1328 = Constraint(expr= - m.b133 + m.b135 - m.b174 <= 0)
m.c1329 = Constraint(expr= - m.b133 + m.b136 - m.b175 <= 0)
m.c1330 = Constraint(expr= - m.b133 + m.b137 - m.b176 <= 0)
m.c1331 = Constraint(expr= - m.b134 + m.b135 - m.b177 <= 0)
m.c1332 = Constraint(expr= - m.b134 + m.b136 - m.b178 <= 0)
m.c1333 = Constraint(expr= - m.b134 + m.b137 - m.b179 <= 0)
m.c1334 = Constraint(expr= - m.b135 + m.b136 - m.b180 <= 0)
m.c1335 = Constraint(expr= - m.b135 + m.b137 - m.b181 <= 0)
m.c1336 = Constraint(expr= - m.b136 + m.b137 - m.b182 <= 0)
m.c1337 = Constraint(expr= - m.b138 + m.b139 - m.b147 <= 0)
m.c1338 = Constraint(expr= - m.b138 + m.b140 - m.b148 <= 0)
m.c1339 = Constraint(expr= - m.b138 + m.b141 - m.b149 <= 0)
m.c1340 = Constraint(expr= - m.b138 + m.b142 - m.b150 <= 0)
m.c1341 = Constraint(expr= - m.b138 + m.b143 - m.b151 <= 0)
m.c1342 = Constraint(expr= - m.b138 + m.b144 - m.b152 <= 0)
m.c1343 = Constraint(expr= - m.b138 + m.b145 - m.b153 <= 0)
m.c1344 = Constraint(expr= - m.b138 + m.b146 - m.b154 <= 0)
m.c1345 = Constraint(expr= - m.b139 + m.b140 - m.b155 <= 0)
m.c1346 = Constraint(expr= - m.b139 + m.b141 - m.b156 <= 0)
m.c1347 = Constraint(expr= - m.b139 + m.b142 - m.b157 <= 0)
m.c1348 = Constraint(expr= - m.b139 + m.b143 - m.b158 <= 0)
m.c1349 = Constraint(expr= - m.b139 + m.b144 - m.b159 <= 0)
m.c1350 = Constraint(expr= - m.b139 + m.b145 - m.b160 <= 0)
m.c1351 = Constraint(expr= - m.b139 + m.b146 - m.b161 <= 0)
m.c1352 = Constraint(expr= - m.b140 + m.b141 - m.b162 <= 0)
m.c1353 = Constraint(expr= - m.b140 + m.b142 - m.b163 <= 0)
m.c1354 = Constraint(expr= - m.b140 + m.b143 - m.b164 <= 0)
m.c1355 = Constraint(expr= - m.b140 + m.b144 - m.b165 <= 0)
m.c1356 = Constraint(expr= - m.b140 + m.b145 - m.b166 <= 0)
m.c1357 = Constraint(expr= - m.b140 + m.b146 - m.b167 <= 0)
m.c1358 = Constraint(expr= - m.b141 + m.b142 - m.b168 <= 0)
m.c1359 = Constraint(expr= - m.b141 + m.b143 - m.b169 <= 0)
m.c1360 = Constraint(expr= - m.b141 + m.b144 - m.b170 <= 0)
m.c1361 = Constraint(expr= - m.b141 + m.b145 - m.b171 <= 0)
m.c1362 = Constraint(expr= - m.b141 + m.b146 - m.b172 <= 0)
m.c1363 = Constraint(expr= - m.b142 + m.b143 - m.b173 <= 0)
m.c1364 = Constraint(expr= - m.b142 + m.b144 - m.b174 <= 0)
m.c1365 = Constraint(expr= - m.b142 + m.b145 - m.b175 <= 0)
m.c1366 = Constraint(expr= - m.b142 + m.b146 - m.b176 <= 0)
m.c1367 = Constraint(expr= - m.b143 + m.b144 - m.b177 <= 0)
m.c1368 = Constraint(expr= - m.b143 + m.b145 - m.b178 <= 0)
m.c1369 = Constraint(expr= - m.b143 + m.b146 - m.b179 <= 0)
m.c1370 = Constraint(expr= - m.b144 + m.b145 - m.b180 <= 0)
m.c1371 = Constraint(expr= - m.b144 + m.b146 - m.b181 <= 0)
m.c1372 = Constraint(expr= - m.b145 + m.b146 - m.b182 <= 0)
m.c1373 = Constraint(expr= - m.b147 + m.b148 - m.b155 <= 0)
m.c1374 = Constraint(expr= - m.b147 + m.b149 - m.b156 <= 0)
m.c1375 = Constraint(expr= - m.b147 + m.b150 - m.b157 <= 0)
m.c1376 = Constraint(expr= - m.b147 + m.b151 - m.b158 <= 0)
m.c1377 = Constraint(expr= - m.b147 + m.b152 - m.b159 <= 0)
m.c1378 = Constraint(expr= - m.b147 + m.b153 - m.b160 <= 0)
m.c1379 = Constraint(expr= - m.b147 + m.b154 - m.b161 <= 0)
m.c1380 = Constraint(expr= - m.b148 + m.b149 - m.b162 <= 0)
m.c1381 = Constraint(expr= - m.b148 + m.b150 - m.b163 <= 0)
m.c1382 = Constraint(expr= - m.b148 + m.b151 - m.b164 <= 0)
m.c1383 = Constraint(expr= - m.b148 + m.b152 - m.b165 <= 0)
m.c1384 = Constraint(expr= - m.b148 + m.b153 - m.b166 <= 0)
m.c1385 = Constraint(expr= - m.b148 + m.b154 - m.b167 <= 0)
m.c1386 = Constraint(expr= - m.b149 + m.b150 - m.b168 <= 0)
m.c1387 = Constraint(expr= - m.b149 + m.b151 - m.b169 <= 0)
m.c1388 = Constraint(expr= - m.b149 + m.b152 - m.b170 <= 0)
m.c1389 = Constraint(expr= - m.b149 + m.b153 - m.b171 <= 0)
m.c1390 = Constraint(expr= - m.b149 + m.b154 - m.b172 <= 0)
m.c1391 = Constraint(expr= - m.b150 + m.b151 - m.b173 <= 0)
m.c1392 = Constraint(expr= - m.b150 + m.b152 - m.b174 <= 0)
m.c1393 = Constraint(expr= - m.b150 + m.b153 - m.b175 <= 0)
m.c1394 = Constraint(expr= - m.b150 + m.b154 - m.b176 <= 0)
m.c1395 = Constraint(expr= - m.b151 + m.b152 - m.b177 <= 0)
m.c1396 = Constraint(expr= - m.b151 + m.b153 - m.b178 <= 0)
m.c1397 = Constraint(expr= - m.b151 + m.b154 - m.b179 <= 0)
m.c1398 = Constraint(expr= - m.b152 + m.b153 - m.b180 <= 0)
m.c1399 = Constraint(expr= - m.b152 + m.b154 - m.b181 <= 0)
m.c1400 = Constraint(expr= - m.b153 + m.b154 - m.b182 <= 0)
m.c1401 = Constraint(expr= - m.b155 + m.b156 - m.b162 <= 0)
m.c1402 = Constraint(expr= - m.b155 + m.b157 - m.b163 <= 0)
m.c1403 = Constraint(expr= - m.b155 + m.b158 - m.b164 <= 0)
m.c1404 = Constraint(expr= - m.b155 + m.b159 - m.b165 <= 0)
m.c1405 = Constraint(expr= - m.b155 + m.b160 - m.b166 <= 0)
m.c1406 = Constraint(expr= - m.b155 + m.b161 - m.b167 <= 0)
m.c1407 = Constraint(expr= - m.b156 + m.b157 - m.b168 <= 0)
m.c1408 = Constraint(expr= - m.b156 + m.b158 - m.b169 <= 0)
m.c1409 = Constraint(expr= - m.b156 + m.b159 - m.b170 <= 0)
m.c1410 = Constraint(expr= - m.b156 + m.b160 - m.b171 <= 0)
m.c1411 = Constraint(expr= - m.b156 + m.b161 - m.b172 <= 0)
m.c1412 = Constraint(expr= - m.b157 + m.b158 - m.b173 <= 0)
m.c1413 = Constraint(expr= - m.b157 + m.b159 - m.b174 <= 0)
m.c1414 = Constraint(expr= - m.b157 + m.b160 - m.b175 <= 0)
m.c1415 = Constraint(expr= - m.b157 + m.b161 - m.b176 <= 0)
m.c1416 = Constraint(expr= - m.b158 + m.b159 - m.b177 <= 0)
m.c1417 = Constraint(expr= - m.b158 + m.b160 - m.b178 <= 0)
m.c1418 = Constraint(expr= - m.b158 + m.b161 - m.b179 <= 0)
m.c1419 = Constraint(expr= - m.b159 + m.b160 - m.b180 <= 0)
m.c1420 = Constraint(expr= - m.b159 + m.b161 - m.b181 <= 0)
m.c1421 = Constraint(expr= - m.b160 + m.b161 - m.b182 <= 0)
m.c1422 = Constraint(expr= - m.b162 + m.b163 - m.b168 <= 0)
m.c1423 = Constraint(expr= - m.b162 + m.b164 - m.b169 <= 0)
m.c1424 = Constraint(expr= - m.b162 + m.b165 - m.b170 <= 0)
m.c1425 = Constraint(expr= - m.b162 + m.b166 - m.b171 <= 0)
m.c1426 = Constraint(expr= - m.b162 + m.b167 - m.b172 <= 0)
m.c1427 = Constraint(expr= - m.b163 + m.b164 - m.b173 <= 0)
m.c1428 = Constraint(expr= - m.b163 + m.b165 - m.b174 <= 0)
m.c1429 = Constraint(expr= - m.b163 + m.b166 - m.b175 <= 0)
m.c1430 = Constraint(expr= - m.b163 + m.b167 - m.b176 <= 0)
m.c1431 = Constraint(expr= - m.b164 + m.b165 - m.b177 <= 0)
m.c1432 = Constraint(expr= - m.b164 + m.b166 - m.b178 <= 0)
m.c1433 = Constraint(expr= - m.b164 + m.b167 - m.b179 <= 0)
m.c1434 = Constraint(expr= - m.b165 + m.b166 - m.b180 <= 0)
m.c1435 = Constraint(expr= - m.b165 + m.b167 - m.b181 <= 0)
m.c1436 = Constraint(expr= - m.b166 + m.b167 - m.b182 <= 0)
# flarestack/cluster/submitter.py
import os, subprocess, time, logging, shutil, copy
import numpy as np
from flarestack.shared import (
fs_dir,
log_dir,
fs_scratch_dir,
make_analysis_pickle,
host_server,
inj_dir_name,
name_pickle_output_dir,
cluster_dir,
)
from flarestack.core.multiprocess_wrapper import run_multiprocess
from flarestack.core.minimisation import MinimisationHandler
from flarestack.core.results import ResultsHandler
logger = logging.getLogger(__name__)
class Submitter(object):
submitter_dict = dict()
def __init__(
self,
mh_dict,
use_cluster,
n_cpu=None,
do_sensitivity_scale_estimation=False,
remove_old_results=False,
**cluster_kwargs,
):
"""
A class that takes care of submitting the trial calculations.
Also can estimate the sensitivity scale before submitting.
:param mh_dict: dict, MinimisationHandler dictionary
:param use_cluster: bool, whether to run the trials locally or on the cluster
:param n_cpu: int, number of cores to use
:param do_sensitivity_scale_estimation: str, containing 'asimov', 'quick_injections' or both
:param remove_old_results: bool, if True will delete directories containing injection values and pickled
results from previous trials
:param cluster_kwargs: keyword arguments used by the cluster
"""
self.mh_dict = copy.deepcopy(mh_dict)
self.use_cluster = use_cluster
self.n_cpu = os.cpu_count() - 1 if isinstance(n_cpu, type(None)) else n_cpu
self.job_id = None
self.remove_old_results = remove_old_results
self.do_sensitivity_scale_estimation = do_sensitivity_scale_estimation
self.sens_guess = self.disc_guess = None
self.successful_guess_by_quick_injections = False
self.cluster_kwargs = cluster_kwargs
def __str__(self):
s = (
f'\n----- Submitter for {self.mh_dict["name"]} -----\n'
f'{"" if self.use_cluster else "not "}using cluster \n'
f"using {self.n_cpu} CPUs locally\n"
f"job-id: {self.job_id} \n"
f'{self.do_sensitivity_scale_estimation if self.do_sensitivity_scale_estimation else "no"} '
f"scale estimation \n"
)
if self.cluster_kwargs:
s += "cluster kwargs: \n"
for k, v in self.cluster_kwargs.items():
s += f" {k}: {v} \n"
return s
def submit_cluster(self, mh_dict):
"""Splits the trials into jobs and submits them to be calculated on the cluster"""
raise NotImplementedError
def submit_local(self, mh_dict):
"""Uses the MultiprocessWrapper to split the trials into jobs and run them locally"""
# max CPU number is all but one
make_analysis_pickle(mh_dict)
n_cpu = min(self.n_cpu, os.cpu_count() - 1)
run_multiprocess(n_cpu=n_cpu, mh_dict=mh_dict)
def submit(self, mh_dict):
if self.remove_old_results:
self._clean_injection_values_and_pickled_results(self.mh_dict["name"])
if self.use_cluster:
self.submit_cluster(mh_dict)
else:
self.submit_local(mh_dict)
def wait_for_job(self):
"""Waits until the cluster is finished processing the job with the ID self.job_id"""
raise NotImplementedError
# @staticmethod
# def _wait_for_cluster(job_ids=None):
# raise NotImplementedError
@staticmethod
def get_pending_ids():
raise NotImplementedError
@staticmethod
def wait_for_cluster(job_ids=None):
"""
Waits until the cluster is done. Wait for all jobs if job_ids is None or give a list of IDs
:param job_ids: list, optional, if given, specifies the IDs of the jobs that will be waited on
"""
# If no job IDs are specified, get all IDs currently listed for this user
cls = Submitter.get_submitter_class()
if not job_ids:
# job_ids = np.unique(cls.get_ids(DESYSubmitter.status_cmd))
job_ids = cls.get_pending_ids()
for id in job_ids:
logger.info(f"waiting for job {id}")
# create a submitter; it does not need the mh_dict when no functions are called
s = cls(None, None)
s.job_id = id # set the right job_id
s.wait_for_job() # use the built-in function to wait for completion of that job
@property
def _quick_injections_name(self):
name = self.mh_dict["name"]
return f"{name if not name.endswith(os.sep) else name[:-1]}_quick_injection/"
def run_quick_injections_to_estimate_sensitivity_scale(self):
"""
Roughly estimates the injection scale in order to find a better scale range.
The quick injection trials are run locally.
Note that a scale still has to be given in the mh_dict as a first estimate.
"""
logger.info(f"doing quick trials to estimate scale")
if self.mh_dict["mh_name"] == "fit_weights":
raise NotImplementedError(
"This method does not work with the fit_weights MinimizationHandler "
"because it assumes a background TS distribution median of zero! "
"Be the hero to think of something!"
)
# The given scale will serve as an initial guess
guess = self.mh_dict["scale"] if not self.disc_guess else self.disc_guess
# make sure no results from previous quick injections are lying around
self._clean_injection_values_and_pickled_results(self._quick_injections_name)
# repeat the guessing until success:
while not self.successful_guess_by_quick_injections:
quick_injections_mh_dict = dict(self.mh_dict)
quick_injections_mh_dict["name"] = self._quick_injections_name
quick_injections_mh_dict["background_ntrials_factor"] = 1
quick_injections_mh_dict["n_trials"] = 10
quick_injections_mh_dict["scale"] = guess
self.submit_local(quick_injections_mh_dict)
# collect the quick injections
quick_injections_rh = ResultsHandler(
quick_injections_mh_dict, do_sens=False, do_disc=False
)
# guess the disc and sens scale
(
self.disc_guess,
self.sens_guess,
) = quick_injections_rh.estimate_sens_disc_scale()
if any((g < 0) or (g > guess) for g in [self.disc_guess, self.sens_guess]):
logger.info(
f"Could not perform scale guess because "
f"at least one guess outside [0, {guess}]! "
f"Adjusting accordingly."
)
guess = abs(max((self.sens_guess, self.disc_guess)) * 1.5)
elif guess > 5 * self.disc_guess:
logger.info(
f"Could not perform scale guess beause "
f"initial scale guess {guess} much larger than "
f"disc scale guess {self.disc_guess}. "
f"Adjusting initial guess to {4 * self.disc_guess} and retry."
)
guess = 4 * abs(self.disc_guess)
else:
logger.info("Scale guess successful. Adjusting injection scale.")
self.successful_guess_by_quick_injections = True
self._clean_injection_values_and_pickled_results(quick_injections_rh.name)
@staticmethod
def _clean_injection_values_and_pickled_results(name):
"""
Removes directories containing injection values and pickled results
:param name: str, the path used in the minimisation handler dictionary (mh_dict)
"""
directories = [name_pickle_output_dir(name), inj_dir_name(name)]
for d in directories:
if os.path.isdir(d):
logger.debug(f"removing {d}")
shutil.rmtree(d)
else:
logger.warning(f"Can not remove {d}! It is not a directory!")
def do_asimov_scale_estimation(self):
"""estimate the injection scale using Asimov estimation"""
logger.info("doing asimov estimation")
mh = MinimisationHandler.create(self.mh_dict)
scale_estimate = mh.guess_scale()
logger.debug(f"estimated scale: {scale_estimate}")
self.disc_guess = scale_estimate
self.sens_guess = 0.3 * self.disc_guess
def analyse(self, do_disc=False):
"""
Submits the minimisation handler dictionary (self.mh_dict) to be analysed.
This happens locally if self.use_cluster == False.
:param do_disc: bool, if True, use the estimated discovery potential as
the injection scale instead of the sensitivity.
"""
if self.do_sensitivity_scale_estimation:
if "asimov" in self.do_sensitivity_scale_estimation:
self.do_asimov_scale_estimation()
if "quick_injections" in self.do_sensitivity_scale_estimation:
self.run_quick_injections_to_estimate_sensitivity_scale()
if not do_disc:
self.mh_dict["scale"] = self.sens_guess / 0.5
else:
self.mh_dict["scale"] = self.disc_guess / 0.5
self.submit(self.mh_dict)
@classmethod
def register_submitter_class(cls, server_name):
"""Adds a new subclass of Submitter, with class name equal to "server_name"."""
def decorator(subclass):
cls.submitter_dict[server_name] = subclass
return subclass
return decorator
@classmethod
def get_submitter(cls, *args, **kwargs):
"""
Get an initialised instance of the Submitter class suited for the
used server.
:param args: arguments passed to the submitter
:param kwargs: keyword arguments passed to the submitter
:return: instance of Submitter subclass
"""
return Submitter.get_submitter_class()(*args, **kwargs)
@classmethod
def get_submitter_class(cls):
"""Get the Submitter class suited for the used server."""
if host_server not in cls.submitter_dict:
logger.warning(
f"No submitter implemented for host server {host_server}! "
f"Using LocalSubmitter but you wont't be able to use cluster operations!"
)
return cls.submitter_dict["local"]
return cls.submitter_dict[host_server]
@Submitter.register_submitter_class("local")
class LocalSubmitter(Submitter):
def __init__(
self,
mh_dict,
use_cluster,
n_cpu=None,
do_sensitivity_scale_estimation=False,
**cluster_kwargs,
):
if use_cluster:
raise NotImplementedError(
"No cluster operation implemented because you are using the LocalSubmitter!"
)
super(LocalSubmitter, self).__init__(
mh_dict,
use_cluster,
n_cpu,
do_sensitivity_scale_estimation,
**cluster_kwargs,
)
@Submitter.register_submitter_class("DESY")
class DESYSubmitter(Submitter):
submit_file = os.path.join(cluster_dir, "SubmitDESY.sh")
username = os.path.basename(os.environ["HOME"])
status_cmd = f"qstat -u {username}"
submit_cmd = "qsub "
root_dir = os.path.dirname(fs_dir[:-1])
def __init__(self, mh_dict, use_cluster, n_cpu=None, **cluster_kwargs):
"""
Initialises a DESYSubmitter instance.
:param mh_dict: the MinimisationHandler dict
:type mh_dict: dict
:param use_cluster: whether to use the cluster
:type use_cluster: bool
:param n_cpu: how many CPUs to use on the local machine
:type n_cpu: int
:param cluster_kwargs: keyword arguments for the cluster, available are:
h_cpu in the form "hh:mm:ss": how long the cluster jobs run
trials_per_task: int, how many trials to run per cluster job
cluster_cpu: int, how many CPUs to use on the cluster machines
ram_per_core in the form "<number>G": e.g. 6G to use 6GB RAM for each cluster job
"""
super(DESYSubmitter, self).__init__(
mh_dict, use_cluster, n_cpu, **cluster_kwargs
)
# extract information that will be used by the cluster script
self.h_cpu = self.cluster_kwargs.get("h_cpu", "23:59:00")
self.trials_per_task = self.cluster_kwargs.get("trials_per_task", 1)
self.cluster_cpu = self.cluster_kwargs.get("cluster_cpu", self.n_cpu)
self.ram_per_core = self.cluster_kwargs.get(
"ram_per_core", "{0:.1f}G".format(6.0 / float(self.cluster_cpu) + 2.0)
)
self.remove_old_logs = self.cluster_kwargs.get("remove_old_logs", True)
@staticmethod
def _qstat_output(qstat_command):
"""return the output of the qstat_command"""
# start a subprocess to query the cluster
process = subprocess.Popen(qstat_command, stdout=subprocess.PIPE, shell=True)
# read the output
tmp = process.stdout.read().decode()
return str(tmp)
@staticmethod
def get_ids(qstat_command):
"""Takes a command that queries the DESY cluster and returns a list of job IDs"""
st = DESYSubmitter._qstat_output(qstat_command)
# If the output is an empty string there are no tasks left
if st == "":
ids = list()
else:
# Extract the list of job IDs
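# (skip the two header rows and the trailing empty line; the job ID is
# assumed to be the third space-separated token of each remaining row)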
ids = np.array([int(s.split(" ")[2]) for s in st.split("\n")[2:-1]])
return ids
def _ntasks_from_qstat_command(self, qstat_command):
"""Returns the number of tasks from the output of qstat_command"""
# get the output of qstat_command
ids = self.get_ids(qstat_command)
ntasks = 0 if len(ids) == 0 else len(ids[ids == self.job_id])
return ntasks
@property
def ntasks_total(self):
"""Returns the total number of tasks"""
return self._ntasks_from_qstat_command(DESYSubmitter.status_cmd)
@property
def ntasks_running(self):
"""Returns the number of running tasks"""
return | |
1 else boardUnits), inline=False)
externalUser = True
if first: first = False
else:
leaderboardEmbed.add_field(value=str(place + 1) + ". " + message.guild.get_member(sortedUsers[place][0]).mention, name=("⭐ " if first else "") + str(sortedUsers[place][1]) + " " + (boardUnit if sortedUsers[place][1] == 1 else boardUnits), inline=False)
if first: first = False
# If at least one external user is on the leaderboard, give a key
if externalUser:
leaderboardEmbed.set_footer(text="An `*` indicates a user that is from another server.")
# send the embed
await message.channel.send(embed=leaderboardEmbed)
bbCommands.register("leaderboard", cmd_leaderboard)
dmCommands.register("leaderboard", err_nodm)
"""
return a page listing the target user's items.
can apply to a specified user, or the calling user if none is specified.
can apply to a type of item (ships, modules, turrets or weapons), or all items if none is specified.
can apply to a page, or the first page if none is specified.
Arguments can be given in any order, and must be separated by a single space.
TODO: try displaying as a discord message rather than embed?
TODO: add icons for ships and items!?
@param message -- the discord message calling the command
@param args -- string containing the arguments as specified above
"""
async def cmd_hangar(message, args):
argsSplit = args.split(" ")
requestedUser = message.author
item = "all"
page = 1
foundUser = False
foundItem = False
foundPage = False
useDummyData = False
if len(argsSplit) > 3:
await message.channel.send(":x: Too many arguments! I can only take a target user, an item type (ship/weapon/module), and a page number!")
return
if args != "":
argNum = 1
for arg in argsSplit:
if arg != "":
if bbUtil.isMention(arg):
if foundUser:
await message.channel.send(":x: I can only take one user!")
return
else:
requestedUser = client.get_user(int(arg.lstrip("<@!")[:-1]))
foundUser = True
elif arg in bbConfig.validItemNames:
if foundItem:
await message.channel.send(":x: I can only take one item type (ship/weapon/module/turret)!")
return
else:
item = arg.rstrip("s")
foundItem = True
elif bbUtil.isInt(arg):
if client.get_user(int(arg)) is not None and not foundUser:
requestedUser = client.get_user(int(arg))
continue
if foundPage:
await message.channel.send(":x: I can only take one page number!")
return
else:
page = int(arg)
foundPage = True
else:
await message.channel.send(":x: " + str(argNum) + getNumExtension(argNum) + " argument invalid! I can only take a target user, an item type (ship/weapon/module/turret), and a page number!")
return
argNum += 1
if requestedUser is None:
await message.channel.send(":x: Unrecognised user!")
return
if not usersDB.userIDExists(requestedUser.id):
if not foundUser:
usersDB.addUser(requestedUser.id)
else:
useDummyData = True
sendChannel = None
sendDM = False
if item == "all":
if message.author.dm_channel is None:
await message.author.create_dm()
if message.author.dm_channel is None:
sendChannel = message.channel
else:
sendChannel = message.author.dm_channel
sendDM = True
else:
sendChannel = message.channel
if useDummyData:
if page > 1:
await message.channel.send(":x: " + ("The requested pilot" if foundUser else "You") + " only " + ("has" if foundUser else "have") + " one page of items. Showing page one:")
page = 1
elif page < 1:
await message.channel.send(":x: Invalid page number. Showing page one:")
page = 1
hangarEmbed = makeEmbed(titleTxt="Hangar", desc=requestedUser.mention, col=bbData.factionColours["neutral"], footerTxt="All items" if item == "all" else item.rstrip("s").title() + "s - page " + str(page), thumb=requestedUser.avatar_url_as(size=64))
hangarEmbed.add_field(name="No Stored Items", value="", inline=False)
await message.channel.send(embed=hangarEmbed)
return
else:
requestedBBUser = usersDB.getUser(requestedUser.id)
if item == "all":
maxPerPage = bbConfig.maxItemsPerHangarPageAll
else:
maxPerPage = bbConfig.maxItemsPerHangarPageIndividual
if page < 1:
await message.channel.send(":x: Invalid page number. Showing page one:")
page = 1
else:
maxPage = requestedBBUser.numInventoryPages(item, maxPerPage)
if maxPage == 0:
await message.channel.send(":x: " + ("The requested pilot doesn't" if foundUser else "You don't") + " have any " + ("items" if item == "all" else "of that item") + "!")
return
elif page > maxPage:
await message.channel.send(":x: " + ("The requested pilot" if foundUser else "You") + " only " + ("has" if foundUser else "have") + str(maxPage) + " page(s) of items. Showing page " + str(maxPage) + ":")
page = maxPage
hangarEmbed = makeEmbed(titleTxt="Hangar", desc=requestedUser.mention, col=bbData.factionColours["neutral"], footerTxt=("All item" if item == "all" else item.rstrip("s").title()) + "s - page " + str(page) + "/" + str(requestedBBUser.numInventoryPages(item, maxPerPage)), thumb=requestedUser.avatar_url_as(size=64))
firstPlace = maxPerPage * (page - 1) + 1
if item in ["all", "ship"]:
for shipNum in range(firstPlace, requestedBBUser.lastItemNumberOnPage("ship", page, maxPerPage) + 1):
if shipNum == firstPlace:
hangarEmbed.add_field(name="", value="__**Stored Ships**__", inline=False)
hangarEmbed.add_field(name=str(shipNum) + ". " + requestedBBUser.inactiveShips[shipNum - 1].getNameAndNick(), value=(requestedBBUser.inactiveShips[shipNum - 1].emoji if requestedBBUser.inactiveShips[shipNum - 1].hasEmoji else "") + requestedBBUser.inactiveShips[shipNum - 1].statsStringShort(), inline=False)
if item in ["all", "weapon"]:
for weaponNum in range(firstPlace, requestedBBUser.lastItemNumberOnPage("weapon", page, maxPerPage) + 1):
if weaponNum == firstPlace:
hangarEmbed.add_field(name="", value="__**Stored Weapons**__", inline=False)
hangarEmbed.add_field(name=str(weaponNum) + ". " + requestedBBUser.inactiveWeapons[weaponNum - 1].name, value=(requestedBBUser.inactiveWeapons[weaponNum - 1].emoji if requestedBBUser.inactiveWeapons[weaponNum - 1].hasEmoji else "") + requestedBBUser.inactiveWeapons[weaponNum - 1].statsStringShort(), inline=False)
if item in ["all", "module"]:
for moduleNum in range(firstPlace, requestedBBUser.lastItemNumberOnPage("module", page, maxPerPage) + 1):
if moduleNum == firstPlace:
hangarEmbed.add_field(name="", value="__**Stored Modules**__", inline=False)
hangarEmbed.add_field(name=str(moduleNum) + ". " + requestedBBUser.inactiveModules[moduleNum - 1].name, value=(requestedBBUser.inactiveModules[moduleNum - 1].emoji if requestedBBUser.inactiveModules[moduleNum - 1].hasEmoji else "") + requestedBBUser.inactiveModules[moduleNum - 1].statsStringShort(), inline=False)
if item in ["all", "turret"]:
for turretNum in range(firstPlace, requestedBBUser.lastItemNumberOnPage("turret", page, maxPerPage) + 1):
if turretNum == firstPlace:
hangarEmbed.add_field(name="", value="__**Stored Turrets**__", inline=False)
hangarEmbed.add_field(name=str(turretNum) + ". " + requestedBBUser.inactiveTurrets[turretNum - 1].name, value=(requestedBBUser.inactiveTurrets[turretNum - 1].emoji if requestedBBUser.inactiveTurrets[turretNum - 1].hasEmoji else "") + requestedBBUser.inactiveTurrets[turretNum - 1].statsStringShort(), inline=False)
try:
await sendChannel.send(embed=hangarEmbed)
if sendDM:
await message.add_reaction(bbConfig.dmSentEmoji)
except discord.Forbidden:
await message.channel.send(":x: I can't DM you, " + message.author.name + "! Please enable DMs from users who are not friends.")
bbCommands.register("hangar", cmd_hangar)
bbCommands.register("hanger", cmd_hangar)
dmCommands.register("hangar", cmd_hangar)
dmCommands.register("hanger", cmd_hangar)
"""
list the current stock of the bbShop owned by the guild containing the sent message.
Can specify an item type to list. TODO: Make specified item listings more detailed as in !bb bounties
@param message -- the discord message calling the command
@param args -- either empty string, or one of bbConfig.validItemNames
"""
async def cmd_shop(message, args):
item = "all"
if args.rstrip("s") in bbConfig.validItemNames:
item = args.rstrip("s")
elif args != "":
await message.channel.send(":x: Invalid item type! (ship/weapon/module/turret/all)")
return
sendChannel = None
sendDM = False
if item == "all":
if message.author.dm_channel is None:
await message.author.create_dm()
if message.author.dm_channel is None:
sendChannel = message.channel
else:
sendChannel = message.author.dm_channel
sendDM = True
else:
sendChannel = message.channel
requestedShop = guildsDB.getGuild(message.guild.id).shop
shopEmbed = makeEmbed(titleTxt="Shop", desc=message.guild.name, footerTxt="All items" if item == "all" else (item + "s").title(), thumb="https://cdn.discordapp.com/icons/" + str(message.guild.id) + "/" + message.guild.icon + ".png?size=64")
if item in ["all", "ship"]:
for shipNum in range(1, len(requestedShop.shipsStock) + 1):
if shipNum == 1:
shopEmbed.add_field(name="", value="__**Ships**__", inline=False)
shopEmbed.add_field(value=(requestedShop.shipsStock[shipNum - 1].emoji if requestedShop.shipsStock[shipNum - 1].hasEmoji else "") + " " + commaSplitNum(str(requestedShop.shipsStock[shipNum - 1].getValue())) + " Credits\n" + requestedShop.shipsStock[shipNum - 1].statsStringShort(), name=str(shipNum) + ". " + "**" + requestedShop.shipsStock[shipNum - 1].getNameAndNick() + "**", inline=True)
if item in ["all", "weapon"]:
for weaponNum in range(1, len(requestedShop.weaponsStock) + 1):
if weaponNum == 1:
shopEmbed.add_field(name="", value="__**Weapons**__", inline=False)
shopEmbed.add_field(value=(requestedShop.weaponsStock[weaponNum - 1].emoji if requestedShop.weaponsStock[weaponNum - 1].hasEmoji else "") + " " + commaSplitNum(str(requestedShop.weaponsStock[weaponNum - 1].value)) + " Credits\n" + requestedShop.weaponsStock[weaponNum - 1].statsStringShort(), name=str(weaponNum) + ". " + "**" + requestedShop.weaponsStock[weaponNum - 1].name + "**", inline=True)
if item in ["all", "module"]:
for moduleNum in range(1, len(requestedShop.modulesStock) + 1):
if moduleNum == 1:
shopEmbed.add_field(name="", value="__**Modules**__", inline=False)
shopEmbed.add_field(value=(requestedShop.modulesStock[moduleNum - 1].emoji if requestedShop.modulesStock[moduleNum - 1].hasEmoji else "") + " " + commaSplitNum(str(requestedShop.modulesStock[moduleNum - 1].value)) + " Credits\n" + requestedShop.modulesStock[moduleNum - 1].statsStringShort(), name=str(moduleNum) + ". " + "**" + requestedShop.modulesStock[moduleNum - 1].name + "**", inline=True)
if item in ["all", "turret"]:
for turretNum in range(1, len(requestedShop.turretsStock) + 1):
if turretNum == 1:
shopEmbed.add_field(name="", value="__**Turrets**__", inline=False)
shopEmbed.add_field(value=(requestedShop.turretsStock[turretNum - 1].emoji if requestedShop.turretsStock[turretNum - 1].hasEmoji else "") + " " + commaSplitNum(str(requestedShop.turretsStock[turretNum - 1].value)) + " Credits\n" + requestedShop.turretsStock[turretNum - 1].statsStringShort(), name=str(turretNum) + ". " + "**" + requestedShop.turretsStock[turretNum - 1].name + "**", inline=True)
try:
await sendChannel.send(embed=shopEmbed)
except discord.Forbidden:
await message.channel.send(":x: I can't DM you, " + message.author.name + "! Please enable DMs from users who are not friends.")
return
if sendDM:
await message.add_reaction(bbConfig.dmSentEmoji)
bbCommands.register("shop", cmd_shop)
bbCommands.register("store", cmd_shop)
dmCommands.register("shop", err_nodm)
dmCommands.register("store", err_nodm)
"""
list the requested user's currently equipped items.
@param message -- the discord message calling the command
@param args -- either empty string, or a user mention
"""
async def cmd_loadout(message, args):
requestedUser = message.author
useDummyData = False
userFound = False
if len(args.split(" ")) > 1:
await message.channel.send(":x: | |
Device(name='asr1k')
dev_c3850 = Device(name='c3850')
empty_output = {'execute.return_value': ''}
semi_empty_output = {'execute.return_value': '''\
Directory of flash:/
'''}
golden_parsed_output_c3850 = {
'dir': {
'dir': 'flash:/',
'flash:/': {
'files': {
'bootloader_evt_handle.log': {
'index': '30530',
'permissions': '-rw-',
'size': '16872',
'last_modified_date': 'Apr 10 2017 17:20:51 +00:00',
},
'core': {
'index': '30531',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Apr 10 2017 00:17:34 +00:00',
},
'.prst_sync': {
'index': '30532',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Apr 10 2017 14:35:35 +00:00',
},
'.rollback_timer': {
'index': '30534',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Jan 15 2017 20:53:32 +00:00',
},
'dc_profile_dir': {
'index': '30535',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Apr 10 2017 17:21:10 +00:00',
},
'gs_script': {
'index': '30537',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Jan 15 2017 20:53:40 +00:00',
},
'memleak.tcl': {
'index': '30540',
'permissions': '-rw-',
'size': '65301',
'last_modified_date': 'Apr 10 2017 17:21:27 +00:00',
},
'boothelper.log': {
'index': '30542',
'permissions': '-rw-',
'size': '66',
'last_modified_date': 'Apr 10 2017 17:21:28 +00:00',
},
'.installer': {
'index': '30541',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Jan 15 2017 20:54:21 +00:00',
},
'nvram_config': {
'index': '30539',
'permissions': '-rw-',
'size': '2097152',
'last_modified_date': 'Apr 10 2017 17:25:37 +00:00',
},
'tools': {
'index': '68689',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Mar 18 2017 20:39:57 +00:00',
},
'mscfips_post_test.dbg': {
'index': '30544',
'permissions': '-rw-',
'size': '17500',
'last_modified_date': 'Apr 10 2017 17:23:01 +00:00',
},
'vlan.dat': {
'index': '30548',
'permissions': '-rw-',
'size': '3436',
'last_modified_date': 'Apr 10 2017 11:52:23 +00:00',
},
'mscfips_post_test.output': {
'index': '30545',
'permissions': '-rw-',
'size': '6856',
'last_modified_date': 'Apr 10 2017 17:23:01 +00:00',
},
'pnp-tech-time': {
'index': '30546',
'permissions': '-rw-',
'size': '35',
'last_modified_date': 'Apr 10 2017 17:25:57 +00:00',
},
'ISSUCleanGolden': {
'index': '30550',
'permissions': '-rw-',
'size': '630812001',
'last_modified_date': 'Jan 16 2017 11:05:56 +00:00',
},
'pnp-tech-discovery-summary': {
'index': '30547',
'permissions': '-rw-',
'size': '21107',
'last_modified_date': 'Apr 10 2017 17:26:38 +00:00',
},
'onep': {
'index': '30552',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Jan 17 2017 10:21:50 +00:00',
},
},
'bytes_total': '1598627840',
'bytes_free': '880939008',
},
}
}
golden_output_c3850 = {'execute.return_value': '''\
Directory of flash:/
30530 -rw- 16872 Apr 10 2017 17:20:51 +00:00 bootloader_evt_handle.log
30531 drwx 4096 Apr 10 2017 00:17:34 +00:00 core
30532 drwx 4096 Apr 10 2017 14:35:35 +00:00 .prst_sync
30534 drwx 4096 Jan 15 2017 20:53:32 +00:00 .rollback_timer
30535 drwx 4096 Apr 10 2017 17:21:10 +00:00 dc_profile_dir
30537 drwx 4096 Jan 15 2017 20:53:40 +00:00 gs_script
30540 -rw- 65301 Apr 10 2017 17:21:27 +00:00 memleak.tcl
30542 -rw- 66 Apr 10 2017 17:21:28 +00:00 boothelper.log
30541 drwx 4096 Jan 15 2017 20:54:21 +00:00 .installer
30539 -rw- 2097152 Apr 10 2017 17:25:37 +00:00 nvram_config
68689 drwx 4096 Mar 18 2017 20:39:57 +00:00 tools
30544 -rw- 17500 Apr 10 2017 17:23:01 +00:00 mscfips_post_test.dbg
30548 -rw- 3436 Apr 10 2017 11:52:23 +00:00 vlan.dat
30545 -rw- 6856 Apr 10 2017 17:23:01 +00:00 mscfips_post_test.output
30546 -rw- 35 Apr 10 2017 17:25:57 +00:00 pnp-tech-time
30550 -rw- 630812001 Jan 16 2017 11:05:56 +00:00 ISSUCleanGolden
30547 -rw- 21107 Apr 10 2017 17:26:38 +00:00 pnp-tech-discovery-summary
30552 drwx 4096 Jan 17 2017 10:21:50 +00:00 onep
1598627840 bytes total (880939008 bytes free)
'''}
golden_parsed_output_asr1k = {
'dir': {
'dir': 'bootflash:/',
'bootflash:/': {
'bytes_free': '1036210176',
'bytes_total': '1940303872',
'files': {
'lost+found': {
'index': '11',
'permissions': 'drwx',
'size': '16384',
'last_modified_date': 'Nov 25 2016 19:32:53 -07:00',
},
'ds_stats.txt': {
'index': '12',
'permissions': '-rw-',
'size': '0',
'last_modified_date': 'Dec 13 2016 11:36:36 -07:00',
},
'.prst_sync': {
'index': '104417',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Apr 10 2017 09:09:11 -07:00',
},
'.rollback_timer': {
'index': '80321',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Nov 25 2016 19:40:38 -07:00',
},
'.installer': {
'index': '64257',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Nov 25 2016 19:41:02 -07:00',
},
'virtual-instance-stby-sync': {
'index': '48193',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Nov 25 2016 19:41:14 -07:00',
},
'onep': {
'index': '8033',
'permissions': 'drwx',
'size': '4096',
'last_modified_date': 'Nov 25 2016 18:42:07 -07:00',
},
'pnp-tech-time': {
'index': '13',
'permissions': '-rw-',
'size': '35',
'last_modified_date': 'Apr 10 2017 09:11:45 -07:00',
},
'pnp-tech-discovery-summary': {
'index': '14',
'permissions': '-rw-',
'size': '19957',
'last_modified_date': 'Apr 10 2017 09:13:55 -07:00',
},
'aaa1': {
'index': '15',
'permissions': '-rw-',
'size': '24970',
'last_modified_date': 'Dec 13 2016 12:07:18 -07:00',
},
'crashinfo_RP_00_00_20170203-155749-PDT': {
'index': '16',
'permissions': '-rw-',
'size': '177449',
'last_modified_date': 'Feb 3 2017 15:57:50 -07:00',
},
'crashinfo_RP_00_00_20170206-122158-PDT': {
'index': '17',
'permissions': '-rw-',
'size': '168196',
'last_modified_date': 'Feb 6 2017 12:21:59 -07:00',
},
'crashinfo_RP_00_00_20170206-172530-PDT': {
'index': '18',
'permissions': '-rw-',
'size': '163081',
'last_modified_date': 'Feb 6 2017 17:25:31 -07:00',
},
'crashinfo_RP_00_00_20170207-133017-PDT': {
'index': '19',
'permissions': '-rw-',
'size': '160713',
'last_modified_date': 'Feb 7 2017 13:30:18 -07:00',
},
'crashinfo_RP_00_00_20170208-180855-PDT': {
'index': '20',
'permissions': '-rw-',
'size': '177276',
'last_modified_date': 'Feb 8 2017 18:08:56 -07:00',
},
'crashinfo_RP_00_00_20170210-120312-PDT': {
'index': '21',
'permissions': '-rw-',
'size': '160725',
'last_modified_date': 'Feb 10 2017 12:03:13 -07:00',
},
'crashinfo_RP_00_00_20170210-163201-PDT': {
'index': '22',
'permissions': '-rw-',
'size': '163143',
'last_modified_date': 'Feb 10 2017 16:32:02 -07:00',
},
'crashinfo_RP_00_00_20170213-112420-PDT': {
'index': '23',
'permissions': '-rw-',
'size': '168245',
'last_modified_date': 'Feb 13 2017 11:24:21 -07:00',
},
'testimage': {
'index': '24',
'permissions': '-rw-',
'size': '794609595',
'last_modified_date': 'Feb 17 2017 11:50:21 -07:00',
},
'crashinfo_RP_00_00_20170307-043013-PDT': {
'index': '25',
'permissions': '-rw-',
'size': '167767',
'last_modified_date': 'Mar 7 2017 04:30:14 -07:00',
},
'crashinfo_RP_00_00_20170307-165741-PDT': {
'index': '28',
'permissions': '-rw-',
'size': '163152',
'last_modified_date': 'Mar 7 2017 16:57:42 -07:00',
},
'kak1': {
'index': '37',
'permissions': '-rw-',
'size': '25189',
'last_modified_date': 'Dec 14 2016 09:15:37 -07:00',
},
}
}
}
}
golden_output_asr1k = {'execute.return_value': '''\
Directory of bootflash:/
11 drwx 16384 Nov 25 2016 19:32:53 -07:00 lost+found
12 -rw- 0 Dec 13 2016 11:36:36 -07:00 ds_stats.txt
104417 drwx 4096 Apr 10 2017 09:09:11 -07:00 .prst_sync
80321 drwx 4096 Nov 25 2016 19:40:38 -07:00 .rollback_timer
64257 drwx 4096 Nov 25 2016 19:41:02 -07:00 .installer
48193 drwx 4096 Nov 25 2016 19:41:14 -07:00 virtual-instance-stby-sync
8033 drwx 4096 Nov 25 2016 18:42:07 -07:00 onep
13 -rw- 35 Apr 10 2017 09:11:45 -07:00 pnp-tech-time
14 -rw- 19957 Apr 10 2017 09:13:55 -07:00 pnp-tech-discovery-summary
15 -rw- 24970 Dec 13 2016 12:07:18 -07:00 aaa1
16 -rw- 177449 Feb 3 2017 15:57:50 -07:00 crashinfo_RP_00_00_20170203-155749-PDT
17 -rw- 168196 Feb 6 2017 12:21:59 -07:00 crashinfo_RP_00_00_20170206-122158-PDT
18 -rw- 163081 Feb 6 2017 17:25:31 -07:00 crashinfo_RP_00_00_20170206-172530-PDT
19 -rw- 160713 Feb 7 2017 13:30:18 -07:00 crashinfo_RP_00_00_20170207-133017-PDT
20 -rw- 177276 Feb 8 2017 18:08:56 -07:00 crashinfo_RP_00_00_20170208-180855-PDT
21 -rw- 160725 Feb 10 2017 12:03:13 -07:00 crashinfo_RP_00_00_20170210-120312-PDT
22 -rw- 163143 Feb 10 2017 16:32:02 -07:00 crashinfo_RP_00_00_20170210-163201-PDT
23 -rw- 168245 Feb 13 2017 11:24:21 -07:00 crashinfo_RP_00_00_20170213-112420-PDT
24 -rw- 794609595 Feb 17 2017 11:50:21 -07:00 testimage
25 -rw- 167767 Mar 7 2017 04:30:14 -07:00 crashinfo_RP_00_00_20170307-043013-PDT
28 -rw- 163152 Mar 7 2017 16:57:42 -07:00 crashinfo_RP_00_00_20170307-165741-PDT
37 -rw- 25189 Dec 14 2016 09:15:37 -07:00 kak1
1940303872 bytes total (1036210176 bytes free)
'''}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
dir_obj = Dir(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = dir_obj.parse()
def test_semi_empty(self):
self.dev2 = Mock(**self.semi_empty_output)
dir_obj = Dir(device=self.dev2)
# with self.assertRaises(SchemaMissingKeyError):
with self.assertRaises(Exception):
parsed_output = dir_obj.parse()
def test_golden_c3850(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_c3850)
dir_obj = Dir(device=self.dev_c3850)
parsed_output = dir_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_c3850)
def test_golden_asr1k(self):
self.maxDiff = None
self.dev_asr1k = Mock(**self.golden_output_asr1k)
dir_obj = Dir(device=self.dev_asr1k)
parsed_output = dir_obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_asr1k)
def test_golden_asr1k_with_arg(self):
self.maxDiff = None
self.dev_asr1k = Mock(**self.golden_output_asr1k)
dir_obj = Dir(device=self.dev_asr1k)
parsed_output = dir_obj.parse(directory='bootflash:/')
self.assertEqual(parsed_output, self.golden_parsed_output_asr1k)
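# Illustrative sketch (not part of the test suite): how the parsed `dir` structure above can be
# consumed once Dir().parse() has run. The dict layout mirrors golden_parsed_output_c3850 and
# golden_parsed_output_asr1k.
def _summarize_parsed_dir(parsed):
    """Return (directory, used_bytes, directory_entries) from a parsed 'dir' output."""
    root = parsed['dir']
    fs = root[root['dir']]
    used = int(fs['bytes_total']) - int(fs['bytes_free'])
    dirs = [name for name, info in fs['files'].items() if info['permissions'] == 'drwx']
    return root['dir'], used, sorted(dirs)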
class TestShowRedundancy(unittest.TestCase):
dev1 = Device(name='empty')
dev2 = Device(name='semi_empty')
dev_asr1k = Device(name='asr1k')
dev_c3850 = Device(name='c3850')
dev_asr1002 = Device(name='asr1002')
empty_output = {'execute.return_value': ''}
semi_empty_output = {'execute.return_value': '''\
Redundant System Information :
------------------------------
Available system uptime = 1 hour, 32 minutes
Switchovers system experienced = 0
'''}
golden_parsed_output_c3850 = {
'red_sys_info': {
'available_system_uptime': '1 hour, 32 minutes',
'switchovers_system_experienced': '0',
'standby_failures': '0',
'last_switchover_reason': 'none',
'hw_mode': 'Simplex',
'conf_red_mode': 'sso',
'oper_red_mode': 'Non-redundant',
'maint_mode': 'Disabled',
'communications': 'Down',
'communications_reason': 'Failure',
},
'slot': {
'slot 1': {
'curr_sw_state': 'ACTIVE',
'uptime_in_curr_state': '1 hour, 31 minutes',
'image_ver': 'Cisco IOS Software [Everest], Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Experimental Version 16.4.20170410:165034 [v164_throttle-BLD-BLD_V164_THROTTLE_LATEST_20170410_174845 105]',
'boot': 'tftp://10.1.6.241//auto/tftp-ssr/Edison/cat3k_caa-universalk9.BLD_V164_THROTTLE_LATEST_20170410_174845.SSA.bin',
'config_register': '0x102',
}
}
}
golden_output_c3850 = {'execute.return_value': '''\
Redundant System Information :
------------------------------
Available system uptime = 1 hour, 32 minutes
Switchovers system experienced = 0
Standby failures = 0
Last switchover reason = none
Hardware Mode = Simplex
Configured Redundancy Mode = sso
Operating Redundancy Mode = Non-redundant
Maintenance Mode = Disabled
Communications = Down Reason: Failure
Current Processor Information :
-------------------------------
Active Location = slot 1
Current Software state = ACTIVE
Uptime in current state = 1 hour, 31 minutes
Image Version = Cisco IOS Software [Everest], Catalyst L3 Switch Software (CAT3K_CAA-UNIVERSALK9-M), Experimental Version 16.4.20170410:165034 [v164_throttle-BLD-BLD_V164_THROTTLE_LATEST_20170410_174845 | |
# Copyright (c) 2020 Intel Corporation. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import argparse
import sys
import bitstring
import os
import binascii
from decoder.fpt_and_cdt_utilities import get_all_cdt, find_fpt_in_file_fqpn, find_fpt_in_opened_file
from decoder.fpt_and_cdt_utilities import get_huffman_compressed_code_objects_in_code_partition
from decoder.fpt_and_cdt_utilities import CodeObjectEntry
huffman_table = {}
longest_huffman_code_in_bits = 0
shortest_huffman_code_in_bits = sys.maxsize
HUFFMAN_PAGE_DECODED_SIZE_MAX = 4096
def clear_huffman_table_data():
"""
clear the global Huffman table variable and reset the min/max code (size in bits) counter vars
:return: None
"""
global huffman_table, shortest_huffman_code_in_bits, longest_huffman_code_in_bits
huffman_table = {}
longest_huffman_code_in_bits = 0
shortest_huffman_code_in_bits = sys.maxsize
def get_longest_huffman_code_in_nbits():
"""
convenience accessor for the longest code (in number of bits) in the global Huffman table, currently
:return: longest_huffman_code_in_bits value
"""
global longest_huffman_code_in_bits
return longest_huffman_code_in_bits
def get_shortest_huffman_code_in_nbits():
"""
convenience accessor for the shortest code (in number of bits) in the global Huffman table, currently
:return: shortest_huffman_code_in_bits value
"""
global shortest_huffman_code_in_bits
return shortest_huffman_code_in_bits
def get_huffman_table():
"""
convenience accessor for the global Huffman table
:return: the global huffman_table var (a dict., by reference)
"""
global huffman_table
return huffman_table
class HuffmanTableEntry:
"""
A container class which stores each "row" of the parsed Huffman table, in the specific variant that the Intel CSME
utilizes the tables; that is, for each Huffman code, there are TWO possible "decoded"/"dictionary" values
(Note that this use of the word "dictionary" has nothing to do with Python's meaning of "dictionary")
Here are the member vars of the class, with explanation of purpose for each:
huffman_code: the Huffman code itself; it is stored as-is in an ASCII table representation (as distributed
with this source code), as a string. e.g. '00011001'
huffman_code_bits: ...is a converted version of the huffman_code field, to a bitstring BitArray.
(it is effectively 'cached' in this form, for faster use during the actual decoding process)
huffman_code_rank: The length, in bits, of the Huffman code; e.g. '00011001' ---> rank = 8
It is stored here for convenience in debugging although it isn't strictly necessary, as huffman_code_bits
has the information explicitly encoded within it (i.e. the BitArray object has a length property)
dict1_value, dict2_value: The 2 possible decodings of the Huffman code. They are converted to a bytearray
(for easy addition into an output bytestream during decoding). Care is taken to make sure Python's bytearray
conversion doesn't flip bytes while trying to interpret endianness
dictionary_data_length: the 'length' of the dictX_value values, in bytes. It is stored here for convenience in
debugging although it isn't strictly necessary, as the dictX_value vars have the information explicitly encoded
(see the __init__ constructor)
"""
def __init__(self, dict1: int, dict2: int, dict_data_length_in_bytes: int, rank: int, huffman_code):
"""
Turn both dict(ionary) values from ints into arrays of bytes. The dict_data_length_in_bytes parameter
specifies how many bytes the result should have, i.e. how many (if any) 0x00 padding bytes are needed "on the left" side.
e.g. if the dict1 value is 2, then this would plainly be 0x02 as a byte.
However, if dict_data_length_in_bytes == 3, then the desired result is [0x00, 0x00, 0x02],
which is 3 bytes in size.
===> bytearray() does this, in conjunction with int.to_bytes(). Use "big" (endian) as the
parameter so the bytes are not swapped from how they are stored in the table; i.e. convert to bytes
"literally" rather than reinterpreting the value as an "int".
:param dict1: dict1 value from the Huffman table, as a raw int
:param dict2: dict2 value from the Huffman table, as a raw int
:param dict_data_length_in_bytes: how many bytes the dictX values are supposed to expand to (see comments)
:param rank: the size, in bits of the Huffman code
:param huffman_code: the Huffman code, as an ASCII string of bits , e.g. '00011001'
"""
self.dict1_value = bytearray(dict1.to_bytes(dict_data_length_in_bytes, "big"))
self.dict2_value = bytearray(dict2.to_bytes(dict_data_length_in_bytes, "big"))
self.dictionary_data_length = dict_data_length_in_bytes
self.huffman_code_rank = rank
self.huffman_code = huffman_code
self.huffman_code_bits = bitstring.ConstBitArray("0b" + str(huffman_code))
def __str__(self):
return (str(binascii.hexlify(self.dict1_value)) + " || " + str(binascii.hexlify(self.dict2_value)) + " <== "
+ str(self.huffman_code_bits))
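# Illustrative sketch: constructing a HuffmanTableEntry directly, to show how the dictionary
# values are left-padded to dict_data_length_in_bytes bytes. The values used here are made up
# for demonstration and do not come from a real Huffman table.
def _example_huffman_table_entry():
    entry = HuffmanTableEntry(dict1=0x02, dict2=0x1A2B, dict_data_length_in_bytes=3,
                              rank=8, huffman_code='00011001')
    assert entry.dict1_value == bytearray(b'\x00\x00\x02')   # 0x02 padded to 3 bytes
    assert entry.dict2_value == bytearray(b'\x00\x1a\x2b')   # 0x1A2B padded to 3 bytes
    assert len(entry.huffman_code_bits) == entry.huffman_code_rank == 8
    return entry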
def process_huffman_table_file_line(line: str):
"""
:param line: a string representing a single line (text terminated by any of the recognized line-terminators
like \r,\n, etc), from an ASCII version of the Huffman code table
:return: (none)
Modifies:
1. global integers 'longest_huffman_code_in_bits' and 'shortest_huffman_code_in_bits'
2. global dictionary 'huffman_table'
The current ASCII versions of the tables have "columns" (whitespace separated values) in the following order:
Uncompressed Sequence (dict 1) [0]
Ref1 [1]
Uncompressed Sequence (dict 2) [2]
Ref2 [3]
Length [4]
Depth [5]
Huffman Code [6]
"""
global longest_huffman_code_in_bits, shortest_huffman_code_in_bits, huffman_table
tokens = line.split()
try:
dict1 = int(tokens[0], 16)
dict2 = int(tokens[2], 16)
dlen = int(tokens[4], 10)
rank = int(tokens[5], 10)
huffman_code = tokens[6]
if rank > longest_huffman_code_in_bits:
longest_huffman_code_in_bits = rank
if rank < shortest_huffman_code_in_bits:
shortest_huffman_code_in_bits = rank
except:
# non-fatal usually; most likely it hit some initial descriptive lines at the top of the file, or comments, etc
# return out to the table file reader to move to the next line, ignoring this one
return
table_entry = HuffmanTableEntry(dict1, dict2, dlen, rank, huffman_code)
huffman_table[table_entry.huffman_code_bits] = table_entry
def read_ascii_huffman_table_from_file(file_fqpn: str):
"""
Simple wrapper on process_huffman_table_file_line() that takes a file path as a string parameter
:param file_fqpn: the fully-qualified path and filename of the Huffman table (in the ASCII format described in
comments of process_huffman_table_file_line() )
:return: None
Side effect / Modifies: modifies the global Huffman table dictionary var
"""
f = open(file_fqpn, "r")
for line in f:
process_huffman_table_file_line(line)
f.close()
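# Illustrative usage sketch (assumes an ASCII Huffman table file in the format described in
# process_huffman_table_file_line(); the path below is hypothetical).
def _example_load_huffman_table(table_file='huffman_table.txt'):
    clear_huffman_table_data()
    read_ascii_huffman_table_from_file(table_file)
    table = get_huffman_table()
    print('loaded %d codes, code lengths range from %d to %d bits'
          % (len(table), get_shortest_huffman_code_in_nbits(),
             get_longest_huffman_code_in_nbits()))
    return table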
class LUTentry:
"""
A container for the LookUp Table entries, parsed from either the standalone LUT files (that accompany "standalone"
binaries) or from the LUT "sections" in single CSME deployment binaries (e.g. flashable versions in official
releases)
Each object of this class represents an entry in the lookup table, which itself represents a "Page" (or plainly,
a section) of a compressed CSME code object...and thus represents a page of the resulting, decompressed
code object.
see the comments in the __init__ constructor about the meaning and relevance of the member vars
"""
def __init__(self, offset, dictionary_selector):
"""
:param offset: the offset into the compressed file where this particular compressed page starts.
THIS IS RELATIVE TO EITHER...
(1) the start of the standalone file, if it was a standalone LUT file
(2) the end of the LUT section in a "big, single" CSME binary
:param dictionary_selector: either 0 or 1, where 0 selects "dict1" value in the HuffmanTableEntry,
and 1 selects "dict2". (see class HuffmanTableEntry for explanation of the two values)
'size' isn't really important, and is calculated (and retroactively assigned to this object) when the
LUT is parsed (e.g. in read_lut() ). It is mostly for sanity checking and meaningful output when
decoding (and debugging)
"""
self.offset = offset
self.dictionary_selector = dictionary_selector
self.size = 0
def __str__(self):
return ('Offset= 0x{0:08x}'.format(self.offset) + " , size= {0}".format('?' if self.size == 0 else self.size)
+ " , dictionary selector= " + str(self.dictionary_selector))
def read_lut_file(file_fqpn: str, reverse_byte_ordering=False):
"""
:param file_fqpn: the fully-qualified-path-and-name (fqpn) of the file containing the (page) lookup table
:param reverse_byte_ordering: treat LUT entries in reverse byte order than normal (Needed due to quirks in certain
formats of the | |
'found %d parents' % len(parents_ids))
return '', nx.node_link_data(tg)
def GetSynonymTermId(con, cur, synonym):
"""
Get the term id for which the synonym is
input:
con,cur
synonym : str
the synonym to search for
output:
err : str
Error message or empty string if ok
termid : int
the id of the term for the synonym is defined
"""
synonym = synonym.lower()
try:
cur.execute('SELECT idOntology FROM OntologySynonymTable WHERE synonym=%s', [synonym])
if cur.rowcount == 0:
debug(2, 'synonym %s not found' % synonym)
return 'synonym %s not found' % synonym, -1
termid = cur.fetchone()[0]
debug(2, 'for synonym %s termid is %d' % (synonym, termid))
return '', termid
except psycopg2.DatabaseError as e:
debug(7, "error %s enountered in GetSynonymTermId" % e)
return "error %s enountered in GetSynonymTermId" % e, -2
def GetSynonymTerm(con, cur, synonym):
"""
Get the term for which the synonym is
input:
con,cur
synonym : str
the synonym to search for
output:
err : str
Error message or empty string if ok
term : str
the term for the synonym is defined
"""
err, termid = GetSynonymTermId(con, cur, synonym)
if err:
debug(2, 'ontology term %s is not a synonym' % synonym)
return err, str(termid)
err, term = dbidval.GetDescriptionFromId(con, cur, 'ontologyTable', termid)
if err:
debug(3, 'ontology term not found for termid %d (synonym %s)' % (termid, synonym))
return err, term
return '', term
def GetTermAnnotations(con, cur, terms, use_synonyms=True, get_children=True):
'''
Get details for all annotations which contain the ontology term "term" as a parent of (or exact) annotation detail
Parameters
----------
con, cur
terms : str or list of str
the ontology term to search. if list, retrieve only annotations containing all the terms in the list
use_synonyms : bool (optional)
True (default) to look in synonyms table if term is not found. False to look only for exact term
get_children: bool, optional
True to get annotations of all term children (i.e. get also annotations with feces when you search for excreta)
Returns
-------
err: str
empty str ('') if ok, otherwise error returned
annotations : list of dict
list of annotation details per annotation which contains the term
'''
debug(1, 'GetTermAnnotations for ontology terms %s, use_synonyms=%s, get_children=%s' % (terms, use_synonyms, get_children))
dbannotations._prepare_queries(con, cur)
terms = tolist(terms)
annotation_ids = None
if len(terms) == 0:
return 'No terms in query', []
for cterm in terms:
cterm = cterm.lower()
# do we need to look for the term also as a parent of annotation terms?
if get_children:
cur.execute('SELECT idannotation FROM AnnotationParentsTable WHERE ontology=%s', [cterm])
# if term not found in parents table, check if it is an id (i.e. envo:00000043 for wetland)
if cur.rowcount == 0:
err, ctermids = get_term_ids(con, cur, cterm)
if err:
debug(2, err)
return err, []
if len(ctermids) > 0:
# found it so it is an id. get also all the children
terms_from_id = set()
for cctermid in ctermids:
cur.execute('SELECT description FROM OntologyTable WHERE id=%s LIMIT 1', [cctermid])
if cur.rowcount == 0:
debug(6, 'description not found for termid %d' % cctermid)
continue
ccterm = cur.fetchone()[0]
terms_from_id.add(ccterm)
cur.execute('SELECT idannotation FROM AnnotationParentsTable WHERE ontology IN %s', [tuple(list(terms_from_id))])
else:
if use_synonyms:
err, cterm = GetSynonymTerm(con, cur, cterm)
if err:
debug(3, 'no annotations or synonyms for term %s' % cterm)
return '', []
debug(1, 'found original ontology term %s' % cterm)
cur.execute('SELECT idannotation FROM AnnotationParentsTable WHERE ontology=%s', [cterm])
else:
debug(3, 'no annotations for term %s' % cterm)
return '', []
else:
err, ctermids = get_term_ids(con, cur, cterm)
if err:
debug(2, err)
return err, []
if len(ctermids) == 0:
if use_synonyms:
err, ctermid = GetSynonymTermId(con, cur, cterm)
ctermids = [ctermid]
if err:
msg = 'ontology term not found for %s' % cterm
debug(3, msg)
return msg, []
debug(2, 'converted synonym %s to termid %s' % (cterm, ctermids))
cur.execute('SELECT idannotation FROM AnnotationListTable WHERE idontology IN %s', [tuple(ctermids)])
res = cur.fetchall()
cannotation_ids = set()
for cres in res:
cannotation_ids.add(cres[0])
if annotation_ids is None:
annotation_ids = cannotation_ids
annotation_ids = annotation_ids.intersection(cannotation_ids)
annotations = []
for cannotation_id in annotation_ids:
err, cdetails = dbannotations.GetAnnotationsFromID(con, cur, cannotation_id)
if err:
debug(6, err)
continue
annotations.append(cdetails)
debug(3, 'found %d annotations' % len(annotations))
return '', annotations
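# Illustrative usage sketch (not from the original module): querying annotations for a term.
# `con` and `cur` are assumed to be an open psycopg2 connection/cursor to the dbBact database;
# 'feces' is just an example term.
def _example_term_annotations(con, cur):
    err, annotations = GetTermAnnotations(con, cur, 'feces', use_synonyms=True, get_children=True)
    if err:
        debug(5, 'annotation query failed: %s' % err)
        return []
    debug(3, 'got %d annotations for the example term' % len(annotations))
    return annotations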
def get_term_onto_id_annotations(con, cur, terms, get_children=True):
'''Get annotations for onotology ids (i.e. GAZ:00002476'''
pass
def get_term_counts(con, cur, terms, term_types=('single'), ignore_lower=False):
'''Get the number of annotations and experiments containing each term in terms.
NOTE: terms can be also term pairs (term1+term2)
Parameters
----------
terms: list of str
list of terms to look for. can be term pairs
TODO: ignore_lower: bool, optional. TODO
True to look for total counts combining "all"/"high" and "lower" counts
Returns
-------
dict of {term(str): {'total_annotations': int, 'total_experiments': int}}
'''
debug(1, 'get_term_counts for %d terms' % len(terms))
terms = list(set(terms))
term_info = {}
for cterm in terms:
cur.execute('SELECT TotalExperiments, TotalAnnotations from TermInfoTable WHERE term=%s LIMIT 1', [cterm])
# cur.execute('SELECT seqCount, annotationCount, exp_count from OntologyTable WHERE description=%s LIMIT 1', [cterm])
if cur.rowcount == 0:
debug(2, 'Term %s not found in ontology table' % cterm)
continue
res = cur.fetchone()
term_info[cterm] = {}
# term_info[cterm]['total_sequences'] = 0
term_info[cterm]['total_experiments'] = res[0]
term_info[cterm]['total_annotations'] = res[1]
debug(1, 'found info for %d terms' % len(term_info))
return term_info
def get_annotations_term_counts(con, cur, annotations):
'''
Get information about all ontology terms in annotations
Parameters
----------
con, cur
annotations : list of annotations
The list of annotations to get the terms for (see dbannotations.GetAnnotationsFromID() )
Returns
-------
term_info : dict of {str: dict}:
Key is the ontology term.
Value is a dict of pairs:
'total_annotations' : int
The total number of annotations where this ontology term is a predecessor
'total_sequences' : int
The total number of sequences in annotations where this ontology term is a predecessor
'''
debug(1, 'get_annotations_term_counts for %d annotations' % len(annotations))
terms = []
for cannotation in annotations:
for cdet in cannotation['details']:
ctype = cdet[0]
cterm = cdet[1]
if ctype == 'low':
cterm = '-' + cterm
terms.append(cterm)
terms = list(set(terms))
# return GetTermCounts(con, cur, terms)
return get_term_counts(con, cur, terms)
def get_ontology_terms_list(con, cur, min_term_id=None, ontologyid=None):
'''
Get list of all ontology terms
Parameters
----------
con, cur
min_term_id: int or None, optional
if int, get only terms with dbbactid > min_term_id (for fast syncing)
if None, get all terms
ontologyid: int or None, optional
if not None, get only terms belonging to the ontology with this id
if None, get terms from all ontologies
Returns
-------
terms : dict of {term(str): id(int)}
The list of ontology terms from table OntologyTable
'''
# get rid of duplicate terms
debug(4, 'GetListOfOntologies')
if min_term_id is None:
min_term_id = 0
if ontologyid is None:
cur.execute('SELECT id, description, term_id from ontologyTable WHERE id>%s', [min_term_id])
else:
cur.execute('SELECT ontologytreestructuretable.ontologyid, ontologytable.description, ontologytable.term_id FROM ontologytreestructuretable INNER JOIN ontologytable ON ontologytable.id=ontologytreestructuretable.ontologyid WHERE OntologyNameID=%s', [ontologyid])
debug(3, 'found %d terms' % cur.rowcount)
res = cur.fetchall()
all_ontologies = {}
all_ontology_ids = {}
for cres in res:
if cres[0] > min_term_id:
all_ontologies[cres[1]] = cres[0]
contologyid = cres[2]
if contologyid == '':
contologyid = 'dbbact:%d' % cres[0]
all_ontology_ids[cres[0]] = contologyid
return all_ontologies, all_ontology_ids
def GetListOfSynonym(con, cur):
'''
Get list of synonyms
Parameters
----------
con, cur
Returns
-------
terms : list of str
The full list of synonyms
'''
# get rid of duplicate terms
debug(1, 'GetListOfSynonym')
all_synonym = []
cur.execute('SELECT distinct synonym from ontologysynonymtable')
if cur.rowcount == 0:
debug(1, 'ontologysynonymtable list is empty')
return
res = cur.fetchall()
all_synonym = []
for cres in res:
all_synonym.append(cres[0])
return all_synonym
def GetIDs(con, cur, ontList):
"""
Get ids of list of ontologies
Parameters
----------
con,cur : database connection and cursor
ontList: list of str
the terms to get the ids for
Returns
-------
errmsg : str
"" if ok, error msg if error encountered
termids : list of int or None
list of the new ids or None if an error was encountered
"""
ontids = []
try:
sqlStr = "SELECT id from ontologyTable WHERE (description='%s')" % ontList[0]
idx = 1
while idx < len(ontList):
sqlStr += " OR (description='%s')" % ontList[idx]
idx = idx + 1
cur.execute(sqlStr)
if cur.rowcount == | |
"""The script used to train the model."""
import os
import sys
import getopt
import numpy as np
import models as m
from tqdm import tqdm
from keras.optimizers import Adam
from util.data import TwoImageIterator
from util.util import MyDict, log, save_weights, load_weights, load_losses, create_expt_dir
def print_help():
"""Print how to use this script."""
print "Usage:"
print "train.py [--help] [--nfd] [--nfatob] [--alpha] [--epochs] [batch_size] [--samples_per_batch] " \
"[--save_every] [--lr] [--beta_1] [--continue_train] [--log_dir]" \
"[--expt_name] [--base_dir] [--train_dir] [--val_dir] [--train_samples] " \
"[--val_samples] [--load_to_memory] [--a_ch] [--b_ch] [--is_a_binary] " \
"[--is_b_binary] [--is_a_grayscale] [--is_b_grayscale] [--target_size] " \
"[--rotation_range] [--height_shift_range] [--width_shift_range] " \
"[--horizontal_flip] [--vertical_flip] [--zoom_range]"
print "--nfd: Number of filters of the first layer of the discriminator."
print "--nfatob: Number of filters of the first layer of the AtoB model."
print "--alpha: The weight of the reconstruction loss of the AtoB model."
print "--epochs: Number of epochs to train the model."
print "--batch_size: the size of the batch to train."
print "--samples_per_batch: The number of samples to train each model on each iteration."
print "--save_every: Save results every 'save_every' epochs on the log folder."
print "--lr: The learning rate to train the models."
print "--beta_1: The beta_1 value of the Adam optimizer."
print "--continue_train: If it should continue the training from the last checkpoint."
print "--log_dir: The directory to place the logs."
print "--expt_name: The name of the experiment. Saves the logs into a folder with this name."
print "--base_dir: Directory that contains the data."
print "--train_dir: Directory inside base_dir that contains training data. " \
"Must contain an A and B folder."
print "--val_dir: Directory inside base_dir that contains validation data. " \
"Must contain an A and B folder."
print "--train_samples: The number of training samples. Set -1 to be the same as training examples."
print "--val_samples: The number of validation samples. Set -1 to be the same as validation examples."
print "--load_to_memory: Whether to load images into memory or read from the filesystem."
print "--a_ch: Number of channels of images A."
print "--b_ch: Number of channels of images B."
print "--is_a_binary: If A is binary, its values will be 0 or 1. A threshold of 0.5 is used."
print "--is_b_binary: If B is binary, the last layer of the atob model is " \
"followed by a sigmoid. Otherwise, a tanh is used. When the sigmoid is " \
"used, the binary crossentropy loss is used. For the tanh, the L1 is used. Also, " \
"its values will be 0 or 1. A threshold of 0.5 is used."
print "--is_a_grayscale: If A images should only have one channel. If they are color images, " \
"they are converted to grayscale."
print "--is_b_grayscale: If B images should only have one channel. If they are color images, " \
"they are converted to grayscale."
print "--target_size: The size of the images loaded by the iterator. THIS DOES NOT CHANGE THE MODELS. " \
"If you want to accept images of different sizes you will need to update the models.py files."
print "--rotation_range: The range to rotate training images for dataset augmentation."
print "--height_shift_range: Percentage of height of the image to translate for dataset augmentation."
print "--width_shift_range: Percentage of width of the image to translate for dataset augmentation."
print "--horizontal_flip: If true performs random horizontal flips on the train set."
print "--vertical_flip: If true performs random vertical flips on the train set."
print "--zoom_range: Defines the range to scale the image for dataset augmentation."
def discriminator_generator(it, atob, dout_size):
"""
Generate batches for the discriminator.
Parameters:
- it: an iterator that returns a pair of images;
- atob: the generator network that maps an image to another representation;
- dout_size: the size of the output of the discriminator.
"""
while True:
# Fake pair
a_fake, _ = next(it)
b_fake = atob.predict(a_fake)
# Real pair
a_real, b_real = next(it)
# Concatenate the channels. Images become (ch_a + ch_b) x 256 x 256
fake = np.concatenate((a_fake, b_fake), axis=1)
real = np.concatenate((a_real, b_real), axis=1)
# Concatenate fake and real pairs into a single batch
batch_x = np.concatenate((fake, real), axis=0)
# 1 is fake, 0 is real
batch_y = np.ones((batch_x.shape[0], 1) + dout_size)
batch_y[fake.shape[0]:] = 0
yield batch_x, batch_y
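# Illustrative sketch of the labelling convention used above, without Keras: fake (generated)
# pairs are labelled 1 and real pairs 0, with one label map per sample of shape (1,) + dout_size.
# The array shapes are made up for demonstration; the second tensor of each pair stands in for
# the B image that atob.predict() would produce.
def _example_discriminator_labels():
    dout_size = (30, 30)
    a_fake = np.zeros((2, 1, 256, 256))
    a_real = np.zeros((2, 1, 256, 256))
    fake = np.concatenate((a_fake, a_fake), axis=1)   # channel-concatenated fake A/B pair
    real = np.concatenate((a_real, a_real), axis=1)   # channel-concatenated real A/B pair
    batch_x = np.concatenate((fake, real), axis=0)
    batch_y = np.ones((batch_x.shape[0], 1) + dout_size)
    batch_y[fake.shape[0]:] = 0                        # second half of the batch is real
    return batch_x, batch_y                            # shapes: (4, 2, 256, 256) and (4, 1, 30, 30)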
def train_discriminator(d, it, samples_per_batch=20):
"""Train the discriminator network."""
return d.fit_generator(it, samples_per_epoch=samples_per_batch*2, nb_epoch=1, verbose=False)
def pix2pix_generator(it, dout_size):
"""
Generate data for the generator network.
Parameters:
- it: an iterator that returns a pair of images;
- dout_size: the size of the output of the discriminator.
"""
for a, b in it:
# 1 is fake, 0 is real
y = np.zeros((a.shape[0], 1) + dout_size)
yield [a, b], y
def train_pix2pix(pix2pix, it, samples_per_batch=20):
"""Train the generator network."""
return pix2pix.fit_generator(it, nb_epoch=1, samples_per_epoch=samples_per_batch, verbose=False)
def evaluate(models, generators, losses, val_samples=192):
"""Evaluate and display the losses of the models."""
# Get necessary generators
d_gen = generators.d_gen_val
p2p_gen = generators.p2p_gen_val
# Get necessary models
d = models.d
p2p = models.p2p
# Evaluate
d_loss = d.evaluate_generator(d_gen, val_samples)
p2p_loss = p2p.evaluate_generator(p2p_gen, val_samples)
losses['d_val'].append(d_loss)
losses['p2p_val'].append(p2p_loss)
print ''
print ('Train Losses of (D={0} / P2P={1});\n'
'Validation Losses of (D={2} / P2P={3})'.format(
losses['d'][-1], losses['p2p'][-1], d_loss, p2p_loss))
return d_loss, p2p_loss
def model_creation(d, atob, params):
"""Create all the necessary models."""
opt = Adam(lr=params.lr, beta_1=params.beta_1)
p2p = m.pix2pix(atob, d, params.a_ch, params.b_ch, alpha=params.alpha, opt=opt,
is_a_binary=params.is_a_binary, is_b_binary=params.is_b_binary)
models = MyDict({
'atob': atob,
'd': d,
'p2p': p2p,
})
return models
def generators_creation(it_train, it_val, models, dout_size):
"""Create all the necessary data generators."""
# Discriminator data generators
d_gen = discriminator_generator(it_train, models.atob, dout_size)
d_gen_val = discriminator_generator(it_val, models.atob, dout_size)
# Workaround to make tensorflow work. When atob.predict is called the first
# time it calls tf.get_default_graph. This should be done on the main thread
# and not inside fit_generator. See https://github.com/fchollet/keras/issues/2397
next(d_gen)
# pix2pix data generators
p2p_gen = pix2pix_generator(it_train, dout_size)
p2p_gen_val = pix2pix_generator(it_val, dout_size)
generators = MyDict({
'd_gen': d_gen,
'd_gen_val': d_gen_val,
'p2p_gen': p2p_gen,
'p2p_gen_val': p2p_gen_val,
})
return generators
def train_iteration(models, generators, losses, params):
"""Perform a train iteration."""
# Get necessary generators
d_gen = generators.d_gen
p2p_gen = generators.p2p_gen
# Get necessary models
d = models.d
p2p = models.p2p
# Update the discriminator
dhist = train_discriminator(d, d_gen, samples_per_batch=params.samples_per_batch)
losses['d'].extend(dhist.history['loss'])
# Update the generator
p2phist = train_pix2pix(p2p, p2p_gen, samples_per_batch=params.samples_per_batch)
losses['p2p'].extend(p2phist.history['loss'])
def train(models, it_train, it_val, params):
"""
Train the model.
Parameters:
- models: a dictionary with all the models.
- atob: a model that goes from A to B.
- d: the discriminator model.
- p2p: a Pix2Pix model.
- it_train: the iterator of the training data.
- it_val: the iterator of the validation data.
- params: parameters of the training procedure.
- dout_size: the size of the output of the discriminator model.
"""
# Create the experiment folder and save the parameters
create_expt_dir(params)
# Get the output shape of the discriminator
dout_size = models.d.output_shape[-2:]
# Define the data generators
generators = generators_creation(it_train, it_val, models, dout_size)
# Define the number of samples to use on each training epoch
train_samples = params.train_samples
if params.train_samples == -1:
train_samples = it_train.N
batches_per_epoch = train_samples // params.samples_per_batch
# Define the number of samples to use for validation
val_samples = params.val_samples
if val_samples == -1:
val_samples = it_val.N
losses = {'p2p': [], 'd': [], 'p2p_val': [], 'd_val': []}
if params.continue_train:
losses = load_losses(log_dir=params.log_dir, expt_name=params.expt_name)
for e in tqdm(range(params.epochs)):
for b in range(batches_per_epoch):
train_iteration(models, generators, losses, params)
# Evaluate how the models is doing on the validation set.
evaluate(models, generators, losses, val_samples=val_samples)
if (e + 1) % params.save_every == 0:
save_weights(models, log_dir=params.log_dir, expt_name=params.expt_name)
log(losses, models.atob, it_val, log_dir=params.log_dir, expt_name=params.expt_name,
is_a_binary=params.is_a_binary, is_b_binary=params.is_b_binary)
if __name__ == '__main__':
a = sys.argv[1:]
params = MyDict({
# Model
'nfd': 32, # Number of filters of the first layer of the discriminator
'nfatob': 64, # Number of filters of the first layer of the AtoB model
'alpha': 100, # The weight of the reconstruction loss of the atob model
# Train
'epochs': 100, # Number of epochs to train the model
'batch_size': 1, # The batch size
'samples_per_batch': 20, # The number of samples to train each model on each iteration
'save_every': | |
}\n]\n }\n }\n}\n' %query_string
else:
data = '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "pde" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n' %query_string
elif type == 'fde':
if anscestors:
data = '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "fde" } },\n { "terms" : { "ancestors.ilx" : ["ilx_0115066" , "ilx_0103210", "ilx_0115072", "ilx_0115070"] } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n' %query_string
else:
data = '\n{\n "query": {\n "bool": {\n "must" : [\n { "term" : { "type" : "fde" } },\n { "multi_match" : {\n "query": "%s", \n "fields": [ "label", "definition" ] \n } }\n]\n }\n }\n}\n' %query_string
else:
print("ERROR: Valid types for SciCrunch query are 'cde','pde', or 'fde'. You set type: %s " %type)
print("ERROR: in function Utils.py/QuerySciCrunchElasticSearch")
exit(1)
response = requests.post('https://scicrunch.org/api/1/elastic-ilx/interlex/term/_search#', headers=headers, params=params, data=data)
return json.loads(response.text)
def GetNIDMTermsFromSciCrunch(query_string,type='cde', ancestor=True):
'''
Helper function which issues an elastic search query to SciCrunch via the QuerySciCrunchElasticSearch function and
returns a terms dictionary with label, definition, and preferred URL for each hit
:param query_string: arbitrary string to search for terms
:param type: should be 'cde', 'pde', or 'fde'
:param ancestor: Boolean flag to tell the Interlex elastic search to use ancestors (i.e. tagged terms) or not
:return: dictionary with keys 'ilx','label','definition','preferred_url'
'''
json_data = QuerySciCrunchElasticSearch(query_string,type,ancestor)
results={}
#check if query was successful
if json_data['timed_out'] != True:
#example printing term label, definition, and preferred URL
for term in json_data['hits']['hits']:
#find preferred URL
results[term['_source']['ilx']] = {}
for items in term['_source']['existing_ids']:
if items['preferred']=='1':
results[term['_source']['ilx']]['preferred_url']=items['iri']
results[term['_source']['ilx']]['label'] = term['_source']['label']
results[term['_source']['ilx']]['definition'] = term['_source']['definition']
return results
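# Illustrative usage sketch (the query string is an arbitrary example; results depend on the
# SciCrunch/InterLex API configuration handled earlier in this module).
def _example_scicrunch_query(query_string='age'):
    results = GetNIDMTermsFromSciCrunch(query_string, type='cde', ancestor=True)
    for ilx_id, term in results.items():
        print('%s: %s -- %s (%s)' % (ilx_id, term['label'], term['definition'],
                                     term.get('preferred_url', '')))
    return results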
def InitializeInterlexRemote():
'''
This function initializes a connection to InterLex for use in adding personal data elements. To use InterLex,
you must set the environment variable INTERLEX_API_KEY to your API key.
:return: interlex object
'''
endpoint = "https://scicrunch.org/api/1/"
# beta endpoint for testing
# endpoint = "https://beta.scicrunch.org/api/1/"
InterLexRemote = oq.plugin.get('InterLex')
# changed per tgbugs changes to InterLexRemote no longer taking api_key as a parameter
# set INTERLEX_API_KEY environment variable instead...ilx_cli = InterLexRemote(api_key=key, apiEndpoint=endpoint)
ilx_cli = InterLexRemote(apiEndpoint=endpoint)
try:
ilx_cli.setup(instrumented=oq.OntTerm)
except Exception as e:
print("error initializing InterLex connection...")
print("you will not be able to add new personal data elements.")
print("Did you put your scicrunch API key in an environment variable INTERLEX_API_KEY?")
return ilx_cli
def AddPDEToInterlex(ilx_obj,label,definition,units, min, max, datatype, categorymappings=None):
'''
This function will add the PDE (personal data elements) to Interlex using the Interlex ontquery API.
:param interlex_obj: Object created using ontquery.plugin.get() function (see: https://github.com/tgbugs/ontquery)
:param label: Label for term entity being created
:param definition: Definition for term entity being created
:param comment: Comments to help understand the object
:return: response from Interlex
'''
# Interlex uris for predicates, tmp_ prefix for the beta endpoint, ilx_ for production
prefix='ilx'
# for beta testing
# prefix = 'tmp'
uri_datatype = 'http://uri.interlex.org/base/' + prefix + '_0382131'
uri_units = 'http://uri.interlex.org/base/' + prefix + '_0382130'
uri_min = 'http://uri.interlex.org/base/' + prefix + '_0382133'
uri_max = 'http://uri.interlex.org/base/' + prefix + '_0382132'
uri_category = 'http://uri.interlex.org/base/' + prefix + '_0382129'
# return ilx_obj.add_pde(label=label, definition=definition, comment=comment, type='pde')
if categorymappings is not None:
tmp = ilx_obj.add_pde(label=label, definition=definition, predicates = {
uri_datatype : datatype,
uri_units : units,
uri_min : min,
uri_max : max,
uri_category : categorymappings
})
else:
tmp = ilx_obj.add_pde(label=label, definition=definition, predicates = {
uri_datatype : datatype,
uri_units : units,
uri_min : min,
uri_max : max
})
return tmp
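# Illustrative usage sketch (the term content is made up; a valid INTERLEX_API_KEY environment
# variable is required, as noted in InitializeInterlexRemote()).
def _example_add_pde():
    ilx_obj = InitializeInterlexRemote()
    response = AddPDEToInterlex(ilx_obj,
                                label='age at scan',
                                definition='Age of the participant at the scan session',
                                units='years', min='0', max='120',
                                datatype='float', categorymappings=None)
    return response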
def load_nidm_owl_files():
'''
This function loads the NIDM-experiment related OWL files and imports, creates a union graph and returns it.
:return: graph of all OWL files and imports from PyNIDM experiment
'''
#load nidm-experiment.owl file and all imports directly
#create empty graph
union_graph = Graph()
#check if there is an internet connection, if so load directly from https://github.com/incf-nidash/nidm-specs/tree/master/nidm/nidm-experiment/terms and
# https://github.com/incf-nidash/nidm-specs/tree/master/nidm/nidm-experiment/imports
basepath=os.path.dirname(os.path.dirname(__file__))
terms_path = os.path.join(basepath,"terms")
imports_path=os.path.join(basepath,"terms","imports")
imports=[
"crypto_import.ttl",
"dc_import.ttl",
"iao_import.ttl",
"nfo_import.ttl",
"nlx_import.ttl",
"obi_import.ttl",
"ontoneurolog_instruments_import.ttl",
"pato_import.ttl",
"prv_import.ttl",
"qibo_import.ttl",
"sio_import.ttl",
"stato_import.ttl"
]
#load each import
for resource in imports:
temp_graph = Graph()
try:
temp_graph.parse(os.path.join(imports_path,resource),format="turtle")
union_graph=union_graph+temp_graph
except Exception:
logging.info("Error opening %s import file..continuing" %os.path.join(imports_path,resource))
continue
owls=[
"https://raw.githubusercontent.com/incf-nidash/nidm-specs/master/nidm/nidm-experiment/terms/nidm-experiment.owl"
]
#load each owl file
for resource in owls:
temp_graph = Graph()
try:
temp_graph.parse(location=resource, format="turtle")
union_graph=union_graph+temp_graph
except Exception:
logging.info("Error opening %s owl file..continuing" %os.path.join(terms_path,resource))
continue
return union_graph
def fuzzy_match_terms_from_graph(graph,query_string):
'''
This function performs a fuzzy match of the rdfs:label of each owl:Class in the supplied graph against the query string.
Ideally this should really be searching the OWL file when it's ready.
:param graph: rdflib graph of the NIDM-Experiment OWL files and imports (e.g. from load_nidm_owl_files)
:param query_string: string to query
:return: dictionary keyed by term URI whose value is a dict with 'score', 'label', 'url', and 'definition'
'''
match_scores={}
#search for labels rdfs:label and obo:IAO_0000115 (description) for each rdf:type owl:Class
for term in graph.subjects(predicate=RDF.type, object=Constants.OWL["Class"]):
for label in graph.objects(subject=term, predicate=Constants.RDFS['label']):
match_scores[term] = {}
match_scores[term]['score'] = fuzz.token_sort_ratio(query_string,label)
match_scores[term]['label'] = label
match_scores[term]['url'] = term
match_scores[term]['definition']=None
for description in graph.objects(subject=term,predicate=Constants.OBO["IAO_0000115"]):
match_scores[term]['definition'] =description
#for term in owl_graph.classes():
# print(term.get_properties())
return match_scores
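# Illustrative usage sketch: load the NIDM-Experiment OWL graph and keep only fuzzy matches above
# a score threshold. The query string and threshold are arbitrary examples.
def _example_fuzzy_term_search(query_string='participant', min_score=70):
    union_graph = load_nidm_owl_files()
    matches = fuzzy_match_terms_from_graph(union_graph, query_string)
    return {url: info for url, info in matches.items() if info['score'] >= min_score}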
def authenticate_github(authed=None,credentials=None):
'''
This function will handle GitHub authentication with or without a token. If the parameter authed is defined, the
function will check whether it's an active/valid authentication object. If not, and a username/token is supplied, then
an authentication object will be created. If a username + token is not supplied, then the user will be prompted to input
the information.
:param authed: Optional authenticaion object from PyGithub
:param credentials: Optional GitHub credential list username,password or username,token
:return: GitHub authentication object or None if unsuccessful
'''
print("GitHub authentication...")
indx=1
maxtry=5
while indx < maxtry:
if (len(credentials)>= 2):
#authenticate with token
g=Github(credentials[0],credentials[1])
elif (len(credentials)==1):
pw = getpass.getpass("Please enter your GitHub password: ")
g=Github(credentials[0],pw)
else:
username = input("Please enter your GitHub user name: ")
pw = getpass.getpass("Please enter your GitHub password: ")
#try logging into GitHub
g=Github(username,pw)
authed=g.get_user()
try:
#check we're logged in by checking that we can access the public repos list
repo=authed.public_repos
logging.info("Github authentication successful")
new_term=False
break
except GithubException as e:
logging.info("error logging into your github account, please try again...")
indx=indx+1
if (indx == maxtry):
logging.critical("GitHub authentication failed. Check your username / password / token and try again")
return None
else:
return authed
def getSubjIDColumn(column_to_terms,df):
'''
This function returns column number from CSV file that matches subjid. If it can't automatically
detect it based on the Constants.NIDM_SUBJECTID term (i.e. if the user selected a different term
to annotate subject ID then it asks the user.
:param column_to_terms: json variable->term mapping dictionary made by nidm.experiment.Utils.map_variables_to_terms
:param df: dataframe of CSV file with tabular data to convert to RDF.
:return: subject ID column number in CSV dataframe
'''
#look at column_to_terms dictionary for NIDM URL for subject id (Constants.NIDM_SUBJECTID)
id_field=None
for key, value in column_to_terms.items():
if Constants.NIDM_SUBJECTID._str == column_to_terms[key]['label']:
id_field=key
#if we couldn't find a subject ID field in column_to_terms, ask user
if id_field is None:
option=1
for column in df.columns:
print("%d: %s" %(option,column))
option=option+1
selection=input("Please select the subject ID field from the list above: ")
id_field=df.columns[int(selection)-1]
return id_field
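# Illustrative usage sketch (the file name and annotation dictionary are hypothetical): find the
# subject-ID column of a tabular CSV before converting it to RDF.
def _example_subject_id_column(csv_file='assessment.csv', column_to_terms=None):
    import pandas as pd
    df = pd.read_csv(csv_file)
    id_field = getSubjIDColumn(column_to_terms or {}, df)
    print('using column "%s" as the subject identifier' % id_field)
    return id_field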
def map_variables_to_terms(df,directory, assessment_name, output_file=None,json_file=None,owl_file='nidm'):
'''
:param df: data frame with first row containing variable names
:param assessment_name: Name for the assessment to use in storing JSON mapping dictionary keys
:param json_file: optional json document with variable names as keys and minimal fields "definition","label","url"
:param output_file: output filename to save variable-> term mappings
:param directory: if output_file parameter is set to None then use this directory to store default JSON mapping file
if doing variable->term mappings
    :return: dictionary mapping variable names (i.e. columns) to terms
'''
# minimum match score for fuzzy matching NIDM terms
min_match_score = 50
# dictionary mapping column name to preferred term
column_to_terms = {}
    # flag for whether a new term has been defined; on first occurrence ask for namespace URL
new_term = True
    # check if user supplied a JSON file and we already know a mapping for this
to
(x - 2*z : y : z))
The diagonalization is only defined in characteristic different
from 2:
::
sage: Conic(GF(2), [1,1,1,1,1,0]).diagonalization()
Traceback (most recent call last):
...
ValueError: The conic self (= Projective Conic Curve over Finite Field of size 2 defined by x^2 + x*y + y^2 + x*z + y*z) has no symmetric matrix because the base field has characteristic 2
An example over a global function field:
::
sage: K = FractionField(PolynomialRing(GF(7), 't'))
sage: (t,) = K.gens()
sage: C = Conic(K, [t/2,0, 1, 2, 0, 3])
sage: C.diagonalization()
(Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2,
Scheme morphism:
From: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2
To: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + x*z + 3*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + 6/t*z : y : z),
Scheme morphism:
From: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + x*z + 3*z^2
To: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + 1/t*z : y : z))
"""
if names is None:
names = self.defining_polynomial().parent().variable_names()
from .constructor import Conic
D, T = self.diagonal_matrix()
con = Conic(D, names = names)
return con, con.hom(T, self), self.hom(T.inverse(), con)
def gens(self):
r"""
Returns the generators of the coordinate ring of ``self``.
EXAMPLES:
::
sage: P.<x,y,z> = QQ[]
sage: c = Conic(x^2+y^2+z^2)
sage: c.gens()
(xbar, ybar, zbar)
sage: c.defining_polynomial()(c.gens())
0
The function ``gens()`` is required for the following construction:
::
sage: C.<a,b,c> = Conic(GF(3), [1, 1, 1])
sage: C
Projective Conic Curve over Finite Field of size 3 defined by a^2 + b^2 + c^2
"""
return self.coordinate_ring().gens()
def has_rational_point(self, point = False,
algorithm = 'default', read_cache = True):
r"""
Returns True if and only if the conic ``self``
has a point over its base field `B`.
If ``point`` is True, then returns a second output, which is
a rational point if one exists.
Points are cached whenever they are found. Cached information
is used if and only if ``read_cache`` is True.
ALGORITHM:
The parameter ``algorithm`` specifies the algorithm
to be used:
- ``'default'`` -- If the base field is real or complex,
use an elementary native Sage implementation.
- ``'magma'`` (requires Magma to be installed) --
delegates the task to the Magma computer algebra
system.
EXAMPLES:
sage: Conic(RR, [1, 1, 1]).has_rational_point()
False
sage: Conic(CC, [1, 1, 1]).has_rational_point()
True
sage: Conic(RR, [1, 2, -3]).has_rational_point(point = True)
(True, (1.73205080756888 : 0.000000000000000 : 1.00000000000000))
Conics over polynomial rings can be solved internally::
sage: R.<t> = QQ[]
sage: C = Conic([-2,t^2+1,t^2-1])
sage: C.has_rational_point()
True
And they can also be solved with Magma::
sage: C.has_rational_point(algorithm='magma') # optional - magma
True
sage: C.has_rational_point(algorithm='magma', point=True) # optional - magma
(True, (t : 1 : 1))
sage: D = Conic([t,1,t^2])
sage: D.has_rational_point(algorithm='magma') # optional - magma
False
TESTS:
One of the following fields comes with an embedding into the complex
numbers, one does not. Check that they are both handled correctly by
the Magma interface. ::
sage: K.<i> = QuadraticField(-1)
sage: K.coerce_embedding()
Generic morphism:
From: Number Field in i with defining polynomial x^2 + 1
To: Complex Lazy Field
Defn: i -> 1*I
sage: Conic(K, [1,1,1]).rational_point(algorithm='magma') # optional - magma
(-i : 1 : 0)
sage: x = QQ['x'].gen()
sage: L.<i> = NumberField(x^2+1, embedding=None)
sage: Conic(L, [1,1,1]).rational_point(algorithm='magma') # optional - magma
(-i : 1 : 0)
sage: L == K
False
"""
if read_cache:
if self._rational_point is not None:
if point:
return True, self._rational_point
else:
return True
B = self.base_ring()
if algorithm == 'magma':
from sage.interfaces.magma import magma
M = magma(self)
b = M.HasRationalPoint().sage()
if not point:
return b
if not b:
return False, None
M_pt = M.HasRationalPoint(nvals=2)[1]
# Various attempts will be made to convert `pt` to
# a Sage object. The end result will always be checked
# by self.point().
pt = [M_pt[1], M_pt[2], M_pt[3]]
# The first attempt is to use sequences. This is efficient and
# succeeds in cases where the Magma interface fails to convert
# number field elements, because embeddings between number fields
# may be lost on conversion to and from Magma.
# This should deal with all absolute number fields.
try:
return True, self.point([B(c.Eltseq().sage()) for c in pt])
except TypeError:
pass
# The second attempt tries to split Magma elements into
# numerators and denominators first. This is necessary
# for the field of rational functions, because (at the moment of
# writing) fraction field elements are not converted automatically
# from Magma to Sage.
try:
return True, self.point( \
[B(c.Numerator().sage()/c.Denominator().sage()) for c in pt])
except (TypeError, NameError):
pass
# Finally, let the Magma interface handle conversion.
try:
return True, self.point([B(c.sage()) for c in pt])
except (TypeError, NameError):
pass
raise NotImplementedError("No correct conversion implemented for converting the Magma point %s on %s to a correct Sage point on self (=%s)" % (M_pt, M, self))
if algorithm != 'default':
raise ValueError("Unknown algorithm: %s" % algorithm)
if is_ComplexField(B):
if point:
[_,_,_,d,e,f] = self._coefficients
if d == 0:
return True, self.point([0,1,0])
return True, self.point([0, ((e**2-4*d*f).sqrt()-e)/(2*d), 1],
check = False)
return True
if is_RealField(B):
D, T = self.diagonal_matrix()
[a, b, c] = [D[0,0], D[1,1], D[2,2]]
if a == 0:
ret = True, self.point(T*vector([1,0,0]), check = False)
elif a*c <= 0:
ret = True, self.point(T*vector([(-c/a).sqrt(),0,1]),
check = False)
elif b == 0:
ret = True, self.point(T*vector([0,1,0]), check = False)
elif b*c <= 0:
                ret = True, self.point(T*vector([0,(-c/b).sqrt(),1]),
check = False)
else:
ret = False, None
if point:
return ret
return ret[0]
raise NotImplementedError("has_rational_point not implemented for " \
"conics over base field %s" % B)
def has_singular_point(self, point = False):
r"""
Return True if and only if the conic ``self`` has a rational
singular point.
If ``point`` is True, then also return a rational singular
point (or ``None`` if no such point exists).
EXAMPLES:
::
sage: c = Conic(QQ, [1,0,1]); c
Projective Conic Curve over Rational Field defined by x^2 + z^2
sage: c.has_singular_point(point = True)
(True, (0 : 1 : 0))
sage: P.<x,y,z> = GF(7)[]
sage: e = Conic((x+y+z)*(x-y+2*z)); e
Projective Conic Curve over Finite Field of size 7 defined by x^2 - y^2 + 3*x*z + y*z + 2*z^2
sage: e.has_singular_point(point = True)
(True, (2 : 4 : 1))
sage: Conic([1, 1, -1]).has_singular_point()
False
sage: Conic([1, 1, -1]).has_singular_point(point = True)
(False, None)
``has_singular_point`` is not implemented over all fields
of characteristic `2`. It is implemented over finite fields.
::
sage: F.<a> = FiniteField(8)
sage: Conic([a, a+1, 1]).has_singular_point(point = True)
(True, (a + 1 : 0 : 1))
sage: P.<t> = GF(2)[]
sage: C = Conic(P, [t,t,1]); C
Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 2 (using NTL) defined by t*x^2 + t*y^2 + z^2
sage: C.has_singular_point(point = False)
Traceback (most recent call last):
...
NotImplementedError: Sorry, find singular point on conics not implemented over all fields of characteristic 2.
"""
if not point:
ret = self.has_singular_point(point = True)
return ret[0]
B = self.base_ring()
if B.characteristic() == 2:
[a,b,c,d,e,f] = self.coefficients()
if b == 0 and c == 0 and e == 0:
for i in range(3):
if [a, d, f][i] == 0:
return True, self.point(vector(B, {2:0, i:1}))
            if
effective time of this record.
:type OoStartTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OoTimestamp: The date and time this record was collected or calculated.
:type OoTimestamp: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceObjectID: The internal NetMRI identifier of the parent network object (the user).
:type ParentDeviceObjectID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device object objects as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device object object methods. The listed methods will be called on each device object object returned and included in the output. Available methods are: parent_device_object, child_device_object, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_object, child_device_object, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceObjectObjectID
:param sort: The data field(s) to use for sorting the output. Default is DeviceObjectObjectID. Valid values are DeviceObjectObjectID, DeviceID, DataSourceID, ParentDeviceObjectID, ChildDeviceObjectID, OoFirstSeenTime, OoStartTime, OoEndTime, OoTimestamp, OoChangedCols, OoProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceObjectObject. Valid values are DeviceObjectObjectID, DeviceID, DataSourceID, ParentDeviceObjectID, ChildDeviceObjectID, OoFirstSeenTime, OoStartTime, OoEndTime, OoTimestamp, OoChangedCols, OoProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device object objects, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ChildDeviceObjectID, DataSourceID, DeviceID, DeviceObjectObjectID, OoChangedCols, OoEndTime, OoFirstSeenTime, OoProvisionData, OoStartTime, OoTimestamp, ParentDeviceObjectID.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_object_objects: An array of the DeviceObjectObject objects that match the specified input criteria.
:rtype device_object_objects: Array of DeviceObjectObject
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
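    # Hedged usage sketch (ours): 'broker' below stands for an already constructed
    # instance of this class; the field values are placeholders.
    #
    #     results = broker.search(DeviceID=[1234], limit=100, sort=["OoTimestamp"])
    #     for ooo in results:
    #         print(ooo.DeviceObjectObjectID, ooo.ParentDeviceObjectID)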
def find(self, **kwargs):
"""Lists the available device object objects matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: ChildDeviceObjectID, DataSourceID, DeviceID, DeviceObjectObjectID, OoChangedCols, OoEndTime, OoFirstSeenTime, OoProvisionData, OoStartTime, OoTimestamp, ParentDeviceObjectID.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ChildDeviceObjectID: The operator to apply to the field ChildDeviceObjectID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ChildDeviceObjectID: The internal NetMRI identifier of the child network object (the used service). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ChildDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ChildDeviceObjectID: If op_ChildDeviceObjectID is specified, the field named in this input will be compared to the value in ChildDeviceObjectID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ChildDeviceObjectID must be specified if op_ChildDeviceObjectID is specified.
:type val_f_ChildDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ChildDeviceObjectID: If op_ChildDeviceObjectID is specified, this value will be compared to the value in ChildDeviceObjectID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ChildDeviceObjectID must be specified if op_ChildDeviceObjectID is specified.
:type val_c_ChildDeviceObjectID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
#!/usr/bin/env python
#
# test_ldp_isis_topo1.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2020 by <NAME>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
test_ldp_isis_topo1.py:
+---------+ +---------+
| | | |
| CE1 | | CE2 |
| | | |
+---------+ +---------+
ce1-eth0 (172.16.1.1/24)| |ce2-eth0 (172.16.1.2/24)
| |
| |
rt1-eth0| |rt2-eth0
+---------+ 10.0.1.0/24 +---------+
| |rt1-eth1 | |
| RT1 +----------------+ RT2 |
| 1.1.1.1 | rt2-eth1| 2.2.2.2 |
| | | |
+---------+ +---------+
rt1-eth2| |rt2-eth2
| |
| |
10.0.2.0/24| +---------+ |10.0.3.0/24
| | | |
| | RT3 | |
+--------+ 3.3.3.3 +-------+
rt3-eth2| |rt3-eth1
+---------+
|rt3-eth0
|
|
ce3-eth0 (172.16.1.3/24)|
+---------+
| |
| CE3 |
| |
+---------+
"""
import os
import re
import sys
import pytest
import json
from functools import partial
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
pytestmark = [pytest.mark.isisd, pytest.mark.ldpd]
def build_topo(tgen):
"Build function"
#
# Define FRR Routers
#
for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
tgen.add_router(router)
#
# Define connections
#
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["ce1"])
switch.add_link(tgen.gears["r1"])
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["ce2"])
switch.add_link(tgen.gears["r2"])
switch = tgen.add_switch("s3")
switch.add_link(tgen.gears["ce3"])
switch.add_link(tgen.gears["r3"])
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
switch = tgen.add_switch("s5")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r3"])
switch = tgen.add_switch("s6")
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
# Don't start isisd and ldpd in the CE nodes
if router.name[0] == "r":
router.load_config(
TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
router.load_config(
TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
)
tgen.start_router()
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
# This function tears down the whole topology.
tgen.stop_topology()
def router_compare_json_output(rname, command, reference):
"Compare router JSON output"
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
# Run test function until we get an result.
test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=320, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
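# Note (ours): router_compare_json_output() retries via run_and_expect() for up to
# count * wait seconds (320 * 0.5 s = 160 s here), so the tests below fail with a
# readable JSON diff instead of an immediate mismatch while the network converges.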
def test_isis_convergence():
logger.info("Test: check ISIS adjacencies")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname,
"show yang operational-data /frr-interface:lib isisd",
"show_yang_interface_isis_adjacencies.ref",
)
def test_rib():
logger.info("Test: verify RIB")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(rname, "show ip route json", "show_ip_route.ref")
def test_ldp_adjacencies():
logger.info("Test: verify LDP adjacencies")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp discovery json", "show_ldp_discovery.ref"
)
def test_ldp_neighbors():
logger.info("Test: verify LDP neighbors")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref"
)
def test_ldp_bindings():
logger.info("Test: verify LDP bindings")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp binding json", "show_ldp_binding.ref"
)
def test_ldp_pwid_bindings():
logger.info("Test: verify LDP PW-ID bindings")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref"
)
def test_ldp_pseudowires():
logger.info("Test: verify LDP pseudowires")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
)
def test_ldp_igp_sync():
logger.info("Test: verify LDP igp-sync")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref"
)
def test_isis_ldp_sync():
logger.info("Test: verify ISIS igp-sync")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_interface_detail(
rname, "show_isis_interface_detail.ref"
)
assert result, "ISIS interface did not converge on {}:\n{}".format(rname, diff)
def test_r1_eth1_shutdown():
logger.info("Test: verify behaviour after r1-eth1 is shutdown")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # Shut down r1-r2 link
tgen = get_topogen()
tgen.gears["r1"].peer_link_enable("r1-eth1", False)
topotest.sleep(5, "Waiting for the network to reconverge")
# check if the pseudowire is still up (using an alternate path for nexthop resolution)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
)
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname,
"show mpls ldp igp-sync json",
"show_ldp_igp_sync_r1_eth1_shutdown.ref",
)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_ldp_sync(
rname, "show_isis_ldp_sync_r1_eth1_shutdown.ref"
)
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_interface_detail(
rname, "show_isis_interface_detail_r1_eth1_shutdown.ref"
)
assert result, "ISIS interface did not converge on {}:\n{}".format(rname, diff)
def test_r1_eth1_no_shutdown():
logger.info("Test: verify behaviour after r1-eth1 is no shutdown")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # Run no shutdown on r1-eth1 interface
tgen = get_topogen()
tgen.gears["r1"].peer_link_enable("r1-eth1", True)
topotest.sleep(5, "Waiting for the network to reconverge")
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref"
)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_interface_detail(
rname, "show_isis_interface_detail.ref"
)
assert result, "ISIS interface did not converge on {}:\n{}".format(rname, diff)
def test_r2_eth1_shutdown():
logger.info("Test: verify behaviour after r2-eth1 is shutdown")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # Shut down r1-r2 link
tgen = get_topogen()
tgen.gears["r2"].peer_link_enable("r2-eth1", False)
topotest.sleep(5, "Waiting for the network to reconverge")
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname,
"show mpls ldp igp-sync json",
"show_ldp_igp_sync_r1_eth1_shutdown.ref",
)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_ldp_sync(
rname, "show_isis_ldp_sync_r2_eth1_shutdown.ref"
)
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_interface_detail(
rname, "show_isis_interface_detail_r2_eth1_shutdown.ref"
)
assert result, "ISIS interface did not converge on {}:\n{}".format(rname, diff)
def test_r2_eth1_no_shutdown():
logger.info("Test: verify behaviour after r2-eth1 is no shutdown")
tgen = get_topogen()
# Skip if previous fatal error condition is raised
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
    # Run no shutdown on r2-eth1 interface
tgen = get_topogen()
tgen.gears["r2"].peer_link_enable("r2-eth1", True)
topotest.sleep(5, "Waiting for the network to reconverge")
for rname in ["r1", "r2", "r3"]:
router_compare_json_output(
rname, "show mpls ldp igp-sync json", "show_ldp_igp_sync.ref"
)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_ldp_sync(rname, "show_isis_ldp_sync.ref")
assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
for rname in ["r1", "r2", "r3"]:
(result, diff) = validate_show_isis_interface_detail(
rname, "show_isis_interface_detail.ref"
)
assert result, "ISIS interface did not converge on {}:\n{}".format(rname, diff)
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
#
# Auxiliary functions
#
def parse_show_isis_ldp_sync(lines, rname):
"""
Parse the output of 'show isis mpls ldp sync' into a Python dict.
"""
    interfaces = {}
], # 0x60 grave
[ 283, 8, 4, 4, 0, -4 ], # 0x61 a
[ 287, 8, 5, 4, 0, -5 ], # 0x62 b
[ 292, 8, 4, 4, 0, -4 ], # 0x63 c
[ 296, 8, 5, 4, 0, -5 ], # 0x64 d
[ 301, 8, 4, 4, 0, -4 ], # 0x65 e
[ 305, 8, 5, 4, 0, -5 ], # 0x66 f
[ 310, 8, 5, 4, 0, -4 ], # 0x67 g
[ 315, 8, 5, 4, 0, -5 ], # 0x68 h
[ 320, 8, 5, 2, 0, -5 ], # 0x69 i
[ 325, 8, 6, 4, 0, -5 ], # 0x6A j
[ 331, 8, 5, 4, 0, -5 ], # 0x6B k
[ 336, 8, 5, 4, 0, -5 ], # 0x6C l
[ 341, 8, 4, 4, 0, -4 ], # 0x6D m
[ 345, 8, 4, 4, 0, -4 ], # 0x6E n
[ 349, 8, 4, 4, 0, -4 ], # 0x6F o
[ 353, 8, 5, 4, 0, -4 ], # 0x70 p
[ 358, 8, 5, 4, 0, -4 ], # 0x71 q
[ 363, 8, 4, 4, 0, -4 ], # 0x72 r
[ 367, 8, 4, 4, 0, -4 ], # 0x73 s
[ 371, 8, 5, 4, 0, -5 ], # 0x74 t
[ 376, 8, 4, 4, 0, -4 ], # 0x75 u
[ 380, 8, 4, 4, 0, -4 ], # 0x76 v
[ 384, 8, 4, 4, 0, -4 ], # 0x77 w
[ 388, 8, 4, 4, 0, -4 ], # 0x78 x
[ 392, 8, 5, 4, 0, -4 ], # 0x79 y
[ 397, 8, 4, 4, 0, -4 ], # 0x7A z
[ 401, 8, 5, 4, 0, -5 ], # 0x7B braceleft
[ 406, 8, 5, 2, 0, -5 ], # 0x7C bar
[ 411, 8, 5, 4, 0, -5 ], # 0x7D braceright
[ 416, 8, 2, 4, 0, -5 ], # 0x7E asciitilde
]
TomThumbGlyphsX = [
[ 418, 8, 5, 2, 0, -5 ], # 0xA1 exclamdown
[ 423, 8, 5, 4, 0, -5 ], # 0xA2 cent
[ 428, 8, 5, 4, 0, -5 ], # 0xA3 sterling
[ 433, 8, 5, 4, 0, -5 ], # 0xA4 currency
[ 438, 8, 5, 4, 0, -5 ], # 0xA5 yen
[ 443, 8, 5, 2, 0, -5 ], # 0xA6 brokenbar
[ 448, 8, 5, 4, 0, -5 ], # 0xA7 section
[ 453, 8, 1, 4, 0, -5 ], # 0xA8 dieresis
[ 454, 8, 3, 4, 0, -5 ], # 0xA9 copyright
[ 457, 8, 5, 4, 0, -5 ], # 0xAA ordfeminine
[ 462, 8, 3, 3, 0, -5 ], # 0xAB guillemotleft
[ 465, 8, 2, 4, 0, -4 ], # 0xAC logicalnot
[ 467, 8, 1, 3, 0, -3 ], # 0xAD softhyphen
[ 468, 8, 3, 4, 0, -5 ], # 0xAE registered
[ 471, 8, 1, 4, 0, -5 ], # 0xAF macron
[ 472, 8, 3, 4, 0, -5 ], # 0xB0 degree
[ 475, 8, 5, 4, 0, -5 ], # 0xB1 plusminus
[ 480, 8, 3, 4, 0, -5 ], # 0xB2 twosuperior
[ 483, 8, 3, 4, 0, -5 ], # 0xB3 threesuperior
[ 486, 8, 2, 3, 0, -5 ], # 0xB4 acute
[ 488, 8, 5, 4, 0, -5 ], # 0xB5 mu
[ 493, 8, 5, 4, 0, -5 ], # 0xB6 paragraph
[ 498, 8, 3, 4, 0, -4 ], # 0xB7 periodcentered
[ 501, 8, 3, 4, 0, -3 ], # 0xB8 cedilla
[ 504, 8, 3, 2, 0, -5 ], # 0xB9 onesuperior
[ 507, 8, 5, 4, 0, -5 ], # 0xBA ordmasculine
[ 512, 8, 3, 3, 0, -5 ], # 0xBB guillemotright
[ 515, 8, 5, 4, 0, -5 ], # 0xBC onequarter
[ 520, 8, 5, 4, 0, -5 ], # 0xBD onehalf
[ 525, 8, 5, 4, 0, -5 ], # 0xBE threequarters
[ 530, 8, 5, 4, 0, -5 ], # 0xBF questiondown
[ 535, 8, 5, 4, 0, -5 ], # 0xC0 Agrave
[ 540, 8, 5, 4, 0, -5 ], # 0xC1 Aacute
[ 545, 8, 5, 4, 0, -5 ], # 0xC2 Acircumflex
[ 550, 8, 5, 4, 0, -5 ], # 0xC3 Atilde
[ 555, 8, 5, 4, 0, -5 ], # 0xC4 Adieresis
[ 560, 8, 5, 4, 0, -5 ], # 0xC5 Aring
[ 565, 8, 5, 4, 0, -5 ], # 0xC6 AE
[ 570, 8, 6, 4, 0, -5 ], # 0xC7 Ccedilla
[ 576, 8, 5, 4, 0, -5 ], # 0xC8 Egrave
[ 581, 8, 5, 4, 0, -5 ], # 0xC9 Eacute
[ 586, 8, 5, 4, 0, -5 ], # 0xCA Ecircumflex
[ 591, 8, 5, 4, 0, -5 ], # 0xCB Edieresis
[ 596, 8, 5, 4, 0, -5 ], # 0xCC Igrave
[ 601, 8, 5, 4, 0, -5 ], # 0xCD Iacute
[ 606, 8, 5, 4, 0, -5 ], # 0xCE Icircumflex
[ 611, 8, 5, 4, 0, -5 ], # 0xCF Idieresis
[ 616, 8, 5, 4, 0, -5 ], # 0xD0 Eth
[ 621, 8, 5, 4, 0, -5 ], # 0xD1 Ntilde
[ 626, 8, 5, 4, 0, -5 ], # 0xD2 Ograve
[ 631, 8, 5, 4, 0, -5 ], # 0xD3 Oacute
[ 636, 8, 5, 4, 0, -5 ], # 0xD4 Ocircumflex
[ 641, 8, 5, 4, 0, -5 ], # 0xD5 Otilde
[ 646, 8, 5, 4, 0, -5 ], # 0xD6 Odieresis
[ 651, 8, 3, 4, 0, -4 ], # 0xD7 multiply
[ 654, 8, 5, 4, 0, -5 ], # 0xD8 Oslash
[ 659, 8, 5, 4, 0, -5 ], # 0xD9 Ugrave
[ 664, 8, 5, 4, 0, -5 ], # 0xDA Uacute
[ 669, 8, 5, 4, 0, -5 ], # 0xDB Ucircumflex
[ 674, 8, 5, 4, 0, -5 ], # 0xDC Udieresis
[ 679, 8, 5, 4, 0, -5 ], # 0xDD Yacute
[ 684, 8, 5, 4, 0, -5 ], # 0xDE Thorn
[ 689, 8, 6, 4, 0, -5 ], # 0xDF germandbls
[ 695, 8, 5, 4, 0, -5 ], # 0xE0 agrave
[ 700, 8, 5, 4, 0, -5 ], # 0xE1 aacute
[ 705, 8, 5, 4, 0, -5 ], # 0xE2 acircumflex
[ 710, 8, 5, 4, 0, -5 ], # 0xE3 atilde
[ 715, 8, 5, 4, 0, -5 ], # 0xE4 adieresis
[ 720, 8, 5, 4, 0, -5 ], # 0xE5 aring
[ 725, 8, 4, 4, 0, -4 ], # 0xE6 ae
[ 729, 8, 5, 4, 0, -4 ], # 0xE7 ccedilla
[ 734, 8, 5, 4, 0, -5 ], # 0xE8 egrave
[ 739, 8, 5, 4, 0, -5 ], # 0xE9 eacute
[ 744, 8, 5, 4, 0, -5 ], # 0xEA ecircumflex
[ 749, 8, 5, 4, 0, -5 ], # 0xEB edieresis
[ 754, 8, 5, 3, 0, -5 ], # 0xEC igrave
[ 759, 8, 5, 3, 0, -5 ], # 0xED iacute
[ 764, 8, 5, 4, 0, -5 ], # 0xEE icircumflex
[ 769, 8, 5, 4, 0, -5 ], # 0xEF idieresis
[ 774, 8, 5, 4, 0, -5 ], # 0xF0 eth
[ 779, 8, 5, 4, 0, -5 ], # 0xF1 ntilde
[ 784, 8, 5, 4, 0, -5 ], # 0xF2 ograve
[ 789, 8, 5, 4, 0, -5 ], # 0xF3 oacute
[ 794, 8, 5, 4, 0, -5 ], # 0xF4 ocircumflex
[ 799, 8, 5, 4, 0, -5 ], # 0xF5 otilde
  [ 804,   8,   5,   4,    0,   -5 ],
#! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Taxon management.
Operational taxonomic unit concepts are essentially names for taxa in the "real
world". Operational taxonomic unit concepts are organized into taxonomic
namespaces. A taxonomic namespace is a self-contained and
functionally-complete collection of mutually-distinct operational taxonomic
unit concepts, and provides the semantic context in which operational taxonomic
units from across various data sources of different formats and provenances can
be related through correct interpretation of their taxon labels.
* Operational taxonomic units are modeled by a |Taxon| object.
* Taxonomic namespaces, in which operational taxonomic units are organized,
are modeled by a |TaxonNamespace| object.
* A |TaxonNamespace| manages a collection of |Taxon| objects, where each
object represents a distinct operational taxonomic unit concept within
the taxonomic namespace represented by that |TaxonNamespace| object.
* Each |Taxon| object can belong to one and only one |TaxonNamespace|:
|Taxon| objects are not shared across |TaxonNamespace| objects.
* Each |Taxon| object has an attribute, ``label``, whose (string) value
is the name of the operational taxon unit concept that it represents.
* Different |Taxon| objects represent different operational taxonomic
unit concepts, even if they have the same label value.
* All client objects (`TaxonNamespaceAssociated` objects) that reference
the same |TaxonNamespace| reference the same "universe" or domain of
operational taxonomic unit concepts.
* Operational taxonomic units from across different data sources are mapped
to distinct |Taxon| objects within a particular |TaxonNamespace| based on
matching the string values of labels of the |Taxon| object.
* A particular taxonomic unit concept in one data source will be
  correctly related to the same taxonomic unit concept (i.e., the same
  |Taxon| object) in another data source only if they have both
  been parsed with reference to the same taxonomic namespace (i.e., the
  same |TaxonNamespace| has been used).
* A |TaxonNamespace| assigns an "accession index" to every |Taxon| object
added to it. This is a stable and unique number within the context of any
given |TaxonNamespace| object (though a |Taxon| object may have different
accession indexes in different |TaxonNamespace| objects if it
  belongs to multiple namespaces). This number will be used to
calculate the "split bitmask" hash of the trivial split or external edge
subtending the node to which this |Taxon| object is assigned on a tree.
The concept of a "split bitmask" hash is fundamental to DendroPy's tree
operations. The split bitmask is a hash that uniquely identifies every
split on a tree. It is calculated by OR'ing the split bitmask of all the
child splits of the given split. Terminal edges, of course, do not have
child edges, and their split bitmask is given by the accession index of
the |Taxon| object at their head or target nodes.
"""
import warnings
import collections
import copy
from dendropy.utility.textprocessing import StringIO
from dendropy.datamodel import basemodel
from dendropy.utility import bitprocessing
from dendropy.utility import textprocessing
from dendropy.utility import container
from dendropy.utility import error
from dendropy.utility import deprecate
##############################################################################
## Helper functions
def taxon_set_deprecation_warning(stacklevel=6):
deprecate.dendropy_deprecation_warning(
message="Deprecated since DendroPy 4: 'taxon_set' will no longer be supported in future releases; use 'taxon_namespace' instead",
stacklevel=stacklevel)
def process_kwargs_dict_for_taxon_namespace(kwargs_dict, default=None):
if "taxon_set" in kwargs_dict:
if "taxon_namespace" in kwargs_dict:
raise TypeError("Cannot specify both 'taxon_namespace' and 'taxon_set' (legacy support) simultaneously")
else:
taxon_set_deprecation_warning()
return kwargs_dict.pop("taxon_set", default)
else:
return kwargs_dict.pop("taxon_namespace", default)
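# Illustrative sketch (ours): the helper above prefers 'taxon_namespace' but still
# honours the legacy 'taxon_set' spelling, emitting a deprecation warning and
# popping whichever key was supplied.
#
#     kwargs = {"taxon_namespace": tns, "label": "tree1"}
#     ns = process_kwargs_dict_for_taxon_namespace(kwargs)
#     # ns is tns; kwargs now only contains 'label'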
def process_attached_taxon_namespace_directives(kwargs_dict):
"""
The following idioms are supported:
`taxon_namespace=tns`
Attach ``tns`` as the bound (single, unified) taxonomic namespace
reference for all objects.
`attached_taxon_namespace=tns`
Attach ``tns`` as the bound (single, unified) taxonomic namespace
reference for all objects.
`attach_taxon_namespace=True, attached_taxon_namespace=tns`
Attach ``tns`` as the bound (single, unified) taxonomic namespace
reference for all objects.
`attach_taxon_namespace=True`
Create a *new* |TaxonNamespace| and set it as the bound
(single, unified) taxonomic namespace reference for all
objects.
"""
deprecated_kw = [
"taxon_namespace",
"attach_taxon_namespace",
"attached_taxon_namespace",
"taxon_set",
"attach_taxon_set",
"attached_taxon_set",
]
for kw in deprecated_kw:
if kw in kwargs_dict:
raise TypeError("'{}' is no longer supported as a keyword argument. Use the instance method 'attach_taxon_namespace()' of the data object instead to bind the object to a single TaxonNamespace".format(kw))
taxon_namespace = None
attach_taxon_namespace = False
if ( ("taxon_set" in kwargs_dict or "taxon_namespace" in kwargs_dict)
and ("attached_taxon_set" in kwargs_dict or "attached_taxon_namespace" in kwargs_dict)
):
raise TypeError("Cannot specify both 'taxon_namespace'/'taxon_set' and 'attached_taxon_namespace'/'attached_taxon_set' together")
if "taxon_set" in kwargs_dict:
if "taxon_namespace" in kwargs_dict:
raise TypeError("Both 'taxon_namespace' and 'taxon_set' cannot be specified simultaneously: use 'taxon_namespace' ('taxon_set' is only supported for legacy reasons)")
kwargs_dict["taxon_namespace"] = kwargs_dict["taxon_set"]
del kwargs_dict["taxon_set"]
if "attached_taxon_set" in kwargs_dict:
if "attached_taxon_namespace" in kwargs_dict:
raise TypeError("Both 'attached_taxon_namespace' and 'attached_taxon_set' cannot be specified simultaneously: use 'attached_taxon_namespace' ('attached_taxon_set' is only supported for legacy reasons)")
kwargs_dict["attached_taxon_namespace"] = kwargs_dict["attached_taxon_set"]
del kwargs_dict["attached_taxon_set"]
if "taxon_namespace" in kwargs_dict:
taxon_namespace = kwargs_dict.pop("taxon_namespace", None)
attach_taxon_namespace = True
elif "attached_taxon_namespace" in kwargs_dict:
taxon_namespace = kwargs_dict["attached_taxon_namespace"]
if not isinstance(taxon_namespace, TaxonNamespace):
raise TypeError("'attached_taxon_namespace' argument must be an instance of TaxonNamespace")
attach_taxon_namespace = True
else:
taxon_namespace = None
attach_taxon_namespace = kwargs_dict.get("attach_taxon_namespace", False)
kwargs_dict.pop("taxon_namespace", None)
kwargs_dict.pop("attach_taxon_namespace", None)
kwargs_dict.pop("attached_taxon_namespace", None)
return (attach_taxon_namespace, taxon_namespace)
##############################################################################
## TaxonNamespaceAssociated
class TaxonNamespaceAssociated(object):
"""
Provides infrastructure for the maintenance of references to taxa.
"""
# def initialize_taxon_namespace_from_kwargs_dict(self, kwargs_dict):
# tns = process_kwargs_dict_for_taxon_namespace(kwargs_dict)
# if tns is None:
# self.taxon_namespace = TaxonNamespace()
# else:
# self.taxon_namespace = tns
# return self.taxon_namespace
def __init__(self, taxon_namespace=None):
if taxon_namespace is None:
self._taxon_namespace = TaxonNamespace()
else:
self._taxon_namespace = taxon_namespace
self.automigrate_taxon_namespace_on_assignment = False
def _get_taxon_namespace(self):
return self._taxon_namespace
def _set_taxon_namespace(self, tns):
if self.automigrate_taxon_namespace_on_assignment:
if tns is not None and self._taxon_namespace is not tns:
self.migrate_taxon_namespace(tns)
elif tns is None:
self._taxon_namespace = None
else:
self._taxon_namespace = tns
def _del_taxon_namespace(self):
raise TypeError("Cannot delete 'taxon_namespace' attribute")
taxon_namespace = property(_get_taxon_namespace, _set_taxon_namespace, _del_taxon_namespace)
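    # Illustrative note (ours): when automigrate_taxon_namespace_on_assignment is
    # True, `obj.taxon_namespace = other_tns` calls migrate_taxon_namespace(other_tns)
    # rather than silently rebinding the reference; with the default (False) the
    # assignment is a plain attribute swap.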
def _get_taxon_set(self):
# raise NotImplementedError("'taxon_set' is no longer supported: use 'taxon_namespace' instead")
taxon_set_deprecation_warning()
return self.taxon_namespace
def _set_taxon_set(self, v):
# raise NotImplementedError("'taxon_set' is no longer supported: use 'taxon_namespace' instead")
taxon_set_deprecation_warning()
self.taxon_namespace = v
def _del_taxon_set(self):
# raise NotImplementedError("'taxon_set' is no longer supported: use 'taxon_namespace' instead")
taxon_set_deprecation_warning()
taxon_set = property(_get_taxon_set, _set_taxon_set, _del_taxon_set)
def migrate_taxon_namespace(self,
taxon_namespace,
unify_taxa_by_label=True,
taxon_mapping_memo=None):
"""
Move this object and all members to a new operational taxonomic unit
concept namespace scope.
Current :attr:`self.taxon_namespace` value will be replaced with value
given in ``taxon_namespace`` if this is not |None|, or a new
|TaxonNamespace| object. Following this,
``reconstruct_taxon_namespace()`` will be called: each distinct
|Taxon| object associated with ``self`` or members of ``self`` that
        is not already in ``taxon_namespace`` will be replaced with a new
|Taxon| object that will be created with the same label and
added to :attr:`self.taxon_namespace`. Calling this method results in
the object (and all its member objects) being associated with a new,
independent taxon namespace.
Label mapping case sensitivity follows the
``self.taxon_namespace.is_case_sensitive`` setting. If
|False| and ``unify_taxa_by_label`` is also |True|, then the
establishment of correspondence between |Taxon| objects in the
        old and new namespaces will be based on case-insensitive matching of
labels. E.g., if there are four |Taxon| objects with labels
'Foo', 'Foo', 'FOO', and 'FoO' in the old namespace, then all objects
that reference these will reference a single new |Taxon| object
in the new namespace (with a label some existing casing variant of
        'foo'). If it is |True| and ``unify_taxa_by_label`` is |True|, then
        |Taxon| objects whose labels are identical except in case will be
        considered distinct.
Parameters
----------
taxon_namespace : |TaxonNamespace|
The |TaxonNamespace| into the scope of which this object
will be moved.
unify_taxa_by_label : boolean, optional
If |True|, then references to distinct |Taxon| objects with
identical labels in the current namespace will be replaced with a
reference to a single |Taxon| object in the new namespace.
If |False|: references to distinct |Taxon| objects will
remain distinct, even if the labels are the same.
taxon_mapping_memo : dictionary
Similar to ``memo`` of deepcopy, this is a dictionary that maps
|Taxon| objects in the old namespace to corresponding
            |Taxon| objects in the new namespace. Mostly for internal
            use when migrating complex data to a
To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_images_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param str type: Filter on specific Image type
:param int page:
:param int per_page:
:return: ImageListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'type', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_images" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_images`")
collection_formats = {}
resource_path = '/products/{product_id}/images'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'type' in params:
query_params['type'] = params['type']
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ImageListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_screenshots(self, product_id, **kwargs):
"""
Get product screenshots
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_screenshots(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:return: list[Image]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_screenshots_with_http_info(product_id, **kwargs)
else:
(data) = self.get_product_screenshots_with_http_info(product_id, **kwargs)
return data
def get_product_screenshots_with_http_info(self, product_id, **kwargs):
"""
Get product screenshots
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_screenshots_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:return: list[Image]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_screenshots" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_screenshots`")
collection_formats = {}
resource_path = '/products/{product_id}/screenshots'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Image]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_subscription(self, product_id, **kwargs):
"""
Get Subscription linked to a Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_subscription(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:return: SubscriptionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_subscription_with_http_info(product_id, **kwargs)
else:
(data) = self.get_product_subscription_with_http_info(product_id, **kwargs)
return data
def get_product_subscription_with_http_info(self, product_id, **kwargs):
"""
Get Subscription linked to a Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_subscription_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:return: SubscriptionResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_subscription" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_subscription`")
collection_formats = {}
resource_path = '/products/{product_id}/subscription'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_products(self, **kwargs):
"""
Get products list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_products(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
        :param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:param str customer_id: Check if customer can see this product, if it's group is not restricted
:return: ProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_products_with_http_info(**kwargs)
else:
(data) = self.get_products_with_http_info(**kwargs)
return data
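    # Hedged usage sketch (ours): 'api' below stands for a configured instance of
    # this class; the filter string mirrors the format documented above.
    #
    #     filters = 'name[value]=starter&name[operator]=contains'
    #     products = api.get_products(filters=filters, per_page=20, sort_by='id')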
def get_products_with_http_info(self, **kwargs):
"""
Get products list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_products_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
        :param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:param str customer_id: Check if customer can see this product, if it's group is not restricted
:return: ProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page', 'sort_by', 'sort_direction', 'ip', 'features', 'filters', 'customer_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_products" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/products'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
duration = datetime.timedelta(seconds=int(time.time() - timestart))
# duration = datetime.time(0, 0, int(time.time() - timestart)).strftime("%H:%M:%S")
self.logger.info("Run completed. Duration: %s" % str(duration))
# Clear logging handlers.
self.logger.removeHandler(fh)
if ch is not None:
self.logger.removeHandler(ch)
@abstractmethod
def _run(self, progress_bar: bool = True, **kwargs: Any) -> None:
raise NotImplementedError()
@property
@abstractmethod
def completed(self) -> bool:
raise NotImplementedError()
@property
@abstractmethod
def dataframe(self) -> DataFrame:
raise NotImplementedError()
@attribute(domain=set())
def scenario(self) -> str:
"""Type of scenario."""
if self._scenario is None:
raise ValueError("Cannot call this on an abstract class instance.")
return self._scenario
@attribute
def id(self) -> str:
"""A unique identifier of the scenario."""
return self._id
@property
def progress(self) -> Progress:
return self._progress
@property
def logger(self) -> Logger:
return logging.getLogger(self._id)
@property
def logstream(self) -> Optional[TextIOBase]:
return self._logstream
@property
def log(self) -> str:
result = ""
self._logstream.seek(0)
result = "\n".join(self._logstream.readlines())
self._logstream.seek(0, SEEK_END)
return result
@property
def attributes(self) -> Dict[str, Any]:
if self._attributes is None:
props = attribute.get_properties(self.__class__)
self._attributes = dict((name, get_property_value(self, name)) for name in props.keys())
return self._attributes
@property
def keyword_replacements(self) -> Dict[str, str]:
return {}
@classmethod
def get_instances(cls, **kwargs: Any) -> Iterable["Scenario"]:
if cls == Scenario:
for id, scenario in Scenario.scenarios.items():
if kwargs.get("scenario", None) is None or id in kwargs["scenario"]:
for instance in scenario.get_instances(**kwargs):
yield instance
else:
domains = []
names = Scenario.attribute_domains.keys()
for name in names:
if name in kwargs and kwargs[name] is not None:
domain = kwargs[name]
if not isinstance(domain, Iterable):
domain = [domain]
domains.append(list(domain))
else:
domains.append(list(Scenario.attribute_domains[name]))
for values in product(*domains):
attributes = dict((name, value) for (name, value) in zip(names, values) if value is not None)
if cls.is_valid_config(**attributes):
yield cls(**attributes)
@classmethod
def is_valid_config(cls, **attributes: Any) -> bool:
return True
def save(self, path: str) -> None:
if os.path.splitext(path)[1] != "":
raise ValueError("The provided path '%s' is not a valid directory path." % path)
os.makedirs(path, exist_ok=True)
# Write a log file.
logpath = os.path.join(path, "scenario.log")
with open(logpath, "w") as f:
self._logstream.seek(0)
copyfileobj(self._logstream, f)
self._logstream.seek(0, SEEK_END)
# Save attributes as a single yaml file.
props = attribute.get_properties(type(self))
attributes = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
save_dict(attributes, path, "attributes")
# Save results as separate files.
props = result.get_properties(type(self))
results = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
save_dict(results, path, "results")
def __str__(self) -> str:
props = attribute.get_properties(type(self))
attributes = dict((name, prop.fget(self) if prop.fget is not None else None) for (name, prop) in props.items())
for k, v in attributes.items():
if isinstance(v, Enum):
attributes[k] = v.value
return "(%s)" % ", ".join(["%s=%s" % (str(k), str(v)) for (k, v) in attributes.items()])
@classmethod
def from_dict(cls, source: Dict[str, Any]) -> "Scenario":
scenario_id = source["scenario"]
scenario_cls = cls.scenarios[scenario_id]
return scenario_cls(**source)
@classmethod
def load(cls, path: str) -> "Scenario":
if not os.path.isdir(path):
raise ValueError("The provided path '%s' does not point to a directory." % path)
# Load the log file.
logpath = os.path.join(path, "scenario.log")
logstream: Optional[TextIOBase] = None
if os.path.isfile(logpath):
with open(logpath, "r") as f:
logstream = StringIO(f.read())
attributes = load_dict(path, "attributes")
results = load_dict(path, "results")
kwargs = {"logstream": logstream}
return cls.from_dict({**attributes, **results, **kwargs})
def is_match(self, other: "Scenario") -> bool:
other_attributes = other.attributes
return all(other_attributes.get(k, None) == v for (k, v) in self.attributes.items() if k != "id")
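# Hedged sketch of how `get_instances` expands attribute domains: it takes the
# Cartesian product of the per-attribute value lists and instantiates one
# scenario per combination accepted by `is_valid_config`. The domain values
# below are hypothetical.
#
#   >>> from itertools import product
#   >>> domains = {"dataset": ["a", "b"], "model": ["x", "y"]}
#   >>> combos = [dict(zip(domains.keys(), values)) for values in product(*domains.values())]
#   >>> len(combos)
#   4
#   >>> # Scenario.get_instances(dataset=["a"]) would restrict the first domain to ["a"]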
V = TypeVar("V")
def get_value(obj: Any, key: str) -> Any:
if hasattr(obj, key):
return getattr(obj, key)
else:
return None
class Table(Sequence[V]):
def __init__(self, data: Sequence[V], attributes: List[str] = [], key: Optional[str] = None):
self._data = data
self._attributes = attributes
self._key = key
@overload
def __getitem__(self, index: int) -> V:
return self._data.__getitem__(index)
@overload
def __getitem__(self, index: slice) -> Sequence[V]:
return self._data.__getitem__(index)
def __getitem__(self, index: Union[int, slice]) -> Union[V, Sequence[V]]:
return self._data.__getitem__(index)
def __len__(self) -> int:
return self._data.__len__()
@property
def df(self):
df = pd.DataFrame.from_dict({a: [get_value(x, a) for x in self._data] for a in self._attributes})
if self._key is not None:
df.set_index(self._key, inplace=True)
return df
def __repr__(self) -> str:
return self.df.__repr__()
def _repr_html_(self) -> Optional[str]:
return self.df._repr_html_()
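# Minimal usage sketch for Table: wrap a sequence of objects and project the
# listed attributes into a pandas DataFrame. The Row dataclass is hypothetical.
#
#   >>> from dataclasses import dataclass
#   >>> @dataclass
#   ... class Row:
#   ...     id: str
#   ...     score: float
#   >>> table = Table([Row("a", 0.1), Row("b", 0.9)], attributes=["id", "score"], key="id")
#   >>> table.df.loc["b", "score"]
#   0.9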
DEFAULT_RESULTS_PATH = os.path.join("var", "results")
DEFAULT_REPORTS_PATH = os.path.join("var", "reports")
ALL_STUDY_PATHS = glob(os.path.join(DEFAULT_RESULTS_PATH, "*"))
DEFAULT_STUDY_PATH = max(ALL_STUDY_PATHS, key=lambda x: os.path.getmtime(x)) if len(ALL_STUDY_PATHS) > 0 else None
DEFAULT_SCENARIO_PATH_FORMAT = "{id}"
class Study:
def __init__(
self,
scenarios: Sequence[Scenario],
id: Optional[str] = None,
outpath: str = DEFAULT_RESULTS_PATH,
scenario_path_format: str = DEFAULT_SCENARIO_PATH_FORMAT,
logstream: Optional[TextIOBase] = None,
) -> None:
self._scenarios = scenarios
self._id = id if id is not None else datetime.datetime.now().strftime("Study-%Y-%m-%d-%H-%M-%S")
self._outpath = outpath
self._scenario_path_format = scenario_path_format
self._logstream = logstream if logstream is not None else StringIO()
self._logger = logging.getLogger(self._id)
self._verify_scenario_path(scenario_path_format, scenarios)
@staticmethod
def _get_scenario_runner(
queue: Optional[Queue] = None,
catch_exceptions: bool = True,
progress_bar: bool = True,
console_log: bool = True,
rerun: bool = False,
) -> Callable[[Scenario], Scenario]:
def _scenario_runner(scenario: Scenario) -> Scenario:
    # Initialize handler references before the try block so the finally
    # clause can remove them safely even if setup fails early.
    qh: Optional[logging.Handler] = None
    ch: Optional[logging.Handler] = None
    try:
        scenario.progress.queue = queue
        scenario.logger.setLevel(logging.DEBUG)
if queue is not None:
qh = logging.handlers.QueueHandler(queue) # type: ignore
scenario.logger.addHandler(qh)
elif console_log:
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s")
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
scenario.logger.addHandler(ch)
if rerun or not scenario.completed:
if queue is not None:
scenario.run(progress_bar=progress_bar, console_log=False)
else:
with logging_redirect_tqdm(loggers=[scenario.logger]):
scenario.run(progress_bar=progress_bar, console_log=False)
else:
scenario.logger.info("Scenario instance already completed. Skipping...")
except Exception as e:
if catch_exceptions:
trace_output = traceback.format_exc()
scenario.logger.error(trace_output)
else:
raise e
finally:
scenario.progress.queue = None
if qh is not None:
scenario.logger.removeHandler(qh)
if ch is not None:
scenario.logger.removeHandler(ch)
return scenario
return _scenario_runner
@staticmethod
def _status_monitor(queue: Queue, logger: Logger) -> None:
while True:
record: Optional[Union[logging.LogRecord, Progress.Event]] = queue.get()
if record is None:
break
if isinstance(record, Progress.Event):
Progress.handle(record)
else:
# logger = logging.getLogger(record.name)
logger.handle(record)
Progress.refresh()
def run(
self,
catch_exceptions: bool = True,
progress_bar: bool = True,
console_log: bool = True,
parallel: bool = True,
ray_address: Optional[str] = None,
ray_numprocs: Optional[int] = None,
eagersave: bool = True,
**kwargs: Any
) -> None:
# Set up logging.
formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s")
fh = logging.StreamHandler(self._logstream)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
ch: Optional[logging.Handler] = None
if console_log:
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
# Set up progress bar.
# pbar = None if not progress_bar else tqdm(total=len(self.scenarios), desc="Scenarios", position=0)
queue = Queue() if parallel else None
pbar = None if not progress_bar else Progress(queue, id=self.id)
if pbar is not None:
pbar.start(total=len(self.scenarios), desc="Scenarios")
with logging_redirect_tqdm(loggers=[self.logger]):
# for scenario in self.scenarios:
# try:
# scenario.run(logger=self._logger, progress_bar=progress_bar, **kwargs)
# except Exception as e:
# if catch_exceptions:
# trace_output = traceback.format_exc()
# print(trace_output)
# else:
# raise e
# finally:
# if pbar is not None:
# pbar.update(1)
scenarios = []
runner = Study._get_scenario_runner(queue, catch_exceptions, progress_bar, console_log)
if parallel:
monitor = threading.Thread(target=Study._status_monitor, args=(queue, self.logger))
monitor.start()
pool = Pool(processes=ray_numprocs, ray_address=ray_address)
for scenario in pool.imap_unordered(runner, self.scenarios):
scenarios.append(scenario)
if pbar is not None:
pbar.update(1)
if eagersave:
self.save_scenario(scenario)
else:
for scenario in map(runner, self.scenarios):
scenarios.append(scenario)
if pbar is not None:
pbar.update(1)
if eagersave:
self.save_scenario(scenario)
self._scenarios = scenarios
if pbar is not None:
pbar.close()
if parallel:
assert queue is not None and monitor is not None
queue.put(None)
monitor.join()
# Clear logging handlers.
self.logger.removeHandler(fh)
if ch is not None:
self.logger.removeHandler(ch)
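# Hedged usage sketch for running a study. Scenario subclasses, their ids and
# attributes are project-specific, so the construction below is illustrative.
#
#   >>> scenarios = list(Scenario.get_instances(scenario=["my-scenario"]))
#   >>> study = Study(scenarios, outpath=DEFAULT_RESULTS_PATH)
#   >>> study.run(parallel=False, progress_bar=False)
#   >>> study.save()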
@staticmethod
def _verify_scenario_path(scenario_path_format: str, scenarios: Iterable[Scenario]) -> None:
attributes = re.findall(r"\{(.*?)\}", scenario_path_format)
scenario_paths: Set[str] = set()
for scenario in scenarios:
replacements = dict((name, get_property_value(scenario, name)) for name in attributes)
exppath = scenario_path_format.format_map(replacements)
if exppath in scenario_paths:
raise ValueError(
"Provided scenario_path does not produce unique paths. The path '%s' caused a conflict." % exppath
)
scenario_paths.add(exppath)
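# Hedged example of a scenario_path_format: attribute names in braces are
# substituted per scenario, and the resulting paths must be unique across the
# study. "scenario" and "id" are real Scenario attributes; the resulting
# directory layout below assumes the study path is <outpath>/<study id>.
#
#   >>> study = Study(scenarios, scenario_path_format="{scenario}/{id}")
#   >>> # save_scenario() would then write each scenario under
#   >>> # <outpath>/<study id>/scenarios/<scenario>/<id>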
def save_scenario(self, scenario: Scenario) -> str:
if self.path is None:
raise ValueError("This scenario has no output path.")
attributes = re.findall(r"\{(.*?)\}", self._scenario_path_format)
replacements = dict((name, get_property_value(scenario, name)) for name in attributes)
exppath = self._scenario_path_format.format_map(replacements)
full_exppath = os.path.join(self.path, "scenarios", exppath)
scenario.save(full_exppath)
return full_exppath
def save(self, save_scenarios: bool = True) -> None:
# Make directory that will contain the study.
os.makedirs(self.path, exist_ok=True)
# Write a marker file.
markerpath = os.path.join(self.path, "study.yml")
with open(markerpath, "w") as f:
yaml.safe_dump({"id": self.id, "scenario_path_format": self.scenario_path_format}, f)
# Write a log file.
logpath = os.path.join(self.path, "study.log")
with open(logpath, "w") as f:
self._logstream.seek(0)
copyfileobj(self._logstream, f)
self._logstream.seek(0, SEEK_END)
# Verify that the provided scenario path is unique enough.
# self._verify_scenario_path(self._scenario_path_format, self.scenarios)
# Iterate over all scenarios and compute their target paths.
# attributes = re.findall(r"\{(.*?)\}", scenario_path)
# scenario_paths: Set[str] = set()
if save_scenarios:
for scenario in self.scenarios:
self.save_scenario(scenario)
# replacements = dict((name, get_property_value(scenario, | |
# Repository: binqsoft/CxPy
# coding=utf-8
# Python Dependencies
import base64
import re
import time
import logging
import os
from common import Common
from CxType.cli_scan_args import CliScanArgs
dir_path = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(filename=dir_path + '/checkmarx_soap_api.log',
format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
report_types = {
"PDF": ".pdf",
"RTF": ".rtf",
"CSV": ".csv",
"XML": ".xml",
}
class CxPy(Common):
# Internal Variables for the Class
errorLog = []
ttlReport = 6
timeWaitReport = 3
##########################################
#
# Functions Related to the functionality of the WSDL
#
##########################################
def branch_project_by_id(self, project_name, new_branch_project_name):
"""
This API client can create a branch for an existing project.
To create a new project, first run a scan with a new project name,
and then branch the existing project as described here.
:param project_name: The name of the project that will be branched.
:type project_name: String
:param new_branch_project_name: The new project name of the branched project.
:type new_branch_project_name: String
:return: project_id
:rtype: Integer
"""
try:
origin_project_id = self.get_project_id_by_name(project_name)
if not origin_project_id:
logger.error("branch_project_by_id, Project does not exist.")
raise Exception("branch_project_by_id, Project does not exist.")
tmp = self.client.service.BranchProjectById(self.session_id,
origin_project_id,
new_branch_project_name)
if not tmp.IsSuccesfull:
logger.error("Error establishing connection: "
"{} ".format(tmp.ErrorMessage))
raise Exception("Error establishing connection: "
"{} ".format(tmp.ErrorMessage))
project_id = tmp.ProjectID
logger.info('branch_project_by_id, project {} '
'has project id {}'.format(project_name, project_id))
return project_id
except Exception as e:
logger.error("Unable to BranchProjectById: "
"{} ".format(e.message))
raise Exception("Unable to BranchProjectById: "
"{} ".format(e.message))
def cancel_scan(self, scan_run_id):
"""
The API client can cancel a scan in progress.
The scan can be canceled while waiting in the queue or during a scan.
:param scan_run_id:
:type scan_run_id: string
:rtype: dictionary
"""
try:
logger.warning("cancel_scan, scan run id {}".format(scan_run_id))
response = self.client.service.CancelScan(self.session_id,
scan_run_id)
if not response.IsSuccesfull:
logger.error(" Fail to CancelScan: {} ".format(response.ErrorMessage))
raise Exception(" Fail to CancelScan: "
"{} ".format(response.ErrorMessage))
return {"success": True, "runId": scan_run_id}
except Exception as e:
logger.error("Unable to CancelScan: {} ".format(e.message))
raise Exception("Unable to CancelScan: "
"{} ".format(e.message))
def create_scan_report(self, scan_id, report_type="PDF"):
"""
The API client can generate a result report for a scan, by Scan ID.
:param scan_id:
:type scan_id: Integer
:param report_type: report_type should be member of the list: ["PDF", "RTF", "CSV", "XML"]
:type report_type: String
:return: report_id
:rtype: Integer
"""
report_request = self.client.factory.create('CxWSReportRequest')
ws_report_type = self.client.factory.create('CxWSReportType')
report_request.ScanID = scan_id
if report_type not in ["PDF", "RTF", "CSV", "XML"]:
logger.error(' Report type not supported, report_type should be '
'member of the list: ["PDF", "RTF", "CSV", "XML"] ')
raise Exception(' Report type not supported, report_type should be'
' member of the list: ["PDF", "RTF", "CSV", "XML"] ')
report_request.Type = ws_report_type.__getitem__(report_type)
try:
tmp = self.client.service.CreateScanReport(self.session_id, report_request)
if not tmp.IsSuccesfull:
raise Exception(' Fail to CreateScanReport {}'.format(tmp.ErrorMessage))
report_id = tmp.ID
logger.info("begin to create report, "
"scan_id {} has report_id {}".format(scan_id, report_id))
return report_id
except Exception as e:
raise Exception("Unable to CreateScanReport: {} ".format(e.message))
# def delete_projects(self, project_names):
# """
#
# delete projects by project names
#
# :param project_names:
# :type project_names: list
# :return: dictionary
# """
# project_ids_number = []
# project_names_exist = []
# project_names_not_exist = []
#
# for projectName in project_names:
# project_id = self.get_project_id_by_name(projectName)
# if project_id:
# project_names_exist.append(projectName)
# project_ids_number.append(project_id)
# else:
# project_names_not_exist.append(projectName)
#
# logger.warning(" deleting_projects >>> project names {} : "
# "project ids {} ".format(', '.join(project_names), project_ids_number))
# project_ids = self.client.factory.create('ArrayOfLong')
# project_ids.long.extend(project_ids_number)
#
# try:
# tmp = self.client.service.DeleteProjects(self.session_id, project_ids)
#
# if not tmp.IsSuccesfull:
# logger.error(' Fail to delete projects: '
# '{} '.format(tmp.ErrorMessage))
# raise Exception(' Fail to delete projects: '
# '{} '.format(tmp.ErrorMessage))
#
# return {"success": True,
# "deleted_projects": project_names_exist,
# "projects_not_exit": project_names_not_exist}
#
# except Exception as e:
# logger.error("Unable to DeleteProjects: "
# "{} ".format(e.message))
# raise Exception("Unable to DeleteProjects: "
# "{} ".format(e.message))
def delete_scans(self, scan_ids_number):
"""
The API client can delete requested scans.
Scans that are currently running won't be deleted.
If there is even a single scan that the user cannot delete (for security reasons),
the operation will fail and an error message is returned.
:param scan_ids_number:
:type scan_ids_number: list
:return:
:rtype: dictionary
"""
scan_ids_number = scan_ids_number or []
scan_ids = self.client.factory.create('ArrayOfLong')
scan_ids.long.extend(scan_ids_number)
try:
logger.warning('delete_scans, scan_ids {}'.format(scan_ids_number))
tmp = self.client.service.DeleteScans(self.session_id, scan_ids)
if not tmp.IsSuccesfull:
logger.error(' Fail to DeleteScans {} '.format(tmp.ErrorMessage))
raise Exception(' Fail to DeleteScans {} '.format(tmp.ErrorMessage))
return {"success": True,
'scanIds': scan_ids_number}
except Exception as e:
logger.error("Unable to DeleteScans: {} ".format(e.message))
raise Exception("Unable to DeleteScans: {} ".format(e.message))
# def delete_user(self, user_name):
# """
#
# delete user from Checkmarx server.
#
# :param user_name:
# :type user_name: string
# :return:
# :rtype: dictionary
# """
# user_id = self.get_user_id_by_name(user_name)
#
# try:
# logger.warning("deleting user {}".format(user_name))
# tmp = self.client.service.DeleteUser(self.session_id, user_id)
#
# if not tmp.IsSuccesfull:
# logger.error(' Fail to DeleteUser {} '.format(tmp.ErrorMessage))
# raise Exception(' Fail to DeleteUser {} '.format(tmp.ErrorMessage))
#
# return {"success": True,
# "deleted_user": user_name}
#
# except Exception as e:
# logger.error("Unable to DeleteUser: {} ".format(e.message))
# raise Exception("Unable to DeleteUser: {} ".format(e.message))
def execute_data_retention(self, data_retention_type, num_of_scans_to_preserve,
start_date, end_date, duration_limit_in_hours):
"""
:param data_retention_type: [NumOfScansToPreserve, DatesRange]
:type data_retention_type: string
:param num_of_scans_to_preserve:
:type num_of_scans_to_preserve: integer
:param start_date:
:type start_date: long
:param end_date:
:type end_date: long
:param duration_limit_in_hours:
:type duration_limit_in_hours: long
:return:
"""
try:
drt = self.client.factory.create("CxDataRetentionType")
data_retention_configuration = self.client.factory.create("CxDataRetentionConfiguration")
data_retention_configuration.DataRetentionType = drt.__getitem__(data_retention_type)
data_retention_configuration.NumOfScansToPreserve = num_of_scans_to_preserve
data_retention_configuration.StartDate = start_date
data_retention_configuration.EndDate = end_date
data_retention_configuration.DurationLimitInHours = duration_limit_in_hours
tmp = self.client.service.ExecuteDataRetention(self.session_id, data_retention_configuration)
if not tmp.IsSuccesfull:
logger.error(' Fail to execute_data_retention {} '.format(tmp.ErrorMessage))
raise Exception(' Fail to execute_data_retention {} '.format(tmp.ErrorMessage))
return "ExecuteDataRetention called successfully"
except Exception as e:
logger.error("Unable to execute_data_retention: {} ".format(e.message))
raise Exception("Unable to execute_data_retention: {} ".format(e.message))
def get_all_users(self):
"""
get all users from the Checkmarx server.
:return: user list
:rtype: list
"""
try:
tmp = self.client.service.GetAllUsers(self.session_id)
if not tmp.IsSuccesfull:
logger.error('Fail to GetAllUsers: {}'.format(tmp.ErrorMessage))
raise Exception('Fail to GetAllUsers: '
'{}'.format(tmp.ErrorMessage))
user_data_list = tmp.UserDataList.UserData
return user_data_list
except Exception as e:
logger.error("Unable to GetAllUsers: {} ".format(e.message))
raise Exception("Unable to GetAllUsers: {} ".format(e.message))
def get_associated_groups_list(self):
"""
The API client can get information on all groups related to the current user.
:return: CxWSResponseGroupList.GroupList contains an array of group data.
"""
try:
tmp = self.client.service.GetAssociatedGroupsList(self.session_id)
if not tmp.IsSuccesfull:
logger.error("get_associated_groups, Unable to get data from the server.")
raise Exception("get_associated_groups, Unable to get data from the server.")
return self.convert_to_json(tmp)
except Exception as e:
logger.error("get_associated_groups, Unable to GetAssociatedGroupsList: {} ".format(e.message))
raise Exception("get_associated_groups, Unable to GetAssociatedGroupsList: {} ".format(e.message))
def get_configuration_set_list(self):
"""
The API client can get the list of available encoding options
(for scan configuration).
:return: Available encoding options.
"""
try:
tmp = self.client.service.GetConfigurationSetList(self.session_id)
if not tmp.IsSuccesfull:
logger.error("get_configuration_list, Unable to get data from the server.")
raise Exception("get_configuration_list, Unable to get data from the server.")
return self.convert_to_json(tmp)
except Exception as e:
logger.error("Unable to get_configuration_list: {} ".format(e.message))
raise Exception("Unable to get_configuration_list: {} ".format(e.message))
def get_preset_list(self):
"""
get preset list from server
:return:
"""
try:
tmp = self.client.service.GetPresetList(self.session_id)
if not tmp.IsSuccesfull:
logger.error("GetPresetList, Unable to get data from the server.")
raise Exception("GetPresetList, Unable to get data from the server.")
return self.convert_to_json(tmp)
except Exception as e:
logger.error("Unable to GetPresetList: {} ".format(e.message))
raise Exception("Unable to GetPresetList: {} ".format(e.message))
def get_project_configuration(self, project_name):
"""
get project configuration
:param project_name:
:return:
"""
try:
project_id = self.get_project_id_by_name(project_name)
if not project_id:
logger.error(' project not exists: {}'.format(project_name))
raise Exception(' project not exists: {}'.format(project_name))
tmp = self.client.service.GetProjectConfiguration(self.session_id,
project_id)
if not tmp.IsSuccesfull:
logger.error(' unable to GetProjectConfiguration : '
'{}'.format(tmp.ErrorMessage))
raise Exception(' unable to GetProjectConfiguration :'
' {}'.format(tmp.ErrorMessage))
project_config = tmp.ProjectConfig
permission = tmp.Permission
return project_config, permission
except Exception as e:
logger.error("Unable to GetProjectConfiguration: {} ".format(e.message))
raise Exception("Unable to GetProjectConfiguration: "
"{} ".format(e.message))
def get_project_scanned_display_data(self, filter_on=False):
"""
The API client can get a list of all public projects in the system with
a risk level and summary of results by severity (high, medium, low).
:param filter_on:
:return: CxWSResponseProjectScannedDisplayData
"""
try:
tmp = self.client.service.GetProjectScannedDisplayData(self.session_id)
if not tmp.IsSuccesfull:
logger.error("GetProjectScannedDisplayData, "
"Unable to get data from the server.")
raise Exception("GetProjectScannedDisplayData, "
"Unable to get data from the server.")
if not filter_on:
return self.convert_to_json(tmp)
else:
return tmp.ProjectScannedList[0]
except Exception as e:
logger.error("Unable to GetProjectScannedDisplayData: "
"{} ".format(e.message))
raise Exception("Unable to GetProjectScannedDisplayData: "
"{} ".format(e.message))
def get_projects_display_data(self, filter_on=False):
"""
The API | |
# Repository: hanwenzhu/ck
#
# Collective Knowledge (program)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>, <EMAIL>, http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
sep='***************************************************************************************'
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# clean, compile and run program(s) - can be with wildcards
# (afterwards will call "process_in_dir" to clean, compile and run specific programs)
def process(i):
"""
Input: {
sub_action - clean, compile, run
(repo_uoa) - program repo UOA
(module_uoa) - program module UOA
(data_uoa) - program data UOA
(program_tags) - an alternative mechanism for finding a program by a unique combination of tags
(host_os) - host OS (detect, if omitted)
(target_os) - OS module to check (if omitted, analyze host)
(device_id) - device id if remote (such as adb)
(process_in_tmp) - (default 'yes') - if 'yes', clean, compile and run in the tmp directory
(tmp_dir) - (default 'tmp') - if !='', use this tmp directory to clean, compile and run
(generate_rnd_tmp_dir) - if 'yes', generate random tmp directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of the last compile from function 'process_in_dir'
tmp_dir - directory where clean, compile, run
}
"""
import os
import copy
ic=copy.deepcopy(i)
# Check if global writing is allowed
r=ck.check_writing({})
if r['return']>0: return r
o=i.get('out','')
ruoa=i.get('repo_uoa','')
muoa=i.get('module_uoa','')
duoa=i.get('data_uoa','')
program_tags = i.get('program_tags','')
lst=[]
if duoa!='':
# Potentially fill wildcards:
r=ck.search({'action':'search',
'repo_uoa':ruoa,
'module_uoa':muoa,
'data_uoa':duoa,
'add_meta':'yes',
}) # contains path and meta.json in 'meta'
if r['return']>0: return r
lst=r['lst']
elif program_tags:
r=ck.access({'action': 'search_in_variations',
'module_uoa': 'misc',
'query_module_uoa': work['self_module_uid'],
'tags': program_tags,
}) # contains path and meta.json in 'meta'
if r['return']>0: return r
lst=r['lst']
else:
# First, try to detect CID in current directory
r=ck.cid({})
if r['return']==0:
ruoa=r.get('repo_uoa','')
muoa=r.get('module_uoa','')
duoa=r.get('data_uoa','')
rx=ck.access({'action':'load',
'module_uoa':muoa,
'data_uoa':duoa,
'repo_uoa':ruoa,
}) # contains path and meta.json in 'dict'
if rx['return']==0 and rx['dict'].get('program','')=='yes':
rx['meta'] = rx.pop('dict') # enforcing 'meta'/'dict' output format compatibility
lst = [ rx ]
if duoa=='':
# Attempt to load configuration from the current directory
try:
p=os.getcwd()
except OSError:
os.chdir('..')
p=os.getcwd()
pc=os.path.join(p, ck.cfg['subdir_ck_ext'], ck.cfg['file_meta'])
if os.path.isfile(pc):
r=ck.load_json_file({'json_file':pc})
if r['return']==0 and r['dict'].get('program','')=='yes':
d=r['dict']
ii=copy.deepcopy(ic)
ii['path']=p
ii['meta']=d
return process_in_dir(ii)
return {'return':1, 'error':'data UOA is not defined'}
if len(lst)==0:
return {'return':1, 'error':'no program(s) found'}
r={'return':0}
for ll in lst:
path=ll['path']
ruid=ll['repo_uid']
muid=ll['module_uid']
duid=ll['data_uid']
dalias=ll['data_uoa']
meta_dict=ll['meta']
if o=='con':
ck.out('')
ck.out('* '+dalias+' ('+duid+')')
ck.out('')
# Check if base_uoa suggests to use another program path
buoa=meta_dict.get('base_uoa','')
if buoa!='':
rx=ck.access({'action':'find',
'module_uoa':muid,
'data_uoa':buoa})
if rx['return']>0:
return {'return':1, 'error':'problem finding base entry '+buoa+' ('+rx['error']+')'}
path=rx['path']
ii=copy.deepcopy(ic)
ii['meta']=meta_dict
ii['path']=path
ii['repo_uoa']=ruid
ii['module_uoa']=muid
ii['data_uoa']=duid
ii['data_alias']=dalias
r=process_in_dir(ii)
fail_reason=r.get('misc',{}).get('fail_reason','')
if r['return']>0 or fail_reason!='':
print_warning({'data_uoa':dalias, 'repo_uoa':ruid})
if i.get('return_error_if_external_fail','')=='yes' and fail_reason!='':
return {'return':11, 'error':'see message above'}
if r['return']>0: return r
return r
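# Hedged usage sketch: this module is normally driven through the CK kernel,
# either from the command line or via ck.access(). The program name below is
# hypothetical; the invocation pattern is standard CK usage.
#
#   $ ck compile program:my-program --speed
#   $ ck run program:my-program
#
#   >>> import ck.kernel as ck
#   >>> r = ck.access({'action': 'run', 'module_uoa': 'program',
#   ...                'data_uoa': 'my-program', 'out': 'con'})
#   >>> if r['return'] > 0: print(r['error'])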
##############################################################################
# compile, run and clean a given CK program (called from universal functions here)
def process_in_dir(i):
"""
Input: {
Comes from 'compile', 'run' and 'clean' functions
sub_action - clean, compile, run
(host_os) - host OS (detect, if omitted)
(target_os) - OS module to check (if omitted, analyze host)
(device_id) - device id if remote (such as adb)
(target) - target machine added via 'ck add machine' with prepared target description
(useful to create farms of machines for crowd-benchmarking and crowd-tuning using CK)
(device_cfg) - extra device cfg (if empty, will be filled in from 'machine' module description)
(compute_platform_id) - if !='', set env['CK_COMPUTE_PLATFORM_ID']
(compute_device_id) - if !='', set env['CK_COMPUTE_DEVICE_ID']
path - path
meta - program description
(tmp_dir) - if !='', use it instead of 'tmp' dir to compile and run code
(generate_rnd_tmp_dir) - if 'yes', generate random tmp directory to compile and run program
(useful during crowd-tuning)
(run_batch_name) - if !='', use this batch name instead of randomly generated one
(compiler_vars) - dict with set up compiler flags (-D var)
they will update the ones defined as default in program description ...
(no_vars) - skip compiler vars (if you want to use default ones from the sources) ...
(compiler_tags) - extra compiler tags
(remove_compiler_vars) - list of compiler vars to remove
(extra_env_for_compilation) - set environment variables before compiling program
(flags) - compile flags
(lflags) - link flags
(speed) - if 'yes', compile for speed (use env CK_OPT_SPEED from compiler)
(size) - if 'yes', compile for size (use env CK_OPT_SIZE from compiler)
(compile_type) - static or dynamic (dynamic by default;
however takes compiler default_compile_type into account)
or
(static or dynamic)
(use_clang_opt) - use Clang opt optimizer
(repeat) - repeat kernel via environment CT_REPEAT_MAIN if supported
(sudo) - if 'yes', force using sudo
(if not set up in OS, use ${CK_SUDO_INIT}, ${CK_SUDO_PRE}, ${CK_SUDO_POST})
(affinity) - set processor affinity for this program run (if supported by OS - see "affinity" in OS)
examples: 0 ; 0,1 ; 0-3 ; 4-7 (the last two can be useful for ARM big.LITTLE architecture)
(clean) - if 'yes', clean tmp directory before using
(skip_clean_after) - if 'yes', do not remove run batch
(keep) - the same as skip_clean_after
(repo_uoa) - program repo UOA
(module_uoa) - program module UOA
(data_uoa) - program data UOA
(params) - dictionary with parameters passed via pre/post processing to third-party tools
for example, to configure ARM Workload Automation
(params.{KEY}) - set params[KEY]=value (user-friendly interface via CMD)
(misc) - misc dict
(characteristics) - characteristics/features/properties
(env) - preset environment
(env.{KEY}) - set env[KEY]=value (user-friendly interface via CMD)
(deps.{KEY}) - set deps[KEY]["uoa']=value (user-friendly interface via CMD to set any given dependency)
(preset_deps) - dict with {"KEY":"UOA"} to preset dependencies
(post_process_script_uoa) - run script from this UOA
(post_process_subscript) - subscript name
(post_process_params) - (string) add params to CMD
(deps) - already resolved deps (useful for auto-tuning)
(deps_cache) - list of already resolved deps (useful to automate crowd-benchmarking and crowd-tuning)
(reuse_deps) - if 'yes', reuse deps by keys
(dep_add_tags.{KEY}) - extra tags added to specific subdictionary of deps{} for this particular resolution session
(remove_deps) [str] - a list of keys to remove from deps separated by comma.
Useful to run a given program workflow with an externally
installed dependency (compiler, library, model, tool).
(cmd_key) - CMD key
(dataset_uoa) - UOA of a dataset
(dataset_file) - dataset filename (if more than one inside one entry - suggest to have a UID in name)
(extra_env) - extra environment before running code as string
(pre_run_cmd) - pre CMD for binary
(extra_run_cmd) - extra CMD (can use $#key#$ for autotuning)
(debug_run_cmd) - substitute CMD with this one - usually useful for debugging to pre-set env for all deps
(run_cmd_substitutes) - dict with substs ($#key#$=value) in run CMD (useful for CMD autotuning)
(console) - if 'yes', output to console
(skip_device_init) - if 'yes', do not initialize device
(skip_calibration) - if 'yes', skip execution time calibration (make it around 4.0 sec)
(calibration_time) - calibration time in string, 4.0 sec. by default
(calibration_max) - max number of iterations for calibration, 10 by default
(pull_only_timer_files) - if 'yes', pull only timer files, but not output files
(useful for remote devices during statistical repetition)
(energy) - if 'yes', start energy monitoring (if supported) using script ck-set-power-sensors
Also, set compiler var CK_MONITOR_ENERGY=1 and run-time var CK_MONITOR_ENERGY=1
Note: files, monitored for energy, are defined in system environment.
For example, odroid .profile as:
export CK_ENERGY_FILES="/sys/bus/i2c/drivers/INA231/3-0040/sensor_W;/sys/bus/i2c/drivers/INA231/3-0041/sensor_W;/sys/bus/i2c/drivers/INA231/3-0044/sensor_W;/sys/bus/i2c/drivers/INA231/3-0045/sensor_W;"
(run_output_files) - extra list of output files (useful to add in pipeline to collect profiling from Android mobile, for example)
(extra_post_process_cmd) - append at the end of execution bat (for example, to call gprof ...)
(statistical_repetition_number) - int number of current (outside) statistical repetition
to avoid pushing data to remote device if !=0 ...
(autotuning_iteration) - int number of current autotuning iteration
to avoid pushing some data to remote | |
# File: rl/util.py
import argparse
import collections
import collections.abc
import inspect
import json
import logging
import multiprocessing as mp
import numpy as np
import re
import sys
import zipfile
from datetime import datetime, timedelta
from os import path, listdir, environ, getpid
from textwrap import wrap
PARALLEL_PROCESS_NUM = mp.cpu_count()
TIMESTAMP_REGEX = r'(\d{4}_\d{2}_\d{2}_\d{6})'
SPEC_PATH = path.join(path.dirname(__file__), 'spec')
COMPONENT_LOCKS = json.loads(
open(path.join(SPEC_PATH, 'component_locks.json')).read())
LOCK_HEAD_REST_SIG = {
# signature list of [head, rest] in component lock
'mutex': [[0, 0], [1, 1]],
'subset': [[0, 0], [1, 0], [1, 1]],
}
# parse_args to add flag
parser = argparse.ArgumentParser(description='Set flags for functions')
parser.add_argument("-b", "--blind",
help="dont render graphics",
action="store_const",
dest="render",
const=False,
default=True)
parser.add_argument("-d", "--debug",
help="activate debug log",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO)
parser.add_argument("-e", "--experiment",
help="specify experiment to run",
action="store",
type=str,
nargs='?',
dest="experiment",
default="dev_dqn")
parser.add_argument("-p", "--param_selection",
help="run parameter selection if present",
action="store_true",
dest="param_selection",
default=False)
parser.add_argument("-q", "--quiet",
help="change log to warning level",
action="store_const",
dest="loglevel",
const=logging.WARNING,
default=logging.INFO)
parser.add_argument("-t", "--times",
help="number of times session is run",
action="store",
nargs='?',
type=int,
dest="times",
default=1)
parser.add_argument("-x", "--max_episodes",
help="manually set environment max episodes",
action="store",
nargs='?',
type=int,
dest="max_epis",
default=-1)
args = parser.parse_args([]) if environ.get('CI') else parser.parse_args()
# Goddam python logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
logger.setLevel(args.loglevel)
logger.addHandler(handler)
logger.propagate = False
environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # mute tf warnings on optimized setup
def check_equal(iterator):
'''check if list contains all the same elements'''
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
def check_lock(lock_name, lock, experiment_spec):
'''
refer to rl/spec/component_locks.json
check a spec's component lock using binary signatures
e.g. head = problem (discrete)
rest = [Agent, Policy] (to be discrete too)
first check if rest all has the same signature, i.e. same set
then check pair [bin_head, bin_rest] in valid_lock_sig_list
as specified by the lock's type
'''
lock_type = lock['type']
valid_lock_sig_list = LOCK_HEAD_REST_SIG[lock_type]
lock_head = lock['head']
bin_head = (experiment_spec[lock_head] in lock[lock_head])
bin_rest_list = []
for k, v_list in lock.items():
if k in experiment_spec and k != lock_head:
bin_rest_list.append(experiment_spec[k] in v_list)
# rest must all have the same signature
rest_equal = check_equal(bin_rest_list)
if not rest_equal:
logger.warn(
'All components need to be of the same set, '
'check component lock "{}" and your spec "{}"'.format(
lock_name, experiment_spec['experiment_name']))
bin_rest = bin_rest_list[0]
lock_sig = [bin_head, bin_rest]
lock_valid = lock_sig in valid_lock_sig_list
if not lock_valid:
logger.warn(
'Component lock violated: "{}", spec: "{}"'.format(
lock_name, experiment_spec['experiment_name']))
return lock_valid
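# Hedged sketch of a component lock entry, with the format inferred from
# check_lock above: 'head' names the leading component, and every other
# component key maps to the set of values that share the lock signature.
# The concrete component names/values below are hypothetical.
#
#   >>> lock = {'type': 'mutex', 'head': 'problem',
#   ...         'problem': ['CartPole-v0'],
#   ...         'Agent': ['DQN'], 'Policy': ['BoltzmannPolicy']}
#   >>> spec = {'experiment_name': 'dev_dqn', 'problem': 'CartPole-v0',
#   ...         'Agent': 'DQN', 'Policy': 'BoltzmannPolicy'}
#   >>> check_lock('discrete_lock', lock, spec)
#   True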
def check_component_locks(experiment_spec):
'''
check the spec components for all locks
to ensure no lock is violated
refer to rl/spec/component_locks.json
'''
for lock_name, lock in COMPONENT_LOCKS.items():
check_lock(lock_name, lock, experiment_spec)
return
# import and safeguard the PROBLEMS, EXPERIMENT_SPECS with checks
def import_guard_asset():
PROBLEMS = json.loads(open(path.join(SPEC_PATH, 'problems.json')).read())
EXPERIMENT_SPECS = {}
spec_files = [spec_json for spec_json in listdir(
SPEC_PATH) if spec_json.endswith('experiment_specs.json')]
for filename in spec_files:
specs = json.loads(open(path.join(SPEC_PATH, filename)).read())
EXPERIMENT_SPECS.update(specs)
REQUIRED_PROBLEM_KEYS = [
'GYM_ENV_NAME', 'SOLVED_MEAN_REWARD',
'MAX_EPISODES', 'REWARD_MEAN_LEN']
REQUIRED_SPEC_KEYS = [
'problem', 'Agent', 'HyperOptimizer',
'Memory', 'Optimizer', 'Policy', 'PreProcessor', 'param']
for problem_name, problem in PROBLEMS.items():
assert all(k in problem for k in REQUIRED_PROBLEM_KEYS), \
'{} needs all REQUIRED_PROBLEM_KEYS'.format(
problem_name)
for experiment_name, spec in EXPERIMENT_SPECS.items():
assert all(k in spec for k in REQUIRED_SPEC_KEYS), \
'{} needs all REQUIRED_SPEC_KEYS'.format(experiment_name)
EXPERIMENT_SPECS[experiment_name]['experiment_name'] = experiment_name
check_component_locks(spec) # check component_locks.json
if 'param_range' not in EXPERIMENT_SPECS[experiment_name]:
continue
param_range = EXPERIMENT_SPECS[experiment_name]['param_range']
for param_key, param_val in param_range.items():
if isinstance(param_val, list):
param_range[param_key] = sorted(param_val)
elif isinstance(param_val, dict):
pass
else:
assert False, \
'param_range value must be list or dict: {}.{}:{}'.format(
experiment_name, param_key, param_val)
EXPERIMENT_SPECS[experiment_name]['param_range'] = param_range
return PROBLEMS, EXPERIMENT_SPECS
PROBLEMS, EXPERIMENT_SPECS = import_guard_asset()
def log_self(subject):
max_info_len = 300
info = '{}, param: {}'.format(
subject.__class__.__name__,
to_json(subject.__dict__))
trunc_info = (
info[:max_info_len] + '...' if len(info) > max_info_len else info)
logger.debug(trunc_info)
def wrap_text(text):
return '\n'.join(wrap(text, 60))
def make_line(line='-'):
if environ.get('CI'):
return
columns = 80
line_str = line*int(columns)
return line_str
def log_delimiter(msg, line='-'):
delim_msg = '''\n{0}\n{1}\n{0}\n\n'''.format(
make_line(line), msg)
logger.info(delim_msg)
def log_trial_delimiter(trial, action):
log_delimiter('{} Trial #{}/{} on PID {}:\n{}'.format(
action, trial.trial_num, trial.num_of_trials,
getpid(), trial.trial_id), '=')
def log_session_delimiter(sess, action):
log_delimiter(
'{} Session #{}/{} of Trial #{}/{} on PID {}:\n{}'.format(
action, sess.session_num, sess.num_of_sessions,
sess.trial.trial_num, sess.trial.num_of_trials,
getpid(), sess.session_id))
def timestamp():
'''timestamp used for filename'''
timestamp_str = '{:%Y_%m_%d_%H%M%S}'.format(datetime.now())
assert re.search(TIMESTAMP_REGEX, timestamp_str)
return timestamp_str
def timestamp_elapse(s1, s2):
'''calculate the time elapsed between timestamps from s1 to s2'''
FMT = '%Y_%m_%d_%H%M%S'
delta_t = datetime.strptime(s2, FMT) - datetime.strptime(s1, FMT)
return str(delta_t)
def timestamp_elapse_to_seconds(s1):
a = datetime.strptime(s1, '%H:%M:%S')
secs = timedelta(hours=a.hour, minutes=a.minute, seconds=a.second).seconds
return secs
# own custom sorted json serializer, cuz python
def to_json(o, level=0):
INDENT = 2
SPACE = " "
NEWLINE = "\n"
ret = ""
if isinstance(o, dict):
ret += "{" + NEWLINE
comma = ""
for k in sorted(o.keys()):
v = o[k]
ret += comma
comma = ",\n"
ret += SPACE * INDENT * (level+1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1)
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list) or isinstance(o, tuple):
ret += "[" + ",".join([to_json(e, level+1) for e in o]) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%.7g' % o
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.inexact):
ret += "[" + \
','.join(map(lambda x: '%.7g' % x, o.flatten().tolist())) + "]"
elif o is None:
ret += 'null'
elif hasattr(o, '__class__'):
ret += '"' + o.__class__.__name__ + '"'
else:
raise TypeError(
"Unknown type '%s' for json serialization" % str(type(o)))
return ret
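# Quick illustration of the custom serializer: keys are sorted, floats use the
# %.7g format, and numpy arrays are flattened into bracketed lists.
#
#   >>> import numpy as np
#   >>> print(to_json({'b': 1.23456789, 'a': np.array([1, 2])}))
#   {
#     "a": [1,2],
#     "b": 1.234568
#   }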
# format object and its properties into printable dict
def format_obj_dict(obj, keys):
if isinstance(obj, dict):
return to_json(
{k: obj.get(k) for k in keys if obj.get(k) is not None})
else:
return to_json(
{k: getattr(obj, k, None) for k in keys
if getattr(obj, k, None) is not None})
# cast dict to have flat values (int, float, str)
def flat_cast_dict(d):
for k in d:
v = d[k]
if not isinstance(v, (int, float)):
d[k] = str(v)
return d
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
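# flatten_dict joins nested keys with the separator:
#
#   >>> flatten_dict({'agent': {'lr': 0.01, 'net': {'hidden': 32}}})
#   {'agent_lr': 0.01, 'agent_net_hidden': 32}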
def get_module(GREF, dot_path):
# get module from globals() by string dot_path
path_arr = dot_path.split('.')
# base level from globals
mod = GREF.get(path_arr.pop(0))
for deeper_path in path_arr:
mod = getattr(mod, deeper_path)
return mod
def import_package_files(globals_, locals_, __file__):
'''
Dynamically import all the public attributes of the python modules in this
file's directory (the package directory) and return a list of their names.
'''
exports = []
# globals_, locals_ = globals(), locals()
package_path = path.dirname(__file__)
package_name = path.basename(package_path)
for filename in listdir(package_path):
modulename, ext = path.splitext(filename)
if modulename[0] != '_' and ext in ('.py', '.pyw'):
subpackage = '{}.{}'.format(
package_name, modulename) # pkg relative
module = __import__(subpackage, globals_, locals_, [modulename])
modict = module.__dict__
names = (modict['__all__'] if '__all__' in modict else
[name for name in
modict if inspect.isclass(modict[name])]) # all public
exports.extend(names)
globals_.update((name, modict[name]) for name in names)
return exports
def clean_id_str(id_str):
return id_str.split('/').pop().split('.').pop(0)
def parse_trial_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
if len(name_time_trial) == 3:
return c_id_str
else:
return None
else:
return None
def parse_experiment_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
name_time_trial.pop()
experiment_id = ''.join(name_time_trial)
return experiment_id
else:
return None
def parse_experiment_name(id_str):
c_id_str = clean_id_str(id_str)
experiment_id = parse_experiment_id(c_id_str)
if experiment_id is None:
experiment_name = c_id_str
else:
experiment_name = re.sub(TIMESTAMP_REGEX, '', experiment_id).strip('-')
assert experiment_name in EXPERIMENT_SPECS, \
'{} not in EXPERIMENT_SPECS'.format(experiment_name)
return experiment_name
def load_data_from_trial_id(id_str):
experiment_id = parse_experiment_id(id_str)
trial_id = parse_trial_id(id_str)
data_filename = './data/{}/{}.json'.format(experiment_id, trial_id)
try:
data = json.loads(open(data_filename).read())
except (FileNotFoundError, json.JSONDecodeError):
data = None
return data
def load_data_array_from_experiment_id(id_str):
# to load all ./data files for a series of trials
experiment_id = parse_experiment_id(id_str)
data_path = './data/{}'.format(experiment_id)
trial_id_array = [
f for f in listdir(data_path)
if (path.isfile(path.join(data_path, f)) and
f.startswith(experiment_id) and
f.endswith('.json'))
]
return list(filter(None, [load_data_from_trial_id(trial_id)
for trial_id in trial_id_array]))
def save_experiment_data(data_df, trial_id):
experiment_id = parse_experiment_id(trial_id)
filedir = './data/{0}'.format(experiment_id)
filename = '{0}_analysis_data.csv'.format(experiment_id)
filepath = '{}/{}'.format(filedir, filename)
data_df.round(6).to_csv(filepath, index=False)
# zip the csv and best trial json for upload to PR
zipfile.ZipFile(filepath+'.zip', mode='w').write(
filepath, arcname=filename)
trial_filename = data_df.loc[0, 'trial_id'] + '.json'
trial_filepath = '{}/{}'.format(filedir, trial_filename)
zipfile.ZipFile(trial_filepath+'.zip', mode='w').write(
trial_filepath, arcname=trial_filename)
logger.info(
'experiment data saved to {}'.format(filepath))
def configure_hardware(RAND_SEED):
'''configure rand seed, GPU'''
from keras | |
import torch
import torch._prims as prims
import torch._prims.utils as utils
from torch._prims.utils import (
DimsType,
TensorLike,
TensorLikeType,
DimsSequenceType,
TensorSequenceType,
Number,
NumberType,
ELEMENTWISE_TYPE_PROMOTION_KIND,
elementwise_dtypes,
)
from torch._prims.wrappers import (
elementwise_type_promotion_wrapper,
out_wrapper,
_maybe_convert_to_dtype,
_maybe_resize_out,
)
from functools import reduce
from typing import Sequence, Optional, Union, Callable, List, Tuple
import operator
import warnings
import math
from enum import Enum
# Experimental module containing prototype Python references for existing
# PyTorch operations.
__all__ = [
#
# Elementwise Unary References
#
"abs",
"acos",
"acosh",
"asin",
"atan",
# "bessel_i0e", # special.i0e
# "bessel_i1e", # special.i1e
# "cbrt", # No corresponding torch operation
"ceil",
"cos",
"cosh",
"digamma",
"erf",
"erfinv",
"erfc",
"exp",
"expm1",
"floor",
"isfinite",
"isnan",
"lgamma",
"log",
"log1p",
"neg",
"reciprocal",
"round", # TODO: model kwargs
"sign",
"sin",
"sinh",
"sqrt",
"square",
"tan",
#
# Elementwise Binary References
#
"add",
"atan2",
"bitwise_and",
"bitwise_left_shift",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
# "complex",
# 'copysign', # where
# 'div', # need to implement all rounding modes first
"eq",
"float_power",
# 'floor_divide', # requires floor
# 'fmax', # requires where
# 'fmod',
# 'gcd',
"ge",
"gt",
# 'heaviside',
# 'hypot',
"igamma",
"igammac",
# 'isclose', # abs, sub, le, add, mul
# 'lcm',
# 'ldexp',
"le",
# 'logical_and',
# 'logical_or',
# 'logical_xor',
"lt",
# 'max', # implement with reductions
"maximum",
# 'min', # implement with reductions
"minimum",
"mul",
"ne",
"nextafter",
# 'polar', # abs, cos, sin
"pow",
# 'remainder',
# 'rsub', # unblocked
# # special.xlog1py
# # special.zeta
"sub",
"true_divide",
# 'xlogy', # where?, log, mul
#
# Conditional references
#
"where", # TODO: add opinfo
#
# Data conversion and movement references
#
"copy_to", # TODO: add opinfo
#
# Reduction ops
#
"sum",
"amax",
"amin",
#
# View & Shape Ops
#
"cat",
"permute",
"transpose",
"swap_axes", # alias for transpose
"tensor_split",
]
Tensor = torch.Tensor
class REDUCTION_OUTPUT_TYPE_KIND(Enum):
SAME = (0,)
SAME_OR_REAL = (1,) # for complex types outputs corresponding real type
OP_MATH = (2,) # keep output in opmath type, needed for mean
ALWAYS_BOOL = (3,)
def _broadcast_shapes(*_shapes):
shapes = tuple(filter(lambda x: x is not None, _shapes))
# Short-circuits on no input
if len(shapes) == 0:
return None
# Type checking
# TODO: make common validations available as utils
for shape in shapes:
assert isinstance(shape, Sequence)
# Computes common shape
common_shape = [
1,
] * reduce(max, (len(shape) for shape in shapes))
for shape in shapes:
for idx in range(-1, -1 - len(shape), -1):
if common_shape[idx] == 1:
if shape[idx] < 0:
raise ValueError(
"Attempting to broadcast a dimension with negative length!"
)
common_shape[idx] = shape[idx]
elif shape[idx] != 1:
if common_shape[idx] != shape[idx]:
raise RuntimeError(
"Attempting to broadcast a dimension of length ",
str(shape[idx]),
"!",
)
return common_shape
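# _broadcast_shapes follows standard NumPy/PyTorch broadcasting: shapes are
# right-aligned and size-1 dimensions stretch to match the others.
#
#   >>> _broadcast_shapes((3, 1, 5), (4, 5))
#   [3, 4, 5]
#   >>> _broadcast_shapes((2, 3), (3,))
#   [2, 3]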
def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True):
# Computes common shape
common_shape = _broadcast_shapes(
*map(lambda t: t.shape if isinstance(t, TensorLike) else None, args)
)
def __maybe_broadcast(x, shape):
if x is None:
return None
elif isinstance(x, Number):
return x
elif isinstance(x, TensorLike):
if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x):
return x
if tuple(x.shape) != common_shape:
common_rank = len(common_shape) + 1
start = common_rank - (len(x.shape) + 1)
dims = tuple(range(start, len(x.shape) + start))
return prims.broadcast_in_dim(x, common_shape, dims)
else:
raise RuntimeError(
"Unexpected type when broadcasting: " + str(type(x)) + "!"
)
return tuple(__maybe_broadcast(x, common_shape) for x in args)
# Utilities should come BEFORE this import
from torch._decomp import register_decomposition
#
# Elementwise unary references
#
infer_aten_op = object()
# TODO: add type promotion support
def _make_elementwise_unary_reference(
prim: Callable, *, type_promotion_kind, aten_op=infer_aten_op
) -> Callable:
@out_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",), type_promotion_kind=type_promotion_kind
)
def _ref(a: Tensor) -> Tensor:
return prim(a)
if aten_op is infer_aten_op:
aten_op = getattr(torch.ops.aten, prim.__name__)
if aten_op is not None:
register_decomposition(aten_op)(_ref)
return _ref
abs = _make_elementwise_unary_reference(
prims.abs, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT
)
acos = _make_elementwise_unary_reference(
prims.acos, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
acosh = _make_elementwise_unary_reference(
prims.acosh, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
asin = _make_elementwise_unary_reference(
prims.asin, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
atan = _make_elementwise_unary_reference(
prims.atan, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
ceil = _make_elementwise_unary_reference(
prims.ceil, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
cos = _make_elementwise_unary_reference(
prims.cos, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
cosh = _make_elementwise_unary_reference(
prims.cosh, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
digamma = _make_elementwise_unary_reference(
prims.digamma, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
erf = _make_elementwise_unary_reference(
prims.erf, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
erfinv = _make_elementwise_unary_reference(
prims.erf_inv, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
aten_op=torch.ops.aten.erfinv, # prim/aten name mismatch
)
erfc = _make_elementwise_unary_reference(
prims.erfc, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
exp = _make_elementwise_unary_reference(
prims.exp, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
expm1 = _make_elementwise_unary_reference(
prims.expm1, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
floor = _make_elementwise_unary_reference(
prims.floor, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
isfinite = _make_elementwise_unary_reference(
prims.is_finite, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=None, # CompositeImplicitAutograd
)
def _isnan(a: Tensor) -> Tensor:
return prims.ne(a, a)
isnan = _make_elementwise_unary_reference(
_isnan, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=torch.ops.aten.isnan, # prim/aten name mismatch
)
lgamma = _make_elementwise_unary_reference(
prims.lgamma, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
log = _make_elementwise_unary_reference(
prims.log, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
log1p = _make_elementwise_unary_reference(
prims.log1p, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
neg = _make_elementwise_unary_reference(
prims.neg, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
reciprocal = _make_elementwise_unary_reference(
prims.reciprocal, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
# TODO: round takes additional kwargs
round = _make_elementwise_unary_reference(
prims.round, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=None, # TODO: this does need a decomp, but kwarg handling is needed
)
sign = _make_elementwise_unary_reference(
prims.sign, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
sin = _make_elementwise_unary_reference(
prims.sin, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
sinh = _make_elementwise_unary_reference(
prims.sinh, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
sqrt = _make_elementwise_unary_reference(
prims.sqrt, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
square = _make_elementwise_unary_reference(
prims.square, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG,
aten_op=None, # CompositeImplicitAutograd
)
tan = _make_elementwise_unary_reference(
prims.tan, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
def _make_elementwise_binary_reference(
prim: Callable, *, type_promotion_kind, aten_op=infer_aten_op
) -> Callable:
@out_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"), type_promotion_kind=type_promotion_kind
)
def _ref(
a: Union[Tensor, NumberType],
b: Union[Tensor, NumberType],
) -> Tensor:
a, b = _maybe_broadcast(a, b)
return prim(a, b)
if aten_op is infer_aten_op:
aten_op = getattr(torch.ops.aten, prim.__name__)
if aten_op is not None:
register_decomposition(aten_op)(_ref)
return _ref
# Add has its own implementation because it has an alpha argument
@out_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.OP_MATH,
)
def add(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
alpha: Optional[NumberType] = None,
):
"""
Reference implementation of torch.add
"""
a, b = _maybe_broadcast(a, b)
if alpha is not None:
dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr]
python_type = utils.dtype_to_type(dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
b = prims.mul(b, alpha)
return prims.add(a, b)
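# Illustrative usage of the alpha keyword (a sketch, assuming this module is importable as
# `refs` and that torch is available): add computes a + alpha * b.
#   >>> refs.add(torch.tensor([1., 2.]), torch.tensor([10., 10.]), alpha=0.5)
#   tensor([6., 7.])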
# TODO: add docstring
atan2 = _make_elementwise_binary_reference(
prims.atan2, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
# TODO: add docstring
bitwise_and = _make_elementwise_binary_reference(
prims.bitwise_and, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
bitwise_left_shift = _make_elementwise_binary_reference(
prims.shift_left, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.bitwise_left_shift, # prim/aten name mismatch
)
# TODO: add docstring
bitwise_or = _make_elementwise_binary_reference(
prims.bitwise_or, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
# TODO: add docstring
bitwise_right_shift = _make_elementwise_binary_reference(
prims.shift_right_arithmetic,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.bitwise_right_shift, # prim/aten name mismatch
)
# TODO: add docstring
bitwise_xor = _make_elementwise_binary_reference(
prims.bitwise_xor, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
# TODO: add docstring
# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
# TODO: add docstring
eq = _make_elementwise_binary_reference(
prims.eq, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
# TODO: add docstring
# Float power has its own implementation because it has unique type promotion.
# NB: aten_op not registered because CompositeExplicitAutograd
@out_wrapper
def float_power(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
) -> Tensor:
# Handles type promotion
dtype = utils.get_higher_dtype(a, b)
assert dtype is not None
if utils.is_complex_dtype(dtype):
dtype = torch.complex128
else:
dtype = torch.float64
a = _maybe_convert_to_dtype(a, dtype=dtype) # type: ignore[assignment]
b = _maybe_convert_to_dtype(b, dtype=dtype) # type: ignore[assignment]
a, b = _maybe_broadcast(a, b)
return prims.pow(a, b)
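# Illustrative behaviour (a sketch, not an exhaustive spec): float_power always computes in
# double precision (complex128 for complex inputs), matching torch.float_power.
#   >>> refs.float_power(torch.tensor([2, 3]), 2)
#   tensor([4., 9.], dtype=torch.float64)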
# TODO: add docstring
ge = _make_elementwise_binary_reference(
prims.ge, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
# TODO: add docstring
gt = _make_elementwise_binary_reference(
prims.gt, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
igamma = _make_elementwise_binary_reference(
prims.igamma, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
igammac = _make_elementwise_binary_reference(
prims.igammac, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
# TODO: add docstring
le = _make_elementwise_binary_reference(
prims.le, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
# TODO: add docstring
lt = _make_elementwise_binary_reference(
prims.lt, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
# TODO: add docstring
maximum = _make_elementwise_binary_reference(
prims.max,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.maximum, # prim/aten name mismatch
)
# TODO: add docstring
minimum = _make_elementwise_binary_reference(
prims.min,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.minimum, # prim/aten name mismatch
)
# TODO: add docstring
mul = _make_elementwise_binary_reference(
prims.mul,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.OP_MATH,
)
# TODO: add docstring
ne = _make_elementwise_binary_reference(
prims.ne, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL
)
# TODO: add docstring
nextafter = _make_elementwise_binary_reference(
prims.nextafter, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
# TODO: add docstring
pow = _make_elementwise_binary_reference(
prims.pow, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG
)
# TODO: add docstring
# TODO: consider refactoring this with add impl
# sub has its own implementation because it has an alpha argument
@register_decomposition(torch.ops.aten.sub)
@out_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.OP_MATH,
)
def sub(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
alpha: Optional[NumberType] = None,
):
"""
Reference implementation of torch.sub
"""
a, b = _maybe_broadcast(a, b)
if alpha is not None:
dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr]
python_type = utils.dtype_to_type(dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
b = prims.mul(b, alpha)
return prims.sub(a, b)
# TODO: add docstring
true_divide = _make_elementwise_binary_reference(
prims.div,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
aten_op=None, # CompositeImplicitAutograd
)
#
# Conditional references
#
# https://pytorch.org/docs/stable/generated/torch.where.html
# TODO: implement alternate where
@register_decomposition(torch.ops.aten.where)
@out_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def where(
pred: Tensor,
a: Optional[Union[TensorLikeType, NumberType]] = None,
b: Optional[Union[TensorLikeType, NumberType]] = None,
):
""" """
if a is None or b is None:
raise NotImplementedError
pred, a, b = _maybe_broadcast(pred, a, b)
return prims.select(pred, a, b)
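# Illustrative usage (a sketch): elements are taken from `a` where `pred` is True, otherwise
# from `b`, after broadcasting all three arguments.
#   >>> refs.where(torch.tensor([True, False]), torch.tensor([1., 2.]), torch.tensor([10., 20.]))
#   tensor([ 1., 20.])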
#
# Data Movement References
#
def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True):
if not allow_cross_device and a.device != b.device:
msg = "Attempting to copy from device {0} to device {1}, but cross-device copies are not allowed!".format(
b.device, a.device
)
raise RuntimeError(msg)
return prims.copy_to(a, b)
#
# Reduction references
#
def
# Repository: biergaiqiao/Oriole-Thwarting-Privacy-against-Trustworthy-Deep-Learning-Models
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 5 11:11:19 2020
@author: 一叶之秋
"""
import argparse
import glob
import logging
import os
import sys
import tensorflow as tf
logging.getLogger('tensorflow').disabled = True
import numpy as np
from fawkes.differentiator import FawkesMaskGeneration
from fawkes.utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \
Faces, filter_image_paths,filter_image_paths_CLQ,resize,select_target_label_CLQ
import F_align_face
from fawkes.utils import get_file
from keras.preprocessing import image
import store_viariable
import function
import test_photo_points
class parser:
def __init__(self,directory = 'imgs/',gpu = 0,mode = 'min',feature_extractor = 'high_extract',
th=0.01,max_step=1000,sd=1e9,lr=2,batch_size=1,separate_target=False,
no_align = False,debug=False,format='png',DEEP=1,power=20):
self.directory = directory
self.gpu = gpu
self.mode = mode
self.feature_extractor = feature_extractor
self.th = th
self.max_step = max_step
self.sd = sd
self.lr = lr
self.batch_size = batch_size
self.separate_target = separate_target
self.no_align = no_align
self.debug = debug
self.format = format
self.DEEP = DEEP
self.power = power
def generate_cloak_images(protector, image_X, target_emb=None):
cloaked_image_X = protector.attack(image_X, target_emb)
return cloaked_image_X
class Fawkes(object):
#@JoeyChen prepare model and feature extractors
def __init__(self, feature_extractor, gpu, batch_size):
self.feature_extractor = feature_extractor
self.gpu = gpu
self.batch_size = batch_size
global sess
sess = init_gpu(gpu,force=True)
global graph
##Clears the default graph stack and resets the global default graph.
graph = tf.get_default_graph()
model_dir = os.path.join(os.path.expanduser(store_viariable.PATH), '.fawkes')
if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")):
os.makedirs(model_dir, exist_ok=True)  # exist_ok=True: no error if the directory already exists
get_file("mtcnn.p.gz", "http://mirror.cs.uchicago.edu/fawkes/files/mtcnn.p.gz", cache_dir=model_dir,
cache_subdir='')
self.fs_names = [feature_extractor]
if isinstance(feature_extractor, list):
self.fs_names = feature_extractor
self.aligner = F_align_face.aligner(sess)
self.feature_extractors_ls = [load_extractor(name) for name in self.fs_names]
self.protector = None
self.protector_param = None
def mode2param(self, mode):
if mode == 'min':
th = 0.002
max_step = 20
lr = 40
elif mode == 'low':
th = 0.003
max_step = 50
lr = 35
elif mode == 'mid':
th = 0.005
max_step = 200
lr = 20
elif mode == 'high':
th = 0.008
max_step = 500
lr = 10
elif mode == 'ultra':
if not tf.test.is_gpu_available():
print("Please enable GPU for ultra setting...")
sys.exit(1)
th = 0.01
max_step = 1000
lr = 8
else:
raise Exception("mode must be one of 'min', 'low', 'mid', 'high', 'ultra', 'custom'")
return th, max_step, lr
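# Illustrative mapping (read directly from mode2param above):
#   mode2param('min') -> (0.002, 20, 40)    mode2param('mid') -> (0.005, 200, 20)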
def run_protection_CLQ(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1,
                       format='png', separate_target=True, debug=False, no_align=False, power=20, DEEP=1):
"""
Used to produce the first twenty cloaked versions of a single image.
"""
if mode == 'custom':
pass
else:
th, max_step, lr = self.mode2param(mode)
current_param = "-".join([str(x) for x in [mode, th, sd, lr, max_step, batch_size, format,
separate_target, debug]])
image_paths, loaded_images = filter_image_paths_CLQ(image_paths,power = power,mode=mode,th = th)
numbers = len(image_paths) // power
cur_image_paths = []
cur_loaded_images = []
for i in range(numbers):
cur_image_paths.extend(image_paths[power * i :(i+1) * power])
cur_loaded_images.extend(loaded_images[power * i:(i+1) * power])
# image_paths, loaded_images = filter_image_paths(image_paths)
if not cur_image_paths:
print("No images in the directory")
return 3
with graph.as_default():
faces = Faces(cur_image_paths, cur_loaded_images, self.aligner, verbose=1, no_align=no_align)
original_images = faces.cropped_faces####original_images.shape = (1,224,224,3)
if len(original_images) == 0:
print("No face detected. ")
return 2
###type(original_images) = 'numpy.ndarray'
original_images = np.array(original_images)###original_images.shape = (1,224,224,3)
with sess.as_default():
if separate_target:
target_embedding = []
index = 0
for org_img in original_images:
org_img = org_img.reshape([1] + list(org_img.shape))
tar_emb = select_target_label_CLQ(org_img, self.feature_extractors_ls, self.fs_names,separate_target = separate_target,index = index % power,power = power)
target_embedding.append(tar_emb)
index = (index + 1) % power
target_embedding = np.concatenate(target_embedding)
else:
target_embedding = select_target_label_CLQ(original_images, self.feature_extractors_ls, self.fs_names,separate_target = separate_target,index = 20,power = power)
if current_param != self.protector_param:
self.protector_param = current_param
if self.protector is not None:
del self.protector
self.protector = FawkesMaskGeneration(sess, self.feature_extractors_ls,
batch_size=batch_size,
mimic_img=True,
intensity_range='imagenet',
initial_const=sd,
learning_rate=lr,
max_iterations=max_step,
l_threshold=th,
verbose=1 if debug else 0,
maximize=False,
keep_final=False,
image_shape=(224, 224, 3))
protected_images = generate_cloak_images(self.protector, original_images,
target_emb=target_embedding)
#original_images = original_images[0:self.power]
#image_paths = image_paths[0:self.power]
faces.cloaked_cropped_faces = protected_images
final_images = faces.merge_faces(reverse_process_cloaked(protected_images),
reverse_process_cloaked(original_images))
index = 0
for i,(p_img, path) in enumerate(zip(final_images, cur_image_paths)):
"""long_size = max([len(loaded_images[i]),len(loaded_images[i][0])])
im_data = image.array_to_img(p_img).resize((long_size, long_size))  # use the image module's functions to resize the picture back to 244*244
im_data = image.img_to_array(im_data)
p_img = np.zeros((len(loaded_images[i]),len(loaded_images[i][0])))
p_img = im_data[0:len(loaded_images[i]),0:len(loaded_images[i][0]),:]
"""
path = ".".join(path.split(".")[:-1]) + "_{}_.jpg".format(index)
file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
dump_image(p_img, file_name, format=format)
index = (index + 1) % 20
print("Done!")
return True
def run_protection(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
separate_target=True, debug=False, no_align=False):
if mode == 'custom':
pass
else:
th, max_step, lr = self.mode2param(mode)
current_param = "-".join([str(x) for x in [mode, th, sd, lr, max_step, batch_size, format,
separate_target, debug]])
image_paths, loaded_images = filter_image_paths(image_paths)
if not image_paths:
print("No images in the directory")
return 3
with graph.as_default():
faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
original_images = faces.cropped_faces####original_images.shape = (1,224,224,3)
if len(original_images) == 0:
print("No face detected. ")
return 2
###type(original_images) = 'numpy.ndarray'
original_images = np.array(original_images)###original_images.shape = (1,224,224,3)
## compute the feature embeddings
#points = function.compute_points(original_images,self.feature_extractors_ls)
with sess.as_default():
if separate_target:
target_embedding = []
for org_img in original_images:
org_img = org_img.reshape([1] + list(org_img.shape))
tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
target_embedding.append(tar_emb)
target_embedding = np.concatenate(target_embedding)
else:
target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)
if current_param != self.protector_param:
self.protector_param = current_param
if self.protector is not None:
del self.protector
self.protector = FawkesMaskGeneration(sess, self.feature_extractors_ls,
batch_size=batch_size,
mimic_img=True,
intensity_range='imagenet',
initial_const=sd,
learning_rate=lr,
max_iterations=max_step,
l_threshold=th,
verbose=1 if debug else 0,
maximize=False,
keep_final=False,
image_shape=(224, 224, 3))
### the resulting protected_images hold the cloaked face crops
protected_images = generate_cloak_images(self.protector, original_images,
target_emb=target_embedding)
faces.cloaked_cropped_faces = protected_images
final_images = faces.merge_faces(reverse_process_cloaked(protected_images),
reverse_process_cloaked(original_images))
for i,(p_img, path) in enumerate(zip(final_images, image_paths)):
"""long_size = max([len(loaded_images[i]),len(loaded_images[i][0])])
im_data = image.array_to_img(p_img).resize((long_size, long_size))  # use the image module's functions to resize the picture back to 244*244
im_data = image.img_to_array(im_data)
p_img = np.zeros((len(loaded_images[i]),len(loaded_images[i][0])))
p_img = im_data[0:len(loaded_images[i]),0:len(loaded_images[i][0]),:]"""
file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
dump_image(p_img, file_name, format=format)
print("Done!")
return 1
def main(*argv):
if not argv:
argv = list(sys.argv)
try:
import signal
signal.signal(signal.SIGTERM, signal.SIG_DFL)
except Exception as e:
pass
parser = argparse.ArgumentParser()
parser.add_argument('--power',help='cloak the most longest powerth target version',type=int,default=1)
parser.add_argument('--DEEP', help = 'the depth of the folder',type = int,default=1)
parser.add_argument('--directory', '-d', type=str,
help='the directory that contains images to run protection', default='imgs/')
parser.add_argument('--gpu', '-g', type=str,
help='the GPU id when using GPU for optimization', default='0')
parser.add_argument('--mode', '-m', type=str,
help='cloak generation mode, select from min, low, mid, high. The higher the mode is, the more perturbation added and stronger protection',
default='min')
parser.add_argument('--feature-extractor', type=str,
help="name of the feature extractor used for optimization, currently only support high_extract",
default="high_extract")
parser.add_argument('--th', help='only relevant with mode=custom, DSSIM threshold for perturbation', type=float,
default=0.002)
parser.add_argument('--max-step', help='only relevant with mode=custom, number of steps for optimization', type=int,
default=1000)
parser.add_argument('--sd', type=int, help='only relevant with mode=custom, penalty number, read more in the paper',
default=1e9)
parser.add_argument('--lr', type=float, help='only relevant with mode=custom, learning rate', default=2)
parser.add_argument('--batch-size', help="number of images to run optimization together", type=int, default=1)
parser.add_argument('--separate_target', help="whether select separate targets for each faces in the directory",
action='store_true')
parser.add_argument('--no-align', help="whether to detect and crop faces",
action='store_true')
parser.add_argument('--debug', help="turn on debug and copy/paste the stdout when reporting an issue on github",
action='store_true')
parser.add_argument('--format', type=str,
help="format of the output image",
default="png")
argv = parser.parse_args(argv[1:])
## Parameter input: the original project read its parameters from the command line; here it was changed to take them from the `parser` class above.
#path = r'C:\Users\一叶之秋\Pictures\1\1'
#path = r'D:\dataset\pubfig\fawkes_pub160\pub_final_usefull_split_10\PCA\Oriole\n000000'
#argv = parser(path,gpu = 0,batch_size = 1,mode = 'min',power=1,DEEP = 1,th=0.002)
assert argv.format in ['png', 'jpg', 'jpeg']
if argv.format == 'jpg':
argv.format = 'jpeg'
### collect all image files (with their directories) that need to be cloaked
### this block manually determines the folder in which the cloaks are stored
if argv.DEEP == 1:
## create the folder for storing the cloaks
temp = '\\'.join(argv.directory.split('\\')[:-2])
if argv.power == 1:
name = "{}_{}_{}{}_cloaks\\{}".format(argv.directory.split('\\')[-2],'fawkes',argv.mode,int(argv.th*1000),argv.directory.split('\\')[-1])
else:
name = '{}_{}_{}{}_cloaks\\{}'.format(argv.directory.split('\\')[-2],'oriole',argv.mode,int(argv.th*1000),argv.directory.split('\\')[-1])
path_cloaked = os.path.join(temp,name)
print(path_cloaked)
if not os.path.exists(path_cloaked):
os.makedirs(path_cloaked,exist_ok=True)
image_paths_list = []
image_paths= glob.glob(os.path.join(argv.directory, "*"))
### keep only the image paths that have not been cloaked yet
image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]]
protector = Fawkes(argv.feature_extractor, argv.gpu, argv.batch_size)
##pathO = r'C:\Users\一叶之秋\Pictures\fawkes\exam\custom0.01\origin_queen.jpg'
##pachC = r'C:\Users\一叶之秋\Pictures\fawkes\exam\custom0.01\origin_queen_custom_cloaked.jpeg'
#@JoeyChen: cloak with a single mode, or run all four modes
if argv.mode == 'all':
for mode in ['min', 'low', 'mid', 'high']:
protector.run_protection_CLQ(image_paths, mode=mode, th=argv.th, sd=argv.sd, lr=argv.lr,
max_step=argv.max_step,
batch_size=argv.batch_size, format=argv.format,
separate_target=argv.separate_target, debug=argv.debug, no_align=argv.no_align,power=argv.power,DEEP = argv.DEEP)
else:
protector.run_protection_CLQ(image_paths, mode=argv.mode, th=argv.th, sd=argv.sd, lr=argv.lr,
max_step=argv.max_step,
batch_size=argv.batch_size, format=argv.format,
separate_target=argv.separate_target, debug=argv.debug, no_align=argv.no_align,power = argv.power,DEEP = argv.DEEP,)
else:
## create the folder for storing the cloaks
temp = '\\'.join(argv.directory.split('\\')[:-1])
if argv.power == 1:
name = "{}_{}_{}{}_cloaks".format(argv.directory.split('\\')[-1],argv.mode,int(argv.th*1000),'fawkes')
else:
name = '{}_{}_{}{}_cloaks'.format(argv.directory.split('\\')[-1],argv.mode,int(argv.th*1000),'oriole')
path_cloaked = os.path.join(temp,name)
if not os.path.exists(path_cloaked):
os.makedirs(path_cloaked,exist_ok=True)
root_image_paths = glob.glob(os.path.join(argv.directory, "*"))
image_paths = []
for i in range(len(root_image_paths)):
temp_dir = root_image_paths[i]
temp_dir_lt = temp_dir.split('\\')
temp_dir_lt[-2] = name
temp_dir = '\\'.join(temp_dir_lt)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir,exist_ok = True)
image_paths_list = glob.glob(os.path.join(root_image_paths[i],"*"))
image_paths_list = [path for path in image_paths_list if "_cloaked" not in path.split("/")[-1]]
image_paths.extend(image_paths_list)
protector = Fawkes(argv.feature_extractor, argv.gpu, argv.batch_size)
##pathO = r'C:\Users\一叶之秋\Pictures\fawkes\exam\custom0.01\origin_queen.jpg'
##pachC = r'C:\Users\一叶之秋\Pictures\fawkes\exam\custom0.01\origin_queen_custom_cloaked.jpeg'
#@JoeyChen: cloak with a single mode, or run all four modes
if argv.mode == 'all':
for mode in ['min', 'low', 'mid', 'high']:
protector.run_protection_CLQ(image_paths,
from __future__ import absolute_import, division, print_function
from builtins import range
import atexit
import glob
import hashlib
import math
import os
import re
import shutil
import sys
import time
import traceback
import warnings
from libtbx.queuing_system_utils import pbs_utils, sge_utils
from libtbx.math_utils import round2
from libtbx.str_utils import show_string
try: import gzip
except ImportError: gzip = None
try: import bz2
except ImportError: bz2 = None
hashlib_md5 = hashlib.md5
op = os.path
windows_device_names = """\
CON PRN AUX NUL COM1 COM2 COM3 COM4 COM5 COM6 COM7 COM8 COM9
LPT1 LPT2 LPT3 LPT4 LPT5 LPT6 LPT7 LPT8 LPT9""".split()
def xfrange(start, stop=None, step=None, tolerance=None):
"""
A float range generator.
Parameters
----------
start : float
stop : float, optional
If empty, start at 0 and stop at the start parameter.
step : float, optional
tolerance : float, optional
Returns
-------
generator of float
"""
if stop is None:
stop = start + 0.0
start = 0.0
else:
start += 0.0 # force it to be a float
if step is None:
step = 1.0
else:
assert step != 0.0
count = int(math.ceil((stop - start) / step))
if ( tolerance is not None
and abs(start + count * step - stop) < abs(step * tolerance)):
count += 1
for i in range(count):
yield start + i * step
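# Illustrative usage (a sketch):
#   >>> list(xfrange(0, 1, 0.25))
#   [0.0, 0.25, 0.5, 0.75]
#   >>> list(xfrange(0, 1, 0.25, tolerance=1e-6))   # the tolerance lets the endpoint be included
#   [0.0, 0.25, 0.5, 0.75, 1.0]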
def safe_div(a,b):
if abs(b) < 1e-8:
return 0
else:
return a/b
def frange(start, stop=None, step=None):
"""
Non-generator version of xfrange.
Parameters
----------
start : float
stop : float, optional
If empty, start at 0 and stop at the start parameter.
step : float, optional
Returns
-------
list of float
See Also
--------
libtbx.utils.xfrange
"""
return list(xfrange(start, stop=stop, step=step))
def xsamples(start, stop=None, step=None, tolerance=1e-6):
"""
Wraps xfrange, acts identically.
Parameters
----------
start : float
stop : float, optional
If empty, start at 0 and stop at the start parameter.
step : float, optional
tolerance : float, optional
Returns
-------
generator of float
See Also
--------
libtbx.utils.xfrange
"""
return xfrange(start, stop, step, tolerance)
def samples(start, stop=None, step=None, tolerance=1e-6):
"""
Non-generator version of xsamples.
Parameters
----------
start : float
stop : float, optional
If empty, start at 0 and stop at the start parameter.
step : float, optional
tolerance : float, optional
Returns
-------
list of float
See Also
--------
libtbx.utils.xfrange, libtbx.utils.xsamples
"""
return list(xsamples(start, stop, step, tolerance))
def escape_sh_double_quoted(s):
"""
The result is supposed to be double-quoted when passed to sh.
"""
if (s is None): return None
return s.replace('\\','\\\\').replace('"','\\"')
def xlen(seq):
"""
Returns the length of a sequence or None.
Parameters
----------
seq : iterable or None
Returns
-------
int or None
"""
if (seq is None): return seq
return len(seq)
def product(seq):
"""
Calculates the result of multiplying all elements of a sequence together.
Parameters
----------
seq : iterable
"""
result = None
for val in seq:
if (result is None):
result = val
else:
result *= val
return result
def sequence_index_dict(seq, must_be_unique=True):
"""
Builds a dictionary for each element in seq mapped to its index in the sequence.
Parameters
----------
seq : iterable of object
must_be_unique : bool, optional
Returns
-------
dict of object, int
Examples
--------
>>> libtbx.utils.sequence_index_dict(['a', 'b'])
{'a': 0, 'b': 1}
"""
result = {}
for i, elem in enumerate(seq):
if must_be_unique:
assert elem not in result
result[elem] = i
return result
def number_from_string(string):
"""
Tries to covert a string into an integer, using builtin int() as well as eval().
Parameters
----------
string : str
Returns
-------
int
Raises
------
ValueError
If string cannot be converted into an integer.
"""
# similar to libtbx.phil.number_from_value_string
# (please review if making changes here)
if (string.lower() in ["true", "false"]):
raise ValueError(
'Error interpreting "%s" as a numeric expression.' % string)
try: return int(string)
except ValueError: pass
try: return eval(string, math.__dict__, {})
except KeyboardInterrupt: raise
except Exception:
raise ValueError(
'Error interpreting "%s" as a numeric expression: %s' % (
string, format_exception()))
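# Illustrative usage (a sketch): plain integers go through int(); anything else is evaluated
# in a math namespace.
#   >>> number_from_string("42")
#   42
#   >>> number_from_string("2*pi")
#   6.283185307179586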
def gzip_open(file_name, mode):
"""
Wraps gzip.open to open a .gz file.
Parameters
----------
file_name : str
mode : str
Returns
-------
file
Raises
------
RuntimeError
If gzip is not available.
"""
assert mode in ["r", "rb", "w", "wb", "a", "ab"]
if (gzip is None):
un = ""
if (mode[0] == "r"): un = "un"
raise RuntimeError(
"gzip module not available: cannot %scompress file %s"
% (un, show_string(file_name)))
return gzip.open(file_name, mode)
def bz2_open(file_name, mode):
"""
Wraps bz2.open to open a .bz2 file.
Parameters
----------
file_name : str
mode : str
Returns
-------
file
Raises
------
RuntimeError
If bz2 is not available.
"""
assert mode in ('r', 'w')
if bz2 is None:
raise RuntimeError('bz2 module not available: cannot %scompress file %s'
% ({'r':'un', 'w':''}[mode], file_name))
return bz2.BZ2File(file_name, mode)
def warn_if_unexpected_md5_hexdigest(
path,
expected_md5_hexdigests,
hints=[],
out=None):
"""
Checks the md5 hash of a file to see if it matches the expected hash.
Parameters
----------
path : str
expected_md5_hexdigests : list of str
hints : list of str, optional
out : file, optional
Returns
-------
bool
False if md5 hash of file does not appear in expected_md5_hexdigests.
"""
m = hashlib.md5()
m.update("\n".join(open(path).read().splitlines()).encode('utf-8'))
current_md5_hexdigest = m.hexdigest()
if (m.hexdigest() in expected_md5_hexdigests): return False
warning = "Warning: unexpected md5 hexdigest:"
file_name = " File: %s" % show_string(path)
new_hexdigest = " New md5 hexdigest: %s" % m.hexdigest()
width = max([len(s) for s in [warning, file_name, new_hexdigest]])
if (out is None): out = sys.stdout
print("*"*width, file=out)
print(warning, file=out)
print(file_name, file=out)
print(new_hexdigest, file=out)
for hint in hints:
print(hint, file=out)
print("*"*width, file=out)
return True
def md5_hexdigest(filename=None, blocksize=256):
""" Compute the MD5 hexdigest of the content of the given file,
efficiently even for files much larger than the available RAM.
The file is read by chunks of `blocksize` MB.
"""
blocksize *= 1024**2
m = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(blocksize)
while buf:
m.update(buf)
buf = f.read(blocksize)
return m.hexdigest()
def get_memory_from_string(mem_str):
"""
Converts a string of a memory or file size (i.e. "10G") into a number.
Parameters
----------
mem_str : int or float
Returns
-------
int or float
Examples
--------
>>> libtbx.utils.get_memory_from_string("10G")
10737418240.0
>>> libtbx.utils.get_memory_from_string("10M")
10485760.0
Raises
------
RuntimeError
"""
if type(mem_str) in [type(1), type(1.)]: return mem_str
mem_str = mem_str.replace(" ","").strip().upper()
if mem_str == "": return 0
factor=1024
for i, greek in enumerate(["K","M","G","T","E","Z","Y"]):
num_str=None
if mem_str[-1]==greek:
num_str = mem_str[:-1]
if mem_str.find("%sB" % greek)==len(mem_str)-2:
num_str = mem_str[:-2]
if num_str is not None:
try:
num = float(num_str)
except ValueError:
raise RuntimeError("""
The numerical portion of %s is not a valid float
""" % mem_str)
break
factor*=1024
else:
try:
num = int(mem_str)
except ValueError:
raise RuntimeError("""
There is no memory unit or valid float in %s
""" % mem_str)
factor=1
return num*factor
def getenv_bool(variable_name, default=False):
"""
Checks the environment variables for variable, returning it as a boolean.
Parameters
----------
variable_name : str
default : bool, optional
Returned if variable_name is not found.
Returns
-------
bool
"""
value = os.environ.get(variable_name, None)
if (value is None): return default
value_lower = value.lower()
if (value_lower not in ["false", "true", "0", "1"]):
raise Sorry(
'Environment variable %s must be "True", "False", "0", or "1"'
' (current value: "%s").' % (variable_name, value))
return (value_lower in ["true", "1"])
def file_size(file_name):
"""
Wraps os.stat to calculate a file's size.
Parameters
----------
file_name : str
Returns
-------
int : size of file, in bytes
"""
return os.stat(file_name).st_size
def copy_file(source, target, compress=None):
"""
Copies a file from source to target, optionally compressing it before writing
it out.
Parameters
----------
source : str
target : str
compress : str, optional
The compression algorithm to use. Currently only ".gz" is supported. If
set, target becomes target + compress.
"""
assert op.isfile(source)
if (op.isdir(target)):
target = op.join(target, op.basename(source))
if (compress is None):
t = open(target, "wb")
else:
assert compress == ".gz"
t = gzip_open(file_name=target+compress, mode="wb")
t.write(open(source, "rb").read())
del t
def remove_files(pattern=None, paths=None, ensure_success=True):
"""
Removes a file from disk.
Parameters
----------
pattern : str, optional
paths : iterable of str, optional
ensure_success : bool, optional
"""
assert [pattern, paths].count(None) == 1
if (paths is None):
paths = glob.glob(pattern)
for path in paths:
if (ensure_success):
if (op.exists(path)):
os.remove(path)
if (op.exists(path)):
raise RuntimeError("Cannot remove file: %s" % show_string(path))
else:
if (op.isfile(path)):
os.remove(path)
def find_files (dir_name, pattern="*", files_only=True) :
"""
Find files matching a pattern
#!/usr/bin/env python
# coding: utf-8
# ***
# ### **I hope you find this kernel useful and your <font color="red"><b>UPVOTES</b></font> would be highly appreciated**
# ***
# # Decision Tree & Random Forest
#
# # 1. Decision Tree
#
# 
#
# Decision Trees are an important type of algorithm for predictive modeling machine learning.
#
# The classical decision tree algorithms have been around for decades and modern variations like random forest are among the most powerful techniques available.
#
# Classification and Regression Trees or `CART` for short is a term introduced by `<NAME>` to refer to Decision Tree algorithms that can be used for classification or regression predictive modeling problems.
#
# Classically, this algorithm is referred to as “`decision trees`”, but on some platforms like R they are referred to by the more modern term CART.
#
# The `CART` algorithm provides a foundation for important algorithms like `bagged decision trees`, `random forest` and `boosted decision trees`.
#
# ### CART Model Representation
# The representation for the CART model is a binary tree.
#
# This is your binary tree from algorithms and data structures, nothing too fancy. Each internal (split) node represents a single input variable (x) and a split point on that variable (assuming the variable is numeric).
#
# The leaf nodes of the tree contain an output variable (y) which is used to make a prediction.
#
# Given a new input, the tree is traversed by evaluating that input against each split point, starting at the root node of the tree.
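# As a minimal illustrative sketch (not part of the original analysis, assumes scikit-learn is
# available), the cell below fits a tiny CART on the iris data and prints exactly this kind of
# binary tree of split points and leaf predictions.
# In[ ]:
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_text
toy_X, toy_y = load_iris(return_X_y=True)
toy_cart = DecisionTreeClassifier(max_depth=2, random_state=0).fit(toy_X, toy_y)
print(export_text(toy_cart, feature_names=["sepal length", "sepal width", "petal length", "petal width"]))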
#
# #### Some **advantages** of decision trees are:
# * Simple to understand and to interpret. Trees can be visualised.
# * Requires little data preparation.
# * Able to handle both numerical and categorical data.
# * Possible to validate a model using statistical tests.
# * Performs well even if its assumptions are somewhat violated by the true model from which the data were generated.
#
# #### The **disadvantages** of decision trees include:
# * Overfitting. Mechanisms such as pruning (not currently supported), setting the minimum number of samples required at a leaf node or setting the maximum depth of the tree are necessary to avoid this problem.
# * Decision trees can be unstable. Mitigant: Use decision trees within an ensemble.
# * Cannot guarantee to return the globally optimal decision tree. Mitigant: Training multiple trees in an ensemble learner
# * Decision tree learners create biased trees if some classes dominate. Recommendation: Balance the dataset prior to fitting
#
# # 2. Random Forest
# Random Forest is one of the most popular and most powerful machine learning algorithms. It is a type of ensemble machine learning algorithm called Bootstrap Aggregation or bagging.
# 
# To improve performance of Decision trees, we can use many trees with a random sample of features chosen as the split.
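# A rough illustrative sketch (assuming scikit-learn is available, toy data only): a single tree
# versus a bagged forest of randomized trees, scored with cross-validation.
# In[ ]:
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
toy_X, toy_y = load_iris(return_X_y=True)
print("single tree  :", cross_val_score(DecisionTreeClassifier(random_state=0), toy_X, toy_y, cv=5).mean())
print("random forest:", cross_val_score(RandomForestClassifier(n_estimators=100, random_state=0), toy_X, toy_y, cv=5).mean())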
# # 3. Decision Tree & Random Forest Implementation in python
#
# We will use Decision Tree & Random Forest in Predicting the attrition of your valuable employees.
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
plt.style.use("fivethirtyeight")
# In[ ]:
df = pd.read_csv("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
df.head()
# ## 1. Exploratory Data Analysis
# In[ ]:
df.info()
# In[ ]:
pd.set_option("display.float_format", "{:.2f}".format)
df.describe()
# In[ ]:
df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis="columns", inplace=True)
# In[ ]:
categorical_col = []
for column in df.columns:
if df[column].dtype == object and len(df[column].unique()) <= 50:
categorical_col.append(column)
print(f"{column} : {df[column].unique()}")
print("====================================")
# In[ ]:
df['Attrition'] = df.Attrition.astype("category").cat.codes
# ## 2. Data Visualisation
# In[ ]:
df.Attrition.value_counts()
# In[ ]:
# Visualizing the distribution of the data for every feature
df.hist(edgecolor='black', linewidth=1.2, figsize=(20, 20))
# In[ ]:
# Plotting how every feature correlates with the "target"
sns.set(font_scale=1.2)
plt.figure(figsize=(30, 30))
for i, column in enumerate(categorical_col, 1):
plt.subplot(3, 3, i)
g = sns.barplot(x=f"{column}", y='Attrition', data=df)
g.set_xticklabels(g.get_xticklabels(), rotation=90)
plt.ylabel('Attrition Count')
plt.xlabel(f'{column}')
# **Conclusions:**
#
# ***
# - `BusinessTravel` : The workers who travel a lot are more likely to quit than other employees.
#
# - `Department` : The workers in `Research & Development` are more likely to stay than the workers in other departments.
#
# - `EducationField` : The workers with `Human Resources` and `Technical Degree` backgrounds are more likely to quit than employees from other fields of education.
#
# - `Gender` : The `Male` employees are more likely to quit.
#
# - `JobRole` : The workers in `Laboratory Technician`, `Sales Representative`, and `Human Resources` roles are more likely to quit than the workers in other positions.
#
# - `MaritalStatus` : The workers with `Single` marital status are more likely to quit than the `Married` and `Divorced` ones.
#
# - `OverTime` : The workers who work more hours are more likely to quit than others.
#
# ***
# ## 3. Correlation Matrix
# In[ ]:
plt.figure(figsize=(30, 30))
sns.heatmap(df.select_dtypes(include="number").corr(), cmap="RdYlGn", annot=False)  # illustrative completion: the original cell only created an empty figure
# ## 4. Data Processing
# In[ ]:
categorical_col.remove('Attrition')
# In[ ]:
# Transform categorical data into dummies
# categorical_col.remove("Attrition")
# data = pd.get_dummies(df, columns=categorical_col)
# data.info()
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
for column in categorical_col:
df[column] = label.fit_transform(df[column])
# In[ ]:
X = df.drop('Attrition', axis=1)
y = df.Attrition
# ## 5. Applying machine learning algorithms
# In[ ]:
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
print("Train Result:\n===========================================")
print(f"accuracy score: {accuracy_score(y_train, pred):.4f}\n")
print(f"Classification Report: \n \tPrecision: {precision_score(y_train, pred)}\n\tRecall Score: {recall_score(y_train, pred)}\n\tF1 score: {f1_score(y_train, pred)}\n")
print(f"Confusion Matrix: \n {confusion_matrix(y_train, clf.predict(X_train))}\n")
elif train==False:
pred = clf.predict(X_test)
print("Test Result:\n===========================================")
print(f"accuracy score: {accuracy_score(y_test, pred)}\n")
print(f"Classification Report: \n \tPrecision: {precision_score(y_test, pred)}\n\tRecall Score: {recall_score(y_test, pred)}\n\tF1 score: {f1_score(y_test, pred)}\n")
print(f"Confusion Matrix: \n {confusion_matrix(y_test, pred)}\n")
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# ### 5. 1. Decision Tree Classifier
#
# **Decision Tree parameters** (a short illustrative cell follows this list):
# - `criterion`: The function to measure the quality of a split. Supported criteria are "`gini`" for the Gini impurity and "`entropy`" for the information gain.
# ***
# - `splitter`: The strategy used to choose the split at each node. Supported strategies are "`best`" to choose the best split and "`random`" to choose the best random split.
# ***
# - `max_depth`: The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than `min_samples_split` samples.
# ***
# - `min_samples_split`: The minimum number of samples required to split an internal node.
# ***
# - `min_samples_leaf`: The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least ``min_samples_leaf`` training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression.
# ***
# - `min_weight_fraction_leaf`: The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.
# ***
# - `max_features`: The number of features to consider when looking for the best split.
# ***
# - `max_leaf_nodes`: Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes.
# ***
# - `min_impurity_decrease`: A node will be split if this split induces a decrease of the impurity greater than or equal to this value.
# ***
# - `min_impurity_split`: Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf.
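# A short illustrative cell (hypothetical parameter values, not tuned): capping `max_depth` and
# `min_samples_leaf` keeps the tree small, which is the usual first defence against overfitting.
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
shallow_tree = DecisionTreeClassifier(criterion="entropy", max_depth=3, min_samples_leaf=5, random_state=42)
shallow_tree.fit(X_train, y_train)
print("depth:", shallow_tree.get_depth(), "leaves:", shallow_tree.get_n_leaves())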
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(random_state=42)
tree.fit(X_train, y_train)
print_score(tree, X_train, y_train, X_test, y_test, train=True)
print_score(tree, X_train, y_train, X_test, y_test, train=False)
# ### 5. 2. Decision Tree Classifier Hyperparameter tuning
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
params = {
    "criterion": ("gini", "entropy"),
    "splitter": ("best", "random"),
    "max_depth": list(range(1, 20)),
    "min_samples_split": [2, 3, 4],
    "min_samples_leaf": list(range(1, 20)),
}
model = DecisionTreeClassifier(random_state=42)
grid_search_cv = GridSearchCV(model, params, scoring="accuracy", n_jobs=-1, verbose=1, cv=3)
# grid_search_cv.fit(X_train, y_train)
# In[ ]:
# grid_search_cv.best_estimator_
# In[ ]:
tree = DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='entropy',
                              max_depth=6, max_features=None, max_leaf_nodes=None,
                              min_impurity_decrease=0.0, min_impurity_split=None,
                              min_samples_leaf=10, min_samples_split=2,
                              min_weight_fraction_leaf=0.0, presort='deprecated',
                              random_state=42, splitter='best')
# In[ ]:
tree.fit(X_train, y_train)
# In[ ]:
print_score(tree, X_train, y_train, X_test, y_test, train=True)
print_score(tree, X_train, y_train, X_test, y_test, train=False)
# ### Visualization of a tree
# In[ ]:
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydot
features = list(df.columns)
features.remove("Attrition")
# In[ ]:
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data, feature_names=features, filled=True)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())
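# The pydot/graphviz route above needs Graphviz installed; as an illustrative alternative (not
# from the original notebook), scikit-learn's built-in plotter draws a similar picture.
# In[ ]:
from sklearn.tree import plot_tree
plt.figure(figsize=(24, 12))
plot_tree(tree, feature_names=features, filled=True, fontsize=8)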
# ### 5. 3. Random Forest
#
# A random forest is a meta estimator that fits a number of decision tree classifiers
tree produced by CSharp4Parser#struct_interfaces.
def exitStruct_interfaces(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#struct_body.
def enterStruct_body(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#struct_body.
def exitStruct_body(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#struct_member_declarations.
def enterStruct_member_declarations(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#struct_member_declarations.
def exitStruct_member_declarations(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#struct_member_declaration.
def enterStruct_member_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#struct_member_declaration.
def exitStruct_member_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#array_type121.
def enterArray_type121(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#array_type121.
def exitArray_type121(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#non_array_type121.
def enterNon_array_type121(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#non_array_type121.
def exitNon_array_type121(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#rank_specifiers.
def enterRank_specifiers(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#rank_specifiers.
def exitRank_specifiers(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#rank_specifier.
def enterRank_specifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#rank_specifier.
def exitRank_specifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#dim_separators.
def enterDim_separators(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#dim_separators.
def exitDim_separators(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#array_initializer.
def enterArray_initializer(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#array_initializer.
def exitArray_initializer(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#variable_initializer_list.
def enterVariable_initializer_list(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#variable_initializer_list.
def exitVariable_initializer_list(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_declaration.
def enterInterface_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_declaration.
def exitInterface_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_modifiers.
def enterInterface_modifiers(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_modifiers.
def exitInterface_modifiers(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_modifier.
def enterInterface_modifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_modifier.
def exitInterface_modifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#variant_type121_parameter_list.
def enterVariant_type121_parameter_list(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#variant_type121_parameter_list.
def exitVariant_type121_parameter_list(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#variant_type121_parameters.
def enterVariant_type121_parameters(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#variant_type121_parameters.
def exitVariant_type121_parameters(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#variance_annotation.
def enterVariance_annotation(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#variance_annotation.
def exitVariance_annotation(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_base.
def enterInterface_base(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_base.
def exitInterface_base(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_body.
def enterInterface_body(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_body.
def exitInterface_body(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_member_declarations.
def enterInterface_member_declarations(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_member_declarations.
def exitInterface_member_declarations(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_member_declaration.
def enterInterface_member_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_member_declaration.
def exitInterface_member_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_method_declaration.
def enterInterface_method_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_method_declaration.
def exitInterface_method_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_property_declaration.
def enterInterface_property_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_property_declaration.
def exitInterface_property_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_accessors.
def enterInterface_accessors(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_accessors.
def exitInterface_accessors(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_event_declaration.
def enterInterface_event_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_event_declaration.
def exitInterface_event_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#interface_indexer_declaration.
def enterInterface_indexer_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#interface_indexer_declaration.
def exitInterface_indexer_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_declaration.
def enterEnum_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_declaration.
def exitEnum_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_base.
def enterEnum_base(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_base.
def exitEnum_base(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_body.
def enterEnum_body(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_body.
def exitEnum_body(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_modifiers.
def enterEnum_modifiers(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_modifiers.
def exitEnum_modifiers(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_modifier.
def enterEnum_modifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_modifier.
def exitEnum_modifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_member_declarations.
def enterEnum_member_declarations(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_member_declarations.
def exitEnum_member_declarations(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#enum_member_declaration.
def enterEnum_member_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#enum_member_declaration.
def exitEnum_member_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#delegate_declaration.
def enterDelegate_declaration(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#delegate_declaration.
def exitDelegate_declaration(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#delegate_modifiers.
def enterDelegate_modifiers(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#delegate_modifiers.
def exitDelegate_modifiers(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#delegate_modifier.
def enterDelegate_modifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#delegate_modifier.
def exitDelegate_modifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#global_attributes.
def enterGlobal_attributes(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#global_attributes.
def exitGlobal_attributes(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#global_attribute_sections.
def enterGlobal_attribute_sections(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#global_attribute_sections.
def exitGlobal_attribute_sections(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#global_attribute_section.
def enterGlobal_attribute_section(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#global_attribute_section.
def exitGlobal_attribute_section(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#global_attribute_target_specifier.
def enterGlobal_attribute_target_specifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#global_attribute_target_specifier.
def exitGlobal_attribute_target_specifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#global_attribute_target.
def enterGlobal_attribute_target(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#global_attribute_target.
def exitGlobal_attribute_target(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attributes.
def enterAttributes(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attributes.
def exitAttributes(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_sections.
def enterAttribute_sections(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_sections.
def exitAttribute_sections(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_section.
def enterAttribute_section(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_section.
def exitAttribute_section(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_target_specifier.
def enterAttribute_target_specifier(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_target_specifier.
def exitAttribute_target_specifier(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_target.
def enterAttribute_target(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_target.
def exitAttribute_target(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_list.
def enterAttribute_list(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_list.
def exitAttribute_list(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute.
def enterAttribute(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute.
def exitAttribute(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_name.
def enterAttribute_name(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_name.
def exitAttribute_name(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#attribute_arguments.
def enterAttribute_arguments(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#attribute_arguments.
def exitAttribute_arguments(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#positional_argument_list.
def enterPositional_argument_list(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#positional_argument_list.
def exitPositional_argument_list(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#positional_argument.
def enterPositional_argument(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#positional_argument.
def exitPositional_argument(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#named_argument_list.
def enterNamed_argument_list(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#named_argument_list.
def exitNamed_argument_list(self, ctx):
pass
# Enter a parse tree produced by CSharp4Parser#named_argument.
def enterNamed_argument(self, ctx):
pass
# Exit a parse tree produced by CSharp4Parser#named_argument.
def exitNamed_argument(self, ctx):
pass
# File: swagger_client/apis/apps_api.py
# coding: utf-8
"""
fn
The open source serverless platform.
OpenAPI spec version: 0.2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AppsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
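# Illustrative usage (a sketch; assumes the generated models are importable and that the host
# configured in Configuration points at a running fn server):
#   api = AppsApi()
#   app = api.apps_app_get("myapp")                      # synchronous call
#   thread = api.apps_app_get("myapp", callback=print)   # asynchronous call, returns the request thread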
def apps_app_delete(self, app, **kwargs):
"""
Delete an app.
Delete an app.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_delete(app, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: Name of the app. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.apps_app_delete_with_http_info(app, **kwargs)
else:
(data) = self.apps_app_delete_with_http_info(app, **kwargs)
return data
def apps_app_delete_with_http_info(self, app, **kwargs):
"""
Delete an app.
Delete an app.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_delete_with_http_info(app, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: Name of the app. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['app']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method apps_app_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'app' is set
if ('app' not in params) or (params['app'] is None):
raise ValueError("Missing the required parameter `app` when calling `apps_app_delete`")
collection_formats = {}
path_params = {}
if 'app' in params:
path_params['app'] = params['app']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/apps/{app}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def apps_app_get(self, app, **kwargs):
"""
Get information for an app.
This gives more details about an app, such as statistics.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_get(app, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: name of the app. (required)
:return: AppWrapper
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.apps_app_get_with_http_info(app, **kwargs)
else:
(data) = self.apps_app_get_with_http_info(app, **kwargs)
return data
def apps_app_get_with_http_info(self, app, **kwargs):
"""
Get information for an app.
This gives more details about an app, such as statistics.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_get_with_http_info(app, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: name of the app. (required)
:return: AppWrapper
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['app']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method apps_app_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'app' is set
if ('app' not in params) or (params['app'] is None):
raise ValueError("Missing the required parameter `app` when calling `apps_app_get`")
collection_formats = {}
path_params = {}
if 'app' in params:
path_params['app'] = params['app']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/apps/{app}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppWrapper',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def apps_app_patch(self, app, body, **kwargs):
"""
Updates an app.
You can set app level settings here.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_patch(app, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: name of the app. (required)
:param AppWrapper body: App to post. (required)
:return: AppWrapper
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.apps_app_patch_with_http_info(app, body, **kwargs)
else:
(data) = self.apps_app_patch_with_http_info(app, body, **kwargs)
return data
def apps_app_patch_with_http_info(self, app, body, **kwargs):
"""
Updates an app.
You can set app level settings here.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_app_patch_with_http_info(app, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str app: name of the app. (required)
:param AppWrapper body: App to post. (required)
:return: AppWrapper
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['app', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method apps_app_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'app' is set
if ('app' not in params) or (params['app'] is None):
raise ValueError("Missing the required parameter `app` when calling `apps_app_patch`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `apps_app_patch`")
collection_formats = {}
path_params = {}
if 'app' in params:
path_params['app'] = params['app']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/apps/{app}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppWrapper',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def apps_get(self, **kwargs):
"""
Get all app names.
Get a list of all the apps in the system, returned in alphabetical order.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Cursor from previous response.next_cursor to begin results after, if any.
:param int per_page: Number of results to return, defaults to 30. Max of 100.
:return: AppsWrapper
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.apps_get_with_http_info(**kwargs)
else:
(data) = self.apps_get_with_http_info(**kwargs)
return data
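# Hedged pagination sketch (not part of the generated client): walks every page
# using the documented `cursor` and `per_page` kwargs. The `apps` and
# `next_cursor` attributes on the returned AppsWrapper are assumed from the
# docstring above, not confirmed by this file.
#
#   api = AppsApi()
#   cursor, names = None, []
#   while True:
#       page = api.apps_get(cursor=cursor, per_page=100) if cursor else api.apps_get(per_page=100)
#       names.extend(app.name for app in page.apps)
#       cursor = page.next_cursor
#       if not cursor:
#           break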
def apps_get_with_http_info(self, **kwargs):
"""
Get all app names.
Get a list of all the apps in the system, returned in alphabetical order.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.apps_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
"""
Copyright 2017 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
"""
# pylint: disable-msg=R0923,W0613
import logging
import typing
from typing import Dict, Iterator, List, Optional, Tuple
import inmanta.warnings as inmanta_warnings
from inmanta.ast import (
AttributeReferenceAnchor,
CompilerDeprecationWarning,
CompilerRuntimeWarning,
DuplicateException,
Import,
IndexException,
LocatableString,
Namespace,
NotFoundException,
Range,
RuntimeException,
TypeNotFoundException,
TypeReferenceAnchor,
TypingException,
)
from inmanta.ast.attribute import Attribute, RelationAttribute
from inmanta.ast.blocks import BasicBlock
from inmanta.ast.constraint.expression import Equals
from inmanta.ast.entity import Default, Entity, EntityLike, Implement, Implementation
from inmanta.ast.statements import BiStatement, ExpressionStatement, Literal, Statement, TypeDefinitionStatement
from inmanta.ast.statements.generator import Constructor
from inmanta.ast.type import TYPES, ConstraintType, NullableType, Type, TypedList
from inmanta.execute.runtime import ExecutionUnit, QueueScheduler, Resolver, ResultVariable
from inmanta.plugins import Plugin
from . import DefinitionStatement
LOGGER = logging.getLogger(__name__)
class TypeDeclaration(Statement):
"""
Declaration of a type. A type declaration consists of a base type string and can be
multi ('basetype[]'), nullable ('basetype?') or both ('basetype[]?').
"""
def __init__(
self,
basetype: LocatableString,
multi: bool = False,
nullable: bool = False,
) -> None:
Statement.__init__(self)
self.basetype: LocatableString = basetype
self.multi: bool = multi
self.nullable: bool = nullable
def get_basetype(self, namespace: Namespace) -> Type:
"""
Returns the base type for this declaration as a Type.
"""
return namespace.get_type(self.basetype)
def get_type(self, namespace: Namespace) -> Type:
"""
Returns the type for this declaration as a Type.
"""
tp: Type = self.get_basetype(namespace)
if self.multi:
tp = TypedList(tp)
if self.nullable:
tp = NullableType(tp)
return tp
def __str__(self) -> str:
return f"{self.basetype}{'[]' if self.multi else ''}{'?' if self.nullable else ''}"
class DefineAttribute(Statement):
def __init__(
self,
attr_type: TypeDeclaration,
name: LocatableString,
default_value: Optional[ExpressionStatement] = None,
remove_default: bool = True,
) -> None:
"""
if default_value is None, this is an explicit removal of a default value
"""
super(DefineAttribute, self).__init__()
self.type = attr_type
self.name = name
self.default = default_value
self.remove_default = remove_default
def __str__(self) -> str:
return f"{self.type} {self.name} = {str(self.default) if self.default else ''}"
class DefineEntity(TypeDefinitionStatement):
"""
Define a new entity in the configuration
"""
comment: Optional[str]
type: Entity
def __init__(
self,
namespace: Namespace,
lname: LocatableString,
comment: Optional[LocatableString],
parents: List[LocatableString],
attributes: List[DefineAttribute],
) -> None:
name = str(lname)
TypeDefinitionStatement.__init__(self, namespace, name)
self.anchors = [TypeReferenceAnchor(namespace, x) for x in parents]
self.name = name
self.attributes = attributes
if comment is not None:
self.comment = str(comment)
else:
self.comment = None
self.parents = parents
if len(self.parents) == 0 and not (self.name == "Entity" and self.namespace.name == "std"):
dummy_location: Range = Range("__internal__", 1, 1, 1, 1)
self.parents.append(LocatableString("std::Entity", dummy_location, -1, namespace))
self.type = Entity(self.name, namespace, self.comment)
self.type.location = lname.location
def add_attribute(
self, attr_type: LocatableString, name: LocatableString, default_value: Optional[ExpressionStatement] = None
) -> None:
"""
Add an attribute to this entity
"""
self.attributes.append(DefineAttribute(TypeDeclaration(attr_type), name, default_value))
def __repr__(self) -> str:
"""
A textual representation of this entity
"""
return "Entity(%s)" % self.name
def get_full_parent_names(self) -> List[str]:
def resolve_parent(parent: LocatableString) -> str:
ptype = self.namespace.get_type(parent)
assert isinstance(ptype, Entity), "Parents of entities should be entities, but %s is a %s" % (parent, type(ptype))
return ptype.get_full_name()
try:
return [resolve_parent(parent) for parent in self.parents]
except TypeNotFoundException as e:
e.set_statement(self)
raise e
def evaluate(self) -> None:
"""
Evaluate this statement.
"""
try:
entity_type = self.type
entity_type.comment = self.comment
add_attributes: Dict[str, Attribute] = {}
attribute: DefineAttribute
for attribute in self.attributes:
attr_type: Type = attribute.type.get_type(self.namespace)
if not isinstance(attr_type, (Type, type)):
raise TypingException(self, "Attributes can only be a type. Entities need to be defined as relations.")
name = str(attribute.name)
attr_obj = Attribute(
entity_type,
attribute.type.get_basetype(self.namespace),
name,
attribute.get_location(),
attribute.type.multi,
attribute.type.nullable,
)
self.anchors.append(TypeReferenceAnchor(self.namespace, attribute.type.basetype))
if name in add_attributes:
raise DuplicateException(attr_obj, add_attributes[name], "Same attribute defined twice in one entity")
add_attributes[name] = attr_obj
if attribute.default is not None or attribute.remove_default:
entity_type.add_default_value(name, attribute)
if len({str(p) for p in self.parents}) != len(self.parents):
raise TypingException(self, "same parent defined twice")
for parent in self.parents:
parent_type = self.namespace.get_type(parent)
if parent_type is self.type:
raise TypingException(self, "Entity can not be its own parent (%s) " % parent)
if not isinstance(parent_type, Entity):
raise TypingException(
self,
"Parents of an entity need to be entities. "
"Default constructors are not supported. %s is not an entity" % parent,
)
entity_type.parent_entities.append(parent_type)
parent_type.child_entities.append(entity_type)
for parent_type in entity_type.get_all_parent_entities():
for attr_name, other_attr in parent_type.attributes.items():
if attr_name not in add_attributes:
add_attributes[attr_name] = other_attr
else:
# allow compatible attributes
my_attr = add_attributes[attr_name]
if my_attr.type == other_attr.type:
add_attributes[attr_name] = other_attr
else:
raise DuplicateException(my_attr, other_attr, "Incompatible attributes")
# verify all attribute compatibility
except TypeNotFoundException as e:
e.set_statement(self)
raise e
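# Hedged example of the DSL construct this statement class represents (inmanta
# model syntax, shown for illustration; it is not defined in this file):
#
#   entity Host extends std::Entity:
#       string name
#       number port = 80
#   end
#
# Parsing that snippet yields a DefineEntity whose evaluate() registers the
# `name` and `port` attributes and links Host to its parent entity.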
class DefineImplementation(TypeDefinitionStatement):
"""
Define a new implementation that has a name and contains statements
:param name: The name of the implementation
"""
comment: Optional[str]
type: Implementation
def __init__(
self,
namespace: Namespace,
name: LocatableString,
target_type: LocatableString,
statements: BasicBlock,
comment: LocatableString,
):
TypeDefinitionStatement.__init__(self, namespace, str(name))
self.name = str(name)
self.block = statements
self.entity = target_type
self.comment = None
if comment is not None:
self.comment = str(comment)
self.location = name.get_location()
self.type = Implementation(str(self.name), self.block, self.namespace, str(target_type), self.comment)
self.type.location = name.get_location()
self.anchors = [TypeReferenceAnchor(namespace, target_type)]
self.anchors.extend(statements.get_anchors())
def __repr__(self) -> str:
"""
The representation of this implementation
"""
return "Implementation(%s)" % self.name
def evaluate(self) -> None:
"""
Evaluate this statement in the given scope
"""
try:
cls = self.namespace.get_type(self.entity)
if not isinstance(cls, Entity):
raise TypingException(
self, "Implementation can only be define for an Entity, but %s is a %s" % (self.entity, cls)
)
self.type.set_type(cls)
self.copy_location(self.type)
except TypeNotFoundException as e:
e.set_statement(self)
raise e
def nested_blocks(self) -> Iterator["BasicBlock"]:
"""
Returns an iterator over blocks contained within this statement.
"""
yield self.block
class DefineImplement(DefinitionStatement):
"""
Define a new implementation for a given entity
:param entity: The name of the entity that is implemented
:param implementations: A list of implementations
:param select: A clause that determines when this implementation is "active"
:param inherit: True iff the entity should inherit all implementations from its parents
"""
comment: Optional[str]
def __init__(
self,
entity_name: LocatableString,
implementations: List[LocatableString],
select: ExpressionStatement,
inherit: bool = False,
comment: Optional[LocatableString] = None,
) -> None:
DefinitionStatement.__init__(self)
self.entity = entity_name
self.entity_location = entity_name.get_location()
self.implementations = implementations
self.anchors = [TypeReferenceAnchor(x.namespace, x) for x in implementations]
self.anchors.append(TypeReferenceAnchor(entity_name.namespace, entity_name))
self.anchors.extend(select.get_anchors())
self.location = entity_name.get_location()
if inherit and (not isinstance(select, Literal) or select.value is not True):
raise RuntimeException(self, "Conditional implementation with parents not allowed")
self.select = select
self.inherit: bool = inherit
if comment is not None:
self.comment = str(comment)
else:
self.comment = None
def __repr__(self) -> str:
"""
Returns a representation of this class
"""
return "Implement(%s)" % (self.entity)
def evaluate(self) -> None:
"""
Evaluate this statement.
"""
try:
entity_type = self.namespace.get_type(self.entity)
if not isinstance(entity_type, EntityLike):
raise TypingException(
self, "Implementation can only be define for an Entity, but %s is a %s" % (self.entity, entity_type)
)
entity_type = entity_type.get_entity()
# If one implements statement has parent declared, set to true
entity_type.implements_inherits |= self.inherit
implement = Implement()
implement.comment = self.comment
implement.constraint = self.select
implement.location = self.entity_location
i = 0
for _impl in self.implementations:
i += 1
# check if the implementation has the correct type
impl_obj = self.namespace.get_type(_impl)
assert isinstance(impl_obj, Implementation), "%s is not an implementation" % (_impl)
if impl_obj.entity is not None and not (
entity_type is impl_obj.entity or entity_type.is_parent(impl_obj.entity)
):
raise TypingException(
self,
"Type mismatch: cannot use %s as implementation for "
" %s because its implementing type is %s" % (impl_obj.name, entity_type, impl_obj.entity),
)
# add it
implement.implementations.append(impl_obj)
entity_type.add_implement(implement)
except TypeNotFoundException as e:
e.set_statement(self)
raise e
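# Hedged example of the corresponding DSL construct (inmanta model syntax shown
# for illustration; the exact condition syntax is an assumption):
#
#   implement Host using hostDefaults when managed == true
#   implement Host using parents
#
# The first line becomes a DefineImplement with a `select` expression; the
# second one sets `inherit`, which evaluate() records via implements_inherits.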
class DefineTypeConstraint(TypeDefinitionStatement):
"""
Define a new data type in the configuration. This type is a constrained
version of the built-in datatypes
:param name: The name of the new type
:param basetype: The name of the type that is "refined"
"""
comment: Optional[str]
__expression: ExpressionStatement
type: ConstraintType
def __init__(
self, namespace: Namespace, name: LocatableString, basetype: LocatableString, expression: ExpressionStatement
) -> None:
TypeDefinitionStatement.__init__(self, namespace, str(name))
self.set_location(name.get_location())
self.basetype = basetype
self.anchors.append(TypeReferenceAnchor(namespace, basetype))
self.anchors.extend(expression.get_anchors())
self.set_expression(expression)
self.type = ConstraintType(self.namespace, str(name))
self.type.location = name.get_location()
print(f"...{player_name} you are such a DOJI, STRAHD flew up...")
sleep(2)
print("Now, you have a huge disavantage")
sleep(2)
else:
print("...")
sleep(1)
print("......")
sleep(2)
print("...........")
sleep(2)
print("I told you to be careful!")
sleep(2)
print(f"...{player_name} you are such a KLUTZ, STRAHD flew...")
sleep(2)
print("...STRAHD flew up...")
sleep(2)
print("Now, you have a huge disavantage")
sleep(2)
return True
def calculate_win_probability(
player_race: str, player_name: str, item: Optional[GameItem], strahd_flying: bool
) -> int:
"""
This function returns the probability
that the player defeats STRAHD.
The probability depends on the item the player is holding,
and whether STRAHD is flying.
"""
if item == GameItem.DEATH:
if player_name.lower() == "snow" and player_race.lower() == "kalashatar":
return 90
else:
return 0
elif item == GameItem.WOODEN_SWORD:
if strahd_flying:
return 5
else:
return 10
elif item == GameItem.SIMPLE_BOW:
if player_name.lower() == "soren" and player_race.lower() in [
"human",
"humano",
"elf",
"elfo",
]:
return 70
else:
return 30
elif item == GameItem.VIOLIN:
if player_name.lower() == "kaede" and player_race.lower() == "tiefling":
return 70
else:
return 30
elif item == GameItem.ORDINARY_SWORD:
if strahd_flying:
return 10
elif player_name.lower() == "vis" and player_race.lower() == "draconato":
return 80
else:
return 40
elif item == GameItem.STRAHD_SLAYER_SWORD:
if strahd_flying:
return 20
else:
return 100
elif item == GameItem.STRAHD_SLAYER_BOW:
return 100
else:
return -1
def roll_for_win(probability: int) -> bool:
"""
This function returns whether the player defeats STRAHD,
given a probability.
"""
return randint(1, 100) <= probability
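# Minimal sketch (not part of the original game): ties the two helpers above
# together by empirically checking that roll_for_win(p) succeeds about p percent
# of the time over many trials.
def _estimate_win_rate(probability: int, trials: int = 10000) -> float:
    wins = sum(1 for _ in range(trials) if roll_for_win(probability))
    return 100.0 * wins / trials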
def after_battle(player_race: str, player_name: str, did_win: bool) -> GameStatus:
"""
This function conducts the scenario
after the player has defeated, or not, STRAHD.
It returns the status depending on whether the player won.
"""
if did_win:
now = datetime.now()
print("A day may come when the courage of men fails…")
sleep(2)
print("but it is not THIS day, SATAN...")
sleep(2)
print("Because... you approached STRAHD...")
sleep(2)
print("Almost invisible to his senses...")
sleep(2)
print(
"Somehow your weapon hit the weak point of STRAHD's... revealing his true identity"
)
sleep(4)
print(
"He was just a bat... who looked like a DREADLORD..."
)
sleep(4)
print("It was a huge battle...")
sleep(2)
print(
f"And it was the most awkward {now.strftime('%A')} you will ever remember."
)
sleep(2)
if (
player_race.lower() in ["master", "mestre"]
and player_name.lower() == "zordnael"
):
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
print(
f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive 5000 dullas in Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("link")
sleep(5)
print("***CHEATER GOOD END***")
sleep(2)
return GameStatus.WINNER
elif player_race.lower() == "racist" and player_name.lower() == "lili":
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
print(
f"Congratulations {player_name}!!! You are the WINNER of this week's challenge, you shall receive the prizes specially prepared for everybody in dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("https://drive.google.com/drive/folders/1Jn8YYdixNNRqCQgIClBmGLiFFxuSCQdc?usp=sharing")
sleep(5)
print("***BEST END***")
sleep(2)
return GameStatus.WINNER
if did_win:
print("...")
sleep(1)
print(
"***************************************************************************************************************************************"
)
sleep(1)
if player_name.lower() == "soren":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1FerRt3mmaOm0ohSUXTkO-CmGIAluavXi?usp=sharing")
sleep(5)
print("...Your motherfuger cat killer !!!")
sleep(2)
print("***SOREN'S GOOD END***")
sleep(2)
elif player_name.lower() == "snow":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/16STFQ-_0N_54oNNsVQnMjwjcBgubxgk7?usp=sharing")
sleep(5)
print("...Your motherfuger snow flake !!!")
sleep(2)
print("***SNOW'S GOOD END***")
sleep(2)
elif player_name.lower() == "kaede":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1XN9sItRxYR4Si4gWFeJtI0HGF39zC29a?usp=sharing")
sleep(5)
print("...Your motherfuger idol !!!")
sleep(2)
print("***KAEDE'S GOOD END***")
sleep(2)
elif player_name.lower() == "leandro":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/1eP552hYwUXImmJ-DIX5o-wlp5VA96Sa0?usp=sharing")
sleep(5)
print("...Your motherfuger only roll 20 !!!")
sleep(2)
print("***LEANDRO'S GOOD END***")
sleep(2)
elif player_name.lower() == "vis":
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you received a cash prize of five thousand dullas from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print(f"And a prize... prepared specially for you {player_name}")
sleep(2)
print("... I know you doubted me... but here it is:")
sleep(2)
print("...")
sleep(1)
print("https://drive.google.com/drive/folders/19GRJJdlB8NbNl3QDXQM1-0ctXSX3mbwS?usp=sharing")
sleep(5)
print("...Your motherfuger iron wall !!!")
sleep(2)
print("***VIS'S GOOD END***")
sleep(2)
elif player_name.lower() == "lurin":
print("CONGRATULATIONS!!!!! ")
sleep(2)
print("Bitch! ... ")
sleep(2)
print(" ... you stole my name...")
sleep(2)
print("You are arrested for identity theft!!!")
sleep(2)
print("...")
sleep(1)
print("del C://LeagueOfLegends")
sleep(2)
print("...")
sleep(0.5)
print(".....")
sleep(0.5)
print("......")
sleep(0.5)
print(".............")
sleep(2)
print("deletion completed")
sleep(2)
print("***PHONY'S GOOD END***")
sleep(2)
else:
print(
f"Congratulations {player_name}!!! you are the WINNER of this week's challenge, you shall receive this link from Anastasia Dungeons Hills Cosmetics!"
)
sleep(4)
print("https://drive.google.com/drive/folders/0B_sxkSE6-TfETlZoOHF1bTRGTXM?usp=sharing")
sleep(5)
print("***GOOD END***")
sleep(2)
sleep(1)
return GameStatus.WINNER
if not did_win:
print("You tried to approach the devil carefully...")
sleep(2)
print("... but your hands were trembling...")
sleep(2)
print("...your weapon was not what you expected...")
sleep(2)
print("... It was a shit battle... but")
sleep(2)
print("The journey doesn't end here...")
sleep(2)
print("Death is just another way we have to choose...")
sleep(2)
print("...")
sleep(1)
if player_name.lower() == "vis":
print("I really believed in you...")
sleep(2)
print("...but I guess...")
sleep(1)
print("you shoud have stayed in your bathroom...")
sleep(2)
print("eating lemon pies...")
sleep(2)
print("...")
sleep(1)
print(f"YOU DIED {player_name}")
sleep(2)
print("***VIS'S BAD END***")
sleep(2)
elif player_name.lower() == "soren":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("Did you think it was a cat? ")
sleep(2)
print("Not today Satan!!!")
sleep(2)
print("...")
sleep(1)
print(f"You died! {player_name}")
sleep(2)
print("***SOREN'S BAD END***")
sleep(2)
elif player_name.lower() == "kaede":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("お。。。。")
sleep(2)
print("。。。か わ い い")
sleep(2)
print("。。。。。。こ と")
sleep(2)
print("go play you Violin in Hell...")
sleep(2)
print("...")
sleep(1)
print(f"You died! {player_name}")
sleep(2)
print("***KAEDES'S BAD END***")
sleep(2)
elif player_name.lower() == "snow":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(1)
print("HAHAHAAHHAHAHA")
sleep(2)
print("It is cute you even tried!")
sleep(2)
print("but I will call you Nori!")
sleep(2)
print("...")
sleep(1)
print("You died! Nori!!!")
sleep(2)
print("***SNOW'S BAD END***")
sleep(2)
elif player_name.lower() == "lurin":
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(2)
print("Bitch! ... ")
sleep(2)
print(" ... you stole my name...")
sleep(2)
print("You are arrested for identity theft!!!")
sleep(2)
print("...")
sleep(1)
print("del C://LeagueOfLegends")
sleep(2)
print("...")
sleep(0.5)
print(".....")
sleep(0.5)
print("......")
sleep(0.5)
print(".............")
sleep(2)
print("deletion completed")
sleep(2)
print("***PHONY'S GOOD END***")
sleep(2)
elif player_name.lower() == "leandro":
print("nice try")
sleep(2)
print("...but I guess...")
sleep(2)
print("Try harder next time...")
sleep(2)
print("...Nicolas Cage Face...")
sleep(2)
print("***LEANDRO'S BAD END***")
sleep(2)
elif player_name.lower() == "buiu":
print("nice try")
sleep(2)
print("...but I guess...")
sleep(2)
print("Try harder next time...")
sleep(2)
print(f"Did you really think this would work? Clown!")
sleep(2)
print("***RIDICULOUS BUIU'S END***")
sleep(2)
return GameStatus.HAHA
elif player_name.lower() in ["strahd", "dreadlord"]:
print("good try")
sleep(2)
print("...but I guess...")
sleep(2)
print("I never said you were in a cave...")
sleep(2)
print("There is sunlight now...")
sleep(2)
print("You are burning...")
sleep(2)
print("Till Death...")
sleep(2)
print("***RIDICULOUS STRAHD'S END***")
sleep(2)
else:
print("I really believed in you..")
sleep(2)
print("...but I guess...")
sleep(2)
print("This is a shit meta game...")
sleep(2)
print(
"Designed for players from a certain 16:20 tabletop Ravenloft campaign"
)
sleep(2)
print(f"Sorry, {player_name}...")
sleep(2)
print("You are dead!!!")
sleep(2)
print("***BAD END***")
sleep(2)
sleep(1)
return GameStatus.DEAD
def main():
"""
This function conducts the entire game.
"""
wanna_continue = True
while wanna_continue:
player_race = input("Your race? ")
player_name = input("Your name? ")
status = flee(player_name)
if status == GameStatus.ALIVE:
item, status = attack(player_name)
if status == GameStatus.ALIVE:
strahd_flight = decide_if_strahd_flies(player_name)
probability = calculate_win_probability(
player_race, player_name, item, strahd_flight
)
if distribution == 'normal':
std = math.sqrt(scale / fan)
self.weight.data.normal_(0, std)
elif distribution == 'uniform':
limit = math.sqrt(3 * scale / fan)
self.weight.data.uniform_(-limit, limit)
else:
raise ValueError('unknown distribution')
if self.bias is not None:
self.bias.data.fill_(0)
def set_parameters(self, weight, bias=None):
r"""Set weight and bias.
Parameters
----------
weight : array of shape in_channels x kernel_size x out_channels
The coefficients of the Chebyshev polynomials.
bias : vector of length out_channels
The bias.
"""
self.weight = torch.nn.Parameter(torch.as_tensor(weight))
if bias is not None:
self.bias = torch.nn.Parameter(torch.as_tensor(bias))
def extra_repr(self):
s = '{in_channels} -> {out_channels}, kernel_size={kernel_size}'
s += ', bias=' + str(self.bias is not None)
return s.format(**self.__dict__)
def forward(self, inputs):
r"""Forward graph convolution.
Parameters
----------
laplacian : sparse matrix of shape n_vertices x n_vertices
Encode the graph structure.
inputs : tensor of shape n_signals x n_vertices x n_features
Data, i.e., features on the vertices.
"""
outputs = self._conv(self.laplacian, inputs, self.weight)
if self.bias is not None:
outputs += self.bias
return outputs
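# Hedged shape sketch for the convolution above (constructor arguments are not
# shown in this excerpt; only the forward() contract is taken from the docstrings):
#
#   layer.weight has shape in_channels x kernel_size x out_channels
#   x = torch.randn(n_signals, n_vertices, in_channels)
#   y = layer(x)          # -> n_signals x n_vertices x out_channels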
# Conv layers
class Conv1dAuto(Conv1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = (self.kernel_size[0] // 2) # dynamically add padding based on the kernel_size
class PoolMaxEquiangular(torch.nn.MaxPool1d):
"""EquiAngular max pooling module
Parameters
----------
ratio : float
Ratio between latitude and longitude dimensions of the data
kernel_size : int
Pooling kernel width
return_indices : bool (default : True)
Whether to return the indices corresponding to the locations of the maximum value retained at pooling
"""
def __init__(self, ratio, kernel_size, return_indices=True, *args, **kwargs):
self.ratio = ratio
kernel_size = int(kernel_size ** 0.5)
super().__init__(kernel_size=kernel_size, return_indices=return_indices)
def forward(self, inputs):
"""calls Maxpool1d and if desired, keeps indices of the pixels pooled to unpool them
Parameters
----------
x : torch.tensor of shape batch x pixels x features
Input data
Returns
-------
x : torch.tensor of shape batch x pooled pixels x features
Layer output
indices : list(int)
Indices of the pixels pooled
"""
x = equiangular_calculator(inputs, self.ratio)
x = x.permute(0, 3, 1, 2)
if self.return_indices:
x, indices = F.max_pool2d(x, self.kernel_size, return_indices=self.return_indices)
else:
x = F.max_pool2d(x, self.kernel_size)
x = reformat(x)
if self.return_indices:
output = x, indices
else:
output = x
return output
class UnpoolMaxEquiangular(torch.nn.MaxUnpool1d):
"""Equiangular max unpooling module
Parameters
----------
ratio : float
Ratio between latitude and longitude dimensions of the data
kernel_size : int
Pooling kernel width
"""
def __init__(self, ratio, kernel_size, *args, **kwargs):
self.ratio = ratio
kernel_size = int(kernel_size ** 0.5)
super().__init__(kernel_size=(kernel_size, kernel_size))
def forward(self, inputs, indices):
"""calls MaxUnpool1d using the indices returned previously by PoolMaxEquiangular
Parameters
----------
inputs : torch.tensor of shape batch x pixels x features
Input data
indices : int
Indices of pixels equiangular maxpooled previously
Returns
-------
x : torch.tensor of shape batch x unpooled pixels x features
Layer output
"""
x = equiangular_calculator(inputs, self.ratio)
x = x.permute(0, 3, 1, 2)
x = F.max_unpool2d(x, indices, self.kernel_size)
x = reformat(x)
return x
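# Hedged round-trip sketch for the two equiangular max modules above. The sizes
# are assumptions: ratio=2 means width = 2 * height, and kernel_size=4 pools a
# 2x2 neighbourhood, so a 4x8 grid (32 pixels) pools down to 8 pixels.
#
#   pool = PoolMaxEquiangular(ratio=2, kernel_size=4)
#   unpool = UnpoolMaxEquiangular(ratio=2, kernel_size=4)
#   x = torch.randn(1, 32, 3)        # batch x pixels x features
#   y, idx = pool(x)                 # -> (1, 8, 3) plus the max indices
#   x_hat = unpool(y, idx)           # -> (1, 32, 3), zeros except at idx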
class PoolAvgEquiangular(torch.nn.AvgPool1d):
"""EquiAngular average pooling
Parameters
----------
ratio : float
Parameter for equiangular sampling -> width/height
kernel_size : int
Pooling kernel width
"""
def __init__(self, ratio, kernel_size, *args, **kwargs):
self.ratio = ratio
kernel_size = int(kernel_size ** 0.5)
super().__init__(kernel_size=(kernel_size, kernel_size))
def forward(self, inputs):
"""calls Avgpool1d
Parameters
----------
inputs : torch.tensor of shape batch x pixels x features
Input data
Returns
-------
x : torch.tensor of shape batch x pooled pixels x features
Layer output
"""
x = equiangular_calculator(inputs, self.ratio)
x = x.permute(0, 3, 1, 2)
x = F.avg_pool2d(x, self.kernel_size)
x = reformat(x)
return x, None
class UnpoolAvgEquiangular(torch.nn.Module):
"""EquiAngular average unpooling
Parameters
----------
ratio : float
Parameter for equiangular sampling -> width/height
"""
def __init__(self, ratio, kernel_size, *args, **kwargs):
self.ratio = ratio
self.kernel_size = int(kernel_size ** 0.5)
super().__init__()
def forward(self, inputs, *args):
"""calls pytorch's interpolate function to create the values while unpooling based on the nearby values
Parameters
----------
inputs : torch.tensor of shape batch x pixels x features
Input data
Returns
-------
x : torch.tensor of shape batch x unpooled pixels x features
Layer output
"""
x = equiangular_calculator(inputs, self.ratio)
x = x.permute(0, 3, 1, 2)
x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode="nearest")
x = reformat(x)
return x
class PoolMaxHealpix(torch.nn.MaxPool1d):
"""Healpix Maxpooling module
Parameters
----------
kernel_size : int
Pooling kernel width
return_indices : bool (default : True)
Whether to return the indices corresponding to the locations of the maximum value retained at pooling
"""
def __init__(self, kernel_size, return_indices=True, *args, **kwargs):
super().__init__(kernel_size=kernel_size, return_indices=return_indices)
def forward(self, x):
"""calls Maxpool1d and if desired, keeps indices of the pixels pooled to unpool them
Parameters
----------
x : torch.tensor of shape batch x pixels x features
Input data
Returns
-------
x : torch.tensor of shape batch x pooled pixels x features
Layer output
indices : list(int)
Indices of the pixels pooled
"""
x = x.permute(0, 2, 1)
if self.return_indices:
x, indices = F.max_pool1d(x, self.kernel_size, return_indices=self.return_indices)
else:
x = F.max_pool1d(x, self.kernel_size)
x = x.permute(0, 2, 1)
if self.return_indices:
output = x, indices
else:
output = x
return output
class PoolAvgHealpix(torch.nn.Module):
"""Healpix average pooling module
Parameters
----------
kernel_size : int
Pooling kernel width
"""
def __init__(self, kernel_size, *args, **kwargs):
"""kernel_size should be 4, 16, 64, etc."""
super().__init__()
self.kernel_size = kernel_size
def extra_repr(self):
return 'kernel_size={kernel_size}'.format(**self.__dict__)
def forward(self, x):
"""x has shape (batch, pixels, channels) and is in nested ordering"""
x = x.permute(0, 2, 1)
x = torch.nn.functional.avg_pool1d(x, self.kernel_size)
return x.permute(0, 2, 1), None
class UnpoolAvgHealpix(torch.nn.Module):
"""Healpix Average Unpooling module
Parameters
----------
kernel_size : int
Pooling kernel width
"""
def __init__(self, kernel_size, *args, **kwargs):
"""kernel_size should be 4, 16, 64, etc."""
super().__init__()
self.kernel_size = kernel_size
def extra_repr(self):
return 'kernel_size={kernel_size}'.format(**self.__dict__)
def forward(self, x, *args):
"""x has shape (batch, pixels, channels) and is in nested ordering"""
# return x.repeat_interleave(self.kernel_size, dim=1)
x = x.permute(0, 2, 1)
x = torch.nn.functional.interpolate(x, scale_factor=self.kernel_size, mode='nearest')
return x.permute(0, 2, 1)
class UnpoolMaxHealpix(torch.nn.MaxUnpool1d):
"""HEALpix max unpooling module
Parameters
----------
kernel_size : int
Pooling kernel width
"""
def __init__(self, kernel_size, *args, **kwargs):
super().__init__(kernel_size=kernel_size)
def forward(self, x, indices, **kwargs):
"""calls pytorch's unpool1d function to create the values while unpooling based on the nearby values
Parameters
----------
**kwargs
inputs : torch.tensor of shape batch x pixels x features
Input data
indices : list
Indices where the max value was located in unpooled image
Returns
-------
x : torch.tensor of shape batch x unpooled pixels x features
Layer output
"""
x = x.permute(0, 2, 1)
x = F.max_unpool1d(x, indices, self.kernel_size)
x = x.permute(0, 2, 1)
return x
def _build_interpolation_matrix(src_graph, dst_graph):
"""Return the sparse matrix that interpolates between two spherical samplings."""
ds = remap.compute_interpolation_weights(src_graph, dst_graph, method='conservative', normalization='fracarea') # or 'destarea'
# Sanity checks.
np.testing.assert_allclose(ds.src_grid_center_lat, src_graph.signals['lat'])
np.testing.assert_allclose(ds.src_grid_center_lon, src_graph.signals['lon'])
np.testing.assert_allclose(ds.dst_grid_center_lat, dst_graph.signals['lat'])
np.testing.assert_allclose(ds.dst_grid_center_lon, dst_graph.signals['lon'])
np.testing.assert_allclose(ds.src_grid_frac, 1)
np.testing.assert_allclose(ds.dst_grid_frac, 1)
np.testing.assert_allclose(ds.src_grid_imask, 1)
np.testing.assert_allclose(ds.dst_grid_imask, 1)
col = ds.src_address
row = ds.dst_address
dat = ds.remap_matrix.squeeze()
# CDO indexing starts at 1
row = np.array(row) - 1
col = np.array(col) - 1
weights = sparse.csr_matrix((dat, (row, col)))
assert weights.shape == (dst_graph.n_vertices, src_graph.n_vertices)
# Destination pixels are normalized to 1 (row-sum = 1).
# Weights represent the fractions of area attributed to source pixels.
np.testing.assert_allclose(weights.sum(axis=1), 1)
# Interpolation is conservative: it preserves area.
np.testing.assert_allclose(weights.T @ ds.dst_grid_area, ds.src_grid_area)
# Unnormalize.
weights = weights.multiply(ds.dst_grid_area.values[:, np.newaxis])
# Another way to assert that the interpolation is conservative.
np.testing.assert_allclose(np.asarray(weights.sum(1)).squeeze(), ds.dst_grid_area)
np.testing.assert_allclose(np.asarray(weights.sum(0)).squeeze(), ds.src_grid_area)
return weights
def build_pooling_matrices(src_graph, dst_graph):
weights = _build_interpolation_matrix(src_graph, dst_graph)
pool = weights.multiply(1/weights.sum(1))
unpool = weights.multiply(1/weights.sum(0)).T
return pool, unpool
def convert_to_torch_sparse(mat: "sparse.coo.coo_matrix"):
indices = np.empty((2, mat.nnz), dtype=np.int64)
np.stack((mat.row, mat.col), axis=0, out=indices)
indices = torch.from_numpy(indices)
mat = torch.sparse_coo_tensor(indices, mat.data, mat.shape, dtype=torch.float32)
mat = mat.coalesce()
return mat
class RemapBlock(torch.nn.Module):
def __init__(self, remap_matrix: "sparse.coo.coo_matrix"):
super().__init__()
remap_matrix = self.process_remap_matrix(remap_matrix)
self.register_buffer('remap_matrix', remap_matrix)
def forward(self, x, *args, **kwargs):
n_batch, n_nodes, n_val = x.shape
matrix = self.remap_matrix
new_nodes, _ = matrix.shape
x = x.permute(1, 2, 0).reshape(n_nodes, n_batch * n_val)
x = torch.sparse.mm(matrix, x)
x = x.reshape(new_nodes, n_val, n_batch).permute(2, 0, 1)
return x
def process_remap_matrix(self, mat):
return convert_to_torch_sparse(mat)
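# Minimal sketch (not part of the original module): build a RemapBlock from a
# small hand-written COO matrix and apply it to a batch of node features. The
# toy matrix simply averages pairs of source nodes into two target nodes.
def _demo_remap_block():
    import numpy as np
    import torch
    from scipy import sparse
    mat = sparse.coo_matrix(np.array([[0.5, 0.5, 0.0, 0.0],
                                      [0.0, 0.0, 0.5, 0.5]]))
    block = RemapBlock(mat)
    x = torch.randn(3, 4, 5)   # batch x nodes x features
    y = block(x)               # -> batch x 2 x features
    assert y.shape == (3, 2, 5)
    return y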
class GeneralAvgPool(RemapBlock):
def forward(self, x, *args, **kwargs):
x = super().forward(x, *args, **kwargs)
# Some pooling methods (e.g. Avg) do not give source indices; return None for the indices in that case
import logging
from typing import List, Dict, Union, Tuple, Callable
from collections import defaultdict
from pathlib import Path
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
import datasets
from datasets import ClassLabel
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from flair.data import Sentence, DataPoint
from flair.models import TextClassifier
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
PreTrainedTokenizer,
PreTrainedModel,
BertPreTrainedModel,
DistilBertPreTrainedModel,
XLMPreTrainedModel,
XLNetPreTrainedModel,
ElectraPreTrainedModel,
BertForSequenceClassification,
XLNetForSequenceClassification,
AlbertForSequenceClassification,
TrainingArguments,
Trainer,
)
from tqdm import tqdm
from adaptnlp.model import AdaptiveModel
logger = logging.getLogger(__name__)
class TransformersSequenceClassifier(AdaptiveModel):
"""Adaptive model for Transformer's Sequence Classification Model
Usage:
```python
>>> classifier = TransformersSequenceClassifier.load("transformers-sc-model")
>>> classifier.predict(text="Example text", mini_batch_size=32)
```
**Parameters:**
* **tokenizer** - A tokenizer object from Huggingface's transformers (TODO) and tokenizers
* **model** - A transformers Sequence Classification model
"""
def __init__(self, tokenizer: PreTrainedTokenizer, model: PreTrainedModel):
# Load up model and tokenizer
self.tokenizer = tokenizer
self.model = model
# Load empty trainer
self.trainer = None
# Setup cuda and automatic allocation of model
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
@classmethod
def load(cls, model_name_or_path: str) -> AdaptiveModel:
"""Class method for loading and constructing this classifier
* **model_name_or_path** - A key string of one of Transformers' pre-trained Sequence Classifier Models
"""
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
classifier = cls(tokenizer, model)
return classifier
def predict(
self,
text: Union[List[Sentence], Sentence, List[str], str],
mini_batch_size: int = 32,
**kwargs,
) -> List[Sentence]:
"""Predict method for running inference using the pre-trained sequence classifier model
* **text** - String, list of strings, sentences, or list of sentences to run inference on
* **mini_batch_size** - Mini batch size
* ****kwargs**(Optional) - Optional arguments for the Transformers classifier
"""
id2label = self.model.config.id2label
sentences = text
results: List[Sentence] = []
with torch.no_grad():
if not sentences:
return sentences
if isinstance(sentences, DataPoint) or isinstance(sentences, str):
sentences = [sentences]
# filter empty sentences
if isinstance(sentences[0], Sentence):
sentences = [sentence for sentence in sentences if len(sentence) > 0]
if len(sentences) == 0:
return sentences
# reverse sort all sequences by their length
rev_order_len_index = sorted(
range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True
)
original_order_index = sorted(
range(len(rev_order_len_index)), key=lambda k: rev_order_len_index[k]
)
reordered_sentences: List[Union[DataPoint, str]] = [
sentences[index] for index in rev_order_len_index
]
# Turn all Sentence objects into strings
if isinstance(reordered_sentences[0], Sentence):
str_reordered_sentences = [
sentence.to_original_text() for sentence in sentences
]
else:
str_reordered_sentences = reordered_sentences
# Tokenize and get dataset
dataset = self._tokenize(str_reordered_sentences)
dataloader = DataLoader(dataset, batch_size=mini_batch_size)
predictions: List[Tuple[str, float]] = []
logger.info(f"Running prediction on {len(dataset)} text sequences")
logger.info(f"Batch size = {mini_batch_size}")
for batch in tqdm(dataloader, desc="Predicting text"):
self.model.eval()
batch = tuple(t.to(self.device) for t in batch)
if len(batch) == 3:
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
else:
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
outputs = self.model(**inputs)
logits = outputs[0]
preds = torch.softmax(logits, dim=1).tolist()
predictions += preds
for text, pred in zip(str_reordered_sentences, predictions):
# Initialize and assign labels to each class in each datapoint prediction
text_sent = Sentence(text)
for k, v in id2label.items():
text_sent.add_label(label_type="sc", value=v, score=pred[k])
results.append(text_sent)
# Order results back into original order
results = [results[index] for index in original_order_index]
return results
def _tokenize(
self, sentences: Union[List[Sentence], Sentence, List[str], str]
) -> TensorDataset:
""" Batch tokenizes text and produces a `TensorDataset` with them """
# TODO: __call__ from tokenizer base class in the transformers library could automate/handle this
tokenized_text = self.tokenizer.batch_encode_plus(
sentences,
return_tensors="pt",
pad_to_max_length=True,
add_special_tokens=True,
)
# Bart, XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use token_type_ids
if isinstance(
self.model,
(
BertForSequenceClassification,
XLNetForSequenceClassification,
AlbertForSequenceClassification,
),
):
dataset = TensorDataset(
tokenized_text["input_ids"],
tokenized_text["attention_mask"],
tokenized_text["token_type_ids"],
)
else:
dataset = TensorDataset(
tokenized_text["input_ids"], tokenized_text["attention_mask"]
)
return dataset
def train(
self,
training_args: TrainingArguments,
train_dataset: datasets.Dataset,
eval_dataset: datasets.Dataset,
text_col_nm: str = "text",
label_col_nm: str = "label",
compute_metrics: Callable = None,
) -> None:
"""Trains and/or finetunes the sequence classification model
* **training_args** - Transformers `TrainingArguments` object model
* **train_dataset** - Training `Dataset` class object from the datasets library
* **eval_dataset** - Eval `Dataset` class object from the datasets library
* **text_col_nm** - Name of the text feature column used as training data (Default "text")
* **label_col_nm** - Name of the label feature column (Default "label")
* **compute_metrics** - Custom metrics function callable for `transformers.Trainer`'s compute metrics
* **return** - None
"""
# Set default metrics if None
if not compute_metrics:
compute_metrics = self._default_metrics
# Set datasets.Dataset label values in sequence classifier configuration
## Important NOTE: Updating the configuration does not update the sequence classification head module layer
## We are manually initializing a new linear layer for the "new" labels being trained
class_label = train_dataset.features[label_col_nm]
config_data = {
"num_labels": class_label.num_classes,
"id2label": {v: n for v, n in enumerate(class_label.names)},
"label2id": {n: v for v, n in enumerate(class_label.names)},
}
self.model.config.update(config_data)
self._mutate_model_head(class_label=class_label)
# Batch map datasets as torch tensors with tokenizer
def tokenize(batch):
return self.tokenizer(batch[text_col_nm], padding=True, truncation=True)
train_dataset = train_dataset.map(
tokenize, batch_size=len(train_dataset), batched=True
)
eval_dataset = eval_dataset.map(
tokenize, batch_size=len(eval_dataset), batched=True
)
# Rename label col name to match model forward signature of "labels" or ["label","label_ids"] since these are addressed by the default collator from transformers
train_dataset.rename_column_(
original_column_name=label_col_nm, new_column_name="labels"
)
eval_dataset.rename_column_(
original_column_name=label_col_nm, new_column_name="labels"
)
# Set format as torch tensors for training
train_dataset.set_format(
"torch", columns=["input_ids", "attention_mask", "labels"]
)
eval_dataset.set_format(
"torch", columns=["input_ids", "attention_mask", "labels"]
)
# Instantiate transformers trainer
self.trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Train and serialize
self.trainer.train()
self.trainer.save_model()
self.tokenizer.save_pretrained(training_args.output_dir)
def evaluate(self) -> Dict[str, float]:
"""Evaluates model specified
* **model_name_or_path** - The model name key or model path
"""
if not self.trainer:
logger.info("No trainer loaded, must run `classifier.train(...)` first")
ValueError("Trainer not found, must run train() method")
return self.trainer.evaluate()
def _mutate_model_head(self, class_label: ClassLabel) -> None:
"""Manually intialize new linear layers for prediction heads on specific language models that we're trying to train on"""
if isinstance(self.model, (BertPreTrainedModel, DistilBertPreTrainedModel)):
self.model.classifier = nn.Linear(
self.model.config.hidden_size, class_label.num_classes
)
self.model.num_labels = class_label.num_classes
elif isinstance(self.model, XLMPreTrainedModel):
self.model.num_labels = class_label.num_classes
elif isinstance(self.model, XLNetPreTrainedModel):
self.model.logits_proj = nn.Linear(
self.model.config.d_model, class_label.num_classes
)
self.model.num_labels = class_label.num_classes
elif isinstance(self.model, ElectraPreTrainedModel):
self.model.num_labels = class_label.num_classes
else:
logger.info(f"Sorry, can not train on a model of type {type(self.model)}")
# Setup default metrics for sequence classification training
def _default_metrics(self, pred) -> Dict:
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(
labels, preds, average=None
)
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
class FlairSequenceClassifier(AdaptiveModel):
"""Adaptive Model for Flair's Sequence Classifier...very basic
Usage:
```python
>>> classifier = FlairSequenceClassifier.load("en-sentiment")
>>> classifier.predict(text="Example text", mini_batch_size=32)
```
**Parameters:**
* **model_name_or_path** - A key string of one of Flair's pre-trained Sequence Classifier Models
"""
def __init__(self, model_name_or_path: str):
self.classifier = TextClassifier.load(model_name_or_path)
@classmethod
def load(cls, model_name_or_path: str) -> AdaptiveModel:
"""Class method for loading a constructing this classifier
* **model_name_or_path** - A key string of one of Flair's pre-trained Sequence Classifier Model
"""
classifier = cls(model_name_or_path)
return classifier
def predict(
self,
text: Union[List[Sentence], Sentence, List[str], str],
mini_batch_size: int = 32,
**kwargs,
) -> List[Sentence]:
"""Predict method for running inference using the pre-trained sequence classifier model
* **text** - String, list of strings, sentences, or list of sentences to run inference on
* **mini_batch_size** - Mini batch size
* ****kwargs**(Optional) - Optional arguments for the Flair classifier
"""
if isinstance(text, (Sentence, str)):
text = [text]
if isinstance(text[0], str):
text = [Sentence(s) for s in text]
return self.classifier.predict(
sentences=text,
mini_batch_size=mini_batch_size,
**kwargs,
)
def train(self):
pass
def evaluate(self):
pass
class EasySequenceClassifier:
"""Sequence classification models
Usage:
```python
>>> classifier = EasySequenceClassifier()
>>> classifier.tag_text(text="text you want to label", model_name_or_path="en-sentiment")
```
"""
def __init__(self):
self.sequence_classifiers: Dict[str, AdaptiveModel] = defaultdict(bool)
def tag_text(
self,
text: Union[List[Sentence], Sentence, List[str], str],
model_name_or_path: str = "en-sentiment",
mini_batch_size: int = 32,
**kwargs,
) -> List[Sentence]:
"""Tags a text sequence with labels the sequence classification models have been trained on
* **text** - String, list of strings, `Sentence`, or list of `Sentence`s to be classified
* **model_name_or_path** - The model name key or model path
* **mini_batch_size** - The mini batch size for running inference
* ****kwargs** - (Optional) Keyword Arguments for Flair's `TextClassifier.predict()` method params
**return** A list of Flair's `Sentence`'s
"""
# Load Text Classifier Model and Pytorch Module into tagger dict
if not self.sequence_classifiers[model_name_or_path]:
"""
self.sequence_classifiers[model_name_or_path]
if ( ooo0oo == oO00Oooo0o0o0 . registered_rlocs ) :
IIiiiIiii = ( oO00Oooo0o0o0 . group . is_null ( ) == False )
I1III = lisp_get_partial_rloc_set ( ooo0oo , I11oo , IIiiiIiii )
if ( I1III != ooo0oo ) :
oo0o = 15
ooo0oo = I1III
if ( oO00Oooo0o0o0 . force_ttl != None ) :
oo0o = oO00Oooo0o0o0 . force_ttl | 0x80000000
if ( I1Ii ) :
if ( I1Ii . set_record_ttl ) :
oo0o = I1Ii . set_record_ttl
lprint ( "Policy set-record-ttl to {}" . format ( oo0o ) )
if ( I1Ii . set_action == "drop" ) :
lprint ( "Policy set-action drop, send negative Map-Reply" )
OOo000 = LISP_POLICY_DENIED_ACTION
ooo0oo = [ ]
else :
oOo00O = I1Ii . set_policy_map_reply ( )
if ( oOo00O ) : ooo0oo = [ oOo00O ]
if ( i11iii1III1i ) :
lprint ( "Implied drop action, send negative Map-Reply" )
OOo000 = LISP_POLICY_DENIED_ACTION
ooo0oo = [ ]
oO0O0o00oOo = oO00Oooo0o0o0 . echo_nonce_capable
if ( O0o ) :
iIiI1IIi1Ii1i = oO00Oooo0o0o0 . eid
ii1IIi11i = oO00Oooo0o0o0 . group
else :
iIiI1IIi1Ii1i = OOo0O0O0o0
ii1IIi11i = O0o00oOOOO00
OOo000 = LISP_AUTH_FAILURE_ACTION
ooo0oo = [ ]
packet = lisp_build_map_reply ( iIiI1IIi1Ii1i , ii1IIi11i , ooo0oo ,
oOO000 , OOo000 , oo0o , False , None , oO0O0o00oOo , False )
if 23 - 23: iIii1I11I1II1 - OoOoOO00
if ( o0oo0O ) :
lisp_process_pubsub ( lisp_sockets , packet , iIiI1IIi1Ii1i , oO00o0o0O ,
mr_sport , oOO000 , oo0o , oooOOOO0oOo )
else :
lisp_send_map_reply ( lisp_sockets , packet , oO00o0o0O , mr_sport )
if 10 - 10: iIii1I11I1II1 + i1IIi * Ii1I / iIii1I11I1II1 % OoOoOO00 / O0
if 14 - 14: O0
return ( [ oO00Oooo0o0o0 . eid , oO00Oooo0o0o0 . group , LISP_DDT_ACTION_MS_ACK ] )
if 65 - 65: IiII / oO0o
if 57 - 57: IiII + oO0o - IiII
if 51 - 51: OoOoOO00 % IiII / iII111i - oO0o - OoO0O00 . iIii1I11I1II1
if 61 - 61: OoO0O00
if 60 - 60: I1IiiI % O0 % OoooooooOO / Ii1I
OOO0Oo0o = len ( oO00Oooo0o0o0 . registered_rlocs )
if ( OOO0Oo0o == 0 ) :
lprint ( "Requested EID {} found site '{}' with EID-prefix {} with " + "no registered RLOCs" . format ( green ( I11i11i1 , False ) , OOOo0O00OO00O ,
# I1Ii111
green ( Oo00O0o , False ) ) )
return ( [ oO00Oooo0o0o0 . eid , oO00Oooo0o0o0 . group , LISP_DDT_ACTION_MS_ACK ] )
if 19 - 19: I11i % IiII
if 73 - 73: i11iIiiIii . II111iiii
if 26 - 26: Oo0Ooo * i1IIi / OoooooooOO
if 78 - 78: O0 + OOooOOo . I11i * OoOoOO00 - OoooooooOO
if 92 - 92: o0oOOo0O0Ooo + OoOoOO00 / oO0o . I1Ii111 * I1IiiI * OoOoOO00
I1iiOo0O0O000 = map_request . target_eid if map_request . source_eid . is_null ( ) else map_request . source_eid
if 29 - 29: OoOoOO00 + I1IiiI - OoOoOO00
I1I = map_request . target_eid . hash_address ( I1iiOo0O0O000 )
I1I %= OOO0Oo0o
iIi11I11I1i = oO00Oooo0o0o0 . registered_rlocs [ I1I ]
if 83 - 83: II111iiii
if ( iIi11I11I1i . rloc . is_null ( ) ) :
lprint ( ( "Suppress forwarding Map-Request for EID {} at site '{}' " + "EID-prefix {}, no RLOC address" ) . format ( green ( I11i11i1 , False ) ,
# iIii1I11I1II1 / i1IIi / ooOoO0o
OOOo0O00OO00O , green ( Oo00O0o , False ) ) )
else :
lprint ( ( "Forwarding Map-Request for EID {} to ETR {} at site '{}' " + "EID-prefix {}" ) . format ( green ( I11i11i1 , False ) ,
# o0oOOo0O0Ooo % I11i . Oo0Ooo * Oo0Ooo % iII111i
red ( iIi11I11I1i . rloc . print_address ( ) , False ) , OOOo0O00OO00O ,
green ( Oo00O0o , False ) ) )
if 37 - 37: OoO0O00 / I1Ii111 . I1Ii111 * i1IIi
if 22 - 22: I1ii11iIi11i . II111iiii + iIii1I11I1II1 / OoooooooOO . ooOoO0o
if 13 - 13: II111iiii
if 36 - 36: iII111i - oO0o / Oo0Ooo / O0 . OoO0O00 . i1IIi
lisp_send_ecm ( lisp_sockets , packet , map_request . source_eid , mr_sport ,
map_request . target_eid , iIi11I11I1i . rloc , to_etr = True )
if 19 - 19: O0 . OoooooooOO % iIii1I11I1II1 - Ii1I . Ii1I + I1IiiI
return ( [ oO00Oooo0o0o0 . eid , oO00Oooo0o0o0 . group , LISP_DDT_ACTION_MS_ACK ] )
if 98 - 98: oO0o . Oo0Ooo
if 9 - 9: I1Ii111 % IiII - i11iIiiIii - OOooOOo % iII111i % OoooooooOO
if 6 - 6: i1IIi - II111iiii * OoOoOO00 + oO0o
if 6 - 6: I1IiiI - ooOoO0o + I1IiiI + OoO0O00 - i11iIiiIii % ooOoO0o
if 64 - 64: OoooooooOO + OOooOOo
if 36 - 36: I1IiiI - Ii1I / I1ii11iIi11i + Oo0Ooo % I1ii11iIi11i
if 86 - 86: iIii1I11I1II1 * OoO0O00
def lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source , port ) :
if 82 - 82: I1IiiI - OoO0O00 % o0oOOo0O0Ooo
if 72 - 72: O0 + OoOoOO00 % OOooOOo / oO0o / IiII
if 98 - 98: Oo0Ooo . II111iiii * I11i
if 39 - 39: IiII * o0oOOo0O0Ooo + Ii1I - I11i
OOo0O0O0o0 = map_request . target_eid
O0o00oOOOO00 = map_request . target_group
I11i11i1 = lisp_print_eid_tuple ( OOo0O0O0o0 , O0o00oOOOO00 )
oOO000 = map_request . nonce
OOo000 = LISP_DDT_ACTION_NULL
if 70 - 70: oO0o * ooOoO0o / ooOoO0o - Ii1I * Ii1I % OOooOOo
if 91 - 91: OoO0O00 - OoO0O00 % O0
if 67 - 67: ooOoO0o * i1IIi
if 66 - 66: o0oOOo0O0Ooo - I1ii11iIi11i . OoOoOO00 / iII111i - Ii1I - i1IIi
if 97 - 97: oO0o % iII111i - OOooOOo . OoooooooOO
oo0ooo = None
if ( lisp_i_am_ms ) :
oO00Oooo0o0o0 = lisp_site_eid_lookup ( OOo0O0O0o0 , O0o00oOOOO00 , False )
if ( oO00Oooo0o0o0 == None
# pyglet/gl/glxext_arb.py
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glxext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
from pyglet.gl.lib import c_ptrdiff_t
if not hasattr(ctypes, 'c_int64'):
# XXX TODO completely wrong, but at least can import.
# Can c_longlong still be used?
c_int64 = c_long
c_uint64 = c_ulong
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://www.opengl.org/registry/api/glxext.h
import pyglet.libs.x11.xlib
import pyglet.gl.glx
# H (/usr/include/GL/glx.h:26)
# ARB_get_proc_address (/usr/include/GL/glx.h:317)
# GLXEXT_LEGACY (/usr/include/GL/glx.h:334)
GLX_GLXEXT_VERSION = 32 # GL/glxext.h:53
# VERSION_1_3 (GL/glxext.h:55)
# VERSION_1_4 (GL/glxext.h:114)
# ARB_get_proc_address (GL/glxext.h:119)
# ARB_multisample (GL/glxext.h:122)
GLX_SAMPLE_BUFFERS_ARB = 100000 # GL/glxext.h:123
GLX_SAMPLES_ARB = 100001 # GL/glxext.h:124
# ARB_vertex_buffer_object (GL/glxext.h:127)
GLX_CONTEXT_ALLOW_BUFFER_BYTE_ORDER_MISMATCH_ARB = 8341 # GL/glxext.h:128
# ARB_fbconfig_float (GL/glxext.h:131)
GLX_RGBA_FLOAT_TYPE_ARB = 8377 # GL/glxext.h:132
GLX_RGBA_FLOAT_BIT_ARB = 4 # GL/glxext.h:133
# ARB_framebuffer_sRGB (GL/glxext.h:136)
GLX_FRAMEBUFFER_SRGB_CAPABLE_ARB = 8370 # GL/glxext.h:137
# ARB_create_context (GL/glxext.h:140)
GLX_CONTEXT_DEBUG_BIT_ARB = 1 # GL/glxext.h:141
GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB = 2 # GL/glxext.h:142
GLX_CONTEXT_MAJOR_VERSION_ARB = 8337 # GL/glxext.h:143
GLX_CONTEXT_MINOR_VERSION_ARB = 8338 # GL/glxext.h:144
GLX_CONTEXT_FLAGS_ARB = 8340 # GL/glxext.h:145
# ARB_create_context_profile (GL/glxext.h:148)
GLX_CONTEXT_CORE_PROFILE_BIT_ARB = 1 # GL/glxext.h:149
GLX_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB = 2 # GL/glxext.h:150
GLX_CONTEXT_PROFILE_MASK_ARB = 37158 # GL/glxext.h:151
# ARB_create_context_robustness (GL/glxext.h:154)
GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB = 4 # GL/glxext.h:155
GLX_LOSE_CONTEXT_ON_RESET_ARB = 33362 # GL/glxext.h:156
GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB = 33366 # GL/glxext.h:157
GLX_NO_RESET_NOTIFICATION_ARB = 33377 # GL/glxext.h:158
# SGIS_multisample (GL/glxext.h:161)
GLX_SAMPLE_BUFFERS_SGIS = 100000 # GL/glxext.h:162
GLX_SAMPLES_SGIS = 100001 # GL/glxext.h:163
# EXT_visual_info (GL/glxext.h:166)
GLX_X_VISUAL_TYPE_EXT = 34 # GL/glxext.h:167
GLX_TRANSPARENT_TYPE_EXT = 35 # GL/glxext.h:168
GLX_TRANSPARENT_INDEX_VALUE_EXT = 36 # GL/glxext.h:169
GLX_TRANSPARENT_RED_VALUE_EXT = 37 # GL/glxext.h:170
GLX_TRANSPARENT_GREEN_VALUE_EXT = 38 # GL/glxext.h:171
GLX_TRANSPARENT_BLUE_VALUE_EXT = 39 # GL/glxext.h:172
GLX_TRANSPARENT_ALPHA_VALUE_EXT = 40 # GL/glxext.h:173
GLX_NONE_EXT = 32768 # GL/glxext.h:174
GLX_TRUE_COLOR_EXT = 32770 # GL/glxext.h:175
GLX_DIRECT_COLOR_EXT = 32771 # GL/glxext.h:176
GLX_PSEUDO_COLOR_EXT = 32772 # GL/glxext.h:177
GLX_STATIC_COLOR_EXT = 32773 # GL/glxext.h:178
GLX_GRAY_SCALE_EXT = 32774 # GL/glxext.h:179
GLX_STATIC_GRAY_EXT = 32775 # GL/glxext.h:180
GLX_TRANSPARENT_RGB_EXT = 32776 # GL/glxext.h:181
GLX_TRANSPARENT_INDEX_EXT = 32777 # GL/glxext.h:182
# SGI_swap_control (GL/glxext.h:185)
# SGI_video_sync (GL/glxext.h:188)
# SGI_make_current_read (GL/glxext.h:191)
# SGIX_video_source (GL/glxext.h:194)
# EXT_visual_rating (GL/glxext.h:197)
GLX_VISUAL_CAVEAT_EXT = 32 # GL/glxext.h:198
GLX_SLOW_VISUAL_EXT = 32769 # GL/glxext.h:199
GLX_NON_CONFORMANT_VISUAL_EXT = 32781 # GL/glxext.h:200
# EXT_import_context (GL/glxext.h:204)
GLX_SHARE_CONTEXT_EXT = 32778 # GL/glxext.h:205
GLX_VISUAL_ID_EXT = 32779 # GL/glxext.h:206
GLX_SCREEN_EXT = 32780 # GL/glxext.h:207
# SGIX_fbconfig (GL/glxext.h:210)
GLX_WINDOW_BIT_SGIX = 1 # GL/glxext.h:211
GLX_PIXMAP_BIT_SGIX = 2 # GL/glxext.h:212
GLX_RGBA_BIT_SGIX = 1 # GL/glxext.h:213
GLX_COLOR_INDEX_BIT_SGIX = 2 # GL/glxext.h:214
GLX_DRAWABLE_TYPE_SGIX = 32784 # GL/glxext.h:215
GLX_RENDER_TYPE_SGIX = 32785 # GL/glxext.h:216
GLX_X_RENDERABLE_SGIX = 32786 # GL/glxext.h:217
GLX_FBCONFIG_ID_SGIX = 32787 # GL/glxext.h:218
GLX_RGBA_TYPE_SGIX = 32788 # GL/glxext.h:219
GLX_COLOR_INDEX_TYPE_SGIX = 32789 # GL/glxext.h:220
# SGIX_pbuffer (GL/glxext.h:224)
GLX_PBUFFER_BIT_SGIX = 4 # GL/glxext.h:225
GLX_BUFFER_CLOBBER_MASK_SGIX = 134217728 # GL/glxext.h:226
GLX_FRONT_LEFT_BUFFER_BIT_SGIX = 1 # GL/glxext.h:227
GLX_FRONT_RIGHT_BUFFER_BIT_SGIX = 2 # GL/glxext.h:228
GLX_BACK_LEFT_BUFFER_BIT_SGIX = 4 # GL/glxext.h:229
GLX_BACK_RIGHT_BUFFER_BIT_SGIX = 8 # GL/glxext.h:230
GLX_AUX_BUFFERS_BIT_SGIX = 16 # GL/glxext.h:231
GLX_DEPTH_BUFFER_BIT_SGIX = 32 # GL/glxext.h:232
GLX_STENCIL_BUFFER_BIT_SGIX = 64 # GL/glxext.h:233
GLX_ACCUM_BUFFER_BIT_SGIX = 128 # GL/glxext.h:234
GLX_SAMPLE_BUFFERS_BIT_SGIX = 256 # GL/glxext.h:235
GLX_MAX_PBUFFER_WIDTH_SGIX = 32790 # GL/glxext.h:236
GLX_MAX_PBUFFER_HEIGHT_SGIX = 32791 # GL/glxext.h:237
GLX_MAX_PBUFFER_PIXELS_SGIX = 32792 # GL/glxext.h:238
GLX_OPTIMAL_PBUFFER_WIDTH_SGIX = 32793 # GL/glxext.h:239
GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX = 32794 # GL/glxext.h:240
GLX_PRESERVED_CONTENTS_SGIX = 32795 # GL/glxext.h:241
GLX_LARGEST_PBUFFER_SGIX = 32796 # GL/glxext.h:242
GLX_WIDTH_SGIX = 32797 # GL/glxext.h:243
GLX_HEIGHT_SGIX = 32798 # GL/glxext.h:244
GLX_EVENT_MASK_SGIX = 32799 # GL/glxext.h:245
GLX_DAMAGED_SGIX = 32800 # GL/glxext.h:246
GLX_SAVED_SGIX = 32801 # GL/glxext.h:247
GLX_WINDOW_SGIX = 32802 # GL/glxext.h:248
GLX_PBUFFER_SGIX = 32803 # GL/glxext.h:249
# SGI_cushion (GL/glxext.h:252)
# SGIX_video_resize (GL/glxext.h:255)
GLX_SYNC_FRAME_SGIX = 0 # GL/glxext.h:256
GLX_SYNC_SWAP_SGIX = 1 # GL/glxext.h:257
# SGIX_dmbuffer (GL/glxext.h:260)
GLX_DIGITAL_MEDIA_PBUFFER_SGIX = 32804 # GL/glxext.h:261
# SGIX_swap_group (GL/glxext.h:264)
# SGIX_swap_barrier (GL/glxext.h:267)
# SGIS_blended_overlay (GL/glxext.h:270)
GLX_BLENDED_RGBA_SGIS = 32805 # GL/glxext.h:271
# SGIS_shared_multisample (GL/glxext.h:274)
GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS = 32806 # GL/glxext.h:275
GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS = 32807 # GL/glxext.h:276
# SUN_get_transparent_index (GL/glxext.h:279)
# 3DFX_multisample (GL/glxext.h:282)
GLX_SAMPLE_BUFFERS_3DFX = 32848 # GL/glxext.h:283
GLX_SAMPLES_3DFX = 32849 # GL/glxext.h:284
# MESA_copy_sub_buffer (GL/glxext.h:287)
# MESA_pixmap_colormap (GL/glxext.h:290)
# MESA_release_buffers (GL/glxext.h:293)
# MESA_set_3dfx_mode (GL/glxext.h:296)
GLX_3DFX_WINDOW_MODE_MESA = 1 # GL/glxext.h:297
GLX_3DFX_FULLSCREEN_MODE_MESA = 2 # GL/glxext.h:298
# SGIX_visual_select_group (GL/glxext.h:301)
GLX_VISUAL_SELECT_GROUP_SGIX = 32808 # GL/glxext.h:302
# OML_swap_method (GL/glxext.h:305)
GLX_SWAP_METHOD_OML = 32864 # GL/glxext.h:306
GLX_SWAP_EXCHANGE_OML = 32865 # GL/glxext.h:307
GLX_SWAP_COPY_OML = 32866 # GL/glxext.h:308
GLX_SWAP_UNDEFINED_OML = 32867 # GL/glxext.h:309
# OML_sync_control (GL/glxext.h:312)
# NV_float_buffer (GL/glxext.h:315)
GLX_FLOAT_COMPONENTS_NV = 8368 # GL/glxext.h:316
# SGIX_hyperpipe (GL/glxext.h:319)
GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX = 80 # GL/glxext.h:320
GLX_BAD_HYPERPIPE_CONFIG_SGIX = 91 # GL/glxext.h:321
GLX_BAD_HYPERPIPE_SGIX = 92 # GL/glxext.h:322
GLX_HYPERPIPE_DISPLAY_PIPE_SGIX = 1 # GL/glxext.h:323
GLX_HYPERPIPE_RENDER_PIPE_SGIX = 2 # GL/glxext.h:324
GLX_PIPE_RECT_SGIX = 1 # GL/glxext.h:325
GLX_PIPE_RECT_LIMITS_SGIX = 2 # GL/glxext.h:326
GLX_HYPERPIPE_STEREO_SGIX = 3 # GL/glxext.h:327
GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX = 4 # GL/glxext.h:328
GLX_HYPERPIPE_ID_SGIX = 32816 # GL/glxext.h:329
# MESA_agp_offset (GL/glxext.h:332)
# EXT_fbconfig_packed_float (GL/glxext.h:335)
GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT = 8369 # GL/glxext.h:336
GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT = 8 # GL/glxext.h:337
# EXT_framebuffer_sRGB (GL/glxext.h:340)
GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT = 8370 # GL/glxext.h:341
# EXT_texture_from_pixmap (GL/glxext.h:344)
GLX_TEXTURE_1D_BIT_EXT = 1 # GL/glxext.h:345
GLX_TEXTURE_2D_BIT_EXT = 2 # GL/glxext.h:346
GLX_TEXTURE_RECTANGLE_BIT_EXT = 4 # GL/glxext.h:347
GLX_BIND_TO_TEXTURE_RGB_EXT = 8400 # GL/glxext.h:348
GLX_BIND_TO_TEXTURE_RGBA_EXT = 8401 # GL/glxext.h:349
GLX_BIND_TO_MIPMAP_TEXTURE_EXT = 8402 # GL/glxext.h:350
GLX_BIND_TO_TEXTURE_TARGETS_EXT = 8403 # GL/glxext.h:351
GLX_Y_INVERTED_EXT = 8404 # GL/glxext.h:352
GLX_TEXTURE_FORMAT_EXT = 8405 # GL/glxext.h:353
GLX_TEXTURE_TARGET_EXT = 8406 # GL/glxext.h:354
GLX_MIPMAP_TEXTURE_EXT = 8407 # GL/glxext.h:355
GLX_TEXTURE_FORMAT_NONE_EXT = 8408 # GL/glxext.h:356
GLX_TEXTURE_FORMAT_RGB_EXT = 8409 # GL/glxext.h:357
GLX_TEXTURE_FORMAT_RGBA_EXT = 8410 # GL/glxext.h:358
GLX_TEXTURE_1D_EXT = 8411 # GL/glxext.h:359
GLX_TEXTURE_2D_EXT = 8412 # GL/glxext.h:360
GLX_TEXTURE_RECTANGLE_EXT = 8413 # GL/glxext.h:361
GLX_FRONT_LEFT_EXT = 8414 # GL/glxext.h:362
GLX_FRONT_RIGHT_EXT = 8415 # GL/glxext.h:363
GLX_BACK_LEFT_EXT = 8416 # GL/glxext.h:364
GLX_BACK_RIGHT_EXT = 8417 # GL/glxext.h:365
GLX_FRONT_EXT = 8414 # GL/glxext.h:366
GLX_BACK_EXT = 8416 # GL/glxext.h:367
GLX_AUX0_EXT = 8418 # GL/glxext.h:368
GLX_AUX1_EXT = 8419 # GL/glxext.h:369
GLX_AUX2_EXT = 8420 # GL/glxext.h:370
GLX_AUX3_EXT = 8421 # GL/glxext.h:371
GLX_AUX4_EXT = 8422 # GL/glxext.h:372
GLX_AUX5_EXT = 8423 # GL/glxext.h:373
GLX_AUX6_EXT = 8424 # GL/glxext.h:374
GLX_AUX7_EXT = 8425 # GL/glxext.h:375
GLX_AUX8_EXT = 8426 # GL/glxext.h:376
GLX_AUX9_EXT = 8427 # GL/glxext.h:377
# NV_present_video (GL/glxext.h:380)
GLX_NUM_VIDEO_SLOTS_NV = 8432 # GL/glxext.h:381
# NV_video_out (GL/glxext.h:384)
GLX_VIDEO_OUT_COLOR_NV = 8387 # GL/glxext.h:385
GLX_VIDEO_OUT_ALPHA_NV = 8388 # GL/glxext.h:386
GLX_VIDEO_OUT_DEPTH_NV = 8389 # GL/glxext.h:387
GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV = 8390 # GL/glxext.h:388
GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV = 8391 # GL/glxext.h:389
GLX_VIDEO_OUT_FRAME_NV = 8392 # GL/glxext.h:390
GLX_VIDEO_OUT_FIELD_1_NV = 8393 # GL/glxext.h:391
GLX_VIDEO_OUT_FIELD_2_NV = 8394 # GL/glxext.h:392
GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV = 8395 # GL/glxext.h:393
GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV = 8396 # GL/glxext.h:394
# NV_swap_group (GL/glxext.h:397)
# NV_video_capture (GL/glxext.h:400)
GLX_DEVICE_ID_NV = 8397 # GL/glxext.h:401
GLX_UNIQUE_ID_NV = 8398 # GL/glxext.h:402
GLX_NUM_VIDEO_CAPTURE_SLOTS_NV = 8399 # GL/glxext.h:403
# EXT_swap_control (GL/glxext.h:406)
GLX_SWAP_INTERVAL_EXT = 8433 # GL/glxext.h:407
GLX_MAX_SWAP_INTERVAL_EXT = 8434 # GL/glxext.h:408
# NV_copy_image (GL/glxext.h:411)
# INTEL_swap_event (GL/glxext.h:414)
GLX_BUFFER_SWAP_COMPLETE_INTEL_MASK = 67108864 # GL/glxext.h:415
GLX_EXCHANGE_COMPLETE_INTEL = 33152 # GL/glxext.h:416
GLX_COPY_COMPLETE_INTEL = 33153 # GL/glxext.h:417
GLX_FLIP_COMPLETE_INTEL = 33154 # GL/glxext.h:418
# NV_multisample_coverage (GL/glxext.h:421)
GLX_COVERAGE_SAMPLES_NV = 100001 # GL/glxext.h:422
GLX_COLOR_SAMPLES_NV = 8371 # GL/glxext.h:423
# AMD_gpu_association (GL/glxext.h:426)
GLX_GPU_VENDOR_AMD = 7936 # GL/glxext.h:427
GLX_GPU_RENDERER_STRING_AMD = 7937 # GL/glxext.h:428
GLX_GPU_OPENGL_VERSION_STRING_AMD = 7938 # GL/glxext.h:429
GLX_GPU_FASTEST_TARGET_GPUS_AMD = 8610 # GL/glxext.h:430
GLX_GPU_RAM_AMD = 8611 # GL/glxext.h:431
GLX_GPU_CLOCK_AMD = 8612 # GL/glxext.h:432
GLX_GPU_NUM_PIPES_AMD = 8613 # GL/glxext.h:433
GLX_GPU_NUM_SIMD_AMD = 8614 # GL/glxext.h:434
GLX_GPU_NUM_RB_AMD = 8615 # GL/glxext.h:435
GLX_GPU_NUM_SPI_AMD = 8616 # GL/glxext.h:436
# EXT_create_context_es2_profile (GL/glxext.h:439)
GLX_CONTEXT_ES2_PROFILE_BIT_EXT = 4 # GL/glxext.h:440
# ARB_get_proc_address (GL/glxext.h:446)
# SGIX_video_source (GL/glxext.h:450)
XID = pyglet.libs.x11.xlib.XID
GLXVideoSourceSGIX = XID # GL/glxext.h:451
# SGIX_fbconfig (GL/glxext.h:454)
GLXFBConfigIDSGIX = XID # GL/glxext.h:455
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfigSGIX = POINTER(struct___GLXFBConfigRec) # GL/glxext.h:456
# SGIX_pbuffer (GL/glxext.h:459)
GLXPbufferSGIX = XID # GL/glxext.h:460
class struct_anon_106(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'draw_type',
'mask',
'x',
'y',
'width',
'height',
'count',
]
Display = pyglet.libs.x11.xlib.Display
GLXDrawable = pyglet.gl.glx.GLXDrawable
struct_anon_106._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('draw_type', c_int),
('mask', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXBufferClobberEventSGIX = struct_anon_106 # GL/glxext.h:473
# NV_video_output (GL/glxext.h:476)
GLXVideoDeviceNV = c_uint # GL/glxext.h:477
# NV_video_capture (GL/glxext.h:480)
GLXVideoCaptureDeviceNV = XID # GL/glxext.h:481
# VERSION_1_3 (GL/glxext.h:521)
# VERSION_1_4 (GL/glxext.h:563)
# ARB_get_proc_address (GL/glxext.h:571)
# ARB_multisample (GL/glxext.h:579)
GLX_ARB_multisample = 1 # GL/glxext.h:580
# ARB_fbconfig_float (GL/glxext.h:583)
GLX_ARB_fbconfig_float = 1 # GL/glxext.h:584
# ARB_framebuffer_sRGB (GL/glxext.h:587)
GLX_ARB_framebuffer_sRGB = 1 # GL/glxext.h:588
# ARB_create_context (GL/glxext.h:591)
GLX_ARB_create_context = 1 # GL/glxext.h:592
GLXContext = pyglet.gl.glx.GLXContext
GLXFBConfig = pyglet.gl.glx.GLXFBConfig
# GL/glxext.h:594
glXCreateContextAttribsARB = _link_function('glXCreateContextAttribsARB', GLXContext, [POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)], 'ARB_create_context')
PFNGLXCREATECONTEXTATTRIBSARBPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:596
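# Illustrative sketch (not part of the generated wrapper): a typical zero-terminated
# attribute list for glXCreateContextAttribsARB requesting a 3.3 core-profile context.
# The display, fbconfig and share-context arguments would come from earlier GLX calls
# and are not shown here; the name below is only an example.
_example_context_attribs = (c_int * 7)(
    GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
    GLX_CONTEXT_MINOR_VERSION_ARB, 3,
    GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
    0,
)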
# ARB_create_context_profile (GL/glxext.h:599)
GLX_ARB_create_context_profile = 1 # GL/glxext.h:600
# ARB_create_context_robustness (GL/glxext.h:603)
GLX_ARB_create_context_robustness = 1 # GL/glxext.h:604
# SGIS_multisample (GL/glxext.h:607)
GLX_SGIS_multisample = 1
"https://bitcointalk.org/index.php?topic=2175951.0",
"github": "https://github.com/SmartBillions/SmartBillions",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/SmartBillions",
"slack": "https://smartbillions.slack.com",
"telegram": "https://t.me/SmartBillionsGroup",
"twitter": "https://twitter.com/smartbns",
"youtube": ""
}
},
"DOCK": {
"symbol": "DOCK",
"name": "Dock",
"type": "ERC20",
"address": "0xE5Dada80Aa6477e85d09747f2842f7993D0Df71C",
"ens_address": "",
"decimals": 18,
"website": "https://dock.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/dock-io",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/dockio",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/getdock",
"youtube": ""
}
},
"TRC": {
"symbol": "TRC",
"address": "0xcB3F902bf97626391bF8bA87264bbC3DC13469be",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"BOB": {
"symbol": "BOB",
"address": "0xDF347911910b6c9A4286bA8E2EE5ea4a39eB2134",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://bobsrepair.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"LIVE": {
"symbol": "LIVE",
"address": "0x24A77c1F17C547105E14813e517be06b0040aa76",
"decimals": 18,
"name": "LIVE Token",
"ens_address": "",
"website": "https://livestars.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/LiveStarsPlatform",
"slack": "",
"telegram": "https://t.me/livestarsio",
"twitter": "https://twitter.com/LiveStarsIO",
"youtube": ""
}
},
"GTO": {
"symbol": "GTO",
"address": "0xC5bBaE50781Be1669306b9e001EFF57a2957b09d",
"decimals": 5,
"name": "Gifto",
"ens_address": "",
"website": "https://gifto.io/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@GIFTO",
"chat": "",
"facebook": "https://www.facebook.com/gifto.io/",
"forum": "",
"github": "https://github.com/GIFTO-io",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/user/GIFTO-io/",
"slack": "",
"telegram": "https://t.me/GIFTOOfficial",
"twitter": "https://twitter.com/GIFTO_io",
"youtube": ""
}
},
"CRB": {
"symbol": "CRB",
"address": "0xAef38fBFBF932D1AeF3B808Bc8fBd8Cd8E1f8BC5",
"decimals": 8,
"name": "Creditbit",
"ens_address": "",
"website": "https://www.creditbit.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "https://forum.creditbit.org",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/credit_2",
"youtube": ""
}
},
"AX1": {
"symbol": "AX1",
"address": "0xCd4b4b0F3284a33AC49C67961EC6e111708318Cf",
"decimals": 5,
"name": "AX1 Mining Token",
"ens_address": "token.ax1mining.eth",
"website": "https://ax1.io",
"logo": {
"src": "https://ax1.io/images/AX1-Platinum-Coin.png",
"width": "100",
"height": "100",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@support_98049",
"chat": "",
"facebook": "https://www.facebook.com/ax1mining/",
"forum": "",
"github": "https://github.com/Pickeringwareltd/AX1_v1.03",
"gitter": "",
"instagram": "https://www.instagram.com/ax1mining/",
"linkedin": "https://www.linkedin.com/company/18515393/admin/updates/",
"reddit": "https://www.reddit.com/r/AX1/",
"slack": "",
"telegram": "https://t.me/ax1miningico",
"twitter": "https://twitter.com/Ax1mining",
"youtube": ""
}
},
"Devcon2 Token": {
"symbol": "Devcon2 Token",
"address": "0xdd94De9cFE063577051A5eb7465D08317d8808B6",
"decimals": 0,
"name": "Devcon2 Token",
"ens_address": "",
"website": "https://www.devcon2-token.com",
"logo": {
"src": "https://etherscan.io/token/images/Devcon2.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"FUEL": {
"symbol": "FUEL",
"address": "0xEA38eAa3C86c8F9B751533Ba2E562deb9acDED40",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://etherparty.io",
"logo": {
"src": "https://image.ibb.co/mzeWD6/EP3_Blue.png",
"width": 804,
"height": 804,
"ipfs_hash": "ipfs/QmNLpXoqbZzZ7jEn8Pn58A3UwW38sMCeAVoJra2BUwxqvA"
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/etherparty",
"chat": "https://t.me/etherparty",
"facebook": "https://www.facebook.com/etherparty",
"forum": "https://bitcointalk.org/index.php?topic=2005965",
"github": "https://github.com/etherparty",
"gitter": "",
"instagram": "https://www.instagram.com/etherparty_io",
"linkedin": "https://www.linkedin.com/company/etherparty",
"reddit": "https://www.reddit.com/r/etherparty",
"slack": "",
"telegram": "https://t.me/etherparty",
"twitter": "https://twitter.com/etherparty_io",
"youtube": "https://www.youtube.com/channel/UCwBzpneop1za6w4DYJJgsIQ"
}
},
"IOTX": {
"symbol": "IOTX",
"address": "0x6fB3e0A217407EFFf7Ca062D46c26E5d60a14d69",
"decimals": 18,
"name": "IoTeX Network",
"ens_address": "",
"website": "http://iotex.io/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@iotex",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/iotexproject/iotex-core",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/iotex/",
"reddit": "https://www.reddit.com/r/IoTeX/",
"slack": "",
"telegram": "https://t.me/IoTeXGroup",
"twitter": "https://twitter.com/iotex_io",
"youtube": "https://www.youtube.com/channel/UCdj3xY3LCktuamvuFusWOZw"
}
},
"OCN": {
"symbol": "OCN",
"name": "Odyssey",
"type": "ERC20",
"address": "0x4092678e4E78230F46A1534C0fbc8fA39780892B",
"ens_address": "",
"decimals": 18,
"website": "http://www.ocnex.net",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://cryptomaa.com/coin/OCN/",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/OdysseyOCN",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/OdysseyOCN",
"youtube": ""
}
},
"UTT": {
"symbol": "UTT",
"name": "United Traders Token",
"type": "ERC20",
"address": "0x16f812Be7FfF02cAF662B85d5d58a5da6572D4Df",
"ens_address": "",
"decimals": 8,
"website": "https://uttoken.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@Uttoken.io",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/uttoken",
"youtube": ""
}
},
"STN": {
"symbol": "STN",
"address": "0x599346779e90fc3F5F997b5ea715349820F91571",
"decimals": 4,
"name": "<NAME>",
"ens_address": "",
"website": "https://saturn.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://rados.io",
"chat": "",
"facebook": "",
"forum": "https://bitcointalk.org/index.php?topic=2046046",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/rados_io",
"youtube": ""
}
},
"MSP": {
"symbol": "MSP",
"address": "0x68AA3F232dA9bdC2343465545794ef3eEa5209BD",
"decimals": 18,
"name": "Mothership",
"ens_address": "",
"website": "https://mothership.cx",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/mothershipcx",
"twitter": "",
"youtube": ""
}
},
"ARX": {
"symbol": "ARX",
"address": "0x7705FaA34B16EB6d77Dfc7812be2367ba6B0248e",
"decimals": 8,
"name": "ARX",
"ens_address": "",
"website": "https://artex.global",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/va109/Artex",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/ARTEX_GLOBAL",
"twitter": "",
"youtube": ""
}
},
"TBX": {
"symbol": "TBX",
"name": "Tokenbox",
"type": "ERC20",
"address": "0x3A92bD396aEf82af98EbC0Aa9030D25a23B11C6b",
"ens_address": "",
"decimals": 18,
"website": "https://tokenbox.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/tokenbox",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/tokenbox",
"youtube": ""
}
},
"COSM": {
"symbol": "COSM",
"name": "<NAME>",
"type": "ERC20",
"address": "0xC4Bcd64CB216D49fD3C643A32762F34626b45a1a",
"ens_address": "",
"decimals": 18,
"website": "https://cosmochain.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@Cosmochain",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/cosmochain",
"youtube": ""
}
},
"E₹": {
"symbol": "E₹",
"address": "0xb67734521eAbBE9C773729dB73E16CC2dfb20A58",
"decimals": 2,
"name": "eRupee",
"ens_address": "",
"website": "https://erupee.wordpress.com",
"logo": {
"src": "https://raw.githubusercontent.com/eRupee/images/master/coin%20logo.png",
"width": 900,
"height": 720,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://erupee.wordpress.com"
},
"social": {
"blog": "https://erupee.wordpress.com",
"chat": "",
"facebook": "",
"forum": "https://bitcointalk.org/index.php?topic=2839333.new#new",
"github": "https://github.com/eRupee",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/eRupee",
"slack": "https://erupeecoin.slack.com",
"telegram": "t.me/eRupee",
"twitter": "https://twitter.com/eRupeeCoin",
"youtube": ""
}
},
"SWFTC": {
"symbol": "SWFTC",
"name": "SwftCoin",
"type": "ERC20",
"address": "0x0bb217E40F8a5Cb79Adf04E1aAb60E5abd0dfC1e",
"ens_address": "",
"decimals": 8,
"website": "http://www.swftcoin.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://forum.bitcoin.com/alternative-cryptocurrencies-altcoins/smartcash-t29835.html",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/swftcoin",
"youtube": ""
}
},
"eosDAC": {
"symbol": "eosDAC",
"address": "0x7e9e431a0B8c4D532C745B1043c7FA29a48D4fBa",
"decimals": 18,
"name": "eosDAC",
"ens_address": "",
"website": "https://eosdac.io/",
"logo": {
"src": "https://eosdac.io/wp-content/uploads/2018/03/eosdaclogo1-200-jpeg.jpg",
"width": "200",
"height": "200",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://t.me/eosdacio"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://facebook.com/eosdac",
"forum": "",
"github": "https://github.com/eosdac",
"gitter": "",
"instagram": "https://instagram.com/eosdac",
"linkedin": "https://linkedin.com/company/eosdac",
"reddit": "https://www.reddit.com/r/EOSDAC/",
"slack": "",
"telegram": "https://t.me/eosdacio",
"twitter": "https://twitter.com/eosdac",
"youtube": ""
}
},
len(netx)
if dead:
result[dead] = counts[1] / len(netx)
return result
def _cyclomatic(graph):
"""
Calculates the cyclomatic complexity of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
return e - v + 1
def cyclomatic(graph, radius=5, name="cyclomatic", distance=None, verbose=True):
"""
Calculates cyclomatic complexity for subgraph around each node if radius is set, or for
whole graph, if ``radius=None``.
Subgraph is generated around each node within set radius. If ``distance=None``,
radius will define topological distance, otherwise it uses values in ``distance``
attribute.
.. math::
\\alpha=e-v+1
where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.
Adapted from :cite:`bourdic2012`.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
radius: int
Include all neighbors of distance <= radius from n
name : str, optional
calculated attribute name
distance : str, optional
Use specified edge data key as distance.
For example, setting ``distance='weight'`` will use the edge ``weight`` to
measure the distance from the node n.
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Returns
-------
Graph
networkx.Graph if radius is set
float
cyclomatic complexity for graph if ``radius=None``
Examples
--------
>>> network_graph = mm.cyclomatic(network_graph, radius=3)
"""
netx = graph.copy()
if radius:
for n in tqdm(netx, total=len(netx), disable=not verbose):
sub = nx.ego_graph(
netx, n, radius=radius, distance=distance
) # define subgraph of steps=radius
netx.nodes[n][name] = _cyclomatic(
sub
) # save value calculated for subgraph to node
return netx
return _cyclomatic(netx)
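# Minimal standalone illustration (not part of the public API): the ego-graph pattern
# used above, applied by hand to a single node of a small grid-like network. ``nx`` and
# the helper ``_cyclomatic`` are already available in this module.
def _cyclomatic_demo():
    demo_graph = nx.grid_2d_graph(3, 3)
    demo_sub = nx.ego_graph(demo_graph, (1, 1), radius=1)
    return _cyclomatic(demo_sub)  # e - v + 1 for the subgraph around node (1, 1)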
def _edge_node_ratio(graph):
"""
Calculates edge / node ratio of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
return e / v
def edge_node_ratio(
graph, radius=5, name="edge_node_ratio", distance=None, verbose=True
):
"""
Calculates edge / node ratio for subgraph around each node if radius is set, or for
whole graph, if ``radius=None``.
Subgraph is generated around each node within set radius. If ``distance=None``,
radius will define topological distance, otherwise it uses values in ``distance``
attribute.
.. math::
\\alpha=e/v
where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.
Adapted from :cite:`dibble2017`.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
radius: int
Include all neighbors of distance <= radius from n
name : str, optional
calculated attribute name
distance : str, optional
Use specified edge data key as distance.
For example, setting ``distance='weight'`` will use the edge ``weight`` to
measure the distance from the node n.
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Returns
-------
Graph
networkx.Graph if radius is set
float
edge / node ratio for graph if ``radius=None``
Examples
--------
>>> network_graph = mm.edge_node_ratio(network_graph, radius=3)
"""
netx = graph.copy()
if radius:
for n in tqdm(netx, total=len(netx), disable=not verbose):
sub = nx.ego_graph(
netx, n, radius=radius, distance=distance
) # define subgraph of steps=radius
netx.nodes[n][name] = _edge_node_ratio(
sub
) # save value calculated for subgraph to node
return netx
return _edge_node_ratio(netx)
def _gamma(graph):
"""
Calculates gamma index of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
if v == 2:
return np.nan
return e / (3 * (v - 2))
def gamma(graph, radius=5, name="gamma", distance=None, verbose=True):
"""
Calculates connectivity gamma index for subgraph around each node if radius is set, or for
whole graph, if ``radius=None``.
Subgraph is generated around each node within set radius. If ``distance=None``,
radius will define topological distance, otherwise it uses values in ``distance``
attribute.
.. math::
\\gamma=\\frac{e}{3(v-2)}
where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.
Adapted from :cite:`dibble2017`.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
radius: int
Include all neighbors of distance <= radius from n
name : str, optional
calculated attribute name
distance : str, optional
Use specified edge data key as distance.
For example, setting ``distance='weight'`` will use the edge ``weight`` to
measure the distance from the node n.
verbose : bool (default True)
if True, shows progress bars in loops and indication of steps
Returns
-------
Graph
networkx.Graph if radius is set
float
gamma index for graph if ``radius=None``
Examples
--------
>>> network_graph = mm.gamma(network_graph, radius=3)
"""
netx = graph.copy()
if radius:
for n in tqdm(netx, total=len(netx), disable=not verbose):
sub = nx.ego_graph(
netx, n, radius=radius, distance=distance
) # define subgraph of steps=radius
netx.nodes[n][name] = _gamma(sub)
return netx
return _gamma(netx)
def clustering(graph, name="cluster"):
"""
Calculates the squares clustering coefficient for nodes.
Wrapper around ``networkx.square_clustering``.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
name : str, optional
calculated attribute name
Returns
-------
Graph
networkx.Graph
Examples
--------
>>> network_graph = mm.clustering(network_graph)
"""
netx = graph.copy()
vals = nx.square_clustering(netx)
nx.set_node_attributes(netx, vals, name)
return netx
def _closeness_centrality(G, u=None, length=None, wf_improved=True, len_graph=None):
r"""Compute closeness centrality for nodes. Slight adaptation of networkx
`closeness_centrality` to allow normalisation for local closeness.
Adapted script used in networkx.
Closeness centrality [1]_ of a node `u` is the reciprocal of the
average shortest path distance to `u` over all `n-1` reachable nodes.
.. math::
C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
where `d(v, u)` is the shortest-path distance between `v` and `u`,
and `n` is the number of nodes that can reach `u`. Notice that the
closeness distance function computes the incoming distance to `u`
for directed graphs. To use outward distance, act on `G.reverse()`.
Notice that higher values of closeness indicate higher centrality.
Wasserman and Faust propose an improved formula for graphs with
more than one connected component. The result is "a ratio of the
fraction of actors in the group who are reachable, to the average
distance" from the reachable actors [2]_. You might think this
scale factor is inverted but it is not. As is, nodes from small
components receive a smaller closeness value. Letting `N` denote
the number of nodes in the graph,
.. math::
C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
Parameters
----------
G : graph
A NetworkX graph
u : node, optional
Return only the value for node u
distance : edge attribute key, optional (default=None)
Use the specified edge attribute as the edge distance in shortest
path calculations
len_graph : int
length of complete graph
Returns
-------
nodes : dictionary
Dictionary of nodes with closeness centrality as the value.
References
----------
.. [1] <NAME>: Centrality in networks: I.
Conceptual clarification. Social Networks 1:215-239, 1979.
http://leonidzhukov.ru/hse/2013/socialnetworks/papers/freeman79-centrality.pdf
.. [2] pg. 201 of <NAME>. and <NAME>.,
Social Network Analysis: Methods and Applications, 1994,
Cambridge University Press.
"""
if length is not None:
import functools
# use Dijkstra's algorithm with specified attribute as edge weight
path_length = functools.partial(
nx.single_source_dijkstra_path_length, weight=length
)
else:
path_length = nx.single_source_shortest_path_length
nodes = [u]
closeness_centrality = {}
for n in nodes:
sp = dict(path_length(G, n))
totsp = sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness_centrality[n] = (len(sp) - 1.0) / totsp
# normalize to number of nodes-1 in connected part
s = (len(sp) - 1.0) / (len_graph - 1)
closeness_centrality[n] *= s
else:
closeness_centrality[n] = 0.0
return closeness_centrality[u]
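# Worked example (hypothetical numbers, not part of the library): for node 0 of a
# 4-node path graph the distances to the other nodes are 1, 2 and 3, so the raw
# closeness is (4 - 1) / 6 = 0.5. With len_graph=6 the local normalisation above
# scales it by (4 - 1) / (6 - 1), giving 0.3.
def _closeness_demo():
    demo_graph = nx.path_graph(4)
    return _closeness_centrality(demo_graph, u=0, len_graph=6)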
def local_closeness_centrality(
graph, radius=5, name="closeness", distance=None, weight=None
):
"""
Calculates local closeness for each node based on the defined distance.
Subgraph is generated around each node within set radius. If ``distance=None``,
radius will define topological distance, otherwise it uses values in ``distance``
attribute. Based on ``networkx.closeness_centrality``.
Local closeness centrality of a node `u` is the reciprocal of the
average shortest path distance to `u` over all `n-1` nodes within subgraph.
.. math::
C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},
where :math:`d(v, u)` is the shortest-path distance between :math:`v` and :math:`u`,
and :math:`n` is the number of nodes that can reach :math:`u`.
Adapted from :cite:`porta2006`.
Parameters
----------
graph : networkx.Graph
Graph representing street network.
Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
radius: int
Include all neighbors of distance <= radius from n
# Standard Library
import asyncio
import logging
import os
import pandas as pd
# Third Party
from elasticsearch import AsyncElasticsearch
from kubernetes import client, config
from PeakDetecion import PeakDetection
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(message)s")
config.load_incluster_config()
configuration = client.Configuration()
core_api_instance = client.CoreV1Api()
app_api_instance = client.AppsV1Api()
ES_ENDPOINT = os.environ["ES_ENDPOINT"]
ES_USERNAME = os.environ["ES_USERNAME"]
ES_PASSWORD = os.environ["ES_PASSWORD"]
es_instance = AsyncElasticsearch(
[ES_ENDPOINT],
port=9200,
http_compress=True,
http_auth=(ES_USERNAME, ES_PASSWORD),
verify_certs=False,
use_ssl=True,
)
WINDOW = int(os.getenv("WINDOW", "10"))
THRESHOLD = float(os.getenv("THRESHOLD", "2.5"))
INFLUENCE = float(os.getenv("INFLUENCE", "0.5"))
AOI_MINUTES_THRESHOLD = int(os.getenv("AOI_MINUTES_THRESHOLD", "10"))
MILLISECONDS_MINUTE = 60000
historic_workload_data = dict()
historic_workload_pod_dict = dict()
workload_types = {
"ReplicaSet",
"StatefulSet",
"Deployment",
"Job",
"DaemonSet",
"CustomResource",
"Independent",
}
class BackgroundFunction:
def __init__(self):
pass
def get_next_owner_reference_metadata(self, all_workload_data, owner_name):
"""
This function is called by get_workload_name and takes in a list of metadata objects of a particular workload type
(deployment, statefulset, replicaset or daemonset) and a string for the owner name. It will then go through
the list of metadata objects until it comes across the object that matches the owner name and then returns that
metadata object. If no metadata object is found, return None which will cause the while loop in get_workload_name
to break.
"""
for data_idx in range(len(all_workload_data)):
if all_workload_data[data_idx].metadata.name == owner_name:
return all_workload_data[data_idx].metadata
def get_workload_name(self, pod_metadata):
"""
This function gets the name of the workload by looping through owner references until it reaches a workload which
does not have an owner reference. When it reaches that workload, it then is able to retrieve the name of that workload.
"""
owner_name = None
# While loop that will keep on looping until it comes across an object which does not have an owner reference.
while pod_metadata and pod_metadata.owner_references:
owner_references = pod_metadata.owner_references
if len(owner_references) == 0:
break
owner_kind = owner_references[0].kind
owner_name = owner_references[0].name
# Depending on the kind of owner_reference, fetch the appropriate breakdown type and obtain the updated pod_metadata.
if owner_kind == "Deployment":
all_deployments = (
app_api_instance.list_deployment_for_all_namespaces().items
)
pod_metadata = self.get_next_owner_reference_metadata(
all_deployments, owner_name
)
elif owner_kind == "StatefulSet":
all_stateful_sets = (
app_api_instance.list_stateful_set_for_all_namespaces().items
)
pod_metadata = self.get_next_owner_reference_metadata(
all_stateful_sets, owner_name
)
elif owner_kind == "ReplicaSet":
all_replica_sets = (
app_api_instance.list_replica_set_for_all_namespaces().items
)
pod_metadata = self.get_next_owner_reference_metadata(
all_replica_sets, owner_name
)
elif owner_kind == "DaemonSet":
all_daemon_sets = (
app_api_instance.list_daemon_set_for_all_namespaces().items
)
pod_metadata = self.get_next_owner_reference_metadata(
all_daemon_sets, owner_name
)
else:
break
return owner_name
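# Example (hypothetical names): a pod "web-7d9f-abc12" owned by ReplicaSet "web-7d9f",
# which is in turn owned by Deployment "web", resolves to the workload name "web".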
async def monitor_workloads(self):
# This function will call the Kubernetes API every minute to keep track of workloads over time and update historic_workload_data dictionary.
while True:
try:
all_pods = core_api_instance.list_pod_for_all_namespaces(watch=False)
all_pods_items = all_pods.items
for pod_spec in all_pods_items:
# For each pod object, obtain the name, metadata and owner references.
pod_metadata = pod_spec.metadata
pod_name = pod_metadata.name
namespace_name = pod_metadata.namespace
owner_references = pod_metadata.owner_references
kind = "CustomResource"
workload_name = pod_metadata.name
# Determine the kind of breakdown of the pod and update the workload name as well to the name of the owner reference.
if owner_references and len(owner_references) > 0:
if owner_references[0].kind in workload_types:
kind = owner_references[0].kind
workload_name = owner_references[0].name
else:
kind = "Independent"
original_workload_name = self.get_workload_name(pod_metadata)
if original_workload_name:
workload_name = original_workload_name
if not namespace_name in historic_workload_data:
historic_workload_data[namespace_name] = {
"ReplicaSet": {},
"StatefulSet": {},
"Deployment": {},
"Job": {},
"DaemonSet": {},
"CustomResource": {},
"Independent": {},
}
# keep separate bookkeeping for pod lists rather than aliasing historic_workload_data
historic_workload_pod_dict[namespace_name] = {kind_key: {} for kind_key in workload_types}
if not workload_name in historic_workload_pod_dict[namespace_name][kind]:
historic_workload_pod_dict[namespace_name][kind][workload_name] = []
if not pod_name in historic_workload_data[namespace_name][kind][workload_name]:
historic_workload_pod_dict[namespace_name][kind][workload_name].append(pod_name)
if not pod_name in historic_workload_data[namespace_name][kind]:
historic_workload_data[namespace_name][kind][
pod_name
] = workload_name
except Exception as e:
logging.error(f"Unable to access Kubernetes pod endpoint. {e}")
await asyncio.sleep(60)
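# Shape of the bookkeeping built by monitor_workloads (hypothetical values):
#   historic_workload_data:     namespace -> kind -> pod name      -> workload name
#   historic_workload_pod_dict: namespace -> kind -> workload name -> [pod names]
# e.g. {"default": {"Deployment": {"web-7d9f-abc12": "web"}, ...}} and
#      {"default": {"Deployment": {"web": ["web-7d9f-abc12"]}, ...}}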
async def get_logs(start_ts, end_ts, query_parameters, scroll_id):
"""
This function takes a start_ts, end_ts, a dictionary called query_parameters and a scroll_id. The contents of query_parameters
depend on which endpoint a GET request was submitted to and determine which type of data is filtered for. The function fetches
up to 100 logs marked as Suspicious or Anomaly, along with additional attributes such as the timestamp, anomaly_level,
whether or not each log is a control plane log, its pod name and its namespace name. If the scroll_id
is None, fetch the first 100 logs during the specified time interval and also provide the scroll_id as a returned argument.
If a scroll_id is provided, fetch the next 100 logs from the reference of the scroll_id and return the new scroll_id upon
returning the logs_dict dictionary.
"""
logs_dict = {"Logs": []}
try:
query_body = {
"query": {
"bool": {
"filter": [{"range": {"timestamp": {"gte": start_ts, "lte": end_ts}}}],
"must": [{"match": {"anomaly_level": query_parameters["anomaly_level"]}}],
},
},
"_source": [
"timestamp",
"log",
"anomaly_level",
"is_control_plane_log",
"kubernetes.pod_name",
"kubernetes.namespace_name",
],
"sort": [{"timestamp": {"order": "asc"}}],
}
# Separate query for counting the total number of logs within the time interval.
count_query_body = {
"query": {
"bool": {
"filter": [
{"range": {"timestamp": {"gte": start_ts, "lte": end_ts}}}],
"must": [{"match": {"anomaly_level": query_parameters["anomaly_level"]}}],
},
},
}
except Exception as e:
logging.error("anomaly_level not provided in query_parameters dictionary")
return logs_dict
# If the logs_pod endpoint is hit, extract the pod_name and namespace_name from the query_parameters dictionary.
if query_parameters["type"] == "pod":
try:
query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
query_body["query"]["bool"]["must"].append({"match": {"kubernetes.pod_name.keyword": query_parameters["pod_name"]}})
count_query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
count_query_body["query"]["bool"]["must"].append({"match": {"kubernetes.pod_name.keyword": query_parameters["pod_name"]}})
except Exception as e:
logging.error(f"Proper arguments not provided to query_parameters. {e}")
return logs_dict
# If the logs_namespace endpoint is hit, extract namespace_name from the query_parameters dictionary.
elif query_parameters["type"] == "namespace":
try:
query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
count_query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
except Exception as e:
logging.error(f"Proper arguments not provided to query_parameters. {e}")
return logs_dict
# If the logs_workload endpoint is hit, extract namespace_name, workload_type and workload_name from query_parameters dictionary.
elif query_parameters["type"] == "workload":
try:
workload_pod_names = historic_workload_pod_dict[query_parameters["namespace_name"]][query_parameters["workload_type"]][query_parameters["workload_name"]]
should_query = {"bool": {"should": []}}
for pod_name in workload_pod_names:
should_query["bool"]["should"].append({"match": {"kubernetes.pod_name.keyword": pod_name}})
query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
count_query_body["query"]["bool"]["must"].append({"match": {"kubernetes.namespace_name.keyword": query_parameters["namespace_name"]}})
query_body["query"]["bool"]["must"].append(should_query)
count_query_body["query"]["bool"]["must"].append(should_query)
except Exception as e:
logging.error(f"Unable to get logs by workload. {e}")
return logs_dict
# If the logs_control_plane endpoint is hit, extract the control_plane_component from the query_parameters dictionary.
elif query_parameters["type"] == "control_plane":
try:
query_body["query"]["bool"]["must"].append({"match": {"kubernetes_component.keyword": query_parameters["control_plane_component"]}})
query_body["_source"].append("kubernetes_component")
count_query_body["query"]["bool"]["must"].append({"match": {"kubernetes_component.keyword": query_parameters["control_plane_component"]}})
except Exception as e:
logging.error(f"Proper arguments not provided to query_parameters. {e}")
return logs_dict
scroll_value = "1m"
# If scroll_id is provided, then use it to fetch the next 100 logs and return that in addition to the updated scroll_id as part of the logs_dict dictionary.
try:
if scroll_id:
current_page = await es_instance.scroll(scroll_id=scroll_id, scroll=scroll_value)
else:
current_page = await es_instance.search(index="logs",body=query_body, scroll=scroll_value, size=100)
logs_dict["total_logs_count"] = (await es_instance.count(index="logs",body=count_query_body))['count']
result_hits = current_page["hits"]["hits"]
logs_dict["scroll_id"] = current_page["_scroll_id"]
for each_hit in result_hits:
logs_dict["Logs"].append(each_hit["_source"])
return logs_dict
except Exception as e:
logging.error(f"Unable to access Elasticsearch logs index. {e}")
return logs_dict
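# Usage sketch (assumed parameter values): page through anomalous logs for one pod by
# feeding the scroll_id of each response back into the next call.
async def _get_logs_demo(start_ts, end_ts):
    params = {
        "type": "pod",
        "anomaly_level": "Anomaly",
        "namespace_name": "default",
        "pod_name": "web-7d9f-abc12",
    }
    first_page = await get_logs(start_ts, end_ts, params, scroll_id=None)
    next_page = await get_logs(start_ts, end_ts, params, first_page.get("scroll_id"))
    return first_page["Logs"] + next_page["Logs"]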
def get_workload_breakdown(pod_breakdown_data):
# Get the breakdown of normal, suspicious and anomalous logs by workload.
workload_breakdown_dict = {
"ReplicaSet": {},
"StatefulSet": {},
"Deployment": {},
"Job": {},
"DaemonSet": {},
"CustomResource": {},
"Independent": {},
}
workload_namespace_dict = dict()
for pod_spec in pod_breakdown_data["Pods"]:
pod_name, pod_insights, pod_ns = (
pod_spec["Name"],
pod_spec["Insights"],
pod_spec["Namespace"],
)
# For each pod object fetch the workload if the data is available.
workload_name = ""
kind = ""
if pod_ns in historic_workload_data:
for workload_type in historic_workload_data[pod_ns]:
if pod_name in historic_workload_data[pod_ns][workload_type]:
workload_name = historic_workload_data[pod_ns][workload_type][
pod_name
]
kind = workload_type
break
if workload_name and kind:
if not workload_name in workload_namespace_dict:
workload_namespace_dict[workload_name] = pod_ns
if not workload_name in workload_breakdown_dict[kind]:
workload_breakdown_dict[kind][workload_name] = {
"Normal": 0,
"Suspicious": 0,
"Anomaly": 0,
}
# Accumulate the insight count for each workload name.
for anomaly_level in workload_breakdown_dict[kind][workload_name]:
workload_breakdown_dict[kind][workload_name][
anomaly_level
] += pod_insights[anomaly_level]
# Restructure workload_breakdown_dict to be in finalized format.
for breakdown_type, breakdown_dict in workload_breakdown_dict.items():
workload_breakdown_dict[breakdown_type] = []
for name, insights in breakdown_dict.items():
workload_breakdown_dict[breakdown_type].append(
{
"Name": name,
"Namespace": workload_namespace_dict[name],
"Insights": insights,
}
)
return workload_breakdown_dict
def get_pod_breakdown(pod_aggregation_data):
# Get the breakdown of normal, suspicious and anomalous logs by pod.
pod_breakdown_dict = {"Pods": []}
try:
for each_ns_bucket in pod_aggregation_data:
pod_buckets = each_ns_bucket["pod_name"]["buckets"]
for each_pod_bucket in pod_buckets:
if len(each_pod_bucket["key"]) == 0:
continue
pod_aggregation_dict = {
"Name": each_pod_bucket["key"],
"Insights": {"Normal": 0, "Suspicious": 0, "Anomaly": 0},
"Namespace": each_ns_bucket["key"],
}
anomaly_level_buckets = each_pod_bucket["anomaly_level"]["buckets"]
for bucket in anomaly_level_buckets:
pod_aggregation_dict["Insights"][bucket["key"]] = bucket[
"doc_count"
]
pod_breakdown_dict["Pods"].append(pod_aggregation_dict)
return pod_breakdown_dict
except Exception as e:
logging.error(f"Unable to aggregate pod data. {e}")
return pod_breakdown_dict
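# Illustrative input (hypothetical values): the Elasticsearch aggregation shape that
# get_pod_breakdown expects, namely namespace buckets containing pod_name buckets,
# which in turn contain anomaly_level buckets.
_example_pod_aggregation = [
    {
        "key": "default",
        "pod_name": {
            "buckets": [
                {
                    "key": "web-7d9f-abc12",
                    "anomaly_level": {
                        "buckets": [
                            {"key": "Normal", "doc_count": 120},
                            {"key": "Anomaly", "doc_count": 3},
                        ]
                    },
                }
            ]
        },
    }
]
# get_pod_breakdown(_example_pod_aggregation) reports 120 Normal and 3 Anomaly logs
# for pod "web-7d9f-abc12" in namespace "default".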
async def get_pod_aggregation(start_ts, end_ts):
# Get the breakdown of normal, suspicious and anomalous logs by pod and then send over the resulting aggregation to get the pod and workload breakdown.
query_body = {
"size": 0,
"query": {
"bool": {
"must": [
{"match": {"is_control_plane_log": "false"}},
{"regexp": {"kubernetes.pod_name": ".+"}},
],
"filter": [{"range": | |
self.action_space.low[0]), self.action_space.high[0])])
def _cascaded_PI_controller(self, state, ref, k):
"""
cascaded controller with two PI controllers for inner current and outer speed control for DC motors
Args:
:param state: [omega, i_a, i_e] # use [omega, i, i] for series
:param ref: reference
:param k: current time step
Returns:
:return: normalised input voltage
"""
OMEGA_IDX = 0
i_a_IDX = 1
i_e_IDX = 2
# denormalize quantities
omega = state[OMEGA_IDX] * self.controller_params['omega_max']
omega_ref = ref * self.controller_params['omega_max']
i_a = state[i_a_IDX] * self._i_a_max
psi_e = state[i_e_IDX] * self._i_e_max_prime
# outer control loop
d_omega = omega_ref-omega
if psi_e != 0:
temp = self.cumulated_values[0] + d_omega * self.controller_params['K_i_o'] / psi_e # integral part
i_a_des = temp + d_omega * self.controller_params['K_p_o'] / psi_e
else:
i_a_des = np.sign(d_omega) * self._i_a_max
temp = self.cumulated_values[0]
# hold current constraints, anti wind-up
if i_a_des > self._i_a_max or i_a_des < self._i_a_min:
i_a_des = np.clip(i_a_des, self._i_a_min, self._i_a_max)
else:
self.cumulated_values[0] = temp
d_i_a = i_a_des - i_a
# inner control loop
temp = self.cumulated_values[1] + d_i_a * self.controller_params['K_i_i'] # integral part
d_u_a = temp + d_i_a * self.controller_params['K_p_i']
u_a_0 = omega * psi_e
u_a = d_u_a + u_a_0
# hold voltage limits, anti wind-up
if u_a > self._u_a_max or u_a < self._u_a_min:
u_a = np.clip(u_a, self._u_a_min, self._u_a_max)
else:
self.cumulated_values[1] = temp
# normalise the desired output voltage to a duty cycle referring to the supply voltage
# Assumption: u_sup = u_N is made
des_duty_cycle = u_a / self.controller_params['u_sup']
duty_cycle = np.clip(des_duty_cycle, self.controller_params['converter_voltage'][0],
self.controller_params['converter_voltage'][1])
return np.array([duty_cycle])
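# Added note: the anti wind-up used above is "conditional integration" -- the
# integrator state (self.cumulated_values[...]) is only committed when the PI
# output stays inside its limits. A minimal standalone sketch of the same
# pattern, with made-up gains and limits, would look like:
#
#     integ = 0.0
#     def pi_step(err, k_p=1.0, k_i=0.1, lo=-1.0, hi=1.0):
#         global integ
#         cand = integ + k_i * err        # candidate integrator state
#         out = cand + k_p * err
#         if lo <= out <= hi:
#             integ = cand                # commit only when not saturated
#         return min(max(out, lo), hi)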
def _cascaded_PI_controller_Permex(self, state, ref, k, *args):
"""
cascaded controller with two PI controllers for current and speed control for PermEx motor
Args:
:param state: current system state
:param ref: reference
:param k: current time step
Returns:
:return: normalised input voltage
"""
OMEGA_IDX = 0
i_IDX = 1
# denormalize quantities
omega = state[OMEGA_IDX] * self.controller_params['omega_max']
omega_ref = ref * self.controller_params['omega_max']
i_a = state[i_IDX] * self._i_a_max
# outer control loop
d_omega = omega_ref-omega
temp = self.cumulated_values[0] + d_omega * self.controller_params['K_i_o'] / self.psi_e # integral part
i_a_des = temp + d_omega * self.controller_params['K_p_o'] / self.psi_e
# hold current constraints, anti wind-up
if i_a_des > self._i_a_max or i_a_des < self._i_a_min:
i_a_des = np.clip(i_a_des, self._i_a_min, self._i_a_max)
else:
self.cumulated_values[0] = temp
d_i_a = i_a_des - i_a
# inner control loop
temp = self.cumulated_values[1] + d_i_a * self.controller_params['K_i_i'] # integral part
d_u_a = temp + d_i_a * self.controller_params['K_p_i']
u_a_0 = omega * self.psi_e
u_a = d_u_a + u_a_0
# hold voltage limits, anti wind-up
if u_a > self._u_a_max or u_a < self._u_a_min:
u_a = np.clip(u_a, self._u_a_min, self._u_a_max)
else:
self.cumulated_values[1] = temp
# normalise the desired output voltage to a duty cycle referring to the supply voltage
# Assumption: u_sup = u_N is made
des_duty_cycle = u_a / self.controller_params['u_sup']
duty_cycle = np.clip(des_duty_cycle, self.controller_params['converter_voltage'][0],
self.controller_params['converter_voltage'][1])
return np.array([duty_cycle])
# endregion
# region discrete DC controller
def _on_off(self, state, ref, k):
"""
On or Off controller depending on the current state and the reference
Args:
:param state: current measured system state
:param ref: reference for the system
:param k: current time step
Returns:
:return: input action (voltage)
"""
action = 1 if state < ref else 0 # Hint: modified else branch to 0 or 2 for some converters
return action
def _three_point(self, state, ref, *_):
"""
Implementation of a hysteresis controller
Args:
:param state: current measured system state
:param ref: reference for the system
:param k: current time step
Returns:
:return: input action (voltage)
"""
action = 1 if state - ref < -self.controller_params['hysteresis']\
else 2 if state - ref > self.controller_params['hysteresis'] \
else 0
return action
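# Worked example (added; assuming controller_params['hysteresis'] == 0.05):
#     state - ref = -0.10  -> below the band  -> action 1 (switch up)
#     state - ref = +0.10  -> above the band  -> action 2 (switch down)
#     state - ref = +0.02  -> inside the band -> action 0 (keep off)
# The dead band keeps the converter from switching on every small deviation.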
# endregion
# region discrete PMSM controller
def _pmsm_hysteresis(self, state, ref, k):
"""
Hysteresis controller for PMSM with feed forward control for u_d
:param state: state/observation from the motor
:param ref: current reference value
:param k: current time step
:return: switching command for the converter
"""
# indices in the observation array
OMEGA_IDX = 0
I_A_IDX = 2
I_B_IDX = 3
I_C_IDX = 4
EPSILON_IDX = 8
CURRENTS = [I_A_IDX, I_B_IDX, I_C_IDX]
# denormalization
omega = state[OMEGA_IDX] * self.controller_params['omega_max']
i = state[CURRENTS] * self.controller_params['i_max']
epsilon = state[EPSILON_IDX] * self.controller_params['epsilon_max']
# transformation to dq-coordinates
i_dq = PmsmModel.q_inv(PmsmModel.t_23(i), epsilon)
# feed forward control
u_d_0 = omega * self.controller_params['L_q'] * i_dq[1] / self.controller_params['u_N']
# hysteresis control
state = state[OMEGA_IDX]
if state < ref - self.controller_params['hysteresis']:
u_q = 1
elif state > ref + self.controller_params['hysteresis']:
u_q = -1
else:
u_q = 0
# transformation back to abc-coordinates
u_a, u_b, u_c = PmsmModel.t_32(PmsmModel.q((u_d_0, u_q), epsilon))
return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
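# Added note: the return value packs the three half-bridge states into one
# switching command, e.g. u_a > 0, u_b <= 0, u_c > 0 gives 4 + 0 + 1 = 5
# (binary 101). The same encoding is returned by _pmsm_on below.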
def _pmsm_on(self, state, ref, k, *args):
"""
On or Off controller for the PMSM
:param state: state/observation from the motor
:param ref: current reference value
:param k: current time step
:param args: additional arguments as the angle epsilon
:return:
"""
if ref > state[0]:
u_q = 1
u_d = 0
else:
u_q = -1
u_d = 0
u_a, u_b, u_c = PmsmModel.t_32(PmsmModel.q((u_d, u_q), args[0] * self.controller_params['safety_margin']))
return 4 * (u_a > 0) + 2 * (u_b > 0) + (u_c > 0)
# endregion
# region continuous PMSM controller
def _foc_controller(self, state, ref, k, *args):
"""
Field oriented control from the lecture "controlled three phase drives, chapter 5"
Args:
state: current system state
ref: references
k: current time steps
args: not used in this function
Returns:
normalised input voltages
"""
weight = 1 # weight for maximum values for the anti-wind-up from abc-values to dq-values
# indices in the state array
OMEGA_IDX = 0
I_A_IDX = 2
I_B_IDX = 3
I_C_IDX = 4
U_A_IDX = 5
U_B_IDX = 6
U_C_IDX = 7
EPSILON_IDX = 8
CURRENTS = [I_A_IDX, I_B_IDX, I_C_IDX]
VOLTAGES = [U_A_IDX, U_B_IDX, U_C_IDX]
# extract quantities from state
omega = state[OMEGA_IDX] * self.controller_params['omega_max']
omega_ref = ref * self.controller_params['omega_max']
i = state[CURRENTS] * self.controller_params['i_max']
u = state[VOLTAGES] * self.controller_params['u_max']
epsilon = state[EPSILON_IDX] * self.controller_params['epsilon_max'] * self.controller_params['p']
# transformation from a/b/c to alpha/beta and d/q
u_alphabeta = PmsmModel.t_23(u)
i_alphabeta = PmsmModel.t_23(i)
u_dq = PmsmModel.q_inv(u_alphabeta, epsilon)
i_dq = PmsmModel.q_inv(i_alphabeta, epsilon)
# compute u_d_0 and u_q_0
u_d_0 = omega * self.controller_params['L_q'] * i_dq[1]
u_q_0 = omega * (self.controller_params['Psi_p'] + self.controller_params['L_d'] * i_dq[0])
d_omega = omega_ref - omega
# compute T* (Torque reference) and i*_sq (q-axis current reference)
temp = self.cumulated_values[0] + d_omega * self.controller_params['K_i_T'] # integral part
T_des = temp + d_omega * self.controller_params['K_p_T'] # proportional part
i_sq_des = 2 * T_des / (3 * self.controller_params['p'] * self.controller_params['Psi_p'])
# anti wind-up
if i_sq_des > self.controller_params['i_max'] * weight or i_sq_des < -self.controller_params['i_max'] * weight:
i_sq_des = np.clip(i_sq_des, -self.controller_params['i_max'] * weight,
self.controller_params['i_max'] * weight)
else:
self.cumulated_values[0] = temp
if np.abs(omega_ref) < self.controller_params['omega_1']:
i_sd_des = 0
else:
i_sd_des = ((self.controller_params['u_max'] / omega_ref)**2 -
(self.controller_params['L_q'] * self.controller_params['i_max'])**2
- self.controller_params['Psi_p']**2)\
/ (2 * self.controller_params['Psi_p'] * self.controller_params['L_d'])
# transform back to abc-domain
currents = np.matmul(PmsmModel.t32, PmsmModel.q(np.array([i_sd_des, i_sq_des]), epsilon))
# test if current limits are violated
if np.max(np.abs(currents)) > self.controller_params['i_max']:
clipping = self.controller_params['i_max'] * np.ones(3)
currents = np.clip(currents, -clipping, clipping)
array = PmsmModel.q_inv(PmsmModel.t_23(currents), epsilon)
i_sd_des = array[0]
i_sq_des = array[1]
# compute du*_sq, du*_sd
d_i_sd = i_sd_des - i_dq[0]
d_i_sq = i_sq_des - i_dq[1]
temp_u_sd = self.cumulated_values[1] + d_i_sd * self.controller_params['K_i_d'] # integral part
temp_u_sq = self.cumulated_values[2] + d_i_sq * self.controller_params['K_i_q'] # integral part
d_u_sd_des = temp_u_sd + d_i_sd * self.controller_params['K_p_d']
d_u_sq_des = temp_u_sq + d_i_sq * self.controller_params['K_p_q']
# anti-wind-up u_sd
if d_u_sd_des > self.controller_params['u_max'] * weight - u_d_0 or \
d_u_sd_des < -self.controller_params['u_max'] * weight - u_d_0:
d_u_sd_des = np.clip(d_u_sd_des, -self.controller_params['u_max'] * weight - u_d_0,
self.controller_params['u_max'] * weight - u_d_0)
else:
self.cumulated_values[1] = temp_u_sd
# anti-wind-up u_sq
if d_u_sq_des > self.controller_params['u_max'] * weight - u_q_0 or \
d_u_sq_des < -self.controller_params['u_max'] * weight - u_q_0:
d_u_sq_des = np.clip(d_u_sq_des, -self.controller_params['u_max'] * weight - u_q_0,
self.controller_params['u_max'] * weight - u_q_0)
else:
self.cumulated_values[2] = temp_u_sq
# compute u*_sq, u*_sd, epsilon + depsilon due to delay of the controller
| |
'**attrs'. If 'is_replace_attrs_values' then replace attributes values,
if not 'is_replace_attrs_values' then update (merge) attributes values
which should be lists. If 'is_allow_none_values' then allow setting None as
object's attribute values, and vice versa.
If 'is_replace_values_of_dicts' then update values of dicts in list which
is value of particular object's attribute name:
(**attrs is attr={'key1': 'new_value2', 'key2': 'new_value2'}).
"""
# pylint: disable=expression-not-assigned
# pylint: disable=invalid-name
def update_obj_attrs_values(obj, is_replace_attrs_values,
is_allow_none_values, **attrs):
"""Update object's attributes values."""
if "review" in attrs and isinstance(attrs["review"], ReviewEntity):
obj.update_review(attrs.pop("review"))
for obj_attr_name in attrs:
obj_attr_value = None
if obj_attr_name in Representation.all_attrs_names():
_obj_attr_value = attrs.get(obj_attr_name)
if not is_replace_values_of_dicts:
# convert repr from objects to dicts exclude datetime objects
obj_attr_value = (
cls.repr_obj_to_dict(_obj_attr_value) if
not isinstance(_obj_attr_value, datetime) else _obj_attr_value)
if not is_replace_attrs_values:
origin_obj_attr_value = getattr(obj, obj_attr_name)
obj_attr_value = (
dict(origin_obj_attr_value.items() + obj_attr_value.items())
if obj_attr_name == "custom_attributes" else
help_utils.convert_to_list(origin_obj_attr_value) +
help_utils.convert_to_list(obj_attr_value))
if is_replace_values_of_dicts and isinstance(_obj_attr_value, dict):
obj_attr_value = StringMethods.exchange_dicts_items(
transform_dict=_obj_attr_value,
dicts=help_utils.convert_to_list(
getattr(obj, obj_attr_name)),
is_keys_not_values=False)
obj_attr_value = (
obj_attr_value if isinstance(getattr(obj, obj_attr_name), list)
else obj_attr_value[0])
if (is_allow_none_values is True or
(is_allow_none_values is False and
obj_attr_value is not None)):
setattr(obj, obj_attr_name, obj_attr_value)
return obj
return help_utils.execute_method_according_to_plurality(
objs=objs, types=Entity.all_entities_classes(),
method_name=update_obj_attrs_values,
is_replace_attrs_values=is_replace_attrs_values,
is_allow_none_values=is_allow_none_values, **attrs)
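# Usage sketch (added comment; the attribute values are hypothetical): calling
# the method above with is_replace_attrs_values=True and e.g. title="New title"
# overwrites the attribute, while is_replace_attrs_values=False merges
# list-like values (extends them) and merges 'custom_attributes' dicts;
# is_allow_none_values=False skips attributes whose new value is None.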
@classmethod
def filter_objs_attrs(cls, objs, attrs_to_include):
"""Make objects's copy and filter objects's attributes (delete attributes
from objects witch not in list'attrs_to_include').
'objs' can be list of objects or object.
"""
# pylint: disable=expression-not-assigned
def filter_obj_attrs(obj, attrs_to_include):
"""Filter one object's attributes."""
obj = copy.deepcopy(obj)
[delattr(obj, obj_attr) for obj_attr in obj.__dict__.keys()
if obj_attr not in attrs_to_include]
return obj
return ([filter_obj_attrs(obj, attrs_to_include) for obj in objs] if
isinstance(objs, list) else
filter_obj_attrs(objs, attrs_to_include))
def __eq__(self, other):
"""Extended equal procedure fore self and other entities."""
comparison = Representation.compare_entities(self, other)
self.diff_info = comparison["self_diff"]
other.diff_info = comparison["other_diff"]
return comparison["is_equal"]
@staticmethod
def attrs_values_types_error(self_attr, other_attr, expected_types):
raise ValueError("'{}' have to be isinstance of classes: {}\n".
format((self_attr, other_attr), expected_types))
@classmethod
def is_attrs_equal(cls, attr_name, self_attr_value, other_attr_value):
"""Compare entities' attributes according to attributes' names and values,
if equal then return 'True' and vice versa.
"""
is_equal = False
if attr_name == "custom_attributes":
is_equal = cls.compare_cas(self_attr_value, other_attr_value)
elif attr_name in ["updated_at", "created_at"]:
is_equal = cls.compare_datetime(self_attr_value, other_attr_value)
elif attr_name == "comments":
is_equal = cls.compare_comments(self_attr_value, other_attr_value)
else:
is_equal = self_attr_value == other_attr_value
return is_equal
@classmethod
def is_list_of_attrs_equal(cls, self_list_attrs, other_list_attrs):
"""Compare list of entities' attributes according to attributes' names and
values, if equal then return 'True' and vice versa.
"""
return (all(all((self_k == other_k and cls.is_attrs_equal(
attr_name=self_k, self_attr_value=self_attr[self_k],
other_attr_value=other_attr[self_k])) for self_k, other_k
in zip(self_attr.keys(), other_attr.keys()))
for self_attr, other_attr in zip(self_list_attrs, other_list_attrs)))
@staticmethod
def compare_cas(self_cas, other_cas):
"""Compare entities' 'custom_attributes' attributes."""
if (isinstance(self_cas, (dict, type(None))) and
isinstance(other_cas, (dict, type(None)))):
is_equal = False
if (isinstance(self_cas, dict) and isinstance(other_cas, dict)):
is_equal = StringMethods.is_subset_of_dicts(self_cas, other_cas)
else:
is_equal = self_cas == other_cas
return is_equal
else:
Representation.attrs_values_types_error(
self_attr=self_cas, other_attr=other_cas,
expected_types=(dict.__name__, type(None).__name__))
@staticmethod
def compare_datetime(self_datetime, other_datetime):
"""Compare entities' datetime ('created_at', 'updated_at') attributes."""
# pylint: disable=superfluous-parens
if (isinstance(self_datetime, (datetime, type(None))) and
isinstance(other_datetime, (datetime, type(None)))):
return self_datetime == other_datetime
else:
Representation.attrs_values_types_error(
self_attr=self_datetime, other_attr=other_datetime,
expected_types=(datetime.__name__, type(None).__name__))
@staticmethod
def compare_comments(self_comments, other_comments):
"""Compare entities' 'comments' attributes due to specific dictionaries'
format values in list comments.
"""
# pylint: disable=no-else-return
if help_utils.is_multiple_objs(
StringMethods.convert_list_elements_to_list(
[self_comments, other_comments]), (dict, type(None))):
if self_comments and other_comments:
is_comments_equal_list = []
for self_comment, other_comment in zip(self_comments, other_comments):
is_comments_equal = False
if self_comment and other_comment:
is_comments_equal = (
all((Representation.compare_datetime(
self_comment.get("created_at"),
other_comment.get("created_at")
) if (isinstance(_self, datetime) and
isinstance(_other, datetime)) else
_self == _other) for _self, _other in zip(
self_comment.iteritems(), other_comment.iteritems())))
# convert datetime to unicode in order to get visible repr
if self_comment.get("created_at"):
self_comment["created_at"] = unicode(
self_comment.get("created_at"))
if other_comment.get("created_at"):
other_comment["created_at"] = unicode(
other_comment.get("created_at"))
else:
is_comments_equal = self_comment == other_comment
is_comments_equal_list.append(is_comments_equal)
return all(is_equal for is_equal in is_comments_equal_list)
else:
return self_comments == other_comments
else:
Representation.attrs_values_types_error(
self_attr=self_comments, other_attr=other_comments,
expected_types=(list.__name__, type(None).__name__))
def compare_entities(self, other):
"""Extended compare of entities: 'self_entity' and 'other_entity' according
to specific 'attrs_names_to_repr' and return:
- 'is_equal' - True if entities equal else False;
- 'self_diff' - 'equal' and 'diff' parts of 'self_entity' after compare;
- 'other_diff' - 'equal' and 'diff' parts of 'other_entity' after compare.
"""
# pylint: disable=not-an-iterable
is_equal = False
self_equal, self_diff, other_equal, other_diff = {}, {}, {}, {}
if (isinstance(self, other.__class__) and
self.attrs_names_to_repr == other.attrs_names_to_repr):
for attr_name in self.attrs_names_to_repr:
self_attr_value = None
other_attr_value = None
if (attr_name in self.__dict__.keys() and
attr_name in other.__dict__.keys()):
self_attr_value = getattr(self, attr_name)
other_attr_value = getattr(other, attr_name)
is_equal = self.is_attrs_equal(
attr_name=attr_name, self_attr_value=self_attr_value,
other_attr_value=other_attr_value)
# convert datetime to unicode in order to get visible representation
if isinstance(self_attr_value, datetime):
self_attr_value = unicode(self_attr_value)
if isinstance(other_attr_value, datetime):
other_attr_value = unicode(other_attr_value)
if is_equal:
self_equal[attr_name] = self_attr_value
other_equal[attr_name] = other_attr_value
else:
self_diff[attr_name] = self_attr_value, type(self_attr_value)
other_diff[attr_name] = other_attr_value, type(other_attr_value)
is_equal = self_diff == other_diff == {}
return {"is_equal": is_equal,
"self_diff": {"equal": self_equal, "diff": self_diff},
"other_diff": {"equal": other_equal, "diff": other_diff}
}
@classmethod
def extract_excluding_attrs(cls, expected_objs, actual_objs, *exclude_attrs):
"""Extract dictionary which contains collections to compare according to
exclude attributes.
Where:
'exp_objs_wo_ex_attrs', 'act_objs_wo_ex_attrs' - list objects w/o excluding
attributes;
'exp_ex_attrs', 'act_ex_attrs' - list dictionaries w/ excluding attributes
(items which contain attributes' names and values);
'*exclude_attrs' - tuple of excluding attributes' names.
"""
# pylint: disable=invalid-name
expected_excluded_attrs, actual_excluded_attrs = (
cls.extract_simple_collections(
expected_objs, actual_objs, *exclude_attrs))
expected_objs_wo_excluded_attrs, actual_objs_wo_excluded_attrs = (
cls.extract_objs(expected_objs, actual_objs, *exclude_attrs))
return {"exp_objs_wo_ex_attrs": expected_objs_wo_excluded_attrs,
"act_objs_wo_ex_attrs": actual_objs_wo_excluded_attrs,
"exp_ex_attrs": expected_excluded_attrs,
"act_ex_attrs": actual_excluded_attrs}
@staticmethod
def extract_objs_wo_excluded_attrs(objs, *exclude_attrs):
"""Return list objects w/ attributes values set to 'None' according to
'*exclude_attrs' tuple attributes' names.
"""
return [expected_obj.update_attrs(
**dict([(attr, None) for attr in exclude_attrs]))
for expected_obj in objs]
@staticmethod
def extract_excluded_attrs_collection(objs, *exclude_attrs):
"""Return list dictionaries (attributes' names and values) according to
'*exclude_attrs' tuple attributes' names.
"""
# pylint: disable=invalid-name
return [dict([(attr, getattr(expected_obj, attr))
for attr in exclude_attrs]) for expected_obj in objs]
@staticmethod
def extract_simple_collections(expected_objs, actual_objs, *exclude_attrs):
"""Extract expected and actual simple collections excluded attributes."""
return [Representation.extract_excluded_attrs_collection(
copy.deepcopy(objs), *exclude_attrs)
for objs in [expected_objs, actual_objs]]
@staticmethod
def extract_objs(expected_objs, actual_objs, *exclude_attrs):
"""Extract expected and actual objects w/ set to 'None' excluded
attributes.
"""
return [Representation.extract_objs_wo_excluded_attrs(
copy.deepcopy(objs), *exclude_attrs)
for objs in [expected_objs, actual_objs]]
@staticmethod
def filter_objs_by_attrs(objs, **attrs):
"""Filter objects by attributes' items and return matched according to
plurality.
'objs' - object or list objects;
'**attrs' - items of attributes' names and values.
"""
list_objs = help_utils.convert_to_list(objs)
matched_objs = [
obj for obj in list_objs
if isinstance(obj, Entity.all_entities_classes()) and
StringMethods.is_subset_of_dicts(dict(**attrs), obj.__dict__)]
return (help_utils.get_single_obj(matched_objs)
if not help_utils.is_multiple_objs(matched_objs) else matched_objs)
class Entity(Representation):
"""Class that represent model for base entity."""
__hash__ = None
def __init__(self, **attrs):
self.set_attrs(
"type", "slug", "id", "title", "href", "url", "admins",
"primary_contacts", "secondary_contacts", "status",
"comments", "custom_attribute_definitions", "custom_attribute_values",
"custom_attributes", "created_at", "updated_at", "modified_by",
"description", **attrs)
@staticmethod
def all_entities_classes():
"""Explicitly return tuple of all entities' classes."""
return (
PersonEntity, CustomAttributeDefinitionEntity, ProgramEntity,
ControlEntity, AuditEntity, AssessmentEntity, AssessmentTemplateEntity,
IssueEntity, CommentEntity, ObjectiveEntity, AccessControlRoleEntity,
RiskEntity, OrgGroupEntity, ProposalEntity, ReviewEntity,
ProductEntity, TechnologyEnvironmentEntity, ChangeLogItemEntity
)
def __lt__(self, other):
return self.slug < other.slug
class CommentEntity(Representation):
"""Class that represent model for Comment entity."""
__hash__ = None
def __init__(self, **attrs):
self.set_attrs(
"type", "id", "href", "description", "created_at", "modified_by",
**attrs)
def __lt__(self, other):
return self.description < other.description
def repr_ui(self):
"""Represents UI view of comment."""
ui_attrs = copy.deepcopy(self.attrs_names_to_repr)
ui_attrs.remove("type")
return dict(
zip(ui_attrs, [getattr(self, ui_attr) for ui_attr in ui_attrs]))
class PersonEntity(Entity):
"""Class that represent model for Person entity."""
def __init__(self, **attrs):
super(PersonEntity, self).__init__()
self.delete_attrs(
"slug", "title", "admins", "primary_contacts", "secondary_contacts",
"status", "comments")
self.set_attrs(
"name", "id", "type", "email", "company", "system_wide_role", **attrs)
def __lt__(self, other):
return self.email < other.email
def people_tree_item_representation(self):
"""Make object's copy and convert it to the view of people tree item."""
from lib.constants import roles
origin_obj = copy.deepcopy(self)
origin_obj.__dict__.update({k: None for k in (
'modified_by', 'description', 'created_at', 'updated_at',
'custom_attributes')})
if self.system_wide_role == roles.NO_ACCESS:
origin_obj.system_wide_role = roles.NO_ROLE
return origin_obj
class UserRoleEntity(Representation):
"""Class that represents model for user role entity"""
def __init__(self):
super(UserRoleEntity, self).__init__()
self.set_attrs(
"type", "id", "created_at", "updated_at", "modified_by",
"person", "role")
class AccessControlRoleEntity(Representation):
"""Class that represents model for ACL role entity."""
def __init__(self):
super(AccessControlRoleEntity, self).__init__()
self.set_attrs(
"context", "created_at", "default_to_current_user", "delete", "id",
"mandatory", "modified_by", "my_work", "name", "non_editable",
"object_type", "parent_type", "read", "selfLink", "tooltip", "type",
"update", "updated_at")
class CustomAttributeDefinitionEntity(Representation):
"""Class that represent model for Custom Attribute entity."""
__hash__ = None
def __init__(self, **attrs):
super(CustomAttributeDefinitionEntity, self).__init__()
self.set_attrs(
"title", "id", "href", "type", "definition_type", "attribute_type",
"helptext", "placeholder", "mandatory", "multi_choice_options",
| |
<filename>kato.py
import sys
import base64
import os
import json
from math import ceil
import random
from urllib import quote
from twisted.internet import reactor, defer, protocol
from twisted.internet.error import ConnectionDone
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from autobahn.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
WebSocketProtocol, \
connectWS
# TODO: switch to github/dreid/treq for HTTP requests
# http://blog.mailgun.com/post/stress-testing-http-with-twisted-python-and-treq/
# TODO: error handling:
# - json decoding
# - login
# - network connections
KATO_API_BASE_URL = "https://api.kato.im"
KATO_API_WS_URL = "wss://api.kato.im/ws"
KATO_API_ORIGIN = "https://kato.im"
#KATO_API_BASE_URL = "http://localhost:8888"
#KATO_API_WS_URL = "ws://localhost:8888/ws"
# string producer, used with twisted's HTTP client for the producer of the body
class StringProducer(object):
def __init__(self, body):
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class KatoAccount(object):
# account ID
id = ""
# name associated with the account
name = ""
# email address associated with the account
email = ""
# email verification status
# one of verified_email or unverified_email
status = ""
# list of KatoAccountMembership objects
memberships = None
def __init__(self, id, name, email, status, memberships):
self.id = id
self.name = name
self.email = email
self.status = status
self.memberships = memberships
def __repr__(self):
return "KatoAccount{id='%s', name='%s', email='%s', status='%s', memberships=%s}" % \
(self.id, self.name, self.email, self.status, self.memberships)
@classmethod
def from_json(cls, message):
memberships = []
for membership in message["memberships"]:
memberships.append(KatoAccountMembership.from_json(membership))
return KatoAccount(message["id"],
message["name"],
message["email"],
message["status"],
memberships)
class KatoAccountMembership(object):
# ID of the organization in which the account is a member
org_id = ""
# Name of the organization
org_name = ""
# one of member or owner
role = ""
def __init__(self, org_id, org_name, role):
self.org_id = org_id
self.org_name = org_name
self.role = role
def __repr__(self):
return "KatoAccountMembership{org_id='%s', org_name='%s', role='%s'}" % \
(self.org_id, self.org_name, self.role)
@classmethod
def from_json(cls, message):
return KatoAccountMembership(message["org_id"], message["org_name"], message["role"])
class KatoRoom(object):
# ID of the room
id = ""
# type of room
# usually None, if a normal room; otherwise, a string, such as
# "support_front"
type = ""
# name of the room
name = ""
# organization ID that owns the room
org_id = ""
# milliseconds since the unix epoch
created_ts = 0
def __init__(self, id, type, name, org_id, created_ts):
self.id = id
self.type = type
self.name = name
self.org_id = org_id
self.created_ts = created_ts
def __repr__(self):
return "KatoRoom{id='%s', type='%s', name='%s', org_id='%s', created_ts=%s}" % \
(self.id, self.type, self.name, self.org_id, self.created_ts)
@classmethod
def from_json(cls, message):
return KatoRoom(message["id"],
message["type"],
message["name"],
message["organization_ids"][0],
message["created_ts"])
# http client for Kato
class KatoHttpClient(object):
# whether debugging is enabled
debug = False
# session ID and key for the Kato connection
# initialize using login or useExistingSession
session_id = ""
session_key = ""
# account ID of the current logged-in user
account_id = -1
# KatoWebsocket connection
# set when the websocket is open, None when the websocket is closed
websocket = None
# KatoMessageReceiver
# receives websocket messages
message_receiver = None
# deferred that is fired when connection is established
# if none, then there is nothing to be fired
initialize_deferred = None
# creates an http client with the provided message receiver
# TODO: if None, then do not initialize websocket connection
def __init__(self, message_receiver):
self.message_receiver = message_receiver
# logs into Kato using the provided email address and password
# returns a defer that fires when the user is logged in, or errors when
# the user could not be logged in
def login(self, email, password):
self.session_id = self._create_session_id()
url = KATO_API_BASE_URL + "/sessions/" + self.session_id
data = dict()
data["email"] = email
data["password"] = password
headers = dict()
headers["Content-Type"] = ["application/json"]
d = self._httpRequest("PUT", url, json.dumps(data), headers=headers)
def process_login_response(response):
# find and set the session key from the cookies
cookies = response.headers.getRawHeaders("set-cookie")
self.session_key = None
if cookies:
for cookie in cookies:
# at least one cookie should look like this:
# session_key=a9a7da00-3be0-11ed-a444-bc764e10c2df; Version=1; Expires=Tue, 19-Nov-2013 19:15:53 GMT; Max-Age=2592000; Domain=.api.kato.im; Path=/; Secure; HttpOnly
cookie_parts = cookie.split(";")
for cookie_part in cookie_parts:
cookie_part = cookie_part.strip()
parts = cookie_part.split("=");
if len(parts) == 2:
key, value = parts
if key == "session_key":
self.session_key = value
if not self.session_key:
raise ValueError("Could not login to Kato")
# nothing to return to the caller
return None
d.addCallback(process_login_response)
d.addCallback(self._initialize)
return d
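# Illustrative sketch (added): the cookie handling above reduces to pulling the
# 'session_key' pair out of a Set-Cookie header value; the header below is a
# made-up example, not a real Kato response.
#
#     raw = "session_key=abc123; Version=1; Domain=.api.kato.im; Path=/; Secure"
#     key = dict(p.strip().split("=", 1) for p in raw.split(";")
#                if "=" in p)["session_key"]    # -> "abc123"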
# creates a session ID using the same algorithm that Kato uses
def _create_session_id(self):
return self._create_id(8)
# creates a message ID using the same algorithm that Kato uses
def _create_message_id(self):
return self._create_id(2)
# ID generation
def _create_id(self, byte_size):
result = []
for i in range(0, byte_size):
result.append(hex(int(ceil((0xffffffff * random.random()))))[2:])
return ''.join(result)
# uses an existing session for connecting to Kato
# returns a deferred that fires upon success
def login_with_session(self, session_id, session_key):
self.session_id = session_id
self.session_key = session_key
return self._initialize()
# called after the session ID and key have been set to complete the login
# returns a deferred that fires when complete
def _initialize(self, ignored=None):
self.initialize_deferred = defer.Deferred()
d_account = self.get_account_id(self.session_id)
def process_account_id(account_id):
self.account_id = account_id
return None
def initialize_ws(ignored):
#cookie = "session_key=%s; session_id=%s" % \
# (self.session_key, self.session_id)
cookie = "session_key=%s; session_id=%s" % \
(self.session_key, self.session_id)
factory = KatoWebsocketFactory(KATO_API_WS_URL,
self,
cookie = cookie,
debug = self.debug,
debugCodePaths = self.debug,
origin = KATO_API_ORIGIN)
connectWS(factory)
return None
def handle_error(failure):
if self.initialize_deferred:
d = self.initialize_deferred
self.initialize_deferred = None
d.errback(failure)
d_account.addCallback(process_account_id)
d_account.addCallback(initialize_ws)
# trigger err on the initialization deferred if there is an error
# before the websocket phase
d_account.addErrback(handle_error)
# fired in websocket_opened or fired with error in websocket_closed
return self.initialize_deferred
# logs out of kato
# returns a deferred that fires when complete
def logout(self):
# close websocket connection
if self.websocket:
self.websocket.dropConnection()
self.websocket = None
# DELETE on the sessions resource to logout
url = KATO_API_BASE_URL + "/sessions/" + self.session_id
d = self._httpRequest("DELETE", url)
return d
# sends a keep alive message
# the message receiver will get a callback on a successful keep alive
def keep_alive(self):
if self.websocket:
self.websocket.send_keep_alive()
#
# websocket callbacks
#
def websocket_opened(self, websocket):
print "Websocket opened."
self.websocket = websocket
# fire initialization deferred, if present
if self.initialize_deferred:
d = self.initialize_deferred
self.initialize_deferred = None
d.callback(None)
def websocket_closed(self, websocket, wasClean, code, reason):
# TODO: handle websocket closed not during login
print "Websocket closed."
self.websocket = None
# fire initialization deferred, if present
if self.initialize_deferred:
d = self.initialize_deferred
self.initialize_deferred = None
d.errback(IOError("Error connecting: " + reason))
def websocket_message(self, websocket, message_str, binary=None):
method = None
message = json.loads(message_str)
if "type" in message:
message_type = message["type"]
message_type = message_type.upper()
method = getattr(self.message_receiver,
"kato_%s" % message_type,
None)
if method:
method(message)
else:
self.message_receiver.kato_unknown(message)
# returns the account ID, given a session ID, via a deferred
def get_account_id(self, session_id):
url = KATO_API_BASE_URL + "/sessions/" + quote(session_id)
# returns:
# {
# "id":"<SESSION_ID>",
# "account_id":"<ACCOUNT_ID>"
# }
# async get data
d = self._httpRequestJson("GET", url)
def process_response(response):
if "account_id" in response.json:
return response.json["account_id"]
else:
raise ValueError("Response does not contain account_id: " + data)
d.addCallback(process_response)
return d
# returns KatoAccount instance via a deferred
def get_account_info(self, account_id=None):
if not account_id:
account_id = self.account_id
url = KATO_API_BASE_URL + "/accounts/" + quote(account_id)
# returns
# {
# "id":"<ACCOUNT_ID>",
# "status":"(verified_email | unverified_email)",
# "email":"<EMAIL_OF_ACCOUNT>",
# "name":"<NAME_ON_ACCOUNT>",
# "memberships": [
# {
# "org_id":"<ORG_ID>",
# "org_name":"<ORG_NAME>",
# "role":"(member|owner)"
# }
# ]
# }
# async get data
d = self._httpRequestJson("GET", url)
def process_response(response):
if not response.json:
raise ValueError("Response was empty")
return KatoAccount.from_json(response.json)
d.addCallback(process_response)
return d
# organization ID to list of KatoAccount objects for everyone in the
# organization EXCEPT for the current user
def get_organization_members(self, org_id):
url = KATO_API_BASE_URL + "/organizations/" + quote(org_id) + "/accounts"
# returns
# [
# {
# "id":"<ACCOUNT_ID>",
# "status":"(verified_email | unverified_email)",
# "email":"<EMAIL_OF_ACCOUNT>",
# "name":"<NAME_ON_ACCOUNT>",
# "memberships": [
# {
# "org_id":"<ORG_ID>",
# "org_name":"<ORG_NAME>",
# "role":"(member|owner)"
# }
# ]
# }
# ]
# async get data
d = self._httpRequestJson("GET", url)
def process_response(response):
if not response.json:
raise ValueError("Response was empty")
accounts = []
for entry in response.json:
# email reservations are not yet an active account, so skip
# the entry
if "status" in entry and entry["status"] | |
the file does not exist in the cache.
This cache affects the strong-consistency file existence check, as a file delete event is not reflected on the client until the cache expires.
This method bypasses the SMB2 directory cache by making a call to the SAMBA server to check the existence of a file.
:param path: file path
:param vType: 0 - Check file does not exist, 1 - Check file exists (default).
:param fType: 0 - file is normal consistency file, 1 - file is a strong consistency file (default).
:return: 0 for success else 1
"""
if path is None or vType not in [0, 1] or fType not in [0, 1]:
raise Exception('IFile: ValidateExists: Invalid input parameter.')
# For the normal-consistency exists / does-not-exist checks and the strong-consistency exists check, use the existing ValidatePathExists method in IPath
if fType == 0 or (fType == 1 and vType == 1):
return self.ValidatePathExists(path, vType)
elif fType == 1 and vType == 0:
try:
fd = os.open(path, os.O_RDONLY)
os.close(fd)
except OSError as e:
if (e.errno == 2):
logger.info('file=%s does not exist' % path)
return 0
logger.info('Error opening file %s.\n%s: %s' % (path, type(e).__name__, e))
return 1
# ======================
# ACL
# ======================
class ACL(Base):
"""
Represents ACL to be applied to a path
"""
def __init__(self, userPrefix=None, userIdx=None, perm=None, cache=None):
self._cache = cache
self.userName = ''
if userPrefix is not None:
self.userName = userPrefix if userIdx is None else userPrefix + str(userIdx)
self.perm = perm
self.corePerm = perm # This will contain 'permission - inheritance flags'
self._cmd = DosCmd()
self._ipath = IPath()
self._idir = IDir()
def __str__(self):
_str = '' if self.perm is None else self.perm
_str += ': '
_str += '' if self.userName is None else self.userName
return _str
def Get(self, path, recursive=False):
"""
Get ACL set on the specified path
:param path: Path on which ACL ops will be performed.
:param recursive: True if to do recursive collection of ACLs
"""
if recursive:
aclMapsId = '%s' % uuid4()
logger.info('cache: %s' % self._cache)
aclMaps = {}
entries = []
entries.append(path) # Start with root of a directory tree
entries.extend(self._idir.List(path, True, True))
for entry in entries:
aclMaps[entry] = self._Get(entry)
self._cache.Set(aclMapsId, aclMaps)
return aclMapsId
else:
return self._Get(path)
def _Get(self, path):
"""
Get ACL set on the specified path
:param path: Path on which ACL ops will be performed.
"""
logger.info('path=%s' % path)
aclMap = self._cmd.GetAcl(path)
logger.info('path=%s ACLs=%s' % (path, aclMap))
return aclMap
def Apply(self, path, mode='grant:r'):
"""
Set ACL on the specified path
:param path: Path on which ACL ops will be performed.
:param mode: Mode in which 'perm' will be set. Value can be 'grant' (to add perm), 'grant:r' (to replace existing by new one)
or 'deny' (to deny perm)
"""
if mode not in ['grant', 'grant:r', 'deny']:
raise Exception('ACL: SetAcl for %s mode is not supported' % mode)
logger.info('path=%s user=%s perm=%s mode=%s' % (path, self.userName, self.perm, mode))
output = self._cmd.SetAcl(path, self.userName, self.perm, mode)
logger.info('path=%s result=%s' % (path, output))
def Remove(self, path, mode=None):
"""
Remove ACL from the specified path
:param path: Path on which ACL ops will be performed.
:param mode: ACL mode, value can be 'd' for 'deny', 'g' 'grant' or None to remove all ACLs
"""
if not (mode is None or mode in ['g', 'd']):
raise Exception('ACL: RemoveAcl for %s mode is not supported' % mode)
logger.info('path=%s user=%s acl=%s mode=%s' % (path, self.userName, self.perm, mode))
output = self._cmd.RemoveAcl(path, self.userName, mode)
logger.info('path=%s result=%s' % (path, output))
def ChangeInheritance(self, path, opCode):
"""
Remove inherited ACL from the specified path
:param path: Path on which ACL ops will be performed.
:param opCode: Supported operations are: e: enable, d: disable, r: remove
"""
if opCode not in ['r', 'e', 'd']:
raise Exception('ACL: ChangeInheritance for %s type opCode is not supported' % opCode)
output = self._cmd.ChangeInheritance(path, opCode)
logger.info('path=%s result=%s' % (path, output))
def _parseUserName(self, userName):
"""
Parse user names such as domain.com\\user1 or domain.inc.com\\user1 and
construct user name as domain\\user1 (this is how the username appears in ACL list)
For a user name like CREATOR OWNER, which does not have a domain, userName is returned as-is.
:param userName:
"""
domainName, parsedUserName = None, None
userNameSplits = re.split(r'\\', userName)
if len(userNameSplits) == 1:
parsedUserName = userName # When userName is 'CREATOR OWNER'
else:
domainNameSplits = re.split(r'\.', userNameSplits[0]) # split 'domain.inc.com' to get 'domain' only
domainName = domainNameSplits[0].upper()
parsedUserName = domainName + '\\' + userNameSplits[1]
logger.info('Username [%s] after parsing: %s' % (userName, parsedUserName))
return parsedUserName
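# Examples (added for illustration):
#     'domain.inc.com\\user1' -> 'DOMAIN\\user1'
#     'CREATOR OWNER'         -> 'CREATOR OWNER' (no domain, returned as-is)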
def Validate(self, path, vType=1, aclMapsId=None):
"""
Validates ACL of a file/directory or recursively into a directory.
:param path: ACLs to be validated for
:param vType: 1=Validate for existence 0=Validate for absence
:param aclMapsId: A key in 'cache' to retrieve stored ACLs and valid value means recursive validation
"""
if vType not in [0, 1]:
raise Exception('ACL: Supported values for vType are 0, 1. (Given value: %d)' % (vType))
if aclMapsId is None:
logger.info('Validating ACLs of %s' % path)
return self._validate(path, vType)
if not os.path.isdir(path):
raise Exception('ACL: %s is not directory. Recursive validation only on directory' % path)
logger.info('Recursively validating ACLs for entries in %s' % path)
aclMapsPrev = self._cache.Get(aclMapsId)
for k, v in aclMapsPrev.items():
logger.info('Entry: %s\ncached ACLs: %s' % (k, v))
inheritFlags = self._getInheritFlags()
logger.info('inheritFlags: %s' % (inheritFlags))
result = True
entries = []
entries.append(path) # Start with root of a directory tree
entries.extend(self._idir.List(path, True, True))
for entry in entries:
ACLInheritance = self._isACLInheritanceExpected(entry, path, inheritFlags)
logger.info('Validating ACLs of %s when ACL inheritance is %s' % (entry, ACLInheritance))
if ACLInheritance:
try:
self._validate(entry, vType)
except Exception as e:
result = False
logger.error('ACL validation failed for %s\nMsg: %s' % (entry, e))
else:
aclMap = ACL().Get(entry)
if self._isACLSame(aclMapsPrev[entry], aclMap):
logger.info("ACLs of %s before and after ACL application are same" % entry)
else:
result = False
logger.error("ACLs of %s before and after ACL application are different" % entry)
logger.debug('ACL validation result: %s' % result)
if result:
return 0
else:
raise Exception('One or more ACL validations failed for %s' % (path))
def _isACLSame(self, aclMapPrev, aclMapPresent):
"""
Checks if ACL entries before and after application of a new ACL are the same.
If an ACL is applied on the root of a directory tree, then the change in ACLs for
a file/folder in the tree is driven by the presence of inheritance flags - OI, CI, NP.
Check:
1) Return True if:
both ACL entries are exactly the same, or
the ACLs change in string value but mean the same, as in the example below:
Previous: 'PZTEST22\\test1': ['(I)(OI)(CI)(F)', '(I)(F)']
Present: 'PZTEST22\\test1': ['(I)(OI)(CI)(F)']
2) Return False otherwise
"""
logger.info("ACL map(previous): %s" % aclMapPrev)
logger.info("ACL map(present) : %s" % aclMapPresent)
ret = True
if aclMapPrev == aclMapPresent:
return True
else:
for aclUser in aclMapPrev.keys():
logger.info("Verifying ACLs of user[%s] with previous[%s] and present[%s]" % (
aclUser, aclMapPrev[aclUser], aclMapPresent[aclUser]))
if aclMapPrev[aclUser] == aclMapPresent[aclUser]:
ret = True
else:
# The check is to verify that aclMapPresent is a subset of aclMapPrev
# and it's based upon observations that aclMapPrev might get consolidated
# after applying new ACL. To explain it, take a look at below ACLs seen
# before and after applying a ACL for some other user - PZTEST22\test2
#
# Before: PZTEST22\\test1: ['(I)(OI)(CI)(F)', '(I)(F)']
# After : PZTEST22\\test1: ['(I)(OI)(CI)(F)']
#
# In above example, ACL indicating 'full permissions' by '(F)' is present
# before and after applying an ACL and is still an inherited one. And so
# considered it to be present and so a check in this case is considered to
# be a success.
#
for acl in aclMapPresent[aclUser]:
ret = acl in aclMapPrev[aclUser]
if not ret:
logger.error("ACLs before and after ACL application for %s are different" % aclUser)
break
return ret
def _validate(self, path, vType):
"""
Validates ACL on the file.
:param acl: ACLs to be validated for
:param vType: 1=Validate for existence 0=Validate for absence
"""
logger.info('path=%s vType=%d' % (path, vType))
aclMap = self.Get(path)
if | |
# Compute accuracy for both the original data and the low-dimensional representation
# X: feature matrix, L: labels
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from munkres import Munkres
import csv
from numpy import savetxt
from pandas import DataFrame
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
import os
import glob
from matplotlib.backends.backend_pdf import PdfPages
from MantelTest import Mantel
from hub_toolbox.distances import euclidean_distance
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import numba
from sklearn import neighbors
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
def kNN_acc(X, L):
X_train, X_test, Y_train, Y_test = train_test_split(X, L, random_state=0)
knc = KNeighborsClassifier(n_neighbors=1)
knc.fit(X_train, Y_train)
Y_pred = knc.predict(X_test)
score = knc.score(X_test, Y_test)
return score
def kNN_acc_kfold(X, y, n_neighbors=1):
"""
Returns the average 10-fold validation accuracy of a NN classifier trained on the given embeddings
Args:
X (np.array): feature matrix of size n x d
y (np.array): label matrix of size n x 1
n_neighbors (int): number of nearest neighbors to be used for inference
Returns:
score (float): Accuracy of the NN classifier
"""
kf = KFold(n_splits=10)
kf.get_n_splits(X)
scores = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = neighbors.KNeighborsClassifier(n_neighbors)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores.append(accuracy_score(y_test, y_pred))
return np.average(scores)
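# Minimal usage sketch (added): evaluate the 10-fold 1-NN accuracy on a small
# synthetic embedding. make_blobs and the sizes below are arbitrary examples.
def _example_knn_acc_kfold():
    from sklearn.datasets import make_blobs
    X_demo, y_demo = make_blobs(n_samples=200, centers=4, n_features=2,
                                random_state=0)
    return kNN_acc_kfold(X_demo, y_demo, n_neighbors=1)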
def visualize(X, L, cmap='Spectral', s=10):
sns.set(context="paper", style="white")
fig, ax = plt.subplots(figsize=(12, 10))
color = L.astype(int)
plt.scatter(
X[:, 0], X[:, 1], c=color, cmap=cmap, s=s
)
plt.setp(ax, xticks=[], yticks=[])
# plt.title("MNIST data embedded into two dimensions by UMAP", fontsize=18)
plt.show()
def save_visualization(X, L, cmap='viridis', s=0.1, dir='./fig_vis/', dataset = 'F-MNIST', hub_org = 'org', i=0):
sns.set(context="paper", style="white")
fig, ax = plt.subplots(figsize=(12, 10))
color = L.astype(int)
plt.scatter(
X[:, 0], X[:, 1], c=color, cmap=cmap, s=s
)
plt.setp(ax, xticks=[], yticks=[])
if hub_org == 'org':
model = 'UMAP'
else:
model = 'HR-UMAP'
# plt.title(dataset + " data by " + model, fontsize=18)
# # initialize the PDF file
# pp = PdfPages(dir + dataset + '_' + model + str(i+1) + '.pdf')
#
# # save the figure
# plt.savefig(pp, format='pdf')
#
# # close the PDF file
# pp.close()
plt.savefig(dir + dataset + '_' + model + str(i+1) + '.png')
def kmeans_acc_ari_ami(X, L):
"""
Calculate clustering accuracy (via a Hungarian assignment of cluster labels),
ARI and AMI. Requires scikit-learn and munkres installed.
# Arguments
X: feature matrix, numpy.array with shape `(n_samples, n_features)`
L: true labels, numpy.array with shape `(n_samples,)`
# Return
(accuracy in [0, 1], adjusted Rand index, adjusted mutual information)
"""
n_clusters = len(np.unique(L))
kmeans = KMeans(n_clusters=n_clusters, n_init=20)
y_pred = kmeans.fit_predict(X)
y_pred = y_pred.astype(np.int64)
y_true = L.astype(np.int64)
assert y_pred.size == y_true.size
y_pred = y_pred.reshape((1, -1))
y_true = y_true.reshape((1, -1))
# D = max(y_pred.max(), L.max()) + 1
# w = np.zeros((D, D), dtype=np.int64)
# for i in range(y_pred.size):
# w[y_pred[i], L[i]] += 1
# # from sklearn.utils.linear_assignment_ import linear_assignment
# from scipy.optimize import linear_sum_assignment
# row_ind, col_ind = linear_sum_assignment(w.max() - w)
#
# return sum([w[i, j] for i in row_ind for j in col_ind]) * 1.0 / y_pred.size
if len(np.unique(y_pred)) == len(np.unique(y_true)):
C = len(np.unique(y_true))
cost_m = np.zeros((C, C), dtype=float)
for i in np.arange(0, C):
a = np.where(y_pred == i)
# print(a.shape)
a = a[1]
l = len(a)
for j in np.arange(0, C):
yj = np.ones((1, l)).reshape(1, l)
yj = j * yj
cost_m[i, j] = np.count_nonzero(yj - y_true[0, a])
mk = Munkres()
best_map = mk.compute(cost_m)
(_, h) = y_pred.shape
for i in np.arange(0, h):
c = y_pred[0, i]
v = best_map[c]
v = v[1]
y_pred[0, i] = v
acc = 1 - (np.count_nonzero(y_pred - y_true) / h)
else:
acc = 0
# print(y_pred.shape)
y_pred = y_pred[0]
y_true = y_true[0]
ari, ami = adjusted_rand_score(y_true, y_pred), adjusted_mutual_info_score(y_true, y_pred)
return acc, ari, ami
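# Usage sketch (added): on well-separated synthetic blobs, k-means should
# recover the labels almost perfectly, so acc/ARI/AMI are all close to 1.
def _example_kmeans_metrics():
    from sklearn.datasets import make_blobs
    X_demo, y_demo = make_blobs(n_samples=300, centers=3, cluster_std=0.5,
                                random_state=0)
    return kmeans_acc_ari_ami(X_demo, y_demo)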
@numba.jit()
def mantel_test(X, L, embed, describe = True):
sss = StratifiedShuffleSplit(n_splits=50, test_size=1000, random_state=0)
sss.get_n_splits(X, L)
label_type = list(set(L))
r_lst = np.array([])
p_lst = np.array([])
for _, idx in sss.split(X, L):
# print('Index: ', idx)
# X_test = X[idx]
# y_train =
X_high, L_hl = X[idx], L[idx]
X_low = embed[idx]
# print(X_high.shape, L_high.shape)
# print(X_low.shape, L_low.shape)
label_idx = []
for _, i in enumerate(label_type):
l_idx = np.where(L_hl == i)
label_idx.append(l_idx)
# print(label_type)
# label_idx
X_high_lst = []
X_low_lst = []
# for _, i in enumerate(label_type):
# X_high_lst.append(X_high[label_idx[i]])
for i, _ in enumerate(label_type):
centroid = np.mean(X_high[label_idx[i]], axis=0)
# print(centroid)
X_high_lst.append(centroid)
# print(centroid.shape)
# X_high_lst.append((X_high[label_idx[i]] - centroid))
# X_high_lst[label_idx[i]] = np.sqrt(np.linalg.norm(X_high[label_idx[i]] - centroid, ord=2))
# for _, i in enumerate(label_type):
centroid = np.mean(X_low[label_idx[i]], axis=0)
X_low_lst.append(centroid)
# print(centroid.shape)
# X_high_lst.append((X_low[label_idx[i]] - centroid))
# X_low_lst[label_idx[i]] = np.sqrt(np.linalg.norm(X_low[label_idx[i]] - centroid, ord=2))
# print(X_low_lst[0].shape, centroid.shape)
D_high = euclidean_distance(X_high_lst)
D_low = euclidean_distance(X_low_lst)
# print(D_high, D_low)
r, p, z = Mantel.test(D_high, D_low, perms=10000, method='pearson', tail='upper')
r_lst = np.append(r_lst, r)
p_lst = np.append(p_lst, p)
if describe:
print(p_lst)
print(pd.DataFrame(pd.Series(r_lst.ravel()).describe()).transpose())
return r_lst, p_lst
# # return np.mean(r_lst)
# print(pd.DataFrame(pd.Series(r_lst.ravel()).describe()).transpose())
# print('r: ', r, 'p: ', p, 'z: ', z)
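# Minimal sketch (added): Mantel.test compares two distance matrices directly;
# the call mirrors the signature used in mantel_test above. The random data is
# purely illustrative.
def _example_mantel():
    rng = np.random.RandomState(0)
    A = rng.rand(10, 5)
    D_a = euclidean_distance(A)
    D_b = euclidean_distance(A + 0.01 * rng.rand(10, 5))
    r, p, z = Mantel.test(D_a, D_b, perms=999, method='pearson', tail='upper')
    return r, p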
def box_plot_PCC(r_lst_org, r_lst_hub, save=False, dir='./fig_boxplot/', dataset = 'F-MNIST', i=0):
# sns.set()
sns.set(context="paper")
# colors = ['blue', 'red']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# ax.set_title('medianprops')
# medi_style = dict(color='b', lw=30)
ax.boxplot([r_lst_org, r_lst_hub], patch_artist=True, labels=['UMAP', 'HR-UMAP'])
# for b, c in zip(bp['boxes'], colors):
# b.set(color=c, linewidth=1) # boxの外枠の色
# b.set_facecolor(c) # boxの色
ax.set_xlabel('Model')
ax.set_ylabel('Pearson correlation')
ax.set_ylim(0.2, 0.8)
if save:
plt.savefig(dir + dataset + '_boxplot_' + str(i+1) + '.png')
else:
plt.show()
# # data = 'MNIST'
# # data = 'F-MNIST'
# # data = 'coil100'
# data = 'NORB'
#
# # datasize = str(70000)
# # datasize = str(7200)
# datasize = str(48600)
# # datasize =''
# hub_org = 'org'
# # hub_org = 'hub'
# iter = str(10)
#
# # path = '/home/hino/git/umap2/examples/*hub_coil100*.npz'
# # file_lst = glob.glob(path)
# # # for f in os.listdir(path):
# # # if os.path.isfile(os.path.join(path, f)):
# # # file_lst.append(f)
# # print(file_lst)
# #
# # emb_lst = []
# # for i, e in enumerate(file_lst):
# # file_path = e
# # npz = np.load(file_path)
# # emb = npz['emb']
# # emb = emb.reshape((1, emb.shape[0], -1))
# # emb_lst.append(emb)
# #
# # for i in range(len(file_lst) - 1):
# # emb_lst[i+1] = np.vstack((emb_lst[i], emb_lst[i+1]))
# #
# # print(emb_lst[len(file_lst)-1].shape)
# #
# # X = npz['X']
# # L = npz['L']
# #
# # np.savez('embed_hub_'+ "coil100" + str(7200) + '_' + str(10), X=X, L=L, emb=emb_lst[len(file_lst)-1])
#
# # seed_lst = [42, 97, 69, 99]
# # emb_lst = []
# # for i, e in enumerate(seed_lst):
# # file_path = "embed_hub_NORB48600_Seed:" + str(e) + ".npz"
# # # print(file_path)
# # npz = np.load(file_path)
# # X = npz['X']
# # L = npz['L']
# # emb = npz['emb']
# # emb_lst.append(emb)
# #
# # emb = np.vstack((emb_lst[0], emb_lst[1], emb_lst[2], emb_lst[3]))
#
#
# file_path = 'embed_' + hub_org + '_' + data + datasize + '_' + iter + '.npz'
# # file_path = "embed_org_NORB48600_Seed:42.npz"
# # file_path = 'embed_' + hub_org + "_coil100" + str(7200) + '_' + str(10) + '.npz'
# npz = np.load(file_path)
# X = npz['X']
# L = npz['L']
# emb = npz['emb']
# print(emb.shape)
#
# result_knn = []
# result_acc = []
# result_ari = []
# result_ami = []
#
# pcc_lst = []
# p_value = []
#
# for i, e in enumerate(emb):
#
# # knn_acc = kNN_acc(e, L)
# knn_acc = kNN_acc_kfold(e, L)
# # acc, ari, ami = kmeans_acc_ari_ami(e, L)
# # save_visualization(e, L, dataset=data, hub_org=hub_org, i=i)
# # r, p = mantel_test(X, L, e)
# # pcc_lst.append(r)
# # p_value.append(p)
# # print("p-value:", p_value)
# # # visualize(e, L)
# result_knn.append(knn_acc)
# # result_acc.append(acc)
# # result_ari.append(ari)
# # result_ami.append(ami)
# results = DataFrame()
# results['knn'] = result_knn
# print(results.describe())
# # # PCC =======================
# # pcc_lst = np.array(pcc_lst)
# # np.savetxt('pcc_' + hub_org + '_' + data + '.txt', pcc_lst)
# # file_pass = '<PASSWORD>_' + hub_org + '_' + data + '.txt'
# #
# # # BOX PLOT =========================================================
# # file_path_org = 'pcc_' + "org" + '_' + data + '.txt'
# # file_path_hub = 'pcc_' + "hub" + '_' + data + '.txt'
# #
# # pcc_lst_org = np.loadtxt(file_path_org)
# # pcc_lst_hub = np.loadtxt(file_path_hub)
# # for i in range(len(pcc_lst_org)):
# # box_plot_PCC(pcc_lst_org[i], pcc_lst_hub[i], save=False, dataset=data, i=i)
#
# # LOCAL accuracy ==========================
# # result = np.array((result_knn, result_acc, result_ari, result_ami))
# # # with open('examples/result_org_'+data+datasize+'.csv', 'w') as f:
# # np.savetxt('result_' + hub_org + '_' + data + datasize + '_' + iter + '.txt', result)
# #
# # aggregate the result statistics
# # file_path = 'result_' + hub_org + '_' + data + datasize + '_' + iter + '.txt'
# # result_lst = np.loadtxt(file_path)
# # results = DataFrame()
# # results['knn'] = result_lst[0]
# # results['acc'] = result_lst[1]
# # results['ari'] = | |
the first dimension (samples).
"""
if mask is None:
mask = jnp.ones([logits.shape[0]])
mask = mask.reshape([logits.shape[0]])
error_rate = (((jnp.argmax(logits, -1) != jnp.argmax(one_hot_labels, -1))) *
mask).sum() / mask.sum()
# Set to zero if there is no non-masked samples.
return jnp.nan_to_num(error_rate)
def top_k_error_rate_metric(logits: jnp.ndarray,
one_hot_labels: jnp.ndarray,
k: int = 5,
mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Returns the top-K error rate between some predictions and some labels.
Args:
logits: Output of the model.
one_hot_labels: One-hot encoded labels. Dimensions should match the logits.
k: Number of class the model is allowed to predict for each example.
mask: Mask to apply to the loss to ignore some samples (usually, the padding
of the batch). Array of ones and zeros.
Returns:
The error rate (1 - accuracy), averaged over the first dimension (samples).
"""
if mask is None:
mask = jnp.ones([logits.shape[0]])
mask = mask.reshape([logits.shape[0]])
true_labels = jnp.argmax(one_hot_labels, -1).reshape([-1, 1])
top_k_preds = jnp.argsort(logits, axis=-1)[:, -k:]
hit = jax.vmap(jnp.isin)(true_labels, top_k_preds)
error_rate = 1 - ((hit * mask).sum() / mask.sum())
# Set to zero if there is no non-masked samples.
return jnp.nan_to_num(error_rate)
def tensorflow_to_numpy(xs):
"""Converts a tree of tensorflow tensors to numpy arrays.
Args:
xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are
tensorflow tensors.
Returns:
A pytree with the same structure as xs, where the leaves have been converted
to jax numpy ndarrays.
"""
# Use _numpy() for zero-copy conversion between TF and NumPy.
xs = jax.tree_map(lambda x: x._numpy(), xs) # pylint: disable=protected-access
return xs
def shard_batch(xs):
"""Shards a batch across all available replicas.
Assumes that the number of samples (first dimension of xs) is divisible by the
number of available replicas.
Args:
xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are
numpy ndarrays.
Returns:
A pytree with the same structure as xs, where the leaves where added a
leading dimension representing the replica the tensor is on.
"""
local_device_count = jax.local_device_count()
def _prepare(x):
return x.reshape((local_device_count, -1) + x.shape[1:])
return jax.tree_map(_prepare, xs)
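# Shape example (added): with 8 local devices, an image batch of shape
# (256, 224, 224, 3) is reshaped by shard_batch to (8, 32, 224, 224, 3), so
# jax.pmap can map the leading axis onto the devices. The sizes are
# illustrative only.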
def load_and_shard_tf_batch(xs):
"""Converts to numpy arrays and distribute a tensorflow batch.
Args:
xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are
tensorflow tensors.
Returns:
A pytree of numpy ndarrays with the same structure as xs, where the leaves
where added a leading dimension representing the replica the tensor is on.
"""
return shard_batch(tensorflow_to_numpy(xs))
def create_exponential_learning_rate_schedule(
base_learning_rate: float,
steps_per_epoch: int,
lamba: float,
warmup_epochs: int = 0) -> Callable[[int], float]:
"""Creates a exponential learning rate schedule with optional warmup.
Args:
base_learning_rate: The base learning rate.
steps_per_epoch: The number of iterations per epoch.
lamba: Decay is v0 * exp(-t / lambda).
warmup_epochs: Number of warmup epoch. The learning rate will be modulated
by a linear function going from 0 initially to 1 after warmup_epochs
epochs.
Returns:
Function `f(step) -> lr` that computes the learning rate for a given step.
"""
def learning_rate_fn(step):
t = step / steps_per_epoch
return base_learning_rate * jnp.exp(-t / lamba) * jnp.minimum(
t / warmup_epochs, 1)
return learning_rate_fn
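# Worked example (added): with a base LR of 0.1, 100 steps per epoch, lamba=50
# and 5 warmup epochs, the LR ramps linearly for 5 epochs and then decays as
# 0.1 * exp(-epoch / 50). All numbers are arbitrary, for illustration only.
def _example_exponential_schedule():
  lr_fn = create_exponential_learning_rate_schedule(
      base_learning_rate=0.1, steps_per_epoch=100, lamba=50.0, warmup_epochs=5)
  # epoch 1 (still warming up): ~0.1 * exp(-1/50) * (1/5) ~ 0.0196
  # epoch 10 (past warmup):      0.1 * exp(-10/50)        ~ 0.0819
  return float(lr_fn(100)), float(lr_fn(1000))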
def get_cosine_schedule(num_epochs: int, learning_rate: float,
num_training_obs: int,
batch_size: int) -> Callable[[int], float]:
"""Returns a cosine learning rate schedule, without warm up.
Args:
num_epochs: Number of epochs the model will be trained for.
learning_rate: Initial learning rate.
num_training_obs: Number of training observations.
batch_size: Total batch size (number of samples seen per gradient step).
Returns:
A function that takes as input the current step and returns the learning
rate to use.
"""
steps_per_epoch = int(math.floor(num_training_obs / batch_size))
learning_rate_fn = lr_schedule.create_cosine_learning_rate_schedule(
learning_rate, steps_per_epoch // jax.host_count(), num_epochs,
warmup_length=0)
return learning_rate_fn
def get_exponential_schedule(num_epochs: int, learning_rate: float,
num_training_obs: int,
batch_size: int) -> Callable[[int], float]:
"""Returns an exponential learning rate schedule, without warm up.
Args:
num_epochs: Number of epochs the model will be trained for.
learning_rate: Initial learning rate.
num_training_obs: Number of training observations.
batch_size: Total batch size (number of samples seen per gradient step).
Returns:
A function that takes as input the current step and returns the learning
rate to use.
"""
steps_per_epoch = int(math.floor(num_training_obs / batch_size))
# At the end of training, the lr should be 1.2% of its original value.
# This mimics the behavior from the EfficientNet paper.
end_lr_ratio = 0.012
lamba = - num_epochs / math.log(end_lr_ratio)
learning_rate_fn = create_exponential_learning_rate_schedule(
learning_rate, steps_per_epoch // jax.host_count(), lamba)
return learning_rate_fn
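# Worked example of the decay constant above (added comment; the values are
# hypothetical): for num_epochs=100 and end_lr_ratio=0.012,
#
#     lamba = -100 / ln(0.012) ≈ 22.6
#
# so after 100 epochs exp(-100 / 22.6) ≈ 0.012, i.e. the learning rate has
# decayed to roughly 1.2% of its initial value, as intended.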
def global_norm(updates) -> jnp.ndarray:
"""Returns the l2 norm of the input.
Args:
updates: A pytree of ndarrays representing the gradient.
"""
return jnp.sqrt(
sum([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(updates)]))
def clip_by_global_norm(updates):
"""Clips the gradient by global norm.
Will have no effect if FLAGS.gradient_clipping is set to zero (no clipping).
Args:
updates: A pytree of numpy ndarray representing the gradient.
Returns:
The gradient clipped by global norm.
"""
if FLAGS.gradient_clipping > 0:
g_norm = global_norm(updates)
trigger = g_norm < FLAGS.gradient_clipping
updates = jax.tree_multimap(
lambda t: jnp.where(trigger, t, (t / g_norm) * FLAGS.gradient_clipping),
updates)
return updates
def dual_vector(y: jnp.ndarray) -> jnp.ndarray:
"""Returns the solution of max_x y^T x s.t. ||x||_2 <= 1.
Args:
y: A pytree of numpy ndarray, vector y in the equation above.
"""
gradient_norm = jnp.sqrt(sum(
[jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)]))
normalized_gradient = jax.tree_map(lambda x: x / gradient_norm, y)
return normalized_gradient
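# Tiny worked example of dual_vector (added comment, not executed): for a
# pytree with a single leaf jnp.array([3., 4.]) the global norm is 5, so the
# returned ascent direction is [0.6, 0.8]. In get_sam_gradient below this unit
# vector is scaled by rho to build the perturbed ("noised") model.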
def train_step(
optimizer: flax.optim.Optimizer,
state: flax.nn.Collection,
batch: Dict[str, jnp.ndarray],
prng_key: jnp.ndarray,
learning_rate_fn: Callable[[int], float],
l2_reg: float,
pretrain: bool = False
) -> Tuple[flax.optim.Optimizer, flax.nn.Collection, Dict[str, float], float]:
"""Performs one gradient step.
Args:
optimizer: The optimizer targeting the model to train.
state: Current state associated with the model (contains the batch norm moving averages).
batch: Batch on which the gradient should be computed. Must have an `image`
and `label` key. Masks will not be used for training, so the batch is
expected to be full (with any potential remainder dropped).
prng_key: A PRNG key to use for stochasticity for this gradient step (e.g.
for sampling an eventual dropout mask).
learning_rate_fn: Function that takes the current step as input and return
the learning rate to use.
l2_reg: Weight decay parameter. The total weight decay penalty added to the
loss is equal to 0.5 * l2_reg * sum_i ||w_i||_2^2 where the sum is over
all trainable parameters of the model (bias and batch norm parameters
included, unless FLAGS.no_weight_decay_on_bn is set).
Returns:
The updated optimizer (that includes the model), the updated state and
a dictionary containing the training loss and error rate on the batch.
"""
def forward_and_loss(model: flax.nn.Model, true_gradient: bool = False):
"""Returns the model's loss, updated state and predictions.
Args:
model: The model that we are training.
true_gradient: If true, the same mixing parameter will be used for the
forward and backward pass for the Shake Shake and Shake Drop
regularization (see papers for more details).
"""
with flax.nn.stateful(state) as new_state:
with flax.nn.stochastic(prng_key):
if pretrain:
try:
xl, logits = model.apply_hid(
batch['image'], train=True, true_gradient=true_gradient)
except TypeError:
xl, logits = model.apply_hid(batch['image'], train=True)
else:
try:
logits = model(
batch['image'], train=True, true_gradient=true_gradient)
except TypeError:
logits = model(batch['image'], train=True)
if pretrain:
loss = dim_loss(xl)
else:
loss = cross_entropy_loss(logits, batch['label'])
# By default we apply weight decay to all parameters, including bias and
# batch norm parameters; FLAGS.no_weight_decay_on_bn restricts it to
# weights with ndim > 1 (i.e. excludes biases and batch norm scales/offsets).
weight_penalty_params = jax.tree_leaves(model.params)
if FLAGS.no_weight_decay_on_bn:
weight_l2 = sum(
[jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1])
else:
weight_l2 = sum([jnp.sum(x ** 2) for x in weight_penalty_params])
weight_penalty = l2_reg * 0.5 * weight_l2
loss = loss + weight_penalty
return loss, (new_state, logits)
step = optimizer.state.step
def get_sam_gradient(model: flax.nn.Model, rho: float):
"""Returns the gradient of the SAM loss loss, updated state and logits.
See https://arxiv.org/abs/2010.01412 for more details.
Args:
model: The model that we are training.
rho: Size of the perturbation.
"""
# compute gradient on the whole batch
# Note: forward_and_loss reads `pretrain` from the enclosing train_step scope,
# so it must not be passed as a keyword argument here.
(_, (inner_state, _)), grad = jax.value_and_grad(
lambda m: forward_and_loss(m, true_gradient=True), has_aux=True)(model)
if FLAGS.sync_perturbations:
if FLAGS.inner_group_size is None:
grad = jax.lax.pmean(grad, 'batch')
else:
grad = jax.lax.pmean(
grad, 'batch',
axis_index_groups=local_replica_groups(FLAGS.inner_group_size))
grad = dual_vector(grad)
noised_model = jax.tree_multimap(lambda a, b: a + rho * b,
model, grad)
(_, (_, logits)), grad = jax.value_and_grad(forward_and_loss, has_aux=True)(noised_model)
return (inner_state, logits), grad
lr = learning_rate_fn(step)
rho = FLAGS.sam_rho
if rho > 0: # SAM | |
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ___ ___ ________ ________ ______ ________ ________ ##
## | | | | | ____| / | / __ \ | ____|| ____| ##
## | |__| | | |__ | (----` ______| | | | | |__ | |__ ##
## | __ | | __| \ \ |______| | | | | __| | __| ##
## | | | | | |____.----) | | `--' | | | | | ##
## |__| |__| |_______|_______/ \______/ |__| |__| ##
## ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
# Import packages
import pdb
import time
import copy
import os.path
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.optimize import root_scalar
from scipy.interpolate import interp1d
from importlib_resources import files
from . import elgrid_models
from . import process_models
from . import combustion_models
# Define font settings
fontsize = 12
plt.rc('text', usetex=False)
plt.rcParams['font.family'] = 'serif' # 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'
plt.rcParams['font.serif'] = 'times new roman' # 'cmr10', 'palatino', 'times new roman'
plt.rcParams['mathtext.fontset'] = 'stix' # 'cm' (latex style), 'stix' (times new roman style), 'stixsans'
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
class IntegratedModel:
"""
The IntegratedModel object defines and stores parameters of the HES-OFF concept
"""
def __init__(self, IN):
# Declare input variables as instance variables
self.IN = copy.deepcopy(IN)
# Process specifications
self.HEAT_DEMAND = np.asarray(self.IN["HEAT_DEMAND"])
self.POWER_DEMAND = np.asarray(self.IN["POWER_DEMAND"])
self.STAGE_LENGTH = np.asarray(self.IN["STAGE_LENGTH"])
# Gas turbine specifications
self.GT_MODEL = self.IN["GT_MODEL"]
self.GT_UNITS = self.IN["GT_UNITS"]
self.GT_MAX_H2 = self.IN["GT_MAX_H2"]
# Wind farm specifications
self.WT_MODEL = self.IN["WT_MODEL"]
self.WT_REF_HEIGHT = IN["WT_REF_HEIGHT"]
self.WT_HUB_HEIGHT = IN["WT_HUB_HEIGHT"]
self.WT_RATED_POWER = self.IN["WT_RATED_POWER"]
# Electrolyzer system specifications
self.EL_MODEL = self.IN["EL_MODEL"]
self.EL_RATED_POWER = self.IN["EL_RATED_POWER"]
self.EL_EFFICIENCY = np.asarray(self.IN["EL_EFFICIENCY"])
# Fuel cell system specifications
self.FC_MODEL = self.IN["FC_MODEL"]
self.FC_RATED_POWER = self.IN["FC_RATED_POWER"]
self.FC_EFFICIENCY = np.asarray(self.IN["FC_EFFICIENCY"])
# Hydrogen storage specifications
self.H2_CAPACITY = self.IN["H2_CAPACITY"]
self.H2_INITIAL_LEVEL = self.IN["H2_INITIAL_LEVEL"]
self.H2_RECHARGE_THRESHOLD = self.IN["H2_RECHARGE_THRESHOLD"]
self.H2_COFIRE_THRESHOLD = self.IN["H2_COFIRE_THRESHOLD"]
# Wind data specifications
self.WIND_FILENAME = self.IN["WIND_FILENAME"]
self.WIND_DATA = process_models.read_wind_data(IN["WIND_FILENAME"])
self.WIND_SPEED = self.WIND_DATA["speed"]
self.WIND_TIME = self.WIND_DATA["time"]
# Initialize components
self.GT = process_models.GT(model=self.GT_MODEL, number_of_units=self.GT_UNITS)
self.WT = process_models.WT(model=self.WT_MODEL, rated_power=self.WT_RATED_POWER, wind_speed_factor=(self.WT_HUB_HEIGHT, self.WT_REF_HEIGHT))
self.EL = process_models.EL(model=self.EL_MODEL, rated_power=self.EL_RATED_POWER, efficiency_coefficients=self.EL_EFFICIENCY)
self.FC = process_models.FC(model=self.FC_MODEL, rated_power=self.FC_RATED_POWER, efficiency_coefficients=self.FC_EFFICIENCY)
def __str__(self):
class_info = '\nHES-OFF concept specifications\n'
for key, value in self.IN.items():
class_info += "{:<24}{}\n".format(key, value)
return class_info
def evaluate_process_model(self):
# Check the size of the power demand and heat demand arrays
self.POWER_DEMAND, self.HEAT_DEMAND = np.atleast_1d(self.POWER_DEMAND), np.atleast_1d(self.HEAT_DEMAND)
if self.POWER_DEMAND.size != self.HEAT_DEMAND.size:
raise Exception("The number of elements of POWER_DEMAND and HEAT_DEMAND must be the same")
# Initialize arrays to store the solution at each instance of the year
p, n = self.POWER_DEMAND.size, self.WIND_SPEED.size
flag = np.empty((p, n)) # Type of operational strategy at each time instance
GT_power = np.empty((p, n)) # Power generated in the gas turbines (W)
WT_power = np.empty((p, n)) # Power generated in the wind turbines (W)
FC_power = np.empty((p, n)) # Power generated in the fuel cell system (W)
EL_power = np.empty((p, n)) # Power consumed in the electrolyzer system (W)
H2_level = np.empty((p, n)) # Mass of hydrogen in the storage system (kg)
H2_produced = np.empty((p, n)) # Mass of hydrogen produced in the electrolyzer (kg)
H2_utilized = np.empty((p, n)) # Mass of hydrogen utilized in the fuel cell + gas turbine (kg)
CO2_produced = np.empty((p, n)) # Mass of carbon dioxide emitted to the atmosphere (kg)
power_deficit = np.empty((p, n)) # Power demand not satisfied (W)
# energy_surplus = np.empty((p,n)) # Extra wind energy that is dissipated (J)
# Initialize time array (must have the same shape as the other arrays to return within dictionary)
times = np.empty((p, n), dtype=np.int32)
for i in range(p):
times[i, :] = np.arange(0, n)
# Create the natural gas + hydrogen mixture used in the gas turbines
blend_NG_H2 = process_models.create_fluid_mixture(fluids=[combustion_models.hydrogen, combustion_models.natural_gas],
fractions=[self.GT_MAX_H2, 1 - self.GT_MAX_H2],
fraction_type="molar")
# Loop over the time periods
for p, (power_demand, heat_demand) in enumerate(zip(self.POWER_DEMAND, self.HEAT_DEMAND)):
# Compute the initial level of hydrogen in the storage system
H2_level[p, 0] = self.H2_CAPACITY * self.H2_INITIAL_LEVEL
# Compute the minimum GT load required to satisfy the heat demand
GT_power_min = self.GT.compute_power_from_heat(heat_demand)
# Compute the maximum GT load of the current GT model
GT_power_max = self.GT.rated_power
# Compute wind power over the year
WT_power_available = self.WT.compute_power_output(wind_speed=self.WIND_SPEED)
for t in times[p]:
# Use a run-out-of-steam strategy to supply the power demand
if GT_power_min + WT_power_available[t] >= power_demand:
# Case 1: Use GT_min and WT to satisfy the power demand (use EL to recharge H2)
if H2_level[p, t] < self.H2_CAPACITY:
flag_current = 1
GT_power_current = GT_power_min
EL_power_current = np.minimum(self.EL_RATED_POWER, GT_power_min + WT_power_available[t] - power_demand)
WT_power_current = power_demand + EL_power_current - GT_power_current
FC_power_current = 0.00
# Case 2: Use GT_min and WT to satisfy the power demand (do not use EL to recharge H2)
else:
flag_current = 2
GT_power_current = GT_power_min
WT_power_current = power_demand - GT_power_current
EL_power_current = 0.00
FC_power_current = 0.00
elif GT_power_max + WT_power_available[t] >= power_demand:
# Case 3: Use GT and WT to satisfy the power demand (use GT+EL to recharge H2)
if H2_level[p, t] < self.H2_RECHARGE_THRESHOLD * self.H2_CAPACITY:
flag_current = 3
WT_power_current = WT_power_available[t]
EL_power_current = np.minimum(self.EL_RATED_POWER, GT_power_max + WT_power_current - power_demand)
GT_power_current = np.minimum(GT_power_max, power_demand + EL_power_current - WT_power_current)
FC_power_current = 0.00
# Case 4: Use GT and WT to satisfy the power demand (do not use GT+EL to recharge H2)
else:
flag_current = 4
WT_power_current = WT_power_available[t]
GT_power_current = power_demand - WT_power_current
EL_power_current = 0.00
FC_power_current = 0.00
else:
# Case 5: Use GT, WT and FC to satisfy the power demand (there is hydrogen available)
if H2_level[p, t] > 0.00:
flag_current = 5
WT_power_current = WT_power_available[t]
GT_power_current = GT_power_max
FC_power_current = power_demand - WT_power_current - GT_power_current
EL_power_current = 0.00
# Case 6: The GT and WT cannot satisfy the power demand (there is no hydrogen available)
else:
flag_current = 6
WT_power_current = WT_power_available[t]
GT_power_current = GT_power_max
FC_power_current = 0.00
EL_power_current = 0.00
# Determine the type of fuel used in the gas turbines
use_fuel_blend = H2_level[p,t] > self.H2_COFIRE_THRESHOLD * self.H2_CAPACITY
if use_fuel_blend:
GT_fuel = blend_NG_H2
else:
GT_fuel = combustion_models.natural_gas
# Compute the carbon dioxide emissions over the hourly time step (kg/s * 3600 s = kg)
CO2_produced_current = self.GT.compute_carbon_dioxide_emissions(GT_power_current, fuel=GT_fuel)*3600
# Compute the mass flow rate of hydrogen co-fired in the gas turbine (kg/s)
H2_cofired_current = self.GT.compute_hydrogen_mass_flow_rate(GT_power_current, fuel=GT_fuel)
# Compute mass flow rate of hydrogen fed to the fuel cell system (kg/s)
H2_utilized_current = self.FC.compute_hydrogen_consumption(FC_power_current)
# Compute the mass flow rate of hydrogen produced in the electrolyzer system (kg/s)
H2_produced_current = self.EL.compute_hydrogen_production(EL_power_current)
# Evaluate the power balance (W)
power_deficit_current = power_demand + EL_power_current - WT_power_current - GT_power_current - FC_power_current
# Compute the hydrogen level for the next time instance (skip last time step computation)
if t < times[p, -1]:
H2_level[p, t + 1] = H2_level[p, t] + (
H2_produced_current - H2_utilized_current - H2_cofired_current) * 3600
# Store the current solution in its corresponding array
flag[p, t] = flag_current
GT_power[p, t] = GT_power_current
WT_power[p, t] = WT_power_current
FC_power[p, t] = FC_power_current
EL_power[p, t] = EL_power_current
power_deficit[p, t] = power_deficit_current
CO2_produced[p, t] = CO2_produced_current
H2_produced[p, t] = H2_produced_current
H2_utilized[p, t] = H2_utilized_current + H2_cofired_current
# Store the results in a dictionary
result_dict = {"flag": flag * 1.00, # Conversion from integer to float for Numba
"times": times * 1.00, # Conversion from integer to float for Numba
"GT_power": GT_power,
"WT_power": WT_power,
"FC_power": FC_power,
"EL_power": EL_power,
"H2_produced": H2_produced,
"H2_utilized": H2_utilized,
"H2_level": H2_level,
"CO2_produced": CO2_produced,
"power_deficit": power_deficit}
self.process_output = result_dict
# Compute the accumulated CO2 emissions
self.CO2_emissions = np.sum(self.process_output["CO2_produced"], axis=1)
self.CO2_emissions_total = np.sum(self.CO2_emissions * self.STAGE_LENGTH)
# Compute the accumulated energy deficit
self.energy_deficit = np.sum(self.process_output["power_deficit"]*3600, axis=1)
self.energy_deficit_total = np.sum(self.energy_deficit*self.STAGE_LENGTH)
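# Illustrative usage sketch (added comment; every value and model name below is
# hypothetical and must match the units and options expected by process_models):
#
#     IN = {"HEAT_DEMAND": [10e6], "POWER_DEMAND": [30e6], "STAGE_LENGTH": [25],
#           "GT_MODEL": "...", "GT_UNITS": 2, "GT_MAX_H2": 0.3,
#           "WT_MODEL": "...", "WT_REF_HEIGHT": 90, "WT_HUB_HEIGHT": 90,
#           "WT_RATED_POWER": 20e6,
#           "EL_MODEL": "...", "EL_RATED_POWER": 5e6, "EL_EFFICIENCY": [...],
#           "FC_MODEL": "...", "FC_RATED_POWER": 5e6, "FC_EFFICIENCY": [...],
#           "H2_CAPACITY": 3000, "H2_INITIAL_LEVEL": 0.5,
#           "H2_RECHARGE_THRESHOLD": 0.95, "H2_COFIRE_THRESHOLD": 0.1,
#           "WIND_FILENAME": "..."}
#     model = IntegratedModel(IN)
#     model.evaluate_process_model()
#     model.CO2_emissions_total, model.energy_deficit_total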
# ------------------------------------------------------------------------------------------------------------------ ##
# Results plotting functions
# ------------------------------------------------------------------------------------------------------------------ ##
def plot_hydrogen_level(self):
""" Plot hydrogen storage level over time """
n_axes = self.process_output["times"].shape[0]
fig = plt.figure(figsize=(6.0, 5.5))
fig.suptitle('Hydrogen storage level over the year (kg)', fontsize=fontsize + 1, fontweight='normal', color='k')
axes = fig.subplots(n_axes)
for index, ax in enumerate(axes):
x, y = self.process_output["times"][index, :] / 24, self.process_output["H2_level"][index, :]
for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label="", | |
startMatches = []
else:
hasStarted = False
if endMatches is None:
endMatches = []
for pair in self.getFramePairs(contentOnly):
token = pair[0]
if not hasStarted: # Before noise sequence
# Look for start of search space
if multiMatch(token, startMatches):
hasStarted = True
continue
else: # Part of noise sequence
# Check if search space has ended
if multiMatch(token, endMatches):
break
else:
# Append noise words to sequence
if token in NOISE_TOKENS:
noiseSequence.append(pair)
noiseSequence.append(('</noise>', 1.0))
return TagFrame(noiseSequence, self.isStrict)
def filterFrame(self):
filteredTokens = [self.getFramePair(0)]
activeTag = None
for tokenPair in self.getFramePairs(contentOnly=True):
token = tokenPair[0]
m_open = reOpenTag.match(token)
if m_open: # Opening a tag
activeTag = m_open.group(1)
elif activeTag is None: # Token is outside of tag and not a tag itself
filteredTokens.append(tokenPair)
elif token == '</{0}>'.format(activeTag): # Token is closing the open tag
activeTag = None
filteredTokens.append(self.getFramePair(-1))
return TagFrame(filteredTokens, self.isStrict)
def getConfidenceScore(self, mode, contentOnly=False):
return self.getJointConfidenceScore([self], mode, contentOnly)
@staticmethod
def getJointConfidenceScore(frames, mode, contentOnly=False):
confidences = list()
for frame in frames:
if frame is not None:
confidences.extend(frame.getConfidenceValues(contentOnly))
if mode is CONFMODE_OFF:
return None
else:
if len(confidences) == 0:
return -1.0
else:
if mode is CONFMODE_MIN:
return min(confidences)
elif mode is CONFMODE_PROD:
return prod(confidences)
elif mode is CONFMODE_ARITMEAN:
return (1.0 / len(confidences)) * sum(confidences)
elif mode is CONFMODE_GEOMEAN:
return pow(prod(confidences), 1.0 / len(confidences))
else:
e = "Unknown confidence calculation mode: {0}".format(mode)
raise ValueError(e)
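# Worked example of the confidence modes above (added comment): for collected
# confidence values [0.8, 0.5],
#     CONFMODE_MIN      -> 0.5
#     CONFMODE_PROD     -> 0.4
#     CONFMODE_ARITMEAN -> 0.65
#     CONFMODE_GEOMEAN  -> sqrt(0.4) ≈ 0.632
# and an empty confidence list yields the sentinel value -1.0.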
class UtteranceUnit(object):
"""
Super class for utterance units.
Don't use directly, instead refer to e.g. Callsign or Command.
"""
LETTERS = dict(alpha='A', bravo='B', charly='C',
delta='D', echo='E', foxtrot='F', fox='F',
golf='G', hotel='H', india='I',
juliett='J', kilo='K', lima='L',
mike='M', november='N', oscar='O',
papa='P', quebec='Q', romeo='R',
sierra='S', tango='T', uniform='U',
victor='V', whisky='W', xray='X',
yankee='Y', zoulou='Z')
LETTERWORDS = dict()
for word, letter in LETTERS.iteritems():
LETTERWORDS.setdefault(letter, list()).append(word)
SINGLE_DIGITS = dict(one='1', two='2', three='3',
four='4', five='5', six='6',
seven='7', eight='8', nine='9',
zero='0')
TEEN_DIGITS = dict(eleven='one', twelve='two', thirteen='three',
fourteen='four', fifteen='five', sixteen='six',
seventeen='seven', eightteen='eight', nineteen='nine')
TENS_DIGITS = dict(ten='one', twenty='two', thirty='three',
forty='four', fifty='five', sixty='six',
seventy='seven', eighty='eight', ninety='nine')
SPECIAL_DIGITS = dict(hundred='00', thousand='000')
DOUBLE_DIGITS = dict(ten='10', twenty='20', thirty='30',
forty='40', fifty='50', sixty='60',
seventy='70', eighty='80', ninety='90')
MULTIPLIERS = dict(double=2, triple=3)
ALPHANUM = dict()
ALPHANUM.update(SINGLE_DIGITS)
ALPHANUM.update(LETTERS)
ALPHANUM['decimal'] = '.'
# Set of all words that can come up in a numbers context
# This includes numbers, letters and special words like "and", "decimal" or "hundred"
NUMBER_VOCABULARY = set(ALPHANUM)
NUMBER_VOCABULARY.update(TEEN_DIGITS)
NUMBER_VOCABULARY.update(TENS_DIGITS)
NUMBER_VOCABULARY.update(SPECIAL_DIGITS)
NUMBER_VOCABULARY.update(DOUBLE_DIGITS)
NUMBER_VOCABULARY.update(MULTIPLIERS)
NUMBER_VOCABULARY.add('and')
def __init__(self, utteranceFrame):
self.utteranceFrame = utteranceFrame
def __str__(self):
return 'UtteranceUnit({0})'.format(self.utteranceFrame)
def parseNumber(self, numberFrame, isFrame=True):
"""
Converts a series of words into the correct digit representation.
If isFrame is true, numberFrame must be a TagFrame object,
if false, numberFrame must be a list of word strings.
"""
if isFrame:
items = numberFrame.getTokens(contentOnly=True)
else:
items = numberFrame
# Filter out rogue words (e.g. silence markers and hesitation words)
items = [item for item in items if (item in self.NUMBER_VOCABULARY)]
# Prepare
while 'and' in items: # Remove "and"
items.remove('and')
# if speaker dropped the digit before thousand/hundred, it was a one
if len(items) > 0 and items[0] in ['thousand', 'hundred']:
items.insert(0, 'one')
# Thousands
while 'thousand' in items:
i = items.index('thousand')
nxt = i + 1
nxt2 = nxt + 1
# thousand was last item
if nxt >= len(items):
items.extend(['zero'] * 3)
# if "x thousand y hundred", move on to parse "x y hundred"
elif nxt2 < len(items) and items[nxt2] == 'hundred':
pass
else:
if items[nxt] not in self.TENS_DIGITS and items[nxt] not in self.TEEN_DIGITS:
if items[nxt] not in self.SINGLE_DIGITS:
items.insert(nxt, 'zero')
items.insert(nxt, 'zero')
items.insert(nxt, 'zero')
items.pop(i)
# Hundreds
while 'hundred' in items:
i = items.index('hundred')
nxt = i + 1
if nxt >= len(items): # hundred was last item
items.extend(['zero'] * 2)
elif items[nxt] not in self.TENS_DIGITS and items[nxt] not in self.TEEN_DIGITS:
if items[nxt] not in self.SINGLE_DIGITS:
items.insert(nxt, 'zero')
items.insert(nxt, 'zero')
items.pop(i)
for multiple, multiplier in self.MULTIPLIERS.iteritems():
while multiple in items:
i = items.index(multiple)
nxt = i + 1
if nxt < len(items): # multiple wasn't last word
next_item = items[nxt]
for _ in range(1, multiplier):
items.insert(nxt, next_item)
items.pop(i)
# Tens
for tenner, tenval in self.TENS_DIGITS.iteritems():
while tenner in items:
i = items.index(tenner)
nxt = i + 1
items[i] = tenval
if nxt >= len(items): # Tenner was last item
items.append('zero')
elif items[nxt] not in self.SINGLE_DIGITS or items[nxt] == 'zero':
items.insert(nxt, 'zero')
# Teen values (11-19)
for teen, teenval in self.TEEN_DIGITS.iteritems():
while teen in items:
i = items.index(teen)
items[i] = teenval
items.insert(i, 'one')
# Parse
num = []
for i, item in enumerate(items):
if item in self.ALPHANUM:
num += self.ALPHANUM[item]
return ''.join(num)
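# Worked examples of parseNumber with isFrame=False (illustrative addition):
#
#     UtteranceUnit(None).parseNumber(['one', 'hundred', 'twenty', 'three'], isFrame=False)  # -> '123'
#     UtteranceUnit(None).parseNumber(['three', 'thousand'], isFrame=False)                  # -> '3000'
#     UtteranceUnit(None).parseNumber(['double', 'seven'], isFrame=False)                    # -> '77'
#     UtteranceUnit(None).parseNumber(['one', 'decimal', 'five'], isFrame=False)             # -> '1.5'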
class Callsign(UtteranceUnit):
"""
Contains all information on the callsign part of an utterance.
"""
AIRLINE_ALT_SPELLING = {"air_frans": "air_france",
"hansa": "lufthansa"}
def __init__(self, callsignFrame, airlineShorts, contextFile=None, isStrict=False):
"""
:param callsignFrame: must be a TagFrame for a callsign tag
:param airlineShorts: is a dictionary of the official acronyms of airline names
:param contextFile: is the filename of the current_callsign file
:param isStrict:
"""
super(Callsign, self).__init__(callsignFrame)
self.airlineShorts = airlineShorts
self.contextFile = contextFile
self.callsignFrame = self.utteranceFrame
self.noiseFrame = None
if callsignFrame is None:
self.airlineFrame = None
self.flightnumberFrame = None
self.remainderFrame = None
self.callsign = NO_CALLSIGN
else:
if isStrict and not self.isCallsignFrame(callsignFrame):
raise ValueError("Invalid format for callsign frame: " + str(self.callsignFrame))
self.airlineFrame, self.flightnumberFrame, self.remainderFrame = self._extractSubframes_(self.callsignFrame)
self.callsign = self._computeCallsign_(self.airlineFrame, self.flightnumberFrame, self.contextFile)
def __str__(self):
return 'Callsign({0})'.format(self.utteranceFrame)
def isCallsign(self):
return self.callsign != NO_CALLSIGN
@staticmethod
def isCallsignFrame(callsignFrame):
return callsignFrame.getToken(0) == '<callsign>' and callsignFrame.getToken(-1) == '</callsign>'
@staticmethod
def _extractSubframes_(callsignFrame):
airlineFrame, remainderFrame = callsignFrame.extractTag("airline", is_command=False)
flightnumberFrame, _ = remainderFrame.extractTag("flightnumber", is_command=False)
return airlineFrame, flightnumberFrame, remainderFrame
def _computeCallsign_(self, airlineFrame, flightnumberFrame, contextFile=None):
# Check for empty tags
hasFlightnumber = False
if flightnumberFrame is not None:
hasFlightnumber = not flightnumberFrame.isEmptyTag()
hasAirline = False
if airlineFrame is not None:
hasAirline = not airlineFrame.isEmptyTag()
# If no callsign info exists, abort
if not hasAirline and not hasFlightnumber:
return NO_CALLSIGN
# ### Prepare output string ###
# Parse whichever part is not unknown
if hasAirline:
airlineWord = ' '.join(airlineFrame.getTokens(contentOnly=True))
if airlineWord in self.AIRLINE_ALT_SPELLING: # Hotfix for typical annotator misspelling
airlineWord = self.AIRLINE_ALT_SPELLING[airlineWord]
airsign = self.airlineShorts.get(airlineWord, UNKNOWN_AIRLINE)
else:
airlineWord = NO_AIRLINE
airsign = NO_AIRLINE
if hasFlightnumber:
flightsign = self.parseNumber(flightnumberFrame)
else:
flightsign = NO_FLIGHTNUMBER
# Try autocompleting callsign information
airsign, flightsign, _ = self.autocompleteCallsign(airsign, flightsign, airlineWord=airlineWord,
contextFile=contextFile)
callsign = airsign + flightsign
return callsign
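# Illustrative sketch (added comment; the airline acronym is an assumption
# about the contents of the airlineShorts mapping): with an <airline> frame
# containing "lufthansa" and a <flightnumber> frame containing
# "one two three", airsign becomes e.g. "DLH" and flightsign "123", so the
# returned callsign is "DLH123" (possibly adjusted by autocompleteCallsign
# against the context file).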
@staticmethod
def loadContext(contextFile):
callsigns = []
if contextFile is not None:
with open(contextFile) as f:
for line in f:
line = line.strip()
if len(line) > 0:
elems = line.split()
if len(elems) >= 2:
airline, flightnumber = elems
else:
airline = ''
flightnumber = elems[0]
callsigns.append((airline, flightnumber))
return callsigns
def autocompleteCallsign(self, airsign, flightsign, airlineWord=None, contextFile=None):
"""
When airline or flightnumber are unknown, check the callsign context file
for possible partial matches.
Additional functions:
1. Checks whether ATC might have dropped a leading zero (e.g. AMB1 for AMB01)
and restores it, both for complete and incomplete callsigns.
2. Check whether airline could be leading flight number letter (and vice versa)
Returns a triple (airsign, flightnumber, is_changed)
"""
if self.contextFile is not None:
context = self.loadContext(contextFile)
ambiguous_airsigns, ambiguous_flightsigns = self.getAmbiguousCallsigns(context)
# ### Complete Callsign ###
# Only checks for missing leading zeroes
if airsign != NO_AIRLINE and flightsign != NO_FLIGHTNUMBER:
# First check if the regular callsign is in the context,
# then check if a leading zero was dropped (e.g. AMB1 for AMB01).
for this_flightsign in [flightsign, '0' + flightsign]:
for ctx_airsign, ctx_flightsign in context:
if ctx_airsign == airsign and this_flightsign == ctx_flightsign:
return airsign, this_flightsign, True
# ### Missing Callsign ###
# Tries to autocomplete the callsign and checks for missing leading zeroes.
elif flightsign != NO_FLIGHTNUMBER:
# First check if the regular flightnumber is in the context,
# then check if a leading zero was dropped (e.g. AMB1 for AMB01).
for this_flightsign in [flightsign, '0' + flightsign]:
if this_flightsign not in ambiguous_flightsigns:
for ctx_airsign, ctx_flightsign in context:
if this_flightsign == ctx_flightsign:
return ctx_airsign, this_flightsign, True
# ### Missing Flightnumber ###
# Tries to autocomplete the flightnumber
elif airsign != NO_AIRLINE:
| |
import custom_paths
import utils
import numpy as np
from typing import *
from pathlib import Path
import os
class ExperimentResults:
def __init__(self, results_dict: dict, exp_name: str):
# usually, results_dict[alg_name][task_name][split_idx] = something
self.results_dict = results_dict
self.exp_name = exp_name
self.alg_names = list(self.results_dict.keys())
# self.alg_names.sort()
self.task_names = list(set.union(*[set(alg_results.keys()) for alg_results in self.results_dict.values()]))
self.task_names.sort()
def map_single_split(self, f: Callable):
# can be used e.g. to compute average log metric results
return ExperimentResults({alg_name: {task_name: [f(split) for split in split_results]
for task_name, split_results in task_results.items()}
for alg_name, task_results in self.results_dict.items()}, exp_name=self.exp_name)
def get_learning_curves(self, key: str) -> 'ExperimentResults':
return ExperimentResults({alg_name: {task_name: np.mean(np.log([split['errors'][key] for split in split_results]), axis=0)
for task_name, split_results in task_results.items() if int(task_name.split('_')[-1].split('x')[0]) == 256}
for alg_name, task_results in self.results_dict.items()}, exp_name=self.exp_name)
def get_avg_al_stats(self, key: str) -> 'ExperimentResults':
return self.map_single_split(
lambda split_dict: [stat[key] for stat in split_dict['al_stats'] if key in stat])
def get_avg_al_times(self, key: str) -> 'ExperimentResults':
return self.map_single_split(
lambda split_dict: [stat[key]['total'] for stat in split_dict['al_stats'] if key in stat])
def get_avg_errors(self, key: str, use_log: bool = True) -> 'ExperimentResults':
if use_log:
return self.map_single_split(
lambda split_dict: np.mean(np.log(split_dict['errors'][key][1:])))
else:
return self.map_single_split(
lambda split_dict: np.mean(split_dict['errors'][key][1:]))
def get_last_errors(self, key: str, use_log: bool = True) -> 'ExperimentResults':
if use_log:
return self.map_single_split(
lambda split_dict: np.mean(np.log(split_dict['errors'][key][-1])))
else:
return self.map_single_split(
lambda split_dict: np.mean(split_dict['errors'][key][-1]))
def get_average_al_times(self) -> 'ExperimentResults':
return self.map_single_split(
lambda split_dict: np.mean([sr['kernel_time']['total'] + sr['selection_time']['total']
for sr in split_dict['al_stats']]))
def select_split(self, i: int) -> 'ExperimentResults':
for alg_name, task_results in self.results_dict.items():
for task_name, split_results in task_results.items():
if len(split_results) <= i:
print(f'Invalid index for alg {alg_name} on task {task_name}')
return ExperimentResults({alg_name: {task_name: split_results[i]
for task_name, split_results in task_results.items()}
for alg_name, task_results in self.results_dict.items()}, exp_name=self.exp_name)
def filter_task_suffix(self, task_suffix: str) -> 'ExperimentResults':
return ExperimentResults(
{alg_name: {task_name: task_dict
for task_name, task_dict in alg_dict.items() if task_name.endswith(task_suffix)}
for alg_name, alg_dict in self.results_dict.items()}, exp_name=self.exp_name)
def filter_task_prefix(self, task_prefix: str) -> 'ExperimentResults':
return ExperimentResults(
{alg_name: {task_name: task_dict
for task_name, task_dict in alg_dict.items() if task_name.startswith(task_prefix)}
for alg_name, alg_dict in self.results_dict.items()}, exp_name=self.exp_name)
def filter_task_names(self, task_names: List[str]) -> 'ExperimentResults':
return ExperimentResults(
{alg_name: {task_name: task_dict
for task_name, task_dict in alg_dict.items() if task_name in task_names}
for alg_name, alg_dict in self.results_dict.items()}, exp_name=self.exp_name)
def filter_alg_names(self, alg_names: Iterable[str]) -> 'ExperimentResults':
return ExperimentResults({alg_name: self.results_dict[alg_name] for alg_name in alg_names
if alg_name in self.results_dict}, exp_name=self.exp_name)
def filter_common_algs(self) -> 'ExperimentResults':
common_alg_names = [alg_name for alg_name, alg_dict in self.results_dict.items()
if set(alg_dict.keys()) == set(self.task_names)]
return self.filter_alg_names(common_alg_names)
def analyze_errors(self):
n_steps = 0
for alg_name, task_results in self.results_dict.items():
for task_name, split_results in task_results.items():
for split_idx, split_result in enumerate(split_results):
for al_stat_idx, al_stat in enumerate(split_result['al_stats']):
n_steps += 1
if 'selection_status' in al_stat and al_stat['selection_status'] is not None:
print(f'Alg {alg_name} failed on step {al_stat_idx} of split {split_idx} of task {task_name}:',
al_stat['selection_status'])
print(f'Total number of DBAL steps across all experiments: {n_steps}')
def analyze_eff_dims(self):
n_larger = 0
n_total = 0
eff_dim_sum_grad = 0.0
eff_dim_sum_ll = 0.0
for alg_name in self.results_dict:
if not alg_name.endswith('_grad_rp-512'):
continue
alg_name_ll = alg_name.replace('_grad_rp-512', '_ll')
for task_name, split_results in self.results_dict[alg_name].items():
if not task_name.endswith('256x16'):
continue
for split_idx, split_result in enumerate(split_results):
for al_stat_idx, al_stat in enumerate(split_result['al_stats']):
eff_dim = al_stat['eff_dim']
al_stat_ll = self.results_dict[alg_name_ll][task_name][split_idx]['al_stats'][al_stat_idx]
eff_dim_ll = al_stat_ll['eff_dim']
n_total += 1
eff_dim_sum_grad += eff_dim
eff_dim_sum_ll += eff_dim_ll
if eff_dim > eff_dim_ll:
n_larger += 1
print(f'eff dim was larger for grad_rp-512 than for ll in {100*n_larger/n_total:g}% of cases')
print(f'avg eff dim for grad_rp-512: {eff_dim_sum_grad/n_total:g}')
print(f'avg eff dim for ll: {eff_dim_sum_ll/n_total:g}')
@staticmethod
def load(exp_name: str) -> 'ExperimentResults':
results_path = Path(custom_paths.get_results_path()) / exp_name
pkl_filename = Path(custom_paths.get_cache_path()) / exp_name / 'results.pkl'
results = None
# first try to load from cached pkl file
if utils.existsFile(pkl_filename) \
and os.path.getmtime(pkl_filename) >= utils.last_mod_time_recursive(str(results_path)):
try:
results = utils.deserialize(pkl_filename)
except Exception as e:
print(f'Received exception while trying to load cached results, '
f'reloading results without cache. Exception: {e}')
# if loading cached data did not work, load from scratch
if results is None:
results = {}
for task_path in results_path.iterdir():
task_name = task_path.name
for alg_path in task_path.iterdir():
alg_name = alg_path.name
alg_results = {}
split_exists = False
for split_path in sorted(alg_path.iterdir(), key=lambda path: int(path.name)):
results_file = split_path / 'results.json'
if utils.existsFile(results_file):
split_results = utils.deserialize(split_path / 'results.json', use_json=True)
if task_name in alg_results:
alg_results[task_name].append(split_results)
else:
alg_results[task_name] = [split_results]
split_exists = True
if split_exists:
if alg_name in results:
results[alg_name].update(alg_results)
else:
results[alg_name] = alg_results
utils.serialize(pkl_filename, results)
return ExperimentResults(results_dict=results, exp_name=exp_name)
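# Illustrative usage sketch (added comment; 'my_experiment' is a hypothetical
# experiment name under the results path):
#
#     results = ExperimentResults.load('my_experiment')
#     results = results.filter_task_suffix('256x16').filter_common_algs()
#     avg_log_rmse = results.get_avg_errors('rmse', use_log=True)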
def get_latex_metric_name(metric_name: str) -> str:
conversion_dict = {'mae': r'MAE',
'rmse': r'RMSE',
'maxe': r'MAXE',
'q95': r'95\% quantile',
'q99': r'99\% quantile'
}
return conversion_dict[metric_name]
def get_latex_ds_name(ds_name: str) -> str:
conversion_dict = {'ct': 'ct_slices',
'kegg_undir_uci': 'kegg_undir',
'query_agg_count': 'query',
'road_network': 'road',
'wecs': 'wec_sydney'}
if ds_name in conversion_dict:
ds_name = conversion_dict[ds_name]
return ds_name.replace('_', r'\_')
def get_latex_task(task: str) -> str:
conversion_dict = {'online_video': r'online\_video',
'sgemm': r'sgemm',
'kegg_undir_uci': r'kegg\_undir',
'stock': r'stock',
'wecs': r'wec\_sydney',
'sarcos': r'sarcos',
'diamonds': r'diamonds',
'fried': r'fried',
'road_network': r'road',
'poker': r'poker',
'mlr_knn_rng': r'mlr\_knn\_rng',
'methane': r'methane',
'protein': r'protein',
'ct': r'ct\_slices',
'query_agg_count': r'query'
}
return conversion_dict[task]
def get_latex_selection_method(selection_method: str) -> str:
conversion_dict = {'random': r'\textsc{Random}',
'fw': r'\textsc{FrankWolfe}',
'kmeanspp': r'\textsc{KMeansPP}',
'lcmd': r'\textsc{LCMD}',
'maxdet': r'\textsc{MaxDet}',
'maxdist': r'\textsc{MaxDist}',
'maxdiag': r'\textsc{MaxDiag}'
}
parts = selection_method.split('-')
method_name = parts[0]
result = conversion_dict[method_name]
if len(parts) > 1:
result += '-TP' if parts[1] == 'tp' else '-P'
if method_name == 'lcmd':
result += ' (ours)'
return result
def get_latex_kernel(base_kernel: str, kernel_transformations: List[Tuple[str, List]], n_models: int) -> str:
conversion_base_kernel_dict = {'grad': r'\mathrm{grad}',
'll': r'\mathrm{ll}',
'linear': r'\mathrm{linear}',
'nngp': r'\mathrm{nngp}'}
steps = [conversion_base_kernel_dict[base_kernel]]
for name, args in kernel_transformations:
if name == 'train':
steps.append(r'\mathcal{X}_{\operatorname{train}}')
elif name == 'scale':
steps.append(r'\operatorname{scale}(\mathcal{X}_{\operatorname{train}})')
elif name == 'rp':
steps.append(r'\operatorname{rp}(' + str(args[0]) + ')')
elif name == 'ens':
steps.append(r'\operatorname{ens}(' + str(n_models) + ')')
elif name == 'acs-rf':
steps.append(r'\operatorname{acs-rf}(' + str(args[0]) + ')')
elif name == 'acs-rf-hyper':
steps.append(r'\operatorname{acs-rf-hyper}(' + str(args[0]) + ')')
elif name == 'acs-grad':
steps.append(r'\operatorname{acs-grad}')
else:
raise ValueError(f'Unknown kernel transformation "{name}"')
return '$k_{' + r' \to '.join(steps) + '}$'
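# Example output of get_latex_kernel (added comment):
#
#     get_latex_kernel('grad', [('rp', [512])], n_models=1)
#     # -> '$k_{\mathrm{grad} \to \operatorname{rp}(512)}$'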
def save_latex_table_all_algs(results: ExperimentResults, filename: str):
# creates a table for all algorithms; all metrics are averaged over all data sets
# Selection method | Kernel | MAE | RMSE | 95% | 99% | MAXE | avg. time
ds_names = list({'_'.join(task_name.split('_')[:-1]) for task_name in results.task_names})
all_alg_names = results.alg_names
mae = results.get_avg_errors('mae')
rmse = results.get_avg_errors('rmse')
q95 = results.get_avg_errors('q95')
q99 = results.get_avg_errors('q99')
maxe = results.get_avg_errors('maxe')
kernel_time = results.get_avg_al_times('kernel_time').select_split(9)
selection_time = results.get_avg_al_times('selection_time').select_split(9)
metrics_and_names = [('MAE', mae), ('RMSE', rmse), (r'95\%', q95), (r'99\%', q99), ('MAXE', maxe),
('kernel_time', kernel_time), ('selection_time', selection_time)]
alg_metrics = {alg_name: {name: np.mean([np.mean(metric.results_dict[alg_name][ds_name + '_256x16'])
for ds_name in ds_names])
for name, metric in metrics_and_names} for alg_name in all_alg_names}
n_digits = 3
best_alg_names_per_metric = {}
for metric_name, _ in metrics_and_names:
rounded_results = [round(alg_metrics[alg_name][metric_name], n_digits) for alg_name in all_alg_names]
min_result = np.min(rounded_results)
best_alg_names_per_metric[metric_name] = [all_alg_names[i] for i in range(len(all_alg_names))
if rounded_results[i] == min_result]
table_rows = {}
table_header = '\\begin{tabular}{cccccccc}\n' + \
' & '.join([r'Selection method', r'Kernel', r'MAE', r'RMSE', r'95\%', r'99\%',
r'MAXE', r'avg.\ time [$s$]']) + '\\\\\n\\hline\n'
table_footer = '\n\\end{tabular}'
# raw_sel_order = {'random': 0, 'maxdiag': 1, 'maxdet': 2, 'fw': 3, 'maxdist': 4, 'kmeanspp': 5, 'lcmd': 6}
sel_name_order = ['random', 'maxdiag', 'maxdet', 'fw', 'maxdist', 'kmeanspp', 'lcmd']
raw_sel_names = {}
for name_alg in all_alg_names:
config = next(iter(results.results_dict[name_alg].values()))[0]['config']
base_kernel = config['base_kernel']
kernel_transformations = config['kernel_transforms']
n_models = config.get('n_models', 1)
raw_sel_name = name_alg.split('_')[1].split('-')[0]
sel_name = get_latex_selection_method(name_alg.split('_')[1])
raw_sel_names[name_alg] = raw_sel_name
kernel_name = get_latex_kernel(base_kernel, kernel_transformations, n_models=n_models)
if raw_sel_name == 'random':
kernel_name = '---'
alg_results = []
for metric_name, _ in metrics_and_names[:5]:
value_str = f'{alg_metrics[name_alg][metric_name]:5.3f}'
if name_alg in best_alg_names_per_metric[metric_name]:
value_str = r'\textbf{' + value_str + r'}'
alg_results.append(value_str)
alg_time = alg_metrics[name_alg]['kernel_time'] + alg_metrics[name_alg]['selection_time']
row_strs = [sel_name, kernel_name] + alg_results + [f'{alg_time:5.3f}']
table_rows[name_alg] = ' & '.join(row_strs)
sub_groups = [[alg_name for alg_name in all_alg_names if raw_sel_names[alg_name] == sel_name] for sel_name in sel_name_order]
sub_groups = [sub_group for sub_group in sub_groups if len(sub_group) > 0]
for i in range(len(sub_groups)):
sub_groups[i].sort(key=lambda alg_name: alg_metrics[alg_name]['RMSE'])
sub_group_strs = ['\\\\\n'.join([table_rows[alg_name] for alg_name in sub_group]) for sub_group in sub_groups]
result_str = table_header + ' \\\\\n\\hline\n'.join(sub_group_strs) + table_footer
utils.writeToFile(Path(custom_paths.get_plots_path()) / results.exp_name / filename, result_str)
def save_latex_table_data_sets(results: ExperimentResults, filename: str, use_log: bool = True,
use_last_error: bool = False, metric_name: str = 'rmse'):
ds_names = list({'_'.join(task_name.split('_')[:-1]) for task_name in results.task_names})
ds_names.sort()
alg_names = results.alg_names
if use_last_error:
rmse = results.get_last_errors(metric_name, use_log=use_log)
else:
rmse = results.get_avg_errors(metric_name, use_log=use_log)
alg_metrics = {alg_name: {ds_name: np.mean(rmse.results_dict[alg_name][ds_name + '_256x16'])
for ds_name in ds_names} for alg_name in alg_names}
n_digits = 3
best_alg_names_per_data_set = {}
for | |
'0004' '0D' '0004' '10' '16',
new_code = ('..' '....' '..' '..' '........' '..' '........'
+ NOP * 10 ),
),
Obj_Patch(
#offsets = [0x0001BAF8],
# Slightly different version of above.
ref_code = '0D' '0011' '02' '82' '........' '34' '........'
# Replace this chunk.
'01' '0D' '0012' '85' '........' '24'
'06' '18A6' '16' '0001' '24' '02',
new_code = ('..' '....' '..' '..' '........' '..' '........'
+ NOP * 10 ),
),
]
# Apply the patches.
Apply_Obj_Patch_Group(patch_list)
return
@File_Manager.Transform_Wrapper('L/x3story.obj')
def Keep_TLs_Hired_When_Empty():
'''
When a hired TL places its last station, it will remain hired until
the player explicitly releases it instead of being automatically dehired.
'''
'''
Code to edit is in SHIP_TL.BuildStation, near the bottom:
pop
push 0
call88 SHIP_TL.GetNumWareStation
push 0
if SP[0]<>SP[1] then push 0 else push 1
if SP[0]=0 then jump L000DAA94
push 0
push 1
call88 SHIP.SetHired
pop
Can either replace the call with nops, or tweak the arg from 0 to 1.
Both should work okay. Nops might be safer in case there are
side effects to setting a hired ship to hired again.
'''
patch = Obj_Patch(
#offsets = [0x000DAA8C],
# Only this first bit is the SetHired call, with args and
# return pop.
ref_code = '01' '02' '88' '........' '24'
'24' '70' '02' '14' '0003' '23' '0002' '83',
# Swap to nops.
new_code = NOP * 8,
)
Apply_Obj_Patch(patch)
return
@File_Manager.Transform_Wrapper('L/x3story.obj')
def Preserve_Captured_Ship_Equipment():
'''
Preserves equipment of captured ships. This is expected to affect
bailed ships, marine captured ships, the "pilot eject from ship" script
command, and the "force pilot to leave ship" director command.
'''
'''
Code to edit is in SHIP.DowngradeTakeOver.
This is called from several places, so the edit will be in the function
to return early (as opposed to filling all call points with Nops).
The script command calls SHIP.LeaveShip, which calls this function.
Small ship bailing also calls SHIP.LeaveShip.
Marines boarding calls this function directly.
Unclear on where the mission director command is handled, though in
game test suggests it is affected as well.
'''
patch = Obj_Patch(
#offsets = [0x000C86B2],
# Code section following function entry.
# This starts with 0 items on the stack.
ref_code = '06' '03E8'
'02'
'82' '........'
'24'
'01'
'01'
'14' '0002'
'24'
'0D' '0001'
'05' '08'
'02'
'82' '........'
'5C'
'34' '........'
'32' '........',
# Replace with returning a 0.
# For good form, this will also replace the whole initial delay
# call (up through the '24' popping the return value) so that
# the stack looks okay when disassembled.
new_code = PUSH_0 + RETURN + NOP * 8,
)
Apply_Obj_Patch(patch)
return
@File_Manager.Transform_Wrapper('L/x3story.obj')
def Force_Infinite_Loop_Detection(
operation_limit = 1000000
):
'''
Use with caution.
Turns on infinite loop detection in the script engine for all scripts.
Once turned on, loop detection will stay on for running scripts
even if this transform is removed.
This is intended for limited debug usage, and should preferably
not be applied to a main save file to avoid bugs when scripts
are ended due to false positives.
An infinite loop is normally defined as at least 10k-20k operations
occurring on the same time step (exact amount depending on alignment).
False positives will occur when a script intentionally runs this
many operations at once. This limit will be raised to reduce
false positives while this transform is in effect.
* operation_limit
- Int, the number of operations between two infinite loop checks,
between 10000 (normal script engine value) and 2 billion.
- Default is 1 million. This corresponds to less than 1 second
in a test infinite looping script, and was sufficient to avoid
false positives in brief tests.
'''
'''
Added by request.
Code to edit is in SCRIPT.__runScript.
Stack location 13 holds an operation counter, that counts down.
If this variable is set to 0, loop detection is disabled. In normal
operation, the count starts from 10k and decrements down to 0,
at which point a time check is performed (seeing if the time is the
same as the last time 0 was reached).
The initial code of this section checks the counter for being 0, and
skips the section if so. To force detection on, this check can
be removed, such that the count will decrement to -1. This should be
okay, since the condition checks for the count being <1, and ints
in the KC are treated as signed.
Note: doing just this change (entering check code with a count of 0)
will cause an immediate false positive for new scripts.
When a script is initialized, the time of the last loop check is
initialized to the script start time. The first check of the counter
will see it is <1, which will pull the time again and compare to the
stored script start time, causing a false positive since they match.
To cover for this, the initialization code on new scripts should
also be modified, eg. putting in some safer value for the initial
time.
If initial time is set to 0, this will cause false positives
when a new game is created, but should be fine for debugging a
given save.
If initial time is set to 1, should be slightly safer against
false positives on a new game as well.
To further reduce false positives on mods, the 10k limit can be
changed.
When 10k is set, it is using pushw, a 16-bit value.
Since KC ints are signed, this count can only be increased to 32k.
The 10k setting in the 'infinite loop detection enabled' script
command can be left as-is; scripts that use that command were
presumably tested against the 10k limit and okay.
To go beyond 32k, code would need to be moved to make room for
an extra 2 bytes for a wider int.
Conveniently, some earlier bytes were freed up when bypassing
the 0 check, so the intervening code can be moved up to make
this room.
This adjustment allows up to 2 billion ops as the limit.
A good value to use would need some playing around, and the
value could be put into the transform args.
Note: support for byte deletions/insertions added to the
patcher, so that the code can be shifted up without requiring
copying the original code (and jump address) in a way that
ties it to vanilla AP 3.3.
Test 1:
Set to skip the 0 check, without changing initial time value.
Result: every script failed.
Test 2:
Skip 0 check, set initial time value to 1.
XRM with mods had 6 scripts fail on startup, all of them apparently mod scripts at a glance.
Test 3:
Skip 0 check, initial time of 1, 10k raised to 32k.
Now only 4 scripts fail (xrm ship chooser and keymap, ok trader, SCS).
This will be considered satisfactory for initial release.
Test 4:
As above, but with a 100k op limit.
This has 2 xrm scripts fail.
Test 5:
Bumping to 1 million op limit.
No script failures in this case.
Note: a similar edit could be inserted to force disable infinite
loop detection, always skipping the section as if the count is 0,
eg. by changing the 'push SP[3]' to 'push 0'.
'''
# Convert the input arg into a useable hex string.
# This will cap at max unsigned int, and will floor at 10k.
# Just estimate max as 2 billion for now; should be good enough.
operation_limit = max(10000, min( 2000000000, int(operation_limit) ))
# Convert to hex string, 4 bytes.
operation_limit_hex = Int_To_Hex_String(operation_limit, 4)
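# Example (added comment; assumes Int_To_Hex_String emits big-endian bytes in
# the same style as the surrounding patch strings): the default limit of
# 1,000,000 becomes the 4-byte hex string '000F4240'.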
# Force entry even when count is 0.
# Change the max count to a higher number.
entry_dynamic_patch = Obj_Patch(
#offsets = [0x00038429],
# Code starts off with pushing the count and comparing
# to 0, | |
0, 11)
else:
assert False
def test_analyse_sig_not_strict_with_on_error_handler_fail_slow(self):
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print(
'f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
raise _Error('f')
def f_on_error(exc, errors):
self._f_on_error(exc, errors)
print('f_on_error exc: {0}'.format(str(exc)))
print('f_on_error errors: {0}'.format(str(errors)))
raise _Error('on_error')
@analyse_sig(
f_a, IGNORE, f_c,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=False,
_on_error_=f_on_error,
_strict_=False,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Error as e:
self.assertEqual(e.message, 'on_error')
self.assertEqual(self._f_on_error.call_count, 1)
cargs = self._f_on_error.call_args_list[0][0]
self.assertIsInstance(cargs[0], _Error)
self.assertEqual(cargs[0].message, 'a')
self.assertEqual(len(cargs[1]), 3)
err = cargs[1][0]
self.assertIsInstance(err, PositionalError)
self.assertEqual(err.func_name, 'f_a')
self.assertEqual(err.index, 0)
self.assertEqual(err.name, 'a')
self.assertEqual(err.value, 11)
err = cargs[1][1]
self.assertIsInstance(err, PositionalError)
self.assertEqual(err.func_name, 'f_c')
self.assertEqual(err.index, 2)
self.assertEqual(err.name, 'c')
self.assertEqual(err.value, 33)
err = cargs[1][2]
self.assertIsInstance(err, KeywordError)
self.assertEqual(err.func_name, 'f_f')
self.assertEqual(err.func, f_f)
self.assertEqual(err.name, 'f')
self.assertEqual(err.value, NOVALUE)
self.assertEqual(err.default_value, '2')
self._f_f.assert_called_once_with('f', 5, NOVALUE, '2')
self._f_c.assert_called_once_with('c', 2, 33)
self._f_a.assert_called_once_with('a', 0, 11)
else:
assert False
def test_analyse_sig_not_strict_with_on_error_handler_fail_fast(self):
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print(
'f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
raise _Error('f')
def f_on_error(exc, errors):
self._f_on_error(exc, errors)
print('f_on_error exc: {0}'.format(str(exc)))
print('f_on_error errors: {0}'.format(str(errors)))
raise _Error('on_error')
@analyse_sig(
f_a, IGNORE, f_c,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=True,
_on_error_=f_on_error,
_strict_=False,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Error as e:
self.assertEqual(e.message, 'on_error')
self.assertEqual(self._f_on_error.call_count, 1)
cargs = self._f_on_error.call_args_list[0][0]
self.assertIsInstance(cargs[0], _Error)
self.assertEqual(cargs[0].message, 'a')
self.assertEqual(len(cargs[1]), 1)
err = cargs[1][0]
self.assertIsInstance(err, PositionalError)
self.assertEqual(err.func_name, 'f_a')
self.assertEqual(err.index, 0)
self.assertEqual(err.name, 'a')
self.assertEqual(err.value, 11)
self._f_f.assert_not_called()
self._f_c.assert_not_called()
self._f_a.assert_called_once_with('a', 0, 11)
else:
assert False
class AnalyseSigWithDefaultTestCase(unittest.TestCase):
def setUp(self):
self._f_all = Mock()
self._f_a = Mock()
self._f_c = Mock()
self._f_f = Mock()
self._f_on_error = Mock()
def test_analyse_sig_strict_with_on_error_handler_no_errors(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
print('f_all: name: {0}, value: {1}, arg_spec: {2}'.format(name, value, arg_spec))
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print('f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
def f_on_error(exc, errors):
self._f_on_error(exc, errors)
print('f_on_error exc: {0}'.format(str(exc)))
print('f_on_error errors: {0}'.format(str(errors)))
@analyse_sig(
f_a, IGNORE, f_c,
_default_=f_all,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=False,
_on_error_=f_on_error,
_strict_=True,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
self.assertRaises(_Success, voo, 11, 22, 33, 44, e=99)
self._f_on_error.assert_not_called()
self._f_f.assert_called_once_with('f', 5, NOVALUE, '2')
self._f_c.assert_called_once_with('c', 2, 33)
self._f_a.assert_called_once_with('a', 0, 11)
check_calls(
self,
self._f_all.call_args_list,
[
('b', 22),
('e', 99),
('g', {3: 4}),
('kwargs', {}),
]
)
def test_analyse_sig_strict_without_on_error_handler_fail_slow_no_errors(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
print('f_all: name: {0}, value: {1}, arg_spec: {2}'.format(name, value, arg_spec))
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print(
'f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
@analyse_sig(
f_a, IGNORE, f_c,
_default_=f_all,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=False,
_strict_=True,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Success as e:
self._f_on_error.assert_not_called()
self._f_f.assert_called_once_with('f', 5, NOVALUE, '2')
self._f_c.assert_called_once_with('c', 2, 33)
self._f_a.assert_called_once_with('a', 0, 11)
check_calls(
self,
self._f_all.call_args_list,
[
('b', 22),
('e', 99),
('g', {3: 4}),
('kwargs', {}),
]
)
else:
assert False
def test_analyse_sig_strict_without_on_error_handler_fail_slow(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
print('f_all: name: {0}, value: {1}, arg_spec: {2}'.format(name, value, arg_spec))
raise _Error('all')
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print(
'f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
raise _Error('f')
@analyse_sig(
f_a, IGNORE, f_c,
_default_=f_all,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=False,
_strict_=True,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Error as e:
errors = e._errors_
self.assertIsInstance(errors, list)
self.assertEqual(len(errors), 7)
error = errors[0]
self.assertIsInstance(error, PositionalError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'a')
self.assertEqual(error.value, 11)
self.assertEqual(error.func, f_a)
self.assertEqual(error.func_name, 'f_a')
self.assertEqual(error.name, 'a')
error = errors[1]
self.assertIsInstance(error, PositionalError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'c')
self.assertEqual(error.value, 33)
self.assertEqual(error.func, f_c)
self.assertEqual(error.func_name, 'f_c')
self.assertEqual(error.name, 'c')
error = errors[2]
self.assertIsInstance(error, KeywordError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'f')
self.assertEqual(error.value, NOVALUE)
self.assertEqual(error.func, f_f)
self.assertEqual(error.func_name, 'f_f')
self.assertEqual(error.name, 'f')
self.assertEqual(error.default_value, '2')
error = errors[3]
self.assertIsInstance(error, PositionalError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'all')
self.assertEqual(error.func_name, 'f_all')
self.assertEqual(error.value, 22)
self.assertEqual(error.func, f_all)
self.assertEqual(error.name, 'b')
self.assertEqual(error.index, 1)
error = errors[4]
self.assertIsInstance(error, KeywordError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'all')
self.assertEqual(error.func_name, 'f_all')
self.assertEqual(error.value, 99)
self.assertEqual(error.func, f_all)
self.assertEqual(error.name, 'e')
self.assertEqual(error.default_value, 99)
error = errors[5]
self.assertIsInstance(error, KeywordError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'all')
self.assertEqual(error.func_name, 'f_all')
self.assertEqual(error.value, {3: 4})
self.assertEqual(error.func, f_all)
self.assertEqual(error.name, 'g')
self.assertEqual(error.default_value, {3: 4})
error = errors[6]
self.assertIsInstance(error, KeywordError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'all')
self.assertEqual(error.func_name, 'f_all')
self.assertEqual(error.value, {})
self.assertEqual(error.func, f_all)
self.assertEqual(error.name, 'kwargs')
self.assertEqual(error.default_value, {})
self._f_on_error.assert_not_called()
self._f_f.assert_called_once_with('f', 5, NOVALUE, '2')
self._f_c.assert_called_once_with('c', 2, 33)
self._f_a.assert_called_once_with('a', 0, 11)
check_calls(
self,
self._f_all.call_args_list,
[
('b', 22),
('e', 99),
('g', {3: 4}),
('kwargs', {}),
]
)
else:
assert False
def test_analyse_sig_strict_without_on_error_handler_fail_fast(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
print('f_all: name: {0}, value: {1}, arg_spec: {2}'.format(
name, value, arg_spec))
raise _Error('all')
def f_a(name, index, value):
self._f_a(name, index, value)
print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
print(
'f_f: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
name, index, called_with_value, default_value))
raise _Error('f')
@analyse_sig(
f_a, IGNORE, f_c,
_default_=f_all,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=True,
_strict_=True,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Error as e:
errors = e._errors_
self.assertIsInstance(errors, list)
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertIsInstance(error, PositionalError)
self.assertIsInstance(error.error, _Error)
self.assertEqual(error.error.message, 'a')
self.assertEqual(error.name, 'a')
self.assertEqual(error.value, 11)
self.assertEqual(error.index, 0)
self.assertEqual(error.func_name, 'f_a')
self.assertEqual(error.func, f_a)
self._f_on_error.assert_not_called()
self._f_f.assert_not_called()
self._f_c.assert_not_called()
self._f_a.assert_called_once_with('a', 0, 11)
self._f_all.assert_not_called()
else:
assert False
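# Note added for clarity (inferred from the assertions in the tests above, not
# from library documentation): with _fail_fast_=True the first handler that
# raises aborts the analysis, so exactly one error is collected and the
# remaining handlers are never invoked; with _fail_fast_=False every handler
# runs and all raised errors are gathered into the exception's _errors_ list,
# or passed to _on_error_ when such a handler is supplied.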
# FIXME: ENHANCE TESTS BELOW HERE.
def test_analyse_sig_strict_with_on_error_handler_fail_slow(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
# print('f_all: name: {0}, index:{1}, value: {2}'.format(name, value, arg_spec))
raise _Error('all')
def f_a(name, index, value):
self._f_a(name, index, value)
# print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
# print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
# print(
# 'f_c: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
# name, index, called_with_value, default_value))
raise _Error('f')
def f_on_error(exc, errors):
self._f_on_error(exc, errors)
# print('f_on_error exc: {0}'.format(str(exc)))
# print('f_on_error errors: {0}'.format(str(errors)))
raise _Error('on_error')
@analyse_sig(
f_a, IGNORE, f_c,
_default_=f_all,
c=IGNORE, d=IGNORE, f=f_f,
_fail_fast_=False,
_on_error_=f_on_error,
_strict_=True,
)
def voo(a, b, c, d, e=1, f='2', g={3: 4}, **kwargs):
raise _Success()
try:
voo(11, 22, 33, 44, e=99)
except _Error as e:
self.assertEqual(e.message, 'on_error')
self.assertEqual(self._f_on_error.call_count, 1)
self._f_f.assert_called_once_with('f', 5, NOVALUE, '2')
self._f_c.assert_called_once_with('c', 2, 33)
self._f_a.assert_called_once_with('a', 0, 11)
self.assertEqual(self._f_all.call_count, 4)
check_calls(
self,
self._f_all.call_args_list,
[
('b', 22),
('e', 99),
('g', {3: 4}),
('kwargs', {}),
]
)
else:
assert False
def test_analyse_sig_strict_with_on_error_handler_fail_fast(self):
def f_all(name, value, arg_spec):
self._f_all(name, value, arg_spec)
# print('f_all: name: {0}, index:{1}, value: {2}'.format(name, value, arg_spec))
raise _Error('all')
def f_a(name, index, value):
self._f_a(name, index, value)
# print('f_a: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('a')
def f_c(name, index, value):
self._f_c(name, index, value)
# print('f_c: name: {0}, index:{1}, value: {2}'.format(name, index, value))
raise _Error('c')
def f_f(name, index, called_with_value, default_value):
self._f_f(name, index, called_with_value, default_value)
# print(
# 'f_c: name: {0}, index:{1}, called_with_value: {2}, default_value: {3}'.format(
# name, index, called_with_value, default_value))
| |
'''
GUI utilities.
'''
from __future__ import with_statement
from __future__ import division
import config
import functools
import wx, struct
from wx import GetTopLevelWindows, Point, Size
from logging import getLogger; log = getLogger('gui.util')
from traceback import print_exc
from PIL import Image, ImageDraw, ImageFont
from ConfigParser import ConfigParser
import sys
from collections import defaultdict
from time import clock as time_clock
from gui.toolbox.monitor import Monitor
import cgui, new
import simplejson as json
import os.path
# adds methods to Bitmap, Image, etc...
import imagefx #@UnusedImport
wxMSW = 'wxMSW' in wx.PlatformInfo
# colors in skin YAML can start with this string
color_prefix = '0x'
def __repr__(self):
'''wxBitmap repr showing its .path, if it has one.'''
try:
path = getattr(self, 'path', '')
return '<wxBitmap %d%s>' % (id(self), (' '+os.path.normpath(path)) if path else '')
except Exception:
return '<wxBitmap %d>' % id(self)
wx.Bitmap.__repr__ = __repr__
del __repr__
# convenience method for removing all of a wxMenu's items
wx.Menu.RemoveAllItems = lambda self: [self.RemoveItem(item) for item in self.GetMenuItems()]
wx.Window.Tip = property(lambda self: self.GetToolTip().GetTip(),
lambda self, tip: self.SetToolTip(wx.ToolTip(tip)))
def check_destroyed(ctrl):
if wx.IsDestroyed(ctrl):
code = sys._getframe(1).f_code
print >> sys.stderr, 'WARNING: destroyed object being used (%s in %s:%d)' % \
(code.co_name, code.co_filename, code.co_firstlineno)
return True
return False
################################################################################
if wxMSW:
import ctypes
from ctypes import byref, WinError
from ctypes.wintypes import UINT, POINT, RECT
user32 = ctypes.windll.user32
class WINDOWPLACEMENT(ctypes.Structure):
_fields_ = [('length', UINT),
('flags', UINT),
('showCmd', UINT),
('ptMinPosition', POINT),
('ptMaxPosition', POINT),
('rcNormalPosition', RECT)]
def GetWindowPlacement(win):
hwnd = win.GetHandle()
p = WINDOWPLACEMENT()
p.length = ctypes.sizeof(WINDOWPLACEMENT)
if not user32.GetWindowPlacement(hwnd, byref(p)):
raise WinError()
return windowplacement_todict(p)
def SetWindowPlacement(win, d):
d2 = GetWindowPlacement(win)
d2['rcNormalPosition'] = d['rcNormalPosition']
d2['showCmd'] |= 0x0020 # SWP_FRAMECHANGED
d = d2
p = windowplacement_fromdict(d)
if not user32.SetWindowPlacement(win.Handle, byref(p)):
raise WinError()
def windowplacement_todict(p):
return dict(flags = p.flags,
showCmd = p.showCmd,
ptMinPosition = (p.ptMinPosition.x, p.ptMinPosition.y),
ptMaxPosition = (p.ptMaxPosition.x, p.ptMaxPosition.y),
rcNormalPosition = (p.rcNormalPosition.left, p.rcNormalPosition.top, p.rcNormalPosition.right, p.rcNormalPosition.bottom))
def windowplacement_fromdict(d):
p = WINDOWPLACEMENT()
p.length = ctypes.sizeof(WINDOWPLACEMENT)
p.showCmd = int(d['showCmd'])
p.ptMinPosition = POINT()
p.ptMinPosition.x = int(d['ptMinPosition'][0])
p.ptMinPosition.y = int(d['ptMinPosition'][1])
p.ptMaxPosition = POINT()
p.ptMaxPosition.x = int(d['ptMaxPosition'][0])
p.ptMaxPosition.y = int(d['ptMaxPosition'][1])
p.rcNormalPosition = RECT()
p.rcNormalPosition.left, p.rcNormalPosition.top, p.rcNormalPosition.right, p.rcNormalPosition.bottom = d['rcNormalPosition']
return p
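# Usage sketch (illustrative, not part of the original module): these two
# helpers are typically paired to persist and restore window geometry.
# Assuming `frame` is a wx top-level window and using the `json` module
# imported above:
#     placement = GetWindowPlacement(frame)          # plain dict of ints/tuples
#     saved = json.dumps(placement)                  # store e.g. in preferences
#     SetWindowPlacement(frame, json.loads(saved))   # restore on next startup
# Note that SetWindowPlacement only takes 'rcNormalPosition' from the saved
# dict; the remaining fields are re-read from the live window.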
################################################################################
def DeleteRange(textctrl, b, e):
if b != e:
with textctrl.Frozen():
textctrl.Remove(b, e)
textctrl.InsertionPoint = b
def DeleteWord(textctrl):
'''
Deletes the last word (or part of word) at the cursor.
Commonly bound to Ctrl+Backspace.
TODO: ignores punctuation--like
this.is.a.dotted.word[CTRL+BACKSPACE]
will delete the whole line. is that what we want?
'''
i = textctrl.InsertionPoint
s = textctrl.Value
if not s or i < 1: return
e = i
while i != 0 and s[i-1] == ' ':
i -= 1
b = s.rfind(' ', 0, i) + 1 if i != 0 else 0
if b == -1:
b = 0
DeleteRange(textctrl, b, e)
def DeleteRestOfLine(textctrl):
'''
Deletes from the cursor until the end of the line.
Emulates Control+K from many Linux and Mac editors.
'''
i = textctrl.InsertionPoint
# find the next newline
j = textctrl.Value[i:].find('\n')
j = textctrl.LastPosition if j == -1 else j + i
# if the cursor is on the last character of the line before a newline,
# just delete the newline
if i == j and j + 1 <= textctrl.LastPosition:
j += 1
DeleteRange(textctrl, i, j)
if 'wxMac' in wx.PlatformInfo:
AutoDC = wx.PaintDC
else:
AutoDC = wx.AutoBufferedPaintDC
# def to_icon(bitmap):
# return wx.IconFromBitmap(bitmap.WXB)
def to_icon(bitmap, size = None):
if isinstance(bitmap, wx.Image):
bitmap = wx.BitmapFromImage(bitmap)
bitmap = bitmap.WXB
return wx.IconFromBitmap(bitmap.Resized(size) if size is not None else bitmap)
wx.Rect.Pos = new.instancemethod(cgui.RectPosPoint, None, wx.Rect)
from cgui import Subtract as cgui_Subtract
def Rect_Subtract(r, left = 0, right = 0, up = 0, down = 0):
r.x, r.y, r.width, r.height = cgui_Subtract(r, left, right, up, down)
return r
def Rect_SubtractCopy(r, left = 0, right = 0, up = 0, down = 0):
return cgui_Subtract(r, left, right, up, down)
wx.Rect.Subtract = new.instancemethod(Rect_Subtract, None, wx.Rect)
wx.Rect.SubtractCopy = new.instancemethod(Rect_SubtractCopy, None, wx.Rect)
del Rect_Subtract
wx.Rect.AddMargins = new.instancemethod(cgui.RectAddMargins, None, wx.Rect)
# methods for getting/setting "always on top" state for top level windows
#
def GetOnTop(toplevelwin):
'Returns True if this window is always on top, False otherwise.'
s = toplevelwin.WindowStyleFlag
return bool(s & wx.STAY_ON_TOP)
def SetOnTop(toplevelwin, ontop):
'''Sets this window's "always on top" state.'''
if ontop:
flag = toplevelwin.WindowStyleFlag | wx.STAY_ON_TOP
else:
flag = toplevelwin.WindowStyleFlag & ~wx.STAY_ON_TOP
toplevelwin.WindowStyleFlag = flag
def ToggleOnTop(toplevelwin):
toplevelwin.OnTop = not toplevelwin.OnTop
wx.TopLevelWindow.OnTop = property(GetOnTop, SetOnTop)
wx.TopLevelWindow.ToggleOnTop = ToggleOnTop
class FocusTimer(wx.Timer):
def __init__(self, draw = False):
wx.Timer.__init__(self)
self.draw = draw
def Notify(self):
c = self.last_focused = wx.Window.FindFocus()
r = c.ScreenRect if c is not None else wx.Rect()
print 'wx.Window.FindFocus() ->', c, 'at', r
if self.draw:
dc = wx.ScreenDC()
p = wx.RED_PEN
p.SetWidth(4)
dc.SetPen(p)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangleRect(r)
def trackfocus(update_ms = 2000, draw = False):
t = FocusTimer(draw = draw)
t.Start(update_ms)
return t
#
# patch hcenter and vcenter methods into wxRect for centering images/rectangles
#
def VCenter(rect, img):
return rect.y + rect.height / 2 - img.Height / 2
def HCenter(rect, img):
return rect.x + rect.width / 2 - img.Width / 2
def VCenterH(rect, h):
return rect.y + rect.height / 2 - h / 2
def HCenterW(rect, w):
return rect.x + rect.width / 2 - w / 2
def CenterPoint(rect, pt):
w, h = pt
return rect.x + rect.HCenterW(w), rect.y + rect.VCenterH(h)
wx.Rect.VCenter = VCenter
wx.Rect.HCenter = HCenter
wx.Rect.VCenterH = VCenterH
wx.Rect.HCenterW = HCenterW
wx.Rect.CenterPoint = CenterPoint
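# Illustrative example (assumed caller, not from the original source): the
# patched helpers are meant for centering an image inside a rectangle, e.g.
# in a paint handler with a DC `dc`, a wx.Rect `rect` and a bitmap `bmp`:
#     dc.DrawBitmap(bmp, rect.HCenter(bmp), rect.VCenter(bmp), True)
# HCenter/VCenter read the image's Width/Height attributes, while
# HCenterW/VCenterH take a raw width or height instead.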
Image.Image.Width = property(lambda image: image.size[0])
Image.Image.Height = property(lambda image: image.size[1])
class progress_dialog(object):
'Threadsafe progress dialog.'
def __init__(self, message, title):
self.stopped = False
# callback to the GUI thread for dialog creation
wx.CallAfter(self.create_dialog, message, title)
def create_dialog(self, message, title):
# dialog will not have a close button or system menu
self.dialog = d = wx.Dialog(None, -1, title, style = wx.CAPTION)
d.Sizer = s = wx.BoxSizer(wx.VERTICAL)
self.gauge = wx.Gauge(d, -1, style = wx.GA_HORIZONTAL)
s.Add(wx.StaticText(d, -1, message), 0, wx.EXPAND | wx.ALL, 10)
s.Add(self.gauge, 0, wx.EXPAND | wx.ALL, 10)
self.timer = wx.PyTimer(self.on_timer)
self.timer.StartRepeating(300)
self.gauge.Pulse()
s.Layout()
d.Fit()
d.CenterOnScreen()
d.Show()
def on_timer(self):
if self.stopped:
self.dialog.Destroy()
self.timer.Stop()
del self.dialog
del self.timer
else:
self.gauge.Pulse()
def stop(self):
self.stopped = True
def yes_no_prompt(title, text, default = True):
flags = wx.NO_DEFAULT * (not default)
result = wx.MessageBox(text, title, style = wx.YES_NO | flags)
if result == wx.YES:
return True
elif result == wx.NO:
return False
else:
return None
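# Usage sketch (hypothetical caller, added for illustration): the prompt is
# three-valued - True for Yes, False for No, None if the dialog is dismissed
# any other way - so callers should check for None explicitly:
#     answer = yes_no_prompt('Quit', 'Really quit?', default = False)
#     if answer is None:
#         pass            # treat as cancel
#     elif answer:
#         pass            # user confirmed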
def ShowImage(b, title = ''):
'''
Displays the given wxImage, wxBitmap, PIL image, or skin region on screen
in a frame.
'''
title = title + ' ' + repr(b)
b = getattr(b, 'WXB', b)
f = wx.Frame(None, title = title, style = wx.DEFAULT_FRAME_STYLE | wx.FULL_REPAINT_ON_RESIZE)
if isinstance(b, wx.Bitmap):
f.SetClientRect((0, 0, b.Width, b.Height))
def paint_bitmap(e):
dc = wx.AutoBufferedPaintDC(f)
dc.Brush, dc.Pen = wx.CYAN_BRUSH, wx.TRANSPARENT_PEN
dc.DrawRectangleRect(f.ClientRect)
dc.DrawBitmap(b, 0, 0, True)
f.Bind(wx.EVT_PAINT, paint_bitmap)
f.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
elif isinstance(b, wx.Colour):
f.SetBackgroundColour(b)
f.SetClientRect((0, 0, 200, 200))
else:
f.SetClientRect((0, 0, 200, 200))
def paint_skinregion(e):
dc = wx.AutoBufferedPaintDC(f)
dc.Brush, dc.Pen = wx.WHITE_BRUSH, wx.TRANSPARENT_PEN
dc.DrawRectangleRect(f.ClientRect)
b.Draw(dc, f.ClientRect)
f.Bind(wx.EVT_PAINT, paint_skinregion)
f.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
f.CenterOnScreen()
f.Show()
# allow ".Show()" on any image or color object to display it on screen
Image.Image.Show = wx.Image.Show = wx.Bitmap.Show = wx.Icon.Show = wx.Colour.Show = ShowImage
def wx_prop(attrname, field='Value', set_cleanup=lambda x:x, get_cleanup=lambda x:x):
def set(self, val):
setattr(getattr(self, attrname), field, set_cleanup(val))
def get(self):
return get_cleanup(getattr(getattr(self, attrname), field))
return property(get, set)
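# Illustrative sketch (assumed usage, not from the original source): wx_prop
# builds a property that proxies a field of a child control, with optional
# conversion on the way in and out. For a dialog holding
# `self.name_txt = wx.TextCtrl(...)` and `self.age_txt = wx.TextCtrl(...)`:
#     class MyDialog(wx.Dialog):
#         Name = wx_prop('name_txt')                                  # raw .Value
#         Age = wx_prop('age_txt', get_cleanup = int, set_cleanup = str)
# so `dlg.Name` and `dlg.Age` read and write the controls transparently.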
def TextEntryDialog(message, caption = '', default_value = '', password = False, limit=1024):
style = wx.OK | wx.CANCEL | wx.CENTRE
if password:
style |= wx.TE_PASSWORD
TED = wx.TextEntryDialog(None, message, caption, default_value, style = style)
return TED
def GetTextFromUser_FixInput(val, limit):
if limit is not None:
if len(val) > limit:
print >>sys.stderr, "Data is %d bytes long, cutting to %d bytes" % (len(val), limit)
val = val[:limit]
return val
def GetTextFromUser(message, caption = '', default_value = '', password = False, limit=1024):
try:
TED = TextEntryDialog(message, caption, default_value, password, limit)
id = TED.ShowModal()
val = TED.Value
finally:
TED.Destroy()
val = GetTextFromUser_FixInput(val, limit)
return val if id == wx.ID_OK else None
def make_okcancel(name, cls):
class okcancel_class(cls):
'Wraps any component in a OK/Cancel dialog.'
dialog_style = wx.CAPTION | wx.SYSTEM_MENU | wx.CLOSE_BOX
def __init__(self, parent, id=-1,
title=None,
ok_caption = '',
cancel_caption = '',
style = dialog_style):
if title is None:
title = _("Confirm")
cls.__init__(self, parent, id, title=title, style=style)
self.OKButton = ok = wx.Button(self, wx.ID_OK, ok_caption)
cancel = wx.Button(self, wx.ID_CANCEL, cancel_caption)
if config.platform == 'win':
button_order = [ok, cancel]
else:
button_order = [cancel, ok]
self._button_sizer = hbox = wx.BoxSizer(wx.HORIZONTAL)
if hasattr(self, 'ExtraButtons'):
ctrl = self.ExtraButtons()
if ctrl is not None:
hbox.Add(ctrl, 0, wx.EXPAND | wx.ALL, 5)
hbox.AddStretchSpacer(1)
for button in button_order:
hbox.Add(button, 0, wx.ALL, 5)
| |
<filename>sfeprapy/mcs1/mcs1_func_gen.py
# -*- coding: utf-8 -*-
import numpy as np
from pandas import DataFrame as df
from scipy import stats
from scipy.interpolate import interp1d
def lognorm_parameters_true_to_inv(miu, sigma):
"""
NAME: lognorm_parameters_true_to_inv
VERSION: 0.0.1
AUTHOR: <NAME>, <NAME>
DESCRIPTION:
Converts the mean and standard deviation of distribution from x to ln(x).
PARAMETERS:
:param miu: True mean of the x
:param sigma: True standard deviation of x
:return: (miu, sigma) where miu and sigma are based on ln(x)
USAGE:
>>> print(lognorm_parameters_true_to_inv(0.2, 0.2))
>>> (-1.9560115027140728, 0.8325546111576977)
"""
cov = sigma / miu
sigma_ln = np.sqrt(np.log(1 + cov ** 2))
miu_ln = np.log(miu) - 1 / 2 * sigma_ln ** 2
return miu_ln, sigma_ln
def lognorm_trunc_ppf(a, b, n_rv, sigma, loc, scale, cdf_y=None):
"""
NAME: lognorm_trunc_ppf
VERSION: 0.0.1
AUTHOR: <NAME>
DATE: 3 Aug 2018
DESCRIPTION:
Truncated log normal distribution cumulative function density. Truncate and normalise a log normal distribution
function for a given boundary (a, b).
PARAMETERS:
:param a: float, Lower boundary
:param b: float, Upper boundary
:param n_rv: integer, total number of random variables to be sampled (length of the returned array)
:param sigma: float, standard deviation of log normal distribution (for ln(x))
:param loc: float, location of log normal distribution
:param scale: float, scale of the log normal distribution
:param cdf_y: array (1 dimension) or None. A set of numbers represent cumulative probability. If None the function
will return the sampled values
:return: array (1 dimension), set of numbers representing sampled values of the truncated log normal distribution, in line
with 'cdf_y'
USAGE:
>>> import numpy as np
>>> a = 0 # lower boundary
>>> b = 1 # upper boundary
>>> n_rv = 5 # number of random variables
>>> sigma = 0.2 # standard deviation
>>> miu = 0.2 # mean
# Convert true mean and sigma to ln(x) based mean and sigma
>>> miu, sigma = lognorm_parameters_true_to_inv(miu, sigma)
>>> loc = np.exp(miu) # distribution mean is 0.2 (i.e. loc = np.exp(miu))
>>> scale = 1 # default
>>> result = lognorm_trunc_ppf(a, b, n_rv, sigma, loc, scale)
>>> print(result)
[0.14142136 0.49653783 0.65783987 0.81969479 1. ]
"""
# Generate a linearly spaced array between the lower and upper boundary of the log normal cumulative probability density.
sampled_cfd = np.linspace(
stats.lognorm.cdf(x=a, s=sigma, loc=loc, scale=scale),
stats.lognorm.cdf(x=b, s=sigma, loc=loc, scale=scale),
n_rv,
)
# Sample log normal distribution
sampled = stats.lognorm.ppf(q=sampled_cfd, s=sigma, loc=loc, scale=scale)
return sampled
def gumbel_parameter_converter(miu, sigma):
"""
NAME: gumbel_parameter_converter
VERSION: 0.0.1
AUTHOR: <NAME>, <NAME>
DATE: 27 Sept 2018
DESCRIPTION:
This function is used in conjunction of the scipy.stats.gumbel distribution, converts mean and standard deviation
(sigma) of samples x to location and scale which is used in scipy.stats.gumbel_r function.
:param miu: mean value of samples x
:param sigma: standard deviation of samples x
:return: location and scale
EXAMPLE:
"""
# parameters Gumbel W&S
alpha = 1.282 / sigma
u = miu - 0.5772 / alpha
# parameters Gumbel scipy
scale = 1 / alpha
loc = u
return loc, scale
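# Worked example (added for illustration; the numbers follow directly from the
# formulas above, they are not taken from the original source). For a Gumbel
# variable with true mean 600 and standard deviation 180:
#     >>> loc, scale = gumbel_parameter_converter(600, 180)
#     >>> round(loc, 1), round(scale, 1)
#     (519.0, 140.4)
# i.e. alpha = 1.282 / 180, scale = 1 / alpha ~= 140.4 and
# loc = 600 - 0.5772 / alpha ~= 519.0. gumbel_parameter_converter2 below is the
# inverse mapping and recovers approximately (600, 180).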
def gumbel_parameter_converter2(loc, scale):
# parameters Gumbel W&S
# alpha = 1.282 / sigma
# u = miu - 0.5772 / alpha
sigma = 1.282 / (1 / scale)
miu = loc + 0.5772 / (1 / scale)
# parameters Gumbel scipy
# scale = 1 / alpha
# loc = u
return miu, sigma
def gumbel_r_trunc_ppf(a, b, n_rv, loc, scale, cdf_y=None):
"""
NAME: gumbel_r_trunc_ppf
VERSION: 0.0.1
AUTHOR: <NAME>
DATE: 27 Sept 2018
DESCRIPTION: Produces evenly sampled random variables based on gumbel distribution (tail to the x+ direction, i.e.
median greater than mean). Truncation is possible via variables 'a' and 'b'. i.e. inversed cumulative density
function f(x), x will be sampled in linear space ranging from 'a' to 'b'. Then f(x) is returned. Additionally, if x
is defined 'cdf_y' then f(cdf_y) is returned.
PARAMETERS:
:param a: lower bound of truncation
:param b: upper bound of truncation
:param n_rv: number of random variables to be sampled, equal to the length of the returned array
:param loc: location of the distribution
:param scale: scale of the distribution
:param cdf_y: array ranging with range (0, 1)
:return sampled: sampled random variables
USAGE:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> loc, scale = gumbel_parameter_converter(600, 180)
>>> rv = gumbel_r_trunc_ppf(0, 1800, 10, loc, scale)
>>> print(np.round(rv, 0))
[ 0. 408. 462. 506. 548. 594. 646. 713. 819. 1800.]
"""
# Generate a linearly spaced array between the lower and upper boundary of the Gumbel cumulative probability density.
sampled_cfd = np.linspace(
stats.gumbel_r.cdf(x=a, loc=loc, scale=scale),
stats.gumbel_r.cdf(x=b, loc=loc, scale=scale),
n_rv,
)
# Following three lines are used to check the validity of the distribution
# print("511 (0.80): {:.4f}".format(stats.gumbel_r._y_(x=510, loc=loc, scale=scale)))
# print("584 (0.90): {:.4f}".format(stats.gumbel_r._y_(x=584, loc=loc, scale=scale)))
# print("655 (0.95): {:.4f}".format(stats.gumbel_r._y_(x=655, loc=loc, scale=scale)))
# Sample the Gumbel distribution
sampled = stats.gumbel_r.ppf(q=sampled_cfd, loc=loc, scale=scale)
# Work out cumulative probability function from 'sampled', output in forms of x y.
# Interpolate x and y are processed to be capable to cope with two extreme values. y[0] (cumulative probability,
# initial boundary) is manually set to 0.
x = np.linspace(a, b, int(n_rv) + 1, endpoint=False)
x += (x[1] - x[0]) / 2
x = x[0:-2]
y = np.array([np.sum(sampled <= i) for i in x]) / len(sampled)
# Interpolate
f = interp1d(y, x, bounds_error=False, fill_value=(np.min(y), np.max(y)))
if cdf_y is None:
return sampled
else:
return f(cdf_y)
def latin_hypercube_sampling(
num_samples, num_arguments=1, sample_lbound=0, sample_ubound=1
):
"""
NAME: latin_hypercube_sampling
AUTHOR: <NAME>
VERSION: 0.1
DATE: 3 Aug 2018
DESCRIPTION:
Latin Hypercube Sampling, generates an nxm array where m equal to 'num_arguments' and n equal to 'num_samples'.
Current version only adopts 'centered' sampling mode (each sampled cell value is centered).
PARAMETERS:
:param num_samples: Number of samples (i.e. rows)
:param num_arguments: Number of arguments (i.e. columns)
:param sample_lbound: Lower sampling boundary
:param sample_ubound: Upper sampling boundary
:return: An array with shape (num_samples, num_arguments)
EXAMPLE:
>>> result = latin_hypercube_sampling(num_samples=10, num_arguments=3, sample_lbound=0, sample_ubound=1)
This example yields an array with shape (10, 3), with each column filled with 10 centered, evenly spaced numbers (shuffled)
between 0 and 1 (i.e. shuffled [0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95]).
An example output:
result = [
[0.85 0.95 0.55]
[0.55 0.45 0.05]
[0.25 0.25 0.15]
[0.35 0.85 0.75]
[0.45 0.65 0.85]
[0.75 0.05 0.65]
[0.05 0.35 0.45]
[0.15 0.15 0.35]
[0.65 0.55 0.25]
[0.95 0.75 0.95]
]
"""
if sample_lbound > sample_ubound:
sample_ubound += sample_lbound
sample_lbound = sample_ubound - sample_lbound
sample_ubound = sample_ubound - sample_lbound
# Generate sorted integers with correct shape
mat_random_num = np.linspace(
sample_lbound, sample_ubound, num_samples + 1, dtype=float
)
mat_random_num += (mat_random_num[1] - mat_random_num[0]) * 0.5
mat_random_num = mat_random_num[0:-1]
mat_random_num = np.reshape(mat_random_num, (len(mat_random_num), 1))
mat_random_nums = mat_random_num * np.ones((1, num_arguments))
# np.random.shuffle(mat_random_nums)
for i in range(np.shape(mat_random_nums)[1]):
np.random.shuffle(mat_random_nums[:, i])
if num_arguments == 1:
mat_random_nums = mat_random_nums.flatten()
return mat_random_nums
def mc_inputs_generator_worker(arg):
kwargs, q = arg
result = mc_inputs_generator(**kwargs)
q.put("index")
return result
def mc_inputs_generator(
n_simulations,
room_depth,
room_opening_fraction_lbound,
room_opening_fraction_ubound,
room_opening_fraction_std,
room_opening_fraction_mean,
room_opening_permanent_fraction,
fire_qfd_std,
fire_qfd_mean,
fire_qfd_ubound,
fire_qfd_lbound,
fire_hrr_density_std,
fire_hrr_density_mean,
fire_hrr_density_ubound,
fire_hrr_density_lbound,
fire_com_eff_lbound,
fire_com_eff_ubound,
fire_spread_lbound,
fire_spread_ubound,
fire_nft_mean,
beam_loc_ratio_lbound,
beam_loc_ratio_ubound,
**_
):
# ==================================================================================================================
# CHECKS
# ==================================================================================================================
# Fire duration has to be as long as travelling fire to travel through the entire floor
# time_end = np.max([time_end, room_depth / fire_spread_lbound])
# ==================================================================================================================
# Distribution variables
# ==================================================================================================================
df_input_samples = None
if n_simulations == 1:
df_input_samples = df(
dict(
window_open_fraction=room_opening_fraction_mean,
fire_load_density=fire_qfd_mean,
fire_spread_speed=(fire_spread_ubound + fire_spread_lbound) / 2,
beam_position=room_depth * 0.75,
fire_nft_ubound=fire_nft_mean,
fire_hrr_density=fire_hrr_density_mean,
index=0,
)
)
df_input_samples.set_index("index", inplace=True)
elif n_simulations == 2:
raise ValueError("Number of simulations need to be greater than 2.")
elif n_simulations > 2:
def generate_samples(
dist_type: str,
dist_scale: float,
dist_loc: float,
lbound: float,
ubound: float,
n: int,
):
cfd_q = np.linspace(
getattr(stats, dist_type).cdf(x=lbound, loc=dist_loc, scale=dist_scale),
getattr(stats, dist_type).cdf(x=ubound, loc=dist_loc, scale=dist_scale),
n,
)
samples = getattr(stats, dist_type).ppf(
q=cfd_q, loc=dist_loc, scale=dist_scale
)
samples[samples == np.inf] = ubound
samples[samples == -np.inf] = lbound
np.random.shuffle(samples)
return samples
# Fuel load density
# -----------------
fire_combustion_efficiency = np.linspace(
fire_com_eff_lbound, fire_com_eff_ubound, n_simulations
)
qfd_loc, qfd_scale = gumbel_parameter_converter(fire_qfd_mean, fire_qfd_std)
fire_load_density_samples = (
generate_samples(
"gumbel_r",
qfd_scale,
qfd_loc,
fire_qfd_lbound,
fire_qfd_ubound,
n_simulations,
)
* fire_combustion_efficiency
)
# Fire | |
if (verbose):
print ("Test #{0} - DeleteName('Test Folder', verbose)".format(count + 1))
result = DeleteName("Test Folder", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
testResults = False
if (fails == 0 and count == total_tests):
print ("ran {0} tests, all pass".format(total_tests))
testResults = True
else:
print ("test count expected {0} passes, received {1}, failures {2}".format(total_tests, count, fails))
testResults = False
sys.stdout = old_stdout
result_string = print_out.getvalue()
results = {}
results['status'] = testResults
results['total'] = total_tests
results['pass'] = count
results['fails'] = fails
results['output'] = result_string
return results
def TestAIM(location, verbose):
old_stdout = sys.stdout
print_out = StringIO()
sys.stdout = print_out
count = 0
fails = 0
total_tests = 457
defaults, types = GetDefaults(False)
status, keys, rows = LoadTest(location, verbose)
if (status and (defaults is not None)):
if (verbose):
print ("Test #{0} - UpdateDefaultItem('folder name', 'Test Aim', verbose)".format(count + 1))
result = UpdateDefaultItem("folder name", "Test Aim", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('$', '5000', verbose)".format(count + 1))
result = Balance( "$", "5000", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Add('AAPL', verbose)".format(count + 1))
result = Add( "AAPL", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('$', '5000', verbose)".format(count + 1))
result = Balance( "$", "5000", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - PrintDefaults(verbose)".format(count + 1))
r1, r2, r3, r4 = PrintDefaults(verbose)
if (r1 > "" and r2 > "" and r3 > "" and r4 > ""):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("testing {0} spreadsheet rows".format(len(rows)))
for item in rows.items():
index = item[0]
if (index == 0):
continue
curr = dict(zip(keys, item[1]))
prev = GetPrevious(index, keys, rows)
if (verbose):
print ("Test #{0} - Safe(<Stock Value>, verbose)".format(count + 1))
result = Safe(myFloat(curr['Stock Value']), verbose)
if (result == myFloat(curr['Safe'])):
if (verbose):
print ("\tSafe({0}) - pass.".format(index))
count += 1
else:
if (verbose):
print ("\tSafe({0}) - expected: {1}, calculated: {2}, fail.".format(index, curr['Safe'], result))
fails += 1
if (verbose):
print ("Test #{0} - PortfolioControl(<Market Order>, <Prev Portfolio Control>, verbose)".format(count + 1))
result = PortfolioControl(myFloat(curr['Market Order']), myFloat(prev['Portfolio Control']), verbose)
if (result == myFloat(curr['Portfolio Control'])):
if (verbose):
print ("\tPortfolioControl({0}) - pass.".format(index))
count += 1
else:
if (verbose):
print ("\tPortfolioControl({0}) - expected: {1}, calculated: {2}, fail.".format(index, curr['Portfolio Control'], result))
fails += 1
if (verbose):
print ("Test #{0} - BuySellAdvice(<Portfolio Control>, <Stock Value>, verbose)".format(count + 1))
result = BuySellAdvice(myFloat(prev['Portfolio Control']), myFloat(curr['Stock Value']), verbose)
if (result == myFloat(curr['Buy (Sell) Advice'])):
if (verbose):
print ("\tBuySellAdvice({0}) - pass.".format(index))
count += 1
else:
if (verbose):
print ("\tBuySellAdvice({0}) - expected: {1}, calculated: {2}, fail.".format(index, curr['Buy (Sell) Advice'], result))
fails += 1
if (verbose):
print ("Test #{0} - MarketOrder(<Buy (Sell) Advice>, <Safe>, verbose)".format(count + 1))
result = MarketOrder(myFloat(curr['Buy (Sell) Advice']), myFloat(curr['Safe']), verbose)
if (result == myFloat(curr['Market Order'])):
if (verbose):
print ("\tMarketOrder({0}) - pass.".format(index))
count += 1
else:
if (verbose):
print ("\tMarketOrder({0}) - expected: {1}, calculated: {2}, fail.".format(index, curr['Market Order'], result))
fails += 1
if (verbose):
print ("Test #{0} - PortfolioValue(<Cash>, <Stock Value>, verbose)".format(count + 1))
result = PortfolioValue(myFloat(curr['Cash']), myFloat(curr['Stock Value']), verbose)
if (result == myFloat(curr['Portfolio Value'])):
if (verbose):
print ("\tPortfolioValue({0}) - pass.".format(index))
count += 1
else:
if (verbose):
print ("\tPortfolioValue({0}) - expected: {1}, calculated: {2}, fail.".format(index, curr['Portfolio Value'], result))
fails += 1
username = getpass.getuser()
if (verbose):
print ("Test #{0} - UpdateDefaultItem('folder name', '<reset back to what it was>', verbose)".format(count + 1))
result = UpdateDefaultItem("folder name", defaults['folder name'], verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - DeleteName('Test Aim', verbose)".format(count + 1))
result = DeleteName("Test Aim", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
testResults = False
if (fails == 0 and count == total_tests):
print ("ran {0} tests, all pass".format(total_tests))
testResults = True
else:
print ("test count expected {0} passes, received {1}, failures {2}".format(total_tests, count, fails))
testResults = False
sys.stdout = old_stdout
result_string = print_out.getvalue()
results = {}
results['status'] = testResults
results['total'] = total_tests
results['pass'] = count
results['fails'] = fails
results['output'] = result_string
return results
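# Pattern note (added for clarity, describing the code above): each Test*
# helper swaps sys.stdout for a StringIO buffer, counts passes and failures
# against total_tests, restores sys.stdout, and returns a results dict with
# the keys 'status', 'total', 'pass', 'fails' and 'output'.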
def TestHistory(verbose):
results = {}
db_file = GetDB(verbose)
username = getpass.getuser()
Path(username + "/").mkdir(parents=True, exist_ok=True)
old_stdout = sys.stdout
print_out = StringIO()
sys.stdout = print_out
count = 0
fails = 0
total_tests = 31
try:
conn = sqlite3.connect(db_file)
if (verbose):
print("TestHistory(1) sqlite3: {0}".format(sqlite3.version))
print("TestHistory(2) db_file: {0}".format(db_file))
except Error as e:
print("TestHistory(3) {0}".format(e))
sys.stdout = old_stdout
result_string = print_out.getvalue()
results['output'] = result_string
return results
defaults, types = GetDefaults(verbose)
if (verbose):
print ("Test #{0} - GetDB(verbose)".format(count + 1))
result = GetDB(verbose)
if (result > ""):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - UpdateDefaultItem('folder name', 'Test History', verbose)".format(count + 1))
result = UpdateDefaultItem("folder name", "Test History", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('$', '5000', verbose)".format(count + 1))
result = Balance( "$", "5000", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Add('AAPL', verbose)".format(count + 1))
result = Add( "AAPL", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Balance('AAPL', '5000', verbose)".format(count + 1))
result = Balance("AAPL", "5000", verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - CreateAIM(verbose)".format(count + 1))
result, text = CreateAIM(verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
elif (text == "You must go to the History Tab and archive your AIM data first"):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetAIM(verbose)".format(count + 1))
result = GetAIM(verbose)
if (result != []):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetAIMCount(verbose)".format(count + 1))
result = GetAIMCount(verbose)
if (result > 0):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetLastAIM(verbose)".format(count + 1))
result = GetLastAIM(verbose)
if (result != {}):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - GetFirstAIM(verbose)".format(count + 1))
result = GetFirstAIM(verbose)
if (result != {}):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Look(verbose)".format(count + 1))
r1, r2, r3 = Look(verbose)
if (r1 != {} and r2 > "" and r3 != {}):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - Post(verbose)".format(count + 1))
result = Post(verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
table, symbol_options, balance_options, amount_options = PrintFolder(verbose)
if (verbose):
print ("Test #{0} - GetCurrentStockList(<amount options>, verbose)".format(count + 1))
dl = GetCurrentStockList(amount_options, verbose)
if (dl != []):
if (verbose):
print ("\tpass.")
count += 1
else:
if (verbose):
print ("\tfail.")
fails += 1
if (verbose):
print ("Test #{0} - BeginWorksheet(-500, verbose)".format(count + 1))
filename = "{0}worksheet_test.csv".format(defaults['test root'])
result = BeginWorksheet(-500, verbose)
if (result):
if (verbose):
print ("\tpass.")
count += 1
else:
| |
'''
Contains a generic algorithm object which can do vanilla ICP
Then, create plugin functions which can do naive, simple, and fast PFH
'''
import time
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
from sklearn.metrics.pairwise import euclidean_distances
from . import utils
class PFH(object):
"""Parent class for PFH"""
def __init__(self, e, div, nneighbors, rad):
"""Pass in parameters """
self._e = e
self._div = div
self._nneighbors = nneighbors
self._radius = rad
self._error_list = []
self._Rlist = []
self._tlist = []
def solve(self, P, Q):
"""Main solver
:P: Source point cloud
:Q: Target point cloud
:e: Threshold to stop iterating
:div: Number of divisions for binning PFH
:nneighbors: Number of k neighbors for surface normal estimation and PFH neighbors
:returns: R_list, t_list, Cp, error_list
"""
iterations = 0
done = False
error_o = 100
Cp = P
print("...ICP started... \n")
while not done:
start = time.process_time()
# Find correspondences p_i <-> q_i
# Matching is done via histogram signatures
matched_dist_inds = self.findMatches(Cp, Q)
matchInd, _ = self.extractIndices(matched_dist_inds)
Cq = []
for i in range(len(Cp)):
q_near = Q[matchInd[i]]
Cq.append(q_near)
if done:
# When finished, move the points according to the centroids
print("final move")
pbar = sum(P)/len(P)
qbar = sum(Q)/len(Q)
R_init = np.matrix([[1,0,0], [0,1,0], [0,0,1]])
t_init = qbar - R_init.dot(pbar)
R = R_init
t = t_init
else:
# Get transforms
R, t = self.getTransform(Cp, Cq)
# Keep track of the R and t for final transforms
self._Rlist.append(R)
self._tlist.append(t)
# Terminate based on error < e, lack of progress, or iterations
error = self.getError(Cp, Cq, R, t)
self._error_list.append(error)
#if abs(error - error_o) < (0.02*e):
# ctr += 1
if error < self._e or abs(error - error_o) < 0.02*self._e or iterations == 30:
if error < self._e:
print('Found low error solution\n')
elif abs(error - error_o) < .02*self._e:
print('Lack of progress, terminating\n')
elif iterations == 30:
print('Reached max iterations\n')
done = True
error_o = error
# Update all P
new_p_list = []
for p in Cp:
new_p_list.append(R.dot(p)+t)
Cp = new_p_list
print("===============================")
print("iteration: ", iterations)
print("error: ", error)
end = time.process_time()
print("Time per iteration: ", end - start)
print("===============================\n\n")
iterations = iterations + 1
return Cp
def getNeighbors(self, distances):
"""Get k nearest neighbors of the query point pq from pc, within the radius
"""
k = self._nneighbors
neighbors = [(distances[i],i) for i in range(len(distances)) if distances[i] <= self._radius]
#print("Found {} neighbors".format(len(neighbors)))
neighbors.sort(key=lambda x:x[0])
neighbors.pop(0)
# Breakout just the indices
return [neighbors[i][1] for i in range(min(k,len(neighbors)))]
def calc_normals(self, pc):
"""TODO: Docstring for calc_normals.
:pc: TODO
:returns: TODO
"""
print("\tCalculating surface normals. \n")
normals = []
ind_of_neighbors = []
N = len(pc)
pcMat=utils.convert_pc_to_matrix(pc)
distances=euclidean_distances(pc,pc)
for i in range(N):
# Get the indices of neighbors, it is a list of tuples (dist, indx)
indN = self.getNeighbors(distances[i]) #<- old code
#indN = list((neigh.kneighbors(pc[i].reshape(1, -1), return_distance=False)).flatten())
#indN.pop(0)
ind_of_neighbors.append(indN)
# PCA
X = pcMat[:, indN]
X2 = X - np.mean(X, axis=1)
cov = np.matmul(X2, X2.T)/(len(indN))
_, _, Vt = np.linalg.svd(cov)
normal = Vt[2, :]
# Re-orient normal vectors
if np.matmul(normal, -1.*(pc[i])) < 0:
normal = -1.*normal
normals.append(normal)
return normals, ind_of_neighbors
def calcHistArray(self, pc, norm, indNeigh):
"""override this function with custom Histogram"""
print("\tCalculating histograms naive method \n")
N = len(pc)
histArray = np.zeros((N, self._div**3))
for i in range(N):
u = np.asarray(norm[i].T).squeeze()
k = self._nneighbors
n = k + 1
N_features = sp.comb(n, 2)
features = []
p_list = [i] + indNeigh[i]
p_list_copy = [i] + indNeigh[i]
for z in p_list:
p_list_copy.pop(0)
for p in p_list_copy:
pi = pc[p]
pj = pc[z]
if np.arccos(np.dot(norm[p], pj - pi)) <= np.arccos(np.dot(norm[z], pi - pj)):
ps = pi
pt = pj
ns = np.asarray(norm[p]).squeeze()
nt = np.asarray(norm[z]).squeeze()
else:
ps = pj
pt = pi
ns = np.asarray(norm[z]).squeeze()
nt = np.asarray(norm[p]).squeeze()
u = ns
difV = pt - ps
dist = np.linalg.norm(difV)
difV = difV/dist
difV = np.asarray(difV).squeeze()
v = np.cross(difV, u)
w = np.cross(u, v)
alpha = np.dot(v, nt)
phi = np.dot(u, difV)
theta = np.arctan(np.dot(w, nt) / np.dot(u, nt))
features.append(np.array([alpha, phi, theta]))
features = np.asarray(features)
pfh_hist, bin_edges = self.calc_pfh_hist(features)
histArray[i, :] = pfh_hist / (N_features)
return histArray
def findMatches(self, pcS, pcT):
"""Find matches from source to target points
:pcS: Source point cloud
:pcT: Target point cloud
:returns: for each source point, a list of (histogram distance, target index) tuples sorted by distance
"""
print("...Finding correspondences. \n")
numS = len(pcS)
numT = len(pcT)
print("...Processing source point cloud...\n")
normS,indS = self.calc_normals(pcS)
''' TODO: implement the different histograms '''
#histS = calcHistArray_naive(pcT, normS, indS, div, nneighbors)
#histS = calcHistArray_simple(pcT, normS, indS, div, nneighbors)
histS = self.calcHistArray(pcS, normS, indS)
print("...Processing target point cloud...\n")
''' TODO: implement the different histograms '''
normT,indT = self.calc_normals(pcT)
#histT = calcHistArray_naive(pcT, normT, indT, div, nneighbors)
#histT = calcHistArray_simple(pcT, normT, indT, div, nneighbors)
histT = self.calcHistArray(pcT, normT, indT)
distance = []
dist = []
for i in range(numS):
for j in range(numT):
#appending the l2 norm and j
dist.append((np.linalg.norm(histS[i]-histT[j]),j))
dist.sort(key=lambda x:x[0]) #To sort by first element of the tuple
distance.append(dist)
dist = []
return distance
def extractIndices(self, DistIndices):
"""
:DistIndices: TODO
:returns: TODO
"""
matchInd = []
distances = []
for i in range(len(DistIndices)):
#always pull the lowest distance result's index
matchInd.append(DistIndices[i][0][1])
distances.append(DistIndices[i][0][0])
return matchInd, distances
def getTransform(self, Cp, Cq):
"""Calculate the transforms based on correspondences
:Cp: Source point cloud
:Cq: Target point cloud
:returns: R and t matrices
"""
# Get the centroids
pbar = sum(Cp)/len(Cp)
qbar = sum(Cq)/len(Cq)
# Subtract mean from data
X = np.matrix(np.zeros((3, len(Cp))))
Y = np.matrix(np.zeros((3, len(Cq))))
Cp = utils.convert_pc_to_matrix(Cp)
X = Cp - pbar
Cq = utils.convert_pc_to_matrix(Cq)
Y = Cq - qbar
# SVD
U, _, Vt = np.linalg.svd(X.dot(Y.T))
V = Vt.T
det = np.linalg.det(V.dot(U.T))
anti_reflect = np.matrix([[1,0,0],
[0,1,0],
[0,0,det]])
R = V.dot(anti_reflect).dot(U.T)
t = qbar - R.dot(pbar)
return R, t
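# Clarifying note (standard least-squares rigid alignment, a.k.a. the Kabsch
# method; the derivation is not spelled out in the original): with the
# centered matrices X = Cp - pbar and Y = Cq - qbar, the SVD U S Vt = X Y^T
# yields the optimal rotation R = V diag(1, 1, det(V U^T)) U^T, where the
# det(...) entry guards against returning a reflection, and the translation
# is t = qbar - R pbar.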
def getError(self, Cp, Cq, R, t):
"""
Calculate the transformation error. Assume Cp and Cq have 1-to-1 correspondences.
"""
err = 0
for i in range(len(Cp)):
q_near = Cq[i]
tmp = np.linalg.norm(R.dot(Cp[i]) + t - q_near)
err = err + tmp**2
return err
def step(self, si, fi):
"""Helper function for calc_pfh_hist. Depends on selection of div
:si: TODO
:fi: TODO
:returns: TODO
"""
if self._div==2:
if fi < si[0]:
result = 0
else:
result = 1
elif self._div==3:
if fi < si[0]:
result = 0
elif fi >= si[0] and fi < si[1]:
result = 1
else:
result = 2
elif self._div==4:
if fi < si[0]:
result = 0
elif fi >= si[0] and fi < si[1]:
result = 1
elif fi >= si[1] and fi < si[2]:
result = 2
else:
result = 3
elif self._div==5:
if fi < si[0]:
result = 0
elif fi >= si[0] and fi < si[1]:
result = 1
elif fi >= si[1] and fi < si[2]:
result = 2
elif fi >= si[2] and fi < si[3]:
result = 3
else:
result = 4
return result
def calc_thresholds(self):
"""
:returns: 3x(div-1) array where each row is a feature's thresholds
"""
delta = 2./self._div
s1 = np.array([-1+i*delta for i in range(1,self._div)])
delta = 2./self._div
s3 = np.array([-1+i*delta for i in range(1,self._div)])
delta = (np.pi)/self._div
s4 = np.array([-np.pi/2 + i*delta for i in range(1,self._div)])
s = np.array([s1,s3,s4])
return s
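# Worked example (derived from the formulas above, added for illustration):
# with div = 3 the thresholds are
#     s1 = s3 = [-1/3, 1/3]      (alpha and phi lie in [-1, 1])
#     s4 = [-pi/6, pi/6]         (theta lies in [-pi/2, pi/2])
# so every feature is split into three equal-width bins and the joint
# histogram built in calc_pfh_hist has div**3 = 27 bins.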
def calc_pfh_hist(self, f):
"""Calculate histogram and bin edges.
:f: feature vector of f1,f3,f4 (Nx3)
:returns:
pfh_hist - array of length div^3, represents number of samples per bin
bin_edges - range(0, 1, 2, ..., (div^3+1))
"""
# preallocate array sizes, create bin_edges
pfh_hist, bin_edges = np.zeros(self._div**3), np.arange(0,self._div**3+1)
# find the division thresholds for the histogram
s = self.calc_thresholds()
# Loop for every row in f from 0 to N
for j in range(0, f.shape[0]):
# calculate the bin index to increment
index = 0
for i in range(1,4):
index += self.step(s[i-1, :], f[j, i-1]) * (self._div**(i-1))
# Increment histogram at that index
pfh_hist[index] += 1
return pfh_hist, bin_edges
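# Minimal usage sketch (hypothetical parameter values, not from the original
# source); P and Q are point clouds in the format expected by
# utils.convert_pc_to_matrix:
#     pfh = PFH(e=1e-5, div=3, nneighbors=8, rad=0.03)
#     aligned_P = pfh.solve(P, Q)
#     R_list, t_list, errors = pfh._Rlist, pfh._tlist, pfh._error_list
# Subclasses such as SPFH below only override calcHistArray; the ICP loop in
# solve() is shared.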
class SPFH(PFH):
"""Child class of PFH to implement a different calcHistArray"""
def calcHistArray(self, pc, | |
= [0,255,0] #AdG
#out_img[x-1, y+2] = [0,255,0] #AdG
return out_img
#################################################################
def plotMiddleLane(binary, middle_fitx, Minv, img_size, undistorted, ploty):
"""Plotte Mittellinie"""
# create new image to plot middle lane on
warped_middle_zero = np.zeros_like(binary).astype(np.uint8)
color_warped_middle = np.dstack((warped_middle_zero, warped_middle_zero, warped_middle_zero))
color_warped_middle[ploty.astype('int'), middle_fitx.astype('int')] = [255,0,0] # plot middle lane on the new image
unwarped_middle = cv2.warpPerspective(color_warped_middle, Minv, img_size, flags=cv2.INTER_LINEAR) # warp the middle lane back to original perspective
if OPTIMIERUNG_EINGESCHALTET == True: # Combine the result with the original image
result= color_warped_middle
else:
result = cv2.addWeighted(undistorted, 1, unwarped_middle, 1, 0)
return result
#################################################################
def showStream(windowName, showWindow, showHelp, helpText, font, frame, undistorted, gray, warped, binary, out_img, result):
"""Zeige aktuelles Bild im Fenster an und warte auf Tasteneingaben"""
# setting up the display of the different stages of the feed as a 1x2 window
if showWindow == 3:
frameDisp = cv2.resize(frame, (640,480))
undistortedDisp = cv2.resize(undistorted,(640,480))
grayDisp = cv2.resize(gray,(640,480))
warpedDisp = cv2.resize(warped,(640,480))
binaryDisp = cv2.resize(binary,(640,480))
out_imgDisp = cv2.resize(out_img,(640,480))
resultDisp = cv2.resize(result,(640,480))
vidBuf = np.concatenate((resultDisp, out_imgDisp), axis=1)
# select what stage in closeup or display of all stages
if showWindow==1:
displayBuf = result
elif showWindow == 2:
displayBuf = out_img
elif showWindow == 3:
displayBuf = vidBuf
# show help text
if showHelp == True:
cv2.putText(displayBuf, helpText, (11,20), font, 1.0, (32,32,32), 4, cv2.LINE_AA)
cv2.putText(displayBuf, helpText, (10,20), font, 1.0, (480,480,480), 1, cv2.LINE_AA)
cv2.imshow(windowName,displayBuf) #show display content in ubuntu
key=cv2.waitKey(10)
# check for ESC key
if key == 27:
print("|\n|...Programm wurde vom Benutzer beendet\n\n")
cv2.destroyAllWindows()
sys.exit()
# check for '1' key
elif key==49:
cv2.setWindowTitle(windowName,"Camera Feed")
showWindow=1
# check for '2' key
elif key==50:
cv2.setWindowTitle(windowName,"lane detection")
showWindow=2
# check for '3' key
elif key==51:
cv2.setWindowTitle(windowName,"Camera, lanes")
showWindow=3
# check for '4' key
elif key==52:
showHelp = not showHelp
if Steuerbits_16[1]==1:
Steuerbits_16[1]=0 # reset the control bit once it has been sent
# check for '0' key
elif key==48:
Steuerbits_16[1]=1 # a value of 1 ends autonomous mode
print("|...autonomous mode stopped by the user")
if Steuerbits_16[0]==1:
Steuerbits_16[0]=0
# reset the control bit once it has been sent
# check for '9' key
elif key==57:
Steuerbits_16[0]=1 # a value of 1 starts autonomous mode
print("|...autonomous mode started by the user")
return showWindow
#################################################################
def extractPoint(middle_fitx,lookaheadDistance,Faktor_Meter_pro_Pixel):
"""Suche einen Punkt aus der Mittellinie heraus, der vom Ursprung genau die Entfernung der LookaheadDistance hat"""
lookahead=lookaheadDistance
Delta_X=middle_fitx-320 # delta from middle, length is 480(height)
Delta_X=Delta_X[::-1] # reverse the array, so that first value is at the bottom
Y=np.linspace(0, 479,480)
Distance=np.sqrt( (Y**2) + (Delta_X**2) ) # Betrag von unten mitte zu Punkt ueber Satz des Pythagoras a^2 +b^2 =c^2
Sorted_Distance=np.sort(Distance) # Betrag in aufsteigender Reihenfolge sortieren
indices_Sorted_Distance=np.argsort(Distance) # Indizes(Y Werte) der sortierten liste merken; ist meistens eh aufsteigend, nur wenn starke kurven drinnens sind
for index, item in enumerate(Sorted_Distance): # finde Index des Betrages, welches lookahead entfernt ist
if item >=lookahead:
index_Aimpoint_Y=index
break
Aimpoint_Y=indices_Sorted_Distance[index_Aimpoint_Y] # um tatsaechlichen y Wert von diesem Betrag zu erhalten
#print ("index= " + str(Aimpoint_Y))
Aimpoint_X=Delta_X[Aimpoint_Y]
DeltaX_Meter = Aimpoint_X*Faktor_Meter_pro_Pixel
lookahead_Meter=lookahead*Faktor_Meter_pro_Pixel+0.48 #TODO anpassen, die Distanz an der die blaue linie anfaengt zum hinteren Rad
Aimpoint_X_fuer_plotten=Aimpoint_X+320 # zum plotten da 320 die Mitte des Bildes ist
return int(Aimpoint_X_fuer_plotten),int(Aimpoint_Y), DeltaX_Meter ,lookahead_Meter
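# Worked example (illustrative only; uses the constants defined further below
# in this script, Faktor_Meter_pro_Pixel = 0.0022 and Lookahead_Pixel = 200):
#     lookahead_Meter = 200 * 0.0022 + 0.48 = 0.92 m measured from the rear wheel,
# and DeltaX_Meter is the aim point's lateral offset converted to meters; the
# main loop below sends both values to the ST board over UART for steering.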
#################################################################
##################################################################################################################################
# _ _ _ _____
# | | | | | | | __ \
# | |__| | __ _ _ _ _ __ | |_| |__) | __ ___ __ _ _ __ __ _ _ __ ___ _ __ ___
# | __ |/ _` | | | | '_ \| __| ___/ '__/ _ \ / _` | '__/ _` | '_ ` _ \| '_ ` _ \
# | | | | (_| | |_| | |_) | |_| | | | | (_) | (_| | | | (_| | | | | | | | | | | |
# |_| |_|\__,_|\__,_| .__/ \__|_| |_| \___/ \__, |_| \__,_|_| |_| |_|_| |_| |_|
# | | __/ |
# |_| |___/
###################################################################################################################################
#################################################################
#
# LOAD PARAMETERS
#
#################################################################
print("|...Loading parameters")
# TODO hyperparameters; for a new camera, regenerate mtx.dat and dist.dat with distortion.py
mtx = np.load("mtx.dat") # needed to correct the lens distortion
dist = np.load("dist.dat") # needed to correct the lens distortion
# TODO hyperparameters; for a new camera position, regenerate trans_M.dat and trans_inv_M.dat with transformation_handler.py
trans_M = np.load("trans_M.dat")
Minv = np.load("trans_inv_M.dat")
Faktor_Meter_pro_Pixel=0.0022#(0.20*13)/480 #TODO adjust depending on how the camera is mounted
img_size=(640,480)
thresh_val = 230 # start value of the threshold for checkBinary, which makes sure the image consists of neither too many nor too few white pixels
max_val = 255 # maximum value for the threshold
showWindow = 3 # start value for the window selection via the keyboard
frame_number = 0 # counter used to throw away the first frames, since they are always black
init_binary_flag = True # must be True at the start
print_only_once=True
print_only_once2=True
Kamera = openCam() # create a camera object for the threaded video stream
histogramm_breite=int(0.12/Faktor_Meter_pro_Pixel) # radius of the histogram
histogramm_hoehe=int(0.4/Faktor_Meter_pro_Pixel) # height of the histogram
histogram_basis=290 # base point of the histogram
histogram_maske = np.zeros((480,640), np.uint8) # create an empty binary image
cv2.ellipse(histogram_maske, (histogram_basis,480),(histogramm_breite,histogramm_hoehe),0,0,-180,color=(255,255,255), thickness=-1) # draw the histogram mask into the binary image
Geschwindigkeit=13 # speed, from 100 to -100
Lookahead_Pixel=200
Fummelfaktor_DeltaX=3
if OPTIMIERUNG_EINGESCHALTET == True: # do not create a display window
pass
else:
windowName, showHelp, helpText, font = setupStream() # create the display window
#################################################################
#
# START MAIN LOOP
#
#################################################################
print("|...Main loop started")
while True: # start main loop
start_time = time.time()
frame = grabFrame(Kamera) # fetch the most recent frame
grab_time = time.time() - start_time
if OPTIMIERUNG_EINGESCHALTET == True: # when optimizing, convert to gray first, then undistort
gray = grayFrame(frame)
gray_time = time.time() - grab_time - start_time
undistorted = undistFrame(gray)
undist_time = time.time()-gray_time - grab_time -start_time
warped = warpFrame(trans_M, img_size, undistorted)
warp_time = time.time() - gray_time - undist_time - grab_time -start_time
else: # the live stream should be a colored, undistorted image
undistorted = undistFrame(frame)
undist_time = time.time() - grab_time -start_time
warped = warpFrame(trans_M, img_size, undistorted)
warp_time = time.time() - undist_time - grab_time -start_time
gray = grayFrame(warped) # is already binary
gray_time = time.time() - undist_time - grab_time -start_time -warp_time
# write the current frame into a buffer, because it first has to be checked whether it is OK
#binarybuffer = binaryFrame(warped, thresh_val, max_val)
binarybuffer=gray
# bug: if the very first frame is not OK, no binary exists yet...this is caught here
if(init_binary_flag):
binary = binarybuffer
init_binary_flag = False
binary_time = time.time() - warp_time - gray_time - undist_time - grab_time -start_time
# check whether the frame is OK
white_pixel_flag, thresh_val= checkBinary(binarybuffer,histogram_maske,histogramm_breite, thresh_val)
check_binary_time = time.time() - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
if (not white_pixel_flag and print_only_once):
print("|...Keinen akzeptablen binary Frame erhalten.\n| Ueberspringe Berechnungen, Kontrast wird angepasst...")
print_only_once=False
print_only_once2=True
# only if the frame of this iteration is OK; otherwise the frame is "frozen" (the frame from the previous iteration is used)
if(white_pixel_flag):
if print_only_once2:
print("|...Akzeptablen binary Frame erhalten, Kontrast wurde angepasst")
print_only_once2=False
print_only_once=True
# update the binary frame
binary = binarybuffer
if_time = time.time() - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# Fit polynomials to the left and right outer lane markings and produce their coefficients
out_img, left_fit = getPolynoms(binary,Faktor_Meter_pro_Pixel,histogram_basis,histogramm_breite,histogramm_hoehe,histogram_maske)
polynom_time = time.time() - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# Generate x and y values for plotting the polynomials
ploty, left_fitx = plottablePolynoms(binary, left_fit)
plot_value_time = time.time() - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# Compute the center line from the left and right lines
middle_fitx = calcMiddle(binary, ploty, left_fitx, Faktor_Meter_pro_Pixel)
middle_time = time.time() - plot_value_time - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# Find the point on the center line whose distance from the origin equals the lookahead distance
X_Aim, Y_Aim, DeltaX_Meter ,lookahead_Meter = extractPoint(middle_fitx,Lookahead_Pixel,Faktor_Meter_pro_Pixel) # x and y coordinate of the point located at the lookahead distance
extract_time = time.time() - middle_time - plot_value_time - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# Send data to the ST over UART
Kommunikation.sendeAnST(Geschwindigkeit,lookahead_Meter,DeltaX_Meter*Fummelfaktor_DeltaX,0,Steuerbits_16) # TODO fudge factor; send speed, lookahead, delta x
uart_time = time.time() - extract_time - middle_time - plot_value_time - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# do not plot anything when optimization is enabled
if OPTIMIERUNG_EINGESCHALTET == True:
loop_time= time.time() -start_time
#print("grab_time :" + str(grab_time) +"\n" + "undist_time :" +str(undist_time) +"\n" + "gray_time :" + str(gray_time) +"\n" + "warp_time :" + str(warp_time) +"\n" + "binary_time :" + str(binary_time) +"\n" + "check_binary_time :" + str(check_binary_time) +"\n" + "if_time :" + str(if_time) +"\n" + "polynom_time :" + str(polynom_time) +"\n" + "plot_value_time :" + str(plot_value_time) + "\n" + "middle_time :" + str(middle_time) + "\n" + "extract_time: " + str(extract_time) + "\n" + "uart_time: " + str(uart_time)+"\nLoop_time THREADED Optimized: " + str(loop_time) +"\n\n")
# otherwise additionally display the stream
else:
# receive
#ret=Kommunikation.empfangeVonST() # TODO enable when data should be received from the ST
# Plot polynomials and the deviation of the target point from the center
out_img = plotPolynoms(out_img, ploty, left_fitx, middle_fitx, X_Aim, Y_Aim)
plot_time = time.time() - uart_time - extract_time- middle_time - plot_value_time - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time -start_time
# plot the center line
result = plotMiddleLane(binary, middle_fitx, Minv, img_size, undistorted, ploty)
plot_middle_time = time.time() - plot_time - uart_time - extract_time - middle_time - plot_value_time - polynom_time - if_time - check_binary_time - binary_time - warp_time - gray_time - undist_time - grab_time - start_time
def _get_ctx(self, x):
match x:
case Ident():
obj_name = x.Name
case str():
obj_name = x
case Expr():
return x._type()
case _:
raise ValueError(x)
if obj := self._from_scope_or_outer(obj_name):
return obj._py_context
def _from_scope_or_outer(self, obj_name: str):
return self._from_scope(obj_name) or self._from_outer_scope(obj_name)
def _from_scope(self, obj_name: str):
return self.Objects.get(obj_name)
def _from_outer_scope(self, obj_name: str):
if not self.Outer:
return None
return self.Outer.Objects.get(obj_name) or self.Outer._from_outer_scope(obj_name)
def Insert(self, obj: Object) -> Optional[Object]:
"""
Insert attempts to insert a named object obj into the scope s.
If the scope already contains an object alt with the same name,
Insert leaves the scope unchanged and returns alt. Otherwise
it inserts obj and returns nil.
"""
alt = self.Objects.get(obj.Name)
if alt:
return alt
self.Objects[obj.Name] = obj
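# Illustrative use of the insert-or-return-existing semantics above (added sketch; assumes the
# Scope/Object constructors defined earlier in this module):
#   s = Scope(Outer=None)
#   first = Object(Name="x")
#   assert s.Insert(first) is None              # new name: inserted, returns None
#   assert s.Insert(Object(Name="x")) is first  # duplicate name: scope unchanged, existing object returned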
class File(GoAST):
"""Files and packages A File node represents a Go source file. The Comments list index
all comments in the source file in order of appearance, including the comments that are
pointed to from other nodes via Doc and Comment fields. For correct printing of source
code containing comments (using packages go/format and go/printer), special care must be
taken to update comments when a File's syntax tree is modified: For printing, comments
are interspersed between tokens based on their position. If syntax tree nodes are removed
or moved, relevant comments in their vicinity must also be removed (from the
File.Comments list) or moved accordingly (by updating their positions). A CommentMap may
be used to facilitate some of these operations. Whether and how a comment is associated
with a node depends on the interpretation of the syntax tree by the manipulating program:
Except for Doc and Comment comments directly associated with nodes, the remaining
comments are 'free-floating' (see also issues #18593, #20744).
"""
_fields = ("Comments", "Decls", "Doc", "Imports", "Name", "Package", "Scope", "Unresolved")
"""list of all comments in the source file"""
Comments: List[CommentGroup]
"""top-level declarations; or nil"""
Decls: List[Expr]
"""associated documentation; or nil"""
Doc: CommentGroup
"""imports in this file"""
Imports: List[ImportSpec]
"""package name"""
Name: Ident
"""position of 'package' keyword"""
Package: int
"""package scope (this file only)"""
Scope: Scope
"""unresolved identifiers in this file"""
Unresolved: List[Ident]
def __init__(self,
Comments: List[CommentGroup] = None,
Decls: List[Expr] = None,
Doc: CommentGroup = None,
Imports: List[ImportSpec] = None,
Name: Ident = None,
Package: int = 1,
Scope: Scope = None,
Unresolved: List[Ident] = None,
**kwargs) -> None:
self.Comments = Comments or []
set_list_type(self.Comments, "ast.CommentGroup")
self.Decls = Decls or []
set_list_type(self.Decls, "ast.Decl")
self.Doc = Doc
self.Imports = Imports or []
set_list_type(self.Imports, "*ast.ImportSpec")
self.Name = Name
self.Package = Package
self.Scope = Scope
self.Unresolved = Unresolved or []
set_list_type(self.Unresolved, "ast.Ident")
super().__init__(**kwargs)
@classmethod
def from_Module(cls, node: ast.Module, **kwargs):
decls = build_decl_list(node.body)
return cls([], decls, None, [], Ident("main"), 1, None, [], **kwargs)
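# Usage sketch (added): an entire Python module can be lifted into a Go source file node via
#   go_file = File.from_Module(ast.parse(source))
# which translates the top-level statements into Decls and names the package "main".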
def add_import(self, node: ImportSpec):
self.Imports.append(node)
self.Decls.insert(0, GenDecl.from_ImportSpec(node))
class ForStmt(Stmt):
"""A ForStmt represents a for statement."""
_fields = ("Body", "Cond", "Init", "Post")
Body: BlockStmt
"""condition; or nil"""
Cond: Expr
"""position of 'for' keyword"""
For: int
"""initialization statement; or nil"""
Init: Expr
"""post iteration statement; or nil"""
Post: Expr
def __init__(self,
Body: BlockStmt = None,
Cond: Expr = None,
For: int = 0,
Init: Expr = None,
Post: Expr = None,
**kwargs) -> None:
self.Body = Body
self.Cond = Cond
self.For = For
self.Init = Init
self.Post = Post
super().__init__(**kwargs)
@classmethod
def from_While(cls, node: ast.While):
body = from_this(BlockStmt, node.body)
cond = build_expr_list([node.test])[0]
match cond:
case Ident(Name="true"):
return cls(Body=body)
case BasicLit(Value=x) if not json.loads(x):
return cls(Body=body, Cond=Ident.from_str("false"))
case BasicLit(Kind=x) if x in [token.INT, token.STRING, token.FLOAT]:
return cls(Body=body)
case Ident(Name="false") | Ident(Name="nil"):
return cls(Body=body, Cond=Ident.from_str("false"))
return cls(Body=body, Cond=cond)
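# Example of the mapping above (added sketch; relies on the module-level helpers used by
# from_While, so it only makes sense in the context of the full translator):
#   py_loop = ast.parse("while True:\n    pass").body[0]
#   go_loop = ForStmt.from_While(py_loop)   # Cond stays None -> renders as Go's bare `for { ... }`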
class FuncType(Expr):
"""function signature: parameters, results, and position of 'func' keyword
function type
Pointer types are represented via StarExpr nodes. A FuncType node represents a function
type.
"""
_fields = ("Params", "Results")
"""position of 'func' keyword (token.NoPos if there is no 'func')"""
Func: int
"""(incoming) parameters; non-nil"""
Params: FieldList
"""(outgoing) results; or nil"""
Results: FieldList
def __init__(self,
Func: int = 0,
Params: FieldList = None,
Results: FieldList = None,
**kwargs) -> None:
self.Func = Func
self.Params = Params
self.Results = Results
super().__init__(**kwargs)
@classmethod
def from_FunctionDef(cls, node: ast.FunctionDef, **kwargs):
params = from_this(FieldList, node.args)
match node.returns:
case None | ast.Constant(value=None):
results = None
case _:
results = from_this(FieldList, node.returns)
return cls(0, params, results, **kwargs)
@classmethod
def from_AsyncFunctionDef(cls, node: ast.AsyncFunctionDef, **kwargs):
func_type = cls.from_FunctionDef(node, **kwargs)
# TODO: Multiple return values should be a struct or something?
match func_type.Results:
case FieldList(List=[f]):
f.Type = ChanType(Value=f.Type, Dir=2)
case _:
func_type.Results = FieldList(List=[Field(Type=ChanType(Dir=2))])
return func_type
@classmethod
def from_Lambda(cls, node: ast.Lambda, **kwargs):
params = from_this(FieldList, node.args)
t = build_expr_list([node.body])[0]._type()
return cls(0, params, FieldList(List=[Field(Type=t)]) if t else FieldList(List=[]), **kwargs)
class FuncDecl(Decl):
"""A FuncDecl node represents a function declaration."""
_fields = ("Body", "Doc", "Name", "Recv", "Type")
"""function body; or nil for external (non-Go) function"""
Body: BlockStmt
"""associated documentation; or nil"""
Doc: CommentGroup
"""function/method name"""
Name: Ident
"""receiver (methods); or nil (functions)"""
Recv: FieldList
"""function signature: parameters, results, and position of 'func' keyword"""
Type: FuncType
def __init__(self,
Body: BlockStmt = None,
Doc: CommentGroup = None,
Name: Ident = None,
Recv: FieldList = None,
Type: FuncType = None,
**kwargs) -> None:
self.Body = Body
self.Doc = Doc
self.Name = Name
self.Recv = Recv
self.Type = Type
super().__init__(**kwargs)
@classmethod
def from_FunctionDef(cls, node: ast.FunctionDef, **kwargs):
body = from_this(BlockStmt, node.body)
doc = None
name = from_this(Ident, node.name)
recv = None
_type = FuncType.from_FunctionDef(node)
return cls(body, doc, name, recv, _type, **kwargs)
@classmethod
def from_AsyncFunctionDef(cls, node: ast.AsyncFunctionDef, **kwargs):
if node.name == "main":
# TODO: There are other situations where we probably want to just disregard
# the async aspect of the function definition, likely more easy to interpret
# as part of post-processing.
# One idea for how I could handle this in post without reversing
# all the logic is to store a deepcopy of the functionDef version as part of the
# asyncFunctionDef and recover it in an early transformation stage.
# For now, though, this will do.
return cls.from_FunctionDef(node, **kwargs)
r = Ident("r")
_type = FuncType.from_AsyncFunctionDef(node)
body = from_this(BlockStmt, node.body)
# TODO: Multi-return?
_replace_nodes(
body,
replacer=lambda x: SendStmt(Chan=r, Value=x.Results[0]) if isinstance(x, ReturnStmt) else x,
skipper=lambda x: isinstance(x, FuncLit)
)
body.List.insert(0, DeferStmt(Call=Ident("close").call(r)))
make_chan_type = ChanType(Value=_type.Results.List[0].Type.Value, Dir=3)
body = BlockStmt(List=[
r.assign(Ident("make").call(make_chan_type)),
GoStmt(Call=FuncLit(Body=body).call()),
r.return_()
])
doc = None
name = from_this(Ident, node.name)
recv = None
return cls(body, doc, name, recv, _type, **kwargs)
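# Shape of the translation above (added sketch, Go shown informally inside this comment): an
# `async def f() -> T` becomes roughly
#   func f() <-chan T { r := make(chan T); go func() { defer close(r); ...; r <- result }(); return r }
# i.e. the coroutine is modelled as a goroutine feeding results into a channel that is returned
# immediately to the caller.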
@classmethod
def from_global_code(cls, node: ast.AST, **kwargs):
body = from_this(BlockStmt, [node])
doc = None
name = from_this(Ident, "init")
recv = None
_type = FuncType(0, FieldList(0, []), FieldList(0, []))
return cls(body, doc, name, recv, _type, **kwargs)
class FuncLit(Expr):
"""A FuncLit node represents a function literal."""
_fields = ("Body", "Type")
"""function body"""
Body: BlockStmt
"""function type"""
Type: FuncType
CONVERSION_ORDER = {
ast.Lambda: -1,
ast.FunctionDef: -1
}
def __init__(self,
Body: BlockStmt = None,
Type: FuncType = None,
**kwargs) -> None:
self.Body = Body
self.Type = Type or FuncType()
super().__init__(**kwargs)
@classmethod
def from_FunctionDef(cls, node: ast.FunctionDef, **kwargs):
body = from_this(BlockStmt, node.body)
_type = from_this(FuncType, node)
return cls(body, _type, **kwargs)
@classmethod
def from_Lambda(cls, node: ast.Lambda, **kwargs):
body = BlockStmt(List=[ReturnStmt(build_expr_list([node.body]))])
_type = from_this(FuncType, node)
return cls(body, _type, **kwargs)
def _type(self, scope: Optional['Scope']=None, interface_ok=False, **kwargs):
return self.Type or super()._type(scope, interface_ok, **kwargs)
class GenDecl(Decl):
"""A GenDecl node (generic declaration node) represents an import, constant, type or
variable declaration. A valid Lparen position (Lparen.IsValid()) indicates a
parenthesized declaration. Relationship between Tok value and Specs element type:
token.IMPORT *ImportSpec token.CONST *ValueSpec token.TYPE *TypeSpec token.VAR
*ValueSpec
"""
_fields = ("Doc", "Specs", "Tok")
"""associated documentation; or nil"""
Doc: CommentGroup
"""position of '(', if any"""
Lparen: int
"""position of ')', if any"""
Rparen: int
Specs: List[Expr]
"""IMPORT, CONST, TYPE, VAR"""
Tok: token
"""position of Tok"""
TokPos: int
def __init__(self,
Doc: CommentGroup = None,
Lparen: int = 0,
Rparen: int = 0,
Specs: List[Expr] = None,
Tok: token = 0,
TokPos: int = 0,
**kwargs) -> None:
self.Doc = Doc
self.Lparen = Lparen
self.Rparen = Rparen
self.Specs = Specs or []
set_list_type(self.Specs, "ast.Spec")
self.Tok = Tok
self.TokPos = TokPos
super().__init__(**kwargs)
@classmethod
def from_Import(cls, node: ast.Import, **kwargs):
return cls(Specs=[ImportSpec(None, | |
' '.join(args)
if 0 != os.system(cmd):
fail('Command failed: %s' % cmd)
return 0
# def homog(x):
# return np.concatenate([x, [1.]])
# def inhomog(x):
# y = x[:-1]
# y /= x[-1]
# return y
def homog(X):
# assumes X is either a vector or a matrix of n vectors of the shape (d by n)
X = np.asarray(X)
if X.ndim == 1:
return np.concatenate([X, [1.]])
else:
return np.vstack([X, np.ones(X.shape[1])])
def inhomog(x):
y = x[:-1]
y /= x[-1]
return y
# def transform_pt(T, x):
# return inhomog(np.dot(T, homog(x)))
def homog_transform(H, X):
return inhomog(np.dot(H, homog(X)))
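# A small illustrative test for the homogeneous-coordinate helpers (added; mirrors the file's
# existing test_* style and assumes only numpy):
def test_homog_transform():
    H = np.eye(3)                       # identity homography leaves points unchanged
    assert np.allclose(homog_transform(H, np.array([2., 3.])), [2., 3.])
    X = np.array([[1., 2.], [3., 4.]])  # two column vectors stacked as a (2, n) matrix
    assert homog(X).shape == (3, 2)     # a row of ones is appended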
def array_values(v):
return set(v.flat)
def pca(X):
""" rows of X are individual pts """
mean_pt = np.mean(X, axis = 0)
centered = X - np.tile(mean_pt[np.newaxis, :], (X.shape[0], 1))
[U, S, Vt] = np.linalg.svd(np.dot(centered.T,centered))
return U.T, S, mean_pt
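# Illustrative test for pca (added): collinear points along the x-axis should give a first
# principal direction parallel to (1, 0), a vanishing second singular value and the correct mean.
def test_pca():
    X = np.array([[0., 0.], [1., 0.], [2., 0.], [3., 0.]])
    U_T, S, mean_pt = pca(X)
    assert np.allclose(np.abs(U_T[0]), [1., 0.])
    assert np.allclose(S[1], 0.)
    assert np.allclose(mean_pt, [1.5, 0.])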
def indicator(n, i):
a = np.zeros(n)
if 0 <= i < n:
a[i] = 1.
return a
def check(cond, str = 'Check failed!'):
if not cond:
fail(str)
def rect_area((x, y, w, h)): return w*h
# def rect_center((x,y,w,h)):
# return (x + w/2, y + h/2)
def perm(n):
a = range(n)
random.shuffle(a)
return a
def coin_flip(p = 0.5):
return random.random() < p
def constrain_angle(angle):
""" returns equivalent angle theta such that 0 <= theta <= 2*pi"""
return np.mod(angle, 2*math.pi)
def test_constrain_angle():
assert f_eq(constrain_angle(math.pi), math.pi)
assert f_eq(constrain_angle(2*math.pi + 1), 1)
assert f_eq(constrain_angle(4*math.pi + 1), 1)
assert f_eq(constrain_angle(-4*math.pi + 1), 1)
assert f_eq(constrain_angle(-0.01), 2*math.pi - 0.01)
def make_angle_positive(angle):
""" mirror angles that point towards negative y axis """
angle = constrain_angle(angle)
return angle if angle < math.pi else angle - math.pi
def make_temp(ext, contents = None, dir = None):
fd, fname = tempfile.mkstemp(ext, prefix = 'ao_', dir = dir)
os.close(fd)
if contents is not None:
make_file(fname, contents)
return os.path.abspath(fname)
#def nfs_dir(): return '/csail/vision-billf5/aho/tmp'
def nfs_dir(): return '/data/vision/billf/aho-vis/tmp'
def make_temp_nfs(ext, contents = None):
return make_temp(ext, contents, dir = nfs_dir())
def make_temp_dir(dir = None):
return tempfile.mkdtemp(dir = dir)
def make_temp_dir_big():
dir_base = '/scratch/aho/stuff'
if not os.path.exists(dir_base):
mkdir(dir_base)
return make_temp_dir(dir = dir_base)
#fd, fname = tempfile.mkdtemp(dir = dir)
#os.close(fd)
#return fname
class toplevel:
""" Decorator that places all of a function's local variables into the local variables of the caller """
# not sure if it's possible to implement this!
pass
# def sorted_by_key(lst, keys):
# inds = range(len(lst))
# inds.sort(key = lambda i: keys[i])
# return take_inds(lst, inds)
def sorted_by_key(keys, lsts, reverse = False):
# inds = range(len(keys))
# inds.sort(key = lambda i: keys[i], reverse = reverse)
inds = np.argsort(keys)
if reverse:
inds = inds[::-1]
return [take_inds(lst, inds) for lst in lsts]
def crop_rect_to_img((x, y, w, h), im):
# redundant and bad code
x1, y1 = x, y
x2, y2 = x+w-1, y+h-1
x1 = max(x1, 0)
x2 = min(x2, im.shape[1]-1)
y1 = max(y1, 0)
y2 = min(y2, im.shape[0]-1)
x, y = x1, y1
w = 1 + x2 - x1
h = 1 + y2 - y1
return (x, y, max(0, w), max(h, 0))
def test_crop_rect():
assert crop_rect_to_img((10, 10, 1, 1), np.zeros((3,3)))[2] == 0
assert crop_rect_to_img((0, 0, 1, 1), np.zeros((3,3))) == (0, 0, 1, 1)
assert crop_rect_to_img((0, 0, 3, 3), np.zeros((3,3))) == (0, 0, 3, 3)
assert crop_rect_to_img((-1, 0, 2, 1), np.zeros((3,3))) == (0, 0, 1, 1)
def closest_pt_i(pts, X):
X = np.array(X)
return argmini([np.dot((y - X), (y - X)) for y in map(np.array, pts)])
def alternate(*args):
return flatten(zip(*args))
def pts_from_rect_outside(r):
""" returns start_pt, end_pt where end_pt is _outside_ the rectangle """
return (r[0], r[1]), ((r[0] + r[2]), (r[1] + r[3]))
def pts_from_rect_inside(r):
""" returns start_pt, end_pt where end_pt is _inside_ the rectangle """
return (r[0], r[1]), ((r[0] + r[2] - 1), (r[1] + r[3] - 1))
def rect_corners_inside(r):
(x1, y1), (x2, y2) = pts_from_rect_inside(r)
return (x1, y1), (x2, y1), (x2, y2), (x1, y2)
pts_from_rect = pts_from_rect_inside
def rect_from_pts(x1, y1, x2, y2):
return (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
def center_point(pts):
# """ Choose the point closest to the center of the point set, as
# measured by the median distance to every other point. O(n^2)."""
best_err = np.inf
best_pt = pts[0]
for p1 in pts:
err = np.median([pylab.dist(p1, p2) for p2 in pts])
#err = np.sum([np.sum((p1 - p2)**2)**0.5 for p2 in pts])
if err < best_err:
best_err = err
best_pt = p1
return best_pt
def rect_contains_pt((rx, ry, w, h), x, y):
return rx <= x < rx + w and ry <= y < ry + h
def list_of_lists(n): return [[] for x in xrange(n)]
def list_of_sets(n): return [set() for x in xrange(n)]
def list_of_dicts(n): return [{} for x in xrange(n)]
def repf(f, n): return [f() for x in xrange(n)]
def argmini(lst):
least = numpy.inf
leasti = None
for i, x in enumerate(lst):
if x < least:
least = x
leasti = i
return leasti
def argmaxi(lst):
return argmini(-x for x in lst)
# # if multiple equal choices, picks one at random
def argminf(f, lst):
least = numpy.inf
leastx = None
seen_eq = 1
for x in lst:
fx = f(x)
if fx < least:
least = fx
leastx = x
seen_eq = 1
elif fx == least:
seen_eq += 1
if random.random() < 1.0/seen_eq:
leastx = x
return leastx
def argmaxf(f, lst):
return argminf(lambda x: -f(x), lst)
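# Note (added): the running acceptance probability 1.0/seen_eq in argminf above is a one-pass
# reservoir-sampling trick -- after k equal minima have been seen, each of them has survived
# with probability exactly 1/k, so ties are broken uniformly at random without a second pass.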
# todo: sample if unknown
def argmini2(lst):
if len(lst) < 2:
raise ValueError('list too small')
if lst[0] < lst[1]:
min1i = 0
min2i = 1
else:
min1i = 1
min2i = 0
for i, x in enumerate(lst):
if 1 < i:
if x < lst[min1i]:
min2i = min1i
min1i = i
elif x < lst[min2i]:
min2i = i
assert(min1i != min2i)
return min2i
def min2(lst):
return lst[argmini2(lst)]
def pad_rect((x, y, w, h), pad):
return (x - pad, y - pad, w+2*pad, h+2*pad)
def num_splits(xs, chunk_size):
return (0 if len(xs) % chunk_size == 0 else 1) + (len(xs) / chunk_size)
@yield_list
def split_n(xs, n):
assert n > 0
while len(xs) > 0:
yield xs[:n]
xs = xs[n:]
@yield_list
def split_n_pad(xs, n, pad):
assert n > 0
while len(xs) > 0:
if len(xs) < n:
yield (xs[:n] + [pad] * (n - len(xs)))
yield xs[:n]
xs = xs[n:]
def split_into(xs, pieces):
assert pieces > 0
return split_n(xs, int(math.ceil(float(len(xs))/pieces)))
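# Illustrative behaviour of the chunking helpers (added):
#   split_n([1, 2, 3, 4, 5], 2)        -> [[1, 2], [3, 4], [5]]
#   split_n_pad([1, 2, 3, 4, 5], 2, 0) -> [[1, 2], [3, 4], [5, 0]]
#   split_into([1, 2, 3, 4, 5], 2)     -> [[1, 2, 3], [4, 5]]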
def logical_and_many(*args):
return reduce(np.logical_and, args[1:], args[0])
def logical_or_many(*args):
return reduce(np.logical_or, args[1:], args[0])
def rect_centered_at(x, y, w, h):
return (x - w/2, y - h/2, w, h)
def roll_img(im, dx, dy):
return np.roll(np.roll(im, dy, axis = 1), dx, axis = 0)
def rect_intersect(r1, r2):
x1 = max(r1[0], r2[0])
y1 = max(r1[1], r2[1])
x2 = min(r1[0] + r1[2], r2[0] + r2[2])
y2 = min(r1[1] + r1[3], r2[1] + r2[3])
return (x1, y1, max(0, x2 - x1), max(0, y2 - y1))
def rect_im_intersect(im, rect):
return rect_intersect((0, 0, im.shape[1], im.shape[0]), rect)
def rect_shape_intersect(shape, rect):
return rect_intersect((0, 0, shape[1], shape[0]), rect)
def rect_empty((x, y, w, h)): return w <= 0 or h <= 0
def scale_rect(r, s):
w, h = (r[2]*s, r[3]*s)
x, y = rect_center(r)
return (x - w/2, y - h/2, w, h)
def scale_rect_coords(r, s):
return (r[0]*s, r[1]*s, r[2]*s, r[3]*s)
def mutual_overlap(r1, r2):
ro = rect_intersect(r1, r2)
a = float(ro[2]*ro[3])
return min(a/(r1[2]*r1[3]), a/(r2[2]*r2[3]))
def intersection_mask(r1, r2):
ins = rect_intersect(r1, r2)
mask1 = np.zeros((r1[3], r1[2]), dtype = np.int32)
mask2 = np.zeros((r2[3], r2[2]), dtype = np.int32)
ins1 = (ins[0] - r1[0], ins[1] - r1[1], ins[2], ins[3])
ins2 = (ins[0] - r2[0], ins[1] - r2[1], ins[2], ins[3])
mask1[ins1[1] : ins1[1] + ins1[3], ins1[0] : ins1[0] + ins1[2]] = 1
mask2[ins2[1] : ins2[1] + ins2[3], ins2[0] : ins2[0] + ins2[2]] = 1
return mask1, mask2
def rects_overlap(r1, r2):
ins = rect_intersect(r1, r2)
return ins[2] > 0 and ins[3] > 0
# def rect_jaccard((x1, y1, w1, h1), (x2, y2, w2, h2)):
# ix1 = max(x1, x2)
# iy1 = max(y1, y2)
# ix2 = min(x1 + w1, x2 + w2)
# iy2 = min(y1 + h1, y2 + h2)
# w = ix2 - ix1 + 1
# h = iy2 - iy1 + 1
# if w <= 0 or h <= 0:
# return 0
def test_rect_intersect():
r1 = (0, 0, 100, 100)
r2 = (50, 50, 25, 25)
assert rect_intersect(r1, r2) == (50, 50, 25, 25)
r3 = (-1, 2, 100, 102)
assert rect_intersect(r1, r3) == (0, 2, 99, 98)
def test_intersection_mask():
r1 = (0, 0, 5, 5)
r2 = (3, 3, 6, 6)
mask1, mask2 = intersection_mask(r1, r2)
assert np.all(mask1 == np.int32(A('0 0 | |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 12:43:49 2019
@author: bdobson
"""
import constants
import initialise
import options
from tqdm import tqdm
import pandas as pd
from copy import deepcopy
class model:
def __init__(self,addresses):
"""Initialise all state variables and parameters
These are set to estimates for London
"""
self.state_variables = None
self.reset_states()
self.parameters = None
self.reset_parameters()
"""Load data at addresses
"""
self.input_variables = initialise.get_inputs(addresses)
self.parameters['ltoa'] = initialise.get_ltoa(addresses)
"""Define timestep functions
Define the set of functions each of which represents a different process in a timestep
"""
self.model_list = initialise.get_models()
self.init_model = self.copy()
def reset_states(self):
#Resets state variables to initial ones
self.state_variables = initialise.get_state_variables()
def reset_parameters(self):
#Resets parameters to initial ones
self.parameters = initialise.get_parameters()
def change_parameter(self, *args):
#Iterate over parameter-value pairs to set new values
i = 0
while i < len(args):
if isinstance(args[i],str):
self.parameters[args[i]] = args[i+1]
else:
print('Error: args must be provided in parameter-value pairs \n(e.g. \nchange_parameter("area",1200)\n)\n\nReturning None')
return None
i+=2
def change_state_variable(self, *args):
#Iterate over state_variable-value pairs to set new values
i = 0
while i < len(args):
if isinstance(args[i],str):
self.state_variables[args[i]] = args[i+1]
else:
print('Error: args must be provided in state_variable-value pairs \n(e.g. \nchange_state_variable("reservoir_volume",1200)\n)\n\nReturning None')
return None
i+=2
def add_model(self, args):
#Append new models to list
self.model_list.append(args)
def remove_model(self,args):
#Check that models are in list and remove them
for fn in args:
if fn in self.model_list:
self.model_list.remove(fn)
else:
print('Warning: ' + str(fn) + ' not found in model_list')
def copy(self):
#Copy a model
return deepcopy(self)
def add_option(self,names):
#Call the add_option function to implement options in the model
options.add_option(self,names)
def options_list(self):
#Return the list of available options
return options.options_list()
def check_mass_balance(self):
in_ = (self.state_variables['flow'] + \
self.state_variables['precipitation'] * self.parameters['area'] * constants.MM_KM2_TO_ML + \
self.state_variables['groundwater_to_freshwater_treatment'])
out_ = (self.state_variables['upstream_abstractions'] + \
self.state_variables['freshwater_treatment_losses'] + \
self.state_variables['distribution_leakage'] + \
self.state_variables['household_consumed'] + \
self.state_variables['natural_stormwater_overflow'] + \
self.state_variables['stormwater_overflow'] + \
self.state_variables['sewerage_leakage'] + \
self.state_variables['untreated_effluent'] + \
self.state_variables['treated_effluent'] + \
self.state_variables['denaturalised_teddington_flow'] + \
self.state_variables['wastewater_treatment_losses'] + \
self.state_variables['natural_stormwater_storage_dissipation'] + \
self.state_variables['impermeable_surface_storage_dissipation'])
ds_ = self.init_model.state_variables['reservoir_volume'] - self.state_variables['reservoir_volume'] + \
self.init_model.state_variables['natural_stormwater_storage_volume'] - self.state_variables['natural_stormwater_storage_volume'] + \
self.init_model.state_variables['impermeable_surface_storage_volume'] - self.state_variables['impermeable_surface_storage_volume'] + \
self.init_model.state_variables['rainwater_harvesting_volume'] - self.state_variables['rainwater_harvesting_volume'] + \
self.init_model.state_variables['service_reservoir_volumes'] - self.state_variables['service_reservoir_volumes'] + \
self.init_model.state_variables['wastewater_temporary_storage_volume'] - self.state_variables['wastewater_temporary_storage_volume']
if (in_ + ds_) - out_ > constants.FLOAT_ACCURACY:
print('mass balance error at : ' + self.state_variables['date'].strftime('%Y-%m-%d'))
def run(self,fast = None, debug = None):
state_variables_timevarying = [] # state_variables is stored in this at the beginning of every timestep
if fast is not True:
#Iterate over all dates
for date in tqdm(self.input_variables.index):
#Set date
self.state_variables['date'] = date
#Store input variables in state variables
for input_var in self.input_variables.columns:
self.state_variables[input_var] = self.input_variables.loc[date, input_var]
#Iterate over model list
for fn in self.model_list:
fn(self.state_variables,self.parameters)
#Store state variables
state_variables_timevarying.append(self.state_variables.copy())
else:
#A ~2x faster but less readable version of run
dates = self.input_variables.index
values = self.input_variables.values
value_names = self.input_variables.columns
#Iterate over all dates
for i in tqdm(range(0,len(dates))):
#Set date
self.state_variables['date'] = dates[i]
#Store input variables in state variables
for j in range(0,len(value_names)):
self.state_variables[value_names[j]] = values[i,j]
#Iterate over model list
for fn in self.model_list:
fn(self.state_variables,self.parameters)
#Store state variables
state_variables_timevarying.append(self.state_variables.copy())
#Check mass balance
if debug == True:
self.check_mass_balance()
self.init_model.state_variables = deepcopy(self.state_variables)
#Return dataframe of results
return pd.DataFrame(state_variables_timevarying).set_index('date')
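#Minimal usage sketch (added; 'addresses' is whatever collection of file paths initialise.get_inputs expects):
#   m = model(addresses)
#   results = m.run(fast = True, debug = True) #pandas DataFrame indexed by date
#   results['reservoir_volume'].plot()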
def abstraction(state_variables, parameters):
#Evaluate LTOA
pct_full = state_variables['reservoir_volume']/parameters['reservoir_capacity']
levels = parameters['ltoa']['levels'][state_variables['date'].month - 1]
mrfs = parameters['ltoa']['mrfs'][state_variables['date'].month - 1]
if pct_full > levels[0]:
state_variables['restrictions'] = 0
state_variables['river_minimum_flow'] = mrfs[0]
elif pct_full > levels[1]:
state_variables['restrictions'] = 0
state_variables['river_minimum_flow'] = mrfs[1]
elif pct_full > levels[2]:
state_variables['restrictions'] = 1
state_variables['river_minimum_flow'] = mrfs[1]
elif pct_full > levels[3]:
state_variables['restrictions'] = 2
state_variables['river_minimum_flow'] = mrfs[2]
elif pct_full > levels[4]:
state_variables['restrictions'] = 3
state_variables['river_minimum_flow'] = mrfs[3]
else:
state_variables['restrictions'] = 4
state_variables['river_minimum_flow'] = mrfs[3]
#Available flow for abstraction
flow = state_variables['flow'] #Store in variable to improve performance
state_variables['upstream_abstractions'] = min(parameters['upstream_abstractions'],flow)
flow_upstream_of_teddington = flow -\
state_variables['upstream_abstractions'] +\
state_variables['treated_effluent_to_abstraction_point']
flow_above_mrf = max(flow_upstream_of_teddington - state_variables['river_minimum_flow'],0)
#Target abstraction
target_abstraction = parameters['target_river_abstraction']
#Don't overabstact more than the maximum beneficial abstraction
target_abstraction = min(target_abstraction,parameters['reservoir_capacity'] - \
state_variables['reservoir_volume'] +\
state_variables['freshwater_treatment_plant_demand'] -\
parameters['target_groundwater_abstraction'])
#Apply nopump_rule
if (state_variables['reservoir_volume'] > parameters['nopump_volume']) & (state_variables['precipitation'] > parameters['nopump_precip']):
target_abstraction = 0
#Calculate actual river abstraction
target_abstraction = min(target_abstraction,flow_above_mrf)
state_variables['river_to_freshwater_treatment'] = max(target_abstraction -\
(parameters['reservoir_capacity'] -\
state_variables['reservoir_volume'])
,0)
state_variables['river_to_reservoir'] = target_abstraction - state_variables['river_to_freshwater_treatment']
state_variables['reservoir_volume'] += state_variables['river_to_reservoir']
state_variables['denaturalised_teddington_flow'] = flow -\
state_variables['river_to_freshwater_treatment'] -\
state_variables['river_to_reservoir']
#Double check MRF
if (state_variables['denaturalised_teddington_flow'] < state_variables['river_minimum_flow']) & (state_variables['river_to_reservoir'] + state_variables['river_to_freshwater_treatment'] > 0):
print('MRF overabstraction at : ' + state_variables['date'].strftime('%Y-%m-%d'))
def release(state_variables, parameters):
#Calculate target reservoir release
target_reservoir_to_treatment = state_variables['freshwater_treatment_plant_demand'] -\
state_variables['river_to_freshwater_treatment'] -\
parameters['target_groundwater_abstraction']
#Aim to not draw below control curve, increase groundwater abstraction if necessary
distance_above_L1 = state_variables['reservoir_volume'] -\
parameters['reservoir_capacity'] *\
parameters['ltoa']['levels'][state_variables['date'].month - 1][1] -\
target_reservoir_to_treatment
if distance_above_L1 < 0:
state_variables['groundwater_to_freshwater_treatment'] = min(parameters['available_groundwater_abstraction'],\
max(-distance_above_L1,parameters['target_groundwater_abstraction']))
target_reservoir_to_treatment -= state_variables['groundwater_to_freshwater_treatment']
else:
state_variables['groundwater_to_freshwater_treatment'] = parameters['target_groundwater_abstraction']
#Implement release
state_variables['reservoir_to_freshwater_treatment'] = min(target_reservoir_to_treatment,state_variables['reservoir_volume'])
state_variables['reservoir_volume'] -= state_variables['reservoir_to_freshwater_treatment']
#Double check treatment is not oversatisfied
if (state_variables['reservoir_to_freshwater_treatment'] + state_variables['groundwater_to_freshwater_treatment'] + state_variables['river_to_freshwater_treatment']) > state_variables['freshwater_treatment_plant_demand']:
print('Treatment oversatisfied at :' + state_variables['date'].strftime('%Y-%m-%d'))
#Double check reservoir is not overfilled
if state_variables['reservoir_volume'] > parameters['reservoir_capacity']:
print('Reservoir overfilled at : ' + state_variables['date'].strftime('%Y-%m-%d'))
if state_variables['reservoir_volume'] < 0 :
print('Reservoir < 0 at : ' + state_variables['date'].strftime('%Y-%m-%d'))
if state_variables['reservoir_to_freshwater_treatment'] + state_variables['groundwater_to_freshwater_treatment'] > state_variables['freshwater_treatment_plant_demand']:
print('Freshwater treatment oversupplied at : ' + state_variables['date'].strftime('%Y-%m-%d'))
def calculate_consumer_demand(state_variables, parameters):
#Calculate demand
baseline_demand = parameters['number_of_households'] *\
parameters['per_household_consumption'] * constants.L_TO_ML +\
parameters['non_household_consumption']
#Check whether to enact restrictions
baseline_demand *= (1 - parameters['restrictions_pct_reduction'][state_variables['restrictions']] * constants.PCT_TO_PROP)
#Apply seasonal demand profile
demand = baseline_demand*parameters['demand_profile'][state_variables['date'].month - 1]
#Satisfy some of the outdoor demand directly with rainfall where possible
state_variables['outdoor_demand'] = demand * parameters['percent_of_demand_satisfiable_by_rainfall'] * constants.PCT_TO_PROP
precipitation_over_gardens = state_variables['precipitation'] * parameters['garden_area'] * constants.MM_KM2_TO_ML
state_variables['supplied_by_rain'] = min(state_variables['outdoor_demand'], precipitation_over_gardens)
#Supply remaining with rainfall harvesting
if state_variables['supplied_by_rain'] < state_variables['outdoor_demand']:
state_variables['supplied_by_harvested'] = min(state_variables['rainwater_harvesting_volume'], (state_variables['outdoor_demand'] - state_variables['supplied_by_rain']) * parameters['rainwater_harvesting_penetration'] * constants.PCT_TO_PROP)
else:
state_variables['supplied_by_harvested'] = 0
# if state_variables['precipitation'] > 5:
# Empty water butts in impending storm
# state_variables['rainwater_harvesting_volume'] *= 0.7
demand -= (state_variables['supplied_by_harvested'] + state_variables['supplied_by_rain'])
state_variables['rainwater_harvesting_volume'] -= state_variables['supplied_by_harvested']
state_variables['consumer_demand'] = demand
def calculate_distribution_demand(state_variables, parameters):
#Calculate distribution demand
state_variables['distribution_demand'] = state_variables['consumer_demand']/(1 - parameters['distribution_leakage']*constants.PCT_TO_PROP)
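#Worked example (added): with 25 % distribution leakage, a consumer demand of 750 Ml/d requires
#750 / (1 - 0.25) = 1000 Ml/d of distribution input, of which 250 Ml/d is lost as leakage and
#750 Ml/d reaches consumers.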
def freshwater_treatment(state_variables, parameters):
#Find WTW input and losses
treatment_input = state_variables['reservoir_to_freshwater_treatment'] + \
state_variables['river_to_freshwater_treatment'] +\
state_variables['groundwater_to_freshwater_treatment']
state_variables['freshwater_treatment_losses'] = treatment_input*parameters['freshwater_treatment_processing_losses']*constants.PCT_TO_PROP
#Calculate WTW output, first fill service reservoirs and send remainder to distribution
treatment_output = treatment_input - state_variables['freshwater_treatment_losses']
state_variables['treatment_output_to_service_reservoirs'] = min(parameters['service_reservoir_capacity'] - state_variables['service_reservoir_volumes'], treatment_output)
state_variables['service_reservoir_volumes'] += state_variables['treatment_output_to_service_reservoirs']
state_variables['treatment_output_to_distribution'] = treatment_output - state_variables['treatment_output_to_service_reservoirs']
#Update WTW demand
target_demand = state_variables['distribution_demand'] + max(parameters['service_reservoir_capacity'] - state_variables['service_reservoir_volumes'],0)
target_demand = min(target_demand,parameters['freshwater_treatment_maximum_capacity'])
target_demand = max(target_demand,parameters['freshwater_treatment_minimum_capacity'])
target_demand = min(target_demand,state_variables['freshwater_treatment_plant_demand'] + parameters['freshwater_treatment_max_rate_change'])
target_demand = max(target_demand,state_variables['freshwater_treatment_plant_demand'] - parameters['freshwater_treatment_max_rate_change'])
state_variables['freshwater_treatment_plant_demand'] = target_demand/(1 - parameters['freshwater_treatment_processing_losses']*constants.PCT_TO_PROP)
def distribution(state_variables, parameters):
#Take any extra needed water from service reservoirs
target_from_service_reservoirs = max(state_variables['distribution_demand'] - state_variables['treatment_output_to_distribution'], 0)
target_from_service_reservoirs = min(target_from_service_reservoirs,state_variables['service_reservoir_volumes'])
state_variables['service_reservoir_volumes'] -= target_from_service_reservoirs
if state_variables['service_reservoir_volumes'] > parameters['service_reservoir_capacity']:
print('Service reservoir volumes > capacity at : ' + state_variables['date'].strftime('%Y-%m-%d'))
if state_variables['service_reservoir_volumes'] < 0:
print('Service reservoir volumes < 0 at : ' + state_variables['date'].strftime('%Y-%m-%d'))
state_variables['distribution_input'] = target_from_service_reservoirs + state_variables['treatment_output_to_distribution']
#Split distribution input between leakage and households
state_variables['distribution_leakage'] = state_variables['distribution_input'] * parameters['distribution_leakage']*constants.PCT_TO_PROP
state_variables['consumer_supplied'] = state_variables['distribution_input'] - state_variables['distribution_leakage']
if state_variables['distribution_input'] > parameters['distribution_network_capacity']:
print('distribution_input > distribution_network_capacity at : ' + state_variables['date'].strftime('%Y-%m-%d'))
def calculate_household_output(state_variables, parameters):
state_variables['treated_used_outdoors'] = state_variables['outdoor_demand'] - (state_variables['supplied_by_rain'] + state_variables['supplied_by_harvested'])
if (state_variables['supplied_by_rain'] + state_variables['supplied_by_harvested'] + state_variables['treated_used_outdoors'] - state_variables['outdoor_demand']) > constants.FLOAT_ACCURACY:
print('Outdoor supplied > outdoor demand at ' + state_variables['date'].strftime('%Y-%m-%d'))
state_variables['household_consumed'] = (state_variables['consumer_supplied'] - state_variables['treated_used_outdoors'])*parameters['household_percentage_non_returned']*constants.PCT_TO_PROP
state_variables['household_output'] = (state_variables['consumer_supplied'] - state_variables['treated_used_outdoors']) - state_variables['household_consumed']
def urban_runoff(state_variables, parameters):
precipitation_over_london = state_variables['precipitation'] * parameters['area'] * constants.MM_KM2_TO_ML
impermeable_precipitation = precipitation_over_london * parameters['percent_impermeable'] * constants.PCT_TO_PROP
#Update rainwater harvesting roofs
harvested_roof_precipitation = precipitation_over_london * parameters['roof_area']/parameters['area'] * parameters['rainwater_harvesting_penetration'] * constants.PCT_TO_PROP
harvested_roof_spill = max(state_variables['rainwater_harvesting_volume'] - parameters['rainwater_harvesting_storage_capacity'] + harvested_roof_precipitation,0)
harvested_roof_precipitation -= harvested_roof_spill
state_variables['harvested_roof_spill'] = harvested_roof_spill
impermeable_precipitation -= harvested_roof_precipitation
state_variables['rainwater_harvesting_volume'] += harvested_roof_precipitation
#Update volume of impermeable storage and its dissipation, noting runoff
state_variables['impermeable_surface_storage_volume'] += impermeable_precipitation
state_variables['impermeable_surface_storage_dissipation'] = min(parameters['impermeable_surface_storage_dissipation_rate'], state_variables['impermeable_surface_storage_volume'])
state_variables['impermeable_surface_storage_volume'] -= state_variables['impermeable_surface_storage_dissipation']
impermeable_runoff | |
%s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
if bound >= 2**24:
print("\n[Line %4d] %s\nERROR: Parameter \"bound = %d\" too large, must be less than 2**24\n" % (lines[pc], instr, bound))
exit()
# Update register value
proc_regs["reg"] = bound
cycles = 2
# Compare infinity norm of polynomial with specified bound and update flag
count = 0
for i in range(param_n):
if poly_mem[poly][i] > bound and poly_mem[poly][i] < (param_q - bound):
count = count + 1
if count == 0:
proc_regs["flag"] = 1
else:
proc_regs["flag"] = 0
cycles = cycles + 2 + 1 + 1 + param_n
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_inf_norm_check"]]*cycles)
return 6
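# Equivalent check in plain numpy (added for clarity; 'coeffs' is the length-n coefficient array
# reduced mod q and 'bound' the register value parsed above):
#   ok = np.logical_or(coeffs <= bound, coeffs >= param_q - bound)
#   proc_regs["flag"] = 1 if np.all(ok) else 0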
# INSTRUCTION - Register Comparison
matchObj = re.match(r'flag=compare\(c(\d),(\d+)\)', instr_t, re.M|re.I)
if matchObj:
reg = int(matchObj.group(1))
val = int(matchObj.group(2))
if reg > 1:
print("\n[Line %4d] %s\nERROR: No such register \"c%d\", please use \"c0\" or \"c1\"\n" % (lines[pc], instr, reg))
exit()
if val >= 2**16:
print("\n[Line %4d] %s\nERROR: Value %s too big for 16-bit register \"c%d\"\n" % (lines[pc], instr, val, reg))
exit()
# Compare register value and update flag
if proc_regs["c%s" % reg] < val:
proc_regs["flag"] = -1
elif proc_regs["c%s" % reg] > val:
proc_regs["flag"] = 1
else:
proc_regs["flag"] = 0
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
matchObj = re.match(r'flag=compare\(reg,(\d+)\)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %s too big for 24-bit register \"reg\"\n" % (lines[pc], instr, val))
exit()
# Compare register value and update flag
if proc_regs["reg"] < val:
proc_regs["flag"] == -1
elif proc_regs["reg"] > val:
proc_regs["flag"] == 1
else:
proc_regs["flag"] = 0
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
matchObj = re.match(r'flag=compare\(tmp,(\d+)\)', instr_t, re.M|re.I)
if matchObj:
val = int(matchObj.group(1))
if val >= 2**24:
print("\n[Line %4d] %s\nERROR: Value %s too big for 24-bit register \"tmp\"\n" % (lines[pc], instr, val))
exit()
# Compare register value and update flag
if proc_regs["tmp"] < val:
proc_regs["flag"] == -1
elif proc_regs["tmp"] > val:
proc_regs["flag"] == 1
else:
proc_regs["flag"] = 0
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
# INSTRUCTION - Check Flag and Jump
matchObj = re.match(r'if\(flag([!=]=)([\-\+]{0,1})([01])\)goto([\w\d_]+)', instr_t, re.M|re.I)
if matchObj:
op = matchObj.group(1)
sign = matchObj.group(2)
val = int(matchObj.group(3))
label = matchObj.group(4)
if label not in labels:
print("\n[Line %4d] %s\nERROR: Label \"%s\" not found\n" % (lines[pc], instr, label))
exit()
# Check flag value and jump
if op == "==":
if val == 0:
if proc_regs["flag"] == 0:
pc = labels[label]
else:
pc = pc + 1
if val == 1:
if sign == "+" or sign == "":
if proc_regs["flag"] == 1:
pc = labels[label]
else:
pc = pc + 1
if sign == "-":
if proc_regs["flag"] == -1:
pc = labels[label]
else:
pc = pc + 1
if op == "!=":
if val == 0:
if proc_regs["flag"] != 0:
pc = labels[label]
else:
pc = pc + 1
if val == 1:
if sign == "+" or sign == "":
if proc_regs["flag"] != 1:
pc = labels[label]
else:
pc = pc + 1
if sign == "-":
if proc_regs["flag"] != -1:
pc = labels[label]
else:
pc = pc + 1
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 6
# INSTRUCTION - SHA3 Operations
matchObj = re.match(r'sha3_init', instr_t, re.M|re.I)
if matchObj:
keccak_buf = ""
cycles = 2 + 1 + 25
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sha3"]]*cycles)
return 7
matchObj = re.match(r'sha3_(\d+)_absorb\(poly=(\d+)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
poly = int(matchObj.group(2))
if mode != 256 and mode != 512:
print("\n[Line %4d] %s\nERROR: Only SHA3-256 and SHA3-512 are supported\n" % (lines[pc], instr))
exit()
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % (lines[pc], instr, poly, param_n, int(8192/param_n)))
exit()
# Push zero-padded polynomial coefficients into Keccak buffer
for i in range(param_n):
keccak_buf = keccak_buf + hex(poly_mem[poly][i])[2:].rstrip("L").rjust(8,'0')
if mode == 256:
cycles = 2 + 1 + 1 + param_n + math.ceil(param_n/34)*(17+25)
if mode == 512:
cycles = 2 + 1 + 1 + param_n + math.ceil(param_n/18)*(9+25)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["poly_hash"]]*cycles)
return 7
matchObj = re.match(r'sha3_(\d+)_absorb\(r(\d)\)', instr_t, re.M|re.I)
if matchObj:
mode = int(matchObj.group(1))
reg = int(matchObj.group(2))
if mode != 256 and mode != 512:
print("\n[Line %4d] %s\nERROR: Only SHA3-256 and SHA3-512 are supported\n" % (lines[pc], instr))
exit()
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
# Push seed register contents into Keccak buffer
keccak_buf = keccak_buf + hex(proc_regs["r%d" % reg])[2:].rstrip("L").rjust(64,'0')
if mode == 256:
cycles = 2 + 1 + (17+25)
if mode == 512:
cycles = 2 + 1 + (9+25)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sha3"]]*cycles)
return 7
matchObj = re.match(r'r(\d)=sha3_256_digest', instr_t, re.M|re.I)
if matchObj:
reg = int(matchObj.group(1))
if reg != 0 and reg != 1:
print("\n[Line %4d] %s\nERROR: No such register \"r%d\", allowed registers are r0 and r1\n" % (lines[pc], instr, reg))
exit()
# Generate SHA3-256 digest
digest = sha3_256(keccak_buf)
proc_regs["r%d" % reg] = int(digest, 16)
keccak_buf = ""
cycles = 2 + 1 + (25+25+2)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sha3"]]*cycles)
return 7
matchObj = re.match(r'r0\|\|r1=sha3_512_digest', instr_t, re.M|re.I)
if matchObj:
# Generate SHA3-512 digest
digest = sha3_512(keccak_buf)
proc_regs["r0"] = int(digest, 16) >> 256
proc_regs["r1"] = int(digest, 16) % 2**256
keccak_buf = ""
cycles = 2 + 1 + (25+25+3)
pc = pc + 1
ticks = ticks + cycles
power = power + ([idd_dict["sha3"]]*cycles)
return 7
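# Reference note (added): the sha3_256/sha3_512 helpers used above are assumed to hash the
# byte-serialised Keccak buffer; with the standard library the analogous call would be roughly
#   hashlib.sha3_256(bytes.fromhex(keccak_buf)).hexdigest()
# (the exact byte ordering depends on how those helpers pack the hex buffer).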
# INSTRUCTION - End of Program
matchObj = re.match(r'end', instr_t, re.M|re.I)
if matchObj:
#print("end-of-program")
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return 99
# INSTRUCTION - NOP
matchObj = re.match(r'nop', instr_t, re.M|re.I)
if matchObj:
#print("no-operation")
ticks = ticks + 2
power = power + ([idd_dict["ctrl"]]*2)
return -98
# DEBUG-INSTRUCTION - Compare Encoded Polynomials (Debug Only)
# Append "iter_<iter_count>_" to all filenames in case of multiple iterations
if num_iters > 1:
f_prefix = "iter_%d_" % iter_count
else:
f_prefix = ""
matchObj = re.match(r'encode_compare\("(.*)","(.*)",encoding=([\w_]+)\)', instr_t, re.M|re.I)
if matchObj:
f1 = matchObj.group(1)
f2 = matchObj.group(2)
if not f1.endswith(".npy"):
print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f1))
f1 = f1 + ".npy"
if not f2.endswith(".npy"):
print("\n[Line %4d] %s\nWARNING: Adding .npy extension to filename \"%s\"\n" % (lines[pc], instr, f2))
f2 = f2 + ".npy"
f1 = f1.replace(os.path.basename(f1), f_prefix + os.path.basename(f1))
f2 = f2.replace(os.path.basename(f2), f_prefix + os.path.basename(f2))
encoding = matchObj.group(3)
if not os.path.exists(f1):
print("\n[Line %4d] %s\nERROR: Input file %s for \"encode_compare\" does not exist" % (lines[pc], instr, f1))
exit()
if not os.path.exists(f2):
print("\n[Line %4d] %s\nERROR: Input file %s for \"encode_compare\" does not exist" % (lines[pc], instr, f2))
exit()
b1 = encode_to_bytearray(param_n, param_q, list(np.load(f1, allow_pickle = True)), encoding, lines[pc], instr)
b2 = encode_to_bytearray(param_n, param_q, list(np.load(f2, allow_pickle = True)), encoding, lines[pc], instr)
print("poly_1 = %s" % list(np.load(f1, allow_pickle = True)))
print("poly_2 = %s" % list(np.load(f2, allow_pickle = True)))
print("byte_array_1 = %s" % b1)
print("byte_array_2 = %s" % b2)
if b1 == b2:
print("\n--- MATCH ---\n")
else:
print("\n--- NO MATCH ---\n")
pc = pc + 1
return -98
# DEBUG-INSTRUCTION - Print Encoded Polynomial (Debug Only)
matchObj = re.match(r'encode_print\(poly=(\d+),encoding=([\w_]+)\)', instr_t, re.M|re.I)
if matchObj:
poly = int(matchObj.group(1))
encoding = matchObj.group(2)
if poly >= int(8192/param_n):
print("\n[Line %4d] %s\nERROR: No such polynomial \"poly = %d\", allowed polynomials for n = %d are 0 to %d\n" % | |
import vtk, ctk, qt, slicer
from slicer.ScriptedLoadableModule import (ScriptedLoadableModule,
ScriptedLoadableModuleLogic,
ScriptedLoadableModuleWidget,
ScriptedLoadableModuleTest)
from collections import Counter
import csv
import logging
import os
from slicer.util import VTKObservationMixin
#
# DataImporter
#
class DataImporter(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Data Importer"
self.parent.categories = ["Shape Analysis Toolbox"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (UNC), <NAME> (Kitware Inc,), <NAME> (Kitware Inc.)"]
self.parent.helpText = """
This module imports label images and segmentations from files and folders and computes the topology number of each segment.
topologyNumber = cleanData.GetNumberOfPoints() - edges.GetNumberOfLines() + cleanData.GetNumberOfPolys()
"""
self.parent.acknowledgementText = """
This project is funded by NIBIB R01EB021391
""" # replace with organization, grant and thanks.
#
# DataImporterLogic
#
class DataImporterLogic(ScriptedLoadableModuleLogic):
TOPOLOGY_STRIP_TYPE = 0
TOPOLOGY_DISK_TYPE = 1
TOPOLOGY_SPHERE_TYPE = 2
TOPOLOGY_DOUBLE_TORUS_TYPE = -2
TOPOLOGY_TRIPLE_TORUS_TYPE = -4
TOPOLOGY_MULTIPLE_HOLES_TYPE = -9999
TOPOLOGY_TYPES = {
TOPOLOGY_STRIP_TYPE : 'Circle/Torus/Mobius Strip',
TOPOLOGY_DISK_TYPE : 'Disk',
TOPOLOGY_SPHERE_TYPE : 'Sphere',
TOPOLOGY_DOUBLE_TORUS_TYPE : 'Double Torus',
TOPOLOGY_TRIPLE_TORUS_TYPE : 'Triple Torus',
TOPOLOGY_MULTIPLE_HOLES_TYPE : 'Multiple Holes',
}
def __init__(self):
ScriptedLoadableModuleLogic.__init__(self)
self.saveCleanData = False
self.labelMapDict = {}
self.modelDict = {}
self.segmentationDict = {}
self.labelRangeInCohort = (-1, -1)
self.topologyDict = {}
self.polyDataDict = {}
# help variable to map continuous indices to TOPOLOGY_TYPES. Used in comboBoxes
self.topologyTypeToIndex = {
self.TOPOLOGY_STRIP_TYPE : 0,
self.TOPOLOGY_DISK_TYPE : 1,
self.TOPOLOGY_SPHERE_TYPE : 2,
self.TOPOLOGY_DOUBLE_TORUS_TYPE : 3,
self.TOPOLOGY_TRIPLE_TORUS_TYPE : 4,
self.TOPOLOGY_MULTIPLE_HOLES_TYPE : 5,
}
self.indexToTopologyType = {index: topologyType for topologyType, index in self.topologyTypeToIndex.items()}
self.expectedTopologiesBySegment = {}
self.inconsistentTopologyDict = {}
self.numberOfDifferentSegments = 0
self.dictSegmentNamesWithIntegers = dict()
self.TemplateName = ''
self.freesurfer_import = False
self.freesurfer_wanted_segments = []
self.expected_file_type = 'VolumeFile'
self.color_table_id = 'None'
def setSaveCleanData(self, save):
self.saveCleanData = save
#
# Reset all the data for data import
#
def cleanup(self):
logging.debug('Deleting nodes')
if self.labelMapDict is not None:
for nodeName in self.labelMapDict:
logging.debug('Deleting label map node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.labelMapDict[nodeName])
if self.modelDict is not None:
for nodeName in self.modelDict:
logging.debug('Deleting model node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.modelDict[nodeName])
if self.segmentationDict is not None:
for nodeName in self.segmentationDict:
logging.debug('Deleting segmentation node: ' + nodeName)
slicer.mrmlScene.RemoveNode(self.segmentationDict[nodeName])
self.labelMapDict = {}
self.modelDict = {}
self.segmentationDict = {}
self.labelRangeInCohort = (-1, -1)
self.topologyDict = {}
self.polyDataDict = {}
self.expectedTopologiesBySegment = {}
self.inconsistentTopologyDict = {}
self.TemplateName = ''
self.numberOfDifferentSegments = 0
self.dictSegmentNamesWithIntegers = dict()
def __del__(self):
self.cleanup()
def checkLabelRangeConsistency(self, inputNumberOfSegments):
"""
Check that the input number of segments is the same than the current number of segments in the cohort.
Return tuple [boolean, labelRange].
boolean is false if not consistent with current self.labelRangeInCohort. True otherwise.
labelRange is (0, inputNumberOfSegments)
"""
labelRange = (0, inputNumberOfSegments)
if self.labelRangeInCohort != (-1, -1) and labelRange != self.labelRangeInCohort:
logging.error('Label range {} does not match with the existing label range in cohort {}.'.format(labelRange, self.labelRangeInCohort))
return False, labelRange
return True, labelRange
def importLabelMap(self, path):
"""
Populate labelMapDict, segmentationDict, labelRangeInCohort
Fails if number of labels is different than pre-existing value for labelRangeInCohort
Returns false if errors, and no class variable is modified.
"""
directory, fileName = os.path.split(path)
labelMapNode = slicer.util.loadLabelVolume(path, returnNode=True)[1]
if labelMapNode is None:
logging.error('Failed to load ' + fileName + ' as a labelmap')
# make sure each one is a labelmap
return False
file_name = os.path.splitext(fileName)[0]
if self.freesurfer_import == True:
subject_name = os.path.split(os.path.split(directory)[0])[1]
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", subject_name+' '+file_name)
else:
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", labelMapNode.GetName())
segmentationLogic = slicer.modules.segmentations.logic()
segmentationLogic.ImportLabelmapToSegmentationNode(labelMapNode,
segmentationNode)
labelMapNode.SetDisplayVisibility(False)
segmentationNode.SetDisplayVisibility(False)
segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
# if importing from freesurfer
if self.freesurfer_import == True:
to_remove_ids = []
freesurfer_found_segments = []
for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
segmentName = segmentationNode.GetSegmentation().GetSegment(segmentId).GetName()
if segmentName not in self.freesurfer_wanted_segments:
to_remove_ids.append(segmentId)
else:
freesurfer_found_segments.append(segmentName)
label_id = segmentId.split('_')[-1]
seg_name = self.freesurfer_lut_dict[label_id]['name']
color = self.freesurfer_lut_dict[label_id]['color']
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment_name = seg_name
segment.SetName(segment_name)
# segment.SetName(seg_name)
segment.SetColor(color)
if len(freesurfer_found_segments) != len(self.freesurfer_wanted_segments):
unpresent_segments = self.freesurfer_wanted_segments[:]
for seg in freesurfer_found_segments:
del unpresent_segments[unpresent_segments.index(seg)]
unpresent_segments = [self.freesurfer_lut_dict[x.split('_')[-1]]['name'] for x in unpresent_segments]
logging.warning('Unable to find all segments, {} have not been found.'.format(unpresent_segments))
logging.warning('LabelMap in path: {} has not been loaded into segmentationDict.'.format(path))
return False
for segmentId in to_remove_ids:
segmentationNode.GetSegmentation().RemoveSegment(segmentId)
elif self.color_table_id != 'None':
segment_number = segmentationNode.GetSegmentation().GetNumberOfSegments()
color_node = slicer.util.getNode(pattern=self.color_table_id)
if (segment_number > 1):
for segmentIndex in range(segment_number):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
label_id = int(segmentId.split('_')[-1])
color = [.0, .0, .0, .0]
color_node.GetColor(label_id, color)
segment_name = color_node.GetColorName(label_id)
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment.SetName(segment_name)
segment.SetColor(color[:3])
elif (segment_number==1):
segmentId = segmentationNode.GetSegmentation().GetNthSegmentID(0)
color = [.0, .0, .0, .0]
color_node.GetColor(1, color)
segment_name = color_node.GetColorName(1)
segment = segmentationNode.GetSegmentation().GetSegment(segmentId)
segment.SetName(segment_name)
segment.SetColor(color[:3])
closedSurface = segmentationNode.CreateClosedSurfaceRepresentation()
if closedSurface is False:
logging.error('Failed to create closed surface representation for filename: {}.'.format(path))
return False
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('LabelMap in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if successful
if self.freesurfer_import == True:
subject_name = os.path.split(os.path.split(directory)[0])[1]
file_name = os.path.splitext(fileName)[0]
name = subject_name + ' ' + file_name
self.labelMapDict[name] = labelMapNode
self.segmentationDict[name] = segmentationNode
self.labelRangeInCohort = labelRange
else:
self.labelMapDict[fileName] = labelMapNode
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
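# The FreeSurfer branch above assumes self.freesurfer_lut_dict maps label ids
# (as strings) to {'name': ..., 'color': ...}. A minimal sketch of how such a
# mapping could be built from a FreeSurferColorLUT.txt-style file follows; the
# helper name and the exact file layout are assumptions, not part of this module.
def _buildFreesurferLutDict(self, lut_path):
    # Hypothetical helper: each non-comment LUT line is "<id> <name> <R> <G> <B> <A>";
    # colors are rescaled to the 0-1 range expected by segment.SetColor().
    lut = {}
    with open(lut_path, 'r') as lut_file:
        for line in lut_file:
            parts = line.split()
            if not parts or parts[0].startswith('#') or len(parts) < 5:
                continue
            label_id, name = parts[0], parts[1]
            r, g, b = (int(c) / 255.0 for c in parts[2:5])
            lut[label_id] = {'name': name, 'color': [r, g, b]}
    return lut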
def importModel(self, path):
"""
Create a segmentation from a model containing a single shape; labelRangeInCohort will be (0,1), i.e. just one segment.
If your model is a model hierarchy (different shapes in the same file), use
importModelHierarchy (not implemented).
Populates segmentationDict and sets labelRangeInCohort to (0,1).
"""
directory, fileName = os.path.split(path)
modelNode = slicer.util.loadModel(path, returnNode=True)[1]
if modelNode is None:
logging.error('Failed to load ' + fileName + ' as a model')
return False
modelNode.SetDisplayVisibility(False)
segmentationNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode", modelNode.GetName() + '_allSegments')
segmentationLogic = slicer.modules.segmentations.logic()
segmentationLogic.ImportModelToSegmentationNode(modelNode, segmentationNode)
# To allow better mixing with label maps.
# We change the name of the model (originally set to the filename in vtkSlicerSegmentationModuleLogic)
# XXX Better option would be to use terminologies, see: https://discourse.slicer.org/t/finding-corresponding-segments-in-segmentations/4055/4
file_name = os.path.splitext(fileName)[0]
segmentationNode.GetSegmentation().GetSegment(modelNode.GetName()).SetName(file_name + ' 1')
closedSurface = segmentationNode.CreateClosedSurfaceRepresentation()
segmentationNode.SetDisplayVisibility(False)
# segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
if closedSurface is False:
logging.error('Failed to create closed surface representation for filename: {}.'.format(path))
return False
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('Model in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if successful
self.modelDict[fileName] = modelNode
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
def importSegmentation(self, path):
"""
Populate segmentationDict and labelRangeInCohort.
Fails if the number of labels differs from the pre-existing labelRangeInCohort value.
Returns False on error; in that case no class variable is modified.
"""
directory, fileName = os.path.split(path)
segmentationNode = slicer.util.loadSegmentation(path, returnNode=True)[1]
if segmentationNode is None:
logging.error('Failed to load ' + fileName + ' as a segmentation')
return False
segmentationNode.SetDisplayVisibility(False)
# segmentationNode.GetDisplayNode().SetAllSegmentsVisibility(False)
labelRangeConsistent, labelRange = self.checkLabelRangeConsistency(segmentationNode.GetSegmentation().GetNumberOfSegments())
if not labelRangeConsistent:
logging.warning('Segmentation in path: {} has not been loaded into segmentationDict.'.format(path))
return False
# Add to the dicts only if successful
self.segmentationDict[fileName] = segmentationNode
self.labelRangeInCohort = labelRange
return True
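# checkLabelRangeConsistency is called by all three importers above but is not
# shown in this excerpt. The sketch below only illustrates the contract the
# callers rely on (return a (consistent, labelRange) pair and compare it against
# the cohort-wide range); the real implementation and its "unset" sentinel may differ.
def _checkLabelRangeConsistencySketch(self, numberOfSegments):
    # Hypothetical sketch: derive the label range from the segment count and
    # require it to match the range already recorded for the cohort, if any.
    labelRange = (0, numberOfSegments)
    if self.labelRangeInCohort not in (None, (0, 0), labelRange):
        logging.warning('Label range {} differs from the cohort range {}.'.format(
            labelRange, self.labelRangeInCohort))
        return False, labelRange
    return True, labelRange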
def filePathsFromCSVFile(self, csvFileName):
"""
Return filePaths from CSV.
It assumes that csvFileName contains one filepath per row.
"""
filePaths = []
with open(csvFileName, 'r') as csvfile:
has_header = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # Rewind
reader = csv.reader(csvfile)
# ignore the header
if has_header:
next(reader, None)
# assuming that each row is just a file path.
for row in reader:
if len(row) > 0:
filePaths.append(row[0])
return filePaths
# Depending on the mode fill the structures table.
# TODO: add directory parsing based on mode
# else:
# logging.error("Importing from directory is not yet supported")
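# Usage sketch (hypothetical helper, not part of this module): collect the paths
# listed in a cohort CSV and hand them to the dispatcher defined next.
def runCohortImport(logic, csvFileName):
    # `logic` is an instance of the class these methods belong to.
    filePaths = logic.filePathsFromCSVFile(csvFileName)
    return logic.importFiles(filePaths)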
def importFiles(self, filePaths):
"""
Call the appropriate import function for each entry in a heterogeneous list of file paths.
Raises TypeError if a file does not exist or its file type is not handled by this module.
Files with a different number of labels/segments than the first one loaded are ignored with a warning.
Return True on success, raise an error otherwise.
"""
self.found_segments = []
for path in filePaths:
fileType = slicer.app.ioManager().fileType(path)
logging.debug("Path [{}] has file type [{}]".format(path, fileType))
if fileType == 'VolumeFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importLabelMap(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'SegmentationFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importSegmentation(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'ModelFile':
if self.expected_file_type == 'None' or self.expected_file_type == fileType:
self.importModel(path)
else:
logging.debug("Path [{}] ignored, expected file type is [{}]".format(path, self.expected_file_type))
elif fileType == 'NoFile':
raise TypeError("Path [{}] is not existent or has | |
Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'86157357':{'en': 'Linfen, Shanxi', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861573571':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'861573572':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')},
'86157358':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'86157359':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'8615736':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'86157367':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573680':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573681':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573682':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573683':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573684':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573685':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573686':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573687':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573688':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573689':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'86157369':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'86157370':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'86157371':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'86157372':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'86157373':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'86157374':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573740':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861573741':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573742':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'86157375':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573750':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573751':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573752':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'86157376':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573760':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573761':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'86157377':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'86157378':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573780':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'86157379':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861573790':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573791':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573800':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573801':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573802':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573803':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573804':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861573805':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573806':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573807':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573808':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573809':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573810':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573811':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573812':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573813':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573814':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573815':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573816':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573817':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573818':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573819':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861573820':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573821':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573822':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573823':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573824':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861573825':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573826':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573827':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573828':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573829':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'86157383':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573840':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573841':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573842':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573843':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861573844':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861573845':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861573846':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573847':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573848':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')},
'861573849':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'86157385':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861573850':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861573860':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573861':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573862':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573863':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573864':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861573865':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861573866':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861573867':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')},
'861573868':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861573869':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'86157387':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861573878':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861573879':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'86157388':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'86157389':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861573897':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')},
'861573898':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861573899':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'86157390':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861573906':{'en': 'Tacheng, Xinjiang', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')},
'861573907':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861573908':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573909':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'86157391':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'86157392':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573930':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573931':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573932':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573933':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573934':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573935':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573936':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573937':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573938':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573939':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573940':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573941':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573942':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573943':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861573944':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861573945':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861573946':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573947':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861573948':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861573949':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'86157395':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861573960':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573961':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573962':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573963':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573964':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861573965':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861573966':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861573967':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861573968':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861573969':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861573970':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861573971':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861573972':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861573973':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861573974':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861573975':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861573976':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573977':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861573978':{'en': 'Kizilsu, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u5b5c\u52d2\u82cf\u67ef\u5c14\u514b\u5b5c\u81ea\u6cbb\u5dde')},
'861573979':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'86157398':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'86157399':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861574':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
'86157500':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86157501':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'86157502':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861575026':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861575027':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861575028':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861575029':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'86157503':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861575030':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861575031':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861575038':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861575039':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6587\u5c71\u58ee\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'86157504':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861575040':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861575041':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861575042':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'86157505':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'86157506':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861575060':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861575061':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861575062':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'86157507':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86157508':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86157509':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861575100':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575101':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575102':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575103':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575104':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861575105':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861575106':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861575107':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575108':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575109':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861575110':{'en': 'Yancheng, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575111':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575112':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575113':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575114':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575115':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575116':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575117':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575118':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861575119':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'86157512':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861575126':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861575127':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575128':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575129':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'86157513':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861575140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861575143':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575144':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575145':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861575146':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575147':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861575148':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861575149':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'86157515':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'86157516':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861575160':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575161':{'en': 'Yancheng, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575162':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575169':{'en': 'Lianyungang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861575170':{'en': 'Su<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861575171':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861575172':{'en': 'Yancheng, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861575173':{'en': 'Xu<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861575174':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861575175':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861575176':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861575177':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861575178':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861575179':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'86157518':{'en': 'Nan<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861575180':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861575181':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861575189':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861575190':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u9152\u6cc9\u5e02')},
'861575191':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e86\u9633\u5e02')},
'861575192':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u4e34\u590f\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861575193':{'en': 'Baiyin, Gansu', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861575194':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861575195':{'en': 'Jinchang, Gansu', 'zh': u('\u7518\u8083\u7701\u91d1\u660c\u5e02')},
'861575196':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5f20\u6396\u5e02')},
'861575197':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5929\u6c34\u5e02')},
'861575198':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')},
'861575199':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u91d1\u660c\u5e02')},
'861575200':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575201':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575202':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575203':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575204':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575205':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861575206':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861575207':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861575208':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575209':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'86157521':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575220':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575221':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575222':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575223':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575224':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'861575225':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861575226':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861575227':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861575228':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861575229':{'en': 'H<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86157523':{'en': 'Xishuangbanna, Yunnan', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'86157524':{'en': 'Zhaotong, Yunnan', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'86157525':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86157526':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'86157527':{'en': 'Dali, Yunnan', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'86157528':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5927\u7406\u767d\u65cf\u81ea\u6cbb\u5dde')},
'861575286':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575287':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575288':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575289':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86157529':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u662d\u901a\u5e02')},
'861575297':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575298':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861575299':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86157530':{'en': 'He<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'86157531':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86157532':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'86157533':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'86157534':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86157535':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'86157536':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'86157537':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86157538':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'86157539':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'86157540':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'86157541':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'86157542':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u671d\u9633\u5e02')},
'86157543':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'86157544':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'861575450':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861575451':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861575452':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'861575453':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861575454':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'861575455':{'en': | |
<reponame>akarakoc/SWE574<filename>mystream/streampage/forms.py
from django.contrib.auth import authenticate, get_user_model
from django import forms
from streampage.models import Primitives, communityUsers, Communities, Datatypes, DatatypeFields, Posts, CommunityTags, \
DatatTypeTags, PostTags, UserTags
from django_countries import countries
import json
class UsersLoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput, )
def __init__(self, *args, **kwargs):
super(UsersLoginForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({
'class': 'form-control',
"name": "username"})
self.fields['password'].widget.attrs.update({
'class': 'form-control',
"name": "password"})
def clean(self, *args, **keyargs):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("This user does not exists")
if not user.check_password(password):
raise forms.ValidationError("Incorrect Password")
if not user.is_active:
raise forms.ValidationError("User is no longer active")
return super(UsersLoginForm, self).clean(*args, **keyargs)
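# A minimal view sketch showing how UsersLoginForm's clean() is typically
# consumed; this function, the template name and the redirect target are
# assumptions and not part of this app.
def login_view(request):
    from django.contrib.auth import login
    from django.shortcuts import render, redirect
    form = UsersLoginForm(request.POST or None)
    if form.is_valid():
        # clean() has already verified the credentials via authenticate()
        user = authenticate(username=form.cleaned_data.get("username"),
                            password=form.cleaned_data.get("password"))
        login(request, user)
        return redirect("/")
    return render(request, "login.html", {"form": form})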
User = get_user_model()
class UsersRegisterForm(forms.ModelForm):
class Meta:
model = User
fields = [
"username",
"email",
"confirm_email",
"password",
]
username = forms.CharField()
email = forms.EmailField(label="Email")
confirm_email = forms.EmailField(label="Confirm Email")
password = forms.CharField(widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super(UsersRegisterForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs.update({
'class': 'form-control',
"name": "username"})
self.fields['email'].widget.attrs.update({
'class': 'form-control',
"name": "email"})
self.fields['confirm_email'].widget.attrs.update({
'class': 'form-control',
"name": "confirm_email"})
self.fields['password'].widget.attrs.update({
'class': 'form-control',
"name": "password"})
def clean(self, *args, **keyargs):
email = self.cleaned_data.get("email")
confirm_email = self.cleaned_data.get("confirm_email")
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if email != confirm_email:
raise forms.ValidationError("Email must match")
email_qs = User.objects.filter(email=email)
if email_qs.exists():
raise forms.ValidationError("Email is already registered")
username_qs = User.objects.filter(username=username)
if username_qs.exists():
raise forms.ValidationError("User with this username already registered")
# you can add more validations for password
if len(password) < 8:
raise forms.ValidationError("Password must be greater than 8 characters")
return super(UsersRegisterForm, self).clean(*args, **keyargs)
class AddCommunity(forms.Form):
Community_Name = forms.CharField()
Community_Description = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }))
Community_Image = forms.ImageField()
Private_Community = forms.BooleanField(initial=False, required=False)
Community_Country = forms.ChoiceField(choices=countries, label='')
Community_Location = forms.ChoiceField(label='', widget=forms.Select(attrs={'class':'form-control', 'name':'community_location', 'id':'id_community_location'}))
Community_Tags = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }))
def __init__(self, *args, **kwargs):
super(AddCommunity, self).__init__(*args, **kwargs)
self.fields['Community_Name'].label = "Community Name"
self.fields['Community_Name'].widget.attrs.update({
'class': 'form-control small',
"name": "Community Name"})
self.fields['Community_Description'].label = "Community Description"
self.fields['Community_Description'].widget.attrs.update({
'class': 'form-control small',
"name": "Community Description"})
self.fields['Community_Country'].label = "Community Country"
self.fields['Community_Country'].widget.attrs.update({
'class': 'form-control small',
"name": "Community Country"})
self.fields['Community_Location'].label = "Community Province"
self.fields['Community_Location'].widget.attrs.update({
'class': 'form-control small',
"name": "Community Location"})
self.fields['Community_Tags'].widget.attrs.update({
'class': 'form-control small',
"name": "Community Tags"})
self.fields['Community_Tags'].label = "Community Tags"
def clean(self, *args, **keyargs):
Community_Name = self.cleaned_data.get("Community_Name")
Community_Description = self.cleaned_data.get("Community_Description")
Community_Image = self.cleaned_data.get("Community_Image")
Community_Country = self.cleaned_data.get("Community_Country")
Community_Location = self.cleaned_data.get("Community_Location")
Community_Tags = self.cleaned_data.get("Community_Tags")
return super(AddCommunity, self).clean(*args, **keyargs)
class EditCommunity(forms.Form):
Community_Description = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }))
Community_Image = forms.ImageField()
# Community_Country = CountryField().formfield()
# Community_Location = forms.ChoiceField(label='', widget=forms.Select(attrs={'class': 'form-control'}))
Private_Community = forms.BooleanField(initial=False, required=False)
Community_Tags = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }))
def __init__(self, *args, **kwargs):
super(EditCommunity, self).__init__(*args, **kwargs)
self.fields['Community_Description'].label = "Community Description"
self.fields['Community_Description'].widget.attrs.update({
'class': 'form-control',
"name": "Community Description"})
self.fields['Community_Tags'].widget.attrs.update({
'class': 'form-control',
"name": "Community Tags"})
self.fields['Community_Tags'].label = "Community Tags"
def clean(self, *args, **keyargs):
Community_Description = self.cleaned_data.get("Community_Description")
Community_Image = self.cleaned_data.get("Community_Image")
Community_Country = self.cleaned_data.get("Community_Country")
Community_Location = self.cleaned_data.get("Community_Location")
Community_Tags = self.cleaned_data.get("Community_Tags")
return super(EditCommunity, self).clean(*args, **keyargs)
class AddPosttype(forms.Form):
Posttype_Name = forms.CharField()
Posttype_Tags = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }))
def __init__(self, *args, **kwargs):
super(AddPosttype, self).__init__(*args, **kwargs)
self.fields['Posttype_Name'].label = "Posttype Name"
self.fields['Posttype_Name'].widget.attrs.update({
'class': 'form-control',
"name": "Posttype Name"})
self.fields['Posttype_Tags'].label = "Posttype Tags"
self.fields['Posttype_Tags'].widget.attrs.update({
'class': 'form-control',
"name": "Posttype Tags"})
def clean(self, *args, **keyargs):
Posttype_Name = self.cleaned_data.get("Posttype_Name")
Posttype_Tags = self.cleaned_data.get("Posttype_Tags")
return super(AddPosttype, self).clean(*args, **keyargs)
class AddTextEntryEnum(forms.Form):
name = forms.CharField(label='')
Types = forms.ModelChoiceField(queryset=Primitives.objects.filter(name="Enumeration").order_by('name'), label='',
to_field_name="name")
Required = forms.BooleanField(initial=False, required=False, label='')
ShowPage = forms.BooleanField(initial=False, required=False, label='')
Enum = forms.CharField(label='')
def __init__(self, *args, **kwargs):
super(AddTextEntryEnum, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs.update({'class': 'form-control'})
self.fields['Types'].widget.attrs.update({'class': 'form-control'})
self.fields['Enum'].widget.attrs.update({'class': 'form-control'})
class AddTextEntry(forms.Form):
name = forms.CharField(label='')
Types = forms.ModelChoiceField(queryset=Primitives.objects.all().exclude(name="Enumeration").order_by('name'),
label='', to_field_name="name")
Required = forms.BooleanField(initial=False, required=False, label='')
ShowPage = forms.BooleanField(initial=False, required=False, label='')
def __init__(self, *args, **kwargs):
super(AddTextEntry, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs.update({'class': 'form-control'})
self.fields['Types'].widget.attrs.update({'class': 'form-control'})
class SendPrimitives(forms.Form):
Types = forms.ModelChoiceField(queryset=Primitives.objects.all().order_by('name'), label='')
def __init__(self, *args, **kwargs):
super(SendPrimitives, self).__init__(*args, **kwargs)
self.fields['Types'].widget.attrs.update({'class': 'form-control'})
class AddTextPost(forms.Form):
TextEntry = forms.CharField(label='')
def __init__(self, *args, **kwargs):
super(AddTextPost, self).__init__(*args, **kwargs)
self.fields['TextEntry'].widget.attrs.update({'class': 'form-control'})
class AddTextAreaPost(forms.Form):
TextAreaEntry = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }),
label='')
def __init__(self, *args, **kwargs):
super(AddTextAreaPost, self).__init__(*args, **kwargs)
self.fields['TextAreaEntry'].widget.attrs.update({'class': 'form-control'})
class AddImagePost(forms.Form):
ImageEntry = forms.ImageField(label='')
def __init__(self, *args, **kwargs):
super(AddImagePost, self).__init__(*args, **kwargs)
class AddAudioPost(forms.Form):
AudioEntry = forms.FileField(label='')
def __init__(self, *args, **kwargs):
super(AddAudioPost, self).__init__(*args, **kwargs)
class AddVideoPost(forms.Form):
VideoEntry = forms.FileField(label='')
def __init__(self, *args, **kwargs):
super(AddVideoPost, self).__init__(*args, **kwargs)
class AddBooleanPost(forms.Form):
BooleanEntry = forms.BooleanField(initial=False, required=False, label='')
def __init__(self, *args, **kwargs):
super(AddBooleanPost, self).__init__(*args, **kwargs)
self.fields['BooleanEntry'].widget.attrs.update({'class': 'form-control d-flex justify-content-between'})
class AddEmailPost(forms.Form):
EmailEntry = forms.EmailField(label='')
def __init__(self, *args, **kwargs):
super(AddEmailPost, self).__init__(*args, **kwargs)
self.fields['EmailEntry'].widget.attrs.update({'class': 'form-control'})
class AddIpAddressPost(forms.Form):
IpAddressEntry = forms.GenericIPAddressField(label='')
def __init__(self, *args, **kwargs):
super(AddIpAddressPost, self).__init__(*args, **kwargs)
self.fields['IpAddressEntry'].widget.attrs.update({'class': 'form-control'})
class AddUrlPost(forms.Form):
UrlEntry = forms.URLField(label='')
def __init__(self, *args, **kwargs):
super(AddUrlPost, self).__init__(*args, **kwargs)
self.fields['UrlEntry'].widget.attrs.update({'class': 'form-control'})
class AddDatePost(forms.Form):
DateEntry = forms.DateField(label='')
def __init__(self, *args, **kwargs):
super(AddDatePost, self).__init__(*args, **kwargs)
self.fields['DateEntry'].widget.attrs.update({'class': 'form-control', 'type': 'date'})
class AddTimePost(forms.Form):
TimeEntry = forms.TimeField(label='')
def __init__(self, *args, **kwargs):
super(AddTimePost, self).__init__(*args, **kwargs)
self.fields['TimeEntry'].widget.attrs.update({'class': 'form-control'})
class AddDateTimePost(forms.Form):
DateTimeEntry = forms.DateTimeField(label='')
def __init__(self, *args, **kwargs):
super(AddDateTimePost, self).__init__(*args, **kwargs)
self.fields['DateTimeEntry'].widget.attrs.update({'class': 'form-control', 'type': 'datetime-local'})
class AddIntegerPost(forms.Form):
IntegerEntry = forms.IntegerField(label='')
def __init__(self, *args, **kwargs):
super(AddIntegerPost, self).__init__(*args, **kwargs)
self.fields['IntegerEntry'].widget.attrs.update({'class': 'form-control'})
class AddDecimalPost(forms.Form):
DecimalEntry = forms.DecimalField(label='')
def __init__(self, *args, **kwargs):
super(AddDecimalPost, self).__init__(*args, **kwargs)
self.fields['DecimalEntry'].widget.attrs.update({'class': 'form-control'})
class AddFloatPost(forms.Form):
FloatEntry = forms.FloatField(label='')
def __init__(self, *args, **kwargs):
super(AddFloatPost, self).__init__(*args, **kwargs)
self.fields['FloatEntry'].widget.attrs.update({'class': 'form-control'})
class AddEnumaratedPost(forms.Form):
def __init__(self, *args, **kwargs):
enum = kwargs.pop('en')
name = kwargs.pop('nm')
super(AddEnumaratedPost, self).__init__(*args, **kwargs)
self.fields['EnumaratedEntry'] = forms.ChoiceField(choices=tuple(enumerate(enum)), label='')
self.fields['EnumaratedEntry'].widget.attrs.update({'class': 'form-control'})
contextName = {}
contextName['name'] = name
cnName = contextName.get(name, name)
super(AddEnumaratedPost, self).add_prefix(cnName)
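# AddEnumaratedPost builds its choices at runtime from the 'en' and 'nm' kwargs.
# A small usage sketch (the helper and the example values are illustrative only):
def build_enumerated_post_form(choices, field_label):
    # e.g. build_enumerated_post_form(["Red", "Green", "Blue"], "Colour") yields a
    # form whose EnumaratedEntry ChoiceField has choices ((0, 'Red'), (1, 'Green'), (2, 'Blue')).
    return AddEnumaratedPost(en=choices, nm=field_label)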
class AddLocationPost(forms.Form):
LocationEntry = forms.CharField(label='')
def __init__(self, *args, **kwargs):
super(AddLocationPost, self).__init__(*args, **kwargs)
self.fields['LocationEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['LocationEntry'].initial = 'Choose from the map!'
class AddTagPost(forms.Form):
TagEntry = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }), label='')
def __init__(self, *args, **kwargs):
super(AddTagPost, self).__init__(*args, **kwargs)
self.fields['TagEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['TagEntry'].widget.attrs.update({'data-role': 'tagsinput'})
#######################################################################################################
###################################SEARCH FORMS########################################################
#######################################################################################################
class AddTextSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
TextEntry = forms.CharField(label='')
def __init__(self, *args, **kwargs):
super(AddTextSearch, self).__init__(*args, **kwargs)
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
self.fields['TextEntry'].widget.attrs.update({'class': 'form-control'})
class AddTextAreaSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
TextAreaEntry = forms.CharField(widget=forms.Textarea(attrs={'width': "50%", 'cols': "50", 'rows': "2", }),
label='')
def __init__(self, *args, **kwargs):
super(AddTextAreaSearch, self).__init__(*args, **kwargs)
self.fields['TextAreaEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddImageSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
ImageEntry = forms.ImageField(label='')
def __init__(self, *args, **kwargs):
super(AddImageSearch, self).__init__(*args, **kwargs)
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddAudioSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
AudioEntry = forms.FileField(label='')
def __init__(self, *args, **kwargs):
super(AddAudioSearch, self).__init__(*args, **kwargs)
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddVideoSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
VideoEntry = forms.FileField(label='')
def __init__(self, *args, **kwargs):
super(AddVideoSearch, self).__init__(*args, **kwargs)
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddBooleanSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "not equal"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
BooleanEntry = forms.BooleanField(initial=False, required=False, label='')
def __init__(self, *args, **kwargs):
super(AddBooleanSearch, self).__init__(*args, **kwargs)
self.fields['BooleanEntry'].widget.attrs.update({'class': 'form-control d-flex justify-content-between'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddEmailSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
EmailEntry = forms.EmailField(label='')
Op = forms.ChoiceField(choices=tuple(enumerate(Operand)), label='')
def __init__(self, *args, **kwargs):
super(AddEmailSearch, self).__init__(*args, **kwargs)
self.fields['EmailEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
self.fields['Op'].widget.attrs.update({'class': 'form-control'})
class AddIpAddressSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
IpAddressEntry = forms.GenericIPAddressField(label='')
def __init__(self, *args, **kwargs):
super(AddIpAddressSearch, self).__init__(*args, **kwargs)
self.fields['IpAddressEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddUrlSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "contains", "not equal", "not contain"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
UrlEntry = forms.URLField(label='')
def __init__(self, *args, **kwargs):
super(AddUrlSearch, self).__init__(*args, **kwargs)
self.fields['UrlEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddDateSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "after", "before"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
DateEntry = forms.DateField(label='')
def __init__(self, *args, **kwargs):
super(AddDateSearch, self).__init__(*args, **kwargs)
self.fields['DateEntry'].widget.attrs.update({'class': 'form-control', 'type': 'date'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddTimeSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "after", "before"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
TimeEntry = forms.TimeField(label='')
def __init__(self, *args, **kwargs):
super(AddTimeSearch, self).__init__(*args, **kwargs)
self.fields['TimeEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddDateTimeSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "after", "before"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
DateTimeEntry = forms.DateTimeField(label='')
def __init__(self, *args, **kwargs):
super(AddDateTimeSearch, self).__init__(*args, **kwargs)
self.fields['DateTimeEntry'].widget.attrs.update({'class': 'form-control', 'type': 'datetime-local'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddIntegerSearch(forms.Form):
Operand = ["", "AND", "OR"]
ChoiceList = ["equals", "less than", "more than"]
Condition = forms.ChoiceField(choices=tuple(enumerate(ChoiceList)), label='')
IntegerEntry = forms.IntegerField(label='')
def __init__(self, *args, **kwargs):
super(AddIntegerSearch, self).__init__(*args, **kwargs)
self.fields['IntegerEntry'].widget.attrs.update({'class': 'form-control'})
self.fields['Condition'].widget.attrs.update({'class': 'form-control'})
class AddDecimalSearch(forms.Form):
Operand = | |
ws.dim_rowmax + 2
value = sum([counts[x] for x in counts if x[0] == 1])
total = sum(counts.values())
self.write_overall_value(ws, value, total, overall_column, overall_row, write_overall=True)
if gen_dataset:
tabulate_dataset("ws_47", ["Topic", "Answer", "Year", "Geography", "Count"], counts, ws_47_dataset)
def ws_48(self, ws, gen_dataset=False):
"""
Cols: Sex of reporter, Stereotypes
Rows: Major Topics
"""
secondary_counts = OrderedDict()
overall_column = ws.dim_colmax
for gender_id, gender in self.male_female:
counts = Counter()
for media_type, model in tm_journalist_models.items():
sheet_name = model.sheet_name()
topic = sheet_name + '__topic'
stereotypes = sheet_name + '__stereotypes'
if 'stereotypes' in [field_name.name for field_name in model._meta.get_field(sheet_name).remote_field.model._meta.get_fields()]:
rows = model.objects\
.values(stereotypes, topic)\
.filter(sex=gender_id)\
.filter(**{model.sheet_name() + '__country__in':self.country_list})\
.annotate(n=Count('id'))
if media_type in REPORTER_MEDIA:
rows = rows.filter(role=REPORTERS)
rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
for r in rows:
counts.update({(r['stereotypes'], TOPIC_GROUPS[r['topic']]): r['n']})
secondary_counts[gender] = counts
if gen_dataset:
tabulate_dataset("ws_48", ["Topic", "Gender", "Answer", "Year", "Geography", "Count"], counts, ws_48_dataset, gender=gender_id)
self.tabulate_secondary_cols(ws, secondary_counts, AGREE_DISAGREE, MAJOR_TOPICS, row_perc=True, show_N=True)
self.tabulate_historical(ws, '48', AGREE_DISAGREE, MAJOR_TOPICS, write_row_headings=False, major_cols=self.male_female, show_N_and_P=True)
overall_row = ws.dim_rowmax + 2
# Female Overall
counts = secondary_counts[self.male_female[0][1]]
value = sum([counts[x] for x in counts if x[0] == 1])
total = sum(counts.values())
self.write_overall_value(ws, value, total, overall_column, overall_row, write_overall=True)
# Male Overall
counts = secondary_counts[self.male_female[1][1]]
value = sum([counts[x] for x in counts if x[0] == 1])
total = sum(counts.values())
self.write_overall_value(ws, value, total, overall_column+5, overall_row, write_overall=True)
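# apply_weights is used on every queryset in this class but is defined elsewhere.
# The sketch below only illustrates the shape of the call (scale each aggregated
# count 'n' by a media-type weight); the real helper appears to do more, e.g. it
# returns plain column names such as 'stereotypes' rather than the
# 'sheet__stereotypes' keys requested above, and likely weights per country too.
def apply_weights_sketch(self, rows, db_table, media_type):
    # Hypothetical sketch only; self.media_weights is an assumed lookup table.
    weight = getattr(self, 'media_weights', {}).get(media_type, 1)
    return [dict(row, n=row['n'] * weight) for row in rows]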
def ws_49(self, ws):
"""
Cols: Major Topics
Rows: Region
:: Internet media type only
"""
overall_column = ws.dim_colmax
if self.report_type == 'country':
counts = Counter()
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'country')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
for row in rows:
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, row['country']): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, row_perc=True)
else:
counts = Counter()
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'country_region__region')\
.filter(country_region__region__in=self.region_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
for row in rows:
region_id = [r[0] for r in self.regions if r[1] == row['region']][0]
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, region_id): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, self.regions, row_perc=True)
self.tabulate_historical(ws, '49', [*MAJOR_TOPICS], self.regions, write_row_headings=False)
overall_row = ws.dim_rowmax + 2
total = sum(counts.values())
write_overall=True
for topic, _ in MAJOR_TOPICS:
value = sum([counts[x] for x in counts if x[0] == topic])
self.write_overall_value(ws, value, total, overall_column, overall_row, write_overall)
write_overall=False
overall_column+=1
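# write_overall_value is called at the end of most worksheets but is defined
# elsewhere. A minimal sketch of the behaviour the call sites rely on (write
# value/total as a percentage at (row, col), optionally with an "Overall" label)
# is shown below; the cell formats are assumptions.
def write_overall_value_sketch(self, ws, value, total, col, row, write_overall):
    if write_overall:
        ws.write(row, col - 1, "Overall", self.label)
    perc = (100.0 * value / total) if total else 0.0
    ws.write(row, col, perc, getattr(self, 'P', None))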
def ws_50(self, ws):
"""
Cols: Major Topics
Rows: YES NO
:: Internet media type only
:: Only stories shared on Twitter
"""
counts = Counter()
overall_column = ws.dim_colmax
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'shared_via_twitter')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
for row in rows:
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, row['shared_via_twitter']): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, YESNO, show_N=True)
overall_row = ws.dim_rowmax + 2
value = sum([counts[x] for x in counts if x[1] == 'Y'])
total = sum(counts.values())
ws.write(overall_row, overall_column-1, "Overall Yes", self.label)
self.write_overall_value(ws, value, total, overall_column+1, overall_row, write_overall=False)
def ws_51(self, ws):
"""
Cols: Major Topics
Rows: YES NO
:: Internet media type only
:: Only stories shared on Facebook
"""
counts = Counter()
overall_column = ws.dim_colmax
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'shared_on_facebook')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
for row in rows:
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, row['shared_on_facebook']): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, YESNO, show_N=True)
overall_row = ws.dim_rowmax + 2
value = sum([counts[x] for x in counts if x[1] == 'Y'])
total = sum(counts.values())
ws.write(overall_row, overall_column-1, "Overall Yes", self.label)
self.write_overall_value(ws, value, total, overall_column+1, overall_row, write_overall=False)
def ws_52(self, ws):
"""
Cols: Major Topics
Rows: YES NO
:: Internet media type only
:: Only stories with reference to gender equality
"""
counts = Counter()
overall_column = ws.dim_colmax
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'equality_rights')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
for row in rows:
major_topic = TOPIC_GROUPS[row['topic']]
counts.update({(major_topic, row['equality_rights']): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, YESNO, show_N=True)
overall_row = ws.dim_rowmax + 2
value = sum([counts[x] for x in counts if x[1] == 'Y'])
total = sum(counts.values())
ws.write(overall_row, overall_column-1, "Overall Yes", self.label)
self.write_overall_value(ws, value, total, overall_column+1, overall_row, write_overall=False)
def ws_53(self, ws):
"""
Cols: Topic
Rows: Country
:: Internet media type only
:: Female reporters only
"""
filter_cols = [(id, value) for id, value in GENDER if id==1]
secondary_counts = OrderedDict()
model = sheet_models.get('Internet')
for major_topic, topic_ids in GROUP_TOPICS_MAP.items():
counts = Counter()
journo_sex_field = '%s__sex' % model.journalist_field_name()
rows = model.objects\
.values(journo_sex_field, 'country')\
.filter(topic__in=topic_ids)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, 'Internet')
counts.update({(r['sex'], self.recode_country(r['country'])): r['n'] for r in rows})
major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
secondary_counts[major_topic_name] = counts
self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True, filter_cols=filter_cols)
self.tabulate_historical(ws, '53', self.female, self.countries, major_cols=MAJOR_TOPICS)
def ws_54(self, ws):
"""
Cols: Major Topic, sex of subject
Rows: Country
:: Internet media type only
"""
secondary_counts = OrderedDict()
model = person_models.get('Internet')
for major_topic, topic_ids in GROUP_TOPICS_MAP.items():
counts = Counter()
country_field = '%s__country' % model.sheet_name()
rows = model.objects\
.values('sex', country_field)\
.filter(**{model.sheet_name() + '__topic__in':topic_ids})\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), 'Internet')
counts.update({(r['sex'], self.recode_country(r['country'])): r['n'] for r in rows})
major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
secondary_counts[major_topic_name] = counts
self.tabulate_secondary_cols(ws, secondary_counts, GENDER, self.countries, row_perc=True)
self.tabulate_historical(ws, '54', [*GENDER], self.countries, major_cols=MAJOR_TOPICS)
def ws_55(self, ws):
"""
Cols: Occupation
Rows: Gender
:: Show male and female
:: Internet and Twitter media types
"""
secondary_counts = OrderedDict()
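        # Build one (sex, occupation) Counter per digital media type so the
        # secondary-column tabulation can show the media types side by side.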
for media_type, model in dm_person_models.items():
counts = Counter()
rows = model.objects\
.values('occupation', 'sex')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.exclude(sex=None)\
.exclude(occupation=None)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
for d in rows:
counts[d['sex'], d['occupation']] += d['n']
secondary_counts[media_type] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, OCCUPATION, row_perc=True, show_N=True)
def ws_56(self, ws):
"""
Cols: Function
Rows: Male Female
:: Internet media and Twitter media types.
"""
secondary_counts = OrderedDict()
for media_type, model in dm_person_models.items():
counts = Counter()
rows = model.objects\
.values('function', 'sex')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
for d in rows:
counts[d['sex'], d['function']] += d['n']
secondary_counts[media_type] = counts
self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, FUNCTION, row_perc=True, show_N=True)
def ws_57(self, ws):
"""
Cols: Sex of subject
Rows: Family role
:: Internet media type only
"""
counts = Counter()
model = person_models.get('Internet')
rows = model.objects\
.values('sex', 'family_role')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        for row in rows:
            counts.update({(row['sex'], row['family_role']): row['n']})
self.tabulate(ws, counts, GENDER, YESNO, show_N=True)
def ws_58(self, ws):
"""
Cols: Sex of subject
Rows: is photographed
:: Internet media type only
"""
counts = Counter()
model = person_models.get('Internet')
rows = model.objects\
.values('sex', 'is_photograph')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
for d in rows:
counts[d['sex'], d['is_photograph']] += d['n']
self.tabulate(ws, counts, GENDER, IS_PHOTOGRAPH, show_N=True)
def ws_59(self, ws):
"""
Cols: Sex of reporter
Rows: Sex of subject
:: Internet media only
"""
counts = Counter()
model = person_models.get('Internet')
sheet_name = model.sheet_name()
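        # Reach the reporter's sex through the person -> sheet -> journalist
        # relation, i.e. "<sheet_name>__<journalist_field>__sex".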
journo_name = model._meta.get_field(model.sheet_name()).remote_field.model.journalist_field_name()
journo_sex = sheet_name + '__' + journo_name + '__sex'
rows = model.objects\
.extra(select={"subject_sex": model._meta.db_table + ".sex"})\
.values(journo_sex, 'subject_sex')\
.filter(**{model.sheet_name() + '__country__in':self.country_list})\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
counts.update({(r['sex'], r['subject_sex']): r['n'] for r in rows})
counts['col_title_def'] = 'Sex of reporter'
self.tabulate(ws, counts, self.male_female, self.male_female, row_perc=False)
self.tabulate_historical(ws, '59', self.male_female, self.male_female)
def ws_60(self, ws):
"""
Cols: Sex of subject
Rows: age
:: Internet media type only
"""
counts = Counter()
model = person_models.get('Internet')
rows = model.objects\
.values('sex', 'age')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
for d in rows:
counts[d['sex'], d['age']] += d['n']
self.tabulate(ws, counts, GENDER, AGES, show_N=True)
def ws_61(self, ws):
"""
Cols: Sex of subject
Rows: is_quoted
:: Internet media type only
"""
counts = Counter()
model = person_models.get('Internet')
rows = model.objects\
.values('sex', 'is_quoted')\
.filter(**{model.sheet_name() + "__country__in": self.country_list}) \
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model.sheet_db_table(), "Internet")
        for row in rows:
            counts.update({(row['sex'], row['is_quoted']): row['n']})
self.tabulate(ws, counts, GENDER, YESNO, show_N=True)
def ws_62(self, ws):
"""
Cols: Topic
Rows: equality raised
:: Internet media type only
"""
counts = Counter()
overall_column = ws.dim_colmax
model = sheet_models.get('Internet')
rows = model.objects\
.values('topic', 'equality_rights')\
.filter(country__in=self.country_list)\
.annotate(n=Count('id'))
rows = self.apply_weights(rows, model._meta.db_table, "Internet")
        for row in rows:
            counts.update({(TOPIC_GROUPS[row["topic"]], row["equality_rights"]): row['n']})
self.tabulate(ws, counts, MAJOR_TOPICS, YESNO, show_N=True)
overall_row = ws.dim_rowmax + 2
value = sum([counts[x] for x in counts if x[1] == 'Y'])
total = sum(counts.values())
ws.write(overall_row, overall_column-1, "Overall Yes", self.label)
self.write_overall_value(ws, value, total, overall_column+1, overall_row, write_overall=False)
def ws_63(self, ws):
"""
Cols: Topic
Rows: stereotypes challenged
        :: Internet
<filename>nara_wpe/torch_wpe_real_imag.py<gh_stars>1-10
import numpy as np
import torch
import torch_complex.functional
from torch_complex.tensor import ComplexTensor
from nara_wpe.torch_wpe import build_y_tilde as _build_y_tilde
def build_y_tilde(Y, taps, delay):
"""
    Note: The returned y_tilde consumes a similar amount of memory as Y, because
    of tricks with strides. Usually the memory consumption is K times
    smaller than the memory consumption of a contiguous array.
>>> T, D = 20, 2
# >>> Y = torch.arange(start=1, end=T * D + 1).to(dtype=torch.complex128).reshape([T, D]).t()
>>> Y = torch.arange(start=1, end=T * D + 1).reshape([T, D]).t()
>>> Y = ComplexTensor(Y, Y)
>>> print(Y.numpy())
[[ 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j 11.+11.j 13.+13.j 15.+15.j
17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j 27.+27.j 29.+29.j 31.+31.j
33.+33.j 35.+35.j 37.+37.j 39.+39.j]
[ 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j 12.+12.j 14.+14.j 16.+16.j
18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j 28.+28.j 30.+30.j 32.+32.j
34.+34.j 36.+36.j 38.+38.j 40.+40.j]]
>>> taps, delay = 4, 2
>>> Y_tilde = build_y_tilde(Y, taps, delay)
>>> print(Y_tilde.shape, (taps*D, T))
torch.Size([8, 20]) (8, 20)
>>> print(Y_tilde.numpy())
[[ 0. +0.j 0. +0.j 0. +0.j 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j
7. +7.j 9. +9.j 11.+11.j 13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j
23.+23.j 25.+25.j 27.+27.j 29.+29.j]
[ 0. +0.j 0. +0.j 0. +0.j 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j
8. +8.j 10.+10.j 12.+12.j 14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j
24.+24.j 26.+26.j 28.+28.j 30.+30.j]
[ 0. +0.j 0. +0.j 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j
9. +9.j 11.+11.j 13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j
25.+25.j 27.+27.j 29.+29.j 31.+31.j]
[ 0. +0.j 0. +0.j 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j
10.+10.j 12.+12.j 14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j
26.+26.j 28.+28.j 30.+30.j 32.+32.j]
[ 0. +0.j 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j
11.+11.j 13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j
27.+27.j 29.+29.j 31.+31.j 33.+33.j]
[ 0. +0.j 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j
12.+12.j 14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j
28.+28.j 30.+30.j 32.+32.j 34.+34.j]
[ 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j 11.+11.j
13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j 27.+27.j
29.+29.j 31.+31.j 33.+33.j 35.+35.j]
[ 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j 12.+12.j
14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j 28.+28.j
30.+30.j 32.+32.j 34.+34.j 36.+36.j]]
>>> Y_tilde = build_y_tilde(Y, taps, 0)
>>> print(Y_tilde.shape, (taps*D, T), Y_tilde.real.stride(), Y_tilde.imag.stride())
torch.Size([8, 20]) (8, 20) (1, 2) (1, 2)
>>> print('Pseudo size:', np.prod(Y_tilde.size()) * Y_tilde.real.element_size(), np.prod(Y_tilde.size()) * Y_tilde.imag.element_size())
Pseudo size: 1280 1280
    >>> print('Real size:', Y_tilde.real.storage().size() * Y_tilde.real.storage().element_size(), Y_tilde.imag.storage().size() * Y_tilde.imag.storage().element_size())
    Real size: 368 368
>>> print(Y_tilde.numpy())
[[ 0. +0.j 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j
11.+11.j 13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j
27.+27.j 29.+29.j 31.+31.j 33.+33.j]
[ 0. +0.j 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j
12.+12.j 14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j
28.+28.j 30.+30.j 32.+32.j 34.+34.j]
[ 0. +0.j 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j 11.+11.j
13.+13.j 15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j 27.+27.j
29.+29.j 31.+31.j 33.+33.j 35.+35.j]
[ 0. +0.j 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j 12.+12.j
14.+14.j 16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j 28.+28.j
30.+30.j 32.+32.j 34.+34.j 36.+36.j]
[ 0. +0.j 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j 11.+11.j 13.+13.j
15.+15.j 17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j 27.+27.j 29.+29.j
31.+31.j 33.+33.j 35.+35.j 37.+37.j]
[ 0. +0.j 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j 12.+12.j 14.+14.j
16.+16.j 18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j 28.+28.j 30.+30.j
32.+32.j 34.+34.j 36.+36.j 38.+38.j]
[ 1. +1.j 3. +3.j 5. +5.j 7. +7.j 9. +9.j 11.+11.j 13.+13.j 15.+15.j
17.+17.j 19.+19.j 21.+21.j 23.+23.j 25.+25.j 27.+27.j 29.+29.j 31.+31.j
33.+33.j 35.+35.j 37.+37.j 39.+39.j]
[ 2. +2.j 4. +4.j 6. +6.j 8. +8.j 10.+10.j 12.+12.j 14.+14.j 16.+16.j
18.+18.j 20.+20.j 22.+22.j 24.+24.j 26.+26.j 28.+28.j 30.+30.j 32.+32.j
34.+34.j 36.+36.j 38.+38.j 40.+40.j]]
The first columns are zero because of the delay.
"""
if isinstance(Y, ComplexTensor):
return ComplexTensor(
_build_y_tilde(Y.real, taps, delay),
_build_y_tilde(Y.imag, taps, delay),
)
else:
return _build_y_tilde(Y, taps, delay)
def get_power_inverse(signal, psd_context=0):
"""
Assumes single frequency bin with shape (D, T).
# >>> s = 1 / torch.tensor([np.arange(1, 6).astype(np.complex128)]*3)
>>> s = 1 / torch.tensor([np.arange(1, 6).astype(np.float64)]*3)
>>> s = ComplexTensor(s, -s)
>>> get_power_inverse(s).numpy()
array([ 0.5, 2. , 4.5, 8. , 12.5])
>>> get_power_inverse(s * 0 + 1, 1).numpy()
array([1., 1., 1., 1., 1.])
>>> get_power_inverse(s, 1).numpy()
array([ 1.6 , 2.20408163, 7.08196721, 14.04421326, 19.51219512])
>>> get_power_inverse(s, np.inf).numpy()
array([1.708104, 1.708104, 1.708104, 1.708104, 1.708104])
"""
if isinstance(signal, ComplexTensor):
power = torch.mean(signal.real ** 2 + signal.imag **2, dim=-2)
else:
power = torch.mean(torch.abs(signal) ** 2, dim=-2)
if np.isposinf(psd_context):
# raise NotImplementedError(psd_context)
power, _ = torch.broadcast_tensors(torch.mean(power, dim=-1, keepdims=True), power)
elif psd_context > 0:
raise NotImplementedError(psd_context)
# assert int(psd_context) == psd_context, psd_context
# psd_context = int(psd_context)
# # import bottleneck as bn
# # Handle the corner case correctly (i.e. sum() / count)
# # Use bottleneck when only left context is requested
# # power = bn.move_mean(power, psd_context*2+1, min_count=1)
# power = window_mean(power, (psd_context, psd_context))
elif psd_context == 0:
pass
else:
raise ValueError(psd_context)
eps = 1e-10 * torch.max(power)
inverse_power = 1 / torch.max(power, eps)
return inverse_power
def hermite(x):
return x.transpose(-2, -1).conj()
def transpose(x):
return x.transpose(-2, -1)
def ComplexTensor_to_Tensor(t):
"""
Converts a third party complex tensor to a native complex torch tensor.
>>> t = ComplexTensor(np.array([1., 2, 3]))
>>> t
ComplexTensor(
real=tensor([1., 2., 3.], dtype=torch.float64),
imag=tensor([0., 0., 0.], dtype=torch.float64),
)
>>> ComplexTensor_to_Tensor(t)
tensor([(1.+0.j), (2.+0.j), (3.+0.j)], dtype=torch.complex128)
"""
assert isinstance(t, ComplexTensor), type(t)
return t.real + 1j * t.imag
def Tensor_to_ComplexTensor(t):
"""
Converts a native complex torch tensor to a third party complex tensor.
>>> t = torch.tensor(np.array([1., 2, 3]) + 0 * 1j)
>>> t
tensor([(1.+0.j), (2.+0.j), (3.+0.j)], dtype=torch.complex128)
>>> Tensor_to_ComplexTensor(t)
ComplexTensor(
real=tensor([1., 2., 3.], dtype=torch.float64),
imag=tensor([0., 0., 0.], dtype=torch.float64),
)
"""
assert isinstance(t, torch.Tensor), type(t)
return ComplexTensor(t.real, t.imag)
def _solve(R, P, solver='torch_complex.solve'):
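    # Solve R G = P for G with the requested backend; ComplexTensor inputs
    # may be converted to native complex tensors when torch supports them.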
if isinstance(R, ComplexTensor):
if solver == 'torch.solve':
R = ComplexTensor_to_Tensor(R)
P = ComplexTensor_to_Tensor(P)
G, _ = torch.solve(P, R)
G = Tensor_to_ComplexTensor(G)
elif solver == 'torch.inverse':
R = ComplexTensor_to_Tensor(R)
G = Tensor_to_ComplexTensor(R.inverse()) @ P
elif solver == 'torch_complex.inverse':
G = R.inverse() @ P
elif solver == 'torch_complex.solve':
G, _ = torch_complex.functional.solve(P, R)
else:
raise ValueError(solver)
else:
G, _ = torch.solve(P, R)
return G
def wpe_v6(Y, taps=10, delay=3, iterations=3, psd_context=0, statistics_mode='full',
solver='torch_complex.solve'):
"""
    This function is similar to nara_wpe.wpe.wpe_v6, but works for torch.
In particular it is designed for a `torch_complex.tensor.ComplexTensor`.
The `torch_complex.tensor.ComplexTensor` is used, because at the time
this code was written, torch had no complete support for complex numbers.
    In 1.6.0.dev20200623 there is partial support for complex numbers.
In this version torch.solve is implemented, but not torch.matmul.
You can change the `solver` only when partial complex support is given.
With `solver="torch.solve"` you can use `torch.solve`. Some experiments
have shown, that the native solver is more robust to find the correct
solution, compared to the fallback to do the inversion with real numbers.
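    A minimal call sketch (assuming ``Y`` is a (D, T) ``ComplexTensor`` for a
    single frequency bin; the keyword values shown are the defaults):
        X_hat = wpe_v6(Y, taps=10, delay=3, iterations=3)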
>>> T = np.random.randint(100, 120)
>>> D = np.random.randint(2, 8)
>>> K = np.random.randint(3, 5)
>>> K = 2
>>> delay = np.random.randint(0, 2)
>>> kwargs = dict(taps=K, delay=delay, iterations=1, statistics_mode='full', psd_context=np.inf)
# Real test:
>>> Y = np.random.normal(size=(D, T))
>>> from nara_wpe import wpe as np_wpe
>>> desired = np_wpe.wpe_v6(Y, **kwargs)
>>> actual = wpe_v6(torch.tensor(Y), **kwargs).numpy()
>>> np.testing.assert_allclose(actual, desired, atol=1e-6)
# Complex test:
>>> Y = np.random.normal(size=(D, T)) + 1j * np.random.normal(size=(D, T))
>>> from nara_wpe import wpe as np_wpe
>>> desired = np_wpe.wpe_v6(Y, **kwargs)
>>> actual = wpe_v6(ComplexTensor(Y.real, Y.imag), **kwargs).numpy()
>>> np.testing.assert_allclose(actual, desired, atol=1e-6)
>>> ComplexTensor(Y.real, Y.imag).real.dtype
torch.float64
>>> actual1 = wpe_v6(ComplexTensor(Y.real, Y.imag), **kwargs, solver='torch_complex.inverse').numpy()
>>> np.testing.assert_allclose(actual1, desired, atol=1e-6)
>>> actual2 = wpe_v6(ComplexTensor(Y.real, Y.imag), **kwargs, solver='torch.solve').numpy()
>>> np.testing.assert_allclose(actual2, desired, atol=1e-6)
>>> np.testing.assert_allclose(actual1, actual2, atol=1e-6)
"""
if statistics_mode == 'full':
s = Ellipsis
    elif statistics_mode ==
p-values - these are adjusted p-values
R('''dat2 <- dat2[, c("rna", "dna", "prna", "pdna")]''')
# take -log10(p-values)
R('''dat2$prna <- -log10(dat2$prna)''')
R('''dat2$pdna <- -log10(dat2$pdna)''')
R('''rownames(dat2) <- genes''')
R('''clust <- Mclust(dat2)''')
R('''classification <- data.frame(clust$classification)''')
R('''colnames(classification) <- "class"''')
R('''dat2$class <- ifelse(rownames(dat2) %in% rownames(classification), classification$class, NA)''')
R('''dat2$gene <- rownames(dat2)''')
R('''write.table(dat2, file = "%s", sep = "\t", row.names = F)''' % outfile)
###################################################
###################################################
###################################################
@transform(mixtureModel, suffix(".tsv"), ".png")
def plotClasses(infile, outfile):
'''
plot the classification of genes on a scatterplot
'''
R('''library(ggplot2)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
R('''plot1 <- ggplot(dat, aes(x = dna, y = rna, size = prna, alpha = 0.5, colour = factor(class)))''')
# regression line
R('''lm1 <- lm(dat$rna ~ dat$dna)''')
R('''i <- lm1[[1]][[1]]''')
R('''s <- lm1[[1]][[2]]''')
R('''plot2 <- plot1 + geom_point() + stat_density2d() + geom_abline(intercept = i, slope = s, linetype = "dashed")''')
R('''plot3 <- plot2 + geom_vline(xintercept = c(-1,1), linetype = "dashed", colour = "darkGrey")''')
R('''plot4 <- plot3 + geom_hline(yintercept = c(-1,1), linetype = "dashed", colour = "darkGrey")''')
R('''plot5 <- plot4 + geom_abline(intercept = 0, slope = 1) + xlim(c(-5, 5)) + ylim(c(-5,5))''')
R('''plot5''') #+ scale_colour_manual(values = c("orange", "purple", "red", "brown", "darkGreen"))''')
R('''ggsave("%s")''' % outfile)
@follows(annotateFunctions)
def annotations():
pass
###################################################
###################################################
###################################################
# Compare abundance estimations
###################################################
###################################################
###################################################
@follows(mkdir("compare_abundance.dir"))
@split(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "counts.dir/*diamond*.norm.matrix"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "counts.dir/*diamond*.norm.matrix")),
"compare_abundance.dir/*.pdf")
def scatterplotAbundanceEstimates(infiles, outfiles):
'''
scatterplot abundance estimates for each sample
'''
levels = ["phylum", "class", "order", "family", "genus", "species"]
for level in levels:
rna, dna = [inf for inf in infiles if inf.find(level) != -1]
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)-1]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)-1]''')
# intersection of species present
R('''keep <- intersect(rownames(rna), rownames(dna))''')
# get data where there is rna and dna
R('''rna <- rna[keep,]''')
R('''dna <- dna[keep,]''')
# take averages
R('''rna.ave <- data.frame(apply(rna, 1, mean))''')
R('''dna.ave <- data.frame(apply(dna, 1, mean))''')
R('''print(cor(dna.ave,rna.ave))''')
R('''png("compare_abundance.dir/average_abundance.%s.png")''' % level)
R('''plot(dna.ave[,1],
rna.ave[,1],
pch = 16,
col = "slateGrey",
xlab = "Mean DNA abundance",
ylab = "Mean RNA abundance",
main = paste("N = ", nrow(dna.ave), sep = ""))
abline(lm(rna[,1]~dna[,1], na.rm = T))''')
R["dev.off"]()
R('''for (i in 1:ncol(dna)){
name = paste("compare_abundance.dir", paste(colnames(dna[i]), ".%s.pdf", sep = ""), sep = "/")
pdf(name)
plot(dna[,i], rna[,i],
pch = 16,
col = "slateGrey",
xlab = "DNA normalised counts",
ylab = "RNA normalised counts")
abline(lm(rna[,i]~dna[,i], na.rm = T), lty = 2)
# fold <- rna[,i]-dna[,i]
# points(dna[,i][fold > 3], rna[,i][fold > 3], pch = 16, col = "blue")
# legend("bottomright", legend = rownames(dna[fold > 3,]), cex = 0.8)
dev.off()
}
''' % level)
###################################################
###################################################
###################################################
@follows(mkdir("compare_gene_abundance.dir"))
@split(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/*.norm.matrix"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "genes.dir/*.norm.matrix")),
"compare_gene_abundance.dir/*genes*.pdf")
def scatterplotGeneAbundanceEstimates(infiles, outfiles):
'''
scatterplot abundance estimates for each sample
'''
rna, dna = infiles
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)-1]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)-1]''')
    # intersection of genes present in both RNA and DNA
R('''keep <- intersect(rownames(rna), rownames(dna))''')
# get RNA where there is DNA
# add labels where the RNA is higher
# than the DNA by > 2 fold
R('''rna <- rna[keep,]''')
R('''dna <- dna[keep,]''')
# take averages
R('''rna.ave <- data.frame(apply(rna, 1, mean))''')
R('''dna.ave <- data.frame(apply(dna, 1, mean))''')
R('''print(cor(dna.ave,rna.ave))''')
R('''png("compare_gene_abundance.dir/average_abundance.genes.png")''')
R('''plot(dna.ave[,1],
rna.ave[,1],
pch = 16,
col = "slateGrey",
xlab = "Mean DNA abundance",
ylab = "Mean RNA abundance",
main = paste("N = ", nrow(dna), sep = ""))
abline(lm(rna[,1]~dna[,1], na.rm = T))''')
R["dev.off"]()
# histogram rna/dna ratio
R('''pdf("compare_gene_abundance.dir/average_abundance.genes.ratio.pdf")''')
R('''hist(log2(rna.ave[,1]/dna.ave[,1]),
xlab = "log2(RNA/DNA)", breaks = 25)''')
R('''abline(v = 0, lty = 2, lwd = 2)''')
R["dev.off"]()
R('''for (i in 1:ncol(dna)){
name = paste("compare_gene_abundance.dir", paste(colnames(dna[i]), ".genes.pdf", sep = ""), sep = "/")
pdf(name)
plot(dna[,i], rna[,i],
pch = 16,
col = "slateGrey",
xlab = "DNA normalised counts",
ylab = "RNA normalised counts")
fold <- rna[,i]-dna[,i]
abline(lm(rna[,i]~dna[,i], na.rm = T), lty = 2)
# points(dna[,i][fold > 3], rna[,i][fold > 3], pch = 16, col = "blue")
# legend("bottomright", legend = rownames(dna[fold > 3,]), cex = 0.8)
dev.off()
}
''')
###################################################
###################################################
###################################################
@follows(mkdir("compare_detected.dir"))
@split(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "counts.dir/*diamond*.aggregated.counts.tsv.gz"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "counts.dir/*diamond*.aggregated.counts.tsv.gz")),
"compare_detected.dir/*.overlap.tsv")
def buildTaxaDetectionOverlap(infiles, outfiles):
'''
    build taxa detection overlap per taxonomic level
'''
levels = ["phylum", "class", "order", "family", "genus", "species"]
for level in levels:
rna, dna = [inf for inf in infiles if inf.find(level) != -1]
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)]''')
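            # For every sample column, count taxa with non-zero counts in RNA,
            # in DNA, and in both, and write a one-row overlap table per sample.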
R('''for (i in 1:ncol(dna)){
name = paste("compare_detected.dir", paste(colnames(dna[i]), ".%s.overlap.tsv", sep = ""), sep = "/")
taxa.rna = rownames(rna[rna[,i] !=0,])
taxa.dna = rownames(dna[dna[,i] !=0,])
nrna = length(taxa.rna)
ndna = length(taxa.dna)
noverlap = length(intersect(taxa.rna, taxa.dna))
result = data.frame(nrna = nrna, ndna = ndna, noverlap = noverlap)
write.table(result, file = name, sep = "\t", row.names = F)}
''' % level)
###################################################
###################################################
###################################################
@follows(mkdir("compare_detected.dir"))
@split(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "counts.dir/*diamond*.aggregated.counts.tsv.gz"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "counts.dir/*diamond*.aggregated.counts.tsv.gz")),
"compare_detected.dir/*.abundance.pdf")
def plotAbundanceLevelsOfTaxaOverlap(infiles, outfiles):
'''
    plot abundance levels for taxa common and unique to RNA and DNA
'''
levels = ["genus"]
for level in levels:
rna, dna = [inf for inf in infiles if inf.find(level) != -1]
outfile = "compare_detected.dir/genus.overlap.abundance.pdf"
R('''library(ggplot2)''')
# get rna reads per million
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,2:ncol(rna)]''')
R('''rna <- sweep(rna, 2, colSums(rna)/1000000, "/")''')
# get dna reads per million
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,2:ncol(dna)]''')
R('''dna <- sweep(dna, 2, colSums(dna)/1000000, "/")''')
# common and distinct sets
R('''common <- intersect(rownames(dna), rownames(rna))''')
R('''rna.only <- setdiff(rownames(rna), rownames(dna))''')
R('''dna.only <- setdiff(rownames(dna), rownames(rna))''')
# boxplot the abundance levels
R('''rna.common <- apply(rna[common,], 1, mean)''')
R('''dna.common <- apply(dna[common,], 1, mean)''')
R('''rna.distinct <- apply(rna[rna.only,], 1, mean)''')
R('''dna.distinct <- apply(dna[dna.only,], 1, mean)''')
        # test significance between groups
R('''ttest1 <- wilcox.test(rna.common, rna.distinct)''')
R('''ttest2 <- wilcox.test(dna.common, dna.distinct)''')
R('''ttest3 <- wilcox.test(rna.common, dna.distinct)''')
R('''ttest4 <- wilcox.test(dna.common, rna.distinct)''')
R('''ttest5 <- wilcox.test(dna.common, rna.common)''')
R('''res <- data.frame("rna.common_vs_rna.distinct" = ttest1$p.value,
"dna.common_vs_dna.distinct" = ttest2$p.value,
"rna.common_vs_dna.distinct" = ttest3$p.value,
"dna.common_vs_rna.distinct" = ttest4$p.value,
"dna.common_vs_rna.common" = ttest5$p.value)''')
outname_sig = P.snip(outfile, ".pdf") + ".sig"
R('''write.table(res, file = "%s", row.names = F, sep = "\t", quote = F)''' % outname_sig)
# create dataframe for plotting
R('''dat <- data.frame(values = c(dna.distinct, dna.common, rna.common, rna.distinct),
status = c(rep("unique.dna", length(dna.distinct)),
rep("common.dna", length(dna.common)),
rep("common.rna", length(rna.common)),
rep("unique.rna", length(rna.distinct))))''')
R('''ggplot(dat, aes(x = factor(status, levels = status), y = values, stat = "identity")) + geom_boxplot() + scale_y_log10()''')
R('''ggsave("%s")''' % outfile)
###################################################
###################################################
###################################################
@follows(mkdir("compare_detected_genes.dir"))
@split(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/gene_counts.tsv.gz"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "genes.dir/gene_counts.tsv.gz")),
"compare_detected_genes.dir/*.overlap.tsv")
def buildGeneDetectionOverlap(infiles, outfiles):
'''
    build gene detection overlap
'''
rna, dna = infiles
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)-1]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)-1]''')
R('''for (i in 1:ncol(dna)){
name = paste("compare_detected_genes.dir", paste(colnames(dna[i]), ".overlap.tsv", sep = ""), sep = "/")
taxa.rna = rownames(rna)[rna[,i] !=0]
taxa.dna = rownames(dna)[dna[,i] !=0]
nrna = length(taxa.rna)
ndna = length(taxa.dna)
noverlap = length(intersect(taxa.rna, taxa.dna))
result = data.frame(nrna = nrna, ndna = ndna, noverlap = noverlap)
write.table(result, file = name, sep = "\t", row.names = F)}
''')
###################################################
###################################################
###################################################
@follows(mkdir("compare_detected_genes.dir"))
@merge(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/gene_counts.tsv.gz"))
+ glob.glob(os.path.join(PARAMS.get("dna_communitiesdir"), "genes.dir/gene_counts.tsv.gz")),
"compare_detected_genes.dir/genes.abundance.pdf")
def plotAbundanceLevelsOfGeneOverlap(infiles, outfile):
'''
plot abundances for unique and common genes
'''
rna, dna = infiles
R('''library(ggplot2)''')
# get rna reads per million
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,2:ncol(rna)]''')
R('''rna <- sweep(rna, 2, colSums(rna)/1000000, "/")''')
# get dna reads per million
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,2:ncol(dna)]''')
R('''dna <- sweep(dna, 2, colSums(dna)/1000000, "/")''')
# common and distinct sets
R('''common <- intersect(rownames(dna), rownames(rna))''')
R('''dna.only <- setdiff(rownames(dna), rownames(rna))''')
# boxplot the abundance levels
R('''rna.common <- apply(rna[common,], 1, mean)''')
R('''dna.common <- apply(dna[common,], | |
"""
Statistical tools for time series analysis
"""
import numpy as np
from scipy import stats, signal
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant
from tsatools import lagmat, lagmat2ds, add_trend
#from statsmodels.sandbox.tsa import var
from adfvalues import *
#from statsmodels.sandbox.rls import RLS
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length that minimizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class.
modargs : tuple
args to pass to model. See notes.
fitargs : tuple
args to pass to fit. See notes.
    startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : str {"aic","bic","t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
Returns
-------
    icbest : float
        Best information criterion.
    bestlag : int
        The lag length that minimizes the information criterion.
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
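    # Fit the model once for each candidate lag length; the lagged columns of
    # exog are assumed to run from low to high lag, as described in Notes.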
for lag in range(startlag, startlag+maxlag+1):
mod_instance = mod(endog, exog[:,:lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic,k) for k,v in results.iteritems())
elif method == "bic":
icbest, bestlag = min((v.bic,k) for k,v in results.iteritems())
elif method == "t-stat":
lags = sorted(results.keys())[::-1]
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt, 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
'''Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : str {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' or 'BIC', then the number of lags is chosen to minimize the
          corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant at
the 95 % level.
store : bool
If True, then a result instance is returned additionally to
the adf statistic
regresults : bool
If True, the full regression results are returned.
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994)
usedlag : int
Number of lags used.
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values.
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 % levels.
Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
regresults : RegressionResults instance
The
resstore : (optional) instance of ResultStore
an instance of a dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables.
If the p-value is close to significant, then the critical values should be
used to judge whether to accept or reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
see example script
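    A minimal sketch, assuming ``y`` is a 1d array-like series and default
    lag selection is acceptable:
        adf, pvalue, usedlag, nobs, crit, icbest = adfuller(y, autolag='AIC')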
References
----------
Greene
Hamilton
P-Values (regression surface approximation)
<NAME>. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
Critical values
<NAME>. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
'''
if regresults:
store = True
trenddict = {None:'nc', 0:'c', 1:'ct', 2:'ctt'}
if regression is None or isinstance(regression, int):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c','nc','ct','ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs/100., 1/4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:,None], maxlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:,0] = x[-nobs-1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag, regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag #convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:,None], bestlag, trim='both', original='in')
nobs = xdall.shape[0]
xdall[:,0] = x[-nobs-1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:,:usedlag+1], regression)).fit()
else:
resols = OLS(xdshort, xdall[:,:usedlag+1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = "The coefficient on the lagged level equals 1 - unit root"
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False):
'''
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
Returns
-------
acovf : array
autocovariance function
'''
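    # Coerce the input to a 1d ndarray; higher-dimensional input is rejected.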
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d | |
import json
import os
import subprocess
import h5py
import uuid
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
from pprint import pprint
from shutil import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import re
class ReactiveTransportSimulatorUtil:
PREPDE_TOOLKIT_PATH = '/kb/module/lib/ReactiveTransportSimulator/Utils'
def _generate_html_report(self):
report = "<html> <head> ReactiveTransportSimulator-KBase report </head> <body> </body> </html>"
return report
class ReactiveTransportSimulatorRunBatchUtil:
def __init__(self,params):
self.params = params
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.dfu = DataFileUtil(self.callback_url)
self.output_files = []
self.html_files = []
self.data_folder = os.path.abspath('./data/')
self.shared_folder = params['shared_folder']
self.scratch_folder = os.path.join(params['shared_folder'],"scratch")
def run_batch_model(self):
print('params:',self.params)
try:
os.mkdir(self.scratch_folder)
except OSError:
print ("Creation of the directory %s failed" % self.scratch_folder)
else:
print ("Successfully created the directory %s " % self.scratch_folder)
# move file templates from data folder to scratch folder
pflotran_input_temp = os.path.join(self.data_folder,'batch_template.in')
pflotran_db_temp = os.path.join(self.data_folder,'database_template.dat')
pflotran_input = os.path.join(self.scratch_folder,'batch.in')
pflotran_db = os.path.join(self.scratch_folder,'database.dat')
stoi_csv_fba = os.path.join(self.scratch_folder,'rxn_fba.csv')
cpd_csv_fba = os.path.join(self.scratch_folder,'cpd_fba.csv')
# read inputs
print("Input FBA model: ",self.params['input_FBA_model'])
dfu = DataFileUtil(self.callback_url)
fba_model = dfu.get_objects({'object_refs': [self.params['input_FBA_model']]})['data'][0]
print("FBA model name :",fba_model['data']['name'])
nrxn = int(self.params['number_simulated_reactions'])
tot_time = float(self.params['simulation_time'])
timestep = float(self.params['snapshot_period'])
temperature = float(self.params['temperature'])
# collect the compound info
cpdid2formula = dict()
df_cpd = pd.DataFrame({'formula':[None]})
for compound in fba_model['data']['modelcompounds']:
cpdid2formula[compound['id']] = compound['formula']
if 'biom' in compound['id']:
df_cpd = df_cpd.append({'formula':'BIOMASS'}, ignore_index=True)
else:
df_cpd = df_cpd.append({'formula':compound['formula']}, ignore_index=True)
df_cpd.insert(len(df_cpd.columns),'initial_concentration(mol/L)',1,True)
df_cpd['formula'].replace('', np.nan, inplace=True)
df_cpd = df_cpd.dropna()
df_cpd.to_csv(cpd_csv_fba,index=False)
print("Compounds saved. \n")
# collect donor, acceptor, biom from reactions
"""
donor : "~/modelcompounds/id/xcpd2_c0"
acceptor : "~/modelcompounds/id/acceptor_c0"
biom : "~/modelcompounds/id/biom_c0"
"""
rxn_ref = ['r'+str(i+1) for i in range(nrxn)]
df_rxn = pd.DataFrame({'rxn_ref':rxn_ref,'rxn_id':None,'DOC_formula':None})
# selected_reactions = random.choices(fba_model['data']['modelreactions'],k=nrxn)
selected_reactions = []
selected_cpd = []
i = 0
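        # Randomly sample reactions until nrxn of them both involve an electron
        # acceptor and contribute a DOC ('xcpd') compound not selected before.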
while i < nrxn:
irxn = random.choice(fba_model['data']['modelreactions'])
acceptor_flag = False
for reagent in irxn['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
if 'acceptor' in cpdid:
acceptor_flag = True
if 'xcpd' in cpdid:
doc = cpdid2formula[cpdid]
selected_cpd.append(doc)
if acceptor_flag and selected_cpd.count(doc) == 1:
selected_reactions.append(irxn)
i += 1
for reaction_idx,reaction_val in enumerate(selected_reactions):
df_rxn['rxn_id'].iloc[reaction_idx] = reaction_val['id']
for reagent in reaction_val['modelReactionReagents']:
cpdid = reagent['modelcompound_ref'].split('/id/')[1]
formula = cpdid2formula[cpdid]
coef = reagent['coefficient']
if "xcpd" in cpdid:
df_rxn['DOC_formula'].iloc[reaction_idx] = formula
if "biom" in cpdid:
formula = 'BIOMASS'
if not formula in df_rxn.columns:
temp = ['0']*df_rxn.shape[0]
df_rxn.insert(len(df_rxn.columns),formula,temp,True)
df_rxn[formula].iloc[reaction_idx] = coef
else:
df_rxn[formula].iloc[reaction_idx] = coef
print(df_rxn.columns)
print(df_rxn.head())
df_rxn.to_csv(stoi_csv_fba,index=False)
print("Selected reactions saved. \n")
# read initial condition from /bin/module/data
init_cond = cpd_csv_fba
# generate sandbox file
sb_file = os.path.join(self.scratch_folder,'reaction_sandbox_pnnl_cyber.F90')
var = ['mu_max','vh','k_deg','cc','activation_energy','reference_temperature']
var_unit = ['1/sec','m^3','1/sec','M','J/mol','K']
generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_csv_fba)
print("Sandbox file generated.")
# format sandbox fortran code
fmt_sb_cmd = 'fprettify ' + sb_file
process = subprocess.Popen(fmt_sb_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Sandbox file formatted.")
# copy sandbox file to src dir and recompile pflotran
src_dir = '/bin/pflotran/src/pflotran'
copy(sb_file,src_dir)
print(os.getcwd())
compile_pflotran_cmd = 'sh ./data/compile.sh'
process = subprocess.Popen(compile_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Compile PFLOTRAN output:",output[-300:])
print("Complile PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
# generate batch input deck
self.generate_pflotran_input_batch(pflotran_input_temp,stoi_csv_fba,cpd_csv_fba,pflotran_input,tot_time,timestep,temperature)
print("Batch input deck generated.")
# generate database
update_pflotran_database(stoi_csv_fba,pflotran_db_temp,pflotran_db)
print("Database generated.")
# running pflotran
exepath = '/bin/pflotran/src/pflotran/pflotran'
run_pflotran_cmd = exepath + ' -n 1 -pflotranin ' + pflotran_input
process = subprocess.Popen(run_pflotran_cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("Running PFLOTRAN output:",output[-300:])
print("Running PFLOTRAN err:",error)
pprint(os.listdir(self.scratch_folder))
h5_file = os.path.join(self.scratch_folder,'batch.h5')
if os.path.isfile(h5_file):
print ("Successfully run PFLOTRAN")
else:
print ("Fail to run PFLOTRAN")
# generate plots in /kb/module/work/tmp/scratch/
self.plot_time_series_batch(h5_file)
# Attach output
self.output_files.append(
{'path': cpd_csv_fba,
'name': os.path.basename(cpd_csv_fba),
'label': os.path.basename(cpd_csv_fba),
'description': 'compounds'}
)
self.output_files.append(
{'path': stoi_csv_fba,
'name': os.path.basename(stoi_csv_fba),
'label': os.path.basename(stoi_csv_fba),
'description': 'reactions stoichiometry table'}
)
self.output_files.append(
{'path': sb_file,
'name': os.path.basename(sb_file),
'label': os.path.basename(sb_file),
'description': 'Sandbox source code'}
)
self.output_files.append(
{'path': pflotran_input,
'name': os.path.basename(pflotran_input),
'label': os.path.basename(pflotran_input),
'description': 'Batch reaction input deck for PFLOTRAN'}
)
self.output_files.append(
{'path': pflotran_db,
'name': os.path.basename(pflotran_db),
'label': os.path.basename(pflotran_db),
'description': 'Batch reaction input deck for PFLOTRAN'}
)
self.output_files.append(
{'path': h5_file,
'name': os.path.basename(h5_file),
'label': os.path.basename(h5_file),
'description': 'H5 file generated by PFLOTRAN batch reaction'}
)
fig_name = 'time_series_plot.png'
fig_file = os.path.join(self.scratch_folder,fig_name)
self.output_files.append(
{'path': fig_file,
'name': os.path.basename(fig_file),
'label': os.path.basename(fig_file),
'description': 'Plots of breakthrough curves generated by PFLOTRAN batch reaction'}
)
# Return the report
return self._generate_html_report()
def generate_pflotran_input_batch(self,batch_file,stoi_file,init_file,output_file,tot_time,timestep,temp):
file = open(batch_file,'r')
rxn_df = pd.read_csv(stoi_file)
init_df = pd.read_csv(init_file)
primary_species_charge = []
primary_species_nocharge = []
for spec in list(rxn_df.columns):
if spec in ['rxn_id','DOC_formula','rxn_ref','H2O','BIOMASS']:
continue
primary_species_nocharge.append(spec)
if spec=='NH4':
primary_species_charge.append('NH4+')
continue
if spec=='HCO3':
primary_species_charge.append('HCO3-')
continue
if spec=='H':
primary_species_charge.append('H+')
continue
if spec=='HS':
primary_species_charge.append('HS-')
continue
if spec=='HPO4':
primary_species_charge.append('HPO4-')
continue
primary_species_charge.append(spec)
init_cond = [init_df.loc[init_df['formula']==i,'initial_concentration(mol/L)'].iloc[0] for i in primary_species_nocharge]
init_biom = init_df.loc[init_df['formula']=='BIOMASS','initial_concentration(mol/L)'].iloc[0]
for idx,val in enumerate(primary_species_nocharge):
print("The initial concentration of {} is {} mol/L \n".format(val,init_cond[idx]))
pri_spec = ""
pri_spec_init = ""
new_file_content = ""
for line in file:
if 'PRIMARY_SPECIES' in line:
new_file_content += line
for i in primary_species_charge:
pri_spec += " " + i + "\n"
new_file_content += " " + pri_spec + "\n"
elif 'CONSTRAINT initial' in line:
new_file_content += line
new_file_content += " CONCENTRATIONS" + "\n"
for j in range(len(primary_species_charge)):
new_file_content += " {} {} T".format(primary_species_charge[j],init_cond[j])+ "\n"
new_file_content += " /" + "\n"
new_file_content += " IMMOBILE" + "\n"
new_file_content += " BIOMASS {} ".format(init_biom) + "\n"
new_file_content += " /"
elif 'FINAL_TIME' in line:
new_file_content += " FINAL_TIME {} h".format(tot_time) + "\n"
elif 'MAXIMUM_TIMESTEP_SIZE' in line:
new_file_content += " MAXIMUM_TIMESTEP_SIZE {} h".format(timestep) + "\n"
elif 'PERIODIC TIME' in line:
new_file_content += " PERIODIC TIME {} h".format(timestep) + "\n"
elif 'REFERENCE_TEMPERATURE' in line:
new_file_content += " REFERENCE_TEMPERATURE {} ! degrees C".format(temp) + "\n"
else:
new_file_content += line
writing_file = open(output_file, "w")
writing_file.write(new_file_content)
writing_file.close()
print('The batch input deck is updated.')
return
def plot_time_series_batch(self,h5_file):
obs_coord = [0.5,0.5,0.5]
file = h5py.File(h5_file,'r+')
time_str = [list(file.keys())[i] for i in range(len(list(file.keys()))) if list(file.keys())[i][0:4] == "Time"]
time_unit = time_str[0][-1]
time = sorted([float(time_str[i].split()[1]) for i in range(len(time_str))])
bound = []
bound.append(file['Coordinates']['X [m]'][0])
bound.append(file['Coordinates']['X [m]'][-1])
bound.append(file['Coordinates']['Y [m]'][0])
bound.append(file['Coordinates']['Y [m]'][-1])
bound.append(file['Coordinates']['Z [m]'][0])
bound.append(file['Coordinates']['Z [m]'][-1])
nxyz = []
nxyz.append(len(file['Coordinates']['X [m]'])-1)
nxyz.append(len(file['Coordinates']['Y [m]'])-1)
nxyz.append(len(file['Coordinates']['Z [m]'])-1)
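        # Cell-centre coordinates on each axis; the observation point is mapped
        # to the index of the nearest cell centre.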
x_coord = (np.linspace(bound[0],bound[1],nxyz[0]+1)[:-1]+np.linspace(bound[0],bound[1],nxyz[0]+1)[1:])/2
y_coord = (np.linspace(bound[2],bound[3],nxyz[1]+1)[:-1]+np.linspace(bound[2],bound[3],nxyz[1]+1)[1:])/2
z_coord = (np.linspace(bound[4],bound[5],nxyz[2]+1)[:-1]+np.linspace(bound[4],bound[5],nxyz[2]+1)[1:])/2
x_idx = np.argmin(np.absolute(x_coord-obs_coord[0]))
y_idx = np.argmin(np.absolute(y_coord-obs_coord[1]))
z_idx = np.argmin(np.absolute(z_coord-obs_coord[2]))
time_zero = "Time:"+str(" %12.5E" % 0)+str(" %s" % time_unit)
var_name = [x for x in list(file[time_zero].keys()) if 'Total' in x]
var_value = np.zeros((len(var_name),len(time)))
for i, itime in enumerate(time):
time_slice = "Time:"+str(" %12.5E" % itime)+str(" %s" % time_unit)
# print(file[time_slice][var_name].keys())
for j in range(len(var_name)):
var_value[j,i] = file[time_slice][var_name[j]][x_idx][y_idx][z_idx]
fig = plt.figure(num=1,dpi=150)
first_doc = True
for i in range(len(var_name)):
if var_name[i][6] == 'C':
if first_doc == True:
plt.plot(time,var_value[i,:],label='DOCs',color='k')[0]
first_doc = False
else:
plt.plot(time,var_value[i,:],color='k')[0]
else:
plt.plot(time,var_value[i,:],label=var_name[i])[0]
plt.ioff()
plt.xlabel("Time (%s)" %time_unit)
ylabel = 'Concentration [M]'
plt.ylabel(ylabel)
plt.legend(frameon=False,loc='upper center', bbox_to_anchor=(0.5, -0.15),ncol=3)
fig_name = 'time_series_plot.png'
fig_path = os.path.join(self.scratch_folder,fig_name)
plt.savefig(fig_path,dpi=150,bbox_inches='tight')
if os.path.isfile(fig_path):
print ("Successfully generated time series plot")
else:
print ("Fail to generate time series plot")
return
def visualize_hdf_in_html(self):
output_directory = os.path.join(self.shared_folder,'output')
os.makedirs(output_directory)
print("output dir:", output_directory)
html_file = os.path.join(output_directory,'summary.html')
fig_name = 'time_series_plot.png'
pflotran_out_name = 'batch.out'
fig_path = os.path.join(self.scratch_folder,fig_name)
pflotran_out_path = os.path.join(self.scratch_folder,pflotran_out_name)
if os.path.isfile(fig_path):
print ("Time series plot exists")
else:
print ("Time series plot does not exist")
print("figpath:",fig_path)
if os.path.isfile(pflotran_out_path):
print ("PFLOTRAN output exists")
else:
print ("PFLOTRAN output does not exist")
print("figpath:",pflotran_out_path)
copy(fig_path,'/kb/module/work/tmp/output')
copy(pflotran_out_path,'/kb/module/work/tmp/output')
with open(html_file, 'w') as f:
f.write("""
<!DOCTYPE html>
<html>
<body>
<h1>PFLOTRAN-KBbase</h1>
<p>PFLOTRAN output</p>
<embed src="batch.out" width="480" height="960">
            <p>Visualize PFLOTRAN output</p>
<img src="{}" alt="Time series plot" height="360" width="480"></img>
</body>
</html>
""".format(fig_name))
with open(html_file, 'r') as f:
print("html_file:",f.readlines())
report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
'pack': 'zip'})['shock_id']
return {'shock_id': report_shock_id,
'name': os.path.basename(html_file),
'label': os.path.basename(html_file),
'description': 'HTML summary report for run_batch_model App'}
def _generate_html_report(self):
# Get the workspace name from the parameters
ws_name = self.params["workspace"]
# Visualize the result in html
html_report_viz_file = self.visualize_hdf_in_html()
self.html_files.append(html_report_viz_file)
# Save the html to the report dictionary
report_params = {
# message is an optional field.
# A string that appears in the summary section of the result page
'message': "Say something...",
# A list of typed objects created during the execution
# of the App. This can only be used to refer to typed
# objects in the workspace and is separate from any files
# generated by the app.
# See a working example here:
# https://github.com/kbaseapps/kb_deseq/blob/586714d/lib/kb_deseq/Utils/DESeqUtil.py#L262-L264
# 'objects_created': objects_created_in_app,
# A list | |
setBoolProp
from .morphing import setActivated
final = finalProp(raw)
if raw not in self.drivers.keys():
self.drivers[raw] = []
self.visible[raw] = False
self.primary[raw] = False
if asset:
visible = (asset.visible or GS.useMakeHiddenSliders)
self.visible[raw] = visible
self.primary[raw] = True
if skey and not visible:
return final
elif asset.type == "bool":
setBoolProp(self.rig, raw, asset.value)
setBoolProp(self.amt, final, asset.value)
elif asset.type == "float":
self.setFloatLimits(self.rig, raw, GS.sliderLimits, asset, skey)
self.setFloatLimits(self.amt, final, GS.finalLimits, asset, skey)
elif asset.type == "int":
self.rig[raw] = 0
self.amt[final] = 0
else:
self.setFloatLimits(self.rig, raw, GS.sliderLimits, asset, skey)
self.setFloatLimits(self.amt, final, GS.finalLimits, asset, skey)
reportError("BUG: Unknown asset type: %s.\nAsset: %s" % (asset.type, asset), trigger=(2,3))
if visible:
setActivated(self.rig, raw, True)
self.addToMorphSet(raw, asset, False)
return final
def setFloatLimits(self, rna, prop, limits, asset, skey):
from .driver import setFloatProp
if limits == 'DAZ' or "jcm" in prop.lower():
min = GS.morphMultiplier * asset.min
max = GS.morphMultiplier * asset.max
setFloatProp(rna, prop, 0.0, min, max)
if skey:
skey.slider_min = min
skey.slider_max = max
elif limits == 'CUSTOM':
setFloatProp(rna, prop, 0.0, GS.customMin, GS.customMax)
if skey:
skey.slider_min = GS.customMin
skey.slider_max = GS.customMax
else:
setFloatProp(rna, prop, 0.0, None, None)
if skey:
skey.slider_min = 0.0
skey.slider_max = 1.0
def makeValueFormula(self, output, expr):
if expr["prop"]:
self.addNewProp(output)
prop = expr["prop"]
self.drivers[output].append(("PROP", prop, expr["factor"]))
if expr["mult"]:
mult = expr["mult"]
if output not in self.mults.keys():
self.mults[output] = []
self.mults[output].append(mult)
self.addNewProp(mult)
if expr["bone"]:
bname = self.getRealBone(expr["bone"])
if bname:
if output not in self.drivers.keys():
self.drivers[output] = []
self.drivers[output].append(("BONE", bname, expr))
else:
print("Missing bone (makeValueFormula):", expr["bone"])
def getRealBone(self, bname):
from .bone import getTargetName
nname = getTargetName(bname, self.rig)
return nname
def getDrivenBone(self, bname):
bname = self.getRealBone(bname)
if bname:
dname = drvBone(bname)
if dname in self.rig.pose.bones.keys():
return dname
return bname
def getBoneData(self, bname, expr):
from .transform import Transform
bname = self.getDrivenBone(bname)
if bname is None:
return
pb = self.rig.pose.bones[bname]
factor = expr["factor"]
if "points" in expr.keys():
factor = self.cheatSplineTCB(expr["points"], factor)
raw = expr["prop"]
final = self.addNewProp(raw)
tfm = Transform()
return tfm, pb, final, factor
def cheatSplineTCB(self, points, factor):
x0 = y0 = None
for n,point in enumerate(points):
x,y = point[0:2]
if x == 0 and y == 0:
x0 = x
y0 = y
n0 = n
break
if x0 is None:
return factor
if n0 == 0:
x1,y1 = points[-1][0:2]
else:
x1,y1 = points[0][0:2]
factor = (y1-y0)/(x1-x0)
return factor
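# Worked example of the linear approximation above (illustrative input):
#   points = [(0.0, 0.0), (1.0, 0.35)]
#   the origin is found at n0 == 0, so the last point is used and
#   factor = (0.35 - 0.0) / (1.0 - 0.0) = 0.35
# If no control point sits exactly at the origin, the incoming factor is
# returned unchanged.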
def makeRotFormula(self, bname, idx, expr):
tfm,pb,prop,factor = self.getBoneData(bname, expr)
tfm.setRot(factor, prop, index=idx)
self.addPoseboneDriver(pb, tfm)
def makeTransFormula(self, bname, idx, expr):
tfm,pb,prop,factor = self.getBoneData(bname, expr)
tfm.setTrans(factor, prop, index=idx)
self.addPoseboneDriver(pb, tfm)
def makeScaleFormula(self, bname, idx, expr):
tfm,pb,prop,factor = self.getBoneData(bname, expr)
tfm.setScale(factor, True, prop, index=idx)
self.addPoseboneDriver(pb, tfm)
def makeCenterFormula(self, bname, idx, expr):
_tfm,pb,prop,factor = self.getBoneData(bname, expr)
if "HdOffset" not in pb.keys():
pb.HdOffset = Zero
vec = Vector((0,0,0))
vec[idx] = factor
self.setFcurves(pb, vec, prop, "HdOffset", "pose")
#-------------------------------------------------------------
# Add posebone driver
#-------------------------------------------------------------
def addPoseboneDriver(self, pb, tfm):
from .node import getBoneMatrix
mat = getBoneMatrix(tfm, pb)
loc,quat,scale = mat.decompose()
success = False
if (tfm.transProp and loc.length > 0.01*self.rig.DazScale):
self.setFcurves(pb, loc, tfm.transProp, "location")
success = True
if tfm.rotProp:
if Vector(quat.to_euler()).length < 1e-4:
pass
elif pb.rotation_mode == 'QUATERNION':
quat[0] -= 1
self.setFcurves(pb, quat, tfm.rotProp, "rotation_quaternion")
success = True
else:
euler = mat.to_euler(pb.rotation_mode)
self.setFcurves(pb, euler, tfm.rotProp, "rotation_euler")
success = True
if (tfm.scaleProp and scale.length > 1e-4):
self.setFcurves(pb, scale-One, tfm.scaleProp, "scale")
success = True
elif tfm.generalProp:
self.setFcurves(pb, scale-One, tfm.generalProp, "scale")
success = True
return success
def setFcurves(self, pb, vec, prop, channel, pose="pose"):
def getBoneFcurves(pb, channel):
path = '%s.bones["%s"].%s' % (pose, pb.name, channel)
fcurves = {}
if self.rig.animation_data:
for fcu in self.rig.animation_data.drivers:
if path == fcu.data_path:
fcurves[fcu.array_index] = fcu
return fcurves
fcurves = getBoneFcurves(pb, channel)
for idx,factor in self.getFactors(vec):
if idx in fcurves.keys():
fcu = fcurves[idx]
else:
fcu = None
bname,drivers = self.findSumDriver(pb, channel, idx, (pb, fcu, {}))
drivers[prop] = factor
def getFactors(self, vec):
maxfactor = max([abs(factor) for factor in vec])
return [(idx,factor) for idx,factor in enumerate(vec) if abs(factor) > 0.01*maxfactor]
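# Example of the pruning performed by getFactors (illustrative values):
#   vec = (0.5, 0.004, -0.2)  -> maxfactor = 0.5, cutoff = 0.005
#   -> returns [(0, 0.5), (2, -0.2)]; the negligible middle component is dropped.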
def findSumDriver(self, pb, channel, idx, data):
bname = pb.name
if drvBone(bname) in self.rig.data.bones.keys():
bname = drvBone(bname)
if bname not in self.sumdrivers.keys():
self.sumdrivers[bname] = {}
if channel not in self.sumdrivers[bname].keys():
self.sumdrivers[bname][channel] = {}
if idx not in self.sumdrivers[bname][channel].keys():
self.sumdrivers[bname][channel][idx] = data
return bname, self.sumdrivers[bname][channel][idx][2]
def clearProp(self, pgs, prop, idx):
for n,pg in enumerate(pgs):
if pg.name == prop and pg.index == idx:
pgs.remove(n)
return
#------------------------------------------------------------------
# Second pass: Load missing morphs
#------------------------------------------------------------------
def makeMissingMorphs(self, bodypart):
from .asset import getDazPath
for fileref in self.loaded:
self.referred[fileref] = False
morphset = self.morphset
namepaths = []
groupedpaths,morphfiles = self.setupMorphGroups()
for ref,unloaded in self.referred.items():
if unloaded:
path = getDazPath(ref)
if path:
name = ref.rsplit("/",1)[-1]
data = (name,path,bodypart)
morphset = self.getPathMorphSet(path, morphfiles)
if morphset:
groupedpaths[morphset].append(data)
else:
namepaths.append(data)
if namepaths:
self.makeAllMorphs(namepaths, False)
for mset,namepaths in groupedpaths.items():
if namepaths:
self.morphset = mset
self.makeAllMorphs(namepaths, False)
self.morphset = morphset
def setupMorphGroups(self):
from .morphing import getMorphPaths
if self.char is None:
return {}, {}
morphrefs = {}
groupedpaths = {}
for morphset,paths in getMorphPaths(self.char).items():
groupedpaths[morphset] = []
morphrefs[morphset] = [self.getFileRef(path) for path in paths]
return groupedpaths, morphrefs
def getPathMorphSet(self, path, morphrefs):
ref = self.getFileRef(path)
for morphset,refs in morphrefs.items():
if ref in refs:
return morphset
return None
#------------------------------------------------------------------
# Third pass: Build the drivers
#------------------------------------------------------------------
def buildDrivers(self):
print("Building drivers")
for output,drivers in self.drivers.items():
if drivers:
if self.isDriverType('BONE', drivers):
for dtype,bname,expr in drivers:
if dtype == 'BONE':
self.buildBoneDriver(output, bname, expr, False)
elif self.isDriverType('PROP', drivers):
self.buildPropDriver(output, drivers)
else:
self.buildPropDriver(output, drivers)
def isDriverType(self, dtype, drivers):
for driver in drivers:
if driver[0] == dtype:
return True
return False
def buildPropDriver(self, raw, drivers):
from .driver import getRnaDriver, Variable, removeModifiers
isJcm = ("jcm" in raw.lower() and
"ejcm" not in raw.lower() and
raw[0:6] != "JCMs O")
rna,channel = self.getDrivenChannel(raw)
bvars = []
vvars = {}
string = ""
fcu0 = getRnaDriver(rna, channel, None)
if fcu0 and fcu0.driver.type == 'SCRIPTED':
if not self.primary[raw]:
self.extendPropDriver(fcu0, raw, drivers)
return
vtargets,btargets = self.getVarBoneTargets(fcu0)
if isJcm:
string = fcu0.driver.expression
elif btargets:
varname = btargets[-1][0]
string = self.extractBoneExpression(fcu0.driver.expression, varname)
for _,_,var0 in btargets:
bvars.append(Variable(var0))
for vname,_,var0 in vtargets:
vvars[vname] = Variable(var0)
rna.driver_remove(channel)
fcu = rna.driver_add(channel)
fcu.driver.type = 'SCRIPTED'
removeModifiers(fcu)
for bvar in bvars:
var = fcu.driver.variables.new()
bvar.create(var)
if isJcm and string:
fcu.driver.expression = string
else:
ok = self.buildNewPropDriver(fcu, rna, channel, string, raw, drivers)
if not ok:
return
self.addMissingVars(fcu, vvars)
self.removeUnusedVars(fcu)
def buildNewPropDriver(self, fcu, rna, channel, string, raw, drivers):
varname = "a"
if self.visible[raw] or not self.primary[raw]:
string += varname
self.addPathVar(fcu, varname, self.rig, propRef(raw))
if raw not in self.rig.keys():
self.rig[raw] = 0.0
string,rdrivers = self.addDriverVars(fcu, string, varname, raw, drivers)
if not string:
print("Empty string: %s" % raw)
rna.driver_remove(channel)
return False
if self.getMultipliers(raw):
string = self.multiplyMults(fcu, string)
fcu.driver.expression = string
if rdrivers:
self.extendPropDriver(fcu, raw, rdrivers)
return True
def extractBoneExpression(self, string, varname):
string = string.split("(", 1)[-1]
mult = string.split(varname, 1)[0]
if mult == "0":
mult = "0*"
return "%s%s+" % (mult, varname)
def addDriverVars(self, fcu, string, varname, raw, drivers):
def multiply(factor, varname):
if factor == 1:
return "+%s" % varname
elif factor == -1:
return "-%s" % varname
else:
return "%+g*%s" % (factor, varname)
channels = [var.targets[0].data_path for var in fcu.driver.variables]
for dtype,subraw,factor in drivers[0:MAX_TERMS2]:
if dtype != 'PROP':
continue
subfinal = finalProp(subraw)
channel = propRef(subfinal)
if channel in channels:
continue
varname = nextLetter(varname)
string += multiply(factor, varname)
self.ensureExists(subraw, subfinal, 0.0)
self.addPathVar(fcu, varname, self.amt, channel)
if len(drivers) > MAX_TERMS2:
return string, drivers[MAX_TERMS2:]
else:
return string, []
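# Sketch of the expression text addDriverVars builds (the morph names are
# invented for this example; MAX_TERMS2 is the module's cap on terms per
# driver expression):
#   drivers = [('PROP', 'FBMHeavy', 1), ('PROP', 'PBMBelly', -0.5)]
#   starting from string = "a" (the raw property variable), the loop appends
#   "+b" and then "-0.5*c", giving "a+b-0.5*c"; any terms beyond MAX_TERMS2
#   are returned so extendPropDriver can route them into a rest property.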
def addMissingVars(self, fcu, vvars):
vnames = [var.name for var in fcu.driver.variables]
for vname,vvar in vvars.items():
if vname not in vnames:
var = fcu.driver.variables.new()
vvar.create(var)
def removeUnusedVars(self, fcu):
for var in list(fcu.driver.variables):
if var.name not in fcu.driver.expression:
fcu.driver.variables.remove(var)
def extendPropDriver(self, fcu, raw, drivers):
string = fcu.driver.expression
char = ""
while string[-1] == ")":
char += ")"
string = string[:-1]
if string[-1] == "R":
rest = restProp(raw)
self.addRestDrivers(rest, drivers)
return
else:
string += "+R"
rest = restProp(raw)
self.amt[rest] = 0.0
self.addPathVar(fcu, "R", self.amt, propRef(rest))
self.addRestDrivers(rest, drivers)
string += char
if len(string) > MAX_EXPRESSION_SIZE:
errtype = "Driving expressions too long for the following properties:"
if errtype not in self.errors.keys():
self.errors[errtype] = []
self.errors[errtype].append(raw)
else:
fcu.driver.expression = string
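# Note on the "+R" mechanism above (behaviour as implemented here): once a
# scripted driver already exists, extra terms are not appended to the
# expression itself; a single rest property R is added (or reused) and the
# overflow drivers are attached to it via addRestDrivers, which keeps the
# expression below MAX_EXPRESSION_SIZE.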
def addRestDrivers(self, rest, drivers):
struct = self.restdrivers[rest] = {}
| |
)
self.m_gauge_file_in.SetValue( 0 )
bSizer361.Add( self.m_gauge_file_in, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
sbSizer201.Add( bSizer361, 0, wx.EXPAND, 5 )
bSizer371 = wx.BoxSizer( wx.HORIZONTAL )
self.m_button_folder_in = wx.Button( sbSizer201.GetStaticBox(), wx.ID_ANY, u"加载文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer371.Add( self.m_button_folder_in, 0, wx.ALL, 5 )
self.m_gauge_fold_in = wx.Gauge( sbSizer201.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_fold_in.SetValue( 0 )
bSizer371.Add( self.m_gauge_fold_in, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
sbSizer201.Add( bSizer371, 0, wx.EXPAND, 5 )
self.m_staticline811 = wx.StaticLine( sbSizer201.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
sbSizer201.Add( self.m_staticline811, 0, wx.EXPAND |wx.ALL, 5 )
m_listBox_pic_inChoices = []
self.m_listBox_pic_in = wx.ListBox( sbSizer201.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_pic_inChoices, 0 )
sbSizer201.Add( self.m_listBox_pic_in, 1, wx.ALL|wx.EXPAND, 5 )
bSizer421 = wx.BoxSizer( wx.HORIZONTAL )
self.m_gauge_work_in = wx.Gauge( sbSizer201.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_work_in.SetValue( 0 )
bSizer421.Add( self.m_gauge_work_in, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_choice_type_inChoices = [ u"普通", u"标准", u"复杂" ]
self.m_choice_type_in = wx.Choice( sbSizer201.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_type_inChoices, 0 )
self.m_choice_type_in.SetSelection( 0 )
bSizer421.Add( self.m_choice_type_in, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_button_star_in = wx.Button( sbSizer201.GetStaticBox(), wx.ID_ANY, u"开始", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer421.Add( self.m_button_star_in, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
sbSizer201.Add( bSizer421, 0, wx.EXPAND, 5 )
self.m_panel241.SetSizer( sbSizer201 )
self.m_panel241.Layout()
sbSizer201.Fit( self.m_panel241 )
self.m_splitter2.SplitVertically( self.m_panel231, self.m_panel241, 0 )
bSizer33.Add( self.m_splitter2, 0, wx.EXPAND, 5 )
sbSizer13 = wx.StaticBoxSizer( wx.StaticBox( self.m_scrolledWindow9, wx.ID_ANY, u"比较新增" ), wx.VERTICAL )
bSizer9 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText6 = wx.StaticText( sbSizer13.GetStaticBox(), wx.ID_ANY, u"新文件文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
bSizer9.Add( self.m_staticText6, 0, wx.ALL, 5 )
self.m_dirPicker_old = wx.DirPickerCtrl( sbSizer13.GetStaticBox(), wx.ID_ANY, wx.EmptyString, u"新文件夹", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE|wx.DIRP_SMALL )
bSizer9.Add( self.m_dirPicker_old, 0, wx.ALL|wx.EXPAND, 5 )
self.m_staticText5 = wx.StaticText( sbSizer13.GetStaticBox(), wx.ID_ANY, u"旧文件文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
bSizer9.Add( self.m_staticText5, 0, wx.ALL, 5 )
self.m_dirPicker6 = wx.DirPickerCtrl( sbSizer13.GetStaticBox(), wx.ID_ANY, wx.EmptyString, u"旧文件夹", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE|wx.DIRP_SMALL )
bSizer9.Add( self.m_dirPicker6, 0, wx.ALL|wx.EXPAND, 5 )
m_listBox_defferChoices = []
self.m_listBox_deffer = wx.ListBox( sbSizer13.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_defferChoices, wx.LB_HSCROLL|wx.LB_NEEDED_SB )
bSizer9.Add( self.m_listBox_deffer, 1, wx.ALL|wx.EXPAND, 5 )
sbSizer13.Add( bSizer9, 1, wx.EXPAND, 5 )
bSizer33.Add( sbSizer13, 0, wx.EXPAND, 5 )
self.m_scrolledWindow9.SetSizer( bSizer33 )
self.m_scrolledWindow9.Layout()
bSizer33.Fit( self.m_scrolledWindow9 )
self.m_notebook3.AddPage( self.m_scrolledWindow9, u"其他工具", False )
bSizer19.Add( self.m_notebook3, 1, wx.EXPAND |wx.ALL, 5 )
m_sdbSizer4 = wx.StdDialogButtonSizer()
self.m_sdbSizer4OK = wx.Button( self, wx.ID_OK )
m_sdbSizer4.AddButton( self.m_sdbSizer4OK )
self.m_sdbSizer4Apply = wx.Button( self, wx.ID_APPLY )
m_sdbSizer4.AddButton( self.m_sdbSizer4Apply )
self.m_sdbSizer4Cancel = wx.Button( self, wx.ID_CANCEL )
m_sdbSizer4.AddButton( self.m_sdbSizer4Cancel )
m_sdbSizer4.Realize();
bSizer19.Add( m_sdbSizer4, 0, wx.ALIGN_RIGHT, 5 )
self.SetSizer( bSizer19 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.close )
self.Bind( wx.EVT_INIT_DIALOG, self.initial )
self.m_notebook3.Bind( wx.EVT_NOTEBOOK_PAGE_CHANGED, self.change_page )
self.m_radioBox_ex_type.Bind( wx.EVT_RADIOBOX, self.change )
self.m_checkBox_in_cn.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox_add_dir.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox_autoopen.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox_pass_finished.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox_open_temp.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox4_finish_exit.Bind( wx.EVT_CHECKBOX, self.change )
self.m_checkBox_clear.Bind( wx.EVT_CHECKBOX, self.change )
self.m_button_menu.Bind( wx.EVT_BUTTON, self.menu_setting )
self.m_toggleBtn_lock.Bind( wx.EVT_TOGGLEBUTTON, self.lock_address )
self.m_radioBox_type_use.Bind( wx.EVT_RADIOBOX, self.change_type )
self.m_radioBox_im.Bind( wx.EVT_RADIOBOX, self.change_input )
self.m_radioBox_az_div.Bind( wx.EVT_RADIOBOX, self.change_div )
self.m_textCtrl_tex_limit.Bind( wx.EVT_TEXT, self.change_in_tex )
self.m_textCtrl_tex_limit.Bind( wx.EVT_TEXT_ENTER, self.change )
self.m_bpButton_defualt_tex.Bind( wx.EVT_BUTTON, self.default_tex )
self.m_textCtrl_mesh_limit.Bind( wx.EVT_TEXT, self.change_in_mesh )
self.m_textCtrl_mesh_limit.Bind( wx.EVT_TEXT_ENTER, self.change )
self.m_bpButton6_default_mesh.Bind( wx.EVT_BUTTON, self.default_mesh )
self.m_bpButton_add.Bind( wx.EVT_BUTTON, self.az_add )
self.m_bpButton_del.Bind( wx.EVT_BUTTON, self.az_del )
self.m_bpButton_up.Bind( wx.EVT_BUTTON, self.az_up )
self.m_bpButton_down.Bind( wx.EVT_BUTTON, self.az_down )
self.m_checkList_az_limits.Bind( wx.EVT_LISTBOX, self.choice )
self.m_checkList_az_limits.Bind( wx.EVT_LISTBOX_DCLICK, self.change_pattern )
self.m_checkBox_save_all.Bind( wx.EVT_CHECKBOX, self.change )
self.m_listBox_new.Bind( wx.EVT_LISTBOX_DCLICK, self.open_add_name )
self.m_bpButton_add_name.Bind( wx.EVT_BUTTON, self.name_add )
self.m_bpButton_del_name.Bind( wx.EVT_BUTTON, self.name_del )
self.m_listBox_new1.Bind( wx.EVT_LISTBOX, self.choice_add )
self.m_listBox_new1.Bind( wx.EVT_LISTBOX_DCLICK, self.edit_add_name )
self.m_searchCtrl2.Bind( wx.EVT_SEARCHCTRL_SEARCH_BTN, self.searching )
self.m_searchCtrl2.Bind( wx.EVT_TEXT, self.searching )
self.m_listBox_change.Bind( wx.EVT_LISTBOX_DCLICK, self.change_name )
self.m_button_file.Bind( wx.EVT_BUTTON, self.in_file )
self.m_button_folder.Bind( wx.EVT_BUTTON, self.in_fold )
self.m_button_star.Bind( wx.EVT_BUTTON, self.in_start )
self.m_button_file_in.Bind( wx.EVT_BUTTON, self.out_file )
self.m_button_folder_in.Bind( wx.EVT_BUTTON, self.out_fold )
self.m_button_star_in.Bind( wx.EVT_BUTTON, self.out_start )
self.m_dirPicker_old.Bind( wx.EVT_DIRPICKER_CHANGED, self.add_new )
self.m_dirPicker6.Bind( wx.EVT_DIRPICKER_CHANGED, self.add_old )
self.m_listBox_deffer.Bind( wx.EVT_LISTBOX_DCLICK, self.writer_into )
self.m_sdbSizer4Apply.Bind( wx.EVT_BUTTON, self.apply_click )
self.m_sdbSizer4Cancel.Bind( wx.EVT_BUTTON, self.cancel_click )
self.m_sdbSizer4OK.Bind( wx.EVT_BUTTON, self.ok_click )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def close( self, event ):
event.Skip()
def initial( self, event ):
event.Skip()
def change_page( self, event ):
event.Skip()
def change( self, event ):
event.Skip()
def menu_setting( self, event ):
event.Skip()
def lock_address( self, event ):
event.Skip()
def change_type( self, event ):
event.Skip()
def change_input( self, event ):
event.Skip()
def change_div( self, event ):
event.Skip()
def change_in_tex( self, event ):
event.Skip()
def default_tex( self, event ):
event.Skip()
def change_in_mesh( self, event ):
event.Skip()
def default_mesh( self, event ):
event.Skip()
def az_add( self, event ):
event.Skip()
def az_del( self, event ):
event.Skip()
def az_up( self, event ):
event.Skip()
def az_down( self, event ):
event.Skip()
def choice( self, event ):
event.Skip()
def change_pattern( self, event ):
event.Skip()
def open_add_name( self, event ):
event.Skip()
def name_add( self, event ):
event.Skip()
def name_del( self, event ):
event.Skip()
def choice_add( self, event ):
event.Skip()
def edit_add_name( self, event ):
event.Skip()
def searching( self, event ):
event.Skip()
def change_name( self, event ):
event.Skip()
def in_file( self, event ):
event.Skip()
def in_fold( self, event ):
event.Skip()
def in_start( self, event ):
event.Skip()
def out_file( self, event ):
event.Skip()
def out_fold( self, event ):
event.Skip()
def out_start( self, event ):
event.Skip()
def add_new( self, event ):
event.Skip()
def add_old( self, event ):
event.Skip()
def writer_into( self, event ):
event.Skip()
def apply_click( self, event ):
event.Skip()
def cancel_click( self, event ):
event.Skip()
def ok_click( self, event ):
event.Skip()
def m_splitter2OnIdle( self, event ):
self.m_splitter2.SetSashPosition( 0 )
self.m_splitter2.Unbind( wx.EVT_IDLE )
###########################################################################
## Class MyDialog_menu
###########################################################################
class MyDialog_menu ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"设置添加的类型", pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
bSizer47 = wx.BoxSizer( wx.VERTICAL )
self.m_checkBox_dir = wx.CheckBox( self, wx.ID_ANY, u"文件夹右键", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer47.Add( self.m_checkBox_dir, 0, wx.ALL, 5 )
self.m_checkBox_bg = wx.CheckBox( self, wx.ID_ANY, u"文件夹内右键", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer47.Add( self.m_checkBox_bg, 0, wx.ALL, 5 )
m_sdbSizer5 = wx.StdDialogButtonSizer()
self.m_sdbSizer5OK = wx.Button( self, wx.ID_OK )
m_sdbSizer5.AddButton( self.m_sdbSizer5OK )
self.m_sdbSizer5Cancel = wx.Button( self, wx.ID_CANCEL )
m_sdbSizer5.AddButton( self.m_sdbSizer5Cancel )
m_sdbSizer5.Realize();
bSizer47.Add( m_sdbSizer5, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer47 )
self.Layout()
bSizer47.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.m_checkBox_dir.Bind( wx.EVT_CHECKBOX, self.use_dir )
self.m_checkBox_bg.Bind( wx.EVT_CHECKBOX, self.use_bg )
self.m_sdbSizer5OK.Bind( wx.EVT_BUTTON, self.ok_change )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def use_dir( self, event ):
event.Skip()
def use_bg( self, event ):
event.Skip()
def ok_change( self, event ):
event.Skip()
###########################################################################
## Class MyDialog_useless
###########################################################################
class MyDialog_useless ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
bSizer24 = wx.BoxSizer( wx.VERTICAL )
self.m_scrolledWindow3 = wx.ScrolledWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.m_scrolledWindow3.SetScrollRate( 5, 5 )
bSizer241 = wx.BoxSizer( wx.VERTICAL )
m_radioBox_gfl_exChoices = [ u"导出全部", u"仅导出人物立绘(pic打头)" ]
self.m_radioBox_gfl_ex = wx.RadioBox( self.m_scrolledWindow3, wx.ID_ANY, u"导出设置", wx.DefaultPosition, wx.DefaultSize, m_radioBox_gfl_exChoices, 1, wx.RA_SPECIFY_COLS )
self.m_radioBox_gfl_ex.SetSelection( 0 )
bSizer241.Add( self.m_radioBox_gfl_ex, 0, wx.ALL|wx.EXPAND, 5 )
m_radioBox_gfl_divChoices = [ u"不分类", u"按人形名分类", u"按是否大破分类" ]
self.m_radioBox_gfl_div = wx.RadioBox( self.m_scrolledWindow3, wx.ID_ANY, u"分类设置", wx.DefaultPosition, wx.DefaultSize, m_radioBox_gfl_divChoices, 1, wx.RA_SPECIFY_COLS )
self.m_radioBox_gfl_div.SetSelection( 0 )
bSizer241.Add( self.m_radioBox_gfl_div, 0, wx.ALL|wx.EXPAND, 5 )
self.m_checkBox_check = wx.CheckBox( self.m_scrolledWindow3, wx.ID_ANY, u"开始前先进行检查", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_checkBox_check.SetValue(True)
bSizer241.Add( self.m_checkBox_check, 0, wx.ALL, 5 )
self.m_checkBox_gfl_dir = wx.CheckBox( self.m_scrolledWindow3, wx.ID_ANY, u"在导出文件夹内新建导出文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_checkBox_gfl_dir.SetValue(True)
bSizer241.Add( self.m_checkBox_gfl_dir, 0, wx.ALL|wx.EXPAND, 5 )
self.m_scrolledWindow3.SetSizer( bSizer241 )
self.m_scrolledWindow3.Layout()
bSizer241.Fit( self.m_scrolledWindow3 )
bSizer24.Add( self.m_scrolledWindow3, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_girls_font_line = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_girls_font_line.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BTNFACE ) )
bSizer151 = wx.BoxSizer( wx.VERTICAL )
sbSizer_load_rgb = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_girls_font_line, wx.ID_ANY, u"RGB" ), wx.VERTICAL )
gSizer_rgb = wx.GridSizer( 0, 2, 0, 0 )
self.m_gauge_RGB_load = wx.Gauge( sbSizer_load_rgb.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_RGB_load.SetValue( 0 )
gSizer_rgb.Add( self.m_gauge_RGB_load, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
self.m_staticText_RGB_load = wx.StaticText( sbSizer_load_rgb.GetStaticBox(), wx.ID_ANY, u"无任务", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_RGB_load.Wrap( -1 )
gSizer_rgb.Add( self.m_staticText_RGB_load, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
sbSizer_load_rgb.Add( gSizer_rgb, 0, 0, 5 )
bSizer151.Add( sbSizer_load_rgb, 0, wx.EXPAND, 5 )
sbSizer_load_alpha = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_girls_font_line, wx.ID_ANY, u"Alpha" ), wx.VERTICAL )
gSizer_tex1 = wx.GridSizer( 0, 2, 0, 0 )
self.m_gauge_alpha_load = wx.Gauge( sbSizer_load_alpha.GetStaticBox(), wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
self.m_gauge_alpha_load.SetValue( 0 )
gSizer_tex1.Add( self.m_gauge_alpha_load, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_staticText_alpha_load = wx.StaticText( sbSizer_load_alpha.GetStaticBox(), wx.ID_ANY, u"无任务", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_alpha_load.Wrap( -1 )
gSizer_tex1.Add( self.m_staticText_alpha_load, 0, wx.ALL|wx.ALIGN_RIGHT, 5 )
sbSizer_load_alpha.Add( gSizer_tex1, 0, 0, 5 )
bSizer151.Add( sbSizer_load_alpha, 0, wx.EXPAND, 5 )
self.m_listbook21 = wx.Listbook( self.m_panel_girls_font_line, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LB_DEFAULT )
self.m_panel21 = wx.Panel( self.m_listbook21, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer73 = wx.BoxSizer( wx.VERTICAL )
gSizer10 = wx.GridSizer( 0, 2, 0, 0 )
self.m_searchCtrl_RGB = wx.SearchCtrl( self.m_panel21, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_RGB.ShowSearchButton( True )
self.m_searchCtrl_RGB.ShowCancelButton( True )
gSizer10.Add( self.m_searchCtrl_RGB, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
m_choice_rgbChoices = [ u"全部" ]
self.m_choice_rgb = wx.Choice( self.m_panel21, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_rgbChoices, 0 )
self.m_choice_rgb.SetSelection( 0 )
gSizer10.Add( self.m_choice_rgb, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer73.Add( gSizer10, 0, wx.EXPAND, 5 )
m_listBox_RGBChoices = []
self.m_listBox_RGB = wx.ListBox( self.m_panel21, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_RGBChoices, 0 )
bSizer73.Add( self.m_listBox_RGB, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel21.SetSizer( bSizer73 )
self.m_panel21.Layout()
bSizer73.Fit( self.m_panel21 )
self.m_listbook21.AddPage( self.m_panel21, u"RGB file", False )
self.m_panel11 = wx.Panel( self.m_listbook21, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer711 = wx.BoxSizer( wx.VERTICAL )
gSizer11 = wx.GridSizer( 0, 2, 0, 0 )
self.m_searchCtrl_alpha = wx.SearchCtrl( self.m_panel11, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_searchCtrl_alpha.ShowSearchButton( True )
self.m_searchCtrl_alpha.ShowCancelButton( True )
gSizer11.Add( self.m_searchCtrl_alpha, 0, wx.ALL|wx.EXPAND, 5 )
m_choice_alphaChoices = [ u"全部" ]
self.m_choice_alpha = wx.Choice( self.m_panel11, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choice_alphaChoices, 0 )
self.m_choice_alpha.SetSelection( 0 )
gSizer11.Add( self.m_choice_alpha, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer711.Add( gSizer11, 0, wx.EXPAND, 5 )
m_listBox_alphaChoices = []
self.m_listBox_alpha = wx.ListBox( self.m_panel11, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_listBox_alphaChoices, 0 )
bSizer711.Add( self.m_listBox_alpha, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel11.SetSizer( bSizer711 )
self.m_panel11.Layout()
bSizer711.Fit( self.m_panel11 )
self.m_listbook21.AddPage( self.m_panel11, u"Alpha file", True )
bSizer151.Add( self.m_listbook21, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel_girls_font_line.SetSizer( bSizer151 )
self.m_panel_girls_font_line.Layout()
bSizer151.Fit( self.m_panel_girls_font_line )
bSizer24.Add( self.m_panel_girls_font_line, 1, wx.EXPAND |wx.ALL, 5 )
sbSizer111 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"少女前线" ), wx.VERTICAL )
self.m_staticText16 = wx.StaticText( sbSizer111.GetStaticBox(), wx.ID_ANY, u"RGB文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText16.Wrap( -1 )
sbSizer111.Add( self.m_staticText16, 0, wx.ALL, 5 )
self.m_dirPicker_gl_rbg_dir = wx.DirPickerCtrl( sbSizer111.GetStaticBox(), wx.ID_ANY, u"QAQ", u"设置默认文件夹", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE )
sbSizer111.Add( self.m_dirPicker_gl_rbg_dir, 0, wx.ALL|wx.EXPAND, 5 )
self.m_staticText17 = wx.StaticText( sbSizer111.GetStaticBox(), wx.ID_ANY, u"Alpha文件夹", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText17.Wrap( -1 )
sbSizer111.Add( self.m_staticText17, 0, wx.ALL, 5 )
self.m_dirPicker_gl_alpha_dir = wx.DirPickerCtrl( sbSizer111.GetStaticBox(), wx.ID_ANY, u"QAQ", u"设置默认文件夹", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DEFAULT_STYLE
but be careful when
calling compiled routines outside of wrf-python.
Args:
wrfin (iterable): An iterable type, which includes lists, tuples,
dictionaries, generators, and user-defined classes.
varname (:obj:`str`) : The variable name.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
is_moving (:obj:`bool`): A boolean type that indicates if the
sequence is a moving nest.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): Cache key for the coordinate variables.
This is used for internal purposes only. Default is None.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: If xarray is
enabled and the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
# Handles generators, single files, lists, tuples, custom classes
wrfseq = get_iterable(wrfin)
# Mappings (e.g. a dict of nest/ensemble labels to file sequences) get special handling
if is_mapping(wrfseq):
outarr = _combine_dict(wrfseq, varname, timeidx, method, meta, _key)
elif method.lower() == "cat":
outarr = _cat_files(wrfseq, varname, timeidx, is_moving,
squeeze, meta, _key)
elif method.lower() == "join":
outarr = _join_files(wrfseq, varname, timeidx, is_moving, meta, _key)
else:
raise ValueError("method must be 'cat' or 'join'")
return outarr.squeeze() if squeeze else outarr
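# Minimal usage sketch (file names are placeholders; in normal use this is
# reached through wrf.getvar/extract_vars rather than called directly):
#
#   from netCDF4 import Dataset
#   from wrf import ALL_TIMES
#   wrflist = [Dataset("wrfout_d01_a"), Dataset("wrfout_d01_b")]
#   p_cat = combine_files(wrflist, "P", ALL_TIMES, is_moving=False,
#                         method="cat")    # times concatenated
#   p_join = combine_files(wrflist, "P", ALL_TIMES, is_moving=False,
#                          method="join")  # adds a file index dimension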
def _extract_var(wrfin, varname, timeidx, is_moving,
method, squeeze, cache, meta, _key):
"""Extract a variable from a NetCDF file object or a sequence of NetCDF
file objects.
Args:
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`,
:class:`Nio.NioFile` or an iterable sequence of the
aforementioned types.
varname (:obj:`str`) : The variable name.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
is_moving (:obj:`bool`): A boolean type that indicates if the
sequence is a moving nest.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
that can be used to supply pre-extracted NetCDF variables to the
computational routines. It is primarily used for internal
purposes, but can also be used to improve performance by
eliminating the need to repeatedly extract the same variables
used in multiple diagnostics calculations, particularly when using
large sequences of files.
Default is None.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): Cache key for the coordinate variables.
This is used for internal purposes only. Default is None.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: If xarray is
enabled and the *meta* parameter is True, then the result will be a
:class:`xarray.DataArray` object. Otherwise, the result will be a
:class:`numpy.ndarray` object with no metadata.
"""
if cache is not None:
try:
cache_var = cache[varname]
except KeyError:
pass
else:
if not meta:
return to_np(cache_var)
return cache_var
multitime = is_multi_time_req(timeidx)
multifile = is_multi_file(wrfin)
if is_time_coord_var(varname):
return extract_times(wrfin, timeidx, method, squeeze, cache,
meta, do_xtime=True)
if not multifile:
if xarray_enabled() and meta:
if is_moving is None:
is_moving = is_moving_domain(wrfin, varname, _key=_key)
result = _build_data_array(wrfin, varname, timeidx, is_moving,
multifile, _key)
else:
if not multitime:
result = wrfin.variables[varname][timeidx,:]
result = result[np.newaxis, :]  # Keep a leading Time dimension of size 1 so it survives when squeeze is False
else:
result = wrfin.variables[varname][:]
else:
# Squeeze handled in this routine, so just return it
return combine_files(wrfin, varname, timeidx, is_moving,
method, squeeze, meta, _key)
return result.squeeze() if squeeze else result
def extract_vars(wrfin, timeidx, varnames, method="cat", squeeze=True,
cache=None, meta=True, _key=None):
"""Extract variables from a NetCDF file object or a sequence of NetCDF
file objects.
Args:
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`,
:class:`Nio.NioFile` or an iterable sequence of the
aforementioned types.
varnames (sequence of :obj:`str`) : A sequence of variable names.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
that can be used to supply pre-extracted NetCDF variables to the
computational routines. It is primarily used for internal
purposes, but can also be used to improve performance by
eliminating the need to repeatedly extract the same variables
used in multiple diagnostics calculations, particularly when using
large sequences of files.
Default is None.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): Cache key for the coordinate variables.
This is used for internal purposes only. Default is None.
Returns:
:obj:`dict`: A mapping of variable name to an array object. If xarray
is enabled and the *meta* parameter is True, then the array object will
be a :class:`xarray.DataArray` object. Otherwise, the array object
will be a :class:`numpy.ndarray` object with no metadata.
"""
if isstr(varnames):
varlist = [varnames]
else:
varlist = varnames
return {var:_extract_var(wrfin, var, timeidx, None,
method, squeeze, cache, meta, _key)
for var in varlist}
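# Minimal usage sketch (the file name is a placeholder):
#
#   from netCDF4 import Dataset
#   from wrf import ALL_TIMES
#   ncfile = Dataset("wrfout_d01_2010-06-13_21:00:00")
#   wrf_vars = extract_vars(ncfile, ALL_TIMES, ("P", "PSFC", "PB"))
#   p = wrf_vars["P"]    # xarray.DataArray when xarray and meta are enabled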
def npbytes_to_str(var):
"""Return a :obj:`bytes` object for the raw character array.
Args:
var (:class:`numpy.ndarray`): An array of characters.
Returns:
:obj:`bytes`: A string of bytes.
"""
return (bytes(c).decode("utf-8") for c in var[:])
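# Example (illustrative): for one entry of the 'Times' character variable,
#   "".join(npbytes_to_str(times_var[0]))
# produces a string such as "2010-06-13_21:00:00", which _make_time below
# parses into a datetime; ``times_var`` is assumed to be the NetCDF 'Times'
# variable.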
def _make_time(timearr):
"""Return a :class:`datetime.datetime` object for the array of characters.
Args:
timearr (:class:`numpy.ndarray`): An array of characters.
Returns:
:class:`datetime.datetime`: A datetime object.
"""
try:
return dt.datetime.strptime("".join(npbytes_to_str(timearr)),
"%Y-%m-%d_%H:%M:%S")
except ValueError:
return np.datetime64("NaT")
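# Example (illustrative): a character array holding "2010-06-13_21:00:00"
# is parsed into datetime.datetime(2010, 6, 13, 21, 0); a malformed entry
# falls back to numpy.datetime64("NaT") instead of raising.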
def _file_times(wrfin, do_xtime):
"""Yield a time object for the times found in a sequence of files.
If *do_xtime* is True, a :obj:`float` value from the 'XTIME' variable is
yielded.  Otherwise, a :class:`datetime.datetime` object parsed from the
'Times' variable is yielded.
Args:
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF
data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile`
or an iterable sequence of the aforementioned types.
do_xtime (:obj:`bool`): Set to True to parse the 'XTIME' variable
instead of the 'Times' variable.
Yields:
:class:`datetime.datetime` or :obj:`float`: A
:class:`datetime.datetime` object if *do_xtime* is False,
otherwise a :obj:`float`.