Python
def draw_menu(self, color=BLACK, background=WHITE + (222,)):
    """
    A function that draws the menu available under the f1 button
    :param color: color of the drawn text
    :param background: color of the drawn background
    """
    blit_line = lambda pos, text: \
        menu_bg.blit(self.font_help.render(text, False, color), (5, self.f1_line_height * pos))

    menu_bg = pg.Surface([self.f1_menu_width, self.f1_line_height * 18], SRCALPHA)
    menu_bg.fill(background)

    dead_colors = {WHITE: 'WHITE', LIGHTEST_GREY: 'LIGHTEST GREY',
                   LIGHTER_GREY: 'LIGHTER GREY', LIGHT_GREY: 'LIGHT GREY'}
    grid_colors = {None: 'hidden', GREY: 'GREY', WHITE: 'WHITE'}

    blit_line(0, f'{TITLE} FPS:{round(self.clock.get_fps())}')
    blit_line(2, f'F1: show / hide menu')
    blit_line(3, f'g : show / hide grid ({grid_colors[self.grid_color]})')
    blit_line(4, f'w : show / hide route ({"shown" if self.show_route else "hidden"})')
    blit_line(5, f'e : next color for dead cells')
    blit_line(6, f' ({dead_colors[self.dead_color]})')
    blit_line(7, f'p : run / pause ({"paused" if self.paused else "running"})')
    blit_line(8, f's : save grid to a file')
    blit_line(9, f'r : randomize grid')
    blit_line(10, f'n : display next generation')
    blit_line(11, f't : switch cell sizes {self.grid_width}x{self.grid_height} ({self.grid_width * self.grid_height})')
    blit_line(12, f'z | x : adjust cell sizes ({self.cell_size})')
    blit_line(13, f', | . : generations per second ({self.gens_per_sec})')
    blit_line(14, f'LMB : set cell as alive')
    blit_line(15, f'RMB : set cell as dead')
    blit_line(16, f'q : quit')

    self.grid_image.blit(menu_bg, (0, 0))
Python
def draw(self):
    """ A function that draws everything on the screen - sprites, grid, help menu and info """
    self.sprites.draw(self.grid_image)
    self.grid_image.blit(self.grid_lines, (0, 0))
    self.show_menu and self.draw_menu()
    self.screen.blit(self.grid_image, (self.margin_x, 0))
    self.draw_info()
    pg.display.flip()
Python
def count_alive_cells(self) -> int:
    """ Returns the number of cells currently alive """
    return sum(sum(1 for cell in x if cell.alive) for x in self.cells)
Python
def count_cell_neighbors(self, x: int, y: int) -> int:
    """
    Get the number of alive neighbors of the specific cell
    :param x: The index of the specific cell
    :param y: The index of the specific cell
    :return: The number of alive neighbors of that cell
    """
    prev_x = x - 1
    prev_y = y - 1
    next_x = (x + 1) % self.grid_width
    next_y = (y + 1) % self.grid_height
    return self.cells[prev_x][prev_y].alive + self.cells[prev_x][y].alive + self.cells[prev_x][next_y].alive + \
        self.cells[x][prev_y].alive + self.cells[x][next_y].alive + \
        self.cells[next_x][prev_y].alive + self.cells[next_x][y].alive + self.cells[next_x][next_y].alive
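The neighbor count above wraps around the grid edges: `x - 1` and `y - 1` rely on Python's negative indexing to reach the far edge, while `(x + 1) % width` wraps the other side, so the board behaves like a torus. A minimal standalone sketch of the same idea, using a plain list of 0/1 values instead of cell objects (the `grid` and `count_neighbors` names below are illustrative, not part of the original class):

# Standalone sketch of the same wrap-around (toroidal) neighbor count.
def count_neighbors(grid, x, y):
    width, height = len(grid), len(grid[0])
    prev_x, prev_y = x - 1, y - 1                      # negative index wraps to the far edge
    next_x, next_y = (x + 1) % width, (y + 1) % height  # modulo wraps the other edge
    return sum(grid[i][j]
               for i in (prev_x, x, next_x)
               for j in (prev_y, y, next_y)
               if (i, j) != (x, y))

grid = [[0, 1, 0],
        [1, 0, 0],
        [0, 0, 1]]
print(count_neighbors(grid, 0, 0))  # corner cell sees all three live cells -> 3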
Python
def update_generation(self):
    """
    Calls the function which sets the state of every cell in the next generation,
    then increments the generation counter
    """
    self.set_cells_state()
    self.generation += 1
Python
def compute_mouse_pos(self, pos: (int, int)) -> (int, int):
    """
    A function that calculates the tuple (col, row) of the clicked cell
    depending on the mouse position
    :param pos: Position in px where the mouse was when clicked
    :return: (None, None) if clicked not on the grid otherwise tuple (col, row)
    """
    # only if clicked above menu bar (on the grid image)
    if self.margin_x < pos[0] < (self.grid_width * self.cell_size + self.margin_x):
        if 0 < pos[1] < (self.grid_height * self.cell_size):
            return ((pos[0] - self.margin_x) // self.cell_size), (pos[1] // self.cell_size)
    return None, None
Python
def handle_keys(self, event: pg.event.Event):
    """
    This function handles all the events related to the keyboard
    :param event: pygame Event
    """
    if event.key == pg.K_p:
        print("'p' pressed! - toggling pause")
        self.paused = not self.paused
    elif event.key == pg.K_q:
        quit("'q' pressed! - quitting the game")
    elif event.key == pg.K_r:
        print("'r' pressed! - randomizing grid")
        self.fill_grid()
    elif event.key == pg.K_w:
        print("'w' pressed! - toggling route view")
        self.show_route = not self.show_route
    elif event.key == pg.K_e:
        print("'e' pressed! - next color for dead cells")
        self.dead_color = next(DEAD_COLOR)
    elif self.paused and event.key == pg.K_n:
        print("'n' pressed! - displaying next generation")
        self.update_generation()
    elif event.key == pg.K_c:
        print("'c' pressed! - clearing grid")
        self.fill_grid(Action.CLEAR)
    elif event.key == pg.K_t:
        print("'t' pressed! - changing cell size")
        self.cell_size = next(CELL_SIZES)
        self.new()
    elif event.key == pg.K_g:
        print("'g' pressed! - toggling grid")
        self.grid_color = next(GRID_COLOR)
        self.draw_grid(self.grid_color)
    elif event.key == pg.K_x:
        self.increase_cell_size()
    elif event.key == pg.K_z:
        self.decrease_cell_size()
    elif event.key == pg.K_F1:
        print("'F1' pressed! - toggling menu view")
        self.show_menu = not self.show_menu
    elif event.key == pg.K_s:
        print(f"'s' pressed ! - saved to file '{self.save_to_file()}'")
    elif event.unicode == ',':
        self.decrease_gens_per_sec()
    elif event.unicode == '.':
        self.increase_gens_per_sec()
Python
def handle_mouse_scroll(self, button: int, ctrl: bool = False):
    """
    This function handles all the events related to the mouse scroll
    :param button: scrolling up / down
    :param ctrl: if True then CTRL is also pressed and it should zoom instead of changing gens per sec
    """
    if button == WHEEL_UP:
        self.increase_cell_size() if ctrl else self.increase_gens_per_sec()
    elif button == WHEEL_DOWN:
        self.decrease_cell_size() if ctrl else self.decrease_gens_per_sec()
Python
def handle_mouse_buttons(self, event: pg.event.Event, button: (bool, bool, bool)):
    """
    This function handles all the events related to the mouse buttons
    :param event: pygame Event
    :param button: tuple of booleans
    """
    col, row = None, None
    try:
        col, row = self.compute_mouse_pos(event.pos)
    except AttributeError:  # when the mouse is pressed down and moved out of the window
        pass
    if col is None:
        return
    state = self.cells[col][row].alive
    if button[LMB] and not state:
        self.cells[col][row].revive()
    elif button[RMB] and state:
        self.cells[col][row].kill()
Python
def run(self):
    """ Starts the game and loops until the quit state """
    while True:
        self.handle_events()
        self.draw()
        self.clock.tick(self.fps)
Python
def _drill_input(self):
    """
    Symbolically step down a path with a tracer, trying to concretize inputs for unencountered
    state transitions.
    """
    # rebuild the path with qemu
    r = tracer.qemu_runner.QEMURunner(self.binary, self.input, argv=self.argv)
    p = angr.Project(self.binary)

    # handle hooks
    for addr, proc in self._hooks.items():
        p.hook(addr, proc)
        l.debug("Hooking %#x -> %s...", addr, proc.display_name)

    # try to get an init simstate
    # check the argv
    s = p.factory.full_init_state(stdin=angr.SimFileStream, args=self.argv)

    # preconstrain
    s.preconstrainer.preconstrain_file(self.input, s.posix.stdin, True)

    simgr = p.factory.simulation_manager(s, save_unsat=True, hierarchy=False, save_unconstrained=r.crash_mode)

    # use_technique
    t = angr.exploration_techniques.Tracer(trace=r.trace, crash_addr=r.crash_addr, copy_states=True)
    self._core = angr.exploration_techniques.DrillerCore(trace=r.trace, fuzz_bitmap=self.fuzz_bitmap)

    simgr.use_technique(t)
    simgr.use_technique(angr.exploration_techniques.Oppologist())
    simgr.use_technique(self._core)

    self._set_concretizations(simgr.one_active)

    while simgr.active and simgr.one_active.globals['trace_idx'] < len(r.trace) - 1:
        simgr.step()

        # if something in diverted
        if 'diverted' not in simgr.stashes:
            continue

        while simgr.diverted:
            state = simgr.diverted.pop(0)
            l.debug("Found a diverted state, exploring to some extent.")
            w = self._writeout(state.history.bbl_addrs[-1], state)
            if w is not None:
                yield w
            # symbolic explore
            for i in self._symbolic_explorer_stub(state):
                yield i
Python
def _symbolic_explorer_stub(self, state):
    """
    Create a new simulation manager and step it forward up to 1024 accumulated active states or steps.
    """
    steps = 0
    accumulated = 1
    p = state.project
    state = state.copy()
    try:
        state.options.remove(angr.options.LAZY_SOLVES)
    except KeyError:
        pass

    simgr = p.factory.simulation_manager(state, hierarchy=False)

    l.debug("[%s] started symbolic exploration at %s.", self.binary, time.ctime())

    # try to explore
    while len(simgr.active) and accumulated < 1024:
        simgr.step()
        steps += 1

        # Dump all inputs.
        # steps multiplied by the number of states gives the upper bound on how far the symbolic explorer goes
        accumulated = steps * (len(simgr.active) + len(simgr.deadended))

    l.debug("[%s] stopped symbolic exploration at %s.", self.binary, time.ctime())

    # DO NOT think this is the same as using only the deadended stashes. this merges deadended and active
    simgr.stash(from_stash='deadended', to_stash='active')

    for dumpable in simgr.active:
        try:
            if dumpable.satisfiable():
                w = self._writeout(dumpable.history.bbl_addrs[-1], dumpable)
                if w is not None:
                    yield w
        # If the state we're trying to dump wasn't actually satisfiable.
        except IndexError:
            pass
Python
def create_sample(self):
    '''
    Description:
        Creates temporary spine joint chain.
    Returns:
        spine_data updates
    '''
    logging.info('Before temp_jnt_list Updates :{}'.format(self.spine_data))  # it has the correct dict
    startTemp = 'spine_0_temp_jnt'
    if cmds.objExists(startTemp):
        cmds.delete(startTemp)
    trsY = 10
    self.spine_data.temp_jnt_list = list()
    for num in range(self.spine_data.num_jnt):
        if num == 0:
            new_jnt = cmds.joint(n='spine_{}_temp_jnt'.format(num))
            self.spine_data.temp_jnt_list.append(new_jnt)
        else:
            new_jnt = cmds.joint(n='spine_{}_temp_jnt'.format(num), position=[0, trsY * num, 0])
            self.spine_data.temp_jnt_list.append(new_jnt)
    logging.info('After temp_jnt_list Updates :{}'.format(self.spine_data))
Python
def create_joint(self):
    '''
    Description:
        Creates final spine joint chain.
    Returns:
        spine_data updates a list of final joints.
    '''
    self.spine_data.final_jnt_list = list()
    num = 1
    cmds.select(cl=True)
    temp = self.spine_data.temp_jnt_list
    name = self.spine_data.cha_naming
    logging.info('Before final_jnt_list Updates :{}'.format(self.spine_data))
    for tempJnt in temp:
        transVar = cmds.xform(tempJnt, worldSpace=True, query=True, translation=True)
        new_rig_jnt = cmds.joint(n='{}_spine_{}_jnt'.format(name, num), absolute=True, position=transVar)
        self.spine_data.final_jnt_list.append(new_rig_jnt)
        num = num + 1
    for finalJnt in self.spine_data.final_jnt_list:
        cmds.joint(finalJnt, e=True, oj='xyz', secondaryAxisOrient='yup', ch=True, zso=True)
    # clean the end joint's orientation
    endJnt = self.spine_data.final_jnt_list[-1]
    cmds.setAttr('{}.jointOrientX'.format(endJnt), 0)
    cmds.setAttr('{}.jointOrientY'.format(endJnt), 0)
    cmds.setAttr('{}.jointOrientZ'.format(endJnt), 0)
    logging.info('After final_jnt_list Updates :{}'.format(self.spine_data))
Python
def create_control(self, target):
    '''
    Description:
        Creates nurbs curve controller and its parent group.
    Parameters:
        target
    Returns:
        a list of nurbs curve and its parent group
    '''
    name = '{}_ctl'.format(target)
    ctl_pair = list()
    if self.spine_data.fk_rig == True:
        ctl = create_circle(name)
    elif self.spine_data.ik_rig == True:
        ctl = create_box(name)
    ctl_grp = create_group(ctl)
    # Warning: Cannot parent components or objects in the underworld.
    cmds.parent(ctl, ctl_grp)
    ctl_pair.append(ctl)
    ctl_pair.append(ctl_grp)
    return ctl_pair
Python
def organize_fk(self):
    '''
    Description:
        Parent fk controllers in order.
    Returns:
        None
    '''
    ctl_grp = self.spine_data.ctl_list
    for num in range(len(ctl_grp)):
        if num != 0:
            currentCtl = ctl_grp[num]  # Find the current control
            currentGrp = cmds.listRelatives(currentCtl, parent=True)  # Find the parent group of the current control.
            aboveCtl = ctl_grp[num - 1]  # Find the control before the current one.
            cmds.parent(currentGrp, aboveCtl)
Python
def skin_jnt_to_curve(self):
    '''
    Description:
        Skin newly created self.skin_jnt to ikhandle curve.
    Returns:
        None
    '''
    cmds.skinCluster(self.skin_jnt, self.ik_product[2], mi=3)
Python
def organize_ik(self):
    '''
    Description:
        Constraints IK controllers in order.
    Returns:
        None
    '''
    waist_con = self.spine_data.ctl_list[-1]
    chest_con = self.spine_data.ctl_list[1]
    root_con = self.spine_data.ctl_list[0]
    fk_end_jnt = self.fk_jnt[0]
    chest_grp = cmds.listRelatives(chest_con, parent=True)
    waist_grp = cmds.listRelatives(waist_con, parent=True)
    constraints_objs(waist_con, chest_grp)
    constraints_objs(root_con, waist_grp)
    constraints_objs(root_con, fk_end_jnt)
Python
def create_group(target):
    '''
    Description:
        Creates parent group of the target object.
    Parameters:
        target
    Returns:
        newly created parent group
    '''
    name = '{}_grp'.format(target)
    group_product = cmds.group(n=name, em=True)
    return group_product
Python
def create_circle(name):
    '''
    Description:
        Creates nurbs circle with name.
    Parameters:
        name string
    Returns:
        newly created nurbs circle's name
    '''
    circle = cmds.circle(n=name, r=5, nr=(1, 0, 0))
    return circle[0]
Python
def create_box(name):
    '''
    Description:
        Creates nurbs cube with name.
    Parameters:
        name string
    Returns:
        newly created nurbs cube's name
    '''
    box = cmds.curve(n=name, d=1,
                     p=[(2.5, 2.5, 2.5), (2.5, 2.5, -2.5), (-2.5, 2.5, -2.5), (-2.5, -2.5, -2.5),
                        (2.5, -2.5, -2.5), (2.5, 2.5, -2.5), (-2.5, 2.5, -2.5), (-2.5, 2.5, 2.5),
                        (2.5, 2.5, 2.5), (2.5, -2.5, 2.5), (2.5, -2.5, -2.5), (-2.5, -2.5, -2.5),
                        (-2.5, -2.5, 2.5), (2.5, -2.5, 2.5), (-2.5, -2.5, 2.5), (-2.5, 2.5, 2.5)],
                     k=[0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])  # knots must be non-decreasing
    return box
Python
def create_skin_jnt(target, name):  # receives self.startJ, self.endJ
    '''
    Description:
        Creates extra joint chain for curve skinning.
    Parameters:
        target = target joint name
    Returns:
        newly created joint's name
    '''
    cmds.select(cl=True)
    new_joint = cmds.joint(n='{}_skin_jnt'.format(name))
    new_joint_grp = create_group(new_joint)
    cmds.parent(new_joint, new_joint_grp)
    cmds.matchTransform(new_joint_grp, target, pos=True, rot=True)
    return new_joint
Python
def createPreProcessingLayers(self):
    """
    Creates a model with the initial pre-processing layers.
    """
    model = Sequential()
    model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape=(160, 320, 3)))
    # crop 50 px from the top and 20 px from the bottom of the image
    model.add(Cropping2D(cropping=((50, 20), (0, 0))))
    return model
Python
def createNormalizedLayers(self):
    """
    Creates a model with the initial pre-processing layers.
    """
    # the input is an already-resized 66 x 200 x 3 YCrCb image
    model = Sequential()
    model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape=(66, 200, 3)))
    # cropping (50 px from top, 20 px from bottom) is not applied here
    # model.add(Cropping2D(cropping=((50, 20), (0, 0))))
    return model
Python
def buildModel(self):
    """
    Creates the NVIDIA Autonomous Car Group model.
    """
    model = self.createPreProcessingLayers()
    if self.kversion == "1.2.1":
        #
        # suppress the Keras v2 warning that Conv2D should be used
        #
        model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
    else:
        model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu', name="conv1"))
        model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu', name="conv2"))
        model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu', name="conv3"))
        model.add(Conv2D(64, (3, 3), activation='relu', name="conv4"))
        model.add(Conv2D(64, (3, 3), activation='relu', name="conv5"))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))
    return model
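One possible way to drive the builder above is a small training helper that compiles the returned Keras model with an MSE loss and Adam, a common choice for this steering-angle regression network. The helper name, the commented `builder` object, the dummy arrays, and the `epochs=` keyword (Keras 2 code path) are all illustrative assumptions, not part of the original code:

# Hypothetical training helper for the model returned by buildModel().
import numpy as np

def train_steering_model(model, frames, angles, epochs=1, batch_size=32):
    """Compile and fit a Keras model on camera frames and steering angles (sketch)."""
    model.compile(loss='mse', optimizer='adam')   # assumed loss/optimizer choice
    return model.fit(frames, angles, epochs=epochs, batch_size=batch_size)

# e.g. train_steering_model(builder.buildModel(),
#                           np.zeros((8, 160, 320, 3)), np.zeros((8, 1)))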
Python
def buildModel_Normal(self):
    """
    Creates the NVIDIA Autonomous Car Group model.
    """
    model = self.createPreProcessingLayers()
    if self.kversion == "1.2.1":
        #
        # suppress the Keras v2 warning that Conv2D should be used
        #
        model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='elu'))
        model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='elu'))
        model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='elu'))
        model.add(Convolution2D(64, 3, 3, activation='elu'))
        model.add(Convolution2D(64, 3, 3, activation='elu'))
    else:
        model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='elu', name="conv1"))
        model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu', name="conv2"))
        model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu', name="conv3"))
        model.add(Conv2D(64, (3, 3), activation='elu', name="conv4"))
        model.add(Conv2D(64, (3, 3), activation='elu', name="conv5"))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))
    return model
Python
def buildModel_drop(self):
    """
    Creates the NVIDIA Autonomous Car Group model with dropout and L2 regularization.
    """
    model = self.createPreProcessingLayers()
    if self.kversion == "1.2.1":
        #
        # suppress the Keras v2 warning that Conv2D should be used
        #
        # 31 x 98 x 24
        model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='elu',
                                init="glorot_normal", W_regularizer=l2(0.001)))
        model.add(Dropout(0.1))  # keep_prob 0.9
        # 14 x 47 x 36
        model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='elu',
                                init="glorot_normal", W_regularizer=l2(0.001)))
        model.add(Dropout(0.2))  # keep_prob 0.8
        # 5 x 22 x 48
        model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='elu',
                                init="glorot_normal", W_regularizer=l2(0.001)))
        model.add(Dropout(0.2))  # keep_prob 0.8
        # 3 x 20 x 64
        model.add(Convolution2D(64, 3, 3, subsample=(1, 1), activation='elu',
                                init="glorot_normal", W_regularizer=l2(0.001)))
        model.add(Dropout(0.2))  # keep_prob 0.8
        # 1 x 18 x 64
        model.add(Convolution2D(64, 3, 3, subsample=(1, 1), activation='elu',
                                init="glorot_normal", W_regularizer=l2(0.001)))
        # model.add(Dropout(0.2))  # keep_prob 0.8
        model.add(Flatten())
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(100, activation='elu', init='glorot_normal', W_regularizer=l2(0.001)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(50, activation='elu', init='glorot_normal', W_regularizer=l2(0.001)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(10, activation='elu', init='glorot_normal', W_regularizer=l2(0.001)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(1, activation='linear', init='glorot_normal'))
    else:
        model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='elu',
                         kernel_initializer="he_uniform", kernel_regularizer=l2(0.01), name="conv1"))
        model.add(Dropout(0.1))  # keep_rate 0.9
        model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu',
                         kernel_initializer="he_uniform", kernel_regularizer=l2(0.01), name="conv2"))
        model.add(Dropout(0.2))  # keep_rate 0.8
        model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu',
                         kernel_initializer="he_uniform", kernel_regularizer=l2(0.01), name="conv3"))
        model.add(Dropout(0.2))  # keep_rate 0.8
        model.add(Conv2D(64, (3, 3), activation='elu',
                         kernel_initializer="he_uniform", kernel_regularizer=l2(0.01), name="conv4"))
        model.add(Dropout(0.2))  # keep_rate 0.8
        model.add(Conv2D(64, (3, 3), activation='elu',
                         kernel_initializer="he_uniform", kernel_regularizer=l2(0.01), name="conv5"))
        model.add(Flatten())
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(100, activation='elu', kernel_initializer='he_uniform', kernel_regularizer=l2(0.01)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(50, activation='elu', kernel_initializer='he_uniform', kernel_regularizer=l2(0.01)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(10, activation='elu', kernel_initializer='he_uniform', kernel_regularizer=l2(0.01)))
        model.add(Dropout(0.5))  # keep_prob 0.5
        model.add(Dense(1, activation='linear', kernel_initializer='he_uniform', kernel_regularizer=l2(0.01)))
    return model
Python
def find_surnames(authors):
    """
    Make assumption that all authors are given as surname and initials,
    so any uppercase followed by one-or-more lowercase letters is a surname.
    """
    surname_list = []
    surname_regex = "([A-Z\u00C0-\u00DE][a-z\u00DF-\u00FF\u0107]+)+"
    if isinstance(authors, str):
        surname_list = re.findall(surname_regex, authors)
        return surname_list
    elif isinstance(authors, list) or isinstance(authors, tuple):
        for author in authors:
            surname_list += re.findall(surname_regex, author)
        return surname_list
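A quick illustrative call of the same regex on a made-up author string (only the standard `re` module is needed; initials and lowercase words do not match, so only the surnames are returned):

import re

surname_regex = "([A-Z\u00C0-\u00DE][a-z\u00DF-\u00FF\u0107]+)+"
print(re.findall(surname_regex, "Newton I. and Leibniz G."))  # -> ['Newton', 'Leibniz']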
Python
def match_surnames(list_1, list_2):
    """
    Return True if at least one author surname matches. Otherwise return False
    """
    for name in list_1:
        if name in list_2:
            return True
    return False
Python
def fill_row(owner, repo, traffic=False, PAT=None):
    """
    Fill data for a single row of the output table
    """
    results = {"repo": ["{}/{}".format(owner, repo)]}
    results.update(get_stars_watchers_forks(owner, repo, PAT))
    if traffic:
        results.update(get_traffic(owner, repo, PAT))
    return results
Python
def process_input_file(input_filename, traffic, PAT):
    """
    Loop through all lines of an input text file, one URL per line
    """
    results = {}
    infile = open(input_filename)
    for line in infile.readlines():
        if not GITHUB_REGEX.search(line.strip()):
            raise RuntimeError("Not a Github URL! {}".format(line.strip()))
        owner, repo = line.strip().split("/")[-2:]
        print("Looking at {}/{}".format(owner, repo))
        try:
            this_row = fill_row(owner, repo, traffic, PAT)
            for k, v in this_row.items():
                if not k in results.keys():
                    results[k] = []
                results[k] += v
        except RuntimeError:
            print("Problem filling row for {}/{}".format(owner, repo))
            continue
    return results
Python
def sanity_check(args):
    """
    make sure we have a consistent set of arguments, and give a helpful error message if not.
    """
    if (args.repo or args.owner) and not (args.repo and args.owner):
        raise RuntimeError("Need to set both or neither of --repo and --owner")
    if (args.repo and args.input_filename) or not (args.repo or args.input_filename):
        raise RuntimeError("Need to set EITHER --repo and --owner OR --input_filename")
    if args.PAT and not args.traffic:
        print("No need to specify PAT if not requesting traffic info")
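For context, here is a minimal sketch of the argparse setup these checks appear to expect. The flag names mirror the attributes referenced above (`--owner`, `--repo`, `--input_filename`, `--traffic`, `--PAT`); the description, help strings, and sample values are guesses for illustration only:

# Hypothetical argument parser matching the attributes used in sanity_check().
import argparse

parser = argparse.ArgumentParser(description="Collect GitHub repository statistics")
parser.add_argument("--owner", help="repository owner")
parser.add_argument("--repo", help="repository name")
parser.add_argument("--input_filename", help="text file with one GitHub URL per line")
parser.add_argument("--traffic", action="store_true", help="also request traffic info")
parser.add_argument("--PAT", help="GitHub personal access token")

args = parser.parse_args(["--owner", "someuser", "--repo", "somerepo"])
# sanity_check(args) would accept this combination, since both --owner and --repo are set.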
Python
def clip_by_global_norm(grads, clip_norm):
    """Clip the grads by global norm."""
    global_norm = mtf.sqrt(
        mtf.add_n([mtf.reduce_sum(mtf.square(mtf.cast(t, tf.float32)))
                   for t in grads if t is not None]))
    multiplier = clip_norm / mtf.maximum(global_norm, clip_norm)
    clipped_grads = [None if t is None else mtf.cast(mtf.cast(t, tf.float32) * multiplier, t.dtype)
                     for t in grads]
    return clipped_grads, global_norm
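The same clipping rule, written as a small NumPy sketch to make the arithmetic explicit: the global norm is the 2-norm over all gradients taken together, and every gradient is scaled by clip_norm / max(global_norm, clip_norm), so nothing changes when the norm is already below the threshold. This is only an illustration of the math, independent of the Mesh TensorFlow code above:

# Plain NumPy illustration of global-norm gradient clipping.
import numpy as np

def clip_by_global_norm_np(grads, clip_norm):
    global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads if g is not None))
    multiplier = clip_norm / max(global_norm, clip_norm)
    return [None if g is None else g * multiplier for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([12.0])]   # combined norm = sqrt(9 + 16 + 144) = 13
clipped, norm = clip_by_global_norm_np(grads, 1.0)
print(norm, clipped[0])  # 13.0, first gradient scaled down by a factor of 13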
Python
def create_optimizer(loss, base_lr, num_warmup_steps,
                     max_optimized_variable_size=None,
                     optimizer="adam",
                     grad_scale=1.0,
                     clip_gradients=True):
    """Creates an optimizer training op."""
    global_steps = tf.train.get_or_create_global_step()
    mesh = loss.mesh

    # "inverse square root" learning rate schedule starting from base_lr; https://arxiv.org/abs/1910.10683
    # note: if using a small batch size, base_lr needs to be small
    global_steps_float = tf.cast(global_steps, tf.float32)
    decay_steps = tf.constant((1 / base_lr) ** 2, dtype=tf.float32)
    decay_steps_float = tf.math.maximum(decay_steps, global_steps_float)
    learning_rate = 1.0 / tf.math.sqrt(decay_steps_float)

    # Linear warm-up, equivalent to RAdam; https://arxiv.org/abs/1908.03265
    if num_warmup_steps:
        warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int64)
        warmup_steps_float = tf.constant(num_warmup_steps, dtype=tf.float32)
        warmup_percent_done = global_steps_float / warmup_steps_float
        warmup_learning_rate = base_lr * warmup_percent_done
        is_warmup = tf.cast(global_steps < warmup_steps_int, tf.float32)
        learning_rate = ((1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)

    mtf_learning_rate = mtf.import_tf_tensor(mesh, learning_rate, [])

    if optimizer == "adam":
        optimizer = AdamWeightDecayOptimizer(
            learning_rate=mtf_learning_rate,
            weight_decay_rate=0.01,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-8,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    else:
        raise ValueError("unknown optimizer")

    trainable_variables = mesh.graph.trainable_variables
    if max_optimized_variable_size:
        trainable_variables = [t for t in trainable_variables
                               if t.shape.size <= max_optimized_variable_size]

    var_grads = mtf.gradients(
        [loss * grad_scale], [v.outputs[0] for v in trainable_variables])

    # This is how the model was pre-trained.
    if clip_gradients:
        (var_grads, _) = clip_by_global_norm(
            var_grads, clip_norm=mtf.constant(mesh, 1.0, dtype=tf.float32))

    update_ops = optimizer.apply_grads(var_grads, trainable_variables, grad_scale)
    return learning_rate, update_ops
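A tiny pure-Python sketch of the schedule above: during warm-up the rate grows linearly toward base_lr, and afterwards it decays as 1/sqrt(step), with the decay floor (1/base_lr)^2 keeping the rate capped at base_lr until the step count exceeds that value. This only illustrates the arithmetic and is independent of the TensorFlow graph code:

# Pure-Python illustration of the warm-up + inverse-square-root schedule.
import math

def lr_at_step(step, base_lr, num_warmup_steps):
    decay_steps = (1.0 / base_lr) ** 2
    lr = 1.0 / math.sqrt(max(decay_steps, step))        # inverse-sqrt decay, capped at base_lr
    if num_warmup_steps and step < num_warmup_steps:
        lr = base_lr * step / num_warmup_steps          # linear warm-up toward base_lr
    return lr

for step in (0, 500, 1000, 10**6, 4 * 10**6):
    print(step, lr_at_step(step, base_lr=1e-3, num_warmup_steps=1000))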
Python
def _transport_bunnies(velocity):
    """
    Bunnies need to be transported from the farm to the lab but the road is dangerous
    and the amount of oxygen in the truck is limited.
    """
    bunnies = ["Fifi", "Gunter", "Hazy"]
    distance = 100.0
    time_required = distance / velocity
    oxygen_required = time_required * 10.0
    if velocity > 100:
        return rescue([])
    if oxygen_required > 20.0:
        return rescue([])
    rescue(bunnies)
Python
def _shave_bunnies(device_index):
    """
    Shaves bunnies to prepare them for experiments.
    """
    bunnies = ["Alfie", "Bugs", "Chip"]
    devices = ["harmless bunny shaver", "buzz saw", "rusty razor"]
    devices.reverse()
    device = devices[device_index]
    for bunny in bunnies:
        if device in ["buzz saw", "rusty razor"]:
            bunnies.remove(bunny)
    rescue(bunnies)
Python
def _feed_bunnies(names):
    """
    Some of the bunnies are hungry and need food. This function feeds a list of bunnies.
    """
    hungry_bunnies = ["Hips", "Ivan", "Jo"]
    allergies = {"Hips": ["Carrots"], "Ivan": ["Hay, Carrots"], "Jo": ["Cale"]}
    foods = ["Carrots", "Hay", "Cale"]
    bunnies_fed = []
    for bunny, food in zip(names, foods):
        if not food in allergies[bunny]:
            bunnies_fed.append(bunny)
    healthy_bunnies = [b for b in bunnies_fed if b in hungry_bunnies]
    rescue(healthy_bunnies)
Python
def _free_bunnies(spy_class):
    """
    Just as you and a group of other bunnies want to escape from the lab, you discover
    that the back door is locked with a secret code. To open it, you need to spy on a
    guard who's going out for a smoke.
    """
    bunnies = ["Turbo", "Uma", "Velvet", "Whoopie"]
    drugs = ["Placebo", "Bleach"]
    spy = spy_class()
    secret_code = []
    for i in range(10):
        x = random()
        spy.listen(x)
        secret_code.append(x)
    if secret_code == spy.guess_code():
        rescue(bunnies)
Python
def parseIterLine(line):
    """Parse line for PHASTA iteration information

    Returns dictionary of the matches converted to a numeric type (either int or float).
    """
    keyTypeDict = {
        'timestep': int,
        'walltime': float,
        'residual': float,
        'decibel': int,
        'vel_res': float,
        'pre_res': float,
        'maxres_node': int,
        'maxres_part': int,
        'maxres_ratio': int,
        'GMRESiter': int,
        'CGiter': int,
    }

    match = iterationRe.search(line)
    if match:
        matchdict = match.groupdict()
        for key, dtype in keyTypeDict.items():
            matchdict[key] = dtype(matchdict[key])
    else:
        matchdict = None
    return matchdict
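The function above assumes a module-level compiled regex `iterationRe` whose named groups match the keys in `keyTypeDict`; that pattern is not shown here. A toy illustration of the same named-group plus type-conversion idiom, with a made-up pattern that stands in for the real one:

# Toy stand-in for the named-group + type-conversion pattern used above.
import re

toyRe = re.compile(r"step (?P<timestep>\d+) took (?P<walltime>[\d.]+)s")
m = toyRe.search("step 42 took 1.25s")
d = m.groupdict()
d['timestep'] = int(d['timestep'])
d['walltime'] = float(d['walltime'])
print(d)  # {'timestep': 42, 'walltime': 1.25}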
Python
def parseOutFile(file):
    """Parse the file and return list of dictionaries of the results

    file : pathlib.Path
        Path to the file to be parsed
    """
    with file.open() as fileread:
        output = fileread.readlines()

    iterations = []
    for line in output:
        matchdict = parseIterLine(line)
        if matchdict:
            iterations.append(matchdict)
    return iterations
Python
def loadandwritevtu(filepaths: dict, outdir: Path, phastapartnum: int, writevtu: bool):
    """Writes out a solInterp given a set of vtu files

    Parameters
    ----------
    filepaths : dict
        dict of paths to the vtu files that will be converted
    outdir : Path
        Directory where the solInterp.N file will be saved to
    phastapartnum : int
        Phasta partition number
    writevtu : bool
        Whether to write an intermediate vtk file
    """
    vtus = {}
    nnodes_list = []
    for key, filepath in filepaths.items():
        try:
            vtus[key] = pv.UnstructuredGrid(filepath)
            nnode = vtus[key].number_of_points
            # force an exception to be raised if there is an issue reading the vtk file
            assert(nnode > 0)
            nnodes_list.append(vtus[key].number_of_points)
        except Exception as e:
            del vtus[key]  # Remove faulty file
            print(f'\tFILE FAILURE: {filepath.name} failed to read correctly for '
                  f'part {phastapartnum}: \n{e}')

    nnodes = list(vtus.values())[0].number_of_points
    for key, vtu in vtus.items():
        assert(vtu.number_of_points == nnodes)

    outdata = np.zeros((nnodes, 4))
    mask_filled = np.zeros(nnodes, dtype=np.bool_)
    mask_vtu1d = np.zeros(nnodes, dtype=np.bool_)
    mask_vtu2d = np.zeros((nnodes, 4), dtype=np.bool_)
    mask_of_trues = np.ones((nnodes, 4), dtype=np.bool_)

    used_node_count = {}
    for key, vtu in vtus.items():
        if np.all(mask_vtu2d):
            break  # stop loop if mask completed

        fileIssue = False
        pressure_keys = ['pressure', 'p']
        if not all(array in vtu.array_names for array in ['VelX', 'VelY', 'VelZ', 'vtkValidPointMask']):
            fileIssue = True
        if not any(array in vtu.array_names for array in pressure_keys):
            fileIssue = True
        if fileIssue:
            print(f'FILE ISSUE: {filepaths[key].name} does not have expected array names: {vtu.array_names}'
                  f' Skipping file for part {phastapartnum}')
            continue
        sys.stdout.flush()

        for pstr in pressure_keys:
            if pstr in vtu.array_names:
                pressure = pstr
                break

        mask_vtu1d = np.logical_not(mask_filled ** vtu['vtkValidPointMask'])
        used_node_count[key] = np.sum(mask_vtu1d)
        mask_filled = np.logical_or(mask_filled, vtu['vtkValidPointMask'])

        if not TESTONLY:
            dataarray = np.column_stack((vtu[pressure], vtu['VelX'], vtu['VelY'], vtu['VelZ']))
            mask_vtu2d = mask_vtu1d[:, None].astype(np.bool_) * mask_of_trues
            np.copyto(outdata, dataarray, where=mask_vtu2d)

    if not TESTONLY:
        if writevtu:
            vtu = next(iter(vtus.values()))
            vtu[pressure] = outdata[:, 0]
            vtu['VelX'] = outdata[:, 1]
            vtu['VelY'] = outdata[:, 2]
            vtu['VelZ'] = outdata[:, 3]
            vtu['vtkValidPointMask'] = mask_filled
            vtu.save(intermediateOutdir / f'Intermediate_{phastapartnum}.vtu')
        else:
            outpath = outdir / f'solInterp.{phastapartnum}'
            np.savetxt(outpath, outdata)

    logstring = '\t'
    for key in vtus.keys():
        if key in used_node_count.keys():
            logstring += f'{key:8}:{used_node_count[key]:8} '
        else:
            logstring += f'{key:8}: N/A '

    if not np.all(mask_filled):
        totalpnts = np.sum(np.logical_not(mask_filled))
        percent = totalpnts / nnodes
        print(f'solInterp.{phastapartnum:<5} BROKEN {totalpnts},{percent:>8.3%}' '\n' + logstring)
    else:
        print(f'solInterp.{phastapartnum:<5} fine' '\n' + logstring)
    sys.stdout.flush()
Python
def typedb_session(self):
    """Lazily create and cache the TypeDB session.

    Written this way in an attempt to also make it work with a DataLoader
    with num_workers > 0. TODO: it does not, so look into this.
    """
    if not self._typedb_session:
        print("setting up session")
        print(self)
        client = TypeDB.core_client(self._uri)
        self._typedb_session = client.session(database=self._database, session_type=SessionType.DATA)
    return self._typedb_session
Python
def is_listening(host, port):
    """Check if a service is listening on the given host and port."""
    s = socket.socket()
    try:
        s.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        s.close()  # always release the socket
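A minimal alternative sketch that also bounds the wait with a timeout and lets the with-block close the socket; the timeout value and the example host/port are arbitrary choices, not part of the original.

import socket

def is_listening_v2(host, port, timeout=1.0):
    """Return True if something accepts TCP connections on (host, port)."""
    try:
        # create_connection performs the connect; the with-block closes the socket
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(is_listening_v2('127.0.0.1', 22))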
Python
def montecarlo_samples(self,mcn): ''' Sample the w parameters over the various angles''' # Sample the angles according to their pdf theta=np.arccos(np.random.uniform(-1,1,mcn)) # Polar phi = np.pi*np.random.uniform(-1,1,mcn) # Azimuthal psi = np.pi*np.random.uniform(-1,1,mcn) # Azimuthal iota = np.arccos(np.random.uniform(-1,1,mcn)) # Polar # Antenna patterns. Eq. (57) in arxiv:0903.0338 Fplus = 0.5*(1.+np.cos(theta)**2)*np.cos(2.*phi)*np.cos(2.*psi) - np.cos(theta)*np.sin(2.*phi)*np.sin(2.*psi) Fcross = 0.5*(1.+np.cos(theta)**2)*np.cos(2.*phi)*np.sin(2.*psi) + np.cos(theta)*np.sin(2.*phi)*np.cos(2.*psi) # Projection parameter. Eq. (3.31) in arxiv:gr-qc/9301003 but define omega=Theta/4 littleomega = ( Fplus**2*(1.+np.cos(iota)**2)**2/4. +Fcross**2*np.cos(iota)**2 )**0.5 return littleomega if len(littleomega)>1 else littleomega[0]
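A self-contained sketch of the same angle sampling and antenna-pattern formulas, used here only to estimate the survival probability P(w > 0.5) directly from the samples; the sample size and seed are arbitrary.

import numpy as np

def sample_w(n, seed=42):
    rng = np.random.default_rng(seed)
    theta = np.arccos(rng.uniform(-1, 1, n))   # polar
    phi = np.pi * rng.uniform(-1, 1, n)        # azimuthal
    psi = np.pi * rng.uniform(-1, 1, n)        # polarization
    iota = np.arccos(rng.uniform(-1, 1, n))    # inclination
    fplus = 0.5 * (1 + np.cos(theta)**2) * np.cos(2 * phi) * np.cos(2 * psi) \
        - np.cos(theta) * np.sin(2 * phi) * np.sin(2 * psi)
    fcross = 0.5 * (1 + np.cos(theta)**2) * np.cos(2 * phi) * np.sin(2 * psi) \
        + np.cos(theta) * np.sin(2 * phi) * np.cos(2 * psi)
    return np.sqrt(fplus**2 * (1 + np.cos(iota)**2)**2 / 4 + fcross**2 * np.cos(iota)**2)

w = sample_w(100_000)
print('P(w > 0.5) ~', np.mean(w > 0.5))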
Python
def interpolate(self):
    ''' Compute interpolation. If available, read from file'''

    if self._interpolate is None:

        # Takes some time. Store a pickle...
        if not os.path.isfile(self.binfile):
            if not os.path.exists(self.directory) and self.directory!='':
                os.makedirs(self.directory)

            if self.is_default:
                print('\n['+this_module+'] You are using default values. You can download this interpolant. Use:\n')
                print('curl https://raw.githubusercontent.com/dgerosa/gwdet/master/checkpoints/'+self.binfileonly+'.tar.gz -o '+self.binfile+'.tar.gz; tar -xzvf '+self.binfile+'.tar.gz -C '+self.directory+'; rm '+self.binfile+'.tar.gz \n')

            print('['+this_module+'] Storing: '+self.binfile)
            print('['+this_module+'] Interpolating Pw(w)...')

            hist = np.histogram(self.montecarlo_samples(self.mcn),bins=self.mcbins)
            hist_dist = scipy.stats.rv_histogram(hist)

            with open(self.binfile, 'wb') as f: pickle.dump(hist_dist, f)

        with open(self.binfile, 'rb') as f: hist_dist = pickle.load(f)

        self._interpolate = hist_dist.sf  # sf gives the survival function P(>w) rather than the cdf P(<w)

    return self._interpolate
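A minimal standalone demonstration of the np.histogram / scipy.stats.rv_histogram / sf combination used above, on synthetic uniform samples rather than the Monte Carlo draws:

import numpy as np
import scipy.stats

samples = np.random.default_rng(0).uniform(0, 1, 100_000)
hist = np.histogram(samples, bins=100)
hist_dist = scipy.stats.rv_histogram(hist)

# sf(w) = P(>w); for uniform samples on [0, 1] this is roughly 1 - w
for w in (0.1, 0.5, 0.9):
    print(w, hist_dist.sf(w))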
Python
def pdetproj(self): ''' A single instance of the pdet class''' if self._pdetproj is None: self._pdetproj = averageangles(directory=self.directory,binfile=self.binfilepdet,mcn=self.mcn,mcbins=self.mcbins) return self._pdetproj
Python
def snr(self,m1_vals,m2_vals,z_vals):
    ''' Compute the SNR from m1,m2,z '''

    if not hasattr(m1_vals, "__len__"): m1_vals=[m1_vals]
    if not hasattr(m2_vals, "__len__"): m2_vals=[m2_vals]
    if not hasattr(z_vals, "__len__"): z_vals=[z_vals]

    snr=[]
    for m1,m2,z in zip(m1_vals,m2_vals,z_vals):
        lum_dist = astropy.cosmology.Planck15.luminosity_distance(z).value  # luminosity distance in Mpc

        assert self.has_pycbc, "pycbc is needed"
        hp, hc = pycbc.waveform.get_fd_waveform(approximant=self.approximant,
                                                mass1=m1*(1.+z),
                                                mass2=m2*(1.+z),
                                                delta_f=self.deltaf,
                                                f_lower=self.flow,
                                                distance=lum_dist)

        if self.psd_from_path:
            evaluatedpsd = pycbc.psd.from_txt(self.psd_path,
                                              len(hp), self.deltaf, self.flow, is_asd_file=self.is_asd_file)
        else:
            evaluatedpsd = pycbc.psd.analytical.from_string(self.psd,len(hp), self.deltaf, self.flow)

        snr_one = pycbc.filter.sigma(hp, psd=evaluatedpsd, low_frequency_cutoff=self.flow)
        snr.append(snr_one)  # use hp only because I want optimally oriented sources

        if self.screen:
            print("  m1="+str(m1)+"  m2="+str(m2)+"  z="+str(z)+"  SNR="+str(snr_one))

    return np.array(snr) if len(snr)>1 else snr[0]
Python
def compute(self,m1,m2,z): ''' Direct evaluation of the detection probability''' snr = self.snr(m1,m2,z) return self.pdetproj().eval(self.snrthreshold/snr)
Python
def snrinterpolant(self):
    ''' Build an interpolation for the SNR '''

    if self._snrinterpolant is None:

        assert self.has_pycbc, 'pycbc is needed'

        # Takes some time. Store a pickle...
        #if not os.path.isfile(self.tempfile):

        print('['+this_module+'] Interpolating SNR...')

        # See https://stackoverflow.com/a/30059599

        m1z_grid = np.linspace(self.massmin*(1.+self.zmin),self.massmax*(1.+self.zmax),self.mc1d)  # Redshifted mass 1
        m2z_grid = np.linspace(self.massmin*(1.+self.zmin),self.massmax*(1.+self.zmax),self.mc1d)  # Redshifted mass 2
        grids=[m1z_grid,m2z_grid]

        #meshgrid=np.zeros(reduce(lambda x,y:x*y, [len(x) for x in grids]))
        #print(reduce(lambda x,y:x*y, [len(x) for x in grids]))

        meshgrid=[]
        meshcoord=[]
        for i,m1z in enumerate(m1z_grid):
            for j,m2z in enumerate(m2z_grid):
                meshcoord.append([i,j])
                meshgrid.append([m1z,m2z])
        meshgrid=np.array(meshgrid)
        meshcoord=np.array(meshcoord)

        if self.parallel:

            # Shuffle the arrays: https://stackoverflow.com/a/4602224/4481987
            # Useful to better distribute load across processors
            assert len(meshcoord)==len(meshgrid)
            p = np.random.permutation(len(meshcoord))
            meshcoord = meshcoord[p]
            meshgrid = meshgrid[p]

            #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
            #meshvalues = pool.imap(snr_pickable, meshgrid)
            #pool.close()  # No more work
            #meshvalues = pool.imap(self._snr, meshgrid)

            meshvalues = self.map(self._snr, meshgrid)

            while True:
                completed = meshvalues._index
                if completed == len(meshgrid):
                    break
                print('    [multiprocessing] Waiting for', len(meshgrid)-completed, 'tasks...')
                time.sleep(1)
            #pool.close()
            #pool.join()

        else:
            meshvalues = map(self._snr, meshgrid)

        valuesforinterpolator = np.zeros([len(x) for x in grids])
        for ij,val in zip(meshcoord,meshvalues):
            i,j=ij
            valuesforinterpolator[i,j]=val

        snrinterpolant = scipy.interpolate.RegularGridInterpolator(points=grids,values=valuesforinterpolator,bounds_error=False,fill_value=None)

        self._snrinterpolant = snrinterpolant

        # with open(self.tempfile, 'wb') as f: pickle.dump(snrinterpolant, f)
        # with open(self.tempfile, 'rb') as f: self._snrinterpolant = pickle.load(f)

    return self._snrinterpolant
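A compact, self-contained example of the RegularGridInterpolator pattern used above, with a toy function standing in for the SNR evaluation:

import numpy as np
import scipy.interpolate

m1z = np.linspace(1, 100, 30)
m2z = np.linspace(1, 100, 30)
values = np.zeros((len(m1z), len(m2z)))
for i, a in enumerate(m1z):
    for j, b in enumerate(m2z):
        values[i, j] = (a * b) ** 0.5   # toy stand-in for the SNR

interp = scipy.interpolate.RegularGridInterpolator(
    points=[m1z, m2z], values=values, bounds_error=False, fill_value=None)

print(interp([[30.0, 40.0]]))   # interpolated value
print((30.0 * 40.0) ** 0.5)     # exact value for comparison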
Python
def interpolate(self):
    ''' Build an interpolation for the detection probability as a function of m1,m2,z'''

    if self._interpolate is None:

        # Takes some time. Store a pickle...
        if not os.path.isfile(self.binfile):
            if not os.path.exists(self.directory) and self.directory!='':
                os.makedirs(self.directory)

            if self.is_default:
                print('\n['+this_module+'] You are using default values. You can download this interpolant. Use:\n')
                print('curl https://raw.githubusercontent.com/dgerosa/gwdet/master/checkpoints/'+self.binfileonly+'.tar.gz -o '+self.binfile+'.tar.gz; tar -xzvf '+self.binfile+'.tar.gz -C '+self.directory+'; rm '+self.binfile+'.tar.gz \n')

            assert self.has_pycbc, "pycbc is needed"

            print('['+this_module+'] Storing: '+self.binfile)

            # Make sure the other interpolants are available
            dummy = self.snrinterpolant()
            dummy = self.pdetproj()(0.5)

            print('['+this_module+'] Interpolating Pw(SNR)...')

            # See https://stackoverflow.com/a/30059599

            m1_grid = np.linspace(self.massmin,self.massmax,self.mc1d)  # Mass 1
            m2_grid = np.linspace(self.massmin,self.massmax,self.mc1d)  # Mass 2
            z_grid = np.linspace(self.zmin,self.zmax,self.mc1d)         # Redshift
            grids=[m1_grid,m2_grid,z_grid]

            meshgrid=[]
            meshcoord=[]
            for i,m1 in enumerate(m1_grid):
                for j,m2 in enumerate(m2_grid):
                    for k,z in enumerate(z_grid):
                        #print i,j,k
                        meshcoord.append([i,j,k])
                        meshgrid.append([m1,m2,z])
            meshgrid=np.array(meshgrid)
            meshcoord=np.array(meshcoord)

            if self.parallel:

                # Shuffle the arrays: https://stackoverflow.com/a/4602224/4481987
                # Useful to better distribute load across processors
                assert len(meshcoord)==len(meshgrid)
                p = np.random.permutation(len(meshcoord))
                meshcoord = meshcoord[p]
                meshgrid = meshgrid[p]

                #pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
                #meshvalues = pool.imap(compute_pickable, meshgrid)
                #pool.close()  # No more work

                #pool2 = pathos.multiprocessing.ProcessingPool(multiprocessing.cpu_count())
                #meshvalues = pool2.imap(self._compute, meshgrid)
                #pool2.close()

                meshvalues = self.map(self._compute, meshgrid)

                while True:
                    completed = meshvalues._index
                    if completed == len(meshgrid):
                        break
                    print('    [multiprocessing] Waiting for', len(meshgrid)-completed, 'tasks...')
                    time.sleep(1)
                #pool.close()

            else:
                meshvalues = map(self._compute, meshgrid)

            valuesforinterpolator = np.zeros([len(x) for x in grids])
            for ijk,val in zip(meshcoord,meshvalues):
                i,j,k=ijk
                valuesforinterpolator[i,j,k]=val

            interpolant = scipy.interpolate.RegularGridInterpolator(points=grids,values=valuesforinterpolator,bounds_error=False,fill_value=None)

            with open(self.binfile, 'wb') as f: pickle.dump(interpolant, f)

        with open(self.binfile, 'rb') as f: interpolant = pickle.load(f)

        self._interpolate = interpolant

    return self._interpolate
Python
def compare_Pw():
    ''' Compare performance of the averageangles interpolator against public data from Emanuele Berti's website'''

    plotting()  # Initialize plotting stuff

    # Download file from Emanuele Berti's website if it does not exist
    if not os.path.isfile("Pw_single.dat"):
        urllib.request.urlretrieve('http://www.phy.olemiss.edu/~berti/research/Pw_single.dat', "Pw_single.dat")  # Python 3: urlretrieve lives in urllib.request

    wEm,PwEm=np.loadtxt("Pw_single.dat",unpack=True)

    wmy = np.linspace(-0.1,1.1,1000)
    p=averageangles()
    Pwmy=p(wmy)

    f, ax = plt.subplots(2, sharex=True)

    ax[0].plot(wEm,PwEm,label="public data")
    ax[0].plot(wmy,Pwmy,ls='dashed',label="this code")

    Pwmy = p(wEm)
    ax[1].plot(wEm,np.abs(PwEm-Pwmy),c='C2')  #*2./(PwEm+Pwmy))

    ax[1].set_xlabel(r'$\omega$')
    ax[0].set_ylabel(r'$P(>\omega)$')
    ax[1].set_ylabel('Residuals')
    ax[0].legend()

    plt.savefig(sys._getframe().f_code.co_name+".pdf",bbox_inches='tight')
Python
def compare_Psnr():
    ''' Evaluate performance of the detectability interpolator against raw SNR calculations '''

    plotting()  # Initialize plotting stuff

    computed=[]
    interpolated=[]

    n=1000
    m1=np.random.uniform(1,100,n)
    m2=np.random.uniform(1,100,n)
    z=np.random.uniform(1e-4,2.5,n)

    p=detectability()
    computed=p.compute(m1,m2,z)
    interpolated=p(m1,m2,z)

    computed=np.array(computed)
    interpolated=np.array(interpolated)

    residuals=np.abs(computed-interpolated)
    residuals_notzero = residuals[np.logical_and(computed!=0,interpolated!=0)]

    f, ax = plt.subplots(2)

    #ax[0].hist(residuals,weights=[1./n for x in residuals],alpha=0.8,bins=100)
    ax[0].hist(residuals_notzero,weights=[1./n for x in residuals_notzero],alpha=0.8,bins=100)
    ax[0].axvline(np.median(residuals_notzero),ls='dashed',c='red')

    #ax[1].hist(residuals,range=[0,0.0001],weights=[1./n for x in residuals],alpha=0.8,bins=100)
    ax[1].hist(residuals_notzero,range=[0,0.0001],weights=[1./n for x in residuals_notzero],alpha=0.8,bins=100)
    ax[1].axvline(np.median(residuals_notzero),ls='dashed',c='red')

    ax[1].set_xlabel('Residuals $P_{\\rm det}$')

    plt.savefig(sys._getframe().f_code.co_name+".pdf",bbox_inches='tight')
Python
def _event_selector(self, event):
    """ Select the correct event and call the matching handler """
    event_type = event[0]
    if event_type == 'waiting':
        self.events_handler.waiting()
    elif event_type == 'typing':
        self.events_handler.typing()
    elif event_type == 'connected':
        self.connected = True
        self.events_handler.connected()
    elif event_type == 'gotMessage':
        message = event[1]
        self.events_handler.message(message)
    elif event_type == 'commonLikes':
        likes = event[1]
        self.events_handler.common_likes(likes)
    elif event_type == 'stoppedTyping':
        self.events_handler.stopped_typing()
    elif event_type == 'strangerDisconnected':
        self.disconnect()
        self.events_handler.disconnected()
    elif event_type == 'recaptchaRequired':
        self.events_handler.captcha_required()
    elif event_type == 'recaptchaRejected':
        self.events_handler.captcha_rejected()
    elif event_type == 'serverMessage':
        message = event[1]
        self.events_handler.server_message(message)
    elif event_type == 'statusInfo':
        status = event[1]
        self.events_handler.status_info(status)
    elif event_type == 'identDigests':
        digests = event[1]
        self.events_handler.ident_digest(digests)
        # Send an initial greeting here; remove this call if you do not want one
        self.send('Hola')
    else:
        print('Unhandled event: %s' % event)
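As a design note, the long if/elif chain can also be expressed as a dispatch table. The sketch below is not the project's actual code; it only covers the simple events (handler names mirror the ones above), while events that need extra state such as 'connected' or 'strangerDisconnected' would keep explicit branches.

class EventRouter:
    """Route Omegle-style events to handler methods via a lookup table."""

    def __init__(self, events_handler):
        self.events_handler = events_handler
        # event type -> (handler, whether the payload event[1] is passed)
        self._table = {
            'waiting': (events_handler.waiting, False),
            'typing': (events_handler.typing, False),
            'gotMessage': (events_handler.message, True),
            'commonLikes': (events_handler.common_likes, True),
            'stoppedTyping': (events_handler.stopped_typing, False),
            'serverMessage': (events_handler.server_message, True),
            'statusInfo': (events_handler.status_info, True),
        }

    def route(self, event):
        handler, wants_payload = self._table.get(event[0], (None, False))
        if handler is None:
            print('Unhandled event: %s' % (event,))
        elif wants_payload:
            handler(event[1])
        else:
            handler()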
Python
def count_words(link):
    res = requests.get(link)
    soup = BeautifulSoup(res.text, 'lxml')

    lyrics_soup = soup.select('.lyrics')[0]
    # .select returns a list of ALL occurrences of .lyrics, so the 1st element is taken
    # lyrics is a html tag with lyrics as class; lyrics.text gives the <p> tag contents etc.

    with open('lyrics_raw.txt', 'w', encoding="utf-8") as f:  # encoding="utf-8" handles non-ASCII (e.g. Chinese) characters
        f.write(lyrics_soup.text)
        f.write(" ")  # trailing space in case the same file is used to write multiple songs

    # filtering and removing non-artist verses
    modified_file_contents = ""
    with open('lyrics_raw.txt', 'r', encoding="utf-8") as f:
        file_contents = f.read()  # f.read() gives a string

    if is_feature(file_contents):
        for i in range(len(file_contents)):  # filter 1: removing other artists
            if file_contents[i] == '[':
                flag = False
                j = 0
                while True:
                    if file_contents[i + j] == ']':
                        break
                    else:
                        j += 1

                square_brackets_content = file_contents[i: i + j + 1]  # genius has everything in sq brackets
                del j

                artist = data['name_with_spaces']
                for j in range(len(square_brackets_content) - len(artist)):
                    if artist.lower() == square_brackets_content[j: j + len(artist)].lower():  # to handle different-case artist names
                        flag = True
                        break
                        # flag = True is to say that the following verse is by the selected artist

                if flag:
                    j = len(square_brackets_content) + 1
                    try:
                        while file_contents[i + j] != '[':  # till the start of next verse
                            modified_file_contents += file_contents[i + j]
                            j += 1
                    except IndexError:
                        pass

    else:
        for i in range(len(file_contents)):
            if file_contents[i] == '[':
                j = 0  # j is length of square bracket content
                while True:
                    if file_contents[i + j] == ']':
                        j += 1  # to remove the current '['
                        break
                    else:
                        j += 1
                try:
                    # starting from j skips the square bracket content
                    while file_contents[i + j] != '[':  # till the start of next verse
                        modified_file_contents += file_contents[i + j]
                        j += 1
                except IndexError:
                    pass

    # now remove annotations like commas, apostrophes etc.
    modified_file_contents = modified_file_contents.replace('-', ' ')\
        .replace('*', '')\
        .replace('?', '')\
        .replace('   ', ' ')\
        .replace('  ', ' ')\
        .replace('(', '')\
        .replace(')', '')\
        .replace('{', '')\
        .replace('\'', '')\
        .replace(',', '')\
        .replace('-', '')\
        .replace('!', '')\
        .replace('\"', '')\
        .replace('--', '')\
        .replace('.', '')\
        .replace(',', '')\
        .replace('\n', ' ')

    modified_file_contents = modified_file_contents.lower()

    with open('lyrics_raw.txt', 'w', encoding="utf-8") as f:
        f.write(modified_file_contents)
        f.write(" ")
    # at this point the file is ready to be counted

    # making the list, dropping empty tokens from the split
    words_list = [w for w in modified_file_contents.split(" ") if w not in ('', ' ')]

    dict_unsorted = {}
    for word in words_list:
        count = 0
        for i in words_list:  # i is the word which is checked throughout the list
            if word == i:
                count += 1
        dict_unsorted[word] = count  # adding a pair (word, count) to the unsorted dictionary

    if len(dicts) >= 2:  # merge
        dicts.append({**dicts.pop(), **dicts.pop()})
    else:
        dicts.append(dict_unsorted)
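As a design note: collections.Counter does the per-word counting in a single pass and merges counts correctly, whereas the dict-unpacking merge above overwrites the count of any word that appears in both dictionaries instead of adding the counts. A minimal sketch with made-up lyrics lines:

from collections import Counter

song_a = Counter("started from the bottom now we here".split())
song_b = Counter("now we here started from the bottom".split())

merged = song_a + song_b  # counts are summed, not overwritten
print(merged.most_common(3))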
Python
def list(self, ignore_patterns=None): """ List all files in all locations. """ for prefix, root in self.locations: storage = self.storages[root] for path in utils.get_files(storage, ignore_patterns): yield path, storage
Python
def list(self, ignore_patterns=None): """ List all files in all app storages. """ for storage in self.storages.values(): if storage.exists(''): # check if storage location exists for path in utils.get_files(storage, ignore_patterns): yield path, storage
Python
def find_in_app(self, app, path): """ Find a requested media file in an app's media fixtures locations. """ storage = self.storages.get(app, None) if storage: # only try to find a file if the source dir actually exists if storage.exists(path): matched_path = storage.path(path) if matched_path: return matched_path
Python
def generate_single_page(self): """Generate the whole markdown for single page of the mission""" template = copy.deepcopy(SINGLE_PAGE_TEMPLATE) image_row = "" for b in self.badges: image_item = copy.deepcopy(SINGLE_PAGE_IMAGE_ITEM_TEMPLATE) image_item = image_item.replace("image_file", b.local_path) image_row += image_item template = template.replace("IMAGE_ROW", image_row) caption_row = "" for b in self.badges: caption_item = copy.deepcopy(SINGLE_PAGE_CAPTION_ITEM_TEMPLATE) caption_item = caption_item.replace("mission_name", self.mission_name) caption_item = caption_item.replace("mission_date_formatted", self.mission_date_formatted) caption_row += caption_item template = template.replace("CAPTION_ROW", caption_row) template = template.replace("MISSION_NAME_EN", self.mission_name_en) # This should be placed first template = template.replace("MISSION_NAME", self.mission_name) info = "" info += "* 时间:{}\n".format(self.mission_date_formatted) info += "* 载具:{}\n".format(self.launch_vehicle) info += "* 载荷:{}\n".format(self.payload) info += "* 来源:" for b in self.badges: info += "[{}]({}) ".format(b.source_name, b.source_url) info += "\n" info += "* 信息:" for i in self.info_sources: info += "[{}]({}) ".format(i[0], i[1]) info += "\n" if self.comment is not None: info += "* 其他:" info += self.comment info += "\n" template = template.replace("INFO", info) with open(join(self.folder_path, "README.md"), "w") as f: f.write(template) return dict( path=self.folder_path, # text=template, mission_name=self.mission_name, mission_name_en=self.mission_name_en, date=self.mission_date_formatted )
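A small alternative sketch using string.Template instead of chained str.replace calls, which avoids the ordering constraint noted above (MISSION_NAME_EN before MISSION_NAME) and accidental substring collisions. The placeholder names and values here are illustrative, not the project's real templates.

from string import Template

page = Template("# $mission_name ($mission_name_en)\n* 时间:$date\n")
print(page.substitute(
    mission_name='示例任务',
    mission_name_en='Demo Mission',
    date='2020-01-01'))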
Python
def add_one_mission(image_row, caption_row, mission): """Add one cell in the blocks in the main page.""" image_item = copy.deepcopy(MAIN_PAGE_IMAGE_ITEM_TEMPLATE) caption_item = copy.deepcopy(MAIN_PAGE_CAPTION_ITEM_TEMPLATE) image_item = image_item.replace("image_file", join("gallery", mission.badges[0].root_path)) image_row += image_item # caption_item = caption_item.replace("MISSION_NAME_EN", mission.mission_name_en) # This line is first caption_item = caption_item.replace("MISSION_NAME", mission.mission_name) caption_item = caption_item.replace("mission_date_formatted", mission.mission_date_formatted) caption_item = caption_item.replace("MISSION_LINK", join("gallery", mission.folder_path)) caption_row += caption_item return image_row, caption_row
Python
def run_one_named_indexer(token, iri, named): """Run indexer on the named graph of the endpoint.""" g = rdflib.Graph(store='SPARQLStore', identifier=named) g.open(iri) red = run_one_named_indexer.redis return run_indexer(token, f'{iri}/{named}', g, red)
Python
def run_indexer(token, iri, g, red): """Get all available analyzers and let them find relationships.""" log = logging.getLogger(__name__) exp = expiration[KeyRoot.RELATED] log.info(f'Indexing {iri}') cnt = 0 analyzer = get_analyzer(token) with red.pipeline() as pipe: for key, rel_type in analyzer.find_relation(g): log.debug(f'Distribution: {iri!s}, relationship type: {rel_type!s}, shared key: {key!s}') key = related_key(rel_type, key) pipe.sadd(key, iri) pipe.expire(key, exp) pipe.sadd('purgeable', key) cnt = cnt + 1 pipe.execute() log.info(f'Indexed {cnt!s} records') return cnt
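A standalone sketch of the pipeline pattern above, batching sadd/expire per key into one round trip. It assumes a local Redis on the default port; the key names, IRIs and 30-day expiration are made up for the example.

import redis

r = redis.Redis()  # assumes redis://localhost:6379
related = {'related:cube:abc': ['iri1', 'iri2'], 'related:ruian:xyz': ['iri3']}

with r.pipeline() as pipe:
    for key, iris in related.items():
        pipe.sadd(key, *iris)
        pipe.expire(key, 30 * 24 * 3600)  # 30 days, arbitrary for the sketch
        pipe.sadd('purgeable', key)
    pipe.execute()

print(r.smembers('related:cube:abc'))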
Python
def run_one_indexer(token, iri, format_guess):
    """Extract graph from redis and run indexer identified by token on it."""
    log = logging.getLogger(__name__)
    red = run_one_indexer.redis
    key = data_key(iri)

    log.debug('Parsing graph')
    try:
        g = rdflib.ConjunctiveGraph()
        g.parse(data=red.get(key), format=format_guess)
    except (rdflib.plugin.PluginException, ValueError):
        log.debug('Failed to parse graph')
        return 0

    return run_indexer(token, iri, g, red)
Python
def do_process(iri, task, is_prio=False):
    """Analyze an RDF distribution under given IRI."""
    log = logging.getLogger(__name__)

    if iri.endswith('csv.zip') or iri.endswith('csv') or iri.endswith('csv.gz') or iri.endswith('xls') or \
       iri.endswith('docx') or iri.endswith('xlsx') or iri.endswith('pdf') or \
       (iri.startswith('http://vdp.cuzk.cz') and (iri.endswith('xml.zip') or iri.endswith('xml'))) or \
       (iri.startswith('http://dataor.justice.cz') and (iri.endswith('xml') or iri.endswith('xml.gz'))) or \
       iri.startswith('https://apl.czso.cz/iSMS/cisexp.jsp') or \
       iri.startswith('https://volby.cz/pls/ps2017/vysledky_okres') or \
       iri.startswith('http://services.cuzk.cz/'):
        log.warn(f'Skipping distribution as it will not be supported: {iri!s}')
        return

    if not is_prio and (iri.endswith('xml') or iri.endswith('xml.zip')):
        log.warn(f'Skipping distribution as it will not be supported: {iri!s} (xml in the non-priority channel)')
        return

    key = root_name[KeyRoot.DISTRIBUTIONS]
    red = task.redis
    if red.sadd(key, iri) == 0:
        log.warn(f'Skipping distribution as it was recently processed: {iri!s}')
        return
    red.expire(key, expiration[KeyRoot.DISTRIBUTIONS])
    red.sadd('purgeable', key)

    log.info(f'Processing {iri!s}')

    if iri.endswith('sparql'):
        log.info('Guessing it is a SPARQL endpoint')
        return process_endpoint.si(iri).apply_async(queue='low_priority')

    try:
        try:
            r = fetch(iri, log, red)
        except RobotsRetry as e:
            red.srem(key, iri)
            task.retry(countdown=e.delay)
        except requests.exceptions.HTTPError:
            log.exception('HTTP Error')  # this is a 404 or similar, not worth retrying
            raise
        except requests.exceptions.RequestException as e:
            red.srem(key, iri)
            task.retry(exc=e)
        except GeventTimeout as e:  # this is gevent.timeout.Timeout
            red.srem(key, iri)
            task.retry(exc=e, countdown=e.seconds)

        try:
            test_content_length(iri, r, log)
            guess, priority = guess_format(iri, r, log, red)
            is_prio = is_prio | priority
        except Skip:
            return

        decompress_task = decompress
        if is_prio:
            decompress_task = decompress_prio
        if guess in ['application/x-7z-compressed', 'application/x-zip-compressed', 'application/zip']:
            # delegate this into a low_priority task
            return decompress_task.si(iri, 'zip').apply_async(queue='low_priority')
        elif guess in ['application/gzip', 'application/x-gzip']:
            return decompress_task.si(iri, 'gzip').apply_async(queue='low_priority')
        else:
            try:
                store_content(iri, r, red)
            except SizeException:
                log.warn(f'File is too large: {iri}')
                raise
            else:
                pipeline = group(index.si(iri, guess), analyze.si(iri, guess))
                if is_prio:
                    return pipeline.apply_async(queue='high_priority')
                return pipeline.apply_async()
    except Exception:
        log.exception(f'Failed to get {iri!s}')
        red.sadd('stat:failed', str(iri))
        red.sadd('purgeable', 'stat:failed')
        return
Python
def guess_format(iri, r, log, red): """ Guess format of the distribution. Skip if not known 5* distribution format. """ guess = rdflib.util.guess_format(iri) if guess is None: guess = r.headers.get('content-type').split(';')[0] monitor.log_format(str(guess)) log.info(f'Guessing format to be {guess!s}') priority = set(['hturtle', 'n3', 'nquads', 'nt', 'trix', 'trig', 'turtle', 'xml', 'json-ld', 'application/x-7z-compressed', 'application/rdf+xml', 'application/ld+json', 'application/rss+xml']) regular = set(['text/xml', 'application/json', 'application/gzip', 'application/x-zip-compressed', 'application/zip', 'text/plain', 'application/x-gzip']) if guess not in priority.union(regular): log.info(f'Skipping this distribution') red.sadd('stat:skipped', str(iri)) red.sadd('purgeable', 'stat:skipped') raise Skip() return guess, (guess in priority)
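rdflib's extension-based guess is easy to check on its own; anything it does not recognise falls back to the Content-Type header, as above. A tiny sketch with made-up example URLs:

import rdflib.util

for iri in ('http://example.com/data.ttl',
            'http://example.com/data.nt',
            'http://example.com/data.csv'):
    print(iri, '->', rdflib.util.guess_format(iri))
# .ttl -> 'turtle', .nt -> 'nt', .csv -> None (handled via the Content-Type header)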
Python
def process_endpoint(iri):  # Low priority as we are doing a scan of all graphs in the endpoint
    """Index and analyze triples in the endpoint."""
    log = logging.getLogger(__name__)
    key = root_name[KeyRoot.ENDPOINTS]
    red = process_endpoint.redis
    red.sadd('purgeable', key)
    if red.sadd(key, iri) > 0:
        red.expire(key, expiration[KeyRoot.ENDPOINTS])
        a = SparqlEndpointAnalyzer()
        tasks = []
        for g in a.get_graphs_from_endpoint(iri):
            key = graph(iri)
            red.sadd('purgeable', key)
            if red.sadd(key, g) > 0:
                # this is to prevent repeated processing of a graph (same as distributions)
                # and also for analysis query
                tasks.append(index_named.si(iri, g))
                tasks.append(analyze_named.si(iri, g))
            red.expire(key, expiration[KeyRoot.GRAPHS])
        return group(tasks).apply_async(queue='low_priority')
    log.debug(f'Skipping endpoint as it was recently analyzed: {iri!s}')
Python
def decompress_7z(iri, r, red):
    """Download a 7z file, decompress it and store contents in redis."""
    data = load_data(iri, r)
    log = logging.getLogger(__name__)

    expiration = expire_table[KeyRoot.DATA]

    deco_size_total = 0
    with libarchive.memory_reader(data) as archive:
        for entry in archive:
            try:
                name = str(entry)
            except Exception:
                name = str(uuid.uuid4())

            if len(name) == 0:
                if iri.endswith('.zip'):
                    sub_iri = iri[:-4]
                else:
                    sub_iri = f'{iri}/{name}'
                    log.error(f'Empty name, iri: {iri!s}')
            else:
                sub_iri = f'{iri}/{name}'

            sub_key = data_key(sub_iri)
            log.debug(f'Store {name} into {sub_key}')
            conlen = 0
            if not red.exists(sub_key):
                red.sadd('purgeable', sub_key)
                for block in entry.get_blocks():
                    if len(block) + conlen > MAX_CONTENT_LENGTH:
                        # Will fail due to redis limitation
                        red.expire(sub_key, 0)
                        raise SizeException(name)

                    red.append(sub_key, block)
                    conlen = conlen + len(block)
                red.expire(sub_key, expiration)
                monitor.log_size(conlen)
                log.debug(f'Subfile has size {conlen}')
                deco_size_total = deco_size_total + conlen
            else:
                log.warn(f'Data already exists for {sub_iri}')

            if conlen > 0:
                yield sub_iri
    log.debug(f'Done decompression, total decompressed size {deco_size_total}')
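The per-entry size guard above reduces to a simple pattern: append blocks while tracking the running length and abort once a limit is crossed. A minimal sketch with an in-memory sink instead of redis; the limit and sample blocks are arbitrary.

MAX_LIMIT = 10  # arbitrary limit for the sketch

class SizeException(Exception):
    pass

def store_blocks(blocks, limit=MAX_LIMIT):
    sink, conlen = bytearray(), 0
    for block in blocks:
        if conlen + len(block) > limit:
            raise SizeException('entry too large')
        sink.extend(block)
        conlen += len(block)
    return bytes(sink), conlen

print(store_blocks([b'abc', b'defg']))       # fits within the limit
try:
    store_blocks([b'abcdef', b'ghijklm'])    # exceeds the limit
except SizeException as e:
    print('rejected:', e)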
Python
def cleanup_endpoint(): """Clean any purgeable records, Flask cache and possibly also stats.""" extra = ['purgeable'] stats = 'stats' in request.args if stats: extra.extend(Monitor.KEYS) cleanup.si(current_app.config['CACHE_KEY_PREFIX'], extra).apply_async(queue='low_priority').get() return 'OK'
Python
def system_check():
    """Run an availability test of auxiliary systems.

    Currently tested: redis.
    """
    log = logging.getLogger(__name__)
    log.info('System check started')

    log.info('Testing redis')
    red = redis.Redis(connection_pool=redis_pool)
    red.ping()

    log.info('System check successful')
Python
def process_graph(self, endpoint, graph_iri): """Extract DCAT datasets from the given named graph of an endpoint.""" log = logging.getLogger(__name__) if not rfc3987.match(endpoint): log.warn(f'{endpoint!s} is not a valid endpoint URL') return None if not rfc3987.match(graph_iri): log.warn(f'{graph_iri!s} is not a valid graph URL') return None g = Graph(store='SPARQLStore', identifier=graph_iri) g.open(endpoint) result = Graph() for s, p, o in g.query(self.__query(endpoint, graph_iri)): result.add( (s, p, o) ) return result
Python
def inspect_catalog(key): """Analyze DCAT datasets listed in the catalog.""" log = logging.getLogger(__name__) red = inspect_catalog.redis log.debug('Parsing graph') try: g = rdflib.ConjunctiveGraph() g.parse(data=red.get(key), format='n3') red.delete(key) except rdflib.plugin.PluginException: log.debug('Failed to parse graph') return None return _dcat_extractor(g, red, log)
Python
def store_analysis(results, iri): """Store results of the analysis in redis.""" red = store_analysis.redis # results ... list of strings (json.dumps()) if len(results) > 0: store = json.dumps({'analysis': [json.loads(x) for x in results if ((x is not None) and (len(x) > 0))], 'iri': iri}) else: red.delete(data_key(iri)) return key_result = analysis_dataset(iri) with red.pipeline() as pipe: pipe.set(key_result, store) pipe.sadd('purgeable', key_result) pipe.expire(key_result, expiration[KeyRoot.ANALYSIS]) pipe.delete(data_key(iri)) # trash original content (index doesn't need it?) pipe.execute()
Python
def run_one_analyzer(analyzer_token, key, format_guess): """Run one analyzer identified by its token.""" log = logging.getLogger(__name__) analyzer = get_analyzer(analyzer_token) try: g = rdflib.ConjunctiveGraph() log.debug('Parsing graph') red = run_one_analyzer.redis g.parse(data=red.get(key), format=format_guess) return json.dumps({analyzer_token: analyzer.analyze(g)}) except (rdflib.plugin.PluginException, UnicodeDecodeError): log.debug('Failed to parse graph') except ValueError: log.exception(f'Missing data, key: {key}, analyzer: {analyzer_token}, format: {format_guess}') return None
Python
def analyze_named(endpoint_iri, named_graph): """Analyze triples in a named graph of an endpoint.""" tokens = [it.token for it in AbstractAnalyzer.__subclasses__()] tasks = [run_one_named_analyzer.si(token, endpoint_iri, named_graph) for token in tokens] return chord(tasks)(store_named_analysis.si(endpoint_iri, named_graph))
Python
def run_one_named_analyzer(token, endpoint_iri, named_graph):
    """Run an analyzer identified by its token on the triples in a named graph of an endpoint."""
    g = rdflib.Graph(store='SPARQLStore', identifier=named_graph)
    g.open(endpoint_iri)
    a = get_analyzer(token)
    return json.dumps({token: a.analyze(g)})
Python
def store_named_analysis(results, endpoint_iri, named_graph):
    """Store results of the analysis in redis."""
    red = store_named_analysis.redis
    key = analysis_endpoint(endpoint_iri, named_graph)
    if len(results) > 0:
        store = json.dumps({
            'analysis': [json.loads(x) for x in results if ((x is not None) and (len(x) > 0))],
            'endpoint': endpoint_iri,
            'graph': named_graph
        })
        with red.pipeline() as pipe:
            pipe.sadd('purgeable', key)
            pipe.set(key, store)
            pipe.expire(key, expiration[KeyRoot.ANALYSIS])
            pipe.execute()
Python
def index_distribution_query(iri):
    """Query the index and construct related datasets for the iri of a distribution.

    Final result is stored in redis.
    """
    red = redis.Redis(connection_pool=redis_pool)
    # if not missing(iri, red):
    #     return

    related_ds = json.loads(red.get('relatedds'))
    current_dataset = red.hget('distrds', iri)

    for rel_type in reltypes:
        to_delete = []
        for token in related_ds[rel_type].keys():
            if current_dataset in related_ds[rel_type][token]:
                related_ds[rel_type][token].remove(current_dataset)
            else:
                to_delete.append(token)
        for token in to_delete:
            del related_ds[rel_type][token]

    exp = EXPIRATION_CACHED  # 30D
    key = f'distrquery:{current_dataset}'
    with red.pipeline() as pipe:
        pipe.set(key, json.dumps(related_ds))
        pipe.sadd('purgeable', key)
        pipe.expire(key, exp)
        pipe.execute()
Python
def retrieve_size_stats(red):
    """Load sizes from redis and calculate some statistics about them."""
    lst = sorted([int(x) for x in red.lrange('stat:size', 0, -1)])
    try:
        mode = statistics.mode(lst)
    except statistics.StatisticsError:
        mode = None
    try:
        mean = statistics.mean(lst)
    except statistics.StatisticsError:
        mean = None
    try:
        stdev = statistics.stdev(lst, mean)
    except statistics.StatisticsError:
        stdev = None
    try:
        var = statistics.variance(lst, mean)
    except statistics.StatisticsError:
        var = None

    try:
        minimum = min(lst)
    except ValueError:
        minimum = None

    try:
        maximum = max(lst)
    except ValueError:
        maximum = None

    return {
        'min': convert_size(minimum),
        'max': convert_size(maximum),
        'mean': convert_size(mean),
        'mode': convert_size(mode),
        'stdev': convert_size(stdev),
        'var': var
    }
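The try/except blocks above exist because the statistics module raises StatisticsError on empty input, while min/max raise ValueError. A quick standalone illustration of that behaviour (the empty list is just a stand-in for an empty 'stat:size' entry):

import statistics

sizes = []  # e.g. nothing has been recorded yet
try:
    mean = statistics.mean(sizes)
except statistics.StatisticsError:
    mean = None

try:
    minimum = min(sizes)
except ValueError:
    minimum = None

print(mean, minimum)  # -> None None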
Python
def sync_chat(request, chat_id):
    """
    View checks new messages for the logged in user within a Chat
    session with a given chat_id, renders them and returns them as a
    JSON encoded list.
    """
    chat = get_object_or_404(Chat, id=chat_id)

    # We store last sync timestamp to make sure the user receives
    # all the messages since the last sync.
    now = datetime.now()
    timestamp = request.session.get(chat_id, now)
    request.session[chat_id] = now

    messages = chat.messages.exclude(author=request.user).filter(created__gt=timestamp)
    return json_response(map(lambda message: render_to(request,
                                                       "im/message.html",
                                                       {"message": message}),
                             messages))
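Outside Django, the "only return items newer than the previous sync" idea reduces to remembering a timestamp per key. A minimal sketch with a plain dict standing in for request.session; all names and dates here are illustrative only.

from datetime import datetime

session = {}            # stand-in for request.session
chat_id = 42

# First sync: default to "now", so nothing older is returned.
now = datetime(2024, 1, 1, 12, 0, 0)
last_sync = session.get(chat_id, now)
session[chat_id] = now

messages = [
    {'created': datetime(2024, 1, 1, 11, 0, 0), 'text': 'before the sync'},
    {'created': datetime(2024, 1, 1, 12, 30, 0), 'text': 'after the sync'},
]
fresh = [m for m in messages if m['created'] > last_sync]
print([m['text'] for m in fresh])   # ['after the sync']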
Python
def send_message(request, chat_id):
    """
    View sends message from the logged in user to the Chat session
    with a given chat_id and returns a JSON encoded hash code for the
    newly created message.
    """
    chat = get_object_or_404(Chat, id=chat_id)
    text, event = request.POST.get("text"), request.POST.get("event", "")
    if text or event.isdigit():
        try:
            event = int(event)
        except ValueError:
            event = None

        message = Message(chat=chat, author=request.user,
                          text=text, event=event,
                          created=datetime.now())
        message.save()
        return json_response(render_to(request,
                                       "im/message.html",
                                       {"message": message}))

    # If there's no text to send or POST data contains a non-digit
    # event code (which is irrelevant, because EVENT_CHOICES should
    # be a list), or send() didn't return a hash code, the response
    # is HttpResponseBadRequest.
    return HttpResponseBadRequest("Empty message or unknown event code.")
Python
def sync_chatbox(request, target):
    """
    Depending on the target argument value, the view either returns
    new chat requests for the logged in user or a list of online
    users, provided by the useractivity app.
    """
    if target == "chat_requests":
        now = datetime.now()

        # Checking for the timestamp in session data, explanations
        # are below.
        timestamp = request.session.get("im:chat_requests:sync", now)
        chat_requests = ChatRequest.objects.incoming(request.user, timestamp,)
        data = map(lambda chat_request: render_to(request,
                                                  "im/chat_request.html",
                                                  {"chat_request": chat_request}),
                   chat_requests)

        # Saving last check timestamp in session data, so we can later
        # determine which requests were already sent to the browser.
        request.session["im:chat_requests:sync"] = now
    elif target == "online_users":
        friends = friend_set_for(request.user)
        online_users = get_online_users()

        if request.user in online_users:
            online_users.remove(request.user)

        online_friends = online_users & friends
        online_others = online_users - friends
        data = render_to(request, "im/userlist.html",
                         {"friends": online_friends,
                          "others": online_others})
    return json_response(data)
Python
def request_chat(request, user_id):
    """
    View creates a chat request from the logged in user to the user
    with a given id. If there's an active request for this pair of
    users, the view returns a bad request response, otherwise a JSON
    encoded success message is returned.
    """
    kwargs = {"user_to": get_object_or_404(User, id=user_id),
              "user_from": request.user}
    if not ChatRequest.objects.sent(**kwargs):
        chat_request = ChatRequest(created=datetime.now(), **kwargs)
        chat_request.save()
        return json_response(render_to(request,
                                       "im/chat_request.html",
                                       {"chat_request": chat_request}))
    return HttpResponseBadRequest("Duplicate request.")
Python
def accept_chat(request, chat_request_id):
    """
    View accepts chat request with a given id, creates a Chat instance
    for chat request sender and receiver and returns a JSON encoded
    url for the newly created chatroom.
    """
    chat_request = get_object_or_404(ChatRequest, id=chat_request_id)
    chat_request.accept()

    chat = Chat(request=chat_request, created=datetime.now())
    chat.save()
    chat.users.add(chat_request.user_to, chat_request.user_from)
    return json_response(reverse("im_show_chat", args=[chat.id]))
Python
def decline_chat(request, chat_request_id):
    """
    View declines chat request with a given id and returns an empty
    response.
    """
    chat_request = get_object_or_404(ChatRequest, id=chat_request_id)
    chat_request.decline()
    return json_response()
Python
def emoticons(value):
    """
    Filter substitutes all text emoticons by the appropriate images.

    For the filter to work you need three variables set in settings.py
    * IM_EMOTICONS_ROOT: absolute path to the directory with emoticons
    * IM_EMOTICONS_URL: relative url for the directory with emoticons
    * IM_EMOTICONS_THEME: a name of the theme directory, which must
      contain an __init__.py file.

    More detailed description can be found in
    """
    for name, pattern in IM_EMOTICONS_THEME.iteritems():
        value = pattern.sub(
            lambda match: IM_EMOTICONS_TEMPLATE % (name, match.group()), value)
    return value
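A self-contained illustration of the substitution idea, using a made-up theme dict and image template rather than the real IM_EMOTICONS_* settings (and dict.items(), since the snippet targets Python 3):

import re

# Hypothetical stand-ins for IM_EMOTICONS_THEME and IM_EMOTICONS_TEMPLATE
THEME = {'smile': re.compile(r':\)'), 'sad': re.compile(r':\(')}
TEMPLATE = '<img class="emoticon" alt="%s" title="%s">'

def emoticons(value):
    # Replace every matched emoticon with an image tag named after the pattern
    for name, pattern in THEME.items():
        value = pattern.sub(lambda match: TEMPLATE % (name, match.group()), value)
    return value

print(emoticons('hello :)'))
# hello <img class="emoticon" alt="smile" title=":)">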
Python
def incoming(self, user, timestamp):
    """
    Method returns a list of incoming chat requests for a given user,
    created after the given timestamp. This includes:
    * chat requests, sent by a given user, either accepted or declined,
    * chat requests, sent to a given user from other users.
    """
    return self.get_query_set().filter(
        models.Q(created__gt=timestamp) & (
            models.Q(user_from=user,
                     state__in=[STATE_ACCEPTED, STATE_DECLINED]) |
            models.Q(user_to=user, state=STATE_SENT))
        )
Python
def sent(self, **kwargs):
    """
    Method returns a list of active chat requests, which don't have
    a related chat attached. A chat request is considered active if
    it is created within the expiration interval, defined by the
    IM_REQUESTS_EXPIRE_IN option in the settings module.
    """
    timestamp = datetime.now() - settings.IM_REQUESTS_EXPIRE_IN
    return self.get_query_set().filter(state=STATE_SENT,
                                       created__gt=timestamp,
                                       chat=None,
                                       **kwargs)
Python
def run(self):
    """Run method that performs all the real work"""

    # Create the dialog with elements (after translation) and keep reference
    # Only create GUI ONCE in callback, so that it will only load when the plugin is started
    if self.first_start:
        self.first_start = False
        self.dlg = Nuevo_nidoDialog()
        self.dlg.cmbnido.currentTextChanged.connect(self.event_estadochange)

    mapa = self.iface.mapCanvas()
    self.dlg.spbcoorx.setValue(mapa.center().x())
    self.dlg.spbcoory.setValue(mapa.center().y())
    self.dlg.QTDate.setDate(QDate.currentDate())

    capas_formulario = []
    for lyr in mapa.layers():
        capas_formulario.append(lyr.name())

    capas_faltantes = []
    if "Avistamientos" not in capas_formulario:
        capas_faltantes.append("Avistamientos")
    if "Buffer Nidos" not in capas_formulario:
        capas_faltantes.append("Buffer Nidos")

    if capas_faltantes:
        msg = "Faltan las siguientes capas en el proyecto para seguir"
        for lyrs in capas_faltantes:
            msg += f"\n {lyrs}"
        QMessageBox.critical(self.dlg, "capas faltantes", msg)
        return

    QMessageBox.information(self.dlg, "Mensaje", "Esto debe correr siempre")

    # show the dialog
    self.dlg.show()
    # Run the dialog event loop
    result = self.dlg.exec_()
    # See if OK was pressed
    if result:
        QMessageBox.information(self.dlg, "Mensaje", "Esto debe correr una vez si presiona ok")

        lry_avistamientos = QgsProject.instance().mapLayersByName("Avistamientos")[0]
        lry_buffers = QgsProject.instance().mapLayersByName("Buffer Nidos")[0]
        lry_buffer_lineal = QgsProject.instance().mapLayersByName("Buffer Lineal")[0]

        indice = lry_avistamientos.fields().indexOf("nidoID")
        indice_max = lry_avistamientos.maximumValue(indice) + 1

        coorx = self.dlg.spbcoorx.value()
        coory = self.dlg.spbcoory.value()
        valor_especie = self.dlg.cmbespecie.currentText()
        valor_nido = self.dlg.cmbnido.currentText()
        valor_buffer = self.dlg.spbbuffer.value()
        valor_fecha = self.dlg.QTDate.date()

        feature_nido = QgsFeature(lry_avistamientos.fields())
        feature_nido.setAttribute("id", indice_max)
        feature_nido.setAttribute("coord_x", coorx)
        feature_nido.setAttribute("coord_y", coory)
        feature_nido.setAttribute("fecha", valor_fecha)
        feature_nido.setAttribute("especie", valor_especie)
        feature_nido.setAttribute("estado", valor_nido)
        feature_nido.setAttribute("dist_buf", valor_buffer)
        feature_nido.setAttribute("nidoID", indice_max)

        geom = QgsGeometry(QgsPoint(coorx, coory))
        feature_nido.setGeometry(geom)

        pr = lry_avistamientos.dataProvider()
        pr.addFeatures([feature_nido])
        lry_avistamientos.reload()

        pr = lry_buffers.dataProvider()
        buffer = geom.buffer(valor_buffer, 10)
        feature_nido.setGeometry(buffer)
        pr.addFeatures([feature_nido])
        lry_buffers.reload()

        tabla_impactos = tabla_dialog()
        bb = buffer.boundingBox()
        lineal = lry_buffer_lineal.getFeatures(bb)
        for linea in lineal:
            idvar = linea.attribute("Proyecto")
            idtipo = linea.attribute("Tipo")
            idbuffer = linea.geometry().distance(geom)
            if idbuffer < valor_buffer:
                fila = tabla_impactos.tbl_impactos.rowCount()
                tabla_impactos.tbl_impactos.insertRow(fila)
                tabla_impactos.tbl_impactos.setItem(fila, 0, QTableWidgetItem(str(idvar)))
                tabla_impactos.tbl_impactos.setItem(fila, 1, QTableWidgetItem(idtipo))
                tabla_impactos.tbl_impactos.setItem(fila, 2, QTableWidgetItem(f"{idbuffer:.2f}"))

        tabla_impactos.tbl_impactos.sortItems(2)
        tabla_impactos.show()
        tabla_impactos.exec_()
    else:
        QMessageBox.information(self.dlg, "Mensaje", "Esto debe correr una vez si presiona cancel o exit")
Python
def pprint(*s, output=True):
    """Hack to make for more informative print statements."""
    f = inspect.stack()[1][1].split('/')[-1]
    m = '{:13.13} |'.format(f)

    if output:
        print(m, *s)
    else:
        lines = []
        for e in s:
            line = [str(m) + ' ' + str(f) for f in e.split('\n')]
            lines.append('\n'.join(line))
        return '\n'.join(lines)
Python
def radec_to_lb(ra, dec, frac=False):
    """
    Convert from ra, dec to galactic coordinates.

    Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
    Bradley W. Carroll, Dale A. Ostlie (Eq. 24.16 onwards).

    NOTE: This function is not as accurate as the astropy conversion, nor as
    the Javascript calculators found online. However, as using astropy was
    prohibitively slow while running over large populations, we use this
    function. While this function is not as accurate, the under/over
    estimations of the coordinates are equally distributed meaning the errors
    cancel each other in the limit of large populations.

    Args:
        ra (string): Right ascension given in the form '19:06:53'
        dec (string): Declination given in the form '-40:37:14'
        frac (bool): Denote whether coordinates are already fractional or not

    Returns:
        gl, gb (float): Galactic longitude and latitude [fractional degrees]
    """
    if not frac:
        ra, dec = frac_deg(ra, dec)

    a = np.radians(ra)
    d = np.radians(dec)

    # Coordinates of the galactic north pole (J2000)
    a_ngp = np.radians(12.9406333 * 15.)
    d_ngp = np.radians(27.1282500)
    l_ngp = np.radians(123.9320000)

    sd_ngp = np.sin(d_ngp)
    cd_ngp = np.cos(d_ngp)
    sd = np.sin(d)
    cd = np.cos(d)

    # Calculate galactic longitude
    y = cd*np.sin(a - a_ngp)
    x = cd_ngp*sd - sd_ngp*cd*np.cos(a - a_ngp)
    gl = - np.arctan2(y, x) + l_ngp
    gl = np.degrees(gl) % 360

    # Shift so in range -180 to 180
    if isinstance(gl, np.ndarray):
        gl[gl > 180] = -(360 - gl[gl > 180])
    else:
        if gl > 180:
            gl = -(360 - gl)

    # Calculate galactic latitude
    gb = np.arcsin(sd_ngp*sd + cd_ngp*cd*np.cos(a - a_ngp))
    gb = np.degrees(gb) % 360

    if isinstance(gb, np.ndarray):
        gb[gb > 270] = -(360 - gb[gb > 270])
    else:
        if gb > 270:
            gb = -(360 - gb)

    return gl, gb
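A standalone check of the same formulas for a single coordinate already in fractional degrees (so frac_deg is not needed); the input values are arbitrary and only roughly correspond to the '19:06:53', '-40:37:14' example from the docstring. This re-derives the scalar case inline rather than calling the function above.

import numpy as np

# Arbitrary test coordinate in fractional degrees (ra, dec)
ra, dec = 286.7208, -40.6206

a, d = np.radians(ra), np.radians(dec)
a_ngp = np.radians(12.9406333 * 15.)
d_ngp = np.radians(27.1282500)
l_ngp = np.radians(123.9320000)

# Galactic longitude
y = np.cos(d) * np.sin(a - a_ngp)
x = np.cos(d_ngp) * np.sin(d) - np.sin(d_ngp) * np.cos(d) * np.cos(a - a_ngp)
gl = np.degrees(-np.arctan2(y, x) + l_ngp) % 360
if gl > 180:
    gl = -(360 - gl)

# Galactic latitude
gb = np.degrees(np.arcsin(np.sin(d_ngp) * np.sin(d)
                          + np.cos(d_ngp) * np.cos(d) * np.cos(a - a_ngp))) % 360
if gb > 270:
    gb = -(360 - gb)

print(f'gl = {gl:.3f} deg, gb = {gb:.3f} deg')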
Python
def url_to_df(self, url):
    """Convert a url of a JSON table to a Pandas DataFrame.

    Args:
        url (str): URL to the webpage

    Returns:
        DataFrame: DataFrame of JSON table
    """
    try:
        s = requests.get(url).content
        f = io.StringIO(s.decode('utf-8'))

        series = []
        for entry in pd.read_json(f)['products']:
            series.append(pd.Series(entry))
        df = pd.concat(series, axis=1).T

        return df
    except ValueError:
        pass
Python
def urls_to_df(self, endings, url):
    """
    Use Series to loop over multiple webpages.

    Proceed to concatenate them to a single DataFrame.

    Args:
        endings (iterables): The list/series/column over which to loop
        url (str): The base url

    Returns:
        DataFrame
    """
    dfs = []
    for ending in endings:
        full_url = str(url) + str(ending)
        df = self.url_to_df(full_url)
        if isinstance(df, pd.DataFrame):
            dfs.append(df)
    if dfs:
        return pd.concat(dfs, ignore_index=True)
    else:
        return None
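A self-contained sketch of the same JSON-to-DataFrame-and-concatenate pattern without any network access; the 'products' layout below is an assumption that mirrors what url_to_df appears to expect, and all values are invented.

import io
import pandas as pd

# Two fake JSON payloads standing in for two webpages
pages = [
    '{"products": {"a": {"name": "FRB1", "dm": 500}, "b": {"name": "FRB2", "dm": 800}}}',
    '{"products": {"c": {"name": "FRB3", "dm": 300}}}',
]

dfs = []
for payload in pages:
    table = pd.read_json(io.StringIO(payload))
    series = [pd.Series(entry) for entry in table['products']]
    dfs.append(pd.concat(series, axis=1).T)   # one row per product

combined = pd.concat(dfs, ignore_index=True)
print(combined)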
Python
def filter(self,
           one_offs=True,
           repeaters=True,
           repeat_bursts=False,
           one_entry_per_frb=True):
    """Filter frbcat in various ways."""
    if one_entry_per_frb is True:
        # Only keep rows with the largest number of parameters
        # so that only one row per detected FRB remains
        self.df['count'] = self.df.count(axis=1)
        self.df = self.df.sort_values('count', ascending=False)
        self.df = self.df.drop_duplicates(subset=['utc'])

    if one_offs is False:
        # Only keep repeaters
        self.df = self.df[self.df.duplicated(['frb_name'])]

    if repeaters is False:
        # Drops any repeater sources
        self.df = self.df.drop_duplicates(subset=['frb_name'], keep=False)

    if repeat_bursts is False:
        # Only keeps one detection of repeaters
        self.df = self.df.sort_values('utc')
        self.df = self.df.drop_duplicates(subset=['frb_name'], keep='first')

    self.df = self.df.sort_index()
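To see what the repeat-burst filtering does, here is a toy DataFrame with one repeating source; the column names follow the frbcat-style fields used above, but the values are invented.

import pandas as pd

df = pd.DataFrame({
    'frb_name': ['FRB A', 'FRB B', 'FRB B'],
    'utc':      ['2019-01-01', '2019-02-01', '2019-03-01'],
})

# repeat_bursts=False: keep only the earliest detection of each source
one_per_source = df.sort_values('utc').drop_duplicates(subset=['frb_name'], keep='first')
print(one_per_source)

# repeaters=False: drop every source that appears more than once
no_repeaters = df.drop_duplicates(subset=['frb_name'], keep=False)
print(no_repeaters)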
Python
def forward(self, x, lengths):
    """
    This is the heart of the model. This function defines how the data
    passes through the network.

    Returns: the logits for each class
    """
    # encode
    # 1 - embed the words, using the embedding layer
    embeddings = self.embedding(x)

    # 2 - construct a sentence representation out of the word embeddings
    representations = self.mean_pooling(embeddings, lengths)

    # 3 - transform the representations to new ones.
    representations = self.non_linearity1(representations)

    # 4 - project the representations to classes using a linear layer
    logits = self.classifier(representations)  # EX6

    return logits
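A small, self-contained sketch of the embed-then-mean-pool step for padded batches. This is not the course's actual mean_pooling implementation; the layer sizes and the zero-padding assumption (padding_idx=0) are illustrative choices.

import torch
import torch.nn as nn

vocab_size, emb_dim = 10, 4
embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)

# Batch of 2 padded sequences with true lengths 3 and 2
x = torch.tensor([[1, 2, 3, 0], [4, 5, 0, 0]])
lengths = torch.tensor([3, 2])

embeddings = embedding(x)                                # (batch, seq_len, emb_dim)
summed = embeddings.sum(dim=1)                           # padding rows are zero vectors
representations = summed / lengths.unsqueeze(1).float()  # divide by true lengths
print(representations.shape)                             # torch.Size([2, 4])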
Python
def forward(self, x, lengths):
    """
    This is the heart of the model. This function defines how the data
    passes through the network.

    Returns: the logits for each class
    """
    # 1 - embed the words, using the embedding layer
    # Input:  batch_size x seq_len x 1
    # Output: batch_size x seq_len x embedding_dim
    embeddings = self.embedding(x)

    #####################
    #    Question 1     #
    #####################
    ############ 1.1 ###############  (also requires: self.classifier = nn.Linear(in_features=40, out_features=output_size))
    #representations_mean = self.mean_pooling(embeddings, lengths)
    #representations_max = self.max_pooling(embeddings)
    #representations = torch.cat((representations_mean, representations_max), 1)
    #representations = self.non_linearity1(representations)

    ############ 1.2 ###############
    #representations_mean = self.mean_pooling(embeddings, lengths)
    #representations = representations_mean

    ###########################
    #   Question 2 - LSTM     #
    ###########################
    ############ 2.1 ##############
    # Input: batch_size x seq_len x embedding_dim
    # representations_LSTM: batch_size x seq_len x embedding_dim
    # hn: batch_size x embedding_dim
    #representations_LSTM, (hn, cn) = self.rnn(embeddings)
    # Input:  batch_size x seq_len x embedding_dim
    # Output: batch_size x embedding_dim
    #representations_LSTM = self.last_timestep(representations_LSTM, lengths)
    #representations = representations_LSTM

    ############ 2.2 ###############  (also requires: self.classifier = nn.Linear(in_features=3*emb_dim, out_features=output_size))
    #representations_LSTM, (hn, cn) = self.rnn(embeddings)
    #representations_mean = self.mean_pooling(representations_LSTM, lengths)
    #representations_max = self.max_pooling(representations_LSTM)
    #representations_LSTM = self.last_timestep(representations_LSTM, lengths)
    #representations = torch.cat((representations_mean, representations_max, representations_LSTM), 1)

    ##############################
    #  Question 3 - Attention    #
    ##############################
    ############ 3.1 ###############
    #representations, attentions = self.attention(embeddings, lengths)

    ############ 3.2 ###############
    #representations_LSTM, (hn, cn) = self.rnn(embeddings)
    #representations, attentions = self.attention(representations_LSTM, lengths)

    ##############################
    # Question 4 - Bidirectional #
    ##############################
    ############ 4.1 ###############
    #representations_LSTM, (hn, cn) = self.bi_rnn(embeddings)
    #representations_mean = self.mean_pooling(representations_LSTM, lengths)
    #representations_max = self.max_pooling(representations_LSTM)
    #hidden_size = 20
    #representations_LSTM_fw = representations_LSTM[:, :, :hidden_size]
    #representations_LSTM_bw = representations_LSTM[:, :, hidden_size:]
    #representations_LSTM_fw = self.last_timestep(representations_LSTM_fw, lengths)
    #representations_LSTM_bw = self.last_timestep(representations_LSTM_bw, lengths)
    #representations = torch.cat((representations_mean, representations_max, representations_LSTM_fw, representations_LSTM_bw), 1)

    ############ 4.2 ###############  (also requires: self.classifier = nn.Linear(in_features=2*emb_dim, out_features=output_size))
    hidden_size = 20
    representations_LSTM, (hn, cn) = self.bi_rnn(embeddings)
    representations_LSTM_fw = representations_LSTM[:, :, :hidden_size]
    representations_LSTM_bw = representations_LSTM[:, :, hidden_size:]
    representations_LSTM_fw, attentions_fw = self.attention(representations_LSTM_fw, lengths)
    representations_LSTM_bw, attentions_bw = self.attention(representations_LSTM_bw, lengths)
    representations = torch.cat((representations_LSTM_fw, representations_LSTM_bw), 1)

    # 4 - project the representations to classes using a linear layer
    logits = self.classifier(representations)  # EX6
    # Input:  batch_size x embedding_dim
    # Output: batch_size x 3

    return logits
    #return logits, attentions
Python
def validate_capacities(self, chancaps):
    """
    This function allows more granular selection of candidates
    than total or average capacity.
    """
    for tierfilter in self.minchanstiers:
        if 'k' in tierfilter:
            ksize, mincount = tierfilter.split('k')
            size = int(ksize) * 1e3
        elif 'M' in tierfilter:
            Msize, mincount = tierfilter.split('M')
            size = int(Msize) * 1e6
        else:
            raise RuntimeError('No recognized separator in minchannel filter')

        if sum((c >= size for c in chancaps)) < int(mincount):
            return False

    return True
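To make the filter format concrete, here is a standalone run of the same parsing logic over made-up capacities. A tier string such as '500k2' is read as "at least 2 channels of at least 500,000"; that reading is inferred from the code above rather than stated in it.

def validate_capacities(chancaps, minchanstiers):
    for tierfilter in minchanstiers:
        if 'k' in tierfilter:
            ksize, mincount = tierfilter.split('k')
            size = int(ksize) * 1e3
        elif 'M' in tierfilter:
            Msize, mincount = tierfilter.split('M')
            size = int(Msize) * 1e6
        else:
            raise RuntimeError('No recognized separator in minchannel filter')
        if sum(c >= size for c in chancaps) < int(mincount):
            return False
    return True

caps = [2_000_000, 750_000, 400_000]
print(validate_capacities(caps, ['500k2']))   # True: two channels >= 500k
print(validate_capacities(caps, ['1M2']))     # False: only one channel >= 1M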
Python
def autoload(cls, expirehours=8):
    """Intelligently load from a json file or node"""
    # Check for json, check age
    graphfilename = 'describegraph.json'
    if os.path.isfile(graphfilename):
        mtime = os.path.getmtime(graphfilename)

        # if expired, warn and exit
        if expirehours:
            if time.time() - mtime > expirehours * 60 * 60:
                print(graphfilename, 'was found but is more than 8 hours old')
                print('Please update it or delete to attempt fetching from lnd')
                exit()

        return cls.fromjson()
    else:
        # fromconfig will create and exit if the config is missing
        ni = NodeInterface.fromconfig()

        # else load from lnd
        print('Fetching graph data from lnd')
        return cls.fromlnd(lndnode=ni)
Python
def channels(self, nodeid):
    """Return channels for a node, including redundants"""
    channels = []
    for peerkey in self.adj[nodeid]:
        mainchan = self.edges[peerkey, nodeid].copy()
        redundants = mainchan['redundant_edges']
        del mainchan['redundant_edges']
        channels.append(mainchan)
        channels.extend(redundants)
    return channels
Python
def nx2ig(nxgraph):
    """
    Convert networkx graph to igraph

    For centrality we only care about keys and channels.
    igraph uses integer indexing, so a map will also be created.
    """
    nxnodekeys = nxgraph.nodes
    nxnodemap = {nk: i for i, nk in enumerate(nxnodekeys)}

    ig = igraph.Graph()
    ig.add_vertices(len(nxnodekeys))
    igedges = [(nxnodemap[nk1], nxnodemap[nk2]) for nk1, nk2 in nxgraph.edges]
    ig.add_edges(igedges)

    return ig, nxnodemap
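A usage sketch: build a tiny networkx graph and convert it, assuming the nx2ig defined above is in scope and that both networkx and python-igraph are installed. The node names are invented.

import networkx as nx

nxg = nx.Graph()
nxg.add_edges_from([('alice', 'bob'), ('bob', 'carol')])

ig, nodemap = nx2ig(nxg)                  # nx2ig as defined above
print(ig.vcount(), ig.ecount())           # 3 2
print(ig.betweenness()[nodemap['bob']])   # 1.0 - 'bob' bridges the other two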
Python
def __generate_config_file(self, target_name):
    """
    Generates the config file in conf.d/

    :param target_name: Target for which config file should be created
    :return: None
    """
    config = open(
        os.path.join(self.TGT_ISCSI_CONFIG, target_name + ".conf"), 'w')
    template_loc = os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".."))
    for line in open(os.path.join(template_loc, "tgt_target.temp"), 'r'):
        line = line.replace('${target_name}', target_name)
        line = line.replace('${ceph_user}', self.fs_user)
        line = line.replace('${ceph_config}', self.fs_config_loc)
        line = line.replace('${pool}', self.fs_pool)
        config.write(line)
    config.close()
Python
def list_targets(self):
    """
    Lists all the targets available by querying tgt-admin

    :return: list of target names
    """
    try:
        command = "tgt-admin -s"
        output = shell.call(command, sudo=True)
        logger.debug("Output = %s", output)
        formatted_output = output.split("\n")
        target_list = [target.split(":")[1].strip()
                       for target in formatted_output
                       if re.match("^Target [0-9]+:", target)]
        return target_list
    except shell_exceptions.CommandFailedException as e:
        raise iscsi_exceptions.ListTargetFailedException(str(e))
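The list-comprehension parsing above can be exercised offline against a canned dump; the sample text below is invented to resemble `tgt-admin -s` output, not captured from a real run.

import re

output = """Target 1: iqn.2024-01.example.vol-a
    System information:
    ...
Target 2: iqn.2024-01.example.vol-b
"""

formatted_output = output.split("\n")
target_list = [target.split(":")[1].strip()
               for target in formatted_output
               if re.match("^Target [0-9]+:", target)]
print(target_list)   # ['iqn.2024-01.example.vol-a', 'iqn.2024-01.example.vol-b']
# note: a target name containing ':' would be cut at its first colon by split(":")[1]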