{ "source": "jnewblanc/sog", "score": 2 }
#### File: sog/sog/character.py ```python from datetime import datetime import inflect import os import pprint import random import re from common.attributes import AttributeHelper from common.ioLib import TestIo from common.item import Item from common.inventory import Inventory from common.general import getNeverDate, differentDay, dLog, secsSinceDate from common.general import getRandomItemFromList, truncateWithInt from common.general import logger from common.globals import DATADIR from common.storage import Storage from object import Weapon from magic import SpellList class Character(Item): """ Character class """ _instanceDebug = False attributesThatShouldntBeSaved = [ "client", "_currentlyAttacking", "_instanceDebug", "_lastAttackDate", "_lastInputDate", "_lastLoginDate", "_lastRegenDate", "_roomObj", "_secondsUntilNextAttack", "_spoolOut", "_vulnerable", ] # int attributes intAttributes = [ "_expToNextLevel", "_level", "_maxhp", "_hp", "_maxmana", "_mana", "_limitedSpellsLeft", "_broadcastLimit", "_slash", "_bludgeon", "_pierce", "_magic", "_dodge", "_coins", "_ac", "_weenykills", "_matchedkills", "_valiantkills", "_epickills", "_playerkills", "_bankBalance", "_taxesPaid", "_bankFeesPaid", "_dodgeBonus", ] # boolean attributes boolAttributes = [ "_achievedSkillForLevel", "_poisoned", "_plagued", "_evil", "_invisible", "_nonexistent", "_playtester", "_hidden", ] # string attributes strAttributes = ["_name", "_classname", "_gender", "_alignment"] # list attributes listAttributes = ["_knownSpells", "_doubleUpStatLevels"] # obsolete attributes (to be removed) obsoleteAtt = [ "_money", "_heal", "_maxspellpoints", "_spellpoints", "_hitpoints", "_maxhitpoints", "roomObj", "_statsearnedlastlevel", ] attributeInfo = {} wizardAttributes = ["_name", "_classname", "_gender", "_alignment"] genderDict = { 0: { "name": "male", "pronoun": "him", "possisivepronoun": "his", "bonusStats": ["strength", "constitution"], }, 1: { "name": "female", "pronoun": "her", "possisivepronoun": "her", "bonusStats": ["charisma", "intelligence"], }, 2: { "name": "fluid", "pronoun": "them", "possisivepronoun": "their", "bonusStats": ["piety", "dexterity"], }, } classDict = { 0: { "name": "fighter", "desc": "Master of combat, skilled in weapondry", "pros": "Attack/Defense Bonuses", "cons": "Poor Magic Use", "doubleUpStatLevels": [2, 9], "bonusStats": ["strength", "constitution"], "penaltyStats": ["intelligence", "piety"], "baseDamage": 2, "baseHealth": 23, "baseMagic": 2, "identifyLevel": 10, }, 1: { "name": "rogue", "desc": "A scoundrel fluent in stealth and trickery", "pros": "Hiding/Defense Bonuses", "cons": "Poor Attack", "doubleUpStatLevels": [3, 8], "bonusStats": ["dexterity", "charisma"], "penaltyStats": ["strength", "piety"], "baseDamage": 1, "baseHealth": 18, "baseMagic": 8, "identifyLevel": 8, }, 2: { "name": "mage", "desc": "A vulnerable and powerful scholarly spellcaster", "pros": "Spell abilities and Bonuses", "cons": "Can not use metal armor", "doubleUpStatLevels": [2, 8], "bonusStats": ["intelligence", "intelligence"], "penaltyStats": ["strength", "strength"], "baseDamage": 0, "baseHealth": 14, "baseMagic": 13, "identifyLevel": 5, }, 3: { "name": "cleric", "desc": "Healer and servant of higher powers", "pros": "Healing Abilities and Bonuses + Undead Turning", "cons": "Can not use bladed weapons", "doubleUpStatLevels": [4, 6], "bonusStats": ["piety", "piety"], "penaltyStats": ["strength", "dexterity"], "baseDamage": 0, "baseHealth": 16, "baseMagic": 11, "identifyLevel": 8, }, 4: { "name": "ranger", 
"desc": "A rough and wild hunter ", "pros": "Minor Defense Bonuses & Spell Abilities", "cons": "Poor Charisma", "doubleUpStatLevels": [4, 7], "bonusStats": ["dexterity", "intelligence"], "penaltyStats": ["charisma", "charisma"], "baseDamage": 1, "baseHealth": 17, "baseMagic": 9, "identifyLevel": 7, }, 5: { "name": "paladin", "desc": "A righteous fighter who hunts the forces of evil", "pros": "Minor Attack Bonuses, Healing, and Undead Turning", "cons": "Must be lawful, can not steal", "doubleUpStatLevels": [3, 6], "bonusStats": ["charisma", "piety"], "penaltyStats": ["intelligence", "constitution"], "baseDamage": 2, "baseHealth": 20, "baseMagic": 5, "identifyLevel": 9, }, } # end classDict classList = ["fighter", "rogue", "mage", "cleric", "ranger", "paladin"] genderList = ["male", "female", "fluid"] alignmentList = ["lawful", "neutral", "chaotic"] statList = [ "strength", "dexterity", "intelligence", "piety", "charisma", "constitution", "luck", ] skillDict = { "_slash": "swords and axes come easily to you", "_bludgeon": "hammers and maces are an extension of your arms", "_pierce": "you gravitate toward daggers and spears", "_magic": "an inner confidence that enhances spells", "_dodge": "being quick on your feet helps avoid blows", } def __init__(self, client=None, acctName=""): self.client = client self._acctName = acctName super().__init__() Storage.__init__(self) Inventory.__init__(self) self.setName("") if self.client: self._spoolOut = self.client.spoolOut else: testIo = TestIo() self._spoolOut = testIo.spoolOut self._dm = False self.setPromptSize("full") # set base status for onestat in self.statList: setattr(self, onestat, 8) self._expToNextLevel = 2 ** 9 self._level = 1 self._maxhp = 10 self._maxmana = 10 self._hp = 10 self._mana = 10 self._maxitems = 12 self._classname = "fighter" # skills are percentages, one of which can go up each level self.initializeStats() self._achievedSkillForLevel = False # Daily limits # Some spells, collectively, can only be cast X times per calendar day # need a way to reset this self._limitedSpellsLeft = 5 self._broadcastLimit = 5 # guilds offer benefits - must pay daily dues # Need way to collect dues or to boot player out of guild self._guild = "" self._guildJoinDate = getNeverDate() self._dailyGuildDues = "" self._totalGuildPayments = "" self._lastGuildPayment = getNeverDate() self._taxesPaid = 0 self._bankFeesPaid = 0 # if piety gets too low, neutral creatures will attack on sight and # shop keepers will not sell to you. 
self._poisoned = False # slowly lose hp self._plagued = False # hp will not regen & skill bonuses are ignored # hidden stats self._evil = False self._invisible = False self._nonexistent = False self._playtester = False self._bankBalance = 0 self._coins = 20 self._knownSpells = [] self._doubleUpStatLevels = [] self._creationDate = datetime.now() self._lastLogoutDate = getNeverDate() self._lastPoisonDate = getNeverDate() self._lastLoginDate = datetime.now() self._playtester = False self._ac = 0 # Each point is 5% damage reduction self._dodgeBonus = 0 # Percent - Extra chance of not being hit self._weenykills = 0 # kills below current level self._matchedkills = 0 # kills at current level self._valiantkills = 0 # kills above current level self._epickills = 0 # special kills self._playerkills = 0 # player kills self._turnkills = 0 # kills from turning self.resetTmpStats() self.resetDailyStats() self._instanceDebug = Character._instanceDebug return None def __str__(self): return "Character " + self.getName() + " of account " + str(self._acctName) def debug(self): return pprint.pformat(vars(self)) def toggleInstanceDebug(self): self._instanceDebug = not self._instanceDebug def setInstanceDebug(self, val): self._instanceDebug = bool(val) def getInstanceDebug(self): return self._instanceDebug def initializeStats(self, num=0): for onestat in self.skillDict.keys(): setattr(self, onestat, 0) def login(self): """ Login to game with a particular character * Return True if character was created or loaded """ buf = "" if self.selectCharacter(): pStr = __class__.__name__ + ".login: " charName = str(self.getName()) dLog("Attemping login for " + charName, self._instanceDebug) # Import existing character if self.load(logStr=__class__.__name__): dLog( pStr + "Character " + charName + " loaded for " + self._acctName, self._instanceDebug, ) else: dLog( pStr + "Character " + charName + " could not be loaded for " + self._acctName + " - New character?", self._instanceDebug, ) if self.create(charName): dLog( pStr + "Character " + charName + " created for " + self._acctName, self._instanceDebug, ) self.client.acctObj.addCharacterToAccount(charName) self.client.acctObj.save() else: buf = ( "Character " + charName + " could not be created for " + self._acctName ) dLog(pStr + buf, self._instanceDebug) self._spoolOut(buf + "\n") return False if not self.isValid(): buf = "Character " + charName + " is not valid" dLog(pStr + buf, self._instanceDebug) self._spoolOut(buf + "\n") return False else: return False self.setLoginDate() self.client.charObj = self self._spoolOut(buf) return True def create(self, charName, promptFlag=True): """ create a new character * Call promptForNewCharacter to prompt user for customization * return True if character is _creature * return False and scrub character if character was not created """ self.__init__(self.client, self.client.acctObj.getEmail()) self.setName(charName) self.setPromptSize("full") self.setLoginDate() # prompt to customize if self.promptForNewCharacter(promptFlag): if not self.isValid(): return False else: self.__init__(self.client, self.client.acctObj.getEmail()) return False self.autoCustomize() if self.isValid(): self.save() else: self.__init__(self.client, self.client.acctObj.getEmail()) return False return True def autoCustomize(self): """ Automatically customize some stats/attributes, based on others """ self.customizeStats() self.setDoubleUpStatLevels() self.randomlyIncrementStats(12) # set starting points for changing stats that depend on other stats 
self.setHitPoints(self.getMaxHP()) self.setMana(self.getMaxMana()) self.resetTmpStats() self.equipFist() def fixAttributes(self): """ Sometimes we change attributes, and need to fix them in rooms that are saved. This method lets us do that. First we call the generic superClass fixAttributes to fix the types and remove obsolete vars. Here, we can also add class specific logic for copying values from one attribute to another, etc """ try: self._maxmana = self._maxspellpoints except (AttributeError, TypeError): pass try: self._mana = self._spellpoints except (AttributeError, TypeError): pass try: self._hp = self._hitpoints except (AttributeError, TypeError): pass try: self._maxhp = self._maxhitpoints except (AttributeError, TypeError): pass AttributeHelper.fixAttributes(self) def postLoad(self): self.truncateInventory(12) self.resetTmpStats() self.equipFist() def resetTmpStats(self): """ Resets some stats that are not meant to be static/peristant This is typically used when re-entering the game """ self._equippedWeapon = None self._equippedArmor = None self._equippedShield = None self._equippedRing = None self._equippedNecklace = None self._follow = None self._lastCommand = "" self.setAc() self.setMaxWeightForCharacter() self._hidden = False self._blessed = False self._drunk = False self._roomObj = None self._attackTargets = [] self._lastInputDate = datetime.now() self._lastAttackDate = getNeverDate() self._lastAttackCmd = "attack" self._lastRegenDate = getNeverDate() self._lastLoginDate = datetime.now() self._currentlyAttacking = None self._secondsUntilNextAttack = 0 self._vulnerable = False # Check if it's a different day if differentDay(datetime.now(), self._lastLogoutDate): self.resetDailyStats() def resetDailyStats(self): """ Reset daily stats - typically called during login, if it's a different calendar day from last login and/or at midnight reset """ self._limitedSpellsLeft = 5 self._broadcastLimit = 5 def isValid(self): """ Returns true if the class instance was created properly """ # ToDo: determine if a better check is required for att in ["_name", "_classname", "_gender", "_alignment"]: if getattr(self, att) == "": logger.error("Character.isValid - Attribute " + att + "is not defined!") return False if not hasattr(self, "client"): logger.error( "Character.isValid - Character is missing " + "attribute: client" ) return False return True def getDesc(self, showAlignment=True): """ Returns a string that describes the in-game appearance """ buf = ( self.getName() + " is a " + self.condition() + ", level " + str(self.getLevel()) + " " + self.getGender() + " " + self.getClassName() ) if showAlignment: if self._alignment == "lawful": buf += " who tends to follow the rules" elif self._alignment == "chaotic": buf += " who lives life on the edge" buf += ".\n" return buf def examine(self): """ This is what other's see if they look at you """ buf = self.getDesc(showAlignment=False) return buf def getInfo(self, dm=0): """Display character""" # ROW_FORMAT = '{0:14}: {1:<30}\n' buf = self.getDesc() buf += self.healthInfo() buf += self.equippedInfo() buf += self.financialInfo() buf += self.expInfo() buf += self.statsInfo() buf += self.skillsInfo() buf += self.guildInfo() buf += self.inventoryInfo() return buf def inventoryInfo(self): buf = self.describeInventory(markerAfter=12) return buf def financialInfo(self): buf = "You have " + str(self.getCoins()) + " shillings in " + "your purse.\n" return buf def statsInfo(self): """ Display character stats""" buf = "Stats:\n" ROW_FORMAT = " {0:14}: 
{1:<30}\n" for onestat in self.statList: buf += ROW_FORMAT.format(onestat, str(getattr(self, onestat))) return buf def skillsInfo(self): """ Display character skills""" buf = "Skills:" ROW_FORMAT = " {0:14}: {1:<30}\n" if self.hasAchievedSkillForLevel(): buf += " Proficiency earned for level " + str(self.getLevel()) + "\n" else: if self.getPromptSize() == "full": buf += ( " To gain a proficiency at this level, you " + "must engage with higher level creatures\n" ) else: buf += ( " Proficiency not earned for level " + str(self.getLevel()) + "\n" ) for onestat in self.skillDict.keys(): buf += ROW_FORMAT.format( onestat.rstrip("_"), str(getattr(self, onestat)) + "%" ) return buf def healthInfo(self): hitTxt = str(self.getHitPoints()) + "/" + str(self.getMaxHP()) magTxt = str(self.getMana()) + "/" + str(self._maxmana) buf = "You have " + hitTxt + " health pts and " + magTxt + " magic pts." if self.isDm(): buf += " Your armor class is " + str(self._ac) buf += "\n" if self.isPoisoned(): buf += "You are slowly dying from poison.\n" if self.isPlagued(): buf += "You are infected with the plague.\n" return buf def expInfo(self): ROW_FORMAT = " {0:14}: {1:<30}\n" buf = "Experience:\n" buf += ROW_FORMAT.format("Level", str(self.getLevel())) if self.getPromptSize() == "full": buf += ( " " + str(max(0, self._expToNextLevel)) + " experience needed to get to level " + str(int(self.getLevel()) + 1) + "\n" ) else: buf += " - " + str(max(0, self._expToNextLevel)) + " to go." return buf def guildInfo(self): buf = "" if self._guild != "": buf = ( "Guilds:\n" + " You are a member of the " + self._guild + "guild.\n" + " You joined on " + self._guildJoinDate.strftime("%Y/%m/%d") + " You have paid " + self._totalGuildPayments + " to your guild and your daily dues are " + self._dailyGuildDues + "\n" ) return buf def equippedInfo(self, prefix="You are"): buf = "" buf += ( prefix + " carrying " + str(self.getInventoryWeight()) + "/" + str(self.getInventoryMaxWeight()) + " lbs of items.\n" ) equippedList = [] if self.getEquippedArmor(): equippedList.append("wearing " + self.getEquippedArmor().describe()) if self.getEquippedWeapon(): if not self.isAttackingWithFist(): equippedList.append("weilding " + self.getEquippedWeapon().describe()) if self.getEquippedShield(): equippedList.append("holding " + self.getEquippedShield().describe()) if self.getEquippedRing(): equippedList.append( "sporting " + self.getEquippedRing().describe() + " on your finger" ) if self.getEquippedNecklace(): equippedList.append( "presenting " + self.getEquippedNecklace().describe() + " around your neck" ) if len(equippedList) > 0: inf = inflect.engine() # instanciate a inflect engine buf += prefix + " " + inf.join(equippedList) + ".\n" return buf def selectCharacter(self): """ prompt user to select a character to load store resulting character name into self._name return True/False""" logPrefix = __class__.__name__ + " selectCharacter: " characterList = self.client.acctObj.getCharacterList() numOfCharacters = len(characterList) openCharacterSlots = ( self.client.acctObj.getMaxNumOfCharacters() - numOfCharacters ) self.setName("") while True: prompt = "Select character to play : \n" if openCharacterSlots > 0: prompt += " (0) Create new character\n" if numOfCharacters > 0: prompt += self.client.acctObj.showCharacterList(indent=" ") prompt += "Enter number or press [enter] to exit: " inNum = self.client.promptForNumberInput(prompt, numOfCharacters) if inNum == -1: # undisclosed way to exit with False Value return False elif inNum == 0: # Prompt for a 
new character name minNameLength = 3 maxNameLength = 40 prompt = ( "To create a character, you will need to provide " + "your character's name, class, gender, and " + "alignment\n" ) prompt += "Please enter your character's name: " errmsg = ( " You may only use alphanumerics, spaces, " + "underbars, and hyphens.\n The first letter must" + " be alphabetic and the name must be between " + str(minNameLength) + " and " + str(maxNameLength) + " characters long." ) charName = self.client.promptForInput( prompt, r"^[A-Za-z][A-Za-z0-9_\- ]{2,}$", errmsg ) # noqa: E501 if charName == "": dLog(logPrefix + "name is blank", self._instanceDebug) return False elif charName in self.client.acctObj.getCharactersOnDisk(): msg = ( "Invalid Character Name. You already have a " + "character named " + charName + ".\n" ) self._spoolOut(msg) dLog(logPrefix + msg, self._instanceDebug) continue elif not self.client.acctObj.characterNameIsUnique(charName): msg = "Name is already in use. Please try again\n" self._spoolOut(msg) dLog(logPrefix + msg, self._instanceDebug) continue self.setName(charName) break else: # use existing character name, as defined in characterList self.setName(characterList[inNum - 1]) break if re.match(r"^.+@.+\..+/.+$", self.getId()): return True logger.error(logPrefix + "Could not generate ID - " + self.getId()) return False def getArticle(gender): if gender == "male": article = "he" possessive = "his" predicate = "he is" if gender == "female": article = "she" possessive = "her" predicate = "she is" if gender == "fluid": article = "they" possessive = "their" predicate = "they are" if gender == "self": article = "you" possessive = "my" predicate = "you are" return (article, possessive, predicate) def knowsSpell(self, spell): if spell in self._knownSpells: return True return False def learnSpell(self, spell): if spell not in SpellList: return False if not self.knowsSpell(spell): self._knownSpells.append(spell) return True return False def getCoins(self): return self._coins def setCoins(self, num): self._coins = int(num) def addCoins(self, num): self._coins += int(num) self.save() def subtractCoins(self, num): self._coins -= int(num) self.save() def canAffordAmount(self, num): if self._coins >= int(num): return True return False def getBankBalance(self): return self._bankBalance def setBankBalance(self, num): self._bankBalance = int(num) def bankAccountAdd(self, num): self._bankBalance += int(num) def bankAccountSubtract(self, num): self._bankBalance -= int(num) def bankFeeAdd(self, num): self._bankFeesPaid += int(num) def getBankFeesPaid(self): return self._bankFeesPaid def calculateBankFees(self, num, rate): """ returns the bank fee and the amount remaining """ fee = int((rate / 100) * int(num)) remaining = int(num) - fee return (int(fee), int(remaining)) def bankDeposit(self, num, feeRate=5): """ deposit funds from character's purse to character's bank account * subtract entire amount from characters's coin purse * subtract bank deposit fees (default 5%) * add resulting amout to character's bank account """ if self.canAffordAmount(int(num)): self.subtractCoins(num) # character pays the actual value bankfee, remainingCoin = self.calculateBankFees(num, feeRate) self.bankAccountAdd(remainingCoin) self.bankFeeAdd(bankfee) self.save() logger.info( "bank - " + self.getName() + " deposited " + str(remainingCoin) + " and paid " + str(bankfee) + " in fees" ) return True return False def bankWithdraw(self, num, feeRate=0): """ withdraw funds from character's bank account to character's purse * remove 
entire amount from bank * subtract any bank withdraw fees (default is 0%) * add resulting amount to character's purse """ if self.canWithdraw(int(num)): self.bankAccountSubtract(num) bankfee, remainingCoin = self.calculateBankFees(num, feeRate) self.addCoins(remainingCoin) self.bankFeeAdd(bankfee) self.save() logger.info( "bank - " + self.getName() + " withdrew " + str(remainingCoin) + " and paid " + str(bankfee) + " in fees" ) return True return False def canWithdraw(self, num): if self._bankBalance >= int(num): return True return False def recordTax(self, num): """ Some transactions have a room penalty. For these, we record them as taxes paid. Maybe, in the future, we'll have ways for characters to recoup their paid taxes (lottery?) """ self._taxesPaid += max(0, int(num)) self.save() return True def getTax(self): return self._taxesPaid def setTax(self, num): self._taxesPaid = int(num) def dmTxt(self, msg): """ return the given msg only if the character is a DM """ if self.isDm(): return msg return "" def getFollowingInfo(self, whosAsking="me"): buf = "" if whosAsking == "me": (article, possessive, predicate) = self.getArticle("self") else: (article, possessive, predicate) = self.getArticle(self.getGender()) if self._follow is not None: buf = predicate + " following." + self.following return buf def getDrunkInfo(self, whosAsking="me"): buf = "" if whosAsking == "me": (article, possessive, predicate) = self.getArticle("self") else: (article, possessive, predicate) = self.getArticle(self.getGender()) if self._drunkSecs != "": buf = ( predicate + " drunk, and will sober up in " + self._drunkSecs + " seconds\n" ) return buf def getHiddenInfo(self): buf = "" if self.isHidden() != "": buf = "You are hidden.\n" return buf def dmInfo(self): buf = "" if self.isDm(): dblstatList = ", ".join(str(x) for x in self._doubleUpStatLevels) buf += "DM visible info:\n" ROW_FORMAT = " {0:16}: {1:<30}\n" buf += ( ROW_FORMAT.format("Prompt", self.getPromptSize()) + ROW_FORMAT.format("Hidden", str(self.isHidden())) + ROW_FORMAT.format("2xStatLvls", dblstatList) + ROW_FORMAT.format("DodgeBonus", str(self.getDodgeBonus())) + ROW_FORMAT.format( "WeaponDamage", str(self.getEquippedWeaponDamage()) + " (" + str(self.getEquippedWeaponDamageType()) + ")", ) + ROW_FORMAT.format("WeaponToHit", str(self.getEquippedWeaponToHit())) + ROW_FORMAT.format("BankBalance", str(self.getBankBalance())) + ROW_FORMAT.format("TaxesPaid", str(self.getTax())) + ROW_FORMAT.format("BankFeesPaid", str(self.getBankFeesPaid())) ) buf += " Kill Counts:\n" ROW_FORMAT = " {0:16}: {1:<30}\n" buf += ( ROW_FORMAT.format("Weenies", str(self._weenykills)) + ROW_FORMAT.format("Matched", str(self._matchedkills)) + ROW_FORMAT.format("Valiant", str(self._valiantkills)) + ROW_FORMAT.format("Epic", str(self._epickills)) + ROW_FORMAT.format("Player", str(self._playerkills)) ) return buf def getClassKey(self, className=""): """ Get the key for the classname """ if className == "": className = self.getClassName() return self.classList.index(className) def setDoubleUpStatLevels(self): """ set _doubleUpStatLevels based on class and randomness """ # There are two double up stat levels per class self._doubleUpStatLevels = self.classDict[self.getClassKey()][ "doubleUpStatLevels" ] # Randomly select an additional unused double up stat level # keep selecting a random number until we find an unused one while True: randX = random.randint(2, 11) if randX in self._doubleUpStatLevels: pass elif randX % 5 == 0: pass else: self._doubleUpStatLevels.append(randX) break def 
customizeStats(self): """ customize stats based on class, gender, alignment, and random """ # get the index numbers of the named elements to use for dict lookup classKey = self.getClassKey() genderKey = self.genderList.index(self.getGender()) self.setMaxHP() self.setMaxMana() # increment the value of the CLASS bonus stats for bonusStat in self.classDict[classKey]["bonusStats"]: self.incrementStat(bonusStat) # decrement the value of the CLASS penalty stats for bonusStat in self.classDict[classKey]["penaltyStats"]: self.decrementStat(bonusStat) # increment the value of the GENDER bonus stats for bonusStat in self.genderDict[genderKey]["bonusStats"]: self.incrementStat(bonusStat) # luck bonuses for lawful and chaotic alignments, since they are # inherently more limiting if self._alignment in ["lawful", "chaotic"]: self.incrementStat("luck") self.incrementStat("luck") def addHP(self, num=0): self._hp = min((self._hp + num), self.getMaxHP()) def addMana(self, num=0): self._mana = min((self._mana + num), self.getMaxMana()) def addExp(self, num): self._expToNextLevel -= num def canAttack(self): if self.checkCooldown(self.getSecondsUntilNextAttack(), "until next attack"): return True return False def canSeeHidden(self): if self.isDm(): return True if self.getClassName().lower() in ["ranger", "rogue"]: return True if random.randint(1, 100) < int(self.getLuck() / 3): return True return False def canSeeInTheDark(self): """ ToDo: a light spell should allow players to see in the dark """ if self.isDm(): return True return False def canSeeInvisible(self): if self.isDm(): return True return False def describe(self, count=1, article=""): return self._name def isAttacking(self): if self._currentlyAttacking is not None: return True return False def isAttackingWithFist(self): if self.getLastAttackCmd() in SpellList: return False if self.getEquippedWeapon().getName() == "fist": return True return False def isBlessed(self): return self._blessed def isDm(self): return self._dm def isDrunk(self): return self._drunk def isEvil(self): return self._evil def isHidden(self): return self._hidden def isInvisible(self): return self._invisible def isMagic(self): return False def isPlagued(self): return self._plagued def isPoisoned(self): return self._poisoned def isUnKillable(self): if self.isDm(): return True return False def isVulnerable(self): return self._vulnerable def getAc(self): return self._ac def getAcctName(self): return self._acctName def getAlignment(self): return self._alignment def getAttacking(self): return self._attackTargets def getCharisma(self): return int(self.charisma) def getClassName(self): return self._classname def getCurrentlyAttacking(self): if self.isAttacking(): return self._currentlyAttacking return None def getConstitution(self): return int(self.constitution) def getDexterity(self): return int(self.dexterity) def getDodgeBonus(self): return self._dodgeBonus def getEquippedWeapon(self): return self._equippedWeapon def getEquippedArmor(self): return self._equippedArmor def getEquippedShield(self): return self._equippedShield def getEquippedRing(self): return self._equippedRing def getEquippedNecklace(self): return self._equippedNecklace def getFollow(self): return self._follow def getExp(self): return self._expToNextLevel def getGender(self): return self._gender def getHitPoints(self): return self._hp def getHitPointPercent(self): """ returns the int percentage of health remaining """ percent = self.getHitPoints() * 100 / self.getMaxHP() return int(percent) def getId(self): return 
self._acctName + "/" + str(self.getName()) def getInputDate(self): if not hasattr(self, "_lastInputDate"): return getNeverDate() return(self._lastInputDate) def getIntelligence(self): return int(self.intelligence) def getLastAttackCmd(self): return self._lastAttackCmd def getLastAttackDate(self): return self._lastAttackDate def getLastCmd(self): return self._lastCommand def getLastPoisonDate(self): if not hasattr(self, "_lastPoisonDate"): self.setLastPoison() return getNeverDate() return self._lastPoisonDate def getLastRegenDate(self): if not hasattr(self, "_lastRegenDate"): self.setLastRegen() return getNeverDate() return self._lastRegenDate def getLastLoginDate(self): if not hasattr(self, "_lastLoginDate"): return getNeverDate() return self._lastLoginDate def getLimitedSpellCount(self): return int(self._limitedSpellsLeft) def getLevel(self): return self._level def getLimitedBroadcastCount(self): return int(self._broadcastLimit) def getLuck(self): return int(self.luck) def getMana(self): return self._mana def getMaxHP(self): return self._maxhp def getMaxMana(self): return self._maxmana def getName(self): return self._name def getRoom(self): return self._roomObj def getSecondsUntilNextAttack(self): return self._secondsUntilNextAttack def getStrength(self): return int(self.strength) def getType(self): return self.__class__.__name__ def kidnaps(self): return False def sendsToJail(self): return False def reduceLimitedSpellCount(self, num=1): self._limitedSpellsLeft -= int(num) def reduceLimitedBroadcastCount(self, num=1): self._broadcastLimit -= int(num) def removeDm(self): self._dm = False def removeRoom(self): self._roomObj = None def setAlignment(self, _alignment): if _alignment in self.alignmentList: self._alignment = str(_alignment) else: logger.error("setAlignment: Attempt to set invalid alignment") def setBlessed(self, val=True): self._blessed = val def setClassName(self, _classname): if _classname in self.classList: self._classname = str(_classname) else: logger.error("setClassName: Attempt to set invalid class") def setCurrentlyAttacking(self, player): self._currentlyAttacking = player def setDm(self): self._dm = True def setDrunk(self, val=True): self._drunk = val def setEvil(self, val=True): self._evil = val def setExpForLevel(self): """ set the character's exp to the amount required for next level """ self._expToNextLevel = 2 ** (9 + self.getLevel()) def setFollow(self, charObj=None): self._follow = charObj def setGender(self, _gender): if _gender in self.genderList: self._gender = str(_gender) else: logger.error("setGender: Attempt to set invalid gender") def setHidden(self, val=True): self._hidden = val def setInvisible(self, val=True): self._invisible = val def setHitPoints(self, num): self._hp = int(num) def setInputDate(self): self._lastInputDate = datetime.now() def setLevel(self, num): self._level = int(num) def setLoginDate(self): self._lastLoginDate = datetime.now() def setLogoutDate(self): self._lastLogoutDate = datetime.now() def setLastCmd(self, str1): self._lastCommand = str1 def setLastAttackCmd(self, str1): self._lastAttackCmd = str1 def setLastAttack(self, cmd="attack"): self.setLastAttackCmd(cmd) self.setLastAttackDate() def setLastAttackDate(self): self._lastAttackDate = datetime.now() def setLastRegen(self, when="now"): if when == "never": self._lastRegenDate = getNeverDate() else: self._lastRegenDate = datetime.now() def setLastPoison(self, when="now"): if when == "never": self._lastPoisonDate = getNeverDate() else: self._lastPoisonDate = datetime.now()
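# --- Illustrative sketch (not part of the original file) ---
# setExpForLevel() above sets _expToNextLevel to 2 ** (9 + level), so the
# experience required doubles each level: 1024 at level 1, 2048 at level 2,
# and so on (new characters start with 2 ** 9 = 512 in __init__). addExp()
# subtracts earned exp from that counter and hasExpToTrain() reports when it
# reaches zero. A standalone check of that arithmetic; the helper name below
# is hypothetical and only demonstrates the formula:

def _example_exp_needed_for_level(level):
    """Experience required to train out of the given level: 2 ** (9 + level)."""
    return 2 ** (9 + level)

assert _example_exp_needed_for_level(1) == 1024
assert _example_exp_needed_for_level(2) == 2048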
def setMana(self, num): self._mana = int(num) def setMaxHP(self, num=0): if num == 0: baseHealth = self.classDict[self.getClassKey()]["baseHealth"] num = baseHealth * self.getLevel() self._maxhp = num def setMaxMana(self, num=0): if num == 0: baseMagic = self.classDict[self.getClassKey()]["baseMagic"] num = baseMagic * self.getLevel() self._maxmana = num def setName(self, _name): self._name = str(_name) def setPlagued(self, val=True): self._plagued = val def setPoisoned(self, val=True): self._poisoned = val def setRoom(self, roomObj): self._roomObj = roomObj def setSecondsUntilNextAttack(self, secs=3): self._secondsUntilNextAttack = int(secs) def setVulnerable(self, val=True): self._vulnerable = bool(val) def subtractMana(self, num): self._mana -= int(num) def subtractHP(self, num=0): self._hp -= max(0, num) def subtractlevel(self, num=1): self._level -= int(num) def hasExpToTrain(self): """ return True if character has enough exp to train for next level """ if self._expToNextLevel <= 0: return True return False def levelUp(self): """Level up a character""" self._level += 1 self.levelUpStats() self.reCalculateStats() self._achievedSkillForLevel = False def getDistribution(self, startR=15, endR=90, shifter=0, influence=12): """ Some reusable math to tweak number range distributions I'm not a math wizard, but I was able to create this formula that, along with tweaking the inputs, allows us to get the desired distributions. Beware: Tweaking this requires lots of (manual/visual) testing of stats/lvls when leveling up and level down. See testDeathLevels: pytest .\test\test_character.py -k testDeathLevels """ distMult = (100 - startR) / (endR - startR) percentChance = shifter + ((influence - startR) * distMult) return percentChance def getStatPoints(self, reverse=False, distShifter=90, luckPoint=True): """ determine how many stat points are gained/lost when leveling up/down. * dist shifter allows us to tweak the resulting distributions which want to change for leveling up vs leveling down """ statPoints = 1 # check to see if level is a doubleUp level if self.getLevel() in self._doubleUpStatLevels: # Grant extra stat point statPoints += 1 if self.getLevel() % 5 == 0: # Grant extra stat point every 5th level statPoints += 1 elif self.getLevel() > 10: # After level 10, chance of getting an extra point on levels not # divisible by 5 goes up significantly distShifter + 20 if luckPoint: # A luck based role to to determine if an extra point is added. percentChance = self.getDistribution( startR=11, endR=90, shifter=distShifter, influence=self.luck ) if reverse: percentChance = 100 - percentChance if percentChance > random.randint(1, 100): statPoints += 1 return statPoints def levelsToLose(self): """ How many levels are lost (when dying) """ chanceOfOnlyLosingOneLevel = 60 pietyScaler = 1.3 luckScaler = 1.1 influencer = (self.piety * pietyScaler) + (self.luck * luckScaler) chanceOfOnlyLosingOneLevel = self.getDistribution( startR=5, endR=150, shifter=75, influence=influencer ) if self.getClassName().lower() in ["cleric", "paladin"]: # 10% reduction for clerics and paladins chanceOfOnlyLosingOneLevel += 10 percentChance = int(chanceOfOnlyLosingOneLevel) if percentChance > random.randint(1, 100): levelsToLose = 1 else: levelsToLose = 2 return levelsToLose def expBonus(self, percent=10): """ deduct a certain percentage of experience for next level """ self.client.spoolOut( "Hermes blesses you! Your next " + "level will arrive in haste." 
) multPercent = (100 - percent) / 100 self._expToNextLevel = int(self._expToNextLevel * multPercent) def levelUpStats(self, statPoints=0): """Level up a character's stats""" if statPoints == 0: statPoints = self.getStatPoints() if statPoints == 1: if random.randint(0, 99) < (self.luck * 2): # Based on luck, (roughly 20% chance) exp may be reduced self.expBonus() self.randomlyIncrementStats(statPoints) # increase max hitpoints/mana self.reCalculateStats() return None def levelDownStats(self, statPoints=0): """ decrease stats - used when someone dies """ if statPoints == 0: statPoints = self.getStatPoints(distShifter=20, reverse=True) self.randomlyDecrementStats(statPoints) self.reCalculateStats() return None def getRandomStat(self): """Randomly picks a stat and returns the stat name""" randX = random.randint(0, (len(self.statList) - 1)) # get the stat name, based on the random number return self.statList[randX] def randomlyIncrementStats(self, points=1): """Randomly assign points to attributes""" for x in range(1, points + 1): self.incrementStat(self.getRandomStat()) def randomlyDecrementStats(self, points=1): """Randomly assign points to attributes""" for x in range(1, points + 1): self.decrementStat(self.getRandomStat()) def incrementStat(self, stat): """ increment a given stat by one """ newvalue = int(getattr(self, stat)) + 1 setattr(self, stat, newvalue) def decrementStat(self, stat): """ increment a given stat by one """ newvalue = int(getattr(self, stat)) - 1 setattr(self, stat, newvalue) def reCalculateStats(self): self.setAc() self.setMaxHP() self.setMaxMana() self.setExpForLevel() self.setMaxWeightForCharacter() return None def setNearDeathExperience(self): """ set stats so that character can recover from a near death exp """ self.setHitPoints(1) self.setPoisoned(False) self.setPlagued(False) def setPromptSize(self, size): """ change the prompt verbosity """ if size in ["full", "brief"]: self._prompt = size elif size == "": # if promptStr is blank, toggle between the prompts if self._prompt == "full": self._prompt = "brief" else: self._prompt = "full" return None def getPromptSize(self): return self._prompt def setAc(self): """ calculate the character\'s armor class """ ac = 0 db = 0 equipppedList = [ self.getEquippedWeapon(), self.getEquippedArmor(), self.getEquippedShield(), self.getEquippedRing(), self.getEquippedNecklace(), ] for oneObj in equipppedList: if oneObj is not None: ac += oneObj.getAc() db += oneObj.getDodgeBonus() self._ac = ac self._dodgeBonus = db def getEquippedWeaponDamage(self): """ Given the equipped weapon and attack type, return the damage """ damage = self.getFistDamage() if self.isAttackingWithFist(): return damage if self.getEquippedWeapon().isBroken(): self._spoolOut("Your weapon is broken.\n") return 0 weapon = self.getEquippedWeapon() minDamage = weapon.getMinimumDamage() maxDamage = weapon.getMaximumDamage() dLog( "character.getEquippedWeaponDamage: weaponMin: " + str(minDamage) + " - weaponMax: " + str(maxDamage), self._instanceDebug, ) damage += random.randint(minDamage, maxDamage) return damage def getEquippedWeaponDamageType(self): return self.getEquippedWeapon().getDamageType() def decreaseChargeOfEquippedWeapon(self): """ decrease charge counters of equipped weapon + notify """ weapon = self.getEquippedWeapon() if weapon.getName() != "fist": weapon.decrementChargeCounter() if weapon.isBroken(): self._spoolOut( "Snap! 
Your " + weapon.describe(article="") + " breaks\n" ) elif self.getClassName() == "ranger" and weapon.getCharges() == 10: self._spoolOut( "Your " + weapon.describe(article="") + " is worse for wear and in need of repair.\n" ) def getEquippedProtection(self): """ returns equipped armor and/or shield, as a list """ armor = self.getEquippedArmor() shield = self.getEquippedShield() objList = [] if armor: objList.append(armor) if shield: objList.append(shield) return objList def decreaseChargeOfEquippedProtection(self): """ decrease charge counters of equipped armor/shield + notify """ for obj in self.getEquippedProtection(): obj.decrementChargeCounter() if obj.isBroken(): self._spoolOut("Your " + obj.describe(article="") + " falls apart\n") elif self.getClassName() == "ranger" and obj.getCharges() == 10: self._spoolOut( "Your " + obj.describe(article="") + " is worse for wear and in need of repair.\n" ) def getEquippedWeaponToHit(self): """ return tohit percentage of weapon """ weapon = self.getEquippedWeapon() if weapon.getName() == "fist": return 0 return weapon.getToHitBonus() def getCumulativeDodge(self): """ return dodge percentage of armor + shield """ logPrefix = "character.getCumulativeDodge: " # Start off with dodge skill dodgePct = self._dodge dLog(logPrefix + "dodgeSkill=" + str(dodgePct), self._instanceDebug) # Add on dodge from armor/shields for obj in self.getEquippedProtection(): if not obj.isBroken(): dodgePct += obj.getDodgeBonus() dLog(logPrefix + "withGear=" + str(dodgePct), self._instanceDebug) # It's a little bit strange to have to traverse back to the game/combat # class to get this data, but it seems to make more sense than trying # to pass it all around. if hasattr(self, "client"): fullAttackDict = self.client.gameObj.getAttackDict() attackDict = fullAttackDict.get(self.getLastAttackCmd(), {}) dodgePct += attackDict.get("dodge", 0) dLog(logPrefix + "totalDodge=" + str(dodgePct), self._instanceDebug) return dodgePct def getSkillPercentage(self, skill): if skill[0] != "_": skill = "_" + skill # Prepend underbar, if needed try: return getattr(self, skill) except KeyError: pass return 0 def getEquippedSkillPercentage(self): """ includes bonuses from skills and gear """ if self.isPlagued(): return 0 skillName = self.getEquippedWeaponDamageType() percent = self.getSkillPercentage(skillName) necklace = self.getEquippedNecklace() if necklace: percent += necklace.getProtectionFromSkill(skillName) return percent def hasAchievedSkillForLevel(self): return self._achievedSkillForLevel def rollToBumpSkillForLevel(self, skill, percentChance=33): """ given a skill name, if eligible, bump character's skill * Only one skill bump allowed per level * There's a random (default=33%) chance that skill is bumped * maximum skill is 50% """ if self.hasAchievedSkillForLevel(): return False skilllevel = self.getSkillPercentage(skill) if skilllevel > 50: return False if random.randint(1, 100) <= percentChance: setattr(self, skill, skilllevel + 10) self._achievedSkillForLevel = True return True return False def continuesToFollow(self, followingCharObj, chanceOfFollowing=90): """ Returns true if follow succeeds Follow ability should be based on stats, class, luck, and other character's level """ logPrefix = __class__.__name__ + " continuesToFollow: " debugMsg = "{} {} - Roll - {} < {}" # Increase chance, based on stats chanceOfFollowing += (divmod(self.dexterity, 5)[0] + divmod(self.luck, 10)[0]) # Increasee chance, based on class if self.getClassName().lower() in ["ranger"]: chanceOfFollowing += 5 
else: # Increase/Decrease chance of following depending on level difference levelDiff = followingCharObj.getLevel() - self.getLevel() chanceOfFollowing -= levelDiff # Compare chance against random roll randX = random.randint(1, 100) if randX < chanceOfFollowing: dLog(debugMsg.format(logPrefix, "Pass", randX, chanceOfFollowing), self._instanceDebug,) return(True) dLog(debugMsg.format(logPrefix, "Fail", randX, chanceOfFollowing), self._instanceDebug,) return(False) def checkCooldown(self, secs, msgStr=""): if self._lastAttackDate == getNeverDate(): return True secsSinceLastAttack = secsSinceDate(self._lastAttackDate) secsRemaining = secs - secsSinceLastAttack # logger.debug("cooldown: ses(" + str(secs) + # ') - secsSinceLastAttack(' + # str(secsSinceLastAttack) + ") = secsRemaining(" + # str(secsRemaining) + ") - " + # dateStr(self._lastAttackDate)) if secsRemaining <= 0: return True buf = ( "You are not ready. " + str(truncateWithInt(secsRemaining, 1)) + " seconds remain" ) if msgStr != "": buf += " " + msgStr buf += ".\n" self._spoolOut(buf) return False def condition(self): """ Return a non-numerical health status """ status = "unknown" if self.getHitPoints() <= 0: status = "dead" elif self.getHitPoints() < self.getMaxHP() * 0.10: # Less than 10% of health remains status = "desperate" elif self.getHitPoints() < self.getMaxHP() * 0.25: # 11-25% of health remains status = "injured" elif self.getHitPoints() < self.getMaxHP() * 0.50: # 26-50% of health remains status = "drained" elif self.getHitPoints() < self.getMaxHP() * 0.75: # 51-75% of health remains status = "fatigued" elif self.getHitPoints() < self.getMaxHP() * 0.99: # 76-99% of health remains status = "healthy" elif self.getHitPoints() == self.getMaxHP(): # totally healthy status = "fresh" return status def dodge(self, basePercent=100, dodgeTxt=""): """ Return true if dodged * If basePercent is increased, chance of dodging goes down. * chances improved by dex, class, dodge skill, and dodgeBonus """ result = "dodge failed" randX = random.randint(1, 100) classMult = 2 if self.getClassName().lower() == "rogue" else 1 skillMult = self._dodge + self._dodgeBonus dodgeAdv = self.getDexterity() * (classMult + skillMult) / 10 dodgeCalc = (randX + dodgeAdv) * 2 if dodgeCalc > basePercent: result = "dodged" if dodgeTxt != "": dodgeTxt += " " dLog( "{0}{1} - character dodge calc ({2}) >? 
{0}odds ({3})".format( dodgeTxt, result, dodgeCalc, basePercent ), self._instanceDebug, ) if result == "dodged": return True return False def acDamageReduction(self, damage): """ reduce damage based on AC """ ac = self.getAc() # reduce AC if protection is broken for obj in self.getEquippedProtection(): if obj.isBroken(): ac -= obj.getAc() # reduce damage based on percentage: acReduction = int(damage * (0.05 * ac)) damage -= acReduction return max(0, damage) def getCircleSecs(self): """ Returns the number seconds a creature will wait given a successful circle - based on character level/stats""" secsToWait = random.randint(self.getLevel(), 20 + self.getDexterity()) return secsToWait def damageIsLethal(self, num=0): if num >= self.getHitPoints(): return True return False def takeDamage(self, damage=0, nokill=False): """ Take damage and check for death """ self.subtractHP(damage) if nokill and self.getHitPoints() <= 0: self.setNearDeathExperience() condition = self.condition() dLog(self.getName() + " takes " + str(damage) + " damage", self._instanceDebug) self.save() if self.getHitPoints() <= 0: if self.isDm(): self._spoolOut( "You would be dead if you weren't a dm." + " Resetting hp to maxhp.\n" ) self.setHitPoints(self._maxhp) else: self.processDeath() return condition def obituary(self): """ Notify/record death """ deathMsg = self.describe() + " has died" self.client.getGameObj().gameMsg(self.client.txtBanner(deathMsg) + "\n") logger.info("obituary: " + deathMsg) def processDeath(self, calculateLevelsToLose=True, silent=False): """ Do all the things related to dying """ levelsToLose = 1 if calculateLevelsToLose: levelsToLose = self.levelsToLose() for numlvl in range(1, levelsToLose + 1): self.levelDownStats() if self.getLevel() > 1: self.subtractlevel() self.setHitPoints(self.getMaxHP()) self.setPoisoned(False) self.setPlagued(False) self.save() if not silent: # primarily used for testing hundreds of deaths self._spoolOut("You are dead!\n") self.obituary() # return to starting room or guild self.client.gameObj.joinRoom(58, self) self._spoolOut(self.getRoom().display(self)) return True def searchSucceeds(self, obj, basePercent=30): """ Returns True if search succeeds * chance of success based on dex, level, and luck """ logPrefix = __class__.__name__ + " searchSucceeds: " if self.canSeeHidden(): dLog(logPrefix + "Pass - Character can see hidden", self._instanceDebug) return True percentChance = ( basePercent + self.getDexterity() + self.getLevel() + self.getLuck() ) if obj.getType() == "Creature" or obj.getType() == "Character": # +/- 10% per level difference percentChance += (self.getLevel() - obj.getLevel()) * 10 if random.randint(1, 20) == 1: # Always a 5 percent chance of success dLog(logPrefix + "Pass - Always 5% Chance", self._instanceDebug) return True randX = random.randint(1, 100) if randX <= percentChance: dLog( logPrefix + "Pass - Roll - " + str(randX) + " < " + str(percentChance), self._instanceDebug, ) return True dLog(logPrefix + "Failed", self._instanceDebug) return False def equipFist(self): """ equip fist, the default weapon - fist is a special weapon that is not in any inventory """ obj = Weapon() obj.setName("fist") obj._article = "a" obj._singledesc = "fist" obj.setMaximumDamage(self.getFistDamage()) self.equip(obj) def getFistDamage(self): """ calculate damage for the fist, the default weapon """ damage = int((self.getStrength() / 5) + (self.getLevel() / 2)) damage += self.classDict[self.getClassKey()]["baseDamage"] damage -= random.randint(0, 3) return max(0, damage) 
def equip(self, obj): # Deal with currently equipped item equippedObj = getattr(self, obj.getEquippedSlotName()) if equippedObj is None: # Nothing is currently equipped pass elif equippedObj == obj: # desired object is already in use return True elif obj is not None: # wearing some other item self.unEquip(obj) # Pass object so we know which slot to vacate slotName = obj.getEquippedSlotName() if slotName: setattr(self, slotName, obj) self.setAc() return True return False def unEquip(self, obj=None, slotName=""): if obj and slotName == "": # Use the current object to determine slot name if obj.isEquippable(): slotName = obj.getEquippedSlotName() if slotName == "": return False setattr(self, slotName, None) self.setAc() if self.getEquippedWeapon() is None: self.equipFist() return True def attemptToHide(self): randX = random.randint(0, 99) hidechance = self.getLevel() * 20 + self.dexterity if self.getClassName().lower() == "rogue": hidechance *= 2 # double the chance of success for rogues # consider additional bonus for guild status # half the chance of success if there are already creatures in the room if len(self._roomObj.getCreatureList()) > 0: hidechance /= 2 hidechance = max(66, hidechance) # Chance to hide tops out at 66% if hidechance > randX: self.setHidden() return True return False def hearsWhispers(self): """ calculate whether a character can hear whispers in a room todo: make this more random and skill/sluck based """ if self.getClassName().lower() == "ranger": return True return False def adjustPrice(self, price): """ Adjust the price of goods depending on character attributes * non-character price changes occur elsewhere """ # consider adjustments for charisma, alignment, luck return price def setMaxWeightForCharacter(self): """ Maxweight varies depending on attributes """ weight = 10 * max(7, int(self.strength)) self.setInventoryMaxWeight(weight) def fumbles(self, basePercent=20): """ Return true if player fumbles. * Fumble is a trip while attacking which causes player to unequip weapon and shield and wait 30 seconds before attacking again * random chance, partially based on dex. * if fumble, player's weapon is unequipped """ logPrefix = "char.fumbles: " fumbles = False if self.isAttackingWithFist(): return False fumbleRoll = random.randint(1, 100) percentage = basePercent - self.getDexterity() if fumbleRoll == 1: # always a 1% change of fumbling dLog(logPrefix + "Bad luck - 1% fumble triggered", self._instanceDebug) fumbles = True elif fumbleRoll < percentage: dLog( logPrefix + "Standard Roll: " + str(fumbleRoll) + " < " + str(percentage), self._instanceDebug, ) fumbles = True if fumbles: self.unEquip(slotName="_equippedWeapon") self.unEquip(slotName="_equippedShield") self.setSecondsUntilNextAttack(30) return fumbles def discardsEquippedWeapon(self): """ drop currently equipped weapon """ if self.isAttackingWithFist(): return True weaponObj = self.getEquippedWeapon() self.unEquip(slotName="_equippedWeapon") self.removeFromInventory(weaponObj) roomObj = self.getRoom() if roomObj: roomObj.addToInventory(weaponObj) return True def possibilyLoseHiddenWhenMoving(self): """ set hidden to false if you fail the roll. 
* when moving, there is a chance that you will not remain hidden * base chance of remaining hidden is 50% + dex * rangers and thieves get improved chance = dex a ranger/thief with 20 dex has 99% chance of staying hidden """ if not self.isHidden(): return False oddsOfStayingHidden = 60 + self.getDexterity() if self.getClassName() in ["rogue", "ranger"]: oddsOfStayingHidden += self.getDexterity() if random.randint(1, 100) >= oddsOfStayingHidden: self.setHidden(False) return True def processPoisonAndRegen(self, regenInterval=90, poisonInterval=60): """ At certain intervals, poison and hp regeneration kick in * poison should be faster and/or stronger than regen """ conAdj = self.getConstitution() - 12 intAdj = self.getIntelligence() - 12 regenHp = max(1, int(self.getMaxHP() / 10) + conAdj) regenMana = max(1, int(self.getMaxMana() / 8) + intAdj) poisonHp = max(1, int(self.getLevel() - conAdj)) if not self.isPlagued(): # no regen if plagued # Check the time if self.getLastRegenDate() == getNeverDate(): regenSecsRemaining = 0 else: regenSecsRemaining = regenInterval - secsSinceDate( self.getLastRegenDate() ) dLog( "regen counter: " + str(regenSecsRemaining) + " secs - " + str(self.getLastRegenDate()) + " - " + str(secsSinceDate(self.getLastRegenDate())), False, ) if regenSecsRemaining <= 0: self.addHP(regenHp) self.addMana(regenMana) self.setLastRegen() if self.isPoisoned(): # take damage if poisoned # Check the time if self.getLastPoisonDate() == getNeverDate(): poisonSecsRemaining = 0 else: poisonSecsRemaining = poisonInterval - secsSinceDate( self.getLastPoisonDate() ) dLog("poison cntr: " + str(poisonSecsRemaining) + " secs", False) if poisonSecsRemaining <= 0: self._spoolOut( "As the poison circulates, you take " + str(poisonHp) + " damage.\n" ) self.takeDamage(poisonHp) self.setLastPoison() def resistsPoison(self, chanceToPoison=80): """ Returns true/false if the player resists poison """ if self.getClassName() == "ranger": # Rangers are 3 times less likely to be poisoned chanceToPoison /= 3 chanceToPoison -= self.getLevel() return self.dodge(chanceToPoison, dodgeTxt="poison") def picksLock(self, lockLevel): """ Returns True if pick calculations succeed * Lock level makes it harder to pick * Dex makes it easier to pick * Rogues get a big pick advantage """ if (self.getClassName().lower() == "rogue") and ( self.getDexterity() - 5 > lockLevel ): return True elif (self.getDexterity() - 12) > lockLevel: # stat based return True return False def avoidsTrap(self, traplevel): """ Return true if trap is avoided dex, class, and traplevel are used to calculate """ trapPercent = 100 + traplevel * 10 if self.getClassName().lower() == "rogue": # Thieves are twice as good at avoiding traps trapPercent /= 2 return self.dodge(trapPercent, dodgeTxt="trap") def promptForClass(self, ROW_FORMAT): prompt = "Classes:\n" for oneNum, oneName in enumerate(self.classList): desc = str(oneName) + " - " + self.classDict[oneNum]["desc"] prompt = prompt + ROW_FORMAT.format(oneNum, desc) prompt = prompt + "Select your character's class: " inNum = self.client.promptForNumberInput(prompt, (len(self.classList) - 1)) if inNum == -1: return False self.setClassName(self.classList[inNum]) return True def promptForGender(self, ROW_FORMAT): prompt = "Genders:\n" for oneNum, oneName in enumerate(self.genderList): prompt += ROW_FORMAT.format(str(oneNum), oneName) prompt += "Select your character's gender: " inNum = self.client.promptForNumberInput(prompt, (len(self.genderList) - 1)) if inNum == -1: return False
self.setGender(self.genderList[int(inNum)]) return True def promptForAlignment(self, ROW_FORMAT): prompt = "Alignment:\n" prompt += ROW_FORMAT.format("0", "Lawful - " + "friend of good, enemy of evil") aNumOptions = 0 if self.getClassName() != "paladin": prompt += ROW_FORMAT.format( "1", "Neutral - " + "Neither lawful, nor chaotic" ) aNumOptions = aNumOptions + 1 if self.getClassName().lower() not in ["cleric", "paladin"]: prompt += ROW_FORMAT.format( "2", "Chaotic - " + "unpredictable and untrustworthy" ) aNumOptions = aNumOptions + 1 prompt += "Select your character's alignment: " inNum = self.client.promptForNumberInput(prompt, aNumOptions) if inNum == -1: return False self.setAlignment(self.alignmentList[int(inNum)]) return True def promptForSkills(self, ROW_FORMAT): prompt = "Skills:\n" sList = {} for num, skill in enumerate(self.skillDict): prompt += ROW_FORMAT.format( num, skill.lstrip("_") + " - " + self.skillDict[skill] ) sList[num] = skill inNum = self.client.promptForNumberInput(prompt, len(self.skillDict)) if inNum == -1: return False setattr(self, sList[inNum], 10) # Set skill of choice to 10% return True def promptForDm(self, ROW_FORMAT): if self.client: # not set when testing if self.client.acctObj.isAdmin(): prompt = "Should this Character be a Dungeon Master (admin)?" if self.client.promptForYN(prompt): self.setDm() return True return False def promptForNewCharacter(self, promptFlag=True): """Prompt user to input character info and return the results""" if promptFlag: ROW_FORMAT = " ({0:1}) {1:<30}\n" self.promptForClass(ROW_FORMAT) self.promptForGender(ROW_FORMAT) self.promptForAlignment(ROW_FORMAT) self.promptForSkills(ROW_FORMAT) self.promptForDm(ROW_FORMAT) else: self.setClassName(getRandomItemFromList(self.classList)) self.setGender(getRandomItemFromList(self.genderList)) self.setAlignment(getRandomItemFromList(self.alignmentList)) self._dodge = 10 return True def setDataFilename(self, dfStr=""): """ sets the data file name. - Override the superclass because we want the account info to be in the account directory. """ logPrefix = __class__.__name__ + " setDataFilename-c: " # generate the data file name based on class and id try: id = self.getId() except AttributeError: pass if not id: logger.error(logPrefix + "Could not retrieve Id to " + "generate filename") return False if not re.match(r"^.+@.+\..+/.+$", id): logger.error( logPrefix + "ID is blank while generating filename." 
+ "id=" + id ) return False self._datafile = os.path.abspath(DATADIR + "/Account/" + str(id) + ".pickle") return True def updateKillCount(self, opponent): opponentLevel = opponent.getLevel() killerLevel = self.getLevel() if opponentLevel < killerLevel: self._weenykills += 1 elif opponentLevel == killerLevel: self._matchedkills += 1 elif opponentLevel > killerLevel: self._valiantkills += 1 if opponent.getType() == "Character": self._playerkills += 1 if opponent.isPermanent(): self._epickills += 1 ``` #### File: sog/sog/client.py ```python import argparse from common.clientLib import Client def main(): parser = argparse.ArgumentParser(description="Client for SoG") parser.add_argument("--username", type=str, help="username for auto login") parser.add_argument("--password", type=str, help="password for auto login") parser.add_argument("--host", type=str, help="ip of server") parser.add_argument("--port", type=str, help="port of server") parser.add_argument("--debug", action="store_true", help="turn debugging on") args = parser.parse_args() clientObj = Client() clientObj.setDebug(False) if args.debug: clientObj.setDebug(True) clientObj.start(args) main() ``` #### File: sog/common/general.py ```python from datetime import datetime import logging from logging.handlers import TimedRotatingFileHandler from os.path import basename from pathlib import Path import random import re import sys from common.globals import LOGDIR class Terminator(Exception): """ Custom exception to trigger termination of all threads & main. """ pass def sig_handler(signal_received, frame): print("SIGINT, SIGTERM, or CTRL-C detected. Exiting gracefully") raise Terminator def getLogger(logName, loglevel=logging.DEBUG): logpath = Path(LOGDIR) logpath.mkdir(parents=True, exist_ok=True) FORMAT = "%(asctime)-15s %(levelname)s %(message)s" DATEFORMAT = "%m/%d/%y %H:%M:%S" LOGFILE = LOGDIR + "/" + logName + ".log" logger = logging.getLogger(logName) logger.setLevel(loglevel) # create a file handler # logHandler = logging.FileHandler(LOGFILE) # daily logs rotated after 30 days logHandler = TimedRotatingFileHandler(LOGFILE, when="d", interval=1, backupCount=30) logHandler.setLevel(loglevel) # create a logging format logFormatter = logging.Formatter(FORMAT, DATEFORMAT) # add the format to the handler logHandler.setFormatter(logFormatter) # add the handler to the logger logger.addHandler(logHandler) return logger def dLog(msg, show=False): """ Show debug log messages if flag is set """ if show: logger.debug(msg) return None def getNeverDate(): """ Return a date object/value that represents never """ return datetime(1900, 1, 1) def secsSinceDate(date1): """ return seconds since a given date """ if not date1: logger.error("secsSinceDate: date was not defined. Returning 0") return 0 if date1 == getNeverDate(): logger.warning("secsSinceDate: received NeverDate. Returning 0") return 0 return (datetime.now() - date1).total_seconds() def dateStr(date1, datefmt="%Y/%m/%d %H:%M"): """ return a given date Obj as a string, in our standard format """ if not date1: logger.error("dateStr: date was not defined. 
Returning ''") return "" if date1 == getNeverDate(): return "Never" elif date1 == "now": return datetime.now().strftime(datefmt) elif date1: return date1.strftime(datefmt) else: logger.error("dateStr: Could not parse - returned an empty value") return "" def differentDay(date1, date2): """ Compare dates to see if they are the same day * typically used for daily events, counters, stats, etc """ if not date1: logger.error("differentDay: date was not defined. Returning False") return False if not date2: logger.error("differentDay: date was not defined. Returning False") return False if date1.strftime("%Y/%m/%d") != date2.strftime("%Y/%m/%d"): return True return False def isIntStr(numstr): """ Return True if the given string contains ony digits """ if re.match("^[0-9]+$", str(numstr)): return True return False def isCountStr(numstr): """ Return True if the given string contains ony digits of # and digits """ if re.match("^#*[0-9]+$", str(numstr)): return True return False def sortItemFunc(s): """ Sort function """ return s.describe() def itemSort(itemList): newItemList = sorted(itemList, key=sortItemFunc) return newItemList def getRandomItemFromList(list1): """ Given a list, returns random element """ if len(list1) == 0: return None indexNum = random.randint(0, len(list1) - 1) return list1[indexNum] def truncateWithInt(num, decimalPlaces=3): """ Given a number, returns that number truncated to X decimal places """ if not num: logger.error("truncateWithInt: num was not defined. Returning 0") return 0 if not isinstance(num, float) and not isinstance(num, int): logger.error("truncateWithInt: invalid num. Returning 0") return 0 shifter = 10 ** decimalPlaces return int(num * shifter) / shifter def splitTargets(targetStr): """ break cmdargs into parts consisting of: 1) cmdargs are already stripped of their first arg 2) list of targets, including their number. Target examples: * staff * staff 2 * staff #2 * player * player #3 """ argStr = "" targetList = [] for arg in targetStr.split(" "): if argStr == "": # The first arg is the item argStr = arg elif isCountStr(arg): # if the first arg is a number targetList.append(argStr + " " + arg) argStr = "" else: # the last one is complete, this one is new targetList.append(argStr) argStr = arg if argStr != "": # if the last arg hasn't been appended targetList.append(argStr) return targetList def targetSearch(itemList, targetStr): """ returns the first matching item from itemList or None * breaks up targetStr into parts * calls itemSearch with the proper arguments """ targetWords = targetStr.split(" ") if len(targetWords) == 1: targetObj = itemSearch(itemList, targetWords[0]) if len(targetWords) > 1: targetObj = itemSearch(itemList, targetWords[0], targetWords[1]) return targetObj def itemSearch( itemList, name, desiredNum="#1", typeList=[], sortList=False ): # noqa: C901 """ Often we need a fuzzy search lookup of items in a list (of class instances). Given a list, return an item that matches the name, number, and type. * Requires that instances have getName and getType methods * can be used for objects in room, items in inventory, etc """ logPrefix = "sea: " myitem = None debugItemSearch = False # strip out anything that's not a digit (i.e. 
number signs) desiredNum = int(re.sub("[^0-9]", "", str(desiredNum))) if sortList: itemList = itemSort(itemList) dLog( logPrefix + "Trying to search for item " + name + " #" + str(desiredNum) + " in " + str([x.describe() for x in itemList]), debugItemSearch, ) cnt = 0 for oneitem in itemList: dLog( logPrefix + "Checking item name " + oneitem.getName() + "...", debugItemSearch, ) if re.match("^" + name.lower(), oneitem.getName().lower()): # fuzzy cnt += 1 dLog( logPrefix + "item name " + oneitem.getName() + " matched. Checking type...", debugItemSearch, ) if len(typeList) > 0: if oneitem.getType().lower() not in typeList: dLog( logPrefix + "skipping item " + oneitem.getName() + " because it doesn't match type " + str(typeList), debugItemSearch, ) continue # skip if not the desired type else: dLog( logPrefix + "skipping typecheck for item " + oneitem.getName(), debugItemSearch, ) dLog( logPrefix + "Checking number for item name " + oneitem.getName() + " . Looking for #" + str(desiredNum), debugItemSearch, ) if cnt == desiredNum: # skip if not desired number dLog( logPrefix + "Found item " + oneitem.getName() + " matching number " + str(cnt), debugItemSearch, ) myitem = oneitem break else: dLog( logPrefix + "Could not find " + oneitem.getName() + " with matching number " + str(desiredNum), debugItemSearch, ) else: dLog( logPrefix + "Item " + oneitem.getName() + " did not match.", debugItemSearch, ) if myitem: dLog(logPrefix + "Found item " + myitem.getName(), debugItemSearch) return myitem # Set up global logger global logger try: logger # Test to see if logger is defined except NameError: # Set up the logger if it doesn't already exist logname = re.sub("\\..*$", "", str(basename(sys.argv[0])).lower()) if logname == "": logname = "generic" logger = getLogger(logname) logLocation = LOGDIR + "\\" + logname + ".log" print("Log: " + logLocation) logger.info("") logger.info("-----------------------------------------------------------") logger.info("Log start: " + logLocation) ``` #### File: sog/common/inventory.py ```python import inflect import re import textwrap from common.general import getRandomItemFromList, dLog, itemSort # from common.general import logger class Inventory: """ A generic inventory SuperClass * used by characters, creatures, and rooms * a char/creature inventory contains objects * a room inventory may contain objects and/or creatures""" _instanceDebug = False def __init__(self, id=0): # id is unused in this case, but super often passes id anyway self._inventory = [] self._invWeight = 0 self._maxweight = 0 self._invValue = 0 self._inventoryTruncSize = 12 self._instanceDebug = Inventory._instanceDebug return None def getInventory(self): dLog( "inv getInventory: " + self.getItemId() + " " + str(self) + " -- " + str(self._inventory), Inventory._instanceDebug, ) return self._inventory def getInventoryByType(self, type): matchList = [] for item in self._inventory: if item.getType() == type: matchList.append(item) return matchList def getInventoryWeight(self): self._setInventoryWeight() return self._invWeight def getInventoryValue(self): self._setInventoryValue() return self._invValue def setInventoryMaxWeight(self, num=0): self._maxweight = int(num) def getInventoryMaxWeight(self): return self._maxweight def setInventoryTruncSize(self, num=12): self._inventoryTruncSize = int(num) def getInventoryTruncSize(self): return self._inventoryTruncSize def addToInventory(self, item, maxSize=99999): if len(self.getInventory()) >= maxSize: return False self._inventory.append(item) 
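# keep the cached weight and value totals in sync with the item that was just added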
self._setInventoryWeight() self._setInventoryValue() return True def removeFromInventory(self, item): if item in self._inventory: self._inventory.remove(item) self._setInventoryWeight() self._setInventoryValue() return True def describeInventory( self, showIndex=False, markerAfter=0, markerTxt="", headerTxt="Inventory" ): """ Display inventory * showIndex - show the enumerated number in front of each item * markerAfter - add a separator after this many items * markerTxt - txt for the marker """ buf = headerTxt + ":\n" ROW_FORMAT = " " if showIndex: ROW_FORMAT += "({0:2}) " ROW_FORMAT += "{1:<60}\n" itemlist = "" for num, oneObj in enumerate(self._inventory): dmInfo = "(" + str(oneObj.getId()) + ")" itemlist += ROW_FORMAT.format(num, oneObj.describe() + self.dmTxt(dmInfo)) if markerAfter and num == (markerAfter - 1): if markerTxt == "": markerTxt = "items below will be truncated on exit" itemlist += "--- " + markerTxt + " ---\n" if itemlist: buf += itemlist else: buf += " Nothing\n" return buf def getDmMarkers(self, obj): buf = "(" + str(obj.getId()) + ")" if obj.isInvisible(): buf += "[INV]" if obj.isHidden(): buf += "[HID]" return buf def unique(self, sequence): """ Remove duplicates in a set without changing the order. code adapted from https://tinyurl.com/yc6atal8 """ seen = set() return [x for x in sequence if not (x in seen or seen.add(x))] def describeInvAsList(self, showDm, showHidden, showInvisible, sortList=False): """ show inventory items as compact list typically used by room object, as player sees it """ logPrefix = "describeInvAsList: " buf = "" dLog( logPrefix + "showDm=" + str(showDm) + " showHidden=" + str(showHidden) + " showInvisible=" + str(showInvisible), Inventory._instanceDebug, ) if sortList: invList = itemSort(self.getInventory()) else: invList = self.getInventory() dLog( logPrefix + "Orig - " + str([x.describe() for x in invList]), Inventory._instanceDebug, ) # create a list of items in inventory and a dict of related DM info dmDict = {} itemList = [] for oneitem in invList: itemStr = "" if ( (oneitem.isInvisible() and not showInvisible) or (oneitem.isHidden() and not showHidden) and not showDm ): dLog(logPrefix + "HID/INV: " + str(oneitem), Inventory._instanceDebug) pass else: itemStr += oneitem.getSingular() itemList.append(itemStr) dmInfo = self.getDmMarkers(oneitem) try: if re.match(dmInfo, dmDict[itemStr]): dmDict[itemStr] += dmInfo except KeyError: dmDict[itemStr] = dmInfo # instanciate inflect to help with grammar and punctuation inf = inflect.engine() dLog(logPrefix + "preSet - " + str(itemList), Inventory._instanceDebug) # create a list of unique items # uniqueItemNames = set(itemList) # set was messing up the order uniqueItemNames = self.unique(itemList) dLog(logPrefix + "postSet - " + str(uniqueItemNames), Inventory._instanceDebug) # create a list of items with their counts countedList = [] for name in uniqueItemNames: itemStr = "" itemCnt = itemList.count(name) if itemCnt == 1: # we just want the article, but inf.a returns the noun words = inf.a(name).split(" ", 1) itemStr += words[0] else: itemStr += inf.number_to_words(inf.num(itemCnt)) itemStr += " " + inf.plural_noun(name, itemCnt) if showDm: itemStr += dmDict[name] countedList.append(itemStr) dLog(logPrefix + "counted - " + str(uniqueItemNames), Inventory._instanceDebug) # join our list with commas and 'and' sightList = inf.join(countedList) # intelligently wrap the resulting string if sightList != "": buf = textwrap.fill(sightList, width=80) + "\n" dLog(logPrefix + buf, Inventory._instanceDebug) 
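# at this point buf holds the visible room contents as one comma-joined sentence
# (e.g. "a sword, two apples and a lantern"), word-wrapped to 80 columns;
# the example items here are illustrative, not taken from the game data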
return buf def clearInventory(self): """ remove everything from inventory """ self._inventory = [] def truncateInventory(self, num): """ remove everything from inventory that exceeds <num> items """ if not num: num = self._inventoryTruncSize del self._inventory[num:] def _setInventoryWeight(self): """ Calculate the weight of inventory """ self._invWeight = 0 for oneObj in list(self._inventory): self._invWeight += oneObj.getWeight() def _setInventoryValue(self): """ Calculate the value of inventory """ self._invValue = 0 for oneObj in list(self._inventory): self._invValue += oneObj.getValue() def inventoryWeightAvailable(self): weight = self.getInventoryMaxWeight() - self.getInventoryWeight() return int(weight) def canCarryAdditionalWeight(self, num): if self.inventoryWeightAvailable() >= int(num): return True return False def getRandomInventoryItem(self): if not self.getInventory(): return None return getRandomItemFromList(self.getInventory()) def autoPopulateInventory(self): """ should be overridden if needed """ def transferInventoryToRoom( self, roomObj, roomMsgFunct, persist=False, verbose=True ): logPrefix = "transferInventoryToRoom(" + str(roomObj.getId()) + "): " # We are madifying the inventory as we iterate through it, so we need # a copy of the list. selfInventory = self.getInventory().copy() selfInventoryCount = len(selfInventory) truncsize = self.getInventoryTruncSize() if selfInventoryCount == 0: return None dLog( logPrefix + "inventory of " + self.getItemId() + " -- " + str(selfInventory), Inventory._instanceDebug, ) for item in selfInventory: dLog( logPrefix + "------------ item - " + item.describe(), Inventory._instanceDebug, ) if persist: # On death, we want player's items to temporarily persist item.setPersistThroughOneRoomLoad(True) dLog( logPrefix + "Adding persist attribute to " + item.describe(), Inventory._instanceDebug, ) if self.getType() == "Character": self.unEquip(item) self.removeFromInventory(item) dLog( logPrefix + "Removing item " + item.describe() + " from " + self.getItemId() + "inventory", Inventory._instanceDebug, ) truncsize = 100 # don't want dead character items truncated if roomObj.addToInventory(item, maxSize=truncsize): dLog( logPrefix + "Adding item " + item.describe() + " to room inventory", Inventory._instanceDebug, ) if verbose: roomMsgFunct(roomObj, item.describe() + " falls to the floor\n") else: dLog( logPrefix + "Discarding item " + item.describe() + " instead of adding it to room inventory (trunc)", Inventory._instanceDebug, ) if verbose: roomMsgFunct( roomObj, item.describe() + "falls to the floor and rolls away" ) roomObj.save() # end transferInventoryToRoom ``` #### File: sog/common/ioLib.py ```python from common.general import Terminator, logger import common.globals import queue import re import sys class Spooler: """ Superclass for I/O spooling """ def __init__(self): self._inputStr = "" # user input buffer self._outputSpool = queue.Queue() # output buffer self._debugIO = False # Turn on/off debug logging self._maxPromptRetries = 10 # of times an input is retried def spoolOut(self, txt): """ Append to output buffer """ self._outputSpool.put(str(txt)) def getInputStr(self): """ Get command from input buffer """ return self._inputStr def setInputStr(self, iStr): """ Set the inputStr, replacing whatever was there before """ self._inputStr = str(iStr) def popOutSpool(self): """ Return string with entirety of outpool spool. 
Output spool is emptied """ data = "" while not self._outputSpool.empty(): data += self._outputSpool.get() return data def outputSpoolContains(self, str1): """ returns True if given string is in the output spool * since the queue doesn't have a way to peek at it, we end up emptying and replacing the queue. * This is inefficient, but since the use case is rare and the queue is always small, we don't care. Maybe someday we'll switch to a better queuing mechanism """ found = False if self._outputSpool.empty(): return False newQueue = queue.Queue() while not self._outputSpool.empty(): data = self._outputSpool.get() if re.search(str1, data): found = True newQueue.put(data) self._outputSpool = newQueue if found: return True return False def getMaxPromptRetries(self): return self._maxPromptRetries def promptForInput(self, promptStr, regex="", requirementsTxt=""): """ prompt for string input - return str or empty if none """ for x in range(1, self.getMaxPromptRetries()): # logger.debug("PromptForInput try " + str(x)) self.spoolOut(promptStr) oneStr = "" if self._sendAndReceive(): oneStr = self.getInputStr() else: logger.debug("S&R returned False") if oneStr == "" or not self.isRunning(): return "" elif regex == "" or re.search(regex, oneStr): return oneStr else: self.spoolOut(requirementsTxt) return "" # many of our menus are number driven, so use generic helper function. def promptForNumberInput( self, promptStr, maxNum=999999, minNum=0, requirementsTxt="" ): """ prompt for number input and return integer - -1 = failed """ if requirementsTxt == "": requirementsTxt = ( "Please select a number between " + str(minNum) + " and " + str(maxNum) + "\n" ) while True: self.spoolOut(promptStr) self._sendAndReceive() numStr = self.getInputStr() if re.match("^[0-9]+$", str(numStr)): if int(numStr) >= minNum and int(numStr) <= maxNum: return int(numStr) elif numStr == "": return -1 self.spoolOut(requirementsTxt) return -1 def promptForYN(self, promptStr): """ prompt for yes/no input - return True or False """ while True: self.spoolOut(promptStr + " [y/N]: ") self._sendAndReceive() oneStr = self.getInputStr() if oneStr == "y" or oneStr == "Y": return True if oneStr == "n" or oneStr == "N": return False return False def promptForCommand(self, promptStr=""): """ Prompt user for command """ if promptStr == "": promptStr = self.getCmdPrompt() self.spoolOut(promptStr) if self._sendAndReceive(): return True return False def getCmdPrompt(self): """ Default command prompt - expected to be overridden """ return "Input : " def broadcast(self, data, who="all", header=None): """ spool broadcast message * This is the simple terminal I/O version of the method * Originally written for the server, which has more complex functionality (i.e. broadcast to specific targets), but we wanted this to overrideable so that it can be used outside of the server (i.e. so things don't break when testing without sockets) """ if data[-1] != "\n": # Add newline if needed data += "\n" if not header: header = "--- broadcast to " + str(who) + ": " + str(data) self.spoolOut(header + data) return True def welcome(self, welcomeMsg=""): """ Welcome banner """ self.spoolOut(welcomeMsg) # pyfiglet_spec = importlib.util.find_spec("pyfiglet") # # if welcomeMsg == '': # self.spoolOut(self.txtBanner("Welcome") + "\n") # else: # self.spoolOut(welcomeMsg) # # if pyfiglet_spec: # welcomeMsg = pyfiglet.figlet_format(welcomeMsg, # noqa: F821 # font="slant") return None def txtBanner(self, msg, bChar="-"): """ return a string containing a banner. 
Default is like this: ----- mymessage ----- """ return "{0} {1} {0}".format(self.txtLine(lineChar=bChar, lineSize=5), msg) def txtLine(self, lineChar="-", lineSize=80): """ return a string containing a line line size and character are customizable Default is like this: ---------------------------------------------------------------- """ return lineChar * lineSize class LocalIo(Spooler): """ Local (terminal) implementation of the I/O spooler * _sendAndReceive uses stdin/stdout so it can run without sockets """ def __init__(self): super().__init__() def _sendAndReceive(self): """ Send data as output and receive input This is the simple "terminal" case for send/receive, but it can be overridden for client/server or for automated testing """ clientdata = "" dataToSend = self.popOutSpool() # get data from spool print(dataToSend, end="") # show data try: clientdata = input("") # accept input except ( KeyboardInterrupt, TypeError, AttributeError, KeyError, RecursionError, NameError, ): sys.exit(1) self.setInputStr(clientdata) # store input return True class TestIo(Spooler): """ Test implementation of the I/O spooler * _sendAndReceive is replaced so automated tests can run without sockets or interactive prompts """ def __init__(self): super().__init__() self._inputCmds = [] self._cmdCounter = 0 self._outputStr = "" def setInputs(self, inputList): self._inputCmds += inputList # queue the given commands in order def getOutput(self): return self._outputStr def _sendAndReceive(self): """ Send data as output and receive input This is the simple "terminal" case for send/receive, but it can be overridden for client/server or for automated testing """ # Simulate output # pop output off the spool and store it where we can retrieve it self._outputStr = self.popOutSpool() logger.info("testIo.sr output = " + self._outputStr) # Simulate input by using the next unused input command in _inputCmds if self._cmdCounter < len(self._inputCmds): cmd = self._inputCmds[self._cmdCounter] else: cmd = "exit" self._cmdCounter += 1 self.setInputStr(cmd) # store input logger.info("testIo.sr input = " + str(cmd)) return True class ServerIo(Spooler): def _sendAndReceive(self): # noqa: C901 """ All client input and output functions go through here * Override IOspool for client/server communication * send and receive are combined in a single transaction * Data to be sent comes from the outputSpool queue * Data received goes into the inputStr var """ clientdata = "" dataToSend = self.popOutSpool() if self.socket: try: # send the data if self._debugServer: logger.debug(str(self) + " SENDING:\n" + dataToSend) self.socket.sendall(str.encode(dataToSend)) if self._debugServer: logger.debug(str(self) + " SEND: Data Sent") except (ConnectionResetError, ConnectionAbortedError): self.terminateClientConnection() return False except IOError: pass try: if self._debugServer: logger.debug(str(self) + " REC: Waiting for input") clientdata = self.socket.recv(common.globals.BYTES_TO_TRANSFER) if self._debugServer: logger.debug(str(self) + " REC: " + str(clientdata.decode("utf-8"))) except (ConnectionResetError, ConnectionAbortedError): self.terminateClientConnection() return False except IOError: pass else: logger.debug(str(self) + " No socket to receive input from") return False if clientdata: clientdata = str(clientdata.decode("utf-8")) if clientdata == common.globals.NOOP_STR: # empty sends clientdata = "" if self._debugServer: logger.debug("Server received NO_OP from client") elif clientdata == common.globals.TERM_STR: # client shut down if self._debugServer:
logger.debug("Server received TERM_STR from client") self.terminateClientConnection() return False elif clientdata == common.globals.STOP_STR: # server shut down if self._debugServer: logger.debug("Server received STOP_STR from client") self.terminateClientConnection() raise Terminator return False self.setInputStr(clientdata) else: logger.debug(str(self) + " No clientdata returned") return False return True ``` #### File: sog/sog/game.py ```python import cmd from datetime import datetime import pprint import random import re from combat import Combat from common.ipc import Ipc from common.general import isIntStr, dateStr, logger, dLog from common.general import splitTargets, targetSearch, itemSort from common.general import getRandomItemFromList, secsSinceDate, getNeverDate from common.globals import maxCreaturesInRoom from common.help import enterHelp from creature import Creature from magic import Spell, SpellList, spellCanTargetSelf from object import ObjectFactory, isObjectFactoryType from room import RoomFactory, isRoomFactoryType, getRoomTypeFromFile class _Game(cmd.Cmd, Combat, Ipc): """ Single instance of the Game class, shared by all users (see instanciation magic at the bottom of the file)""" _instanceDebug = False def __init__(self): """ game-wide attributes """ self.instance = "Instance at %d" % self.__hash__() self._activeRooms = [] self._activePlayers = [] self._startdate = datetime.now() self._asyncThread = None self._instanceDebug = _Game._instanceDebug return None def debug(self): return pprint.pformat(vars(self)) def toggleInstanceDebug(self): self._instanceDebug = not self._instanceDebug def getInstanceDebug(self): return self._instanceDebug def getId(self): return self.instance def isValid(self): if self.getId() != "" and self._startdate < datetime.now(): return True return False def asyncTasks(self): """ Tasks that run in a separate thread with ~1 sec intervals """ self.asyncNonPlayerActions() self.asyncCharacterActions() def processDeadClients(self): True def joinGame(self, client): """ Perform required actions related to joining the game """ charObj = client.charObj if not charObj: logger.warn("Game: Character not defined - returning False") return False gameCmd = GameCmd(client) # each user gets their own cmd shell self.addToActivePlayerList(charObj) # in-game broadcast announcing game entry msg = self.txtBanner( "{} has entered the game at {}".format(charObj.getName(), dateStr("now")), bChar="=") self.gameMsg(msg + "\n") logger.info("JOINED GAME " + charObj.getId()) # add room to charObj and then display the room if self.joinRoom(1, charObj): self.charMsg(charObj, charObj.getRoom().display(charObj)) try: gameCmd.cmdloop() # start the game cmdloop finally: if client.charObj: self.leaveGame(client.charObj) return False def leaveGame(self, charObj, saveChar=True): """ Handle details of leaving a game """ self.leaveRoom(charObj) # remove character from game character list self.removeFromActivePlayerList(charObj) # final character save before throwing away charObj if saveChar: # saveChar is False when it's a suicide try: charObj.save(logStr=__class__.__name__) except AttributeError: logger.warning("Could not save character") # notification and logging msg = self.txtBanner( "{} has left the game at {}".format(charObj.getName(), dateStr("now")), bChar="=") self.gameMsg(msg + "\n") if charObj.client: charObj.client.spoolOut(msg + "\n") logger.info("LEFT GAME " + charObj.getId()) # Discard charObj if charObj.client: charObj.client.charObj = None charObj = None return True 
def getCharacterList(self): return self._activePlayers def addToActivePlayerList(self, charObj): """ add character to list of characters in game """ if charObj not in self.getCharacterList(): self._activePlayers.append(charObj) def removeFromActivePlayerList(self, charObj): """ remove character from list of characters in game """ if charObj in self.getCharacterList(): self._activePlayers.remove(charObj) def getActiveRoomList(self): return self._activeRooms def addToActiveRooms(self, roomObj): """ Add room to active room list """ if roomObj not in self.getActiveRoomList(): self._activeRooms.append(roomObj) return True def removeFromActiveRooms(self, roomObj): """ Remove room from active room list """ if self.isActiveRoom(roomObj): self._activeRooms.remove(roomObj) return True def isActiveRoom(self, roomObj): """ Return true if room is in active room list """ if roomObj in self.getActiveRoomList(): return True return False def getActiveRoom(self, num): """ Return the roomObj for an active room, given the room number """ for roomObj in self.getActiveRoomList(): if roomObj.getId() == num: return roomObj return None def activeRoomInfo(self): msg = "Active rooms: " + ", ".join( [x.getItemId() + "(" + str(x) + ")" for x in self.getActiveRoomList()] ) return msg def deActivateEmptyRoom(self, roomObj): """ deactiveates room if empty. Returns true if deactiveated """ if len(roomObj.getCharacterList()) == 0: self.removeFromActiveRooms(roomObj) return True return False def asyncCharacterActions(self): """ asyncronous actions that occur to players. """ for charObj in self.getCharacterList(): self.timeoutInactivePlayer(charObj) charObj.processPoisonAndRegen() def timeoutInactivePlayer(self, charObj, timeoutInSecs=300): """ kick character out of game if they have been inactive """ removeCharFromGame = False timeOutTxt = "You have timed out due to inactivity\n" if charObj.getInputDate() == getNeverDate(): # Ignore the timeout check if the input date has not been set yet # This is a timing issue in that the first run of the async loop # runs before the character is fully initialized with an input date. return(False) if secsSinceDate(charObj.getInputDate()) > timeoutInSecs: removeCharFromGame = True if not charObj.client.is_alive(): removeCharFromGame = True if removeCharFromGame: self.charMsg(charObj, timeOutTxt) logger.info("GAME TIMEOUT {}".format(charObj.getId())) self.leaveGame(charObj, saveChar=True) return(True) return(False) def asyncNonPlayerActions(self): """ asyncronous actions that are not tied to a player. 
""" for roomObj in self.getActiveRoomList(): if self.deActivateEmptyRoom(roomObj): continue self.creatureEncounter(roomObj) self.creaturesAttack(roomObj) return None def roomLoader(self, roomStr): """ returns a roomObj, given a roomStr """ logPrefix = "game.roomLoader (" + str(roomStr) + ") " roomObj = None roomType = "room" roomNum = 0 roomStr = str(roomStr) if isIntStr(roomStr): roomNum = int(roomStr) roomType = getRoomTypeFromFile(roomNum) elif "/" in roomStr: # if it's not a number, assume it's in the form: Room/35 roomType, roomNum = roomStr.split("/") if isIntStr(roomNum): roomNum = int(roomNum) if roomNum == 0: logger.error(logPrefix + "Room number is 0") return None else: logger.error(logPrefix + "Room number is invalid") return None # See if room is already active for oneroom in self.getActiveRoomList(): if oneroom.getRoomNum() == roomNum: # if the room alread exists roomObj = oneroom # use existing roomObj if not roomObj: roomObj = RoomFactory(roomType, roomNum) # instanciate room object roomObj.load(logStr=__class__.__name__) # load room from disk if roomObj is None: logger.error(logPrefix + "Room object is None") return roomObj # end roomLoader def joinRoom(self, roomThing, charObj): """ insert player into a room * can accept room number or roomObj * create or join room instance * add character to room instance * add room to character instance * add room to active rooms list * close spring loaded doors if room is empty # roomStr can be a room number or can be in the form Shop/35 """ roomObj = None if isinstance(roomThing, int) or isinstance(roomThing, str): roomObj = self.roomLoader(roomThing) elif isRoomFactoryType(roomThing.getType()): roomObj = roomThing if not roomObj: logger.error("joinRoom: Could not get roomObj") return False existingRoom = charObj.getRoom() if existingRoom: if existingRoom == roomObj: # if already in desired room return True # do nothing else: self.leaveRoom(charObj) # leave the previous room charObj.setRoom(roomObj) # Add room to character roomObj.addCharacter(charObj) # Add character to room self.addToActiveRooms(roomObj) # Add room to active room list return True def leaveRoom(self, charObj): """ Handle details of leaving a room * Remove room from active rooms list if it's empty * remove character from room instance * remove room from character instance * toDo - check if other players/creatures follow * toDo - notify others that character has left the room * toDo - stop/reassign attackers """ if not charObj: return False if not charObj.getRoom(): # There is no previous room, so just return return True if charObj.getRoom().getId() == 0: # Not a real room - just loaded? 
return True charObj.getRoom().removeCharacter(charObj) # remove charact from room # if room's character list is empty, remove room from activeRoomList if len(charObj.getRoom().getCharacterList()) == 0: self.removeFromActiveRooms(charObj.getRoom()) charObj.getRoom().removeNonPermanents(removeTmpPermFlag=False) charObj.getRoom().save() charObj.removeRoom() # Remove room from character return True def calculateObjectPrice(self, charObj, obj): """ return adjusted price for an object based on many factors """ if obj.isCursed(): return 1 price = obj.getValue() price = obj.adjustPrice(price) # object adjustment price = charObj.getRoom().adjustPrice(price) # room adjustment price = charObj.adjustPrice(price) # char adjust return price def getCorrespondingRoomObj(self, doorObj, activeOnly=False): """ Get the room object that correcponds to a door """ roomObj = self.getActiveRoom(doorObj.getToWhere()) if not roomObj: # If active room doesn't exist if not activeOnly: # Load room from disk into separate instance roomObj = self.roomLoader(doorObj.getToWhere()) else: roomObj = None return roomObj def modifyCorrespondingDoor(self, doorObj, charObj): """ When a door is opened/closed on one side, the corresponing door needs to be updated """ roomObj = self.getCorrespondingRoomObj(doorObj) if roomObj: for obj in roomObj.getInventory(): if obj.getId() == doorObj.getCorresspondingDoorId(): if doorObj.isClosed(): obj.close(charObj) else: obj.open(charObj) if doorObj.isLocked(): obj.lock() else: obj.unlock() roomObj.save() return True return True def buyTransaction( self, charObj, obj, price, prompt, successTxt="Ok.", abortTxt="Ok." ): """ buy an item """ roomObj = charObj.getRoom() if charObj.client.promptForYN(prompt): charObj.subtractCoins(price) # tax included charObj.addToInventory(obj) # add item if roomObj.getType() == "Shop": roomObj.recordTransaction(obj) # update stats roomObj.recordTransaction("sale/" + str(price)) charObj.recordTax(roomObj.getTaxAmount(price)) self.charMsg(charObj, successTxt) logger.info( "PURCHASE " + charObj.getId() + " bought " + obj.describe() + " for " + str(price) ) return True else: self.charMsg(charObj, abortTxt) return False def sellTransaction( self, charObj, obj, price, prompt, successTxt="Ok.", abortTxt="Ok." ): """ sell an item """ roomObj = charObj.getRoom() if charObj.client.promptForYN(prompt): charObj.removeFromInventory(obj) # remove item charObj.addCoins(price) # tax included if roomObj.getType() == "Shop": roomObj.recordTransaction(obj) # update stats roomObj.recordTransaction("purchase/" + str(price)) charObj.recordTax(roomObj.getTaxAmount(price)) self.charMsg(charObj, successTxt) logger.info( "SALE " + charObj.getId() + " sold " + obj.describe() + " for " + str(price) ) return True else: self.charMsg(charObj, abortTxt) return False def populateRoomCreatureCache(self, roomObj): """ Create a creature cache, so that we don't have to load the creatures every time we check for encounters. These creatures are never actually encountered. 
They just exist for reference """ debugPrefix = "game.populateRoomCreatureCache (" + str(roomObj.getId()) + "): " if len(roomObj.getCreatureCache()) == 0: dLog(debugPrefix + "Populating room creature cache", self._instanceDebug) # loop through all possible creatures for room and fill cache for ccNum in roomObj.getEncounterList(): ccObj = Creature(ccNum) ccObj.load() roomObj.creatureCachePush(ccObj) dLog(debugPrefix + "Cached " + ccObj.describe(), self._instanceDebug) def getEligibleCreatureList(self, roomObj): """ Determine which creatures, from the cache, can be encountered, by comparing their frequency attribute to a random roll. Fill a eligibleCreatureList with possible creatures for encounter. """ debugPrefix = "game.getEligibleCreatureList (" + str(roomObj.getId()) + "): " eligibleCreatureList = [] for ccObj in roomObj.getCreatureCache(): if ccObj.getFrequency() >= random.randint(1, 100): # Load creature to be encountered cObj = Creature(ccObj.getId()) cObj.load() eligibleCreatureList.append(cObj) dLog( debugPrefix + cObj.describe() + " is eligible", self._instanceDebug ) return eligibleCreatureList def creatureEncounter(self, roomObj): """ As an encounter, add creature to room Chance based on * room encounter rates and encounter list * creature frequency """ debugPrefix = "Game creatureEncounter (" + str(roomObj.getId()) + "): " if not roomObj.readyForEncounter(): # dLog(debugPrefix + 'Room not ready for encounter', # self._instanceDebug) return False if len(roomObj.getInventoryByType("Creature")) >= maxCreaturesInRoom: self.roomMsg( roomObj, "Others arrive, but wander off.\n", allowDupMsgs=False ) return False self.populateRoomCreatureCache(roomObj) eligibleCreatureList = self.getEligibleCreatureList(roomObj) creatureObj = getRandomItemFromList(eligibleCreatureList) if creatureObj: roomObj.addToInventory(creatureObj) dLog( debugPrefix + str(creatureObj.describe()) + " added to room", self._instanceDebug, ) self.roomMsg(roomObj, creatureObj.describe() + " has arrived\n") creatureObj.setEnterRoomTime() roomObj.setLastEncounter() return None def removeFromPlayerInventory(self, charObj, item, msg=""): """ display message and remove item from player's inventory * Has some canned responses, such as "disintegrate" """ if msg == "disint": msg = item.describe(article="The") + " disintegrates" if msg != "": self.charMsg(charObj, msg + "\n") # Remove item from player's inventory charObj.removeFromInventory(item) return None def txtBanner(self, msg, bChar="-"): """ return a string containing a banner. 
Default is like this: ----- mymessage ----- """ return "{0} {1} {0}".format(self.txtLine(lineChar=bChar, lineSize=5), msg) def txtLine(self, lineChar="-", lineSize=80): """ return a string containing a line line size and character are customizable Default is like this: ---------------------------------------------------------------- """ return lineChar * lineSize class GameCmd(cmd.Cmd): """ Game loop - separate one for each player * Uses cmd loop with do_<action> methods * if do_ methods return True, then loop exits """ def __init__(self, client=None): self.client = client if client: self.acctObj = client.acctObj self.gameObj = client.gameObj self.charObj = client.charObj else: self.acctObj = None self.gameObj = None self.charObj = None self._lastinput = "" self._instanceDebug = False def toggleInstanceDebug(self): self._instanceDebug = not self._instanceDebug def setInstanceDebug(self, val): self._instanceDebug = bool(val) def getInstanceDebug(self): return self._instanceDebug def getCmdPrompt(self): sp = "<" ep = ">" if self.charObj: promptsize = self.charObj.getPromptSize() else: promptsize = "full" if promptsize == "brief": promptStr = ep + " " else: promptStr = sp + "game" + ep + " " return promptStr def cmdloop(self): """ cmd method override - Game loop requires player to have character loaded """ stop = False line = "" self.preloop() while not stop: if self.client.promptForCommand(self.getCmdPrompt()): # send/recv line = self.client.getInputStr() stop = self.runcmd(line) else: stop = True self.postloop() def runcmd(self, cmd): """ workhorse of cmdloop * runcmd extracted from cmdloop so that tests can call it without prompting for input """ self._lastinput = cmd dLog("GAME cmd = " + cmd, self._instanceDebug) if self.precmd() == "stop": return True stop = self.onecmd(cmd) if self.postcmd(cmd) == "stop": return True return stop def preloop(self): """ functionality that get run once before the input loop begins """ # Set the input date when first entering the game. Required for timeout # to work properly on characters that never input a command. self.charObj.setInputDate() def precmd(self): """ cmd method override """ # If charater has timed out or been booted from the game # terminate the command loop. 
if self.charObj not in self.gameObj.getCharacterList(): return("stop") self.charObj.setInputDate() if self.lastcmd != "": self.charObj.setLastCmd(self.lastcmd) return(False) def postcmd(self, line): """ cmd method override """ if self.charObj: # doesn't exist if there is a suicide self.charObj.save(logStr=__class__.__name__) return(False) def emptyline(self): """ cmd method override """ return False def default(self, line): """ cmd method override """ logger.warn("*** Invalid game command: %s\n" % line) self.charObj.client.spoolOut("Invalid Command\n") def getLastCmd(self): """ Returns the first part of the last command """ return self.lastcmd.split(" ", 1)[0] def missingArgFailure(self): """ Print missing arg message and return False """ self.selfMsg(self.getLastCmd() + " what?\n") return False def getObjFromCmd(self, itemList, cmdline): """ Returns a list of target Items, given the full cmdargs """ targetItems = [] for target in splitTargets(cmdline): obj = targetSearch(itemList, target) if obj: targetItems.append(obj) targetItems += [None] * 2 # Add two None items to the list return targetItems def getCombatTarget(self, line): """ All combat commands need to determine the target """ charObj = self.charObj roomObj = charObj.getRoom() creatureList = roomObj.getInventoryByType("Creature") targetList = self.getObjFromCmd(creatureList, line) target = targetList[0] if not target: # Re-use old target if it still exists lastTarget = charObj.getCurrentlyAttacking() if lastTarget: if lastTarget in creatureList: target = lastTarget if not target: if line == "": self.selfMsg("No target.\n") else: self.selfMsg(line + " is not a valid target.\n") return None return target def parseSpellArgs(self, line): charObj = self.charObj roomObj = charObj.getRoom() charObjList = charObj.getInventory() roomInv = roomObj.getCharsAndInventory() targetList = self.getObjFromCmd(charObjList + roomInv, line) spellItem = None spellName = "" targetObj = None if self.getLastCmd() == "cast": # When casting a spell, there is no spellItem, so the first item # in the list is the target if len(targetList) >= 1: targetObj = targetList[0] spellName = line.split(" ", 1)[0] else: # When using a magic item, the first magic item encountered is the # spellItem and the next, if any, is the target for target in targetList: if not target: continue if not target.isMagicItem(): continue if not spellItem: spellItem = target if not targetObj: targetObj = target break if spellItem: spellName = spellItem.getSpellName() if spellName != "": if not targetObj and spellCanTargetSelf(spellName): targetObj = charObj return (spellItem, spellName, targetObj) def getFollowerList(self, charObj, roomObj=None): """ Returns list of characters from room that are following character """ followerList = [] if not roomObj: roomObj = charObj.getRoom() for onechar in roomObj.getCharacterList(): if onechar is charObj: # ignore self continue if onechar.getFollow() is charObj: followerList.append(onechar) return followerList def selfMsg(self, msg): """ send message using Game communnication. This simply allows us to call it without passing the extra arg) """ return self.gameObj.charMsg(self.charObj, msg) def othersMsg(self, roomObj, msg, ignore): """ send message using Game communnication. 
This simply allows us to call it without passing the extra arg) """ return self.gameObj.othersInRoomMsg(self.charObj, roomObj, msg, ignore) def moveDirection(self, charObj, direction): """ move subcommand - move in one of the the basic directions """ dLog("GAME move dir = " + direction, self._instanceDebug) exitDict = charObj.getRoom().getExits() if direction not in exitDict.keys(): self.selfMsg("You can't move in that direction!\n") return False destRoomNum = exitDict[direction] roomObj = self.gameObj.roomLoader(destRoomNum) if not roomObj: logger.error("Could not create roomObj " + str(destRoomNum) + ".") return False if not roomObj.canBeJoined(charObj): logger.error(roomObj.getId() + " can not be joined.") return False if self.gameObj.joinRoom(roomObj, charObj): return True else: logger.error("joinRoom Failed\n") return False return False def moveThroughPortalOrDoor(self, charObj, itemObj): """ move subcommand - move through door or portal """ if not itemObj: # no object - take no action self.selfMsg("That is not somewhere you can go!\n") return False if not itemObj.canBeEntered(charObj): self.selfMsg("You can't go there!\n") return False if itemObj.hasToll(): toll = itemObj.getToll() if charObj.canAffordAmount(toll): charObj.subtractCoins(toll) self.selfMsg("You paid a toll of {} coins.".format(toll)) else: self.selfMsg("Opening this item requires more coins than you have\n") return False dLog( "GAME move through obj = {}".format(itemObj.describe()), self._instanceDebug ) roomnum = itemObj.getToWhere() roomObj = self.gameObj.roomLoader(roomnum) if roomObj: if roomObj.canBeJoined(charObj): if self.gameObj.joinRoom(roomnum, charObj): return True else: logger.error("joinRoom Failed\n") else: logger.error(roomnum + " can not be joined") else: logger.error("Could not create roomObj " + roomnum) return False def move(self, line): """ move character from one room to another """ cmdargs = line.split(" ") charObj = self.charObj moved = False currentRoom = charObj.getRoom() oldRoom = charObj.getRoom() if currentRoom.isDirection(cmdargs[0]): # if command is a direction moved = self.moveDirection(charObj, cmdargs[0]) # Folks in the old room should see the player leave, unless hidden msg = "{} went {}\n".format( charObj.getName(), currentRoom.directionNameDict[cmdargs[0]]) self.othersMsg(oldRoom, msg, charObj.isHidden()) else: # handle doors and Portals itemList = self.getObjFromCmd(currentRoom.getInventory(), line) moved = self.moveThroughPortalOrDoor(charObj, itemList[0]) currentRoom = charObj.getRoom() arrivedMsg = "{} has arrived\n" if moved: # creatures in old room should stop attacking player self.gameObj.unAttack(oldRoom, charObj) # character possibly loses hidden charObj.possibilyLoseHiddenWhenMoving() self.selfMsg(charObj.getRoom().display(charObj)) # Folks in the new room should see the player arrive, unless hidden msg = arrivedMsg.format(charObj.getName()) self.othersMsg(currentRoom, msg, charObj.isHidden()) # Handle followers that are moving along with primary character for onechar in self.getFollowerList(charObj, oldRoom): if onechar.continuesToFollow(charObj): self.gameObj.joinRoom(currentRoom, onechar) self.gameObj.charMsg(onechar, onechar.getRoom().display(onechar)) msg = arrivedMsg.format(onechar.getName()) self.gameObj.othersInRoomMsg( onechar, currentRoom, msg, charObj.isHidden()) else: # Follower loses sight of leader and is no longer following onechar.setFollow() return True else: self.selfMsg("You can not go there!\n") return False def useObject(self, obj, line): """ Call 
method for using an object, based on its type/attributes """ if not obj: logger.error("game.useObject: Could not use a non-existent obj") return False if not isObjectFactoryType(obj.getType()): logger.error("game.useObject: Could not use a non-obj obj") return False if not obj.isUsable(): self.selfMsg(obj.describe(article="The") + " is not usable\n") return False if obj.isEquippable(): if self.charObj.equip(obj): self.selfMsg("Ok\n") else: self.selfMsg("You can't equip that\n") elif obj.isMagicItem(): self.useMagicItem(line) def useMagicItem(self, line): if line == "": return self.missingArgFailure() (spellItem, spellName, targetObj) = self.parseSpellArgs(line) if not spellItem: return self.missingArgFailure() if not targetObj: self.selfMsg("Invalid target for spell: " + spellName + "\n") return False if spellItem.getType().lower() == "scroll": spellItem.readScroll(self.charObj, targetObj) # Note: A read scroll will already display the disintegrates # message via the item's cast method. Don't add it here. self.gameObj.removeFromPlayerInventory(self.charObj, spellItem) else: spellItem.cast(self.charObj, targetObj) return None def parseIpc(self, line): roomObj = self.charObj.getRoom() lastCmd = self.getLastCmd() target = None msg = "" # Get recipient, if any possibleRecipients = [] if lastCmd == "whisper": possibleRecipients = roomObj.getCharacterList() elif lastCmd == "send": possibleRecipients = self.gameObj.getCharacterList() # elif lastCmd in ['say', 'yell', 'shout', 'broadcast']: # target = None if len(possibleRecipients) > 0: targetList = self.getObjFromCmd(possibleRecipients, line) if targetList[0]: target = targetList[0] if re.search("[^ ]+ [^ ]+", line): # todo: fix this if target is more than one word. # i.e. Player #1. junk, msg = line.split(" ", 1) if msg == "": msg = self.client.promptForInput(lastCmd + " what? ") return (target, msg) def do_accept(self, line): """ transaction - accept an offer """ self.selfMsg(line + " not implemented yet\n") def do_action(self, line): """ communication - fun in-room communication """ charObj = self.charObj roomObj = charObj.getRoom() if line == "": self.selfMsg("Usage: action <txt>\n") return False msg = charObj.getName() + " " + line self.gameObj.roomMsg(roomObj, msg + "\n") logger.info(msg) charObj.setHidden(False) def do_appeal(self, line): """ ask DMs for help """ self.selfMsg(line + " not implemented yet\n") def do_att(self, line): """ combat - alias """ return self.do_attack(line) def do_attack(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Attack what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_auction(self, line): """ alias - sell """ return self.do_sell(line) def do_backstab(self, line): """ combat """ # monster gets double damage on next attack target = self.getCombatTarget(line) if not target: self.selfMsg("Backstab what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_balance(self, line): """ info - view bank balance when in a bank """ charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a bank\n") return False if not roomObj.isBank(): self.selfMsg("You can't do that here.
Find a bank.\n") return False amount = charObj.getBankBalance() self.selfMsg("Your account balance is " + str(amount) + " shillings.\n") def do_block(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Block what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_break(self, line): """ alias - smash """ return self.do_smash(line) def do_bribe(self, line): """ transaction - bribe a creature to vanish """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if len(cmdargs) < 2: self.selfMsg("Try 'bribe <creature> <amount>'\n") return False if not isIntStr(cmdargs[1]): self.selfMsg("How many shillings are you trying to bribe with?'\n") return False creatureName = cmdargs[0] coins = int(cmdargs[1]) roomCreatureList = roomObj.getCreatureList() itemList = self.getObjFromCmd(roomCreatureList, creatureName) if not itemList[0]: self.selfMsg("Who are you trying to bribe?\n") return False creatureObj = itemList[0] if creatureObj: if creatureObj.acceptsBribe(charObj, coins): # Bribe succeeds - money is already subtracted self.selfMsg( creatureObj.describe(article="The") + " accepts your offer and leaves\n" ) roomObj.removeFromInventory(creatureObj) return False else: # Bribe failed - contextual response already provided charObj.setHidden(False) return False def do_brief(self, line): """ set the prompt and room description to least verbosity """ self.charObj.setPromptSize("brief") def do_broadcast(self, line): """ communication - send to everyone in the game * players are limited to X broadcasts per day (currently 5) * log broadcasted messages, in case of abuse. """ if not self.charObj.getLimitedBroadcastCount(): self.selfMsg("You have used all of your broadcasts for today\n") return False if line == "": msg = self.client.promptForInput("Enter Input: ") else: msg = line if msg != "": fullmsg = self.charObj.getName() + " broadcasted, '" + msg + "'" if self.gameObj.gameMsg(fullmsg + "\n"): logger.info(fullmsg) self.charObj.reduceLimitedBroadcastCount() else: self.selfMsg("Message not received\n") def do_buy(self, line): """ transaction - buy something from a vendor """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a vendor\n") return False if not roomObj.isVendor(): self.selfMsg("You can't do that here. Find a vendor\n") return False if len(cmdargs) < 1 or not isIntStr(cmdargs[0]): self.selfMsg("usage: buy <item> [#]\n") return False catList = roomObj.getCatalog() if int(cmdargs[0]) < 0 or int(cmdargs[0]) > (len(catList)) - 1: self.selfMsg("Bad item number. Aborted\n") return False catItem = catList[int(cmdargs[0])] oType, oNum = catItem.split("/") itemObj = ObjectFactory(oType, oNum) itemObj.load() price = self.gameObj.calculateObjectPrice(charObj, itemObj) # check if player has the funds if not charObj.canAffordAmount(price): self.selfMsg(roomObj.getCantAffordTxt()) return False # check if player can carry the Weight weight = itemObj.getWeight() if not charObj.canCarryAdditionalWeight(weight): self.selfMsg(roomObj.getCantCarryTxt(weight)) return False # prompt player for confirmation prompt = ( "You are about to spend " + str(price) + " shillings for " + itemObj.getArticle() + " " + itemObj.getName() + ". Proceed?" 
) successTxt = roomObj.getSuccessTxt() abortTxt = roomObj.getAbortedTxt() self.gameObj.buyTransaction( charObj, itemObj, price, prompt, successTxt, abortTxt ) def do_cast(self, line): """ magic """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if len(cmdargs) < 1 or cmdargs[0] == "": self.selfMsg("Cast what spell?\n") return False spellName = cmdargs[0] line = line[len(spellName):].strip() # remove the leading spell name (str.lstrip strips characters, not a prefix) if spellName not in SpellList: self.selfMsg("That's not a valid spell.\n") return False if not charObj.knowsSpell(spellName): self.selfMsg("You haven't learned that spell.\n") return False if len(cmdargs) > 1: possibleTargets = charObj.getInventory() + roomObj.getCharsAndInventory() targetList = self.getObjFromCmd(possibleTargets, line) if targetList[0]: targetObj = targetList[0] else: self.selfMsg("Could not determine target for spell.\n") return False else: targetObj = self.charObj spellObj = Spell(charObj, targetObj, spellName) # Apply effects of spell spellObj.cast(roomObj) def do_catalog(self, line): """ info - get the catalog of items from a vendor """ charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a vendor\n") return False if not roomObj.isVendor(): self.selfMsg("You can't do that here. Find a vendor\n") return False # display list by iterating, loading, & displaying objs itemBuf = "" ROW_FORMAT = " ({0:2}) {1:<7} {2:<60}\n" for num, oneitem in enumerate(roomObj.getCatalog()): oType, oNum = oneitem.split("/") itemObj = ObjectFactory(oType, oNum) itemObj.load() # calculate price price = self.gameObj.calculateObjectPrice(charObj, itemObj) itemBuf += ROW_FORMAT.format(num, price, itemObj.describe()) if itemBuf != "": self.selfMsg( "Catalog of items for sale\n" + ROW_FORMAT.format("#", "Price", "Description") + itemBuf ) def do_circle(self, line): """ combat - If creature is not attacking, then delay their first attack by X seconds """ target = self.getCombatTarget(line) if not target: self.selfMsg("Circle what?\n") return False if target.isAttacking(): self.selfMsg("You can't circle an attacking creature\n") return False self.gameObj.circle(self.charObj, target, self.getLastCmd()) self.selfMsg("Ok.\n") return False def do_climb(self, line): """ alias - go """ return self.do_go(line) def do_clock(self, line): """ info - time """ self.selfMsg(dateStr("now") + "\n") def do_close(self, line): """ close a door or container """ charObj = self.charObj roomObj = charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getInventory(), line) if not itemList[0]: self.selfMsg("usage: close <item> [number]\n") return False targetObj = itemList[0] if not targetObj.isClosable(charObj): if targetObj.isClosed(): self.selfMsg("It's already closed.\n") else: self.selfMsg("You can not close that!\n") return False if targetObj.close(charObj): self.selfMsg("Ok\n") if targetObj.getType() == "Door": self.gameObj.modifyCorrespondingDoor(targetObj, charObj) return False else: self.selfMsg( "You can not close " + targetObj.describe(article="the") + "\n" ) return False def do_d(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_debug(self, line): """ dm - show raw debug info about an item/room/character/etc """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if not charObj.isDm(): self.selfMsg("Unknown Command\n") return False if len(cmdargs) == 0 or cmdargs[0] == "": self.selfMsg("usage: debug <room | self | object>") return False buf = "" if cmdargs[0].lower() == "room": buf += "=== Debug Info for Room " +
str(roomObj.getId()) + " ===\n" buf += roomObj.debug() + "\n" elif cmdargs[0].lower() == "game": buf += "=== Debug Info for game ===\n" buf += self.gameObj.debug() + "\n" elif cmdargs[0].lower() == "self": buf += "=== Debug Info for Self " + str(charObj.getId()) + " ===\n" buf += charObj.debug() + "\n" else: itemList = self.getObjFromCmd( roomObj.getCharsAndInventory() + charObj.getInventory(), line ) if itemList[0]: buf += ( "=== Debug Info for Object " + str(itemList[0].getId()) + " ===\n" ) buf += itemList[0].debug() + "\n" self.selfMsg(buf) return None def do_deposit(self, line): """ transaction - make a deposit in the bank """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a bank\n") return False if not roomObj.isBank(): self.selfMsg("You can't do that here. Find a bank\n") return False if len(cmdargs) < 1 or not isIntStr(cmdargs[0]): self.selfMsg("usage: deposit <amount>\n") return False # check if player has the funds amount = int(cmdargs[0]) if not charObj.canAffordAmount(amount): self.selfMsg(roomObj.getCantAffordTxt(amount)) return False taxRate = roomObj.getTaxRate() bankfee, dAmount = charObj.calculateBankFees(amount, taxRate) prompt = ( "You are about to deposit " + str(amount) + " shillings into the bank.\n" ) if taxRate != 0: prompt += ( "The bank charges " + "a " + str(taxRate) + "% deposit fee which comes to a " + str(bankfee) + " shilling charge.\n" + "Your account will increase by " + str(dAmount) + " shillings.\n" ) prompt += "Continue?" if self.client.promptForYN(prompt): charObj.bankDeposit(amount, taxRate) roomObj.recordTransaction("deposit/" + str(dAmount)) roomObj.recordTransaction("fees/" + str(bankfee)) self.selfMsg(roomObj.getSuccessTxt()) return False else: self.selfMsg(roomObj.getAbortedTxt()) return False def do_destroy(self, line): """ dm - destroy an object or creature """ if not self.charObj.isDm(): self.selfMsg("Unknown Command\n") return False charObj = self.charObj roomObj = charObj.getRoom() roomObjList = self.getObjFromCmd(roomObj.getInventory(), line) if roomObjList[0]: roomObj.removeObject(roomObjList[0]) roomObj.save() self.selfMsg("ok\n") return False charObjList = self.getObjFromCmd(charObj.getInventory(), line) if charObjList[0]: roomObj.removeFromInventory(charObjList[0]) self.selfMsg("ok\n") return False def do_dminfo(self, line): """ dm - show char info that isn't directly avaliable to players """ if not self.charObj.isDm(): return False self.selfMsg(self.charObj.dmInfo()) def do_dm_on(self, line): """ admin - Turn DM mode on """ if self.acctObj.isAdmin(): self.charObj.setDm() self.selfMsg("ok\n") logger.info("{} just became a DM".format(self.charObj.getName())) def do_dm_off(self, line): """ dm - turn dm mode off """ if self.charObj.isDm(): self.charObj.removeDm() self.selfMsg("ok\n") else: self.selfMsg("Unknown Command\n") def do_down(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_draw(self, line): """ alias - use """ return self.do_use(line) def do_drink(self, line): """ alias - use """ return self.do_use(line) def do_drop(self, line): """ drop an item """ charObj = self.charObj roomObj = charObj.getRoom() charObjList = charObj.getInventory() targetList = self.getObjFromCmd(charObjList, line) if not targetList[0]: self.selfMsg("What are you trying to drop?\n") return False if charObj.removeFromInventory(targetList[0]): charObj.unEquip(targetList[0]) roomObj.addObject(targetList[0]) 
self.selfMsg("Ok\n") else: self.selfMsg("Didn't work\n") def do_e(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_east(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_echo(self, line): self.selfMsg(line + " not implemented yet\n") def do_enter(self, line): """ alias - go """ if line == "": return self.missingArgFailure() self.move(line) def do_equip(self, line): """ alias - use """ return self.do_use(line) def do_examine(self, line): """ alias - look """ return self.do_look(line) def do_exit(self, line): """ exit game - returns True to exit command loop """ return True def do_exp(self, line): self.selfMsg(self.charObj.expInfo()) def do_experience(self, line): """ info - show character's exp info """ self.selfMsg(self.charObj.expInfo()) def do_feint(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Feint at what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_file(self, line): """ info - show characters attached to account """ self.selfMsg(self.acctObj.showCharacterList()) def do_follow(self, line): """ follow another player - follower is moved when they move """ charObj = self.charObj roomObj = charObj.getRoom() charList = self.getObjFromCmd(roomObj.getCharacterList(), line) targetCharObj = charList[0] if not targetCharObj or not targetCharObj.getType() == "Character": self.selfMsg("You can't follow that\n") charObj.setFollow() # Unset follow attribute return False if targetCharObj is charObj: self.selfMsg("You can't follow yourself\n") charObj.setFollow() # Unset follow attribute return False charObj.setFollow(targetCharObj) self.selfMsg("ok\n") if not charObj.isHidden(): self.gameObj.charMsg(targetCharObj, "{} follows you\n".format(charObj.getName())) return(False) def do_full(self, line): """ set the prompt and room descriptions to maximum verbosity """ self.charObj.setPromptSize("full") def do_get(self, line): # noqa: C901 """ pick up an item """ charObj = self.charObj roomObj = charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getInventory(), line) itemObj = itemList[0] containerObj = itemList[1] if not itemObj: return self.missingArgFailure() if itemObj.getType() == "Container": if containerObj: # Player is trying to put a container from the room into a # container in the room. 
Let's just say no to that self.selfMsg("You can't put a container in a container\n") return False else: # The 1st item was not found, so the container is the 1st item containerObj = itemObj if containerObj: if not containerObj.getType() == "Container": self.selfMsg("That's not a container?\n") return False # Find target item in the container cList = self.getObjFromCmd(containerObj.getInventory(), line) itemObj = cList[0] if not itemObj: self.selfMsg("Put what in there?\n") return False if not itemObj.isCarryable(): self.selfMsg(itemObj.describe() + " can not be carried.\n") return False if not charObj.canCarryAdditionalWeight(itemObj.getWeight()): self.selfMsg("You are not strong enough.\n") return False guardingCreatureObj = roomObj.getGuardingCreature() if guardingCreatureObj: self.selfMsg( guardingCreatureObj.describe() + " blocks you from taking that.\n" ) return False if containerObj: if containerObj.withdraw(charObj, itemObj): self.selfMsg("ok\n") else: # Get item from room roomObj.removeObject(itemObj) if itemObj.getType() == "Coins": charObj.addCoins(itemObj.getValue()) else: charObj.addToInventory(itemObj) self.selfMsg("Ok\n") def do_go(self, line): """ go through a door or portal """ if line == "": self.selfMsg("Go where?\n") self.move(line) def do_goto(self, line): """ dm - teleport directly to a room """ cmdargs = line.split(" ") charObj = self.charObj if not self.charObj.isDm(): self.selfMsg("Unknown Command\n") return False if len(cmdargs) == 0: self.selfMsg("usage: goto <room>\n") return False self.gameObj.joinRoom(cmdargs[0], charObj) self.selfMsg(charObj.getRoom().display(charObj)) def do_h(self, line): """ alias - health """ charObj = self.charObj self.selfMsg(charObj.healthInfo()) def do_hea(self, line): """ alias - health """ charObj = self.charObj self.selfMsg(charObj.healthInfo()) def do_health(self, line): """ info - show character's health """ charObj = self.charObj self.selfMsg(charObj.healthInfo()) def do_help(self, line): """ info - enter the help system """ enterHelp(self.client) def do_hide(self, line): """ attempt to hide player or item * hidden players aren't attacked by creatures and don't show up in room listings unless they are searched for. * hidden items don't show up in room listings. """ # cmdargs = line.split(' ') charObj = self.charObj if line == "": canhide = True # can't hide if there are engaged creatures in the room, even if # they are attacking someone else. 
for creatObj in charObj.getRoom().getCreatureList(): if creatObj.isAttacking(): canhide = False if canhide: charObj.attemptToHide() msg = "You hide in the shadows" else: msg = "You are noticed as you hide in the shadows" charObj.setHidden(False) if charObj.isDm(): msg += "(" + str(charObj.isHidden()) + ")" self.selfMsg(msg + "\n") else: self.selfMsg(line + " not implemented yet\n") def do_hint(self, line): self.selfMsg(line + " not implemented yet\n") def do_hit(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Hit what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_hold(self, line): """ alias - use """ return self.do_use(line) def do_identify(self, line): """ info - Show detailed information about a item or character * this is considered a limited use spell """ self.selfMsg(line + " not implemented yet\n") def do_info(self, line): """ alias - information """ self.selfMsg(self.charObj.getInfo()) def do_information(self, line): """ info - show all information about a character to that character """ self.selfMsg(self.charObj.getInfo()) def do_inv(self, line): """ alias - inventory """ self.selfMsg(self.charObj.inventoryInfo()) def do_inventory(self, line): """ info - show items that character is carrying """ self.selfMsg(self.charObj.inventoryInfo()) def do_kill(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Kill what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_laugh(self, line): """ communication - reaction """ charObj = self.charObj roomObj = charObj.getRoom() extramsg = "" if line != "": extramsg = " " + line self.gameObj.roomMsg(roomObj, charObj.getName() + " laughs" + extramsg + "\n") charObj.setHidden(False) def do_list(self, line): """ alias - file """ return self.do_catalog(line) def do_lock(self, line): """ lock an object with a key """ charObj = self.charObj roomObj = charObj.getRoom() roomObjList = roomObj.getInventory() fullObjList = charObj.getInventory() + roomObjList itemList = self.getObjFromCmd(fullObjList, line) itemObj = itemList[0] keyObj = itemList[1] if not itemList[0]: return self.missingArgFailure() if not keyObj: self.selfMsg("You can't lock anything without a key\n") return False if not itemObj.isLockable(): if itemObj.isLocked(): self.selfMsg("It's already locked!\n") elif itemObj.isOpen(): self.selfMsg("You can't lock it when it's open!\n") else: self.selfMsg("This is not lockable!\n") return False if keyObj.getLockId() != itemObj.getLockId(): self.selfMsg("The key doesn't fit the lock\n") return False itemObj.lock() if itemObj.getType() == "Door": self.gameObj.modifyCorrespondingDoor(itemObj, charObj) self.selfMsg("Ok\n") return False def do_look(self, line): """ examine a creature, object, or player * includes items in both the room and in the character inventory """ roomObj = self.charObj.getRoom() # Experimenting with sorting. 
Not sure if we want this, so we have a # Flag for now sortList = False if sortList: allItems = itemSort(roomObj.getCharsAndInventory()) + itemSort( self.charObj.getInventory() ) else: allItems = roomObj.getCharsAndInventory() + self.charObj.getInventory() itemList = self.getObjFromCmd(allItems, line) if line == "": # display the room msg = roomObj.display(self.charObj) if not re.search("\n$", msg): msg += "\n" self.selfMsg(msg) return False if not itemList[0]: self.selfMsg("You must be blind because you " + "don't see that here\n") return False msg = itemList[0].examine() if not re.search("\n$", msg): msg += "\n" # append newline if needed self.selfMsg(msg) # display the object return False def do_lose(self, line): """ attempt to ditch someone that is following you """ roomObj = self.charObj.getRoom() charList = self.getObjFromCmd(roomObj.getCharacterList(), line) targetCharObj = charList[0] if not targetCharObj: self.selfMsg("You can't lose that\n") return False # Need to determine if lose succeeds, based on odds targetCharObj.setFollow(None) self.selfMsg("ok\n") # Notify target that they have been lost self.gameObj.charMsg(targetCharObj, "{} loses you".format(self.charObj.getName())) return False def do_lunge(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Lunge at what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_n(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_north(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_now(self, line): """ alias - clock """ return self.do_clock() def do_o(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_offer(self, line): """ transaction - offer player money/items [in return for $/items] """ self.selfMsg(self.getLastCmd() + " not implemented yet\n") def do_open(self, line): """ Open a door or a chest """ charObj = self.charObj roomObj = charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getInventory(), line) if not itemList[0]: return self.missingArgFailure() itemObj = itemList[0] if not itemObj.isOpenable(charObj): if itemObj.isOpen(): self.selfMsg("It's already open.\n") elif itemObj.isLocked(): self.selfMsg("You can't. 
It's locked.\n") else: self.selfMsg("You can't open that.\n") return False if itemObj.getType() == "Container": if itemObj.hasToll(): toll = itemObj.getToll() if charObj.canAffordAmount(toll): charObj.subtractCoins(toll) self.selfMsg("You paid a toll of {} coins.".format(toll)) else: self.selfMsg( "Opening this item requires more coins than you have\n" ) return False if itemObj.open(charObj): self.selfMsg("You open it.\n") self.othersMsg( roomObj, charObj.getName() + " opens the " + itemObj.getSingular() + "\n", charObj.isHidden(), ) if itemObj.getType() == "Door": self.gameObj.modifyCorrespondingDoor(itemObj, charObj) return False else: self.selfMsg("You fail to open the door.\n") return False def do_out(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_panic(self, line): """ alias - run """ self.selfMsg(line + " not implemented yet\n") def do_parley(self, line): """ communication - talk to a npc """ charObj = self.charObj roomObj = charObj.getRoom() roomCreatureList = roomObj.getCreatureList() itemList = self.getObjFromCmd(roomCreatureList, line) if not itemList[0]: self.selfMsg(self.getLastCmd() + " with whom?\n") return False creat1 = itemList[0] msg = creat1.getParleyTxt() + "\n" if creat1.getParleyAction().lower() == "teleport": self.selfMsg(msg) self.gameObj.joinRoom(creat1.getParleyTeleportRoomNum(), charObj) elif creat1.getParleyAction().lower() == "sell": saleItem = creat1.getParleySaleItem() if saleItem: price = int(saleItem.getValue() * 0.9) prompt = ( msg + " Would you like to buy " + saleItem.describe() + " for " + price + "?" ) successTxt = ( "It's all yours. Don't tell anyone " + "that you got it from me" ) abortTxt = "Another time, perhaps." self.gameObj.buyTransaction( charObj, saleItem, price, prompt, successTxt, abortTxt ) else: self.selfMsg("I have nothing to sell.\n") else: self.selfMsg(msg) charObj.setHidden(False) def do_parry(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Parry at what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_pawn(self, line): """ alias - sell """ return self.do_sell(list) def do_picklock(self, line): """ attempt to pick the lock on a door or container and open it """ charObj = self.charObj roomObj = charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getInventory(), line) if not itemList[0]: self.selfMsg("pick which item with a lock?\n") return False itemObj = itemList[0] if not itemObj.isPickable(): self.selfMsg("You can't pick that.\n") return False if itemObj.pick(charObj): self.selfMsg("You pick the lock.\n") self.othersMsg( roomObj, charObj.getName() + " picks the " + "lock on the " + itemObj.getSingular() + "\n", charObj.isHidden(), ) return False else: self.selfMsg("You fail to pick the lock.\n") self.othersMsg( roomObj, charObj.getName() + " fails to pick the lock on the " + itemObj.getSingular() + "\n", charObj.isHidden(), ) return False return False def do_prompt(self, line): """ set verbosity """ self.charObj.setPromptSize("") def do_purse(self, line): """ info - display money """ charObj = self.charObj self.selfMsg(charObj.financialInfo()) def do_put(self, line): """ place an item in a container """ charObj = self.charObj roomObj = charObj.getRoom() charObjList = charObj.getInventory() roomObjList = roomObj.getInventory() targetList = self.getObjFromCmd(charObjList + roomObjList, line) if not targetList[0]: return self.missingArgFailure() itemObj = targetList[0] containerObj = 
targetList[1] if not itemObj: self.selfMsg("What are you trying to put?\n") return False if not containerObj: self.selfMsg("What are you trying to put where?\n") return False if containerObj.getType() != "Container": self.selfMsg("You can't put anything in that!\n") return False if containerObj.deposit(charObj, itemObj): charObj.unEquip(itemObj) self.selfMsg("ok\n") return False self.selfMsg("Didn't work!\n") return False def do_quit(self, line): """ quit the game """ return self.do_exit(line) def do_read(self, line): """ magic - read a scroll to use the spell """ if line == "": return self.missingArgFailure() self.useMagicItem(line) return False def do_reloadperm(self, line): ''' dm - reload permanents from disk (i.e. after modification) ''' roomObj = self.charObj.getRoom() if not self.charObj.isDm(): self.selfMsg("Unknown Command\n") return False itemList = self.getObjFromCmd(roomObj.getInventory(), line) if not itemList[0]: self.selfMsg("usage: reloadperm <objectname>\n") return False roomObj.reloadPermanent(itemList[0].getId()) self.selfMsg("Ok\n") return False def do_remove(self, line): """ unequip an item that you have equipped """ return self.do_unequip(line) def do_repair(self, line): """ transaction - repair character's item in a repair shop """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a wright\n") return False if not roomObj.isRepairShop(): self.selfMsg("You can't do that here. Find a wright\n") return False if len(cmdargs) < 1 or not isIntStr(cmdargs[0]): self.selfMsg("usage: repair <item> [#]\n") playerInventory = charObj.getInventory() itemList = self.getObjFromCmd(playerInventory, line) if not itemList[0]: return self.missingArgFailure() itemObj = itemList[0] if not itemObj.canBeRepaired(): self.selfMsg("This can't be repaired\n") return False price = self.gameObj.calculateObjectPrice(charObj, itemObj) * 100 prompt = ( "You are about to repair " + itemObj.getArticle() + " " + itemObj.getName() + " for " + str(price) + " shillings. Proceed?" ) if self.client.promptForYN(prompt): itemObj.repair() roomObj.recordTransaction(itemObj) roomObj.recordTransaction("repair/" + str(price)) charObj.recordTax(roomObj.getTaxAmount(price)) self.selfMsg(roomObj.getSuccessTxt()) return False else: self.selfMsg(roomObj.getAbortedTxt()) return False def do_return(self, line): """ alias - unequip """ return self.do_unequip() def do_roominfo(self, line): """' dm - show room info """ if not self.charObj.isDm(): self.selfMsg("Unknown Command\n") return False self.selfMsg(self.charObj.getRoom().getInfo()) def do_run(self, line): """ drop weapon and escape room in random direction """ self.gameObj.run(self.charObj) def do_s(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_save(self, line): """ save character """ if self.client.charObj.save(): self.selfMsg("Saved\n") else: self.selfMsg("Could not save\n") def do_say(self, line): """ communication within room """ if line == "": msg = self.client.promptForInput("Say what? 
") else: msg = line if msg != "": fullmsg = self.charObj.getName() + " said, '" + msg + "'" if self.gameObj.roomMsg(self.charObj.getRoom(), fullmsg + "\n"): self.charObj.setHidden(False) logger.info(fullmsg) else: self.selfMsg("Message not received\n") def do_search(self, line): """ attempt to find items, players, or creatures that are hidden """ charObj = self.charObj roomObj = charObj.getRoom() foundSomething = False for obj in roomObj.getInventory(): if obj.isHidden(): if charObj.searchSucceeds(obj): self.selfMsg("You find " + obj.describe() + "\n") foundSomething = True if not foundSomething: self.selfMsg("Your search turns up nothing\n") def do_sell(self, line): """ transaction - Sell an item to a pawnshop """ charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a buyer\n") return False if not roomObj.isPawnShop(): self.selfMsg("You can't do that here. Find a buyer.\n") return False itemList = self.getObjFromCmd(charObj.getInventory(), line) if not itemList[0]: return self.missingArgFailure() itemObj = itemList[0] price = int(self.gameObj.calculateObjectPrice(charObj, itemObj) * 0.8) # prompt player for confirmation prompt = ( "You are about to pawn " + itemObj.getArticle() + " " + itemObj.getName() + " for " + str(price) + " shillings. Proceed?" ) self.gameObj.sellTransaction( charObj, itemObj, price, prompt, roomObj.getSuccessTxt(), roomObj.getAbortedTxt(), ) def do_send(self, line): """ communication - direct message to another player """ if line == "": self.selfMsg("usage: send <playerName> [msg]\n") return False target, msg = self.parseIpc(line) if msg != "": fullmsg = self.charObj.getName() + " sent, '" + msg + "'" if self.gameObj.directMsg(target, fullmsg + "\n"): self.charObj.setHidden(False) logger.info("To " + target.getName() + ", " + fullmsg) else: self.selfMsg("Message not received\n") return False def do_shout(self, line): """ communication - alias for yell """ return self.do_yell(line) def do_skills(self, line): """ info - show character's skills """ self.selfMsg(self.charObj.SkillsInfo()) def do_slay(self, line): """ dm - combat - do max damage to creature, effectively killing it """ target = self.getCombatTarget(line) if not target: self.selfMsg("Slay what?\n") return False if self.charObj.isDm(): atkcmd = "slay" else: atkcmd = "attack" # if your not a dm, this is a standard attack self.gameObj.attackCreature(self.charObj, target, atkcmd) return False def do_smash(self, line): """ attempt to open a door/chest with brute force """ charObj = self.charObj roomObj = charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getInventory(), line) if not itemList[0]: return self.missingArgFailure() itemObj = itemList[0] if not itemObj.isSmashable(): self.selfMsg("This is not smashable!\n") return False if itemObj.smash(charObj): self.othersMsg( roomObj, charObj.getName() + " smashes the " + itemObj.getSingular() + " open.\n", ) self.selfMsg("You smash it open!\n") otherRoom = self.gameObj.getCorrespondingRoomObj(itemObj) if otherRoom: self.gameObj.roomMsg( otherRoom, itemObj.getSingular() + " smashes open\n" ) if itemObj.getType() == "Door": self.gameObj.modifyCorrespondingDoor(itemObj, charObj) return False else: self.othersMsg( roomObj, charObj.getName() + " fails to smash " + itemObj.describe() + " open.\n", ) self.selfMsg("Bang! 
You fail to smash it open!\n") otherRoom = self.gameObj.getCorrespondingRoomObj(itemObj) if otherRoom: self.gameObj.roomMsg( otherRoom, "You hear a noise on the " + "other side of the " + itemObj.getSingular() + "\n", ) return False def do_south(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_stats(self, line): """ info - show character's stats """ self.selfMsg(self.charObj.StatsInfo()) def do_status(self, line): """ alias - health """ return self.do_health() def do_steal(self, line): """ transaction - attempt to steal from another player """ self.selfMsg(line + " not implemented yet\n") def do_stopasync(self, line): """ dm - stop async thread (which should trigger an automatic restart) """ if not self.charObj.isDm(): self.selfMsg("Unknown Command\n") return False self.gameObj._asyncThread.halt() self.selfMsg("ok\n") def do_strike(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Strike what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_study(self, line): """ magic - study a scroll to learn the chant """ charObj = self.charObj if line == "": return self.missingArgFailure() (spellItem, spellName, targetObj) = self.parseSpellArgs(line) if not spellItem: self.selfMsg("Study what?\n") return False if not spellItem.getType().lower() == "scroll": self.selfMsg("You can't study that.\n") return False # Learn the spell and display the chant msg = spellItem.study(charObj) self.selfMsg(msg) # Remove item from player's inventory self.gameObj.removeFromPlayerInventory(charObj, spellItem, "disint") return False def do_suicide(self, line): if not self.client.promptForYN( "DANGER: This will permanently " + "delete your character." + " Are you sure?" 
): return False charObj = self.charObj charName = charObj.getName() self.gameObj.leaveGame(self.client.charObj, saveChar=False) msg = self.gameObj.txtBanner( charName + " has shuffled off this mortal coil", bChar="=" ) charObj.delete() charObj = None self.charObj = None self.acctObj.removeCharacterFromAccount(charName) self.gameObj.gameMsg(msg) logger.info("Character deleted: " + charName) return True def do_take(self, line): """ alias - get """ return self.do_get(line) def do_talk(self, line): """ alias - parley """ return self.do_parley(line) def do_teach(self, line): """ teach another player a spell """ self.selfMsg(line + " not implemented yet\n") def do_toggle(self, line): """ dm command to set flags """ if self.charObj.isDm(): if ( line.lower() == "character" or line.lower() == "char" or line.lower() == "self" ): obj = self.charObj elif line.lower() == "room": obj = self.charObj.getRoom() elif line.lower() == "game": obj = self.gameObj elif line.lower() == "gamecmd": obj = self elif line.lower() == "client": obj = self.client else: roomObj = self.charObj.getRoom() itemList = self.getObjFromCmd(roomObj.getCharsAndInventory(), line) if itemList[0]: obj = itemList[0] else: self.selfMsg("Can't toggle " + line + "\n") self.selfMsg( "Fixed toggles:\n" + " self, room, game, gamecmd, client\n" ) return False else: self.selfMsg("Unknown Command\n") return False obj.toggleInstanceDebug() self.selfMsg( "Toggled " + line + ": debug=" + str(obj.getInstanceDebug()) + "\n" ) return False def do_thrust(self, line): """ combat """ target = self.getCombatTarget(line) if not target: self.selfMsg("Thrust at what?\n") return False self.gameObj.attackCreature(self.charObj, target, self.getLastCmd()) return False def do_track(self, line): """ show direction last player traveled """ self.selfMsg(line + " not implemented yet\n") def do_train(self, line): """ increase level if exp and location allow """ charObj = self.charObj roomObj = charObj.getRoom() roomObj.train(charObj) def do_turn(self, line): """ magic - chance for clerics/paladins to destroy creatures """ self.selfMsg(line + " not implemented yet\n") def do_u(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_unequip(self, line): """ stop using a piece of equiptment """ charObj = self.charObj targetList = self.getObjFromCmd(charObj.getInventory(), line) if not targetList[0]: return self.missingArgFailure() elif len(targetList) > 0: itemObj = targetList[0] else: itemObj = None if charObj.unEquip(itemObj): self.selfMsg("Ok\n") else: self.selfMsg("You can't do that\n") def do_unfollow(self, line): """ unfollow - stop following """ self.charObj.setFollow() # Unset follow attribute self.selfMsg("ok\n") return False def do_unlock(self, line): """ unlock a door/chest with a key """ charObj = self.charObj roomObj = charObj.getRoom() roomObjList = roomObj.getInventory() fullObjList = charObj.getInventory() + roomObjList itemList = self.getObjFromCmd(fullObjList, line) if not itemList[0]: return self.missingArgFailure() itemObj = itemList[0] keyObj = itemList[1] if not keyObj: self.selfMsg("You can't lock anything without a key\n") return False if not itemObj.isUnlockable(): if itemObj.isUnlocked(): self.selfMsg("It's already unlocked!\n") elif itemObj.isOpen(): self.selfMsg("You can't unlock it when it's open!\n") else: self.selfMsg("This is not unlockable!\n") return False if keyObj.getLockId() != itemObj.getLockId(): self.selfMsg("The key doesn't fit the lock\n") return False if itemObj.unlock(keyObj): if 
itemObj.getType() == "Door": self.gameObj.modifyCorrespondingDoor(itemObj, charObj) self.selfMsg("You unlock the lock.\n") self.othersMsg( roomObj, charObj.getName() + " unlocks the " + "lock on the " + itemObj.getSingular() + "\n", charObj.isHidden(), ) return False else: self.selfMsg("You fail to unlock the lock.\n") self.othersMsg( roomObj, charObj.getName() + " fails to " + "unlock the lock on the " + itemObj.getSingular() + "\n", charObj.isHidden(), ) return False return False def do_up(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_use(self, line): """ equip an item or use a scroll or magic item """ if line == "": return self.missingArgFailure() charObj = self.charObj roomObj = charObj.getRoom() objList = charObj.getInventory() + roomObj.getCharsAndInventory() targetList = self.getObjFromCmd(objList, line) itemObj = None # Require at least one arg after command for target in targetList: if not target: continue if not target.isUsable(): continue if not itemObj: itemObj = target if not itemObj: return self.missingArgFailure() type = itemObj.getType() if type == "Character" or type == "Creature": return self.missingArgFailure() if isObjectFactoryType(type): self.useObject(itemObj, line) return False logger.warn( "game.do_use: Attempt to use: " + itemObj.describe() + " - with type " + type ) if roomObj: # tmp - remove later if room object is not needed here pass # but there may be spells/items that affect the room. def do_w(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_wear(self, line): """ alias - use """ return self.do_use(line) def do_west(self, line): """ navigation """ self.move(self._lastinput[0]) # pass first letter def do_where(self, line): """ alias - look """ return self.do_look(line) def do_whisper(self, line): """ communication - char to char, with chance of being overheard """ if line == "": self.selfMsg("usage: whisper <playerName> [txt]\n") return False target, msg = self.parseIpc(line) received = False charName = self.charObj.getName() for oneChar in self.charObj.getRoom().getCharacterList(): if target == oneChar: # if is recipient oneChar.client.spoolOut( charName + " whispers, '" + msg + "'\n" # notify ) received = True else: if not oneChar.hearsWhispers(): continue oneChar.client.spoolOut( "You overhear " + charName + " whisper " + msg + "\n" ) self.charObj.setHidden(False) if received: self.selfMsg("ok\n") else: self.selfMsg("Message not received\n") return False def do_who(self, line): """ info - show who is playing the game """ charTxt = "" charObj = self.charObj charFormat = " {:20} - {:16} - {:20}\n" header = " Characters currently playing:\n" header += charFormat.format("Character Name", "Login Date", "Account") header += charFormat.format("-" * 20, "-" * 16, "-" * 20) for onechar in charObj.getRoom().getCharacterList(): charTxt += charFormat.format(onechar.getName(), dateStr(onechar.getLastLoginDate()), charObj.client.acctObj.getDisplayName()) self.selfMsg(header + charTxt) return None def do_wield(self, line): """ alias - use """ return self.do_use(line) def do_withdraw(self, line): """ transaction - take money out of the bank """ cmdargs = line.split(" ") charObj = self.charObj roomObj = charObj.getRoom() if not roomObj.getType() == "Shop": self.selfMsg("You can't do that here. Find a bank\n") return False if not roomObj.isBank(): self.selfMsg("You can't do that here. 
Find a bank\n") return False if len(cmdargs) < 1 or not isIntStr(cmdargs[0]): self.selfMsg("usage: withdraw <amount>\n") return False amount = int(cmdargs[0]) if not charObj.canWithdraw(amount): self.selfMsg(roomObj.getCantAffordTxt(amount)) return False taxRate = roomObj.getTaxRate() bankfee, wAmount = charObj.calculateBankFees(amount, taxRate) prompt = ( "You are about to withdraw " + str(amount) + " shillings from the bank.\n" ) if taxRate != 0: prompt += ( "The bank charges a " + str(taxRate) + "% withdrawl fee which comes to a charge of " + str(bankfee) + "shillings.\n" + "As a result, you will receive " + str(wAmount) + " shillings.\n" ) prompt += "Continue?" if self.client.promptForYN(prompt): charObj.bankWithdraw(amount, taxRate) roomObj.recordTransaction("withdrawl/" + str(wAmount)) roomObj.recordTransaction("fees/" + str(bankfee)) self.selfMsg(roomObj.getSuccessTxt()) return False else: self.selfMsg(roomObj.getAbortedTxt()) return False def do_yell(self, line): """ communication - all in room and adjoining rooms """ if line == "": msg = self.client.promptForInput(self.getLastCmd() + " what? ") else: msg = line if msg != "": fullmsg = self.charObj.getName() + " yelled, '" + msg + "'" if self.gameObj.yellMsg(self.charObj.getRoom(), fullmsg + "\n"): logger.info(fullmsg) self.charObj.setHidden(False) else: self.selfMsg("Message not received\n") # instanciate the _Game class _game = _Game() def Game(): """ return a reference to the single, existing _game instance Thus, when we try to instanciate Game, we are just returning a ref to the existing Game """ return _game ``` #### File: sog/test/test_account.py ```python import re import unittest from common.testLib import TestGameBase from common.general import logger class TestAccount(TestGameBase): testAccountId = "<EMAIL>" _instanceDebug = False def setUp(self): self.banner("start", testName=__class__.__name__) self._testAcctName = self.testAccountId self._client = self.createClientAndAccount() self._acctObj = self.getAcctObj() def testAccountInstanciation(self): testCharName1 = "TestAcctChar1" acctObj = self._acctObj acctObj.setDisplayName(testCharName1) assert acctObj.getName() == testCharName1 assert acctObj.getEmail() == self.testAccountId assert acctObj.getId() == self.testAccountId acctObj.setLoginDate() assert acctObj.getLastLoginDate() acctObj.setLogoutDate() assert acctObj.getLastLogoutDate() assert acctObj.isValid() assert not acctObj.isAdmin() assert acctObj.describe() != "" assert acctObj.getInfo() != "" assert not acctObj.adminFileExists() def testChangeEmail(self): assert self._acctObj.setUserEmailAddress("<EMAIL>") assert not self._acctObj.setUserEmailAddress("badEmail.com") def testCharacterList(self): testCharName1 = "TestAcctChar2" testCharName2 = "TestAcctChar3" self._acctObj.addCharacterToAccount(testCharName1) assert testCharName1 in self._acctObj.getCharacterList() self._acctObj.addCharacterToAccount(testCharName2) if self._instanceDebug: logger.debug(self._acctObj.showCharacterList()) assert re.search(testCharName2, self._acctObj.showCharacterList()) self._acctObj.removeCharacterFromAccount(testCharName1) assert testCharName1 not in self._acctObj.getCharacterList() assert self._acctObj.getMaxNumOfCharacters() == 5 assert self._acctObj.getCharacterList() == [testCharName2] if __name__ == "__main__": unittest.main() ``` #### File: sog/test/test_game.py ```python import unittest from common.testLib import TestGameBase from common.general import logger # import object # import room # import creature class 
TestGame(TestGameBase): def setTestName(self, name=""): self._testName = __class__.__name__ def testGameInstanciation(self): gameObj = self.getGameObj() assert gameObj.isValid() out = "Could not instanciate the game object" self.assertEqual(gameObj._startdate != "", True, out) def testToggleInstanceDebug(self): gameObj = self.getGameObj() startState = gameObj.getInstanceDebug() gameObj.toggleInstanceDebug() out = "toggleInstanceDebug could not be set" self.assertEqual(gameObj.getInstanceDebug() != startState, True, out) gameObj.toggleInstanceDebug() out = "toggleInstanceDebug could not be set" self.assertEqual(gameObj.getInstanceDebug() == startState, True, out) # def testAsync(self): # gameObj = self.getGameObj() # gameObj.asyncTasks() # gameObj.asyncNonPlayerActions() # gameObj.asyncCharacterActions() def testEncounter(self): self.joinRoom(15) roomObj = self.getRoomObj() gameObj = self.getGameObj() gameObj.creatureEncounter(roomObj) class TestGameCmd(TestGameBase): doorOpenAttributes = [ "objId", "_name", "_closed"] doorLockAttributes = doorOpenAttributes + [ "_locked", "_locklevel", "_lockId"] doorTrapAttributes = doorLockAttributes + [ "_traplevel", "_poison", "_toll"] def testGameCmdInstanciation(self): gameCmdObj = self.getGameCmdObj() out = "Could not instanciate the gameCmd object" self.assertEqual(gameCmdObj._lastinput == "", True, out) def testGameCmdGetObj(self): gameCmdObj = self.getGameCmdObj() roomObj = self.getRoomObj() charObj = self.getCharObj() obj = self.createObject(type="Weapon", name="laser") obj._singledesc = "green laser" roomObj.addToInventory(obj) logger.info("\n" + roomObj.display(charObj)) logger.info("Before:\n" + charObj.inventoryInfo()) assert not gameCmdObj.do_get("laser") # cmds always return False logger.info("After:\n" + charObj.inventoryInfo()) assert obj not in roomObj.getInventory() assert obj in charObj.getInventory() def testGameCmdGetCoins(self): gameCmdObj = self.getGameCmdObj() roomObj = self.getRoomObj() charObj = self.getCharObj() charObj.setCoins(0) assert charObj.getCoins() == 0 coinObj = self.createObject(type="Coins", name="coins") coinObj._value = 50 roomObj.addToInventory(coinObj) logger.info("\n" + roomObj.display(charObj)) logger.info("Before:\n" + charObj.financialInfo()) assert not gameCmdObj.do_get("coins") # cmds always return False logger.info("After:\n" + charObj.financialInfo()) assert coinObj not in roomObj.getInventory() assert coinObj not in charObj.getInventory() assert charObj.getCoins() == 50 def addFiveItemsToCharacter(self, charObj): obj1 = self.createObject(num=99991, type="Armor", name="armor1") charObj.addToInventory(obj1) obj2 = self.createObject(num=99992, type="Weapon", name="weapon2") charObj.addToInventory(obj2) obj3 = self.createObject(num=99993, type="Shield", name="shield3") charObj.addToInventory(obj3) obj4 = self.createObject(num=99994, type="Treasure", name="treasure4") charObj.addToInventory(obj4) obj5 = self.createObject(num=99995, type="Treasure", name="treasure5") charObj.addToInventory(obj5) assert len(charObj.getInventory()) == 5 def testTransferInventoryToRoom(self): gameObj = self.getGameObj() charObj = self.getCharObj() charObj.setName("deadGuy") charObj.setHitPoints(10) roomObj = self.createRoom(num=99990) roomObj._inventory = [] charObj.setRoom(roomObj) logger.debug("Testing inventory transfer") self.addFiveItemsToCharacter(charObj) logger.debug("Char Before Trans: " + str(charObj.describeInventory())) logger.debug("Room Before Trans: " + str(roomObj.describeInventory())) assert 
len(roomObj.getInventory()) == 0 charObj.transferInventoryToRoom( charObj.getRoom(), gameObj.roomMsg, persist=True, verbose=False ) logger.debug("Room After Trans: " + str(roomObj.describeInventory())) logger.debug("Char After Trans: " + str(charObj.describeInventory())) assert len(roomObj.getInventory()) == 5 assert len(charObj.getInventory()) == 0 for obj in roomObj.getInventory(): assert obj.persistsThroughOneRoomLoad() roomObj.removeNonPermanents(removeTmpPermFlag=True) logger.debug("Room PostRemove: " + str(roomObj.describeInventory())) assert len(roomObj.getInventory()) == 5 for obj in roomObj.getInventory(): assert not obj.persistsThroughOneRoomLoad(), ( "Item " + str(obj.getItemId()) + " should no longer persist" ) def logRoomInventory(self, charObj): logger.info( "----- room ID: " + charObj.getRoom().getItemId() + " " + str(charObj.getRoom()) + " -----" ) logger.info(charObj.getRoom().display(charObj)) logger.info(str(charObj.getRoom().getInventory())) logger.info("") def testPlayerDeath(self): tmpRoomNum = 99980 # clean up the test room before we start self.purgeTestRoomData(roomNums=[tmpRoomNum]) gameObj = self.getGameObj() charObj = self.getCharObj() charObj.setName("deadGuy") charObj.setHitPoints(10) roomObj = self.createRoom(num=tmpRoomNum) roomObj._inventory = [] roomObj.save() self.joinRoom(room=roomObj) creObj = self.createCreature() logger.info("Testing character death") self.addFiveItemsToCharacter(charObj) assert len(charObj.getInventory()) == 5 assert len(roomObj.getInventory()) == 0 gameObj.applyPlayerDamage(charObj, creObj, 11) self.logRoomInventory(charObj) assert ( len(charObj.getInventory()) == 0 ), "player's belongings should be removed as they are dumped to room" assert len(charObj.getRoom().getInventory()) == 0 self.joinRoom(room=tmpRoomNum) self.logRoomInventory(charObj) assert ( len(charObj.getRoom().getInventory()) == 5 ), "player's belongings should have persisted in room inventory" logger.info(str(charObj.getRoom().getInventory())) self.joinRoom(room=self._testRoomNum) self.logRoomInventory(charObj) self.joinRoom(room=tmpRoomNum) self.logRoomInventory(charObj) assert ( len(charObj.getRoom().getInventory()) == 0 ), "player's belongings in room inventory should only persist once" self.purgeTestRoomData(roomNums=[tmpRoomNum]) def doorTestSetUp(self): doorObj1 = self.createObject(num=99997, type='Door', name='door1') doorObj1._toWhere = 99993 doorObj1._correspondingDoorId = 99996 doorObj1._closed = False doorObj2 = self.createObject(num=99996, type="Door", name="door2") doorObj2._toWhere = 99992 doorObj2._correspondingDoorId = 99997 doorObj2._closed = False roomObj1 = self.createRoom(num=99992) roomObj1._inventory = [] roomObj1.addToInventory(doorObj1) roomObj2 = self.createRoom(num=99993) roomObj2._inventory = [] roomObj2.addToInventory(doorObj2) return roomObj1, roomObj2, doorObj1, doorObj2 def testDoorsInActiveRooms(self): ''' Set up a pair of doors and verify that door actions work ''' gameObj = self.getGameObj() gameCmdObj = self.getGameCmdObj() charObj = self.getCharObj() charObj.setName('doorOpener') charObj.setHitPoints(10) (roomObj1, roomObj2, doorObj1, doorObj2) = self.doorTestSetUp() self.joinRoom(room=roomObj1) # Add room with 2nd door to active rooms list gameObj.addToActiveRooms(roomObj2) # test that doors are set up correctly assert doorObj1.getToWhere() == roomObj2.getId() assert doorObj2.getToWhere() == roomObj1.getId() assert doorObj1.getCorresspondingDoorId() == doorObj2.getId() assert doorObj2.getCorresspondingDoorId() == doorObj1.getId() 
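        # Note: the door pair created in doorTestSetUp() is expected to stay in
        # sync -- opening, closing, locking, or unlocking one door propagates to
        # its corresponding door via gameObj.modifyCorrespondingDoor(), which is
        # what the state checks below exercise.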
logger.info("Test: Original State") self.showItems([doorObj1, doorObj2], self.doorOpenAttributes) # Open door1 self.logRoomInventory(charObj) msg = "Opening door should succeed" logger.info("Test: " + msg) logger.warning("Opening Door") assert not gameCmdObj.do_open("door1") # cmds always return False assert doorObj1.isOpen(), msg self.showItems([doorObj1, doorObj2], self.doorOpenAttributes) # close door1 - check that its closed, and corresponding door is closed msg = "Closing door - both doors should be closed" logger.info("Test: " + msg) logger.warning("Closing Door") assert not gameCmdObj.do_close("door1") # cmds always return False self.showItems([doorObj1, doorObj2], self.doorOpenAttributes) assert doorObj1.isClosed(), msg # Door should be closed assert doorObj2.isClosed(), msg # Corresponding Door should be closed self.logRoomInventory(charObj) # Re-open door1 after being closed msg = "Opening door after it was closed - both doors should be open" logger.info("Test: " + msg) logger.warning("Opening Door") assert not gameCmdObj.do_open("door1") # cmds always return False self.showItems([doorObj1, doorObj2], self.doorOpenAttributes) assert doorObj1.isOpen() # Door should be open assert doorObj2.isOpen() # Corresponding Door should be open # # self._spring = True # automatically close if nobody is in the room # keyObj1 = self.createObject(num=99996, type='Key', name='goodkey') keyObj1._lockId = 99999 keyObj2 = self.createObject(num=99995, type='Key', name='badkey') keyObj2._lockId = 99990 charObj.addToInventory(keyObj1) charObj.addToInventory(keyObj2) msg = "Locking any door without key should fail" logger.info("Test: " + msg) assert not gameCmdObj.do_lock("door1") # cmds always return False assert not doorObj1.isLocked(), msg msg = "Locking an open door with key should fail" logger.info("Test: " + msg) assert not gameCmdObj.do_lock("door1 goodkey") # cmds always return False assert not doorObj1.isLocked(), msg logger.warning("Closing Door") assert not gameCmdObj.do_close("door1") # cmds always return False self.showItems([doorObj1], self.doorLockAttributes) msg = "Locking closed door with no lock should fail" logger.info("Test: " + msg) assert not gameCmdObj.do_lock("door1 goodkey") # cmds always return False assert not doorObj1.isLocked(), msg logger.warning("Adding lock level and lock id") doorObj1._locklevel = 1 doorObj1._lockId = 99999 self.showItems([doorObj1], self.doorLockAttributes) msg = "Locking door with bad key should fail" logger.info("Test: " + msg) assert not gameCmdObj.do_lock("door1 badkey") # cmds always return False assert not doorObj1.isLocked(), msg msg = "Locking door with good key should succeed - both should be locked" logger.info("Test: " + msg) assert not gameCmdObj.do_lock("door1 goodkey") # cmds always return False self.showItems([doorObj1, doorObj2], self.doorLockAttributes) assert doorObj1.isLocked(), msg assert doorObj2.isLocked(), msg msg = "Opening a locked door should fail - door should remain closed" logger.info("Test: " + msg) assert not gameCmdObj.do_open("door1") # cmds always return False assert doorObj1.isClosed(), msg assert doorObj1.isLocked(), msg msg = "Unlocking a locked door with key should succeed, both should be unlocked" logger.info("Test: " + msg) logger.warning("Unlocking Door") assert not gameCmdObj.do_unlock("door1 goodkey") # cmds always return False self.showItems([doorObj1], self.doorLockAttributes) assert doorObj1.isUnlocked(), msg assert doorObj2.isUnlocked(), msg msg = "Opening a previously locked door should succeed - both 
should be open" logger.info("Test: " + msg) logger.warning("Opening Door") assert not gameCmdObj.do_open("door1") # cmds always return False self.showItems([doorObj1], self.doorLockAttributes) assert doorObj1.isOpen(), msg assert doorObj2.isOpen(), msg msg = "Opening door with trap - char should be damaged" logger.info("Test: " + msg) charObj.client.popOutSpool() # Clear the output spool charObj._instanceDebug = True charObj.dexterity = -1000 # make sure random odds don't break tests charObj._level = -1000 # make sure random odds don't break tests logger.warning("Adding trap level") doorObj1._traplevel = 1 doorObj1.close(charObj) self.showItems([doorObj1], self.doorTrapAttributes) charObj.setMaxHP(100) charObj.setHitPoints(100) assert not gameCmdObj.do_open("door1") # cmds always return False charObj._instanceDebug = False logger.info("OutSpool: " + charObj.client.popOutSpool()) assert charObj.getHitPoints() < 100, msg msg = "Opening door with trap and poison - char should be poisoned" logger.info("Test: " + msg) charObj._instanceDebug = True logger.warning("Adding poison to trap") doorObj1._poison = True doorObj1.close(charObj) self.showItems([doorObj1], self.doorTrapAttributes) charObj.setMaxHP(100) charObj.setHitPoints(100) assert not gameCmdObj.do_open("door1") # cmds always return False charObj._instanceDebug = False logger.info("OutSpool: " + charObj.client.popOutSpool()) assert charObj.getHitPoints() < 100 assert charObj.isPoisoned(), msg msg = "Try to go through a toll door without funds. Should fail" logger.info("Test: " + msg) doorObj1._toll = 2000 charObj._level = 1 charObj.setCoins(1000) origId = roomObj1.getItemId() self.showItems([doorObj1], self.doorTrapAttributes) assert not gameCmdObj.do_look("door1") # cmds always return False assert not gameCmdObj.do_go("door1") # cmds always return False logger.info("OutSpool: " + charObj.client.popOutSpool()) assert roomObj1.getItemId() == origId, msg msg = "Go through a door with a toll - Char should have fewer coins" logger.info("Test: " + msg) doorObj1._toll = 250 charObj.setCoins(1000) doorLoc = doorObj1.getToWhere() assert not gameCmdObj.do_go("door1") # cmds always return False assert charObj.getCoins() == 750, msg assert charObj.getRoom().getId() == doorLoc, msg self.purgeTestRoomData(roomNums=[99992, 99993]) if __name__ == "__main__": unittest.main() ``` #### File: sog/test/test_general.py ```python from datetime import datetime, timedelta import re import unittest import common.general from common.testLib import TestGameBase # from common.general import logger import object class TestGeneral(TestGameBase): def setUp(self): self.banner("start", testName=__class__.__name__) def testIsIntStr(self): inputs = [ "apple", "apple 1", "apple #1", "apple #11", "1", "33", "#33", "33x", "@33", ] outputs = [False, False, False, False, True, True, False, False, False] for num, input in enumerate(inputs): result = common.general.isIntStr(input) out = ( "Input: " + str(input) + " - Output: " + str(result) + " - Expected: " + str(outputs[num]) ) status = bool(result == outputs[num]) self.assertEqual(status, True, out) def testIsCountStr(self): inputs = [ "apple", "apple 1", "apple #1", "apple #11", "1", "33", "#33", "33x", "@33", ] outputs = [False, False, False, False, True, True, True, False, False] for num, input in enumerate(inputs): result = common.general.isCountStr(input) out = ( "Input: " + str(input) + " - Output: " + str(result) + " - Expected: " + str(outputs[num]) ) status = bool(result == outputs[num]) self.assertEqual(status, 
True, out) def testSplitTarget(self): inputs = [ "staff", "staff 1", "staff player", "staff 1 player", "staff player 2", "staff 1 player 2", ] outputs = [ ["staff"], ["staff 1"], ["staff", "player"], ["staff 1", "player"], ["staff", "player 2"], ["staff 1", "player 2"], ] for num, input in enumerate(inputs): resultlist = common.general.splitTargets(input) out = ( "Input: " + str(input) + " - Output: " + str(resultlist) + " - Expected: " + str(outputs[num]) ) status = bool(resultlist == outputs[num]) self.assertEqual(status, True, out) def testTargetSearch(self): itemList = [] # Create a list of objects with names corresponding to ids. obj1 = object.Object(1) obj1.setName("staff1") itemList.append(obj1) obj2 = object.Object(2) obj2.setName("sword1") itemList.append(obj2) obj3 = object.Object(3) obj3.setName("armor1") itemList.append(obj3) obj4 = object.Object(4) obj4.setName("staff2") itemList.append(obj4) obj5 = object.Object(5) obj5.setName("sword2") itemList.append(obj5) inputs = [ "staff", "staff 1", "staff sword", "staff 2 sword", "staff sword 2", "staff 2 sword 2", ] outputs = [ ["staff1"], ["staff1"], ["staff1", "sword1"], ["staff2", "sword1"], ["staff1", "sword2"], ["staff2", "sword2"], ] for num, input in enumerate(inputs): targets = common.general.splitTargets(input) for num2, target in enumerate(targets): obj = common.general.targetSearch(itemList, target) if obj: out = ( "Input: " + str(input) + " - Output: (" + str(obj.getName()) + ") - Expected: (" + str(outputs[num][num2]) + ")" ) status = bool(obj.getName() == outputs[num][num2]) else: out = "Could not retrieve item for input: " + str(input) status = False self.assertEqual(status, True, out) def testGetRandomItemFromList(self): # This test verifies that item selected is from the input list inputs = [ ["bacon", "eggs", "toast"], ["toast"], [1, 2, 3, 4, 5], ] for num, input in enumerate(inputs): result = common.general.getRandomItemFromList(input) assert result in input # result is one of the items in list # This test asserts that, given X tries, each of the X inputs will be # selected at least once. 
There's a small chance that this could fail, # but it seems unlikely inputs = ["a", "b", "c", "d"] itemCount = {} for i in inputs: itemCount[i] = 0 # initialize counts for i in range(0, 1000): item = common.general.getRandomItemFromList(inputs) itemCount[item] += 1 # increment counter for i in inputs: assert itemCount[i] # fail if count for item is 0 def testDates(self): invalidInputs = [None, "", 1, "None", [1, 2, 3]] neverInputs = [common.general.getNeverDate()] dayInputs = [ datetime.now(), datetime.now() - timedelta(seconds=30), datetime.now() + timedelta(minutes=30), datetime.now() - timedelta(hours=23), ] longInputs = [ datetime.now() - timedelta(hours=26), datetime.now() - timedelta(days=3), datetime.now() - timedelta(days=400), ] validDateRegex = "^[0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+$" for num, input in enumerate(neverInputs + dayInputs + longInputs): aMsg = "input = " + str(input) secsSince = common.general.secsSinceDate(input) dateStr = common.general.dateStr(input) diffDay = common.general.differentDay(datetime.now(), input) if input in invalidInputs: assert secsSince == 0, aMsg assert dateStr == "Never", aMsg assert diffDay is False, aMsg if input in neverInputs: assert secsSince == 0, aMsg assert dateStr == "Never", aMsg assert diffDay is True, aMsg if input in dayInputs: assert secsSince < 86400, aMsg assert re.match(validDateRegex, dateStr), aMsg todaysDate = datetime.now().strftime("%Y/%m/%d") if re.match(todaysDate, input.strftime("%Y/%m/%d")): assert diffDay is False, aMsg else: assert diffDay is True, aMsg if input in longInputs: assert secsSince >= 86400, aMsg assert re.match(validDateRegex, dateStr), aMsg assert diffDay is True, aMsg def testDifferentDay(self): now = datetime.now() yd = now - timedelta(days=1) inputs = [ [now, now], [yd, yd], [now, yd], [yd, common.general.getNeverDate()], [ common.general.getNeverDate(), common.general.getNeverDate(), ], # noqa: E501 [now, common.general.getNeverDate()], ] outputs = [False, False, True, True, False, True] for num, input in enumerate(inputs): result = common.general.differentDay(inputs[num][0], inputs[num][1]) out = ( "Input: " + str(input) + " - Output: " + str(result) + " - Expected: " + str(outputs[num]) ) status = bool(result == outputs[num]) self.assertEqual(status, True, out) def testTruncateWithInt(self): invalidInputs = [None, "", "None", [1, 2, 3], datetime.now()] validInputs = [1.23456789, 1, 1.234, 123456789, 0.123456789, 0] invalidRegex = "\\.\\d{4}" for input in invalidInputs + validInputs: result = common.general.truncateWithInt(input) assert not re.search(invalidRegex, str(result)) if input in validInputs and input != 0: assert result != 0 if __name__ == "__main__": unittest.main() ``` #### File: sog/test/test_multiplayer.py ```python from collections import namedtuple import re import unittest from common.testLib import TestGameBase from common.general import logger # import object # import room # import creature from game import GameCmd class TestGame(TestGameBase): def setTestName(self, name=""): self._testName = __class__.__name__ def testGameInstanciation(self): gameObj = self.getGameObj() assert gameObj.isValid() out = "Could not instanciate the game object" self.assertEqual(gameObj._startdate != "", True, out) def testToggleInstanceDebug(self): gameObj = self.getGameObj() startState = gameObj.getInstanceDebug() gameObj.toggleInstanceDebug() out = "toggleInstanceDebug could not be set" self.assertEqual(gameObj.getInstanceDebug() != startState, True, out) gameObj.toggleInstanceDebug() out = 
"toggleInstanceDebug could not be set" self.assertEqual(gameObj.getInstanceDebug() == startState, True, out) class TestGameCmd(TestGame): def multiCharacterSetUp(self, nameList=["char1", "char2"], roomObj=None): """ Create X characters and place them in the same room Returns a list of nametuples, one element for each character """ # Create namedtuple and charList for resulting objects CharBlob = namedtuple('CharBlob', ['charObj', 'clientObj', 'gameCmdObj']) charList = [] # Create a roomObj for our characters, if it doesn't already exist. if not roomObj: roomObj = self.createRoom(num=99990) for name in nameList: if name == nameList[0]: # Set up the first character, which is special since the # framework creates it for us. charObj = self.getCharObj() charObj.setName(name) clientObj = charObj.client gameCmdObj = GameCmd(self.getGameCmdObj()) else: # Create the secondary characters clientObj, gameCmdObj = self.createAdditionalClient(name=name) charObj = clientObj.charObj # Store resulting character, client, and gameCmd objects in list # of namedtuples C = CharBlob(charObj, clientObj, gameCmdObj) charList.append(C) # Add the character to the room clientObj.getGameObj().joinRoom(roomObj, charObj) if len(nameList) > 1: logger.debug("multi: Verifying character setup") charNamesInGame = self.getCharNamesInGame(charList[0].clientObj.gameObj) charNamesInRoom = self.getCharNamesInRoom(charList[0].charObj.getRoom()) # logger.debug("multi: CharList={}".format(", ".join(charNamesInGame))) for oneCharName in nameList: # Check that our characters are all currently in the same game assert oneCharName in charNamesInGame, ( "Character {} is not in the game -- Chars:{}".format( oneCharName, charNamesInGame)) # Check that our characters are all currently in the same room assert oneCharName in charNamesInRoom, ( "Character {} is not in the room -- Chars:{}".format( oneCharName, charNamesInRoom)) return(charList) def getCharNamesInRoom(self, roomObj): """ Returns a list of character names in the given room """ return [c.getName() for c in roomObj.getCharacterList()] def getCharNamesInGame(self, gameObj): return [c.getName() for c in gameObj.getCharacterList()] def charIsInRoom(self, charObj, roomObj): return charObj in roomObj.getCharacterList() def showRoomNumsForAllChars(self): """ Dump the list of characters and their room number as debug output """ outStr = "--- Characters in the Game: " outStr += ", ".join(["{0}-->{1}".format( c.getName(), c.getRoom().getId()) for c in self.getGameObj().getCharacterList()]) logger.debug(outStr) def showOutput(self, prefix, charObj): """ Shows the character's output spool as debug log output """ logStr = "--- {0} {1} - begin output ---\n{2}\n--- {0} {1}- end output ---\n" outStr = re.sub("\n", "\n> ", ("> " + charObj.client.popOutSpool().rstrip())) logger.debug(logStr.format(prefix, charObj.getName(), outStr)) def testFollow(self): """ Test the lead/follow functionality """ roomObj = self.createRoom(num=99990) roomObj._shortDesc = "room1" roomObj.s = "1" leadCharName = "Leader" parasiteCharName = "Parasite" charList = self.multiCharacterSetUp([leadCharName, parasiteCharName], roomObj) leadCharObj = charList[0].charObj leadGameCmdObj = charList[0].gameCmdObj parasiteCharObj = charList[1].charObj parasiteGameCmdObj = charList[1].gameCmdObj # Set player stats high so that follow always succeeds parasiteCharObj.setClassName("ranger") parasiteCharObj.dexterity = 25 parasiteCharObj.luck = 25 # Begin tests logger.debug("testFollow: Follow case1: invalid target") 
logger.debug("testFollowBad: FollowSettingPre={}".format( parasiteCharObj.getFollow())) assert not parasiteGameCmdObj.do_follow("does-not-exist") # always False logger.debug(self.showOutput("testFollowBad", parasiteCharObj)) logger.debug("testFollowBad: FollowSettingPost={}".format( parasiteCharObj.getFollow())) assert parasiteCharObj.getFollow() is None logger.debug("testFollow: Follow case2: invalid self target") logger.debug("testFollowBad: FollowSettingPre={}".format( parasiteCharObj.getFollow())) assert not parasiteGameCmdObj.do_follow(parasiteCharObj.getName()) # always F logger.debug(self.showOutput("testFollowBad", parasiteCharObj)) logger.debug("testFollowBad: FollowSettingPost={}".format( parasiteCharObj.getFollow())) assert parasiteCharObj.getFollow() is None logger.debug("testFollow: Follow case3: valid target") logger.debug("testFollowGood: FollowSettingPre={}".format( parasiteCharObj.getFollow())) assert not parasiteGameCmdObj.do_follow(leadCharObj.getName()) # always False logger.debug(self.showOutput("testFollowGood", parasiteCharObj)) logger.debug("testFollowGood: FollowSettingPost={}".format( parasiteCharObj.getFollow())) assert parasiteCharObj.getFollow() is leadCharObj logger.debug("testFollow: Follow case4: lead moves south") parasiteCharObj._instanceDebug = True self.showRoomNumsForAllChars() leadGameCmdObj._lastinput = "s" assert not leadGameCmdObj.do_s("") # always False logger.debug(self.showOutput("testFollowGood", leadCharObj)) logger.debug(self.showOutput("testFollowGood", parasiteCharObj)) debugInfo = "LeadCharRoom={} - ParasiteCharRoom={}".format( leadCharObj.getRoom().getId(), parasiteCharObj.getRoom().getId()) self.showRoomNumsForAllChars() assert self.charIsInRoom(parasiteCharObj, leadCharObj.getRoom()), debugInfo parasiteCharObj._instanceDebug = False logger.debug("testFollow: Follow case5: unfollow") assert not parasiteGameCmdObj.do_unfollow("") # always False assert parasiteCharObj.getFollow() is None parasitePreRoomObj = parasiteCharObj.getRoom() # store pre-move roomObj assert not leadGameCmdObj.do_s("") # always False assert parasitePreRoomObj is parasiteCharObj.getRoom() # hasn't moved logger.debug(self.showOutput("testFollow", parasiteCharObj)) logger.debug("testFollow: Follow case6: follow non player") assert not parasiteGameCmdObj.do_follow("tow") # always False logger.debug(self.showOutput("testFollowBad", parasiteCharObj)) assert parasiteCharObj.getFollow() is None def testLose(self): """ Test the lead/follow functionality """ roomObj = self.createRoom(num=99990) leadCharName = "Leader" parasiteCharName = "Parasite" charList = self.multiCharacterSetUp([leadCharName, parasiteCharName], roomObj) leadCharObj = charList[0].charObj leadGameCmdObj = charList[0].gameCmdObj parasiteCharObj = charList[1].charObj parasiteGameCmdObj = charList[1].gameCmdObj # This should already be tested as part of follow assert not parasiteGameCmdObj.do_follow(leadCharObj.getName()) # always False assert parasiteCharObj.getFollow() is leadCharObj # Begin tests logger.debug("testLose: Lose case1: valid target") assert not leadGameCmdObj.do_lose(parasiteCharObj.getName()) # always False assert not parasiteCharObj.getFollow() is leadCharObj assert not leadGameCmdObj.do_lose("does-not-exist") assert parasiteCharObj.getFollow() is None if __name__ == "__main__": unittest.main() ```
{ "source": "Jnewgeek/handson-ml", "score": 3 }
#### File: Jnewgeek/handson-ml/02_end_to_end_machine_learning_project.py ```python import os os.chdir(os.getcwd()) import numpy as np import pandas as pd # set the random seed np.random.seed(42) # to plot pretty figure, set the figure params import matplotlib.pyplot as plt plt.rcParams['font.sans-serif']=['SimHei'] plt.rcParams['axes.unicode_minus']=False import matplotlib as mpl mpl.rc("axes",labelsize=14) mpl.rc("xtick",labelsize=12) mpl.rc("ytick",labelsize=12) # set the figure location PROJECT_ROOT_DIR = "." CHAPTER_ID = "end_to_end_project" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) # define the function that save the figure def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) # Ignore useless warnings (see SciPy issue #5998) import warnings warnings.filterwarnings(action="ignore", message="^internal gelsd") # sklearn model #from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedShuffleSplit from sklearn.externals import joblib import time ################################## Get data ################################### import os import tarfile from six.moves import urllib DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/" HOUSING_PATH = os.path.join("datasets", "housing") HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz" #download data from internet and decompress it to csv file def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): if not os.path.isdir(housing_path): os.makedirs(housing_path) tgz_path = os.path.join(housing_path, "housing.tgz") urllib.request.urlretrieve(housing_url, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() #load the housing data def load_housing_data(housing_path=HOUSING_PATH): if not os.path.exists(housing_path): fetch_housing_data() # load local file csv_path=os.path.join(housing_path,"housing.csv") return pd.read_csv(csv_path) housing=load_housing_data() print(">> Load housing data sucessfully!\n") ## check out the data infomation and structure #print("First 5 lines of data:\n"+"-"*40+"\n",housing.head()) # show the first 5 lines of data #print("Data info:\n"+"-"*40+"\n") #print(housing.info()) # show the data infomation, such as type and missing number #print("Data description:\n"+"-"*40+"\n",housing.describe()) # show the data structure #print("Ocean_proximity value_counts:\n"+"-"*40+"\n",housing.ocean_proximity.value_counts()) # show the ocean_proximity distribution # ######################### plot the data distributon ############################ ##plt.figure() #housing.hist(bins=50,figsize=(20,15)) #save_fig("attribute_histogram_plots") # ################### Split the data to train_data and test data ################# ############## split_train_test_01 ##def split_train_test(data,test_ratio=0.2,seed=42,random_=True): ## if random_==True: ## np.random.seed(seed) ## # shuffle the data ## shuffled_indices=np.random.permutation(len(data)) ## test_set_size=int(len(data)*test_ratio) ## test_indices=shuffled_indices[:test_set_size] ## train_indices=shuffled_indices[test_set_size:] ## return data.iloc[train_indices],data.iloc[test_indices] ## ##train_set, test_set = split_train_test(housing) ##print("Random: ",len(train_set), "train +", len(test_set), "test") ## 
############### split_train_test_02 ### to make sure that works well when new data loaded ##from zlib import crc32 ## ### to create a array that mark whether the index should be added to test data ##def test_set_check(identifier,test_ratio): ## return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32 ## ##def split_train_test_by_id(data,test_ratio,id_column): ## ids=data[id_column] ## in_test_set=ids.apply(lambda id_: test_set_check(id_, test_ratio)) ## return data.loc[~in_test_set],data.loc[in_test_set] ## ### by rows ##housing_with_id = housing.reset_index() # adds an `index` column ##train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index") ##print("By rows: ",len(train_set), "train +", len(test_set), "test") ## ### by location ##housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"] ##train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id") ##print("By location: ",len(train_set), "train +", len(test_set), "test") # ############## split_train_test_03 # #train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) #print("By sklearn train_test_split: ",len(train_set), "train +", len(test_set), "test") # ############ Stratified Sampling ##plot the median income ##housing["median_income"].hist() ##pd.cut() housing["income_cat"] = pd.cut(housing["median_income"], bins=[0., 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5]) # bar plot #housing["income_cat"].value_counts().sort_index().plot(kind="bar") #plt.xticks(rotation=0) split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing["income_cat"]): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] print("By sklearn StratifiedShuffleSplit: ",len(strat_train_set), "train +", len(strat_test_set), "test") #print(strat_test_set["income_cat"].value_counts() / len(strat_test_set)) #print(housing["income_cat"].value_counts() / len(housing)) ################ compare the error between the Stratified and random #def income_cat_proportions(data): # return data["income_cat"].value_counts() / len(data) # #train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) # #compare_props = pd.DataFrame({ # "Overall": income_cat_proportions(housing), # "Random": income_cat_proportions(test_set), # "Stratified": income_cat_proportions(strat_test_set), #}).sort_index() #compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100 #compare_props["Strat. 
%error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100 #print(compare_props) #delete the extra column try: for set_ in (strat_train_set, strat_test_set): set_.drop("income_cat", axis=1, inplace=True) except: pass ############## Discover and visualize the data to gain insights ############### housing=strat_train_set.copy() #housing.plot(kind="scatter",x="longitude",y="latitude",alpha=0.1) # ## set the scatter size and colors #housing.plot(kind="scatter",x="longitude",y="latitude",alpha=0.4, # s=housing.population/1000,label="population", # c="median_house_value",cmap=plt.get_cmap("jet"),colorbar=True) #plt.legend() # add map png #import matplotlib.image as mpimg #california_img=mpimg.imread(PROJECT_ROOT_DIR + '/images/end_to_end_project/california.png') #ax = housing.plot(kind="scatter", x="longitude", y="latitude", figsize=(10,7), # s=housing['population']/100, label="Population", # c="median_house_value", cmap=plt.get_cmap("jet"), # colorbar=False, alpha=0.4, # ) #plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5, # cmap=plt.get_cmap("jet")) #plt.ylabel("Latitude", fontsize=14) #plt.xlabel("Longitude", fontsize=14) # #prices = housing["median_house_value"] #tick_values = np.linspace(prices.min(), prices.max(), 11) #cbar = plt.colorbar() #cbar.ax.set_yticklabels(["$%dk"%(round(v/1000)) for v in tick_values], fontsize=14) #cbar.set_label('Median House Value', fontsize=16) # #plt.legend(fontsize=16) #save_fig("california_housing_prices_plot") # ## find correction #corr_matrix=housing.corr() #print("Correction:\n"+"-"*40+"\n",corr_matrix["median_house_value"].sort_values(ascending=False)) # ## scatter matrix #print(">> Plotting the scatter matrix of 4 variables...\n") #from pandas.plotting import scatter_matrix # #attributes=["median_house_value","median_income","total_rooms","housing_median_age"] #scatter_matrix(housing[attributes],figsize=(12,8)) #save_fig("scatter_matrix_plot") # ## plot scatter of median_house_value and median_income #housing.plot(kind="scatter", x="median_income", y="median_house_value", # alpha=0.1) #plt.axis([0, 16, 0, 550000]) #save_fig("income_vs_house_value_scatterplot") # ## create different variable #print(">> Create some new variables...") #housing["rooms_per_household"] = housing["total_rooms"]/housing["households"] #housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"] #housing["population_per_household"]=housing["population"]/housing["households"] # ## check out the new correction #corr_matrix=housing.corr() #print("New Correction:\n"+"-"*40+"\n",corr_matrix["median_house_value"].sort_values(ascending=False)) # Prepare the data for Machine Learning algorithms housing=strat_train_set.drop("median_house_value",axis=1) housing_label=strat_train_set["median_house_value"].copy() # clean the missing data #print(">> Replace the missing data with median number...\n") try: from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+ except ImportError: from sklearn.preprocessing import Imputer as SimpleImputer #imputer=SimpleImputer(strategy="median") ## imputer can only caculate the median value on numeric data, so we should drop the text label housing_num=housing.drop("ocean_proximity",axis=1) #imputer.fit(housing_num) ## 填补缺失值 #X=imputer.transform(housing_num) #housing_tr=pd.DataFrame(X,columns=housing_num.columns) # transform the character data #print(">> Transform the Category to Number...\n") #try: # from sklearn.preprocessing import OrdinalEncoder #except ImportError: # from 
future_encoders import OrdinalEncoder # Scikit-Learn < 0.20 #housing_cat = housing[['ocean_proximity']] #print("Raw label(first 10 lines):\n"+"-"*40+"\n",housing_cat.head(10)) #ordinal_encoder = OrdinalEncoder() #housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat) #print("Transformed label(first 10 lines):\n"+"-"*40+"\n",housing_cat_encoded[:10]) #print("Transformed Rules:\n"+"-"*40+"\n",ordinal_encoder.categories_) # ## Create OneHotEncoder #print(">> Create OneHotEncoder...\n") try: #from sklearn.preprocessing import OrdinalEncoder # just to raise an ImportError if Scikit-Learn < 0.20 from sklearn.preprocessing import OneHotEncoder except ImportError: from future_encoders import OneHotEncoder # Scikit-Learn < 0.20 #cat_encoder = OneHotEncoder() #housing_cat_1hot = cat_encoder.fit_transform(housing_cat) #print("One hot encoder:\n"+"-"*40+"\n",housing_cat_1hot.toarray()) # add extra feature # get the right column indices: safer than hard-coding indices 3, 4, 5, 6 #print(">> Add extra feature...\n") rooms_ix, bedrooms_ix, population_ix, household_ix = [ list(housing.columns).index(col) for col in ("total_rooms", "total_bedrooms", "population", "households")] from sklearn.preprocessing import FunctionTransformer def add_extra_features(X, add_bedrooms_per_room=True): rooms_per_household = X[:, rooms_ix] / X[:, household_ix] population_per_household = X[:, population_ix] / X[:, household_ix] if add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] #attr_adder = FunctionTransformer(add_extra_features, validate=False, # kw_args={"add_bedrooms_per_room": False}) #housing_extra_attribs = attr_adder.fit_transform(housing.values) #housing_extra_attribs = pd.DataFrame( # housing_extra_attribs, # columns=list(housing.columns)+["rooms_per_household", "population_per_household"], # index=housing.index) #print("After adding extra features:\n"+"-"*40+"\n",housing_extra_attribs.head()) # ## Create a pipeline to prepare the data #print(">> Create a pipeline to prepare the data...\n") from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler try: from sklearn.compose import ColumnTransformer except ImportError: from future_encoders import ColumnTransformer # Scikit-Learn < 0.20 # clean the numreric features num_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy="median")), ('attribs_adder', FunctionTransformer(add_extra_features, validate=False)), ('std_scaler', StandardScaler()), ]) num_attribs = list(housing_num) cat_attribs = ["ocean_proximity"] full_pipeline = ColumnTransformer([ ("num", num_pipeline, num_attribs), ("cat", OneHotEncoder(), cat_attribs), ]) housing_prepared = full_pipeline.fit_transform(housing) print("\n>> prepare housing data sucessfully!\n") ################################################################################################# ###################################### Select and train a model ################################ ################################################################################################# def select_model(model='LR',housing_prepared=housing_prepared,housing_label=housing_label,load=True): '''select the LinearRegression,DecisionTreeRegressor or RandomForestRegressor.''' # save model from sklearn.externals import joblib # load the model if it exists import time time1=time.time() if model=="LR": from sklearn.linear_model import 
LinearRegression time1=time.time() model=LinearRegression() model_name="LinearRegression" elif model=="DT": from sklearn.tree import DecisionTreeRegressor model=DecisionTreeRegressor(random_state=42) model_name="DecisionTreeRegressor" elif model=="RF": from sklearn.ensemble import RandomForestRegressor model=RandomForestRegressor(n_estimators=10, random_state=42) model_name="RandomForestRegressor" elif model=="SVR": from sklearn.svm import SVR model = SVR(kernel="linear") model_name="SVR" else: return None if os.path.exists("model_set/%s.pkl"%model_name): model=joblib.load("model_set/%s.pkl"%model_name) print("\n>> Load < %s > model from the local sucessfully!\n"%model_name) return model # train the model model.fit(housing_prepared,housing_label) # caculate the RMSE from sklearn.metrics import mean_squared_error housing_predictions=model.predict(housing_prepared) mse=mean_squared_error(housing_label,housing_predictions) rmse=np.sqrt(mse) time2=time.time() print("%s trained sucessfully, use %.2fs, the rmse is %.6f."%(model_name,time2-time1,rmse)) with open("model_set/model_statistics.txt",'a+',encoding="utf-8") as f: f.write("[ % s]"%time.ctime()+"%s trained sucessfully, use %.2fs, the rmse is %.6f."%(model_name,time2-time1,rmse)) # Fine-tune your model from sklearn.model_selection import cross_val_score print("\n>> %s Scores:\n"%model_name+"-"*40+"\n") time1=time.time() scores=cross_val_score(model,housing_prepared,housing_label, scoring="neg_mean_squared_error",cv=10) rmse_scores=np.sqrt(-scores) time2=time.time() # check out the final results def display_scores(scores,time_=time2-time1): print("scores:",scores) print("Mean:",scores.mean()) print("Standard deviation:",scores.std()) print("time used: %.2fs"%time_) with open("model_set/model_statistics.txt",'a+',encoding="utf-8") as f: f.write("scores: {}\n".format(scores)) f.write("Mean: {}\n".format(scores.mean())) f.write("Standard deviation: {}\n".format(scores.std())) f.write("time used: %.2fs\n"%time_) f.write("-"*100+"\n") display_scores(rmse_scores) # save the model joblib.dump(model,"model_set/%s.pkl"%model_name) return model ## LinearRegression #lin_reg=select_model() # ## DecisionTreeRegressor #tree_reg=select_model("DT") # ## RandomForestRegressor #forest_reg=select_model("RF") # ### SVR #svr_reg=select_model("SVR") ################################################################################################# ###################################### Adjust the params smoothly ############################### ################################################################################################# def find_best_model(): #from sklearn.model_selection import GridSearchCV #import time #print("\n>> Starting Search the best params,please wait some seconds...") #time1=time.time() # #param_grid=[ # {'n_estimators':[3,10,30],'max_features':[2,4,6,8]}, # {'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]}, # ] ## train the model from sklearn.ensemble import RandomForestRegressor #forest_reg=RandomForestRegressor() #grid_search=GridSearchCV(forest_reg,param_grid,cv=5, # scoring='neg_mean_squared_error') #grid_search.fit(housing_prepared,housing_label) #time2=time.time() #print("\n>> Grid Search sucessfully,use time %.2fs\n"%(time2-time1)) #print("-"*40) #print(grid_search.best_params_) #print(grid_search.best_estimator_) #print("-"*40) #cvres = grid_search.cv_results_ #for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]): # print(np.sqrt(-mean_score), params) # random to adjust the params import 
time print("\n>> Starting Search the best params randomly,please wait some seconds...") from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint time1=time.time() param_distribs = { 'n_estimators': randint(low=1, high=200), 'max_features': randint(low=1, high=8), } forest_reg = RandomForestRegressor(random_state=42) rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs, n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42) rnd_search.fit(housing_prepared, housing_label) time2=time.time() print("\n>> Grid Search sucessfully,use time %.2fs\n"%(time2-time1)) cvres = rnd_search.cv_results_ for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]): print(np.sqrt(-mean_score), params) # show the importance of features feature_importances = rnd_search.best_estimator_.feature_importances_ extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"] #cat_encoder = cat_pipeline.named_steps["cat_encoder"] # old solution cat_encoder = full_pipeline.named_transformers_["cat"] cat_one_hot_attribs = list(cat_encoder.categories_[0]) attributes = num_attribs + extra_attribs + cat_one_hot_attribs print("Importance of features:\n"+"-"*40+"\n") import pprint pprint.pprint(sorted(zip(feature_importances, attributes), reverse=True)) return rnd_search ################################################################################################# ###################################### Final model ############################################## ################################################################################################# def main(): if os.path.exists("model_set/final_model.pkl"): final_model=joblib.load("model_set/final_model.pkl") else: from sklearn.metrics import mean_squared_error time1=time.time() rnd_search=find_best_model() final_model = rnd_search.best_estimator_ X_test = strat_test_set.drop("median_house_value", axis=1) y_test = strat_test_set["median_house_value"].copy() X_test_prepared = full_pipeline.transform(X_test) final_predictions = final_model.predict(X_test_prepared) final_mse = mean_squared_error(y_test, final_predictions) final_rmse = np.sqrt(final_mse) time2=time.time() print("Final model finished,use time %.2fs,rmse is %.6f"%(time2-time1,final_rmse)) # confidence interval from scipy import stats confidence = 0.95 squared_errors = (final_predictions - y_test) ** 2 # mean = squared_errors.mean() m = len(squared_errors) interval_array=np.sqrt(stats.t.interval(confidence, m - 1, loc=np.mean(squared_errors), scale=stats.sem(squared_errors))) print("95% confidence interval is",interval_array) # save model joblib.dump(final_model,"model_set/final_model.pkl") if __name__=="__main__": main() ``` #### File: Jnewgeek/handson-ml/tackle_titanic.py ```python import os os.chdir(os.getcwd()) import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc("axes",labelsize=14) mpl.rc("xtick",labelsize=12) mpl.rc("ytick",labelsize=12) plt.rcParams["font.sans-serif"]=["SimHei"] plt.rcParams["axes.unicode_minus"]=False import seaborn as sns sns.set(font="SimHei") chapter_id="titanic" def save_fig(fig_id,tight_layout=True): path=os.path.join(".","images",chapter_id,fig_id+".png") if tight_layout: plt.tight_layout() plt.savefig(path,format="png",dpi=300) ####################################### load data ########################################### TITANIC_PATH = os.path.join("datasets", "titanic") import pandas as pd import time def load_titanic_data(filename, titanic_path=TITANIC_PATH): csv_path = 
os.path.join(titanic_path, filename) return pd.read_csv(csv_path) print(">> Starting loading data...") time1=time.time() train_data = load_titanic_data("train.csv") test_data = load_titanic_data("test.csv") time2=time.time() print("finished! use time %.2fs."%(time2-time1)) #train_data.head() #train_data.info() #train_data.describe() #train_data["Survived"].value_counts() ################################ Prepare the data #################################### from sklearn.base import BaseEstimator, TransformerMixin # A class to select numerical or categorical columns # since Scikit-Learn doesn't handle DataFrames yet def get_preprocess_pipeline(num_columns=["Age", "SibSp", "Parch", "Fare"], cat_columns=["Pclass", "Sex", "Embarked"]): class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] from sklearn.pipeline import Pipeline try: from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+ except ImportError: from sklearn.preprocessing import Imputer as SimpleImputer # 数值型数据取中位数填补缺失值 #num_columns=["Age", "SibSp", "Parch", "Fare"] num_pipeline = Pipeline([ ("select_numeric", DataFrameSelector(num_columns)), ("imputer", SimpleImputer(strategy="median")), ]) #num_pipeline.fit_transform(train_data) # 字符型数据取众数填补缺失值 class MostFrequentImputer(BaseEstimator, TransformerMixin): def fit(self, X, y=None): self.most_frequent_ = pd.Series([X[c].value_counts().index[0] for c in X], index=X.columns) return self def transform(self, X, y=None): return X.fillna(self.most_frequent_) try: from sklearn.preprocessing import OrdinalEncoder # just to raise an ImportError if Scikit-Learn < 0.20 from sklearn.preprocessing import OneHotEncoder except ImportError: from future_encoders import OneHotEncoder # Scikit-Learn < 0.20 cat_pipeline = Pipeline([ ("select_cat", DataFrameSelector(cat_columns)), ("imputer", MostFrequentImputer()), ("cat_encoder", OneHotEncoder(sparse=False)), ]) #cat_pipeline.fit_transform(train_data) # 合并特征 from sklearn.pipeline import FeatureUnion preprocess_pipeline = FeatureUnion(transformer_list=[ ("num_pipeline", num_pipeline), ("cat_pipeline", cat_pipeline), ]) return preprocess_pipeline # prepared data finally preprocess_pipeline=get_preprocess_pipeline() X_train = preprocess_pipeline.fit_transform(train_data) y_train = train_data["Survived"] ################################## Train model ###################################### def select_model(model_name="SVC",X_train=X_train,y_train=y_train): print(">> %s model...\n"%model_name+"-"*40) time.sleep(0.5) time1=time.time() if model_name=="SVC": # SVC from sklearn.svm import SVC model = SVC(gamma="auto") #model.fit(X_train, y_train) elif model_name=="RF": from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators=100, random_state=42) else: return None # cross_val_score from sklearn.model_selection import cross_val_score model_scores = cross_val_score(model, X_train, y_train, cv=10) time2=time.time() print("finished! 
use time %.2fs,%s mean score:"%(time2-time1,model_name),model_scores.mean()) # test check # X_test = preprocess_pipeline.transform(test_data) # y_pred = svm_clf.predict(X_test) return model,model_scores svm_clf,svm_scores=select_model() forest_clf,forest_scores=select_model("RF") def plot_modelScores(): plt.figure(figsize=(8, 4)) plt.plot([1]*10, svm_scores, ".") plt.plot([2]*10, forest_scores, ".") plt.boxplot([svm_scores, forest_scores], labels=("SVM","Random Forest")) plt.ylabel("Accuracy", fontsize=14) #plot_modelScores() #################### add more feature train_data["AgeBucket"] = train_data["Age"] // 15 * 15 #train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean() train_data["RelativesOnboard"] = train_data["SibSp"] + train_data["Parch"] #train_data[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean() # new pipeline preprocess_pipeline=get_preprocess_pipeline(num_columns=["AgeBucket", "RelativesOnboard", "Fare"]) X_train = preprocess_pipeline.fit_transform(train_data) y_train = train_data["Survived"] # new models svm_clf,svm_scores=select_model("SVC",X_train,y_train) forest_clf,forest_scores=select_model("RF",X_train,y_train) plot_modelScores() # Grid from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint time1=time.time() param_distribs = { 'n_estimators': randint(low=1, high=200), 'max_features': randint(low=1, high=8), } forest_reg = RandomForestClassifier(random_state=42) rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs, n_iter=10, cv=5, scoring='accuracy', random_state=42, verbose=5,n_jobs=-1) rnd_search.fit(X_train, y_train) time2=time.time() print("\n>> Grid Search sucessfully,use time %.2fs\n"%(time2-time1)) final_model=rnd_search.best_estimator_ # 预测值 test_data["AgeBucket"] = test_data["Age"] // 15 * 15 #train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean() test_data["RelativesOnboard"] = test_data["SibSp"] + test_data["Parch"] X_test_prepared = preprocess_pipeline.transform(test_data) final_predictions = final_model.predict(X_test_prepared) submission=load_titanic_data("gender_submission.csv") # 混淆矩阵 from sklearn.metrics import confusion_matrix true_survive=submission["Survived"].values print("混淆矩阵:\n",confusion_matrix(true_survive,final_predictions)) from sklearn.metrics import precision_score, recall_score,f1_score print("精确度:",precision_score(true_survive,final_predictions)) print("召回率:",recall_score(true_survive,final_predictions)) print("F1分数:",f1_score(true_survive,final_predictions)) # ROC from sklearn.metrics import roc_curve fpr,tpr,thresholds=roc_curve(true_survive,final_predictions) # def plot_roc_curve(fpr,tpr,label=None): plt.plot(fpr,tpr,linewidth=2,label=label) plt.plot([0,1],[0,1],'k--') plt.axis([0,1,0,1]) plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate") plt.figure(figsize=(8, 6)) plot_roc_curve(fpr, tpr) from sklearn.metrics import roc_auc_score print("ROC值:",roc_auc_score(true_survive,final_predictions)) submission["Survived"]=final_predictions submission.to_csv("./datasets/titanic/gender_submission_new.csv",index=False,encoding="utf-8") ```
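A small self-contained sketch of the evaluation step at the end of the script above, using toy label arrays in place of the Kaggle submission file and the model predictions; the numbers are illustrative only.
```python
import numpy as np
from sklearn.metrics import (confusion_matrix, precision_score,
                             recall_score, f1_score, roc_auc_score)

# Toy ground-truth labels and predictions standing in for
# submission["Survived"] and final_model.predict(X_test_prepared).
y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0, 1, 1, 0])

print("confusion matrix:\n", confusion_matrix(y_true, y_pred))
print("precision:", precision_score(y_true, y_pred))
print("recall:   ", recall_score(y_true, y_pred))
print("F1:       ", f1_score(y_true, y_pred))
# With hard 0/1 predictions, ROC AUC reduces to a single-threshold value,
# which matches how the script above calls roc_auc_score.
print("ROC AUC:  ", roc_auc_score(y_true, y_pred))
```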
{ "source": "jnewland/ha-conf", "score": 2 }
#### File: custom_components/alarmdotcom/lock.py ```python import logging import re from pyalarmdotcomajax import Alarmdotcom, AlarmdotcomADT, AlarmdotcomProtection1 import voluptuous as vol import homeassistant.components.lock as lock try: from homeassistant.components.lock import LockEntity except ImportError: from homeassistant.components.lock import ( Lock as LockEntity, ) from homeassistant.components.lock import PLATFORM_SCHEMA from homeassistant.const import ( CONF_CODE, CONF_NAME, CONF_PASSWORD, CONF_USERNAME, STATE_JAMMED, STATE_LOCKED, STATE_LOCKING, STATE_UNLOCKED, STATE_UNLOCKING, ) from homeassistant.helpers.aiohttp_client import ( async_create_clientsession, async_get_clientsession, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Alarm.com" CONF_ADT = "adt" CONF_PROTECTION1 = "protection1" CONF_TWO_FACTOR_COOKIE = "two_factor_cookie" DOMAIN = "alarmdotcom" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Optional(CONF_CODE): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_ADT, default=False): cv.boolean, vol.Optional(CONF_PROTECTION1, default=False): cv.boolean, vol.Optional(CONF_TWO_FACTOR_COOKIE): cv.string, } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up a Alarm.com control panel.""" name = config.get(CONF_NAME) code = config.get(CONF_CODE) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) two_factor_cookie = config.get(CONF_TWO_FACTOR_COOKIE) use_new_websession = hass.data.get(DOMAIN) adt_or_protection1 = 0 if config.get(CONF_ADT): adt_or_protection1 = 1 elif config.get(CONF_PROTECTION1): adt_or_protection1 = 2 if not use_new_websession: hass.data[DOMAIN] = True use_new_websession = False alarmdotcom = AlarmDotComLock( hass, name, code, username, password, use_new_websession, adt_or_protection1, two_factor_cookie, ) await alarmdotcom.async_login() async_add_entities([alarmdotcom]) class AlarmDotComLock(LockEntity): """Representation of an Alarm.com status.""" def __init__( self, hass, name, code, username, password, use_new_websession, adt_or_protection1, two_factor_cookie, ): """Initialize the Alarm.com status.""" _LOGGER.debug("Setting up Alarm.com...") self._name = name self._code = code if code else None if use_new_websession: websession = async_create_clientsession(hass) _LOGGER.debug("Using new websession.") else: websession = async_get_clientsession(hass) _LOGGER.debug("Using hass websession.") self._state = None if adt_or_protection1 == 1: adc_class = AlarmdotcomADT elif adt_or_protection1 == 2: adc_class = AlarmdotcomProtection1 else: adc_class = Alarmdotcom self._lock = adc_class( username, password, websession, False, False, False, two_factor_cookie, ) async def async_login(self): """Login to Alarm.com.""" await self._lock.async_login() async def async_update(self): """Fetch the latest state.""" await self._lock.async_update("lock") return self._lock.state @property def name(self): """Return the name of the alarm.""" return self._name @property def code_format(self): """Return one or more digits/characters.""" if self._code is None: return None if isinstance(self._code, str) and re.search("^\\d+$", self._code): return "number" return "text" @property def state(self): """Return the state of the device.""" if self._lock.state.lower() == "open": return STATE_UNLOCKED if self._lock.state.lower() == "locked": 
return STATE_LOCKED return None @property def device_state_attributes(self): """Return the state attributes.""" return {"sensor_status": self._lock.sensor_status} async def async_lock(self, code=None): """Send lock command.""" if self._validate_code(code): await self._lock.async_lock() async def async_unlock(self, code=None): """Send unlock command.""" if self._validate_code(code): await self._lock.async_unlock() def _validate_code(self, code): """Validate given code.""" check = self._code is None or code == self._code if not check: _LOGGER.warning("Wrong code entered") return check ```
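A dependency-free sketch of the two small pieces of logic in the lock entity above: code validation and mapping the panel's reported state string to a lock state. The constants here are placeholder values for illustration, not the Home Assistant ones.
```python
import re

# Placeholder state constants; Home Assistant defines its own.
STATE_LOCKED = "locked"
STATE_UNLOCKED = "unlocked"

def validate_code(configured_code, entered_code):
    """Accept anything when no code is configured, otherwise require a match."""
    return configured_code is None or entered_code == configured_code

def code_format(configured_code):
    """Report whether a UI should show a numeric keypad or a text field."""
    if configured_code is None:
        return None
    if isinstance(configured_code, str) and re.search(r"^\d+$", configured_code):
        return "number"
    return "text"

def map_panel_state(panel_state):
    """Translate the panel's reported state string into a lock state."""
    if panel_state.lower() == "open":
        return STATE_UNLOCKED
    if panel_state.lower() == "locked":
        return STATE_LOCKED
    return None

if __name__ == "__main__":
    assert validate_code(None, "anything")
    assert not validate_code("1234", "0000")
    assert code_format("1234") == "number"
    assert map_panel_state("Locked") == STATE_LOCKED
    assert map_panel_state("open") == STATE_UNLOCKED
```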
{ "source": "jnewton3edinburgh/nbgrader", "score": 2 }
#### File: tests/labextensions/test_course_list.py ```python import contextlib import pytest import sys import subprocess as sp import time from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException from selenium.webdriver.common.action_chains import ActionChains from .conftest import notwindows, _make_nbserver, _make_browser, _close_nbserver, _close_browser @contextlib.contextmanager def nbserver(course, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache, startup_fn=None): server = _make_nbserver( course, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache, startup_fn=startup_fn) try: yield server finally: _close_nbserver(server) @pytest.fixture(scope="module") def browser(request, tempdir): browser = _make_browser(tempdir) def fin(): _close_browser(browser) request.addfinalizer(fin) return browser def _wait(browser): return WebDriverWait(browser, 10) def _click_element(browser: WebDriver, element): ActionChains(browser).click(element).perform() def _click_when_available(browser: WebDriver, by, arg): _wait(browser).until(lambda x: browser.find_element(by, arg)) element = browser.find_element(by, arg) _wait(browser).until(EC.visibility_of(element)) _click_element(browser, element) return element def _load_course_list(browser, port, retries=5): # go to the correct page browser.get("http://localhost:{}/lab".format(port)) def page_loaded(browser): logo_id = 'jp-MainLogo' return len(browser.find_elements_by_id(logo_id)) > 0 time.sleep(15) # wait for the page to load try: _wait(browser).until(page_loaded) except TimeoutException: if retries > 0: print("Retrying page load...") # page timeout, but sometimes this happens, so try refreshing? 
_load_course_list(browser, port, retries=retries - 1) else: print("Failed to load the page too many times") raise side_bar_selector = '[data-id="command-palette"]' cl_selector = '[data-command="nbgrader:course_list"]' time.sleep(1) _click_when_available(browser, By.CSS_SELECTOR, side_bar_selector) _click_when_available(browser, By.CSS_SELECTOR, cl_selector) # make sure courses are visible _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#formgrader_list"))) def _wait_for_list(browser, num_rows): _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#formgrader_list_loading"))) _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#formgrader_list_placeholder"))) _wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#formgrader_list_error"))) _wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#formgrader_list > .list_item")) == num_rows) rows = browser.find_elements_by_css_selector("#formgrader_list > .list_item") assert len(rows) == num_rows return rows @pytest.mark.nbextensions @notwindows def test_local_formgrader(browser, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache): with nbserver("course101", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache): _load_course_list(browser, port) # check that there is one local course rows = _wait_for_list(browser, 1) assert rows[0].text == "course101" # make sure the url of the course is correct link = browser.find_elements_by_css_selector("#formgrader_list > .list_item a")[0] url = link.get_attribute("href") assert url == "http://localhost:{}/formgrader".format(port) @pytest.mark.nbextensions @notwindows def test_no_jupyterhub(browser, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache): def update_config(env): with open('nbgrader_config.py', 'a') as fh: fh.write("from nbgrader.auth import JupyterHubAuthPlugin\n") fh.write("c.Authenticator.plugin_class = JupyterHubAuthPlugin\n") args = [ "course101", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache, update_config ] with nbserver(*args): # check that there is one local course rows = _wait_for_list(browser, 1) assert rows[0].text == "course101" # make sure the url of the course is correct link = browser.find_elements_by_css_selector("#formgrader_list > .list_item a")[0] url = link.get_attribute("href") assert url == "http://localhost:{}/formgrader".format(port) @pytest.mark.nbextensions @notwindows def test_no_formgrader(browser, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache): def disable_formgrader(env): sp.check_call([ sys.executable, "-m", "jupyter", "nbextension", "disable", "--user", "formgrader/main", "--section=tree"], env=env) sp.check_call([ sys.executable, "-m", "jupyter", "serverextension", "disable", "--user", "nbgrader.server_extensions.formgrader"], env=env) args = [ "course101", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache, disable_formgrader ] with nbserver(*args): browser.refresh() time.sleep(5) _load_course_list(browser, port) _wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#formgrader_list_placeholder"))) # TODO: add a test case where jupyterhub is running, and a test case where a # course group doesn't have a corresponding formgrader. I think this will # require creating a small mock JupyterHub server that can run and accept the # basic requests. ```
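A minimal, browser-free sketch of the retry pattern `_load_course_list` uses above: attempt an operation and, on timeout, retry a fixed number of times before giving up. `flaky_load` is a hypothetical stand-in for the Selenium page load.
```python
import random

class TimeoutException(Exception):
    pass

def flaky_load():
    """Hypothetical stand-in for a page load that sometimes times out."""
    if random.random() < 0.5:
        raise TimeoutException("page did not load")
    return "loaded"

def load_with_retries(retries=5):
    try:
        return flaky_load()
    except TimeoutException:
        if retries > 0:
            print("Retrying page load...")
            return load_with_retries(retries=retries - 1)
        print("Failed to load the page too many times")
        raise

if __name__ == "__main__":
    random.seed(0)  # deterministic demo: fails twice, then succeeds
    print(load_with_retries())
```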
{ "source": "jnez71/AcroCart", "score": 3 }
#### File: AcroCart/acrocart/simulator.py ```python from __future__ import division import numpy as np; npl = np.linalg import time class Simulator(object): """ Class for simulation setup of an acrocart system. Faster-than-realtime timeseries simulation requires MatPlotLib for plotting. Realtime simulation requires Mayavi for graphics. (http://docs.enthought.com/mayavi/mayavi/installation.html) dyn: acrocart.Dynamics object """ def __init__(self, dyn): self.dyn = dyn def simulate(self, q0, control, tN=0, dt=0.005, goal=None, override=None, Des=None): """ Runs the simulation and displays / plots the results. q0: array initial state condition [pos, ang1, ang2, vel, angvel1, angvel2] control: function of array state, scalar time, and scalar goal that returns a scalar input force tN: scalar duration in time units, set to 0 for realtime simulation dt: scalar timestep for simulation goal: optional scalar position to be passed to control and marked as "goal" in visualizations override: optional function of scalar time that returns an array state that overrides the actual simulation (allows you to visualize an arbitrary trajectory) Des: tuple of time and state trajectories to be plotted as desired """ q0 = np.array(q0, dtype=np.float64) # Positive tN means timeseries simulation if tN > 0: from matplotlib import pyplot # Record real start time print "----" print "Simulating acrocart for a horizon of {}...".format(tN) start_time = time.time() # Run timeseries simulation and store results T = np.arange(0, tN+dt, dt) Q = np.zeros((len(T), self.dyn.n_q), dtype=np.float64) U = np.zeros((len(T), self.dyn.n_u), dtype=np.float64) Q[0] = np.copy(q0) for i, t in enumerate(T[:-1]): U[i] = control(Q[i], t, goal) if override is None: Q[i+1] = self.dyn.step(Q[i], U[i], dt) else: Q[i+1] = override(t) print "Simulation finished in {} realtime seconds.".format(np.round(time.time()-start_time, 3)) print "Final state: {}".format(np.round(Q[-1], 3)) print "Sum of squared input forces: {}".format(np.round(np.sum(U[:, 0]**2), 3)) # Plot results print "Plotting results... (close plots to continue)" print "----" fig = pyplot.figure() fig.suptitle("AcroCart Simulation", fontsize=24) ax = fig.add_subplot(2, 1, 1) ax.plot(T, Q[:, 0], "k", label="pos") ax.plot(T, Q[:, 1], "g", label="ang1") ax.plot(T, Q[:, 2], "b", label="ang2") if Des is not None: ax.plot(Des[0], Des[1][:, 0], "k--") ax.plot(Des[0], Des[1][:, 1], "g--") ax.plot(Des[0], Des[1][:, 2], "b--") ax.set_xlim([T[0], T[-1]]) ax.legend(fontsize=16) ax.set_ylabel("Pose", fontsize=16) ax.grid(True) ax = fig.add_subplot(2, 1, 2) ax.plot(T, U[:, 0], "r", label="input") ax.set_xlim([T[0], T[-1]]) ax.set_ylabel("Input", fontsize=16) ax.set_xlabel("Time", fontsize=16) ax.grid(True) pyplot.show() # blocking # Nonpositive tN implies realtime simulation else: print "----" print "Starting realtime acrocart simulation..." print "Setting-up Mayavi graphics..." 
from mayavi import mlab import os, vtk if os.path.exists("/dev/null"): shadow_realm = "/dev/null" else: shadow_realm = "c:\\nul" mlab_warning_output = vtk.vtkFileOutputWindow() mlab_warning_output.SetFileName(shadow_realm) vtk.vtkOutputWindow().SetInstance(mlab_warning_output) # Generate visualization objects and initial figure view fig = mlab.figure(size=(500, 500), bgcolor=(0.25, 0.25, 0.25)) rail = mlab.plot3d(self.dyn.rail_lims, (0, 0), (0, 0), line_width=1, color=(1, 1, 1)) cart = mlab.points3d(q0[0], 0, 0, scale_factor=0.2, mode="cube", color=(0, 0, 1)) joint1 = mlab.points3d(q0[0], -0.125, 0, scale_factor=0.12, color=(0, 1, 1)) x1, y1 = q0[0]+self.dyn.l1*np.sin(q0[1]), -self.dyn.l1*np.cos(q0[1]) pole1 = mlab.plot3d((q0[0], x1), (-0.15, -0.15), (0, y1), line_width=1, color=(0, 1, 0)) joint2 = mlab.points3d(x1, -0.175, y1, scale_factor=0.12, color=(0, 1, 1)) pole2 = mlab.plot3d((x1, x1+self.dyn.l2*np.sin(q0[2])), (-0.2, -0.2), (y1, y1-self.dyn.l2*np.cos(q0[2])), line_width=1, color=(1, 0, 0)) disp = mlab.text3d(-0.6, 0, 1.2*(self.dyn.l1+self.dyn.l2), "t = 0.0", scale=0.45) if goal is not None: goal_viz = mlab.points3d(goal, 0, -1.2*(self.dyn.l1+self.dyn.l2), scale_factor=0.2, mode="axes", color=(1, 0, 0)) recenter = lambda: mlab.view(azimuth=-90, elevation=90, focalpoint=(np.mean(self.dyn.rail_lims), 0, 0), distance=1.8*np.sum(np.abs(self.dyn.rail_lims))) recenter() # Setup user keyboard interactions disturb = [0.0] realtime_goal = [goal] reset = [False] def keyPressEvent(event): k = str(event.text()) if k == '.': realtime_goal[0] += 0.2 elif k == ',': realtime_goal[0] -= 0.2 elif k == ' ': realtime_goal[0] = goal elif k == 'v': recenter() elif k == 'r': t[0] = 0.0 q[0] = np.copy(q0) realtime_goal[0] = goal print "User triggered reset!" reset[0] = True start_time[0] = time.time() fig.scene._vtk_control.keyPressEvent = keyPressEvent print "--\nUSER KEYBOARD CONTROLS:" if override is None: print "Increment / decrement goal with '>' / '<' and reset goal with ' ' (spacebar)." print "Reset view with 'v'. Reset simulation with 'r'.\n--" print "(close all Mayavi windows to continue...)" # Wrap simulation in animation @mlab.animate(delay=50) # 20 FPS is best Mayavi can do def realtime_loop(): while True: # Simulate physics up to realtime while (t[0] < time.time()-start_time[0]) and not reset[0]: if override is None: q[0] = self.dyn.step(q[0], control(q[0], t[0], realtime_goal[0]), dt, disturb[0]) else: q[0] = override(t[0]) t[0] += dt # Update animation reset[0] = False cart.mlab_source.set(x=q[0][0]) joint1.mlab_source.set(x=q[0][0]) x1, y1 = q[0][0]+self.dyn.l1*np.sin(q[0][1]), -self.dyn.l1*np.cos(q[0][1]) pole1.mlab_source.set(x=(q[0][0], x1), z=(0, y1)) joint2.mlab_source.set(x=x1, z=y1) pole2.mlab_source.set(x=(x1, x1+self.dyn.l2*np.sin(q[0][2])), z=(y1, y1-self.dyn.l2*np.cos(q[0][2]))) if goal is not None: goal_viz.mlab_source.set(x=realtime_goal[0]) disp.text = "t = " + str(np.round(t[0], 1)) yield # Begin simulation and visualization t = [0.0] q = [np.copy(q0)] start_time = [time.time()] realtime_loop() mlab.show() # blocking print "----" ```
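A minimal sketch of the fixed-step simulation loop used in the timeseries branch above, applied to a toy 1-D cart instead of the acrocart dynamics; the toy model, damping, and PD gains are assumptions for illustration only.
```python
import numpy as np

def toy_step(q, u, dt, mass=1.0, damping=0.1):
    """Euler step for a 1-D cart: q = [position, velocity], u = force."""
    pos, vel = q
    acc = (u - damping * vel) / mass
    return np.array([pos + vel * dt, vel + acc * dt])

def pd_control(q, t, goal, kp=4.0, kd=2.0):
    """Simple PD law driving the cart toward the goal position."""
    return kp * (goal - q[0]) - kd * q[1]

if __name__ == "__main__":
    dt, tN, goal = 0.005, 5.0, 1.0
    T = np.arange(0.0, tN + dt, dt)
    Q = np.zeros((len(T), 2))
    U = np.zeros(len(T))
    for i, t in enumerate(T[:-1]):
        U[i] = pd_control(Q[i], t, goal)
        Q[i + 1] = toy_step(Q[i], U[i], dt)
    print("final state:", np.round(Q[-1], 3))  # should settle near [1, 0]
```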
{ "source": "jnez71/adaptive_control", "score": 3 }
#### File: adaptive_control/2link/control_2link.py ```python from __future__ import division import numpy as np import numpy.linalg as npl from collections import deque from cmath import sqrt ################################################# PRIMARY CLASS class Controller: def __init__(self, dt, q0, target, path_type, kp, kd, kg, ku, kf, umax, vmax, amax, history_size, filter_window, adapt0): """ Set-up. Takes call-period, initial state, target pose, path type, gains, integral/filter window size, effort limit, maximum speed and acceleration, history stack size, selection type, and initial condition. """ self.ncontrols = len(umax) self.nparams = len(adapt0) if filter_window and np.isfinite(filter_window): self.nfilt = int(filter_window / dt) else: self.nfilt = 0 self.set_gains(kp, kd, kg, ku, kf) self.set_limits(umax, vmax, amax) self.adapt = adapt0 self.adapt_err = np.zeros(self.nparams) self.Lest = np.zeros(2) self.mest = np.zeros(2) self.gest = 0 self.uf = np.zeros(self.ncontrols) self.Yuf = np.zeros((self.ncontrols, self.nparams)) self.Yuf1 = np.zeros((self.ncontrols, self.nparams)) self.uf_stack = deque([self.uf] * self.nfilt) self.Yuf1_stack = deque([self.Yuf1] * self.nfilt) self.q0 = q0 self.q_stack = deque([q0] * self.nfilt) self.history_stack = deque([self.make_history_pair(self.Yuf, self.uf)] * history_size) self.history_size = history_size self.history_eig = 0 self.YY_stack = deque([np.zeros((self.nparams, self.nparams))] * history_size) self.YY_sum = np.zeros((self.nparams, self.nparams)) self.time = 0 self.set_path(q0, target, path_type, dt) self.rep = np.zeros(self.ncontrols) self.rep_T = np.zeros(self.ncontrols) self.rep_stack = deque([self.rep] * self.ncycle) self.kill = False ######################## def set_gains(self, kp, kd, kg, ku, kf): """ Sets proportional, derivative, adaptive, and filter gains. """ self.kp = np.array(kp, dtype=np.float32) self.kd = np.array(kd, dtype=np.float32) self.kr = self.kp / self.kd if type(kg) is str: if kg == 'LS': self.kg = 100*np.eye(self.nparams) self.use_LS = True else: raise ValueError("Did you mean kg = 'LS' (least squares)?") else: self.kg = np.diag(kg) self.use_LS = False self.ku = np.diag(ku) self.kf = np.array(kf, dtype=np.float32) ######################## def set_limits(self, umax, vmax, amax): """ Sets model limits. Uses the limits to compute a model reference for tracking, and uses repmax for limiting repetitive learning. """ self.umax = np.array(umax, dtype=np.float32) self.vmax = np.array(vmax, dtype=np.float32) self.amax = np.array(amax, dtype=np.float32) self.saturated = False if np.inf in self.umax or 0 in self.umax: self.umaxref = np.array([250, 30], dtype=np.float32) else: self.umaxref = self.umax self.dref = self.umaxref / self.vmax if np.inf in self.amax: self.mref = np.array([0.01, 0.01], dtype=np.float32) else: self.mref = self.umaxref / self.amax self.repmax = np.array([15, 15]) ######################## def set_path(self, q0, target, path_type, dt): """ Resets controller time and reference acceleration. Sets the path initial state, the target position, and the type of path. Updates reference q to its initial t=0 value. If the path will be cyclic, repetitive learning is enabled. The path cycle period is hardcoded in. 
""" self.path_time = 0 self.qref = np.array(q0) self.aref = np.zeros(self.ncontrols) self.path_type = path_type if path_type == 'train': self.target = 2*np.pi*(np.random.rand(2) - 0.5) else: self.target = np.array(target) if path_type == 'cycle': self.use_RL = True else: self.use_RL = False self.Tcycle = 5 # s self.ncycle = int(2 * self.Tcycle / dt) self.update_ref(0) ######################## def get_effort(self, q, dt): """ Returns the vector of torques as a PD controller plus a feedforward term that uses an estimate of the system's physical parameters. The output is saturated at umax as specified by the user previously. Before returning the torques, the latest parameter estimate is also updated. """ # Tracking errors E = self.qref[:2] - q[:2] Edot = self.qref[2:] - q[2:] tracking_err = self.kr*E + Edot # Tracking regressor Y = np.array([ [np.cos(q[0]), self.aref[0] - self.kr[0]*q[2] + self.kr[0]*self.qref[2], np.cos(q[0] + q[1]), np.cos(q[1])*(2*self.aref[0] + self.aref[1] - 2*self.kr[0]*q[2] - self.kr[1]*q[3] + 2*self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]) - q[3]*np.sin(q[1])*(2*q[2] + q[3]), self.aref[0] + self.aref[1] - self.kr[0]*q[2] - self.kr[1]*q[3] + self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]], [0, 0, np.cos(q[0] + q[1]), q[2]**2*np.sin(q[1]) + np.cos(q[1])*(self.aref[0] - self.kr[0]*q[2] + self.kr[0]*self.qref[2]), self.aref[0] + self.aref[1] - self.kr[0]*q[2] - self.kr[1]*q[3] + self.kr[0]*self.qref[2] + self.kr[1]*self.qref[3]] ]) # Control law u = self.kp*E + self.kd*Edot + Y.dot(self.adapt) + self.rep # Learning gradient gain if self.use_LS: # Approximate least-squares gain choice self.kg = self.kg - (self.kg.dot(self.ku.dot(self.Yuf.T.dot(self.Yuf))).dot(self.kg))*dt # Update adaptation self.adapt = self.adapt + self.kg.dot(Y.T.dot(tracking_err) + self.ku.dot(self.adapt_err))*dt if self.use_RL: self.rep = np.clip(self.rep_T, -self.repmax, self.repmax) + self.kd*tracking_err self.rep_stack.append(self.rep) self.rep_T = self.rep_stack.popleft() # Update filtered prediction regressor, filtered control effort, and learning history stack self.update_learning(q, u, dt) # Update reference trajectory and controller life time self.update_ref(dt) self.time = self.time + dt # Safety saturation of output self.saturated = False for i, mag in enumerate(abs(u)): if mag > self.umax[i]: u[i] = self.umax[i] * np.sign(u[i]) self.saturated = True # Return effort torques return u ######################## def update_learning(self, q, u, dt): """ Concurrent-learning plus (if applicable) repetitive learning. http://arxiv.org/pdf/1507.08903.pdf http://www.me.berkeley.edu/~horowitz/Publications_files/Papers_numbered/Journal/24j_Kaneko_repetitive_manipulators_IEEE_TRA97.pdf """ # Instantaneous parts of filtered prediction regressor Yuf2_now = np.array([ [0, q[2], 0, np.cos(q[1])*(2*q[2] + q[3]), q[2] + q[3]], [0, 0, 0, q[2]*np.cos(q[1]), q[2] + q[3]] ]) Yuf2_then = np.array([ [0, self.q0[2], 0, np.cos(self.q0[1])*(2*self.q0[2] + self.q0[3]), self.q0[2] + self.q0[3]], [0, 0, 0, self.q0[2]*np.cos(self.q0[1]), self.q0[2] + self.q0[3]] ]) Yuf2 = Yuf2_now - Yuf2_then # Convolutional filtering of prediction regressor and control effort... if self.kf: self.Yuf = self.kf*(self.Yuf1 + Yuf2) Yuf1dot = np.array([ [np.cos(q[0]), -self.kf*q[2], np.cos(q[0] + q[1]), -self.kf*np.cos(q[1])*(2*q[2] + q[3]), -self.kf*(q[2] + q[3])], [0, 0, np.cos(q[0] + q[1]), q[2]*((q[2] + q[3])*np.sin(q[1]) - self.kf*np.cos(q[1])), -self.kf*(q[2] + q[3])] ]) # infinite window continuous sum... 
if not self.nfilt: self.uf = self.uf + self.kf*(u - self.uf)*dt self.Yuf1 = self.Yuf1 + (Yuf1dot - self.kf*self.Yuf1)*dt # ...or finite window push pop else: self.uf_stack.append(self.kf*(u - self.uf)*dt) self.uf = (self.uf - self.uf_stack.popleft()) + self.uf_stack[-1] self.Yuf1_stack.append((Yuf1dot - self.kf*self.Yuf1)*dt) self.Yuf1 = (self.Yuf1 - self.Yuf1_stack.popleft()) + self.Yuf1_stack[-1] self.q_stack.append(q) self.q0 = self.q_stack.popleft() # ...or integral filtering of prediction regressor and control effort if kf = 0 else: self.Yuf = self.Yuf1 + Yuf2 Yuf1dot = np.array([ [np.cos(q[0]), 0, np.cos(q[0] + q[1]), 0, 0], [0, 0, np.cos(q[0] + q[1]), q[2]*(q[2] + q[3])*np.sin(q[1]), 0] ]) # infinite window continuous sum... if not self.nfilt: self.uf = self.uf + u*dt self.Yuf1 = self.Yuf1 + Yuf1dot*dt # ...or finite window push pop else: self.uf_stack.append(u*dt) self.uf = (self.uf - self.uf_stack.popleft()) + self.uf_stack[-1] self.Yuf1_stack.append(Yuf1dot*dt) self.Yuf1 = (self.Yuf1 - self.Yuf1_stack.popleft()) + self.Yuf1_stack[-1] self.q_stack.append(q) self.q0 = self.q_stack.popleft() # If stack size is > 0 then use selective learning... if self.history_size: # Candidate data point new_data = self.make_history_pair(self.Yuf, self.uf) new_YY = self.Yuf.T.dot(self.Yuf) # If buffer is full... if self.time > dt*self.history_size: # Space for storing minimum eigenvalues during new data point testing eig_mins = np.zeros(self.history_size) # YY_sum if we add new data but don't remove any extended_sum = self.YY_sum + new_YY # Test all possible insertions of the new data for i in xrange(self.history_size): candidate_sum = extended_sum - self.YY_stack[i] try: assert np.isfinite(candidate_sum[0, 0]) eig_mins[i] = npl.eigvalsh(candidate_sum)[0] except (npl.LinAlgError, AssertionError): print("ADAPTATION UNSTABLE: try a smaller kg (or pick kg='LS'), or try a smaller stack_size.") self.kill = True return 0 # Take best possible insertion if it raises the minimum eigenvalue of our current stack hotseat = np.argmax(eig_mins) if eig_mins[hotseat] > self.history_eig and not self.saturated: # Print if wisdom has increased significantly if eig_mins[hotseat] - self.history_eig > 0.001: print('Significant: {} @ time: {}'.format(np.round(self.history_eig*100, 1), self.time)) # Update history self.history_stack[hotseat] = new_data self.history_eig = eig_mins[hotseat] self.YY_sum = extended_sum - self.YY_stack[hotseat] self.YY_stack[hotseat] = new_YY # ...until then just learn regardless else: self.history_stack.append(new_data) self.history_stack.popleft() self.YY_stack.append(new_YY) self.YY_sum = (self.YY_sum - self.YY_stack.popleft()) + new_YY print('Buffering @ time: {}'.format(self.time)) # Update estimated adaptation error self.adapt_err = np.zeros(self.nparams) for i, pair in enumerate(self.history_stack): self.adapt_err = self.adapt_err + pair['Yi'].T.dot(pair['ui'] - pair['Yi'].dot(self.adapt)) # ...otherwise just use newest data point ("composite adaptation") else: self.adapt_err = self.Yuf.T.dot(self.uf - self.Yuf.dot(self.adapt)) # Solve for system parameters using dynamic parameter estimates, taking a great guess at g if all(np.around(abs(self.adapt), 2)): self.Lest = 9.81 * abs(np.array([self.adapt[1] / self.adapt[0], self.adapt[4] / self.adapt[2]])) self.mest[1] = abs(self.adapt[4] / self.Lest[1]**2) self.mest[0] = abs((self.adapt[1] / self.Lest[0]**2) - self.mest[1]) ######################## def make_history_pair(self, Yi, ui): """ Creates a history pair as a dictionary containing 
keys 'Yi' and 'ui', which are the filtered regressor and filtered effort for that instant. """ return {'Yi': Yi, 'ui': ui} ######################## def update_ref(self, dt): """ Updates the reference state qref depending on the settings created in set_path. In every case, a spring-damper tuned to vmax and amax is used to generate the profile between each discontinuous target. 'train': sequence of random joint-space configurations 'waypoint': a single end-effector-space waypoint 'random': sequence of random 'waypoint's 'cycle': switching between two 'waypoint's at Tcycle time """ self.path_time = self.path_time + dt if self.path_type == 'train': Eref = self.target[:2] - self.qref[:2] Erefdot = -self.qref[2:] uref = self.kp*Eref + self.kd*Erefdot self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt if self.path_time > self.Tcycle: self.set_path(self.qref, 2*np.pi*(np.random.rand(2) - 0.5), 'train', dt) elif self.path_type in ['waypoint', 'random']: target_q = self.kinem_reverse(np.concatenate((self.target, [0, 0])), self.qref)[:2] Eref = target_q[:2] - self.qref[:2] Erefdot = -self.qref[2:] uref = self.kp*Eref + self.kd*Erefdot self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt if self.path_type == 'random' and self.path_time > self.Tcycle: searching = True while searching: target = sum(self.Lest)*(np.random.rand(2) - 0.5) if (all(np.around(abs(self.Lest), 5)) and abs((npl.norm(target)**2 - self.Lest[0]**2 - self.Lest[1]**2) / (2*self.Lest[0]*self.Lest[1])) <= 1 and npl.norm(target - self.target) > 1): searching = False self.set_path(self.qref, target, 'random', dt) elif self.path_type == 'cycle': Eref = self.target[:2] - self.qref[:2] Erefdot = -self.qref[2:] uref = self.kp*Eref + self.kd*Erefdot self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt if self.path_time > self.Tcycle: self.set_path(self.qref, -self.target, 'cycle', dt) else: raise ValueError("Invalid path_type.") ######################## def reference_dynamics(self, qref, uref): """ Computes reference state derivative (qrefdot). Takes reference state (qref) and reference control input (uref). Spring-damper model tuned to vmax (terminal velocity) and amax (saturation). """ # Imposed actuator saturation for i, mag in enumerate(abs(uref)): if mag > self.umaxref[i]: uref[i] = self.umaxref[i] * np.sign(uref[i]) # Simple linear evolution return np.concatenate((qref[2:] , (uref - self.dref*qref[2:]) / self.mref)) ######################## def kinem_reverse(self, x, qlast=None): """ Given some end effector state x, solves for the corresponding joint state q. Optionally uses the last joint state qlast to decide on the closest new q solution. 
""" if all(np.around(abs(self.Lest), 5)): c2 = (npl.norm(x[:2])**2 - self.Lest[0]**2 - self.Lest[1]**2) / (2*self.Lest[0]*self.Lest[1]) else: c2 = (npl.norm(x[:2])**2 - 2) / 2 s2a = np.real(sqrt(1 - c2**2)) s2b = -s2a Jp = np.array([[self.Lest[0] + self.Lest[1]*c2, -self.Lest[1]*s2a], [self.Lest[1]*s2a, self.Lest[0] + self.Lest[1]*c2] ]) if abs(c2) > 1 or np.isclose(npl.det(Jp), 0): ta = 2*np.pi*(np.random.rand(2)-0.5) tb = 2*np.pi*(np.random.rand(2)-0.5) else: c1a, s1a = npl.inv(Jp).dot(x[:2]) c1b, s1b = npl.inv(Jp.T).dot(x[:2]) ta = np.array([np.arctan2(s1a, c1a), np.arctan2(s2a, c2)]) tb = np.array([np.arctan2(s1b, c1b), np.arctan2(s2b, c2)]) if qlast is None or npl.norm(ta-qlast[:2]) < npl.norm(tb-qlast[:2]): t = ta else: t = tb Jv = np.array([[-(self.Lest[0]*np.sin(t[0]) + self.Lest[1]*np.sin(t[0]+t[1])), -self.Lest[1]*np.sin(t[0]+t[1])], [self.Lest[0]*np.cos(t[0]) + self.Lest[1]*np.cos(t[0]+t[1]), self.Lest[1]*np.cos(t[0]+t[1])] ]) if np.isclose(npl.det(Jv), 0): w = np.zeros(2) else: w = npl.inv(Jv).dot(x[2:]) return np.concatenate((t, w)) ``` #### File: adaptive_control/neural/test_nn_controller_2dof.py ```python from __future__ import division import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from nn_controller_2dof import NN_controller ################################################# PHYSICAL PARAMETERS # Simulation duration, timestep, and animation T = 40 # s dt = 0.001 # s framerate = 60 # fps outline_path = False # should path outline be shown on animation # Initial condition q = np.deg2rad([-90, 0, 0, 0]) # [rad, rad, rad/s, rad/s] # Link lengths L = [1, 0.5] # m # Link masses m = [5, 3] # kg # Local gravity g = 9.81 # m/s^2 # Joint damping d = [0.05, 0.05] # (N*m)/(rad/s) # Joint friction b = [1, 1] # N*m c = [2, 2] # s/rad # Actuator limits umax = [250, 30] # N*m # Vibration noise vibe_mean = [0, 0] # N*m vibe_stdv = [0, 0] # N*m # Sensor noise sensor_mean = [0, 0, 0, 0] # [rad, rad, rad/s, rad/s] sensor_stdv = [0, 0, 0, 0] # [rad, rad, rad/s, rad/s] ################################################# CONTROL SYSTEM PARAMETERS # Proportional gains kp = [100, 100] # (N*m)/(rad) # Derivative gains kd = [100, 100] # (N*m)/(rad/s) # Learning gains n = 10 # number of neurons kv = 10 * np.ones((len(q)+1, len(q)+1)) kw = 10 * np.ones((n+1, n+1)) # Path to track path_type = 'train' # 'waypoint', 'random', 'train', or 'cycle' target = np.deg2rad([70, 45]) # m, or rad (for 'train' and 'cycle') vmax = [np.pi, np.pi] # rad/s amax = [5, 1] # rad/s^2 # Initialize controller controller = NN_controller(dt, q, target, path_type, kp, kd, n, kv, kw, umax, vmax, amax) ################################################# EQUATIONS OF MOTION def dynamics(q, u): """ Returns state derivative (qdot). Takes control input (u) and current state (q). 
""" # Externally set parameters global L, m, g, b, c, umax # Mass matrix M(q) M = np.zeros((2, 2)) M[0, 0] = (m[0]+m[1])*L[0]**2 + m[1]*L[1]**2 + 2*m[1]*L[0]*L[1]*np.cos(q[1]) M[0, 1] = m[1]*L[1]**2 + m[1]*L[0]*L[1]*np.cos(q[1]) M[1, 0] = M[0, 1] # symmetry M[1, 1] = m[1]*L[1]**2 # Centripetal and coriolis vector V(q) V = np.array([ -m[1]*L[0]*L[1]*(2*q[2]*q[3]+q[3]**2)*np.sin(q[1]), m[1]*L[0]*L[1]*q[2]**2*np.sin(q[1]) ]) # Gravity vector G(q) G = np.array([ g*(m[0]+m[1])*L[0]*np.cos(q[0]) + m[1]*g*L[1]*np.cos(q[0]+q[1]), m[1]*g*L[1]*np.cos(q[0]+q[1]) ]) # Joint damping D(q) D = np.array([ d[0]*q[2], d[1]*q[3] ]) # Joint friction F = np.array([ b[0]*np.tanh(c[0]*q[2]), b[1]*np.tanh(c[1]*q[3]) ]) # Vibration noise introduced in an industrial environment f = vibe_mean + vibe_stdv*np.random.randn(2) # Store internal dynamics for viewing global M_store, V_store, G_store, F_store M_store = M V_store = V G_store = G F_store = F # Actuator saturation for i, mag in enumerate(abs(u)): if mag > umax[i]: u[i] = umax[i] * np.sign(u[i]) # [theta1dot, theta2dot] = [w1, w2] and [w1dot, w2dot] = (M^-1)*(u-V-G-D-F) return np.concatenate((q[2:], np.linalg.inv(M).dot(u + f - V - G - D - F))) def kinem_forward(q, L): """ Returns the state of the end effector (x = [px, py, vx, vy]). Takes the current joint state (q) and link lengths (L). """ return np.array([ L[0]*np.cos(q[0]) + L[1]*np.cos(q[0]+q[1]), L[0]*np.sin(q[0]) + L[1]*np.sin(q[0]+q[1]), -L[0]*np.sin(q[0])*q[2] - L[1]*np.sin(q[0]+q[1])*(q[2]+q[3]), L[0]*np.cos(q[0])*q[2] + L[1]*np.cos(q[0]+q[1])*(q[2]+q[3]) ]) ################################################# SIMULATION # Define time domain t_arr = np.arange(0, T, dt) # Preallocate results memory q_history = np.zeros((len(t_arr), 4)) x_history = np.zeros((len(t_arr), 4)) qref_history = np.zeros((len(t_arr), 4)) xref_history = np.zeros((len(t_arr), 4)) target_history = np.zeros((len(t_arr), 2)) u_history = np.zeros((len(t_arr), 2)) y_history = np.zeros((len(t_arr), 2)) dyn_history = np.zeros((len(t_arr), 2)) # Keep some dynamics internals too M_store = [] V_store = [] G_store = [] F_store = [] # Integrate dynamics using zero-order forward stepping for i, t in enumerate(t_arr): # Controller's decision sensor_noise = sensor_mean + sensor_stdv*np.random.randn(4) u = controller.get_effort(q + sensor_noise, dt) # Dynamics at this instant qdot = dynamics(q, u) # Record this instant q_history[i, :] = q x_history[i, :] = kinem_forward(q, L) qref_history[i, :] = controller.qref xref_history[i, :] = kinem_forward(controller.qref, L) target_history[i, :] = controller.target u_history[i, :] = u y_history[i, :] = controller.y dyn_history[i, :] = M_store.dot(controller.aref) + M_store.dot(controller.kr*(controller.qref[2:] - q[2:])) + V_store + G_store + F_store # Quit early if something breaks if controller.kill: break # Modify any time-varying parameters pass # Step forward, qnext = qlast + qdot*dt q = q + qdot*dt ################################################# VISUALIZATION # Figure for joint space results fig1 = plt.figure() fig1.suptitle('Joint Space', fontsize=20) # Plot joint angle 1 ax1 = fig1.add_subplot(2, 3, 1) ax1.set_ylabel('Angle 1 (deg)', fontsize=16) ax1.plot(t_arr, np.rad2deg(q_history[:, 0]), 'k', t_arr, np.rad2deg(qref_history[:, 0]), 'g--') ax1.grid(True) # Plot joint angle 2 ax1 = fig1.add_subplot(2, 3, 2) ax1.set_ylabel('Angle 2 (deg)', fontsize=16) ax1.plot(t_arr, np.rad2deg(q_history[:, 1]), 'k', t_arr, np.rad2deg(qref_history[:, 1]), 'g--') ax1.grid(True) # Plot control efforts ax1 = 
fig1.add_subplot(2, 3, 3)
ax1.set_ylabel('Torque (N*m)', fontsize=16)
ax1.plot(t_arr, u_history[:, 0], 'b', t_arr, u_history[:, 1], 'g')
ax1.grid(True)

# Plot joint velocity 1
ax1 = fig1.add_subplot(2, 3, 4)
ax1.set_ylabel('Velocity 1 (deg/s)', fontsize=16)
ax1.plot(t_arr, np.rad2deg(q_history[:, 2]), 'k', t_arr, np.rad2deg(qref_history[:, 2]), 'g--')
ax1.set_xlabel('Time (s)')
ax1.grid(True)

# Plot joint velocity 2
ax1 = fig1.add_subplot(2, 3, 5)
ax1.set_ylabel('Velocity 2 (deg/s)', fontsize=16)
ax1.plot(t_arr, np.rad2deg(q_history[:, 3]), 'k', t_arr, np.rad2deg(qref_history[:, 3]), 'g--')
ax1.set_xlabel('Time (s)')
ax1.grid(True)

# Plot adaptive estimates
ax1 = fig1.add_subplot(2, 3, 6)
ax1.set_ylabel('NN Estimation Error', fontsize=16)
ax1.plot(t_arr, dyn_history[:, 0]-y_history[:, 0], 'b', t_arr, dyn_history[:, 1]-y_history[:, 1], 'g')
ax1.set_xlabel('Time (s)')
ax1.grid(True)

plt.show()

# Plot for repetitive learning results if applicable
fig1a = plt.figure()
fig1a.suptitle('NN Learning Evolution', fontsize=20)
ax1a = fig1a.add_subplot(1, 2, 1)
ax1a.set_xlabel('Time (s)')
ax1a.set_ylabel('Link 1 Dynamics Estimate (N*m)')
ax1a.plot(t_arr, y_history[:, 0], 'k', t_arr, dyn_history[:, 0], 'g--')
ax1a = fig1a.add_subplot(1, 2, 2)
ax1a.set_xlabel('Time (s)')
ax1a.set_ylabel('Link 2 Dynamics Estimate (N*m)')
ax1a.plot(t_arr, y_history[:, 1], 'k', t_arr, dyn_history[:, 1], 'g--')

plt.show()

# Create figure for end effector results
fig2 = plt.figure()
fig2.suptitle('End Effector Space', fontsize=20)
ax2 = fig2.add_subplot(1, 1, 1)
ax2.set_xlabel('x-position (m)')
ax2.set_ylabel('y-position (m)')
ax2.set_xlim([-np.sum(L), np.sum(L)])
ax2.set_ylim([-np.sum(L), np.sum(L)])

# Plot parametric end effector position
ax2.plot(x_history[:, 0], x_history[:, 1], 'k', xref_history[:, 0], xref_history[:, 1], 'g--')
ax2.scatter(x_history[0, 0], x_history[0, 1], color='r', s=50)
if path_type not in ['train', 'cycle']:
    ax2.scatter(target_history[:, 0], target_history[:, 1], color='g', s=50)
ax2.grid(True)

plt.show()

# Figure for animation
fig3 = plt.figure()
fig3.suptitle('Evolution')
ax3 = fig3.add_subplot(1, 1, 1)
ax3.set_xlabel('- World X +')
ax3.set_ylabel('- World Y +')
ax3.set_xlim([-np.sum(L)-1, np.sum(L)+1])
ax3.set_ylim([-np.sum(L)-1, np.sum(L)+1])
ax3.grid(True)

# Position of intermediate joint during motion
elb_history = np.concatenate(([L[0]*np.cos(q_history[:, 0])], [L[0]*np.sin(q_history[:, 0])])).T

# Lines for representing the links and points for joints
lthick = 3
pthick = 25
link1 = ax3.plot([0, elb_history[0, 0]], [0, elb_history[0, 1]], color='k', linewidth=lthick)
link2 = ax3.plot([elb_history[0, 0], x_history[0, 0]], [elb_history[0, 1], x_history[0, 1]], color='k', linewidth=lthick)
end = ax3.scatter(x_history[0, 0], x_history[0, 1], color='k', s=pthick*m[1], zorder=2)
elb = ax3.scatter(elb_history[0, 0], elb_history[0, 1], color='k', s=pthick*m[0], zorder=2)
bse = ax3.scatter(0, 0, color='k', s=pthick)

# Desired trajectory curve and tracking point
pointref = ax3.scatter(xref_history[0, 0], xref_history[0, 1], color='g', s=pthick*m[1], zorder=3)
if path_type not in ['train', 'cycle']:
    pointtar = ax3.scatter(target[0], target[1], color='r', s=50, zorder=4)

# Plot entirety of actual trajectory
if outline_path:
    outline = ax3.plot(x_history[:, 0], x_history[:, 1], 'k--', linewidth=1)
    outlineref = ax3.plot(xref_history[:, 0], xref_history[:, 1], 'g--', linewidth=lthick/3)

# Function for updating the animation frame
def update(arg, ii=[0]):
    i = ii[0] # don't ask...
    if np.isclose(t_arr[i], np.around(t_arr[i], 1)):
        fig3.suptitle('Evolution (Time: {})'.format(t_arr[i]), fontsize=24)
    link1[0].set_data([0, elb_history[i, 0]], [0, elb_history[i, 1]])
    link2[0].set_data([elb_history[i, 0], x_history[i, 0]], [elb_history[i, 1], x_history[i, 1]])
    end.set_offsets((x_history[i, 0], x_history[i, 1]))
    elb.set_offsets((elb_history[i, 0], elb_history[i, 1]))
    pointref.set_offsets((xref_history[i, 0], xref_history[i, 1]))
    if path_type not in ['train', 'cycle']:
        pointtar.set_offsets((target_history[i, 0], target_history[i, 1]))
    ii[0] += int(1 / (dt * framerate))
    if ii[0] >= len(t_arr):
        print("Resetting animation!")
        ii[0] = 0
    return [link1, link2, end, elb, pointref]

# Run animation
ani = animation.FuncAnimation(fig3, func=update, interval=dt*1000)
print("\nRemember to keep the display window aspect ratio square.\n")
plt.show()
```
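A small variation that may help when exploring the test script above: switching from the random joint-space training sequence to a single end-effector waypoint is done through the path settings near the top of the file. The coordinates below are illustrative values chosen to lie within reach of the `L = [1, 0.5]` arm, not values from the original experiment.

```python
# Hypothetical alternative path settings for the test script above (illustrative only).
import numpy as np

path_type = 'waypoint'           # track one end-effector-space target
target = np.array([1.0, 0.8])    # desired (x, y) in meters; |target| < L1 + L2 = 1.5
vmax = [np.pi, np.pi]            # rad/s, reference-model velocity limit (unchanged)
amax = [5, 1]                    # rad/s^2, reference-model acceleration limit (unchanged)
```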
{ "source": "jnez71/adaptive_filter", "score": 3 }
#### File: jnez71/adaptive_filter/NLMS.py ```python from __future__ import division import numpy as np import numpy.linalg as npl from scipy.io import loadmat from scipy.io.wavfile import write as wavwrite ################################################# MAIN # Do you want to save and plot? save_audio = False plot_results = True def regmat(x, m): """ Returns the order-m filter regression matrix of a timeseries x. This is the matrix squareroot of the autocorrelation. """ # Number of order-m lags of the signal that can be fit into itself nlags = len(x) - m # Row-stack each set of m data points X = np.zeros((nlags, m)) for i in xrange(nlags): X[i, :] = x[i:(i+m)] return X def nlms(x, y, m, a, z=1): """ Returns the array of m weights of the (hopefully) MSE-optimal filter for a given input data array x and a desired output data array y. Also returns a list of the errors, approximate SNRs, and weights at each iteration. The algorithm used is gradient descent with stepsize a. (The filter order is m of course). The timeseries is iterated through z times (number of "epochs"). """ # Initialization x = np.array(x, dtype=np.float64) y = np.array(y, dtype=np.float64) m = int(m); z = int(z) w = np.zeros(m) X = regmat(x, m) e_list = []; snr_list = []; w_list = [] # Routine for run in xrange(z): for i in xrange(len(x) - m): w_list.append(w) xi = x[i:(i+m)] yi = y[i+m-1] e = yi - w.dot(xi) w = w + a*(e*xi)/(xi.dot(xi)) e_list.append(e) if not i%100: snr_list.append((i, 10*np.log10(np.mean(np.square(y[m:(m+i+1)]))/np.mean(np.square(e_list[:i+1]))))) return w, e_list, snr_list, w_list # Unpack data data = loadmat('audio_data.mat') noisy_speech = data['reference'].flatten() noise = data['primary'].flatten() fs = data['fs'].flatten() # Hz # See http://www.cs.cmu.edu/~aarti/pubs/ANC.pdf m = 100 a = 0.03 w, e_list, snr_list, w_list = nlms(noise, noisy_speech, m, a) speech = np.array(e_list, dtype=np.float64) se_arr = np.square(speech) snr_arr = np.array(snr_list) w_arr = np.array(w_list, dtype=np.float64) ################################################# RECORD if save_audio: wavwrite('recovered_NLMS.wav'.format(m), fs, speech) ################################################# VISUALIZATION if plot_results: # More imports for plotting import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as plt3 import matplotlib.cm as cm fontsize = 30 # Performance contour fig1 = plt.figure() fig1.suptitle('Performance Contour (order: {}, stepsize: {})'.format(m, a), fontsize=fontsize) ax1 = plt3.Axes3D(fig1) ax1.set_xlabel('Weight 1', fontsize=fontsize) ax1.set_ylabel('Weight 2', fontsize=fontsize) ax1.set_zlabel('Square Error', fontsize=fontsize) ax1.grid(True) ax1.plot(w_arr[:, 0], w_arr[:, 1], se_arr) # Weight tracks fig2 = plt.figure() fig2.suptitle('Weight Tracks (order: {}, stepsize: {})'.format(m, a), fontsize=fontsize) ax2 = fig2.add_subplot(1, 1, 1) ax2.set_xlabel('Iteration', fontsize=fontsize) ax2.set_ylabel('Weight Value', fontsize=fontsize) ax2.set_ylim((-3, 3)) ax2.grid(True) ax2.plot(w_arr) # Learning curve fig2 = plt.figure() fig2.suptitle('Learning Curve (order: {}, stepsize: {})'.format(m, a), fontsize=fontsize) ax2 = fig2.add_subplot(1, 1, 1) ax2.set_xlabel('Iteration', fontsize=fontsize) ax2.set_ylabel('Square Error', fontsize=fontsize) ax2.set_ylim((0, 50)) ax2.grid(True) ax2.plot(se_arr) # SNR Iteration fig3 = plt.figure() fig3.suptitle('Approximate SNR (order: {}, stepsize: {})'.format(m, a), fontsize=fontsize) ax3 = fig3.add_subplot(1, 1, 1) ax3.set_xlabel('Iteration', fontsize=fontsize) 
ax3.set_ylabel('ERLE (dB)', fontsize=fontsize) ax3.grid(True) ax3.plot(snr_arr[:, 0], snr_arr[:, 1]) plt.show() ```
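For readers without `audio_data.mat` on hand, a quick synthetic sanity check of `nlms()` can be run right after the function definitions above. The coupling filter `h_true` below is made up for illustration: if the "primary" channel is just the reference noise passed through a short FIR path (no speech), an order-3 filter should cancel it almost entirely, so the residual error power should fall far below the primary's power and the learned weights should approach the time-reversed `h_true`.

```python
# Hypothetical self-test for nlms(); paste after the function definitions above.
import numpy as np

np.random.seed(0)
noise_ref = np.random.randn(20000)                         # reference noise x
h_true = np.array([0.5, -0.3, 0.2])                        # made-up coupling path
primary = np.convolve(noise_ref, h_true)[:len(noise_ref)]  # y = h * x (no speech)

w, e_list, snr_list, w_list = nlms(noise_ref, primary, m=3, a=0.1)
print("residual error power: {}".format(np.mean(np.square(e_list[-1000:]))))
print("learned weights: {}".format(np.round(w, 3)))        # ideally the reverse of h_true
```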
{ "source": "jnez71/lqRRT", "score": 3 }
#### File: lqRRT/lqrrt/tree.py ```python from __future__ import division import numpy as np ################################################# PRIMARY CLASS class Tree: """ To initialize, provide... seed_state: An array of the state of the seed node. seed_lqr: The local LQR-optimal cost-to-go array S and policy array K as a tuple (S, K). That is, S solves the local Riccati equation and K = (R^-1)*(B^T)*(S) for effort jacobian B. """ def __init__(self, seed_state, seed_lqr): # Store number of states self.nstates = len(seed_state) # Store number of controls try: self.ncontrols = seed_lqr[1].shape[0] except: print("\nThe given seed_lqr is not consistent.") print("Continuing, assuming you don't care about the lqr or effort features...\n") self.ncontrols = 1 # Initialize state array self.state = np.array(seed_state, dtype=np.float64).reshape(1, self.nstates) # Initialize all other feature lists self.pID = [-1] self.lqr = [seed_lqr] self.x_seq = [[seed_state]] self.u_seq = [[np.zeros(self.ncontrols)]] # Initialize number of nodes self.size = 1 ################################################# def add_node(self, pID, state, lqr, x_seq, u_seq): """ Adds a node to the tree with the given features. """ # Make sure the desired parent exists if pID >= self.size or pID < 0: raise ValueError("The given parent ID, {}, doesn't exist.".format(pID)) # Update state array self.state = np.vstack((self.state, state)) # Append all other feature lists self.pID.append(pID) self.lqr.append(lqr) self.x_seq.append(x_seq) self.u_seq.append(u_seq) # Increment node count self.size += 1 ################################################# def climb(self, ID): """ Returns a list of node IDs that connect the seed to the node with the given ID. The first element in the list is always 0 (seed) and the last is always ID (the given climb destination). """ # Make sure the desired end node exists if ID >= self.size or ID < 0: raise ValueError("The given ID, {}, doesn't exist.".format(ID)) # Follow parents backward and then reverse list IDs = [] while ID != -1: IDs.append(ID) ID = self.pID[ID] return IDs[::-1] ################################################# def trajectory(self, IDs): """ Given a list of node IDs, the full sequence of states and efforts to go from IDs[0] to IDs[-1] are returned as a tuple (x_seq_full, u_seq_full). """ x_seq_full = []; u_seq_full = [] for ID in IDs: x_seq_full.extend(self.x_seq[ID]) u_seq_full.extend(self.u_seq[ID]) return (x_seq_full, u_seq_full) ################################################# def visualize(self, dx, dy, node_seq=None): """ Plots the (dx,dy)-cross-section of the current tree, and highlights the path given by the list, node_seq. For example, dx=0, dy=1 plots the states #0 and #1. """ print("\n...now plotting...") from matplotlib import pyplot as plt fig = plt.figure() fig.suptitle('Tree') plt.axis('equal') ax = fig.add_subplot(1, 1, 1) ax.set_xlabel('- State {} +'.format(dx)) ax.set_ylabel('- State {} +'.format(dy)) ax.grid(True) if node_seq is None: node_seq = [] if self.size > 1: for ID in xrange(self.size): x_seq = np.array(self.x_seq[ID]) if ID in node_seq: ax.plot((x_seq[:, dx]), (x_seq[:, dy]), color='r', zorder=2) else: ax.plot((x_seq[:, dx]), (x_seq[:, dy]), color='0.75', zorder=1) ax.scatter(self.state[0, dx], self.state[0, dy], color='b', s=48) if len(node_seq): ax.scatter(self.state[node_seq[-1], dx], self.state[node_seq[-1], dy], color='r', s=48) print("Done! Close window to continue.\n") plt.show() ```
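A minimal usage sketch of the Tree class above, with toy 2-state / 1-control values rather than output from a real lqrrt run, and assuming the class is importable (for example `from lqrrt import Tree`):

```python
# Illustrative Tree usage with placeholder LQR data.
import numpy as np

seed_state = np.array([0.0, 0.0])
seed_lqr = (np.eye(2), np.zeros((1, 2)))          # placeholder (S, K) pair

tree = Tree(seed_state, seed_lqr)
tree.add_node(pID=0, state=np.array([1.0, 0.5]), lqr=seed_lqr,
              x_seq=[np.array([0.5, 0.25]), np.array([1.0, 0.5])],
              u_seq=[np.zeros(1), np.zeros(1)])

node_ids = tree.climb(1)                          # [0, 1]: seed to the new node
x_seq_full, u_seq_full = tree.trajectory(node_ids)
print("nodes: {}, states in trajectory: {}".format(node_ids, len(x_seq_full)))
```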
{ "source": "jnez71/misc", "score": 3 }
#### File: misc/control/adaptive_affine.py ```python import numpy as np from matplotlib import pyplot ################################################## # Controller class C: def __init__(self, n, k): self.n = int(n) self.k = float(k) self.W = np.zeros((n, n), dtype=float) self.b = np.zeros(n, dtype=float) def u(self, r, x, dt): ked = self.k*(r - x)*dt self.W += np.outer(ked, x) self.b += ked return self.W.dot(x) + self.b ################################################## # Drift dynamic n = 3 def f(x, t): return np.array([10.0*(x[1] - x[0]), x[0]*(28.0 - x[2]) - x[1], x[0]*x[1] - 2.6*x[2]]) # Actuator dynamic # (needs to be identity for Lyapunov proof, but might still work otherwise) def B(x, t): return np.array([[x[1], 0.0, 0.0], [ 0.0, 2*x[0], 0.0], [ 0.0, 0.0, 1.0]]) ################################################## # Time dt = 0.001 T = np.arange(0.0, 3.0, dt) # State X = np.zeros((len(T), n), dtype=float) X[0] = [-1.0, 2.0, 3.0] # Control U = np.zeros((len(T), n), dtype=float) c = C(n, 1.0) # Reference R = np.array([[6.0, 7.0, -7.0]] * len(T)) ################################################## # Simulation control = True for i in range(len(T)-1): if control: U[i] = c.u(R[i], X[i], dt) dxdt = f(X[i], T[i]) + B(X[i], T[i]).dot(U[i]) X[i+1] = X[i] + dxdt*dt ################################################## # Plot fig = pyplot.figure() if control: fig.suptitle("Controlled Response", fontsize=26) else: fig.suptitle("Natural Response", fontsize=26) ax = None for i in range(n): ax = fig.add_subplot(n, 1, i+1, sharex=ax) ax.plot(T, X[:, i], color='b', linewidth=2, label="state") ax.plot(T, R[:, i], color='g', linewidth=3, linestyle=':', label="desire") ax.plot(T[:-1], U[:-1, i], color='r', linewidth=0.5, label="action", scaley=False) ax.set_xlim([T[0], T[-1]]) ax.set_ylabel("state "+str(i), fontsize=20) ax.grid(True) ax.set_xlabel("time", fontsize=20) ax.legend() pyplot.show() ``` #### File: misc/control/qlearning.py ```python from __future__ import division import numpy as np; npl = np.linalg # Visualization dependencies import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib import cm as colormap from mpl_toolkits.mplot3d import Axes3D # State, action, measurement, and time cardinalities nS = 3; nA = 2; nM = 2; nT = int(1E5) # Process Model: transition conditional-probability matrix, nA by nS by nS' P = np.array([[[ 1, 0, 0], [ 1, 0, 0], [ 0, 0.3, 0.7]], [[0.4, 0, 0.6], [0.1, 0.6, 0.3], [ 0, 0.1, 0.9]]], dtype=np.float64) # Sensor Model: observation conditional-probability matrix, nA by nS' by nM' R = np.array([[[ 1, 0], [ 1, 0], [1-0.1, 0.1]], [[ 1, 0], [ 1, 0], [ 0, 1]]], dtype=np.float64) # Discount factor and cost function matrix for exact action-state # pair, nA by nS (belief-state cost function is <b(*),C(u,*)>) g = 0.8 C = np.array([[-1, -1, -3], [ 0, 0, -2]], dtype=np.float64) # Q-approximation basis choice... 
basis = "planar" # ...planar if basis == "planar": nF = 8 def F(b, y, u): f = np.zeros(nF) if y == 0: f[0] = b[0] f[1] = b[0]*u f[2] = b[2] f[3] = b[2]*u f[4] = u else: f[5] = b[2] f[6] = u f[7] = 1 return f # ...or radial elif basis == "radial": Z = np.array([[0.8, 0.1], [0.1, 0.1], [0.1, 0.8]]) nF = 2*len(Z)+3 def F(b, y, u): f = np.zeros(nF) if y == 0: f[:len(Z)] = np.exp(-npl.norm(Z - [b[0], b[2]], axis=1)/2) f[len(Z):2*len(Z)] = u*f[:len(Z)] else: f[-3:-1] = (b[2], u) f[-1] = 1 return f # State, measurement, belief, and parametric-Q histories x = np.zeros(nT, dtype=np.int64) y = np.zeros(nT, dtype=np.int64) b = np.zeros((nT, nS), dtype=np.float64) f = np.zeros((nT, nF), dtype=np.float64) q = np.zeros((nT, nF), dtype=np.float64) # Initial conditions u = 0 x[0] = 0 b[0] = [1/3, 1/3, 1/3] f[0] = F(b[0], 0, u) q[0] = 20*(np.random.rand(nF)-0.5) Ksum = np.zeros((nF, nF)) # Function for randomly sampling with a given discrete probability density sample_from = lambda p: np.argwhere(np.random.sample() < np.cumsum(p))[0][0] # Simulation T = np.arange(nT) for t in T[1:]: # Randomly choose next action ut = sample_from([0.5, 0.5]) # Advance state, obtain measurement x[t] = sample_from(P[u, x[t-1]]) y[t] = sample_from(R[u, x[t]]) # Update belief b[t] = (b[t-1].dot(P[u]))*R[u, :, y[t]] b[t] = b[t] / np.sum(b[t]) # Approximate error and jacobian f_a = np.array([F(b[t], y[t], a) for a in xrange(nA)]) E = b[t-1].dot(C[u]) + g*np.min(f_a.dot(q[t-1])) - f[t-1].dot(q[t-1]) Ksum = Ksum + np.outer(f_a[ut], f_a[ut]) # Update Q approximation condition = npl.cond(Ksum) if condition < 10000: q[t] = q[t-1] + (1/t)*npl.inv(Ksum/t).dot(E*f[t-1]) else: q[t] = q[t-1] + 0.001*E*f[t-1] f[t] = f_a[ut] u = ut # Heartbeat if t % int(nT/10) == 0: print("Progress: {}%".format(int(100*t/nT))) print("Error: {}".format(np.round(E, 3))) print("Eigs: {}".format(np.round(npl.eigvals(Ksum/t), 3))) print("Condition: {}\n".format(condition)) print("Final q: {}".format(np.round(q[-1], 3))) # Compute discretized final Q function res = 81 B = np.vstack((np.repeat(np.linspace(0, 1, res), res), np.tile(np.linspace(0, 1, res), res), np.zeros(res**2))).T B = np.delete(B, np.argwhere(np.sum(B, axis=1) > 1).flatten(), axis=0) B[:, 2] = 1 - np.sum(B, axis=1) Q = np.zeros((nA, len(B))) for a in xrange(nA): for i, bi in enumerate(B): Q[a, i] = F(bi, 0, a).dot(q[-1]) U = np.argmin(Q, axis=0) V = np.min(Q, axis=0) # Prepare plots rcParams.update({'figure.autolayout': True}) fig = plt.figure() fig.canvas.set_window_title("qrover_results") nplotsr = 2; nplotsc = 3 fontsize = 16 dens = int(np.ceil(nT/1000)) # Plot policy on belief simplex ax = fig.add_subplot(nplotsr, nplotsc, 1, projection='3d') ax.scatter(B[:, 0], B[:, 1], B[:, 2], c=U, zorder=1) ax.set_xlim([0, 1]); ax.set_ylim([0, 1]); ax.set_zlim([0, 1]) ax.set_title("Policy", fontsize=fontsize) ax.set_xlabel("Bottom", fontsize=fontsize) ax.set_ylabel("Middle", fontsize=fontsize) ax.set_zlabel("Top", fontsize=fontsize) ax.view_init(20, 20) # Plot Q function over belief state ax = fig.add_subplot(nplotsr, nplotsc, 2, projection='3d') ax.plot_trisurf(B[::1, 0], B[::1, 1], Q[0, ::1], cmap=colormap.Blues, linewidth=0, antialiased=True) ax.plot_trisurf(B[::1, 0], B[::1, 1], Q[1, ::1], cmap=colormap.autumn, linewidth=0, antialiased=True) ax.set_xlim([0, 1]); ax.set_ylim([0, 1]) ax.set_title("Q Function", fontsize=fontsize) ax.set_xlabel("Bottom", fontsize=fontsize) ax.set_ylabel("Middle", fontsize=fontsize) ax.set_zlabel("Q", fontsize=fontsize) ax.view_init(20, 20) # Plot value function over belief 
state ax = fig.add_subplot(nplotsr, nplotsc, 3, projection='3d') ax.plot_trisurf(B[::1, 0], B[::1, 1], V[::1], cmap=colormap.coolwarm, linewidth=0, antialiased=True) ax.set_xlim([0, 1]); ax.set_ylim([0, 1]) ax.set_title("Value Function", fontsize=fontsize) ax.set_xlabel("Bottom", fontsize=fontsize) ax.set_ylabel("Middle", fontsize=fontsize) ax.set_zlabel("V", fontsize=fontsize) ax.view_init(20, 20) # Plot projection weights ax = fig.add_subplot(2, 1, 2) ax.plot(T, q) ax.set_xlim([0, nT]) ax.set_xlabel("Time", fontsize=fontsize) ax.set_ylabel("Weights", fontsize=fontsize) ax.grid(True) # # Plot belief state exploration # fig = plt.figure() # fig.canvas.set_window_title("qrover_results_aux") # ax = fig.add_subplot(1, 1, 1, projection='3d') # ax.scatter(b[::dens, 0], b[::dens, 1], b[::dens, 2], c='b', alpha=0.1) # ax.set_xlim([0, 1]); ax.set_ylim([0, 1]); ax.set_zlim([0, 1]) # ax.set_title("Belief-State Exploration", fontsize=fontsize) # ax.set_xlabel("Bottom", fontsize=fontsize) # ax.set_ylabel("Middle", fontsize=fontsize) # ax.set_zlabel("Top", fontsize=fontsize) # ax.view_init(20, 20) plt.show() ``` #### File: misc/geometry/spherical_average.py ```python from autograd import numpy as np, value_and_grad # pip3 install autograd from scipy.optimize import minimize from matplotlib import pyplot from mpl_toolkits.mplot3d import Axes3D np.set_printoptions(precision=4, sign=' ') EPS = 1e-12 ################################################## def chord(u1, u2): return np.linalg.norm(u2 - u1, axis=u2.ndim-1) def geodesic(u1, u2): # https://en.wikipedia.org/wiki/Great-circle_distance#From_chord_length return 2*np.arcsin(np.clip(chord(u1, u2) / 2, -1+EPS, 1-EPS)) ################################################## @value_and_grad def leastsquares(u, samples, metric): return np.sum(np.square(metric(u, samples))) def solve(samples, metric): return minimize(fun=leastsquares, args=(samples, metric), x0=[-1,1,0], method="SLSQP", jac=True, constraints={"type": "eq", "fun": lambda u: np.linalg.norm(u)**2 - 1, "jac": lambda u: 2*u}, #options={"disp": True}, tol=EPS).x ################################################## n = 500 samples = [1,0,0] + np.random.normal(0, 0.9, size=(n,3)) samples /= np.linalg.norm(samples, axis=1).reshape(n,-1) min_chords = solve(samples, chord) min_geodesics = solve(samples, geodesic) renormalized = np.mean(samples, axis=0) renormalized /= np.linalg.norm(renormalized) ################################################## print("ArgMin of Chords: ", min_chords) print("ArgMin of Geodesics:", min_geodesics) print("Renormalized Mean: ", renormalized) fig = pyplot.figure() axis = fig.add_subplot(111, projection="3d") axis.scatter(*samples.T, c='k', alpha=0.2, label="Samples") axis.scatter(*min_chords.T, c='r', s=200, label="ArgMin of Chords") axis.scatter(*min_geodesics.T, c='g', s=200, label="ArgMin of Geodesics") axis.scatter(*renormalized.T, c='b', s=200, label="Renormalized Mean") axis.set_xlabel("x") axis.set_ylabel("y") axis.set_zlabel("z") axis.legend() pyplot.show() ################################################## ``` #### File: misc/physics/stationary_action.py ```python from autograd import numpy as np, value_and_grad # pip3 install --user autograd from scipy.optimize import minimize # pip3 install --user scipy from matplotlib import pyplot # pip3 install --user matplotlib np.set_printoptions(suppress=True) pyplot.rcParams["font.size"] = 16 pyplot.rcParams["axes.grid"] = True ################################################## n = 1 # configuration-space dimensionality m = 1.0 # 
mass k = 1.0 # stiffness dt = 0.01 # temporal resolution b = np.pi#+0.2 # temporal extent (here PI is the value beyond which the action is nonconvex) q0 = 1.0 # initial configuration qd0 = 0.0 # initial velocity ######################### def lagrangian(q, qd): return 0.5*m*qd**2 - 0.5*k*q**2 # kinetic less potential def acceleration(q, qd): return -k*q/m # force over mass ################################################## def stationary(q0, qd0, dt, b): T = np.arange(0, b, dt) Q = np.empty((len(T), n), float) Qd = np.empty((len(T), n), float) Q[0] = q0 Qd[0] = qd0 for i in range(len(T)-1): # Verlet integration a = acceleration(Q[i], Qd[i]) Q[i+1] = Q[i] + Qd[i]*dt + 0.5*a*dt**2 a1 = acceleration(Q[i+1], Qd[i]) Qd[i+1] = Qd[i] + 0.5*(a+a1)*dt return (T, Q, Qd) print("Solving EL for stationary trajectory...") T, Q, Qd = stationary(q0, qd0, dt, b) qb = Q[-1] ################################################## @value_and_grad def action(Q, dt=dt): Qd = np.append(np.diff(Q)/dt, 0) return np.sum(lagrangian(Q, Qd)) * dt boundaries = { "type": "eq", "fun": lambda Q: (Q[0]-q0)**2 + (Q[-1]-qb)**2 } print("Directly optimizing for minimal trajectory...") Qmin = minimize(action, np.zeros_like(Q), method="SLSQP", jac=True, constraints=boundaries, options={"disp": True, "maxiter": 1000}).x ################################################## print("Plotting results...") figure, axes = pyplot.subplots(1, 1) axes.set_ylabel("Configuration") axes.set_xlabel("Time") axes.plot(T, Q, label="stationary (physical)") axes.plot(T, Qmin, ls=":", lw=4, label="minimum") axes.legend() pyplot.show() ################################################## ``` #### File: misc/signals/gaussian_markov_kernel.py ```python import numpy as np from matplotlib import pyplot npr = np.random np.set_printoptions(suppress=True) pyplot.rcParams["font.size"] = 16 pyplot.rcParams["axes.grid"] = True ################################################## SYSTEM def initial(m=10.0, s=2.0): return npr.normal(m, s) # gaussian initial-condition def transition(x, s=1.0): #f = 0.5*x # linear f = 10*np.sin(2/(1+x**2)) # nonlinear return f + npr.normal(0.0, s) # gaussian transition def simulate(d): X = [initial()] for i in range(d-1): X.append(transition(X[-1])) return X # one sample from d-dimensional joint (only gaussian if linear transitions) ################################################## SIMULATE d = 9 n = int(5e5) print("Simulating samples...") samples = np.array([simulate(d) for i in range(n)]) print("Computing statistics...") mean = np.mean(samples, axis=0) covar = np.cov(samples, rowvar=False) ################################################## VISUALIZE print("========================================") print(np.round(mean, 3), '\n') print(np.round(covar, 3)) print("========================================") print("Visualizing covariance...") vmax = np.max(np.abs(covar)) pyplot.imshow(covar, cmap="coolwarm", vmin=-vmax, vmax=vmax, interpolation="lanczos") pyplot.colorbar() pyplot.grid(False) pyplot.title("Covariance") print("Visualizing joint...") pyplot.figure() pyplot.scatter(samples[::int(n/1e3+1), 0], samples[::int(n/1e3+1), -1], alpha=0.4) pyplot.xlabel("x0") pyplot.ylabel("x{0}".format(d-1)) pyplot.show() ``` #### File: misc/signals/phase_locked_loop.py ```python import numpy as np from scipy.signal import butter, zpk2ss from matplotlib import pyplot # Display configuration np.set_printoptions(suppress=True) pyplot.rcParams["axes.grid"] = True pyplot.rcParams["font.size"] = 20 ################################################## # Periodic 
1-dimensional real-valued signal class Signal: def __init__(self, amplitude, frequency, shift): self.amplitude = np.float(amplitude) self.frequency = np.float(frequency) self.shift = np.float(shift) def tap(self, time): return self.amplitude*np.sin((2*np.pi*self.frequency)*(time - self.shift)) ################################################## # Phase-locked-loop using modulation-based detector and linear filter class PhaseLockedLoop: def __init__(self, A, b, c, freequency): self.A = np.array(np.atleast_2d(A), float) # filter dynamic (n,n) self.b = np.array(b, float).reshape(len(self.A), 1) # filter input transform (n,1) self.c = np.array(c, float).reshape(1, len(self.A)) # filter output transform (1,n) self.x = np.zeros((len(self.A), 1), float) # filter state (n,1) self.freequency = np.float(freequency) # free frequency of the controlled oscillator self.phase = np.float(0) # phase estimate def seek(self, target, timestep): replica = np.sin(self.phase) # internal oscillator output error = target*replica - 0.5 # modulate target with replica, cosine/2 of phase difference is in there self.x += timestep*(self.A.dot(self.x) + self.b.dot(error)) # filter out harmonics advance = self.c.dot(self.x) # compute oscillator control as proportional to filtered error self.phase += timestep*(2*np.pi*(self.freequency + advance)) # advance the oscillator return replica, advance # for records ################################################## # Discrete Fourier transform normalized magnitude (for analysis) def fourier(values, period): frequencies = np.fft.rfftfreq(len(values), period) coefficients = np.sqrt(2.0/len(values)) * np.fft.rfft(values) # scaled for unitarity coefficients[0] /= np.sqrt(2.0) magnitudes = np.abs(coefficients) / np.linalg.norm(coefficients) return (frequencies, magnitudes) ################################################## # Create simulation time domain timestep = 0.0001 times = np.arange(0, 0.3, timestep) # Design PLL using Butterworth filter f = 100 n = 8 k = 20 A, b, c, _ = zpk2ss(*butter(n, 2*np.pi*f, btype="lowpass", analog=True, output="zpk")) pll = PhaseLockedLoop(A=A, b=b, c=k*c, freequency=f) # Run simulation targets = Signal(1.5, f-3, 0.007).tap(times) replicas, advances = np.transpose([pll.seek(target, timestep) for target in targets]) ################################################## # Plot time-domain results figure, axes = pyplot.subplots(2, 1, sharex=True) axes[0].plot(times, targets, color='r', label="Target") axes[0].plot(times, replicas, color='k', label="PLL") axes[0].set_xlim([0, times[-1]]) axes[0].legend() axes[1].plot(times, advances, color='m', label="Advance") axes[0].set_xlim([0, times[-1]]) axes[1].legend() axes[-1].set_xlabel("Time") # # Plot steady-state (assumed halfway) frequency-domain results # figure, axes = pyplot.subplots(1, 1, sharex=True) # axes.plot(*fourier(replicas[len(times)//2:], timestep), color='k', label="PLL") # axes.plot(*fourier(advances[len(times)//2:], timestep), color='m', label="Advance") # axes.set_xlim([0, 5*f]) # axes.legend() # axes.set_ylabel("Normalized Magnitude") # axes.set_xlabel("Frequency") # Block to display visualization print("Close plots to finish!") pyplot.show() ################################################## ``` #### File: misc/vehicles/mobile_inverted_pendulum.py ```python from __future__ import division import numpy as np import numpy.linalg as npl import matplotlib.pyplot as plt import matplotlib.animation as ani # Simulation parameters duration = 20 # s timestep = 0.005 # s framerate = 30 # FPS # Pendulum 
parameters cw_to_cm = np.array([0, 2]) # m, body y mass_pend = 1 # kg inertia_pend = mass_pend*cw_to_cm[1]**2 # kg*m^2 friction_pend = 0.2 # (N*m)/(rad/s) # Wheel parameters radius = 0.5 # m mass_wheel = 1 # kg inertia_wheel = 0.5*mass_wheel*radius**2 # kg*m^2 friction_wheel = 1.2 # (N*m)/(rad/s) # World parameters gravity = np.array([0, -9.81]) # m/s^2, world y incline = np.deg2rad(0) # rad david = None # fuck david_shift = 0 # you # Sensor parameters encoder_tpr = 1024 # tic/rev gyro_bias = np.deg2rad(2) # rad/s gyro_stdv = np.deg2rad(2) # rad/s inclinometer_bias = np.deg2rad(1) # rad inclinometer_stdv = np.deg2rad(5) # rad # Actuator parameters torque_limit = 20 # N*m torque_deltamax = 0.1*torque_limit / timestep # N*m/s # Control system initialization controller_active = True kp = 50 # N*m/rad kd = 2*np.sqrt(kp) # N*m/(rad/s) max_mobile_tilt = np.deg2rad(15) # rad desired_position = 4 # m, None => no preference last_encoder = 0 # Initial condition q = np.array([0, 0, np.deg2rad(2), 0]) w = q[0]/radius u = 0 p = 0 def dynamics(q, u, p): """ Returns state derivative qdot. Takes current state q, motor input torque u, and disturbance torque p. See <http://renaissance.ucsd.edu/courses/mae143c/MIPdynamics.pdf> (rederived with incline). """ # Angle of pendulum in incline frame ang = q[2] - incline # Mass matrix M = np.array([ [(mass_wheel + mass_pend)*radius**2 + inertia_wheel, mass_pend*radius*cw_to_cm[1]*np.cos(ang)], [mass_pend*radius*cw_to_cm[1]*np.cos(ang), inertia_pend + mass_pend*cw_to_cm[1]**2] ]) # Gravity effect g = np.array([ -mass_pend*radius*cw_to_cm[1]*q[3]**2*np.sin(ang) + mass_wheel*radius*gravity[1]*np.sin(incline), mass_pend*gravity[1]*cw_to_cm[1]*np.sin(q[2]) ]) # Friction force d = np.array([ -friction_wheel * (q[1] + np.arctan(q[1])), friction_pend * q[3] ]) # Dynamics accel_wheel_neg, accel_pend = npl.inv(M).dot(np.array([-u, p+u]) - g - d) return np.array([q[1], -accel_wheel_neg*radius, q[3], accel_pend]) ################################################# SIMULATION # Define time domain t_arr = np.arange(0, duration, timestep) # Preallocate results memory q_history = np.zeros((len(t_arr), 4)) q_ref_history = np.zeros((len(t_arr), 4)) q_meas_history = np.zeros((len(t_arr), 4)) u_history = np.zeros(len(t_arr)) w_history = np.zeros(len(t_arr)) p_history = np.zeros(len(t_arr)) incline_history = np.zeros(len(t_arr)) # Integrate dynamics using first-order forward stepping for i, t in enumerate(t_arr): # Sensor measurements encoder_meas = (np.round((w/(2*np.pi))*encoder_tpr)/encoder_tpr)*2*np.pi * radius # m deriv_est = (encoder_meas - last_encoder) / timestep # m/s last_encoder = encoder_meas inclinometer_meas = q[2] + inclinometer_bias + np.random.normal(0, inclinometer_stdv) # rad gyro_meas = q[3] + gyro_bias + np.random.normal(0, gyro_stdv) # rad/s # Controller's decision if controller_active and q[2] > -np.pi/2 and q[2] < np.pi/2: if desired_position is None: mobile_tilt = 0 else: mobile_tilt = np.clip(0.1*(encoder_meas-desired_position) + 0.1*q[1], -max_mobile_tilt, max_mobile_tilt) u_ref = kp*(mobile_tilt - (inclinometer_meas-inclinometer_bias)) + kd*(0 - (gyro_meas-gyro_bias)) else: mobile_tilt = 0 u_ref = 0 # Actuator slew and saturation if u < u_ref: u = np.clip(u + torque_deltamax*timestep, u, u_ref) elif u > u_ref: u = np.clip(u - torque_deltamax*timestep, u_ref, u) u = np.clip(u, -torque_limit, torque_limit) # External disturbance if t < max(t_arr)/2: p = 0#-5*np.sin(2*t)*np.cos(0.2*t) else: p = 0 # Record this instant q_history[i, :] = q q_ref_history[i, :] = 
[desired_position, 0, mobile_tilt, 0] q_meas_history[i, :] = [encoder_meas, deriv_est, inclinometer_meas, gyro_meas] u_history[i] = u w_history[i] = w p_history[i] = p incline_history[i] = incline # First-order integrate qdot = f(q, u, p) q = q + dynamics(q, u, p)*timestep w = q[0]/radius # Update incline for david if david is not None: incline = np.arctan(2*david*(q[0] - david_shift)) ################################################# VISUALIZATION # Plots fig1 = plt.figure() fig1.suptitle('Results', fontsize=20) ax1 = fig1.add_subplot(2, 3, 1) ax1.set_ylabel('Wheel Position (m)', fontsize=16) ax1.plot(t_arr, q_history[:, 0], 'k', t_arr, q_ref_history[:, 0], 'g--') ax1.grid(True) ax1 = fig1.add_subplot(2, 3, 2) ax1.set_ylabel('Pendulum Angle (deg)', fontsize=16) ax1.plot(t_arr, np.rad2deg(q_history[:, 2]), 'k', t_arr, np.rad2deg(q_ref_history[:, 2]), 'g--') ax1.grid(True) ax1 = fig1.add_subplot(2, 3, 3) ax1.set_ylabel('Input Torque (N*m)', fontsize=16) ax1.plot(t_arr, u_history, 'k', t_arr, p_history, 'r--') ax1.grid(True) ax1 = fig1.add_subplot(2, 3, 4) ax1.set_ylabel('Wheel Velocity (m/s)', fontsize=16) ax1.plot(t_arr, q_history[:, 1], 'k', t_arr, q_ref_history[:, 1], 'g--') ax1.set_xlabel('Time (s)') ax1.grid(True) ax1 = fig1.add_subplot(2, 3, 5) ax1.set_ylabel('Pendulum Velocity (deg/s)', fontsize=16) ax1.plot(t_arr, np.rad2deg(q_history[:, 3]), 'k', t_arr, np.rad2deg(q_ref_history[:, 3]), 'g--') ax1.set_xlabel('Time (s)') ax1.grid(True) ax1 = fig1.add_subplot(2, 3, 6) ax1.set_ylabel('State Estimation Errors', fontsize=16) ax1.plot(t_arr, q_history[:, 0] - q_meas_history[:, 0], 'k', label='position') ax1.plot(t_arr, q_history[:, 1] - q_meas_history[:, 1], 'r', label='velocity') ax1.plot(t_arr, q_history[:, 2] - q_meas_history[:, 2], 'g', label='pend angle') ax1.plot(t_arr, q_history[:, 3] - q_meas_history[:, 3], 'b', label='pend rate') ax1.legend() ax1.set_xlabel('Time (s)') ax1.grid(True) print("Close the plot window to continue to animation.") plt.show() # Animation fig2 = plt.figure() fig2.suptitle('Evolution', fontsize=24) plt.axis('equal') ax2 = fig2.add_subplot(1, 1, 1) ax2.set_xlabel('- World X (m)+') ax2.set_ylabel('- World Y (m)+') ax2.grid(True) # (break q[0] incline coordinate into world coordinates) x_history = q_history[:, 0] * np.cos(incline_history) y_history = q_history[:, 0] * np.sin(incline_history) ax_lim = 2*npl.norm(cw_to_cm) + np.ceil(radius) ax2.set_xlim([-ax_lim, ax_lim]) ax2.set_ylim([-ax_lim, ax_lim]) floor_lim = np.max(np.abs(q_history[:, 0])) * 2 pscale = 0.25 graphic_floor = ax2.plot([-floor_lim*np.cos(incline_history[0]) + radius*np.sin(incline_history[0]), floor_lim*np.cos(incline_history[0]) + radius*np.sin(incline_history[0])], [-floor_lim*np.sin(incline_history[0])-radius*np.cos(incline_history[0]), floor_lim*np.sin(incline_history[0])-radius*np.cos(incline_history[0])], color='g', linewidth=5) graphic_wheel = ax2.add_patch(plt.Circle((x_history[0], y_history[0]), radius=radius, fc='k')) graphic_ind = ax2.plot([x_history[0], x_history[0] + radius*np.sin(w_history[0])], [y_history[0], y_history[0] + radius*np.cos(w_history[0])], color='y', linewidth=3) graphic_pend = ax2.plot([x_history[0], x_history[0] - cw_to_cm[1]*np.sin(q_history[0, 2])], [y_history[0], y_history[0] + cw_to_cm[1]*np.cos(q_history[0, 2])], color='b', linewidth=5) graphic_dist = ax2.plot([x_history[0] - cw_to_cm[1]*np.sin(q_history[0, 2]), x_history[0] - cw_to_cm[1]*np.sin(q_history[0, 2]) - pscale*p_history[0]*np.cos(q_history[0, 2])], [y_history[0] + cw_to_cm[1]*np.cos(q_history[0, 
2]), y_history[0] + cw_to_cm[1]*np.cos(q_history[0, 2]) - pscale*p_history[0]*np.sin(q_history[0, 2])], color='r', linewidth=3) def ani_update(arg, ii=[0]): i = ii[0] # don't ask... if np.isclose(t_arr[i], np.around(t_arr[i], 1)): fig2.suptitle('Evolution (Time: {})'.format(t_arr[i]), fontsize=24) graphic_floor[0].set_data([-floor_lim*np.cos(incline_history[i]) + radius*np.sin(incline_history[i]), floor_lim*np.cos(incline_history[i]) + radius*np.sin(incline_history[i])], [-floor_lim*np.sin(incline_history[i])-radius*np.cos(incline_history[i]), floor_lim*np.sin(incline_history[i])-radius*np.cos(incline_history[i])]) graphic_wheel.center = (x_history[i], y_history[i]) graphic_ind[0].set_data([x_history[i], x_history[i] + radius*np.sin(w_history[i])], [y_history[i], y_history[i] + radius*np.cos(w_history[i])]) graphic_pend[0].set_data([x_history[i], x_history[i] - cw_to_cm[1]*np.sin(q_history[i, 2])], [y_history[i], y_history[i] + cw_to_cm[1]*np.cos(q_history[i, 2])]) graphic_dist[0].set_data([x_history[i] - cw_to_cm[1]*np.sin(q_history[i, 2]), x_history[i] - cw_to_cm[1]*np.sin(q_history[i, 2]) - pscale*p_history[i]*np.cos(q_history[i, 2])], [y_history[i] + cw_to_cm[1]*np.cos(q_history[i, 2]), y_history[i] + cw_to_cm[1]*np.cos(q_history[i, 2]) - pscale*p_history[i]*np.sin(q_history[i, 2])]) ii[0] += int(1 / (timestep * framerate)) if ii[0] >= len(t_arr): print("Resetting animation!") ii[0] = 0 return [graphic_floor, graphic_wheel, graphic_ind, graphic_pend, graphic_dist] # Run animation print("Starting animation.\nBlack: wheel, Blue: pendulum, Yellow: angle indicator, Red: disturbance, Green: ground.") animation = ani.FuncAnimation(fig2, func=ani_update, interval=timestep*1000) plt.show() ```
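As a side note on the gain choice used in the script above (`kp = 50`, `kd = 2*np.sqrt(kp)`): for an idealized unit-inertia double integrator, that rule places both closed-loop poles at `-sqrt(kp)`, i.e. critical damping. The check below only illustrates that generic rule, not the full MIP dynamics.

```python
# Illustrative critical-damping check for the kp/kd rule (unit-inertia assumption).
import numpy as np

kp = 50.0
kd = 2.0*np.sqrt(kp)
poles = np.roots([1.0, kd, kp])   # roots of s^2 + kd*s + kp = 0
print(poles)                      # both poles at about -7.07
```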
{ "source": "jnez71/multicopter", "score": 3 }
#### File: multicopter/old_software/monotonic.py ```python import ctypes, os CLOCK_MONOTONIC = 1 CLOCK_MONOTONIC_RAW = 4 # see <linux/time.h> class timespec(ctypes.Structure): _fields_ = [ ('tv_sec', ctypes.c_long), ('tv_nsec', ctypes.c_long) ] librt = ctypes.CDLL('librt.so.1', use_errno=True) clock_gettime = librt.clock_gettime clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)] def monotonic_time(): t = timespec() if clock_gettime(CLOCK_MONOTONIC , ctypes.pointer(t)) != 0: errno_ = ctypes.get_errno() raise OSError(errno_, os.strerror(errno_)) return t.tv_sec + t.tv_nsec * 1e-9 ``` #### File: software/multicopter/allocator.py ```python from __future__ import division import numpy as np; npl = np.linalg from scipy.optimize import minimize import motion class Allocator(object): """ Solver class for choosing the dictionary of efforts that implement a desired wrench on a multicopter. model: a Model object for the multicopter reg: regularization factor that multiplies the cost of sum(thrusts^2) in the solver tol: solver tolerance, make sure that it is always much less than reg method: string with solver optimization method corresponding to the methods used by scipy.optimize.minimize verbose: whether or not verbose error messages should be displayed if optimizer fails """ def __init__(self, model, reg=1e-5, tol=1e-8, method="SLSQP", verbose=False): self.model = model self.reg = np.float64(reg) self.tol = np.float64(tol) self.method = str(method) self.verbose = bool(verbose) # Locally alias a bunch of thruster related things for legibility here Bp = self.model.B_direcs Bq = self.model.B_levers C = np.diag(self.model.reaction_coeffs) S = self.reg * np.eye(len(self.model.thrusters)) J_u = Bp.T.dot(Bp) + Bq.T.dot(Bq) + Bq.T.dot(Bp).dot(C) + C.dot(Bp.T).dot(Bq) + C.dot(Bp.T).dot(Bp).dot(C) + S J_ang = Bq + Bp.dot(C) J_lin = Bp # Construct cost function, Jacobian, and initial guess for thrust solver part of effort allocation self.thrust_alloc_cost = lambda thrusts, wrench: 0.5 * ((self.model.wrench_from_thrusts(thrusts) - wrench).norm_squared() + self.reg*np.sum(thrusts**2)) self.thrust_alloc_cost_jac = lambda thrusts, wrench: thrusts.T.dot(J_u) - wrench.ang.T.dot(J_ang) - wrench.lin.T.dot(J_lin) self.thrust_alloc_guess = np.mean(self.model.thrust_limits, axis=1) def allocate(self, wrench): """ Returns the optimal allocation of thruster efforts (dictionary) to achieve a desired wrench. wrench: Wrench object containing the desired instantaneous force and torque on the multicopter center of mass """ thrust_opt = minimize(fun=self.thrust_alloc_cost, jac=self.thrust_alloc_cost_jac, args=wrench, bounds=self.model.thrust_limits, x0=self.thrust_alloc_guess, method=self.method, tol=self.tol) if self.verbose and not thrust_opt.success: print "----------" print "WARNING: Thrust allocator optimization failed." 
print "--" print "Wrench requested:" print wrench.lin, wrench.ang print "Thrust bounds:" print self.model.thrust_limits print "Initial guess and cost:" print self.thrust_alloc_guess, self.thrust_alloc_cost(self.thrust_alloc_guess, wrench) print "Thrusts chosen and final cost:" print thrust_opt.x, self.thrust_alloc_cost(thrust_opt.x, wrench) print "----------" efforts = {} for i, key in enumerate(self.model.thruster_keys): efforts[key] = self.model.thrusters[key].effort_from_thrust(thrust_opt.x[i]) return efforts ``` #### File: multicopter/comms/xbee.py ```python import struct import time from twisted.internet import serialport, protocol, defer import datachunker, deferral, variable def _calculate_checksum(data): return 0xFF - sum(map(ord, data)) % 256 class Protocol(protocol.Protocol): def connectionLost(self, reason): print 'XBee connection lost:', reason class XBee(object): @defer.inlineCallbacks def __new__(cls, reactor, port, initial_baud_rate=9600): self = object.__new__(XBee) buf = [] self._protocol = Protocol() self._protocol.dataReceived = lambda data: buf.extend(data) cmds = [ 'ATID9FF', 'ATHP6', 'ATKY0', 'ATRR0', 'ATMT0', 'ATAP1', 'ATMYFFFF', 'ATDTFFFF', #'ATMK0', # sniffing # RB/PK? 'ATCN', ] self._port = serialport.SerialPort(self._protocol, port, reactor, initial_baud_rate) self._port.flushInput() yield deferral.sleep(1.1) buff_str = repr(''.join(buf)); buf = [] if buff_str == "''": print "buffer: empty" else: print "buffer:", buff_str self._port.write('+++') yield deferral.sleep(1.1) for cmd in cmds: print repr(''.join(buf)); buf = [] self._port.write(cmd + '\r') yield deferral.sleep(.1) self.packet_received = variable.Event() self._protocol.dataReceived = datachunker.DataChunker(self.dataReceiver()) defer.returnValue(self) def dataReceiver(self): while True: x = yield 1 if x != '\x7e': print 'garbage', x.encode('hex') continue length, = struct.unpack('>H', (yield 2)) if length == 0: print 'length == 0' continue if length > 270: print 'length too long', length continue data = yield length checksum_should = _calculate_checksum(data) checksum, = struct.unpack('>B', (yield 1)) if checksum != checksum_should: print 'invalid checksum!' continue cmdID = ord(data[0]) cmdData = data[1:] if cmdID != 0x81: print 'unknown xbee packet:', (cmdID, cmdData) continue else: if len(cmdData) < 4: print 'short xbee packet:', (cmdID, cmdData) continue source_address, rssi, options = struct.unpack('>HBB', cmdData[:4]) self.packet_received.happened(dict(source_address=source_address, rssi=rssi, options=options, data=cmdData[4:])) def _send_packet(self, frame_data): self._port.write(struct.pack('>BH', 0x7e, len(frame_data)) + frame_data + chr(_calculate_checksum(frame_data))) def transmit(self, data): self._send_packet('0100ffff00'.decode('hex') + data) ``` #### File: software/multicopter/copilot.py ```python from __future__ import division import numpy as np; npl = np.linalg from allocator import Allocator import motion class CoPilot(object): """ Solver class that takes Command objects from a Pilot and computes the instantaneous efforts necessary. 
model: a Model object for the multicopter kp: list of proportional gains for the degrees of freedom [world-up, roll, pitch, yaw] kd: list of derivative gains for the degrees of freedom [world-up, roll, pitch, yaw] primer_state: State object that will provide initial conditions for yaw, ascent, and time """ def __init__(self, model, kp, kd, primer_state): self.model = model self.kp = np.array(kp, dtype=np.float64) self.kd = np.array(kd, dtype=np.float64) if self.model.gmag == 0: self.ascent_dir = np.array([0, 0, 1], dtype=np.float64) else: self.ascent_dir = -self.model.gdir self.reset(primer_state) self.allocator = Allocator(self.model, verbose=False) def control(self, state, command): """ Returns the efforts that should be applied to achieve the given pilot commands. state: State object with the current multicopter state command: Command object with the pilot's current commands """ # Project body-up direction onto world-up direction (get cosine of the tilt angle) ctilt = self.ascent_dir.dot(state.pose.rotate_vector([0, 0, 1])) # Don't worry about ascent if completely sideways or upsidedown if ctilt <= 1e-5: response_force = [0, 0, 0] else: # Compute actual state's world-coordinate ascent and ascent rate state_ascent = self.ascent_dir.dot(state.pose.lin) state_ascent_rate = self.ascent_dir.dot(state.pose.rotate_vector(state.twist.lin)) # Let thruster force be completely along body-up and strong enough to cancel world-coordinate gravity, plus feedback response_force = [0, 0, (1/ctilt) * (self.model.mass*self.model.gmag + self.kp[0]*(self.ascent - state_ascent) + self.kd[0]*(command.ascent_rate - state_ascent_rate))] # Ordinary Lie algebraic attitude control scheme for thruster torques qdes = motion.quaternion_from_euler(command.roll, command.pitch, self.yaw) response_torque = self.kp[1:]*motion.ori_error(qdes, state.pose.ang) + self.kd[1:]*([0, 0, command.yaw_rate] - state.twist.ang) # Integrate command rates dt = state.time - self.time self.yaw = motion.unwrap_angle(self.yaw + dt*command.yaw_rate) self.ascent += dt*command.ascent_rate self.time += dt # Allocate efforts for the response wrench return self.allocator.allocate(motion.Wrench(response_force, response_torque)) def reset(self, state): """ Sets the internally integrated yaw, ascent, and time to the values implicitly contained by a given state. state: State object to extract yaw, ascent, and time from """ self.yaw = motion.euler_from_quaternion(state.pose.ang)[2] self.ascent = self.ascent_dir.dot(state.pose.lin) self.time = state.time ``` #### File: software/multicopter/pilot.py ```python from __future__ import division import numpy as np; npl = np.linalg from threading import Thread from collections import deque # thread safe from inputs import devices, get_gamepad from motion import Command class Pilot(object): """ User interface for remote-controlling a multicopter. Call start_pilot_thread to begin filling an internal buffer with user input. Call get_command to execute / clear the buffer and get the current relevant Command object. Change the mission_code attribute to an integer that will be sent as command.start on activation. Call stop_pilot_thread when done! 
max_roll: magnitude of the largest acceptable roll command (in degrees) max_pitch: magnitude of the largest acceptable pitch command (in degrees) max_yaw_rate: magnitude of the largest acceptable yaw rate command (in degrees per time) max_ascent_rate: magnitude of the largest acceptable ascent rate command stick_deadband: fraction of analog joystick travel that should be treated as zero trigger_deadband: fraction of analog trigger travel that should be treated as zero max_buffer_size: maximum number of user commands that should be stored before dropping old ones button_callbacks: dictionary of callback functions keyed by button names (A, B, X, Y, L, R, SL, SR, DV, DH, K) """ def __init__(self, max_roll=65, max_pitch=65, max_yaw_rate=180, max_ascent_rate=5, stick_deadband=0.1, trigger_deadband=0.0, max_buffer_size=200, button_callbacks={}): self.max_roll = np.deg2rad(max_roll) self.max_pitch = np.deg2rad(max_pitch) self.max_yaw_rate = np.deg2rad(max_yaw_rate) self.max_ascent_rate = np.float64(max_ascent_rate) self.stick_deadband = float(stick_deadband) self.trigger_deadband = float(trigger_deadband) self.max_buffer_size = int(max_buffer_size) self.button_callbacks = button_callbacks # Valid input device names in priority order self.valid_device_names = ["Microsoft X-Box One pad (Firmware 2015)", "PowerA Xbox One wired controller"] # Set valid input device self.input_device = None for valid_device_name in self.valid_device_names: if self.input_device is not None: break for device in devices: if device.name == valid_device_name: self.input_device = device.name print "Hello, Pilot! Ready to read from {}.".format(device.name) break if self.input_device is None: raise IOError("FATAL: No valid input device is connected!") # Digital button code names self.button_codes = {"BTN_SOUTH": "A", "BTN_EAST": "B", "BTN_NORTH": "X", "BTN_WEST": "Y", "BTN_TL": "L", "BTN_TR": "R", "BTN_SELECT": "SL", "BTN_START": "SR", "ABS_HAT0Y": "DV", "ABS_HAT0X": "DH", "BTN_MODE": "K"} # Analog input characteristics self.max_stick = 32767 self.max_trigger = 1023 self.min_stick = int(self.stick_deadband * self.max_stick) self.min_trigger = int(self.trigger_deadband * self.max_trigger) # Internals self.command = None self.pilot_thread = None self.stay_alive = False self.buffer = deque([]) self.buffer_size_flag = False # Change this integer attribute to affect what command.start will be when activated self.mission_code = 0 def get_command(self): """ Executes / clears the input buffer and returns the current relevant Command object. 
""" if self.pilot_thread is None: raise AssertionError("FATAL: Cannot get_command without active pilot thread!") while self.buffer: event = self.buffer.pop() if event.code == "ABS_Y": self.command.ascent_rate = -self._stick_frac(event.state) * self.max_ascent_rate elif event.code == "ABS_X": pass elif event.code == "ABS_RY": self.command.pitch = -self._stick_frac(event.state) * self.max_pitch elif event.code == "ABS_RX": self.command.roll = self._stick_frac(event.state) * self.max_roll elif event.code == "ABS_Z": self.command.yaw_rate = self._trigger_frac(event.state) * self.max_yaw_rate elif event.code == "ABS_RZ": self.command.yaw_rate = -self._trigger_frac(event.state) * self.max_yaw_rate elif event.code in self.button_codes: if event.code == "BTN_WEST": self.command.start = int(event.state * self.mission_code) elif event.code == "BTN_NORTH": self.command.cancel = bool(event.state) elif event.code == "BTN_MODE": self.command.kill = bool(event.state) self.button_callbacks.get(self.button_codes[event.code], lambda val: None)(event.state) return self.command def start_pilot_thread(self): """ Starts a thread that reads user input into the internal buffer. """ if self.stay_alive: print "----------" print "WARNING: Pilot thread already running!" print "Cannot start another." print "----------" return self.command = Command() self.stay_alive = True if self.input_device in ["Microsoft X-Box One pad (Firmware 2015)", "PowerA Xbox One wired controller"]: self.pilot_thread = Thread(target=self._listen_xbox) else: raise IOError("FATAL: No listener function has been implemented for device {}.".format(self.input_device)) print "Pilot thread has begun!" self.pilot_thread.start() def stop_pilot_thread(self): """ Terminates the Pilot's user input reading thread and clears the buffer. """ self.stay_alive = False if self.pilot_thread is not None: print "Pilot thread terminating on next input!" self.pilot_thread.join() # stay secure self.pilot_thread = None while self.buffer: self.buffer.pop() self.buffer_size_flag = False self.command = None def _listen_xbox(self): try: while self.stay_alive: self.buffer.appendleft(get_gamepad()[0]) # this is blocking (hence need for threading) if len(self.buffer) > self.max_buffer_size: if not self.buffer_size_flag: self.buffer_size_flag = True print "----------" print "WARNING: Pilot input buffer reached {} entries.".format(self.max_buffer_size) print "Dropping old commands." print "----------" self.buffer.pop() finally: print "Pilot thread terminated!" self.pilot_thread = None def _stick_frac(self, val): if abs(val) > self.min_stick: return np.divide(val, self.max_stick, dtype=np.float64) return np.float64(0) def _trigger_frac(self, val): if abs(val) > self.min_trigger: return np.divide(val, self.max_trigger, dtype=np.float64) return np.float64(0) ```
{ "source": "jnez71/navboxplus", "score": 3 }
#### File: navboxplus/tests/test_ukf_so3.py ```python from __future__ import division import numpy as np; npl = np.linalg from time import time from navboxplus import NavBoxPlus from navboxplus.boxops import quaternion_boxplus, quaternion_boxminus, normalize_l2 # For easy reading of results, suppress scientific notation np.set_printoptions(suppress=True) # State is [quaternion_orientation_body_to_world, angular_velocity_in_body] n_x = 7 n_m = 6 # Process uncertainty resides only in our angular acceleration model n_wf = 3 # Sensor is ordinary gyroscope n_z = 3 n_wh = 3 # Typical torque control, but that isn't relevant to this test n_r = 7 n_u = 3 # Boxplus combines quaternion boxplus and ordinary addition for angular velocity def xplus(x, v): return np.append(quaternion_boxplus(x[:4], v[:3]), x[4:]+v[3:]) # Boxminus combines quaternion boxminus and ordinary subtraction for angular velocity def xminus(x2, x1): return np.append(quaternion_boxminus(x2[:4], x1[:4]), x2[4:]-x1[4:]) # Zero angular acceleration process model, except noise enters through angular acceleration dt = 0.01 def f(x, u, wf, dt): return np.append(quaternion_boxplus(x[:4], x[4:]*dt), x[4:]+wf) # Additive noise gyroscope with tiny bias def h(x, u, wh): return x[4:] + wh + 0.001 # Unity-gain feedback torque controller, irrelevant to this test def g(r, rnext, x, Cx, dt): return quaternion_boxminus(r[:4], x[:4]) + (r[4:] - x[4:]) # Some initial state that is obvious what will happen after a predict and correct q = normalize_l2([1, 3, 2, -9]) x = np.append(q, [1, 0, 0]) Cx = np.eye(n_m) # Simple process noise characteristics wf0 = np.zeros(n_wf) Cf = np.eye(n_wf) # Simple sensor noise characteristics wh0 = np.zeros(n_wh) Ch = np.eye(n_wh) # Create NavBoxPlus object nav = NavBoxPlus(x0=x, Cx0=Cx, g=g, f=f, hDict={'gyro': h}, n_r=n_r, n_wf=n_wf, n_whDict={'gyro': n_wh}, xplus=xplus, xminus=xminus) # Run predict step start_predict = time() nav.predict(x, x, wf0, Cf, dt) predict_duration = time() - start_predict # Display prediction results print "Original state: {}".format(np.round(x, 3)) print "Predicted next state: {}".format(np.round(nav.x, 3)), '\n' print "Original covariance:\n", np.round(Cx, 3) print "Covariance after prediction:\n", np.round(nav.Cx, 3), '\n' print "Predict took {} ms.".format(np.round(1000*predict_duration, 3)) print "\n----\n" assert nav.is_pdef(nav.Cx) # Correct after receiving some gyro measurement z = [-2, 10, 0] start_correct = time() nav.correct('gyro', z, wh0, Ch) correct_duration = time() - start_correct # Display correction results print "Gyro measurement: {}".format(z) print "Corrected state: {}".format(np.round(nav.x, 3)), '\n' print "Covariance after correction:\n", np.round(nav.Cx, 3), '\n' print "Correct took {} ms.".format(np.round(1000*correct_duration, 3)), '\n' assert nav.is_pdef(nav.Cx) print "Look good?\n" ```
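An illustrative extra check that could be appended to the script above (it is not part of the original test): the manifold operators should satisfy the round-trip q2 == q1 [+] (q2 [-] q1), which is what lets the UKF treat the quaternion block of the state as a local 3-vector. The recovered quaternion may differ from q2 by an overall sign, which represents the same rotation.

```python
# Hypothetical boxplus/boxminus round-trip check using the imports above.
qa = normalize_l2(np.random.randn(4))
qb = normalize_l2(np.random.randn(4))
qb_roundtrip = quaternion_boxplus(qa, quaternion_boxminus(qb, qa))
print("round trip ok: {}".format(np.allclose(qb, qb_roundtrip) or np.allclose(qb, -qb_roundtrip)))
```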
{ "source": "jnez71/Orientation_Library", "score": 4 }
#### File: Orientation_Library/demos/demo_PDcontrol.py ```python from __future__ import division import numpy as np import numpy.linalg as npl import matplotlib.pyplot as plt import matplotlib.animation as animation import mpl_toolkits.mplot3d.axes3d as p3 from orientation_library import transformations as trns from orientation_library import oritools as ori ################################################# SETUP # Define time: dt = 0.01 # global time step (s) T = 10 # simulation duration (s) t_arr = np.arange(0, T, dt) # time values array (s) # Define body inertia: I = np.array([[ 1 , 0.01 , 0.02], [0.01 , 2 , 0.03], [0.02 , 0.03 , 3 ]]) # inertia matrix in body frame (kg*m^2) # I = np.diag([1, 2, 3]) # cool to compare to diagonal inertia matrix case (which would imply body frame is principal frame) invI = npl.inv(I) # store inverse for future use # Define state and initial conditions: q = trns.random_quaternion() # orientation state quaternion representing a conversion ***from body frame to world frame*** w = 10 * (np.random.rand(3) - 0.5) # angular velocity state (rad/s) in world frame torque = np.array([0, 0, 0]) # initial control input torque, need only be initialized for programming purposes print('\nInitial Orientation: {}'.format(q)) print('Initial Ang. Velocity: {}'.format(np.rad2deg(w))) # Controller setup (set gains to 0 if you want to test torque-free precession): q_des = trns.random_quaternion() # desired orientation state w_des = np.array([0, 0, 0]) # desired angular velocity state () kp = np.array([150, 150, 150]) # proportional gain (body frame roll, pitch, yaw) kd = np.array([170, 170, 170]) # derivative gain (body frame rolling, pitching, yawing) print('Desired Orientation: {}'.format(q_des)) print('Desired Ang. Velocity: {}'.format(np.rad2deg(w_des))) print('Proportional Gains: {}'.format(kp)) print('Derivative Gains: {}'.format(kd)) # Animation setup: showplots = True # Show angle plots before animating? (close plot window to proceed to animation) framerate = 20 # Despite the display not being real-time, this controls something like a framerate # Visually represent the body as three perpendicular lines, each defined by its endpoints: linlen = 0.5 xline = np.array([[0, linlen], [0, 0], [0, 0]]) yline = np.array([[0, 0], [0, linlen], [0, 0]]) zline = np.array([[0, 0], [0, 0], [0, linlen]]) body = np.concatenate((xline, yline, zline), axis=1) # each column is a point in body # Initialize histories for recording the simulation: q_history = np.zeros((len(t_arr), 4)) roll_history, pitch_history, yaw_history = np.zeros(len(t_arr)), np.zeros(len(t_arr)), np.zeros(len(t_arr)) # for plotting something understandable body_world_history = np.zeros((body.shape[0], body.shape[1], len(t_arr))) # will store all the body points expressed in world frame at each instant w_history = np.zeros((len(t_arr), 3)) torque_history = np.zeros((len(t_arr), 3)) ################################################# SIMULATE # Solve the rigid body rotation ODE with first-order integration: # --- # Equation 1: Hnext = Hlast + torque*dt # --- # where in this case torque is the controller output and H is the # body's angular momentum in world frame, H = I_world * w ==> w = inv(I_world)*H # In differential equation form, this is the classic law Hdot = torque iff # the origin of the body frame is at the center of mass, which is typically done. 
# --- # Equation 2: qnext = dq "oriplus" q # --- # where dq represents ***nextbody to lastbody*** and q represents ***lastbody to world*** so that # their orisum is ***nextbody to world*** which then overwrites ***lastbody to world*** as the new q. # The key here is that w*dt is the rotvec representing ***nextbody to lastbody***, # and it has an equivalent quaternion expression dq. In differential equation form, the equation # is qdot = f(w,dt) = ori.quaternion_from_rotvec(w*dt)/dt, but it is crucial to understand that # integrating this equation requires use of ori.plus, so it cannot be easily fed into a standard solver. # --- # Simulate over t_arr: for i, t in enumerate(t_arr): # Record current state: q_history[i, :] = np.copy(q) roll_history[i], pitch_history[i], yaw_history[i] = trns.euler_from_quaternion(q, 'rxyz') body_world_history[:, :, i] = ori.qapply_points(q, body) w_history[i, :] = np.copy(w) torque_history[i, :] = np.copy(torque) # Current values needed to compute next state: I_world = ori.qapply_matrix(q, I) # current inertia matrix in world frame H = I_world.dot(w) # current angular momentum wb = ori.qapply_points(trns.quaternion_inverse(q), w) # w in body frame dq = ori.quaternion_from_rotvec(wb * dt) # change in orientation for this timestep instant # PD controller: q_err = ori.error(q, q_des) # q_err is a rotvec w_err = w_des - w kpW = np.diag(ori.qapply_matrix(q, np.diag(kp))) # world frame kp gains kdW = np.diag(ori.qapply_matrix(q, np.diag(kd))) # world frame kd gains torque = (kpW * q_err) + (kdW * w_err) # Compute next state: q = ori.plus(dq, q) # new orientation computed using dq and old q I_world = ori.qapply_matrix(q, I) # new I_world computed using new q H = H + (torque * dt) # new H from old H and torque w = npl.inv(I_world).dot(H) # new angular velocity computed using new I_world and new H ################################################# DISPLAY if showplots: fig1 = plt.figure() fig1.suptitle('Orientation State Evolution', fontsize=24) # Plot roll: ax1 = fig1.add_subplot(3, 3, 1) ax1.plot(t_arr, np.rad2deg(roll_history)) ax1.set_ylabel('roll (deg)', fontsize=16) ax1.grid(True) # Plot pitch: ax2 = fig1.add_subplot(3, 3, 4) ax2.plot(t_arr, np.rad2deg(pitch_history)) ax2.set_ylabel('pitch (deg)', fontsize=16) ax2.grid(True) # Plot yaw: ax3 = fig1.add_subplot(3, 3, 7) ax3.plot(t_arr, np.rad2deg(yaw_history)) ax3.set_xlabel('time (s)', fontsize=16) ax3.set_ylabel('yaw (deg)', fontsize=16) ax3.grid(True) # Plot rolling: ax1 = fig1.add_subplot(3, 3, 2) ax1.plot(t_arr, np.rad2deg(w_history[:, 0])) ax1.set_ylabel('w_x (deg/s)', fontsize=16) ax1.grid(True) # Plot pitching: ax2 = fig1.add_subplot(3, 3, 5) ax2.plot(t_arr, np.rad2deg(w_history[:, 1])) ax2.set_ylabel('w_y (deg/s)', fontsize=16) ax2.grid(True) # Plot yawing: ax3 = fig1.add_subplot(3, 3, 8) ax3.plot(t_arr, np.rad2deg(w_history[:, 2])) ax3.set_ylabel('w_z (deg/s)', fontsize=16) ax3.set_xlabel('time (s)', fontsize=16) ax3.grid(True) # Plot torque_x: ax1 = fig1.add_subplot(3, 3, 3) ax1.plot(t_arr, torque_history[:, 0]) ax1.set_ylabel('T_x (N*m)', fontsize=16) ax1.grid(True) # Plot torque_y: ax2 = fig1.add_subplot(3, 3, 6) ax2.plot(t_arr, torque_history[:, 1]) ax2.set_ylabel('T_y (N*m)', fontsize=16) ax2.grid(True) # Plot torque_z: ax3 = fig1.add_subplot(3, 3, 9) ax3.plot(t_arr, torque_history[:, 2]) ax3.set_ylabel('T_z (N*m)', fontsize=16) ax3.set_xlabel('time (s)', fontsize=16) ax3.grid(True) plt.show() fig2 = plt.figure() fig2.suptitle('Orientation State Evolution', fontsize=24) ax4 = p3.Axes3D(fig2) 
ax4.set_xlim3d([-1, 1]) ax4.set_ylim3d([-1, 1]) ax4.set_zlim3d([-1, 1]) ax4.set_xlabel('- World X +') ax4.set_ylabel('- World Y +') ax4.set_zlabel('- World Z +') ax4.grid(True) # Plot desired: body_des = 2 * ori.qapply_points(q_des, body) ax4.plot(body_des[0, :2], body_des[1, :2], body_des[2, :2], color='red', ls='--', linewidth=0.8) ax4.plot(body_des[0, 2:4], body_des[1, 2:4], body_des[2, 2:4], color='green', ls='--', linewidth=0.8) ax4.plot(body_des[0, 4:6], body_des[1, 4:6], body_des[2, 4:6], color='blue', ls='--', linewidth=0.8) # Plot initial: body_world_history_init = 2 * body_world_history[:, :, 0] ax4.plot(body_world_history_init[0, :2], body_world_history_init[1, :2], body_world_history_init[2, :2], color='red', ls=':', linewidth=0.8) ax4.plot(body_world_history_init[0, 2:4], body_world_history_init[1, 2:4], body_world_history_init[2, 2:4], color='green', ls=':', linewidth=0.8) ax4.plot(body_world_history_init[0, 4:6], body_world_history_init[1, 4:6], body_world_history_init[2, 4:6], color='blue', ls=':', linewidth=0.8) # Create drawing objects: x = ax4.plot(body_world_history[0, :2, 0], body_world_history[1, :2, 0], body_world_history[2, :2, 0], color='red', linewidth=4) y = ax4.plot(body_world_history[0, 2:4, 0], body_world_history[1, 2:4, 0], body_world_history[2, 2:4, 0], color='green', linewidth=4) z = ax4.plot(body_world_history[0, 4:6, 0], body_world_history[1, 4:6, 0], body_world_history[2, 4:6, 0], color='blue', linewidth=4) def update(arg, ii=[0]): i = ii[0] if np.isclose(t_arr[i], np.around(t_arr[i], 1)): fig2.suptitle('Orientation State Evolution (Time: {})'.format(t_arr[i]), fontsize=24) x[0].set_data(body_world_history[0, :2, i], body_world_history[1, :2, i]) x[0].set_3d_properties(body_world_history[2, :2, i]) y[0].set_data(body_world_history[0, 2:4, i], body_world_history[1, 2:4, i]) y[0].set_3d_properties(body_world_history[2, 2:4, i]) z[0].set_data(body_world_history[0, 4:6, i], body_world_history[1, 4:6, i]) z[0].set_3d_properties(body_world_history[2, 4:6, i]) ii[0] += int(1 / (dt * framerate)) if ii[0] >= len(t_arr): ii[0] = 0 return [x, y, z] ani = animation.FuncAnimation(fig2, func=update, interval=dt*1000) print('Remember to keep the diplay window aspect ratio square!') print('') plt.show() ```
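The Equation 1 / Equation 2 comments earlier in this demo describe one first-order step of the simulation. A condensed sketch of that single step is below; it reuses only functions the demo itself imports from orientation_library, and the caller supplies the torque, so it is illustrative rather than a drop-in replacement for the loop.

```python
# Condensed sketch of the first-order integration step described in the
# Equation 1 / Equation 2 comments above, mirroring the order of operations
# in the simulation loop.
import numpy.linalg as npl
from orientation_library import transformations as trns
from orientation_library import oritools as ori

def step(q, w, I, torque, dt):
    """Advance orientation q and world-frame angular velocity w by one step dt."""
    wb = ori.qapply_points(trns.quaternion_inverse(q), w)   # w expressed in body frame
    dq = ori.quaternion_from_rotvec(wb * dt)                 # nextbody -> lastbody
    q_next = ori.plus(dq, q)                                 # Eq. 2: nextbody -> world
    H_next = ori.qapply_matrix(q, I).dot(w) + torque * dt    # Eq. 1: Hdot = torque
    I_world_next = ori.qapply_matrix(q_next, I)              # inertia in world frame
    w_next = npl.inv(I_world_next).dot(H_next)
    return q_next, w_next
```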
{ "source": "jnfang/business-board-tool", "score": 2 }
#### File: appengine/tools/api_server.py ```python from __future__ import with_statement import BaseHTTPServer import httplib import logging import os.path import pickle import socket import SocketServer import subprocess import sys import tempfile import threading import time import traceback import urllib2 import urlparse import wsgiref.headers import google import yaml from google.appengine.api import mail_stub from google.appengine.api import request_info from google.appengine.api import urlfetch_stub from google.appengine.api import user_service_stub from google.appengine.api.app_identity import app_identity_stub from google.appengine.api.blobstore import blobstore_stub from google.appengine.api.blobstore import file_blob_storage from google.appengine.api.capabilities import capability_stub from google.appengine.api.channel import channel_service_stub from google.appengine.api.files import file_service_stub from google.appengine.api.logservice import logservice_stub from google.appengine.api.search import simple_search_stub from google.appengine.api.taskqueue import taskqueue_stub from google.appengine.api.prospective_search import prospective_search_stub from google.appengine.api.memcache import memcache_stub from google.appengine.api.system import system_stub from google.appengine.api.xmpp import xmpp_service_stub from google.appengine.api import datastore_file_stub from google.appengine.datastore import datastore_sqlite_stub from google.appengine.datastore import datastore_stub_util from google.appengine.datastore import datastore_v4_stub from google.appengine.api import apiproxy_stub_map from google.appengine.ext.remote_api import remote_api_pb from google.appengine.ext.remote_api import remote_api_services from google.appengine.runtime import apiproxy_errors QUIT_PATH = '/quit' GLOBAL_API_LOCK = threading.RLock() class Error(Exception): pass def _ClearDatastoreStorage(datastore_path): """Delete the datastore storage file at the given path.""" if os.path.lexists(datastore_path): try: os.remove(datastore_path) except OSError, e: logging.warning('Failed to remove datastore file %r: %s', datastore_path, e) def _ClearProspectiveSearchStorage(prospective_search_path): """Delete the perspective search storage file at the given path.""" if os.path.lexists(prospective_search_path): try: os.remove(prospective_search_path) except OSError, e: logging.warning('Failed to remove prospective search file %r: %s', prospective_search_path, e) THREAD_SAFE_SERVICES = frozenset(( 'app_identity_service', 'capability_service', 'channel', 'logservice', 'mail', 'memcache', 'remote_socket', 'urlfetch', 'user', 'xmpp', )) def _ExecuteRequest(request): """Executes an API method call and returns the response object. Args: request: A remote_api.Request object representing the API call e.g. a call to memcache.Get. Returns: A ProtocolBuffer.ProtocolMessage representing the API response e.g. a memcache_service_pb.MemcacheGetResponse. Raises: apiproxy_errors.CallNotFoundError: if the requested method doesn't exist. apiproxy_errors.ApplicationError: if the API method calls fails. 
""" service = request.service_name() method = request.method() service_methods = remote_api_services.SERVICE_PB_MAP.get(service, {}) request_class, response_class = service_methods.get(method, (None, None)) if not request_class: raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service, method)) request_data = request_class() request_data.ParseFromString(request.request()) response_data = response_class() def MakeRequest(): apiproxy_stub_map.MakeSyncCall(service, method, request_data, response_data) if service in THREAD_SAFE_SERVICES: MakeRequest() else: with GLOBAL_API_LOCK: MakeRequest() return response_data class APIRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handler for all API server HTTP requests.""" def log_message(self, format, *args): logging.debug(format, *args) def do_GET(self): if self.path == QUIT_PATH: self._HandleShutdown() else: params = urlparse.parse_qs(urlparse.urlparse(self.path).query) rtok = params.get('rtok', ['0'])[0] self.send_response(httplib.OK) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write(yaml.dump({ 'app_id': self.server.app_id, 'rtok': rtok, })) def _HandleShutdown(self): """Handles a request for the API Server to exit.""" self.send_response(httplib.OK) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write('API Server Quitting') self.server.shutdown() def do_POST(self): """Handles a single API request e.g. memcache.Get().""" self.send_response(httplib.OK) self.send_header('Content-Type', 'application/octet-stream') self.end_headers() response = remote_api_pb.Response() try: request = remote_api_pb.Request() request.ParseFromString( self.rfile.read(int(self.headers['content-length']))) api_response = _ExecuteRequest(request).Encode() response.set_response(api_response) except Exception, e: logging.debug('Exception while handling %s\n%s', request, traceback.format_exc()) response.set_exception(pickle.dumps(e)) if isinstance(e, apiproxy_errors.ApplicationError): application_error = response.mutable_application_error() application_error.set_code(e.application_error) application_error.set_detail(e.error_detail) self.wfile.write(response.Encode()) class APIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): """Serves API calls over HTTP.""" def __init__(self, server_address, app_id): BaseHTTPServer.HTTPServer.__init__(self, server_address, APIRequestHandler) self.app_id = app_id def _SetupStubs( app_id, application_root, appidentity_email_address, appidentity_private_key_path, trusted, blobstore_path, use_sqlite, auto_id_policy, high_replication, datastore_path, datastore_require_indexes, images_host_prefix, logs_path, mail_smtp_host, mail_smtp_port, mail_smtp_user, mail_smtp_password, mail_enable_sendmail, mail_show_mail_body, matcher_prospective_search_path, taskqueue_auto_run_tasks, taskqueue_task_retry_seconds, taskqueue_default_http_server, user_login_url, user_logout_url, default_gcs_bucket_name): """Configures the APIs hosted by this server. Args: app_id: The str application id e.g. "guestbook". application_root: The path to the directory containing the user's application e.g. "/home/bquinlan/myapp". trusted: A bool indicating if privileged APIs should be made available. blobstore_path: The path to the file that should be used for blobstore storage. use_sqlite: A bool indicating whether DatastoreSqliteStub or DatastoreFileStub should be used. 
auto_id_policy: One of datastore_stub_util.SEQUENTIAL or .SCATTERED, indicating whether the Datastore stub should assign IDs sequentially or scattered. high_replication: A bool indicating whether to use the high replication consistency model. datastore_path: The path to the file that should be used for datastore storage. datastore_require_indexes: A bool indicating if the same production datastore indexes requirements should be enforced i.e. if True then a google.appengine.ext.db.NeedIndexError will be be raised if a query is executed without the required indexes. images_host_prefix: The URL prefix (protocol://host:port) to preprend to image urls on calls to images.GetUrlBase. logs_path: Path to the file to store the logs data in. mail_smtp_host: The SMTP hostname that should be used when sending e-mails. If None then the mail_enable_sendmail argument is considered. mail_smtp_port: The SMTP port number that should be used when sending e-mails. If this value is None then mail_smtp_host must also be None. mail_smtp_user: The username to use when authenticating with the SMTP server. This value may be None if mail_smtp_host is also None or if the SMTP server does not require authentication. mail_smtp_password: The password to use when authenticating with the SMTP server. This value may be None if mail_smtp_host or mail_smtp_user is also None. mail_enable_sendmail: A bool indicating if sendmail should be used when sending e-mails. This argument is ignored if mail_smtp_host is not None. mail_show_mail_body: A bool indicating whether the body of sent e-mails should be written to the logs. matcher_prospective_search_path: The path to the file that should be used to save prospective search subscriptions. taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should be run automatically or it the must be manually triggered. taskqueue_task_retry_seconds: An int representing the number of seconds to wait before a retrying a failed taskqueue task. taskqueue_default_http_server: A str containing the address of the http server that should be used to execute tasks. user_login_url: A str containing the url that should be used for user login. user_logout_url: A str containing the url that should be used for user logout. default_gcs_bucket_name: A str overriding the usual default bucket name. 
""" os.environ['APPLICATION_ID'] = app_id tmp_app_identity_stub = app_identity_stub.AppIdentityServiceStub.Create( email_address=appidentity_email_address, private_key_path=appidentity_private_key_path) if default_gcs_bucket_name is not None: tmp_app_identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name) apiproxy_stub_map.apiproxy.RegisterStub( 'app_identity_service', tmp_app_identity_stub) blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id) apiproxy_stub_map.apiproxy.RegisterStub( 'blobstore', blobstore_stub.BlobstoreServiceStub(blob_storage)) apiproxy_stub_map.apiproxy.RegisterStub( 'capability_service', capability_stub.CapabilityServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'channel', channel_service_stub.ChannelServiceStub()) if use_sqlite: datastore = datastore_sqlite_stub.DatastoreSqliteStub( app_id, datastore_path, datastore_require_indexes, trusted, root_path=application_root, auto_id_policy=auto_id_policy) else: datastore = datastore_file_stub.DatastoreFileStub( app_id, datastore_path, datastore_require_indexes, trusted, root_path=application_root, auto_id_policy=auto_id_policy) if high_replication: datastore.SetConsistencyPolicy( datastore_stub_util.TimeBasedHRConsistencyPolicy()) apiproxy_stub_map.apiproxy.RegisterStub( 'datastore_v3', datastore) apiproxy_stub_map.apiproxy.RegisterStub( 'datastore_v4', datastore_v4_stub.DatastoreV4Stub(app_id)) apiproxy_stub_map.apiproxy.RegisterStub( 'file', file_service_stub.FileServiceStub(blob_storage)) try: from google.appengine.api.images import images_stub except ImportError: logging.warning('Could not initialize images API; you are likely missing ' 'the Python "PIL" module.') from google.appengine.api.images import images_not_implemented_stub apiproxy_stub_map.apiproxy.RegisterStub( 'images', images_not_implemented_stub.ImagesNotImplementedServiceStub()) else: apiproxy_stub_map.apiproxy.RegisterStub( 'images', images_stub.ImagesServiceStub(host_prefix=images_host_prefix)) apiproxy_stub_map.apiproxy.RegisterStub( 'logservice', logservice_stub.LogServiceStub(logs_path=logs_path)) apiproxy_stub_map.apiproxy.RegisterStub( 'mail', mail_stub.MailServiceStub(mail_smtp_host, mail_smtp_port, mail_smtp_user, mail_smtp_password, enable_sendmail=mail_enable_sendmail, show_mail_body=mail_show_mail_body)) apiproxy_stub_map.apiproxy.RegisterStub( 'memcache', memcache_stub.MemcacheServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'search', simple_search_stub.SearchServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub('system', system_stub.SystemServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'taskqueue', taskqueue_stub.TaskQueueServiceStub( root_path=application_root, auto_task_running=taskqueue_auto_run_tasks, task_retry_seconds=taskqueue_task_retry_seconds, default_http_server=taskqueue_default_http_server)) apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution() apiproxy_stub_map.apiproxy.RegisterStub( 'urlfetch', urlfetch_stub.URLFetchServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'user', user_service_stub.UserServiceStub(login_url=user_login_url, logout_url=user_logout_url)) apiproxy_stub_map.apiproxy.RegisterStub( 'xmpp', xmpp_service_stub.XmppServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'matcher', prospective_search_stub.ProspectiveSearchStub( matcher_prospective_search_path, apiproxy_stub_map.apiproxy.GetStub('taskqueue'))) def _TearDownStubs(): """Clean up any stubs that need cleanup.""" logging.info('Applying all pending transactions and saving the 
datastore') datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3') datastore_stub.Write() def ParseCommandArguments(args): """Parses and the application's command line arguments. Args: args: A list of command line arguments *not* including the executable or script e.g. ['-A' 'myapp', '--api_port=8000']. Returns: An object containing the values passed in the commandline as attributes. Raises: SystemExit: if the argument parsing fails. """ import argparse from google.appengine.tools import boolean_action parser = argparse.ArgumentParser() parser.add_argument('-A', '--application', required=True) parser.add_argument('--api_host', default='') parser.add_argument('--api_port', default=8000, type=int) parser.add_argument('--trusted', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--appidentity_email_address', default=None) parser.add_argument('--appidentity_private_key_path', default=None) parser.add_argument('--application_root', default=None) parser.add_argument('--application_host', default='localhost') parser.add_argument('--application_port', default=None) parser.add_argument('--blobstore_path', default=None) parser.add_argument('--datastore_path', default=None) parser.add_argument('--auto_id_policy', default='scattered', type=lambda s: s.lower(), choices=(datastore_stub_util.SEQUENTIAL, datastore_stub_util.SCATTERED)) parser.add_argument('--use_sqlite', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--high_replication', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--require_indexes', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--clear_datastore', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--logs_path', default=None) parser.add_argument('--enable_sendmail', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--smtp_host', default='') parser.add_argument('--smtp_port', default=25, type=int) parser.add_argument('--smtp_user', default='') parser.add_argument('--smtp_password', default='') parser.add_argument('--show_mail_body', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--prospective_search_path', default=None) parser.add_argument('--clear_prospective_search', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--enable_task_running', action=boolean_action.BooleanAction, const=True, default=True) parser.add_argument('--task_retry_seconds', default=30, type=int) parser.add_argument('--user_login_url', default=None) parser.add_argument('--user_logout_url', default=None) return parser.parse_args(args) class APIServerProcess(object): """Manages an API Server running as a seperate process.""" def __init__(self, executable, host, port, app_id, script=None, appidentity_email_address=None, appidentity_private_key_path=None, application_host=None, application_port=None, application_root=None, auto_id_policy=None, blobstore_path=None, clear_datastore=None, clear_prospective_search=None, datastore_path=None, enable_sendmail=None, enable_task_running=None, high_replication=None, logs_path=None, prospective_search_path=None, require_indexes=None, show_mail_body=None, smtp_host=None, smtp_password=<PASSWORD>, smtp_port=None, smtp_user=None, task_retry_seconds=None, trusted=None, use_sqlite=None, default_gcs_bucket_name=None): """Configures the APIs hosted by this server. 
Args: executable: The path of the executable to use when running the API Server e.g. "/usr/bin/python". host: The host name that should be used by the API Server e.g. "localhost". port: The port number that should be used by the API Server e.g. 8080. app_id: The str application id e.g. "guestbook". script: The name of the script that should be used, along with the executable argument, to run the API Server e.g. "api_server.py". If None then the executable is run without a script argument. appidentity_email_address: Email address for service account substitute. appidentity_private_key_path: Private key for service account substitute. application_host: The name of the host where the development application server is running e.g. "localhost". application_port: The port where the application server is running e.g. 8000. application_root: The path to the directory containing the user's application e.g. "/home/bquinlan/myapp". auto_id_policy: One of "sequential" or "scattered", indicating whether the Datastore stub should assign IDs sequentially or scattered. blobstore_path: The path to the file that should be used for blobstore storage. clear_datastore: Clears the file at datastore_path, emptying the datastore from previous runs. clear_prospective_search: Clears the file at prospective_search_path, emptying the perspective search state from previous runs. datastore_path: The path to the file that should be used for datastore storage. enable_sendmail: A bool indicating if sendmail should be used when sending e-mails. This argument is ignored if mail_smtp_host is not None. enable_task_running: A bool indicating whether taskqueue tasks should be run automatically or it the must be manually triggered. high_replication: A bool indicating whether to use the high replication consistency model. logs_path: Path to the file to store the logs data in. prospective_search_path: The path to the file that should be used to save prospective search subscriptions. require_indexes: A bool indicating if the same production datastore indexes requirements should be enforced i.e. if True then a google.appengine.ext.db.NeedIndexError will be be raised if a query is executed without the required indexes. show_mail_body: A bool indicating whether the body of sent e-mails should be written to the logs. smtp_host: The SMTP hostname that should be used when sending e-mails. If None then the enable_sendmail argument is considered. smtp_password: The password to use when authenticating with the SMTP server. This value may be None if smtp_host or smtp_user is also None. smtp_port: The SMTP port number that should be used when sending e-mails. If this value is None then smtp_host must also be None. smtp_user: The username to use when authenticating with the SMTP server. This value may be None if smtp_host is also None or if the SMTP server does not require authentication. task_retry_seconds: An int representing the number of seconds to wait before a retrying a failed taskqueue task. trusted: A bool indicating if privileged APIs should be made available. use_sqlite: A bool indicating whether DatastoreSqliteStub or DatastoreFileStub should be used. default_gcs_bucket_name: A str overriding the normal default bucket name. 
""" self._process = None self._host = host self._port = port if script: self._args = [executable, script] else: self._args = [executable] self._BindArgument('--api_host', host) self._BindArgument('--api_port', port) self._BindArgument('--appidentity_email_address', appidentity_email_address) self._BindArgument('--appidentity_private_key_path', appidentity_private_key_path) self._BindArgument('--application_host', application_host) self._BindArgument('--application_port', application_port) self._BindArgument('--application_root', application_root) self._BindArgument('--application', app_id) self._BindArgument('--auto_id_policy', auto_id_policy) self._BindArgument('--blobstore_path', blobstore_path) self._BindArgument('--clear_datastore', clear_datastore) self._BindArgument('--clear_prospective_search', clear_prospective_search) self._BindArgument('--datastore_path', datastore_path) self._BindArgument('--enable_sendmail', enable_sendmail) self._BindArgument('--enable_task_running', enable_task_running) self._BindArgument('--high_replication', high_replication) self._BindArgument('--logs_path', logs_path) self._BindArgument('--prospective_search_path', prospective_search_path) self._BindArgument('--require_indexes', require_indexes) self._BindArgument('--show_mail_body', show_mail_body) self._BindArgument('--smtp_host', smtp_host) self._BindArgument('--smtp_password', smtp_password) self._BindArgument('--smtp_port', smtp_port) self._BindArgument('--smtp_user', smtp_user) self._BindArgument('--task_retry_seconds', task_retry_seconds) self._BindArgument('--trusted', trusted) self._BindArgument('--use_sqlite', use_sqlite) self._BindArgument('--default_gcs_bucket_name', default_gcs_bucket_name) @property def url(self): """Returns the URL that should be used to communicate with the server.""" return 'http://%s:%d' % (self._host, self._port) def __repr__(self): return '<APIServerProcess command=%r>' % ' '.join(self._args) def Start(self): """Starts the API Server process.""" assert not self._process, 'Start() can only be called once' self._process = subprocess.Popen(self._args) def _CanConnect(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((self._host, self._port)) except socket.error: connected = False else: connected = True s.close() return connected def WaitUntilServing(self, timeout=30.0): """Waits until the API Server is ready to handle requests. Args: timeout: The maximum number of seconds to wait for the server to be ready. Raises: Error: if the server process exits or is not ready in "timeout" seconds. """ assert self._process, 'server was not started' finish_time = time.time() + timeout while time.time() < finish_time: if self._process.poll() is not None: raise Error('server has already exited with return: %r', self._process.returncode) if self._CanConnect(): return time.sleep(0.2) raise Error('server did not start after %f seconds', timeout) def _BindArgument(self, argument, value): if value is not None: self._args.append('%s=%s' % (argument, value)) def Quit(self, timeout=5.0): """Causes the API Server process to exit. Args: timeout: The maximum number of seconds to wait for an orderly shutdown before forceably killing the process. 
""" assert self._process, 'server was not started' if self._process.poll() is None: try: urllib2.urlopen(self.url + QUIT_PATH) except urllib2.URLError: pass finish_time = time.time() + timeout while time.time() < finish_time and self._process.poll() is None: time.sleep(0.2) if self._process.returncode is None: logging.warning('api_server did not quit cleanly, killing') self._process.kill() class ApiServerDispatcher(request_info._LocalFakeDispatcher): """An api_server Dispatcher implementation.""" def add_request(self, method, relative_url, headers, body, source_ip, server_name=None, version=None, instance_id=None): """Process an HTTP request. Args: method: A str containing the HTTP method of the request. relative_url: A str containing path and query string of the request. headers: A list of (key, value) tuples where key and value are both str. body: A str containing the request body. source_ip: The source ip address for the request. server_name: An optional str containing the server name to service this request. If unset, the request will be dispatched to the default server. version: An optional str containing the version to service this request. If unset, the request will be dispatched to the default version. instance_id: An optional str containing the instance_id of the instance to service this request. If unset, the request will be dispatched to according to the load-balancing for the server and version. Returns: A request_info.ResponseTuple containing the response information for the HTTP request. """ try: header_dict = wsgiref.headers.Headers(headers) connection_host = header_dict.get('host') connection = httplib.HTTPConnection(connection_host) connection.putrequest( method, relative_url, skip_host='host' in header_dict, skip_accept_encoding='accept-encoding' in header_dict) for header_key, header_value in headers: connection.putheader(header_key, header_value) connection.endheaders() connection.send(body) response = connection.getresponse() response.read() response.close() return request_info.ResponseTuple( '%d %s' % (response.status, response.reason), [], '') except (httplib.HTTPException, socket.error): logging.exception( 'An error occured while sending a %s request to "%s%s"', method, connection_host, relative_url) return request_info.ResponseTuple('0', [], '') def main(): logging.basicConfig( level=logging.INFO, format='[API Server] [%(filename)s:%(lineno)d] %(levelname)s %(message)s') args = ParseCommandArguments(sys.argv[1:]) if args.clear_datastore: _ClearDatastoreStorage(args.datastore_path) if args.clear_prospective_search: _ClearProspectiveSearchStorage(args.prospective_search_path) if args.blobstore_path is None: _, blobstore_temp_filename = tempfile.mkstemp(prefix='ae-blobstore') args.blobstore_path = blobstore_temp_filename if args.datastore_path is None: _, datastore_temp_filename = tempfile.mkstemp(prefix='ae-datastore') args.datastore_path = datastore_temp_filename if args.prospective_search_path is None: _, prospective_search_temp_filename = tempfile.mkstemp( prefix='ae-prospective_search') args.prospective_search_path = prospective_search_temp_filename if args.application_host: application_address = args.application_host if args.application_port and args.application_port != 80: application_address += ':' + str(args.application_port) else: application_address = None if not hasattr(args, 'default_gcs_bucket_name'): args.default_gcs_bucket_name = None request_info._local_dispatcher = ApiServerDispatcher() _SetupStubs(app_id=args.application, 
application_root=args.application_root, appidentity_email_address=args.appidentity_email_address, appidentity_private_key_path=args.appidentity_private_key_path, trusted=args.trusted, blobstore_path=args.blobstore_path, datastore_path=args.datastore_path, use_sqlite=args.use_sqlite, auto_id_policy=args.auto_id_policy, high_replication=args.high_replication, datastore_require_indexes=args.require_indexes, images_host_prefix=application_address, logs_path=args.logs_path, mail_smtp_host=args.smtp_host, mail_smtp_port=args.smtp_port, mail_smtp_user=args.smtp_user, mail_smtp_password=<PASSWORD>, mail_enable_sendmail=args.enable_sendmail, mail_show_mail_body=args.show_mail_body, matcher_prospective_search_path=args.prospective_search_path, taskqueue_auto_run_tasks=args.enable_task_running, taskqueue_task_retry_seconds=args.task_retry_seconds, taskqueue_default_http_server=application_address, user_login_url=args.user_login_url, user_logout_url=args.user_logout_url, default_gcs_bucket_name=args.default_gcs_bucket_name) server = APIServer((args.api_host, args.api_port), args.application) try: server.serve_forever() finally: _TearDownStubs() if __name__ == '__main__': try: main() except KeyboardInterrupt: pass ```
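APIServerProcess above wraps launching this script as a child process and blocking until it serves. A hedged usage sketch follows; the import path and the script location are assumptions, and only keyword arguments from the __init__ signature shown above are passed.

```python
# Hedged usage sketch for the APIServerProcess helper defined above. The
# module import path and the script path are assumptions/placeholders.
import sys
from google.appengine.tools.api_server import APIServerProcess

def start_test_api_server(app_id, port=8000):
    """Start the API server for a test run and return the process wrapper."""
    server = APIServerProcess(
        executable=sys.executable,        # Python 2 interpreter for this SDK
        host='localhost',
        port=port,
        app_id=app_id,
        script='api_server.py',           # placeholder path to this module
        clear_datastore=True)             # start from an empty datastore file
    server.Start()
    server.WaitUntilServing(timeout=30.0)  # raises Error if not ready in time
    return server                          # callers should server.Quit() when done
```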
{ "source": "jnfang/pact-python", "score": 3 }
#### File: message/src/message_handler.py ```python class CustomError(Exception): def __init__(self, *args): if args: self.topic = args[0] else: self.topic = None def __str__(self): if self.topic: return 'Custom Error:, {0}'.format(self.topic) class MessageHandler(object): def __init__(self, event): self.pass_event(event) @staticmethod def pass_event(event): if event.get('documentType') != 'microsoft-word': raise CustomError("Not correct document type") ``` #### File: pact-python/pact/verify_wrapper.py ```python from pact.constants import VERIFIER_PATH import sys import os import platform import subprocess from os.path import isdir, join, isfile from os import listdir def capture_logs(process, verbose): """Capture logs from ruby process.""" result = '' for line in process.stdout: result = result + line + '\n' return result def path_exists(path): """ Determine if a particular path exists. Can be provided a URL or local path. URLs always result in a True. Local paths are True only if a file exists at that location. :param path: The path to check. :type path: str :return: True if the path exists and is a file, otherwise False. :rtype: bool """ if path.startswith('http://') or path.startswith('https://'): return True return isfile(path) def sanitize_logs(process, verbose): """ Print the logs from a process while removing Ruby stack traces. :param process: The Ruby pact verifier process. :type process: subprocess.Popen :param verbose: Flag to toggle more verbose logging. :type verbose: bool :rtype: None """ for line in process.stdout: if (not verbose and line.lstrip().startswith('#') and ('vendor/ruby' in line or 'pact-provider-verifier.rb' in line)): continue else: sys.stdout.write(line) def expand_directories(paths): """ Iterate over paths and expand any that are directories into file paths. :param paths: A list of file paths to expand. :type paths: list :return: A list of file paths with any directory paths replaced with the JSON files in those directories. :rtype: list """ paths_ = [] for path in paths: if path.startswith('http://') or path.startswith('https://'): paths_.append(path) elif isdir(path): paths_.extend( [join(path, p) for p in listdir(path) if p.endswith('.json')]) else: paths_.append(path) # Ruby pact verifier expects forward slashes regardless of OS return [p.replace('\\', '/') for p in paths_] def rerun_command(): """ Create a rerun command template for failed interactions. :rtype: str """ is_windows = 'windows' in platform.platform().lower() command = '' if is_windows: command = ( 'cmd.exe /v /c "' 'set PACT_DESCRIPTION=<PACT_DESCRIPTION>' '& set PACT_PROVIDER_STATE=<PACT_PROVIDER_STATE>' '& {command}' ' & set PACT_DESCRIPTION=' ' & set PACT_PROVIDER_STATE="'.format(command=' '.join(sys.argv))) else: command = ("PACT_DESCRIPTION='<PACT_DESCRIPTION>'" " PACT_PROVIDER_STATE='<PACT_PROVIDER_STATE>'" " {command}".format(command=' '.join(sys.argv))) env = os.environ.copy() env['PACT_INTERACTION_RERUN_COMMAND'] = command return env class PactException(Exception): """PactException when input isn't valid. 
Args: Exception ([type]): [description] Raises: KeyError: [description] Exception: [description] Returns: [type]: [description] """ def __init__(self, *args, **kwargs): """Create wrapper.""" super().__init__(*args, **kwargs) self.message = args[0] class VerifyWrapper(object): """A Pact Verifier Wrapper.""" def _broker_present(self, **kwargs): if kwargs.get('broker_url') is None: return False return True def _validate_input(self, pacts, **kwargs): if len(pacts) == 0 and not self._broker_present(**kwargs): raise PactException('Pact urls or Pact broker required') def call_verify( self, *pacts, provider_base_url, provider, enable_pending=False, include_wip_pacts_since=None, **kwargs ): """Call verify method.""" verbose = kwargs.get('verbose', False) self._validate_input(pacts, **kwargs) provider_app_version = kwargs.get('provider_app_version') options = { '--provider-base-url': provider_base_url, '--provider': provider, '--broker-username': kwargs.get('broker_username', None), '--broker-password': kwargs.get('broker_password', None), '--pact-broker-base-url': kwargs.get('broker_url', None), '--provider-states-setup-url': kwargs.get('provider_states_setup_url'), '--log-dir': kwargs.get('log_dir'), '--log-level': kwargs.get('log_level') } command = [VERIFIER_PATH] all_pact_urls = expand_directories(list(pacts)) command.extend(all_pact_urls) command.extend(['{}={}'.format(k, v) for k, v in options.items() if v]) if(provider_app_version): command.extend(["--provider-app-version", provider_app_version]) if(kwargs.get('publish_verification_results', False) is True): command.extend(['--publish-verification-results']) if(kwargs.get('verbose', False) is True): command.extend(['--verbose']) if enable_pending: command.append('--enable-pending') if include_wip_pacts_since: command.extend(['--include-wip-pacts-since={}'.format(include_wip_pacts_since)]) headers = kwargs.get('custom_provider_headers', []) for header in headers: command.extend(['{}={}'.format('--custom-provider-header', header)]) for tag in kwargs.get('consumer_tags', []): command.extend(["--consumer-version-tag={}".format(tag)]) for tag in kwargs.get('consumer_selectors', []): command.extend(["--consumer-version-selector={}".format(tag)]) for tag in kwargs.get('provider_tags', []): command.extend(["--provider-version-tag={}".format(tag)]) env = rerun_command() result = subprocess.Popen(command, bufsize=1, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) sanitize_logs(result, verbose) result.wait() logs = capture_logs(result, verbose) return result.returncode, logs def publish_results(self, provider_app_version, command): """Publish results to broker.""" if not provider_app_version: # todo implement raise Exception('todo') command.extend(["--provider-app-version", provider_app_version, "--publish-verification-results"]) ``` #### File: pact-python/tests/test_message_consumer.py ```python from unittest import TestCase from mock import Mock from pact.message_consumer import MessageConsumer from pact.provider import Provider from pact.message_pact import MessagePact class MessageConsumerTestCase(TestCase): def setUp(self): self.mock_service = Mock(MessagePact) self.provider = Mock(Provider) self.message_consumer = MessageConsumer('TestMessageConsumer', service_cls=self.mock_service) def test_init(self): result = MessageConsumer('TestMessageConsumer') self.assertIsInstance(result, MessageConsumer) self.assertEqual(result.name, 'TestMessageConsumer') self.assertIs(result.service_cls, MessagePact) def 
test_has_pact_with(self): result = self.message_consumer.has_pact_with(self.provider) self.assertIs(result, self.mock_service.return_value) self.mock_service.assert_called_once_with( consumer=self.message_consumer, provider=self.provider, pact_dir=None, version='3.0.0', broker_base_url=None, publish_to_broker=False, broker_username=None, broker_password=<PASSWORD>, broker_token=None, file_write_mode='merge') def test_has_pact_with_customer_all_options(self): result = self.message_consumer.has_pact_with( self.provider, pact_dir='/pacts', version='3.0.0', file_write_mode='merge') self.assertIs(result, self.mock_service.return_value) self.mock_service.assert_called_once_with( consumer=self.message_consumer, provider=self.provider, pact_dir='/pacts', version='3.0.0', broker_base_url=None, publish_to_broker=False, broker_username=None, broker_password=<PASSWORD>, broker_token=<PASSWORD>, file_write_mode='merge') def test_has_pact_with_not_a_provider(self): with self.assertRaises(ValueError): self.message_consumer.has_pact_with(None) ```
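VerifyWrapper.call_verify above assembles the Ruby verifier command line, runs it, and returns the exit code plus captured logs. A minimal sketch of invoking it directly is below; the pact file path and provider details are placeholders, and only parameters accepted by the signature shown above are used.

```python
# Minimal, hedged sketch of driving VerifyWrapper.call_verify shown above.
# The pact file and provider URL are placeholders.
from pact.verify_wrapper import VerifyWrapper

def verify_example_pact():
    return_code, logs = VerifyWrapper().call_verify(
        './pacts/consumer-provider.json',           # placeholder pact file or URL
        provider_base_url='http://localhost:5000',  # placeholder provider under test
        provider='ExampleProvider',
        verbose=True)
    print(logs)
    return return_code == 0
```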
{ "source": "jnferguson/entropyDeviation", "score": 3 }
#### File: entropyDeviation/bin/edfind.py ```python from argparse import ArgumentParser as ap from sys import exit from os import path, access, R_OK import entropyDeviationType def printBlock(idx, block): print "\t\tBN: {0:^5X}\tC: {1:^7.4f}\tS: {2:^7.4f}\tES: {3:^7.4f}\tER: {4:^7.4f}".format(idx, block.chi_square, block.shannon, block.estimate, block.error) return parser = ap(description='Accepted Arguments') parser.add_argument( 'file', help='The input file to scan') parser.add_argument( '--blocksize', '-b', help='The size of the blocks to split the input file into; specified in bytes', default=8192) parser.add_argument( '--blockscore', '-s', help='Whether to print the score of all the blocks or not', action='store_true') parser.add_argument( '--wholescore', '-w', help='Whether to print the whole file score or not', action='store_true') parser.add_argument( '--blockdev', '-d', help='Whether to calculate and print deviations for a given block', action='store_true') parser.add_argument( '--blocknumber', '-n', help='The block number to calculate deviations for', default=None) parser.add_argument( '--wholedev', '-o', help='Whether to calculate --blocknumber N\'s deviation against the whole file score', action='store_true') parser.add_argument( '--xydev', '-y', help='Calculate the deviations of block X from block Y', nargs=2) parser.add_argument( '--seqdev', '-q', help='Sequential deviation; Calculate the deviations of all blocks from its neighbor blocks', action='store_true') parser.add_argument( '--seqxy', '-e', help="The range of blocks to calculate sequential deviation for", nargs=2, default=None) parser.add_argument( '--suspect', '-u', help="Attempt to identify suspect blocks", action='store_true'), parser.add_argument( '--frequency','-f', help='Iterate across all blocks printing the frequency of each byte', action='store_true') parser.add_argument( '--freqcount', '-c', help='Specifies the number of characters with the highest frequency to print', default=None) parser.add_argument( '--freqxy', '-r', help='Print the frequency of --freqcount or all bytes in a range of blocks delimited by X and Y', nargs=2, default=None) parser.add_argument( '--xor', '-x', help='Whether to attempt to find an embedded PE file encrypted with a one-byte XOR key', action='store_true') parser.add_argument( '--xorall', '-a', help='When specified, the XOR table search will be performed for all possible PE files and not' ' just the first one', action='store_true') args = parser.parse_args() if not args.file: print "An input file must be specified\n" parser.print_help() exit(-1) if not path.isfile(args.file): print "The file '%s' does not exist" % args.file exit(-1) if not access(args.file, R_OK): print "The file '%s' is not readable" % args.file exit(-1) if args.blockdev or args.wholedev: if args.blocknumber == None: raise RuntimeError("One or more options where specified that requires --blocknumber to be specified") e = entropyDeviationType.entropyDeviationType(args.blocksize) e.openFile(args.file) xor = entropyDeviationType.xorTableSearchType(512) xor.openFile(args.file) if args.blocknumber: if False == e.isValidBlockNumber(args.blocknumber): raise RuntimeError("A --blocknumber that exceeds the number of blocks was specified") if args.xydev: if False == e.isValidBlockRange(args.xydev[0], args.xydev[1]): raise RuntimeError("A --xydev with a block number that exceeds the number of blocks was specified") if None != e: print "FILE: {0:s} BLOCK COUNT: {1:d} BLOCK SIZE: {2:d}".format(args.file, e.getBlockCount(), 
long(args.blocksize)) if args.blockscore == True: idx = 0 allScores = e.getAllScores() print "\n\tALL SCORES" for s in allScores: printBlock(idx, s) # print "\t\tBN: {0:^5X}\tC: {1:^7.4f}\tS: {2:^7.4f}\tES: {3:^7.4f}\tER: {4:^7.4f}".format(idx, # s.chi_square, s.shannon, # s.estimate, s.error) idx += 1 if args.wholescore == True: ws = e.getWholeFileScore() print "\n\tWHOLE FILE SCORE" print "\t\t\t\tC: {0:^7.4f}\tS: {1:^7.4f}\tES: {2:^7.4f}\tER: {3:^7.4f}".format( ws.chi_square, ws.shannon, ws.estimate, ws.error) if args.xydev != None: bx = long(args.xydev[0], 16) by = long(args.xydev[1], 16) xy = e.getXYDeviation(bx, by) print "\n\tBLOCK {0:^5X} DEVIATION RELATIVE BLOCK {1:^5X}".format(bx, by) print "\t\t\t\tC: {0:^7.4f}\tS: {1:^7.4f}\tES: {2:^7.4f}\tER: {3:^7.4f}".format(xy.chi_square, xy.shannon, xy.estimate, xy.error) if args.blockdev == True: bnum = long(args.blocknumber, 16) idx = 0 print "\n\tBLOCK {0:^5X} DEVIATION RELATIVE ALL BLOCKS".format(bnum) for dev in e.getBlockAllDeviation(bnum): if idx == bnum: idx += 1 printBlock(idx, dev) # print "\t\tBN: {0:^5X}\tC: {1:^7.4f}\tS: {2:^7.4f}\tES: {3:^7.4f}\tER: {4:^7.4f}".format(idx, # dev.chi_square, dev.shannon, # dev.estimate, dev.error) idx += 1 if args.wholedev == True: print "\n\tBLOCK {0:^5X} DEVIATION RELATIVE WHOLE FILE".format(bnum) wd = e.getWholeFileDeviation(bnum) print "\t\t\t\tC: {0:^7.4f}\tS: {1:^7.4f}\tES: {2:^7.4f}\tER: {3:^7.4f}".format(wd.chi_square, wd.shannon, wd.estimate, wd.error) if args.seqdev == True or args.seqxy != None: items = list() if args.seqxy != None: minv = long(args.seqxy[0], 16) maxv = long(args.seqxy[1], 16) else: minv = 0 maxv = e.getBlockCount()-1 items = e.getSequentialDeviation(minv, maxv) print "\n\tSEQUENTIAL DEVIATION FOR BLOCKS [{0:^5X}:{1:^5X}]".format(minv, maxv) for item in items: fidx = 0 sidx = 0 if None == item['prior']: fidx = item['index'] sidx = item['next'] else: fidx = item['prior'] sidx = item['index'] dev = item['dev'] print "\t\tBN [{0:^5X}:{1:^5X}]\tC: {2:^7.4f}\tS: {3:^7.4f}\tES: {4:^7.4f}\tER: {5:^7.4f}".format(fidx, sidx, dev.chi_square, dev.shannon, dev.estimate, dev.error) if True == args.suspect: suspectIndex = 0 suspectMaxIndex = 0 tmp = 0 suspectBlock = None suspectRanges = list() avgs = list() avg = dict() items = e.findHighDeviation(100, 20, 1) print "\n\tSUSPECT BLOCK CHECK (NOT RELIABLE; EXAMPLE IMPLEMENTATION)" for item in items: if None == item['prior']: suspectIndex = item['next'] else: suspectIndex = item['index'] if (0 != suspectIndex): if e.isHighAverageChi(suspectIndex-1, 15): if not e.isValidBlockNumber(suspectIndex+1): print "\n\tWARNING: LAST BLOCK SUSPECT: DEVIATION STATISTICS FOR RANGE POTENTIALLY UNRELIABLE" continue if not e.priorHighAndNextLowShannon(suspectIndex, 20.0, 1.5): continue suspectMaxIndex = e.getSequentialLowShannon(suspectIndex+1) if abs(suspectMaxIndex - suspectIndex) <= 1: continue tmp = e.getSequentialCloseChi(suspectIndex+1, e.getBlockCount()-1) if suspectMaxIndex < tmp: suspectMaxIndex = tmp suspectRanges.append(tuple((suspectIndex, suspectMaxIndex))) suspectRanges = e.coalesceSequential(suspectRanges, 2) for sr in suspectRanges: if sr[1]-sr[0] == 1 and sr[0] == 0: print "\n\tWARNING: FIRST BLOCK SUSPECT: DEVIATION STATISTICS FOR RANGE POTENTIALLY UNRELIABLE" elif sr[1]-sr[0] == 1 and sr[1] == e.getBlockCount()-1: print "\n\tWARNING: LAST BLOCK SUSPECT: DEVIATION STATISTICS FOR RANGE POTENTIALLY UNRELIABLE" print "\n\tBLOCKS [{0:^5X}:{1:^5X}] SUSPECT".format(sr[0], sr[1]) for idx in range(sr[0], sr[1]): printBlock(idx, 
e.getScore(idx)) if True == args.frequency or None != args.freqcount or None != args.freqxy: dis = list() cnt = 256 x = 0 y = e.getBlockCount()-1 if None != args.freqcount: cnt = long(args.freqcount) if None != args.freqxy: x = long(args.freqxy[0], 16) y = long(args.freqxy[1], 16) print "\n\tBYTE FREQUENCY FOR BLOCKS [{0:^5X}:{1:^5X}]".format(x, y) for idx in range(x, y): line = list() plc = 4 diff = 0 dis = e.calculateDistribution(idx, idx+1) if 4 > cnt and 0 != cnt: plc = cnt for dcnt in range(0, cnt+1): if dcnt >= 256: break if dcnt+1 < 256: cntz = float(dis[dcnt].count) cnto = float(dis[dcnt+1].count) if 0 != cnto: diff = (abs(cntz-cnto)/((cntz+cnto)/2))*100.0 if plc == len(line): print "\t\tBN: {0:^5X}".format(idx), for l in line: print "[V: %.2X C:%5X P: %7.2f]" % (l[0], l[1], l[2]), line = list() print "" else: line.append(tuple((dis[dcnt].value, dis[dcnt].count, diff))) if True == args.xor or True == args.xorall: if True == args.xorall: print "\n\tXOR TABLE SEARCH ALL" try: for pe in xor.findAll(): print "\t\tOFFSET: {0:^5X} ({1:^5X})\tKEY: {2:^2X}".format(pe.offset, pe.offset/args.blocksize, pe.key) except UserWarning as e: print "\t\t%s" % e else: print "\n\tXOR TABLE SEARCH FIRST" try: pe = xor.findFirst() print "\t\tOFFSET: {0:^5X} ({1:^5X})\tKEY: {2:^2X}".format(pe.offset, long(pe.offset)/long(args.blocksize), pe.key) except UserWarning as e: print "\t\t%s" % e ``` #### File: jnferguson/entropyDeviation/setup.py ```python import sys, os, platform from distutils.core import setup from distutils.command.sdist import sdist from distutils.extension import Extension cxx_compile_flags = [] def read(fname): return open(os.path.join('.', fname)).read() def check_deps(): if sys.version_info[:2] < (2,7): raise RuntimeError("This module has not been tested on versions of Python earlier than 2.7") if sys.version_info[:2] > (3,0): print "This module has not been tested on Python 3.x, it may/may not work" def add_cxxflags(): if sys.version.find('GCC') > 0: cxx_compile_flags.append('-std=c++11') if platform.python_compiler()[4:5] < '4'or platform.python_compiler()[6:7] < '6': raise RuntimeError("This module requires C++11 support with among other things " "nullptr support (GCC 4.6.x). 
Please upgrade GCC") check_deps() add_cxxflags() setup( name='entDevType', version='0.1.1', description='A module for calculating the entropy/entropic deviations in data', long_description=read('README.txt'), author='<NAME>', author_email='<EMAIL>', url='https://github.com/jnferguson/entropyDeviation/', license='OSI Approved :: BSD License', platforms=[ 'POSIX', 'POSIX :: Linux' ], classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Programming Language :: C++', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis', 'Topic :: Security', 'Topic :: Security :: Cryptography' 'Topic :: Software Development :: Python Modules', 'Topic :: Utilities' ], packages=['entropyDeviationType'], ext_modules=[ Extension("entDevType", [ 'src/entropy.cpp', 'src/entropy_wrapper.cpp', 'src/xor_table.cpp', 'src/entropy_deviation.cpp', 'src/xor_table_wrapper.cpp', # 'src/key_sizer_recover.cpp', ], language='c++', extra_compile_args=cxx_compile_flags, libraries = ["boost_python"]) ], scripts=['bin/edfind.py']) ```
{ "source": "jnferner/programming-for-biology", "score": 3 }
#### File: programming_for_biology/basics_and_branches/gene_mapping.py
```python
import numpy as np


def _validate_recombination_frequency(recombination_frequency: float):
    if recombination_frequency < 0.0 or recombination_frequency >= 0.5:
        raise ValueError(
            f"recombination_frequency must be in the range [0.0, 0.5), but is {recombination_frequency}"
        )


def get_map_distance(recombination_frequency: float) -> float:
    """
    Returns the distance in cM between two genes.
    """
    _validate_recombination_frequency(recombination_frequency)
    return -0.5 * np.log(1 - 2 * recombination_frequency)


if __name__ == "__main__":
    map_distance = get_map_distance(0.115)
    print(map_distance)
```
#### File: programming_for_biology/bioinformatics/dictionaries_and_speed.py
```python
import numpy.random as rd
import time


def random_list(length):
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    l = []
    for i in range(length):
        s = ""
        for j in range(5):
            s += alphabet[rd.randint(0, 24)]
        l.append(s)
    return l


def element_to_index(list_):
    dictionary = {}
    for i, element in enumerate(list_):
        if element not in dictionary:
            dictionary[element] = [i]
        else:
            dictionary[element].append(i)
    return dictionary


if __name__ == "__main__":
    rd.seed(0)
    l1 = random_list(10000)
    l2 = random_list(10000)

    time1 = time.time()
    common = []
    for fruit in l1:
        if fruit in l2:
            if fruit not in common:
                common.append(fruit)
    time2 = time.time()
    print(common)
    print("time spent on list part:", time2 - time1)

    time3 = time.time()
    d1 = element_to_index(l1)
    d2 = element_to_index(l2)
    # time3 = time.time()
    common = []
    for fruit in d1:
        if fruit in d2:
            if fruit not in common:
                common.append(fruit)
    print(common)
    print("time spent on dictionary part:", time.time() - time3)
    print(d1["oodms"])
```
#### File: programming_for_biology/complex_loops_b/tossing_coins.py
```python
import numpy.random as rd


def generate_coin_tosses():
    """
    Tosses a coin and returns 0 for heads and 1 for tails.
    """
    while True:
        yield rd.randint(0, 2)


def count_consecutives(tosses):
    """
    Counts the number of consecutive heads or tails.
    """
    consecutive_tosses = 1
    max_consecutive_tosses = 1
    for i in range(0, len(tosses) - 1):
        if tosses[i] == tosses[i + 1]:
            consecutive_tosses += 1
            max_consecutive_tosses = max(max_consecutive_tosses, consecutive_tosses)
        else:
            consecutive_tosses = 1
    return max_consecutive_tosses


def toss_until_we_get_consecutives(consecutive_tosses):
    """
    Tosses a coin until we get the number of consecutive tosses we want.
    """
    consecutive_tosses_so_far = 1
    last_toss = None
    for i, toss in enumerate(generate_coin_tosses()):
        if consecutive_tosses_so_far == consecutive_tosses:
            return i
        if last_toss == toss:
            consecutive_tosses_so_far += 1
        else:
            consecutive_tosses_so_far = 1
        last_toss = toss


if __name__ == "__main__":
    rd.seed(0)
    consecutive_tosses = 8
    rounds = toss_until_we_get_consecutives(consecutive_tosses)
    print(f"Got {consecutive_tosses} consecutive tosses in {rounds} rounds.")
```
#### File: programming_for_biology/complex_loops/rolling_dice.py
```python
import numpy.random as rd


def roll_dice(n):
    """
    Simulate rolling n dice.
    """
    return rd.randint(1, 7, n)


def count_sixes(results):
    """
    Count the number of sixes in a list of integers.
    """
    return sum(1 for roll in results if roll == 6)


def simulate_dice_game(rolls_per_round, rounds):
    """
    Simulate rolling dice and counting sixes.
    """
    return [count_sixes(roll_dice(rolls_per_round)) for _ in range(rounds)]


if __name__ == "__main__":
    rd.seed(141)
    rolls_per_round = 30
    rounds = 10
    results = simulate_dice_game(rolls_per_round, rounds)
    print(
        f"After rolling a die {rolls_per_round} times in {rounds} game rounds, we got the following results:"
    )
    for i, result in enumerate(results):
        print(f"Round {i + 1}: {result} sixes")
```
#### File: programming_for_biology/dictionaries_and_strings/rewrite_strings.py
```python
def change_case(s):
    return "".join(letter.upper() if i % 10 < 5 else letter.lower() for i, letter in enumerate(s))


if __name__ == "__main__":
    s = "<KEY>"
    changed_case = change_case(s)
    length = len(changed_case)
    print(f"{changed_case}{length: 10}")
```
#### File: programming_for_biology/for_loops/calculation_with_two_lists.py
```python
def get_shifted_product(first_list: list, second_list: list) -> list:
    """
    Returns a list of the product of each element in the first list
    with each element in the second list shifted by one index.
    """
    shifted_second_list = second_list[1:]
    return [a * b for a, b in zip(first_list, shifted_second_list)]


if __name__ == "__main__":
    first_list = [7, 3, 2, 5, 1, 4, 4, 6, 2, 9, 1, 6, 3, 2, 6, 5, 5]
    second_list = [8, 9, 8, 9, 6, 4, 5, 5, 8, 2, 4, 3, 1, 6, 5, 6, 5]
    shifted_product = get_shifted_product(first_list, second_list)
    print(sum(shifted_product))
```
#### File: programming_for_biology/for_loops/product.py
```python
from functools import reduce
from operator import mul


def get_product(numbers: list) -> int:
    return reduce(mul, numbers, 1)


def get_product_from_needle_on(numbers: list, needle: int) -> int:
    index = numbers.index(needle)
    return get_product(numbers[index:])


if __name__ == "__main__":
    numbers = [5, 7, 2, 9, 8, 9, 3, 4, 2, 3, 2, 7, 7, 5]
    print(f"Product: {get_product(numbers)}")
    print(f"Product from 3 on: {get_product_from_needle_on(numbers, 3)}")
```
#### File: programming_for_biology/lists_and_tuples/reader.py
```python
from pathlib import Path
from os.path import join


def _get_code_dir():
    return Path(__file__).parent.absolute()


if __name__ == '__main__':
    with open(join(_get_code_dir(), 'data', 'thompson.txt')) as file:
        lines = file.readlines()
    print(lines[89][:48])
```
#### File: programming_for_biology/revision/factorial.py
```python
from functools import reduce


def factorial(n: int):
    return reduce(lambda x, y: x * y, range(1, n + 1), 1)


if __name__ == "__main__":
    print(factorial(19))
```
{ "source": "jnfran92/adaptive-boxes", "score": 2 }
#### File: adaptive-boxes/proto/adabox_for_cuda_proto_improved.py ```python import sys import time import numpy as np import matplotlib.pyplot as plt from multiprocessing import Pool # if len(sys.argv) < 3: # print('ERROR Args number. Needed: \n[1]In Path(with file.npy) -- prepros file \n[2]Out Path(with .json)') # sys.exit() # # # in_path = str(sys.argv[1]) # out_path = str(sys.argv[2]) in_path = '/Users/Juan/django_projects/adaptive-boxes/data_binary/squares.binary' out_path = '' data_matrix = np.loadtxt(in_path, delimiter=",") data_matrix[:,0] = 1 # Plot fig = plt.figure(figsize=(6, 3.2)) ax = fig.add_subplot(111) plt.imshow(data_matrix) ax.set_aspect('equal') # Flatten Matrix data_matrix_f = data_matrix.flatten() # Kernel Data dim3_block_x = data_matrix.shape[1] dim3_block_y = data_matrix.shape[0] block_dim_y = dim3_block_y block_dim_x = dim3_block_x # KERNEL # Kernel non-editable - they go in for-loop block_idx_x = 0 block_idx_y = 0 thread_idx_x = 0 thread_idx_y = 0 # Kernel editable # Params distances = np.zeros(shape=[data_matrix_f.shape[0]]) # Could be stored in Cache- Shared Memory idx_i = 7 # y rand point idx_j = 13 # x rand point plt.scatter(idx_j, idx_i, c='r') m = data_matrix.shape[0] n = data_matrix.shape[1] # br ---- for i in range(idx_i, m): temp_value = data_matrix_f[i * n + idx_j] if temp_value == 0: i = i - 1 break else: plt.scatter(idx_j, i, c='g', marker='x') d0 = i for j in range(idx_j + 1, n): for i in range(idx_i, d0 + 1): # print(str(j) + ' ' + str(i)) temp_value = data_matrix_f[i * n + j] if temp_value == 0: i = i - 1 break else: plt.scatter(j, i, c='b', marker='x') if i < d0: j = j - 1 break # bl ---- for i in range(idx_i, m): temp_value = data_matrix_f[i * n + idx_j] if temp_value == 0: i = i - 1 break else: plt.scatter(idx_j, i, c='g', marker='x') d0 = i for j in range(idx_j - 1, -1, -1): for i in range(idx_i, d0 + 1): # print(str(j) + ' ' + str(i)) temp_value = data_matrix_f[i * n + j] if temp_value == 0: i = i - 1 break else: plt.scatter(j, i, c='b', marker='x') if i < d0: j = j + 1 break # tl ---- for i in range(idx_i, -1, -1): temp_value = data_matrix_f[i * n + idx_j] if temp_value == 0: i = i + 1 break else: plt.scatter(idx_j, i, c='g', marker='x') d0 = i for j in range(idx_j - 1, -1, -1): for i in range(idx_i, d0 - 1, -1): # print(str(j) + ' ' + str(i)) temp_value = data_matrix_f[i * n + j] if temp_value == 0: i = i + 1 break else: plt.scatter(j, i, c='b', marker='x') if i > d0: j = j + 1 break # tr ---- for i in range(idx_i, -1, -1): temp_value = data_matrix_f[i * n + idx_j] if temp_value == 0: i = i + 1 break else: plt.scatter(idx_j, i, c='g', marker='x') d0 = i for j in range(idx_j + 1, n): for i in range(idx_i, d0 -1, - 1): # print(str(j) + ' ' + str(i)) temp_value = data_matrix_f[i * n + j] if temp_value == 0: i = i + 1 break else: plt.scatter(j, i, c='b', marker='x') if i > d0: j = j - 1 break # plt.scatter(j, idx_i_arg, c='g', marker='x') # plt.scatter(j, idx_i_arg + first_step_i - 1, c='g', marker='x') # Run Kernel for thread_idx_y in range(block_dim_y): for thread_idx_x in range(block_dim_x): # print('running threadId.x: ' + str(thread_idx_x) + ' threadId.y: ' + str(thread_idx_y)) i = thread_idx_y j = thread_idx_x g_i = block_dim_y * block_idx_y + i g_j = block_dim_x * block_idx_x + j m = block_dim_y n = block_dim_x plt.scatter(j, i, c='b', marker='x') val_in_b = data_matrix_f[n * i + j] val_in_a = data_matrix_f[n * i + idx_j] distance_j = (j - idx_j) * val_in_b * val_in_a distance_i = (i - idx_i) * val_in_b * val_in_a print('i: ' + str(i) + ' j: 
' + str(j) + ' distance ' + str(distance_j)) # if distance_j > 0: distances[i * n + j] = distance_j # distances[i * n + j] = distance_j # if j == idx_j: # distances[i * n + j] = distance_j + distance_i print(distances.reshape([m, n])) distances_matrix = distances.reshape([m, n]) # Break # Get min distance in left - Atomic can be used(In this case: min() function) distances_matrix = distances.reshape([m, n]) idx_d = 1 distances_matrix[idx_d, :].max() distances_matrix[idx_d, :].min() for thread_idx_y in range(block_dim_y): for thread_idx_x in range(block_dim_x): # print('running threadId.x: ' + str(thread_idx_x) + ' threadId.y: ' + str(thread_idx_y)) i = thread_idx_y j = thread_idx_x g_i = block_dim_y * block_idx_y + i g_j = block_dim_x * block_idx_x + j m = block_dim_y n = block_dim_x if (j == 0): distances[i * n + 0: i * n + m] def get_right_bottom_rectangle(idx_i_arg, idx_j_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg + step_j if j == n: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg + step_i if i == m: break # print(i) temp_val = data_matrix[i, j] # print(temp_val) # plt.scatter(j, i, c='g', marker='x') if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break plt.scatter(j, idx_i_arg, c='g', marker='x') plt.scatter(j, idx_i_arg + first_step_i - 1, c='g', marker='x') x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg + step_j - 1 y2_val = idx_i_arg + first_step_i - 1 return x1_val, x2_val, y1_val, y2_val def get_left_bottom_rectangle(idx_i_arg, idx_j_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg - step_j if j == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg + step_i if i == m: break # print(i) temp_val = data_matrix[i, j] # print(temp_val) # plt.scatter(j, i, c='g', marker='x') if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break plt.scatter(j, idx_i_arg, c='g', marker='x') plt.scatter(j, idx_i_arg + first_step_i - 1, c='b', marker='x') step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg - step_j + 1 y2_val = idx_i_arg + first_step_i - 1 return x1_val, x2_val, y1_val, y2_val def get_left_top_rectangle(idx_i_arg, idx_j_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg - step_j if j == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg - step_i if i == -1: break # print(i) temp_val = data_matrix[i, j] # print(temp_val) # plt.scatter(j, i, c='g', marker='x') if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break plt.scatter(j, idx_i_arg, c='g', marker='x') plt.scatter(j, idx_i_arg - first_step_i + 1, c='b', marker='x') step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg - step_j + 1 y2_val = idx_i_arg - first_step_i + 1 return x1_val, x2_val, y1_val, y2_val def get_right_top_rectangle(idx_i_arg, idx_j_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg + step_j if j == n: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg - step_i if i == -1: break # print(i) temp_val = data_matrix[i, j] # print(temp_val) # plt.scatter(j, i, c='g', marker='x') if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break plt.scatter(j, idx_i_arg, c='g', 
marker='x') plt.scatter(j, idx_i_arg - first_step_i + 1, c='g', marker='x') step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg + step_j - 1 y2_val = idx_i_arg - first_step_i + 1 return x1_val, x2_val, y1_val, y2_val # Plot fig = plt.figure(figsize=(6, 3.2)) ax = fig.add_subplot(111) plt.imshow(data_matrix) ax.set_aspect('equal') m = data_matrix.shape[0] # for i n = data_matrix.shape[1] # for j for i_n in range(m): for j_n in range(n): if data_matrix[i_n, j_n] == 1: plt.scatter(j_n, i_n, c='w', marker='.') idx_i = 10 # y rand point idx_j = 1 # x rand point plt.scatter(idx_j, idx_i, c='r') coords = np.zeros(shape=[4, 4]) # 4 threads: [right-bottom right_top , left-bt, left-tp], 4 coords: [x1 x2 y1 y2] x1, x2, y1, y2 = get_right_bottom_rectangle(idx_i, idx_j) coords[0, :] = np.array([x1, x2, y1, y2]) p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], c='w') x1, x2, y1, y2 = get_right_top_rectangle(idx_i, idx_j) coords[1, :] = np.array([x1, x2, y1, y2]) p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], c='w') x1, x2, y1, y2 = get_left_bottom_rectangle(idx_i, idx_j) coords[2, :] = np.array([x1, x2, y1, y2]) p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], c='w') x1, x2, y1, y2 = get_left_top_rectangle(idx_i, idx_j) coords[3, :] = np.array([x1, x2, y1, y2]) p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], c='w') # coords[] pr = coords[[0, 1], 1].min() pl = coords[[2, 3], 1].max() pb = coords[[0, 2], 3].min() pt = coords[[1, 3], 3].max() # final x1x2 and y1y2 x1 = pl x2 = pr y1 = pt y2 = pb plt.scatter(x1, y1, c='r') plt.scatter(x2, y2, c='b') p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], c='r') data_matrix[y1:y2 + 1, x1:x2 + 1] = 0 ``` #### File: adaptive-boxes/proto/adabox_for_cuda.py ```python import time import numpy as np from adabox.tools import Rectangle, save_to_json def get_right_bottom_rectangle(idx_i_arg, idx_j_arg, n_arg, m_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg + step_j if j == n_arg: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg + step_i if i == m_arg: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg + step_j - 1 y2_val = idx_i_arg + first_step_i - 1 return x1_val, x2_val, y1_val, y2_val def get_left_bottom_rectangle(idx_i_arg, idx_j_arg, m_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg - step_j if j == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg + step_i if i == m_arg: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg - step_j + 1 y2_val = idx_i_arg + first_step_i - 1 return x1_val, x2_val, y1_val, y2_val def 
get_left_top_rectangle(idx_i_arg, idx_j_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg - step_j if j == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg - step_i if i == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg - step_j + 1 y2_val = idx_i_arg - first_step_i + 1 return x1_val, x2_val, y1_val, y2_val def get_right_top_rectangle(idx_i_arg, idx_j_arg, n_arg): step_j = 0 first_step_i = 0 while True: i = idx_i_arg j = idx_j_arg + step_j if j == n_arg: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i = 0 while True: i = idx_i_arg - step_i if i == -1: break temp_val = data_matrix[i, j] if temp_val == 0: break step_i += 1 if step_j == 0: first_step_i = step_i else: if step_i < first_step_i: break step_j += 1 x1_val = idx_j_arg y1_val = idx_i_arg x2_val = idx_j_arg + step_j - 1 y2_val = idx_i_arg - first_step_i + 1 return x1_val, x2_val, y1_val, y2_val in_path = '/Users/Juan/django_projects/adaptive-boxes/data_prepros/boston12.binary' out_path = '/Users/Juan/django_projects/adaptive-boxes/results/boston12.json' start = time.time() data_matrix = np.loadtxt(in_path, delimiter=",") end = time.time() print('Loaded Data!') print('Elapsed time: ' + str(end - start)) # Kernel Data dim3_block_x = 1 dim3_block_y = 1 block_dim_y = 1 block_dim_x = 1 # KERNEL # Kernel non-editable - they go in for-loop block_idx_x = 0 block_idx_y = 0 thread_idx_x = 0 thread_idx_y = 0 # Kernel editable # Params # 4 threads: [right-bottom right_top , left-bt, left-tp], 4 coords: [x1 x2 y1 y2] coords = np.zeros(shape=[4, 4]) # Could be stored in Cache- Shared Memory idx_i = 1 # y rand point idx_j = 1 # x rand point n = data_matrix.shape[1] # for j m = data_matrix.shape[0] # for i recs = [] stop_flag = False print('Doing the Decomposition') start = time.time() while not stop_flag: ones_counter = (data_matrix == 1).sum() # print(ones_counter) if ones_counter == 0: print("End!") break # get random Point whs_one = np.where(data_matrix == 1) whs_one_len = whs_one[0].shape[0] rand_num = int(np.random.rand()*whs_one_len) idx_i = whs_one[0][rand_num] # y rand point idx_j = whs_one[1][rand_num] # x rand point # Decompositions x1, x2, y1, y2 = get_right_bottom_rectangle(idx_i, idx_j, n, m) coords[0, :] = np.array([x1, x2, y1, y2]) x1, x2, y1, y2 = get_right_top_rectangle(idx_i, idx_j, n) coords[1, :] = np.array([x1, x2, y1, y2]) x1, x2, y1, y2 = get_left_bottom_rectangle(idx_i, idx_j, m) coords[2, :] = np.array([x1, x2, y1, y2]) x1, x2, y1, y2 = get_left_top_rectangle(idx_i, idx_j) coords[3, :] = np.array([x1, x2, y1, y2]) # coords[] pr = coords[[0, 1], 1].min() pl = coords[[2, 3], 1].max() pb = coords[[0, 2], 3].min() pt = coords[[1, 3], 3].max() # final x1x2 and y1y2 x1 = int(pl) x2 = int(pr) y1 = int(pt) y2 = int(pb) # write data recs.append(Rectangle(x1, x2, y1, y2)) data_matrix[y1:y2+1, x1:x2+1] = 0 end = time.time() print('Work Finished!!!') print('Elapsed time: ' + str(end - start)) # Save best data set best_set = recs array_to_save = np.zeros(shape=[len(best_set), 4]) for x in range(len(best_set)): array_to_save[x, 0] = best_set[x].x1 array_to_save[x, 1] = best_set[x].x2 array_to_save[x, 2] = best_set[x].y1 array_to_save[x, 3] = best_set[x].y2 save_to_json(out_path, array_to_save, 1) # # # Plot # plot_rectangles(recs, 1) # plt.show() # # fig = plt.figure() # ax = 
fig.add_subplot(111) # plt.imshow(data_matrix) # ax.set_aspect('equal') ``` #### File: adaptive-boxes/proto/preopros_group_partitions.py ```python import matplotlib.pyplot as plt import pandas as pd from adabox.tools import * import numpy as np import matplotlib.colors as colors colors_list = list(colors._colors_full_map.values()) # Returns array -> x1 x2 y1 y2 is_checked? gi gj (g:groups) and Summary [group_id, n_elements, diff_y, diff_x] def create_groups(json_data_arg, sep_value_arg): data_shape_val = json_data_arg.shape data_prepros_val = np.zeros(shape=[data_shape_val[0], data_shape_val[1] + 5]) # data_prepros: 0-3(x,y,z) 4(is checked?) 5(area) 6(ratio) 7(g_i) 8(g_j) sep = sep_value_arg / 2 for i_d in range(len(json_data_arg)): data_prepros_val[i_d][0] = json_data_arg[i_d][0] - sep data_prepros_val[i_d][1] = json_data_arg[i_d][1] + sep data_prepros_val[i_d][2] = json_data_arg[i_d][2] - sep data_prepros_val[i_d][3] = json_data_arg[i_d][3] + sep data_prepros_val[i_d][4] = 0 # (is checked?) init in False # area (x2-x1) * (y2-y1) diff_x = abs(data_prepros_val[i_d][1] - data_prepros_val[i_d][0]) diff_y = abs(data_prepros_val[i_d][3] - data_prepros_val[i_d][2]) area = diff_x * diff_y # ratio (x2-x1) / (y2-y1) ratio = diff_x / diff_y data_prepros_val[i_d][5] = np.round(area, decimals=4) # area data_prepros_val[i_d][6] = np.round(ratio, decimals=4) # ratio # Init groups data_prepros_pd = pd.DataFrame(data_prepros_val) data_prepros_pd.sort_values(by=5) data_groups = data_prepros_pd.groupby(by=5) gi_counter = 0 summary_val = [] for g in data_groups: # print('-> ' + str(g[0])) g_data = g[1] g_data_groups = g_data.groupby(by=6) for g_d in g_data_groups: # print('----> ' + str(g_d[0])) # print('--------------> ' + str(gi_counter)) g_data_data = g_d[1] indexes = np.array(g_data_data.index) data_prepros_val[indexes, 7] = gi_counter data_prepros_val[indexes, 8] = list(range(len(indexes))) diff_x = abs(data_prepros_val[indexes[0], 1] - data_prepros_val[indexes[0], 0]) diff_y = abs(data_prepros_val[indexes[0], 3] - data_prepros_val[indexes[0], 2]) summary_val.append([gi_counter, len(indexes), diff_y, diff_x]) gi_counter = gi_counter + 1 result_data = data_prepros_val[:, [0, 1, 2, 3, 4, 7, 8]] return result_data, summary_val file_name = 'best_set_50' path_base = '/Users/Juan/django_projects/py-ard/heuristic/results' json_data = load_from_json(path_base + '/' + file_name + '.json') data = np.array(json_data['data']) sep_value = float(json_data['sep_value']) data_prepros, summary = create_groups(data, sep_value) # Plot Rectangles by groups plt.figure() for rec in data_prepros: x1 = rec[0] x2 = rec[1] y1 = rec[2] y2 = rec[3] p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], color=colors_list[int(rec[5])]) # # data_shape = data.shape # data_prepros = np.zeros(shape=[data_shape[0], data_shape[1] + 5]) # # # data_prepros: 0-3(x,y,z) 4(is checked?) 5(area) 6(ratio) 7(g_i) 8(g_j) # # sep = sep_value / 2 # for i_d in range(len(data)): # data_prepros[i_d][0] = data[i_d][0] - sep # data_prepros[i_d][1] = data[i_d][1] + sep # data_prepros[i_d][2] = data[i_d][2] - sep # data_prepros[i_d][3] = data[i_d][3] + sep # # data_prepros[i_d][4] = 0 # (is checked?) 
init in False # # # area (x2-x1) * (y2-y1) # diff_x = abs(data_prepros[i_d][1] - data_prepros[i_d][0]) # diff_y = abs(data_prepros[i_d][3] - data_prepros[i_d][2]) # # area = diff_x * diff_y # # # ratio (x2-x1) / (y2-y1) # ratio = diff_x / diff_y # # data_prepros[i_d][5] = np.round(area, decimals=4) # area # data_prepros[i_d][6] = np.round(ratio, decimals=4) # ratio # # # Init groups # data_prepros_pd = pd.DataFrame(data_prepros) # data_prepros_pd.sort_values(by=5) # data_groups = data_prepros_pd.groupby(by=5) # # gi_counter = 0 # for g in data_groups: # print('-> ' + str(g[0])) # g_data = g[1] # g_data_groups = g_data.groupby(by=6) # for g_d in g_data_groups: # print('----> ' + str(g_d[0])) # print('--------------> ' + str(gi_counter)) # g_data_data = g_d[1] # # indexes = np.array(g_data_data.index) # data_prepros[indexes, 7] = gi_counter # data_prepros[indexes, 8] = list(range(len(indexes))) # # gi_counter = gi_counter + 1 # # # # result_data = data_prepros[:,[0,1,2,3,7,8]] # # # Plot Rectangles by groups # g_id = 0.30691722402116284 # print(g_id) # data_subgroup = np.array(data_groups.get_group(g_id)) # for rec in data_subgroup: # x1 = rec[0] # x2 = rec[1] # y1 = rec[2] # y2 = rec[3] # # p1 = np.array([x1, y1]) # p2 = np.array([x1, y2]) # p3 = np.array([x2, y1]) # p4 = np.array([x2, y2]) # # ps = np.array([p1, p2, p4, p3, p1]) # plt.plot(ps[:, 0], ps[:, 1], color='y') # # # # # Plot Rectangles by groups # plt.figure() # color_counter = 0 # for data_group in data_groups: # print('group: ' + str(data_group[0])) # data_subgroup = np.array(data_group[1]) # for rec in data_subgroup: # x1 = rec[0] # x2 = rec[1] # y1 = rec[2] # y2 = rec[3] # # p1 = np.array([x1, y1]) # p2 = np.array([x1, y2]) # p3 = np.array([x2, y1]) # p4 = np.array([x2, y2]) # # ps = np.array([p1, p2, p4, p3, p1]) # plt.plot(ps[:, 0], ps[:, 1], color=colors_list[color_counter]) # # color_counter = color_counter + 1 # # Plot All Rectangles plt.figure() for rec in data_prepros: x1 = rec[0] x2 = rec[1] y1 = rec[2] y2 = rec[3] p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1]) ``` #### File: adaptive-boxes/proto/prepros_interfaces.py ```python import matplotlib.colors as colors import matplotlib.pyplot as plt from adabox.tools import * colors_list = list(colors._colors_full_map.values()) # Returns array -> x1 x2 y1 y2 is_checked? gi gj (g:groups) and Summary [group_id, n_elements, diff_y, diff_x] def create_groups(json_data_arg, sep_value_arg): data_shape_val = json_data_arg.shape data_prepros_val = np.zeros(shape=[data_shape_val[0], data_shape_val[1] + 5]) # data_prepros: 0-3(x,y,z) 4(is checked?) 5(area) 6(ratio) 7(g_i) 8(g_j) sep = sep_value_arg / 2 for i_d in range(len(json_data_arg)): data_prepros_val[i_d][0] = json_data_arg[i_d][0] - sep data_prepros_val[i_d][1] = json_data_arg[i_d][1] + sep data_prepros_val[i_d][2] = json_data_arg[i_d][2] - sep data_prepros_val[i_d][3] = json_data_arg[i_d][3] + sep data_prepros_val[i_d][4] = 0 # (is checked?) 
init in False # area (x2-x1) * (y2-y1) diff_x = abs(data_prepros_val[i_d][1] - data_prepros_val[i_d][0]) diff_y = abs(data_prepros_val[i_d][3] - data_prepros_val[i_d][2]) area = diff_x * diff_y # ratio (x2-x1) / (y2-y1) ratio = diff_x / diff_y data_prepros_val[i_d][5] = np.round(area, decimals=4) # area data_prepros_val[i_d][6] = np.round(ratio, decimals=4) # ratio # Init groups data_prepros_pd = pd.DataFrame(data_prepros_val) data_prepros_pd.sort_values(by=5) data_groups = data_prepros_pd.groupby(by=5) gi_counter = 0 summary_val = [] for g in data_groups: # print('-> ' + str(g[0])) g_data = g[1] g_data_groups = g_data.groupby(by=6) for g_d in g_data_groups: # print('----> ' + str(g_d[0])) # print('--------------> ' + str(gi_counter)) g_data_data = g_d[1] indexes = np.array(g_data_data.index) data_prepros_val[indexes, 7] = gi_counter data_prepros_val[indexes, 8] = list(range(len(indexes))) diff_x = abs(data_prepros_val[indexes[0], 1] - data_prepros_val[indexes[0], 0]) diff_y = abs(data_prepros_val[indexes[0], 3] - data_prepros_val[indexes[0], 2]) summary_val.append([gi_counter, len(indexes), diff_y, diff_x]) gi_counter = gi_counter + 1 result_data = data_prepros_val[:, [0, 1, 2, 3, 4, 7, 8]] return result_data, summary_val def search_lr_interfaces(data_prepros_arg, n_partition_arg, sep_value_arg, n_split_sep_value_arg, error_val_arg): sep_value_split_arg = sep_value_arg / n_split_sep_value_arg # greater than 2 test_partition_arg = data_prepros_arg[n_partition_arg] error_sep_value = error_val_arg * sep_value_arg error_sep_value_sup = 0 + error_sep_value error_sep_value_inf = 0 - error_sep_value x_units_val = [] # Iter 2 times because of x1 and x2 to find en left and right side of the partition for it in range(2): if it == 0: side_temp = 'Left' else: side_temp = 'Right' data_prepros_x = data_prepros_arg[:, 1 - it] common_x_pos_arg = test_partition_arg[0 + it] diffs = data_prepros_x - common_x_pos_arg diffs_condition = (abs(diffs) <= error_sep_value_sup) & (abs(diffs) >= error_sep_value_inf) location = np.where(diffs_condition) partition_pos = location[0] for p in partition_pos: # print(p) temp_partition = data_prepros_arg[p] is_partition_checked = temp_partition[4] if is_partition_checked == 0: y1_temp = np.array([test_partition_arg[2], temp_partition[2]]).max() y2_temp = np.array([test_partition_arg[3], temp_partition[3]]).min() plt.scatter(common_x_pos_arg, y1_temp, marker='x') plt.scatter(common_x_pos_arg, y2_temp, marker='x') # test if range is inside both partitions condition_1 = (y1_temp >= temp_partition[2]) & (y2_temp <= temp_partition[3]) condition_2 = (y1_temp >= test_partition_arg[2]) & (y2_temp <= test_partition_arg[3]) if condition_1 & condition_2: # index_distance = int(abs(y1_temp - y2_temp)/sep_value_split) # temp temp_list = [] index_count_aux = int(np.round(abs(temp_partition[3] - temp_partition[2]) / sep_value_split_arg)) for s in range(index_count_aux): # print(s) val = temp_partition[3] - sep_value_split_arg / 2 - s * sep_value_split_arg # plt.scatter(test_partition[1] + sep_value_split, val, marker='x', c='b') if (val > y1_temp) & (val < y2_temp): plt.scatter(common_x_pos_arg + sep_value_split_arg / 2, val, marker='_', c='b') print('Partition ' + str(temp_partition[5]) + ' ' + str(temp_partition[6]) + ' index: ' + str(s)) temp_list.append([temp_partition[5], temp_partition[6], s]) # test test_list = [] index_count_aux = int(np.round(abs(test_partition_arg[3] - test_partition_arg[2]) / sep_value_split_arg)) for s in range(index_count_aux): val = test_partition_arg[3] - 
sep_value_split_arg / 2 - s * sep_value_split_arg # plt.scatter(test_partition[1] - sep_value_split, val, marker='x', c='r') if (val > y1_temp) & (val < y2_temp): plt.scatter(common_x_pos_arg - sep_value_split_arg / 2, val, marker='_', c='r') print('Partition ' + str(test_partition_arg[5]) + ' ' + str(test_partition_arg[6]) + ' index: ' + str(s)) test_list.append([test_partition_arg[5], test_partition_arg[6], s]) for l in range(len(temp_list)): if it == 0: # Left l_part = temp_list[l] r_part = test_list[l] else: # Right l_part = test_list[l] r_part = temp_list[l] gi_l = int(l_part[0]) gj_l = int(l_part[1]) gl_index = int(l_part[2]) gi_r = int(r_part[0]) gj_r = int(r_part[1]) gr_index = int(r_part[2]) x_units_val.append(InterfaceUnit(((gi_l, gj_l), (gi_r, gj_r)), (gl_index, gr_index))) return x_units_val def search_ud_interfaces(data_prepros_arg, n_partition_arg, sep_value_arg, n_split_sep_value_arg, error_val_arg): sep_value_split_arg = sep_value_arg / n_split_sep_value_arg # greater than 2 test_partition_arg = data_prepros_arg[n_partition_arg] error_sep_value = error_val_arg * sep_value_arg error_sep_value_sup = 0 + error_sep_value error_sep_value_inf = 0 - error_sep_value y_units_val = [] # Iter 2 times because of y1 and y2 to find in up and down side of the partition for it in range(2): if it == 0: side_temp = 'Up' else: side_temp = 'Down' data_prepros_y = data_prepros_arg[:, 3 - it] common_y_pos_arg = test_partition_arg[2 + it] diffs = data_prepros_y - common_y_pos_arg diffs_condition = (abs(diffs) <= error_sep_value_sup) & (abs(diffs) >= error_sep_value_inf) location = np.where(diffs_condition) partition_pos = location[0] for p in partition_pos: # print(p) temp_partition = data_prepros_arg[p] is_partition_checked = temp_partition[4] if is_partition_checked == 0: x1_temp = np.array([test_partition_arg[0], temp_partition[0]]).max() x2_temp = np.array([test_partition_arg[1], temp_partition[1]]).min() plt.scatter(x1_temp, common_y_pos_arg, marker='x') plt.scatter(x2_temp, common_y_pos_arg, marker='x') # test if range is inside both partitions condition_1 = (x1_temp >= temp_partition[0]) & (x2_temp <= temp_partition[1]) condition_2 = (x1_temp >= test_partition_arg[0]) & (x2_temp <= test_partition_arg[1]) if condition_1 & condition_2: # index_distance = int(abs(y1_temp - y2_temp)/sep_value_split) # temp temp_list = [] index_count_aux = int(np.round(abs(temp_partition[1] - temp_partition[0]) / sep_value_split_arg)) for s in range(index_count_aux): # print(s) val = temp_partition[0] + sep_value_split_arg / 2 + s * sep_value_split_arg # plt.scatter(test_partition[1] + sep_value_split, val, marker='x', c='b') if (val > x1_temp) & (val < x2_temp): plt.scatter(val, common_y_pos_arg + sep_value_split_arg / 2, marker='|', c='y') print('Partition ' + str(temp_partition[5]) + ' ' + str(temp_partition[6]) + ' index: ' + str(s)) temp_list.append([temp_partition[5], temp_partition[6], s]) # test test_list = [] index_count_aux = int(np.round(abs(test_partition_arg[1] - test_partition_arg[0]) / sep_value_split_arg)) for s in range(index_count_aux): val = test_partition_arg[0] + sep_value_split_arg / 2 + s * sep_value_split_arg # plt.scatter(test_partition[1] - sep_value_split, val, marker='x', c='r') if (val > x1_temp) & (val < x2_temp): plt.scatter(val, common_y_pos_arg - sep_value_split_arg / 2, marker='|', c='g') print('Partition ' + str(test_partition_arg[5]) + ' ' + str(test_partition_arg[6]) + ' index: ' + str(s)) test_list.append([test_partition_arg[5], test_partition_arg[6], s]) for l in 
range(len(temp_list)): if it == 1: # Up u_part = temp_list[l] d_part = test_list[l] else: # Down u_part = test_list[l] d_part = temp_list[l] gi_u = int(u_part[0]) gj_u = int(u_part[1]) gu_index = int(u_part[2]) gi_d = int(d_part[0]) gj_d = int(d_part[1]) gd_index = int(d_part[2]) y_units_val.append(InterfaceUnit(((gi_u, gj_u), (gi_d, gj_d)), (gu_index, gd_index))) return y_units_val def get_xy_units(data_prepros_arg, sep_value_arg, n_split_sep_value_arg, error_val_arg): # n_split_sep_value = 10 # error_val = 0.05 # init algorithm x_units_val = [] y_units_val = [] for n_partition in range(len(data_prepros_arg)): x_units_temp = search_lr_interfaces(data_prepros_arg, n_partition, sep_value_arg, n_split_sep_value_arg, error_val_arg) y_units_temp = search_ud_interfaces(data_prepros_arg, n_partition, sep_value_arg, n_split_sep_value_arg, error_val_arg) x_units_val.extend(x_units_temp) y_units_val.extend(y_units_temp) data_prepros_arg[n_partition][4] = 1 # Partition Complete return y_units_val, x_units_val file_name = 'best_just_sqrs_50_8000_92' path_base = '/Users/Juan/django_projects/py-ard/heuristic/results' json_data_raw = load_from_json(path_base + '/' + file_name + '.json') json_data = np.array(json_data_raw['data']) sep_value = float(json_data_raw['sep_value']) data_prepros, summary = create_groups(json_data, sep_value) # Plot Rectangles by groups plt.figure() for rec in data_prepros: x1 = rec[0] x2 = rec[1] y1 = rec[2] y2 = rec[3] p1 = np.array([x1, y1]) p2 = np.array([x1, y2]) p3 = np.array([x2, y1]) p4 = np.array([x2, y2]) ps = np.array([p1, p2, p4, p3, p1]) plt.plot(ps[:, 0], ps[:, 1], color=colors_list[int(rec[5])]) n_split_sep_value = 10 error_val = 0.05 y_units, x_units = get_xy_units(data_prepros, sep_value, n_split_sep_value, error_val) # # # init algorithm # x_units = [] # y_units = [] # for n_partition in range(len(data_prepros)): # x_units_temp = search_lr_interfaces(data_prepros, n_partition, sep_value, n_split_sep_value, error_val) # y_units_temp = search_ud_interfaces(data_prepros, n_partition, sep_value, n_split_sep_value, error_val) # # x_units.extend(x_units_temp) # y_units.extend(y_units_temp) # # data_prepros[n_partition][4] = 1 # Partition Complete # Print units for x_unit in x_units: print(str(x_unit.group) + ' ' + str(x_unit.position)) for y_unit in y_units: print(str(y_unit.group) + ' ' + str(y_unit.position)) ```
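The three adaptive-boxes prototypes above revolve around one primitive: from a seed cell containing a 1, grow the largest axis-aligned all-ones rectangle in each quadrant, intersect the four results, and zero out the covered area. The snippet below is a minimal, self-contained sketch of that primitive for the right-bottom quadrant only; it illustrates the idea and is not the repository's CUDA-oriented implementation (the function name and demo grid are made up for this example).

```python
# Minimal sketch of the "grow an all-ones rectangle right and down from a seed"
# step that get_right_bottom_rectangle() above performs. Illustrative only.
import numpy as np


def grow_right_bottom(grid: np.ndarray, i: int, j: int):
    """Return (x1, x2, y1, y2) of an all-ones rectangle whose top-left cell is (j, i)."""
    if grid[i, j] != 1:
        raise ValueError("the seed cell must contain a 1")
    m, n = grid.shape
    # Fix the rectangle height from the seed column first...
    height = 0
    while i + height < m and grid[i + height, j] == 1:
        height += 1
    # ...then extend to the right while each new column is filled over that height.
    width = 0
    while j + width < n and np.all(grid[i:i + height, j + width] == 1):
        width += 1
    return j, j + width - 1, i, i + height - 1


if __name__ == "__main__":
    demo = np.ones((6, 8), dtype=int)
    demo[3, 5] = 0                        # a hole that limits the growth
    print(grow_right_bottom(demo, 1, 2))  # -> (2, 4, 1, 5) with this hole
```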
{ "source": "jnfrncs/sgTpushed2SWE", "score": 2 }
#### File: jnfrncs/sgTpushed2SWE/sgTpushed2SWE.py ```python __author__ = "<NAME>, Cisco Switzerland" __copyright__ = "MIT License. Copyright (c) 2020 Cisco and/or its affiliates." __version__ = 1.0 """ Copyright (c) 2019, Cisco Systems, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import asyncio from asyncio.tasks import FIRST_COMPLETED import json import sys import time from websockets import ConnectionClosed from ws_stomp import WebSocketStomp from sgTpushed2SWE_pxgrid import PxgridControl from sgTpushed2SWE_swe import SmcControl, IpCache, Speedo from sgTpushed2SWE_args import Config def ipTagCache_cleanup(config,staleIPs, smc, ipTags): if config.cache_remove_stale_ip(): for IpAddr in staleIPs: tagId = ipTags.exists(IpAddr) tagDetails = smc.tag_details(tagId) tagName = tagDetails['name'] print("* Stale IP ({}), removing it from tag {} - rate/s : {:.1f}.".format(IpAddr, tagName, smc.callRate() ), flush=True) smc.delIpFromTag(tagId, tagDetails, IpAddr) ipTags.delete(IpAddr) def key_enter_callback(event): sys.stdin.readline() event.set() async def future_read_message(ws, future): try: message = await ws.stomp_read_message() future.set_result(message) except ConnectionClosed: print('Websocket connection closed') async def subscribe_loop(config, secret, ws_url, topic, smc): global ipTags ws = WebSocketStomp(ws_url, config.ise_nodename(), secret, config.get_ssl_context()) await ws.connect() await ws.stomp_connect(pubsub_node_name) await ws.stomp_subscribe(topic) print("{TIME} ({Index of pxgrid msg}/{Index of SWE API calls made}) PxGrid -> sgt: {TAG} IPs: {IP} ") while True: future = asyncio.Future() future_read = future_read_message(ws, future) await asyncio.wait([future_read], return_when=FIRST_COMPLETED) message = json.loads(future.result()) session = message['sessions'][0] pxRawRate.monitor() if 'ctsSecurityGroup' in session.keys() and 'ipAddresses' in session.keys() : now = int(time.time()) pxIpRate.monitor() ipAddresses = session['ipAddresses'] sgtName = session['ctsSecurityGroup'] tstamp = time.strftime("%H:%M:%S", time.gmtime()) listOfIPs = " ".join(ipAddresses) print("{} ({}/{}) PxGrid -> sgt: {} IPs: {} rate {:.1f}/s|{:.1f}/s".format(tstamp,pxIpRate.index(),smc.callIndex(),sgtName,listOfIPs,pxRawRate.rate(), pxIpRate.rate()), flush=True) # SMC call rate limit verification smcRate = smc.callRate() if smcRate > config.smc_max_rate(): print(" *** SMC API max call rate reached ({:.1f}/s), cancelling pxgrid record ".format(smcRate)) continue tagId = smc.tagIdFromName(sgtName) # tag/group name cache 
lookup if tagId == '': # group/tag doesn't exit yet; needs to be created print("({}/{}) New tag ({}), creation in SMC - (rate/s : {:.1f}).".format(pxIpRate.index(),smc.callIndex(),sgtName,smc.callRate()), flush=True) tagId = smc.createTag(sgtName) # smc API; one query. if tagId == '': # impossible to create print("### Error: Impossible to create new tag ({}).".format(sgtName), flush=True) continue for IpAddr in ipAddresses: if IpAddr == '': continue # for each IP address in the pxgrid message : cachedIpTag = ipTags.exists(IpAddr) # cache IP lookup, could return None if tagId != cachedIpTag: # actual group/tag in cache is unknown or different from received # update the Tag cache ipTags.update(IpAddr,tagId) # retrieve the config of the new Tag (group) tagDetails = smc.tag_details(tagId) # smc API; one query if tagDetails == config.smc_unknown_tag() : print("### Error: Impossible to get the tagId ({}) details.".format(tagId), flush=True) ipTags.delete(IpAddr) # in case sync is lost with FMC print(' ## {}/{} update message not processed.'.format(sgtName,IpAddr)) continue # no way to get the tag details; if 'ranges' in tagDetails: #update the cache with @IPs found in SMC in the group/tag ipTags.sync(tagId,tagDetails['ranges']) if IpAddr in tagDetails['ranges']: age = int((now - ipTags.last(IpAddr))/60) # in minutes print(" Tag ({}), {} present in SMC (age {} min.) - rate/s : {:.1f}.".format(sgtName, IpAddr, age, smc.callRate() ), flush=True) else: print(" Tag ({}), {} not found in SMC, adding it - rate/s : {:.1f}.".format(sgtName, IpAddr,smc.callRate() ), flush=True) smc.addIp2Tag(tagId,tagDetails,IpAddr) # smc API; one query else: print('## Warning: missing details for {}.'.format(tagId)) if cachedIpTag != None: # @IP was present in another group/tag; # retrieve the config of the old Tag (group) tagDetails = smc.tag_details(cachedIpTag) # smc API; one query tagName = tagDetails['name'] ipTags.sync(tagId,tagDetails['ranges']) #by the way, update the cache with all @IPs found if IpAddr in tagDetails['ranges']: print(" Old tag ({}), {} present in SMC, removing it - rate/s : {:.1f}.".format(tagName, IpAddr, smc.callRate() ), flush=True) smc.delIpFromTag(tagId,tagDetails,IpAddr) else: print(" Old tag ({}), {} not found in SMC, no change.".format(tagName, IpAddr ), flush=True) else: # known @IP, in the correct group/tag age = int((now - ipTags.last(IpAddr))/60) # in minutes print(" Tag ({}), @IP ({}) present in cache, no change (age {} min.).".format(sgtName, IpAddr, age ), flush=True) ipTags.confirm(IpAddr) # reset the age in the cache. # cleaning up the ipTags cache staleIPs = ipTags.review() ipTagCache_cleanup(config, staleIPs, smc, ipTags) if __name__ == '__main__': assert (sys.version_info >= (3, 6)), "Requires Python 3.6 min." 
config = Config() pxgrid = PxgridControl(config) pxRawRate = Speedo() pxIpRate = Speedo() smc = SmcControl(config) ipTags = IpCache(config) while pxgrid.account_activate()['accountState'] != 'ENABLED': time.sleep(60) # lookup for session service service_lookup_response = pxgrid.service_lookup('com.cisco.ise.session') service = service_lookup_response['services'][0] pubsub_service_name = service['properties']['wsPubsubService'] topic = service['properties']['sessionTopic'] # lookup for pubsub service service_lookup_response = pxgrid.service_lookup(pubsub_service_name) pubsub_service = service_lookup_response['services'][0] pubsub_node_name = pubsub_service['nodeName'] secret = pxgrid.get_access_secret(pubsub_node_name)['secret'] ws_url = pubsub_service['properties']['wsUrl'] # authenticate to Stealthwatch and set SMC environment smc.authenticate() smc.get_tenantID() smc.tagList() smc.setSgtRootTags() asyncio.get_event_loop().run_until_complete(subscribe_loop(config, secret, ws_url, topic, smc)) ``` #### File: jnfrncs/sgTpushed2SWE/sgTpushed2SWE_swe.py ```python __author__ = "<NAME>, Cisco Switzerland" __copyright__ = "MIT License. Copyright (c) 2020 Cisco and/or its affiliates." __version__ = 1.0 """ Copyright (c) 2019, Cisco Systems, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import requests try: requests.packages.urllib3.disable_warnings() except: pass import json import time """ --------------------------------------------------------------------------------- SmcControl Class used to interact with SMC trough API calls SMC API calls documentation refers to groups as "tags" ; group & tag are basically the same concept in SWE. 
For simplicity, the SMC group/tag is created with the same name as the incoming SGT bound to an @IP --------------------------------------------------------------------------------- """ class SmcControl: def __init__(self, config): self.config = config # Initialize the Requests session self.api_session = requests.Session() self.smc_login = { "username": self.config.smc_user(), "password": self.config.smc_password() } self.tag_list = [] self.tenantId = 0 self.apiRate = Speedo() self.lastAuth = int(time.time()) - 2 * self.config.smc_reauth() self.sgtRootTags = {} self.sgtRootName = '' self.auth_url = "https://" + self.config.smc_host() + "/token/v2/authenticate" self.tenant_url = 'https://' + self.config.smc_host() + '/sw-reporting/v1/tenants/' self.close_url = 'https://' + self.config.smc_host() + '/token' self.tag_url = 'https://' + self.config.smc_host() + '/smc-configuration/rest/v1/tenants/' """ Authentication process to FMC """ def authenticate(self): # authenticate only initially or after SMC_REAUTH now = int(time.time()) if now - self.lastAuth < self.config.smc_reauth(): # no need to re-authenticate return(True) self.apiRate.monitor() # after SMC_REAUTH, need to perform the POST request to login response = self.api_session.request("POST", self.auth_url, verify=False, data=self.smc_login) if(response.status_code == 200): self.lastAuth = now return(True) else: print("An error has ocurred, while logging in, with the following code {}".format(response.status_code)) return(False) """ Look for the tenant ID which is required to retrieve / post other data """ def get_tenantID(self): # check for authentication self.authenticate() self.apiRate.monitor() # Get the list of tenants (domains) from the SMC response = self.api_session.request("GET", self.tenant_url, verify=False) if (response.status_code == 200): # Store the tenant (domain) ID tenant_list = json.loads(response.content)["data"] self.tenantId = tenant_list[0]["id"] print("Found SWE tenant ID = {}".format(self.tenantId)) else: print("An error has ocurred, while fetching tenants (domains), with the following code {}".format(response.status_code)) """ List and store the existing host groups in the tenant. Host groups in the API calls library are named "tags" The result is a dictionnary with group/tag names and IDs """ def tagList(self): # check for authentication self.authenticate() self.apiRate.monitor() url = self.tag_url + str(self.tenantId) + '/tags/' response = self.api_session.request("GET", url, verify=False) if (response.status_code == 200): # Return the list tag_list = json.loads(response.content)["data"] self.tag_list = tag_list # If unable to fetch list of tags (host groups) else: print("An error has ocurred, while fetching tags (host groups), with the following code {}".format(response.status_code)) self.tag_list = [] return(self.tag_list) """ Retrieve all details for a particular tag (group) ID, including all @IPs already bound to it. 
""" def tag_details(self, tagId): if tagId == '': return(self.config.smc_unknown_tag()) # check for authentication self.authenticate() self.apiRate.monitor() url = self.tag_url + str(self.tenantId) + '/tags/' + str(tagId) response = self.api_session.request("GET", url, verify=False) if (response.status_code == 200): # Grab the tag details and check if the malicious IP is associated with this tag return(json.loads(response.content)["data"]) else: print('## Unable to locate tag Id: {}, return code {}.'.format(tagId, response.status_code)) if (response.status_code == 404): # not found, refresh the list self.tagList() return(self.config.smc_unknown_tag()) """ Returns an ID from the tag/group dictionnary. """ def tagIdFromName(self,tagName): for tag in self.tag_list: if tag['name'] == tagName: return(tag['id']) return('') """ From the config file, verify the parent groups/tags are valid """ def setSgtRootTags(self): sgtRootName = self.config.smc_sgt_default_parent() sgtRootTags = self.config.smc_sgt_parent_tags() # list of tag/group names found in SMC tagNames = set(item['name'] for item in self.tag_list) if sgtRootName not in tagNames: print(" !! Config error, >{}< group doesn't exist in SMC".format(sgtRootName)) exit(-1) else: self.sgtRootName = sgtRootName for tag in sgtRootTags.values(): if tag not in tagNames: print(" !! Config error, >{}< group doesn't exist in SMC".format(tag)) exit(-1) self.sgtRootTags = sgtRootTags """ Find a parent group/tag if configured, or return the default one. """ def getSgtRootTag(self,sgtName): if sgtName in self.sgtRootTags.keys(): return(self.sgtRootTags[sgtName]) else: return(self.sgtRootName) """ Creates a new group/tag in SMC Baselining for individual @IP is set to off """ def createTag(self,tagName): # check for authentication self.authenticate() # Set the filter with the request data rootTagId = self.tagIdFromName(self.getSgtRootTag(tagName)) tstamp = time.strftime("%y/%m/%d %H:%M:%S", time.gmtime()) request_data = [ { "name": tagName, "location": "INSIDE", "description": "ISE generated tag (SGT) group, created: {}".format(tstamp), "ranges": [], "hostBaselines": False, "suppressExcludedServices": True, "inverseSuppression": False, "hostTrap": False, "sendToCta": True, "parentId": rootTagId } ] # Add the new tag (host group) in the SMC self.apiRate.monitor() url = self.tag_url + str(self.tenantId) + '/tags' request_headers = {'Content-type': 'application/json', 'Accept': 'application/json'} response = self.api_session.request("POST", url, verify=False, data=json.dumps(request_data), headers=request_headers) # If successfully able to add the tag (host group) if (response.status_code != 200): print("## Cannot create tag for :", tagName) # try refreshing the list in case some change was done in SMC self.tagList() return('') tagDetails= json.loads(response.content)["data"][0] tagId = tagDetails['id'] # update tag list self.tag_list.append({'id' : tagId, 'name' : tagName}) return(tagId) """ Add an @IP into the range list in the tag/group """ def addIp2Tag(self,tagId,tagDetails,IpAddr): # check for authentication self.authenticate() url = self.tag_url + str(self.tenantId) + '/tags/' + str(tagId) # Modify the details of thee given tag (host group) from the SMC tagDetails['ranges'].append(IpAddr) self.apiRate.monitor() # Update the details of the given tag in the SMC request_headers = {'Content-type': 'application/json', 'Accept': 'application/json'} response = self.api_session.request("PUT", url, verify=False, data=json.dumps(tagDetails), 
headers=request_headers) # If successfully able to update the tag (host group) updatedTagDetails = json.loads(response.content)["data"] if (response.status_code != 200) or IpAddr not in updatedTagDetails["ranges"]: print("Impossible to add Ip addr into tagId {} (code {}):".format(str(tagId),response.status_code)) """ Remove an @IP from the range list in the tag/group """ def delIpFromTag(self,tagId,tagDetails,IpAddr): # check for authentication self.authenticate() url = self.tag_url + str(self.tenantId) + '/tags/' + str(tagId) # Modify the details of thee given tag (host group) from the SMC if IpAddr in tagDetails['ranges']: tagDetails['ranges'].remove(IpAddr) self.apiRate.monitor() # Update the details of the given tag in the SMC request_headers = {'Content-type': 'application/json', 'Accept': 'application/json'} response = self.api_session.request("PUT", url, verify=False, data=json.dumps(tagDetails), headers=request_headers) # If successfully able to update the tag (host group) updatedTagDetails = json.loads(response.content)["data"] if (response.status_code != 200) or IpAddr in updatedTagDetails["ranges"]: print("Impossible to remove Ip addr from tagId {} (code {}):".format(str(tagId),response.status_code)) # return the actual API call rate for rate limiting rules def callRate(self): return self.apiRate.rate() # return the number of SMC API calls already done since the script has started def callIndex(self): return self.apiRate.index() # Disconnects properly from the SMC def close(self): response = self.api_session.delete(self.close_url, timeout=30, verify=False) self.req +=1 print('Disconnected from SWE') """ --------------------------------------------------------------------------------- IpCache Class : IP@ <-> group/tag entries cache Avoids to fire an API call to SMC if the @IP was seen recently Format : { ip : tag } --------------------------------------------------------------------------------- """ class IpCache: def __init__(self, config): # Initialize the cache self.config = config self.cache = {} self.lastcleanup = int(time.time()) """ Updates an entry in the cache, and returns the old value if the @IP was present before. 
""" def update(self,ip, tagId): tick = int(time.time()) tag = { 'id' : tagId, 'tick' : tick } oldTagId = self.exists(ip) if oldTagId == None: # add the entry in the cache self.cache.update({ ip : tag }) return(None) else: # IP already exists in the cache if tagId == oldTagId: # tag hasn't changed return(None) else: # tag changed, updating and returning the old one self.cache.update({ ip : tag }) return(oldTagId) """ Checks if an @IP exists in the cache """ def exists(self, ip): if ip in self.cache.keys(): return(self.cache[ip]['id']) else: return(None) """ Removes an @IP / tag entry from the cache """ def delete(self, IpAddr): if self.exists(IpAddr) != None: del self.cache[IpAddr] """ Retrieve the latest time an @IP has been updated """ def last(self, ip): if ip in self.cache.keys(): return(self.cache[ip]['tick']) else: return(0) """ Update the tick value for an @IP If not updated, the cache review will remove the @IP from SMC (and cache) after being aging out """ def confirm(self, ip): tagId = self.exists(ip) if tagId: tick = int(time.time()) tag = { 'id' : tagId, 'tick' : tick } self.cache.update({ ip : tag }) return(tick) else: return(0) """ Updates a list of @IPs an entry in the cache """ def sync(self, tagId, IpAddresses): for IpAddr in IpAddresses: self.update(IpAddr,tagId) """ Removes stale entries in the cache """ def review(self): now = int(time.time()) staleIPs = [] if now - self.lastcleanup > self.config.cache_cleanup_time(): print('* {} : cache review.'.format(time.strftime("%m-%d %H:%M:%S", time.gmtime()))) for IpAddr in self.cache.keys(): age = now - self.last(IpAddr) if age > self.config.cache_stale_ip(): localtime = time.localtime(now) print(' -- {} age = {} (sec).'.format(IpAddr,age)) staleIPs.append(IpAddr) self.lastcleanup = now return(staleIPs) """ --------------------------------------------------------------------------------- Speedo Class used to rate the number of events (calls, messages, etc) per second Calculation made on the latest 5 seconds (per default) - monitor() takes the event in account - rate() reports the average of events / s - index() provides the actual index --------------------------------------------------------------------------------- """ class Speedo: def __init__(self, lapse = 5): self.req = 0 self.req_clock = [] self.lapse = lapse # in seconds def monitor(self): self.req +=1 self.req_clock.append(time.time()) def rate(self): now = time.time() for item in self.req_clock: if now - item > self.lapse: # rate calculated on self.lapse period of time self.req_clock.remove(item) rate = len(self.req_clock) / self.lapse return(rate) def index(self): return self.req ```
{ "source": "jnga773/TeReoNumberConverter", "score": 3 }
#### File: TeReoNumberConverter/build_files/TeReoNumberConverter_GUI.py
```python
import sys

from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *

from _converter_function import _ones_translator, _hundreds_translator, TeReoNumberConverter


# Define class for main window
class MainWindow(QMainWindow):
    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)

        # Title of the window
        self.setWindowTitle("Te Reo Number Converter")

        # Set Icon size
        self.setIconSize(QSize(16,16))
        self.setWindowIcon(QIcon("./build_files/Icon.ico"))

        # Menu bar
        MainMenuBar = self.menuBar()
        AboutMenu = MainMenuBar.addMenu('&About')

        # About action
        AboutAction = QAction("&About the App (v1.3)", self)
        AboutAction.setStatusTip("Information about the App")
        AboutAction.triggered.connect(self.AboutWindowPopup)
        # Add action to menu item
        AboutMenu.addAction(AboutAction)

        ##########################
        #     WINDOW WIDGETS     #
        ##########################
        # Text to say what to input
        self.InputLabel = QLabel(self)
        self.InputLabel.setText('Enter whole number (less than 10 billion):')

        # LineEdit number input
        self.InputNumber = QLineEdit(self)
        self.InputNumber.setMaxLength(15)
        self.InputNumber.setPlaceholderText("Number:")
        # Only allow integers
        self.onlyInt = QIntValidator()
        self.InputNumber.setValidator(self.onlyInt)
        # If Enter is pressed, calculate number
        self.InputNumber.returnPressed.connect(self.ConvertNumber)
        # Move input box
        # self.InputNumber.move(80, 20)
        # self.InputNumber.resize(200, 32)
        # self.InputLabel.move(20, 20)

        # Input okay button
        InputButton = QPushButton('Aue', self)
        # If button is clicked, calculate number
        InputButton.clicked.connect(self.ConvertNumber)
        # Move input button
        # InputButton.resize(200,32)
        # InputButton.move(80, 60)

        # Output Print
        self.OutputLabel = QLabel(self)
        self.OutputLabel.setText("In Te Reo Māori, 0 is: kore!")

        #############
        #  LAYOUT   #
        #############
        # Main layout
        MainLayout = QVBoxLayout()
        MainLayout.addWidget(self.InputLabel)
        MainLayout.addWidget(self.InputNumber)
        MainLayout.addWidget(InputButton)
        MainLayout.addWidget(self.OutputLabel)

        # Set MainLayout as main widget
        widget = QWidget()
        widget.setLayout(MainLayout)
        self.setCentralWidget(widget)

    def ConvertNumber(self):
        """
        When triggered, converts input string into Te Reo number and updates OutputLabel.
        """
        # print('Input Number: {}'.format(input_str))
        # Update input_str
        input_str = self.InputNumber.text()
        # Convert number to Te Reo Māori
        output_str = TeReoNumberConverter(input_str)
        self.OutputLabel.setText("In Te Reo Māori, {:,d} is: {}!".format(int(input_str), output_str))

    def AboutWindowPopup(self):
        """
        Opens a popup window with information on the app and my email address
        """
        msg = QMessageBox()
        # Set icon as information
        msg.setIcon(QMessageBox.Information)
        # Set text to be selectable
        msg.setTextInteractionFlags(Qt.TextSelectableByMouse)
        msg.setText("This app was developed by <NAME> (<EMAIL>). "
                    "The source code is available at https://www.github.com/jnga773/TeReoNumberConverter")
        # msg.setInformativeText("The source code is available at https://www.github.com/jnga773/TeReoNumberConverter")
        msg.setDetailedText("v1.0 - Initial Release \n" +
                            "v1.1 - Changed 'kotahi tekau' to 'tekau' \n" +
                            "v1.2 - Increased range to 9,999,999,999 (just under 10 billion) \n" +
                            "v1.2.1 - Fixed Typo \n" +
                            "v1.2.2 - Added commas to format of input string (1234 -> 1,234) \n" +
                            "v1.3 - Fixed the conversion so it correctly translates the numbers")
        msg.setWindowTitle("About Te Reo Number Converter (v1.3)")
        msg.setStandardButtons(QMessageBox.Ok)
        msg.show()
        msg.exec_()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    window.resize(500, 100)
    window.show()
    app.exec_()
```
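The GUI above delegates the actual conversion to the repository's _converter_function module, which is not included in this excerpt. Purely as an illustration of the usual first step of such a converter — splitting a number into three-digit groups, one per scale word — here is a small sketch; the function name is hypothetical and it does not attempt the Te Reo Māori vocabulary or grammar itself.

```python
# Sketch only: the digit-grouping step a number-to-words converter typically starts
# with. The real Te Reo Māori word tables live in _converter_function (not shown).
def split_into_thousands_groups(number: int) -> list:
    """Return three-digit groups from most to least significant, e.g. 1234567 -> [1, 234, 567]."""
    if number == 0:
        return [0]  # the GUI above renders zero as "kore"
    groups = []
    while number > 0:
        groups.append(number % 1000)
        number //= 1000
    return groups[::-1]


if __name__ == "__main__":
    print(split_into_thousands_groups(9_999_999_999))  # [9, 999, 999, 999]
```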
{ "source": "jngannon/SpaRaNa", "score": 2 }
#### File: SpaRaNa/sparana/lobotomizer.py ```python import numpy as np import cupy as cp from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from sparana.parameter_selection import get_k_biggest from sparana.parameter_selection import get_k_smallest from sparana.model import model def get_MAV_module(lobo, data): ''' This will run and store the mean activated values in the metric matrices in the class, sorts the list or whatever''' for i in data: if lobo._model._layer_type == 'Sparse': this_layer_inputs = i.transpose() if lobo._model._layer_type == 'Full': this_layer_inputs = i if lobo._model._comp_type == 'GPU': this_layer_inputs = cp.array(this_layer_inputs) output = None layer_count = 0 for layer in lobo._model.layers: output = layer.activate(this_layer_inputs) this_layer_inputs = output if lobo._model._layer_type == 'Sparse': lobo._weight_stats[layer_count] += layer.activate_weights(this_layer_inputs) # Convert the activatedd full layers to sparse matrices. if lobo._model._layer_type == 'Full': lobo._weight_stats[layer_count] += csr_matrix(layer.activate_weights(this_layer_inputs)) layer_count += 1 if lobo._layer_type == 'Sparse': output = output.transpose() lobo._weight_stats = [coo_matrix(i) for i in lobo._weight_stats] for i in lobo._weight_stats: i.data = abs(i/len(data)) return def get_MAAV_module(lobo, data): ''' MAAV is mean absolutes activated values''' for layer in lobo._model.layers: layer._dropout = None for i in data: if lobo._model._layer_type == 'Sparse': this_layer_inputs = i.transpose() if lobo._model._layer_type == 'Full': this_layer_inputs = i if lobo._model._comp_type == 'GPU': this_layer_inputs = cp.array(this_layer_inputs) output = None layer_count = 0 for layer in lobo._model.layers: if lobo._model._layer_type == 'Sparse': lobo._weight_stats[layer_count] += abs(layer.activate_weights(this_layer_inputs)) # Convert the activatedd full layers to sparse matrices. if lobo._model._layer_type == 'Full': if lobo._lobo_type == 'lobotomizer': lobo._weight_stats[layer_count] += abs(coo_matrix(cp.asnumpy(layer.activate_weights(this_layer_inputs)))) if lobo._lobo_type == 'parameter_selector': lobo._weight_stats[layer_count] += abs(cp.asnumpy(layer.activate_weights(this_layer_inputs))) output = layer.activate(this_layer_inputs) this_layer_inputs = output layer_count += 1 if lobo._model._layer_type == 'Sparse': output = output.transpose() # Convert stuff here if lobo._lobo_type == 'lobotomizer': lobo._weight_stats = [coo_matrix(i) for i in lobo._weight_stats] for i in lobo._weight_stats: i = i/len(data) for layer in lobo._model.layers: layer._dropout = lobo._model._dropout return def get_absolute_values_module(lobo): ''' Stores the sorted list or whatever, either of these will just replace what is already there''' if lobo._model._comp_type == 'GPU': lobo._weight_stats = [coo_matrix(abs(i.weights.get())) for i in lobo._model.layers] if lobo._model._comp_type == 'CPU': lobo._weight_stats = [coo_matrix(abs(i.weights)) for i in lobo._model.layers] return class lobotomizer: ''' All stats arrays, sparse or no will be stored on the CPU ram, otherwise this will simply double the GPU memory requirements. 
These operations would be sped up on a GPU, but are run much less than training.''' def __init__(self, model): self._model = model self._lobo_type = 'lobotomizer' self._weight_stats = [coo_matrix(i._weights.shape) for i in self._model.layers] self._AV_datapoints = 0 def get_MAV(self, data): ''' This will run and store the mean activated values in the metric matrices in the class, sorts the list or whatever''' get_MAV_module(self, data) return def get_MAAV(self, data): ''' MAAV is mean absolutes activated values''' get_MAAV_module(self, data) return def get_absolute_values(self): ''' Stores the sorted list or whatever, either of these will just replace what is already there''' get_absolute_values_module(self) return def get_sparse_masks(self): """ Need to set the sparse training masks for selected training here""" for i in self._model._layers: i._sparse_training_mask = i._weights!=0 return def get_random(self): """ Gets randomized stats matrices, so prune smallest prunes random weights""" return def get_negative_values(self): if self._model._comp_type == 'GPU': self._weight_stats = [coo_matrix(i.weights.get()) for i in self._model.layers] if self._model._comp_type == 'CPU': self._weight_stats = [coo_matrix(i.weights) for i in self._model.layers] for i in range(len(self._model.layers)): self._weight_stats[i].data[self._weight_stats[i].data > 0] = 0 self._weight_stats[i].eliminate_zeros() self._weight_stats[i].data = abs(self._weight_stats[i].data) return def get_positive_values(self): if self._model._comp_type == 'GPU': self._weight_stats = [coo_matrix(i.weights.get()) for i in self._model.layers] if self._model._comp_type == 'CPU': self._weight_stats = [coo_matrix(i.weights) for i in self._model.layers] for i in range(len(self._model.layers)): self._weight_stats[i].data[self._weight_stats[i].data < 0] = 0 self._weight_stats[i].eliminate_zeros() self._weight_stats[i].data = abs(self._weight_stats[i].data) return def get_activation_ranks(self, data = None): """ Ranks the weights for each activation so that I can remove the smallest x% of weights from each activation, not just the smallest weights from the whole weight matrix...................""" if data is not None: self._lobo_type = 'parameter_selector' self._weight_stats = [np.zeros(i._weights.shape) for i in self._model.layers] get_MAAV_module(self, data) self._lobo_type = 'lobotomizer' else: if self._model._comp_type == 'GPU': self._weight_stats = [abs(i.weights.get()) for i in self._model.layers] if self._model._comp_type == 'CPU': self._weight_stats = [abs(i.weights) for i in self._model.layers] for i in range(len(self._weight_stats)): temp = [] for j in self._weight_stats[i]: # This is surely not the most efficient way of doing this, there is a function # somewhere but I can't find it, so this will do. argsort = np.argsort(j) ranks = np.zeros(len(j)) # Look at what the difference between the MAAV and absolute array structures, probably an indexing problem for k in range(len(j)): ranks[argsort[k]] = k temp.append(ranks) self._weight_stats[i] = coo_matrix(np.array(temp)) return def prune_smallest(self, prune_ratio = None, print_stats = False, layers = None): ''' Prunes the weights in the model class. Using the smallest values from weight stats to prune. Sparse matrices will be reconstructed and assigned to the layer classes. Layers needs to be a list of ratios for eack layer to be pruned to. I can just not include the final layer. 
        There are no checks or errors on here, so pay attention to the number of layers and the number of ratios input.'''
        # Sparse GPU weights need to be reassigned, they don't support index-based assignment; full GPU, sparse CPU, and full CPU
        # can be assigned, I will need to run eliminate zeros.
        if layers:
            for i in range(len(layers)):
                if self._model._layer_type == 'Sparse' and self._model._comp_type == 'GPU':
                    # Copy weight matrix to CPU ram as a COO matrix
                    cpu_coo_matrix = self._model.layers[i]._weights.get().tocoo()
                    # Number of parameters to be removed
                    remove = int(layers[i]*cpu_coo_matrix.nnz)
                    if print_stats:
                        print('Pruning ', remove, ' parameters from ', len(cpu_coo_matrix.data), ' parameters in layer ', i)
                    # List of indices of parameters to be removed
                    sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                    # New COO matrix with parameters removed
                    cpu_coo_matrix = coo_matrix((cpu_coo_matrix.data[sortlist], (cpu_coo_matrix.row[sortlist], cpu_coo_matrix.col[sortlist])), shape = cpu_coo_matrix.shape)
                    # Copy back to GPU in the layer class as the original CSR matrix
                    self._model.layers[i]._weights = cp.sparse.csr_matrix(cpu_coo_matrix)
                else:
                    if layers[i] != None:
                        # Number of parameters to be removed
                        remove = np.size(self._model.layers[i]._weights)*(layers[i] - (1-self._weight_stats[i].getnnz()/np.size(self._model.layers[i]._weights)))
                        remove = int(remove)
                        if print_stats:
                            print('Pruning ', remove, ' parameters from ', self._weight_stats[i].nnz, ' parameters in layer ', i)
                        # List of indices of parameters to be removed
                        sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                        # Loop through and set weights to 0
                        for j in sortlist:
                            self._model.layers[i]._weights[self._weight_stats[i].row[j], self._weight_stats[i].col[j]] = 0
                            self._weight_stats[i].data[j] = 0
                        self._weight_stats[i].eliminate_zeros()
        if not layers:
            # Not pruning the last layer, the model begins to fail quickly when this layer is pruned.
            for i in range(len(self._model.layers)-1):
                if self._model._layer_type == 'Sparse' and self._model._comp_type == 'GPU':
                    # Copy weight matrix to CPU ram as a COO matrix
                    cpu_coo_matrix = self._model.layers[i]._weights.get().tocoo()
                    # Number of parameters to be removed
                    remove = int(prune_ratio*cpu_coo_matrix.nnz)
                    if print_stats:
                        print('Pruning ', remove, ' parameters from ', cpu_coo_matrix.nnz, ' parameters in layer ', i)
                    # List of indices of parameters to be removed
                    sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                    # New COO matrix with parameters removed
                    cpu_coo_matrix = coo_matrix((cpu_coo_matrix.data[sortlist], (cpu_coo_matrix.row[sortlist], cpu_coo_matrix.col[sortlist])), shape = cpu_coo_matrix.shape)
                    # Copy back to GPU in the layer class as the original CSR matrix
                    self._model.layers[i]._weights = cp.sparse.csr_matrix(cpu_coo_matrix)
                else:
                    # Number of parameters to be removed
                    remove = int(prune_ratio*self._weight_stats[i].getnnz())
                    if print_stats:
                        print('Pruning ', remove, ' parameters from ', self._weight_stats[i].getnnz(), ' parameters in layer ', i)
                    # List of indices of parameters to be removed
                    sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                    # Loop through and set weights to 0. There is probably a faster way to do this.
                    for j in sortlist:
                        self._model.layers[i]._weights[self._weight_stats[i].row[j], self._weight_stats[i].col[j]] = 0
        return

    def prune_all_negative(self, layers = None, prune_ratio = None):
        """ Just prunes the weights of a matrix that are negative, I have not added the option of
        choosing what ratio to remove, but I might depending on how experiments go.
        """
        if layers:
            for i in range(len(layers)):
                if layers[i] == True:
                    self._model._layers[i]._weights[self._model._layers[i]._weights < 0] = 0
        else:
            for layer in self._model._layers:
                layer._weights[layer._weights < 0] = 0
        return

class vulcanizer:
    ''' This is for splitting a smaller model off the main model, which can then be trained in a
    memory/compute restricted system, and the parameters can be reinserted into the main model.'''

    def __init__(self, model, selection_type = 'max', std = None):
        self._model = model
        self._lobo_type = 'parameter_selector'
        if model._layer_type == 'Sparse':
            self._weight_stats = [coo_matrix(i._weights.shape) for i in self._model.layers]
        if model._layer_type == 'Full':
            self._weight_stats = [np.zeros(i._weights.shape) for i in self._model.layers]
        self._submodel = None
        # Selected column/row indices per layer, filled in by split_model.
        self._coordinates = []
        self._average_zeros = None
        self._activation_selection = selection_type
        self._std = std
        self._mean_zeros = []

    def get_MAV(self, data):
        ''' This will run and store the mean activated values in the metric matrices in the class,
        sorts the list or whatever'''
        get_MAV_module(self, data)
        # Do a check before converting here
        for i in self._weight_stats:
            i = cp.asnumpy(i)
        return

    def get_MAAV(self, data):
        ''' MAAV is mean absolute activated values'''
        get_MAAV_module(self, data)
        return

    def get_absolute_values(self):
        ''' The absolute values of the weights'''
        get_absolute_values_module(self)
        return

    def get_average_zeros(self, inputs):
        _ = self._model.outputs(inputs)
        for i in self._model.layers:
            self._mean_zeros.append(np.mean(i._outputs == 0))
        return

    def split_model(self, sizes, new_last_layer = False):
        ''' Splits the model and returns a submodel. The sizes input is an array of the sizes of the final *x* layers.
        Define the last layer, it should be the same number of classes as the original. If there are 10 classes,
        the sizes array should look something like [50, 50, 10]. I don't have the new_last_layer bit built yet,
        this is a reminder, because I might want to.'''
        start = len(self._weight_stats) - len(sizes)
        these_layers = []
        if len(self._mean_zeros) == 0:
            print('You need to run get_average_zeros')
            return
        for i in range(len(sizes)):
            if self._model._comp_type == 'GPU':
                these_layers.append(cp.asnumpy(self._model.layers[start+i]._weights))
            else:
                these_layers.append(self._model.layers[start+i]._weights)
        for i in range(len(sizes)-1):
            these_weights = these_layers[i]
            # Get the max things here...
            if self._activation_selection == 'max':
                indices = get_max_columns(these_layers[i], sizes[i])
            if self._activation_selection == 'normal':
                indices = get_normal_columns(these_layers[i], sizes[i], self._std)
            self._coordinates.append(indices)
            # Get the means of the other stuff here
            these_layers[i] = np.delete(these_layers[i], axis = 1)
            these_layers[i+1] = np.delete(these_layers[i], axis = 0)
        # Construct the to-be weight arrays here
        # Create the layer objects here
        # TODO: if this_layer = ...
        # Put the models in here
        newmodel = model(input_size = None, layers = these_layers, comp_type = 'GPU')
        for i in these_layers:
            # put the weight arrays in here
            pass
        self._submodel = newmodel
        return newmodel

    def mind_meld(self):
        ''' returns the parameters into the original model'''
        return

    def revert_weights(self):
        ''' This is to return the consolidated weights to their original state after a training step'''
        return

class parameter_selector:
    ''' All stats arrays, sparse or no, will be stored on the CPU ram, otherwise this will simply double the GPU memory requirements.
These operations would be sped up on a GPU, but are run much less than training.''' def __init__(self, model): self._model = model self._lobo_type = 'parameter_selector' if model._layer_type == 'Sparse': self._weight_stats = [coo_matrix(i._weights.shape) for i in self._model.layers] if model._layer_type == 'Full': self._weight_stats = [np.zeros(i._weights.shape) for i in self._model.layers] # TODO This is a dummy variable for comparing the stats of parameters when different data is passed through self._other_weight_stats = None # This is a list of ratios to keep track of which layers have had their parameters assessed. self._mask_ratios = np.zeros((len(self._weight_stats))) # Keeps track of whether the weights associated with the new class have been added self._class_weights_added = False # subsets assign random integers to the weight matrices to choose different subsets to train over self._subsets = None def get_MAV(self, data): ''' This will run and store the mean activated values in the metric matrices in the class, sorts the list or whatever''' get_MAV_module(self, data) # Do a check before converting here for i in self._weight_stats: i = cp.asnumpy(i) return def get_MAAV(self, data): ''' MAAV is mean absolutes activated values''' get_MAAV_module(self, data) return def get_absolute_values(self): ''' The absolute values of the weights''' get_absolute_values_module(self) return def get_top(self, ratio, number = None, layers = None, subset = None): ''' Returns the top k parameters, inputs should be either lists, or a single number.''' # Loop over the weight stats, get the top, store in the layer class and free up the space with weight_stat = None if not layers: for i in range(len(self._weight_stats)): indices = get_k_biggest([self._weight_stats[i]], ratio) # Initalize the bitmask self._model.layers[i]._sparse_training_mask = cp.zeros(self._model.layers[i]._weights.shape) # Keep track of the ratios self._mask_ratios[i] = ratio if not subset: for j in indices[0]: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if subset: for j in indices[0]: if self._subsets[i][j[0]][j[1]] == subset: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if layers: for i in range(len(layers)): indices = get_k_biggest([self._weight_stats[i]], layers[i]) # Initialize the bitmask self._model.layers[i]._sparse_training_mask = cp.zeros(self._model.layers[i]._weights.shape) # Keep track of the ratios self._mask_ratios[i] = ratio if not subset: for j in indices[0]: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if subset: for j in indices[0]: if self._subsets[i][j[0]][j[1]] == subset: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 return def get_bottom(self, ratio, number = None, layers = None, subset = None): ''' Returns the bottom k parameters, inputs should be either lists, or a single number.''' # Loop over the weight stats, get the top, store in the layer class and free up the space with weight_stat = None if not layers: for i in range(len(self._weight_stats)): indices = get_k_smallest([self._weight_stats[i]], ratio) # Initalize the bitmask self._model.layers[i]._sparse_training_mask = cp.zeros(self._model.layers[i]._weights.shape) # Keep track of the ratios self._mask_ratios[i] = ratio if not subset: for j in indices[0]: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if subset: for j in indices[0]: if self._subsets[i][j[0]][j[1]] == subset: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if layers: for i in range(len(layers)): indices = 
get_k_smallest([self._weight_stats[i]], layers[i]) # Initialize the bitmask self._model.layers[i]._sparse_training_mask = cp.zeros(self._model.layers[i]._weights.shape) # Keep track of the ratios self._mask_ratios[i] = ratio if not subset: for j in indices[0]: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 if subset: for j in indices[0]: if self._subsets[i][j[0]][j[1]] == subset: self._model.layers[i]._sparse_training_mask[j[0]][j[1]] = 1 return def add_output_class(self, output_class): '''This adds all of the weights that are associated with a new output class. ''' self._model.layers[-1]._sparse_training_mask[:,output_class] = 1 self._class_weights_added = True return def print_ratios(self): print('The ratios of parameters for each layer are:', self._mask_ratios) if self._class_weights_added: print('Output class weights have been added') else: print('Output class weights have NOT been added') return def get_top_gaussian(self, ratio = None, variance = None): ''' Returns a selection of K parameters chosen from a gaussian distribution centred on the top values.''' return def initialize_subsets(self, number_of_subsets): self._subsets = [np.floor(np.random.uniform(0, number_of_subsets, size = (i._weights.shape))) for i in self._model.layers] return ``` #### File: SpaRaNa/sparana/optimizer.py ```python import numpy as np import cupy as cp from sparana.parameter_selection import get_k_max def get_gradients(opt, inputs, labels, backward_layers = None, loss_function = 'MSE'): outputs = opt._model.outputs(inputs) # This is hard coded quadratic error. The error for the softmax is built in here for reasons. gradients = [] if loss_function == 'MSE': if opt._model._comp_type == 'CPU': error = -(outputs - labels) if opt._model._comp_type == 'GPU': error = -(outputs - cp.array(labels)) if loss_function == 'CE': if opt._model._comp_type == 'CPU': error = -(outputs - labels) if opt._model._comp_type == 'GPU': error = -(outputs - cp.array(labels)) if backward_layers == None: backward_layers = len(opt._model.layers) for i in range(backward_layers): if i < len(opt._model.layers)-1: # For the last layer, feed in the error calculated from the outputs, for the middle layers # feed in the error from the following layer, and outputs from the previous layer if opt._model._layer_type == 'Full': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error*opt._model.layers[-1-i]._relu) if opt._model._layer_type == 'Sparse': if opt._model.layers[-1-i]._activation_type == 'Relu': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error*(opt._model.layers[-1-i]._relu.transpose())) if opt._model.layers[-1-i]._activation_type == 'Linear': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error) gradients.append((weight_gradients, bias_gradients)) if i == len(opt._model.layers)-1: # For the first layer, feed in the error from the following layer, and the inputs if opt._model._comp_type == 'CPU': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(inputs, error*opt._model.layers[-1-i]._relu) if opt._model._comp_type == 'GPU': if opt._model._layer_type == 'Full': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(cp.array(inputs), error*opt._model.layers[-1-i]._relu) if opt._model._layer_type == 'Sparse': weight_gradients, bias_gradients, error = 
opt._model.layers[-1-i].get_gradients(cp.array(inputs.transpose()), error*(opt._model.layers[-1-i]._relu.transpose())) gradients.append((weight_gradients, bias_gradients)) # Gradients are appended in reverse order, reverse this to simplify applying training step gradients.reverse() return gradients def selected_gradients(opt, inputs, labels, layers): outputs = opt._model.outputs(inputs) # This is hard coded quadratic error. gradients = [] if opt._model._comp_type == 'CPU': error = -(outputs - labels) if opt._model._comp_type == 'GPU': error = -(outputs - cp.array(labels)) ### This bit goes for i in layers: Got to start last and move backwards, for i in range(len(opt._model.layers)): if i < len(opt._model.layers)-1: # For the last layer, feed in the error calculated from the outputs, for the middle layers # feed in the error from the following layer, and outputs from the previous layer if opt._model._layer_type == 'Full': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error*opt._model.layers[-1-i]._relu) if opt._model._layer_type == 'Sparse': if opt._model.layers[-1-i]._activation_type == 'Relu': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error*(opt._model.layers[-1-i]._relu.transpose())) if opt._model.layers[-1-i]._activation_type == 'Linear': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(opt._model.layers[-2-i]._outputs, error) gradients.append((weight_gradients, bias_gradients)) if i == len(opt._model.layers)-1: # For the first layer, feed in the error from the following layer, and the inputs if opt._model._comp_type == 'CPU': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(inputs, error*opt._model.layers[-1-i]._relu) if opt._model._comp_type == 'GPU': if opt._model._layer_type == 'Full': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(cp.array(inputs), error*opt._model.layers[-1-i]._relu) if opt._model._layer_type == 'Sparse': weight_gradients, bias_gradients, error = opt._model.layers[-1-i].get_gradients(cp.array(inputs.transpose()), error*(opt._model.layers[-1-i]._relu.transpose())) gradients.append((weight_gradients, bias_gradients)) # Gradients are appended in reverse order, reverse thisto simplify applying training step gradients.reverse() return gradients class sgd_optimizer: """ First attempt at building an optimizer, only uses quadratic cost function""" def __init__(self, model, learning_rate, l1_constant = None, l2_constant = None): self._model = model #if self._model.layers[0]._learning_rate != None: # self._layer_learning_rates = True self._learning_rate = learning_rate self._gradients = [] self._l2_constant = l2_constant self._l1_constant = l1_constant def train_step(self, inputs, labels): grads = get_gradients(self, inputs, labels) for i in range(len(grads)): if self._model._layer_type == 'Full': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights if self._l2_constant and self._l1_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) if self._l1_constant and not self._l1_constant: 
self._model.layers[i]._weights += self._learning_rate*grads[i][0] - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) if not self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0] self._model.layers[i]._biases += self._learning_rate*grads[i][1] if self._model._layer_type == 'Sparse': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights.data if self._l2_constant and self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights.data - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights.data) if self._l1_constant and not self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights.data) if not self._l1_constant and not self._l2_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] self._model.layers[i]._biases += self._learning_rate*grads[i][1] class madadad_optimizer: """ Adadad is a kind of adaptive gradients optimizer, where gradients that keep moving in the same direction, move faster. """ def __init__(self, model, learning_rate, friction, l1_constant = None, l2_constant = None): self._model = model self._learning_rate = learning_rate if self._model._comp_type == 'CPU': if self._model._layer_type == 'Full': self._adadad_weights = [np.zeros(i._weights.shape) for i in self._model.layers] self._adadad_biases = [np.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': stats = [] if self._model._comp_type == 'GPU': if self._model._layer_type == 'Full': self._adadad_weights = [cp.zeros(i._weights.shape) for i in self._model.layers] self._adadad_biases = [cp.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': stats = [] self._gradients = [] self._friction = friction self._l2_constant = l2_constant self._l1_constant = l1_constant def train_step(self, inputs, labels): grads = get_gradients(self, inputs, labels) for i in range(len(grads)): signs = np.sign(grads[i][0]) bias_signs = np.sign(grads[i][1]) self._adadad_weights[i] = (np.sign(self._adadad_weights[i]) == signs)*self._adadad_weights[i] self._adadad_biases[i] = (np.sign(self._adadad_biases[i]) == bias_signs)*self._adadad_biases[i] self._adadad_weights[i] = self._friction*self._adadad_weights[i] + signs self._adadad_biases[i] = self._friction*self._adadad_biases[i] + bias_signs for i in range(len(grads)): if self._model._layer_type == 'Full': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0]*abs(self._adadad_weights[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights self._model.layers[i]._biases += self._learning_rate*grads[i][1]*abs(self._adadad_biases[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases if self._l2_constant and self._l1_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0]*abs(self._adadad_weights[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights - 
self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) self._model.layers[i]._biases += self._learning_rate*grads[i][1]*abs(self._adadad_biases[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0]*abs(self._adadad_stats[i]) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) self._model.layers[i]._biases += self._learning_rate*grads[i][1]*abs(self._adadad_biases[i]) -self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if not self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += self._learning_rate*grads[i][0]*abs(self._adadad_stats[i]) self._model.layers[i]._biases += self._learning_rate*grads[i][1] if self._model._layer_type == 'Sparse': self._model.layers[i]._weights = (self._model.layers[i]._weights + self._learning_rate*grads[i][0]).tocoo() self._model.layers[i]._biases += self._learning_rate*grads[i][1] class adadad_optimizer: """ Adfm is a kind of adaptive gradients optimizer, where gradients that keep moving in the same direction, move faster. Modified from the adadad optimizer I first developed to include a friction constant and momentum parameter. """ def __init__(self, model, learning_rate, friction, momentum = None, l1_constant = None, l2_constant = None, epsilon = 1e-7): self._model = model self._learning_rate = learning_rate if self._model._comp_type == 'CPU': if self._model._layer_type == 'Full': self._adadad_weights = [np.zeros(i._weights.shape) for i in self._model.layers] self._adadad_biases = [np.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': stats = [] if self._model._comp_type == 'GPU': if self._model._layer_type == 'Full': self._adadad_weights = [cp.zeros(i._weights.shape) for i in self._model.layers] self._adadad_biases = [cp.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': stats = [] self._gradients = [] self._l2_constant = l2_constant self._l1_constant = l1_constant self._friction = friction self._momentum = momentum self._epsilon = epsilon self._steps = 0 def train_step(self, inputs, labels): self._steps += 1 grads = get_gradients(self, inputs, labels) for i in range(len(grads)): if self._model._layer_type == 'Full': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights += self._learning_rate*(grads[i][0] + self._adadad_weights[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights self._model.layers[i]._biases += self._learning_rate*(grads[i][1] + self._adadad_biases[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases if self._l2_constant and self._l1_constant: self._model.layers[i]._weights += self._learning_rate*(grads[i][0] + self._adadad_weights[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) self._model.layers[i]._biases += self._learning_rate*(grads[i][1] + self._adadad_biases[i]) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases - 
self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += self._learning_rate*(grads[i][0] + self._adadad_stats[i]) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) self._model.layers[i]._biases += self._learning_rate*(grads[i][1] + self._adadad_biases[i]) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if not self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += self._learning_rate*(grads[i][0] + self._adadad_stats[i]) self._model.layers[i]._biases += self._learning_rate*grads[i][1] if self._model._layer_type == 'Sparse': self._model.layers[i]._weights = (self._model.layers[i]._weights + self._learning_rate*grads[i][0]).tocoo() self._model.layers[i]._biases += self._learning_rate*grads[i][1] for i in range(len(grads)): #signs = np.sign(grads[i][0]) squares = grads[i][0]*self._adadad_weights[i] bias_squares = grads[i][1]*self._adadad_biases[i] #self._adadad_stats[i] = (np.sign(self._adadad_stats[i][0]) == signs)*self._adadad_stats[i][0] self._adadad_weights[i] = (squares > -self._epsilon)*self._adadad_weights[i] self._adadad_biases[i] = (bias_squares > -self._epsilon)*self._adadad_biases[i] self._adadad_weights[i] = self._adadad_weights[i]*self._friction + grads[i][0] self._adadad_biases[i] = self._adadad_biases[i]*self._friction + grads[i][1] if self._steps < 10: self._adadad_weights[i] *= 0 self._adadad_biases[i] *= 0 class adam_optimizer: """ Adam optimizer with quadratic cost function""" def __init__(self, model, learning_rate, beta1 = 0.9, beta2 = 0.999, epsilon = 10e-8, l1_constant = None, l2_constant = None, bitmasks = None): self._model = model self._learning_rate = learning_rate self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon self._gradients = [] self._l2_constant = l2_constant self._l1_constant = l1_constant if self._model._comp_type == 'CPU': if self._model._layer_type == 'Full': self._weight_m1 = [np.zeros(i._weights.shape) for i in self._model.layers] self._bias_m1 = [np.zeros(i._biases.shape) for i in self._model.layers] self._weight_m2 = [np.zeros(i._weights.shape) for i in self._model.layers] self._bias_m2 = [np.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': print('Sparse Adam not implemented yet') stats = [] if self._model._comp_type == 'GPU': if self._model._layer_type == 'Full': self._weight_m1 = [cp.zeros(i._weights.shape) for i in self._model.layers] self._bias_m1 = [cp.zeros(i._biases.shape) for i in self._model.layers] self._weight_m2 = [cp.zeros(i._weights.shape) for i in self._model.layers] self._bias_m2 = [cp.zeros(i._biases.shape) for i in self._model.layers] if self._model._layer_type == 'Sparse': stats = [] print('Sparse Adam not implemented yet') self._timestep = 0 # Bitmasks for training only on selected parameters, input as full matrices stored on the same device as the weight matrix. 
self._bitmasks = bitmasks def train_step(self, inputs, labels, train_biases = True): grads = get_gradients(self, inputs, labels) grads = [(np.clip(i[0], -1, 1), np.clip(i[1], -1, 1)) for i in grads] if self._bitmasks: for i in range(len(grads)): grads[i][0] = grads[i][0]*self._bitmask[i] self._timestep += 1 co_learning_rate = self._learning_rate*(np.sqrt(1 - self._beta2**self._timestep)/(1-self._beta1**self._timestep)) for i in range(len(grads)): self._weight_m1[i] = self._beta1*self._weight_m1[i] + (1-self._beta1)*grads[i][0] self._bias_m1[i] = self._beta1*self._bias_m1[i] + (1-self._beta1)*grads[i][1] self._weight_m2[i] = self._beta2*self._weight_m2[i] + (1-self._beta2)*grads[i][0]*grads[i][0] self._bias_m2[i] = self._beta2*self._bias_m2[i] + (1-self._beta2)*grads[i][1]*grads[i][1] if self._model._layer_type == 'Full': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights += co_learning_rate*self._weight_m1[i]/(np.sqrt(self._weight_m2[i])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights if train_biases: self._model.layers[i]._biases += co_learning_rate*self._bias_m1[i]/(np.sqrt(self._bias_m2[i])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases if self._l2_constant and self._l1_constant: self._model.layers[i]._weights += co_learning_rate*self._weight_m1[i]/(np.sqrt(self._weight_m2[i])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) if train_biases: self._model.layers[i]._biases += co_learning_rate*self._bias_m1[i]/(np.sqrt(self._bias_m2[i])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._biases - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if self._l1_constant and not self._l1_constant: self._model.layers[i]._weights += co_learning_rate*self._weight_m1[i]/(np.sqrt(self._weight_m2[i])+self._epsilon) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights) if train_biases: self._model.layers[i]._biases += co_learning_rate*self._bias_m1[i]/(np.sqrt(self._bias_m2[i])+self._epsilon) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._biases) if not self._l1_constant and not self._l2_constant: self._model.layers[i]._weights += co_learning_rate*self._weight_m1[i]/(np.sqrt(self._weight_m2[i])+self._epsilon) if train_biases: self._model.layers[i]._biases += co_learning_rate*self._bias_m1[i]/(np.sqrt(self._bias_m2[i])+self._epsilon) if self._model._layer_type == 'Sparse': if self._l2_constant and not self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights.data if self._l2_constant and self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[i]._weights.data - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights.data) if self._l1_constant and not self._l1_constant: self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[i]._weights.data) if not self._l1_constant and not self._l2_constant: 
self._model.layers[i]._weights.data += self._learning_rate*grads[i][0] self._model.layers[i]._biases += self._learning_rate*grads[i][1] class selected_adam_optimizer: """ Adam optimizer with quadratic cost function. This one optimizes over a selection of parameters, not optimized for speed yet, just using bitmasks and such. Inputs a list of parameters that will be updated.""" def __init__(self, model, learning_rate, beta1 = 0.9, beta2 = 0.999, epsilon = 10e-8, l1_constant = None, l2_constant = None, backward_layers = None): self._model = model self._learning_rate = learning_rate self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon self._gradients = [] self._l2_constant = l2_constant self._l1_constant = l1_constant if backward_layers == None: self._backward_layers = len(self._model.layers) else: self._backward_layers = backward_layers self._weight_m1 = [] self._bias_m1 = [] self._weight_m2 = [] self._bias_m2 = [] if self._model._comp_type == 'CPU': if self._model._layer_type == 'Full': for i in range(self._backward_layers): self._weight_m1.append(np.zeros(self._model.layers[i+len(self._model.layers) - self._backward_layers]._weights.shape)) self._bias_m1.append(np.zeros(self._model.layers[i+ len(self._model.layers) - self._backward_layers]._biases.shape)) self._weight_m2.append(np.zeros(self._model.layers[i+ len(self._model.layers) - self._backward_layers]._weights.shape)) self._bias_m2.append(np.zeros(self._model.layers[i+ len(self._model.layers) - self._backward_layers]._biases.shape)) if self._model._layer_type == 'Sparse': print('Sparse Adam not implemented yet') stats = [] if self._model._comp_type == 'GPU': if self._model._layer_type == 'Full': for i in range(self._backward_layers): self._weight_m1.append(cp.zeros(self._model.layers[i+len(self._model.layers) - self._backward_layers]._weights.shape)) self._bias_m1.append(cp.zeros(self._model.layers[i + len(self._model.layers) - self._backward_layers]._biases.shape)) self._weight_m2.append(cp.zeros(self._model.layers[i+len(self._model.layers) - self._backward_layers]._weights.shape)) self._bias_m2.append(cp.zeros(self._model.layers[i+len(self._model.layers) - self._backward_layers]._biases.shape)) if self._model._layer_type == 'Sparse': stats = [] print('Sparse Adam not implemented yet') self._timestep = 0 self._layers = None def train_step(self, inputs, labels, layers = None, train_biases = True): grads = get_gradients(self, inputs, labels, self._backward_layers) grads = [(np.clip(i[0], -1, 1), np.clip(i[1], -1, 1)) for i in grads] # Multiply gradients by the sparse bitmasks for i in range(len(grads)): grads[-(i+1)] = (np.multiply(grads[-(i+1)][0], self._model.layers[-(i+1)]._sparse_training_mask), grads[-(i+1)][1]) self._timestep += 1 co_learning_rate = self._learning_rate*(np.sqrt(1 - self._beta2**self._timestep)/(1-self._beta1**self._timestep)) for i in range(self._backward_layers): self._weight_m1[-(i+1)] = self._beta1*self._weight_m1[-(i+1)] + (1-self._beta1)*grads[-(i+1)][0] self._weight_m2[-(i+1)] = self._beta2*self._weight_m2[-(i+1)] + (1-self._beta2)*grads[-(i+1)][0]*grads[-(i+1)][0] if train_biases: self._bias_m1[-(i+1)] = self._beta1*self._bias_m1[-(i+1)] + (1-self._beta1)*grads[-(i+1)][1] self._bias_m2[-(i+1)] = self._beta2*self._bias_m2[-(i+1)] + (1-self._beta2)*grads[-(i+1)][1]*grads[-(i+1)][1] # Use loop to go forwards with the grads/moments arrays and backwards with the layers list. 
if self._model._layer_type == 'Full': if self._l2_constant and not self._l1_constant: self._model.layers[-(i+1)]._weights += co_learning_rate*self._weight_m1[-(i+1)]/(np.sqrt(self._weight_m2[i])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._weights if train_biases: self._model.layers[-(i+1)]._biases += co_learning_rate*self._bias_m1[-(i+1)]/(np.sqrt(self._bias_m2[-(i+1)])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._biases if self._l2_constant and self._l1_constant: self._model.layers[-(i+1)]._weights += co_learning_rate*self._weight_m1[-(i+1)]/(np.sqrt(self._weight_m2[-(i+1)])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._weights - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._weights) if train_biases: self._model.layers[-(i+1)]._biases += co_learning_rate*self._bias_m1[-(i+1)]/(np.sqrt(self._bias_m2[-(i+1)])+self._epsilon) - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._biases - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._biases) if self._l1_constant and not self._l1_constant: self._model.layers[-(i+1)]._weights += co_learning_rate*self._weight_m1[-(i+1)]/(np.sqrt(self._weight_m2[-(i+1)])+self._epsilon) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._weights) if train_biases: self._model.layers[-(i+1)]._biases += co_learning_rate*self._bias_m1[-(i+1)]/(np.sqrt(self._bias_m2[-(i+1)])+self._epsilon) - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._biases) if not self._l1_constant and not self._l2_constant: self._model.layers[-(i+1)]._weights += co_learning_rate*self._weight_m1[-(i+1)]/(np.sqrt(self._weight_m2[-(i+1)])+self._epsilon) if train_biases: self._model.layers[-(i+1)]._biases += co_learning_rate*self._bias_m1[-(i+1)]/(np.sqrt(self._bias_m2[-(i+1)])+self._epsilon) if self._model._layer_type == 'Sparse': if self._l2_constant and not self._l1_constant: self._model.layers[-(i+1)]._weights.data += self._learning_rate*grads[-(i+1)][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._weights.data if self._l2_constant and self._l1_constant: self._model.layers[-(i+1)]._weights.data += self._learning_rate*grads[-(i+1)][0] - self._l2_constant/inputs.shape[0]*self._learning_rate*self._model.layers[-(i+1)]._weights.data - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._weights.data) if self._l1_constant and not self._l1_constant: self._model.layers[-(i+1)]._weights.data += self._learning_rate*grads[-(i+1)][0] - self._l1_constant/inputs.shape[0]*self._learning_rate*np.sign(self._model.layers[-(i+1)]._weights.data) if not self._l1_constant and not self._l2_constant: self._model.layers[-(i+1)]._weights.data += self._learning_rate*grads[-(i+1)][0] self._model.layers[-(i+1)]._biases += self._learning_rate*grads[-(i+1)][1] class subnet_finder: """ First attempt at building an optimizer, only uses quadratic cost function. 
There are parts to this that could be built into othere sections of the library, but I don't know if this will work, and they might just end up bloating the files.""" def __init__(self, model, error_mean = None): self._model = model self._alpha = None self._subnet_size = None # Initialize subnet masks, reset with each step self._subnet_masks = [np.zeros(i._weights.shape) for i in self._model.layers] # Scores initialized at 0, starting point is arbitrary since they are all sorted at the end self._weight_scores = [cp.zeros(i._weights.shape) for i in self._model.layers] self._error_mean = error_mean self._steps = 0 self._above_threshold = 0 def random_train_step(self, inputs, labels, ratio = None, error_type = 'quadratic'): ''' Forward pass with the dropout mask chosen randomly''' if self._model._layer_type == 'Full': this_layer_inputs = inputs if self._model._comp_type == 'GPU': this_layer_inputs = cp.array(this_layer_inputs) for layer in self._model._layers: outputs = layer.activate_NG(inputs = this_layer_inputs, ratio = ratio, distribution = 'binomial') this_layer_inputs = outputs if self._model._comp_type == 'CPU': if error_type == 'quadratic': error = np.mean((outputs - labels)**2) if error_type == 'argmax': error = np.mean(np.argmax(outputs, axis = 1) == np.argmax(labels, axis = 1)) if self._model._comp_type == 'GPU': if error_type == 'quadratic': error = np.mean((outputs - cp.array(labels))**2) if error_type == 'argmax': error = np.mean(np.argmax(outputs, axis = 1) == np.argmax(cp.array(labels), axis = 1)) # Update error mean if error_type == 'quadratic': if self._steps == 0 : self._error_mean = error else: delta = error - self._error_mean self._error_mean += delta/self._steps for i in range(len(self._weight_scores)): self._weight_scores[i] += self._model._layers[i]._dropout_mask*delta self._steps += 1 if error_type == 'argmax': # Use the error mean as the expected value from a random selection #self._error_mean = 1/self._model.layers[-1]._size if error > self._error_mean: self._above_threshold += 1 for i in range(len(self._weight_scores)): self._weight_scores[i] += self._model._layers[i]._dropout_mask*(error - self._error_mean) def gaussian_train_step(self, inputs, labels, temp): ''' More targeted parameter selection''' print('Do the thing') def get_accuracy(self, inputs, labels): if self._model._layer_type == 'Full': this_layer_inputs = inputs if self._model._comp_type == 'GPU': this_layer_inputs = cp.array(this_layer_inputs) for layer in self._model._layers: outputs = layer.activate_NG(inputs = this_layer_inputs, ratio = None, distribution = None) this_layer_inputs = outputs return np.mean(np.argmax(outputs, axis = 1) == np.argmax(cp.array(labels), axis = 1)) def choose_parameters(self, parameter_ratio, layers = None): ''' Sets the mask as ratio% of parameters with the highest scores.''' if layers: for i in layers: # get indices indices = get_k_max([self._weight_scores[i]], parameter_ratio) # make the mask if self._model._comp_type == 'GPU': self._model.layers[i]._dropout_mask = cp.zeros(self._model.layers[i]._weights.shape) if self._model._comp_type == 'CPU': self._model.layers[i]._dropout_mask = np.zeros(self._model.layers[i]._weights.shape) for j in indices: self._model.layers[i]._dropout_mask[j[0]][j[1]] = 1 print('You have set the bitmasks for ', layers,' do not forget to set the rest') else: for i in range(len(self._weight_scores)): # Get indices indices = get_k_max([self._weight_scores[i]], parameter_ratio) # Make the mask if self._model._comp_type == 'GPU': 
self._model.layers[i]._dropout_mask = cp.zeros(self._model.layers[i]._weights.shape) if self._model._comp_type == 'CPU': self._model.layers[i]._dropout_mask = np.zeros(self._model.layers[i]._weights.shape) for j in indices[0]: self._model.layers[i]._dropout_mask[j[0]][j[1]] = 1 return def set_ones_bitmask(self, layers): '''Sets the bitmasks in the given layers to ones''' for i in layers: if self._model._comp_type == 'GPU': self._model.layers[i]._dropout_mask = cp.ones(self._model.layers[i]._weights.shape) if self._model._comp_type == 'CPU': self._model.layers[i]._dropout_mask = np.ones(self._model.layers[i]._weights.shape) ``` #### File: SpaRaNa/sparana/saver.py ```python import numpy as np import cupy as cp import pickle from cupy.sparse import coo_matrix from cupy.sparse import csr_matrix class model_saver: def __init__(self, model): self._model = model if self._model._layer_type == 'Sparse': if self._model._comp_type == 'GPU': self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers] if self._model._comp_type == 'CPU': self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers] if self._model._layer_type == 'Full': if self._model._comp_type == 'GPU': self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers] if self._model._comp_type == 'CPU': self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers] self._sparse_parameters = None def store_model(self): ''' Stores the current state of the model. ''' if self._model._layer_type == 'Sparse': if self.model._comp_type == 'GPU': self._model_arrays = [(i._weights.get(), np.array(i._biases)) for i in self._model._layers] if self._model._comp_type == 'CPU': self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers] if self._model._layer_type == 'Full': if self._model._comp_type == 'GPU': self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers] if self._model._comp_type == 'CPU': self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers] return def restore_model(self): ''' Restores the weights stored in the model saver. ''' if self._model._layer_type == 'Sparse': if self._model._comp_type == 'CPU': for i in range(self._model._depth): self._model._layers[i]._weights = self._model_arrays[i][0].copy() self._model._layers[i]._biases = np.array(self._model_arrays[i][1]) if self._model._comp_type == 'GPU': for i in range(self._model._depth): self._model._layers[i]._weights = cp.sparse.csr_matrix(self._model_arrays[i][0]) self._model._layers[i]._biases = cp.array(self._model_arrays[i][1]) if self._model._layer_type == 'Full': if self._model._comp_type == 'GPU': for i in range(self._model._depth): self._model._layers[i]._weights = cp.array(self._model_arrays[i][0]) self._model._layers[i]._biases = cp.array(self._model_arrays[i][1]) if self._model._comp_type == 'CPU': for i in range(self._model._depth): self._model._layers[i]._weights = np.array(self._model_arrays[i][0]) self._model._layers[i]._biases = np.array(self._model_arrays[i][1]) return def pickle_model(self, filename): ''' Stores the model in a pickle file. ''' pickle.dump(self._model, open(filename, 'wb')) print('Model pickled') return def load_model(self, filename): ''' Loads the model from a pickle file. 
''' filelist = pickle.load(open(filename, 'rb')) if self._model._layer_type == 'Sparse': self._model_arrays = [(i[0].copy(), np.array(i[1])) for i in filelist] if self._model._layer_type == 'Full': for i in range(self._model._depth): self._model.layers[i]._weights = filelist.layers[i]._weights self._model.layers[i]._biases = filelist.layers[i]._biases # Do a check that the layer type matches the weight datatype def load_sparse_parameters(self, filename): ''' Loads sparse parameters into the loader class, and into the model. (I can't think of a real use for loading the parameters into the loader, and model seperately)''' parameters = pickle.load(open(filename, 'rb')) for i in range(len(parameters)): # Put the training masks in the layer objects, TODO turn this into a [0,1] mask self._model._sparse_training_mask = None #parameters[i] # Put the individual weights in the weight matrices for j in range(parameters[i].nnz): self._model._layers[i]._weights[parameters[i].row[j]][parameters[i].col[j]] = parameters[i].data[j] print('Inserted weights from ', filename, ' into the weight matrices') return def store_sparse_parameters(self): ''' This returns the parameters that can be stored in memory in the notebook, use pickle_sparse_parameters after this''' # What format will this give me, I need sparse matrices. parameters = [] for i in self._model._layers: these_parameters = np.multiply(i._weights, i._sparse_training_mask) # Sparsify these_parameters these_parameters = csr_matrix(these_parameters, dtype = np.float32) these_parameters = these_parameters.tocoo() parameters.append((these_parameters, i._biases)) self._sparse_parameters = parameters return def pickle_sparse_parameters(self, filename): ''' Stores the sparse parameters in a pickle file. ''' if self._sparse_parameters == None: print('No parameters stored') return pickle.dump(self._sparse_parameters, open(filename, 'wb')) print('Model pickled') return def restore_sparse_parameters(self): ''' Need a more specific name than sparse parameters. this will take some learning, drop the weights in''' if self._sparse_parameters == None: print('No parameters stored') return for i in range(len(self._sparse_parameters)): # Put the training masks in the layer objects, TODO turn this into a [0,1] mask #self._model._sparse_training_mask = None #parameters[i] # Put the individual weights in the weight matrices for j in range(self._sparse_parameters[i][0].nnz): self._model._layers[i]._weights[int(self._sparse_parameters[i][0].row[j])][int(self._sparse_parameters[i][0].col[j])] = self._sparse_parameters[i][0].data[j] # Replace full arrays, test #self._model._layers[i]._weights = np.multiply(self._model._layers[i]._weights, (self._sparse_parameters[i][0] == 0)) #self._model._layers[i]._weights = self._model._layers[i]._weights + self._sparse_parameters[i][0] self._model._layers[i]._biases = self._sparse_parameters[i][1] print('Sparse parameters restored') return ```
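The classes above are intended to be chained: score parameters with a `lobotomizer`, prune, train with an optimizer, and checkpoint with a `model_saver`. The snippet below is a hypothetical usage sketch rather than part of the library; it assumes an already-constructed sparana `model` instance (the model constructor is not shown in this excerpt) plus NumPy batches `train_inputs`, `train_labels` and `stat_batches`, and only calls methods whose signatures appear in the files above.

```python
# Hypothetical end-to-end sketch: train, prune, and checkpoint a sparana model.
# Assumes `model` is an existing sparana.model.model instance and that
# train_inputs/train_labels/stat_batches are NumPy batches prepared elsewhere.
from sparana.lobotomizer import lobotomizer
from sparana.optimizer import adam_optimizer
from sparana.saver import model_saver

opt = adam_optimizer(model, learning_rate=1e-3)
saver = model_saver(model)

# A few optimization steps.
for inputs, labels in zip(train_inputs, train_labels):
    opt.train_step(inputs, labels)

# Keep a copy of the trained weights so pruning can be rolled back.
saver.store_model()

# Score parameters by mean absolute activated value, then prune the smallest 20%
# of weights in every layer except the last.
lobo = lobotomizer(model)
lobo.get_MAAV(stat_batches)
lobo.prune_smallest(prune_ratio=0.2, print_stats=True)

# Persist the pruned model, or restore the stored weights if accuracy drops too far.
saver.pickle_model('pruned_model.pkl')
# saver.restore_model()
```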
{ "source": "jngaravitoc/nba", "score": 3 }
#### File: nba/com/com_methods.py ```python import numpy as np def re_center(vec, cm): """ Subtract a vector from a each dimension of another vector, this is done to recenter a halo positions and velocities to its center of mass. Input: ------ vec : numpy.array A numpy array to which substract the vector cm cm : numpy array A numpy 1d array with Output: ------- numpy.array A new vector with a subtracted vector in each dimension. """ #assert len(vec)==len(cm), "Make sure the len of your N-vector is the same as your 1d vector" new_vec = np.copy(vec) for i in range(len(cm)): new_vec[:,i] = vec[:,i] - cm[i] return new_vec def com_disk_potential(xyz, vxyz, Pdisk): V_radius = 2 vx = vxyz[:,0] vy = vxyz[:,1] vz = vxyz[:,2] x = xyz[:,0] y = xyz[:,1] z = xyz[:,2] min_pot = np.where(Pdisk==min(Pdisk))[0] x_min = x[min_pot] y_min = y[min_pot] z_min = z[min_pot] # This >2.0 corresponds to the radius in kpc of the particles that # I am taking into account to compute the CM avg_particles = np.where(np.sqrt((x-x_min)**2.0 + (y-y_min)**2.0 + (z-z_min)**2.0)<V_radius)[0] x_cm = sum(x[avg_particles])/len(avg_particles) y_cm = sum(y[avg_particles])/len(avg_particles) z_cm = sum(z[avg_particles])/len(avg_particles) vx_cm = sum(vx[avg_particles])/len(avg_particles) vy_cm = sum(vy[avg_particles])/len(avg_particles) vz_cm = sum(vz[avg_particles])/len(avg_particles) return np.array([x_cm, y_cm, z_cm]), np.array([vx_cm, vy_cm, vz_cm]) def velocities_com(cm_pos, pos, vel, r_cut=20): """ Function to compute the COM velocity in a sphere of 20 kpc """ # Compute the distance with respect to the COM R_cm = ((pos[:,0]-cm_pos[0])**2 + (pos[:,1]-cm_pos[1])**2 + (pos[:,2]-cm_pos[2])**2)**0.5 # Select the particles inside 15 kpc index = np.where(R_cm < r_cut)[0] # Compute the velocities of the COM: velx_cm = np.sum(vel[index,0])/len(vel[index,0]) vely_cm = np.sum(vel[index,1])/len(vel[index,1]) velz_cm = np.sum(vel[index,2])/len(vel[index,2]) return np.array([velx_cm, vely_cm, velz_cm]) def mean_pos(xyz, vxyz, m): """ Returns the COM positions and velocities. .. math:: \vec{R} = \sum_i^N m_i \vec{r_i} / N """ # Number of particles N = np.sum(m) xCOM = np.sum(xyz[:,0]*m)/N yCOM = np.sum(xyz[:,1]*m)/N zCOM = np.sum(xyz[:,2]*m)/N vxCOM = np.sum(vxyz[:,0]*m)/N vyCOM = np.sum(vxyz[:,1]*m)/N vzCOM = np.sum(vxyz[:,2]*m)/N return np.array([xCOM, yCOM, zCOM]), np.array([vxCOM, vyCOM, vzCOM]) def shrinking_sphere(xyz, vxyz, m, delta=0.025): """ Compute the center of mass coordinates and velocities of a halo using the Shrinking Sphere Method Power et al 2003. It iterates in radii until reach a convergence given by delta of 1% of the total number of particles while there are more than 1000 particles. Parameters ----------- xyz: numpy.array cartesian coordinates with shape (n,3) vxys: numpy.array cartesian velocities with shape (n,3) delta: float, optional Convergence of the COM computation in the same units of `xyz', D=0.025 Returns -------- rcm: numpy.array Arrays containing the coordinate of the center of mass with respect to a (0,0,0) point. vcm: numpy.array Arrays containing the velocities of the center of mass with respect to a (0,0,0) point. References ----------- .. [1] <NAME>., ``The inner structure of ΛCDM haloes - I. A numerical convergence study", MNRAS, vol. 338, no. 1, pp. 14–34, 2003. doi:10.1046/j.1365-8711.2003.05925.x. 
""" N_i = len(xyz) N = N_i xCM = 0.0 yCM = 0.0 zCM = 0.0 rCOM, vCOM = mean_pos(xyz, vxyz, m) xCM_new, yCM_new, zCM_new = rCOM vxCM_new, vyCM_new, vzCM_new = vCOM while (((np.sqrt((xCM_new-xCM)**2 + (yCM_new-yCM)**2 + (zCM_new-zCM)**2) > delta) & (N>N_i*0.01)) | (N>1000)): xCM = xCM_new yCM = yCM_new zCM = zCM_new # Re-centering sphere R = np.sqrt((xyz[:,0]-xCM_new)**2 + (xyz[:,1]-yCM_new)**2 + (xyz[:,2]-zCM_new)**2) Rmax = np.max(R) # Reducing Sphere by its 2.5% index = np.where(R<Rmax*0.975)[0] xyz = xyz[index] vxyz = vxyz[index] m = m[index] N = len(xyz) #Computing new CM coordinates and velocities rCOM, vCOM = mean_pos(xyz, vxyz, m) xCM_new, yCM_new, zCM_new = rCOM vxCM_new, vyCM_new, vzCM_new = vCOM vxCM_new, vyCM_new, vzCM_new = velocities_com([xCM_new, yCM_new, zCM_new], xyz, vxyz) return np.array([xCM_new, yCM_new, zCM_new]), np.array([vxCM_new, vyCM_new, vzCM_new]) ``` #### File: nba/orbits/halo_orbit.py ```python import numpy as np import sys from nba.ios.io_snaps import halo_ids, load_snapshot from nba.ios import get_com def orbit(snapname, ninit, nfinal, com_frame, galaxy, N_halo_part, snapformat, com_method): """ Computes the COM for a sequence of snapshots. """ pos_com = np.zeros((nfinal-ninit+1, 3)) vel_com = np.zeros((nfinal-ninit+1, 3)) for k in range(ninit, nfinal+1): all_ids = load_snapshot(snapname+'_{:03d}'.format(k), snapformat, 'pid', 'dm') print(len(all_ids)) ids = halo_ids(all_ids, N_halo_part, galaxy) all_pos = load_snapshot(snapname+'_{:03d}'.format(k), snapformat, 'pos', 'dm') all_vel = load_snapshot(snapname+'_{:03d}'.format(k), snapformat, 'vel', 'dm') all_mass = load_snapshot(snapname+'_{:03d}'.format(k), snapformat, 'mass', 'dm') pos = all_pos[ids] vel = all_vel[ids] mass = all_mass[ids] pos_com[k-ninit], vel_com[k-ninit] = get_com(pos, vel, mass, com_method, snapname+'_{:03d}'.format(k), snapformat) return pos_com, vel_com if __name__ == '__main__': # Define variables # including the path of the snapshot snapshot = "/mnt/home/nico/ceph/gadget_runs/MWLMC/MWLMC5/out/" # sys.argv[1] out_name = 'MWLMC5_100M_b0_vir_OM3_G4' #sys.argv[2] init_snap = 0 #int(sys.argv[3]) final_snap = 10 # int(sys.argv[4]) snap_format = 3 # gadget4 - hdf5 com_method1 = 'shrinking' com_method2 = 'diskpot' nhost=100000000 nsat=15000000 pos_com_host, vel_com_host = orbit(snapshot+out_name, init_snap, final_snap, 0, 0, [nhost, nsat], snap_format, com_method2) pos_com_sat, vel_com_sat = orbit(snapshot+out_name, init_snap, final_snap, 1, 1, [nhost, nsat], snap_format, com_method1) # Save data np.savetxt(out_name, np.array([pos_com_host[:,0], pos_com_host[:,1], pos_com_host[:,2], vel_com_host[:,0], vel_com_host[:,1], vel_com_host[:,2], pos_com_sat[:,0], pos_com_sat[:,1], pos_com_sat[:,2], vel_com_sat[:,0], vel_com_sat[:,1], vel_com_sat[:,2]]).T) ```
{ "source": "jngod2011/asset_pricing_code", "score": 3 }
#### File: asset_pricing_code/ez_code/stability_plots.py ```python import numpy as np import matplotlib.pyplot as plt from utility_solver import compute_recursive_utility import unicodedata def stability_plot(ModelClass, param1, # string p1_min, # min value for param1 p1_max, # min value for param1 param2, # string p2_min, # min value for param2 p2_max, # min value for param2 xlabel=None, ylabel=None, w_star_guess=None, coords=(-225, 30), # relative location of text G=3, one_step=False): # grid size for x and y axes # Normalize unicode identifiers param1 = unicodedata.normalize('NFKC', param1) param2 = unicodedata.normalize('NFKC', param2) # Allocate arrays, set up parameter grid R = np.empty((G, G)) # Get default parameter vals for param1 and param2 md = ModelClass() param1_value = md.__getattribute__(param1) param2_value = md.__getattribute__(param2) # Compute utility at default values if w_star_guess is not None: md.w_star_guess[:] = w_star_guess else: compute_recursive_utility(md) # Set up grid for param1 and param2 x_vals = np.linspace(p1_min, p1_max, G) y_vals = np.linspace(p2_min, p2_max, G) w = np.copy(md.w_star_guess) # Loop through parameters computing test coefficient for i, x in enumerate(x_vals): for j, y in enumerate(y_vals): # Create a new instance and take w_star_guess from # the last instance. Set parameters. md_previous = md md = ModelClass(build_grids=True) md.w_star_guess[:] = md_previous.w_star_guess md.__setattr__(param1, x) md.__setattr__(param2, y) if md.utility_params_differ(md_previous): compute_recursive_utility(md) if one_step: sr = md.compute_suped_spec_rad(n=1, num_reps=8000) r = sr() else: sr = md.compute_spec_rad_of_V(n=1000, num_reps=8000) r = sr() R[i, j] = r # Now the plot point_location=(param1_value, param2_value) fig, ax = plt.subplots(figsize=(10, 5.7)) cs1 = ax.contourf(x_vals, y_vals, R.T, alpha=0.5) ctr1 = ax.contour(x_vals, y_vals, R.T, levels=[1.0]) plt.clabel(ctr1, inline=1, fontsize=13) plt.colorbar(cs1, ax=ax, format="%.6f") if ModelClass.__name__ == 'BY': print_name = 'Bansal-Yaron' else: print_name = 'Schorfheide-Song-Yaron' ax.annotate(print_name, xy=point_location, xycoords="data", xytext=coords, textcoords="offset points", fontsize=12, arrowprops={"arrowstyle" : "->"}) ax.plot(*point_location, "ko", alpha=0.6) if one_step: title = "One step contraction coefficient" else: title = "Spectral radius" ax.set_title(title) if xlabel is None: xlabel = param1 ax.set_xlabel(xlabel, fontsize=16) if ylabel is None: ylabel = param2 ax.set_ylabel(ylabel, fontsize=16) ax.ticklabel_format(useOffset=False) model_type = ModelClass.__name__ if one_step: filename = param1 + param2 + "model_type" + "_onestep_" + ".pdf" else: filename = param1 + param2 + "model_type" + "_" + ".pdf" plt.savefig("pdfs/" + filename) plt.show() ``` #### File: asset_pricing_code/mehra_prescott_code_asset_pricing/mp_model.py ```python import numpy as np from numpy import sqrt, exp from scipy.stats import norm inv_sqrt_2pi = 1 / sqrt(2 * np.pi) class MehraPrescott: """ Represents the model. """ def __init__(self, β=0.99, γ=2.5, ρ=0.941, σ=0.000425, # Conditional volatility b=0.00104): # Conditional mean self.β, self.γ, self.ρ, self.σ, self.b = β, γ, ρ, σ, b # Parameters in the stationary distribution self.svar = σ**2 / (1 - ρ**2) self.ssd = np.sqrt(self.svar) self.smean = self.b / (1 - ρ) def sim_state(self, x0=None, num_paths=1000, ts_length=1000): """ Simulate the state process. If x0 is None, then draw from the stationary distribution. 
""" ρ, b, σ = self.ρ, self.b, self.σ X = np.ones((num_paths, ts_length)) W = np.random.randn(num_paths, ts_length) if x0 is None: X[:, 0] = self.smean else: X[:, 0] = x0 for t in range(ts_length-1): X[:, t+1] = ρ * X[:, t] + b + σ * W[:, t+1] return X def spec_rad_sim(self, num_paths=1000, ts_length=1000): β, γ = self.β, self.γ X = self.sim_state(num_paths=num_paths, ts_length=ts_length) A = β * np.exp((1 - γ) * X) A = np.prod(A, axis=1) return A.mean()**(1/ts_length) def spec_rad_analytic(self): # Unpack parameters β, γ, ρ, σ = self.β, self.γ, self.ρ, self.σ b = self.b k1 = 1 - γ s = k1 * b / (1 - ρ) t = k1**2 * σ**2 / (2 * (1 - ρ)**2) return β * exp(s + t) ```
{ "source": "jngrb/montepython_public", "score": 2 }
#### File: likelihoods/euclid_lensing/__init__.py ```python from montepython.likelihood_class import Likelihood import io_mp import scipy.integrate from scipy import interpolate as itp import os import numpy as np import math # Adapted from <NAME> class euclid_lensing(Likelihood): def __init__(self, path, data, command_line): Likelihood.__init__(self, path, data, command_line) # Force the cosmological module to store Pk for redshifts up to # max(self.z) self.need_cosmo_arguments(data, {'output': 'mPk'}) self.need_cosmo_arguments(data, {'z_max_pk': self.zmax}) # Force the cosmological module to store Pk for k up to an arbitrary # number self.need_cosmo_arguments(data, {'P_k_max_1/Mpc': self.k_max}) # Define array of l values, and initialize them # It is a logspace self.l = np.exp(self.dlnl*np.arange(self.nlmax)) ######################################################## # Find distribution of dn_dz (not normalized) in each bin ######################################################## # Assuming each bin contains the same number of galaxies, we find the # bin limits in z space # Compute the total number of galaxies until zmax (no normalization # yet), that is the integral of the galaxy distribution function from 0 # to self.zmax n_tot, error = scipy.integrate.quad( self.galaxy_distribution, 0, self.zmax) assert error <= 1e-7, ( "The integration of the galaxy distribution is not as " "precise as expected.") # For each bin, compute the limit in z space # Create the array that will contain the z boundaries for each bin. The # first value is already correctly set to 0. self.z_bin_edge = np.zeros(self.nbin+1, 'float64') for Bin in xrange(self.nbin-1): bin_count = 0. z = self.z_bin_edge[Bin] while (bin_count <= n_tot/self.nbin): gd_1 = self.galaxy_distribution(z) gd_2 = self.galaxy_distribution(z+self.dz) bin_count += 0.5*(gd_1+gd_2)*self.dz z += self.dz self.z_bin_edge[Bin+1] = z self.z_bin_edge[self.nbin] = self.zmax # Fill array of discrete z values self.z = np.linspace(0, self.zmax, num=self.nzmax) # Fill distribution for each bin (convolving with photo_z distribution) self.eta_z = np.zeros((self.nzmax, self.nbin), 'float64') gal = self.galaxy_distribution(self.z, True) for Bin in xrange(self.nbin): low = self.z_bin_edge[Bin] hig = self.z_bin_edge[Bin+1] for nz in xrange(self.nzmax): z = self.z[nz] integrand = gal*self.photo_z_distribution(z, self.z, True) integrand = np.array([ elem if low <= self.z[index] <= hig else 0 for index, elem in enumerate(integrand)]) self.eta_z[nz, Bin] = scipy.integrate.trapz( integrand, self.z) # integrate eta(z) over z (in view of normalizing it to one) self.eta_norm = np.zeros(self.nbin, 'float64') for Bin in xrange(self.nbin): self.eta_norm[Bin] = np.sum(0.5*( self.eta_z[1:, Bin]+self.eta_z[:-1, Bin])*( self.z[1:]-self.z[:-1])) ################ # Noise spectrum ################ # Number of galaxies per steradian self.noise = 3600.*self.gal_per_sqarcmn*(180./math.pi)**2 # Number of galaxies per steradian per bin self.noise = self.noise/self.nbin # Noise spectrum (diagonal in bin*bin space, independent of l and Bin) self.noise = self.rms_shear**2/self.noise ########### # Read data ########### # If the file exists, initialize the fiducial values # It has been stored flat, so we use the reshape function to put it in # the right shape. 
self.Cl_fid = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64') self.fid_values_exist = False fid_file_path = os.path.join(self.data_directory, self.fiducial_file) if os.path.exists(fid_file_path): self.fid_values_exist = True flat_Cl = np.loadtxt(fid_file_path) self.Cl_fid = flat_Cl.reshape((self.nlmax, self.nbin, self.nbin)) return def galaxy_distribution(self, z, array=False): """ Galaxy distribution returns the function D(z) from the notes If the array flag is set to True, z is then interpretated as an array, and not as a single value. """ zmean = 0.9 z0 = zmean/1.412 if not array: galaxy_dist = z**2*math.exp(-(z/z0)**(1.5)) else: return z**2*np.exp(-(z/z0)**(1.5)) return galaxy_dist def photo_z_distribution(self, z, zph, array=True): """ Photo z distribution If the array flag is set to True, z is then interpretated as an array, and not as a single value. """ # Standard error on dz/(1+z) sigma_ph = 0.05 # Note: you must normalize it yourself to one if you want to get nice # plots of the galaxy distribution function in each bin (otherwise, the # spectra will remain correct, but each D_i(x) will loot strangely # normalized when compared to the original D(z) if not array: photo_z_dist = math.exp(-0.5*( (z-zph)/sigma_ph/(1.+z))**2)/sigma_ph/(1.+z)/math.sqrt( 2.*math.pi) else: photo_z_dist = np.exp(-0.5*( (z-zph)/sigma_ph/(1.+z))**2)/sigma_ph/(1.+z)/math.sqrt( 2.*math.pi) return photo_z_dist def loglkl(self, cosmo, data): # One wants to obtain here the relation between z and r, this is done # by asking the cosmological module with the function z_of_r self.r = np.zeros(self.nzmax, 'float64') self.dzdr = np.zeros(self.nzmax, 'float64') self.r, self.dzdr = cosmo.z_of_r(self.z) # Compute now the selection function eta(r) = eta(z) dz/dr normalized # to one. The np.newaxis helps to broadcast the one-dimensional array # dzdr to the proper shape. Note that eta_norm is also broadcasted as # an array of the same shape as eta_z self.eta_r = self.eta_z*(self.dzdr[:, np.newaxis]/self.eta_norm) # Compute function g_i(r), that depends on r and the bin # g_i(r) = 2r(1+z(r)) int_0^+\infty drs eta_r(rs) (rs-r)/rs # TODO is the integration from 0 or r ? g = np.zeros((self.nzmax, self.nbin), 'float64') for Bin in xrange(self.nbin): for nr in xrange(1, self.nzmax-1): fun = self.eta_r[nr:, Bin]*(self.r[nr:]-self.r[nr])/self.r[nr:] g[nr, Bin] = np.sum(0.5*( fun[1:]+fun[:-1])*(self.r[nr+1:]-self.r[nr:-1])) g[nr, Bin] *= 2.*self.r[nr]*(1.+self.z[nr]) # Get power spectrum P(k=l/r,z(r)) from cosmological module pk = np.zeros((self.nlmax, self.nzmax), 'float64') for index_l in xrange(self.nlmax): for index_z in xrange(1, self.nzmax): if (self.l[index_l]/self.r[index_z] > self.k_max): raise io_mp.LikelihoodError( "you should increase euclid_lensing.k_max up to at" "least %g" % self.l[index_l]/self.r[index_z]) pk[index_l, index_z] = cosmo.pk( self.l[index_l]/self.r[index_z], self.z[index_z]) # Recover the non_linear scale computed by halofit. If no scale was # affected, set the scale to one, and make sure that the nuisance # parameter epsilon is set to zero k_sigma = np.zeros(self.nzmax, 'float64') if (cosmo.nonlinear_method == 0): k_sigma[:] = 1.e6 else: k_sigma = cosmo.nonlinear_scale(self.z, self.nzmax) # Define the alpha function, that will characterize the theoretical # uncertainty. 
Chosen to be 0.001 at low k, raise between 0.1 and 0.2 # to self.theoretical_error alpha = np.zeros((self.nlmax, self.nzmax), 'float64') # self.theoretical_error = 0.1 if self.theoretical_error != 0: for index_l in range(self.nlmax): k = self.l[index_l]/self.r[1:] alpha[index_l, 1:] = np.log(1.+k[:]/k_sigma[1:])/( 1.+np.log(1.+k[:]/k_sigma[1:]))*self.theoretical_error # recover the e_th_nu part of the error function e_th_nu = self.coefficient_f_nu*cosmo.Omega_nu/cosmo.Omega_m() # Compute the Error E_th_nu function if 'epsilon' in self.use_nuisance: E_th_nu = np.zeros((self.nlmax, self.nzmax), 'float64') for index_l in range(1, self.nlmax): E_th_nu[index_l, :] = np.log( 1.+self.l[index_l]/k_sigma[:]*self.r[:]) / ( 1.+np.log(1.+self.l[index_l]/k_sigma[:]*self.r[:]))*e_th_nu # Add the error function, with the nuisance parameter, to P_nl_th, if # the nuisance parameter exists for index_l in range(self.nlmax): epsilon = data.mcmc_parameters['epsilon']['current']*( data.mcmc_parameters['epsilon']['scale']) pk[index_l, :] *= (1.+epsilon*E_th_nu[index_l, :]) # Start loop over l for computation of C_l^shear Cl_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64') Cl = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64') # Start loop over l for computation of E_l if self.theoretical_error != 0: El_integrand = np.zeros((self.nzmax, self.nbin, self.nbin), 'float64') El = np.zeros((self.nlmax, self.nbin, self.nbin), 'float64') for nl in xrange(self.nlmax): # find Cl_integrand = (g(r) / r)**2 * P(l/r,z(r)) for Bin1 in xrange(self.nbin): for Bin2 in xrange(self.nbin): Cl_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*g[1:, Bin2]/( self.r[1:]**2)*pk[nl, 1:] if self.theoretical_error != 0: El_integrand[1:, Bin1, Bin2] = g[1:, Bin1]*( g[1:, Bin2])/( self.r[1:]**2)*pk[nl, 1:]*alpha[nl, 1:] # Integrate over r to get C_l^shear_ij = P_ij(l) # C_l^shear_ij = 9/16 Omega0_m^2 H_0^4 \sum_0^rmax dr (g_i(r) # g_j(r) /r**2) P(k=l/r,z(r)) # It it then multiplied by 9/16*Omega_m**2 to be in units of Mpc**4 # and then by (h/2997.9)**4 to be dimensionless for Bin1 in xrange(self.nbin): for Bin2 in xrange(self.nbin): Cl[nl, Bin1, Bin2] = np.sum(0.5*( Cl_integrand[1:, Bin1, Bin2] + Cl_integrand[:-1, Bin1, Bin2])*( self.r[1:]-self.r[:-1])) Cl[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2 Cl[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4 if self.theoretical_error != 0: El[nl, Bin1, Bin2] = np.sum(0.5*( El_integrand[1:, Bin1, Bin2] + El_integrand[:-1, Bin1, Bin2])*( self.r[1:]-self.r[:-1])) El[nl, Bin1, Bin2] *= 9./16.*(cosmo.Omega_m())**2 El[nl, Bin1, Bin2] *= (cosmo.h()/2997.9)**4 if Bin1 == Bin2: Cl[nl, Bin1, Bin2] += self.noise # Write fiducial model spectra if needed (exit in that case) if self.fid_values_exist is False: # Store the values now, and exit. 
fid_file_path = os.path.join( self.data_directory, self.fiducial_file) with open(fid_file_path, 'w') as fid_file: fid_file.write('# Fiducial parameters') for key, value in data.mcmc_parameters.iteritems(): fid_file.write( ', %s = %.5g' % (key, value['current']*value['scale'])) fid_file.write('\n') for nl in range(self.nlmax): for Bin1 in range(self.nbin): for Bin2 in range(self.nbin): fid_file.write("%.8g\n" % Cl[nl, Bin1, Bin2]) print '\n\n /|\ Writing fiducial model in {0}'.format( fid_file_path) print '/_o_\ for {0} likelihood'.format(self.name) return 1j # Now that the fiducial model is stored, we add the El to both Cl and # Cl_fid (we create a new array, otherwise we would modify the # self.Cl_fid from one step to the other) # Spline Cl[nl,Bin1,Bin2] along l spline_Cl = np.empty((self.nbin, self.nbin), dtype=(list, 3)) for Bin1 in xrange(self.nbin): for Bin2 in xrange(Bin1, self.nbin): spline_Cl[Bin1, Bin2] = list(itp.splrep( self.l, Cl[:, Bin1, Bin2])) if Bin2 > Bin1: spline_Cl[Bin2, Bin1] = spline_Cl[Bin1, Bin2] # Spline El[nl,Bin1,Bin2] along l if self.theoretical_error != 0: spline_El = np.empty((self.nbin, self.nbin), dtype=(list, 3)) for Bin1 in xrange(self.nbin): for Bin2 in xrange(Bin1, self.nbin): spline_El[Bin1, Bin2] = list(itp.splrep( self.l, El[:, Bin1, Bin2])) if Bin2 > Bin1: spline_El[Bin2, Bin1] = spline_El[Bin1, Bin2] # Spline Cl_fid[nl,Bin1,Bin2] along l spline_Cl_fid = np.empty((self.nbin, self.nbin), dtype=(list, 3)) for Bin1 in xrange(self.nbin): for Bin2 in xrange(Bin1, self.nbin): spline_Cl_fid[Bin1, Bin2] = list(itp.splrep( self.l, self.Cl_fid[:, Bin1, Bin2])) if Bin2 > Bin1: spline_Cl_fid[Bin2, Bin1] = spline_Cl_fid[Bin1, Bin2] # Compute likelihood # Prepare interpolation for every integer value of l, from the array # self.l, to finally compute the likelihood (sum over all l's) dof = 1./(int(self.l[-1])-int(self.l[0])+1) ells = range(int(self.l[0]), int(self.l[-1])+1) # Define cov theory, observ and error on the whole integer range of ell # values Cov_theory = np.zeros((len(ells), self.nbin, self.nbin), 'float64') Cov_observ = np.zeros((len(ells), self.nbin, self.nbin), 'float64') Cov_error = np.zeros((len(ells), self.nbin, self.nbin), 'float64') for Bin1 in xrange(self.nbin): for Bin2 in xrange(Bin1, self.nbin): Cov_theory[:, Bin1, Bin2] = itp.splev( ells, spline_Cl[Bin1, Bin2]) Cov_observ[:, Bin1, Bin2] = itp.splev( ells, spline_Cl_fid[Bin1, Bin2]) if self.theoretical_error > 0: Cov_error[:, Bin1, Bin2] = itp.splev( ells, spline_El[Bin1, Bin2]) if Bin2 > Bin1: Cov_theory[:, Bin2, Bin1] = Cov_theory[:, Bin1, Bin2] Cov_observ[:, Bin2, Bin1] = Cov_observ[:, Bin1, Bin2] Cov_error[:, Bin2, Bin1] = Cov_error[:, Bin1, Bin2] chi2 = 0. 
# TODO parallelize this for index, ell in enumerate(ells): det_theory = np.linalg.det(Cov_theory[index, :, :]) det_observ = np.linalg.det(Cov_observ[index, :, :]) if (self.theoretical_error > 0): det_cross_err = 0 for i in range(self.nbin): newCov = np.copy(Cov_theory) newCov[:, i] = Cov_error[:, i] det_cross_err += np.linalg.det(newCov) # Newton method # Find starting point for the method: start = 0 step = 0.001*det_theory/det_cross_err error = 1 old_chi2 = -1.*data.boundary_loglike error_tol = 0.01 epsilon_l = start while error > error_tol: vector = np.array([epsilon_l-step, epsilon_l, epsilon_l+step]) # Computing the function on three neighbouring points function_vector = np.zeros(3, 'float64') for k in range(3): Cov_theory_plus_error = Cov_theory+vector[k]*Cov_error det_theory_plus_error = np.linalg.det( Cov_theory_plus_error) det_theory_plus_error_cross_obs = 0 for i in range(self.nbin): newCov = np.copy(Cov_theory_plus_error) newCov[:, i] = Cov_observ[:, i] det_theory_plus_error_cross_obs += np.linalg.det( newCov) function_vector[k] = (2.*ell+1.)*self.fsky*(det_theory_plus_error_cross_obs/det_theory_plus_error + math.log(det_theory_plus_error/det_observ) - self.nbin ) + dof*vector[k]**2 # Computing first first_d = (function_vector[2]-function_vector[0]) / (vector[2]-vector[0]) second_d = (function_vector[2]+function_vector[0]-2*function_vector[1]) / (vector[2]-vector[1])**2 # Updating point and error epsilon_l = vector[1] - first_d/second_d error = abs(function_vector[1] - old_chi2) old_chi2 = function_vector[1] # End Newton Cov_theory_plus_error = Cov_theory + epsilon_l * Cov_error det_theory_plus_error = np.linalg.det(Cov_theory_plus_error) det_theory_plus_error_cross_obs = 0 for i in range(self.nbin): newCov = np.copy(Cov_theory_plus_error) newCov[:, i] = Cov_observ[:, i] det_theory_plus_error_cross_obs += np.linalg.det(newCov) chi2 += (2.*ell+1.)*self.fsky*(det_theory_plus_error_cross_obs/det_theory_plus_error + math.log(det_theory_plus_error/det_observ) - self.nbin ) + dof*epsilon_l**2 else: det_cross = 0. for i in xrange(self.nbin): newCov = np.copy(Cov_theory[index, :, :]) newCov[:, i] = Cov_observ[index, :, i] det_cross += np.linalg.det(newCov) chi2 += (2.*ell+1.)*self.fsky*(det_cross/det_theory + math.log(det_theory/det_observ) - self.nbin) # Finally adding a gaussian prior on the epsilon nuisance parameter, if # present if 'epsilon' in self.use_nuisance: epsilon = data.mcmc_parameters['epsilon']['current'] * \ data.mcmc_parameters['epsilon']['scale'] chi2 += epsilon**2 return -chi2/2. ```
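The per-multipole term accumulated into `chi2` above (the branch without the theoretical-error nuisance) can be isolated into a small stand-alone sketch. The toy 2x2 matrices and the `fsky` value below are invented for illustration; in the likelihood they are the binned shear spectra for the theory and fiducial models.

```python
import numpy as np

def chi2_term(ell, fsky, cov_theory, cov_observ):
    """One ell's contribution: (2l+1) fsky (d_mix/d_th + ln(d_th/d_obs) - nbin)."""
    nbin = cov_theory.shape[0]
    det_theory = np.linalg.det(cov_theory)
    det_observ = np.linalg.det(cov_observ)
    det_cross = 0.0
    for i in range(nbin):                  # swap one column at a time with the observed one
        mixed = np.copy(cov_theory)
        mixed[:, i] = cov_observ[:, i]
        det_cross += np.linalg.det(mixed)
    return (2.0 * ell + 1.0) * fsky * (
        det_cross / det_theory + np.log(det_theory / det_observ) - nbin)

theory = np.array([[2.0, 0.30], [0.30, 1.5]])   # toy numbers only
observ = np.array([[2.1, 0.25], [0.25, 1.4]])
print(chi2_term(ell=100, fsky=0.375, cov_theory=theory, cov_observ=observ))
```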
{ "source": "jnguyen1098/gryph-graph", "score": 3 }
#### File: jnguyen1098/gryph-graph/graph.py ```python import pygraphviz import wyvern import random import pydot import sys import csv import re prereq_reg = re.compile(r'[A-Z]{2,4}\*[0-9]{4}') restrict_reg = re.compile(r'[A-Z]{2,4}\*[0-9]{4}') class Course: def __init__(self, name, prereqs, restricts): self.name = name self.prereqs = prereqs self.restricts = restricts def __repr__(self): return f'{self.name}[{self.prereqs}][{self.restricts}]' def parse_prereqs(prereq_string): matches = re.findall(prereq_reg, prereq_string) return matches def parse_restricts(restrict_string): matches = re.findall(restrict_reg, restrict_string) return matches def main(argv): luck = 10 # [0,100]% chance of anchoring a node #seems like bad luck to me! if len(argv) != 3: print("Usage:", sys.argv[0], "csvfile outputname") exit() courses = {} svg_path = argv[1] with open(svg_path) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = -1 for row in csv_reader: courses[row[0]] = Course(row[0], row[9], row[11]) line_count += 1 print(f'Processed {line_count} courses') G = pygraphviz.AGraph(directed=True) G.layout(prog='dot') for key, value in courses.items(): if value.name != "Common Name": prereqs = parse_prereqs(value.prereqs) restricts = parse_restricts(value.restricts) for prereq in prereqs: G.add_edge(prereq, value.name, constraint=(random.randint(0, 100) > luck)) for restrict in restricts: G.add_edge(restrict, value.name, color='red', constraint=(random.randint(0, 100) > luck)) print(f'{key}:{prereqs}:{restricts}') G.write(f'{argv[2]}.gv') graphs = pydot.graph_from_dot_file(f'{argv[2]}.gv') graphs[0].write_svg(f'{argv[2]}.svg') graphs[0].write_png(f'{argv[2]}.png') print("Done!") if __name__ == "__main__": sys.exit(main(sys.argv)) ```
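A quick illustration of the course-code regex that both `parse_prereqs` and `parse_restricts` rely on; the prerequisite string below is made up for the example.

```python
import re

course_code = re.compile(r'[A-Z]{2,4}\*[0-9]{4}')
sample = "1 of CIS*2500, CIS*2750 or ENGG*1500; CIS*1910 recommended"
print(re.findall(course_code, sample))
# ['CIS*2500', 'CIS*2750', 'ENGG*1500', 'CIS*1910']
```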
{ "source": "jnguyen1098/plumage", "score": 3 }
#### File: jnguyen1098/plumage/extract.py ```python import argparse import csv import logging import os import sys import time import tweepy # type: ignore def extract_tweets(secret: str, query: str, outfile: str, count: int = 0, wait: int = 300) -> None: """Extract Tweets using the Tweepy API.""" logger = logging.getLogger("extracter") logger.info("Authenticating with Tweepy") logger.info("Reading secrets file %s", secret) token_fp = open(secret, "r") auth = tweepy.OAuthHandler(token_fp.readline().strip(), token_fp.readline().strip()) auth.set_access_token(token_fp.readline().strip(), token_fp.readline().strip()) api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) token_fp.close() logger.info("Attempting to authenticate") api.verify_credentials() logger.info("Authenticated! Examining outfile.") if not os.path.exists(outfile): logger.info("%s doesn't exist - it will be created.", outfile) file_p = open(outfile, "w", encoding="utf-8") tweet_writer = csv.writer(file_p) tweet_writer.writerow( [ "full_text", "created_at", "source", "id", "retweet_count", "favorite_count", "user_name", "user_id_str", "user_handle", "user_location", "user_desc", "user_protected", "user_followers", "user_created", "user_verified", "user_tweet_count", ] ) else: logger.info("%s exists - will append.", outfile) file_p = open(outfile, "a", encoding="utf-8") tweet_writer = csv.writer(file_p) logger.info("Starting Tweet extraction for query '%s'", query) if not count: logger.info("(executing forever)") else: logger.info("(executing %s times)", count) i = 1 bookmark = "1" while True: # Our search query. # # q - search query. We use the -filter:retweets # specifier in order to prune any retweets. # Otherwise we'd have to prune Tweets that # are prefaced with 'RT' # # lang - English Tweets only # # count - 100 is the max as per the Twitter API # # tweet_mode - we use extended tweet mode in # order to access Tweets that are greater # than 140 char. 
in length this is to keep # legacy Twitter API applications intact # # result_type - we use recent so as to create # a chronological record of Tweets # # since_id - we keep track of the last Tweet # saved and use it as a bookmark in order # to only get the Tweets coming after it # for tweet in api.search( q=f"{query} -filter:retweets", lang="en", count=100, tweet_mode="extended", result_type="recent", max_id=bookmark, ): # These are the features we write tweet_writer.writerow( [ tweet.full_text, tweet.created_at, tweet.source, tweet.id_str, tweet.retweet_count, tweet.favorite_count, tweet.user.name, tweet.user.id_str, tweet.user.screen_name, tweet.user.location, tweet.user.description, tweet.user.protected, tweet.user.followers_count, tweet.user.created_at, tweet.user.verified, tweet.user.statuses_count, ] ) # Flush the stream every time just in case file_p.flush() # Set the most recent Tweet as a bookmark bookmark = tweet.id_str # Transparency/monitoring limits = api.rate_limit_status() rem = limits["resources"]["application"]["/application/rate_limit_status"]["remaining"] logger.info("Tweets written to %s (%s hourly API accesses left)", outfile, rem) # Do not loop if demo if i == count: break i += 1 # Respect API time.sleep(wait) def main() -> int: """Execute standalone.""" arg_p = argparse.ArgumentParser() arg_p.add_argument("tokenfile", help="see README for details") arg_p.add_argument("query", help="search term") arg_p.add_argument("outfile", help="output file") args = arg_p.parse_args() logging.basicConfig( level=logging.INFO, format="[%(levelname)s | %(name)s] %(message)s", ) extract_tweets(args.tokenfile, args.query, args.outfile, count=0) return 0 if __name__ == "__main__": sys.exit(main()) ``` #### File: jnguyen1098/plumage/mine.py ```python import argparse import csv import json import logging import pickle import random import string import sys from typing import Dict, Iterator, List from nltk import NaiveBayesClassifier, classify, ngrams # type: ignore from nltk.corpus import twitter_samples # type: ignore from nltk.stem.wordnet import WordNetLemmatizer # type: ignore from nltk.tag import pos_tag # type: ignore MAX_TWEETS = -1 DIVISION = 25 SUBJECTIVITY_THRESHOLD = 0.30 def mine_tweets(infile: str, tweetout: str, gramout: str) -> None: """Classify, prune, and atomize Tweets.""" logger = logging.getLogger("miner") logger.info("Gathering and tokenizing positive tweets") positive_tweet_tokens = twitter_samples.tokenized("positive_tweets.json") logger.info("Gathering and tokenizing negative tweets") negative_tweet_tokens = twitter_samples.tokenized("negative_tweets.json") logger.info("Cleaning model tokens") positive_cleaned_tokens_list = [] negative_cleaned_tokens_list = [] # Clean tokens for tokens in positive_tweet_tokens: positive_cleaned_tokens_list.append(normalize(tokens)) # Clean tokens for tokens in negative_tweet_tokens: negative_cleaned_tokens_list.append(normalize(tokens)) logger.info("Building Tweet corpus") positive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list) # type: ignore negative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list) # type: ignore # Mark positive Tweets as such positive_dataset = [(tweet_dict, "Positive") for tweet_dict in positive_tokens_for_model] # Mark negative Tweets as such negative_dataset = [(tweet_dict, "Negative") for tweet_dict in negative_tokens_for_model] # Create unified dataset and shuffle it dataset = positive_dataset + negative_dataset random.shuffle(dataset) # Train the data using the 
first 70% as # training data, and the last 30% as # testing data. logger.info("70% training, 30% testing") train_data = dataset[:7000] test_data = dataset[7000:] logger.info("Training...") classifier = NaiveBayesClassifier.train(train_data) logger.info("Accuracy is: %s", classify.accuracy(classifier, test_data)) logger.info("Classifying Tweets") tweets = [] with open(infile, "r") as csv_file: logger.info("Opened %s", infile) csv_reader = csv.reader(csv_file, delimiter=",") logger.info("Attached CSV reader to %s successfully", infile) # Counts processed Tweets and rejected ones counter: int = 0 subject_reject: int = 0 # Iterate for tweet in csv_reader: # Printing if not counter % DIVISION: logger.info("Read in %s Tweets so far...", counter) # For debugging if counter == MAX_TWEETS: break # Classify Tweet new_tweet = Tweet(tweet) dist = classifier.prob_classify( dict([token, True] for token in new_tweet.cleaned_tokens) # type: ignore ) new_tweet.positivity = dist.prob("Positive") new_tweet.negativity = dist.prob("Negative") new_tweet.difference = abs(new_tweet.positivity - new_tweet.negativity) # Assess the subjectivity of the Tweet if new_tweet.difference > SUBJECTIVITY_THRESHOLD: tweets.append(new_tweet) else: subject_reject += 1 # Count counter += 1 logger.info("Processed %s Tweets", len(tweets)) logger.info("%s Tweets were rejected for not being subjective enough", subject_reject) # Pickle Tweets pickle.dump(tweets, open(tweetout, "wb")) logger.info("Pickled %s Tweets", len(tweets)) # Storing our n-gram occurrences gram_scores: List[Dict[str, int]] = [{}, {}, {}, {}, {}] # Counting n-grams for i in range(1, 5): logger.info("Creating %s-grams", i) # Iterate for tweet in tweets: # type: ignore # Create n-grams grams = ngrams(tweet.cleaned_tokens, i) # type: ignore # Count every gram for gram in grams: # Create record for new n-gram if gram not in gram_scores[i]: gram_scores[i][gram] = 1 # Update existing record else: gram_scores[i][gram] += 1 # Serialize n-grams to file with open(gramout, "wb") as gramout_fp: pickle.dump(gram_scores, gramout_fp) class Tweet: """Tweet object.""" def __init__(self, tweet_row: List[str]) -> None: """Initialize Tweet object.""" # Existing members self.full_text = tweet_row[0] self.created_at = tweet_row[1] self.source = tweet_row[2] self.tweet_id = tweet_row[3] self.retweet_count = tweet_row[4] self.favorite_count = tweet_row[5] self.user_name = tweet_row[6] self.user_id_str = tweet_row[7] self.user_handle = tweet_row[8] self.user_location = tweet_row[9] self.user_desc = tweet_row[10] self.user_protected = tweet_row[11] self.user_followers = tweet_row[12] self.user_created = tweet_row[13] self.user_verified = tweet_row[14] self.user_tweet_count = tweet_row[15] self.cleaned_text = tweet_row[16] self.cleaned_tokens = json.loads(tweet_row[17]) self.positivity = -1 self.negativity = -1 self.difference = -1 def normalize(tweet_tokens: List[str]) -> List[str]: """Lemmatize a Twitter post..""" cleaned_tokens = [] # Part of Speech tagging for token, tag in pos_tag(tweet_tokens): if tag.startswith("NN"): pos = "n" elif tag.startswith("VB"): pos = "v" else: pos = "a" # Lemmatize lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) if len(token) > 0 and token not in string.punctuation: cleaned_tokens.append(token.lower()) return cleaned_tokens def get_all_words(cleaned_tokens_list: List[List[str]]) -> Iterator[str]: """Yield generator for all words.""" for tokens in cleaned_tokens_list: for token in tokens: yield token def 
get_tweets_for_model(cleaned_tokens_list): # type: ignore """Yield dicts for Tweets.""" for tweet_tokens in cleaned_tokens_list: yield dict([token, True] for token in tweet_tokens) # type: ignore def main() -> int: """Execute standalone.""" arg_p = argparse.ArgumentParser() arg_p.add_argument("infile", help="input .CSV file") arg_p.add_argument("tweetout", help="output Tweets .CSV file") arg_p.add_argument("gramout", help="output n-grams .PICKLE file") args = arg_p.parse_args() logging.basicConfig( level=logging.INFO, format="[%(levelname)s | %(name)s] %(message)s", ) mine_tweets(args.infile, args.tweetout, args.gramout) return 0 if __name__ == "__main__": sys.exit(main()) ``` #### File: jnguyen1098/plumage/preprocess.py ```python import argparse import csv import json import logging import re import string import sys from typing import List import preprocessor # type: ignore import nltk # type: ignore from nltk.stem.wordnet import WordNetLemmatizer # type: ignore from nltk.tag import pos_tag # type: ignore from nltk.tokenize import word_tokenize # type: ignore MAX_TWEETS = -1 DIVISION = 25 def preprocess_tweets(infile: str, outfile: str) -> None: """Remove redundant and non-objective posts.""" logger = logging.getLogger("preprocessor") # Number of Tweets read counter: int = 0 # List of all Tweets tweets: List[Tweet] = [] # Begin reading with open(infile, "r") as csv_file: # CSV reader csv_reader = csv.reader(csv_file, delimiter=",") logger.info("Attached CSV reader") # Number of Tweets deleted due to URL url_blocked = 0 # Iterate for tweet in csv_reader: # Messaging checkpoints if not counter % DIVISION: logger.info("Processed %s Tweets", counter) # Break at limit if counter == MAX_TWEETS: break # Only add Tweet if it doesn't contain a URL. # As per Ejieh's master's thesis, the vast majority # of posts with URLs lack any subjectivity. 
ptn = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+" if not bool(re.search(ptn, tweet[0])): tweets.append(Tweet(tweet)) else: url_blocked += 1 counter += 1 logger.info("Read %s Tweets in total", counter) # Finishing message logger.info("Only %s Tweets were kept", len(tweets)) with open(outfile, "w", encoding="utf-8") as output_file: tweet_writer = csv.writer(output_file) i = 1 for tweet in tweets: # type: ignore tweet_writer.writerow( [ tweet.full_text, # type: ignore tweet.created_at, # type: ignore tweet.source, # type: ignore tweet.tweet_id, # type: ignore tweet.retweet_count, # type: ignore tweet.favorite_count, # type: ignore tweet.user_name, # type: ignore tweet.user_id_str, # type: ignore tweet.user_handle, # type: ignore tweet.user_location, # type: ignore tweet.user_desc, # type: ignore tweet.user_protected, # type: ignore tweet.user_followers, # type: ignore tweet.user_created, # type: ignore tweet.user_verified, # type: ignore tweet.user_tweet_count, # type: ignore tweet.cleaned_text, # type: ignore json.dumps(tweet.cleaned_tokens), # type: ignore ] ) if not i % DIVISION: logger.info("Wrote Tweet #%s", i) i += 1 logger.info("Wrote %s Tweets in total", len(tweets)) class Tweet: """Tweet object.""" def __init__(self, tweet_row: List[str]) -> None: """Initialize Tweet object.""" # Existing members self.full_text = tweet_row[0] self.created_at = tweet_row[1] self.source = tweet_row[2] self.tweet_id = tweet_row[3] self.retweet_count = tweet_row[4] self.favorite_count = tweet_row[5] self.user_name = tweet_row[6] self.user_id_str = tweet_row[7] self.user_handle = tweet_row[8] self.user_location = tweet_row[9] self.user_desc = tweet_row[10] self.user_protected = tweet_row[11] self.user_followers = tweet_row[12] self.user_created = tweet_row[13] self.user_verified = tweet_row[14] self.user_tweet_count = tweet_row[15] # New members self.cleaned_text = Tweet.clean_tweet(self.full_text) self.cleaned_tokens = Tweet.normalize(word_tokenize(self.cleaned_text)) @staticmethod def clean_tweet(full_text: str) -> str: """Remove meaningless data, in-place, from Tweets.""" # Said Ozcan's preprocessor cleaned = str(preprocessor.clean(full_text)) # Remove any remnant mentions cleaned = str(re.sub(r"@[A-Za-z0-9_]+", "", cleaned)) # Remove non-alpha cleaned = str(re.sub(r"[^A-Za-z ]+", "", cleaned)) return cleaned @staticmethod def normalize(tweet_tokens: List[str]) -> List[str]: """Lemmatize a Twitter post..""" cleaned_tokens = [] # Part of Speech tagging for token, tag in pos_tag(tweet_tokens): if tag.startswith("NN"): pos = "n" elif tag.startswith("VB"): pos = "v" else: pos = "a" # Lemmatize lemmatizer = WordNetLemmatizer() token = lemmatizer.lemmatize(token, pos) if len(token) > 0 and token not in string.punctuation: cleaned_tokens.append(token.lower()) return cleaned_tokens def main() -> int: """Execute standalone.""" arg_p = argparse.ArgumentParser() arg_p.add_argument("infile", help="input .CSV file") arg_p.add_argument("outfile", help="output .CSV file") args = arg_p.parse_args() logging.basicConfig( level=logging.INFO, format="[%(levelname)s | %(name)s] %(message)s", ) nltk.download("punkt") nltk.download("averaged_perceptron_tagger") nltk.download("wordnet") nltk.download("twitter_samples") nltk.download("stopwords") preprocess_tweets(args.infile, args.outfile) return 0 if __name__ == "__main__": sys.exit(main()) ```
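A rough sketch of what the two cleaning steps above do to a single post, assuming the `Tweet` class is importable from `preprocess` and the NLTK corpora have been downloaded (the module's `main()` does this); the example text and the exact tokens shown are illustrative only.

```python
from nltk.tokenize import word_tokenize
from preprocess import Tweet  # assumed import of the module above

raw = "Loving the new update!! 😍 @someone"
cleaned = Tweet.clean_tweet(raw)           # mentions, emoji, and non-alpha characters removed
print(cleaned)                             # roughly: "Loving the new update"

tokens = Tweet.normalize(word_tokenize(cleaned))
print(tokens)                              # roughly: ['loving', 'the', 'new', 'update']
```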
{ "source": "jnguyen1098/quizmake", "score": 4 }
#### File: quizmake/quizmake/corpus.py
```python
__revision__ = "not_defined"
__docformat__ = "reStructuredText"


class Corpus:
    """This is just the Corpus class to hold datasets.

    There is a function :func:`speak` that just speaks the field `amazing`.

    :param nothing: description I guess
    :param nothing2: description again I guess
    :type nothing: int
    :type nothing2: str
    :return: everything
    :rtype: int
    """

    def __init__(self) -> None:
        """Initiate the Corpus class."""
        self.amazing = "lmao"

    def speak(self) -> None:
        """Speak the assigned word."""
        print(self.amazing)

    def yell(self) -> None:
        """Say random stuff."""
        print(self.amazing + " LMaO")


def corn() -> str:
    """Return the string corn."""
    return "corn"
```
#### File: tests/smoke_tests/smoke_test.py
```python
from quizmake import core


def test_sanity() -> None:
    """Test for sanity."""
    args = [
        "prog",
        "tests/test_data/tokens/valid_tokens/",
        "tests/test_data/questions/valid_questions/",
    ]
    assert core.main(args) == 0
```
{ "source": "jnguyen1111/Discord-Cat-Bot", "score": 3 }
#### File: SourceCode/bot/regularcommand.py ```python from discord.ext import commands import discord import random import json import requests import giphy_client import time class regular_commands(commands.Cog): def __init__(self , bot): self.bot_start_time = time.time() self.bot = bot # used for obtaining random gifs from giphy in the random_gif function self.giphy_instance = giphy_client.DefaultApi() self.giphy_config = {"api_key": ""} # remember to add your giphy key IMPORTANT! #8ball responses to user self.eight_ball_fortune = ["As I see it, yes.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.", "Don’t count on it.", "It is certain.", "It is decidedly so.", "Most likely.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Outlook good.", "Reply hazy, try again.", "Signs point to yes.", "Very doubtful.", "Without a doubt.", "Yes.", "Yes – definitely.", "You may rely on it." ] def get_quote(self): quote_data = requests.get("https://zenquotes.io/api/random") quote_dictionary = json.loads(quote_data.text) quote_find = quote_dictionary[0]['q'] + " -" + quote_dictionary[0]['a'] #finds a quote and the author associated with it return quote_find # obtains quotes from the website using javascript object notation @commands.command(name="quote") async def quote(self,ctx): quote = self.get_quote() await ctx.send(quote) def get_rand_gif(self): random_gif_info = self.giphy_instance.gifs_random_get(self.giphy_config["api_key"]) #obtains information for a gif that was picked image = random_gif_info.data.image_url # obains the gifs image url return image # obtains a random gif from giphy @commands.command(name="rgif") async def random_gif(self,ctx): random_gif = self.get_rand_gif() await ctx.send(random_gif) #pings the bot latency @commands.command(name="ping") async def ping(self, ctx): await ctx.send("Latency: {} ms".format(round(self.bot.latency * 1000))) # obtains the bots time it has been on @commands.command(name="uptime") async def up_time(self, ctx): seconds = int(time.time() - self.bot_start_time) #gets the differnce of time since current time minus the time bot was on hours = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 await ctx.send("Bot Uptime: {}".format("%d:%02d:%02d" % (hours,minutes,seconds))) #sends information to the user of the person who created the bot @commands.command(name="creator") async def about(self,ctx): embed_color = discord.Colour.blue() embed_author = discord.Embed(title="Creator", url="https://github.com/jnguyen1111" , color= embed_color) embed_author.set_author(name="jnguyen1111",icon_url="https://avatars.githubusercontent.com/u/78591393?v=4") embed_author.set_thumbnail(url="https://miro.medium.com/max/719/0*9f5uMrKMjLbzEf7q.png") await ctx.send(embed = embed_author) #fortune teller to user's question @commands.command(name="8ball") async def magic_eight_ball(self, ctx, *, question): user = "<@!" + str(ctx.message.author.id) + ">" #obtain user's name who requested command await ctx.send(user + """ Question: {} Answer: {}""".format(question, random.choice(self.eight_ball_fortune))) #allows class defined in beginning to be added as an extension to commands def setup(bot): bot.add_cog(regular_commands(bot)) ```
{ "source": "jnguyen1192/BotInstagram", "score": 3 }
#### File: jnguyen1192/BotInstagram/main.py
```python
# @source https://instapy.org/
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from time import sleep

from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import random


def get_rand_number(min=5, max=10):
    return random.randint(min, max)


mail = "<EMAIL>"
mdp = "...31ynnHojoj"

browser = webdriver.Chrome(ChromeDriverManager().install())
browser.implicitly_wait(get_rand_number())
browser.get('https://www.instagram.com/')
browser.implicitly_wait(get_rand_number())

bot = browser.find_element_by_xpath("//button[text()='Accepter']")
bot.click()
browser.implicitly_wait(get_rand_number())

# button connect
bot = browser.find_element_by_xpath("//span[text()='Se connecter avec Facebook']")
bot.click()
browser.implicitly_wait(get_rand_number())

# 'Tout accepter' (accept all) button
bot = browser.find_element_by_xpath("//button[@title='Tout accepter']")
bot.click()
browser.implicitly_wait(get_rand_number())

# input email
bot = browser.find_element_by_xpath('//input[@id="email"]')
bot.send_keys(mail)
browser.implicitly_wait(get_rand_number())

# input password
bot = browser.find_element_by_xpath("//input[@name='pass']")
bot.send_keys(mdp[::-1])
browser.implicitly_wait(get_rand_number())

# button connect
bot = browser.find_element_by_xpath("//button[@name='login']")
bot.click()

print("sleep before Instagram's 'Plus tard' button")
sleep(20)

# 'Plus tard' (later) button
bot = browser.find_element_by_xpath("//button[text()='Plus tard']")
bot.click()
sleep(5)

# 'Activer' (enable) button
bot = browser.find_element_by_xpath("//button[text()='Activer']")
bot.click()
browser.implicitly_wait(get_rand_number())

sleep(40000)
browser.close()


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
```
{ "source": "jnguyen1192/FunnySR", "score": 3 }
#### File: jnguyen1192/FunnySR/pdf_reader.py
```python
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter  # resource manager and interpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO  # read/write string buffer
import pyttsx3  # text to speech
import requests  # for web requests (post/get)

pdf_url = "http://codex.cs.yale.edu/avi/db-book/db4/slide-dir/ch1-2.pdf"  # link to the pdf file
# File name (with folder path) where the downloaded file will be saved.
# On Windows a directory/folder path is required; on Linux it is not.
file_name = "saved_pdf.pdf"
reading_speed = 100  # words per minute
voice_id = 3  # voice index; 1 = female voice, 0 = male voice
eng = pyttsx3.init()  # initialize the speech engine / API


def pdf_to_text(file_name):
    """Convert a pdf to text. Argument: file name."""
    resourceManager = PDFResourceManager()  # stores shared resources such as fonts or images
    retstr = StringIO()  # reads and writes a string buffer
    device = TextConverter(resourceManager, retstr, laparams=LAParams())  # create a PDF device object
    f = open(file_name, 'rb')  # open the file in read and binary mode
    interpreter = PDFPageInterpreter(resourceManager, device)  # interpreter to process the page contents
    # pagenos = set()  # store page numbers in a set
    for page in PDFPage.get_pages(f, maxpages=0, caching=True, check_extractable=True):
        interpreter.process_page(page)
    f.close()  # close the file after reading
    device.close()  # close the device
    text = retstr.getvalue()  # extract text
    retstr.close()
    return text  # return the extracted text


def read_out(text, voice, speed):
    """Read out the text. Arguments: text to read, voice index (0/1), speed in words per minute."""
    voices = eng.getProperty('voices')  # get the voices
    eng.setProperty('voice', voices[voice].id)  # select voice
    eng.say(text)  # queue the text for speech
    eng.setProperty('rate', speed)  # words per minute
    eng.runAndWait()  # run


def audio_book(url):
    """1. download a pdf file from the given url 2. convert it into text 3. read it out"""
    try:  # try getting the response
        response = requests.get(url, stream=True)  # get web request
    except requests.exceptions.RequestException:  # in case of an error, close the program
        print("There might be an issue with the internet or url.")
        return
    if response.status_code != 200:  # 200 means ok; if not, close the program
        print("Get request failed, check internet connection or provide a valid url")
        return
    with open(file_name, "wb") as pdf:  # open in write and binary mode
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # write one chunk at a time to the pdf file
                pdf.write(chunk)
    text = pdf_to_text(file_name=file_name)  # convert pdf to text
    read_out(text=text, voice=voice_id, speed=reading_speed)  # read the text


"""run"""
print("Press CTRL+C to stop!")
audio_book(url=pdf_url)
```
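The `voice_id` above is machine dependent: the voices installed differ per operating system and TTS backend. A small sketch for listing the local voice indices before picking one; this helper is not part of the original script.

```python
import pyttsx3

engine = pyttsx3.init()
for index, voice in enumerate(engine.getProperty('voices')):
    print(index, voice.name)   # choose one of these indices for voice_id
```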
{ "source": "jnguyen1192/SIA", "score": 2 }
#### File: jnguyen1192/SIA/SAIDaemon.py ```python import os import docker import socket import logging from docker.utils import kwargs_from_env class SAIDaemon: """ The appearence of the SIA """ def build(self, path_dockerfile=''): if path_dockerfile == '': path_dockerfile = os.getcwd() client = None api_client = None try: client = docker.from_env() # TODO only if images changes #img = client.images.build(path=path_dockerfile, tag="sai_daemon") kwargs = kwargs_from_env() # @source : https://github.com/qazbnm456/tsaotun/blob/master/tsaotun/lib/docker_client.py api_client = docker.APIClient(**kwargs) print(api_client.version()) print(os.getcwd()[2:]) print("Docker run ---------->") #/Users/johdu/PycharmProjects/SAI/test # run container # TODO stop current c_sai_daemon for c in client.containers.list(): if c.__getattribute__("name") == "c_sai_daemon": api_client.kill("c_sai_daemon") # TODO rm current c_sai_daemon for c in client.containers.list(all=True): if c.__getattribute__("name") == "c_sai_daemon": api_client.remove_container("c_sai_daemon") # @source : http://www.geo.mtu.edu/geoschem/docs/putty_install.html # @source : https://github.com/asweigart/pyautogui/issues/124 # https://github.com/niranjanshr13/Automate_Linux_with_GAssistant probably use or not # TODO test if the ip is the real ip IPAddr = socket.gethostbyname_ex(socket.gethostname())[-1][-1] # socket.gethostbyname(socket.gethostname()) #print("other ", socket.gethostbyname_ex(socket.gethostname())[-1][-1]) #print(socket.gethostname(), " with 99, it's a docker tools ip") print("Is is the real ip ?", IPAddr) #environment = {"DISPLAY": IPAddr + ':0.0'} environment = {"DISPLAY": IPAddr + ':0.0'} volumes = {"/c/Users/johdu/PycharmProjects/SAI": {'bind': '/code/', 'mode': 'rw'} } # volume : src:dest print(client.containers.run(image="sai_daemon", name="c_sai_daemon", volumes=volumes, environment=environment).decode('utf8')) # create container """ resp = api_client.create_container(image="sai_daemon", name="container_sai_daemon", host_config=api_client.create_host_config(binds=[ '/code/:' + os.getcwd()[2:], ])) container = client.containers.get(resp['Id']) container.start() """ client.close() api_client.close() #print(client.containers.run("sai_daemon").decode('utf8')) #print(img) except Exception as e: logging.error("Build function don't work because " + str(e)) client.close() api_client.close() return -1 # TODO the daemon has been correctly build return 0 def hello_world(self): # TODO the daemon says hello world return "hello world" ``` #### File: jnguyen1192/SIA/SpeedTest.py ```python import time import unittest class SpeedTest(unittest.TestCase): def test_speed_once(self, func, *args): beg = time.time() func(*args) end = time.time() return end - beg def test_speed_26_screenshot_in_one_second(self): """ The goal is to mesurate how maximum can we take screen in less than a second """ import threading import time from mss import mss import os import SAIEyes saie = SAIEyes.SAIEyes(eyes_dir="Speedtest", ctm_dir="Speedtest") def loop26(): import d3dshot import os d = d3dshot.create() for i in range(26): name = "gross_"+str(i) print(name) # TODO find the best way # first way 9.029s => 2 img/s as a jpg 2.467s => 10 img/s #cs = saie.get_current_screen(name+".jpg") #saie.save_image_court_term_memory(cs, name + ".jpg") # second way 5.343s => 4 img/s as a jpg 2.68s => 10 img/s #import pyscreeze #pyscreeze._screenshot_win32(name+".png") # third way 4.658s => 5 img/s #with mss() as sct: # print(sct.shot()) # rename monitor-1.png to 
name+".png"import os # os.replace('monitor-1.png', name+".png") #sct.save() # fourth way 4.909 => 5 img/s as a jpg 15 img/s d.screenshot_to_disk(directory=os.path.join("Speedtest", "CourtTermMemory"), file_name=name + ".jpg") my_thread = threading.Thread(target=loop26) my_thread.start() my_thread.join() def test_take_img_during_10_seconds(self): import time import d3dshot import os from datetime import datetime, timezone d = d3dshot.create() start = time.time() end = 0 i = 0 print("hello") # without current date 180 imgs => 18 img/s """while end - start < 10: name = "gross_"+str(i) d.screenshot_to_disk(directory=os.path.join("Speedtest", "CourtTermMemory"), file_name=name + ".jpg") end = time.time() i += 1 """ # with current date and deleting on the directory 169 imgs => 16.9 imgs/s # with current date on the directory 183 imgs => 18 imgs/s while end - start < 10: d.screenshot_to_disk(directory=os.path.join("Speedtest", "CourtTermMemory"), file_name=datetime.today().strftime("%Y%m%d%H%M%S%f") + ".jpg") end = time.time() i += 1 # TODO in one hour we get 18 Go of images to clean and we need to know how much we can extract different shape during this hour def test_take_img_during_one_hour(self): """ Save one hour images """ import time import d3dshot import os from datetime import datetime d = d3dshot.create() start = time.time() end = 0 i = 0 print("hello") # TODO How many images has been created ? while end - start < 3600: d.screenshot_to_disk(directory=os.path.join("Speedtest", "OneHour"), file_name=datetime.today().strftime("%Y%m%d%H%M%S%f") + ".jpg") end = time.time() i += 1 def test_only_extract_shape_of_one_hour_images(self): """ Extract the shape to one hour images to know what is the volume of the shape directory """ # TODO for each images in directory Speedtest/OneHour # TODO use class Shape to extract all the shape # TODO Get the size of the directory Speedtest/One_Hour_Shapes # TODO Optionnal 1: remove the duplicate shape # TODO Optionnal 2: remove the duplicate shape using a threshold def test_auto_click_and_move_during_taking_image_during_10_seconds(self): """ This will test if SAI correctly save the inputs it use to do actions """ # TODO Get a random action # TODO Start the saving in another thread during 10 seconds # TODO Do the random action during at least 3 seconds during 5 seconds # TODO Check if the database save the action # TODO Check if the directory contains images def test_auto_click_and_move_during_taking_image_during_1_minute(self): """ This will test if SAI correctly save the inputs it use to do actions """ # Implement the function create_db # Launch the db with SAIBrain # TODO Get a random action # Create a function random_point in the window in my_tools # TODO Start the saving in another thread during 60 seconds # Create a function Open in SAIEyes that will launch a thread that will save the images # TODO Do the random action during at least 3 seconds during each 10 seconds # Use function move_mouse_to or left_click to do a random action and stock the action with the times # TODO Check if the database save the action # Select From action # TODO Check if the directory contains images # os.path.isfile() using Select From action, images def test_auto_click_and_move_during_taking_image_during_5_minute(self): """ This will test if SAI correctly save the inputs it use to do actions """ # TODO Get a random action # TODO Start the saving in another thread during 5 minutes # TODO Do the random action during at least 3 seconds during each 30 seconds # TODO Check if the 
database save the action # TODO Check if the directory contains images def test_auto_click_and_move_during_taking_image_during_10_minute(self): """ This will test if SAI correctly save the inputs it use to do actions """ # TODO Get a random action # TODO Start the saving in another thread during 10 minutes # TODO Do the random action during at least 3 seconds during each 45 seconds # TODO Check if the database save the action # TODO Check if the directory contains images def test_auto_click_and_move_during_taking_image_during_30_minute(self): """ This will test if SAI correctly save the inputs it use to do actions """ # TODO Get a random action # TODO Start the saving in another thread during 30 minutes # TODO Do the random action during at least 3 seconds during each 45 seconds # TODO Check if the database save the action # TODO Check if the directory contains images def test_synthetisis_images_from_30_minutes_autoclick(self): """ This will test if SAI correctly reduce images """ # TODO Reduce the files as the shape # TODO as a new representation of the images less heavy def test_auto_click_and_move_during_taking_image_during_60_minute_with_syntethisis(self): """ This will test if SAI correctly save the inputs it use to do actions """ # TODO Get a random action # TODO Start the saving in another thread during 30 minutes # TODO Do the random action during at least 3 seconds during each 45 seconds # TODO Check if the database save the action # TODO Check if the directory contains images # TODO Then reduce the files as the shape # TODO as a new representation of the images less heavy def test_speed(self, func, *args, nb_test=10): sum = 0 for i in range(nb_test): sum += self.test_speed_once(func, *args) return sum/nb_test if __name__ == '__main__': unittest.main() ```
{ "source": "j-nguyen/FractalBot", "score": 3 }
#### File: FractalBot/cogs/tags.py ```python from discord.ext import commands from .utils import db from .utils import models from .utils import perms from sqlalchemy.orm import sessionmaker class Tags: """ Tag commands, to showcase. """ def __init__(self, bot): self.bot = bot # Set-up the engine here. self.engine = db.engine # Create a session self.Session = sessionmaker(bind=self.engine) # We will need a command to insert a new tag into the db. @commands.group(pass_context=True) @perms.mod_or_permissions(kick_members=True) async def tag(self, ctx): if ctx.invoked_subcommand is None: await self.bot.say('Invalid tag command. $tag <add/remove/list/show>') @tag.command() @perms.mod_or_permissions(kick_members=True) async def add(self, name: str, *, desc: str): """ Adds a tag to the database. """ # Create a tag object tag = models.Tag(name=name, description=desc) sess = self.Session() try: sess.add(tag) sess.commit() await self.bot.say('Added tag command.') except Exception as e: await self.bot.say('Cannot add. Reasons: Duplicate entry, or Invalid response.') finally: sess.close() @tag.command() @perms.mod_or_permissions(kick_members=True) async def remove(self, name: str): """ Removes the tag from the database """ # Removes the tag sess = self.Session() tag = sess.query(models.Tag).filter(models.Tag.name == name).first() if tag != None: sess.delete(tag) sess.commit() await self.bot.say('Tag *{}* deleted.'.format(name)) else: await self.bot.say('Cant find the specified tag!') @tag.command() async def list(self): """ List all tags available in the database. """ # Shows the list of tags sess = self.Session() tags = sess.query(models.Tag).all() if tags is None: await self.bot.say('No tags.') else: tagsName = [tag.name for tag in tags] await self.bot.say('Tags: ' + ', '.join(tagsName)) @tag.command() async def show(self, name: str): """ output a tag, based on the name given """ # display the tag sess = self.Session() tag = sess.query(models.Tag).filter(models.Tag.name == name).first() if tag is None: await self.bot.say('Tag cannot be found.') else: await self.bot.say('{}'.format(tag.description)) # Helps us add to the extension def setup(bot): bot.add_cog(Tags(bot)) ``` #### File: FractalBot/cogs/user.py ```python from discord.ext import commands from .utils import db from .utils import models from .utils import perms from sqlalchemy.orm import sessionmaker import discord import datetime class User: """ User related commands """ def __init__(self, bot): self.bot = bot # Set-up the engine here. self.engine = db.engine # Create a session self.Session = sessionmaker(bind=self.engine) @commands.group(pass_context=True) async def topic(self, ctx): if ctx.invoked_subcommand is None: await self.bot.say('Invalid command: $tag <list/join/leave>') # Lets the user join a specific role which opens up a channel for them. @topic.command(pass_context=True) async def join(self, ctx, name: str = None): """ Joins a specific topic, given the topic name """ member = ctx.message.author roles = ctx.message.server.roles if name is None: await self.bot.say('Are you sure you\'ve inputted something?') else: db = self.Session() topic = db.query(models.Topic).filter(models.Topic.name == name).first() db.close() if topic: role = discord.utils.find(lambda r: r.id == str(topic.role_id), roles) try: await self.bot.add_roles(member, role) await self.bot.say('Joined {}'.format(topic.name)) except discord.Forbidden: await self.bot.say('Cannot add! 
Permissions wrong?') except discord.HTTPException: await self.bot.say('Something happened! Please try again') else: await self.bot.say('Could not find topic channel.') @topic.command() async def list(self): """ List all the topics available. """ db = self.Session() topics = db.query(models.Topic).all() db.close() await self.bot.say('Topics: {}'.format(','.join([topic.name for topic in topics]))) @topic.command() @perms.mod_or_permissions(kick_members=True) async def add(self, name: str, role: discord.Role = None): """ Adds a topic """ if role is None: await self.bot.say('Invalid role.') else: try: db = self.Session() topic = models.Topic(name=name, role_id=role.id) db.add(topic) db.commit() await self.bot.say('Added topic.') except Exception as e: print (e) @topic.command(pass_context=True) async def leave(self, ctx, name: str = None): """ Leave a topic that you are from. """ if name is None: await self.bot.say('Invalid topic.') else: member = ctx.message.author roles = ctx.message.author.roles db = self.Session() topic = db.query(models.Topic).filter(models.Topic.name == name).first() db.close() if topic: try: role = discord.utils.find(lambda r: r.id == str(topic.role_id), roles) await self.bot.remove_roles(member, role) await self.bot.say('Left {} topic.'.format(topic.name)) except discord.Forbidden: await self.bot.say('Something went wrong') except discord.HTTPException: await self.bot.say('Leaving role failed') @topic.command() @perms.mod_or_permissions(kick_members=True) async def remove(self, name: str = None): """ Removes a topic from the database. """ if name is None: await self.bot.say('Invalid topic') else: db = self.Session() try: topic = db.query(models.Topic).filter(models.Topic.name == name).first() db.delete(topic) db.commit() await self.bot.say('Deleted {} topic'.format(topic.name)) except Exception as e: print (e) finally: db.close() # Helps us add to the extension def setup(bot): bot.add_cog(User(bot)) ``` #### File: cogs/utils/db.py ```python from sqlalchemy import create_engine import json # TODO: Fix usage of global engine = None def loadDB(user, password, hostname, dbname): global engine engine = create_engine('postgresql+psycopg2://{}:{}@{}/{}'.format(user, password, hostname, dbname)) ``` #### File: cogs/utils/perms.py ```python from discord.ext import commands import discord.utils # Checks permission based on the attribute given. def check_permissions(ctx, perms): msg = ctx.message ch = msg.channel author = msg.author resolved = ch.permissions_for(author) return all(getattr(resolved, name, None) == value for name, value in perms.items()) # Checks permission based on the role. 
def role_or_permissions(ctx, check, **perms): if check_permissions(ctx, perms): return True ch = ctx.message.channel author = ctx.message.author if ch.is_private: return False # can't have roles in PMs role = discord.utils.find(check, author.roles) return role is not None # Checks permission for Moderators def mod_or_permissions(**perms): def predicate(ctx): return role_or_permissions(ctx, lambda r: r.name == 'Staff', **perms) # Add the predicate as a check on the command return commands.check(predicate) ``` #### File: j-nguyen/FractalBot/setup.py ```python import json from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from cogs.utils import models def loadDbConfig(): with open('postgresql.json') as f: return json.load(f) def main(): db = loadDbConfig() Base = declarative_base() print ('Connecting to DB') engine = create_engine('postgresql+psycopg2://{}:{}@{}/{}'.format(db['user'], db['password'], db['hostname'], db['database']), echo=True) models.Tag.__table__.create(engine, checkfirst=True) models.User.__table__.create(engine, checkfirst=True) models.Rank.__table__.create(engine, checkfirst=True) models.Role.__table__.create(engine, checkfirst=True) models.Level.__table__.create(engine, checkfirst=True) models.Topic.__table__.create(engine, checkfirst=True) Session = sessionmaker(bind=engine) # Try doing an initial insert for levels (up to 50) ranks = [] for i in range(50): xp = 100 * int(round(pow(i, 1.2))) rank = models.Rank(xp=xp) ranks.append(rank) # Try adding the ranks db = Session() if db.query(models.Rank).count() == 0: try: db.add_all(ranks) db.commit() except Exception as e: print (e) if __name__ == '__main__': main() ```
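For reference, the rank curve seeded by `setup.py` follows `xp = 100 * int(round(pow(i, 1.2)))`; a standalone snippet printing its first few values (the output is computed from that same expression):
```python
# First few entries of the XP curve used when seeding the Rank table.
for i in range(6):
    print(i, 100 * int(round(pow(i, 1.2))))
# 0 0
# 1 100
# 2 200
# 3 400
# 4 500
# 5 700
```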
{ "source": "jnha/blipwave", "score": 4 }
#### File: jnha/blipwave/sampling.py
```python
import numpy as np

from blipwave import RATE


def sample(wave, length, rate=RATE):
    """
    Samples a waveform

    Args:
        wave: the waveform
        length: the length of time to sample in seconds
        rate: the sample rate in samples/second

    Returns:
        An array of samples of the waveform
    """
    clip = np.linspace(0, length, int(length * rate))
    return wave(clip)
```
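A minimal usage sketch for `sample`; the import path is assumed from the file layout above, and an explicit `rate` is passed so the example does not depend on the value of `RATE`:
```python
import numpy as np

from sampling import sample  # assumed import path for the module above

# One second of a 440 Hz sine wave at 8000 samples/second.
wave = lambda t: np.sin(2 * np.pi * 440 * t)
samples = sample(wave, 1.0, rate=8000)
print(samples.shape)  # (8000,)
```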
{ "source": "jnhansen/esahub", "score": 2 }
#### File: esahub/esahub/config.py ```python import yaml import os SETTINGS_FILES = [ os.path.join(os.path.dirname(__file__), 'config.yaml'), os.path.expanduser('~/.esahub.conf') ] CONFIG = {} # # Settings files in order of increasing precedence # def load(fname): try: loader = yaml.FullLoader except AttributeError: loader = yaml.Loader fname = os.path.expanduser(fname) if os.path.isfile(fname): with open(fname, 'r') as fid: CONFIG.update(yaml.load(fid, Loader=loader)) CONFIG['GENERAL']['DATA_DIR'] = \ os.path.expanduser(CONFIG['GENERAL']['DATA_DIR']) for f in SETTINGS_FILES: load(f) ``` #### File: esahub/esahub/scihub.py ```python import os import aiohttp import asyncio import lxml.etree as ET from datetime import datetime, timedelta import pytz import re from .config import CONFIG from . import utils, geo, checksum, tty from urllib.parse import urlparse, parse_qs, urlencode from collections import OrderedDict import hashlib import logging logger = logging.getLogger('esahub') logger.disabled = True CHUNK = 64 * 1024 PREFIXES = { 'os': 'http://a9.com/-/spec/opensearch/1.1/', 'opensearch': 'http://a9.com/-/spec/opensearch/1.1/', 'doc': 'http://www.w3.org/2005/Atom', 'gml': 'http://www.opengis.net/gml' } DOWNLOAD_SUFFIX = '.download' DOWNLOAD_URL_PATTERN = \ "{host}/odata/v1/Products('{uuid}')/$value" CHECKSUM_URL_PATTERN = \ "{host}/odata/v1/Products('{uuid}')/Checksum/Value/$value" PREVIEW_URL_PATTERN = \ "{host}/odata/v1/Products('{uuid}')/Products('Quicklook')/$value" DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.000Z' # ----------------------------------------------------------------------------- # HTTP CONNECTION METHODS # This is all the async stuff # ----------------------------------------------------------------------------- class NotFoundError(Exception): pass class SessionManager(): """ Manage active HTTP sessions. Keep different sessions for queries and downloads, because concurrent downloads are limited, while queries are not. 
""" def __init__(self, concurrent=None): self._concurrent = concurrent self._sessions = {} def __getitem__(self, server): if server not in self._sessions: cfg = CONFIG['SERVERS'][server] auth = aiohttp.BasicAuth(login=cfg['user'], password=cfg['password']) if self._concurrent is None: concurrent = cfg['downloads'] else: concurrent = self._concurrent connector = aiohttp.TCPConnector(limit=concurrent) self._sessions[server] = aiohttp.ClientSession( auth=auth, connector=connector) return self._sessions[server] def __del__(self): # Close all active sessions loop = asyncio.get_event_loop() closer_tasks = [] for server, session in self._sessions.items(): closer_tasks.append(session.close()) if len(closer_tasks) > 0: loop.run_until_complete(asyncio.wait(closer_tasks)) QUERY = SessionManager(concurrent=CONFIG['GENERAL']['N_SCIHUB_QUERIES']) DOWNLOAD = SessionManager() def block(fn, *args, **kwargs): """Run an async function and block.""" task = fn(*args, **kwargs) loop = asyncio.get_event_loop() result = loop.run_until_complete(task) return result def get_response(url): async def _resp(url): server = _get_server_from_url(url) async with QUERY[server].get(url) as resp: return resp return block(_resp, url) def resolve(url, server=None): return block(_resolve, url, server=server) async def _resolve(url, server=None): if server is None: server = _get_server_from_url(url) async with QUERY[server].get(url) as response: return await response.text() async def get_total_results(url): response = await _resolve(url) xml = response.encode('utf-8') root = ET.fromstring(xml) try: total_results = int(root.find('os:totalResults', PREFIXES).text) except TypeError: total_results = 0 except AttributeError as e: raise AttributeError("Could not extract total results from URL " "{}: {}".format(url, e)) return total_results async def _ping_single(server): cfg = CONFIG['SERVERS'][server] url = '{host}/search?q=*:*'.format(host=cfg['host']) async with QUERY[server].get(url) as response: return (server, response.status) async def _download(url, destination, return_md5=False, cont=True): """Downloads a file from the remote server into the specified destination. Parameters ---------- url : str The source URL. destination : str The local target file path. cont : bool, optional Continue partial downloads (default: True). Returns ------- bool True if successful, False otherwise. """ # # Create directory. 
# path, file_name = os.path.split(destination) os.makedirs(path, exist_ok=True) pbar_key = file_name.rstrip(DOWNLOAD_SUFFIX) if return_md5: hash_md5 = hashlib.md5() headers = {} if cont and os.path.isfile(destination): local_size = os.path.getsize(destination) headers['Range'] = 'bytes={}-'.format(local_size) with open(destination, 'rb') as f: hash_md5.update(f.read()) tty.screen.status(progress=local_size) else: local_size = 0 server = _get_server_from_url(url) async with DOWNLOAD[server].get(url, timeout=None, headers=headers) \ as response: size = int(response.headers['Content-Length']) # Create progress bar only now: pbar = tty.screen[pbar_key] pbar.n = local_size pbar.total = size + local_size pbar.refresh() mode = 'ab' if cont else 'wb' with open(destination, mode) as f: async for data in response.content.iter_chunked(CHUNK): if return_md5: hash_md5.update(data) f.write(data) progress = len(data) tty.screen.status(progress=progress) pbar.update(len(data)) # if pbar is not None: # pbar.close() if return_md5: return (True, hash_md5.hexdigest().lower()) else: return True # ----------------------------------------------------------------------------- # XML PARSING # ----------------------------------------------------------------------------- def parse_page(xml): file_list = [] try: root = ET.fromstring(xml) # try: # total_results = int(root.find('os:totalResults', PREFIXES).text) # except TypeError: # total_results = 0 for entry in root.findall('doc:entry', PREFIXES): filename = entry.find("./doc:str[@name='identifier']", PREFIXES).text ingestiondate = utils.to_date( entry.find("./doc:date[@name='ingestiondate']", PREFIXES).text, output='date') try: footprint_tag = entry.find("./doc:str[@name='gmlfootprint']", PREFIXES).text match = re.search('<gml:coordinates>(.*)</gml:coordinates>', footprint_tag) coords = geo.gml_to_polygon(match.groups()[0]) except AttributeError: coords = None filesize = utils.h2b(entry.find("./doc:str[@name='size']", PREFIXES).text) preview_url = entry.find("./doc:link[@rel='icon']", PREFIXES).attrib['href'] try: rel_orbit = int(entry.find( "doc:int[@name='relativeorbitnumber']", PREFIXES).text) except AttributeError: rel_orbit = None try: orbit_dir = entry.find("doc:str[@name='orbitdirection']", PREFIXES).text.upper() except AttributeError: orbit_dir = None file_dict = { 'title': entry.find('doc:title', PREFIXES).text, 'url': entry.find('doc:link', PREFIXES).attrib['href'], 'preview': preview_url, 'uuid': entry.find('doc:id', PREFIXES).text, 'filename': filename, 'size': filesize, 'ingestiondate': ingestiondate, 'coords': coords, 'orbit_direction': orbit_dir, 'rel_orbit': rel_orbit } file_dict['host'] = _get_host_from_url(file_dict['url']) file_list.append(file_dict) except ET.XMLSyntaxError: # not valid XML file_list = [] return file_list async def _files_from_url(url, verbose=False): xml = await _resolve(url) result = parse_page(xml.encode('utf-8')) if verbose: tty.screen.status(progress=len(result)) return result async def _get_file_list_from_url(url, limit=None, verbose=False): # Parse first page to get total number of results. 
total_results = await get_total_results(url) host = urlparse(url).netloc if verbose and total_results > 0: tty.screen.status(desc='Querying {host}'.format(host=host), total=total_results, mode='bar') if limit is None: total = total_results else: total = min(limit, total_results) urls = [url] + [u for u in _generate_next_url(url, total=total)] tasks = [_files_from_url(u, verbose=verbose) for u in urls] results = await asyncio.gather(*tasks) result = utils.flatten(results) return result # ----------------------------------------------------------------------------- # QUERY BUILDING # ----------------------------------------------------------------------------- def _parse_time_parameter(value): # Default ingestiontime query parameters start = '1970-01-01T00:00:00.000Z' end = 'NOW' DATE_FMT = '%Y-%m-%dT00:00:00.000Z' if value == 'today': start = datetime.strftime(datetime.now(pytz.utc), DATE_FMT) elif value == 'yesterday': start = datetime.strftime(datetime.now(pytz.utc) - timedelta(1), DATE_FMT) end = datetime.strftime(datetime.now(pytz.utc), DATE_FMT) elif value == 'midnight': end = datetime.strftime(datetime.now(pytz.utc), DATE_FMT) elif value == '24h': start = datetime.strftime(datetime.now(pytz.utc) - timedelta(1), DATETIME_FMT) end = datetime.strftime(datetime.now(pytz.utc), DATETIME_FMT) else: parsed = utils.parse_datetime(value) if parsed[0] is not None: start = datetime.strftime(parsed[0], DATETIME_FMT) if parsed[1] is not None: end = datetime.strftime(parsed[1], DATETIME_FMT) return start, end def _build_query(query={}): """ Builds and returns the query URL given the command line input parameters. Parameters ---------- query : list, optional A list of additional custom query elements submitted to SciHub. The queries will be concatenated with ampersands (&). """ query_list = [] sort_string = '' # Default ingestiontime query parameters start = '1970-01-01T00:00:00.000Z' end = 'NOW' # Geospatial query list geo_query_list = [] for key, val in query.items(): if key == 'mission': # Build mission selection query: query_list.append('platformname:{0}'.format(query['mission'])) elif key == 'satellite': # Build satellite selection query: query_list.append('identifier:{0}*'.format(query['satellite'])) elif key == 'time': start, end = _parse_time_parameter(val) elif key == 'geo': # Build geospatial query: if type(val) is not list: val = [val] for item in val: geo_query_list.append( 'footprint:"Intersects({0})"'.format(item)) elif key == 'location': if type(val) is not list: val = [val] for loc in val: if loc in CONFIG['LOCATIONS']: geo_query_list.append('footprint:"Intersects({0})"'.format( CONFIG['LOCATIONS'][loc])) else: logger.error( '{0} {1}'.format(tty.error('Location not found:'), loc) ) elif key == 'type': query_list.append('producttype:{}'.format(query['type'])) elif key == 'orbit': if val.upper() in ['ASC', 'ASCENDING']: orbit = 'ASCENDING' elif val.upper() in ['DESC', 'DESCENDING']: orbit = 'DESCENDING' else: raise ValueError("Invalid value for `orbit`: '{}'" .format(val)) query_list.append('orbitdirection:{}'.format(orbit)) elif key == 'id': query_list.append('identifier:{}'.format(query['id'])) elif key == 'query': query_list.append('{}'.format(query['query'])) elif key == 'sort': sort_string = '&orderby={} {}'.format(*query['sort']) # Not a special keyword. 
Pass directly to SciHub else: query_list.append('{}:{}'.format(key, val)) # Build ingestiondate query: query_list.append('ingestiondate:[{0} TO {1}]'.format(start, end)) # Build geospatial query: if len(geo_query_list): query_list.append( '({})'.format(' OR '.join(geo_query_list)) ) # Generate full query string if len(query_list) == 0: query_list.append('*:*') query_string = ' AND '.join(query_list) # # Example: 'https://scihub.copernicus.eu/s3/search? # q=(footprint:"Intersects(POLYGON((-25.100 46.800, # -5.250 46.800,-5.250 57.400,-25.100 57.400,-25.100 46.800)))") # &rows=25&start=0' # query_url = 'q={q}&start=0&rows={rows}{sort}'.format( q=query_string, rows=CONFIG['GENERAL']['ENTRIES'], sort=sort_string ) return query_url def _build_url(query, server): if type(query) is dict: query_string = _build_query(query) else: query_string = query return '{url}/search?{q}'.format( url=CONFIG['SERVERS'][server]['host'], q=query_string ) # ----------------------------------------------------------------------------- # UTILITY FUNCTIONS # ----------------------------------------------------------------------------- def _generate_next_url(url, total=None): parsed_url = urlparse(url) q_params = parse_qs(parsed_url.query) q_params = {k: v[0] if len(v) == 1 else v for k, v in q_params.items()} if 'rows' in q_params: q_params['rows'] = int(q_params['rows']) else: q_params['rows'] = 10 if 'start' in q_params: q_params['start'] = int(q_params['start']) else: q_params['start'] = 0 if total is not None: last_start = total - q_params['rows'] q_params['rows'] = min(q_params['rows'], total) while total is None or q_params['start'] < last_start: q_params['start'] += q_params['rows'] parsed_url = parsed_url._replace(query=urlencode(q_params, doseq=True)) yield parsed_url.geturl() def _get_available_servers(): servers = [server for server, conf in CONFIG['SERVERS'].items() if len(conf['user']) > 0 and len(conf['password']) > 0] return servers def _get_server_from_url(url): if 'server' in CONFIG['GENERAL']['QUERY']: return CONFIG['GENERAL']['QUERY']['server'] for servername, cfg in CONFIG['SERVERS'].items(): if cfg['host'] in url: return servername raise Exception("Could not determine server for {url}!".format(url=url)) def _get_host_from_url(url): p = urlparse(url) host = '{}://{}/{}'.format( p.scheme, p.netloc, p.path.strip('/').split('/')[0]) return host def _auto_detect_server_from_query(query, available_only=False): servers = None if 'satellite' in query and query['satellite'] in CONFIG['SATELLITES']: servers = CONFIG['SATELLITES'][query['satellite']]['source'] if 'identifier' in query: sat = query['identifier'][:3] if sat in CONFIG['SATELLITES']: servers = CONFIG['SATELLITES'][sat]['source'] if 'mission' in query: sats = utils.select(CONFIG['SATELLITES'], platform=query['mission']) if len(sats) > 0: ll = [source for sat in sats.values() for source in sat['source']] servers = list(OrderedDict.fromkeys(ll)) # # If the server couldn't be determined from the query, return a list of # all servers. 
# if servers is None: servers = list(CONFIG['SERVERS'].keys()) if available_only: servers = [server for server in servers if server in _get_available_servers()] return servers async def _uuid_from_identifier(identifier): identifier = os.path.splitext(os.path.split(identifier)[1])[0] results = await _search({'identifier': identifier+'*'}) if len(results) == 0: raise NotFoundError('Product not found: {}'.format(identifier)) return results[0]['uuid'] async def _host_and_uuid_from_identifier(identifier): identifier = os.path.splitext(os.path.split(identifier)[1])[0] results = await _search({'identifier': identifier+'*'}) if len(results) == 0: raise NotFoundError('Product not found: {}'.format(identifier)) return (results[0]['host'], results[0]['uuid']) def _host_from_uuid(uuid): # # IMPLEMENT THIS # return None async def _download_url_from_identifier(identifier): host, uuid = await _host_and_uuid_from_identifier(identifier) return _download_url_from_uuid(uuid, host=host) async def _checksum_url_from_identifier(identifier): host, uuid = await _host_and_uuid_from_identifier(identifier) return _checksum_url_from_uuid(uuid, host=host) async def _preview_url_from_identifier(identifier): host, uuid = await _host_and_uuid_from_identifier(identifier) return _preview_url_from_uuid(uuid, host=host) def _download_url_from_uuid(uuid, host=None): if host is None: host = _host_from_uuid(uuid) return DOWNLOAD_URL_PATTERN.format(host=host, uuid=uuid) def _checksum_url_from_uuid(uuid, host=None): if host is None: host = _host_from_uuid(uuid) return CHECKSUM_URL_PATTERN.format(host=host, uuid=uuid) def _preview_url_from_uuid(uuid, host=None): if host is None: host = _host_from_uuid(uuid) return PREVIEW_URL_PATTERN.format(host=host, uuid=uuid) # ----------------------------------------------------------------------------- # PUBLIC API METHODS # ----------------------------------------------------------------------------- # def ping(server=None): # results = {} # if server is not None: # servers = [server] # else: # servers = list(CONFIG['SERVERS'].keys()) # pool = multiprocessing.Pool(processes=len(servers)) # results = pool.map_async(_ping_single, servers) # # pool.close() # # pool.join() # # for servername,server in servers.items(): # while not results.ready(): # time.sleep(1) # return results.get() def search(*args, **kwargs): return block(_search, *args, **kwargs) async def _search(query={}, server='auto', limit=None, verbose=False, **kwargs): """ Search SciHub for satellite products. Parameters ---------- query : dict, optional Query parameters as dictionary. May contain key 'server' (see below). server : str, optional If server is 'all', search all saved SciHub servers. If server is 'auto', attempt to auto-detect the server from the query. Otherwise, use the specified server (as configured in CONFIG.SERVERS) (default: 'auto') limit : int, optional The maximum number of results to return. kwargs : dict, optional The query parameters can also be passed as keyword arguments. Returns ------- list of dict A list of dictionaries representing the found search results. """ # # Search in each server. 
# query.update(kwargs) if 'server' in query: server = query.pop('server') if server == 'all': servers = _get_available_servers() elif server == 'auto': servers = _auto_detect_server_from_query(query, available_only=True) else: servers = [server] if servers is None: servers = [] query_string = _build_query(query) tasks = [] for servername in servers: server = CONFIG['SERVERS'][servername] url = '{url}/search?{q}'.format( url=server['host'], q=query_string ) logger.debug('Trying server {}: {}'.format(servername, url)) tasks.append( _get_file_list_from_url(url, limit=limit, verbose=verbose) ) results = await asyncio.gather(*tasks) results = utils.flatten(results) # # Delete duplicate results (if product is on multiple servers). # TODO: This should be done in the order of preference as # given by CONFIG['SERVERS'] ! # unique = utils.unique_by(results, lambda x: x['filename']) if limit is not None: unique = unique[:limit] return unique async def _md5(product=None, uuid=None): if product is not None: if type(product) is dict and 'uuid' in product and 'host' in product: md5_url = _checksum_url_from_uuid(product['uuid'], host=product['host']) elif type(product) is str: md5_url = await _checksum_url_from_identifier(product) else: return False elif uuid is not None: md5_url = _checksum_url_from_uuid(uuid) server = _get_server_from_url(md5_url) async with QUERY[server].get(md5_url) as response: result = await response.read() return result.decode().lower() # if PY2: # return result.lower() # else: # return result.decode().lower() def md5(product=None, uuid=None): """Returns the md5 sum of the file stored on SciHub given the product name or uuid. Parameters ---------- product : str, optional The product name. If given, `uuid` is ignored. uuid : str, optional The product uuid. Returns ------- str The md5 checksum in lower case. """ return block(_md5, product=product, uuid=uuid) def exists(product): search_results = search({'identifier': product+'*'}, limit=1) return len(search_results) > 0 def download(product): cont = CONFIG['GENERAL']['CONTINUE'] if isinstance(product, list): # Multiple downloads # tty.screen.status(total=len(product)) tasks = [_single_download(p, return_md5=True, cont=cont) for p in product] loop = asyncio.get_event_loop() result = loop.run_until_complete(asyncio.gather(*tasks)) else: # Single download result = block(_single_download, product=product, cont=cont) return result async def _single_download(product, return_md5=False, cont=True): """Download a satellite product. Checks for file existence and MD5 checksum. Parameters ---------- product : str or dict The name of the product to be downloaded from SciHub. Alternatively, a dictionary representing a search result from SciHub. return_md5 : bool, optional Whether to compute and return the md5 hash sum (default: False). cont : bool, optional Continue partial downloads (default: True). Returns ------- str The local file path if the download was successful OR the file already exists and passes the md5 checksum test. False otherwise. 
""" if type(product) is dict: fdata = product else: fdata = await _search( {'identifier': os.path.splitext(product)[0]+'*'} ) fdata = fdata[0] satellite = utils.get_satellite(fdata['filename']) ext = CONFIG['SATELLITES'][satellite]['ext'] file_name = fdata['filename'] + ext pbar_key = file_name b_download = True b_file_okay = False complete = False full_file_path = os.path.join(CONFIG['GENERAL']['DATA_DIR'], file_name) download_path = full_file_path + DOWNLOAD_SUFFIX # # Check if file already exists in location: # if os.path.exists(full_file_path) and os.path.isfile(full_file_path): if not CONFIG['GENERAL']['CHECK_EXISTING']: # # File exists and won't be checked. # msg = '{} Skipping existing - MD5 not checked'.format(file_name) tty.screen[pbar_key] = (tty.warn('Skipping') + ': {name}', tty.NOBAR) logger.debug(msg) b_download = False else: # # File exists and will be checked for md5 consistency # local_md5 = checksum.md5(full_file_path) remote_md5 = await _md5(fdata) if local_md5 == remote_md5: file_size = os.path.getsize(full_file_path) msg = '{} Skipping download (MD5 okay)'.format(file_name) tty.screen[pbar_key] = (tty.success('Exists') + ': {name}', tty.NOBAR) tty.screen.status(progress=file_size) logger.debug(msg) b_download = False b_file_okay = True else: msg = '{} MD5 wrong: redownloading ...'.format(file_name) # Don't create the progress bar just yet logger.debug(msg) if b_download: # # Retrials in case the MD5 hashsum fails # for i in range(CONFIG['GENERAL']['TRIALS']): complete = await _download(fdata['url'], download_path, return_md5=return_md5, cont=cont) if return_md5: complete, local_md5 = complete # # After download, check MD5 hashsum # if not complete: # # Download incomplete. # msg = '{} Download failed, trial {:d}/{:d}.'.format( file_name, i+1, CONFIG['GENERAL']['TRIALS']) logger.debug(msg) tty.screen[pbar_key] = (tty.error('Failed') + ': {name}', tty.NOBAR) else: # # Download completed. # if not return_md5: local_md5 = checksum.md5(download_path) remote_md5 = await _md5(fdata) if local_md5 != remote_md5: # # Download failed. # msg = '{} MD5 checksum failed, trial {:d}/{:d}.'.format( file_name, i+1, CONFIG['GENERAL']['TRIALS']) logger.debug(msg) tty.screen[pbar_key] = (tty.error('Failed') + ': {name}', tty.NOBAR) else: # # Download completed and successful. 
# msg = '{} MD5 okay'.format(file_name) logger.debug(msg) tty.screen[pbar_key] = (tty.success('MD5 okay') + ': {name}', tty.NOBAR) b_file_okay = True break if not b_file_okay: msg = '{} Download failed.'.format(file_name) logger.warning(msg) tty.screen[pbar_key] = (tty.error('Failed') + ': {name}', tty.NOBAR) if CONFIG['GENERAL']['DOWNLOAD_PREVIEW']: full_preview_path = os.path.join(CONFIG['GENERAL']['DATA_DIR'], fdata['filename']+'.jpeg') # # If not yet done, download preview file # if not os.path.exists(full_preview_path) or \ not os.path.isfile(full_preview_path): if not _download(fdata['preview'], full_preview_path): logger.info(' Preview not available.') if b_file_okay: # # File has been downloaded successfully OR already exists # --> Return the file path # os.rename(download_path, full_file_path) msg = 'Download successful: {}'.format(full_file_path) logger.debug(msg) tty.screen[pbar_key] = (tty.success('Successful') + ': {name}', tty.NOBAR) if return_md5: return full_file_path, local_md5 else: return full_file_path elif b_download and not complete: # # File download failed --> Return FALSE # msg = 'Download failed: {}'.format(file_name) logger.error(msg) tty.screen[pbar_key] = (tty.error('Failed') + ': {name}', tty.NOBAR) return False else: # # File download was skipped --> Return FALSE # return False async def _get_remote_files_per_satellite(files, satellite): product_names = [os.path.splitext(os.path.split(fpath)[1])[0] for fpath in files] chunksize = 10 queries = [] for products in utils.chunks(product_names, chunksize): query_string = '({})'.format( ' OR '.join(['identifier:{}'.format(product) for product in products]) ) queries.append({'query': query_string, 'satellite': satellite}) tasks = [_search(q) for q in queries] result = await asyncio.gather(*tasks) return utils.flatten(result) def _get_remote_files(files): tasks = [] for sat in CONFIG['SATELLITES']: sat_files = [f for f in files if os.path.split(f)[1].startswith(sat)] tasks.append(_get_remote_files_per_satellite(sat_files, sat)) loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) return utils.flatten(results) def redownload(local_file_list): """Redownload a list of corrupt files. This method will automatically determine the correct source of each file from the file name. It will then attempt to redownload the files. Parameters ---------- local_file_list : list of str A list of local files to be redownloaded from the server. 
""" remote_files = _get_remote_files(local_file_list) logger.info('DOWNLOADING {}'.format(len(remote_files))) download(remote_files) ``` #### File: esahub/tests/test_all.py ```python from esahub import scihub, utils, checksum, check, main import unittest import contextlib import logging import re import datetime as DT import pytz import os import sys import subprocess from shapely.wkt import loads as wkt_loads from esahub.tests import config as test_config from esahub import config logger = logging.getLogger('esahub') PY2 = sys.version_info < (3, 0) SMALL_SIZE_QUERY = 'size: ???.* KB' if hasattr(unittest.TestCase, 'subTest'): class TestCase(unittest.TestCase): pass else: class TestCase(unittest.TestCase): @contextlib.contextmanager def subTest(self, msg='', **params): """Mock subTest method so no exception is raised under Python2.""" utils.eprint('subTest:', msg, params) yield return # ----------------------------------------------------------------------------- # TEST SETUP # ----------------------------------------------------------------------------- def setUpModule(): test_config.set_test_config() test_config.prepare() def tearDownModule(): test_config.cleanup() # ----------------------------------------------------------------------------- # SCIHUB # ----------------------------------------------------------------------------- class ScihubTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() # def setUp(self): def test_servers(self): for name in scihub._get_available_servers(): cfg = config.CONFIG['SERVERS'][name] with self.subTest(server_name=name): url = '{}/search?q=*:*'.format(cfg['host']) response = scihub.get_response(url) # # Assert that the HTML response has status code 200 (OK) # self.assertEqual(response.status, 200) def test__generate_next_url(self): # _generate_next_url(url, total=None) pass def test__parse_page(self): # _parse_page(url, first=False) pass def test__get_file_list_from_url(self): # _get_file_list_from_url(url, limit=None) pass def test__callback(self): # _callback(result) pass def test__build_query(self): # _build_query(query={}) pass def test__build_url(self): # _build_url(query, server) pass def test__download(self): # _download(url, destination, quiet=None, queue=None) pass def test__get_file_list_wrapper(self): # _get_file_list_wrapper(url) pass def test__ping_single(self): # _ping_single(servername) pass def test__auto_detect_server_from_query(self): queries = [ # (query, server) ({'mission': 'Sentinel-1'}, config.CONFIG['SATELLITES']['S1A']['source']), ({'mission': 'Sentinel-2'}, config.CONFIG['SATELLITES']['S2A']['source']), ({'mission': 'Sentinel-3'}, config.CONFIG['SATELLITES']['S3A']['source']), ({'satellite': 'S1A'}, config.CONFIG['SATELLITES']['S1A']['source']), ({'satellite': 'S3A'}, config.CONFIG['SATELLITES']['S3A']['source']), ({'satellite': 'S2B'}, config.CONFIG['SATELLITES']['S2B']['source']), ({'identifier': "S1A_IW_OCN__2SDV_20160924T181320_" "20160924T181345_013198_014FDF_6692.zip"}, config.CONFIG['SATELLITES']['S1A']['source']) ] for query, server in queries: with self.subTest(query=query): self.assertEqual( scihub._auto_detect_server_from_query(query), server ) def test__uuid_from_identifier(self): products = scihub.search({}, limit=1) for product in products: with self.subTest(product=product): self.assertEqual( scihub.block(scihub._uuid_from_identifier, product['title']), product['uuid'] ) # def test__download_url_from_identifier(self): # # _download_url_from_identifier(identifier) # pass # def 
test__checksum_url_from_identifier(self): # # _checksum_url_from_identifier(identifier) # pass # def test__preview_url_from_identifier(self): # # _preview_url_from_identifier(identifier) # pass # def test__download_url_from_uuid(self): # # _download_url_from_uuid(uuid, host=None) # pass # def test__checksum_url_from_uuid(self): # # _checksum_url_from_uuid(uuid, host=None) # pass # def test__preview_url_from_uuid(self): # # _preview_url_from_uuid(uuid, host=None) # pass def test_get_response(self): for name in scihub._get_available_servers(): with self.subTest(server_name=name): response = scihub.get_response( scihub._build_url({'query': '*:*'}, name) ) self.assertEqual(response.status, 200) def test_md5_from_file(self): for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the md5 sum computed from the local file is equal # to the md5 sum obtained from the remote server. # try: remote_md5 = scihub.md5(f) self.assertEqual( checksum.md5(f), remote_md5 ) except Exception as e: self.fail('Remote MD5 could not be obtained: {}'.format(e)) def test_exists_true(self): existing = scihub.search({}, limit=1) for e in existing: with self.subTest(product=e['filename']): self.assertTrue(scihub.exists(e['filename'])) def test_exists_false(self): not_existing = 'this_is_not_on_scihub' self.assertFalse(scihub.exists(not_existing)) # ----------------------------------------------------------------------------- # SCIHUB SEARCH # ----------------------------------------------------------------------------- class ScihubSearchTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() def test_query_entries(self): query = {'mission': 'Sentinel-3'} server = scihub._auto_detect_server_from_query(query, available_only=True)[0] url = scihub._build_url(query, server) html = scihub.resolve(url) # # Assert that the number of entries found on the page matches the # number of entries requested per page. # self.assertEqual(html.count('<entry>'), config.CONFIG['GENERAL']['ENTRIES']) def test_orbit_query(self): for search_str, orbit in [ ('ASC', 'ASCENDING'), ('DESC', 'DESCENDING') ]: query = {'orbit': search_str} result = scihub.search(query, limit=20) for prod in result: self.assertEqual(prod['orbit_direction'], orbit) def test_id_query(self): prod = scihub.search({}, limit=5)[-1] query = {'id': prod['title']} result = scihub.search(query) self.assertEqual(len(result), 1) self.assertEqual(result[0], prod) def test_queries(self): queries = [ # (name, query) ('S3', {'mission': 'Sentinel-3'}), ] for name, q in queries: with self.subTest(name=name): server = scihub._auto_detect_server_from_query( q, available_only=True)[0] url = scihub._build_url(q, server=server) response = scihub.get_response(url) # # Assert that queries for each mission return a # status code 200 (OK) # self.assertEqual(response.status, 200) with self.subTest('count entries'): q = {'mission': 'Sentinel-3'} server = scihub._auto_detect_server_from_query( q, available_only=True)[0] url = scihub._build_url(q, server=server) html = scihub.resolve(url) # # Assert that the number of entries found on the page matches the # number of entries requested per page. 
# self.assertEqual(html.count('<entry>'), config.CONFIG['GENERAL']['ENTRIES']) def test_temporal_queries(self): with self.subTest('yesterday'): file_list = scihub.search({'mission': 'Sentinel-3', 'time': 'yesterday'}, limit=200) yesterday = DT.datetime.now(pytz.utc)-DT.timedelta(1) today = DT.datetime.now(pytz.utc) start = DT.datetime(yesterday.year, yesterday.month, yesterday.day, tzinfo=pytz.utc) end = DT.datetime(today.year, today.month, today.day, tzinfo=pytz.utc) for f in file_list: # # Assert that the ingestiondate of each entry was yesterday. # self.assertGreaterEqual(f['ingestiondate'], start) self.assertLessEqual(f['ingestiondate'], end) with self.subTest('today'): file_list = scihub.search({'mission': 'Sentinel-3', 'time': 'today'}, limit=200) today = DT.datetime.now(pytz.utc) start = DT.datetime(today.year, today.month, today.day, tzinfo=pytz.utc) for f in file_list: # # Assert that the ingestiondate of each entry is today. # self.assertGreaterEqual(f['ingestiondate'], start) # # NOTE: This test presently fails because apparantly, # SciHub's `intersects` parameter does not work reliably. # def test_spatial_queries(self): loc, ref_coords = next(iter(config.CONFIG['LOCATIONS'].items())) with self.subTest(location=loc): file_list = scihub.search( {'location': [loc], 'time': 'to 2017-09-01T00:00:00Z'}, server='S3', limit=20) for f in file_list: with self.subTest(product=f['filename']): # # Assert that the products indeed intersect the # requested location. # distance = wkt_loads(f['coords']).distance( wkt_loads(ref_coords)) utils.eprint('Distance: {}'.format(distance)) self.assertLessEqual(distance, 0.5) def test_get_file_list(self): q = {'mission': 'Sentinel-3'} limit = 107 file_list = scihub.search(q, limit=limit) # # Assert that only `limit` entries are returned. # self.assertEqual(limit, len(file_list)) for f in file_list: # # Assert that each entry contains the attributes `url`, `uuid` and # `filename`. # self.assertIn('url', f) self.assertIn('uuid', f) self.assertIn('filename', f) # ----------------------------------------------------------------------------- # SCIHUB DOWNLOAD # ----------------------------------------------------------------------------- class ScihubDownloadTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() def setUp(self): test_config.clear_test_data() def tearDown(self): test_config.clear_test_data() def test_download(self): file_list = scihub.search({'query': SMALL_SIZE_QUERY}, limit=1) for f in file_list: with self.subTest(url=f['url']): result = scihub.download(f) # # Assert that the download didn't fail and that # the returned file path exists. # self.assertNotEqual(result, False) self.assertTrue(os.path.isfile(result)) def test_download_many(self): file_list = scihub.search({'query': SMALL_SIZE_QUERY}, limit=2) scihub.download(file_list) # # Assert that all downloads were successful. 
# local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR']) local_files_identifiers = [os.path.splitext(os.path.split(_)[1])[0] for _ in local_files] for f in file_list: self.assertIn(f['filename'], local_files_identifiers) for f in local_files: with self.subTest(file=f): _, healthy, msg = check.check_file(f, mode='file') utils.eprint(msg) self.assertTrue(healthy) def test_redownload(self): test_config.copy_corrupt_data() local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR']) scihub.redownload(local_files) new_local_files = utils.ls(config.CONFIG['GENERAL']['DATA_DIR']) self.assertEqual(set(local_files), set(new_local_files)) for f in local_files: with self.subTest(file=f): _, healthy, msg = check.check_file(f, mode='file') utils.eprint(msg) self.assertTrue(healthy) # ----------------------------------------------------------------------------- # CHECK # ----------------------------------------------------------------------------- class CheckTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() def setUp(self): test_config.copy_test_data() def tearDown(self): test_config.clear_test_data() def test_check_file_md5_healthy(self): for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the files check out in `md5` mode. # try: file_path, healthy, message = \ check.check_file(f, mode='md5') self.assertTrue(healthy) except Exception as e: self.fail('File check failed: {}'.format(e)) def test_check_file_zip_healthy(self): for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the files check out in `file` mode. # try: file_path, healthy, message = \ check.check_file(f, mode='file') self.assertTrue(healthy) except Exception as e: self.fail('File check failed: {}'.format(e)) def test_check_file_md5_corrupt(self): test_config.clear_test_data() test_config.copy_corrupt_data() for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the files are detected as corrupt in `md5` mode. # try: file_path, healthy, message = \ check.check_file(f, mode='md5') self.assertFalse(healthy) except Exception as e: self.fail('File check failed: {}'.format(e)) def test_check_file_zip_corrupt(self): test_config.clear_test_data() test_config.copy_corrupt_data() for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the files are detected as corrupt in `file` mode. # try: file_path, healthy, message = \ check.check_file(f, mode='file') self.assertFalse(healthy) except Exception as e: self.fail('File check failed: {}'.format(e)) # ----------------------------------------------------------------------------- # CHECKSUM # ----------------------------------------------------------------------------- class ChecksumTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() def setUp(self): test_config.copy_test_data() def tearDown(self): test_config.clear_test_data() def test_md5(self): for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the md5 checksum returned by checksum.md5() is # equal to the md5 sum returned by bash md5 or md5sum tool. 
# for exe in ['md5', 'md5sum']: if utils._which(exe) is not None: bash_output = subprocess.check_output([exe, f]) if not PY2: bash_output = bash_output.decode() bash_md5 = re.search('[a-zA-Z0-9]{32}', bash_output).group() break self.assertEqual( checksum.md5(f), bash_md5 ) def test_etag_small_files(self): for f in utils.ls(config.CONFIG['GENERAL']['DATA_DIR']): with self.subTest(file=f): # # Assert that the computed etag is equal to the md5 # checksum for files smaller than the chunksize. # size_mb = max(10, int(os.path.getsize(f) / 1024**2)) self.assertEqual( checksum.md5(f), checksum.etag(f, chunksize=2 * size_mb) ) # def test_etag_large_files(self): # pass # ----------------------------------------------------------------------------- # MAIN # ----------------------------------------------------------------------------- class MainTestCase(TestCase): @classmethod def setUpClass(cls): test_config.set_test_config() cls.check_mode = config.CONFIG['GENERAL']['CHECK_MODE'] config.CONFIG['GENERAL']['CHECK_MODE'] = 'file' @classmethod def tearDownClass(cls): config.CONFIG['GENERAL']['CHECK_MODE'] = cls.check_mode def setUp(self): test_config.copy_test_data() def tearDown(self): test_config.clear_all() def test_ls(self): q = {'time': 'today', 'satellite': 'S3A', 'location': ['Ireland_Mace_Head']} files = scihub.search(q) result = main.ls(q) self.assertEqual(len(result), len(files)) def test_get(self): test_config.clear_test_data() q = {'satellite': 'S3A', 'query': SMALL_SIZE_QUERY} files = scihub.search(q, limit=2) main.get(q, limit=2) for f in files: ext = '.zip' with self.subTest(product=f['filename']): self.assertTrue( os.path.isfile(os.path.join( config.CONFIG['GENERAL']['DATA_DIR'], f['filename']) + ext) ) def test_doctor(self): test_config.copy_corrupt_data() corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT, path=False) # healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL, # path=False) result = main.doctor() bad_files = [os.path.split(status[0])[1] for status in result if status[1] is False] # # Assert that the number of healthy/corrupt files detected are correct # self.assertEqual(len(bad_files), len(corrupt_files)) for corrupt_file in corrupt_files: # # Assert that each corrupt file has been registered. # self.assertIn(corrupt_file, bad_files) def test_doctor_delete(self): test_config.copy_corrupt_data() corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT, path=False) healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL, path=False) main.doctor(delete=True) # # Assert that the corrupt files have been deleted. # for f in corrupt_files: self.assertFalse(os.path.isfile(os.path.join( config.CONFIG['GENERAL']['DATA_DIR'], f))) # # Assert that the healthy files have not been deleted. # for f in healthy_files: self.assertTrue(os.path.isfile(os.path.join( config.CONFIG['GENERAL']['DATA_DIR'], f))) def test_doctor_repair(self): test_config.copy_corrupt_data() corrupt_files = utils.ls(test_config.TEST_DATA_DIR_CORRUPT, path=False) # healthy_files = utils.ls(test_config.TEST_DATA_DIR_ORIGINAL, # path=False) main.doctor(repair=True) for f in corrupt_files: repaired_f = os.path.join(config.CONFIG['GENERAL']['DATA_DIR'], f) with self.subTest(file=repaired_f): # # Assert that each corrupt file has been repaired. 
# _, healthy, msg = check.check_file(repaired_f, mode='file') utils.eprint(msg) self.assertTrue(healthy) # ----------------------------------------------------------------------------- # utils # ----------------------------------------------------------------------------- class UtilsTestCase(TestCase): def test_parse_datetime(self): _dt = DT.datetime dates = [ ('Sep 5, 2016', (_dt(2016, 9, 5, 0, 0, 0), _dt(2016, 9, 6, 0, 0, 0))), ('5 Sep 2016', (_dt(2016, 9, 5, 0, 0, 0), _dt(2016, 9, 6, 0, 0, 0))), ('06/1998', (_dt(1998, 6, 1, 0, 0, 0), _dt(1998, 7, 1, 0, 0, 0))), ('Jan 2018 to Oct 2018', (_dt(2018, 1, 1, 0, 0, 0), _dt(2018, 11, 1, 0, 0, 0))), ('1 Jan 2018 to 30 Sep 2018', (_dt(2018, 1, 1, 0, 0, 0), _dt(2018, 10, 1, 0, 0, 0))), ('12/2017', (_dt(2017, 12, 1, 0, 0, 0), _dt(2018, 1, 1, 0, 0, 0))), ('2017/12', (_dt(2017, 12, 1, 0, 0, 0), _dt(2018, 1, 1, 0, 0, 0))), ('2017/12 to 2018/12', (_dt(2017, 12, 1, 0, 0, 0), _dt(2019, 1, 1, 0, 0, 0))), ('Jan 1, 2017, Jan 1, 2018', (_dt(2017, 1, 1, 0, 0, 0), _dt(2018, 1, 2, 0, 0, 0))), ('to Jan 2018', (None, _dt(2018, 2, 1, 0, 0, 0))), ('2015 -', (_dt(2015, 1, 1, 0, 0, 0), None)), ('to 2017-09-01T00:00:00', (None, _dt(2017, 9, 1, 0, 0, 0))) ] for date_str, date_obj in dates: with self.subTest(date_str=date_str): self.assertEqual( utils.parse_datetime(date_str), date_obj ) ```
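A short usage sketch of the public API these tests exercise; the query keys and returned fields match `_build_query` and `parse_page` above, and working server credentials are assumed to be configured:
```python
from esahub import scihub

# Up to two Sentinel-3A products ingested today ...
products = scihub.search({'satellite': 'S3A', 'time': 'today'}, limit=2)
for p in products:
    print(p['filename'], p['size'], p['ingestiondate'])

# ... downloaded into CONFIG['GENERAL']['DATA_DIR'], with the MD5
# verification and retry logic implemented in _single_download().
paths = scihub.download(products)
```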
{ "source": "jnhansen/geo", "score": 2 }
#### File: nd/tests/test_algorithm.py ```python import pytest from nd.algorithm import (Algorithm, wrap_algorithm, parallelize) from nd.testing import (generate_test_dataset, generate_test_dataarray) from xarray.testing import assert_equal as xr_assert_equal from numpy.testing import assert_raises_regex, assert_equal from collections import OrderedDict import inspect # Create algorithm class DummyAlgorithm(Algorithm): """test docstring""" def __init__(self, value, *args, **kwargs): self.value = value def apply(self, ds): """Apply dummy algorithm.""" return ds + self.value class ParallelDummyAlgorithm(Algorithm): """test docstring""" def __init__(self, value, *args, **kwargs): self.value = value @parallelize def apply(self, ds): """Apply dummy algorithm.""" return ds + self.value @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_wrap_algorithm(generator): ds = generator() args = (0.1,) kwargs = {} algo = DummyAlgorithm(*args, **kwargs) wrapper = wrap_algorithm(DummyAlgorithm, 'wrapper_name') # Make sure the result is the same xr_assert_equal( algo.apply(ds), wrapper(ds, *args, **kwargs) ) # Check name, docstring, signature assert(wrapper.__name__ == 'wrapper_name') assert_equal( wrapper.__doc__, 'Wrapper for :class:`nd.tests.test_algorithm.DummyAlgorithm`.\n\n' + DummyAlgorithm.__doc__) assert_equal( list(OrderedDict(inspect.signature(wrapper).parameters).keys()), ['ds', 'value', 'args', 'kwargs'] ) def test_invalid_algorithm_no_apply(): class MissingApplyAlgorithm(Algorithm): def __init__(self, *args, **kwargs): pass # Check that the invalid algorithm cannot be instantiated with assert_raises_regex( TypeError, "Can't instantiate abstract class MissingApplyAlgorithm " "with abstract methods apply" ): MissingApplyAlgorithm() @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) @pytest.mark.parametrize('njobs', [-1, 1, 2]) def test_parallelized_apply(generator, njobs): ds = generator() algo = ParallelDummyAlgorithm(3) ref = algo.apply(ds) result = algo.apply(ds, njobs=njobs) xr_assert_equal(ref, result) ``` #### File: nd/tests/test_coregister.py ```python import skimage.transform import skimage.registration from nd.testing import generate_test_dataset from nd.utils import get_vars_for_dims from nd.warp import Coregistration from nd.filters import GaussianFilter import numpy as np def create_misaligned_dataset(**kwargs): np.random.seed(0) ds = generate_test_dataset(**kwargs) datavars = get_vars_for_dims(ds, ['time', 'x', 'y']) # Create some structure from t=0 d0 = GaussianFilter(sigma=3).apply(ds.isel(time=0)) d0 = d0/d0.max() # Generate a noisy dataset based on the structure ds = ds/ds.max() ds = ds + d0 # Introduce some shifts ntime = ds.dims['time'] shifts = np.random.rand(ntime, 2) shifts[0, :] = 0 for t in range(1, ntime): src = ds.isel(time=t) transf = skimage.transform.AffineTransform(translation=shifts[t, :]) # Apply transform to each variable for v in datavars: ds[v].loc[dict(time=ds['time'][t])] = \ skimage.transform.warp(src[v].values, transf, order=3) return ds, shifts[1:, :] def check_shifts(ds): ref_var = 'C11' ref = ds.isel(time=0)[ref_var].values shifts = [] for t in range(1, ds.dims['time']): # Estimate shift shift = skimage.registration.phase_cross_correlation( ds.isel(time=t)[ref_var].values, ref, upsample_factor=30)[0] shifts.append(shift) return np.array(shifts) def test_coregistration(): ds, old_shifts = create_misaligned_dataset( dims={'y': 200, 'x': 200, 'time': 50}) cor = 
Coregistration(upsampling=50) ds_cor = cor.apply(ds) shifts = check_shifts(ds_cor) print(shifts) assert (np.abs(shifts) <= 0.2).all() # # New shifts may be slightly larger if the original shifts were very small # assert np.logical_or( np.abs(shifts) <= np.abs(old_shifts), np.abs(shifts) <= 0.1 ).all() ``` #### File: nd/tests/test_vector.py ```python import pytest from nd.testing import (generate_test_dataset, generate_test_geodataframe, assert_equal_crs) from nd import vector from nd import warp from numpy.testing import assert_equal, assert_allclose from geopandas.testing import assert_geodataframe_equal import geopandas as gpd import numpy as np import rasterio from scipy import ndimage def test_rasterize_no_side_effects(): ds = generate_test_dataset() df = generate_test_geodataframe() df_copy = df.copy() _ = vector.rasterize(df, ds) # Check that the original GeoDataFrame doesn't change as part of the # rasterization assert_geodataframe_equal( df, df_copy ) def test_rasterize(tmpdir): path = str(tmpdir.join('polygons.shp')) ds = generate_test_dataset(dims=dict(x=100, y=100, time=5)) df = generate_test_geodataframe() schema = gpd.io.file.infer_schema(df) schema['properties']['date'] = 'date' df.to_file(path, schema=schema) # Rasterize raster = vector.rasterize(path, ds) # Check that the raster contains all fields as variables assert set(raster.data_vars).union({'geometry'}) == set(df.columns) # Check dtypes assert np.issubdtype(raster.float.dtype, np.floating) assert np.issubdtype(raster.integer.dtype, np.signedinteger) assert np.issubdtype(raster.category.dtype, np.signedinteger) # Check that extent, projection etc. are identical to the reference raster assert_equal( warp.get_bounds(raster), warp.get_bounds(ds) ) assert_equal_crs( warp.get_crs(raster), warp.get_crs(ds) ) assert_equal( warp.get_transform(raster), warp.get_transform(ds) ) # Check raster content shape = (ds.dims['y'], ds.dims['x']) transform = warp.get_transform(ds) for i, row in df.iterrows(): poly = row['geometry'] mask = rasterio.features.rasterize( [poly], out_shape=shape, transform=transform ) # Erode mask to avoid edge effects mask = ndimage.morphology.binary_erosion(mask) == 1 for v in raster.data_vars: if 'legend' in raster[v].attrs: expected = sorted(raster[v].attrs['legend'], key=lambda x: x[1] == str(row[v]))[-1][0] else: expected = row[v] values = raster[v].isel(time=0).values values[mask] assert_allclose(values[mask], expected) @pytest.mark.parametrize('columns', [ ['integer'], ['integer', 'date'], ['float', 'category'], ['integer', 'geometry'], ]) @pytest.mark.parametrize('date_field', ['date', None]) def test_rasterize_columns(columns, date_field): ds = generate_test_dataset() df = generate_test_geodataframe() raster = vector.rasterize(df, ds, columns=columns, date_field=date_field) if date_field is None: expected_vars = set(columns) - {'geometry'} else: expected_vars = set(columns) - {'geometry', 'date'} assert_equal( set(raster.data_vars), expected_vars ) def test_rasterize_date_field(): ds = generate_test_dataset() df = generate_test_geodataframe() raster = vector.rasterize(df, ds, date_field='date') assert len(np.unique(df['date'])) == raster.dims['time'] assert_equal( np.unique(df['date']).astype('datetime64[s]'), raster.time.values.astype('datetime64[s]') ) ``` #### File: nd/tests/test_xarray_accessor.py ```python import pytest import numpy as np import inspect import xarray as xr from collections import OrderedDict from numpy.testing import assert_equal from xarray.testing import assert_equal as 
xr_assert_equal from nd.testing import (generate_test_dataset, generate_test_dataarray, assert_equal_files, assert_equal_crs, requires) from nd import warp, filters, io, change, utils, visualize from nd._xarray import patch_doc from rasterio.crs import CRS try: import cartopy except ModuleNotFoundError: cartopy = None # --------------- # Test properties # --------------- @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_shape(generator): ds = generator() shape = utils.get_shape(ds) assert_equal(shape, ds.nd.shape) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_dims(generator): ds = generator() dims = utils.get_dims(ds) assert_equal(dims, ds.nd.dims) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_crs(generator): crs = CRS.from_epsg(4326) ds = generator(crs=crs) assert_equal_crs(crs, ds.nd.crs) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_bounds(generator): extent = (10, 30, 15, 35) ds = generator(extent=extent) assert_equal(extent, ds.nd.bounds) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_resolution(generator): ds = generator() res = warp.get_resolution(ds) assert_equal(res, ds.nd.resolution) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_transform(generator): ds = generator() transf = warp.get_transform(ds) assert_equal(transf, ds.nd.transform) # ------------------------- # Test reprojection methods # ------------------------- @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_reproject(generator): ds = generator() kwargs = dict(crs='epsg:27700') xr_assert_equal( warp.reproject(ds, **kwargs), ds.nd.reproject(**kwargs) ) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_resample(generator): ds = generator() kwargs = dict(width=50) xr_assert_equal( warp.resample(ds, **kwargs), ds.nd.resample(**kwargs) ) # -------------------------- # Test visualization methods # -------------------------- def test_accessor_nd_to_rgb(): ds = generate_test_dataset(dims={'y': 50, 'x': 50}) def rgb(d): return [d.C11, d.C22, d.C11/d.C22] assert_equal( visualize.to_rgb(rgb(ds)), ds.nd.to_rgb(rgb) ) def test_accessor_nd_to_rgb_default(): ds = generate_test_dataset(dims={'y': 50, 'x': 50}) assert_equal( visualize.to_rgb([ds.C11, ds.C22, ds.C11/ds.C22]), ds.nd.to_rgb() ) def test_accessor_nd_to_video(tmpdir): ds = generate_test_dataset() path_1 = str(tmpdir.join('video1.avi')) path_2 = str(tmpdir.join('video2.avi')) visualize.write_video(ds, path_1) ds.nd.to_video(path_2) assert_equal_files(path_1, path_2) @requires('cartopy') def test_accessor_nd_plot_map(): ds = generate_test_dataset() ax = ds.nd.plot_map(background=None) assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot) # --------------- # Test IO methods # --------------- def test_accessor_nd_as_complex(): ds = generate_test_dataset() xr_assert_equal( io.assemble_complex(ds), ds.nd.as_complex() ) def test_accessor_nd_as_real(): ds = generate_test_dataset().nd.as_complex() xr_assert_equal( io.disassemble_complex(ds), ds.nd.as_real() ) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def 
test_accessor_nd_to_netcdf(tmpdir, generator): ds = generator() path_1 = str(tmpdir.join('ds1.nc')) path_2 = str(tmpdir.join('ds2.nc')) io.to_netcdf(ds, path_1) ds.nd.to_netcdf(path_2) xr_assert_equal( io.open_dataset(path_1), io.open_dataset(path_2) ) # -------------------- # Test general methods # -------------------- @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_nd_apply(generator): ds = generator() def func(arr): """Reduce a two dimensional array to its mean.""" return arr.mean() signature = '(x,y)->()' xr_assert_equal( ds.nd.apply(func, signature=signature), utils.apply(ds, func, signature=signature) ) # ------------------------------- # Test change detection accessors # ------------------------------- @requires('gsl') def test_accessor_nd_omnibus(): ds1 = generate_test_dataset( dims={'y': 5, 'x': 5, 'time': 10}, mean=[1, 0, 0, 1], sigma=0.1 ).isel(time=slice(None, 5)) ds2 = generate_test_dataset( dims={'y': 5, 'x': 5, 'time': 10}, mean=[10, 0, 0, 10], sigma=0.1 ).isel(time=slice(5, None)) ds = xr.concat([ds1, ds2], dim='time') kwargs = dict(n=9, alpha=0.9) xr_assert_equal( change.omnibus(ds, **kwargs), ds.nd.change_omnibus(**kwargs) ) # --------------------- # Test filter accessors # --------------------- @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_filter_nlmeans(generator): ds = generator() kwargs = dict(dims=('y', 'x'), r=0, f=1, sigma=2, h=2) xr_assert_equal( filters.nlmeans(ds, **kwargs), ds.filter.nlmeans(**kwargs) ) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_filter_boxcar(generator): ds = generator() kwargs = dict(dims=('y', 'x'), w=5) xr_assert_equal( filters.boxcar(ds, **kwargs), ds.filter.boxcar(**kwargs) ) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_filter_convolution(generator): ds = generator() np.random.seed(42) kwargs = dict(dims=('y', 'x'), kernel=np.random.rand(5, 5)) xr_assert_equal( filters.convolution(ds, **kwargs), ds.filter.convolve(**kwargs) ) @pytest.mark.parametrize('generator', [ generate_test_dataset, generate_test_dataarray ]) def test_accessor_filter_gaussian(generator): ds = generator() kwargs = dict(dims=('y', 'x'), sigma=1.5) xr_assert_equal( filters.gaussian(ds, **kwargs), ds.filter.gaussian(**kwargs) ) # --------------------------- # Test accessor documentation # --------------------------- def test_patch_doc(): def src_fn(data, a, b, c, d={}): """Source docstring""" pass @patch_doc(src_fn) def fn(self): pass # Check that docstring matches assert_equal(src_fn.__doc__, fn.__doc__) # Check that signature matches # (apart from first parameter) params_src = OrderedDict(inspect.signature(src_fn).parameters) params_fn = OrderedDict(inspect.signature(fn).parameters) params_src.popitem(last=False) params_fn.popitem(last=False) assert_equal( params_src, params_fn ) ```
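The accessor tests above rely on xarray's plugin hooks to attach the `nd` and `filter` namespaces to datasets. As a point of reference, a minimal accessor can be registered as sketched below; the `demo` name and its single property are illustrative placeholders, not part of nd's actual implementation.

```python
# Minimal xarray accessor sketch; the "demo" namespace and its property
# are placeholders, not nd's real accessor.
import xarray as xr


@xr.register_dataset_accessor("demo")
class DemoAccessor:
    def __init__(self, ds):
        # xarray hands the Dataset instance to the accessor on first access.
        self._ds = ds

    @property
    def shape(self):
        # Analogous to the ds.nd.shape property exercised in the tests above.
        return tuple(self._ds.sizes.values())


ds = xr.Dataset({"a": (("y", "x"), [[1.0, 2.0], [3.0, 4.0]])})
print(ds.demo.shape)  # -> (2, 2)
```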
{ "source": "jnhansen/rasterio", "score": 2 }
#### File: rasterio/rasterio/profiles.py ```python import warnings from rasterio.compat import UserDict from rasterio.dtypes import uint8 class Profile(UserDict): """Base class for Rasterio dataset profiles. Subclasses will declare driver-specific creation options. """ defaults = {} def __init__(self, data={}, **kwds): """Create a new profile based on the class defaults, which are overlaid with items from the `data` dict and keyword arguments.""" UserDict.__init__(self) initdata = self.defaults.copy() initdata.update(data) initdata.update(**kwds) self.data.update(initdata) def __getitem__(self, key): """Like normal item access but with affine alias.""" return self.data[key] def __setitem__(self, key, val): """Like normal item setter but forbidding affine item.""" if key == 'affine': raise TypeError("affine key is prohibited") self.data[key] = val class DefaultGTiffProfile(Profile): """Tiled, band-interleaved, LZW-compressed, 8-bit GTiff.""" defaults = { 'driver': 'GTiff', 'interleave': 'band', 'tiled': True, 'blockxsize': 256, 'blockysize': 256, 'compress': 'lzw', 'nodata': 0, 'dtype': uint8 } default_gtiff_profile = DefaultGTiffProfile() ``` #### File: rasterio/rasterio/vrt.py ```python import xml.etree.ElementTree as ET import rasterio from rasterio._warp import WarpedVRTReaderBase from rasterio.dtypes import _gdal_typename from rasterio.enums import MaskFlags from rasterio.path import parse_path, vsi_path from rasterio.transform import TransformMethodsMixin from rasterio.windows import WindowMethodsMixin class WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin, TransformMethodsMixin): """A virtual warped dataset. Abstracts the details of raster warping and allows access to data that is reprojected when read. This class is backed by an in-memory GDAL VRTWarpedDataset VRT file. Attributes ---------- src_dataset : dataset The dataset object to be virtually warped. resampling : int One of the values from rasterio.enums.Resampling. The default is `Resampling.nearest`. tolerance : float The maximum error tolerance in input pixels when approximating the warp transformation. The default is 0.125. src_nodata: int or float, optional The source nodata value. Pixels with this value will not be used for interpolation. If not set, it will be default to the nodata value of the source image, if available. dst_nodata: int or float, optional The nodata value used to initialize the destination; it will remain in all areas not covered by the reprojected source. Defaults to the value of src_nodata, or 0 (gdal default). warp_extras : dict GDAL extra warp options. See http://www.gdal.org/structGDALWarpOptions.html. Examples -------- >>> with rasterio.open('tests/data/RGB.byte.tif') as src: ... with WarpedVRT(src, crs='EPSG:3857') as vrt: ... data = vrt.read() """ def __repr__(self): return "<{} WarpedVRT name='{}' mode='{}'>".format( self.closed and 'closed' or 'open', self.name, self.mode) def __enter__(self): self.start() return self def __exit__(self, *args, **kwargs): self.close() def __del__(self): self.close() def close(self): self.stop() def _boundless_vrt_doc( src_dataset, nodata=None, background=None, hidenodata=False, width=None, height=None, transform=None): """Make a VRT XML document. Parameters ---------- src_dataset : Dataset The dataset to wrap. background : Dataset, optional A dataset that provides the optional VRT background. NB: this dataset must have the same number of bands as the src_dataset. 
Returns ------- bytes An ascii-encoded string (an ElementTree detail) """ nodata = nodata or src_dataset.nodata width = width or src_dataset.width height = height or src_dataset.height transform = transform or src_dataset.transform vrtdataset = ET.Element('VRTDataset') vrtdataset.attrib['rasterYSize'] = str(height) vrtdataset.attrib['rasterXSize'] = str(width) srs = ET.SubElement(vrtdataset, 'SRS') srs.text = src_dataset.crs.wkt if src_dataset.crs else "" geotransform = ET.SubElement(vrtdataset, 'GeoTransform') geotransform.text = ','.join([str(v) for v in transform.to_gdal()]) for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes): vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand') vrtrasterband.attrib['dataType'] = _gdal_typename(dtype) vrtrasterband.attrib['band'] = str(bidx) if nodata is not None: nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue') nodatavalue.text = str(nodata) if hidenodata: hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue') hidenodatavalue.text = "1" colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp') colorinterp.text = ci.name.capitalize() if background is not None: simplesource = ET.SubElement(vrtrasterband, 'SimpleSource') sourcefilename = ET.SubElement(simplesource, 'SourceFilename') sourcefilename.attrib['relativeToVRT'] = "0" sourcefilename.text = vsi_path(parse_path(background.name)) sourceband = ET.SubElement(simplesource, 'SourceBand') sourceband.text = str(bidx) sourceproperties = ET.SubElement(simplesource, 'SourceProperties') sourceproperties.attrib['RasterXSize'] = str(width) sourceproperties.attrib['RasterYSize'] = str(height) sourceproperties.attrib['dataType'] = _gdal_typename(dtype) sourceproperties.attrib['BlockYSize'] = str(block_shape[0]) sourceproperties.attrib['BlockXSize'] = str(block_shape[1]) srcrect = ET.SubElement(simplesource, 'SrcRect') srcrect.attrib['xOff'] = '0' srcrect.attrib['yOff'] = '0' srcrect.attrib['xSize'] = str(background.width) srcrect.attrib['ySize'] = str(background.height) dstrect = ET.SubElement(simplesource, 'DstRect') dstrect.attrib['xOff'] = '0' dstrect.attrib['yOff'] = '0' dstrect.attrib['xSize'] = str(width) dstrect.attrib['ySize'] = str(height) simplesource = ET.SubElement(vrtrasterband, 'SimpleSource') sourcefilename = ET.SubElement(simplesource, 'SourceFilename') sourcefilename.attrib['relativeToVRT'] = "0" sourcefilename.text = vsi_path(parse_path(src_dataset.name)) sourceband = ET.SubElement(simplesource, 'SourceBand') sourceband.text = str(bidx) sourceproperties = ET.SubElement(simplesource, 'SourceProperties') sourceproperties.attrib['RasterXSize'] = str(width) sourceproperties.attrib['RasterYSize'] = str(height) sourceproperties.attrib['dataType'] = _gdal_typename(dtype) sourceproperties.attrib['BlockYSize'] = str(block_shape[0]) sourceproperties.attrib['BlockXSize'] = str(block_shape[1]) srcrect = ET.SubElement(simplesource, 'SrcRect') srcrect.attrib['xOff'] = '0' srcrect.attrib['yOff'] = '0' srcrect.attrib['xSize'] = str(src_dataset.width) srcrect.attrib['ySize'] = str(src_dataset.height) dstrect = ET.SubElement(simplesource, 'DstRect') dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a) dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e) dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a) dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e) if 
src_dataset.nodata is not None: nodata_elem = ET.SubElement(simplesource, 'NODATA') nodata_elem.text = str(src_dataset.nodata) if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums): maskband = ET.SubElement(vrtdataset, 'MaskBand') vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand') vrtrasterband.attrib['dataType'] = 'Byte' simplesource = ET.SubElement(vrtrasterband, 'SimpleSource') sourcefilename = ET.SubElement(simplesource, 'SourceFilename') sourcefilename.attrib['relativeToVRT'] = "0" sourcefilename.text = vsi_path(parse_path(src_dataset.name)) sourceband = ET.SubElement(simplesource, 'SourceBand') sourceband.text = 'mask,1' sourceproperties = ET.SubElement(simplesource, 'SourceProperties') sourceproperties.attrib['RasterXSize'] = str(width) sourceproperties.attrib['RasterYSize'] = str(height) sourceproperties.attrib['dataType'] = 'Byte' sourceproperties.attrib['BlockYSize'] = str(block_shape[0]) sourceproperties.attrib['BlockXSize'] = str(block_shape[1]) srcrect = ET.SubElement(simplesource, 'SrcRect') srcrect.attrib['xOff'] = '0' srcrect.attrib['yOff'] = '0' srcrect.attrib['xSize'] = str(src_dataset.width) srcrect.attrib['ySize'] = str(src_dataset.height) dstrect = ET.SubElement(simplesource, 'DstRect') dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a) dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e) dstrect.attrib['xSize'] = str(src_dataset.width) dstrect.attrib['ySize'] = str(src_dataset.height) return ET.tostring(vrtdataset) ``` #### File: jnhansen/rasterio/setup.py ```python import copy from distutils.command.sdist import sdist import itertools import logging import os import platform import pprint import shutil import subprocess import sys from setuptools import setup from setuptools.extension import Extension logging.basicConfig(stream=sys.stderr, level=logging.INFO) log = logging.getLogger() def check_output(cmd): # since subprocess.check_output doesn't exist in 2.6 # we wrap it here. try: out = subprocess.check_output(cmd) return out.decode('utf') except AttributeError: # For some reasone check_output doesn't exist # So fall back on Popen p = subprocess.Popen(cmd, stdout=subprocess.PIPE) out, err = p.communicate() return out def copy_data_tree(datadir, destdir): try: shutil.rmtree(destdir) except OSError: pass shutil.copytree(datadir, destdir) # python -W all setup.py ... if 'all' in sys.warnoptions: log.level = logging.DEBUG # Parse the version from the rasterio module. with open('rasterio/__init__.py') as f: for line in f: if line.find("__version__") >= 0: version = line.split("=")[1].strip() version = version.strip('"') version = version.strip("'") continue with open('VERSION.txt', 'w') as f: f.write(version) # Use Cython if available. try: from Cython.Build import cythonize except ImportError: cythonize = None # By default we'll try to get options via gdal-config. On systems without, # options will need to be set in setup.cfg or on the setup command line. 
include_dirs = [] library_dirs = [] libraries = [] extra_link_args = [] gdal2plus = False gdal_output = [None] * 4 gdalversion = None try: import numpy as np include_dirs.append(np.get_include()) except ImportError: sys.exit("ERROR: Numpy and its headers are required to run setup().") try: gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config') for i, flag in enumerate(("--cflags", "--libs", "--datadir", "--version")): gdal_output[i] = check_output([gdal_config, flag]).strip() for item in gdal_output[0].split(): if item.startswith("-I"): include_dirs.extend(item[2:].split(":")) for item in gdal_output[1].split(): if item.startswith("-L"): library_dirs.extend(item[2:].split(":")) elif item.startswith("-l"): libraries.append(item[2:]) else: # e.g. -framework GDAL extra_link_args.append(item) # datadir, gdal_output[2] handled below gdalversion = gdal_output[3] if gdalversion: log.info("GDAL API version obtained from gdal-config: %s", gdalversion) except Exception as e: if os.name == "nt": log.info("Building on Windows requires extra options to setup.py " "to locate needed GDAL files. More information is available " "in the README.") else: log.warning("Failed to get options via gdal-config: %s", str(e)) # Get GDAL API version from environment variable. if 'GDAL_VERSION' in os.environ: gdalversion = os.environ['GDAL_VERSION'] log.info("GDAL API version obtained from environment: %s", gdalversion) # Get GDAL API version from the command line if specified there. if '--gdalversion' in sys.argv: index = sys.argv.index('--gdalversion') sys.argv.pop(index) gdalversion = sys.argv.pop(index) log.info("GDAL API version obtained from command line option: %s", gdalversion) if not gdalversion: sys.exit("ERROR: A GDAL API version must be specified. Provide a path " "to gdal-config using a GDAL_CONFIG environment variable " "or use a GDAL_VERSION environment variable.") gdal_version_parts = gdalversion.split('.') gdal_major_version = int(gdal_version_parts[0]) gdal_minor_version = int(gdal_version_parts[1]) if gdal_major_version == 1 and gdal_minor_version < 11: sys.exit("ERROR: GDAL >= 1.11 is required for rasterio. " "Please upgrade GDAL.") # Conditionally copy the GDAL data. To be used in conjunction with # the bdist_wheel command to make self-contained binary wheels. if os.environ.get('PACKAGE_DATA'): destdir = 'rasterio/gdal_data' if gdal_output[2]: log.info("Copying gdal data from %s" % gdal_output[2]) copy_data_tree(gdal_output[2], destdir) else: # check to see if GDAL_DATA is defined gdal_data = os.environ.get('GDAL_DATA', None) if gdal_data: log.info("Copying gdal_data from %s" % gdal_data) copy_data_tree(gdal_data, destdir) # Conditionally copy PROJ.4 data. projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj') if os.path.exists(projdatadir): log.info("Copying proj_data from %s" % projdatadir) copy_data_tree(projdatadir, 'rasterio/proj_data') # Extend distutil's sdist command to generate 3 C extension sources for # the _io module: a version for GDAL < 2, one for 2 <= GDAL < 2.1 and # one for GDAL >= 2.1. 
class sdist_multi_gdal(sdist): def run(self): shutil.copy('rasterio/_shim1.pyx', 'rasterio/_shim.pyx') _ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx', '-o', 'rasterio/_shim1.c']) print(_) shutil.copy('rasterio/_shim20.pyx', 'rasterio/_shim.pyx') _ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx', '-o', 'rasterio/_shim20.c']) print(_) shutil.copy('rasterio/_shim21.pyx', 'rasterio/_shim.pyx') _ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx', '-o', 'rasterio/_shim21.c']) print(_) sdist.run(self) ext_options = { 'include_dirs': include_dirs, 'library_dirs': library_dirs, 'libraries': libraries, 'extra_link_args': extra_link_args, 'define_macros': []} if not os.name == "nt": # These options fail on Windows if using Visual Studio ext_options['extra_compile_args'] = ['-Wno-unused-parameter', '-Wno-unused-function'] # Copy extension options for cpp extension modules. cpp_ext_options = copy.deepcopy(ext_options) # Remove -std=c++11 from C extension options. try: ext_options['extra_link_args'].remove('-std=c++11') ext_options['extra_compile_args'].remove('-std=c++11') except Exception: pass # GDAL 2.3 and newer requires C++11 if (gdal_major_version, gdal_minor_version) >= (2, 3): cpp11_flag = '-std=c++11' # 'extra_compile_args' may not be defined eca = cpp_ext_options.get('extra_compile_args', []) if platform.system() == 'Darwin': if cpp11_flag not in eca: eca.append(cpp11_flag) eca += [cpp11_flag, '-mmacosx-version-min=10.9', '-stdlib=libc++'] # TODO: Windows elif cpp11_flag not in eca: eca.append(cpp11_flag) cpp_ext_options['extra_compile_args'] = eca # Configure optional Cython coverage. cythonize_options = {} if os.environ.get('CYTHON_COVERAGE'): cythonize_options['compiler_directives'] = {'linetrace': True} cythonize_options['annotate'] = True ext_options['define_macros'].extend( [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]) log.debug('ext_options:\n%s', pprint.pformat(ext_options)) if gdal_major_version >= 2: # GDAL>=2.0 does not require vendorized rasterfill.cpp cython_fill = ['rasterio/_fill.pyx'] sdist_fill = ['rasterio/_fill.cpp'] else: cython_fill = ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'] sdist_fill = ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'] # When building from a repo, Cython is required. if os.path.exists("MANIFEST.in") and "clean" not in sys.argv: log.info("MANIFEST.in found, presume a repo, cythonizing...") if not cythonize: sys.exit( "ERROR: Cython.Build.cythonize not found. " "Cython is required to build from a repo.") # Copy the GDAL version-specific shim module to _shim.pyx. 
if gdal_major_version == 2 and gdal_minor_version >= 1: shutil.copy('rasterio/_shim21.pyx', 'rasterio/_shim.pyx') elif gdal_major_version == 2 and gdal_minor_version == 0: shutil.copy('rasterio/_shim20.pyx', 'rasterio/_shim.pyx') elif gdal_major_version == 1: shutil.copy('rasterio/_shim1.pyx', 'rasterio/_shim.pyx') ext_modules = cythonize([ Extension( 'rasterio._base', ['rasterio/_base.pyx'], **ext_options), Extension( 'rasterio._io', ['rasterio/_io.pyx'], **ext_options), Extension( 'rasterio._features', ['rasterio/_features.pyx'], **ext_options), Extension( 'rasterio._env', ['rasterio/_env.pyx'], **ext_options), Extension( 'rasterio._warp', ['rasterio/_warp.pyx'], **cpp_ext_options), Extension( 'rasterio._fill', cython_fill, **cpp_ext_options), Extension( 'rasterio._err', ['rasterio/_err.pyx'], **ext_options), Extension( 'rasterio._example', ['rasterio/_example.pyx'], **ext_options), Extension( 'rasterio._shim', ['rasterio/_shim.pyx'], **ext_options), Extension( 'rasterio._crs', ['rasterio/_crs.pyx'], **ext_options), Extension( 'rasterio.shutil', ['rasterio/shutil.pyx'], **ext_options)], quiet=True, **cythonize_options) # If there's no manifest template, as in an sdist, we just specify .c files. else: ext_modules = [ Extension( 'rasterio._base', ['rasterio/_base.c'], **ext_options), Extension( 'rasterio._io', ['rasterio/_io.c'], **ext_options), Extension( 'rasterio._features', ['rasterio/_features.c'], **ext_options), Extension( 'rasterio._env', ['rasterio/_env.c'], **ext_options), Extension( 'rasterio._warp', ['rasterio/_warp.cpp'], **cpp_ext_options), Extension( 'rasterio._fill', sdist_fill, **cpp_ext_options), Extension( 'rasterio._err', ['rasterio/_err.c'], **ext_options), Extension( 'rasterio._example', ['rasterio/_example.c'], **ext_options), Extension( 'rasterio._crs', ['rasterio/_crs.c'], **ext_options), Extension( 'rasterio.shutil', ['rasterio/shutil.c'], **ext_options)] # Copy the GDAL version-specific shim module to _shim.pyx. if gdal_major_version == 2 and gdal_minor_version >= 1: ext_modules.append( Extension('rasterio._shim', ['rasterio/_shim21.c'], **ext_options)) elif gdal_major_version == 2 and gdal_minor_version == 0: ext_modules.append( Extension('rasterio._shim', ['rasterio/_shim20.c'], **ext_options)) elif gdal_major_version == 1: ext_modules.append( Extension('rasterio._shim', ['rasterio/_shim1.c'], **ext_options)) with open('README.rst') as f: readme = f.read() # Runtime requirements. inst_reqs = [ 'affine', 'attrs', 'click>=4.0,<8', 'cligj>=0.5', 'numpy', 'snuggs>=1.4.1', 'click-plugins'] if sys.version_info < (3, 4): inst_reqs.append('enum34') extra_reqs = { 'ipython': ['ipython>=2.0'], 's3': ['boto3>=1.2.4'], 'plot': ['matplotlib'], 'test': [ 'pytest>=2.8.2', 'pytest-cov>=2.2.0', 'boto3>=1.2.4', 'packaging', 'hypothesis'], 'docs': ['ghp-import', 'numpydoc', 'sphinx', 'sphinx-rtd-theme']} # Add futures to 'test' for Python < 3.2. 
if sys.version_info < (3, 2): extra_reqs['test'].append('futures') # Add all extra requirements extra_reqs['all'] = list(set(itertools.chain(*extra_reqs.values()))) setup_args = dict( cmdclass={'sdist': sdist_multi_gdal}, name='rasterio', version=version, description="Fast and direct raster I/O for use with Numpy and SciPy", long_description=readme, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Cython', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Multimedia :: Graphics :: Graphics Conversion', 'Topic :: Scientific/Engineering :: GIS'], keywords='raster gdal', author='<NAME>', author_email='<EMAIL>', url='https://github.com/mapbox/rasterio', license='BSD', package_dir={'': '.'}, packages=['rasterio', 'rasterio.rio'], entry_points=''' [console_scripts] rio=rasterio.rio.main:main_group [rasterio.rio_commands] blocks=rasterio.rio.blocks:blocks bounds=rasterio.rio.bounds:bounds calc=rasterio.rio.calc:calc clip=rasterio.rio.clip:clip convert=rasterio.rio.convert:convert edit-info=rasterio.rio.edit_info:edit env=rasterio.rio.env:env gcps=rasterio.rio.gcps:gcps info=rasterio.rio.info:info insp=rasterio.rio.insp:insp mask=rasterio.rio.mask:mask merge=rasterio.rio.merge:merge overview=rasterio.rio.overview:overview rasterize=rasterio.rio.rasterize:rasterize rm=rasterio.rio.rm:rm sample=rasterio.rio.sample:sample shapes=rasterio.rio.shapes:shapes stack=rasterio.rio.stack:stack transform=rasterio.rio.transform:transform warp=rasterio.rio.warp:warp ''', include_package_data=True, ext_modules=ext_modules, zip_safe=False, install_requires=inst_reqs, extras_require=extra_reqs) if os.environ.get('PACKAGE_DATA'): setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']} setup(**setup_args) ``` #### File: rasterio/tests/test_crs.py ```python import logging import subprocess import json import pytest import rasterio from rasterio._base import _can_create_osr from rasterio.crs import CRS from rasterio.env import env_ctx_if_needed from rasterio.errors import CRSError from .conftest import requires_gdal21, requires_gdal22 @pytest.fixture(scope='session') def profile_rgb_byte_tif(path_rgb_byte_tif): with rasterio.open(path_rgb_byte_tif) as src: return src.profile # When possible, Rasterio gives you the CRS in the form of an EPSG code. def test_read_epsg(tmpdir): with rasterio.open('tests/data/RGB.byte.tif') as src: assert src.crs.to_dict() == {'init': 'epsg:32618'} def test_read_esri_wkt(tmpdir): with rasterio.open('tests/data/test_esri_wkt.tif') as src: assert src.crs.to_dict() == { 'datum': 'NAD83', 'lat_0': 23, 'lat_1': 29.5, 'lat_2': 45.5, 'lon_0': -96, 'no_defs': True, 'proj': 'aea', 'units': 'm', 'x_0': 0, 'y_0': 0, } @pytest.mark.gdalbin def test_read_epsg3857(tmpdir): tiffname = str(tmpdir.join('lol.tif')) subprocess.call([ 'gdalwarp', '-t_srs', 'EPSG:3857', 'tests/data/RGB.byte.tif', tiffname]) with rasterio.open(tiffname) as src: assert src.crs.to_dict() == {'init': 'epsg:3857'} # Ensure that CRS sticks when we write a file. 
@pytest.mark.gdalbin def test_write_3857(tmpdir): src_path = str(tmpdir.join('lol.tif')) subprocess.call([ 'gdalwarp', '-t_srs', 'EPSG:3857', 'tests/data/RGB.byte.tif', src_path]) dst_path = str(tmpdir.join('wut.tif')) with rasterio.open(src_path) as src: with rasterio.open(dst_path, 'w', **src.meta) as dst: assert dst.crs.to_dict() == {'init': 'epsg:3857'} info = subprocess.check_output([ 'gdalinfo', dst_path]) # WKT string may vary a bit w.r.t GDAL versions assert 'PROJCS["WGS 84 / Pseudo-Mercator"' in info.decode('utf-8') def test_write_bogus_fails(tmpdir, profile_rgb_byte_tif): src_path = str(tmpdir.join('lol.tif')) profile = profile_rgb_byte_tif.copy() profile['crs'] = ['foo'] with pytest.raises(CRSError): rasterio.open(src_path, 'w', **profile) # TODO: switch to DatasetWriter here and don't require a .start(). def test_from_proj4_json(): json_str = '{"proj": "longlat", "ellps": "WGS84", "datum": "WGS84"}' crs_dict = CRS.from_string(json_str) assert crs_dict == json.loads(json_str) # Test with invalid JSON code with pytest.raises(ValueError): assert CRS.from_string('{foo: bar}') def test_from_epsg(): crs_dict = CRS.from_epsg(4326) assert crs_dict['init'].lower() == 'epsg:4326' # Test with invalid EPSG code with pytest.raises(ValueError): assert CRS.from_epsg(0) def test_from_epsg_string(): crs_dict = CRS.from_string('epsg:4326') assert crs_dict['init'].lower() == 'epsg:4326' # Test with invalid EPSG code with pytest.raises(ValueError): assert CRS.from_string('epsg:xyz') def test_from_string(): wgs84_crs = CRS.from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs') assert wgs84_crs.to_dict() == {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'} # Make sure this doesn't get handled using the from_epsg() even though 'epsg' is in the string epsg_init_crs = CRS.from_string('+units=m +init=epsg:26911 +no_defs=True') assert epsg_init_crs.to_dict() == {'units': 'm', 'init': 'epsg:26911', 'no_defs': True} def test_bare_parameters(): """ Make sure that bare parameters (e.g., no_defs) are handled properly, even if they come in with key=True. 
This covers interaction with pyproj, which makes presents bare parameters as key=<bool>.""" # Example produced by pyproj crs_dict = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') assert crs_dict.get('no_defs', False) is True crs_dict = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=False +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') assert crs_dict.get('no_defs', True) is False def test_is_geographic(): assert CRS({'init': 'EPSG:4326'}).is_geographic is True assert CRS({'init': 'EPSG:3857'}).is_geographic is False wgs84_crs = CRS.from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs') assert wgs84_crs.is_geographic is True nad27_crs = CRS.from_string('+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs') assert nad27_crs.is_geographic is True lcc_crs = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') assert lcc_crs.is_geographic is False def test_is_projected(): assert CRS({'init': 'EPSG:3857'}).is_projected is True lcc_crs = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') assert CRS(lcc_crs).is_projected is True wgs84_crs = CRS.from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs') assert CRS(wgs84_crs).is_projected is False def test_is_same_crs(): crs1 = CRS({'init': 'EPSG:4326'}) crs2 = CRS({'init': 'EPSG:3857'}) assert crs1 == crs1 assert crs1 != crs2 wgs84_crs = CRS.from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs') assert crs1 == wgs84_crs # Make sure that same projection with different parameter are not equal lcc_crs1 = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') lcc_crs2 = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=45 +lat_0=0') assert lcc_crs1 != lcc_crs2 def test_null_crs_equality(): assert CRS() == CRS() a = CRS() assert a == a assert not a != a def test_null_and_valid_crs_equality(): assert (CRS() == CRS(init='EPSG:4326')) is False def test_to_string(): assert CRS({'init': 'EPSG:4326'}).to_string() == "+init=EPSG:4326" def test_is_valid_false(): with pytest.raises(CRSError): CRS(init='EPSG:432600').is_valid def test_is_valid(): assert CRS(init='EPSG:4326').is_valid def test_empty_json(): with pytest.raises(CRSError): CRS.from_string('{}') with pytest.raises(CRSError): CRS.from_string('[]') with pytest.raises(CRSError): CRS.from_string('') @pytest.mark.parametrize('arg', [None, {}, '']) def test_can_create_osr_none_err(arg): """Passing None or empty fails""" assert not _can_create_osr(arg) def test_can_create_osr(): assert _can_create_osr({'init': 'EPSG:4326'}) assert _can_create_osr('EPSG:4326') @pytest.mark.parametrize('arg', ['EPSG:-1', 'foo']) def test_can_create_osr_invalid(arg): """invalid CRS definitions fail""" assert not _can_create_osr(arg) @requires_gdal22( reason="GDAL bug resolved in 2.2+ allowed invalid CRS to be created") def test_can_create_osr_invalid_epsg_0(): assert not _can_create_osr('EPSG:') def test_has_wkt_property(): assert CRS({'init': 'EPSG:4326'}).wkt.startswith('GEOGCS["WGS 84",DATUM') def test_repr(): assert repr(CRS({'init': 'EPSG:4326'})).startswith("CRS({'init'") def test_dunder_str(): assert str(CRS({'init': 'EPSG:4326'})) == CRS({'init': 'EPSG:4326'}).to_string() def test_epsg_code(): assert CRS({'init': 'EPSG:4326'}).is_epsg_code assert not 
CRS({'proj': 'latlon'}).is_epsg_code def test_epsg(): assert CRS({'init': 'EPSG:4326'}).to_epsg() == 4326 assert CRS.from_string('+proj=longlat +datum=WGS84 +no_defs').to_epsg() == 4326 def test_epsg__no_code_available(): lcc_crs = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc ' '+x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0') assert lcc_crs.to_epsg() is None def test_crs_OSR_equivalence(): crs1 = CRS.from_string('+proj=longlat +datum=WGS84 +no_defs') crs2 = CRS.from_string('+proj=latlong +datum=WGS84 +no_defs') crs3 = CRS({'init': 'EPSG:4326'}) assert crs1 == crs2 assert crs1 == crs3 def test_crs_OSR_no_equivalence(): crs1 = CRS.from_string('+proj=longlat +datum=WGS84 +no_defs') crs2 = CRS.from_string('+proj=longlat +datum=NAD27 +no_defs') assert crs1 != crs2 def test_safe_osr_release(tmpdir): log = logging.getLogger('rasterio._gdal') log.setLevel(logging.DEBUG) logfile = str(tmpdir.join('test.log')) fh = logging.FileHandler(logfile) log.addHandler(fh) with rasterio.Env(): CRS({}) == CRS({}) log = open(logfile).read() assert "Pointer 'hSRS' is NULL in 'OSRRelease'" not in log @requires_gdal21(reason="CRS equality is buggy pre-2.1") def test_from_wkt(): wgs84 = CRS.from_string('+proj=longlat +datum=WGS84 +no_defs') from_wkt = CRS.from_wkt(wgs84.wkt) assert wgs84.wkt == from_wkt.wkt def test_from_wkt_invalid(): with pytest.raises(CRSError): CRS.from_wkt('trash') def test_from_user_input_epsg(): assert 'init' in CRS.from_user_input('EPSG:4326') def test_from_esri_wkt(): projection_string = ( 'PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",' 'GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",' 'SPHEROID["GRS_1980",6378137.0,298.257222101]],' 'PRIMEM["Greenwich",0.0],' 'UNIT["Degree",0.0174532925199433]],' 'PROJECTION["Albers"],' 'PARAMETER["false_easting",0.0],' 'PARAMETER["false_northing",0.0],' 'PARAMETER["central_meridian",-96.0],' 'PARAMETER["standard_parallel_1",29.5],' 'PARAMETER["standard_parallel_2",45.5],' 'PARAMETER["latitude_of_origin",23.0],' 'UNIT["Meter",1.0],' 'VERTCS["NAVD_1988",' 'VDATUM["North_American_Vertical_Datum_1988"],' 'PARAMETER["Vertical_Shift",0.0],' 'PARAMETER["Direction",1.0],UNIT["Centimeter",0.01]]]') proj_crs_str = CRS.from_string(projection_string) proj_crs_wkt = CRS.from_wkt(projection_string) assert proj_crs_str.to_string() == proj_crs_wkt.to_string() assert proj_crs_str.to_string() == \ ("+datum=NAD83 +lat_0=23 +lat_1=29.5 +lat_2=45.5 " "+lon_0=-96 +no_defs +proj=aea +units=m +x_0=0 +y_0=0") def test_compound_crs(): wkt = """COMPD_CS["unknown",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]],VERT_CS["unknown",VERT_DATUM["unknown",2005],UNIT["metre",1.0,AUTHORITY["EPSG","9001"]],AXIS["Up",UP]]]""" assert CRS.from_wkt(wkt).wkt.startswith('GEOGCS["WGS 84"') def test_dataset_compound_crs(): with rasterio.open("tests/data/compdcs.vrt") as dataset: assert dataset.crs.wkt.startswith('GEOGCS["WGS 84"') @pytest.mark.wheel def test_environ_patch(gdalenv, monkeypatch): """GDAL_DATA is patched as when rasterio._crs is imported""" monkeypatch.delenv('GDAL_DATA', raising=False) monkeypatch.delenv('PROJ_LIB', raising=False) with env_ctx_if_needed(): assert CRS.from_epsg(4326) != CRS(units='m', proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=-17.0, lon_0=-44.0) ``` #### File: rasterio/tests/test_dataset.py ```python import os import pytest 
import rasterio from rasterio.errors import RasterioIOError def test_files(data): tif = str(data.join('RGB.byte.tif')) aux = tif + '.aux.xml' with open(aux, 'w'): pass with rasterio.open(tif) as src: assert src.files == [tif, aux] def test_handle_closed(path_rgb_byte_tif): """Code that calls ``DatasetBase.handle()`` after it has been closed should raise an exception. """ with rasterio.open(path_rgb_byte_tif) as src: pass with pytest.raises(RasterioIOError): src.files ``` #### File: rasterio/tests/test_io_mixins.py ```python from affine import Affine import pytest import rasterio from rasterio.errors import RasterioDeprecationWarning from rasterio.windows import Window, WindowMethodsMixin EPS = 1.0e-8 def assert_window_almost_equals(a, b, precision=3): assert round(a.col_off, precision) == round(b.col_off, precision) assert round(a.row_off, precision) == round(b.row_off, precision) assert round(a.width, precision) == round(b.width, precision) assert round(a.height, precision) == round(b.height, precision) class MockDatasetBase(object): def __init__(self): # from tests/data/RGB.byte.tif self.affine = Affine(300.0379266750948, 0.0, 101985.0, 0.0, -300.041782729805, 2826915.0) self.bounds = (101985.0, 2611485.0, 339315.0, 2826915.0) self.transform = self.affine self.height = 718 self.width = 791 def test_windows_mixin(): class MockDataset(MockDatasetBase, WindowMethodsMixin): pass src = MockDataset() assert_window_almost_equals( src.window(*src.bounds), Window(0, 0, src.width, src.height) ) assert src.window_bounds(Window(0, 0, src.width, src.height)) == src.bounds assert src.window_transform( Window(0, 0, src.width, src.height)) == src.transform def test_windows_mixin_fail(): class MockDataset(WindowMethodsMixin): # doesn't inherit transform, height and width pass src = MockDataset() with pytest.raises(TypeError): src.window() with pytest.raises(TypeError): src.window_bounds() with pytest.raises(TypeError): src.window_transform() def test_window_transform_method(): with rasterio.open('tests/data/RGB.byte.tif') as src: assert src.window_transform(Window(0, 0, None, None)) == src.transform assert src.window_transform(Window(None, None, None, None)) == src.transform assert src.window_transform( Window(1, 1, None, None)).c == src.bounds.left + src.res[0] assert src.window_transform( ((1, None), (1, None))).f == src.bounds.top - src.res[1] assert src.window_transform( ((-1, None), (-1, None))).c == src.bounds.left - src.res[0] assert src.window_transform( ((-1, None), (-1, None))).f == src.bounds.top + src.res[1] def test_window_method(): with rasterio.open('tests/data/RGB.byte.tif') as src: left, bottom, right, top = src.bounds dx, dy = src.res assert_window_almost_equals( src.window(left + EPS, bottom + EPS, right - EPS, top - EPS), Window(0, 0, src.width, src.height) ) assert_window_almost_equals( src.window(left, top - 400, left + 400, top), Window(0, 0, 400 / src.res[0], 400 / src.res[1]) ) assert_window_almost_equals( src.window(left, top - 2 * dy - EPS, left + 2 * dx - EPS, top), Window(0, 0, 2, 2)) assert_window_almost_equals( src.window(left - 2 * dx, top - 2 * dy, left + 2 * dx, top + 2 * dy), Window(-2, -2, 4, 4) ) def test_window_bounds_function(): with rasterio.open('tests/data/RGB.byte.tif') as src: rows = src.height cols = src.width assert src.window_bounds(((0, rows), (0, cols))) == src.bounds ``` #### File: rasterio/tests/test_rio_gcp.py ```python import json import logging import sys from click.testing import CliRunner import pytest from rasterio.rio.main import main_group 
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) def test_feature_seq(): """GeoJSON sequence w/out RS is the default""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt']) assert result.exit_code == 0 assert result.output.count('"Feature"') == 3 assert '-78' in result.output def test_collection(): """GeoJSON collections can be had, optionally""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--collection']) assert result.exit_code == 0 assert result.output.count('"FeatureCollection"') == 1 assert '-78' in result.output def test_feature_seq_indent_rs(): """Indentation of a feature sequence succeeds with ascii RS option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--indent', '2', '--rs']) assert result.exit_code == 0 def test_feature_seq_indent_no_rs(): """Indentation of a feature sequence fails without ascii RS option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--indent', '2']) assert result.exit_code == 2 def test_projected(): """Projected GeoJSON is an option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--projected']) assert result.exit_code == 0 assert '-78' not in result.output def test_feature_precision(): """Coordinate rounding is an option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--projected', '--precision', '1']) assert result.exit_code == 0 assert '"x": 116792.0' in result.output def test_collection_precision(): """Coordinate rounding is an option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--collection', '--projected', '--precision', '1']) assert result.exit_code == 0 assert '"FeatureCollection"' in result.output assert '"x": 116792.0' in result.output def test_collection_geographic_precision(): """Unrounded coordinates are an option""" runner = CliRunner() result = runner.invoke( main_group, ['gcps', 'tests/data/white-gemini-iv.vrt', '--collection', '--projected']) assert result.exit_code == 0 assert '"FeatureCollection"' in result.output assert '116792.0,' in result.output ``` #### File: rasterio/tests/test_rio_stack.py ```python from click.testing import CliRunner import rasterio from rasterio.rio.main import main_group from rasterio.rio.stack import stack def test_stack(tmpdir): outputname = str(tmpdir.join('stacked.tif')) runner = CliRunner() result = runner.invoke( main_group, ['stack', 'tests/data/RGB.byte.tif', outputname]) assert result.exit_code == 0 with rasterio.open(outputname) as out: assert out.count == 3 assert out.read(1).max() > 0 def test_stack_list(tmpdir): outputname = str(tmpdir.join('stacked.tif')) runner = CliRunner() result = runner.invoke( main_group, [ 'stack', 'tests/data/RGB.byte.tif', '--bidx', '1,2,3', outputname]) assert result.exit_code == 0 with rasterio.open(outputname) as out: assert out.count == 3 def test_stack_slice(tmpdir): outputname = str(tmpdir.join('stacked.tif')) runner = CliRunner() result = runner.invoke( main_group, [ 'stack', 'tests/data/RGB.byte.tif', '--bidx', '..2', 'tests/data/RGB.byte.tif', '--bidx', '3..', outputname]) assert result.exit_code == 0 with rasterio.open(outputname) as out: assert out.count == 3 def test_stack_single_slice(tmpdir): outputname = str(tmpdir.join('stacked.tif')) runner = CliRunner() result = 
runner.invoke( main_group, [ 'stack', 'tests/data/RGB.byte.tif', '--bidx', '1', 'tests/data/RGB.byte.tif', '--bidx', '2..', '--rgb', outputname]) assert result.exit_code == 0 with rasterio.open(outputname) as out: assert out.count == 3 def test_format_jpeg(tmpdir): outputname = str(tmpdir.join('stacked.jpg')) runner = CliRunner() result = runner.invoke( main_group, [ 'stack', 'tests/data/RGB.byte.tif', outputname, '--format', 'JPEG']) assert result.exit_code == 0 def test_error(tmpdir): outputname = str(tmpdir.join('stacked.tif')) runner = CliRunner() result = runner.invoke( main_group, [ 'stack', 'tests/data/RGB.byte.tif', outputname, '--driver', 'BOGUS']) assert result.exit_code == 1 ```
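All of the command-line tests in this and the preceding module follow the same Click pattern: build a `CliRunner`, invoke a command with a list of arguments, then assert on `exit_code` and `output`. The self-contained snippet below shows that pattern in isolation; the `greet` command is a stand-in and is not part of rasterio.

```python
# Standalone illustration of the CliRunner pattern used in the tests above.
# The "greet" command is a stand-in, not a rasterio command.
import click
from click.testing import CliRunner


@click.command()
@click.argument("name")
def greet(name):
    """Echo a greeting for NAME."""
    click.echo("hello {}".format(name))


runner = CliRunner()
result = runner.invoke(greet, ["world"])
assert result.exit_code == 0
assert "hello world" in result.output
```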
{ "source": "jnheo-md/aria", "score": 3 }
#### File: jnheo-md/aria/aria.py ```python from math import ceil import cv2 import csv import numpy as np import tkinter from tkinter import filedialog from skimage.color import rgb2hed, hed2rgb from openslide import OpenSlide from skimage.util import img_as_ubyte,img_as_float from skimage.filters import thresholding, threshold_otsu ### constants SKIP_CROP = False MANUAL_THRESHOLD = False BLOCK_SIZE = 1024 import sys,os root = tkinter.Tk() root.withdraw() #use to hide tkinter window def openfilename(): # open file dialog box to select image # The dialogue box has a title "Open" filename = filedialog.askopenfilename() return filename if len(sys.argv) == 1 : FILE_NAME = openfilename() elif len(sys.argv) ==2 : FILE_NAME = sys.argv[1] elif len(sys.argv) > 2 : if sys.argv[1] == "skip-crop": SKIP_CROP = True if sys.argv[2] == "manual-threshold" : MANUAL_THRESHOLD= True FILE_NAME = sys.argv[3] else : FILE_NAME = sys.argv[2] elif sys.argv[1] == "manual-threshold": MANUAL_THRESHOLD= True if sys.argv[2] == "skip-crop" : MANUAL_THRESHOLD= True FILE_NAME = sys.argv[3] else : FILE_NAME = sys.argv[2] else : # error print("ARIA error : cannot understand arguments") print("usage : python aria.py [skip-crop(optional)] [manual-threshold(optional)] [filename]") print("ex1) python aria.py skip-crop no-need-to-crop.svs - skips cropping step") print("ex1) python aria.py skip-crop manual-threshold no-need-to-crop-manual-threshold.svs - skips cropping step / shows manual threshold adjustment step") print("ex2) python aria.py need-to-crop.svs - uses cropping step") exit() print("###########################") print("###### ARIA-PY v 3.0 ######") print("###########################") print("#### by <NAME> ####") print("###########################") print("") print("Processing file : "+FILE_NAME) print("###########################") ################################ # Draws rectangle on the image ################################ def draw_rectangle(x,y): global small_img, image image = small_img.copy() # start from original image image -= 50 # decrease brightness of image dragged = np.zeros_like(image) cv2.rectangle(dragged, pt1=(ix,iy), pt2=(x, y),color=(255,255,255),thickness=-1) # create white box alpha = 0.8 mask = dragged.astype(bool) image[mask] = cv2.addWeighted(image, alpha, dragged, 1-alpha,0)[mask] # merge rectangle onto image def onClick(event, x, y, flags, param): global ix, iy, drawing, finalX, finalY if event == cv2.EVENT_LBUTTONDOWN: #when mousedown, set initial values # drawing = True ix = x iy = y elif ((event == cv2.EVENT_MOUSEMOVE) & (flags & cv2.EVENT_FLAG_LBUTTON) | (event == cv2.EVENT_LBUTTONUP)): #when moving, redraw rectangle # if drawing == True: draw_rectangle(x,y) finalX = x finalY = y def resizeAndCrop(src, startX, endX, startY, endY, use_openslide = False) : resize_ratio = BLOCK_SIZE/(endX-startX) ## first, get total image size slide_level = 0 if use_openslide : resized_total_width = src.level_dimensions[0][0] * resize_ratio for level in range(src.level_count) : if(src.level_dimensions[level][0] < resized_total_width) : break slide_level = level slide_total_width = src.level_dimensions[slide_level][0] slide_to_total_ratio = slide_total_width/src.level_dimensions[0][0] slide_startX = int(startX * slide_to_total_ratio) slide_startY = int(startY * slide_to_total_ratio) slide_endX= int(endX* slide_to_total_ratio) slide_endY= int(endY* slide_to_total_ratio) slide_to_output_ratio = BLOCK_SIZE / (slide_endX - slide_startX) width_height_ratio = (endX - startX) / (endY - startY) ## np array : height 
, weight, dim result_image = np.zeros((ceil(BLOCK_SIZE * (1/width_height_ratio)),BLOCK_SIZE,3), np.uint8) ## Loop through slide to make BLOCK_SIZED pieces for iterable_x in range(ceil((slide_endX - slide_startX)/BLOCK_SIZE)) : for iterable_y in range(ceil((slide_endY - slide_startY)/BLOCK_SIZE)) : sys.stdout.write('\033[2K\033[1G') print(f"Cropping {(iterable_x * ceil((slide_endY - slide_startY)/BLOCK_SIZE) + iterable_y) / (ceil((slide_endY - slide_startY)/BLOCK_SIZE) * ceil((slide_endX - slide_startX)/BLOCK_SIZE)) * 100}% done", end="\r") this_block_width = BLOCK_SIZE ## processing when rest of image is less than BLOCK_SIZE (width) if(slide_startX + (iterable_x + 1) * BLOCK_SIZE) > slide_endX : this_block_width = slide_endX - (slide_startX + iterable_x * BLOCK_SIZE) this_block_height = BLOCK_SIZE ## processing when rest of image is less than BLOCK_SIZE (height) if(slide_startY + (iterable_y + 1) * BLOCK_SIZE) > slide_endY : this_block_height = slide_endY - (slide_startY + iterable_y * BLOCK_SIZE) this_block_img = src.read_region((ceil(startX + iterable_x * BLOCK_SIZE / slide_to_total_ratio), ceil(startY + iterable_y * BLOCK_SIZE / slide_to_total_ratio)),slide_level,size = (ceil(this_block_width), ceil(this_block_height))) ## Compose destination Xs and Ys output_startX = ceil((iterable_x * BLOCK_SIZE)*slide_to_output_ratio) output_endX = output_startX + ceil(this_block_width * slide_to_output_ratio) output_startY = ceil((iterable_y * BLOCK_SIZE)*slide_to_output_ratio) output_endY = output_startY + ceil(this_block_height * slide_to_output_ratio) ## Compose new image result_shape =result_image[output_startY : output_endY, output_startX:output_endX].shape result_image[output_startY : output_endY, output_startX:output_endX] = cv2.cvtColor(cv2.resize(np.array(this_block_img), dsize=(result_shape[1],result_shape[0])), cv2.COLOR_RGB2BGR) return result_image else : ## use opencv src = src[newY1:newY2, newX1:newX2] if (newX2 - newX1) > BLOCK_SIZE : #then resize needed contour_resize_ratio = BLOCK_SIZE / (newX2 - newX1) result_image = cv2.resize(src, dsize=(BLOCK_SIZE, ceil((newY2 - newY1) * contour_resize_ratio))) return result_image ## end resize function def deconvolution(src, mask, startX, endX, startY, endY, USE_OPENSLIDE) : print("Starting deconvolution...") ## get full sized mask image resizedMask = cv2.resize(mask, dsize = (endX - startX, endY - startY), interpolation=cv2.INTER_NEAREST) ## create empty result image result_image = np.zeros((endY - startY,endX - startX), np.uint8) # opencv type, height, weight, dim for iterable_x in range(ceil((endX - startX)/BLOCK_SIZE)) : for iterable_y in range(ceil((endY - startY)/BLOCK_SIZE)) : sys.stdout.write('\033[2K\033[1G') print(f"Progress : {((iterable_x * ceil((endY - startY)/BLOCK_SIZE) + iterable_y) / (ceil((endY - startY)/BLOCK_SIZE) * ceil((endX - startX)/BLOCK_SIZE)) * 100):.2f}% done", end="\r") this_block_width = BLOCK_SIZE ## processing when rest of image is less than BLOCK_SIZE (width) if(startX + (iterable_x + 1) * BLOCK_SIZE) > endX : this_block_width = endX - (startX + iterable_x * BLOCK_SIZE) this_block_height = BLOCK_SIZE ## processing when rest of image is less than BLOCK_SIZE (height) if(startY + (iterable_y + 1) * BLOCK_SIZE) > endY : this_block_height = endY - (startY + iterable_y * BLOCK_SIZE) ## define boundaries for full image (output) relative_startX = iterable_x * BLOCK_SIZE relative_startY = iterable_y * BLOCK_SIZE relative_endX = relative_startX + this_block_width relative_endY = relative_startY + this_block_height ## read 
defined region from original file if USE_OPENSLIDE : this_block_img = np.array(src.read_region((startX + relative_startX, startY + relative_startY),0,size = (this_block_width, this_block_height))) else : this_block_img = src[(startY+relative_startY) : (startY+relative_startY + this_block_height), (startX + relative_startX) : (startX + relative_startX + this_block_width)] this_block_img = cv2.cvtColor(this_block_img,cv2.COLOR_BGR2RGB) this_block_img = this_block_img[:,:,:3] ## get mask for that region this_mask = cv2.cvtColor(resizedMask[relative_startY : relative_endY, relative_startX : relative_endX], cv2.COLOR_GRAY2BGR) ## get cropped region, in RGB cropped_block = cv2.bitwise_and(this_block_img, this_mask) ## clear memory del this_block_img ## IHC colorspace cropped_block = img_as_float(cropped_block) cropped_block = rgb2hed(cropped_block) null = np.zeros_like(cropped_block[:, :, 0]) cropped_block = img_as_ubyte(hed2rgb(np.stack((null, null, cropped_block[:, :, 2]), axis=-1))) cropped_block = 255 - cv2.cvtColor(cropped_block,cv2.COLOR_RGB2GRAY) cropped_block = cv2.bitwise_and(cropped_block, cv2.cvtColor(this_mask,cv2.COLOR_BGR2GRAY)) ## save to result image result_image[relative_startY : relative_endY, relative_startX : relative_endX] = cropped_block return result_image ## end deconvolution function USE_OPENSLIDE = False ix = -1 iy = -1 finalX = -1 finalY = -1 drawing = False if SKIP_CROP == False : try : slide_img = OpenSlide(FILE_NAME) small_level = slide_img.level_count-1 # if there is less than or equal to 2 levels, set level to last level small_img_size = slide_img.level_dimensions[small_level] small_img = np.array(slide_img.read_region((0,0), small_level, size=small_img_size)) if(small_img_size[0] > 1024) : # then resize to 1024 small_img_width = 1024 small_img_height = round(1024*small_img_size[1]/small_img_size[0]) small_img_size = [small_img_width,small_img_height] small_img = cv2.resize(small_img, dsize=(small_img_width,small_img_height)) small_img = cv2.cvtColor(small_img, cv2.COLOR_RGB2BGR) #finally, the image is presentable with cv2.imshow orig_img_size = slide_img.level_dimensions[0] #get full resolution image size USE_OPENSLIDE = True except BaseException : # try with opencv try : slide_img = cv2.imread(FILE_NAME) # set width to 1024 small_img_width = 1024 small_img_height = round(1024*slide_img.shape[0]/slide_img.shape[1]) small_img_size = [small_img_width,small_img_height] small_img = cv2.resize(slide_img, dsize=(small_img_width,small_img_height)) orig_img_size = [slide_img.shape[1],slide_img.shape[0]] #get full resolution image size USE_OPENSLIDE =False except BaseException : print("Error opening image!") exit() #variables for dragging, initial values WINDOW_NAME = "DRAG to set crop area, then press ENTER" cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) cv2.setMouseCallback(WINDOW_NAME, onClick) image = small_img.copy() while True: cv2.imshow(WINDOW_NAME, image) ret = cv2.waitKey(10) if ret == 27: #esc, exit exit() elif ret==13 : #return, so continue! cv2.destroyAllWindows() print("Cropping and exporting image...") break if abs(ix-finalX) < 10 and abs(iy-finalY) < 10 : # no area or too small area was selected. 
so export whole image ix = 0 iy =0 finalX = small_img_size[0] finalY = small_img_size[1] # del small_img #will not use again resize_ratio = orig_img_size[0]/small_img_size[0] # get the ratio of larger image compared to thumbnail if(ix < finalX) : newX1 = int(resize_ratio*ix) #start X of new image newX2 = int(resize_ratio*finalX) #end X of new image else : newX2 = int(resize_ratio*ix) #start X of new image newX1 = int(resize_ratio*finalX) #end X of new image if(iy < finalY) : newY1 = int(resize_ratio*iy) #start Y of new image newY2 = int(resize_ratio*finalY) #end Y of new image else : newY2 = int(resize_ratio*iy) #start Y of new image newY1 = int(resize_ratio*finalY) #end Y of new image image = resizeAndCrop(slide_img, newX1, newX2, newY1, newY2, USE_OPENSLIDE) ## new X1, X2, Y1, Y2 are coordinates for cropped area using original image ## these will be used later on. ##### now new window - for deconvolution & contour, analysis. needsReload = True def passChange(a) : global needsReload needsReload = True pass WINDOW_NAME2 = "Adjust variables then press ENTER" cv2.namedWindow(WINDOW_NAME2, cv2.WINDOW_NORMAL) cv2.createTrackbar("contour start", WINDOW_NAME2, 0, 255, passChange) cv2.createTrackbar("contour end",WINDOW_NAME2, 0, 255, passChange) cv2.createTrackbar("min area", WINDOW_NAME2, 1, 100, passChange) cv2.createTrackbar("background white",WINDOW_NAME2,0,200, passChange) cv2.createTrackbar("kernel size",WINDOW_NAME2,1,30, passChange) cv2.setTrackbarPos("contour start",WINDOW_NAME2,10) cv2.setTrackbarPos("contour end",WINDOW_NAME2,39) cv2.setTrackbarPos("min area",WINDOW_NAME2,5) cv2.setTrackbarPos("background white",WINDOW_NAME2,10) cv2.setTrackbarPos("kernel size",WINDOW_NAME2,3) origHeight, origWidth, dim = image.shape ## adjust brightness and contrast BRIGHTNESS = -130 CONTRAST =1.4 imageForCountour = cv2.convertScaleAbs(image, alpha=CONTRAST, beta=BRIGHTNESS) grayImage = cv2.cvtColor(imageForCountour, cv2.COLOR_BGR2GRAY) invertedGrayImage = cv2.bitwise_not(grayImage) height, width= grayImage.shape totalArea = width*height keyResult = None while keyResult != 13 and keyResult != 27: if needsReload : needsReload = False CANNY_START = cv2.getTrackbarPos('contour start', WINDOW_NAME2) CANNY_END = cv2.getTrackbarPos('contour end', WINDOW_NAME2) AREA_RATIO = cv2.getTrackbarPos('min area', WINDOW_NAME2) / 2000 if AREA_RATIO == 0: AREA_RATIO = 0.001 WHITE_VALUE = cv2.getTrackbarPos('background white', WINDOW_NAME2) KERNEL_SIZE = cv2.getTrackbarPos('kernel size', WINDOW_NAME2) canny = cv2.Canny(grayImage, CANNY_START, CANNY_END) expanded = cv2.dilate(canny, np.ones([KERNEL_SIZE, KERNEL_SIZE])) eroded = cv2.erode(expanded, np.ones([KERNEL_SIZE, KERNEL_SIZE])) contours, hierarchy = cv2.findContours(eroded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) showingContours = [] negativeContours = [] for each in contours: area = cv2.contourArea(each) if area > AREA_RATIO * totalArea: # get average intensitivy in grayscale mask = np.zeros_like(invertedGrayImage) # Create mask where white is what we want, black otherwise cv2.drawContours(mask, [each], -1, 255, -1) out = np.zeros_like(invertedGrayImage) # Extract out the object and place into output image out[mask == 255] = invertedGrayImage[mask == 255] # get sum of grayscale average = out.sum() / area if average < WHITE_VALUE: # too white negativeContours.append(each) else: showingContours.append(each) contourMask = np.zeros_like(invertedGrayImage) cv2.drawContours(contourMask, showingContours, -1, 255, -1) negativeContourMask = 
np.zeros_like(invertedGrayImage) cv2.drawContours(negativeContourMask, negativeContours, -1, 255, -1) finalMaskGray = cv2.bitwise_xor(contourMask, negativeContourMask) finalMask = cv2.cvtColor(finalMaskGray, cv2.COLOR_GRAY2BGR) finalMask[np.where((finalMask == [255, 255, 255]).all(axis=2))] = [0, 0, 255] # make it red inverted = cv2.cvtColor(invertedGrayImage, cv2.COLOR_GRAY2BGR) merged = cv2.addWeighted(inverted, 0.7, finalMask, 0.3, 0) cv2.putText(merged, f"CANNY {CANNY_START} ~ {CANNY_END} / AREA {AREA_RATIO} / WHITE {WHITE_VALUE} / KERNEL {KERNEL_SIZE}", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255)) cv2.imshow(WINDOW_NAME2, merged) keyResult = cv2.waitKey(1) cv2.destroyAllWindows() if keyResult == 13: # then save cv2.imwrite(FILE_NAME + '-area.png', merged) cv2.imwrite(FILE_NAME + '-contour.png', finalMaskGray) else: quit() ## ## ## ## RESET (free up memory) ## ## ## del grayImage del invertedGrayImage del expanded del eroded del finalMask del inverted del merged from datetime import datetime ## start deconvolution deconvoluted = deconvolution(slide_img, finalMaskGray, newX1, newX2, newY1, newY2, USE_OPENSLIDE) cv2.imwrite(FILE_NAME + '-deconvoluted.png',(255-deconvoluted)) ############### ## Thresholding ############### threshold = threshold_otsu(deconvoluted[deconvoluted!=0]) WINDOW_NAME2 = "Adjust threshold then press ENTER" cv2.namedWindow(WINDOW_NAME2, cv2.WINDOW_NORMAL) cv2.createTrackbar("threshold", WINDOW_NAME2, 0, 255, passChange) cv2.setTrackbarPos("threshold",WINDOW_NAME2,threshold) origHeight, origWidth = deconvoluted.shape ihc_d_gray_display = deconvoluted.copy() grayHeight, grayWidth = ihc_d_gray_display.shape ihc_d_gray_display = cv2.cvtColor(ihc_d_gray_display, cv2.COLOR_GRAY2BGR) croppedImage = deconvoluted if origHeight > 1024: ## too large, so resize for display croppedImage = cv2.resize(deconvoluted, dsize=(round(1024*origWidth/origHeight),1024)) ihc_d_gray_display = cv2.resize(ihc_d_gray_display,dsize=(round(1024*grayWidth/grayHeight),1024)) if MANUAL_THRESHOLD : keyResult = None needsReload =True while keyResult != 13 and keyResult != 27: if needsReload : needsReload = False threshold = cv2.getTrackbarPos('threshold', WINDOW_NAME2) _,thresholded_image = cv2.threshold(ihc_d_gray_display,threshold,255,cv2.THRESH_BINARY) thresholded_image = cv2.cvtColor(thresholded_image,cv2.COLOR_BGR2GRAY) cv2.putText(thresholded_image, "threshold : "+str(255 - threshold), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255)) cv2.imshow(WINDOW_NAME2, cv2.hconcat([croppedImage,thresholded_image])) keyResult = cv2.waitKey(1) cv2.destroyAllWindows() del thresholded_image del croppedImage _,th_auto = cv2.threshold(deconvoluted,threshold,255,cv2.THRESH_BINARY) _,th_fixed = cv2.threshold(deconvoluted,55,255,cv2.THRESH_BINARY) thresholded = cv2.cvtColor(th_auto, cv2.COLOR_GRAY2BGR) thresholded_fixed = cv2.cvtColor(th_fixed, cv2.COLOR_GRAY2BGR) cv2.imwrite(FILE_NAME + '-stained(auto).png', 255-thresholded) cv2.imwrite(FILE_NAME + '-stained(fixed).png', 255-thresholded_fixed) resizedMask = cv2.resize(finalMaskGray, dsize = (deconvoluted.shape[1], deconvoluted.shape[0])) fields = ["Total Area", "Threshold", "Stained(auto)", "Stained(fixed)"] results = [round(np.sum(resizedMask)/255), 255-threshold,round(np.sum(th_auto)/255),round(np.sum(th_fixed)/255) ] with open(FILE_NAME+"-result.csv", 'w',newline='') as f: # using csv.writer method from CSV package write = csv.writer(f) write.writerow(fields) write.writerow(results) print("********************************************") print(f" 
Analysis completed for {FILE_NAME}") print(f" Results saved to {FILE_NAME}-result.csv") print("********************************************") ```
{ "source": "jnhmcknight/flask-ezmail", "score": 3 }
#### File: flask-ezmail/flask_ezmail/connection.py ```python import blinker from flask_ezmail.message import Message from flask_ezmail.utils import sanitize_address, PY3, sanitize_addresses import smtplib import time signals = blinker.Namespace() email_dispatched = signals.signal("email-dispatched", doc=""" Signal sent when an email is dispatched. This signal will also be sent in testing mode, even though the email will not actually be sent. """) class Connection(object): """Handles connection to host.""" def __init__(self, mail): self.mail = mail def __enter__(self): if self.mail.suppress: self.host = None else: self.host = self.configure_host() self.num_emails = 0 return self def __exit__(self, exc_type, exc_value, tb): if self.host: self.host.quit() def configure_host(self): if self.mail.use_ssl: host = smtplib.SMTP_SSL(self.mail.server, self.mail.port) else: host = smtplib.SMTP(self.mail.server, self.mail.port) if self.mail.debug is not None: host.set_debuglevel(int(self.mail.debug)) else: host.set_debuglevel(int(False)) if self.mail.use_tls: host.starttls() if self.mail.username and self.mail.password: host.login(self.mail.username, self.mail.password) return host def send(self, message, envelope_from=None): """Verifies and sends message. :param message: Message instance. :param envelope_from: Email address to be used in MAIL FROM command. """ assert message.send_to, "No recipients have been added" assert message.sender, ( "The message does not specify a sender and a default sender " "has not been configured") if message.has_bad_headers(): raise BadHeaderError if message.date is None: message.date = time.time() if self.host: self.host.sendmail(sanitize_address(envelope_from or message.sender), list(sanitize_addresses(message.send_to)), message.as_bytes() if PY3 else message.as_string(), message.mail_options, message.rcpt_options) email_dispatched.send(message) self.num_emails += 1 if self.num_emails == self.mail.max_emails: self.num_emails = 0 if self.host: self.host.quit() self.host = self.configure_host() def send_message(self, *args, **kwargs): """Shortcut for send(msg). Takes same arguments as Message constructor. :versionadded: 0.3.5 """ self.send(Message(*args, **kwargs)) class BadHeaderError(Exception): pass ``` #### File: flask-ezmail/flask_ezmail/mail.py ```python from contextlib import contextmanager from flask_ezmail.connection import Connection from flask_ezmail.message import Message class _MailMixin(object): @contextmanager def record_messages(self): """Records all messages. Use in unit tests for example:: with mail.record_messages() as outbox: response = app.test_client.get("/email-sending-view/") assert len(outbox) == 1 assert outbox[0].subject == "testing" You must have blinker installed in order to use this feature. :versionadded: 0.4 """ if not email_dispatched: raise RuntimeError("blinker must be installed") outbox = [] def _record(message, app): outbox.append(message) email_dispatched.connect(_record) try: yield outbox finally: email_dispatched.disconnect(_record) def send(self, message): """Sends a single message instance. If TESTING is True the message will not actually be sent. :param message: a Message instance. """ with self.connect() as connection: message.send(connection) def send_message(self, *args, **kwargs): """Shortcut for send(msg). Takes same arguments as Message constructor. 
:versionadded: 0.3.5 """ self.send(Message(*args, **kwargs)) def connect(self): """Opens a connection to the mail host.""" return Connection(self) class Mail(_MailMixin): def __init__( self, server, username, password, port, use_tls=False, use_ssl=False, default_sender=None, debug=False, max_emails=None, suppress=False, ): self.server = server self.username = username self.password = password self.port = port self.use_tls = use_tls self.use_ssl = use_ssl self.default_sender = default_sender self.debug = debug self.max_emails = max_emails self.suppress = suppress ```
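A minimal usage sketch for the classes above. The Message constructor lives in flask_ezmail/message.py, which is not shown here, so its keyword arguments (subject/recipients/body) are an assumption based on the Flask-Mail API that flask-ezmail mirrors; the SMTP host, credentials, and addresses are placeholders.

```python
# Hypothetical usage of Mail/Connection; SMTP details and Message kwargs are assumptions.
from flask_ezmail.mail import Mail
from flask_ezmail.message import Message  # signature assumed to mirror Flask-Mail

mail = Mail(
    server='smtp.example.com',        # placeholder host
    username='noreply@example.com',
    password='not-a-real-password',
    port=587,
    use_tls=True,
    default_sender='noreply@example.com',
)

msg = Message(
    subject='Hello',
    recipients=['someone@example.com'],
    body='Sent through flask-ezmail.',
    sender=mail.default_sender,
)

# Connection is a context manager: the SMTP session is opened on __enter__,
# closed on __exit__, and re-established every `max_emails` messages.
with mail.connect() as connection:
    connection.send(msg)
```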
{ "source": "jnhnum1/contextual_codes", "score": 3 }
#### File: jnhnum1/contextual_codes/figures.py ```python import numpy as np import matplotlib import matplotlib.pyplot as plt plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.rc('lines', linewidth=0.8) from codingbounds import * def gen_figure_1(resolution=100): # Here we use results on the list decodability of multi-level # concatenated codes to obtain improved bounds on the *unique* # decodability of such codes. In particular, we use the fact # that it is possible to list-decode up to the Blokh-Zyablov bound # to show efficient unique decoding beyond half the Blokh-Zyablov bound # in the high-rate regime. rho_range = np.linspace(0, 0.25, resolution) rate_range = np.linspace(0, 1, resolution) GV_rates = np.frompyfunc(lambda rho : GV_unique_rate_vs_delta(2 * rho), 1, 1)(rho_range) plt.plot(rho_range, GV_rates, label="Gilbert-Varshamov (inefficient)", linestyle="--") BZ_rates = np.frompyfunc(BZ_rate_vs_efficient_unique_rho, 1, 1)(rho_range) plt.plot(rho_range, BZ_rates, label="Blokh-Zyablov") TR_rhos = np.frompyfunc(TR_rho_vs_rate, 1, 1)(rate_range) plt.plot(TR_rhos, rate_range, label="Thommesen-Rudra") # Find range of rates for which we improve over BZ and TR # eyeball estimate is that the crossover point is rho = 0.05 crossover_rho = root_scalar(lambda rho : best_padding_BZ_rate_vs_rho(rho) - best_prior_rate_vs_rho(rho), bracket=(0.03, 0.08)).root advantage_rho_range = np.linspace(0, crossover_rho, resolution) our_rates = np.frompyfunc(best_padding_BZ_rate_vs_rho, 1, 1)(advantage_rho_range) plt.plot(advantage_rho_range, our_rates, color="red", label="Our Result") plt.title("Comparison of Rate / Error Tolerance Tradeoffs") plt.xlabel("Correctable Fraction of Errors") plt.ylabel("Data Rate") plt.legend() plt.grid(True) plt.savefig('comparison_plot.pdf') ``` #### File: jnhnum1/contextual_codes/tables.py ```python from codingbounds import * def gen_CUD_table(): print(r"\begin{tabular}{|l||*{6}{c|}}\hline") rhos = [0.01, 0.02, 0.03, 0.05, 0.1, 0.2] sparsities = [1.0, 0.9, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01] print(r"\backslashbox{Sparsity}{Errors} & " + " & ".join(["{:7.2f}".format(rho) for rho in rhos]) + r" \\ \hline\hline") for s in sparsities: print("{:4.2f}".format(s) + " & ".join(["{:7.3f}".format(CUD_best_rate_vs_rho_and_sparsity(rho, s)) for rho in rhos]) + r" \\" ) print(r"\hline \end{tabular}") ```
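The figure code above follows one recurring pattern: wrap a scalar rate-vs-error function with np.frompyfunc so it can be evaluated over a numpy grid, then locate a crossover point with scipy's root_scalar. The sketch below shows only that pattern; the two toy bound functions are purely illustrative stand-ins and do not come from codingbounds.

```python
# Illustrative only: toy stand-ins for the scalar bound functions in codingbounds.
import numpy as np
from scipy.optimize import root_scalar

def bound_a(rho):          # hypothetical rate bound, linear in rho
    return 1.0 - 4.0 * rho

def bound_b(rho):          # second hypothetical bound with a different shape
    return 1.0 - 6.0 * rho + 10.0 * rho ** 2

rho_range = np.linspace(0.0, 0.25, 100)
rates_a = np.frompyfunc(bound_a, 1, 1)(rho_range)   # elementwise evaluation, as in gen_figure_1
rates_b = np.frompyfunc(bound_b, 1, 1)(rho_range)

# Crossover point where the two bounds coincide, bracketed by eyeball first
crossover = root_scalar(lambda rho: bound_a(rho) - bound_b(rho), bracket=(0.01, 0.24)).root
print("curves cross at rho ~", round(crossover, 4))
```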
{ "source": "jnhoang/generic-flask", "score": 3 }
#### File: generic-flask/application/http_requests.py ```python import requests from requests.auth import HTTPBasicAuth from application.logger import Logger from application.config import Config logger = Logger().get_logger() class Requester: def __init__(self): self.config = Config() def get_url(self, url, jsonify, headers={'content': 'application/json'}, verify=True, params=None): # make GET request try: response = requests.get(url, headers=headers, verify=verify, params=params) response.raise_for_status() except requests.exceptions.HTTPError as e: logger.exception(f'Requester.get_url Exception: {e}') raise e # jsonify response if jsonify: response = self.jsonify_response(response) return response def jsonify_response(self, response): try: return response.json() except Exception as e: logger.exception('Could not jsonify response') raise e def post_url(self, url, jsonify, payload, auth=None, headers={'content': 'application/json'}, verify=True): try: response = requests.post(url=url, auth=auth, headers=headers, json=payload, verify=verify) response.raise_for_status() except requests.exceptions.HTTPError as e: logger.exception(f'Requester.post_url exception: {e}') raise e if jsonify: response = self.jsonify_response(response) return response ``` #### File: generic-flask/application/main_controller.py ```python import os import json import requests from datetime import datetime, timedelta from application.utils import Utils from application.http_requests import Requester from application.config import Config from application.logger import Logger logger = Logger().get_logger() class MainController: def __init__(self): self.http_requests = Requester() self.config = Config() def test(self): pass ```
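A minimal usage sketch for the Requester wrapper above. The endpoint URL is a placeholder; note that get_url/post_url call raise_for_status and re-raise HTTP errors after logging, and only parse JSON when jsonify=True.

```python
# Hypothetical call through the Requester wrapper; the URL is a placeholder.
from application.http_requests import Requester

requester = Requester()

try:
    data = requester.get_url(
        'https://httpbin.org/json',   # placeholder endpoint
        jsonify=True,                 # parse and return response.json()
        params={'limit': 10},
    )
    print(data)
except Exception as exc:
    # get_url re-raises HTTPError after logging, so callers handle it here
    print('request failed:', exc)
```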
{ "source": "jnhoward/SU2LDM_public", "score": 3 }
#### File: SU2LDM_public/utilityFunctions/calcF1F2hat.py ```python import numpy as np from scipy.linalg import block_diag import itertools ##################################################### ## ## Calculate F1 and F2 hat matrices ## ##################################################### #-- Define F1 function --# def F1Hat(a, b, c, d, X): Xtot0 = X[c] @ X[a] @ X[d] @ X[b] Xtot1 = X[a] @ X[c] @ X[d] @ X[b] Xtot2 = X[c] @ X[a] @ X[b] @ X[d] Xtot3 = X[a] @ X[c] @ X[b] @ X[d] Xtot4 = X[a] @ X[b] @ X[c] @ X[d] term1 = (1./4.)*(np.trace(Xtot0) + np.trace(Xtot1)) term2 = (-1./12.)*(np.trace(Xtot2) + np.trace(Xtot3)) term3 = (-1./3.)*(np.trace(Xtot4)) return term1 + term2 + term3 #-- Define F2 function --# def F2Hat(a, b, c, d, A, X): Xtot = A @ X[a] @ X[b] @ X[c] @ X[d] return np.trace(Xtot) #-- Calculate F1 and F2 Matrices --# def calcF1F2HatMatrices(X, A, Ngen=1, DEBUG=True): # Create F1 and F2 Matrices of all possible combinations (tensor) if(Ngen==1): n = 15 # 15 = eta' + 14 pions, 14 = 2Nf^2 − Nf − 1 = 2 (3)^2 - 3 - 1 elif(Ngen==3): n = 91 # 91 = eta' + 90 pions, 90 = 2Nf^2 − Nf − 1 = 2 (7)^2 - 7 - 1 else: print("Error: Invalid Ngen. Please use either Ngen=1 or Ngen=3.") return F1HatMatrix = np.zeros((n,n,n,n), dtype=complex) F2HatMatrix = np.zeros((n,n,n,n), dtype=complex) dummyarr = np.arange(n) it = 0 nbatch = n for (a,b,c,d) in itertools.product(dummyarr, dummyarr, dummyarr, dummyarr): if(a == it): it+=1 print("Now processing batch %d out of %d"%(it, nbatch)) F1HatMatrix[a,b,c,d] = F1Hat(a, b, c, d, X) F2HatMatrix[a,b,c,d] = F2Hat(a, b, c, d, A, X) if (DEBUG):#! Make these meaningful later print("") if (DEBUG): print("") return F1HatMatrix, F2HatMatrix ``` #### File: SU2LDM_public/utilityFunctions/convertToDMBasis.py ```python import numpy as np from scipy.linalg import block_diag ################################################################# ## ## Transform F (or Fhat) Matrices into definite DM charge basis ## ################################################################# #-- Calculate Transformation Matrix --# def calcDMTransformMatrix(Ngen, DEBUG=True): # \Pi^{mass}_i = V_ij \Pi^{DM charge basis}_j if(Ngen==1): Npions = 15 # Total number of pions #-- Set arrays that show how mass and DM charged states relate --# """ DM array: $[\Pi_0, ..., \Pi_4, \Pi_5, \Pi_6, ..., \Pi_{11}, \Pi_{12}, \Pi_{13}, \Pi_{14}]$ $[\Pi_0^0, ..., \Pi_4^0, \Pi_1^+, \Pi_1^-, ..., \Pi_4^+, \Pi_4^-, \Pi_{13}^0, \Pi_{14}^0]$ Define vectors A, B, D as follows: D | D+1: 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | <- DM charged pions in DM charge Basis A : 5 | 6 | 9 | 10 | <- Pions in mass basis with V = 1/sqrt(2) B : 8 | 7 | 12 | 11 | <- Pions in mass basis with V = +-i/sqrt(2) """ D = np.array([5, 7, 9, 11]) A = np.array([5, 6, 9, 10]) B = np.array([8, 7, 12, 11]) elif(Ngen==3): Npions = 91 # Total number of pions #-- Set arrays that show how mass and DM charged states relate --# """ DM array: $[\Pi_0, \Pi_1, \Pi_2, ..., \Pi_{23}, \Pi_{24}, \Pi_{25}, ..., \Pi_{90}]$ $[\Pi_0^0, \Pi_1^+, \Pi_1^-, ..., \Pi_{12}^+, \Pi_{12}^-, \Pi_{1}^0, ..., \Pi_{66}^0]$ Note: - As in Ngen=1 case, \Pi_0^0 is the eta' particle - \Pi_{66}^0 analogous to \Pi_{14}^0 in the Ngen=1 case - We obtain the mass array basis through numerical diagonalization, but the order is consistent and only depends on the structure of the non-diagonal matrix which does not change when changing the scan parameters Define vectors A, B, D as follows: D <- DM charged pions in DM charge Basis A <- Pions in mass basis with V = 1/sqrt(2) B <- Pions in mass basis with 
V = +-i/sqrt(2) D | D+1: 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | A : 38 | 39 | 50 | 51 | 62 | 63 | B : 41 | 40 | 53 | 52 | 65 | 64 | (cont.) D | D+1: 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | A : 70 | 71 | 78 | 79 | 82 | 83 | B : 73 | 72 | 81 | 80 | 85 | 84 | """ D = np.array([ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23]) A = np.array([38, 39, 50, 51, 62, 63, 70, 71, 78, 79, 82, 83]) B = np.array([41, 40, 53, 52, 65, 64, 73, 72, 81, 80, 85, 84]) else: print("Error: Invalid Ngen. Please use either Ngen=1 or Ngen=3.") return #-- Create V matrix --# Vmatrix = np.zeros((Npions,Npions), dtype=complex) normFactor = 1./(np.sqrt(2)) for i in range(len(A)): a = A[i] d = D[i] Vmatrix[a,d] = normFactor Vmatrix[a,d+1] = normFactor for i in range(len(B)): b = B[i] d = D[i] Vmatrix[b,d] = (0+1j)*normFactor Vmatrix[b,d+1] = (0-1j)*normFactor Iarr = np.arange(Npions) sans = np.concatenate((A, B, D, D+1)) Iarr = np.delete(Iarr, sans) for i in Iarr: Vmatrix[i,i] = 1. return Vmatrix #-- Convert F1Matrix, F2Matrix in definite DM basis --# def convertToDMBasis(F1Matrix, F2Matrix, Vmatrix, DEBUG=True): #-- Transform Fs from interaction to DM charge basis --# from transformFs import transformF F1DMchargeBasisMatrix = transformF(Vmatrix, F1Matrix, DEBUG) F2DMchargeBasisMatrix = transformF(Vmatrix, F2Matrix, DEBUG) return F1DMchargeBasisMatrix, F2DMchargeBasisMatrix ```
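convertToDMBasis delegates the actual rotation to transformF, which is not shown above. A plausible reading of the relation \Pi^{mass}_i = V_ij \Pi^{DM charge basis}_j is that a rank-4 coupling tensor picks up one factor of V per index; the einsum sketch below illustrates that contraction on small random tensors, but the exact index and conjugation convention used by transformF is an assumption, not taken from the source.

```python
# Assumed convention only: rotate every index of a rank-4 tensor with V.
# transformF itself is not shown above, so treat this purely as an illustration.
import numpy as np

n = 15                                   # Ngen=1 pion count from the source
rng = np.random.default_rng(0)
F = rng.standard_normal((n, n, n, n)) + 1j * rng.standard_normal((n, n, n, n))
V = np.eye(n, dtype=complex)             # stand-in for calcDMTransformMatrix output

# F_dm[i,j,k,l] = sum_{a,b,c,d} V[a,i] V[b,j] V[c,k] V[d,l] F[a,b,c,d]
F_dm = np.einsum('ai,bj,ck,dl,abcd->ijkl', V, V, V, V, F, optimize=True)

print(F_dm.shape)                        # (15, 15, 15, 15)
```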
{ "source": "jnhu76/pmcore", "score": 3 }
#### File: Arachne/scripts/gdb-backtrace-arachne.py ```python from __future__ import print_function import gdb class BackTraceArachneCommand (gdb.Command): "Backtrace command for user threads in Arachne threading library." def __init__ (self): super (BackTraceArachneCommand, self).__init__ ("backtrace-arachne", gdb.COMMAND_STACK, gdb.COMPLETE_SYMBOL, True) gdb.execute("alias -a bta = backtrace-arachne", True) def backtrace(self, threadContext, from_tty): # Check if we are backtracing the current context loadedContext = gdb.parse_and_eval("Arachne::core.loadedContext") if isinstance(threadContext, str): threadContext = gdb.parse_and_eval(threadContext) if int(loadedContext) == int(threadContext): gdb.execute("backtrace", from_tty) return SP = gdb.parse_and_eval("$sp") PC = int(gdb.parse_and_eval("$pc")) r12 = int(gdb.parse_and_eval("$r12")) r13 = int(gdb.parse_and_eval("$r13")) r14 = int(gdb.parse_and_eval("$r14")) r15 = int(gdb.parse_and_eval("$r15")) rbx = int(gdb.parse_and_eval("$rbx")) rbp = int(gdb.parse_and_eval("$rbp")) loadedContext = int(gdb.parse_and_eval("Arachne::core.loadedContext")) gdb.execute("set Arachne::core.loadedContext = ((Arachne::ThreadContext*){0})".format(threadContext)) gdb.execute("set $rbp = *(uint64_t*) Arachne::core.loadedContext->sp") gdb.execute("set $rbx = *(((uint64_t*) Arachne::core.loadedContext->sp)+1)") gdb.execute("set $r15 = *(((uint64_t*) Arachne::core.loadedContext->sp)+2)") gdb.execute("set $r14 = *(((uint64_t*) Arachne::core.loadedContext->sp)+3)") gdb.execute("set $r13 = *(((uint64_t*) Arachne::core.loadedContext->sp)+4)") gdb.execute("set $r12 = *(((uint64_t*) Arachne::core.loadedContext->sp)+5)") gdb.execute("set $rsp=Arachne::core.loadedContext->sp + Arachne::SPACE_FOR_SAVED_REGISTERS", from_tty) gdb.execute("set $pc = *(void **)$rsp", from_tty) gdb.execute("backtrace", from_tty) # Restore gdb.execute("set $sp = {0}".format(SP), from_tty) gdb.execute("set $pc = {0}".format(PC), from_tty) gdb.execute("set $rbp = {0}".format(rbp), from_tty) gdb.execute("set $rbx = {0}".format(rbx), from_tty) gdb.execute("set $r15 = {0}".format(r15), from_tty) gdb.execute("set $r14 = {0}".format(r14), from_tty) gdb.execute("set $r13 = {0}".format(r13), from_tty) gdb.execute("set $r12 = {0}".format(r12), from_tty) gdb.execute("set Arachne::core.loadedContext = {0}".format(loadedContext)) def invoke(self, arg, from_tty): arg = arg.strip() if arg == "": # Backtrace all threadcontexts that are occupied in the current core maskAndCountPointer = gdb.parse_and_eval("Arachne::core.localOccupiedAndCount") if maskAndCountPointer == 0: print("Current core is not an Arachne core!") return bitmask = maskAndCountPointer.dereference()['_M_i']['occupied'] # Perform a backtrace on all the occupied bits. for i in range(56): if (bitmask >> i) & 1: threadContext = gdb.parse_and_eval("Arachne::core.localThreadContexts[{0}]".format(i)) print("Arachne Thread {0}: {1}".format(i, threadContext)) try: self.backtrace(threadContext, from_tty) except: pass return # Verify that the type is correct typestring=str(gdb.parse_and_eval(arg).type) if typestring.strip() != "Arachne::ThreadContext *": print("Please pass an Arachne::ThreadContext*") return # Check if the provided threadcontext is NULL, and do nothing if it is. threadcontextvalue = int(gdb.parse_and_eval(arg)) if threadcontextvalue == 0: print("A NULL pointer was passed!") return self.backtrace(arg, from_tty) BackTraceArachneCommand() ```
{ "source": "jnhustin/flask_blog", "score": 3 }
#### File: flaskblog/users/utils.py ```python import os from base64 import b64encode from PIL import Image from flask import url_for, current_app from flask_mail import Message from flaskblog import mail def save_picture(form_picture): # save uploaded picture filename as a hashed value (prevent photos w/ same name in db) random_bytes = os.urandom(24) random_hex = b64encode(random_bytes).decode('utf-8') filename, file_ext = os.path.splitext(form_picture.filename) picture_filename = random_hex + file_ext picture_path = os.path.join(current_app.root_path, 'static/profile_pics', picture_filename) # resize picture to outputsize output_size = (125, 125) image = Image.open(form_picture) image.thumbnail(output_size) image.save(picture_path) return picture_filename def send_reset_email(user): token = user.get_reset_token() msg = Message( 'Password Reset Request', sender = '<EMAIL>', recipients = [user.email]) msg.body = 'To reset your password, visit the following link: %s \ If you did not make this request then simply ignore this email \ and no change is required' % url_for('users.reset_token', token=token, _external=True) mail.send(msg) ```
{ "source": "jnhustin/gunpla-api", "score": 2 }
#### File: gunpla-api/gunpla_api/app.py ```python from flask import Flask from gunpla_api.config import Config # add app extensions # cors, etc # instantiate, initialize & configure App # http://flask.pocoo.org/docs/0.12/patterns/appfactories/ def create_app(config_class=Config): # CONFIGURATION # http://flask.pocoo.org/docs/1.0/config/#configuration-basics app = Flask(__name__) app.config.from_object(Config) # EXTENSIONS # bind app to extensions # http://flask.pocoo.org/docs/1.0/extensiondev/#the-extension-code # REGISTER BLUEPRINTS from gunpla_api.private_routes import private from gunpla_api.public_routes import public app.register_blueprint(private) app.register_blueprint(public) return app ``` #### File: gunpla-api/gunpla_api/db_connector.py ```python import psycopg2 from os.path import join, dirname from gunpla_api.config import Config from gunpla_api.logger import Logger from gunpla_api.exceptions import DatabaseUniqueException, BadRequestException logger = Logger().get_logger() class DbConnector(): config = Config() host = config.db_host port = config.db_port user = config.db_user password = <PASSWORD> db_name = config.db_name def __init__(self): self.initialize_conn() def initialize_conn(self): self.conn = psycopg2.connect( host = self.host, port = self.port, user = self.user, password = <PASSWORD>, dbname = self.db_name, ) return def get_conn(self): try: if self.conn == None or self.conn.closed == 1: logger.debug('conn down, reinitializing') self.initialize_conn() else: return self.conn except Exception: logger.exception('db_connector.get_conn error') raise def execute_sql(self, function, sql, vals=None, is_close_conn=True): try: self.get_conn() cursor = self.conn.cursor() cursor.execute(sql, vals) result = function(cursor) except psycopg2.errors.UniqueViolation: logger.exception('db_connector unique constraint violation', extra={'sql': sql, 'vals': vals}) self.rollback() raise DatabaseUniqueException() except psycopg2.Error as e: logger.exception('some psycopg error', extra={'sql': sql, 'vals': vals, 'pg_code': e.pgcode}) self.rollback() raise except Exception as e: logger.exception('unknown database execution error', extra={'sql': sql, 'vals': vals, 'error': str(e)}) self.rollback() raise if is_close_conn: self.conn.commit() self.conn.close() return result def commit_sql(self, cursor=None): try: self.conn.commit() self.conn.close() return except: print('bruh, already done') def process_insert_results(self, cursor): status_message = cursor.statusmessage return { 'status_message' : status_message, } def process_update_results(self, cursor) : status_message = cursor.statusmessage if status_message == 'UPDATE 0': raise BadRequestException('id does not exist') return { 'status_message' : status_message, } def process_select_results(self, cursor) : status_message = cursor.statusmessage col_names = [ desc[0] for desc in cursor.description ] results = cursor.fetchall() return { 'status_message' : status_message, 'results' : results, 'col_names' : col_names, } def process_delete_results(self, cursor) : status_message = cursor.statusmessage return { 'status_message': status_message, } def rollback(self) : self.conn.rollback() self.conn.close() return ``` #### File: gunpla-api/gunpla_api/logger.py ```python import logging from pythonjsonlogger import jsonlogger from gunpla_api.singleton import Singleton @Singleton class Logger: def __init__(self): self.l = None def get_logger(self): if not self.l: self.l = self.start_logger() return self.l def start_logger(self): # configure logging 
formatter = jsonlogger.JsonFormatter() # formatter = jsonlogger.JsonFormatter('(message), (module), (funcName), (levelname), (asctime), (process)') this was broken logger = logging.getLogger(__name__) logHandler = logging.StreamHandler() logHandler.setFormatter(formatter) logger.addHandler(logHandler) # change log lvls here ['INFO', 'DEBUG'] logger.setLevel(logging.DEBUG) # Disable flask logging logging.getLogger('werkzeug').setLevel(logging.ERROR) return logger ``` #### File: gunpla-api/gunpla_api/validation.py ```python from gunpla_api.exceptions import BadRequestException from gunpla_api.logger import Logger logger = Logger().get_logger() class Validation(): def get_json_field(self, field, json, optional=False): try: return json.get(field) if optional else json[field] except: logger.exception('[get_json_field] error') raise BadRequestException(f'missing field: {field}') def get_query_param(self, field, params, optional=False): try: return params.get(field) if optional else params[field] except: logger.exception('[get_query_param] error') raise BadRequestException(f'missing required field: "{field}"') ```
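A minimal sketch of driving the application factory defined in app.py above, for instance from a wsgi entry point. The module name, host, and port are placeholders rather than anything specified by the repo.

```python
# Hypothetical wsgi.py / run.py for the factory above; host and port are placeholders.
from gunpla_api.app import create_app

app = create_app()  # builds the Flask app and registers the public/private blueprints

if __name__ == '__main__':
    # development server only; a production deployment would sit behind gunicorn or similar
    app.run(host='127.0.0.1', port=5000)
```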
{ "source": "jnhyperion/ImageBot", "score": 3 }
#### File: ImageBot/imagebot/_base_matcher.py ```python import cv2 from typing import List from ._results import MatchingResult class BaseMatcher: def __init__( self, image_path: str, template_path: str, convert_2_gray: bool = False, ): self.image_path = image_path self.template_path = template_path self.image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) self.template = cv2.imread(template_path, cv2.IMREAD_UNCHANGED) assert self.image is not None, "Image should not be None" assert self.template is not None, "Image template should not be None" self.h_image, self.w_image = self.image.shape[:2] self.h_template, self.w_template = self.template.shape[:2] assert ( self.h_image >= self.h_template and self.w_image >= self.w_template ), "Image template should be smaller than image source." self.convert_2_gray = convert_2_gray def find_best_result(self) -> MatchingResult: """ TODO """ def find_all_results(self) -> List[MatchingResult]: """ TODO """ ``` #### File: ImageBot/imagebot/_generic_matcher.py ```python from typing import List, Union from ._template_matcher import TemplateMatcher from ._feature_matcher import FeatureMatcher from ._results import MatchingResult class GenericMatcher(TemplateMatcher): def __init__(self, *args, strict_mode: bool = False, **kwargs): super().__init__(*args, **kwargs) self.strict_mode = strict_mode def find_all_results(self) -> List[MatchingResult]: results = super().find_all_results() if self.strict_mode or results: # strict mode will only use template matching return results else: feature_matcher = FeatureMatcher( self.image_path, self.template_path, self.convert_2_gray ) return feature_matcher.find_all_results() def find_best_result(self) -> Union[MatchingResult, None]: result = super().find_best_result() if self.strict_mode or result is not None: # strict mode will only use template matching return result else: feature_matcher = FeatureMatcher( self.image_path, self.template_path, self.convert_2_gray ) return feature_matcher.find_best_result() ``` #### File: ImageBot/tests/conftest.py ```python import os import shutil import pytest @pytest.fixture(scope="module") def tests_dir(): yield os.path.abspath(os.path.dirname(__file__)) @pytest.fixture(scope="module") def tests_out_dir(tests_dir, worker_id): out = os.path.join(tests_dir, "__out__", worker_id) shutil.rmtree(out, ignore_errors=True) os.makedirs(out) yield out @pytest.fixture def images(request, tests_dir): return ( os.path.join(tests_dir, f"images/{request.param}.png"), os.path.join(tests_dir, f"images/{request.param}_template.png"), request.param, ) ```
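A minimal usage sketch for the matchers above. The two image paths are placeholders, and the way MatchingResult is printed glosses over its fields, since _results.py is not shown here.

```python
# Hypothetical matcher invocation; the image paths are placeholders and the
# attributes of MatchingResult are not assumed, since _results.py is not shown.
from imagebot._generic_matcher import GenericMatcher

matcher = GenericMatcher(
    'screenshots/full_screen.png',      # source image (placeholder path)
    'templates/ok_button.png',          # smaller template to look for (placeholder path)
    convert_2_gray=True,
    strict_mode=False,                  # fall back to feature matching if template matching fails
)

best = matcher.find_best_result()
if best is None:
    print('template not found')
else:
    print('found a match:', best)

all_matches = matcher.find_all_results()
print(f'{len(all_matches)} candidate matches in total')
```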
{ "source": "jnhyperion/ParallelADB", "score": 3 }
#### File: ParallelADB/paralleladb/_devices_mgr.py ```python import os from ._exceptions import OfflineDevicesError class _DevicesMgr: def __init__(self): self._serials = list() def get_serials(self): if not self._serials: self.get_serials_instantly() return self._serials def get_serials_instantly(self): result = os.popen('adb devices').read() for r in result.split('\n'): r = r.strip() if r.endswith('\tdevice'): device = r.split('\t')[0] self._serials.append(device) if not self._serials: raise OfflineDevicesError('No devices!!!') return self._serials DevicesMgr = _DevicesMgr() ``` #### File: ParallelADB/paralleladb/_parallel_adb.py ```python import os import logging import collections from multiprocessing.pool import ThreadPool from ._devices_mgr import DevicesMgr logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s - %(message)s') class _ParallelADB: def __init__(self): self._pool = None def run(self, cmd, serials=None, is_shell_cmd=True, print_result=False): """ :param cmd: adb shell command in shell mode, like 'pm clear com.example.pkg' :param serials: [serial1, serial2, ..] specify when you only want to run command in some of the connected device default is running on all connected device :param is_shell_cmd: to indicate if the command contains 'shell', default is True :param print_result: print the result from adb command line, default is False :return: the command output for each serial """ applied_serials = serials if serials else DevicesMgr.get_serials() if not self._pool: self._pool = ThreadPool(10) def _call_shell_cmd(s): if is_shell_cmd: full_cmd = 'adb -s {} shell "{}"' else: # non shell mode command needs to skip quote full_cmd = 'adb -s {} {}' full_cmd = full_cmd.format(s, cmd) logging.info('[ParallelADB] Running command: ' + full_cmd) return os.popen(full_cmd).readlines() all_results = self._pool.map(_call_shell_cmd, applied_serials) adb_outputs_wrapper = collections.namedtuple('ADBOutputs', ['serial', 'results']) adb_outputs = [adb_outputs_wrapper(*_) for _ in zip(applied_serials, all_results)] for i in adb_outputs: if print_result: logging.info('Results from device: ' + i.serial) for _l in i.results: logging.info(_l.strip()) return adb_outputs ParallelADB = _ParallelADB() ```
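A minimal usage sketch for ParallelADB, following the run() docstring above. The package name, serials, and commands are placeholders, and the import assumes the singleton is re-exported by the package's __init__.

```python
# Hypothetical invocation of ParallelADB; the package name and serials are placeholders.
from paralleladb import ParallelADB   # assumes the singleton is re-exported by the package

# Run a shell command on every connected device and print each device's output
outputs = ParallelADB.run('pm clear com.example.pkg', print_result=True)

# Or target specific serials with a non-shell adb command
ParallelADB.run(
    'install -r /tmp/app.apk',
    serials=['emulator-5554', 'emulator-5556'],
    is_shell_cmd=False,
)

# run() returns a namedtuple per device: (serial, results)
for device in outputs:
    print(device.serial, [line.strip() for line in device.results])
```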
{ "source": "jnhyperion/RIDE", "score": 2 }
#### File: robotide/controller/dataloader.py ```python import os from threading import Thread from robotide import robotapi class DataLoader(object): def __init__(self, namespace, settings): self._namespace = namespace self._namespace.reset_resource_and_library_cache() self._settings = settings def load_datafile(self, path, load_observer): return self._load(_DataLoader(path, self._settings), load_observer) def load_initfile(self, path, load_observer): return self._load(_InitFileLoader(path), load_observer) def load_resource_file(self, datafile, load_observer): return self._load(_ResourceLoader( datafile, self._namespace.get_resource), load_observer) def resources_for(self, datafile, load_observer): return self._load(_ResourceLoader( datafile, self._namespace.get_resources), load_observer) def _load(self, loader, load_observer): self._wait_until_loaded(loader, load_observer) return loader.result def _wait_until_loaded(self, loader, load_observer): loader.start() load_observer.notify() while loader.is_alive(): loader.join(0.1) load_observer.notify() class _DataLoaderThread(Thread): def __init__(self): Thread.__init__(self) self.result = None def run(self): try: self.result = self._run() except Exception as e: # print("DEBUG: exception at DataLoader %s\n" % str(e)) pass # TODO: Log this error somehow class _DataLoader(_DataLoaderThread): def __init__(self, path, settings): _DataLoaderThread.__init__(self) self._path = path self._settings = settings def _run(self): return TestData(source=self._path, settings=self._settings) class _InitFileLoader(_DataLoaderThread): def __init__(self, path): _DataLoaderThread.__init__(self) self._path = path def _run(self): result = robotapi.TestDataDirectory(source=os.path.dirname(self._path)) result.initfile = self._path robotapi.FromFilePopulator(result).populate(self._path) return result class _ResourceLoader(_DataLoaderThread): def __init__(self, datafile, resource_loader): _DataLoaderThread.__init__(self) self._datafile = datafile self._loader = resource_loader def _run(self): return self._loader(self._datafile) class TestDataDirectoryWithExcludes(robotapi.TestDataDirectory): def __init__(self, parent, source, settings): self._settings = settings robotapi.TestDataDirectory.__init__(self, parent, source) def add_child(self, path, include_suites, extensions=None, warn_on_skipped=False): if not self._settings.excludes.contains(path): self.children.append(TestData( parent=self, source=path, settings=self._settings)) else: self.children.append(ExcludedDirectory(self, path)) def TestData(source, parent=None, settings=None): """Parses a file or directory to a corresponding model object. :param source: path where test data is read from. :returns: :class:`~.model.TestDataDirectory` if `source` is a directory, :class:`~.model.TestCaseFile` otherwise. """ if os.path.isdir(source): # print("DEBUG: Dataloader Is dir getting testdada %s\n" % source) data = TestDataDirectoryWithExcludes(parent, source, settings) # print("DEBUG: Dataloader testdata %s\n" % data.name) data.populate() # print("DEBUG: Dataloader after populate %s %s\n" % (data._tables, data.name)) return data return robotapi.TestCaseFile(parent, source).populate() class ExcludedDirectory(robotapi.TestDataDirectory): def __init__(self, parent, path): self._parent = parent self._path = path robotapi.TestDataDirectory.__init__(self, parent, path) def has_tests(self): return True ```
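The loaders above run in a worker thread and poll the caller through load_observer.notify() roughly every 0.1 s until the data is parsed. The stub below only sketches that observer contract; the namespace and settings objects come from RIDE itself and are not constructed here.

```python
# Illustrative only: the namespace/settings objects are provided by RIDE and are
# not constructed here; the point is the observer's notify() polling contract.
class SimpleLoadObserver(object):
    """notify() is called repeatedly (about every 0.1 s) while the loader thread runs."""

    def __init__(self):
        self.ticks = 0

    def notify(self):
        self.ticks += 1  # a GUI would pump its event loop / update a progress bar here


# loader = DataLoader(namespace, settings)              # namespace & settings come from RIDE
# datafile = loader.load_datafile('suite.robot', SimpleLoadObserver())
# 'datafile' is None if parsing failed, otherwise the populated TestData object
```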
{ "source": "jnhyperion/WikiBot", "score": 3 }
#### File: WikiBot/example/example.py ```python from bs4 import BeautifulSoup from wikibot import WikiBot def modification_callback(soup): a = soup.find('table').find_all_next('tr')[6].find_all_next('td')[6].div a.append(BeautifulSoup('<b>WikiBot is awesome!</b>', 'html.parser')) wiki_bot = WikiBot(url='https://localhost', username='WikiBot', password='*******') wiki_bot.modify_content_with_bs4(space='My Space', title='Demo', modification_callback=modification_callback) ```
{ "source": "jnhyperion/XrayBot", "score": 2 }
#### File: XrayBot/xraybot/_xray_bot.py ```python import logging from enum import Enum from typing import List, Tuple, Union, Dict, Optional from atlassian import Jira, Xray from dataclasses import dataclass from concurrent.futures import ProcessPoolExecutor logger = logging logger_kwargs = { "level": logging.INFO, "format": "%(asctime)s %(levelname)s - %(message)s", "force": True, } logger.basicConfig(**logger_kwargs) @dataclass class TestEntity: # store in test custom field "Generic Test Definition" # using as the unique identified for one certain test unique_identifier: str summary: str description: str req_key: str key: Optional[str] = None class XrayResultType(Enum): PASS = "PASS" FAIL = "FAIL" TODO = "TODO" @dataclass class TestResultEntity: key: str result: XrayResultType _CF_TEST_DEFINITION = "Generic Test Definition" _CF_TEST_TYPE = "Test Type" _CF_TEST_TYPE_VAL_GENERIC = "Generic" _CF_TEST_TYPE_VAL_MANUAL = "Manual" _CF_TEST_TYPE_VAL_CUCUMBER = "Cucumber" class XrayBot: _MULTI_PROCESS_WORKER_NUM = 30 _AUTOMATION_TESTS_FOLDER_NAME = "Automation Test" _AUTOMATION_OBSOLETE_TESTS_FOLDER_NAME = "Obsolete" def __init__( self, jira_url: str, jira_username: str, jira_pwd: str, project_key: str ): """ :param jira_url: str :param jira_username: str :param jira_pwd: str :param project_key: str, jira project key, e.g: "TEST" """ self._jira_url = jira_url self._jira_username = jira_username self._jira_pwd = <PASSWORD> self._project_key = project_key self._automation_folder_id = -1 self._automation_obsolete_folder_id = -1 self._jira = Jira( url=self._jira_url, username=self._jira_username, password=self._jira_pwd ) self._xray = Xray( url=self._jira_url, username=self._jira_username, password=self._jira_pwd ) self._custom_fields: Dict[str, Union[str, List[str]]] = {} self._cached_all_custom_fields = None self.configure_custom_field(_CF_TEST_TYPE, _CF_TEST_TYPE_VAL_GENERIC) def configure_custom_field( self, field_name: str, field_value: Union[str, List[str]] ): """ :param field_name: str, custom field name :param field_value: custom field value of the test ticket e.g: field_value="value", field_value=["value1", "value2"] """ if field_name == _CF_TEST_TYPE: assert field_value not in ( _CF_TEST_TYPE_VAL_MANUAL, _CF_TEST_TYPE_VAL_CUCUMBER, ), f'Custom field value "{field_value}" is not supported in "{field_name}".' assert ( field_name != _CF_TEST_DEFINITION ), f'Custom field "{field_name}" is not configurable.' 
self._custom_fields[field_name] = field_value @property def cf_id_test_definition(self): return self._get_custom_field_by_name(_CF_TEST_DEFINITION) def get_xray_tests(self) -> List[TestEntity]: logger.info(f"Start querying all xray tests for project: {self._project_key}") jql = ( f'project = "{self._project_key}" and type = "Test" and reporter = "{self._jira_username}" ' 'and status != "Obsolete"' ) for k, v in self._custom_fields.items(): if isinstance(v, list) and v: converted = ",".join([f'"{_}"' for _ in v]) jql = f'{jql} and "{k}" in ({converted})' else: jql = f'{jql} and "{k}" = "{v}"' logger.info(f"Querying jql: {jql}") tests = [] for _ in self._jira.jql( jql, fields=["summary", "description", "issuelinks", self.cf_id_test_definition], limit=-1, )["issues"]: test = TestEntity( unique_identifier=_["fields"][self.cf_id_test_definition], summary=_["fields"]["summary"], description=_["fields"]["description"], req_key="", key=_["key"], ) links = _["fields"]["issuelinks"] _req_keys = [] for link in links: if link["type"]["name"] == "Tests": _req_keys.append(link["outwardIssue"]["key"]) if _req_keys: test.req_key = ",".join(_req_keys) tests.append(test) return tests def _get_custom_field_by_name(self, name: str): if not self._cached_all_custom_fields: self._cached_all_custom_fields = self._jira.get_all_custom_fields() for f in self._cached_all_custom_fields: if f["name"] == name: return f["id"] def _delete_test(self, test_entity: TestEntity): logger.info(f"Start deleting test: {test_entity.key}") self._jira.delete_issue(test_entity.key) def _obsolete_test(self, test_entity: TestEntity): logger.info(f"Start obsoleting test: {test_entity.key}") self._jira.set_issue_status(test_entity.key, "Obsolete") self._remove_links(test_entity) self._remove_case_from_folder(test_entity, self._automation_folder_id) self._add_case_into_folder(test_entity, self._automation_obsolete_folder_id) def _remove_links(self, test_entity: TestEntity): issue = self._jira.get_issue(test_entity.key) for link in issue["fields"]["issuelinks"]: if link["type"]["name"] == "Tests": self._jira.remove_issue_link(link["id"]) def _update_jira_test(self, test_entity: TestEntity): logger.info(f"Start updating test: {test_entity.key}") self._jira.update_issue_field( key=test_entity.key, fields={ "summary": test_entity.summary, "description": test_entity.description, }, ) self._remove_links(test_entity) self._link_test(test_entity) def _create_test(self, test_entity: TestEntity): logger.info(f"Start creating test: {test_entity.summary}") fields = { "issuetype": {"name": "Test"}, "project": {"key": self._project_key}, "description": test_entity.description, "summary": test_entity.summary, "assignee": {"name": self._jira_username}, self.cf_id_test_definition: test_entity.unique_identifier, } for k, v in self._custom_fields.items(): custom_field = self._get_custom_field_by_name(k) if isinstance(v, list) and v: fields[custom_field] = [{"value": _} for _ in v] else: fields[custom_field] = {"value": v} try: test_entity.key = self._jira.create_issue(fields)["key"] except Exception as e: logger.error(f"Create test with error: {e}") raise e logger.info(f"Created xray test: {test_entity.key}") self._finalize_new_test(test_entity) self._link_test(test_entity) self._add_case_into_folder(test_entity, self._automation_folder_id) def _finalize_new_test(self, test_entity: TestEntity): # only for new created xray test logger.info(f"Start finalizing test: {test_entity.key}") self._jira.set_issue_status(test_entity.key, "Ready for Review") 
self._jira.set_issue_status(test_entity.key, "In Review") self._jira.set_issue_status(test_entity.key, "Finalized") def _link_test(self, test_entity: TestEntity): if test_entity.req_key: # support multi req keys req_key_list = test_entity.req_key.split(",") for _req_key in req_key_list: logger.info(f"Start linking test to requirement: {test_entity.key}") link_param = { "type": {"name": "Tests"}, "inwardIssue": {"key": test_entity.key}, "outwardIssue": {"key": _req_key}, } self._jira.create_issue_link(link_param) def sync_tests(self, local_tests: List[TestEntity]): assert len(local_tests) == len( set([_.unique_identifier for _ in local_tests]) ), "Duplicated unique_identifier found in local_tests" self._create_automation_repo_folder() xray_tests = self.get_xray_tests() to_be_deleted, to_be_appended, to_be_updated = self._get_tests_diff( xray_tests, local_tests ) with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: executor.map(self._obsolete_test, to_be_deleted) with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: executor.map(self._create_test, to_be_appended) with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: executor.map(self._update_jira_test, to_be_updated) @staticmethod def _get_tests_diff( xray_tests: List[TestEntity], local_tests: List[TestEntity] ) -> Tuple[List[TestEntity], List[TestEntity], List[TestEntity]]: to_be_deleted = list() to_be_appended = list() to_be_updated = list() for test in xray_tests: if test.unique_identifier not in [_.unique_identifier for _ in local_tests]: # xray test not valid in xml anymore to_be_deleted.append(test) for test in local_tests: if test.unique_identifier not in [_.unique_identifier for _ in xray_tests]: # local test not exist in xray to_be_appended.append(test) for test in xray_tests: if test.unique_identifier in [_.unique_identifier for _ in local_tests]: # xray test already exists previous_summary = test.summary previous_description = test.description previous_req_key = test.req_key matched_local_test: TestEntity = [ _ for _ in local_tests if test.unique_identifier == _.unique_identifier ][0] new_summary = matched_local_test.summary new_description = matched_local_test.description new_req_key = matched_local_test.req_key if ( previous_summary != new_summary or previous_description != new_description or set(previous_req_key.split(",")) != set(new_req_key.split(",")) ): # test desc / requirement id is different test.summary = new_summary test.description = new_description test.req_key = new_req_key to_be_updated.append(test) return to_be_deleted, to_be_appended, to_be_updated def _create_test_plan(self, test_plan_name: str) -> str: jql = f'project = "{self._project_key}" and type="Test Plan" and reporter= "{self._jira_username}"' for _ in self._jira.jql(jql, limit=-1)["issues"]: if _["fields"]["summary"] == test_plan_name: key = _["key"] logger.info(f"Found existing test plan: {key}") return key fields = { "issuetype": {"name": "Test Plan"}, "project": {"key": self._project_key}, "summary": test_plan_name, "assignee": {"name": self._jira_username}, } test_plan_ticket = self._jira.create_issue(fields) key = test_plan_ticket["key"] logger.info(f"Created new test plan: {key}") return key def _add_tests_to_test_plan(self, test_plan_key: str, test_key: str): test_plans = self._xray.get_test_plans(test_key) if test_plan_key not in [_["key"] for _ in test_plans]: logger.info(f"Start adding test {test_key} to test plan {test_plan_key}") self._xray.update_test_plan(test_plan_key, add=[test_key]) def 
_add_tests_to_test_execution(self, test_execution_key: str, test_key: str): test_executions = self._xray.get_test_executions(test_key) if test_execution_key not in [_["key"] for _ in test_executions]: logger.info( f"Start adding test {test_key} to test execution {test_execution_key}" ) self._xray.update_test_execution(test_execution_key, add=[test_key]) def _add_test_execution_to_test_plan( self, test_execution_key: str, test_plan_key: str ): logger.info( f"Start adding test execution {test_execution_key} to test plan {test_plan_key}" ) self._xray.update_test_plan_test_executions( test_plan_key, add=[test_execution_key] ) def _create_test_execution(self, test_execution_name: str) -> str: jql = f'project = "{self._project_key}" and type="Test Execution" and reporter= "{self._jira_username}"' for _ in self._jira.jql(jql, limit=-1)["issues"]: if _["fields"]["summary"] == test_execution_name: key = _["key"] logger.info(f"Found existing test execution: {key}") return key fields = { "issuetype": {"name": "Test Execution"}, "project": {"key": self._project_key}, "summary": test_execution_name, "assignee": {"name": self._jira_username}, } test_plan_ticket = self._jira.create_issue(fields) key = test_plan_ticket["key"] logger.info(f"Created new test execution: {key}") return key def _update_test_result(self, test_key: str, result: str, test_execution_key: str): test_runs = self._xray.get_test_runs(test_key) for test_run in test_runs: if test_run["testExecKey"] == test_execution_key: logger.info(f"Start updating test run {test_key} result to {result}") self._xray.update_test_run_status(test_run["id"], result) def _add_case_into_folder(self, test_entity: TestEntity, folder_id: int): self._xray.put( f"rest/raven/1.0/api/testrepository/" f"{self._project_key}/folders/{folder_id}/tests", data={"add": [test_entity.key]}, ) def _remove_case_from_folder(self, test_entity: TestEntity, folder_id: int): self._xray.put( f"rest/raven/1.0/api/testrepository/" f"{self._project_key}/folders/{folder_id}/tests", data={"remove": [test_entity.key]}, ) def _create_repo_folder(self, folder_name: str, parent_id: int) -> int: all_folders = self._xray.get( f"rest/raven/1.0/api/testrepository/{self._project_key}/folders" ) def _iter_folders(folders): for _ in folders["folders"]: if _["id"] == parent_id: return _["folders"] else: _iter_folders(_) return [] if parent_id == -1: sub_folders = all_folders["folders"] else: sub_folders = _iter_folders(all_folders) folder_id = -1 for folder in sub_folders: if folder_name == folder["name"]: logger.info(f"Using existing test repo folder: {folder_name}") folder_id = folder["id"] break if folder_id == -1: logger.info(f"Create test repo folder: {folder_name}") folder = self._xray.post( f"rest/raven/1.0/api/testrepository/{self._project_key}/folders/{parent_id}", data={"name": folder_name}, ) folder_id = folder["id"] return folder_id def _create_automation_repo_folder(self): self._automation_folder_id = self._create_repo_folder( self._AUTOMATION_TESTS_FOLDER_NAME, -1 ) self._automation_obsolete_folder_id = self._create_repo_folder( self._AUTOMATION_OBSOLETE_TESTS_FOLDER_NAME, self._automation_folder_id ) def upload_automation_results( self, test_plan_name: str, test_execution_name: str, test_results: List[TestResultEntity], ): test_plan_key = self._create_test_plan(test_plan_name) test_execution_key = self._create_test_execution(test_execution_name) tests = self.get_xray_tests() with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: # add tests to test plan executor.map( 
self._add_tests_to_test_plan, [test_plan_key for _ in range(len(tests))], [_.key for _ in tests], ) with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: # add tests to test execution executor.map( self._add_tests_to_test_execution, [test_execution_key for _ in range(len(tests))], [_.key for _ in tests], ) self._add_test_execution_to_test_plan(test_execution_key, test_plan_key) with ProcessPoolExecutor(self._MULTI_PROCESS_WORKER_NUM) as executor: # update test execution result executor.map( self._update_test_result, [result.key for result in test_results], [result.result.value for result in test_results], [test_execution_key for _ in range(len(test_results))], ) ```
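A minimal end-to-end sketch of the workflow the class above supports: configure a bot, synchronize locally defined tests, then push one execution's verdicts. Every URL, credential, project key, issue key, custom field name, and test identifier below is a placeholder, and the import assumes the package re-exports these names.

```python
# Hypothetical end-to-end usage of XrayBot; all URLs, credentials and keys are placeholders.
from xraybot import XrayBot, TestEntity, TestResultEntity, XrayResultType  # assumes package re-exports

bot = XrayBot(
    jira_url='https://jira.example.com',
    jira_username='automation-bot',
    jira_pwd='not-a-real-password',
    project_key='TEST',
)
bot.configure_custom_field('Test Category', ['Regression'])   # placeholder custom field

# Local view of the automated tests (e.g. parsed from a result XML)
local_tests = [
    TestEntity(
        unique_identifier='tests.login.test_valid_login',
        summary='Valid login',
        description='User can log in with valid credentials',
        req_key='TEST-101',
    ),
]
bot.sync_tests(local_tests)   # creates/updates/obsoletes Xray tests to match local_tests

# Upload one run's verdicts against the synced tests
results = [
    TestResultEntity(key=t.key, result=XrayResultType.PASS)
    for t in bot.get_xray_tests()
]
bot.upload_automation_results('Release 1.0 Test Plan', 'Release 1.0 RC1 Execution', results)
```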
{ "source": "jni/2019-debugging", "score": 3 }
#### File: 2019-debugging/modules/interp.py ```python import numpy as np from skimage.util import img_as_ubyte from scipy import ndimage as ndi def scale_and_uint8(image, factor): """Interpolate an image by a given factor and convert the result to uint8. Parameters ---------- image : array """ coords = np.meshgrid(*(np.linspace(0, i, i * factor, endpoint=False) for i in image.shape), indexing='ij') interpolated = ndi.map_coordinates(image, coords, mode='reflect') output = img_as_ubyte(interpolated) return output ``` #### File: 2019-debugging/modules/niblack.py ```python import itertools import numpy as np from scipy import ndimage as ndi from collections.abc import Iterable from skimage.transform import integral_image from skimage.util import crop np.warnings.filterwarnings('ignore') def _validate_window_size(axis_sizes): """Ensure all sizes in ``axis_sizes`` are odd. Parameters ---------- axis_sizes : iterable of int Raises ------ ValueError If any given axis size is even. """ for axis_size in axis_sizes: if axis_size % 2 == 0: msg = ('Window size for `threshold_sauvola` or ' '`threshold_niblack` must not be even on any dimension. ' 'Got {}'.format(axis_sizes)) raise ValueError(msg) def _mean_std(image, w): """Return local mean and standard deviation of each pixel using a neighborhood defined by a rectangular window size ``w``. The algorithm uses integral images to speedup computation. This is used by :func:`threshold_niblack` and :func:`threshold_sauvola`. Parameters ---------- image : ndarray Input image. w : int, or iterable of int Window size specified as a single odd integer (3, 5, 7, …), or an iterable of length ``image.ndim`` containing only odd integers (e.g. ``(1, 5, 5)``). Returns ------- m : ndarray of float, same shape as ``image`` Local mean of the image. s : ndarray of float, same shape as ``image`` Local standard deviation of the image. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "Efficient implementation of local adaptive thresholding techniques using integral images." in Document Recognition and Retrieval XV, (San Jose, USA), Jan. 2008. :DOI:`10.1117/12.767755` """ if not isinstance(w, Iterable): w = (w,) * image.ndim _validate_window_size(w) pad_width = tuple((k // 2 + 1, k // 2) for k in w) padded = np.pad(image.astype('float'), pad_width, mode='reflect') padded_sq = padded * padded integral = integral_image(padded) integral_sq = integral_image(padded_sq) kern = np.zeros(tuple(k + 1 for k in w)) for indices in itertools.product(*([[0, -1]] * image.ndim)): kern[indices] = (-1) ** (image.ndim % 2 != np.sum(indices) % 2) total_window_size = np.prod(w) sum_full = ndi.correlate(integral, kern, mode='constant') mean = crop(sum_full, pad_width) / total_window_size sum_sq_full = ndi.correlate(integral_sq, kern, mode='constant') ex2 = crop(sum_sq_full, pad_width) / total_window_size stdev = np.sqrt(ex2 - mean**2) return mean, stdev def threshold_niblack(image, window_size=15, k=0.2): """Applies Niblack local threshold to an array. A threshold T is calculated for every pixel in the image using the following formula:: T = m(x,y) - k * s(x,y) where m(x,y) and s(x,y) are the mean and standard deviation of pixel (x,y) neighborhood defined by a rectangular window with size w times w centered around the pixel. k is a configurable parameter that weights the effect of standard deviation. Parameters ---------- image: ndarray Input image. 
window_size : int, or iterable of int, optional Window size specified as a single odd integer (3, 5, 7, …), or an iterable of length ``image.ndim`` containing only odd integers (e.g. ``(1, 5, 5)``). k : float, optional Value of parameter k in threshold formula. Returns ------- threshold : (N, M) ndarray Threshold mask. All pixels with an intensity higher than this value are assumed to be foreground. Notes ----- This algorithm is originally designed for text recognition. References ---------- .. [1] <NAME> (1986), An introduction to Digital Image Processing, Prentice-Hall. Examples -------- >>> from skimage import data >>> image = data.page() >>> binary_image = threshold_niblack(image, window_size=7, k=0.1) """ m, s = _mean_std(image, window_size) return m - k * s ```
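A minimal usage sketch for the two helpers above. It uses skimage's bundled data.page() sample (the same image the threshold_niblack docstring refers to); the window size and k are just reasonable demo values, and the import paths assume the modules package is on the path.

```python
# Demo values only; data.page() is the sample image referenced in the docstring above.
from skimage import data
from modules.niblack import threshold_niblack   # assumes modules/ is importable
from modules.interp import scale_and_uint8

page = data.page()                               # small grayscale sample image

# Local Niblack threshold surface, then binarize by comparing pixel-wise
thresh = threshold_niblack(page, window_size=25, k=0.8)
binary = page > thresh
print('foreground fraction:', binary.mean())

# Upsample the image 2x and convert the interpolated result to uint8
bigger = scale_and_uint8(page, factor=2)
print(bigger.shape, bigger.dtype)
```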
{ "source": "jni/asv", "score": 3 }
#### File: asv/asv/console.py ```python from __future__ import (absolute_import, division, print_function, unicode_literals) import io import codecs import contextlib import locale import logging import os import sys import textwrap import time import six from six.moves import xrange, input WIN = (os.name == "nt") def isatty(file): """ Returns `True` if `file` is a tty. Most built-in Python file-like objects have an `isatty` member, but some user-defined types may not, so this assumes those are not ttys. """ if hasattr(file, 'isatty'): return file.isatty() return False def _decode_preferred_encoding(s): """ Decode the supplied byte string using the preferred encoding for the locale (`locale.getpreferredencoding`) or, if the default encoding is invalid, fall back first on utf-8, then on latin-1 if the message cannot be decoded with utf-8. """ if six.PY3 and isinstance(s, bytes): enc = locale.getpreferredencoding() try: try: return s.decode(enc) except LookupError: enc = 'utf-8' return s.decode(enc) except UnicodeDecodeError: return s.decode('latin-1', 'replace') return s def _color_text(text, color): """ Returns a string wrapped in ANSI color codes for coloring the text in a terminal:: colored_text = color_text('Here is a message', 'blue') This won't actually effect the text until it is printed to the terminal. Parameters ---------- text : str The string to return, bounded by the color codes. color : str An ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). """ color_mapping = { 'black': '0;30', 'red': '0;31', 'green': '0;32', 'brown': '0;33', 'blue': '0;34', 'magenta': '0;35', 'cyan': '0;36', 'lightgrey': '0;37', 'default': '0;39', 'darkgrey': '1;30', 'lightred': '1;31', 'lightgreen': '1;32', 'yellow': '1;33', 'lightblue': '1;34', 'lightmagenta': '1;35', 'lightcyan': '1;36', 'white': '1;37'} color_code = color_mapping.get(color, '0;39') return '\033[{0}m{1}\033[0m'.format(color_code, text) # This is a table of Unicode characters that we want to have # reasonable representations in ascii so they aren't just replaced # with '?'. A complete solution to this problem would involve a # third-party library such as "unidecode", but this handles the common # cases of stuff coming from asv. # # You can find the characters that need an entry using: # grep -P -n '[^\x00-\x7F]' -r * # in the `asv` source directory. _unicode_translations = { ord('μ'): 'u', ord('·'): '-', ord('±'): '~' } def _write_with_fallback(s, write, fileobj): """ Write the supplied string with the given write function like ``write(s)``, but use a writer for the locale's preferred encoding in case of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or 'latin-1'. *fileobj* can be text or byte stream, *s* can be unicode or bytes. """ try: write(s) return write except (UnicodeEncodeError, TypeError): # Let's try the next approach... 
pass enc = locale.getpreferredencoding() try: Writer = codecs.getwriter(enc) except LookupError: Writer = codecs.getwriter('utf-8') if isinstance(fileobj, io.TextIOBase): # Get the byte stream fileobj = fileobj.buffer if six.PY3 and isinstance(s, bytes): # Writers expect unicode input s = _decode_preferred_encoding(s) f = Writer(fileobj) write = f.write try: write(s) return write except UnicodeEncodeError: Writer = codecs.getwriter('latin-1') f = Writer(fileobj) write = f.write if six.PY3: s = s.translate(_unicode_translations) else: for key, val in _unicode_translations.iteritems(): s = s.replace(unichr(key), val) # If this doesn't work let the exception bubble up; I'm out of ideas try: write(s) return write except UnicodeEncodeError: write(s.encode('ascii', 'replace').decode('ascii')) return write def color_print(*args, **kwargs): """ Prints colors and styles to the terminal uses ANSI escape sequences. :: color_print('This is the color ', 'default', 'GREEN', 'green') Parameters ---------- positional args : str The positional arguments come in pairs (*msg*, *color*), where *msg* is the string to display and *color* is the color to display it in. *color* is an ANSI terminal color name. Must be one of: black, red, green, brown, blue, magenta, cyan, lightgrey, default, darkgrey, lightred, lightgreen, yellow, lightblue, lightmagenta, lightcyan, white, or '' (the empty string). file : writeable file-like object, optional Where to write to. Defaults to `sys.stdout`. If file is not a tty (as determined by calling its `isatty` member, if one exists), no coloring will be included. end : str, optional The ending of the message. Defaults to ``\\n``. The end will be printed after resetting any color or font state. """ file = kwargs.get('file', sys.stdout) end = kwargs.get('end', '\n') write = file.write if isatty(file) and not WIN: for i in xrange(0, len(args), 2): msg = args[i] if i + 1 == len(args): color = '' else: color = args[i + 1] if color: msg = _color_text(msg, color) msg = _decode_preferred_encoding(msg) write = _write_with_fallback(msg, write, file) write(end) else: for i in xrange(0, len(args), 2): msg = args[i] msg = _decode_preferred_encoding(msg) write = _write_with_fallback(msg, write, file) write(end) def get_answer_default(prompt, default, use_defaults=False): color_print("{0} [{1}]: ".format(prompt, default), end='') if use_defaults: return default x = input() if x.strip() == '': return default return x def truncate_left(s, l): if len(s) > l: return '...' 
+ s[-(l - 3):] else: return s class Log(object): def __init__(self): self._indent = 1 self._total = 0 self._count = 0 self._logger = logging.getLogger() self._needs_newline = False self._last_dot = time.time() def _stream_formatter(self, record): ''' The formatter for standard output ''' if self._needs_newline: color_print('') parts = record.msg.split('\n', 1) first_line = parts[0] if len(parts) == 1: rest = None else: rest = parts[1] if self._total: color_print('[{0:6.02f}%] '.format( (float(self._count) / self._total) * 100.0), end='') color_print('·' * self._indent, end='') color_print(' ', end='') if record.levelno < logging.DEBUG: color = 'default' elif record.levelno < logging.INFO: color = 'default' elif record.levelno < logging.WARN: if self._indent == 1: color = 'green' elif self._indent == 2: color = 'blue' else: color = 'default' elif record.levelno < logging.ERROR: color = 'brown' else: color = 'red' indent = self._indent + 11 spaces = ' ' * indent color_print(first_line, color, end='') if rest is not None: color_print('') detail = textwrap.dedent(rest) for line in detail.split('\n'): color_print(spaces, end='') color_print(line) self._needs_newline = True sys.stdout.flush() @contextlib.contextmanager def indent(self): """ A context manager to increase the indentation level. """ self._indent += 1 yield self._indent -= 1 def dot(self): if isatty(sys.stdout): if time.time() > self._last_dot + 1.0: color_print('.', 'darkgrey', end='') sys.stdout.flush() self._last_dot = time.time() def set_nitems(self, n): """ Set the number of items in a lengthy process. Each of these steps should be incremented through using `step`. """ self._total = n def step(self): """ Write that a step has been completed. A percentage is displayed along with it. """ self._count += 1 def enable(self, verbose=False): sh = logging.StreamHandler() sh.emit = self._stream_formatter self._logger.addHandler(sh) if verbose: self._logger.setLevel(logging.DEBUG) else: self._logger.setLevel(logging.INFO) @contextlib.contextmanager def set_level(self, level): orig_level = self._logger.level self._logger.setLevel(level) try: yield finally: self._logger.setLevel(orig_level) def is_debug_enabled(self): return self._logger.getEffectiveLevel() <= logging.DEBUG def info(self, *args, **kwargs): self._logger.info(*args, **kwargs) def warn(self, *args, **kwargs): self._logger.warn(*args, **kwargs) def debug(self, *args, **kwargs): self._logger.debug(*args, **kwargs) def error(self, *args, **kwargs): self._logger.error(*args, **kwargs) def add(self, msg): _write_with_fallback(msg, sys.stdout.write, sys.stdout) sys.stdout.flush() log = Log() ``` #### File: test/benchmark/time_examples.py ```python from __future__ import (absolute_import, division, print_function, unicode_literals) import sys if sys.version_info[0] == 3: xrange = range import warnings class TimeSuite: sample_time = 0.1 def setup(self): self.n = 100 def time_example_benchmark_1(self): s = '' for i in xrange(self.n): s = s + 'x' def time_example_benchmark_2(self): s = [] for i in xrange(self.n): s.append('x') ''.join(s) class TimeSuiteSub(TimeSuite): pass def time_with_warnings(): print('hi') warnings.warn('before') 1 / 0 warnings.warn('after') time_with_warnings.sample_time = 0.1 def time_with_timeout(): while True: pass time_with_timeout.timeout = 0.1 class TimeWithRepeat(object): # Check that setup is re-run on each repeat called = None number = 1 repeat = 10 count = 0 warmup_time = 0 def setup(self): assert self.called is None self.called = False def 
teardown(self): assert self.called is True self.called = None print("<%d>" % (self.count,)) def time_it(self): assert self.called is False self.called = True self.count += 1 class TimeWithRepeatCalibrate(object): # Check that setup is re-run on each repeat, apart from # autodetection of suitable `number` repeat = 1 number = 0 sample_time = 0.1 def setup(self): print("setup") def time_it(self): pass class TimeWithBadTimer(object): # Check that calibration of number is robust against bad timers repeat = 1 number = 0 sample_time = 0.1 timeout = 5 def timer(self): return 0.0 def time_it(self): pass ``` #### File: asv/test/test_statistics.py ```python from __future__ import (absolute_import, division, print_function, unicode_literals) from itertools import product import pytest import asv.statistics as statistics try: import numpy as np HAS_NUMPY = True except ImportError: HAS_NUMPY = False try: from scipy import special, stats HAS_SCIPY = True except ImportError: HAS_SCIPY = False @pytest.mark.skipif(not HAS_NUMPY, reason="Requires numpy") def test_compute_stats(): np.random.seed(1) assert statistics.compute_stats([]) == (None, None) assert statistics.compute_stats([15.0]) == (15.0, None) for nsamples, true_mean in product([10, 50, 250], [0, 0.3, 0.6]): samples = np.random.randn(nsamples) + true_mean result, stats = statistics.compute_stats(samples) assert np.allclose(stats['systematic'], 0) assert np.allclose(stats['n'], len(samples)) assert np.allclose(stats['mean'], np.mean(samples)) assert np.allclose(stats['q_25'], np.percentile(samples, 25)) assert np.allclose(stats['q_75'], np.percentile(samples, 75)) assert np.allclose(stats['min'], np.min(samples)) assert np.allclose(stats['max'], np.max(samples)) assert np.allclose(stats['std'], np.std(samples, ddof=0)) assert np.allclose(result, np.median(samples)) ci = stats['ci_99'] assert ci[0] <= true_mean <= ci[1] w = 12.0 * np.std(samples) / np.sqrt(len(samples)) assert ci[1] - ci[0] < w err = statistics.get_err(result, stats) iqr = np.percentile(samples, 75) - np.percentile(samples, 25) assert np.allclose(err, iqr/2) @pytest.mark.skipif(not HAS_NUMPY, reason="Requires numpy") def test_is_different(): np.random.seed(1) # Smoke test is_different for true_mean, n, significant in [(0.05, 10, False), (0.05, 100, True), (0.1, 10, True)]: samples_a = 0 + 0.1 * np.random.rand(n) samples_b = true_mean + 0.1 * np.random.rand(n) result_a, stats_a = statistics.compute_stats(samples_a) result_b, stats_b = statistics.compute_stats(samples_b) assert statistics.is_different(stats_a, stats_b) == significant @pytest.mark.skipif(not HAS_NUMPY, reason="Requires numpy") def test_quantile_ci(): # Test the confidence intervals def get_z_exp(loc, scale, size): z = np.random.exponential(scale, size=size) z *= 2 * np.random.randint(0, 2, size=len(z)) - 1 return loc + z def get_z_normal(loc, scale, size): z = np.random.normal(loc, scale, size=size) return z loc = 2.5 scale = 2.5 np.random.seed(1) for alpha_min in [0.5, 0.9, 0.99, 0.999]: for sampler in [get_z_exp, get_z_normal]: for size in [10, 30]: samples = [] for k in range(300): z = sampler(loc, scale, size) m, ci = statistics.quantile_ci(z, 0.5, alpha_min) assert np.allclose(m, np.median(z)) a, b = ci samples.append(a <= loc <= b) alpha = sum(samples) / len(samples) # Order of magnitude should match assert 1 - alpha <= 5 * (1 - alpha_min), (alpha_min, sampler, size) def test_quantile_ci_small(): # Small samples should give min/max ci for n in range(1, 7): sample = list(range(n)) m, ci = 
statistics.quantile_ci(sample, 0.5, 0.99) assert ci[0] == min(sample) assert ci[1] == max(sample) @pytest.mark.skipif(not HAS_NUMPY, reason="Requires numpy") def test_quantile(): np.random.seed(1) x = np.random.randn(50) for q in np.linspace(0, 1, 300): expected = np.percentile(x, 100 * q) got = statistics.quantile(x.tolist(), q) assert np.allclose(got, expected), q @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_lgamma(): x = np.arange(1, 5000) expected = special.gammaln(x) got = np.vectorize(statistics.lgamma)(x) assert np.allclose(got, expected, rtol=1e-12, atol=0) assert np.isnan(statistics.lgamma(1.2)) @pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy") def test_binom_pmf(): p = np.linspace(0, 1, 7) k = np.arange(0, 40, 5)[:,None] n = np.arange(0, 40, 5)[:,None,None] expected = stats.binom.pmf(k, n, p) got = np.vectorize(statistics.binom_pmf)(n, k, p) assert np.allclose(got, expected, rtol=1e-12, atol=0) ```
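The console helpers in the asv record above culminate in a module-level `log` object (an instance of the `Log` class). As a quick orientation, here is a minimal, hypothetical driver showing how that object's methods (`enable`, `set_nitems`, `step`, `indent`, `info`, `debug`) fit together; the import path and the task list are assumptions made for illustration only.

```python
# Hypothetical usage sketch for the module-level `log` object defined in the console module above.
# The import path is assumed; adjust it to wherever that module lives in your tree.
from asv.console import log

def run_tasks(tasks):
    log.enable(verbose=False)      # attach the stream handler at INFO level
    log.set_nitems(len(tasks))     # total step count, used for the percentage prefix
    for name in tasks:
        log.step()                 # advance the completed-step counter
        log.info("Running {0}".format(name))
        with log.indent():         # messages inside this block are indented one level deeper
            log.debug("shown only when verbose logging is enabled")

run_tasks(["build", "benchmark", "publish"])
```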
{ "source": "jnice-81/dace", "score": 2 }
#### File: codegen/instrumentation/report.py ```python import json import numpy as np import re class InstrumentationReport(object): @staticmethod def get_event_uuid(event): uuid = (-1, -1, -1) if 'args' in event: args = event['args'] if 'sdfg_id' in args and args['sdfg_id'] is not None: uuid = (args['sdfg_id'], -1, -1) if 'state_id' in args and args['state_id'] is not None: uuid = (uuid[0], args['state_id'], -1) if 'id' in args and args['id'] is not None: uuid = (uuid[0], uuid[1], args['id']) return uuid def __init__(self, filename: str): # Parse file match = re.match(r'.*report-(\d+)\.json', filename) self.name = match.groups()[0] if match is not None else 'N/A' self.durations = {} self.counters = {} self._sortcat = None self._sortdesc = False with open(filename, 'r') as fp: report = json.load(fp) if 'traceEvents' not in report or 'sdfgHash' not in report: print(filename, 'is not a valid SDFG instrumentation report!') return self.sdfg_hash = report['sdfgHash'] events = report['traceEvents'] for event in events: if 'ph' in event: phase = event['ph'] name = event['name'] if phase == 'X': uuid = self.get_event_uuid(event) if uuid not in self.durations: self.durations[uuid] = {} if name not in self.durations[uuid]: self.durations[uuid][name] = [] self.durations[uuid][name].append(event['dur'] / 1000) if phase == 'C': if name not in self.counters: self.counters[name] = 0 self.counters[name] += event['args'][name] def __repr__(self): return 'InstrumentationReport(name=%s)' % self.name def sortby(self, column: str, ascending: bool = False): if (column and column.lower() not in ('counter', 'value', 'min', 'max', 'mean', 'median')): raise ValueError('Only Counter, Value, Min, Max, Mean, Median are ' 'supported') self._sortcat = column if column is None else column.lower() self._sortdesc = not ascending def _get_runtimes_string(self, label, runtimes, element, sdfg, state, string, row_format, colw, with_element_heading=True): indent = '' if len(runtimes) > 0: element_label = '' if element[0] > -1 and element[1] > -1 and element[2] > -1: # This element is a node. if sdfg != element[0]: # No parent SDFG row present yet, print it. string += row_format.format('SDFG (' + str(element[0]) + ')', '', '', '', '', width=colw) sdfg = element[0] if state != element[1]: # No parent state row present yet, print it. string += row_format.format('|-State (' + str(element[1]) + ')', '', '', '', '', width=colw) state = element[1] element_label = '| |-Node (' + str(element[2]) + ')' indent = '| | |' elif element[0] > -1 and element[1] > -1: # This element is a state. if sdfg != element[0]: # No parent SDFG row present yet, print it. string += row_format.format('SDFG (' + str(element[0]) + ')', '', '', '', '', width=colw) sdfg = element[0] state = element[1] element_label = '|-State (' + str(element[1]) + ')' indent = '| |' elif element[0] > -1: # This element is an SDFG. 
sdfg = element[0] state = -1 element_label = 'SDFG (' + str(element[0]) + ')' indent = '|' else: element_label = 'N/A' if with_element_heading: string += row_format.format(element_label, '', '', '', '', width=colw) string += row_format.format(indent + label + ':', '', '', '', '', width=colw) string += row_format.format(indent, '%.3f' % np.min(runtimes), '%.3f' % np.mean(runtimes), '%.3f' % np.median(runtimes), '%.3f' % np.max(runtimes), width=colw) return string, sdfg, state def getkey(self, element): events = self.durations[element] result = [] for event in events.keys(): runtimes = events[event] result.extend(runtimes) result = np.array(result) if self._sortcat == 'min': return np.min(result) elif self._sortcat == 'max': return np.max(result) elif self._sortcat == 'mean': return np.mean(result) else: # if self._sortcat == 'median': return np.median(result) def __str__(self): COLW = 15 COUNTER_COLW = 39 element_list = list(self.durations.keys()) element_list.sort() row_format = ('{:<{width}}' * 5) + '\n' counter_format = ('{:<{width}}' * 2) + '\n' string = 'Instrumentation report\n' string += 'SDFG Hash: ' + self.sdfg_hash + '\n' if len(self.durations) > 0: string += ('-' * (COLW * 5)) + '\n' string += ('{:<{width}}' * 2).format( 'Element', 'Runtime (ms)', width=COLW) + '\n' string += row_format.format('', 'Min', 'Mean', 'Median', 'Max', width=COLW) string += ('-' * (COLW * 5)) + '\n' sdfg = -1 state = -1 if self._sortcat in ('min', 'mean', 'median', 'max'): element_list = sorted(element_list, key=self.getkey, reverse=self._sortdesc) for element in element_list: events = self.durations[element] if len(events) > 0: with_element_heading = True for event in events.keys(): runtimes = events[event] string, sdfg, state = self._get_runtimes_string( event, runtimes, element, sdfg, state, string, row_format, COLW, with_element_heading) with_element_heading = False string += ('-' * (COLW * 5)) + '\n' if len(self.counters) > 0: string += ('-' * (COUNTER_COLW * 2)) + '\n' string += ('{:<{width}}' * 2).format( 'Counter', 'Value', width=COUNTER_COLW) + '\n' string += ('-' * (COUNTER_COLW * 2)) + '\n' if self._sortcat == 'value': counter_list = sorted(self.counters, key=lambda k: self.counters[k], reverse=self._sortdesc) elif self._sortcat == 'counter': counter_list = sorted(self.counters.keys(), reverse=self._sortdesc) else: counter_list = self.counters.keys() for counter in counter_list: string += counter_format.format(counter, self.counters[counter], width=COUNTER_COLW) string += ('-' * (COUNTER_COLW * 2)) + '\n' return string ``` #### File: blas/nodes/axpy.py ```python import dace.library import dace.properties import dace.sdfg.nodes from dace.transformation.transformation import ExpandTransformation from dace.libraries.blas import environments from dace import (config, data as dt, dtypes, memlet as mm, SDFG, SDFGState, symbolic) from dace.frontend.common import op_repository as oprepo @dace.library.expansion class ExpandAxpyVectorized(ExpandTransformation): """ Generic expansion of AXPY with support for vectorized data types. """ environments = [] @staticmethod def expansion(node, parent_state: SDFGState, parent_sdfg, schedule=dace.ScheduleType.Default): """ :param node: Node to expand. :param parent_state: State that the node is in. :param parent_sdfg: SDFG that the node is in. :param schedule: The schedule to set on maps in the expansion. For FPGA expansion, this should be set to FPGA_Device. 
""" node.validate(parent_sdfg, parent_state) x_outer = parent_sdfg.arrays[next( parent_state.in_edges_by_connector(node, "_x")).data.data] y_outer = parent_sdfg.arrays[next( parent_state.in_edges_by_connector(node, "_y")).data.data] res_outer = parent_sdfg.arrays[next( parent_state.out_edges_by_connector(node, "_res")).data.data] a = node.a n = node.n / x_outer.dtype.veclen axpy_sdfg = dace.SDFG("axpy") axpy_state = axpy_sdfg.add_state("axpy") x_inner = x_outer.clone() x_inner.transient = False y_inner = y_outer.clone() y_inner.transient = False res_inner = res_outer.clone() res_inner.transient = False axpy_sdfg.add_datadesc("_x", x_inner) axpy_sdfg.add_datadesc("_y", y_inner) axpy_sdfg.add_datadesc("_res", res_inner) x_in = axpy_state.add_read("_x") y_in = axpy_state.add_read("_y") z_out = axpy_state.add_write("_res") vec_map_entry, vec_map_exit = axpy_state.add_map("axpy", {"i": f"0:{n}"}, schedule=schedule) axpy_tasklet = axpy_state.add_tasklet( "axpy", ["x_conn", "y_conn"], ["z_conn"], f"z_conn = {a} * x_conn + y_conn") # Access container either as an array or as a stream index = "0" if isinstance(x_inner, dt.Stream) else "i" axpy_state.add_memlet_path(x_in, vec_map_entry, axpy_tasklet, dst_conn="x_conn", memlet=dace.Memlet(f"_x[{index}]")) index = "0" if isinstance(y_inner, dt.Stream) else "i" axpy_state.add_memlet_path(y_in, vec_map_entry, axpy_tasklet, dst_conn="y_conn", memlet=dace.Memlet(f"_y[{index}]")) index = "0" if isinstance(res_inner, dt.Stream) else "i" axpy_state.add_memlet_path(axpy_tasklet, vec_map_exit, z_out, src_conn="z_conn", memlet=dace.Memlet(f"_res[{index}]")) return axpy_sdfg @dace.library.expansion class ExpandAxpyFpga(ExpandTransformation): """ FPGA expansion which uses the generic implementation, but sets the map schedule to be executed on FPGA. """ environments = [] @staticmethod def expansion(node, parent_state: SDFGState, parent_sdfg: SDFG, **kwargs): """ :param node: Node to expand. :param parent_state: State that the node is in. :param parent_sdfg: SDFG that the node is in. """ return ExpandAxpyVectorized.expansion( node, parent_state, parent_sdfg, schedule=dace.ScheduleType.FPGA_Device, **kwargs) @dace.library.node class Axpy(dace.sdfg.nodes.LibraryNode): """ Implements the BLAS AXPY operation, which computes a*x + y, where the vectors x and y are of size n. Expects input connectrs "_x" and "_y", and output connector "_res". 
""" # Global properties implementations = { "pure": ExpandAxpyVectorized, "fpga": ExpandAxpyFpga, } default_implementation = None # Object fields a = dace.properties.SymbolicProperty(allow_none=False, default=dace.symbolic.symbol("a")) n = dace.properties.SymbolicProperty(allow_none=False, default=dace.symbolic.symbol("n")) def __init__(self, name, a=None, n=None, *args, **kwargs): super().__init__(name, *args, inputs={"_x", "_y"}, outputs={"_res"}, **kwargs) self.a = a or dace.symbolic.symbol("a") self.n = n or dace.symbolic.symbol("n") def compare(self, other): if (self.veclen == other.veclen and self.implementation == other.implementation): return True else: return False def validate(self, sdfg, state): in_edges = state.in_edges(self) if len(in_edges) != 2: raise ValueError("Expected exactly two inputs to axpy") in_memlets = [in_edges[0].data, in_edges[1].data] out_edges = state.out_edges(self) if len(out_edges) != 1: raise ValueError("Expected exactly one output from axpy") out_memlet = out_edges[0].data size = in_memlets[0].subset.size() if len(size) != 1: raise ValueError("axpy only supported on 1-dimensional arrays") if size != in_memlets[1].subset.size(): raise ValueError("Inputs to axpy must have equal size") if size != out_memlet.subset.size(): raise ValueError("Output of axpy must have same size as input") if (in_memlets[0].wcr is not None or in_memlets[1].wcr is not None or out_memlet.wcr is not None): raise ValueError("WCR on axpy memlets not supported") return True # Numpy replacement @oprepo.replaces('dace.libraries.blas.axpy') @oprepo.replaces('dace.libraries.blas.Axpy') def axpy_libnode(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, a, x, y, result): # Add nodes x_in, y_in = (state.add_read(name) for name in (x, y)) res = state.add_write(result) libnode = Axpy('axpy', a=a) state.add_node(libnode) # Connect nodes state.add_edge(x_in, None, libnode, '_x', mm.Memlet(x)) state.add_edge(y_in, None, libnode, '_y', mm.Memlet(y)) state.add_edge(libnode, '_res', res, None, mm.Memlet(result)) return [] ``` #### File: mpi/nodes/scatter.py ```python import dace.library import dace.properties import dace.sdfg.nodes from dace.symbolic import symstr from dace.transformation.transformation import ExpandTransformation from .. 
import environments @dace.library.expansion class ExpandScatterMPI(ExpandTransformation): environments = [environments.mpi.MPI] @staticmethod def expansion(node, parent_state, parent_sdfg, n=None, **kwargs): (inbuffer, in_count_str), (outbuffer, out_count_str), root = node.validate( parent_sdfg, parent_state) in_mpi_dtype_str = dace.libraries.mpi.utils.MPI_DDT( inbuffer.dtype.base_type) out_mpi_dtype_str = dace.libraries.mpi.utils.MPI_DDT( outbuffer.dtype.base_type) if inbuffer.dtype.veclen > 1: raise (NotImplementedError) if root.dtype.base_type != dace.dtypes.int32: raise ValueError("Scatter root must be an integer!") code = f""" int _commsize; MPI_Comm_size(MPI_COMM_WORLD, &_commsize); MPI_Scatter(_inbuffer, ({in_count_str})/_commsize, {in_mpi_dtype_str}, _outbuffer, {out_count_str}, {out_mpi_dtype_str}, _root, MPI_COMM_WORLD); """ tasklet = dace.sdfg.nodes.Tasklet(node.name, node.in_connectors, node.out_connectors, code, language=dace.dtypes.Language.CPP) return tasklet @dace.library.node class Scatter(dace.sdfg.nodes.LibraryNode): # Global properties implementations = { "MPI": ExpandScatterMPI, } default_implementation = "MPI" def __init__(self, name, *args, **kwargs): super().__init__(name, *args, inputs={"_inbuffer", "_root"}, outputs={"_outbuffer"}, **kwargs) def validate(self, sdfg, state): """ :return: A three-tuple (buffer, root) of the three data descriptors in the parent SDFG. """ inbuffer, outbuffer, root = None, None, None for e in state.out_edges(self): if e.src_conn == "_outbuffer": outbuffer = sdfg.arrays[e.data.data] for e in state.in_edges(self): if e.dst_conn == "_inbuffer": inbuffer = sdfg.arrays[e.data.data] if e.dst_conn == "_root": root = sdfg.arrays[e.data.data] if root.dtype.base_type != dace.dtypes.int32: raise (ValueError("Scatter root must be an integer!")) in_count_str = "XXX" out_count_str = "XXX" for _, src_conn, _, _, data in state.out_edges(self): if src_conn == '_outbuffer': dims = [symstr(e) for e in data.subset.size_exact()] out_count_str = "*".join(dims) for _, _, _, dst_conn, data in state.in_edges(self): if dst_conn == '_inbuffer': dims = [symstr(e) for e in data.subset.size_exact()] in_count_str = "*".join(dims) return (inbuffer, in_count_str), (outbuffer, out_count_str), root ``` #### File: libraries/stencil/stencil.py ```python import collections from typing import Dict, List, Tuple import dace import dace.library from .cpu import ExpandStencilCPU from .intel_fpga import ExpandStencilIntelFPGA # from .xilinx import ExpandStencilXilinx @dace.library.node class Stencil(dace.library.LibraryNode): """ Represents applying a stencil that reads at constants offset from one or more input connectors, and writes to one or more output connector, using the given boundary conditions when accesses are out of bounds. The size of the iteration space will be inferred from the largest field being accessed, and it is assumed that all other fields accessed have the same size in each corresponding dimension. For specifying the boundary conditions, the following options are supported: boundary_conditions = { # When an access into the given input is out of bounds, the code will... "a": {"btype": "shrink"}, # ...not output anything "b": {"btype": "constant", "value": 0.0}, # ...replace it with a constant "c": {"btype": "copy"} # ...uses the center value, e.g., c[0, 0] in 2D } When one or more fields accessed are of lower dimensionality than others, the `iterator_mapping` argument is used to specify which iterators should be used to access it. 
Consider the following code: c[0, 0, 0] = a[0, 0, 0] + b[0, 0] This will produce three iterators _i0, _i1, and _i2. Which two of these are used to index into b, which is only 2-dimensional, is specified using a tuple of booleans choosing the iterators: input_mapping = { "b": (True, False, True) } This will use iterators _i0 and _i2 for accessing b. """ implementations = { "pure": ExpandStencilCPU, "intel_fpga": ExpandStencilIntelFPGA, # "xilinx": ExpandStencilXilinx } default_implementation = "pure" code = dace.properties.CodeProperty( desc=("Stencil code accessing all the input connector at constant " "offsets relative to the center, e.g.: " "c[0, 0, 0] = 0.1 * a[-1, 0, 1] + 0.9 * b[0, 0, 1]"), default=dace.properties.CodeBlock("")) iterator_mapping = dace.properties.DictProperty( str, tuple, desc=("Dictionary mapping lower-dimensional input fields to a tuple " " of booleans indicating which iterators to use for their " "accesses, e.g.: {'a': (True, False, True)} uses the first and " "last iterator in a 3D iteration space to access a 2D array."), default=collections.OrderedDict()) boundary_conditions = dace.properties.OrderedDictProperty( desc=("Boundary condition specifications for each accessed field, on " "the form: {'b': {'btype': 'constant', 'value': 3}}."), default=collections.OrderedDict()) def __init__(self, label: str, code: str = "", iterator_mapping: Dict[str, Tuple[int]] = {}, boundary_conditions: Dict[str, Dict] = {}, **kwargs): super().__init__(label, **kwargs) self.code = type(self).code.from_string(code, dace.dtypes.Language.Python) self.iterator_mapping = iterator_mapping self.boundary_conditions = boundary_conditions ``` #### File: transformation/dataflow/map_dim_shuffle.py ```python from dace import registry from dace.sdfg import SDFG from dace.sdfg import nodes from dace.sdfg import utils as sdutil from dace.transformation import transformation from dace.properties import make_properties, ShapeProperty @registry.autoregister_params(singlestate=True) @make_properties class MapDimShuffle(transformation.Transformation): """ Implements the map-dim shuffle transformation. MapDimShuffle takes a map and a list of params. It reorders the dimensions in the map such that it matches the list. 
""" _map_entry = transformation.PatternNode(nodes.MapEntry) # Properties parameters = ShapeProperty(dtype=list, default=None, desc="Desired order of map parameters") @staticmethod def expressions(): return [sdutil.node_path_graph(MapDimShuffle._map_entry)] @staticmethod def can_be_applied(graph, candidate, expr_index, sdfg, strict=False): return True @staticmethod def match_to_str(graph, candidate): map_entry = graph.nodes()[candidate[MapDimShuffle._map_entry]] return map_entry.map.label + ': ' + str(map_entry.map.params) def apply(self, sdfg: SDFG): graph = sdfg.nodes()[self.state_id] map_entry = graph.nodes()[self.subgraph[self._map_entry]] if set(self.parameters) != set(map_entry.map.params): return map_entry.range.ranges = [r for list_param in self.parameters for map_param, r in zip(map_entry.map.params, map_entry.range.ranges) if list_param == map_param] map_entry.map.params = self.parameters ``` #### File: samples/instrumentation/tuning.py ```python import dace from dace.codegen.instrumentation.report import InstrumentationReport import itertools import math import numpy as np import sys # Set data type dtype = dace.float64 # Set number of repetitions REPS = 10 # Define symbols M, K, N = tuple(dace.symbol(name) for name in ('M', 'K', 'N')) # Our program is a simple matrix multiplication with unknown dimensions @dace.program def matmult(A: dtype[M, K], B: dtype[K, N], C: dtype[M, N]): for i in range(REPS): C[:] = A @ B def test_configuration(a_trans: bool, b_trans: bool, a_padding: int, b_padding: int) -> InstrumentationReport: """ Tests a single configuration of A and B and returns the instrumentation report from running the SDFG. """ # Convert the program to an SDFG to enable instrumentation sdfg = matmult.to_sdfg() # Remove extraneous states sdfg.apply_strict_transformations() # Instrument state that runs in the loop above state = next(s for s in sdfg.nodes() if len(s.nodes()) > 0) state.instrument = dace.InstrumentationType.Timer # Modify properties of SDFG arrays according to the configuration: # padding (round to the nearest padding value) and total size. if a_trans: a_strides = (1, int(math.ceil(M.get() / a_padding) * a_padding)) total_a = int(a_strides[1] * K.get()) else: a_strides = (int(math.ceil(K.get() / a_padding) * a_padding), 1) total_a = int(a_strides[0] * M.get()) if b_trans: b_strides = (1, int(math.ceil(K.get() / b_padding) * b_padding)) total_b = int(b_strides[1] * N.get()) else: b_strides = (int(math.ceil(N.get() / b_padding) * b_padding), 1) total_b = int(b_strides[0] * K.get()) # NOTE: In DaCe, strides are denoted in absolute values, meaning that each # dimension of "strides" contains the number of elements to skip in # order to get to the next element in that dimension. 
For example, # contiguous dimensions are denoted by 1 sdfg.arrays['A'].strides = a_strides sdfg.arrays['A'].total_size = total_a sdfg.arrays['B'].strides = b_strides sdfg.arrays['B'].total_size = total_b # Create matching arrays in numpy and fill with random values nbytes = dtype.bytes A = np.ndarray([M.get(), K.get()], dtype.type, buffer=np.ndarray([total_a], dtype.type), strides=[s * nbytes for s in a_strides]) B = np.ndarray([K.get(), N.get()], dtype.type, buffer=np.ndarray([total_b], dtype.type), strides=[s * nbytes for s in b_strides]) A[:] = np.random.rand(M.get(), K.get()) B[:] = np.random.rand(K.get(), N.get()) C = np.zeros([M.get(), N.get()], dtype.type) # Invoke SDFG: compile without additional transformations and run csdfg = sdfg.compile() csdfg(A=A, B=B, C=C, M=np.int32(M.get()), K=np.int32(K.get()), N=np.int32(N.get())) assert np.allclose(A @ B, C) # Return instrumentation report return sdfg.get_latest_report() if __name__ == '__main__': # Define some example sizes or use command line arguments M.set(int(sys.argv[1] if len(sys.argv) > 1 else 257)) K.set(int(sys.argv[2] if len(sys.argv) > 2 else 258)) N.set(int(sys.argv[3] if len(sys.argv) > 3 else 319)) # Disable debug printouts dace.Config.set('debugprint', value=False) # Create options for storage orders and padding ORDERS = ['normal', 'transposed'] PADDINGS = [1, 16, 512, 4096] best_config = (None, None, None, None) best_runtime = np.inf # NOTE: To keep tests fast we fix C's storage order and padding for tA_order, tB_order in itertools.product(ORDERS, ORDERS): for tA_padding, tB_padding in itertools.product(PADDINGS, PADDINGS): print(tA_order, tA_padding, tB_order, tB_padding) report = test_configuration(tA_order == 'transposed', tB_order == 'transposed', tA_padding, tB_padding) # Obtain the first entry type from the report (there is only one) entry = np.array(list(report.durations.values())[0]) print(list(entry)) # Use median value to rank performance runtime_ms = np.median(entry) if runtime_ms < best_runtime: best_runtime = runtime_ms best_config = (tA_order, tA_padding, tB_order, tB_padding) # Print out best configuration A_order, A_padding, B_order, B_padding = best_config print('Fastest configuration for (%dx%dx%d) is:' % (M.get(), K.get(), N.get())) print(' A with storage order %s, padding = %d' % (A_order, A_padding)) print(' B with storage order %s, padding = %d' % (B_order, B_padding)) print(' Runtime: %f ms' % best_runtime) ``` #### File: samples/polybench/bicg.py ```python import math import dace import polybench N = dace.symbol('N') M = dace.symbol('M') #datatypes = [dace.float64, dace.int32, dace.float32] datatype = dace.float64 # Dataset sizes sizes = [{ M: 38, N: 42, }, { M: 116, N: 124, }, { M: 390, N: 410, }, { M: 1900, N: 2100, }, { M: 1800, N: 2200, }] args = [([N, M], datatype), ([M], datatype), ([N], datatype), ([M], datatype), ([N], datatype)] def init_array(A, s, q, p, r): n = N.get() m = M.get() for i in range(m): p[i] = datatype(i % m) / m for i in range(n): r[i] = datatype(i % n) / n for j in range(m): A[i, j] = datatype(i * (j + 1) % n) / n @dace.program(datatype[N, M], datatype[M], datatype[N], datatype[M], datatype[N]) def bicg(A, s, q, p, r): @dace.map def reset_s(i: _[0:M]): out >> s[i] out = 0.0 @dace.map def compute(i: _[0:N], j: _[0:M]): inA << A[i, j] inr << r[i] inp << p[j] outs >> s(1, lambda a, b: a + b)[j] outq >> q(1, lambda a, b: a + b)[i] outs = inr * inA outq = inA * inp if __name__ == '__main__': polybench.main(sizes, args, [(1, 's'), (2, 'q')], init_array, bicg) ``` #### File: 
tests/fpga/async_test.py ```python import os import multiprocessing as mp from simple_systolic_array_test import P, N, make_sdfg from dace.config import Config from dace.fpga_testing import fpga_test import dace.dtypes import numpy as np def run_test(do_async): N.set(128) P.set(4) A = np.empty((N.get()), dtype=np.int32) Config.set("compiler", "intel_fpga", "launch_async", value=do_async) name = "async_test" sdfg = make_sdfg(name) sdfg.specialize({"P": P.get(), "N": N.get()}) # We don't care about the result, as long as it compiles and runs sdfg(A=A) return sdfg @fpga_test() def test_async_fpga_true(): return run_test(True) @fpga_test() def test_async_fpga_false(): return run_test(False) if __name__ == "__main__": test_async_fpga_true(None) # test_async_fpga_false(None) ``` #### File: tests/fpga/autorun_test.py ```python import argparse import dace import numpy as np import re from dace.fpga_testing import intel_fpga_test DTYPE = dace.float32 def make_sdfg(): N = dace.symbol("N", DTYPE) P = dace.symbol("P", DTYPE) sdfg = dace.SDFG("autorun_test") pre_state = sdfg.add_state("host_to_device") state = sdfg.add_state("compute") post_state = sdfg.add_state("device_to_host") sdfg.add_edge(pre_state, state, dace.InterstateEdge()) sdfg.add_edge(state, post_state, dace.InterstateEdge()) sdfg.add_array("arr_host", (N, ), DTYPE) sdfg.add_array("arr", (N, ), DTYPE, storage=dace.StorageType.FPGA_Global, transient=True) # Copy from host to device pre_host = pre_state.add_read("arr_host") pre_device = pre_state.add_write("arr") pre_state.add_memlet_path(pre_host, pre_device, memlet=dace.Memlet("arr[0:N]")) # Copy from device to host post_device = post_state.add_read("arr") post_host = post_state.add_write("arr_host") post_state.add_memlet_path(post_device, post_host, memlet=dace.Memlet("arr_host[0:N]")) sdfg.add_stream("pipe_in", DTYPE, storage=dace.StorageType.FPGA_Local, transient=True) # Read from memory into a stream memory_read = state.add_read("arr") pipe_in_write = state.add_write("pipe_in") state.add_memlet_path(memory_read, pipe_in_write, memlet=dace.Memlet("arr[0:N]", other_subset="0")) sdfg.add_stream("pipes_systolic", DTYPE, shape=(P + 1, ), storage=dace.StorageType.FPGA_Local, transient=True) # Simple processing element that can be autorun pipe_in_read = state.add_read("pipe_in") entry_add, exit_add = state.add_map("add", {"i": "0:N"}, schedule=dace.ScheduleType.FPGA_Device) tasklet_add = state.add_tasklet("add", {"val_in"}, {"val_out"}, "val_out = val_in + 9") state.add_memlet_path(pipe_in_read, entry_add, tasklet_add, dst_conn="val_in", memlet=dace.Memlet("pipe_in[0]")) pipe_systolic_write_head = state.add_write("pipes_systolic") state.add_memlet_path(tasklet_add, exit_add, pipe_systolic_write_head, src_conn="val_out", memlet=dace.Memlet("pipes_systolic[0]")) # Systolic array which can be autorun unroll_entry, unroll_exit = state.add_map( "systolic_array", {"p": "0:P"}, schedule=dace.ScheduleType.FPGA_Device, unroll=True) pipe_unroll_read = state.add_read("pipes_systolic") state.add_memlet_path(unroll_entry, pipe_unroll_read, memlet=dace.Memlet()) systolic_entry, systolic_exit = state.add_map( "add_systolic", {"i": "0:N"}, schedule=dace.ScheduleType.FPGA_Device) systolic_tasklet = state.add_tasklet("add_systolic", {"val_in"}, {"val_out"}, "val_out = 2 * val_in") state.add_memlet_path(pipe_unroll_read, systolic_entry, systolic_tasklet, dst_conn="val_in", memlet=dace.Memlet("pipes_systolic[p]")) pipe_unroll_write = state.add_write("pipes_systolic") state.add_memlet_path(systolic_tasklet, 
systolic_exit, pipe_unroll_write, src_conn="val_out", memlet=dace.Memlet("pipes_systolic[p + 1]")) state.add_memlet_path(pipe_unroll_write, unroll_exit, memlet=dace.Memlet()) # Write back to memory pipe_systolic_read_tail = state.add_read("pipes_systolic") memory_write = state.add_write("arr") state.add_memlet_path(pipe_systolic_read_tail, memory_write, memlet=dace.Memlet("arr[0:N]", other_subset="P")) return sdfg @intel_fpga_test() def test_autorun(): n = 128 p = 4 sdfg = make_sdfg() sdfg.specialize({"N": 128, "P": 4}) arr = np.ones((128, ), dtype=DTYPE.type) for c in (c for c in sdfg.generate_code() if c.language == "cl"): if len(re.findall(r"__attribute__\(\(autorun\)\)", c.code)) != 2: raise RuntimeError("Autogen attributes not found.") sdfg(arr_host=arr) if any(arr != 2**4 * 10): raise ValueError("Verification failed.") return sdfg if __name__ == "__main__": test_autorun(None) ``` #### File: tests/fpga/bank_split_test.py ```python import dace from multibank_copy_fpga_test import mkc from dace.dtypes import StorageType from dace.transformation.dataflow import BankSplit from dace.transformation import optimizer import numpy as np def test_simple_split(): sdfg = dace.SDFG("hbm_bank_split_first_dim") _, b, a = mkc(sdfg, None, "b", "a", StorageType.CPU_Heap, StorageType.CPU_Heap, [4, 10, 10], [40, 10], "b") for xform in optimizer.Optimizer(sdfg).get_pattern_matches( patterns=BankSplit): xform.apply(sdfg) sdfg(a=a, b=b) assert np.allclose(b[1], a[10:20, :]) assert np.allclose(b[3], a[30:40, :]) def test_even_split_3d(): sdfg = dace.SDFG("hbm_bank_split_even_split_3d") s, b, a = mkc(sdfg, None, "b", "a", StorageType.CPU_Heap, StorageType.CPU_Heap, [8, 50, 50, 50], [100, 100, 100], "b") for xform in optimizer.Optimizer(sdfg).get_pattern_matches( patterns=BankSplit): xform.split_array_info = [2, 2, 2] xform.apply(sdfg) b = np.random.uniform(0, 100, [8, 50, 50, 50]).astype(np.int32) sdfg(a=a, b=b) assert np.allclose(a[0:50, 0:50, 0:50], b[0, :, :, :]) assert np.allclose(a[50:100, 50:100, 50:100], b[7, :, :, :]) assert np.allclose(a[0:50, 50:100, 0:50], b[2, :, :, :]) def test_second_dim_split_2d(): sdfg = dace.SDFG("hbm_bank_split_sec_dim_split2d") s, a, b = mkc(sdfg, None, "a", "b", StorageType.CPU_Heap, StorageType.CPU_Heap, [10, 100], [10, 10, 10], "b") for xform in optimizer.Optimizer(sdfg).get_pattern_matches( patterns=BankSplit): xform.split_array_info = [1, 10] xform.apply(sdfg) a = np.random.uniform(0, 10, [10, 100]).astype(np.int32) sdfg(a=a, b=b) for i in range(10): assert np.allclose(a[0:10, 10 * i:(10 * i + 10)], b[i]) def test_explicit_split_3d(): sdfg = dace.SDFG("hbm_bank_split_explicit_3d") s, a, b = mkc(sdfg, None, "a", "b", StorageType.CPU_Heap, StorageType.CPU_Heap, [120, 100, 100], [24, 40, 50, 25]) for xform in optimizer.Optimizer(sdfg).get_pattern_matches( patterns=BankSplit): xform.split_array_info = [3, 2, 4] xform.apply(sdfg) a = np.random.uniform(0, 100, [120, 100, 100]).astype(np.int32) sdfg(a=a, b=b) assert np.allclose(a[80:120, 50:100, 75:100], b[23]) assert np.allclose(a[0:40, 50:100, 75:100], b[7]) assert np.allclose(a[40:80, 0:50, 25:50], b[9]) if __name__ == "__main__": test_simple_split() test_even_split_3d() test_second_dim_split_2d() test_explicit_split_3d() ``` #### File: tests/fpga/remove_degenerate_loop_test.py ```python import dace from dace.fpga_testing import fpga_test import copy import numpy as np import re def make_sdfg(name="transpose"): n = dace.symbol("N") m = dace.symbol("M") sdfg = dace.SDFG(name) pre_state = sdfg.add_state(name + "_pre") state 
= sdfg.add_state(name) post_state = sdfg.add_state(name + "_post") sdfg.add_edge(pre_state, state, dace.InterstateEdge()) sdfg.add_edge(state, post_state, dace.InterstateEdge()) _, desc_input_host = sdfg.add_array("a_input", (n, m), dace.float64) _, desc_output_host = sdfg.add_array("a_output", (m, n), dace.float64) desc_input_device = copy.copy(desc_input_host) desc_input_device.storage = dace.StorageType.FPGA_Global desc_input_device.location["memorytype"] = "ddr" desc_input_device.location["bank"] = "0" desc_input_device.transient = True desc_output_device = copy.copy(desc_output_host) desc_output_device.storage = dace.StorageType.FPGA_Global desc_output_device.location["memorytype"] = "ddr" desc_output_device.location["bank"] = "1" desc_output_device.transient = True sdfg.add_datadesc("a_input_device", desc_input_device) sdfg.add_datadesc("a_output_device", desc_output_device) # Host to device pre_read = pre_state.add_read("a_input") pre_write = pre_state.add_write("a_input_device") pre_state.add_memlet_path(pre_read, pre_write, memlet=dace.Memlet.simple(pre_write, "0:N, 0:M")) # Device to host post_read = post_state.add_read("a_output_device") post_write = post_state.add_write("a_output") post_state.add_memlet_path(post_read, post_write, memlet=dace.Memlet.simple( post_write, "0:N, 0:M")) # Compute state read = state.add_read("a_input_device") write = state.add_write("a_output_device") # Trivial tasklet tasklet = state.add_tasklet(name, {"_in"}, {"_out"}, "_out = _in") entry, exit = state.add_map(name, { "i": "0:N", "j": "0:M", }, schedule=dace.ScheduleType.FPGA_Device) state.add_memlet_path(read, entry, tasklet, dst_conn="_in", memlet=dace.Memlet.simple("a_input_device", "i, j", num_accesses=1)) state.add_memlet_path(tasklet, exit, write, src_conn="_out", memlet=dace.Memlet.simple("a_output_device", "j, i", num_accesses=1)) return sdfg @fpga_test() def test_remove_degenerate_loop(): sdfg = make_sdfg("remove_degenerate_loop_test") size = 8192 sdfg.specialize({"N": size, "M": 1}) # Degenerate dimension codes = sdfg.generate_code() tasklet_name = sdfg.name + "_tasklet" for code in codes: if code.target_type == "device": break # code now points to the appropriate code object else: # Sanity check raise ValueError("Didn't find tasklet in degenerate map.") if re.search(r"for \(.+\bj\b < \bM\b", code.code) is not None: raise ValueError("Single iteration loop was not removed.") first_assignment = re.search(r"\bj\b\s*=\s*0\s*;", code.code) if first_assignment is None: raise ValueError("Assignment to constant variable not found.") a_input = np.copy(np.arange(size, dtype=np.float64).reshape((size, 1))) a_output = np.empty((1, size), dtype=np.float64) sdfg(a_input=a_input, a_output=a_output) if any(a_input.ravel() != a_output.ravel()): raise ValueError("Unexpected output.") return sdfg if __name__ == "__main__": test_remove_degenerate_loop(None) ``` #### File: tests/fpga/veclen_conversion_connector_test.py ```python import argparse import numpy as np from veclen_conversion_test import SIZE, VECTOR_LENGTH, make_sdfg from dace.fpga_testing import fpga_test @fpga_test() def test_veclen_conversion_connector(): size = 128 vector_length = 4 SIZE.set(size) VECTOR_LENGTH.set(vector_length) if size % vector_length != 0: raise ValueError( "Size {} must be divisible by vector length {}.".format( size, vector_length)) sdfg = make_sdfg(name="veclen_conversion_connector", vectorize_connector=True) sdfg.specialize({"W": vector_length}) A = np.arange(size, dtype=np.float64) B = np.zeros((size, ), 
dtype=np.float64) sdfg(A=A, B=B, N=SIZE) mid = vector_length // 2 for i in range(size // vector_length): expected = np.concatenate( (A[i * vector_length + mid:(i + 1) * vector_length], A[i * vector_length:i * vector_length + mid])) if any(B[i * vector_length:(i + 1) * vector_length] != expected): raise ValueError("Shuffle failed: {} (should be {})".format( B, expected)) return sdfg if __name__ == "__main__": test_veclen_conversion_connector(None) ``` #### File: tests/library/blas_dot_test.py ```python import pytest import dace from dace.memlet import Memlet from dace.codegen.exceptions import CompilerConfigurationError, CompilationError import dace.libraries.blas as blas import numpy as np import sys import warnings ############################################################################### def make_sdfg(implementation, dtype, storage=dace.StorageType.Default): n = dace.symbol("n") suffix = "_device" if storage != dace.StorageType.Default else "" transient = storage != dace.StorageType.Default sdfg = dace.SDFG("dot_product_{}_{}".format(implementation, dtype)) state = sdfg.add_state("dataflow") sdfg.add_array("x" + suffix, [n], dtype, storage=storage, transient=transient) sdfg.add_array("y" + suffix, [n], dtype, storage=storage, transient=transient) sdfg.add_array("result" + suffix, [1], dtype, storage=storage, transient=transient) x = state.add_read("x" + suffix) y = state.add_read("y" + suffix) result = state.add_write("result" + suffix) dot_node = blas.nodes.dot.Dot("dot") dot_node.implementation = implementation state.add_memlet_path(x, dot_node, dst_conn="_x", memlet=Memlet.simple(x, "0:n", num_accesses=n)) state.add_memlet_path(y, dot_node, dst_conn="_y", memlet=Memlet.simple(y, "0:n", num_accesses=n)) state.add_memlet_path(dot_node, result, src_conn="_result", memlet=Memlet.simple(result, "0", num_accesses=1)) if storage != dace.StorageType.Default: sdfg.add_array("x", [n], dtype) sdfg.add_array("y", [n], dtype) sdfg.add_array("result", [1], dtype) init_state = sdfg.add_state("copy_to_device") sdfg.add_edge(init_state, state, dace.InterstateEdge()) x_host = init_state.add_read("x") y_host = init_state.add_read("y") x_device = init_state.add_write("x" + suffix) y_device = init_state.add_write("y" + suffix) init_state.add_memlet_path(x_host, x_device, memlet=Memlet.simple(x_host, "0:n", num_accesses=n)) init_state.add_memlet_path(y_host, y_device, memlet=Memlet.simple(y_host, "0:n", num_accesses=n)) finalize_state = sdfg.add_state("copy_to_host") sdfg.add_edge(state, finalize_state, dace.InterstateEdge()) result_device = finalize_state.add_write("result" + suffix) result_host = finalize_state.add_read("result") finalize_state.add_memlet_path(result_device, result_host, memlet=Memlet.simple(result_device, "0", num_accesses=1)) return sdfg ############################################################################### @pytest.mark.parametrize("implementation, dtype", [ pytest.param("pure", dace.float32), pytest.param("pure", dace.float64), pytest.param("MKL", dace.float32, marks=pytest.mark.mkl), pytest.param("MKL", dace.float64, marks=pytest.mark.mkl), pytest.param("cuBLAS", dace.float32, marks=pytest.mark.gpu), pytest.param("cuBLAS", dace.float64, marks=pytest.mark.gpu) ]) def test_dot(implementation, dtype): storage = (dace.StorageType.GPU_Global if implementation == 'cuBLAS' else dace.StorageType.Default) sdfg = make_sdfg(implementation, dtype, storage=storage) np_dtype = getattr(np, dtype.to_string()) dot = sdfg.compile() size = 32 x = np.ndarray(size, dtype=np_dtype) y = 
np.ndarray(size, dtype=np_dtype) result = np.ndarray(1, dtype=np_dtype) x[:] = 2.5 y[:] = 2 result[0] = 0 dot(x=x, y=y, result=result, n=size) ref = np.dot(x, y) diff = abs(result[0] - ref) assert diff < 1e-6 * ref ############################################################################### if __name__ == "__main__": test_dot("pure", dace.float32) test_dot("pure", dace.float64) test_dot("MKL", dace.float32) test_dot("MKL", dace.float64) test_dot("cuBLAS", dace.float32) test_dot("cuBLAS", dace.float64) ``` #### File: library/mpi/mpi_scatter_test.py ```python import dace from dace.memlet import Memlet import dace.libraries.mpi as mpi import numpy as np import pytest ############################################################################### def make_sdfg(dtype): n = dace.symbol("n") p = dace.symbol("p") sdfg = dace.SDFG("mpi_scatter") state = sdfg.add_state("dataflow") sdfg.add_array("inbuf", [n * p], dtype, transient=False) sdfg.add_array("outbuf", [n], dtype, transient=False) sdfg.add_array("root", [1], dace.dtypes.int32, transient=False) inbuf = state.add_access("inbuf") outbuf = state.add_access("outbuf") root = state.add_access("root") scatter_node = mpi.nodes.scatter.Scatter("scatter") state.add_memlet_path(inbuf, scatter_node, dst_conn="_inbuffer", memlet=Memlet.simple(inbuf, "0:n*p", num_accesses=n)) state.add_memlet_path(root, scatter_node, dst_conn="_root", memlet=Memlet.simple(root, "0:1", num_accesses=1)) state.add_memlet_path(scatter_node, outbuf, src_conn="_outbuffer", memlet=Memlet.simple(outbuf, "0:n", num_accesses=1)) return sdfg ############################################################################### @pytest.mark.parametrize("implementation, dtype", [ pytest.param("MPI", dace.float32, marks=pytest.mark.mpi), pytest.param("MPI", dace.float64, marks=pytest.mark.mpi) ]) def test_mpi(implementation, dtype): from mpi4py import MPI as MPI4PY np_dtype = getattr(np, dtype.to_string()) comm = MPI4PY.COMM_WORLD rank = comm.Get_rank() commsize = comm.Get_size() mpi_sdfg = None if commsize < 2: raise ValueError( "This test is supposed to be run with at least two processes!") for r in range(0, commsize): if r == rank: sdfg = make_sdfg(dtype) mpi_sdfg = sdfg.compile() comm.Barrier() size = 8 A = np.full(size * commsize, 7, dtype=np_dtype) B = np.full(size, 42, dtype=np_dtype) root = np.array([0], dtype=np.int32) mpi_sdfg(inbuf=A, outbuf=B, root=root, n=size, p=commsize) # now B should be an array of size, containing 0 if not np.allclose(B, np.full(size, 7, dtype=np_dtype)): raise (ValueError("The received values are not what I expected.")) ############################################################################### N = dace.symbol('N', dtype=dace.int64) P = dace.symbol('P', dtype=dace.int64) @dace.program def dace_scatter_gather(A: dace.float32[N * P]): tmp = np.empty_like(A, shape=[N]) dace.comm.Scatter(A, tmp, root=0) tmp[:] = np.pi dace.comm.Gather(tmp, A, root=0) @pytest.mark.mpi def test_dace_scatter_gather(): from mpi4py import MPI as MPI4PY comm = MPI4PY.COMM_WORLD rank = comm.Get_rank() commsize = comm.Get_size() mpi_sdfg = None if commsize < 2: raise ValueError( "This test is supposed to be run with at least two processes!") for r in range(0, commsize): if r == rank: mpi_sdfg = dace_scatter_gather.compile() comm.Barrier() length = 128 if rank == 0: A = np.full([length * commsize], np.pi, dtype=np.float32) else: A = np.random.randn(length * commsize).astype(np.float32) mpi_sdfg(A=A, N=length, P=commsize) if rank == 0: assert (np.allclose( A, np.full([length * 
commsize], np.pi, dtype=np.float32))) else: assert (True) ############################################################################### if __name__ == "__main__": test_mpi("MPI", dace.float32) test_mpi("MPI", dace.float64) test_dace_scatter_gather() ############################################################################### ``` #### File: tests/library/two_pkgs_test.py ```python import dace from dace import Memlet from dace.codegen.exceptions import CompilerConfigurationError, CompilationError from dace.libraries.linalg import Inv import numpy as np import warnings n = dace.symbol("n", dace.int64) def generate_matrix(size, dtype): if dtype == np.float32: tol = 1e-7 elif dtype == np.float64: tol = 1e-14 else: raise NotImplementedError while True: A = np.random.randn(size, size).astype(dtype) B = A @ A.T err = np.absolute(B @ np.linalg.inv(B) - np.eye(size)) if np.all(err < tol): break return A def make_sdfg(implementation, dtype, id=0, in_shape=[n, n], out_shape=[n, n], in_subset="0:n, 0:n", out_subset="0:n, 0:n", overwrite=False, getri=True): sdfg = dace.SDFG("linalg_inv_{}_{}_{}".format(implementation, dtype.__name__, id)) sdfg.add_symbol("n", dace.int64) state = sdfg.add_state("dataflow") sdfg.add_array("xin", in_shape, dtype) if not overwrite: sdfg.add_array("xout", out_shape, dtype) xin = state.add_read("xin") if overwrite: xout = state.add_write("xin") else: xout = state.add_write("xout") inv_node = Inv("inv", overwrite_a=overwrite, use_getri=getri) inv_node.implementation = implementation state.add_memlet_path(xin, inv_node, dst_conn="_ain", memlet=Memlet.simple(xin, in_subset, num_accesses=n * n)) state.add_memlet_path(inv_node, xout, src_conn="_aout", memlet=Memlet.simple(xout, out_subset, num_accesses=n * n)) return sdfg def _test_inv(implementation, dtype, id=0, size=4, in_shape=[4, 4], out_shape=[4, 4], in_offset=[0, 0], out_offset=[0, 0], in_dims=[0, 1], out_dims=[0, 1], overwrite=False, getri=True): assert np.all(np.array(in_shape)[in_dims] >= size) assert np.all(np.array(out_shape)[out_dims] >= size) assert np.all(np.array(in_offset) < size) assert np.all(np.array(out_offset) < size) assert np.all( np.array(in_offset)[in_dims] + size <= np.array(in_shape)[in_dims]) assert np.all( np.array(out_offset)[out_dims] + size <= np.array(out_shape)[out_dims]) in_subset = tuple([ slice(o, o + size) if i in in_dims else o for i, o in enumerate(in_offset) ]) if overwrite: out_subset = in_subset else: out_subset = tuple([ slice(o, o + size) if i in out_dims else o for i, o in enumerate(out_offset) ]) in_subset_str = ','.join([ "{b}:{e}".format(b=o, e=o + size) if i in in_dims else str(o) for i, o in enumerate(in_offset) ]) if overwrite: out_subset_str = in_subset_str else: out_subset_str = ','.join([ "{b}:{e}".format(b=o, e=o + size) if i in out_dims else str(o) for i, o in enumerate(out_offset) ]) sdfg = make_sdfg(implementation, dtype, id, in_shape, out_shape, in_subset_str, out_subset_str, overwrite, getri) inv_sdfg = sdfg.compile() A0 = np.zeros(in_shape, dtype=dtype) A0[in_subset] = generate_matrix(size, dtype) A1 = np.copy(A0) if overwrite: A2 = A1 else: A2 = np.zeros(out_shape, dtype=dtype) A3 = np.linalg.inv(A0[in_subset]) inv_sdfg(xin=A1, xout=A2, n=size) if dtype == np.float32: rtol = 1e-7 atol = 1e-7 elif dtype == np.float64: rtol = 1e-14 atol = 1e-14 else: raise NotImplementedError assert np.allclose(A2[out_subset], A3, rtol=rtol, atol=atol) if overwrite: assert not np.array_equal(A0, A1) if __name__ == "__main__": _test_inv('OpenBLAS', np.float32, id=0) ``` #### File: 
tests/python_frontend/arithmetic_conversions_test.py ```python import dace import numpy as np @dace.program def add(A: dace.complex64[5, 5], B: dace.float64[5, 5]): return A + B def test_add(): A = np.random.randint(0, high=10, size=(5, 5), dtype=np.uint64).astype(np.complex64) B = np.random.randint(-10, high=0, size=(5, 5), dtype=np.int32).astype(np.float64) C = add(A, B) assert(np.linalg.norm(C - A - B) / np.linalg.norm(A + B) < 1e-12) @dace.program def complex_conversion(a: dace.complex128[1], b: dace.int32): return a[0] + b def test_complex_conversion(): a = np.zeros((1,), dtype=np.complex128) a[0] = 5 + 6j b = 7 c = complex_conversion(a=a, b=b) assert(c[0] == 12 + 6j) @dace.program def float_conversion(a: dace.float32, b: dace.int64): return a + b def test_float_conversion(): a = np.float32(5.2) b = np.int64(7) c = float_conversion(a=a, b=b) assert(c[0] == a + b) if __name__ == "__main__": test_add() test_complex_conversion() test_float_conversion() ``` #### File: tests/python_frontend/power_operator_test.py ```python import dace import numpy as np @dace.program def pow_num_literals(a: dace.int64[1]): a[0] = 2 ** 3 def test_pow_num_literals(): res = np.zeros((1,), dtype=np.int64) pow_num_literals(a=res) assert(res[0] == 8) @dace.program def pow_op_preced(a: dace.int64[1]): a[0] = -1 ** 2 def test_pow_op_preced(): res = np.zeros((1,), dtype=np.int64) pow_op_preced(a=res) assert(res[0] == -1) @dace.program def pow_neg_exp(a: dace.float64[1]): a[0] = 10 ** -2 def test_pow_neg_exp(): res = np.zeros((1,), dtype=np.float64) pow_neg_exp(a=res) assert(res[0] == 0.01) if __name__ == "__main__": test_pow_num_literals() test_pow_op_preced() test_pow_neg_exp() ``` #### File: dace/tests/scalar_output_cudatest.py ```python import numpy as np import pytest import dace import dace.libraries.blas as blas @pytest.mark.gpu def test_dot_gpu(): @dace.program def dot(x: dace.float64[20], y: dace.float64[20]): return x @ y x = np.random.rand(20) y = np.random.rand(20) reference = x @ y sdfg = dot.to_sdfg() sdfg.apply_gpu_transformations() # Expand pure version oldimpl = blas.default_implementation blas.default_implementation = 'pure' daceres = sdfg(x=x, y=y) # Revert default implementation blas.default_implementation = oldimpl assert np.allclose(daceres, reference) @pytest.mark.gpu def test_scalar_output(): @dace.program def scaltest(A: dace.float64[20, 20]): scal = dace.define_local_scalar(dace.float64) for _ in dace.map[0:1]: with dace.tasklet: inp << A[1, 1] out >> scal out = inp + 5 return scal sdfg = scaltest.to_sdfg() sdfg.apply_gpu_transformations() A = np.random.rand(20, 20) ret = sdfg(A=A) assert np.allclose(ret, A[1, 1] + 5) @pytest.mark.gpu def test_scalar_output_ptr_access(): sdfg = dace.SDFG("scalptrtest") state = sdfg.add_state() sdfg.add_scalar("scal", dace.float64, transient=True, storage=dace.dtypes.StorageType.GPU_Global) sdfg.add_array("__return", [1], dace.float64) tasklet = state.add_tasklet( "write", {}, {"outp": dace.pointer(dace.float64)}, """ double a = 5; cudaMemcpyAsync(outp, &a, 1 * sizeof(double), cudaMemcpyHostToDevice, __state->gpu_context->streams[0]); """, language=dace.dtypes.Language.CPP, ) access_scal = state.add_access("scal") write_unsqueezed = state.add_write("__return") state.add_edge(tasklet, "outp", access_scal, None, sdfg.make_array_memlet("scal")) state.add_edge(access_scal, None, write_unsqueezed, None, sdfg.make_array_memlet("scal")) ret = sdfg() assert np.allclose(ret, 5) if __name__ == '__main__': test_dot_gpu() test_scalar_output() 
test_scalar_output_ptr_access() ``` #### File: dace/tests/stream_test.py ```python import dace def test(): s = dace.define_stream() S = dace.define_streamarray([2, 2]) for i in range(6): s[0].append(i) for j in range(2): S[0, j].append(i + j) S[1, j].append(i + j * 10) results = [] while len(s[0]): results.append(s[0].popleft()) while len(S[1, 1]): results.append(S[1, 1].popleft()) assert results == [0, 1, 2, 3, 4, 5, 10, 11, 12, 13, 14, 15] def test_consume_python(): inputs = [1,2,3,5,1] S = dace.stream(inputs) result = [] for s in dace.consume(S): result.append(s) assert inputs == list(reversed(result)) if __name__ == "__main__": test() test_consume_python() ``` #### File: tests/transformations/apply_to_test.py ```python import dace from dace.sdfg import utils as sdutil from dace.transformation.dataflow import MapFusion from dace.transformation.subgraph import SubgraphFusion from dace.transformation.pattern_matching import enumerate_matches @dace.function def dbladd(A: dace.float64[100, 100], B: dace.float64[100, 100]): dbl = B return A + dbl * B def test_applyto_pattern(): sdfg = dbladd.to_sdfg() sdfg.apply_strict_transformations() # Since there is only one state (thanks to StateFusion), we can use the # first one in the SDFG state = sdfg.node(0) # The multiplication map is called "_Mult__map" (see above graph), we can # query it mult_exit = next( n for n in state.nodes() if isinstance(n, dace.nodes.MapExit) and n.label == '_Mult__map') # Same goes for the addition entry add_entry = next( n for n in state.nodes() if isinstance(n, dace.nodes.MapEntry) and n.label == '_Add__map') # Since all redundant arrays have been removed by strict transformations, # we can get the only transient array that remains in the graph transient = next(aname for aname, desc in sdfg.arrays.items() if desc.transient) access_node = next( n for n in state.nodes() if isinstance(n, dace.nodes.AccessNode) and n.data == transient) MapFusion.apply_to(sdfg, first_map_exit=mult_exit, array=access_node, second_map_entry=add_entry) def test_applyto_enumerate(): sdfg = dbladd.to_sdfg() sdfg.apply_strict_transformations() # Construct subgraph pattern pattern = sdutil.node_path_graph(dace.nodes.MapExit, dace.nodes.AccessNode, dace.nodes.MapEntry) for subgraph in enumerate_matches(sdfg, pattern): MapFusion.apply_to(sdfg, first_map_exit=subgraph.source_nodes()[0], array=next(n for n in subgraph.nodes() if isinstance(n, dace.nodes.AccessNode)), second_map_entry=subgraph.sink_nodes()[0]) def test_applyto_subgraph(): sdfg = dbladd.to_sdfg() sdfg.apply_strict_transformations() state = sdfg.node(0) # Apply to subgraph SubgraphFusion.apply_to(sdfg, state.nodes()) if __name__ == '__main__': test_applyto_pattern() test_applyto_enumerate() test_applyto_subgraph() ```
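The DaCe record above defines the `InstrumentationReport` parser, and its `tuning.py` sample reads timing data back through it. Below is a short, hypothetical sketch of that workflow, assuming an instrumented SDFG has already produced a `report-*.json` file; the concrete path is made up for illustration.

```python
# Hypothetical sketch: loading and inspecting a DaCe instrumentation report.
# The JSON path is illustrative; reports are normally obtained via sdfg.get_latest_report().
import numpy as np
from dace.codegen.instrumentation.report import InstrumentationReport

report = InstrumentationReport('.dacecache/myprog/perf/report-1620000000.json')

# Sort the printed table by mean runtime, slowest first, then show it.
report.sortby('Mean', ascending=False)
print(report)

# Durations are keyed by (sdfg_id, state_id, node_id) and event name, in milliseconds.
for uuid, events in report.durations.items():
    for name, times in events.items():
        print(uuid, name, '%.3f ms median' % np.median(times))
```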
{ "source": "jnice-81/FpgaHbmForDaCe", "score": 2 }
#### File: FpgaHbmForDaCe/Evaluation/hbm_axpy_dot_based.py
```python
from typing import List

import dace
from dace import subsets
from dace import memlet
from dace import dtypes
from dace.sdfg.sdfg import InterstateEdge, SDFG
from dace.sdfg.state import SDFGState
from dace.transformation.interstate.sdfg_nesting import NestSDFG
from dace.transformation.optimizer import Optimizer
from dace.transformation.interstate import InlineSDFG, FPGATransformSDFG
from dace.transformation.dataflow import StripMining
from dace.sdfg import graph, nodes, propagation, utils
from dace.libraries.blas.nodes import dot

from hbm_transform import HbmTransform
from hbm_bank_split import HbmBankSplit
from hbm_transform import set_shape
from hbm_transform import transform_sdfg_for_hbm
from hbm_transform import all_innermost_edges
from helper import *

######## Simple base versions of the pure blas applications without HBM use

def simple_vadd_sdfg(N, vec_len=16, tile_size=4096):
    alpha = dace.symbol("alpha", dtype=dace.float32)

    @dace.program
    def axpy(x: dace.vector(dace.float32, vec_len)[N/vec_len],
             y: dace.vector(dace.float32, vec_len)[N/vec_len],
             z: dace.vector(dace.float32, vec_len)[N/vec_len]):
        for i in dace.map[0:N/vec_len]:
            with dace.tasklet:
                xin << x[i]
                yin << y[i]
                zout >> z[i]
                zout = xin + yin * alpha

    sdfg = axpy.to_sdfg()
    sdfg.apply_strict_transformations()
    sdfg.apply_transformations(StripMining, {"tile_size": tile_size, "divides_evenly": True})
    map = get_first_node(sdfg.start_state,
                         lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "i")
    map.map.schedule = dtypes.ScheduleType.FPGA_Device
    return sdfg

def simple_dot_sdfg(N, tile_size=8192):
    sdfg: SDFG = SDFG("dot")
    state = sdfg.add_state()
    sdfg.add_array("x", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
    sdfg.add_array("y", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
    sdfg.add_array("result", [1], dace.float32, dtypes.StorageType.FPGA_Global)
    lib_node = dot.Dot("dot")
    state.add_node(lib_node)
    read_x = state.add_read("x")
    read_y = state.add_read("y")
    write_result = state.add_write("result")
    state.add_edge(read_x, None, lib_node, "_x", memlet.Memlet("x"))
    state.add_edge(read_y, None, lib_node, "_y", memlet.Memlet("y"))
    state.add_edge(lib_node, "_result", write_result, None, memlet.Memlet(f"result"))
    lib_node.implementation = "FPGA_PartialSums"
    lib_node.expand(sdfg, state, partial_width=64, n=N)
    sdfg.arrays["x"].storage = dtypes.StorageType.Default
    sdfg.arrays["y"].storage = dtypes.StorageType.Default
    sdfg.arrays["result"].storage = dtypes.StorageType.Default

    strip_map = get_first_node(state,
                               lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream")
    for nsdfg in sdfg.all_sdfgs_recursive():
        if nsdfg.states()[0].label == "stream":
            StripMining.apply_to(nsdfg, {"tile_size": tile_size, "divides_evenly": True},
                                 _map_entry=strip_map)
            state = nsdfg.start_state
            tile_map = get_first_node(state,
                                      lambda x: isinstance(x, nodes.MapEntry)
                                      and x.label == "stream" and x.map.params[0] == "i")
            tile_map.map.schedule = dtypes.ScheduleType.FPGA_Device
            break
    return sdfg

######### On Device HBM-implementations of pure blas

def hbm_axpy_sdfg(banks_per_input: int):
    N = dace.symbol("N")
    sdfg = simple_vadd_sdfg(N)
    map = get_first_node(sdfg.start_state,
                         lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "tile_i")
    banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
             "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
             "z": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input])}
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), banks,
                           {(map, 0): banks_per_input})
    return sdfg

def hbm_dot_sdfg(banks_per_input: int):
    N = dace.symbol("N")
    sdfg = simple_dot_sdfg(N)
    state = sdfg.states()[0]

    for edge, state in sdfg.all_edges_recursive():
        if isinstance(edge, graph.MultiConnectorEdge):
            if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "_result":
                edge.data.other_subset = subsets.Range.from_string("k")
                set_shape(state.parent.arrays["_result"], [banks_per_input])
            if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "result":
                #one cannot update the other_subset. Leads to problems with out of bounds checking
                #edge.data.other_subset = subsets.Range.from_string("k")
                set_shape(state.parent.arrays["result"], [banks_per_input])

    array_banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
                   "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
                   "result": ("DDR", "0", None)}
    div_map = get_first_node(state,
                             lambda x: isinstance(x, nodes.MapEntry)
                             and x.label == "stream" and x.map.params[0] == "tile_i")
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_banks,
                           {(div_map.map, 0): banks_per_input}, True)
    return sdfg

######### Full implementations of pure blas applications

def only_hbm_axpy_sdfg(banks_per_input: int):
    sdfg = hbm_axpy_sdfg(banks_per_input)
    sdfg.apply_fpga_transformations()
    sdfg.apply_transformations_repeated(InlineSDFG)
    z_access1 = get_first_node(sdfg.start_state,
                               lambda x: isinstance(x, nodes.AccessNode) and x.data == "z")
    sdfg.start_state.remove_nodes_from([sdfg.start_state.out_edges(z_access1)[0].dst, z_access1])
    distribute_along_dim0(sdfg, ["x", "y", "z"])
    return sdfg

def _modify_dot_host_side(sdfg, start_state, end_state):
    # Add final reduction
    state = end_state
    host_result = get_first_node(state,
                                 lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    sum_up = state.add_reduce("lambda a, b : a + b", None, 0)
    sdfg.add_array("final_result", [1], dace.float32)
    host_final = state.add_access("final_result")
    state.add_edge(host_result, None, sum_up, None, memlet.Memlet("result"))
    state.add_edge(sum_up, None, host_final, None, memlet.Memlet("final_result[0]"))
    sum_up.expand(sdfg, state)
    sdfg.apply_transformations(InlineSDFG)

    # Remove copy result
    state = start_state
    access_result_start = get_first_node(state,
                                         lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    state.remove_nodes_from([state.out_edges(access_result_start)[0].dst, access_result_start])
    sdfg.arrays["result"].transient = True

def only_hbm_dot_sdfg(banks_per_input: int):
    sdfg = hbm_dot_sdfg(banks_per_input)
    sdfg.apply_fpga_transformations()
    sdfg.apply_transformations_repeated(InlineSDFG)
    distribute_along_dim0(sdfg, ["x", "y"])
    _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
    return sdfg

def hbm_axpy_dot(banks_per_input: int):
    N = dace.symbol("N")
    axpy_sdfg = simple_vadd_sdfg(N, vec_len=8, tile_size=8192)
    dot_sdfg = simple_dot_sdfg(N, tile_size=8192)

    sdfg = SDFG("axpydot")
    sdfg.add_symbol("alpha", dace.float32)
    state = sdfg.add_state()
    sdfg.add_array("axpy_x", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("axpy_y", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("dot_y", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("middle", [N//8], dace.vector(dace.float32, 8), transient=True)
    sdfg.add_array("result", [banks_per_input], dace.float32)
    acc_axpy_x = state.add_access("axpy_x")
    acc_axpy_y = state.add_access("axpy_y")
    acc_dot_y = state.add_access("dot_y")
    acc_middle = state.add_access("middle")
    acc_result = state.add_access("result")
    axpynode = state.add_nested_sdfg(axpy_sdfg, sdfg, set(["x", "y", "z"]), set(["z"]),
                                     {"N": N, "alpha": "alpha"})
    dotnode = state.add_nested_sdfg(dot_sdfg, sdfg, set(["x", "y", "result"]), set(["result"]),
                                    {"N": N})
    acc_middle_dummy = state.add_access("middle")
    acc_middle_dummy_2 = state.add_access("middle")
    acc_result_dummy = state.add_access("result")
    state.add_edge(acc_axpy_x, None, axpynode, "x", memlet.Memlet("axpy_x"))
    state.add_edge(acc_axpy_y, None, axpynode, "y", memlet.Memlet("axpy_y"))
    state.add_edge(acc_middle_dummy, None, axpynode, "z", memlet.Memlet("middle"))
    state.add_edge(axpynode, "z", acc_middle, None, memlet.Memlet("middle"))
    state.add_edge(acc_middle_dummy_2, None, dotnode, "x", memlet.Memlet("middle"))
    state.add_edge(acc_dot_y, None, dotnode, "y", memlet.Memlet("dot_y"))
    state.add_edge(acc_result_dummy, None, dotnode, "result", memlet.Memlet("result"))
    state.add_edge(dotnode, "result", acc_result, None, memlet.Memlet("result"))
    sdfg.apply_transformations_repeated(InlineSDFG)

    def _nodes_from_path(path):
        nodes = [path[0].src]
        for edge in path:
            nodes.append(edge.dst)
        return nodes

    sdfg.add_stream("connect", dace.vector(dace.float32, 8), 128, [banks_per_input],
                    storage=dtypes.StorageType.FPGA_Local, transient=True)
    old_acc_node = get_first_node(state,
                                  lambda x: isinstance(x, nodes.AccessNode)
                                  and x.data == "middle" and state.in_degree(x) == 1)
    update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
    old_acc_node = get_first_node(state,
                                  lambda x: isinstance(x, nodes.AccessNode)
                                  and x.data == "middle" and state.out_degree(x) == 1)
    update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
    acc_result = get_first_node(state,
                                lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    path = state.memlet_path(state.in_edges(acc_result)[0])
    path[0].data.subset = subsets.Range.from_string("k")

    modification_map_axpy = get_first_node(state,
                                           lambda x: isinstance(x, nodes.MapEntry)
                                           and "axpy" in x.label and x.params[0] == "tile_i")
    modification_map_dot = get_first_node(state,
                                          lambda x: isinstance(x, nodes.MapEntry)
                                          and x.label == "stream" and x.params[0] == "tile_i")
    array_updates = {"axpy_x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
                     "axpy_y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
                     "dot_y": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input]),
                     "result": ("DDR", "0", None)}
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_updates,
                           {(modification_map_axpy, 0): banks_per_input,
                            (modification_map_dot, 0): banks_per_input})

    # Fpga transform cannot be applied here, because stream is not in a map, and because there
    # are FPGA storagetypes and schedules around. However since the actual application of
    # FPGATransform works non-destructive we just force application here
    fpga_xform = FPGATransformSDFG(sdfg.sdfg_id, -1, {}, -1)
    fpga_xform.apply(sdfg)

    sdfg.apply_transformations_repeated(InlineSDFG)
    _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
    return sdfg
```
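A short, hedged sketch of how the builders above might be driven; the bank count and output path are illustrative choices, not taken from the repository, and compiling for a real FPGA target would additionally require a configured Xilinx toolchain:

```python
# Hypothetical driver: build the HBM axpy SDFG with 4 banks per input array
# and serialize it to disk for inspection.
from hbm_axpy_dot_based import hbm_axpy_sdfg

sdfg = hbm_axpy_sdfg(4)        # 4 HBM banks per input (illustrative choice)
sdfg.save("axpy_hbm.sdfg")     # write the transformed SDFG to a file
```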
{ "source": "jnicho02/interview-alexa-skill", "score": 3 }
#### File: interview-alexa-skill/tests/test_handler.py
```python
import unittest

import handler
from tests.mock_alexa import MockAlexa


class TestHandler(unittest.TestCase):

    def test_launch(self):
        alexa = MockAlexa("interview", handler)
        alexa_says = alexa.ask_text("open interview")
        assert "Hello" in alexa_says

    def test_timeout(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.timeout()
        assert "Thank you for speaking to me" in alexa_says

    def test_help(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.ask_text("help")
        assert "Hello" in alexa_says

    def test_exit(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.ask_text("exit")
        assert "Thank you for speaking to me" in alexa_says

    def test_introduction_intent(self):
        intent = {"name": "IntroduceYourself"}
        alexa_says = handler.introduce_yourself(intent)
        assert "my name is Alexa" in alexa_says

    # exercises the same intent through the spoken interface
    def test_introduction_intent_spoken(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.ask_text("say hello")
        assert "my name is Alexa" in alexa_says

    def test_say_how_is_this_talk_going(self):
        intent = {
            "name": "HowsItGoing",
            "slots": {
                "subject": {
                    "name": "subject",
                    "value": "this talk"
                }
            }
        }
        response = handler.hows_it_going(intent)
        assert "I think you are mad" in response \
            or "Did you think this through" in response \
            or "I wish you good luck" in response \
            or "I am here to help" in response

    def test_how_is_this_talk_going(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.ask_text("how is this talk going")
        assert "I think you are mad" in alexa_says \
            or "Did you think this through" in alexa_says \
            or "I wish you good luck" in alexa_says \
            or "I am here to help" in alexa_says

    def test_how_is_this_conference_going(self):
        alexa = MockAlexa("interview", handler)
        alexa.ask("open interview")
        alexa_says = alexa.ask_text("how is the conference going")
        assert "wonderfully" in alexa_says
```
{ "source": "jnicho02/osm-to-geojson-api", "score": 2 }
#### File: jnicho02/osm-to-geojson-api/wikidata.py
```python
import json

from qwikidata.sparql import return_sparql_query_results


class Wikidata():
    def __init__(self, wikidata_id: str):
        self.wikidata_id = wikidata_id
        query_string = """
SELECT ?operatorLabel ?image
WHERE
{
  BIND( <http://www.wikidata.org/entity/%s> as ?recycling_centre )
  OPTIONAL { ?recycling_centre wdt:P18 ?image. }
  OPTIONAL { ?recycling_centre wdt:P137 ?operator. }
  SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}""" % (wikidata_id)
        res = return_sparql_query_results(query_string)
        props = res["results"]["bindings"][0]
        self.properties = {}
        for key in props.keys():
            self.properties[key.replace('Label', '')] = props[key]['value']

# recycling_centres = """
# SELECT ?item ?itemLabel
# WHERE
# {
#   ?item wdt:P31 wd:Q27106436.
#   SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
# }"""
```
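A minimal usage sketch for the class above; the entity ID is only a placeholder, not one used by the project:

```python
# Hypothetical example: look up the operator/image properties of an entity.
# "Q42" is a made-up choice of Wikidata ID for illustration.
from wikidata import Wikidata

entry = Wikidata("Q42")
print(entry.properties)  # e.g. {'operator': '...', 'image': '...'} when the optionals are bound
```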
{ "source": "jnicho02/pywind", "score": 2 }
#### File: pywind/pywind/log.py
```python
import logging


def setup_logging(debug=False, stdout=True, request_logging=False, filename=None):
    """ Setup the logging for pywind.

    :param debug: Enable debug level messages (default False)
    :param stdout: Enable logging to stdout (default True)
    :param request_logging: Enable full logging of network requests (default False)
    :param filename: Filename to use for log.
    """
    logger = logging.getLogger('pywind')
    logger.setLevel(logging.INFO if debug is False else logging.DEBUG)

    if stdout:
        stdh = logging.StreamHandler()
        logger.addHandler(stdh)

    if filename is not None:
        fileh = logging.FileHandler(filename)
        filefmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        fileh.setFormatter(filefmt)
        logger.addHandler(fileh)

    if request_logging:
        try:
            import http.client as http_client
        except ImportError:
            import httplib as http_client
        http_client.HTTPConnection.debuglevel = 1
        requests_log = logging.getLogger('requests.packages.urllib3')
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
```
#### File: pywind/ofgem/cmd.py
```python
import sys

from pywind.ofgem.search import CertificateSearch, StationSearch


def ofgem_certificate_search(args):
    """ Ofgem Certificate Search """
    if args.period is None and args.scheme is None:
        print("You must supply at least a period or scheme.")
        sys.exit(0)

    ocs = CertificateSearch()
    if ocs.start() is False:
        print("Unable to get the form from Ofgem website.")
        sys.exit(0)

    if args.period is not None:
        if ocs.set_period(args.period) is False:
            print("There was an error setting the period.")
            sys.exit(0)

    if args.scheme is not None:
        if ocs.filter_scheme(args.scheme) is False:
            print("There was an error setting the scheme.")
            sys.exit(0)

    if ocs.get_data() is False:
        print("Unable to get the data from Ofgem.")
        sys.exit(0)

    print("A total of {} certificate records have been extracted".format(len(ocs)))
    return ocs


def ofgem_station_search(args):
    """ Ofgem Station Search """
    oss = StationSearch()
    if oss.start() is False:
        print("Unable to get the form from the Ofgem website")
        sys.exit(0)

    if args.station is not None:
        oss.filter_name(args.station)

    if oss.get_data() is False:
        print("Unable to get any records.")
        sys.exit(0)

    print("Total of {} station records returned.".format(len(oss)))
    # todo - add output
    return oss
```
#### File: pywind/sample_scripts/derived_unit_data.py
```python
import argparse
from datetime import datetime, timedelta, date

from pywind.bmreports.unit import UnitData


def mkdate(datestr):
    return datetime.strptime(datestr, '%Y-%m-%d').date()


def main():
    parser = argparse.ArgumentParser(description='Get Constraint Payment information for yesterday')
    parser.add_argument('--date', action='store', type=mkdate, help='Date to get results for')
    parser.add_argument('--period', action='store', help='Period to get data for')
    args = parser.parse_args()

    data = {}
    ud = UnitData({'date': args.date or date.today() - timedelta(days=2)})
    # only restrict to a single period when one was supplied,
    # otherwise query all 48 settlement periods
    pr = [args.period] if args.period else range(1, 49)
    for period in pr:
        ud.period = period
        if ud.get_data():
            data[period] = ud.data
        else:
            print("Unable to get data for %s, period %d" % (ud.date.strftime("%d %b %Y"), period))

    for period, units in sorted(data.items()):
        print("Period: ", period)
        for unit in sorted(units, key=lambda x: x['ngc']):
            print(" ", unit['ngc'], unit['lead'])
            if 'volume' in unit['bid']:
                print(" BID: ", unit['bid']['volume']+'MWh ', unit['bid']['cashflow'])
            if 'volume' in unit['offer']:
                print(" OFFER: ", unit['offer']['volume']+'MWh ', unit['offer']['cashflow'])


if __name__ == '__main__':
    main()
```
#### File: pywind/tests/form_data_test.py
```python
import os
from pprint import pprint
from unittest import TestCase

from pywind.ofgem.form import _make_url
from pywind.ofgem.form_data import FormData


class UrlTest(TestCase):
    """ Tests for the basic url function we use. """

    def test_01(self):
        """ URL Tests """
        for case in [
            ('Default.aspx', False,
             'https://www.renewablesandchp.ofgem.gov.uk/Default.aspx'),
            ('/ReportViewer.aspx', True,
             'https://www.renewablesandchp.ofgem.gov.uk/ReportViewer.aspx'),
            ('./ReportViewer.aspx', True,
             'https://www.renewablesandchp.ofgem.gov.uk/Public/ReportViewer.aspx')
        ]:
            self.assertEqual(_make_url(case[0], case[1]), case[2])


class FormDataTest(TestCase):
    """ Tests for the FormData class. """
    HERE = os.path.dirname(__file__)

    def test_01(self):
        """ Parse and test files/ofgem_station_search.html (this will take a while...) """
        fnn = os.path.join(self.HERE, 'files', 'ofgem_station_search.html')
        with open(fnn, 'r') as cfh:
            content = cfh.read()
        self.assertIsNotNone(content)
        ofd = FormData(content)
        self.assertIsInstance(ofd, FormData)
        self.assertEqual(len(ofd.elements), 117)
        # Check for some elements...
        for name in ['__VIEWSTATE',
                     'ReportViewer$ctl03$ctl00',
                     'ReportViewer$ctl11',
                     'ReportViewer$AsyncWait$HiddenCancelField',
                     'ReportViewer$ctl04$ctl03$ddValue',
                     'ReportViewer$ctl04$ctl05$txtValue',
                     'ReportViewer$ctl04$ctl25$cbNull']:
            self.assertTrue(name in ofd.elements)
        self.assertTrue('__ASYNCPOST' in ofd.elements)
        self.assertEqual(ofd.elements['__ASYNCPOST'], {'value': 'true'})

    def test_02(self):
        """ Parse and test files/ofgem_certificate_search.html """
        fnn = os.path.join(self.HERE, 'files', 'ofgem_certificate_search.html')
        with open(fnn, 'r') as cfh:
            content = cfh.read()
        self.assertIsNotNone(content)
        ofd = FormData(content)
        self.assertIsInstance(ofd, FormData)
        self.assertTrue('__ASYNCPOST' in ofd.elements)
        self.assertEqual(ofd.elements['__ASYNCPOST'], {'value': 'true'})

    # def test_03(self):
    #     """ Parse and test files/ofgem_station_search.html (this will take a while...) """
    #     fnn = os.path.join(self.HERE, 'files', 'ofgem_certificate_search.html')
```
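A hedged example of wiring up the logger defined in log.py above; the log file name is purely illustrative:

```python
# Hypothetical example: enable debug-level logging to stdout and to a file.
from pywind.log import setup_logging

setup_logging(debug=True, stdout=True, filename="pywind.log")  # illustrative path
```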
{ "source": "jnichols0/strictyaml", "score": 2 }
#### File: strictyaml/strictyaml/dumper.py
```python
from __future__ import absolute_import

from strictyaml.ruamel.representer import RoundTripRepresenter
from strictyaml.ruamel.scalarstring import ScalarString
from strictyaml.ruamel.emitter import Emitter
from strictyaml.ruamel.serializer import Serializer
from strictyaml.ruamel.resolver import BaseResolver

import sys

if sys.version_info[0] == 3:
    RoundTripRepresenter.add_representer(
        ScalarString, RoundTripRepresenter.represent_str
    )
else:
    RoundTripRepresenter.add_representer(
        ScalarString, RoundTripRepresenter.represent_unicode
    )


class StrictYAMLResolver(BaseResolver):
    def __init__(self, version=None, loader=None):
        BaseResolver.__init__(self, loader)


class StrictYAMLDumper(Emitter, Serializer, RoundTripRepresenter, StrictYAMLResolver):
    def __init__(
        self,
        stream,
        default_style=None,
        default_flow_style=None,
        canonical=None,
        indent=None,
        width=None,
        allow_unicode=None,
        line_break=None,
        encoding=None,
        explicit_start=None,
        explicit_end=None,
        version=None,
        tags=None,
        block_seq_indent=None,
        top_level_colon_align=None,
        prefix_colon=None,
    ):
        # type: (Any, StreamType, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None  # NOQA
        Emitter.__init__(
            self,
            stream,
            canonical=canonical,
            indent=indent,
            width=width,
            allow_unicode=allow_unicode,
            line_break=line_break,
            block_seq_indent=block_seq_indent,
            top_level_colon_align=top_level_colon_align,
            prefix_colon=prefix_colon,
            dumper=self,
        )
        Serializer.__init__(
            self,
            encoding=encoding,
            explicit_start=explicit_start,
            explicit_end=explicit_end,
            version=version,
            tags=tags,
            dumper=self,
        )
        RoundTripRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=self,
        )
        StrictYAMLResolver.__init__(self, loader=self)
```
#### File: strictyaml/ruamel/util.py
```python
from __future__ import absolute_import, print_function

from functools import partial
import re

from .compat import text_type, binary_type

if False:  # MYPY
    from typing import Any, Dict, Optional, List, Text  # NOQA
    from .compat import StreamTextType  # NOQA


class LazyEval(object):
    """
    Lightweight wrapper around lazily evaluated func(*args, **kwargs).

    func is only evaluated when any attribute of its return value is accessed.
    Every attribute access is passed through to the wrapped value.
    (This only excludes special cases like method-wrappers, e.g., __hash__.)
    The sole additional attribute is the lazy_self function which holds the
    return value (or, prior to evaluation, func and arguments), in its closure.
    """

    def __init__(self, func, *args, **kwargs):
        # type: (Any, Any, Any) -> None
        def lazy_self():
            # type: () -> Any
            return_value = func(*args, **kwargs)
            object.__setattr__(self, "lazy_self", lambda: return_value)
            return return_value

        object.__setattr__(self, "lazy_self", lazy_self)

    def __getattribute__(self, name):
        # type: (Any) -> Any
        lazy_self = object.__getattribute__(self, "lazy_self")
        if name == "lazy_self":
            return lazy_self
        return getattr(lazy_self(), name)

    def __setattr__(self, name, value):
        # type: (Any, Any) -> None
        setattr(self.lazy_self(), name, value)


RegExp = partial(LazyEval, re.compile)


# originally as comment
# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
# if you use this in your code, I suggest adding a test in your test suite
# that check this routines output against a known piece of your YAML
# before upgrades to this code break your round-tripped YAML
def load_yaml_guess_indent(stream, **kw):
    # type: (StreamTextType, Any) -> Any
    """guess the indent and block sequence indent of yaml stream/string

    returns round_trip_loaded stream, indent level, block sequence indent
    - block sequence indent is the number of spaces before a dash relative to previous indent
    - if there are no block sequences, indent is taken from nested mappings, block sequence
      indent is unset (None) in that case
    """
    from .main import round_trip_load

    # load a YAML document, guess the indentation, if you use TABs you're on your own
    def leading_spaces(line):
        # type: (Any) -> int
        idx = 0
        while idx < len(line) and line[idx] == " ":
            idx += 1
        return idx

    if isinstance(stream, text_type):
        yaml_str = stream  # type: Any
    elif isinstance(stream, binary_type):
        # most likely, but the Reader checks BOM for this
        yaml_str = stream.decode("utf-8")
    else:
        yaml_str = stream.read()
    map_indent = None
    indent = None  # default if not found for some reason
    block_seq_indent = None
    prev_line_key_only = None
    key_indent = 0
    for line in yaml_str.splitlines():
        rline = line.rstrip()
        lline = rline.lstrip()
        if lline.startswith("- "):
            l_s = leading_spaces(line)
            block_seq_indent = l_s - key_indent
            idx = l_s + 1
            while line[idx] == " ":  # this will end as we rstripped
                idx += 1
            if line[idx] == "#":  # comment after -
                continue
            indent = idx - key_indent
            break
        if map_indent is None and prev_line_key_only is not None and rline:
            idx = 0
            while line[idx] in " -":
                idx += 1
            if idx > prev_line_key_only:
                map_indent = idx - prev_line_key_only
        if rline.endswith(":"):
            key_indent = leading_spaces(line)
            idx = 0
            while line[idx] == " ":  # this will end on ':'
                idx += 1
            prev_line_key_only = idx
            continue
        prev_line_key_only = None
    if indent is None and map_indent is not None:
        indent = map_indent
    return round_trip_load(yaml_str, **kw), indent, block_seq_indent


def configobj_walker(cfg):
    # type: (Any) -> Any
    """
    walks over a ConfigObj (INI file with comments) generating corresponding YAML output
    (including comments
    """
    from configobj import ConfigObj  # type: ignore

    assert isinstance(cfg, ConfigObj)
    for c in cfg.initial_comment:
        if c.strip():
            yield c
    for s in _walk_section(cfg):
        if s.strip():
            yield s
    for c in cfg.final_comment:
        if c.strip():
            yield c


def _walk_section(s, level=0):
    # type: (Any, int) -> Any
    from configobj import Section

    assert isinstance(s, Section)
    indent = u" " * level
    for name in s.scalars:
        for c in s.comments[name]:
            yield indent + c.strip()
        x = s[name]
        if u"\n" in x:
            i = indent + u" "
            x = u"|\n" + i + x.strip().replace(u"\n", u"\n" + i)
        elif ":" in x:
            x = u"'" + x.replace(u"'", u"''") + u"'"
        line = u"{0}{1}: {2}".format(indent, name, x)
        c = s.inline_comments[name]
        if c:
            line += u" " + c
        yield line
    for name in s.sections:
        for c in s.comments[name]:
            yield indent + c.strip()
        line = u"{0}{1}:".format(indent, name)
        c = s.inline_comments[name]
        if c:
            line += u" " + c
        yield line
        for val in _walk_section(s[name], level=level + 1):
            yield val


# def config_obj_2_rt_yaml(cfg):
#     from .comments import CommentedMap, CommentedSeq
#     from configobj import ConfigObj
#     assert isinstance(cfg, ConfigObj)
#     #for c in cfg.initial_comment:
#     #    if c.strip():
#     #        pass
#     cm = CommentedMap()
#     for name in s.sections:
#         cm[name] = d = CommentedMap()
#
#     #for c in cfg.final_comment:
#     #    if c.strip():
#     #        yield c
#     return cm
```
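A small usage sketch for the load_yaml_guess_indent helper vendored above; the YAML snippet is made up for illustration:

```python
# Hypothetical example: round-trip load a document and recover its indentation.
from strictyaml.ruamel.util import load_yaml_guess_indent

document = """\
servers:
  - name: alpha
    port: 8080
"""
data, indent, block_seq_indent = load_yaml_guess_indent(document)
print(indent, block_seq_indent)  # indentation level and dash offset guessed from the text
```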
{ "source": "jnicholson56/irrtree", "score": 2 }
#### File: irrtree/irrtree/cli.py
```python
from collections import OrderedDict as OD
from queue import Queue

import asciitree
import getopt
import irrtree
import progressbar
import re
import socket
import sys


class server():
    irr_host = 'rr.ntt.net'
    irr_port = 43
    afi = 4
    search = False
    sources_list = False


def connect(irr_host, irr_port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((irr_host, irr_port))
    sock_in = sock.makefile('r')
    sock_out = sock.makefile('w')
    return sock, sock_in, sock_out


def send(connection, command):
    sock, sock_in, sock_out = connection
    if debug:
        print("sending: %s" % command)
    sock_out.write(command + '\r\n')
    sock_out.flush()


def receive(connection):
    sock, sock_in, sock_out = connection
    return sock_in.readline()[:-1]


def query(connection, cmd, as_set, recurse=False, search=False):
    query = "!%s%s%s" % (cmd, as_set, ",1" if recurse else "")
    send(connection, query)
    answer = receive(connection)
    if answer == "D":
        return set()
    elif answer[0] == "F":
        if debug:
            print("Error: %s" % answer[1:])
            print("Query was: %s" % query)
    elif answer[0] == "A":
        if debug:
            print("Info: receiving %s bytes" % answer[1:])
        unfiltered = receive(connection).split()
        results = set()
        if cmd == "i":
            for result in unfiltered:
                if re.match(r'^[aA][sS]\d+', result):
                    results.add(result.upper())  # found an autnum
                elif re.match(r'^[aA][sS]-.*', result):
                    results.add(result.upper())  # found as-set
                else:
                    if debug:
                        print("Warning: not honoring mbrs-by-ref for object %s with '%s'"
                              % (as_set, result))
        else:
            results = unfiltered
        if not receive(connection) == "C":
            print("Error: something went wrong with: %s" % query)
        return set(results)


def usage():
    print("IRRtool v%s" % irrtree.__version__)
    print("usage: irrtree [-h host] [-p port] [-l sources] [-d] [-4 | -6] [-s ASXX] <AS-SET>")
    print(" -d,--debug print debug information")
    print(" -4,--ipv4 resolve IPv4 prefixes (default)")
    print(" -6,--ipv6 resolve IPv6 prefixes")
    print(" -l,--list=SOURCES list of sources (e.g.: RIPE,NTTCOM,RADB)")
    print(" -p,--port=PORT port on which IRRd runs (default: 43)")
    print(" -h,--host=HOST hostname to connect to (default: rr.ntt.net)")
    print(" -s,--search=AUTNUM output only related to autnum (in ASXXX format)")
    print("")
    print("Written by <NAME> <<EMAIL>>")
    print("Source: https://github.com/job/irrtree")
    sys.exit()


def resolve_prefixes(db, item):
    all_prefixes = set()
    if "-" not in item:
        return len(db[item])
    for origin in db[item]['origin_asns']:
        all_prefixes |= db[origin]
    return len(all_prefixes)


def process(irr_host, afi, db, as_set, search):
    import datetime
    now = datetime.datetime.now()
    now = now.strftime("%Y-%m-%d %H:%M")
    print("IRRTree (%s) report for '%s' (IPv%i), using %s at %s"
          % (irrtree.__version__, as_set, afi, irr_host, now))

    if search and "-" not in list(db.keys()):
        if search not in list(db.keys()):
            print("NOT_FOUND: %s not present in %s or any of its members" % (search, as_set))
            sys.exit()

    def print_member(as_set, db, search):
        if "-" not in as_set:
            res = "%s (%s pfxs)" % (as_set, resolve_prefixes(db, as_set))
        elif search:
            res = "%s (%s ASNs)" % (as_set, len(db[as_set]['origin_asns']))
        else:
            res = "%s (%s ASNs, %s pfxs)" % (as_set, len(db[as_set]['origin_asns']),
                                             resolve_prefixes(db, as_set))
        return res

    def getasncount(db, item):
        v = db[item]
        if type(v) == set:
            ret = (0, len(v))
        else:
            ret = (len(v['origin_asns']), resolve_prefixes(db, item))
        return ret

    def resolve_tree(as_set, db, tree=OD(), seen=set()):
        seen.add(as_set)
        for member in sorted(db[as_set]['members'],
                             key=lambda x: getasncount(db, x), reverse=True):
            if member in seen:
                tree["%s - already expanded" % print_member(member, db, search)] = {}
                continue
            if "-" in member:
                seen.add(member)
                tree["%s" % print_member(member, db, search)] = resolve_tree(member, db, OD(), seen)
            else:
                if not search or search == member:
                    tree["%s" % print_member(member, db, search)] = {}
                else:
                    continue
        return tree

    tree = OD()
    tree["%s" % print_member(as_set, db, search)] = resolve_tree(as_set, db)

    tr = asciitree.LeftAligned()
    print(tr(tree))


def main():
    global debug
    debug = False

    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:dp:64s:l:",
                                   ["host=", "debug", "port=", "ipv6", "ipv4",
                                    "search=", "list="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()

    for o, a in opts:
        if o == "-d":
            debug = True
        elif o in ("-h", "--host"):
            server.irr_host = a
        elif o in ("-6", "--ipv6"):
            server.afi = 6
        elif o in ("-p", "--port"):
            server.irr_port = int(a)
        elif o in ("-s", "--search"):
            server.search = a.upper()
        elif o in ("-l", "--list"):
            server.sources_list = a

    if not len(args) == 1:
        usage()

    if "-" not in args[0]:
        print("Error: %s does not appear to be an AS-SET" % args[0])
        usage()

    query_object = args[0].upper()
    queue = Queue()
    queue.put(query_object)

    connection = connect(server.irr_host, server.irr_port)
    send(connection, "!!")
    if server.sources_list:
        send(connection, "!s%s" % server.sources_list)
        answer = receive(connection)
        if answer != "C":
            print("Error: %s" % answer)
            sys.exit(2)

    db = {}
    widgets = ['Processed: ', progressbar.Counter(), ' objects (', progressbar.Timer(), ')']
    pbar = progressbar.ProgressBar(widgets=widgets, maxval=2**32)
    if not debug:
        pbar.start()

    counter = 0
    while not queue.empty():
        item = queue.get()
        if debug:
            print("Info: expanding %s" % item)
        if "-" not in item:  # expand aut-nums
            if not server.search or server.search == item:
                prefixes = query(connection, "g" if server.afi == 4 else "6", item, False, False)
            else:
                prefixes = set()
            db[item] = prefixes
            counter += 1
            if not debug:
                pbar.update(counter)
            queue.task_done()
            continue
        db.setdefault(item, {})['members'] = query(connection, "i", item, False, False)
        db[item]['origin_asns'] = query(connection, "i", item, True, False)
        for candidate in db[item]['members'] | db[item]['origin_asns']:
            if candidate not in db and candidate not in queue.queue:
                queue.put(candidate)
        counter += 1
        if not debug:
            pbar.update(counter)
        queue.task_done()

    send(connection, '!q')
    connection[0].close()

    if server.search:
        to_delete = set()
        iter_db = dict(db)
        for item in iter_db:
            if "-" in item:
                if server.search not in db[item]['origin_asns']:
                    del db[item]
                    to_delete.add(item)
        for item in db:
            if "-" in item:
                db[item]['members'] = db[item]['members'] - to_delete

    process(server.irr_host, server.afi, db, query_object, server.search)


def export(*data):
    global debug
    debug = False

    if data:
        args = [data[0]]

    if not len(args) == 1:
        usage()

    if "-" not in args[0]:
        print("Error: %s does not appear to be an AS-SET" % args[0])
        usage()

    query_object = args[0].upper()
    queue = Queue()
    queue.put(query_object)

    connection = connect(server.irr_host, server.irr_port)
    send(connection, "!!")
    if server.sources_list:
        send(connection, "!s%s" % server.sources_list)
        answer = receive(connection)
        if answer != "C":
            print("Error: %s" % answer)
            sys.exit(2)

    db = {}
    counter = 0
    while not queue.empty():
        item = queue.get()
        if debug:
            print("Info: expanding %s" % item)
        db.setdefault(item, {})['members'] = query(connection, "i", item, False, False)
        db[item]['origin_asns'] = query(connection, "i", item, True, False)
        for candidate in db[item]['members'] | db[item]['origin_asns']:
            if candidate not in db and candidate not in queue.queue:
                queue.put(candidate)
        counter += 1
        queue.task_done()

    send(connection, '!q')
    connection[0].close()

    if server.search:
        to_delete = set()
        iter_db = dict(db)
        for item in iter_db:
            if "-" in item:
                if server.search not in db[item]['origin_asns']:
                    del db[item]
                    to_delete.add(item)
        for item in db:
            if "-" in item:
                db[item]['members'] = db[item]['members'] - to_delete

    return(db)


if __name__ == "__main__":
    main()
```
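A hedged sketch of using the export() helper above programmatically rather than through the CLI; the AS-SET name is a placeholder:

```python
# Hypothetical example: expand an AS-SET and inspect the resulting dict.
from irrtree.cli import export, server

server.irr_host = "rr.ntt.net"   # default host, shown only for illustration
db = export("AS-EXAMPLE")        # placeholder AS-SET name
print(len(db), "objects expanded")
```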
{ "source": "jnicol31/compile-commands", "score": 2 }
#### File: compile-commands/src/test_compile_commands.py
```python
from compile_commands import *

DATA = [
    {
        "directory": "/path/to/build/directory",
        "command": "/usr/bin/gcc path/to/file1.c -o path/to/output.o -I..",
        "file": "path/to/file1.c",
    },
    {
        "directory": "/path/to/build/directory",
        "command": "/usr/bin/g++ path/to/file2.cpp -o path/to/output.o -iquote .",
        "file": "path/to/file2.cpp",
    },
    {
        "directory": "/path/to/build/directory",
        "command": "/usr/bin/clang++ path/to/file3.cpp -o path/to/output.o -Isomething",
        "file": "path/to/file3.cpp",
    },
    {
        "directory": "/path/to/build/directory",
        "command": "/usr/bin/clang path/to/file4.c -o path/to/output.o -isystem /path/to/build/directory/include",
        "file": "path/to/file4.c",
    },
]


def test_remove_files():
    assert len(remove_files(DATA, "path/to/file2.cpp")) == 3
    assert (
        len(remove_files(DATA, str("path/to/file1.c,path/to/file2.cpp").split(","))) == 2
    )
    assert len(remove_files(DATA, "path/to/doesnotexist.c")) == 4


def test_include_files():
    assert len(include_files(DATA, "path/to/file2.cpp")) == 1
    assert (
        len(include_files(DATA, str("path/to/file1.c,path/to/file2.cpp").split(","))) == 2
    )
    assert len(include_files(DATA, "path/to/doesnotexist.c")) == 0


def test_absolute_include_paths():
    data = absolute_include_paths(DATA)
    print(data[0]["command"])
    assert data[0]["command"].endswith("-I/path/to/build/directory/..")
    assert data[1]["command"].endswith("-iquote /path/to/build/directory/.")
    assert data[2]["command"].endswith("-I/path/to/build/directory/something")
    assert data[3]["command"].endswith("-isystem /path/to/build/directory/include")


def test_add_flags():
    data = add_flags(DATA, "-flag")
    for entry in data:
        assert "-flag" in entry["command"]


def test_to_gcc():
    data = to_gcc(DATA)
    assert data[0]["command"].startswith("/usr/bin/gcc")
    assert data[1]["command"].startswith("/usr/bin/g++")
    assert data[2]["command"].startswith("/usr/bin/g++")
    assert data[3]["command"].startswith("/usr/bin/gcc")


def test_to_clang():
    data = to_clang(DATA)
    assert data[0]["command"].startswith("/usr/bin/clang")
    assert data[1]["command"].startswith("/usr/bin/clang++")
    assert data[2]["command"].startswith("/usr/bin/clang++")
    assert data[3]["command"].startswith("/usr/bin/clang")


def test_change_compiler_path():
    data = change_compiler_path(DATA, "/usr/local/bin/")
    for entry in data:
        assert entry["command"].startswith("/usr/local/bin/")


def test_filter_files():
    assert len(filter_files(DATA, "file")) == 0
    assert len(filter_files(DATA, "\\.cpp$")) == 2
    assert len(filter_files(DATA, "\\.c$")) == 2


def test_get_compile_dbs():
    assert len(get_compile_dbs("src/tests/compile_commands_tests/")) == 3


def test_merge_json_files():
    assert (
        len(merge_json_files(get_compile_dbs("src/tests/compile_commands_tests/"))) == 6
    )


def test_filter_commands():
    data = filter_commands(DATA, "-o .*\\.o", "")
    for entry in data:
        assert "-o" not in entry["command"] and "output" not in entry["command"]


def test_normalize_cdb():
    data = [
        {"file": "somefile.cpp", "command": "command", "dir": "somedir"},
        {
            "file": "somefile.cpp",
            "arguments": ["gcc", "somefile", "-Iinclude", "-o", "someoutput"],
        },
        {
            "file": "somefile.cpp",
            "arguments": ["command", "with spaces!"],
        },
    ]

    data = normalize_cdb(data)
    for entry in data:
        assert entry.get("command") is not None
        assert entry.get("arguments", 0) == 0

    assert data[1]["command"] == "gcc somefile -Iinclude -o someoutput"
    assert data[2]["command"] == "command 'with spaces!'"
```
{ "source": "jnicolasthouvenin/Deep_Learning_to_play_Connect4", "score": 3 }
#### File: jnicolasthouvenin/Deep_Learning_to_play_Connect4/dataManager.py
```python
import numpy as np
import pandas as pd


class DataManager:

    def __init__(self, root_folder):
        self.root_folder = root_folder
        self.name_values = "values.csv"
        self.name_labels = "labels.csv"
        pass

    def import_x_y(self, id_dataset, size):
        name_file = self.root_folder + id_dataset
        name_file_values = name_file + self.name_values
        name_file_labels = name_file + self.name_labels
        x = pd.read_csv(name_file_values).to_numpy()[0:size, ]
        y = pd.read_csv(name_file_labels).to_numpy()[0:size, ]
        return x, y

    def import_x_y_coupled_dataset(self, id_dataset_win, id_dataset_lost, size):
        half_size = size // 2
        name_file_win = self.root_folder + id_dataset_win
        name_file_lost = self.root_folder + id_dataset_lost
        name_file_values_win = name_file_win + self.name_values
        name_file_labels_win = name_file_win + self.name_labels
        name_file_values_lost = name_file_lost + self.name_values
        name_file_labels_lost = name_file_lost + self.name_labels
        x = np.vstack((pd.read_csv(name_file_values_win).to_numpy()[0:half_size, ],
                       pd.read_csv(name_file_values_lost).to_numpy()[0:half_size, ]))
        y = np.vstack((pd.read_csv(name_file_labels_win).to_numpy()[0:half_size, ],
                       pd.read_csv(name_file_labels_lost).to_numpy()[0:half_size, ]))
        return x, y

    def create_train_test_sets(self, x, y, lenTest):
        """Shuffle the given dataset and split it into a training set and a test set"""
        nbInd = x.shape[0]
        shuffler = np.random.permutation(nbInd)
        x_train = x[shuffler][0:(nbInd - lenTest), ]
        y_train = y[shuffler][0:(nbInd - lenTest), ]
        x_test = x[shuffler][(nbInd - lenTest):nbInd, ]
        y_test = y[shuffler][(nbInd - lenTest):nbInd, ]
        return x_train, y_train, x_test, y_test


DATA_MANAGER = DataManager("data/")
```
#### File: jnicolasthouvenin/Deep_Learning_to_play_Connect4/encoder.py
```python
import numpy as np

from game import *


class Encoder:

    def __init__(self):
        pass

    def filter(self, arr, flt):
        ## Conv layer computation
        covLayer = [0 for i in range(20)]
        j = 0
        for clayer in range(20):
            step = 0
            for i in range(j, j+3):
                covLayer[clayer] += arr[i] * flt[step%3] + arr[i+6] * flt[(step%3)+3] + arr[i+12] * flt[(step%3)+6]
                step += 1
            if j%3==0 and j%6!=0 :
                j += 3
            else:
                j += 1

        ## Max pooling -> 4x5 becomes 2x4
        poolLayer = []
        for col in range(4):
            for row in range(0, 3, 2):
                index = row + col * 4
                maximum = max(covLayer[index], covLayer[index+1], covLayer[index+4], covLayer[index+5])
                poolLayer.append(int(maximum))

        return(poolLayer)

    def encode_prediction(self, output_layer):
        """Returns the predicted class associated with the given output layer"""
        predicted_class = 0
        if output_layer < 0.5:
            predicted_class = 0
        else:
            predicted_class = 1
        return predicted_class

    def encode_board(self, game, isFiltered = False):
        """Return the encoding of the board that can be given to the neural network"""
        # create the 42 neurons for the current player
        turn = game.get_turn()
        turn_input = 0
        if turn == 2:
            turn = 0
            turn_input = np.zeros(42)
        else:
            turn_input = np.ones(42)
        board = np.ndarray.flatten(np.array(game.get_board()))
        # create the board for each player
        one_input = (board == 1).astype(int)
        two_input = (board == 2).astype(int)
        # concatenate all three inputs into one input of 126 elements
        final_input = np.hstack((one_input, two_input))
        final_input = np.hstack((final_input, turn_input))
        if not isFiltered:
            return final_input
        else:
            initial = final_input[:]
            filter_hline_top = [0,0,1,0,0,1,0,0,1]
            filter_hline_mid = [0,1,0,0,1,0,0,1,0]
            filter_hline_bottom = [1,0,0,1,0,0,1,0,0]
            filter_hline_left = [1,1,1,0,0,0,0,0,0]
            filter_hline_center = [0,0,0,1,1,1,0,0,0]
            filter_hline_right = [0,0,0,0,0,0,1,1,1]
            filter_diag_left = [1,0,0,0,1,0,0,0,1]
            filter_diag_right = [0,0,1,0,1,0,1,0,0]
            filter_plus = [0,1,0,1,0,1,0,1,0]
            filter_plus_full = [0,1,0,1,1,1,0,1,0]
            filter_cross = [1,0,1,0,1,0,1,0,1]
            filters = [filter_diag_left, filter_diag_right, filter_hline_bottom,
                       filter_hline_mid, filter_hline_top, filter_hline_left,
                       filter_hline_center, filter_hline_right, filter_cross,
                       filter_plus, filter_plus_full]
            longArray = []
            data = initial[0:42]
            for rowsF in filters:
                output_filter = self.filter(data, rowsF)
                longArray.append(output_filter)
            data = initial[42:84]
            for rowsF in filters:
                output_filter = self.filter(data, rowsF)
                longArray.append(output_filter)
            for rowsF in range(88):
                longArray.append(int(initial[-1]))
            result = np.array([])
            for iter in range(len(longArray)):
                result = np.hstack((result, np.array(longArray[iter])))
            return result


ENCODER = Encoder()
```
#### File: jnicolasthouvenin/Deep_Learning_to_play_Connect4/network.py
```python
import numpy as np
import math

from encoder import *


class NeuralNetwork:

    def __init__(self, *args):
        if len(args) == 1:
            shape, learning_rate = args[0], 0.05
            self.size = len(shape)
            self.shape = shape
            self.l_r = learning_rate
            self.biases = []
            self.weights = []
            for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
                b = np.squeeze(np.random.randn(layer, 1))
                self.biases.append(b)
                w = np.random.randn(layer, prev_layer)
                self.weights.append(w)
        elif len(args) == 2:
            shape, learning_rate = args[0], args[1]
            self.size = len(shape)
            self.shape = shape
            self.l_r = learning_rate
            self.biases = []
            self.weights = []
            for prev_layer, layer in zip(self.shape[:-1], self.shape[1:]):
                b = np.squeeze(np.random.randn(layer, 1))
                self.biases.append(b)
                w = np.random.randn(layer, prev_layer)
                self.weights.append(w)
        else:
            self.size = args[0]
            self.shape = args[1]
            self.l_r = args[2]
            self.biases = args[3]
            self.weights = args[4]

    def train(self, x, y):
        y_pred = self.forward(x)
        nabla_b, nabla_w = self.backprop(x, y)
        self.update(nabla_b, nabla_w)
        return y_pred

    def forward(self, a):
        self.zs = []
        self.activations = [np.array(a)]
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, a) + b
            self.zs.append(z)
            a = sigmoid(z)
            self.activations.append(np.array(a))
        return a

    def backprop(self, x, y):
        self.forward(x)
        gradient_bias = [np.zeros(b.shape) for b in self.biases]
        gradient_weights = [np.zeros(w.shape) for w in self.weights]
        # last layer
        delta = cost_derivative(self.activations[-1], y) * sigmoid_derivative(self.zs[-1])
        gradient_bias[-1] = delta
        gradient_weights[-1] = computeGradientW(self.activations[-2], delta, len(self.zs[-1]))
        # from before last layer to first layer
        # last layer is self.size-2
        # before last layer is self.size-3
        for l in range(self.size - 3, -1, -1):
            delta = np.dot(self.weights[l + 1].T, delta) * sigmoid_derivative(self.zs[l])
            gradient_bias[l] = delta
            # len(activation) == len(weights)+1
            # activation[i] is the previous activations to the layer weights[i]
            #delta_w = np.dot(delta, self.activations[l].T)
            gradient_weights[l] = computeGradientW(self.activations[l], delta, len(self.zs[l]))
        return gradient_bias, gradient_weights

    def update(self, nabla_b, nabla_w):
        self.biases = [b - self.l_r * nb for b, nb in zip(self.biases, nabla_b)]
        self.weights = [w - self.l_r * nw for w, nw in zip(self.weights, nabla_w)]

    def train_sgd(self, x_train, y_train, batch_size=20):
        x_batches = [
            x_train[i : i + batch_size] for i in range(0, len(x_train), batch_size)
        ]
        y_batches = [
            y_train[i : i + batch_size] for i in range(0, len(y_train), batch_size)
        ]
        for x_batch, y_batch in zip(x_batches, y_batches):
            gradient_bias = [np.zeros(b.shape) for b in self.biases]
            gradient_weights = [np.zeros(w.shape) for w in self.weights]
            for x, y in zip(x_batch, y_batch):
                delta_grad_b, delta_grad_w = self.backprop(x, y)
                gradient_bias = [
                    nb + dnb for nb, dnb in zip(gradient_bias, delta_grad_b)
                ]
                gradient_weights = [
                    nw + dnw for nw, dnw in zip(gradient_weights, delta_grad_w)
                ]
            gradient_weights = [nw / batch_size for nw in gradient_weights]
            gradient_bias = [nb / batch_size for nb in gradient_bias]
            self.weights = [
                w - self.l_r * nw for w, nw in zip(self.weights, gradient_weights)
            ]
            self.biases = [
                b - self.l_r * nb for b, nb in zip(self.biases, gradient_bias)
            ]

    def supervised_learning(self, x_train, y_train, x_test, y_test, lenTest, it,
                            EPOCH=100, batch_size=1000, dataset="classic",
                            file="networks/", write=False):
        print("[INIT] - classification rate =", self.evaluate(x_test, y_test))
        for j in range(EPOCH+1):
            # train
            shuffler = np.random.permutation(x_train.shape[0])
            x_train = x_train[shuffler]
            y_train = y_train[shuffler]
            self.train_sgd(x_train, y_train, batch_size=batch_size)
            # test
            goodPred = 0
            preds = 0
            for i in range(lenTest):
                label_pred = ENCODER.encode_prediction(self.forward(x_test[i]))
                if (label_pred == y_test[i]).all():
                    goodPred += 1
                preds += 1
            print(j, " - classification rate =", self.evaluate(x_test, y_test))
            if j%10 == 0:
                if write:
                    self.save((file+dataset+"_"+str(it)+"_"+str(j)))

    def evaluate(self, x_test, y_test):
        test_results = [
            (ENCODER.encode_prediction(self.forward(_x)), (_y))
            for _x, _y in zip(x_test, y_test)
        ]
        result = sum(int(_y_pred == _y) for (_y_pred, _y) in test_results)
        result /= len(y_test)
        return round(result, 3)

    def save(self, fileName):
        file = open(fileName, "w")
        file.write(str(self.size)+"\n")
        for i in range(self.size):
            file.write(str(self.shape[i])+"\n")
        file.write(str(self.l_r)+"\n")
        for i in range(1, self.size):
            for j in range(self.shape[i]):
                for k in range(self.shape[i-1]):
                    file.write(str(self.weights[i-1][j][k])+"\n")
        for i in range(self.size-1):
            for j in range(self.shape[i+1]):
                file.write(str(self.biases[i][j])+"\n")
        file.close()


def readNeuralNetwork(fileName):
    file = open(fileName, "r")
    size = int(file.readline())
    shape = []
    for i in range(size):
        shape.append(int(file.readline()))
    l_r = float(file.readline())
    weights = []
    for i in range(1, size):
        weights.append([])
        for j in range(shape[i]):
            weights[i-1].append([])
            for k in range(shape[i-1]):
                weights[i-1][j].append(float(file.readline()))
    biases = []
    for i in range(size-1):
        biases.append([])
        for j in range(shape[i+1]):
            biases[i].append(float(file.readline()))
    file.close()
    return NeuralNetwork(size, shape, l_r,
                         [np.array(obj) for obj in biases],
                         [np.array(obj) for obj in weights])


def cost(a, y):
    return (a - y) ** 2


def cost_derivative(a, y):
    return 2*(a - y)


def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


def sigmoid_derivative(x):
    return sigmoid(x) * (1 - sigmoid(x))


def computeGradientW(a, delta, repeat):
    aBuffer = a.transpose()
    aBuffer = np.tile(a, repeat).reshape(repeat, len(a)).transpose()
    g_w = np.multiply(aBuffer, delta).transpose()
    return g_w
```
#### File: jnicolasthouvenin/Deep_Learning_to_play_Connect4/program_test.py
```python
from numpy.core.arrayprint import BoolFormat

from game import *
from encoder import *
from arena import *
from dataManager import *
from network import *


class Program:

    def __init__(self, the_game):
        self.the_game = the_game
        self.best_network = readNeuralNetwork("networks/best_network")
        self.new_network = NeuralNetwork([264, 30, 30, 30, 30, 1], 0.6514)

    def select_move(self):
        return ARENA.select_move_net(self.best_network, self.the_game)

    def train_network(self, it = 1, EPOCHS = 1000, both_datasets = True,
                      coupled_dataset = False, dataset = "classic", write = True):
        lenTest = 0
        if not(both_datasets):
            if coupled_dataset:
                if dataset == "classic":
                    x, y = DATA_MANAGER.import_x_y_coupled_dataset("win_", "loss_", 720000)
                else:
                    x, y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_", "loss_filtered_", 720000)
                lenTest = 72000
            else:
                if dataset == "classic":
                    x, y = DATA_MANAGER.import_x_y("unfinished_", 135000)
                else:
                    x, y = DATA_MANAGER.import_x_y("unfinished_filtered_", 135000)
                lenTest = 13500
        else:
            if dataset == "classic":
                x, y = DATA_MANAGER.import_x_y_coupled_dataset("win_", "loss_", 720000)
                x_2, y_2 = DATA_MANAGER.import_x_y("unfinished_", 135000)
                x = np.vstack((x, x_2))
                y = np.vstack((y, y_2))
            else:
                x, y = DATA_MANAGER.import_x_y_coupled_dataset("win_filtered_", "loss_filtered_", 720000)
                x_2, y_2 = DATA_MANAGER.import_x_y("unfinished_filtered_", 135000)
                x = np.vstack((x, x_2))
                y = np.vstack((y, y_2))
            lenTest = 72000 + 13500

        x_train, y_train, x_test, y_test = DATA_MANAGER.create_train_test_sets(x, y, lenTest)
        self.new_network.supervised_learning(x_train, y_train, x_test, y_test, lenTest,
                                             it=it, EPOCH=EPOCHS, batch_size=100,
                                             dataset=dataset, write=write)

    def set_network_structure(self, sizes, learning_rate):
        self.new_network = NeuralNetwork(sizes, learning_rate)

    def study_against_random(self, dataset = 1, classic = False):
        if dataset == 1 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1_filters")
            score = ARENA.games_net_VS_random(net, game, nb_games=1000)[0]
        elif dataset == 2 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_2_filters")
            score = ARENA.games_net_VS_random(net, game, nb_games=1000)[0]
        elif dataset == 3 and not(classic):
            net = readNeuralNetwork("networks/net_dataset_1&2_filters")
            score = ARENA.games_net_VS_random(net, game, nb_games=1000)[0]
        return score
```
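A hedged toy example of the NeuralNetwork class defined in network.py above. It assumes the project's game/encoder modules are importable (network.py pulls in ENCODER at import time); the shape, learning rate, and XOR data are illustrative only:

```python
# Hypothetical example: fit the network on a tiny XOR-like toy problem.
import numpy as np
from network import NeuralNetwork

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

net = NeuralNetwork([2, 4, 1], 0.5)      # shape and learning rate are illustrative
for _ in range(2000):                    # a few thousand mini-batch passes
    net.train_sgd(x, y, batch_size=4)
print([net.forward(sample) for sample in x])
```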
{ "source": "jni/dask-image", "score": 2 }
#### File: dask_image/ndfilters/_smooth.py
```python
import scipy.ndimage.filters

from . import _utils
from ._gaussian import gaussian_filter

gaussian_filter = gaussian_filter


@_utils._update_wrapper(scipy.ndimage.filters.uniform_filter)
def uniform_filter(image,
                   size=3,
                   mode='reflect',
                   cval=0.0,
                   origin=0):
    size = _utils._get_size(image.ndim, size)
    depth = _utils._get_depth(size, origin)

    depth, boundary = _utils._get_depth_boundary(image.ndim, depth, "none")

    result = image.map_overlap(
        scipy.ndimage.filters.uniform_filter,
        depth=depth,
        boundary=boundary,
        dtype=image.dtype,
        size=size,
        mode=mode,
        cval=cval,
        origin=origin
    )

    return result
```
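A brief usage sketch for the wrapper above; the array shape and chunking are arbitrary choices:

```python
# Hypothetical example: smooth a chunked dask array with uniform_filter.
import dask.array as da
from dask_image.ndfilters import uniform_filter

image = da.random.random((1024, 1024), chunks=(256, 256))
smoothed = uniform_filter(image, size=5)
result = smoothed.compute()  # triggers the lazy map_overlap computation
```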
{ "source": "jniediek/combinato", "score": 2 }
#### File: combinato/manager/manager_cat.py ```python from __future__ import print_function, division, absolute_import import numpy as np import tables import os from .. import SIGNS, TYPE_NAMES, TYPE_ART, GROUP_NOCLASS, GROUP_ART, NcsFile,\ TYPE_NON_NOISE, TYPE_ALL class SortingFile(object): """ represents a grouped sorting file """ def __del__(self): self.h5fid.close() def __init__(self, h5fname): self.h5fid = tables.open_file(h5fname, 'r+') self.index = self.h5fid.root.index[:] self.classes = self.h5fid.root.classes[:] self.groups = self.h5fid.root.groups[:] self.types = self.h5fid.root.types[:] self.sign = str(self.h5fid.get_node_attr('/', 'sign'), 'utf-8') self.basedir = os.path.dirname(h5fname) self.matches = self.h5fid.root.matches[:] def get_gids(self): """ return list of gids """ return np.unique(self.groups[:, 1]) def get_cluster_ids_by_gid(self, gid): """ return class ids for a group """ idx = self.groups[:, 1] == gid return self.groups[idx, 0] def get_non_noise_cluster_index(self): """ returns an index of spikes that are not in unassigned or artifact groups """ bad_groups = np.array((GROUP_ART, GROUP_NOCLASS)) idx = np.in1d(self.types[:, 1], bad_groups) gids = self.types[-idx, 0] idx = self.get_cluster_index_joined_list(gids) return idx def get_cluster_index(self, clid): """ return index for a cluster """ return self.index[self.classes == clid] def _get_group_matches(self, gid): """ specific function to get matches """ clids = self.get_cluster_ids_by_gid(gid) return self.matches[np.in1d(self.classes, clids)] def get_cluster_index_joined(self, gid): """ return index for group (concatenated from all clusters) get_cluster_index_alt will be renamed to this function """ clids = self.get_cluster_ids_by_gid(gid) all_idx = [] for clid in clids: # print('Getting index for {}'.format(clid)) all_idx.append(self.get_cluster_index(clid)) return np.sort(np.hstack(all_idx)) def get_cluster_index_alt(self, gid): """ alternative implementation """ return self.get_cluster_index_joined_list([gid]) def get_cluster_index_joined_list(self, gids): """ return index for several groups together """ idx = np.in1d(self.groups[:, 1], gids) all_clids = self.groups[idx, 0] return self.index[np.in1d(self.classes, all_clids)] def get_group_type(self, gid): """ return the type of a group """ idx = self.types[:, 0] == gid return self.types[idx, 1][0] def save_groups_and_types(self, groups, types): """ save a new group and type array """ self.groups = groups self.types = types self.h5fid.root.groups[:] = groups self.h5fid.remove_node('/', 'types') self.h5fid.create_array('/', 'types', types) self.h5fid.flush() class SortingManagerGrouped(object): """ represents a sorting session after grouping """ def __del__(self): if self.h5datafile is not None: self.h5datafile.close() def __init__(self, h5fname): self.basedir = os.path.dirname(h5fname) self.h5datafile = None try: self.h5datafile = tables.open_file(h5fname, 'r') except IOError as error: print('Could not initialize {}: {}'.format(h5fname, error)) self.initialized = False return self.start_idx = None self.stop_idx = None self.sign = None self.all_times = dict() self.spikes = dict() self.times = dict() for sign in SIGNS: self.all_times[sign] = None self.spikes[sign] = None self.times[sign] = None self.sorting = None self.header = None self.init_header() self.initialized = True def get_thresholds(self): """ get extraction thresholds """ try: thr = self.h5datafile.root.thr[:, :] except tables.exceptions.NoSuchNodeError: print('Extraction thresholds were not saved!') 
thr = None return thr def init_header(self): """ Tries to initialize a ncs header. Not necessarily possible. """ ext = os.path.basename(self.basedir) cand_folders = (os.path.join(self.basedir, '..'), ext) name = None for folder in cand_folders: for suffix in ('.ncs', '.Ncs'): cand_name = os.path.join(folder, ext + suffix) if os.path.exists(cand_name): name = cand_name break if name is not None: fid = NcsFile(name) self.header = fid.header del fid return for folder in cand_folders: cand_name = os.path.join(folder, 'channel_names.csv') if os.path.exists(cand_name): import csv with open(cand_name) as fid: reader = csv.reader(fid, delimiter=';') names = {l[0]: l[1] for l in reader} self.header = {'AcqEntName': names[ext]} return print('Ncs file not found, no header!') self.header = None def init_sorting(self, sorting_folder): """ initialize a sorting folder returns True if init worked, else False """ sorting_path = os.path.join(sorting_folder, 'sort_cat.h5') if os.path.exists(sorting_path): self.sorting = SortingFile(sorting_path) self.sign = self.sorting.sign return True else: return False def get_start_stop_index(self, sign, start_time, stop_time): """ return where to start and stop for a given time frame """ if self.times[sign] is None: self.times[sign] = self.h5datafile.get_node('/' + sign, 'times')[:] t = self.times[sign] start_idx = t.searchsorted(start_time) stop_idx = t.searchsorted(stop_time) if stop_idx + 1 < t.shape[0]: stop_idx += 2 return start_idx, stop_idx def set_sign_times_spikes(self, sign, start_idx=0, stop_idx=np.inf): """ set times, spikes, start, stop """ self.start_idx = start_idx if stop_idx in [np.inf, None]: stop_idx = self.h5datafile.get_node('/' + sign, 'times').shape[0] self.stop_idx = stop_idx self.sign = str(sign) self.spikes[sign] =\ self.h5datafile.\ get_node('/' + sign, 'spikes')[start_idx:stop_idx, :] if self.all_times[sign] is not None: t = self.all_times[sign] else: t = self.h5datafile.get_node('/' + sign, 'times') self.times[sign] = t[start_idx:stop_idx] def get_groups(self, times=True, spikes=True): """ return groups, each containing its times and spikes if requested """ gids = self.sorting.get_gids() ret = dict() for gid in gids: clids = self.sorting.get_cluster_ids_by_gid(gid) for clid in clids: idx = self.sorting.get_cluster_index(clid) # shorten it sel = (idx >= self.start_idx) & (idx < self.stop_idx) idx = idx[sel] - self.start_idx if idx.any(): if gid not in ret: ret[gid] = dict() ret[gid][clid] = dict() if times: ret[gid][clid]['times'] = self.times[self.sign][idx] if spikes: ret[gid][clid]['spikes'] =\ self.spikes[self.sign][idx, :] imgname = 'class_{:03d}.png'.format(clid) imgpath1 = os.path.join(self.basedir, self.sorting.basedir, imgname) imgpath2 = os.path.join(self.sorting.basedir, imgname) if os.path.exists(imgpath1): imgval = imgpath1 elif os.path.exists(imgpath2): imgval = imgpath2 else: imgval = None ret[gid][clid]['image'] = imgval return ret def get_group_joined(self, gid, times=True, spikes=True, artifacts=True): """ get one group, all clusters joined """ ret = dict() gtype = self.get_group_type(gid) if (artifacts is False) and (gtype == TYPE_ART): return ret idx = self.sorting.get_cluster_index_joined(gid) n_clusters = len(self.sorting.get_cluster_ids_by_gid(gid)) # shorten it sel = (idx >= self.start_idx) & (idx <= self.stop_idx) if not sel.any(): return ret idx = idx[sel] - self.start_idx # idx -= self.start_idx shape = self.times[self.sign].shape[0] if idx[-1] >= shape: idx = idx[idx < shape] print('Shortened index!') ret['type'] = 
gtype ret['n_clusters'] = n_clusters if times: ret['times'] = self.times[self.sign][idx] if spikes: ret['spikes'] = self.spikes[self.sign][idx] return ret def get_data_from_index(self, index, times=True, spikes=True): """ return data from a given index """ idx = index - self.start_idx ret = dict() if times: ret['times'] = self.times[self.sign][idx] if spikes: ret['spikes'] = self.spikes[self.sign][idx] return ret def get_groups_joined(self, times=True, spikes=True, artifacts=True): """ return groups with times and spikes joined """ gids = self.sorting.get_gids() ret = dict() for gid in gids: group = self.get_group_joined(gid, times, spikes, artifacts) if len(group) > 0: ret[gid] = group return ret def get_group_type(self, gid): """ return group type """ return self.sorting.get_group_type(gid) def get_samples_per_spike(self): """ return samples per spike... """ return self.spikes[self.sign].shape[1] def save_groups_and_types(self, groups, types): """ save to underlying sorting file """ self.sorting.save_groups_and_types(groups, types) def get_group_table(self): """ get group table """ return self.sorting.groups def get_type_table(self): """ get type table """ return self.sorting.types def get_non_noise_spikes(self, spikes=True, times=True): """ return all non-noise spikes joined """ idx = self.sorting.get_non_noise_cluster_index() sel = (idx >= self.start_idx) & (idx < self.stop_idx) idx = idx[sel] ret = self.get_data_from_index(idx, times=times, spikes=spikes) ret['type'] = TYPE_NON_NOISE return ret def get_all_spikes(self): """ return all spikes """ sel = (self.sorting.index >= self.start_idx) &\ (self.sorting.index < self.stop_idx) idx = self.sorting.index[sel] ret = self.get_data_from_index(idx) ret['type'] = TYPE_ALL return ret class Combinato(SortingManagerGrouped): """ convenience class, reads sorted data """ def __init__(self, fname, sign, label): self.initialized = False self.h5datafile = None # in case of early return basedir = os.path.dirname(fname) labelbasename = os.path.basename(label) sorting_session = os.path.join(basedir, labelbasename) # quick check if we can do this if not os.path.exists(sorting_session): print('Session folder {} ' 'not found'.format(sorting_session)) return super(Combinato, self).__init__(fname) self.set_sign_times_spikes(sign) res = self.init_sorting(sorting_session) if not res: print('Sorting session {} ' 'not initialized'.format(sorting_session)) else: self.initialized = True def test(name, label, ts): """ simple test case, needs a folder as argument """ with open(ts) as fid: start, stop = [int(x)/1000. for x in fid.readline().split()] fid.close() man = SortingManagerGrouped(name) if not man.initialized: return print('Working on {}, from time {} to {} ({:.1f} min)' .format(name, start, stop, (stop-start)/6e4)) start_idx, stop_idx = man.get_start_stop_index('pos', start, stop) print('Setting start index: {}, stop index: {}'. 
format(start_idx, stop_idx)) man.set_sign_times_spikes('pos', start_idx, stop_idx) ret = man.init_sorting(os.path.join(os.path.dirname(name), label)) if not ret: print('Unable to initialize!') return print(man.sorting.index.shape) groups = man.get_groups() print('Retrieved Groups') test_gid = groups.keys()[0] man.get_group_joined(test_gid) all_groups = man.get_groups_joined() # iterate through clusters all_good = 0 for k, v in groups.items(): print('Group {} type {}'.format(k, TYPE_NAMES[man.get_group_type(k)])) print(v.keys()) sumidx = 0 for clid in v: print('Cluster {} len {}'.format(clid, v[clid]['times'].shape[0])) sumidx += v[clid]['times'].shape[0] if man.get_group_type(k) > 0: all_good += sumidx idx1 = man.sorting.get_cluster_index_joined(k) idx2 = man.sorting.get_cluster_index_alt(k) assert not (idx1 - idx2).any() print('Total index len {} vs {} summed'. format(idx1.shape[0], sumidx)) # assert idx1.shape[0] == sumidx non_noise_spk = man.get_non_noise_spikes() total = man.get_all_spikes() print('Non-noise index has {} elements'. format(non_noise_spk['times'].shape[0])) assert non_noise_spk['times'].shape[0] == all_good print('Total has {} elements'.format(total['times'].shape[0])) for gid, group in all_groups.items(): print('Group {} has {} times, type {} and {} members'. format(gid, group['times'].shape[0], TYPE_NAMES[group['type']], group['n_clusters'])) ```
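A minimal usage sketch for the `Combinato` convenience reader defined above. The data file name and sorting label are placeholders, and the top-level import path is an assumption; adapt them to how the package is installed and how the sorting session was named.

```python
# Sketch only: file name, sign and label below are hypothetical examples.
from combinato import Combinato  # assumed top-level import

DATAFILE = 'data_CSC1.h5'        # hypothetical spike data file
LABEL = 'sort_pos_simple'        # hypothetical sorting session label

reader = Combinato(DATAFILE, 'pos', LABEL)

if reader.initialized:
    # All clusters of each group joined, artifact groups excluded
    groups = reader.get_groups_joined(times=True, spikes=True, artifacts=False)
    for gid, group in groups.items():
        print('Group {}: {} spikes in {} clusters (type {})'.format(
            gid, group['times'].shape[0], group['n_clusters'], group['type']))
else:
    print('Could not initialize sorted data')
```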
{ "source": "jniediek/response_viewer", "score": 2 }
#### File: jniediek/response_viewer/model.py ```python from __future__ import division, print_function, absolute_import from PyQt4 import QtCore as qc NCOLS = 7 OBJECT, DAYTIME, GROUP, CLUSTER_STATE, RESP_STATE, IMAGE, FNAME = range(NCOLS) CLUSTER_STATES = {'A': 'Artifact', 'O': 'Okay', 'E': 'Edit'} RESP_STATES = {'M': 'Maybe', 'R': 'Response', 'N': 'No Response'} class ResponseTableModel(qc.QAbstractTableModel): """ Table of channels with properties such as extracted, sorted etc """ def __init__(self): super(ResponseTableModel, self).__init__() self.images = [] def rowCount(self, index=qc.QModelIndex()): return len(self.images) def columnCount(self, index=qc.QModelIndex()): return NCOLS - 2 # last cols: image, fname def data(self, index, role=qc.Qt.DisplayRole): if not (index.isValid() and 0 <= index.row() < len(self.images)): return qc.QVariant() this_image = self.images[index.row()] col = index.column() if role == qc.Qt.DisplayRole: data = this_image[col] if col == CLUSTER_STATE: data = CLUSTER_STATES[data] if col == RESP_STATES: data = RESP_STATES[data] return qc.QVariant(data) def headerData(self, section, orientation, role=qc.Qt.DisplayRole): if role == qc.Qt.TextAlignmentRole: if orientation == qc.Qt.Horizontal: return qc.QVariant(int(qc.Qt.AlignLeft | qc.Qt.AlignVCenter)) elif role == qc.Qt.DisplayRole: if orientation == qc.Qt.Horizontal: if section == OBJECT: ret = 'Channel' elif section == DAYTIME: ret = 'Daytime' elif section == GROUP: ret = 'Group' elif section == CLUSTER_STATE: ret = 'Cluster' elif section == RESP_STATE: ret = 'Response' return qc.QVariant(ret) return qc.QVariant(int(section) + 1) def add_row(self, row): """ simply add a channel to table """ self.images.append(row) print('Added ' + row[0]) def set_type(self, index, which_one, new_type): self.images[index.row()][which_one] = new_type self.reset() def get_image(self, row): """ just return the image """ return self.images[row][IMAGE] def get_status(self, row, which_one): """ return a status row text """ act = [self.images[row][which_one]] print(act) ret = self.images[row][OBJECT] + ' classification: ' + act[0] return ret def set_action(self, index, which_one, to_what): """ set classification attribute """ row = self.channels[index.row()] row[which_one] = to_what self.reset() def get_states(self): """ return internal data """ ret = [] for row in self.images: retval = (row[FNAME], row[OBJECT], row[DAYTIME], row[GROUP], row[CLUSTER_STATE], row[RESP_STATE]) ret.append(retval) return ret ```
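The table model above only becomes useful once it is attached to a view; the sketch below wires it into a `QTableView`. The module name `model` and the row contents are assumptions for illustration (real rows are filled in by the viewer application).

```python
# Minimal PyQt4 wiring sketch; the row values are made-up placeholders.
import sys
from PyQt4 import QtGui as qg
from model import ResponseTableModel  # assumed module name, matching the file above

app = qg.QApplication(sys.argv)

model = ResponseTableModel()
# One row per image: object, daytime, group, cluster state, response state, image, fname
model.add_row(['CSC1', 'morning', 3, 'O', 'M', None, 'class_003.png'])

view = qg.QTableView()
view.setModel(model)
view.show()

sys.exit(app.exec_())
```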
{ "source": "jniedrauer/cloudformation", "score": 3 }
#### File: cloudformation/meta/get_params_file.py ```python import argparse import json import tempfile import boto3 SECRET_IDENTIFIER = 'CfnSecret' def main(): """Commandline entrypoint""" parser = argparse.ArgumentParser() parser.add_argument('-e', '--env', required=True, help='target environment') parser.add_argument('-n', '--name', required=True, help='stack name') args = parser.parse_args() ssm = boto3.client('ssm') response = ssm.get_parameters_by_path( Path='/'.join([ '/' + args.env.title(), SECRET_IDENTIFIER, args.name, ]), Recursive=False, WithDecryption=True, ) params = [ { 'ParameterKey': i['Name'].split('/')[-1], 'ParameterValue': i['Value'] } for i in response['Parameters'] ] params.append({ 'ParameterKey': 'StackName', 'ParameterValue': args.name }) with tempfile.NamedTemporaryFile(delete=False) as tmpf: tmpf.write( json.dumps(params).encode(), ) print(tmpf.name) if __name__ == '__main__': main() ``` #### File: cloudformation/resources/ecs.py ```python from typing import List from troposphere import ( Base64, Sub, Ref, Template, ) from troposphere.autoscaling import ( Metadata, AutoScalingGroup, Tag as AsgTag, LaunchConfiguration, ) from troposphere.ecs import ( Cluster, ) from troposphere.iam import InstanceProfile from troposphere.policies import ( AutoScalingReplacingUpdate, AutoScalingRollingUpdate, UpdatePolicy, ) from troposphere.cloudformation import ( Init, InitConfig, InitFiles, InitFile, ) from .wrapper import Wrapper class EcsWrapper(Wrapper): """TODO: Documentation""" def __init__(self, subnets: List[Ref], security_groups: list, size: str, role: Ref, cluster_name: str, min_size: int, max_size: int, t: Template): super().__init__() self.userdata = self.render_template('EcsUserData.sh', metadata='LaunchConfiguration', resource='AutoscalingGroup') self.cluster = t.add_resource(Cluster( 'EcsCluster', ClusterName=cluster_name, )) self.instance_profile = t.add_resource(InstanceProfile( 'EcsInstanceProfile', Roles=[role] )) self.launch_config = t.add_resource(LaunchConfiguration( 'LaunchConfiguration', UserData=Base64(Sub(self.userdata)), ImageId=self.config.AMIs.ecs['2017.09'][self.config.region], InstanceType=size, SecurityGroups=security_groups, IamInstanceProfile=Ref(self.instance_profile), Metadata=Metadata( Init(dict( config=InitConfig( files=InitFiles({ '/etc/ecs/ecs.config': InitFile( content=Sub('''\ ECS_CLUSTER=${EcsCluster} '''.strip()), mode="000644", owner="root", group="root", ) }), ) )), ), )) self.asg = t.add_resource(AutoScalingGroup( 'AutoscalingGroup', DesiredCapacity=min_size, Tags=[ AsgTag('Name', cluster_name, True), AsgTag('Env', self.config.env, True), ], LaunchConfigurationName=Ref(self.launch_config), MinSize=min_size, MaxSize=max_size, VPCZoneIdentifier=subnets, HealthCheckType='EC2', UpdatePolicy=UpdatePolicy( AutoScalingReplacingUpdate=AutoScalingReplacingUpdate( WillReplace=True, ), AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime='PT5M', MinInstancesInService='1', MaxBatchSize='1', WaitOnResourceSignals=True ) ) )) ``` #### File: cloudformation/resources/subnet.py ```python from troposphere import Output, Select from troposphere import GetAtt, Ref, Tags, Template from troposphere.ec2 import NatGateway from troposphere.ec2 import EIP from troposphere.ec2 import Route from troposphere.ec2 import PortRange from troposphere.ec2 import NetworkAcl from troposphere.ec2 import SubnetRouteTableAssociation from troposphere.ec2 import Subnet from troposphere.ec2 import RouteTable from troposphere.ec2 import NetworkAclEntry from 
troposphere.ec2 import SubnetNetworkAclAssociation class SubnetWrapper: """Create a subnet, inbound and outbound NACLs, a route table, and a route table association.""" def __init__(self, cidr: str, idx: int, vpc: Ref, az: str, t: Template, gateway: Ref, private=True): self.idx = idx self.t = t self.az = az self.natgw = None self.nat_eip = None vis = 'Private' if private else 'Public' self.subnet = t.add_resource(Subnet( f'{vis}Subnet{idx}', VpcId=vpc, CidrBlock=cidr, AvailabilityZone=az, MapPublicIpOnLaunch=not private, Tags=Tags( Name=f'{vis}Subnet{idx}', Application=Ref('AWS::StackName'), Network=vis, ) )) self.routetable = t.add_resource(RouteTable( f'{vis}RouteTable{idx}', VpcId=vpc, Tags=Tags( Name=f'{vis}RouteTable{idx}', Application=Ref('AWS::StackName'), Network=vis, ) )) self.routetable_assoc = t.add_resource(SubnetRouteTableAssociation( f'{vis}RouteTableAssociation{idx}', SubnetId=Ref(self.subnet), RouteTableId=Ref(self.routetable) )) self.nacl = t.add_resource(NetworkAcl( f'{vis}NetworkAcl{idx}', VpcId=vpc, Tags=Tags( Name=f'{vis}NetworkAcl{idx}', Application=Ref('AWS::StackName'), Network=vis, ) )) self.nacl_rules = {} for i in ('in', 'out'): self.nacl_rules[i] = t.add_resource(NetworkAclEntry( f'{i.title()}Bound{vis}NetworkAclEntry{idx}', NetworkAclId=Ref(self.nacl), RuleNumber='100', Protocol='6', PortRange=PortRange(To='65535', From='0'), Egress=('true' if i == 'out' else 'false'), RuleAction='allow', CidrBlock='0.0.0.0/0', )) self.nacl_assoc = t.add_resource( SubnetNetworkAclAssociation( f'{vis}SubnetNetworkAclAssociation{idx}', SubnetId=Ref(self.subnet), NetworkAclId=Ref(self.nacl), ) ) if private: self.default_route = t.add_resource(Route( f'NatRoute{idx}', RouteTableId=Ref(self.routetable), DestinationCidrBlock='0.0.0.0/0', NatGatewayId=gateway, )) else: self.default_route = t.add_resource(Route( f'{vis}DefaultRoute{idx}', RouteTableId=Ref(self.routetable), DestinationCidrBlock='0.0.0.0/0', GatewayId=gateway, )) t.add_output(Output( f'{vis}Subnet{idx}', Description=f'SubnetId of {vis}Subnet{idx}', Value=Ref(self.subnet), )) def add_natgw(self, idx: int, nat_eips: Ref = None): """Add a NAT gateway to the subnet""" if nat_eips: eip = Select(idx, nat_eips) else: self.nat_eip = self.t.add_resource(EIP( f'NatEip{self.idx}', Domain='vpc', )) eip = GetAtt(self.nat_eip, 'AllocationId') self.natgw = self.t.add_resource(NatGateway( f'NatGw{self.idx}', AllocationId=eip, SubnetId=Ref(self.subnet), )) self.t.add_output(Output( f'NatEip{self.idx}', Value=eip, Description=f'Nat Gateway Elastic IP for {self.az}', )) ```
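A sketch of how `SubnetWrapper` might be combined into a template: one public subnet that hosts a NAT gateway and one private subnet whose default route points at it. The CIDRs, availability zone and the `InternetGateway` resource are example values, not part of the original stack.

```python
# Illustrative only; SubnetWrapper is the class defined above in this module.
from troposphere import Ref, Template
from troposphere.ec2 import VPC, InternetGateway

t = Template()
vpc = t.add_resource(VPC('Vpc', CidrBlock='10.0.0.0/16'))
igw = t.add_resource(InternetGateway('InternetGateway'))

# Public subnet with an Internet gateway route and a NAT gateway inside it
public = SubnetWrapper('10.0.0.0/24', 0, Ref(vpc), 'us-east-1a', t,
                       gateway=Ref(igw), private=False)
public.add_natgw(0)

# Private subnet whose default route goes through the NAT gateway
private = SubnetWrapper('10.0.1.0/24', 0, Ref(vpc), 'us-east-1a', t,
                        gateway=Ref(public.natgw), private=True)

print(t.to_json())
```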
{ "source": "jniedrauer/cloudwatchiter", "score": 3 }
#### File: cloudwatchiter/cloudwatchiter/rate.py ```python from datetime import datetime, timedelta from typing import List from .abc_expression import AbstractExpression class Rate(AbstractExpression): """Cloudwatch rate Schedule Expression""" element_count: int = 2 valid_types: tuple = ( 'rate', ) valid_units = ( 'minute', 'minutes', 'hour', 'hours', 'day', 'days', ) def __init__(self, *args): super().__init__(*args) self._validate_unit() self._validate_value() def get_next(self, count: int = 1, start: int = 1) -> List[datetime]: """Get next event(s)""" return self._get_range(count=count, start=start) def get_previous(self, count: int = 1, start: int = 1) -> List[datetime]: """Get past event(s)""" return self._get_range(count=count, start=start, forward=False) def _get_range(self, count, start, forward=True) -> List[datetime]: """Shared implementation for get_next and get_previous""" if not self.unit.endswith('s') and not all((count == 1, start == 1)): # Only one event will occur raise ValueError( 'Requested range invalid for rate expression' ) result: List[datetime] = [] for i in range(count): result.append( self._get_unit_floor() + self._get_timedelta(start, forward=forward) + self._get_timedelta(i, forward=forward) ) return result def _get_timedelta(self, quantity: int, forward: bool = True) -> timedelta: """Return a timedelta for given unit""" if forward: multiplier = 1 else: multiplier = -1 if self.unit.startswith('minute'): return timedelta(minutes=(int(self.value) * quantity * multiplier)) elif self.unit.startswith('hour'): return timedelta(hours=(int(self.value) * quantity * multiplier)) elif self.unit.startswith('day'): return timedelta(days=(int(self.value) * quantity * multiplier)) else: raise ValueError def _get_unit_floor(self) -> datetime: """Return now rounded down to unit""" if self.unit.startswith('minute'): return self.now - timedelta( seconds=self.now.second, microseconds=self.now.microsecond, ) elif self.unit.startswith('hour'): return self.now - timedelta( minutes=self.now.minute, seconds=self.now.second, microseconds=self.now.microsecond, ) elif self.unit.startswith('day'): return self.now - timedelta( hours=self.now.hour, minutes=self.now.minute, seconds=self.now.second, microseconds=self.now.microsecond, ) else: raise ValueError def _validate_unit(self) -> None: """Validate unit property""" if self.unit not in self.valid_units: raise ValueError( 'Invalid unit: {self.unit}, ' 'expected: {self.valid_units}' ) def _validate_value(self) -> None: """Validate value property""" try: assert int(self.value) > 0 except (AssertionError, ValueError): raise ValueError( 'Invalid rate: {self.value}, ' 'expected: positive integer' ) @property def value(self) -> str: """Rate value""" return self.elements[0] @property def unit(self) -> str: """Rate unit""" return self.elements[1] ``` #### File: cloudwatchiter/tests/abc_expression_test.py ```python import pytest from cloudwatchiter.abc_expression import AbstractExpression def abc(cls): cls.__abstractmethods__ = set() cls.element_count = 2 cls.valid_types = ('cron', 'rate') return cls def test_invalid_init(): cls = abc(AbstractExpression) with pytest.raises(ValueError): expr = cls('foo') def test_cron_init(): cls = abc(AbstractExpression) expr = cls('cron(1 2)') assert expr.type == 'cron' def test_rate_init(): cls = abc(AbstractExpression) expr = cls('rate(1 2)') assert expr.type == 'rate' def test_invalidate_elements(): with pytest.raises(ValueError): AbstractExpression.validate_elements( elements=['1'], ) def 
test_validate_elements(): elements = AbstractExpression.validate_elements( elements=['1', '2'], ) assert elements == ['1', '2'] def test_elements(): cls = abc(AbstractExpression) expr = cls('cron(1 2)') assert expr.elements == ['1', '2'] ```
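The tests above exercise the abstract base class; the `Rate` class earlier in this package is the concrete schedule type. A small sketch of how it might be used (the schedule string is just an example):

```python
# Sketch only: 'rate(15 minutes)' is an arbitrary example expression.
from cloudwatchiter.rate import Rate  # import path mirrors the tests above

expr = Rate('rate(15 minutes)')

# Next three occurrences, counted from "now" rounded down to the minute
for when in expr.get_next(count=3):
    print(when.isoformat())

# Most recent past occurrence
print(expr.get_previous(count=1)[0].isoformat())
```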
{ "source": "jniedrauer/jniedrauer.com", "score": 3 }
#### File: app/core/api.py ```python from flask import jsonify, request from sqlalchemy.exc import SQLAlchemyError, IntegrityError from dictalchemy import make_class_dictable from ..main import app, db make_class_dictable(db.Model) from .models import Guest @app.route('/api/ip') def api_ip(): """Return client IP""" return api_reply({'ipAddress': get_client_ip()}) @app.route('/api/guestbook', methods=['GET', 'POST']) def guestbook(): """Allow guests to view or post to the guestbook""" sucess = False reason = "You shouldn't ever see this" code = 200 if request.method == 'POST': data = request.get_json(force=True) app.logger.debug('/api/guestbook recieved POST: %s', data) if data.get('name'): success, reason, code = add_guest(data['name']) else: success = False reason = 'RTFM' code = 400 return api_reply({'acknowledged': True}, success=success, reason=reason), code else: guests = None try: guests = [r.asdict() for r in Guest.query.all()] success = True reason = None except SQLAlchemyError: success = False reason = 'Storage read error' code = 500 return api_reply({'guests': guests}, success=success, reason=reason), code def get_client_ip(): """Return the client x-forwarded-for header or IP address""" return request.headers.get('X-Forwarded-For') or request.remote_addr def add_guest(name): """Add a guest to the database""" guest = Guest(name=name) db.session.add(guest) try: db.session.commit() return True, None, 200 except IntegrityError: return False, 'Doh! You already signed', 400 except SQLAlchemyError: return False, 'Storage engine error', 500 def api_reply(body={}, success=True, reason=None): """Create a standard API reply interface""" return jsonify({**body, 'success': success, 'reason': reason}) ```
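A quick way to exercise the two endpoints above is Flask's built-in test client. The import path follows the relative import `from ..main import app, db` in this file and is therefore an assumption, and the guestbook POST only succeeds if the database behind `db` has been configured and its tables created.

```python
# Manual smoke-test sketch; assumes the app package layout and an initialized database.
from app.main import app  # assumed package path

client = app.test_client()

# Client IP echo
print(client.get('/api/ip').get_json())

# Sign the guestbook, then list all guests
print(client.post('/api/guestbook', json={'name': 'Alice'}).get_json())
print(client.get('/api/guestbook').get_json())
```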
{ "source": "jniedrauer/tradebot", "score": 2 }
#### File: tradebot/tests/config_test.py ```python import os import shutil import tempfile import unittest from importlib import reload from mock import call, mock_open, patch from tradebot import config, constants as const def setup_and_teardown(f): def wrapper(*args, **kwargs): try: self = args[0] os.environ.pop('XDG_CONFIG_HOME', None) os.environ.pop('HOME', None) self.config = config.AppConfig() self.tmpdir = tempfile.mkdtemp() return f(*args, **kwargs) finally: shutil.rmtree(self.tmpdir) return wrapper def setup_config_file(path, content): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) with open(path, 'w') as f: f.write(content) class TestConfig(unittest.TestCase): @setup_and_teardown def test_get_config_file_failed(self): os.environ['XDG_CONFIG_HOME'] = 'notreal' os.environ['HOME'] = 'notreal' reload(const) with self.assertRaises(OSError): config.get_cfg_file() @setup_and_teardown def test_get_xdg_config_file(self): os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), '') res = config.get_cfg_file() self.assertEqual(res, os.path.join(os.environ['XDG_CONFIG_HOME'], 'tradebot', const.CFG_FILE)) @setup_and_teardown def test_get_config_value(self): os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) content = ( 'test_key: test_value\n' ) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), content) self.config.load() self.assertEqual('test_value', self.config.get('test_key')) @setup_and_teardown def test_get_default_config_value(self): os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), '') self.config.load() self.assertNotEqual(None, self.config.get('log')) @setup_and_teardown def test_get_config_value_override_default(self): os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) content = ( 'log: test_value\n' ) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), content) self.config.load() self.assertEqual('test_value', self.config.get('log')) @setup_and_teardown def test_set_config_value(self): os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) content = ( 'test_key: test_value\n' ) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), content) self.config.load() self.config.set(test_key='override') self.assertEqual('override', self.config.get('test_key')) @setup_and_teardown def test_environment_config_file(self): os.environ['TRADEBOT_CONFIG_FILE'] = os.path.join(self.tmpdir, 'test.yml') os.environ['XDG_CONFIG_HOME'] = self.tmpdir reload(const) wrong_content = ( 'test_key: wrong_value\n' ) right_content = ( 'test_key: right_value\n' ) setup_config_file(os.path.join(self.tmpdir, 'tradebot', const.CFG_FILE), wrong_content) setup_config_file(os.environ['TRADEBOT_CONFIG_FILE'], right_content) self.config.load() self.assertEqual('right_value', self.config.get('test_key')) @setup_and_teardown def test_commandline_args(self): os.environ['TRADEBOT_CONFIG_FILE'] = os.path.join(self.tmpdir, 'test.yml') reload(const) setup_config_file(os.environ['TRADEBOT_CONFIG_FILE'], '---') expected = {'test_key': 'test_value'} self.config = config.AppConfig(**expected) self.config.load() self.assertEqual('test_value', self.config.get('test_key')) ``` #### File: tradebot/tradebot/logging.py ```python import logging from logging import StreamHandler from logging.handlers import RotatingFileHandler import os def setup_logging(**kwargs): 
"""Configure a root logger""" try: path = os.path.expanduser(kwargs['log']) except IndexError: raise RuntimeError('No log path provided') logging.getLogger().setLevel(logging.DEBUG) # Set root logger minimum level log_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s') log_file = RotatingFileHandler(path, maxBytes=1000000, backupCount=3) log_file.setFormatter(log_format) log_file.setLevel(log_level_to_constant(kwargs.get('loglevel'))) logging.getLogger().addHandler(log_file) log_console = StreamHandler() log_console.setFormatter(logging.Formatter('%(message)s')) log_console.setLevel(log_level_to_constant(kwargs.get('loglevel'))) logging.getLogger().addHandler(log_console) def log_level_to_constant(loglevel): """Convert human readable log level to logging constant""" return getattr(logging, loglevel) ``` #### File: tradebot/tradebot/main.py ```python import importlib import logging import pkg_resources from .api import ApiInterface from .config import AppConfig from .input import read_commandline_args from .logging import setup_logging def main(): """Module entrypoint""" args = read_commandline_args() config = AppConfig(**args) config.load() setup_logging( log=config.get('log'), loglevel=config.get('loglevel') ) log = logging.getLogger(__name__) # pylint: disable=protected-access log.debug('Configuration loaded: %s', config._config) plugins = { entry_point.name: entry_point.load() for entry_point in pkg_resources.iter_entry_points('tradebot.plugins') } plugins['dummy'] = 'tradebot.plugins.dummy' log.debug('Loaded plugins: %s', plugins) plugin = importlib.import_module(plugins.get(config.get('plugin'))) api = ApiInterface(plugin, config) print(api.get_holdings('test')) ```
{ "source": "jniehues-kit/NMTGMinor", "score": 3 }
#### File: NMTGMinor/onmt/optim.py ```python import torch.optim as optim from torch.optim.optimizer import Optimizer import torch, math def normalize_gradients(parameters, denom): """ early return if no need to normalize """ if denom == 1: return parameters = list(filter(lambda p: p.grad is not None, parameters)) denom = float(denom) for p in parameters: p.grad.data.div_(denom) def detech_nan(parameters): parameters = list(filter(lambda p: p.grad is not None, parameters)) for p in parameters: if torch.equal(p.grad.data, p.grad.data): continue else: return True return False def clip_grad_norm(parameters, max_norm, norm_type=2): r"""Clips gradient norm of an iterable of parameters. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. Arguments: parameters (Iterable[Variable]): an iterable of Variables that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ parameters = list(filter(lambda p: p.grad is not None, parameters)) max_norm = float(max_norm) norm_type = float(norm_type) if norm_type == float('inf'): total_norm = max(p.grad.data.abs().max() for p in parameters) else: total_norm = 0 for p in parameters: param_norm = p.grad.data.norm(norm_type) total_norm += param_norm ** norm_type total_norm = total_norm ** (1. / norm_type) if max_norm > 0: clip_coef = max_norm / (total_norm + 1e-6) if clip_coef < 1: for p in parameters: p.grad.data.mul_(clip_coef) return total_norm class Optim(object): def set_parameters(self, params): params_ = filter(lambda p: p.requires_grad, params) self.params = list(params_) # careful: params may be a generator if self.method == 'sgd': self.optimizer = optim.SGD(self.params, lr=self.lr, weight_decay=self.weight_decay, momentum=0.0) elif self.method in ['adam', 'fused_adam']: fast_adam = True try: import apex if self.amsgrad: print("Note: AMSGRAD is not compatible with Fused Adam") self.optimizer = apex.optimizers.FusedAdam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9, weight_decay=self.weight_decay, amsgrad=False) except RuntimeError: fast_adam = False if not fast_adam: self.optimizer = optim.Adam(self.params, lr=self.lr, betas=(self.beta1, self.beta2), eps=1e-9, weight_decay=self.weight_decay, amsgrad=self.amsgrad) else: raise RuntimeError("Invalid optim method: " + self.method) print(self.optimizer) def __init__(self, opt): self.optimizer = None self.params = None self.lr = opt.learning_rate self.model_size = opt.model_size self.max_grad_norm = opt.max_grad_norm self.update_method = opt.update_method self.method = opt.optim if 'noam' in self.update_method: self.init_lr = self.model_size ** (-0.5) * self.lr elif 'cosine' in self.update_method: print("* Using Cosine learning rate schedule") self.scheduler = None self.eta_min = 0.0 self.max_step = opt.max_step if hasattr(opt, 'max_step') else 33333 self.init_lr = self.lr # optim.lr_scheduler.CosineAnnealingLR(optimizer, # opt.max_step, eta_min=0.0) else: self.init_lr = self.lr self.lr = self.init_lr self._step = 0 if self.update_method == 'noam2': self._step = opt.warmup_steps if self.update_method == 'cosine': self.min_lr = 0.00 self.warmup_steps = opt.warmup_steps self.beta1 = opt.beta1 self.beta2 = opt.beta2 self.weight_decay = opt.weight_decay self.amsgrad = opt.amsgrad self.max_steps = opt.max_steps def step(self, 
grad_denom=None): "Normalize gradients by batch size" self.normalize_grad(denom=grad_denom) "Compute gradients norm." # grad_norm = clip_grad_norm(self.params, self.max_grad_norm).item() "Automatically scale learning rate over learning period" self._step += 1 if 'noam' in self.update_method or 'cosine' in self.update_method: self.updateLearningRate() self.optimizer.step() # return grad_norm """Reset the denom for normalization""" def normalize_grad(self, denom=None): if denom is None: denom = 1 normalize_gradients(self.params, denom) def updateLearningRate(self): """ Decay learning rate if val perf does not improve or we hit the start_decay_at limit. """ if self.update_method in ['noam', 'noam2']: if self._step <= self.warmup_steps: self.lr = self.init_lr * self._step * self.warmup_steps ** (-1.5) else: self.lr = self.init_lr * self._step ** (-0.5) self.optimizer.param_groups[0]['lr'] = self.lr elif self.update_method in ['cosine']: # if self.scheduler is None: # self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.max_step, # eta_min=self.eta_min) # # self.scheduler.step(self._step) self.lr = self.min_lr + 0.5 * (self.init_lr - self.min_lr) * \ (1 + math.cos((self._step / self.max_step) * math.pi)) self.optimizer.param_groups[0]['lr'] = self.lr # self.lr = self.optimizer.param_groups[0]['lr'] # self.lr = self.min_lr + (self.init_lr - self.min_lr) * \ # (1 + math.cos(math.pi * self._step / self.max_steps)) / 2 elif self.update_method in ['regular', 'basic']: " :) " self.lr = self.optimizer.param_groups[0]['lr'] self.optimizer.param_groups[0]['lr'] = self.lr def setLearningRate(self, lr): self.optimizer.param_groups[0]['lr'] = lr self.lr = lr def getLearningRate(self): return self.lr def state_dict(self): state_dict = self.optimizer.state_dict() state_dict['_step'] = self._step return state_dict def load_state_dict(self, state_dict): self._step = state_dict['_step'] state_dict.pop('_step', None) self.optimizer.load_state_dict(state_dict) def zero_grad(self): self.optimizer.zero_grad() ``` #### File: jniehues-kit/NMTGMinor/preprocess.py ```python import onmt import onmt.markdown import argparse import torch from onmt.data.indexed_dataset import IndexedDatasetBuilder import h5py as h5 import numpy as np import hashlib from collections import defaultdict parser = argparse.ArgumentParser(description='preprocess.py') onmt.markdown.add_md_help_argument(parser) # **Preprocess Options** parser.add_argument('-config', help="Read options from this file") parser.add_argument('-src_type', default="text", help="Type of the source input. Options are [text|img|audio].") parser.add_argument('-sort_type', default="ascending", help="Type of sorting. Options are [ascending|descending].") parser.add_argument('-src_img_dir', default=".", help="Location of source images") parser.add_argument('-stride', type=int, default=1, help="Stride on input features") parser.add_argument('-concat', type=int, default=1, help="Concate sequential audio features to decrease sequence length") parser.add_argument('-previous_context', type=int, default=0, help="Number of previous sentence for context") parser.add_argument('-input_type', default="word", help="Input type: word/char") parser.add_argument('-data_type', default="int64", help="Input type for storing text (int64|int32|int|int16) to reduce memory load") parser.add_argument('-format', default="raw", help="Save data format: binary or raw. 
Binary should be used to load faster") parser.add_argument('-train_src', required=True, help="Path to the training source data") parser.add_argument('-train_tgt', required=True, help="Path to the training target data") parser.add_argument('-valid_src', required=True, help="Path to the validation source data") parser.add_argument('-valid_tgt', required=True, help="Path to the validation target data") parser.add_argument('-train_src_lang', default="src", help="Language(s) of the source sequences.") parser.add_argument('-train_tgt_lang', default="tgt", help="Language(s) of the target sequences.") parser.add_argument('-valid_src_lang', default="src", help="Language(s) of the source sequences.") parser.add_argument('-valid_tgt_lang', default="tgt", help="Language(s) of the target sequences.") parser.add_argument('-save_data', required=True, help="Output file for the prepared data") parser.add_argument('-src_vocab_size', type=int, default=9999999, help="Size of the source vocabulary") parser.add_argument('-tgt_vocab_size', type=int, default=9999999, help="Size of the target vocabulary") parser.add_argument('-src_vocab', help="Path to an existing source vocabulary") parser.add_argument('-tgt_vocab', help="Path to an existing target vocabulary") parser.add_argument('-src_seq_length', type=int, default=64, help="Maximum source sequence length") parser.add_argument('-src_seq_length_trunc', type=int, default=0, help="Truncate source sequence length.") parser.add_argument('-tgt_seq_length', type=int, default=66, help="Maximum target sequence length to keep.") parser.add_argument('-tgt_seq_length_trunc', type=int, default=0, help="Truncate target sequence length.") parser.add_argument('-shuffle', type=int, default=1, help="Shuffle data") parser.add_argument('-asr', action='store_true', help="prepare data for asr task") parser.add_argument('-asr_format', default="h5", help="Format of asr data h5 or scp") parser.add_argument('-asr_hashing', action='store_true', help="hash the audio features so that joint training data ") parser.add_argument('-lm', action='store_true', help="prepare data for LM task") parser.add_argument('-fp16', action='store_true', help="store ASR data in fp16") parser.add_argument('-seed', type=int, default=3435, help="Random seed") parser.add_argument('-lower', action='store_true', help='lowercase data') parser.add_argument('-load_bpe_voc', action='store_true', help='load voc from bpe format') parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)') parser.add_argument('-sort_by_target', action='store_true', help='lowercase data') parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target') parser.add_argument('-report_every', type=int, default=100000, help="Report status every this many sentences") parser.add_argument('-reshape_speech', type=int, default=1, help="Reshaping the speech segments here. Mostly for compatibility..") opt = parser.parse_args() torch.manual_seed(opt.seed) def make_vocab(filenames, size, tokenizer): vocab = onmt.Dict([onmt.constants.PAD_WORD, onmt.constants.UNK_WORD, onmt.constants.BOS_WORD, onmt.constants.EOS_WORD], lower=opt.lower) for filename in filenames: print("Reading file %s ... 
" % filename) with open(filename) as f: for sent in f.readlines(): tokens = tokenizer.tokenize(sent) for token in tokens: vocab.add(token) original_size = vocab.size() vocab = vocab.prune(size) print('Created dictionary of size %d (pruned from %d)' % (vocab.size(), original_size)) return vocab def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, join=False): vocab = None if vocab_file is not None: # If given, load existing word dictionary. print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...') if not opt.load_bpe_voc: vocab = onmt.Dict() else: vocab = onmt.Dict([onmt.constants.PAD_WORD, onmt.constants.UNK_WORD, onmt.constants.BOS_WORD, onmt.constants.EOS_WORD], lower=opt.lower) vocab.loadFile(vocab_file) print('Loaded ' + str(vocab.size()) + ' ' + name + ' words') if vocab is None: print('Building ' + name + ' vocabulary...') gen_word_vocab = make_vocab(data_files, vocab_size, tokenizer) vocab = gen_word_vocab print() return vocab def save_vocabulary(name, vocab, file): print('Saving ' + name + ' vocabulary to \'' + file + '\'...') vocab.writeFile(file) def make_lm_data(tgt_file, tgt_dicts, tokenizer, max_tgt_length=1000, input_type='word', data_type='int32'): tgt = [] sizes = [] count, ignored = 0, 0 print('Processing %s ...' % (tgt_file)) tgtf = open(tgt_file) # eos = torch.LongTensor(1).fill_(onmt.constants.EOS) # # tensors = [eos] tensors = list() # find the number of words in the sentence while True: tline = tgtf.readline() # normal end of file if tline == "": break tline = tline.strip() # source and/or target are empty if tline == "": print('WARNING: ignoring an empty line (' + str(count + 1) + ')') continue tgt_words = tokenizer.tokenize(tline) # only uses EOS for language model tensor = tgt_dicts.convertToIdx(tgt_words, onmt.constants.UNK_WORD, None, onmt.constants.EOS_WORD, type=data_type) tensors.append(tensor) count = count + 1 if count % opt.report_every == 0: print('... %d sentences prepared' % count) tgtf.close() return tensors def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=256, max_tgt_length=256, add_bos=True, data_type='int64'): """ :param src_file: source text file (to be read) :param tgt_file: target text file (to be read) :param src_dicts: source vocabulary :param tgt_dicts: target vocabulary :param max_src_length: filter sentences longer than this :param max_tgt_length: filter sentences longer than this :param add_bos: add <bos> to the target part :param tokenizer: tokenizer to tokenize sentence :param data_type: data type for storage :return: """ src, tgt = [], [] src_sizes = [] tgt_sizes = [] count, ignored = 0, 0 print('Processing %s & %s ...' % (src_file, tgt_file)) srcf = open(src_file) tgtf = open(tgt_file) while True: sline = srcf.readline() tline = tgtf.readline() # normal end of file if sline == "" and tline == "": break # source or target does not have same number of lines if sline == "" or tline == "": print('WARNING: src and tgt do not have the same # of sentences') break sline = sline.strip() tline = tline.strip() # source and/or target are empty if sline == "" or tline == "": print('WARNING: ignoring an empty line (' + str(count + 1) + ')') continue # TODO: source tokenizer != target tokenizer (unlikely in practice) src_words = tokenizer.tokenize(sline) tgt_words = tokenizer.tokenize(tline) if len(src_words) <= max_src_length \ and len(tgt_words) <= max_tgt_length - 2: # Check truncation condition. 
if opt.src_seq_length_trunc != 0: src_words = src_words[:opt.src_seq_length_trunc] if opt.tgt_seq_length_trunc != 0: tgt_words = tgt_words[:opt.tgt_seq_length_trunc] # For src text, we use BOS for possible reconstruction src += [src_dicts.convertToIdx(src_words, onmt.constants.UNK_WORD)] if add_bos: tgt += [tgt_dicts.convertToIdx(tgt_words, onmt.constants.UNK_WORD, onmt.constants.BOS_WORD, onmt.constants.EOS_WORD, type=data_type)] else: tgt += [tgt_dicts.convertToIdx(tgt_words, onmt.constants.UNK_WORD, None, onmt.constants.EOS_WORD, type=data_type)] src_sizes += [len(src_words)] tgt_sizes += [len(tgt_words)] else: ignored += 1 count += 1 if count % opt.report_every == 0: print('... %d sentences prepared' % count) srcf.close() tgtf.close() # don't sort anymore # if opt.shuffle == 1: # print('... shuffling sentences') # perm = torch.randperm(len(src)) # src = [src[idx] for idx in perm] # tgt = [tgt[idx] for idx in perm] # src_sizes = [src_sizes[idx] for idx in perm] # tgt_sizes = [tgt_sizes[idx] for idx in perm] # # print('... sorting sentences by size') # # z = zip(src, tgt, src_sizes, tgt_sizes) # # # ultimately sort by target size # sorted_z = sorted(sorted(z, key=lambda x: x[2]), key=lambda x: x[3]) # # src = [z_[0] for z_ in sorted_z] # tgt = [z_[1] for z_ in sorted_z] print(('Prepared %d sentences ' + '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') % (len(src), ignored, max_src_length, max_tgt_length)) return src, tgt def make_asr_data(src_file, tgt_file, tgt_dicts, max_src_length=64, max_tgt_length=64, input_type='word', stride=1, concat=1, prev_context=0, fp16=False, reshape=True, asr_format="h5", asr_hashing=False): src, tgt = [], [] # sizes = [] src_sizes = [] tgt_sizes = [] count, ignored = 0, 0 n_unk_words = 0 print('Processing %s & %s ...' % (src_file, tgt_file)) if asr_format == "h5": file_idx = -1 if src_file[-2:] == "h5": srcf = h5.File(src_file, 'r') else: file_idx = 0 srcf = h5.File(src_file + "." + str(file_idx) + ".h5", 'r') elif asr_format in ["scp", "kaldi"]: import kaldiio from kaldiio import ReadHelper audio_data = iter(ReadHelper('scp:' + src_file)) tgtf = open(tgt_file) index = 0 s_prev_context = [] t_prev_context = [] while True: tline = tgtf.readline() # normal end of file if tline == "": break if asr_format == "h5": if str(index) in srcf: feature_vectors = np.array(srcf[str(index)]) elif file_idx != -1: srcf.close() file_idx += 1 srcf = h5.File(src_file + "." 
+ str(file_idx) + ".h5", 'r') feature_vectors = np.array(srcf[str(index)]) else: print("No feature vector for index:", index, file=sys.stderr) exit(-1) elif asr_format in ["scp", "kaldi"]: _, feature_vectors = next(audio_data) else: raise NotImplementedError if asr_hashing: audio_hash = hashlib.sha1(feature_vectors).hexdigest() if stride == 1: sline = torch.from_numpy(feature_vectors) else: sline = torch.from_numpy(feature_vectors[0::opt.stride]) # handling reshaping the file if reshape: if concat != 1: # the number of frames added to make the length divisible by 4 add = (concat - sline.size()[0] % concat) % concat # create the additional zero frames z = torch.FloatTensor(add, sline.size()[1]).zero_() # added to the original tensor (sline) sline = torch.cat((sline, z), 0) # reshape: every $concat frames into one sline = sline.reshape((int(sline.size()[0] / concat), sline.size()[1] * concat)) index += 1 tline = tline.strip() if prev_context > 0: print("Multiple ASR context isn't supported") raise NotImplementedError # s_prev_context.append(sline) # t_prev_context.append(tline) # for i in range(1,prev_context+1): # if i < len(s_prev_context): # sline = torch.cat((torch.cat((s_prev_context[-i-1],torch.zeros(1,sline.size()[1]))),sline)) # tline = t_prev_context[-i-1]+" # "+tline # if len(s_prev_context) > prev_context: # s_prev_context = s_prev_context[-1*prev_context:] # t_prev_context = t_prev_context[-1*prev_context:] # source and/or target are empty if tline == "": print('WARNING: ignoring an empty line (' + str(count + 1) + ')') continue if input_type == 'word': tgt_words = tline.split() elif input_type == 'char': tgt_words = split_line_by_char(tline) if len(tgt_words) <= max_tgt_length - 2 and sline.size(0) <= max_src_length: # Check truncation condition. if opt.tgt_seq_length_trunc != 0: tgt_words = tgt_words[:opt.tgt_seq_length_trunc] if fp16: sline = sline.half() src += [sline] tgt_tensor = tgt_dicts.convertToIdx(tgt_words, onmt.constants.UNK_WORD, onmt.constants.BOS_WORD, onmt.constants.EOS_WORD) tgt += [tgt_tensor] src_sizes += [len(sline)] tgt_sizes += [len(tgt_words)] unks = tgt_tensor.eq(onmt.constants.UNK).sum().item() n_unk_words += unks # if unks > 0: # if "<unk>" not in tline: # print("DEBUGGING: This line contains UNK: %s" % tline) else: ignored += 1 count += 1 if count % opt.report_every == 0: print('... %d sentences prepared' % count) if asr_format == "h5": srcf.close() tgtf.close() print('Total number of unk words: %d' % n_unk_words) # if opt.shuffle == 1: # print('... shuffling sentences') # perm = torch.randperm(len(src)) # src = [src[idx] for idx in perm] # tgt = [tgt[idx] for idx in perm] # src_sizes = [src_sizes[idx] for idx in perm] # tgt_sizes = [tgt_sizes[idx] for idx in perm] # # print('... 
sorting sentences by size') # # z = zip(src, tgt, src_sizes, tgt_sizes) # # # ultimately sort by source size # sorted_z = sorted(sorted(z, key=lambda x: x[3]), key=lambda x: x[2]) # # src = [z_[0] for z_ in sorted_z] # tgt = [z_[1] for z_ in sorted_z] print(('Prepared %d sentences ' + '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') % (len(src), ignored, max_src_length, max_tgt_length)) if asr_hashing: return audio_data, src, tgt else: return src, tgt def main(): dicts = {} tokenizer = onmt.Tokenizer(opt.input_type, opt.lower) # construct set of languages from the training languages src_langs = opt.train_src_lang.split("|") tgt_langs = opt.train_tgt_lang.split("|") langs = (src_langs + tgt_langs) langs = list(set(langs)) dicts['langs'] = dict() for lang in langs: idx = len(dicts['langs']) dicts['langs'][lang] = idx print(dicts['langs']) src_train_files = opt.train_src.split("|") tgt_train_files = opt.train_tgt.split("|") # for ASR and LM we only need to build vocab for the 'target' language if opt.asr or opt.lm: dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab, opt.tgt_vocab_size, tokenizer) elif opt.join_vocab: dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab, opt.tgt_vocab_size, tokenizer) dicts['tgt'] = dicts['src'] # Translation model else: dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab, opt.src_vocab_size, tokenizer) dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab, opt.tgt_vocab_size, tokenizer) if opt.lm: print('Preparing training language model ...') train = dict() train['tgt'] = make_lm_data(opt.train_tgt, dicts['tgt'], tokenizer) train['src'] = None valid = dict() valid['tgt'] = make_lm_data(opt.valid_tgt, dicts['tgt'], tokenizer) valid['src'] = None elif opt.asr: print('Preparing training acoustic model ...') src_input_files = opt.train_src.split("|") tgt_input_files = opt.train_tgt.split("|") src_langs = opt.train_src_lang.split("|") tgt_langs = opt.train_tgt_lang.split("|") assert len(src_input_files) == len(src_langs) assert len(src_input_files) == len(tgt_input_files) assert len(tgt_input_files) == len(tgt_langs) n_input_files = len(src_input_files) train = dict() train['src'], train['tgt'] = list(), list() train['src_lang'], train['tgt_lang'] = list(), list() for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs): src_data, tgt_data = make_asr_data(src_file, tgt_file, dicts['tgt'], max_src_length=opt.src_seq_length, max_tgt_length=opt.tgt_seq_length, input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, fp16=opt.fp16, reshape=(opt.reshape_speech == 1), asr_format=opt.asr_format, asr_hashing=opt.asr_hashing) n_samples = len(src_data) if n_input_files == 1: # For single-file cases we only need to have 1 language per file # which will be broadcasted src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])] else: # each sample will have a different language id src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)] train['src'] += src_data train['tgt'] += tgt_data train['src_lang'] += src_lang_data train['tgt_lang'] += tgt_lang_data # train = dict() # train['src'], train['tgt'] = print('Preparing validation ...') src_input_files = opt.valid_src.split("|") tgt_input_files = opt.valid_tgt.split("|") 
src_langs = opt.valid_src_lang.split("|") tgt_langs = opt.valid_tgt_lang.split("|") assert len(src_input_files) == len(src_langs) assert len(src_input_files) == len(tgt_input_files) assert len(tgt_input_files) == len(tgt_langs) n_input_files = len(src_input_files) valid = dict() valid['src'], valid['tgt'] = list(), list() valid['src_lang'], valid['tgt_lang'] = list(), list() for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs): src_data, tgt_data = make_asr_data(src_file, tgt_file, dicts['tgt'], max_src_length=max(1024, opt.src_seq_length), max_tgt_length=max(1024, opt.tgt_seq_length), input_type=opt.input_type, stride=opt.stride, concat=opt.concat, prev_context=opt.previous_context, fp16=opt.fp16, reshape=(opt.reshape_speech == 1), asr_format=opt.asr_format) n_samples = len(src_data) if n_input_files == 1: # For single-file cases we only need to have 1 language per file # which will be broadcasted src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])] else: # each sample will have a different language id src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)] valid['src'] += src_data valid['tgt'] += tgt_data valid['src_lang'] += src_lang_data valid['tgt_lang'] += tgt_lang_data else: print('Preparing training translation model...') src_input_files = opt.train_src.split("|") tgt_input_files = opt.train_tgt.split("|") src_langs = opt.train_src_lang.split("|") tgt_langs = opt.train_tgt_lang.split("|") assert len(src_input_files) == len(src_langs) assert len(src_input_files) == len(tgt_input_files) assert len(tgt_input_files) == len(tgt_langs) n_input_files = len(src_input_files) train = dict() train['src'], train['tgt'] = list(), list() train['src_lang'], train['tgt_lang'] = list(), list() for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs): src_data, tgt_data = make_translation_data(src_file, tgt_file, dicts['src'], dicts['tgt'], tokenizer, max_src_length=opt.src_seq_length, max_tgt_length=opt.tgt_seq_length, add_bos=(not opt.no_bos), data_type=opt.data_type) n_samples = len(src_data) if n_input_files == 1: # For single-file cases we only need to have 1 language per file # which will be broadcasted src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])] else: # each sample will have a different language id src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)] train['src'] += src_data train['tgt'] += tgt_data train['src_lang'] += src_lang_data train['tgt_lang'] += tgt_lang_data print('Preparing validation ...') src_input_files = opt.valid_src.split("|") tgt_input_files = opt.valid_tgt.split("|") src_langs = opt.valid_src_lang.split("|") tgt_langs = opt.valid_tgt_lang.split("|") assert len(src_input_files) == len(src_langs) assert len(src_input_files) == len(tgt_input_files) assert len(tgt_input_files) == len(tgt_langs) n_input_files = len(src_input_files) valid = dict() valid['src'], valid['tgt'] = list(), list() valid['src_lang'], valid['tgt_lang'] = list(), list() for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs): src_data, tgt_data = make_translation_data(src_file, tgt_file, 
dicts['src'], dicts['tgt'], tokenizer, max_src_length=max(1024, opt.src_seq_length), max_tgt_length=max(1024, opt.tgt_seq_length), add_bos=(not opt.no_bos), data_type=opt.data_type) n_samples = len(src_data) if n_input_files == 1: # For single-file cases we only need to have 1 language per file # which will be broadcasted src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])] else: # each sample will have a different language id src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)] tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)] valid['src'] += src_data valid['tgt'] += tgt_data valid['src_lang'] += src_lang_data valid['tgt_lang'] += tgt_lang_data if opt.src_vocab is None and opt.asr == False and opt.lm == False: save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict') if opt.tgt_vocab is None: save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict') if opt.format in ['raw', 'bin']: # save dicts in this format torch.save(dicts, opt.save_data + '.dict.pt') print('Saving data to \'' + opt.save_data + '.train.pt\'...') save_data = {'dicts': dicts, 'type': opt.src_type, 'train': train, 'valid': valid} torch.save(save_data, opt.save_data + '.train.pt') print("Done") elif opt.format in ['mmap', 'mmem']: train = defaultdict(lambda: None, train) valid = defaultdict(lambda: None, valid) print('Saving data to memory indexed data files') from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder if opt.asr: print("ASR data format isn't compatible with memory indexed format") raise AssertionError # save dicts in this format torch.save(dicts, opt.save_data + '.dict.pt') # binarize the training set first for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang']: if train[set_] is None: continue if opt.data_type == 'int64': dtype = np.int64 else: dtype = np.int32 if set_ == 'src' and opt.asr: if opt.fp16: dtype = np.float16 else: dtype = np.float32 train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype) # add item from training data to the indexed data for tensor in train[set_]: train_data.add_item(tensor) train_data.finalize(opt.save_data + ".train.%s.idx" % set_) del train_data if valid[set_] is None: continue if set_ == 'src' and opt.asr: dtype = np.double valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype) # add item from training data to the indexed data for tensor in valid[set_]: valid_data.add_item(tensor) valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_) del valid_data else: raise NotImplementedError if __name__ == "__main__": main() ```
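The core text-to-tensor path used by `make_vocab` and `make_translation_data` above can be reduced to a few calls; the sketch below shows that path on a toy sentence, reusing only functions that already appear in this script (`onmt.Tokenizer`, `onmt.Dict`, `convertToIdx`). The sentence and vocabulary contents are toy values.

```python
# Toy illustration of the tokenize -> vocabulary -> index-tensor path used above.
import onmt

tokenizer = onmt.Tokenizer('word', True)  # word-level input, lowercased
vocab = onmt.Dict([onmt.constants.PAD_WORD, onmt.constants.UNK_WORD,
                   onmt.constants.BOS_WORD, onmt.constants.EOS_WORD], lower=True)

for token in tokenizer.tokenize("a small example sentence"):
    vocab.add(token)

# Convert a sentence to indices, adding BOS/EOS as the target side does
ids = vocab.convertToIdx(tokenizer.tokenize("a small example"),
                         onmt.constants.UNK_WORD,
                         onmt.constants.BOS_WORD,
                         onmt.constants.EOS_WORD)
print(ids)
```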
{ "source": "jniehues-kit/sacrebleu", "score": 3 }
#### File: sacrebleu/sacrebleu/resegment.py ```python from __future__ import print_function import sys import numpy as np from tokenizers import TOKENIZERS, DEFAULT_TOKENIZER class Resegment(): """A class for resegmenting SLT to fit reference sentence segmentation""" def __init__(self,tokenize): self.tokenizer = TOKENIZERS[tokenize](); def align(self,ref,hyp): """First align tokenized hypothesis to tokenized reference Then align tokenized hypothesis to original hypothesis """ refLines = [self.tokenizer(l.strip().lower()) for l in ref] hypLines = [l.strip() for l in hyp] hypLinesTok = [self.tokenizer(l.lower()).strip() for l in hyp] hypLinesTok = self.calcAlignment(refLines,hypLinesTok,500,2,True); hypLines = self.calcAlignment(hypLinesTok,hypLines,250,2,False); return hypLines def calcAlignment(self,refLines,hypLines,beam,replaceCost,words): """Calculate alignment Then align remaing word to segments """ refString = " ".join(refLines) hypString = " ".join(hypLines) if(words): refWords = refString.split() hypWords = hypString.split() else: refWords = refString hypWords = hypString #Matrix stores estimate alignment points based on length ratio to perform beam search matrix = np.zeros((len(refWords)+1), dtype=np.int); i=0 while(i < len(refWords)): matrix[i]=int(1.0*i/len(refWords)*len(hypWords)) i+=1 matrix[len(refWords)] = matrix[len(refWords)-1] #Calcuate alignment via minimal edit distance operations = self.matches(refWords,hypWords,matrix,beam,replaceCost); #iterate through text an align the non-matched words op=0 length=0 hyp_length=0 result = [] for i in range(len(refLines)): if(words): length += len(refLines[i].split()) else: #plus space between lines length += len(refLines[i])+1 #print ("Ref:",refLines[i]) #print ("Links:",end=" ") #find last matching block in line while(op+1 < len(operations) and operations[op+1][0] < length): #print (operations[op+1][0],refWords[operations[op+1][0]],end=" ") op += 1; #print ("") #match is across reference regment boundaries if (operations[op][0] + operations[op][2] >=length): #take split at the appropriate point matchingSequence = length - operations[op][0] end = operations[op][1] + matchingSequence else: #start of nonmatching start_ref = operations[op][0] + operations[op][2] start_hyp = operations[op][1] + operations[op][2] if(op +1 < len(operations)): next_ref = operations[op+1][0] next_hyp = operations[op+1][1] else: next_ref = len(refWords) next_hyp = len(hypWords) #take same ratio in source and target of non-matches ratio = 1.0*(length - start_ref) / (next_ref - start_ref) end = int(ratio * (next_hyp - start_hyp))+start_hyp #only split on spaces if(not words): p1 = hypWords.find(" ",end) p2 = hypWords.rfind(" ",hyp_length,end) diff1 = p1-end diff2 = end-p2 minDiff = 0; if(p1 == -1): if(p2 != -1): end = p2+1 else: if(p2 == -1 or diff1 < diff2): end = p1+1 else: end = p2+1 if (words): result.append(" ".join(hypWords[hyp_length:end])) #print("Hype:"," ".join(hypWords[hyp_length:end])) else: result.append(hypWords[hyp_length:end]) hyp_length = end assert(len(result) == len(refLines)); return result; def matches(self,s1,s2,anchor,beam=100,replaceCost=2): l1=len(s1) l2=len(s2) #store matrix for optimal path matrix = np.zeros((l1+1,2*beam+1), dtype=np.int) #store backpointers backx=np.zeros((l1+1,2*beam+1), dtype=np.int) backy=np.zeros((l1+1,2*beam+1), dtype=np.int) hits=np.zeros((l1+1), dtype=np.int) hits_sum=np.zeros((l1+1), dtype=np.int) for i in range(l1+1): # if(i <= 5): # anchor[i] = 1.0*i/l1*l2 # else: # sum = 0; # for j in range(1,6): # sum += 
hits[i-j]+j/l1*l2 # anchor[i] = int(sum/5) for j in range(2*beam+1): if j == 0: y=anchor[i]-beam+j matrix[i][j] = i+y backx[i][j]=i-1 backy[i][j]=0 elif i == 0: y=anchor[i]-beam+j matrix[i][j] = y backx[i][j]=0 backy[i][j]=j-1 else: y=anchor[i]-beam+j #anchor of previous position might be different prevJ=y-anchor[i-1]+beam #step to the right matrix[i][j] = matrix[i][j-1] + 1 backx[i][j]=i backy[i][j]=j-1 #replacement or match if(prevJ > 0 and prevJ < 2*beam+1): jump = matrix[i-1][prevJ-1] + replaceCost if y > 0 and y <= l2 and s1[i-1].lower() == s2[y-1].lower(): #print("Match",i-1,y-1) jump = matrix[i-1][prevJ-1] hits_sum[i] +=1; hits[i] += y; if(jump < matrix[i][j]): matrix[i][j] = jump backx[i][j]=i-1 backy[i][j]=prevJ-1 #step down if(prevJ >= 0 and prevJ < 2*beam+1 and matrix[i-1][prevJ] + 1 < matrix[i][j]): matrix[i][j] = matrix[i-1][prevJ] + 1 backx[i][j]=i-1 backy[i][j]=prevJ if(hits_sum[i] > 0): hits[i] /= hits_sum[i] elif(i == 0): hits[i] = 0 else: hits[i] = hits[i-1]+l2/l1 matches = [] i=l1 j=2*beam #output matches while(i >0 or j > 0): y=anchor[i]-beam+j prevJ=y-anchor[i-1]+beam if(y > 0 and y <= l2 and backx[i][j] == i-1 and backy[i][j] == prevJ-1 and s1[i-1].lower() == s2[y-1].lower()): matches.append((i-1,y-1,1)) ii=backx[i][j] jj=backy[i][j] i=ii j=jj matches.reverse() matches.append((l1,l2,1)) return matches ```
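A toy run of the resegmenter above: the hypothesis contains the same words as the reference but split differently, and `align` re-cuts it to match the reference segmentation. The sentences are made-up data and `DEFAULT_TOKENIZER` is the tokenizer name imported at the top of this file.

```python
# Toy example; Resegment and DEFAULT_TOKENIZER are defined/imported above.
ref = ["this is the first reference sentence .",
       "and here comes the second one ."]
hyp = ["this is the first reference",
       "sentence . and here comes the second one ."]

resegmenter = Resegment(DEFAULT_TOKENIZER)
for line in resegmenter.align(ref, hyp):
    print(line)
```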
{ "source": "jniemenmaa/cyris", "score": 3 }
#### File: instantiation/ruleset_modification/append_ruleset.py ```python import sys RULESET_FILE = sys.argv[1] IPCONFIGS_TEMP = sys.argv[2] class AppendRuleset(): def readRuleset(self): list_rules = [] f = open(RULESET_FILE, "r") if not f: print("Cannot open firewall ruleset file '{}' => abort ruleset append".format(RULESET_FILE)); return None for line in f: list_rules.append(line) return list_rules def appendRuleset(self): list_rules = self.readRuleset() if list_rules: with open(IPCONFIGS_TEMP, "a") as f: for rule in list_rules: f.write(rule) f.write("COMMIT\n") appendRuleset = AppendRuleset() appendRuleset.appendRuleset() ``` #### File: cyris/main/aws_image.py ```python def create_img(client, ins_id, name, des='New image created from the previous_instance(BaseVM)'): response = client.create_image( Description=des, InstanceId=ins_id, Name=name, #NoReboot=True|False ) img_id = response['ImageId'] return img_id # Get AWS image description def describe_image(client, img_id): response = client.describe_images( ImageIds=[ img_id, ] ) state = response['Images'][0]['State'] return state ``` #### File: cyris/main/aws_instances.py ```python def create_instances(client, gNames, basevm_id, numOfIns,basevm_os_type): # AMI IDs for various instance types: # - Amazon Linux 2 AMI (HVM), SSD Volume Type - ami-0323c3dd2da7fb37d # - Amazon Linux AMI 2018.03.0 (HVM), SSD Volume Type - ami-01d025118d8e760db # - Red Hat Enterprise Linux 8 (HVM), SSD Volume Type - ami-098f16afa9edf40be # - Ubuntu Server 16.04 LTS (HVM), SSD Volume Type - ami-039a49e70ea773ffc # - Ubuntu Server 18.04 LTS (HVM), SSD Volume Type - ami-085925f297f89fce1 # - Ubuntu Server 20.04 LTS (HVM), SSD Volume Type - ami-068663a3c619dd892 # - Microsoft Windows Server 2019 Base - ami-04a0ee204b44cc91a dic = {'amazon_linux':'ami-01d025118d8e760db', 'amazon_linux2':'ami-0323c3dd2da7fb37d', 'red_hat':'ami-098f16afa9edf40be', 'ubuntu_16':'ami-039a49e70ea773ffc', 'ubuntu_18':'ami-085925f297f89fce1', 'ubuntu_20':'ami-068663a3c619dd892', 'windows':'ami-04a0ee204b44cc91a'} if basevm_os_type not in dic.keys(): print('error ami') quit(-1) else: img_id = dic[basevm_os_type] # gNames: list, eg: ['aa','bb'] # tags: list[dict], eg: [{}] # numOfIns: int response = client.run_instances( BlockDeviceMappings=[ { 'DeviceName': '/dev/xvda', 'VirtualName': 'Desktop', 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 8, 'VolumeType': 'gp2' }, }, ], ImageId=img_id, InstanceType='t2.micro', KeyName='TESTKEY', MaxCount= numOfIns, MinCount=1, Monitoring={ 'Enabled':True }, SecurityGroups=gNames, TagSpecifications=[ { 'ResourceType': 'instance', 'Tags': [ { 'Key': 'Name', 'Value': basevm_id } ] } ] ) n = len(response['Instances']) if n == numOfIns: print('* INFO: cyris_aws: %s instance(s) created.'%(n)) elif n < numOfIns: print('* ERROR: cyris_aws: Limit was exceeded => only %s instance(s) created.'%(n)) else: print('* ERROR: cyris_aws: Instance creation failed.') ins_ids = [] for x in response['Instances']: ins_ids.append(x['InstanceId']) return ins_ids # Check status of AWS instances def describe_instance_status(client, ins_ids): response = client.describe_instance_status( InstanceIds=ins_ids, IncludeAllInstances=True ) status = response['InstanceStatuses'][0]['InstanceState']['Name'] return status # Get public IP addresses of AWS instances def publicIp_get(client,ins_ids): response = client.describe_instances(InstanceIds=ins_ids) return response['Reservations'][0]['Instances'][0]['PublicIpAddress'] # Stop AWS instances # - input: ins_ids: list, the id of the 
instances to be stopped # - output: none (the stop request is submitted to AWS; this function currently returns None) def stop_instances(client,ins_ids): response = client.stop_instances( InstanceIds=ins_ids ) return None # Clone AWS instances def clone_instances(client, gNames, key_name, cloned_name, numOfIns, img_id): response = client.run_instances( BlockDeviceMappings=[ { 'DeviceName': '/dev/xvda', 'VirtualName': 'Desktop', 'Ebs': { 'DeleteOnTermination': True, 'VolumeSize': 8, 'VolumeType': 'gp2' }, }, ], ImageId=img_id, InstanceType='t2.micro', KeyName=key_name, MaxCount= numOfIns, MinCount=1, Monitoring={ 'Enabled':True }, SecurityGroups=gNames, TagSpecifications= [ { 'ResourceType': 'instance', 'Tags':[{ 'Key': 'Name', 'Value': cloned_name }] } ] ) n = len(response['Instances']) ins_ids = [] for i in range(n): ins_id = response['Instances'][i]['InstanceId'] ins_ids.append(ins_id) return ins_ids ''' import boto3 import time def main(): client = boto3.client('ec2', region_name='us-east-1') gNames = ['cr01-sg'] tags = [ { 'ResourceType': 'instance', 'Tags': [ { 'Key': 'Name', 'Value': 'test3' } ] } ] numOfIns = 1 ins_ids = create_instances(client, gNames, tags, numOfIns) # check whether the state is 'running' print('Check the status:') for i in range(10): res = describe_instance_status(client,ins_ids) print(res) if res[ins_ids[0]] == 'running': break time.sleep(10) # stop the instance print('Stop the instance:') res = stop_instances(client,ins_ids) print(res) main() ''' ``` #### File: cyris/main/aws_sg.py ```python def create_security_group(client,gName): # input: gName: string # output: response of the create_security_group call (includes the 'GroupId' field) groupId = client.create_security_group( Description='SG for SSH-access', GroupName=gName ) return groupId ''' # example import boto3 client = boto3.client('ec2', region_name='us-east-1') gName = 'cr01-sg' s = create_security_group(client, gName) print(s) ''' # Authorize SSH ingress (TCP port 22 from anywhere) for the security group of an AWS client def edit_ingress(client, gName): response = client.authorize_security_group_ingress( CidrIp='0.0.0.0/0', FromPort=22, GroupName=gName, IpProtocol='tcp', ToPort=22 ) return response ''' # example: import boto3 client = boto3.client('ec2', region_name='us-east-1') gName = 'cr01-sg' s = edit_ingress(client, gName) print(s) ''' # Describe the security groups for an AWS client def describe_security_groups(client, gNames): response = client.describe_security_groups( GroupNames=gNames ) # GroupID = [] # GroupID.append(response['SecurityGroups'][0]['GroupId']) return response ''' # example: import boto3 gNames = ['cr01-sg'] client = boto3.client('ec2', region_name='us-east-1') r = describe_security_groups(client, gNames) print(r) ''' ``` #### File: cyris/main/check_description.py ```python import yaml import os import re from storyboard import Storyboard # List of range ids that are forbidden to use: # * 127 => overlap with loopback address if used for cyber range: 127.1.1.2 etc.
FORBIDDEN_ID_LIST = {127} FLAG = True DEBUG = False def raise_flag(error): print "* ERROR: check_description:", error global FLAG if FLAG == True: FLAG = False else: pass def get_existing_cr_id_list(cr_dir): cr_id_list = [] # Check that cyber range directory exists if os.path.isdir(cr_dir): # Loop for all the sub-directories inside it for cr_id in os.listdir(cr_dir): # Check whether the sub-directory name is made only of digits if cr_id.isdigit(): # Add the sub-directory name to the cyber range list as integer cr_id_list.append(int(cr_id)) return cr_id_list # Determine the set of networks that appear in the forwarding rules, both as src and dst def get_network_set(fw_rules): src_set = set() dst_set = set() for rule in fw_rules: elements = rule.strip().split(" ") for e in elements: # Add src networks to set (comma separated values allowed, but no space) if "src" in e: src_nw_string = e.split("=")[1] src_set = set().union(src_set, src_nw_string.split(",")) # Add dst networks to set (remove guest suffix that may follow) if "dst" in e: dst_nw_string = e.split("=")[1] dst_set = set().union(dst_set, [dst_nw_string.split(".")[0]]) # Create final set as union of src and dst networks nw_set = set().union(src_set, dst_set) return nw_set def check_description(filename, cr_dir): try: with open(filename, "r") as f: doc = yaml.load(f) except IOError, e: raise_flag(e) return FLAG except yaml.YAMLError, e: raise_flag(e) return FLAG # For each playbook in the training description. host_section = doc[0] guest_section = doc[1] clone_section = doc[2] for element in doc: if type(element) is dict: if Storyboard.HOST_SETTINGS in element.keys(): host_section = element elif Storyboard.GUEST_SETTINGS in element.keys(): guest_section = element elif Storyboard.CLONE_SETTINGS in element.keys(): clone_section = element else: raise_flag("Unknown section in description file: {0}".format(element)) else: raise_flag("Unknown element in description file: {0}".format(element)) # Store all guest and host ids that were defined in the # host_settings section, as well as forwarding rules defined_guest_ids = [] defined_host_ids = [] defined_forwarding_rules = [] ########################################################################### # Check the HOST_SETTINGS section if Storyboard.HOST_SETTINGS not in host_section.keys(): raise_flag("Section '{0}' is missing.".format(Storyboard.HOST_SETTINGS)) else: for host in host_section[Storyboard.HOST_SETTINGS]: host_id = Storyboard.NOT_AVAIL host_keys = host.keys() # ID tag if Storyboard.ID not in host_keys: raise_flag("Tag '{0}' is missing for one of the hosts in section '{1}'.".format(Storyboard.ID, Storyboard.HOST_SETTINGS)) else: host_id = host[Storyboard.ID] if not host_id in defined_host_ids: defined_host_ids.append(host_id) else: raise_flag("Host with id '{0}' is duplicated in section '{1}'.".format(host_id, Storyboard.HOST_SETTINGS)) host_keys.remove(Storyboard.ID) # MGMT_ADDR tag if Storyboard.MGMT_ADDR not in host_keys: raise_flag("Tag '{0}' is missing for host '{1}' in section '{2}' section.".format(Storyboard.MGMT_ADDR, host_id, Storyboard.HOST_SETTINGS)) else: host_keys.remove(Storyboard.MGMT_ADDR) # VIRBR_ADDR tag if Storyboard.VIRBR_ADDR not in host_keys: # Only raise flag if the host has KVM guests defined, so we need to check this first kvm_guest_defined = False for guest in guest_section.get(Storyboard.GUEST_SETTINGS): if guest_section.get(Storyboard.BASEVM_HOST) == host_id and guest.get(Storyboard.BASEVM_TYPE) == "kvm": kvm_guest_defined = True break # Raise flag if 
necessary if kvm_guest_defined: raise_flag("Tag '{0}' is missing for KVM host '{1}' in section '{2}'.".format(Storyboard.VIRBR_ADDR, host_id, Storyboard.HOST_SETTINGS)) else: host_keys.remove(Storyboard.VIRBR_ADDR) # ACCOUNT tag if Storyboard.ACCOUNT not in host_keys: raise_flag("Tag '{0}' is missing for host '{1}' in section '{2}.".format(Storyboard.ACCOUNT, host_id, Storyboard.HOST_SETTINGS)) else: host_keys.remove(Storyboard.ACCOUNT) # Check whether there are any (unknown) tags left in the list if host_keys: raise_flag("Unknown tag(s) for host '{0}' in section '{1}': {2}".format(host_id, Storyboard.HOST_SETTINGS, host_keys)) ########################################################################### # Check the GUEST_SETTINGS section if Storyboard.GUEST_SETTINGS not in guest_section.keys(): raise_flag("Section '{0}' is missing.".format(Storyboard.GUEST_SETTINGS)) else: for guest in guest_section[Storyboard.GUEST_SETTINGS]: guest_id = Storyboard.NOT_AVAIL guest_keys = guest.keys() # ID4GUEST tag if Storyboard.ID4GUEST not in guest_keys: raise_flag("Tag '{0}' is missing for one of the guests in section '{1}'.".format(Storyboard.ID4GUEST, Storyboard.GUEST_SETTINGS)) else: guest_id = guest[Storyboard.ID4GUEST] if not guest_id in defined_guest_ids: defined_guest_ids.append(guest_id) else: raise_flag("Guest with id '{0}' is duplicated in section '{1}'.".format(guest_id, Storyboard.GUEST_SETTINGS)) guest_keys.remove(Storyboard.ID4GUEST) # IP_ADDR tag (optional) if Storyboard.IP_ADDR in guest_keys: guest_keys.remove(Storyboard.IP_ADDR) # BASEVM_HOST tag if Storyboard.BASEVM_HOST not in guest_keys: raise_flag("Tag '{0}' is missing for guest '{1}' in section '{2}'.".format(Storyboard.BASEVM_HOST, guest_id, Storyboard.GUEST_SETTINGS)) else: guest_keys.remove(Storyboard.BASEVM_HOST) # BASEVM_CONFIG_FILE tag if Storyboard.BASEVM_CONFIG_FILE not in guest_keys: if guest.get(Storyboard.BASEVM_TYPE) == "kvm": raise_flag("Tag '{0}' is missing for KVM guest '{1}' in section '{2}'.".format(Storyboard.BASEVM_CONFIG_FILE, guest_id, Storyboard.GUEST_SETTINGS)) else: config_file = guest[Storyboard.BASEVM_CONFIG_FILE] # By convention, that VM disk image has same name with the config file, excluding the extension if ".xml" in config_file: harddisk_file = config_file.replace(".xml", "") if DEBUG: print config_file print harddisk_file # Check whether the VM config file and disk image have valid names if not os.path.exists(config_file): raise_flag("Tag '{0}' for guest '{1}' in section '{2}' references a non-existing VM configuration file: {3}".format(Storyboard.BASEVM_CONFIG_FILE, guest_id, Storyboard.GUEST_SETTINGS, config_file)) if not os.path.exists(harddisk_file): raise_flag("Tag '{0}' for guest '{1}' in section '{2}' implies a non-existing VM disk image: {3}".format(Storyboard.BASEVM_CONFIG_FILE, guest_id, Storyboard.GUEST_SETTINGS, harddisk_file)) guest_keys.remove(Storyboard.BASEVM_CONFIG_FILE) # BASEVM_TYPE tag if Storyboard.BASEVM_TYPE not in guest_keys: raise_flag("Tag '{0}' is missing for guest '{1}' in section '{2}'.".format(Storyboard.BASEVM_TYPE, guest_id, Storyboard.GUEST_SETTINGS)) else: guest_keys.remove(Storyboard.BASEVM_TYPE) # BASEVM_OS_TYPE tag (optional) if Storyboard.BASEVM_OS_TYPE in guest_keys: guest_keys.remove(Storyboard.BASEVM_OS_TYPE) # TASKS tag if Storyboard.TASKS in guest_keys and guest.get(Storyboard.TASKS): for task in guest[Storyboard.TASKS]: task_keys = task.keys() # ADD_ACCOUNT tag if Storyboard.ADD_ACCOUNT in task_keys: for account in task[Storyboard.ADD_ACCOUNT]: account_keys 
= account.keys() # ACCOUNT tag if Storyboard.ACCOUNT not in account_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.ACCOUNT, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.ADD_ACCOUNT, guest_id)) else: account_keys.remove(Storyboard.ACCOUNT) # PASSWD tag if Storyboard.PASSWD not in account_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.PASSWD, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.ADD_ACCOUNT, guest_id)) else: account_keys.remove(Storyboard.PASSWD) # FULL_NAME tag (optional) if Storyboard.FULL_NAME in account_keys: account_keys.remove(Storyboard.FULL_NAME) # Check whether there are any (unknown) tags left in the list if account_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.ADD_ACCOUNT, guest_id, account_keys)) task_keys.remove(Storyboard.ADD_ACCOUNT) # MODIFY_ACCOUNT tag if Storyboard.MODIFY_ACCOUNT in task_keys: for account in task[Storyboard.MODIFY_ACCOUNT]: account_keys = account.keys() # ACCOUNT tag if Storyboard.ACCOUNT not in account_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.ACCOUNT, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.MODIFY_ACCOUNT, guest_id)) else: account_keys.remove(Storyboard.ACCOUNT) # NEW_ACCOUNT and/or NEW_PASSWD tags new_tag_present = False if Storyboard.NEW_ACCOUNT in account_keys: new_tag_present = True account_keys.remove(Storyboard.NEW_ACCOUNT) if Storyboard.NEW_PASSWD in account_keys: new_tag_present = True account_keys.remove(Storyboard.NEW_PASSWD) if not new_tag_present: raise_flag("Neither tag '{0}' nor '{1}' are present in section '{2}', subsection '{3}' for task '{4}' of guests '{5}'.".format(Storyboard.NEW_ACCOUNT, Storyboard.NEW_PASSWD, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.MODIFY_ACCOUNT, guest_id)) # Check whether there are any (unknown) tags left in the list if account_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.MODIFY_ACCOUNT, guest_id, account_keys)) task_keys.remove(Storyboard.MODIFY_ACCOUNT) # INSTALL_PACKAGE tag if Storyboard.INSTALL_PACKAGE in task_keys: for package in task[Storyboard.INSTALL_PACKAGE]: package_keys = package.keys() # PACKAGE_MANAGER tag (optional) if Storyboard.PACKAGE_MANAGER in package_keys: package_keys.remove(Storyboard.PACKAGE_MANAGER) # NAME4PACKAGE tag if Storyboard.NAME4PACKAGE not in package_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.NAME4PACKAGE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.INSTALL_PACKAGE, guest_id)) else: package_keys.remove(Storyboard.NAME4PACKAGE) # VERSION tag (optional) if Storyboard.VERSION in package_keys: package_keys.remove(Storyboard.VERSION) # Check whether there are any (unknown) tags left in the list if package_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.INSTALL_PACKAGE, guest_id, package_keys)) task_keys.remove(Storyboard.INSTALL_PACKAGE) # EMULATE_ATTACK tag if Storyboard.EMULATE_ATTACK in task_keys: for attack in task[Storyboard.EMULATE_ATTACK]: 
attack_keys = attack.keys() # ATTACK_TYPE tag if Storyboard.ATTACK_TYPE not in attack_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.ATTACK_TYPE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_ATTACK, guest_id)) else: attack_keys.remove(Storyboard.ATTACK_TYPE) # TARGET_ACCOUNT type if Storyboard.TARGET_ACCOUNT not in attack_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.TARGET_ACCOUNT, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_ATTACK, guest_id)) else: attack_keys.remove(Storyboard.TARGET_ACCOUNT) # ATTEMPT_NUMBER tag if Storyboard.ATTEMPT_NUMBER not in attack_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.ATTEMPT_NUMBER, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_ATTACK, guest_id)) else: attack_keys.remove(Storyboard.ATTEMPT_NUMBER) # ATTACK_TIME tag (optional) if Storyboard.ATTACK_TIME in attack_keys: # Check parameter format is correct attack_time = attack[Storyboard.ATTACK_TIME] time_pattern1 = re.compile("[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]") time_pattern2 = re.compile("[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]") if not time_pattern1.match(str(attack_time)) and not time_pattern2.match(str(attack_time)): raise_flag("Format for value of tag '{0}' in section '{1}', subsection '{2}' for task '{3}' of guest '{4}' doesn't match pattern YYYY[-]MM[-]DD: {5}".format(Storyboard.ATTACK_TIME, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_ATTACK, guest_id, attack_time)) attack_keys.remove(Storyboard.ATTACK_TIME) # Check whether there are any (unknown) tags left in the list if attack_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_ATTACK, guest_id, attack_keys)) task_keys.remove(Storyboard.EMULATE_ATTACK) # EMULATE_TRAFFIC_CAPTURE_FILE tag if Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE in task_keys: for capture in task[Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE]: attack_type = Storyboard.NOT_AVAIL capture_keys = capture.keys() # FORMAT tag if Storyboard.FORMAT not in capture_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.FORMAT, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id)) else: capture_keys.remove(Storyboard.FORMAT) # FILE_NAME tag if Storyboard.FILE_NAME not in capture_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.FILE_NAME, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id)) else: capture_keys.remove(Storyboard.FILE_NAME) # ATTACK_TYPE tag if Storyboard.ATTACK_TYPE not in capture_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.ATTACK_TYPE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id)) else: attack_type = capture[Storyboard.ATTACK_TYPE] capture_keys.remove(Storyboard.ATTACK_TYPE) # ATTACK_SOURCE tag if Storyboard.ATTACK_SOURCE not in capture_keys: if attack_type == Storyboard.SSH_ATTACK: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}' (attack type 
'{5}').".format(Storyboard.ATTACK_SOURCE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id, attack_type)) else: # Nothing to do, since this tag is only required for the above attack pass else: capture_keys.remove(Storyboard.ATTACK_SOURCE) # NOISE_LEVEL tag if Storyboard.NOISE_LEVEL not in capture_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.NOISE_LEVEL, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id)) else: capture_keys.remove(Storyboard.NOISE_LEVEL) # Check whether there are any (unknown) tags left in the list if capture_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE, guest_id, capture_keys)) task_keys.remove(Storyboard.EMULATE_TRAFFIC_CAPTURE_FILE) # EMULATE_MALWARE tag if Storyboard.EMULATE_MALWARE in task_keys: for malware in task[Storyboard.EMULATE_MALWARE]: malware_mode = Storyboard.NOT_AVAIL malware_keys = malware.keys() # NAME4MALWARE tag if Storyboard.NAME4MALWARE not in malware_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.NAME4MALWARE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_MALWARE, guest_id)) else: malware_keys.remove(Storyboard.NAME4MALWARE) # MODE tag if Storyboard.MODE not in malware_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.MODE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_MALWARE, guest_id)) else: malware_mode = malware[Storyboard.MODE] malware_keys.remove(Storyboard.MODE) # CPU_UTILIZATION tag if Storyboard.CPU_UTILIZATION not in malware_keys: if malware_mode == Storyboard.DUMMY_CALCULATION: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}' (mode '{5}').".format(Storyboard.CPU_UTILIZATION, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_MALWARE, guest_id, malware_mode)) else: pass else: malware_keys.remove(Storyboard.CPU_UTILIZATION) # PORT tag if Storyboard.PORT not in malware_keys: if malware_mode == Storyboard.PORT_LISTENING: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}' (mode '{5}').".format(Storyboard.PORT, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_MALWARE, guest_id, malware_mode)) else: pass else: malware_keys.remove(Storyboard.PORT) # Check whether there are any (unknown) tags left in the list if malware_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EMULATE_MALWARE, guest_id, malware_keys)) task_keys.remove(Storyboard.EMULATE_MALWARE) # COPY_CONTENT tag if Storyboard.COPY_CONTENT in task_keys: for content in task[Storyboard.COPY_CONTENT]: content_keys = content.keys() # SRC tag if Storyboard.SRC not in content_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.SRC, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.COPY_CONTENT, guest_id)) else: # Check whether the source file or directory actually exists src = content[Storyboard.SRC] if not os.path.exists(src): raise_flag("Tag '{}' value '{}' must be the name of an 
existing file or directory (section '{}', for subsection '{}', task '{}' of guest '{}').".format(Storyboard.SRC, src, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.COPY_CONTENT, guest_id)) content_keys.remove(Storyboard.SRC) # DST tag if Storyboard.DST not in content_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.DST, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.COPY_CONTENT, guest_id)) else: content_keys.remove(Storyboard.DST) # Check whether there are any (unknown) tags left in the list if content_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.COPY_CONTENT, guest_id, content_keys)) task_keys.remove(Storyboard.COPY_CONTENT) # EXECUTE_PROGRAM tag if Storyboard.EXECUTE_PROGRAM in task_keys: for program in task[Storyboard.EXECUTE_PROGRAM]: program_keys = program.keys() # PROGRAM tag if Storyboard.PROGRAM not in program_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.PROGRAM, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EXECUTE_PROGRAM, guest_id)) else: program_keys.remove(Storyboard.PROGRAM) # ARGS tag (optional) if Storyboard.ARGS in program_keys: program_keys.remove(Storyboard.ARGS) # ID tag (optional) if Storyboard.ID in program_keys: program_keys.remove(Storyboard.ID) # INTERPRETER tag if Storyboard.INTERPRETER not in program_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.INTERPRETER, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EXECUTE_PROGRAM, guest_id)) else: program_keys.remove(Storyboard.INTERPRETER) # EXECUTE_TIME tag (optional) if Storyboard.EXECUTE_TIME in program_keys: program_keys.remove(Storyboard.EXECUTE_TIME) # Check whether there are any (unknown) tags left in the list if program_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.EXECUTE_PROGRAM, guest_id, program_keys)) task_keys.remove(Storyboard.EXECUTE_PROGRAM) # FIREWALL_RULES tag if Storyboard.FIREWALL_RULES in task_keys: for rule in task[Storyboard.FIREWALL_RULES]: rule_keys = rule.keys() # RULE tag if Storyboard.RULE not in rule_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for task '{3}' of guest '{4}'.".format(Storyboard.RULE, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.FIREWALL_RULES, guest_id)) else: # Check whether the firewall rules file actually exists rule_file = rule[Storyboard.RULE] if not os.path.exists(rule_file): raise_flag("Tag '{}' value '{}' must be the name of an existing firewall rules file (section '{}', for subsection '{}', task '{}' of guest '{}').".format(Storyboard.RULE, rule_file, Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.FIREWALL_RULES, guest_id)) rule_keys.remove(Storyboard.RULE) # Check whether there are any (unknown) tags left in the list if rule_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}' for task '{2}' of guest '{3}': {4}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, Storyboard.FIREWALL_RULES, guest_id, rule_keys)) task_keys.remove(Storyboard.FIREWALL_RULES) # Check whether there are any (unknown) tags left in the list if task_keys: raise_flag("Unknown tag in section '{0}', subsection '{1}': 
{2}".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, task_keys)) guest_keys.remove(Storyboard.TASKS) elif Storyboard.TASKS in guest_keys and not guest.get(Storyboard.TASKS): raise_flag("Section '{0}', subsection '{1}' for guest '{2}' cannot be empty.".format(Storyboard.GUEST_SETTINGS, Storyboard.TASKS, guest_id)) guest_keys.remove(Storyboard.TASKS) # Check whether there are any (unknown) tags left in the list if guest_keys: raise_flag("Unknown tag(s) in section '{0}': {1}".format(Storyboard.GUEST_SETTINGS, guest_keys)) ########################################################################### # Check the CLONE_SETTINGS section if Storyboard.CLONE_SETTINGS not in clone_section.keys(): raise_flag("Section '{0}' is missing.".format(Storyboard.CLONE_SETTINGS)) else: # Check syntax and keywords # Only one clone entry is supported in CLONE_SETTINGS, so we just get the first element # TODO: Print error if more entries are found clone = clone_section[Storyboard.CLONE_SETTINGS][0] clone_keys = clone.keys() # RANGE_ID tag if Storyboard.RANGE_ID not in clone_keys: raise_flag("Tag '{0}' is missing in section '{1}'.".format(Storyboard.RANGE_ID, Storyboard.CLONE_SETTINGS)) else: range_id = int(clone[Storyboard.RANGE_ID]) # Check whether the id is forbidden to use if range_id in FORBIDDEN_ID_LIST: raise_flag("Range id '{0}' is forbidden to use, choose another id.".format(range_id)) # Check whether the is in use cr_id_list = get_existing_cr_id_list(cr_dir) if range_id in cr_id_list: raise_flag("Range with id '{0}' already exists, choose another id.".format(range_id)) clone_keys.remove(Storyboard.RANGE_ID) # HOSTS tag if Storyboard.HOSTS not in clone_keys: raise_flag("Tag '{0}' is missing in section '{1}'.".format(Storyboard.HOSTS, Storyboard.CLONE_SETTINGS)) else: for host in clone[Storyboard.HOSTS]: host_id = Storyboard.NOT_AVAIL host_keys = host.keys() # HOST_ID tag if Storyboard.HOST_ID not in host_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}'.".format(Storyboard.HOST_ID, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS)) else: # Check whether the host id was already defined in the host_settings section host_id = host[Storyboard.HOST_ID] # Convert to list of hosts, in case comma-separated format is used # (and make sure to remove potential spaces first) host_id_list = host_id.replace(" ","").split(",") for host_id_item in host_id_list: # Check host id existence if host_id_item not in defined_host_ids: raise_flag("Host with id '{0}' mentioned in section '{1}', subsection '{2}' was not defined in the section '{3}'.".format(host_id_item, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.HOST_SETTINGS)) host_keys.remove(Storyboard.HOST_ID) # INSTANCE_NUMBER tag if Storyboard.INSTANCE_NUMBER not in host_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}'.".format(Storyboard.INSTANCE_NUMBER, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS)) else: host_keys.remove(Storyboard.INSTANCE_NUMBER) # GUESTS tag if Storyboard.GUESTS not in host_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}'.".format(Storyboard.GUESTS, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS)) else: entry_point_count = 0 for guest in host[Storyboard.GUESTS]: hosts_guest_keys = guest.keys() # GUEST_ID tag if Storyboard.GUEST_ID not in hosts_guest_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.GUEST_ID, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id)) else: guest_id 
= guest[Storyboard.GUEST_ID] if guest_id not in defined_guest_ids: raise_flag("Guest with id '{0}' mentioned in section '{1}', subsection '{2}' was not defined in the section '{3}'.".format(guest_id, Storyboard.CLONE_SETTINGS, Storyboard.GUESTS, Storyboard.GUEST_SETTINGS)) hosts_guest_keys.remove(Storyboard.GUEST_ID) # NUMBER tag # TODO: Add guest_id to messages if Storyboard.NUMBER not in hosts_guest_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.NUMBER, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id)) else: hosts_guest_keys.remove(Storyboard.NUMBER) # ENTRY_POINT tag if Storyboard.ENTRY_POINT in hosts_guest_keys: entry_point_count += 1 hosts_guest_keys.remove(Storyboard.ENTRY_POINT) # FORWARDING_RULES tag if Storyboard.FORWARDING_RULES in hosts_guest_keys: defined_forwarding_rules = [] for rule_set in guest[Storyboard.FORWARDING_RULES]: if Storyboard.RULE not in rule_set.keys(): raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}', subsubsection '{4}' of host '{5}'.".format(Storyboard.RULE, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, Storyboard.FORWARDING_RULES, host_id)) else: defined_forwarding_rules.append(rule_set[Storyboard.RULE]) hosts_guest_keys.remove(Storyboard.FORWARDING_RULES) # Check whether there are any (unknown) tags left in the list if hosts_guest_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}', for subsection '{2}' of host '{3}': {4}".format(Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id, host_keys)) # TODO: How to check this in case multiple hosts are used?! if entry_point_count == 0: raise_flag("Tag '{0}' doesn't appear for any guest in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.ENTRY_POINT, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id)) if entry_point_count > 1: raise_flag("Tag '{0}' appears for more than one guest in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.ENTRY_POINT, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id)) host_keys.remove(Storyboard.GUESTS) # TOPOLOGY tag if Storyboard.TOPOLOGY not in host_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.TOPOLOGY, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, host_id)) else: topology = host[Storyboard.TOPOLOGY][0] topology_keys = topology.keys() # TYPE tag if Storyboard.TYPE not in topology_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.TYPE, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.TOPOLOGY, host_id)) else: topology_keys.remove(Storyboard.TYPE) # NETWORKS tag if Storyboard.NETWORKS not in topology_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}' of host '{4}'.".format(Storyboard.NETWORKS, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.TOPOLOGY, host_id)) else: # Process each network definition, and check whether the forwarding rules specified previously # contain any undefined networks nw_set = get_network_set(defined_forwarding_rules) for network in topology[Storyboard.NETWORKS]: nw_name = Storyboard.NOT_AVAIL network_keys = network.keys() # NAME tag if Storyboard.NAME not in network_keys: raise_flag("Tag '{0}' is missing in 
section '{1}', subsection '{2}' for subsection '{3}', subsubsection '{4}' of host '{5}'.".format(Storyboard.NAME, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.TOPOLOGY, Storyboard.NETWORKS, host_id)) else: nw_name = network[Storyboard.NAME] # If network name present in nw_set, remove it to signify it was defined already if nw_name in nw_set: nw_set.remove(nw_name) network_keys.remove(Storyboard.NAME) # MEMBERS tag if Storyboard.MEMBERS not in network_keys: raise_flag("Tag '{0}' is missing in section '{1}', subsection '{2}' for subsection '{3}', subsubsection '{4}' of host '{5}'.".format(Storyboard.MEMBERS, Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.TOPOLOGY, Storyboard.NETWORKS, host_id)) else: network_keys.remove(Storyboard.MEMBERS) # GATEWAY tag if Storyboard.GATEWAY in network_keys: network_keys.remove(Storyboard.GATEWAY) # Check whether there are any (unknown) tags left in the list if network_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}', for subsection '{2}', subsubsection '{3}' of host '{4}', network '{5}': {6}".format(Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, Storyboard.TOPOLOGY, host_id, nw_name, network_keys)) # If there are still elements in nw_set, it means that the forwarding rules specified # previously contain undefined networks if nw_set: raise_flag("Undefined network(s) in section '{0}', subsection '{1}' for subsection '{2}', subsubsection '{3}' for host '{4}': {5}".format(Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, Storyboard.FORWARDING_RULES, host_id, list(nw_set))) topology_keys.remove(Storyboard.NETWORKS) # Check whether there are any (unknown) tags left in the list if topology_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}', for subsection '{2}', subsubsection '{3}' of host '{4}': {5}".format(Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, Storyboard.GUESTS, Storyboard.TOPOLOGY, host_id, host_keys)) host_keys.remove(Storyboard.TOPOLOGY) # Check whether there are any (unknown) tags left in the list if host_keys: raise_flag("Unknown tag(s) in section '{0}', subsection '{1}': {2}".format(Storyboard.CLONE_SETTINGS, Storyboard.HOSTS, host_keys)) clone_keys.remove(Storyboard.HOSTS) # Check whether there are any (unknown) tags left in the list if clone_keys: raise_flag("Unknown tag(s) in section '{0}': {1}".format(Storyboard.CLONE_SETTINGS, clone_keys)) return FLAG ``` #### File: cyris/main/entities.py ```python from collections import OrderedDict import yaml import string import random import os # Internal imports from storyboard import Storyboard DEBUG = False def represent_ordereddict(dumper, data): value = [] for item_key, item_value in data.items(): node_key = dumper.represent_data(item_key) node_value = dumper.represent_data(item_value) value.append((node_key, node_value)) return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) yaml.add_representer(OrderedDict, represent_ordereddict) ''' Object host is created for containing information about hosts that are specified in the description. It has variables for: + host_id: id of the host. + virbr_addr: default virtual bridge that KVM uses to connect to virtual machines. + mgmt_addr: management address that the host uses to connect to other hosts. + account: account on the host for cyris to operate. 
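Example (a minimal sketch; the id, addresses and account below are made-up values, which in practice come from the "host_settings" section of the range description):
    host = Host("host_1", "192.168.122.1", "172.16.1.2", "cyuser")
    print host   # host_id: host_1, virbr_addr: 192.168.122.1, mgmt_addr: 172.16.1.2, account: cyuser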
''' class Host(object): def __init__(self, host_id, virbr_addr, mgmt_addr, account): self.host_id = host_id self.virbr_addr = virbr_addr self.mgmt_addr = mgmt_addr self.account = account def getHostId(self): return self.host_id def getVirbrAddr(self): return self.virbr_addr def getMgmtAddr(self): return self.mgmt_addr def getAccount(self): return self.account def __str__(self): return "host_id: " + self.getHostId() + ", virbr_addr: " + self.getVirbrAddr() + ", mgmt_addr: " + self.getMgmtAddr() + ", account: " + self.getAccount() ''' Object guest is created for containing information of base image guests that are specified in the description. It has variables for: @param guest_id Id of the guest (desktop, webserver, etc.). @param basevm_addr IP address of the base image guest. @param root_passwd Password of the root account for cyris to access and operate. @param basevm_host The location of the host that has the base image guest. Normally it is on the master host. @param basevm_config_file The location of the xml config file of the base image guest. Normally it is in the same location of the basevm_host. @param basevm_type Type of the base image guest (raw, qcow2, etc.). This info is not really necessary. @param basevm_name Name of the base image guest that is used by KVM to define, start or stop it. @param tasks A list of content that are defined by instructors and supposed to install on the base image guest. ''' class Guest(object): def __init__(self, guest_id, basevm_addr, root_passwd, basevm_host, basevm_config_file, basevm_os_type, basevm_type, basevm_name, tasks): self.guest_id = guest_id self.basevm_addr = basevm_addr self.basevm_host = basevm_host self.root_passwd = <PASSWORD> self.basevm_config_file = basevm_config_file self.basevm_os_type =basevm_os_type self.basevm_type = basevm_type self.basevm_name = basevm_name self.tasks = tasks def getGuestId(self): return self.guest_id def getBasevmAddr(self): return self.basevm_addr def setBasevmAddr(self, basevm_addr): self.basevm_addr = basevm_addr def getRootPasswd(self): return self.root_passwd def setRootPasswd(self, <PASSWORD>_passwd): self.root_passwd = <PASSWORD> def getBasevmHost(self): return self.basevm_host def getBasevmConfigFile(self): return self.basevm_config_file def setBasevmConfigFile(self, new_file): self.basevm_config_file = new_file def getBasevmOSType(self): return self.basevm_os_type def getBasevmType(self): return self.basevm_type def getBasevmName(self): return self.basevm_name def setBasevmName(self, basevm_name): self.basevm_name = basevm_name def getAddrLastBit(self): last_bit = self.basevm_addr.split(".")[-1] return last_bit def getTasks(self): return self.tasks ''' Object Bridges are created for connecting virtual machines with the host. Each virtual machine network interface has one corresponding bridge. @param bridge_id The id (name) of the bridge. @param addr The IP address of the bridge. ''' class Bridge(object): def __init__(self, bridge_id, addr): self.bridge_id = bridge_id self.addr = addr def getId(self): return self.bridge_id def getAddr(self): return self.addr def __str__(self): return "bridge_id: " + self.getId() + ", addr: " + self.getAddr() ''' Object EntryPoints are created in each cyber range instance for trainees connect directly from outside to their environment. @param addr The address of the entry. @param port The port for trainees to connect to it via ssh connection. @param account The account that is generated randomly for the entry point. 
@param passwd The password that is generated randomly for the entry point. ''' class EntryPoint(object): def __init__(self): self.addr = "" self.port = "" self.account = "" self.passwd = "" self.host_id = "" def getAddr(self): return self.addr def setAddr(self, addr): self.addr = addr def getPort(self): return self.port def setPort(self, port): self.port = port def getAccount(self): return self.account def setAccount(self, account): self.account = account def getPasswd(self): return self.passwd def setPasswd(self, passwd): self.passwd = passwd def getHostId(self): return self.host_id def setHostId(self, host_id): self.host_id = host_id def __str__(self): return "entry_point addr: " + self.getAddr() + ", port: " + str(self.getPort()) + ", account: " + self.getAccount() + ", passwd: " + self.getPasswd() + ", host_id: " + self.getHostId() ''' Object FirewallRule is created for containing information of a firewall rule in one clone_guest. This firewall rule is for routing traffic. It has four arguments: @param src Source ip of the incoming traffic. @param dst Destination ip of the incoming traffic. @param sport Source port. @param dport Destination port. ''' class FirewallRule(object): def __init__(self, src, dst, sport, dport): self.src = src self.dst = dst self.sport = sport self.dport = dport def getSrc(self): return self.src def getDst(self): return self.dst def getSport(self): return self.sport def getDport(self): return self.dport ''' Object CloneGuest is created for containing information of hosts that are specified as tag "guests" in the session "clone_settings" of the cyber range description. It has two variables: @param guest_id Id of the guest that is cloned (desktop, webserver, etc.). This id has been specified above in the "guest_settings" part of the description. @param network_interfaces List of network interfaces of that guest. 
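Example (a minimal sketch with made-up values; note that the constructor below takes more arguments than the two parameters listed above):
    guest = CloneGuest("desktop", 1, 2, 46, False, [], True, "linux")
    print guest.getFullId()   # cr46,ins2,desktop,1 -- i.e. cr<range_id>,ins<instance_index>,<guest_id>,<guest_index>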
''' class CloneGuest(object): #def __init__(self, guest_id, index, has_fw_setup, fwrule_desc_list, is_entry_point,os_type): def __init__(self, guest_id, index, instance_id, cyberrange_id, has_fw_setup, fwrule_desc_list, is_entry_point, os_type): self.guest_id = guest_id self.index = index self.up_instance = instance_id self.up_cyberrange = cyberrange_id self.nic_addr_dict = OrderedDict() self.nic_gw_dict = OrderedDict() self.gateway = "" self.has_fw_setup = has_fw_setup self.fwrule_desc_list = fwrule_desc_list self.fwrule_list = [] self.is_entry_point = is_entry_point self.os_type=os_type self.sepchar = "," def getFullId(self): return "cr" + str(self.up_cyberrange) + self.sepchar \ + "ins" + str(self.up_instance) + self.sepchar \ + str(self.guest_id) + self.sepchar \ + str(self.index) def getMidId(self): return "ins" + str(self.up_instance) + self.sepchar \ + str(self.guest_id) + self.sepchar \ + str(self.index) def getGuestId(self): return self.guest_id def getIndex(self): return self.index def setIndex(self, index): self.index = index def getNicAddrDict(self): return self.nic_addr_dict def addNicAddrDict(self, nic, addr): self.nic_addr_dict[nic] = "{0}".format(addr) def getNicGwDict(self): return self.nic_gw_dict def addNicGwDict(self, nic, gw): self.nic_gw_dict[nic] = "{0}".format(gw) def getHasFwSetup(self): return self.has_fw_setup def setHasFwSetup(self, value): self.has_fw_setup = value def getFwRuleDescList(self): return self.fwrule_desc_list def getFwRuleList(self): return self.fwrule_list def setFwRuleList(self, fwrule_list): self.fwrule_list = fwrule_list def getIsEntryPoint(self): return self.is_entry_point def setIsEntryPoint(self, value): self.is_entry_point = value def getOsType(self): return self.os_type def __str__(self): return "guest_id: " + self.getGuestId() + ", guest_index: " + str(self.getIndex()) + ", guest_nic_addr: " + str(self.getNicAddrDict()) + ", guest_nic_gw: " + str(self.getNicGwDict()) + ", fwrule_desc: " + str(self.getFwRuleDescList()) + ", fwrule_list: " + str(self.getFwRuleList()) + ", is_entry_point: " + str(self.getIsEntryPoint()) ''' Object SubNetwork is created for containing information of a subnetwork in the cyber range description. It has two variables: @param name Describe name of the subnetwork. @param member_list Describe elements in the subnetwork. @param gateway The gateway of that subnetwork. ''' class CloneSubnetwork(object): def __init__(self, name, members, gateway): self.name = name self.gateway = gateway self.node_list = self.setNodeList(members) def getName(self): return self.name def getGateway(self): return self.gateway def setNodeList(self, members_str): if DEBUG: print members_str # Remove all whitespaces in the members_str. members_str = members_str.replace(" ", "") node_list = [] if "," in members_str: node_list = members_str.split(",") if node_list[-1] == "": node_list.pop() # Remove space if any in node name. for node in node_list: node = node.strip() else: node_list.append(members_str) # Add gateway as a member of the network. if(self.getGateway() != ""): node_list.append(self.getGateway()) return node_list def getNodeList(self): return self.node_list ''' Object CloneInstance is created for containing information of a entire instance of the cyber range, including virtual machines, their ip addresses and network. It has three variables: @param index Index of the instance, calculated by looping the instance_number. @param clone_guest_list A list of guest. @param clone_subnw_list A list of subnetwork. 
It and @clone_guest_list will be used to generate ip addresses for each guest in the instance. The function setCloneGuestList set ip addresses for the guests in clone_guest_list. IP address of one guest's nic is defined as <range_id>.<instance_index>.i.j, in that: + i is the position of the segment/subnetwork that guest's nic belongs to in the clone_subnw_list. + j is the position of guest's nic in its segment/subnetwork. ''' class CloneInstance(object): def __init__(self, index, clone_guest_list, clone_subnw_list): self.index = index self.clone_guest_list = clone_guest_list self.clone_subnw_list = clone_subnw_list self.bridge_list = [] self.entry_point = EntryPoint() def getIndex(self): return self.index def getCloneSubnwList(self): return self.clone_subnw_list def getCloneGuestList(self): return self.clone_guest_list # Function for getting a list of IPs (source IPs or destination IPs) from # source network/destination network in the firewall_rules description. def getIpList(self, block, nwname_nodes_dict, nwname_ipaddrs_dict): ip_list = [] if "," in block: element_list = block.split(",") else: element_list = [] element_list.append(block) # For each element in element_list, get the corresponding IP address. for element in element_list: # If source is under from <network>.<guest_id>, then break them down. if "." in element: nw_id = element.split(".")[0] guest_id = element.split(".")[1] # Get the corresponding IP address from nwname_nodes_dict # and nwname_ipaddrs_dict, then add them to the ip_list. for i, member in enumerate(nwname_nodes_dict[nw_id]): if guest_id in member: ip_list.append(nwname_ipaddrs_dict[nw_id][i]) break # Otherwise add all IPs in the network to the ip_list. else: for ip in nwname_ipaddrs_dict[element]: ip_list.append(ip) return ip_list # Functions for generating ip addresses, gateway, and firewall rules for each guest # in the instance. Since there's no ip addressess in the beginning, it's mandatory to # generate ip addresses before parsing gateway address for each guest in the instance. def setCloneGuestList(self, range_id): # Dictionary of <network_name>:<a list of members' ipaddr>. nwname_ipaddrs_dict = dict() # Dictionary of <network_name>:<a list of (node_id).(interface)>. nwname_nodes_dict = dict() # For each subnetwork/segment in the clone_subnetwork_list. # i is the index, start from 0. i is used as the third byte in the ipaddr. # Bytes in ip addr couldnt be 0, that's why it's i+1. for i, subnw_element in enumerate(self.getCloneSubnwList()): # j is used as the index of node_element in each subnetwork/segment. # j is the fourth byte in the ipaddr. # Last byte in ip addr couldn't be 0 nor 1, that's why it's j+2. j = 0 # Network name. nwname = subnw_element.getName() # List of ip addr of members in the network. ipaddr_list = [] # List of nodes.interface in the network. node_list = [] if DEBUG: print subnw_element.getNodeList() for node_element in subnw_element.getNodeList(): # Split the segment value to get node_id and node_nic. node_id, node_nic = node_element.split(".") # Check node_id of each guest in the clone_guest_list to get the correct guest. # Depending on the number of the guest specified in the field "number" of "guests", # it will generate the corresponding last bit. for guest in self.getCloneGuestList(): if guest.getGuestId() == node_id: ip_addr = "{0}.{1}.{2}.{3}".format(range_id, self.getIndex(), i+1, j+2) if node_element != subnw_element.getGateway(): if DEBUG: print ip_addr # Add element to ipaddr_list (gateway is not included). 
ipaddr_list.append(ip_addr) # Add elements to node_list (gateway is not included). node_list.append(node_element) guest.addNicAddrDict(node_nic, ip_addr) j += 1 # Add element to the dictionary nwname_ipaddrs_dict. nwname_ipaddrs_dict[nwname] = ipaddr_list nwname_nodes_dict[nwname] = node_list # Set gateway for each guest in the list. # If user specify gateway for the guest via tag "gateway", then extract gateway_id and gateway_nic from it. if (subnw_element.getGateway() != ""): gateway_addr = "" gateway_id, gateway_nic = subnw_element.getGateway().split(".") # Compare gateway_id and gateway_nic with each guest in # the clone guest list to find out the gateway_addr. for guest in self.getCloneGuestList(): if guest.getGuestId() == gateway_id: gateway_addr = guest.getNicAddrDict()[gateway_nic] break # Set gateway_addr for each guest in the clone guest list. for node_element in subnw_element.getNodeList(): node_id, node_nic = node_element.split(".") for guest in self.getCloneGuestList(): if guest.getGuestId() == node_id and guest.getGuestId() != gateway_id: guest.addNicGwDict(node_nic, gateway_addr) # Otherwise, set gateway for the guest by default rule: gateway_ipaddr = {three first bits of guest_ipaddr}.1 # Ex: if guest_ipaddr = 192.168.127.12, then gateway_ipaddr = 192.168.3.11. # Note that this rule only applies for vm that doesn't have a gateway installed before. When there exists a gateway # configured in the vm, then the rule will be passed. else: for guest in self.getCloneGuestList(): if len(guest.getNicGwDict()) == 0: for guest_nic, guest_addr in guest.getNicAddrDict().items(): bits = guest_addr.split(".") bits.pop() bits.append("1") gateway_addr = ".".join(bits) guest.addNicGwDict(guest_nic, gateway_addr) break if DEBUG: print nwname_ipaddrs_dict, nwname_nodes_dict # Set rules for each guest in list clone_guest based on src=ipaddr, dst=ipaddr. for guest in self.getCloneGuestList(): if guest.getHasFwSetup() == True: fwrule_list = [] fwrule_list.append("sysctl -w net.ipv4.ip_forward=1; sysctl -p"); for rule_desc in guest.getFwRuleDescList(): elements = rule_desc.strip().split(" ") src_nw = "" dst_nw = "" sport = "" dport = "" multiport = "" for e in elements: if "src" in e: src_nw = e.split("=")[1] if "dst" in e: dst_nw = e.split("=")[1] if "sport" in e: sport = e.split("=")[1] if "dport" in e: dport = e.split("=")[1] if "," in dport: multiport = "-m multiport" # Get the list of source IP list and destination IP list from getIpList function. src_ip_list = self.getIpList(src_nw, nwname_nodes_dict, nwname_ipaddrs_dict) dst_ip_list = self.getIpList(dst_nw, nwname_nodes_dict, nwname_ipaddrs_dict) # Combine IPs in these above lists to put them in one firewall statement. 
src_ip_str = ",".join(src_ip_list[:]) dst_ip_str = ",".join(dst_ip_list[:]) if sport != "" and dport != "": fw_rule = "iptables -A FORWARD -m state -p tcp -s {0} -d {1} {2} --sport {3} --dport {4} --state NEW,ESTABLISHED,RELATED -j ACCEPT".format(src_ip_str, dst_ip_str, multiport, sport, dport) elif sport != "" and dport == "": fw_rule = "iptables -A FORWARD -m state -p tcp -s {0} -d {1} --sport {2} --state NEW,ESTABLISHED,RELATED -j ACCEPT".format(src_ip_str, dst_ip_str, sport) elif sport == "" and dport != "": fw_rule = "iptables -A FORWARD -m state -p tcp -s {0} -d {1} {2} --dport {3} --state NEW,ESTABLISHED,RELATED -j ACCEPT".format(src_ip_str, dst_ip_str, multiport, dport) else: fw_rule = "iptables -A FORWARD -m state -p tcp -s {0} -d {1} --state NEW,ESTABLISHED,RELATED -j ACCEPT".format(src_ip_str, dst_ip_str) fwrule_list.append(fw_rule) # Append the final rules as allowing all allowed traffic above comming back. if len(fwrule_list) != 0: fw_rule = "iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT" fwrule_list.append(fw_rule) #print fwrule_list guest.setFwRuleList(fwrule_list) # Function for generating ip addresses for bridges in the instance. def getBridgeList(self): return self.bridge_list def setBridgeList(self, range_id): for i, subnw_element in enumerate(self.getCloneSubnwList()): # bridge_id = <range_id>-<instance_index>-<position of the subnet in the instance> # bridge_addr = <range_id>.<instance_index>.<position of the subnet in the instance>.1 bridge_id = "{0}-{1}-{2}".format(range_id, self.getIndex(), i+1) bridge_addr = "{0}.{1}.{2}.1".format(range_id, self.getIndex(), i+1) append = 1 for bridge in self.bridge_list: if bridge_id == bridge.getId(): append = 0 break if append == 1: self.bridge_list.append(Bridge(bridge_id, bridge_addr)) # Function for generating entry point in the instance. # The current mechanism is to take the first desktop as the entry point. def getEntryPoint(self): return self.entry_point def setEntryPoint(self, instance_id, port, host_id): for clone_guest in self.getCloneGuestList(): #if clone_guest.getGuestId() == "desktop" and clone_guest.getIndex() == 1: if clone_guest.getIsEntryPoint() == True and clone_guest.getIndex() == 1: self.entry_point.setAddr(clone_guest.getNicAddrDict()["eth0"]) self.entry_point.setPort(port) # Generate random account and passwd for entry point. s = string.lowercase+string.digits # OLD VERSION: Random suffix of 5 digits #account = "trainee{0}".format(''.join(random.sample(s,5))) # NEW VERSION: Use instance id as suffix (add 1 so as to start from 1) # Use leading zeros (up to 2 digits) to match current Moodle settings account = "trainee{number:02d}".format(number=(instance_id+1)) passwd = ''.join(random.sample(s,10)) self.entry_point.setAccount(account) self.entry_point.setPasswd(<PASSWORD>) self.entry_point.setHostId(host_id) ''' Object CloneHost is created for containing information of the tag "hosts" in the cyber range description. It has two variables: @param host_id Id of the host that cyber range instances are deployed. @param instance_list List of instances. 
''' class CloneHost(Host): def __init__(self, host, instance_list): if host: self.host_id = host.getHostId() self.virbr_addr = host.getVirbrAddr() self.mgmt_addr = host.getMgmtAddr() self.account = host.getAccount() self.instance_list = instance_list def getHostId(self): return self.host_id def getInstanceList(self): return self.instance_list # Pass the range_id variable for the function setCloneGuestList in # the class CloneInstance to calculate ip addresses for virtual machines. def setInstanceList(self, range_id, port_list): for i, instance in enumerate(self.getInstanceList()): instance.setCloneGuestList(range_id) instance.setBridgeList(range_id) instance.setEntryPoint(i, port_list[i], self.getHostId()) ''' Object CloneSetting is created for containing information of the tag "clone_settings" in the cyber range description. It has three variables: @param range_id Id of the cyber range. @param clone_host_list A list of CloneHost object. ''' class CloneSetting(object): def __init__(self, range_id, topology_type, clone_host_list): self.range_id = range_id self.topology_type = topology_type self.clone_host_list = clone_host_list def getRangeId(self): return self.range_id def getCloneHostList(self): return self.clone_host_list def getTopologyType(self): return self.topology_type def getTotalInstanceNum(self): instance_num = 0 for clone_host in self.getCloneHostList(): instance_num += len(clone_host.getInstanceList()) return instance_num # Pass the range_id variable for the function setInstanceList in the class CloneHost. def setCloneHostList(self, port_list): for i, clone_host in enumerate(self.getCloneHostList()): port_sublist = port_list[0:len(clone_host.getInstanceList())] clone_host.setInstanceList(self.getRangeId(), port_list) port_list = [port for port in port_list if port not in port_sublist] # Write down the detailed configuration file for the range depending on base VM type. 
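# The generated file is YAML with roughly the following nesting (key names are the Storyboard
# constants used in the code below):
#   range_id
#   hosts:          one entry per physical host (host_id, mgmt_addr, instance_count)
#     instances:    one entry per instance (instance_index)
#       guests:     per-guest data: guest_id, kvm_domain or aws_domain, ip_addrs, and,
#                   when present, gateways, firewall_rule and network_membership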
def writeConfig(self, filename, base_vm_type): data = OrderedDict() data[Storyboard.RANGE_ID] = self.getRangeId() hostdict_list = [] for host in self.getCloneHostList(): host_dict = OrderedDict() host_dict[Storyboard.HOST_ID] = host.getHostId() host_dict[Storyboard.MGMT_ADDR] = host.getMgmtAddr() host_dict[Storyboard.INSTANCE_COUNT] = len(host.getInstanceList()) instancedict_list = [] for instance in host.getInstanceList(): instance_dict = OrderedDict() instance_dict[Storyboard.INSTANCE_INDEX] = instance.getIndex() guestdict_list = [] for j,guest in enumerate(instance.getCloneGuestList()): guest_dict = OrderedDict() addr_dict = OrderedDict() for key,value in guest.getNicAddrDict().items(): addr_dict[key] = value gateway_dict = OrderedDict() for key,value in guest.getNicGwDict().items(): gateway_dict[key] = value fwrule_dict = OrderedDict() for i,rule in enumerate(guest.getFwRuleList()): fwrule_dict['rule{0}'.format(i)] = rule guest_dict[Storyboard.GUEST_ID] = guest.getGuestId() # Generate KVM/AWS domain name at this point so that we can output it # TODO: Field name below should be just 'domain', but seems used in other files too guest.kvm_domain = "{0}_cr{1}_{2}_{3}".format(guest.getGuestId(), self.getRangeId(), instance.getIndex(), guest.getIndex()) if base_vm_type == 'kvm': guest_dict[Storyboard.KVM_DOMAIN] = guest.kvm_domain elif base_vm_type == 'aws': guest_dict[Storyboard.AWS_DOMAIN] = guest.kvm_domain guest_dict[Storyboard.IP_ADDRS] = addr_dict if len(gateway_dict) != 0: guest_dict[Storyboard.GATEWAYS] = gateway_dict if len(fwrule_dict) != 0: guest_dict[Storyboard.FIREWALL_RULE] = fwrule_dict # Deal with network membership information networks_dict = self.generateNetworkMembership(instance.clone_subnw_list, guest.getGuestId()) if networks_dict: guest_dict[Storyboard.NETWORK_MEMBERSHIP] = networks_dict guestdict_list.append(guest_dict) #instance_dict['node{0}'.format(j)] = guestdict_list instance_dict[Storyboard.GUESTS] = guestdict_list instancedict_list.append(instance_dict) host_dict[Storyboard.INSTANCES] = instancedict_list hostdict_list.append(host_dict) data[Storyboard.HOSTS] = hostdict_list # Write to temporary file first, then rename it, so as to make sure that programs # (e.g., CyRIS-vis) watching the output file do not read truncated versions filename_tmp = filename + ".tmp" with open(filename_tmp, 'w') as yaml_file: yaml.dump(data, yaml_file, width=float("inf"), allow_unicode=True, default_flow_style=False, explicit_start=True) os.rename(filename_tmp, filename) #print data def generateNetworkMembership(self, clone_subnw_list, guest_id): networks_dict = OrderedDict() if DEBUG: print "* DEBUG: cyris: Network info in cyber range of guest '{0}': ".format(guest_id) for clone_subnw in clone_subnw_list: if DEBUG: print "* DEBUG: cyris: Network name: ", clone_subnw.getName() print "* DEBUG: cyris: Node list: ", clone_subnw.getNodeList() for node_interface in clone_subnw.getNodeList(): node_interface_list = node_interface.split(".") if node_interface_list: if node_interface_list[0] == guest_id: networks_dict[node_interface_list[1]] = clone_subnw.getName() if DEBUG: print "* DEBUG: cyris: Generated networks dictionary: ", networks_dict return networks_dict class Command(object): #def __init__(self, command, description): def __init__(self, command, description, comtag="-"): self.command = command self.description = description self.comtag = comtag def getCommand(self): return self.command def getDescription(self): return self.description def __str__(self): return "command: " + 
self.getCommand() + " description: " + self.getDescription() """ def main(): yaml_file = sys.argv[1] try: with open(yaml_file, "r") as f: doc = yaml.load(f) except yaml.YAMLError, exc: print "Error in the cyber range description file: ", exc return hosts = [] clone_setting = None for element in doc: if "host_settings" in element.keys(): for i in element["host_settings"]: if i == 0: MSTNODE_ACCOUNT = i["account"] MSTNODE_MGMT_ADDR = i["mgmt_addr"] host = Host(i["id"], i["virbr_addr"], i["mgmt_addr"], i["account"]) hosts.append(host) if "clone_settings" in element.keys(): range_id = element["clone_settings"][0]["range_id"] clone_host_list = [] for host in element["clone_settings"][0]["hosts"]: host_id_str = host["host_id"].strip() host_id_list = [] if "," in host_id_str: host_id_list = host_id_str.replace(" ","").split(",") else: host_id_list.append(host_id_str) for host_id in host_id_list: instance_num = host["instance_number"] nw_type = host["topology"][0]["type"] # for subnetwork in host["topology"][0]["networks"]: # name = subnetwork["name"] # members = subnetwork["members"] # if "gateway" in subnetwork.keys(): # gateway = subnetwork["gateway"] # else: # gateway = "" # clone_subnetwork = CloneSubnetwork(name, members, gateway) # clone_subnw_list.append(clone_subnetwork) instance_list = [] for i in range(1, instance_num+1): # Since each instance reuse the information of the guest, it's important to # recreate a clone_guest_list when creating a new instance. It is the main # reason why clone_guest_list is created here but not in the same place with # the clone_subnw_list. clone_subnw_list = [] for subnetwork in host["topology"][0]["networks"]: name = subnetwork["name"] members = subnetwork["members"] if "gateway" in subnetwork.keys(): gateway = subnetwork["gateway"] else: gateway = "" clone_subnetwork = CloneSubnetwork(name, members, gateway) clone_subnw_list.append(clone_subnetwork) clone_guest_list = [] for guest in host["guests"]: guest_id = guest["guest_id"] number = guest["number"] firewall_rules = [] if "forwarding_rules" in guest.keys(): has_fw_setup = True for rule in guest["forwarding_rules"]: firewall_rules.append(rule["rule"]) else: has_fw_setup = False if "entry_point" in guest.keys(): is_entry_point = True else: is_entry_point = False # Create a list of clone_guest with size=number. 
for k in range(1, number+1): clone_guest = CloneGuest(guest_id, k, has_fw_setup, firewall_rules, is_entry_point) clone_guest_list.append(clone_guest) instance = CloneInstance(i, clone_guest_list, clone_subnw_list) instance_list.append(instance) clone_host = CloneHost(host_id, instance_list) clone_host_list.append(clone_host) clone_setting = CloneSetting(range_id, nw_type, clone_host_list) clone_setting.setCloneHostList([2,3,4,5,6,7,8,9,10]) clone_setting.writeConfig("result.yml") for host in clone_setting.getCloneHostList(): print host.getHostId() for i,instance in enumerate(host.getInstanceList()): print "instance",i for bridge in instance.getBridgeList(): print bridge for guest in instance.getCloneGuestList(): print guest print "\n" print "entry point: ", instance.getEntryPoint() # Send email function # f = open("/home/cyuser/cyris-development/main/mail_template", "r") # contents = f.readlines() # f.close() # contents.insert(0, "Dear <NAME>,") # contents.insert(4, "\n- Number of cyber range instances: {0}".format(clone_setting.getTotalInstanceNum())) # information = "" # instance_index = 1 # for host in clone_setting.getCloneHostList(): # for instance in host.getInstanceList(): # for host in hosts: # if instance.getEntryPoint().getHostId() == host.getHostId(): # entry_point = instance.getEntryPoint() # information += "\n- Cyber range instance {0}:\n\turl: ssh {1}@{2} -p {3}\n\tpasswd: {4}\n".format(instance_index, entry_point.getAccount(), host.getMgmtAddr(), entry_point.getPort(), entry_point.getPasswd()) # instance_index += 1 # break # contents.insert(6, "{0}\n".format(information)) # f = open("/home/cyuser/cyris-development/main/inform_email", "w") # contents = "".join(contents) # f.write(contents) # f.close() main() """ ``` #### File: cyris/main/modules.py ```python INSTANTIATION_DIR = "instantiation" # External imports from entities import Command ######################################################################### # Class Modules is the parent class of every other modules / features class Modules(object): def __init__(self, name, abspath): self.name = name self.abspath = abspath def getName(self): return self.name def getAbsPath(self): return self.abspath ############################################################ # Copy ssh keygen from the local machine to a remote one class SSHKeygenHostname(Modules): def __init__(self, vm_addr, root_passwd, hostname, mstnode_account, abspath, os_type): Modules.__init__(self, "SSHKeygen", abspath) self.vm_addr = vm_addr self.root_passwd = <PASSWORD> self.hostname = hostname self.mstnode_account = mstnode_account self.os_type =os_type def command(self): desc = "Generate ssh keys and do hostname setup" if self.os_type=="windows.7": command_string ="{0}{1}/sshkey_hostname_setup/sshkey_setup_win_cmd.sh {0} {1} {2} {3} {4};".format(self.getAbsPath(), INSTANTIATION_DIR, self.vm_addr, self.root_passwd, self.mstnode_account) elif self.os_type in ["windows.8.1","windows.10"] : command_string ="{0}{1}/sshkey_hostname_setup/sshkey_setup_win_unix.sh {0} {1} {2} {3} {4};".format(self.getAbsPath(), INSTANTIATION_DIR, self.vm_addr, self.root_passwd, self.mstnode_account) else: command_string = "{0}{5}/sshkey_hostname_setup/sshkey_setup.sh {1} {2} {3}; {0}{5}/sshkey_hostname_setup/hostname_setup.sh {1} {2} {4};".format(self.getAbsPath(), self.vm_addr, self.root_passwd, self.mstnode_account, self.hostname, INSTANTIATION_DIR) command = Command(command_string, desc) return command ######################################################################### # 
Manage users in the system. Contains functions for adding new accounts # and edit info of existing accounts. class ManageUsers(Modules): def __init__(self, addr, abspath): Modules.__init__(self, "ManageUsers", abspath) self.addr = addr def add_account(self, new_account, new_passwd, full_name, os_type, basevm_type): desc = "Add user account '{0}'".format(new_account) if full_name: full_name_arg=full_name else: full_name_arg="" if basevm_type == 'kvm': if os_type=="windows.7" : command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {2} {3} /ADD' ;".format(self.addr, self.getAbsPath(), new_account, new_passwd) command_string += "sshpass -p {0} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {1}@{2} 'dir' ;".format(new_passwd, new_account, self.addr) command_string += "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net localgroup \"Remote Desktop Users\" {2} /ADD'".format(self.addr, self.getAbsPath(), new_account) else: command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR) elif basevm_type == 'aws': if os_type=="windows" : command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {2} {3} /ADD' ;".format(self.addr, self.getAbsPath(), new_account, new_passwd) command_string += "sshpass -p {0} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {1}@{2} 'dir' ;".format(new_passwd, new_account, self.addr) command_string += "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net localgroup \"Remote Desktop Users\" {2} /ADD'".format(self.addr, self.getAbsPath(), new_account) elif os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR) elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR) command = Command(command_string, desc) return command def modify_account(self, account, new_account, new_passwd, os_type, basevm_type): sub_desc = "new name: {0} new password: {1}".format(new_account, new_passwd) if new_account == "null": sub_desc = "new password: {0}".format(new_passwd) elif new_passwd == "null": sub_desc = "new name: {0}".format(new_account) desc = "Modify user account '{0}': {1}".format(account, sub_desc) if basevm_type == 'kvm': if os_type =="windows.7": command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {1} {2} ' ".format(self.addr, account, new_passwd) else: command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR) elif basevm_type =='aws': if os_type =="windows": command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {1} {2} ' ".format(self.addr, account, new_passwd) elif 
os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR) elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR) command = Command(command_string, desc) return command ######################################################################### # Install tools from (i) package manager (apt-get, yum, etc.), (ii) source class InstallTools(Modules): def __init__(self, addr, account, abspath): Modules.__init__(self, "InstallTools", abspath) self.addr = addr self.account = account def package_install_command(self, package_manager, tool_name, version, os_type, basevm_type): if self.addr != "host": if version == "": desc = "Install package '{0}'".format(tool_name) else: desc = "Install package '{0}' version {1}".format(tool_name, version) if basevm_type == 'kvm': # Handle Windows package manager if package_manager == "chocolatey": if version == "": command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{1} {2} install -y {3}".format(self.account, self.addr, package_manager, tool_name) else: command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{1} {2} install -y {3} --version {4}".format(self.account, self.addr, package_manager, tool_name, version) # Handle other OS package managers else: command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {0}@{1} {2} install -y {3} {4}".format(self.account, self.addr, package_manager, tool_name, version) elif basevm_type == 'aws': # Handle RedHat-like package manager if os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{1} 'sudo -s' '{2} install -y {3} {4}'".format(self.account, self.addr, package_manager, tool_name, version) # Handle Ubuntu package manager elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']: command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{1} 'sudo apt-get update; sudo {2} install -y {3} {4}'".format(self.account, self.addr, package_manager, tool_name, version) command = Command(command_string, desc) return command else: return "sudo {0} install -y {1} {2}".format(package_manager, tool_name, version) def source_install_command(self, chdir, compiler): return "Install source '{0}' using '{1}'".format(chdir, compiler) class EmulateAttacks(Modules): def __init__(self, attack_type, target_addr, target_account, number, attack_time, abspath, basevm_type): Modules.__init__(self, "EmulateAttacks", abspath) self.attack_type = attack_type self.target_addr = target_addr self.target_account = target_account self.number = number self.attack_time = attack_time self.basevm_type = basevm_type def command(self): if self.attack_type == "ssh_attack": desc = "Perform ssh attack on account '{0}' (repeat {1} times)".format(self.target_account, self.number) command_string = "{0}{5}/attacks_emulation/install_paramiko.sh; python {0}{5}/attacks_emulation/attack_paramiko_ssh.py {1} {2} {3} {4} {6}".format(self.getAbsPath(), self.target_addr, self.target_account, self.number, self.attack_time, 
INSTANTIATION_DIR, self.basevm_type) command = Command(command_string, desc) return command class GenerateTrafficCaptureFiles(Modules): def __init__(self, virbr_addr, image_addr, image_passwd, attack_type, noise_level, file_path, file_name, abspath, cr_dir, basevm_type): Modules.__init__(self, "LogsPreparation", abspath) self.virbr_addr = virbr_addr self.image_addr = image_addr self.image_passwd = <PASSWORD> self.attack_type = attack_type self.noise_level = noise_level self.file_path = file_path self.file_name = file_name self.cr_dir = cr_dir self.basevm_type = basevm_type def ssh_attack(self, target_account, attack_source, number): desc = "Generate traffic capture file containing ssh attack trace" command_string = "{0}{11}/logs_preparation/pcap_sshattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {12}".format(self.getAbsPath(), self.virbr_addr, target_account, self.image_addr, self.image_passwd, attack_source, number, self.noise_level, self.file_path, self.file_name, self.cr_dir, INSTANTIATION_DIR, self.basevm_type) command = Command(command_string, desc) return command def ddos_attack(self): desc = "Generate traffic capture file containing DDoS attack trace" command_string = "{0}{8}/logs_preparation/pcap_ddosattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7}".format(self.getAbsPath(), self.virbr_addr, self.image_addr, self.image_passwd, self.noise_level, self.file_path, self.file_name, self.cr_dir, INSTANTIATION_DIR) command = Command(command_string, desc) return command def dos_attack(self, attack_source, dport): desc = "Generate traffic capture file containing DoS attack trace" command_string = "{0}{10}/logs_preparation/pcap_dosattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7} {8} {9}".format(self.getAbsPath(), self.virbr_addr, self.image_addr, self.image_passwd, self.noise_level, self.file_path, self.file_name, attack_source, dport, self.cr_dir, INSTANTIATION_DIR) command = Command(command_string, desc) return command class EmulateMalware(Modules): def __init__(self, addr, malware_name, mode, crspd_option, abspath, basevm_type, os_type): Modules.__init__(self, "EmulateMalware", abspath) self.addr = addr self.malware_name = malware_name self.mode = mode self.crspd_option = crspd_option self.basevm_type = basevm_type self.os_type = os_type def command(self): desc = "Deploy dummy malware" command_string = "{0}{5}/malware_creation/malware_launch.sh {1} {2} {3} {4} {6} {0} {7}".format(self.getAbsPath(), self.addr, self.malware_name, self.mode, self.crspd_option, INSTANTIATION_DIR, self.basevm_type, self.os_type) command = Command(command_string, desc) return command class ModifyRuleset(Modules): def __init__(self, image_addr, ruleset_file, abspath, os_type, basevm_type): Modules.__init__(self, "ModifyRuleset", abspath) self.image_addr = image_addr self.ruleset_file = ruleset_file self.basevm_type = basevm_type self.os_type = os_type def command(self): desc = "Modify firewall ruleset" command_string = "{0}{3}/ruleset_modification/ruleset_modify.sh {0} {1} {2} {4} {5}".format(self.getAbsPath(), self.image_addr, self.ruleset_file, INSTANTIATION_DIR, self.basevm_type, self.os_type) command = Command(command_string, desc) return command class CopyContent(Modules): def __init__(self, src, dst, image_addr, image_passwd, abspath, os_type, basevm_type): Modules.__init__(self, "CopyContent", abspath) self.src = src self.dst = dst self.image_addr = image_addr self.image_passwd = <PASSWORD> self.os_type = os_type self.basevm_type = basevm_type def command(self): desc = "Copy file 
'{0}'".format(self.src) if (self.os_type=="windows.7"): command_string = "{0}{5}/content_copy_program_run/copy_content_win.sh {1} \" {2} \" {3} {4}".format(self.getAbsPath(), self.src, self.dst, self.image_addr, self.image_passwd, INSTANTIATION_DIR) else: command_string = "{0}{4}/content_copy_program_run/copy_content.sh {1} {2} {3} {5} {6}".format(self.getAbsPath(), self.src, self.dst, self.image_addr, INSTANTIATION_DIR, self.basevm_type, self.os_type) command = Command(command_string, desc) return command class ExecuteProgram(Modules): def __init__(self, program, interpreter, args, image_addr, image_passwd, log_file, abspath,os_type,comtag="-"): Modules.__init__(self, "ExecuteProgram", abspath) self.program = program self.interpreter = interpreter self.args = args self.image_addr = image_addr self.image_passwd = <PASSWORD> self.log_file = log_file self.os_type = os_type self.comtag = comtag def getProgram(self): return self.program # This command_post_clone is for tasks that are required to be executed after the cloning step def command_post_clone(self, image_addr): desc = "Execute program post-cloning '{0}'".format(self.program) #command_string = "python {0}{7}/content_copy_program_run/run_program.py \"{1}\" {2} {3} {4} {5} {6} {8}".format(self.getAbsPath(), self.program, self.interpreter, self.args, self.image_addr, self.image_passwd, self.log_file, INSTANTIATION_DIR, self.os_type) command_string = "python {0}{7}/content_copy_program_run/run_program.py \"{1}\" {2} {3} {4} {5} {6} {8} {9}".format(self.getAbsPath(), self.program, self.interpreter, self.args, self.image_addr, self.image_passwd, self.log_file, INSTANTIATION_DIR, self.os_type, self.comtag) command = Command(command_string, desc) return command def command(self): desc = "Execute program '{0}'".format(self.program) #command_string = "python {0}{7}/content_copy_program_run/run_program.py \"{1}\" {2} {3} {4} {5} {6} {8}".format(self.getAbsPath(), self.program, self.interpreter, self.args, self.image_addr, self.image_passwd, self.log_file, INSTANTIATION_DIR, self.os_type) command_string = "python {0}{7}/content_copy_program_run/run_program.py \"{1}\" {2} {3} {4} {5} {6} {8} {9}".format(self.getAbsPath(), self.program, self.interpreter, self.args, self.image_addr, self.image_passwd, self.log_file, INSTANTIATION_DIR, self.os_type, self.comtag) command = Command(command_string, desc) return command class BaseImageLaunch(Modules): def __init__(self, xml_config, image_name, abspath): Modules.__init__(self, "LaunchBaseImage", abspath) self.xml_config = xml_config self.image_name = image_name def command(self): return "virsh --quiet define {0} > /dev/null; sleep 0.5; virsh --quiet start {1} > /dev/null".format(self.xml_config, self.image_name) ```
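Every class in modules.py above follows the same pattern: build a shell command string, pair it with a human-readable description, and wrap both in a `Command`. The following is a minimal, self-contained sketch of that pattern; `Command` and `Modules` are simplified stand-ins for the real classes in entities.py and modules.py, and the module name, address, and paths are made up for illustration.

```python
# Simplified stand-ins for the Command and Modules classes defined above.
class Command(object):
    def __init__(self, command, description, comtag="-"):
        self.command = command
        self.description = description
        self.comtag = comtag

    def getCommand(self):
        return self.command

    def getDescription(self):
        return self.description


class Modules(object):
    def __init__(self, name, abspath):
        self.name = name
        self.abspath = abspath

    def getAbsPath(self):
        return self.abspath


class EchoHostname(Modules):
    """Hypothetical module: echo a hostname on a guest over ssh."""

    def __init__(self, vm_addr, hostname, abspath):
        Modules.__init__(self, "EchoHostname", abspath)
        self.vm_addr = vm_addr
        self.hostname = hostname

    def command(self):
        desc = "Echo hostname '{0}' on {1}".format(self.hostname, self.vm_addr)
        command_string = "ssh root@{0} 'echo {1}'".format(self.vm_addr,
                                                          self.hostname)
        return Command(command_string, desc)


cmd = EchoHostname("192.0.2.10", "desktop-1", "/opt/cyris/").command()
print(cmd.getDescription())  # human-readable description for logging
print(cmd.getCommand())      # shell string handed to the command executor
```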
{ "source": "jniestroy/quoridor_bot", "score": 3 }
#### File: src/action/PawnMove.py ```python from src.action.IAction import * class PawnMove(IAction): def __init__(self, fromCoord, toCoord, throughCoord = None): self.fromCoord = fromCoord self.toCoord = toCoord self.throughCoord = throughCoord def isJump(self): return (self.throughCoord is not None) # https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes def __eq__(self, other): """Override the default Equals behavior""" if isinstance(other, self.__class__): #return self.__dict__ == other.__dict__ return self.fromCoord == other.fromCoord and self.toCoord == other.toCoord and self.throughCoord == other.throughCoord return NotImplemented # https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes def __ne__(self, other): """Define a non-equality test""" if isinstance(other, self.__class__): return not self.__eq__(other) return NotImplemented # https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes def __hash__(self): """Override the default hash behavior (that returns the id or the object)""" #return hash(tuple(sorted(self.__dict__.items()))) return hash((self.fromCoord, self.toCoord, self.throughCoord)) def __str__(self): return "from %s to %s%s" % (self.fromCoord, self.toCoord, " through %s" % self.throughCoord if self.throughCoord is not None else "") ``` #### File: quoridor_bot/src/GridCoordinates.py ```python class GridCoordinates: """ Coordinates on square grid """ def __init__(self, col, row): self.col = col self.row = row def left(self): """ Return the coordinates of the square at left, even if it does not exists """ return GridCoordinates(self.col - 1, self.row) def right(self): """ Return the coordinates of the square at right, even if it does not exists """ return GridCoordinates(self.col + 1, self.row) def top(self): """ Return the coordinates of the square at top, even if it does not exists """ return GridCoordinates(self.col, self.row - 1) def bottom(self): """ Return the coordinates of the square at bottom, even if it does not exists """ return GridCoordinates(self.col, self.row + 1) def clone(self): """ Return identical coordinates """ return GridCoordinates(self.col, self.row) def __eq__(self, other): """ Override the default Equals behavior. https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes """ if isinstance(other, self.__class__): #return self.__dict__ == other.__dict__ return self.col == other.col and self.row == other.row return NotImplemented def __ne__(self, other): """ Define a non-equality test. https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes """ if isinstance(other, self.__class__): return not self.__eq__(other) return NotImplemented def __hash__(self): """ Override the default hash behavior (that returns the id or the object). 
https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes """ return hash((self.col, self.row)) def __str__(self): return "%d,%d" % (self.col, self.row) ``` #### File: src/player/BuilderBot.py ```python import random import time from src.player.IBot import * from src.action.IAction import * class BuilderBot(IBot): def play(self, board) -> IAction: if self.remainingFences() > 0 and len(board.storedValidFencePlacings) > 0: randomFencePlacing = random.choice(board.storedValidFencePlacings) attempts = 5 while board.isFencePlacingBlocking(randomFencePlacing) and attempts > 0: #print("Cannot place blocking %s" % randomFencePlacing) randomFencePlacing = random.choice(board.storedValidFencePlacings) attempts -= 1 if (attempts == 0): validPawnMoves = board.storedValidPawnMoves[self.pawn.coord] return random.choice(validPawnMoves) return randomFencePlacing else: validPawnMoves = board.storedValidPawnMoves[self.pawn.coord] return random.choice(validPawnMoves) ``` #### File: src/player/RunnerBot.py ```python from src.player.IBot import * from src.action.IAction import * from src.Path import * class RunnerBot(IBot): def play(self, board) -> IAction: path = Path.BreadthFirstSearch(board, self.pawn.coord, self.endPositions, ignorePawns = False) if path is None: path = Path.BreadthFirstSearch(board, self.pawn.coord, self.endPositions, ignorePawns = True) firstMove = path.firstMove() if not board.isValidPawnMove(firstMove.fromCoord, firstMove.toCoord, ignorePawns = False): #board.drawOnConsole() return None return path.firstMove() ```
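GridCoordinates and PawnMove override `__eq__`, `__ne__`, and `__hash__` so that board positions and moves compare by value and can be used as dictionary keys, which is how the bots above look up `board.storedValidPawnMoves[self.pawn.coord]`. Below is a small self-contained check of that contract, using a trimmed copy of GridCoordinates.

```python
# Trimmed copy of GridCoordinates, keeping only what the equality/hash
# contract needs; the full class also has top(), bottom(), and clone().
class GridCoordinates:
    def __init__(self, col, row):
        self.col = col
        self.row = row

    def left(self):
        return GridCoordinates(self.col - 1, self.row)

    def right(self):
        return GridCoordinates(self.col + 1, self.row)

    def __eq__(self, other):
        if isinstance(other, GridCoordinates):
            return (self.col, self.row) == (other.col, other.row)
        return NotImplemented

    def __hash__(self):
        return hash((self.col, self.row))

    def __str__(self):
        return "%d,%d" % (self.col, self.row)


start = GridCoordinates(4, 8)
assert start.right().left() == start           # equal by value, distinct objects
valid_moves = {start: ["4,7", "3,8", "5,8"]}   # usable as a dictionary key
print(valid_moves[GridCoordinates(4, 8)])      # lookup with an equal key works
```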
{ "source": "jnietol/DVH-Analytics", "score": 3 }
#### File: dvha/db/sql_to_python.py ```python from dvha.db.sql_connector import DVH_SQL from dateutil.parser import parse as date_parser from dvha.tools.errors import push_to_log class QuerySQL: """Object to generically query a specified table. Each column is stored as a property of the object For example, if you query 'dvhs' with condition string of "mrn = 'some_mrn'" you can access any column name 'some_column' with QuerySQL.some_column which will return a list of values for 'some_column'. All properties contain lists with the order of their values synced, unless unique=True Parameters ---------- table_name : str Beams', 'DVHs', 'Plans', or 'Rxs' condition_str : str condition in SQL syntax unique : bool, optional If True, only unique values stored columns : list, optional A list of SQL column names to be included in the return. If left as ``None``, all columns will be returned group : int, optional either 1 or 2 """ def __init__( self, table_name, condition_str, unique=False, columns=None, group=1 ): table_name = table_name.lower() if table_name in {"beams", "dvhs", "plans", "rxs"}: self.table_name = table_name self.condition_str = condition_str with DVH_SQL(group=group) as cnx: all_columns = cnx.get_column_names(table_name) if columns is not None: columns = set(all_columns).intersection( columns ) # ensure provided columns exist in SQL table else: columns = all_columns for column in columns: if column not in { "roi_coord_string", "distances_to_ptv", }: # ignored for memory since not used here self.cursor = cnx.query( self.table_name, column, self.condition_str ) force_date = cnx.is_sqlite_column_datetime( self.table_name, column ) # returns False for pgsql rtn_list = self.cursor_to_list(force_date=force_date) if unique: rtn_list = get_unique_list(rtn_list) # create property of QuerySQL based on SQL column name setattr(self, column, rtn_list) else: push_to_log( msg="QuerySQL: Table name in valid. Please select from Beams, " "DVHs, Plans, or Rxs." 
) def cursor_to_list(self, force_date=False): """Convert a cursor return into a list of values Parameters ---------- force_date : bool, optional Apply dateutil.parser to values Returns ------- list queried data """ rtn_list = [] for row in self.cursor: if force_date: try: if type(row[0]) is int: rtn_list.append(str(date_parser(str(row[0])))) else: rtn_list.append(str(date_parser(row[0]))) except Exception: rtn_list.append("None") elif isinstance(row[0], (int, float)): rtn_list.append(row[0]) else: rtn_list.append(str(row[0])) return rtn_list def get_unique_list(input_list): """Remove duplicates in list and retain order Parameters ---------- input_list : list any list of objects Returns ------- list input_list without duplicates """ rtn_list_unique = [] for value in input_list: if value not in rtn_list_unique: rtn_list_unique.append(value) return rtn_list_unique def get_database_tree(): """Query SQL to get all columns of each table Parameters ---------- Returns ------- dict column data sorted by table """ with DVH_SQL() as cnx: tree = {table: cnx.get_column_names(table) for table in cnx.tables} return tree ``` #### File: DVH-Analytics/dvha/options.py ```python import pickle from os.path import isfile, isdir from os import unlink import hashlib from copy import deepcopy from dvha._version import __version__ from dvha.paths import ( OPTIONS_PATH, OPTIONS_CHECKSUM_PATH, INBOX_DIR, IMPORTED_DIR, REVIEW_DIR, ) from dvha.tools.errors import push_to_log class DefaultOptions: """Create default options, to be inherited by Options class""" def __init__(self): self.VERSION = __version__ self.is_edited = False self.DB_TYPE = "sqlite" self.SQL_PGSQL_IP_HIST = [] self.DEFAULT_CNF = { "pgsql": {"host": "localhost", "dbname": "dvh", "port": "5432"}, "sqlite": {"host": "dvha.db"}, } self.SQL_LAST_CNX = deepcopy(self.DEFAULT_CNF) self.DB_TYPE_GRPS = {grp: deepcopy(self.DB_TYPE) for grp in [1, 2]} self.SQL_LAST_CNX_GRPS = { 1: deepcopy(self.DEFAULT_CNF), 2: deepcopy(self.DEFAULT_CNF), } self.SYNC_SQL_CNX = True self._sql_vars = [ "DB_TYPE", "SQL_PGSQL_IP_HIST", "DEFAULT_CNF", "SQL_LAST_CNX", "DB_TYPE_GRPS", "SQL_LAST_CNX_GRPS", ] self.MIN_BORDER = 50 # These colors propagate to all tabs that visualize your two groups self.PLOT_COLOR = "blue" self.PLOT_COLOR_2 = "red" # The line width and style of selected DVHs in the DVH plot self.DVH_LINE_WIDTH_NONSELECTION = 2 self.DVH_LINE_DASH_NONSELECTION = "solid" self.DVH_LINE_ALPHA_NONSELECTION = 0.075 self.DVH_LINE_COLOR_NONSELECTION = "black" self.DVH_LINE_WIDTH_SELECTION = 4 self.DVH_LINE_DASH_SELECTION = "solid" self.DVH_LINE_ALPHA_SELECTION = 1.0 # Adjusts the opacity of the inner-quartile ranges self.IQR_ALPHA = 0.3 # Adjust the plot font sizes self.PLOT_AXIS_LABEL_FONT_SIZE = "12pt" self.PLOT_AXIS_MAJOR_LABEL_FONT_SIZE = "10pt" # Grid line properties self.GRID_LINE_COLOR = "lightgrey" self.GRID_LINE_WIDTH = 1 self.GRID_ALPHA = 1.0 # Number of data points are reduced by this factor during dynamic # plot interaction to speed-up visualizations # This is only applied to the DVH plot since it has a large amount # of data self.LOD_FACTOR = 100 # All DVHs in SQL DB have 1cGy bin widths regardless of this value. 
# However, the queried DVHs will be # down-sampled using this bin_width self.dvh_bin_width = 5 # Passed into dicompyler-core to put a cap on the maximium dose to # prevent numpy.histogram from # blowing up memory allocation self.dvh_bin_max_dose = {"Gy": 500.0, "% Rx": 300.0} self.dvh_bin_max_dose_units = "Gy" self.dvh_bin_max_dose_options = ["Gy", "% Rx"] # Options for the group statistical DVHs in the DVHs tab self.STATS_MEDIAN_LINE_WIDTH = 2 self.STATS_MEDIAN_LINE_DASH = "solid" self.STATS_MEDIAN_ALPHA = 0.6 self.STATS_MEAN_LINE_WIDTH = 3 self.STATS_MEAN_LINE_DASH = "dashed" self.STATS_MEAN_ALPHA = 0.5 self.STATS_MAX_LINE_WIDTH = 2 self.STATS_MAX_LINE_DASH = "dotted" self.STATS_MAX_ALPHA = 1 self.STATS_MIN_LINE_WIDTH = 2 self.STATS_MIN_LINE_DASH = "dotted" self.STATS_MIN_ALPHA = 1 # Options for the time-series plot self.CORRELATION_POS_COLOR_1 = "blue" self.CORRELATION_NEG_COLOR_1 = "green" self.CORRELATION_POS_COLOR_2 = "red" self.CORRELATION_NEG_COLOR_2 = "purple" self.CORRELATION_MATRIX_VARS = [ "Beam Area (Mean)", "Beam Dose (Mean)", "Beam MU (Mean)", "Beam Perimeter (Mean)", "PTV Cross-Section Median", "PTV Distance (Centroids)", "PTV Distance (Max)", "PTV Distance (Mean)", "PTV Distance (Median)", "PTV Distance (Min)", "PTV Max Dose", "PTV Min Dose", "PTV Surface Area", "PTV Volume", "Plan Complexity", "ROI Cross-Section Max", "ROI Cross-Section Median", "ROI Max Dose", "ROI Mean Dose", "ROI Min Dose", "ROI Surface Area", "ROI Volume", "Rx Dose", "Total Plan MU", ] # Options for the time-series plot self.TIME_SERIES_CIRCLE_SIZE = 10 self.TIME_SERIES_CIRCLE_ALPHA = 0.3 self.TIME_SERIES_TREND_LINE_WIDTH = 1 self.TIME_SERIES_TREND_LINE_DASH = "solid" self.TIME_SERIES_AVG_LINE_WIDTH = 1 self.TIME_SERIES_AVG_LINE_DASH = "dotted" self.TIME_SERIES_PATCH_ALPHA = 0.1 # Options for the time-series plot self.CONTROL_CHART_CIRCLE_SIZE = 10 self.CONTROL_CHART_CIRCLE_ALPHA = 0.3 self.CONTROL_CHART_LINE_WIDTH = 1 self.CONTROL_CHART_LINE_DASH = "solid" self.CONTROL_CHART_LINE_COLOR = "black" self.CONTROL_CHART_CENTER_LINE_WIDTH = 2 self.CONTROL_CHART_CENTER_LINE_DASH = "solid" self.CONTROL_CHART_CENTER_LINE_COLOR = "black" self.CONTROL_CHART_CENTER_LINE_ALPHA = 1 self.CONTROL_CHART_UCL_LINE_WIDTH = 2 self.CONTROL_CHART_UCL_LINE_DASH = "dashed" self.CONTROL_CHART_UCL_LINE_COLOR = "red" self.CONTROL_CHART_UCL_LINE_ALPHA = 1 self.CONTROL_CHART_LCL_LINE_WIDTH = 2 self.CONTROL_CHART_LCL_LINE_DASH = "dashed" self.CONTROL_CHART_LCL_LINE_COLOR = "red" self.CONTROL_CHART_LCL_LINE_ALPHA = 1 self.CONTROL_CHART_PATCH_ALPHA = 0.1 self.CONTROL_CHART_PATCH_COLOR = "grey" self.CONTROL_CHART_OUT_OF_CONTROL_COLOR = "green" self.CONTROL_CHART_OUT_OF_CONTROL_COLOR_2 = "purple" self.CONTROL_CHART_OUT_OF_CONTROL_ALPHA = 1 # Adjust the opacity of the histograms self.HISTOGRAM_ALPHA = 0.3 # Options for the plot in the Multi-Variable Regression tab self.REGRESSION_CIRCLE_SIZE = 10 self.REGRESSION_ALPHA = 0.5 self.REGRESSION_LINE_WIDTH = 2 self.REGRESSION_LINE_DASH = "dashed" self.REGRESSION_RESIDUAL_CIRCLE_SIZE = 3 self.REGRESSION_RESIDUAL_ALPHA = 0.5 self.REGRESSION_RESIDUAL_LINE_WIDTH = 2 self.REGRESSION_RESIDUAL_LINE_DASH = "solid" self.REGRESSION_RESIDUAL_LINE_COLOR = "black" # Random forest self.MACHINE_LEARNING_ALPHA = 0.5 self.MACHINE_LEARNING_ALPHA_DIFF = 0.35 self.MACHINE_LEARNING_SIZE_PREDICT = 5 self.MACHINE_LEARNING_SIZE_DATA = 5 self.MACHINE_LEARNING_SIZE_MULTI_VAR = 5 self.MACHINE_LEARNING_COLOR_PREDICT = "blue" self.MACHINE_LEARNING_COLOR_DATA = "black" self.MACHINE_LEARNING_COLOR_MULTI_VAR = "red" # This 
is the number of bins up do 100% used when resampling a DVH # to fractional dose self.RESAMPLED_DVH_BIN_COUNT = 5000 self.MLC_ANALYZER_OPTIONS = { "max_field_size_x": 400.0, "max_field_size_y": 400.0, "complexity_weight_x": 1.0, "complexity_weight_y": 1.0, } # Per TG-263 (plus NONE, ITV, and IGNORED) self.ROI_TYPES = [ "NONE", "ORGAN", "PTV", "ITV", "CTV", "GTV", "AVOIDANCE", "BOLUS", "CAVITY", "CONTRAST_AGENT", "EXTERNAL", "IRRAD_VOLUME", "REGISTRATION", "TREATED_VOLUME", "IGNORED", ] self.KEEP_IN_INBOX = 0 self.SEARCH_SUBFOLDERS = 1 self.IMPORT_UNCATEGORIZED = 0 self.COPY_MISC_FILES = 0 self.INBOX_DIR = INBOX_DIR self.IMPORTED_DIR = IMPORTED_DIR self.REVIEW_DIR = REVIEW_DIR self.MAX_DOSE_VOLUME = 0.03 self.USE_DICOM_DVH = False self.AUTO_SUM_DOSE = True self.save_fig_param = { "figure": { "y_range_start": -0.0005, "x_range_start": 0.0, "y_range_end": 1.0005, "x_range_end": 10000.0, "background_fill_color": "none", "border_fill_color": "none", "plot_height": 600, "plot_width": 820, }, "legend": { "background_fill_color": "white", "background_fill_alpha": 1.0, "border_line_color": "white", "border_line_alpha": 1.0, "border_line_width": 1, }, } self.apply_range_edits = False self.positions = { "user_settings": None, "export_figure": None, "main": None, } self.window_sizes = {"main": None, "import": None} self.AUTO_SQL_DB_BACKUP = False self.MIN_RESOLUTION_MAIN = (1200, 700) self.MAX_INIT_RESOLUTION_MAIN = (1550, 900) self.SHOW_NEW_PTV_CALC_WARNING = True self.GET_DVH_KWARGS = { "calculate_full_volume": True, "use_structure_extents": False, "interpolation_resolution": None, "interpolation_segments_between_planes": 0, "memmap_rtdose": False, } # compute high resolution DVH if volume less than this (cc) self.DVH_SMALL_VOLUME_THRESHOLD = 3 self.DVH_HIGH_RESOLUTION_FACTOR = 4 # Must be a factor of 2 self.DVH_HIGH_RESOLUTION_FACTOR_OPTIONS = ["2", "4", "8", "16", "32"] self.DVH_HIGH_RESOLUTION_SEGMENTS_BETWEEN = 3 # Must be int self.ENABLE_EDGE_BACKEND = False self.OVH_RESOLUTION = 3 # mm self.DTH_RESOLUTION = 0.5 # perimeter space resolution in mm class Options(DefaultOptions): def __init__(self): DefaultOptions.__init__(self) self.__set_option_attr() self.load() def __set_option_attr(self): option_attr = [] for attr in self.__dict__: if not attr.startswith("_"): option_attr.append(attr) self.option_attr = option_attr def load(self): self.is_edited = False if isfile(OPTIONS_PATH) and self.is_options_file_valid: try: with open(OPTIONS_PATH, "rb") as infile: loaded_options = pickle.load(infile) self.upgrade_options(loaded_options) except Exception as e: msg = ( "Options.load: Options file corrupted. Loading " "default options." ) push_to_log(e, msg=msg) loaded_options = {} for key, value in loaded_options.items(): if hasattr(self, key): # ignore directories that don't exist if key.endswith('_DIR') and not isdir(value): msg = f"Options.load: {value} is not a valid " \ f"directory. 
{key} will use default value of " \ f"{getattr(self, key)} instead" push_to_log(msg=msg) else: setattr(self, key, value) def save(self): self.is_edited = False out_options = {} for attr in self.option_attr: out_options[attr] = getattr(self, attr) out_options["VERSION"] = DefaultOptions().VERSION with open(OPTIONS_PATH, "wb") as outfile: pickle.dump(out_options, outfile) self.save_checksum() def set_option(self, attr, value): """ Change or create an option value :param attr: name of option :type attr: str :param value: value of option """ if not hasattr(self, attr): msg = "Options.set_option: %s did not previously exist" % attr push_to_log(msg=msg) setattr(self, attr, value) self.is_edited = True def save_checksum(self): check_sum = self.calculate_checksum() if check_sum: with open(OPTIONS_CHECKSUM_PATH, "w") as outfile: outfile.write(check_sum) @staticmethod def calculate_checksum(): if isfile(OPTIONS_PATH): with open(OPTIONS_PATH, "rb") as infile: options_str = str(infile.read()) return hashlib.md5(options_str.encode("utf-8")).hexdigest() return None @staticmethod def load_stored_checksum(): if isfile(OPTIONS_CHECKSUM_PATH): with open(OPTIONS_CHECKSUM_PATH, "r") as infile: checksum = infile.read() return checksum return None @property def is_options_file_valid(self): try: current_checksum = self.calculate_checksum() stored_checksum = self.load_stored_checksum() if current_checksum == stored_checksum: return True except Exception as e: msg = ( "Options.is_options_file_valid: Corrupted options file " "detected. Loading default options." ) push_to_log(e, msg=msg) return False def restore_defaults(self): """Delete the store options file and checksum, load defaults""" if isfile(OPTIONS_PATH): unlink(OPTIONS_PATH) if isfile(OPTIONS_CHECKSUM_PATH): unlink(OPTIONS_CHECKSUM_PATH) default_options = DefaultOptions() for attr in default_options.__dict__: if not attr.startswith("_") and attr not in self._sql_vars: setattr(self, attr, getattr(default_options, attr)) def clear_positions(self, *evt): """Clear all stored window positions, may be useful if window is off screen on Show""" self.positions = {key: None for key in list(self.positions)} def clear_window_sizes(self, *evt): """Clear all stored window sizes, may be useful if window is off screen on Show""" self.window_sizes = {key: None for key in list(self.window_sizes)} def apply_window_position(self, frame, position_key): """Given a frame, set to previously stored position or center it""" if self.positions[position_key] is not None: frame.SetPosition(self.positions[position_key]) else: frame.Center() def set_window_size(self, frame, size_key): if size_key in self.window_sizes.keys(): self.window_sizes[size_key] = frame.GetSize() def save_window_position(self, frame, position_key): """Store the position of the provided frame""" self.positions[position_key] = frame.GetPosition() def upgrade_options(self, loaded_options): """Reserve this space to apply all option file upgrades""" # This method is only needed for options that change type or structure # New options using a new attribute name will be automatically # generated by the DefaultOptions class self.db_group_upgrade(loaded_options) self.dvh_selection_upgrade(loaded_options) self.roi_type_upgrade(loaded_options) self.positions_upgrade(loaded_options) def positions_upgrade(self, loaded_options): if "main" not in loaded_options["positions"]: loaded_options["positions"]["main"] = None def db_group_upgrade(self, loaded_options): """DVHA v0.8.1 has a SQL cnx per group""" for key in ["DB_TYPE", 
"<KEY>"]: new_key = key + "_GRPS" if new_key not in loaded_options.keys(): # DVHA <0.6.7 did not have DB_TYPE or SQL_LAST_CNX backup_value = ( "pgsql" if key == "DB_TYPE" else getattr(self, key) ) # sqlite not supported <0.6.7 new_value = ( loaded_options[key] if key in loaded_options.keys() else backup_value ) if sorted(list(new_value)) == [ 1, 2, ]: # users who may have used the dev branch loaded_options[new_key] = { grp: deepcopy(new_value[grp]) for grp in [1, 2] } else: loaded_options[new_key] = { grp: deepcopy(new_value) for grp in [1, 2] } def dvh_selection_upgrade(self, loaded_options): # Making nonselection dvh lines visible as v0.8.3 # The following keys need updates to look nice, user can always # change later if "DVH_LINE_WIDTH_SELECTION" not in list(loaded_options): keys = [ "<KEY>", "STATS_MEDIAN_LINE_WIDTH", "STATS_MEAN_LINE_WIDTH", "STATS_MAX_LINE_WIDTH", "STATS_MIN_LINE_WIDTH", ] for key in keys: loaded_options[key] = getattr(self, key) @staticmethod def roi_type_upgrade(loaded_options): """DVHA v0.8.3 added ROI Types into the ROI MAP, and added NONE and IGNORED types""" if "NONE" not in loaded_options["ROI_TYPES"]: loaded_options["ROI_TYPES"].insert(0, "NONE") if "IGNORED" not in loaded_options["ROI_TYPES"]: loaded_options["ROI_TYPES"].append("IGNORED") ``` #### File: dvha/tools/roi_map_generator.py ```python from os.path import join from dvha.paths import TG263_CSV, PREF_DIR class ROIMapGenerator: """Class to interact with the TG263 table and generate compliant ROI Name Maps""" def __init__(self): # Load TG263 table with open(TG263_CSV, "r") as doc: keys = [key.strip() for key in doc.readline().split(",")] self.tg_263 = {key: [] for key in keys} for line in doc: if "xx" not in line: # ignore the rows with generic expansions for col, value in enumerate(line.split(",")): if value == "": value = "None" self.tg_263[keys[col]].append( value.strip().replace("^", ",") ) self.keys = keys self.keys.append(self.keys.pop(0)) # Move Target Type to the end self.key_map = {key: key for key in keys} def __call__( self, map_file_name, body_sites=None, data_filter=None, roi_uid_type="primary", ): """ Create a new ROI map file based on TG263 :param map_file_name: save the file in PREF_DIR with this name :type map_file_name: str :param body_sites: automatically create a data_filter based on these body sites :type body_sites: list :param data_filter: used to write a subset of the TG263 data :type data_filter: dict :param roi_uid_type: either 'primary', 'reverse', or 'fmaid' :type roi_uid_type: str :return: file_path of new map file """ if body_sites is not None: if type(body_sites) is not list: body_sites = [body_sites] data_filter = {"Anatomic Group": body_sites} data = ( self.tg_263 if data_filter is None else self.get_filtered_data(data_filter) ) lookup = { "primary": "TG263-Primary Name", "reverse": "TG-263-Reverse Order Name", "fmaid": "FMAID", } roi_uids = self.get_unique_values(lookup[roi_uid_type], data) file_path = join(PREF_DIR, map_file_name) with open(file_path, "w") as doc: for roi_uid in roi_uids: doc.write(": ".join([roi_uid] * 3) + "\n") return file_path def prep_data_for_roi_map_gui(self): """ """ ignored = [ c for c in self.keys if "Reverse" in c or "Character" in c or "FMAID" in c ] for c in ignored: self.drop_column(c) str_map = {"Category": "Cat.", "Anatomic": "Anat.", "TG263-": ""} for c in self.keys: if any([key in c for key in list(str_map)]): new_key = c for key, value in str_map.items(): new_key = new_key.replace(key, value) self.key_map[c] = new_key key_index = 
self.keys.index(c) self.tg_263[new_key] = self.drop_column(c, return_data=True) self.keys.insert(key_index, new_key) ########################################################## # Generalized Tools ########################################################## def get_filtered_data(self, data_filter): """Get TG263 data with a filter Parameters ---------- data_filter : dict column: list_allowed_values} Returns ------- type subset of tg_263 with the data_filter applied """ for key in list(data_filter): if ( not isinstance(data_filter[key], list) and data_filter[key].lower() == "all" ): data_filter.pop(key) data = {key: [] for key in self.keys} for row in range(len(self.tg_263[self.keys[0]])): is_included = [ self.tg_263[col][row] in data_filter[col] for col in list(data_filter) ] if all(is_included): for c in self.keys: data[c].append(self.tg_263[c][row]) return data def get_unique_values(self, column, data=None): """ Parameters ---------- column : TG263 column data : optionally provide filtered data, default is entire TG263 table Returns ------- type list of unique values for the provided column """ data = self.tg_263 if data is None else data return sorted([value for value in set(data[column]) if value]) def get_value_from_uid(self, roi_uid, output_type, reverse_name=False): """ Parameters ---------- roi_uid : str either a primary name or FMAID (auto-detects) output_type : str column name of output value reverse_name : bool roi_uid is assumed to be reverse order name if True (Default value = False) Returns ------- type another column value of the provided roi_uid """ input_type = ( "FMAID" if roi_uid.isdigit() else ["TG-263-Reverse Order Name", "TG263-Primary Name"][ reverse_name ] ) return self._get_value_from_uid( roi_uid, input_type=self.key_map[input_type], output_type=self.key_map[output_type], ) def _get_value_from_uid(self, input_value, input_type, output_type): """Generic function to look up another column with a given input value and type Parameters ---------- input_value : input_type : output_type : Returns ------- """ if input_value in self.tg_263[input_type]: index = self.tg_263[input_type].index(input_value) return self.tg_263[output_type][index] def drop_column(self, column, return_data=False): """Remove a column from tg_263 data and update keys Parameters ---------- column : return_data : (Default value = False) Returns ------- """ if column in self.keys and column in list(self.tg_263): self.keys.pop(self.keys.index(column)) popped_data = self.tg_263.pop(column) if return_data: return popped_data ########################################################## # Properties for coding ease ########################################################## @property def target_types(self): """ """ return self.get_unique_values(self.key_map["Target Type"]) @property def anatomic_groups(self): """ """ return self.get_unique_values(self.key_map["Anatomic Group"]) @property def major_categories(self): """ """ return self.get_unique_values(self.key_map["Major Category"]) @property def minor_categories(self): """ """ return self.get_unique_values(self.key_map["Minor Category"]) @property def primary_names(self): """ """ return self.get_unique_values(self.key_map["TG263-Primary Name"]) @property def reverse_order_primary_names(self): """ """ return self.get_unique_values( self.key_map["TG-263-Reverse Order Name"] ) @property def fmaids(self): """ """ return self.get_unique_values(self.key_map["FMAID"]) def get_primary_name(self, fmaid): """ Parameters ---------- fmaid : Returns ------- """ return 
self._get_value_from_uid( fmaid, input_type="FMAID", output_type="TG263-Primary Name" ) def get_fmaid(self, primary_name): """ Parameters ---------- primary_name : Returns ------- """ return self._get_value_from_uid( primary_name, input_type="TG263-Primary Name", output_type="FMAID" ) def get_target_type(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid(roi_uid, output_type="Target Type") def get_major_category(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid(roi_uid, output_type="Major Category") def get_minor_category(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid(roi_uid, output_type="Minor Category") def get_anatomic_group(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid(roi_uid, output_type="Anatomic Group") def get_reverse_order_name(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid( roi_uid, output_type="TG-263-Reverse Order Name" ) def get_description(self, roi_uid): """ Parameters ---------- roi_uid : Returns ------- """ return self.get_value_from_uid(roi_uid, output_type="Description") ```
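A hedged usage sketch for ROIMapGenerator, assuming the dvha package and its bundled TG-263 CSV are importable; the body-site and ROI names below are illustrative and must match entries in the table's 'Anatomic Group' and 'TG263-Primary Name' columns.

```python
# Assumes dvha is installed; names marked "illustrative" are not guaranteed
# to exist in the TG-263 table shipped with your version.
from dvha.tools.roi_map_generator import ROIMapGenerator

gen = ROIMapGenerator()
print(gen.anatomic_groups)           # body sites available for filtering
print(gen.get_fmaid("SpinalCord"))   # FMAID lookup (illustrative primary name)

# Write a map file (saved in PREF_DIR) restricted to one body site,
# keyed by TG-263 primary names.
map_path = gen("thorax_map.roi", body_sites=["Thorax"], roi_uid_type="primary")
print("ROI map written to", map_path)
```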
{ "source": "jni/explode-view", "score": 2 }
#### File: src/explode_view/_widget.py ```python from magicgui import magic_factory, widgets import napari from napari_plugin_engine import napari_hook_implementation import numpy as np from typing import List from ._explode_view import get_exploded_view_func @magic_factory( auto_call=True, factor={ 'widget_type': widgets.FloatSlider, # yapf: disable 'min': 1, 'max': 4, 'step': 0.1 }, ) def explode_view( viewer: napari.viewer.Viewer, factor: float, ) -> List[napari.types.LayerDataTuple]: labels_layer = [ layer for layer in viewer.layers if isinstance(layer, napari.layers.Labels) ][0] image_layers = [ layer for layer in viewer.layers if isinstance(layer, napari.layers.Image) ] if not hasattr(explode_view, '_explode_funcs'): explode_view._explode_funcs = [ get_exploded_view_func(labels_layer.data, image_layer.data) for image_layer in image_layers ] funcs = explode_view._explode_funcs new_labels, new_image_0 = funcs[0](factor) shape = np.asarray(labels_layer.data.shape) scale = np.asarray(labels_layer.scale) translate = np.asarray(labels_layer.translate ) + scale * shape * (1-factor) / 2 meta = {'scale': scale, 'translate': translate} new_images = [new_image_0] + [func(factor)[1] for func in funcs[1:]] new_layers = [] for image, image_layer in zip(new_images, image_layers): new_layers.append(( image, {**meta, 'name': image_layer.name + ' exploded', 'blending': image_layer.blending, 'colormap': image_layer.colormap}, 'image' )) new_layers.append(( new_labels, {**meta, 'name': labels_layer.name + ' exploded', 'visible': False}, 'labels' )) return new_layers @napari_hook_implementation def napari_experimental_provide_dock_widget(): return explode_view ```
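The widget recentres the exploded result by shifting `translate` by `scale * shape * (1 - factor) / 2`. The sketch below checks that offset numerically under the assumption, not visible in this file, that the exploded array returned by `get_exploded_view_func` is roughly `factor` times larger than the original along each axis; the shapes and spacings are made up.

```python
import numpy as np

shape = np.array([64, 128, 128])     # original labels shape (illustrative)
scale = np.array([2.0, 0.5, 0.5])    # voxel spacing (illustrative)
translate = np.array([10.0, 0.0, 0.0])
factor = 2.5

new_translate = translate + scale * shape * (1 - factor) / 2
new_shape = factor * shape           # assumed size of the exploded array

centre_before = translate + scale * shape / 2
centre_after = new_translate + scale * new_shape / 2
print(np.allclose(centre_before, centre_after))  # True: the volume stays centred
```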
{ "source": "jni/flatten", "score": 3 }
#### File: jni/flatten/flatdir.py ```python import os import sys import argparse import re BYTES_FINDER = re.compile(r'(\d+(?:\.\d+)?)\s?(k|K|m|M|g|G|t|T)?(B|b)?') POWERS_D = {'k': 10**3, 'm': 10**6, 'g': 10**9, 't': 10**12} POWERS_B = {'k': 2**10, 'm': 2**20, 'g': 2**30, 't': 2**40} def r_scandir(path, follow_symlinks=False): '''List files and directories recursively. Parameters ---------- path : string A path to a directory. Returns ------- dir_iterator : iterator of DirEntry objects Iterator of DirEntry objects, same as `os.scandir`. ''' for entry in os.scandir(path): yield entry if entry.is_dir() and (not entry.is_symlink() or follow_symlinks): yield from r_scandir(entry.path, follow_symlinks) def human2bytes(text, binary=True): '''Convert a human-readable file size spec to an integer number of bytes. Parameters ---------- text : string The text to be converted. binary : bool, optional Whether to use binary multipliers (1024, 1024^2, etc) (default), or decimal ones (1000, 1000000, etc). Returns ------- bytes_count : int The number of bytes matching the input text. Examples -------- >>> human2bytes('4500') 4500 >>> human2bytes('1.5kb') 1536 >>> human2bytes('2MB', False) 2000000 ''' parsed = BYTES_FINDER.match(text) if parsed is None: raise ValueError('Not a valid size spec: %s. Examples of valid ' 'specs include 4500, 512MB, 20kb, and 2TB.' % text) value = float(parsed.group(1)) mod = parsed.group(2) if mod is None: multiplier = 1 else: multiplier = POWERS_B[mod.lower()] if binary else POWERS_D[mod.lower()] bytes_count = round(value * multiplier) return bytes_count def flatten(indir, outdir, filetype='', minsize=0, maxsize=None): '''Place hardlinks in outdir to all files in nested directories in indir. Parameters ---------- indir : string The input directory to flatten. outdir : string The output directory, where to place all the files in `indir`. filetype : string, optional Link only files with this extension. minsize : int, optional Link only files larger than this size. maxsize : int, optional Link only files smaller than this size. ''' filetype = str.lower(filetype) if not os.path.isdir(outdir): os.makedirs(outdir) files = r_scandir(indir) for entry in files: info = entry.stat() if (entry.is_dir() or info.st_size < minsize or (maxsize is not None and info.st_size > maxsize)): continue if not entry.name.lower().endswith(filetype): continue src = os.path.abspath(entry.path) dst = os.path.join(outdir, entry.name) os.link(src, dst) __version__ = '0.1' def main(): parser = argparse.ArgumentParser(description='Flatten a directory tree by hard-linking every nested file into a single directory.') parser.add_argument('indir', help='Input directory to flatten.') parser.add_argument('outdir', help='Output directory: all files recursively found in <indir> ' 'will be placed here. Created if it doesn\'t exist.') parser.add_argument('-t', '--filetype', help='Only flatten files matching this extension.') parser.add_argument('-m', '--minsize', type=human2bytes, help='Find only files larger than this size. This can be a human-' 'readable string, such as \'512kB\'.') parser.add_argument('-M', '--maxsize', type=human2bytes, help='Find only files smaller than this size. This can be a human-' 'readable string, such as \'512kB\'.') args = parser.parse_args(sys.argv[1:]) flatten(args.indir, args.outdir, filetype=args.filetype or '', minsize=args.minsize or 0, maxsize=args.maxsize) ```
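A hedged usage sketch for flatdir, assuming flatdir.py is importable from the working directory; the paths are illustrative.

```python
from flatdir import flatten, human2bytes

# Hard-link every .tif file between 1 MiB and 2 GiB from a nested archive
# into a single flat directory (created if it does not exist).
flatten(
    "/data/archive",                # nested input tree (illustrative path)
    "/data/flat",                   # flat output directory
    filetype=".tif",                # matched with str.endswith on lowercased names
    minsize=human2bytes("1MB"),     # 2**20 bytes: binary multipliers by default
    maxsize=human2bytes("2GB"),     # 2 * 2**30 bytes
)
```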
{ "source": "jni/gala", "score": 2 }
#### File: gala/features/base.py ```python import numpy as np from .. import evaluate as ev class Null(object): def __init__(self, *args, **kwargs): self.default_cache = 'feature-cache' def __call__(self, g, n1, n2=None): return self.compute_features(g, n1, n2) def write_fm(self, json_fm={}): return json_fm def compute_features(self, g, n1, n2=None): if n2 is None: c1 = g.node[n1][self.default_cache] return self.compute_node_features(g, n1, c1) if g.node[n1]['size'] > g.node[n2]['size']: n1, n2 = n2, n1 # smaller node first c1, c2, ce = [d[self.default_cache] for d in [g.node[n1], g.node[n2], g[n1][n2]]] return np.concatenate(( self.compute_node_features(g, n1, c1), self.compute_node_features(g, n2, c2), self.compute_edge_features(g, n1, n2, ce), self.compute_difference_features(g, n1, n2, c1, c2) )) def create_node_cache(self, *args, **kwargs): return np.array([]) def create_edge_cache(self, *args, **kwargs): return np.array([]) def update_node_cache(self, *args, **kwargs): pass def update_edge_cache(self, *args, **kwargs): pass def compute_node_features(self, *args, **kwargs): return np.array([]) def compute_edge_features(self, *args, **kwargs): return np.array([]) def compute_difference_features(self, *args, **kwargs): return np.array([]) class Composite(Null): def __init__(self, children=[], *args, **kwargs): super(Composite, self).__init__() self.children = children def write_fm(self, json_fm={}): for child in self.children: json_fm.update(child.write_fm(json_fm)) return json_fm def create_node_cache(self, *args, **kwargs): return [c.create_node_cache(*args, **kwargs) for c in self.children] def create_edge_cache(self, *args, **kwargs): return [c.create_edge_cache(*args, **kwargs) for c in self.children] def update_node_cache(self, g, n1, n2, dst, src): for i, child in enumerate(self.children): child.update_node_cache(g, n1, n2, dst[i], src[i]) def update_edge_cache(self, g, e1, e2, dst, src): for i, child in enumerate(self.children): child.update_edge_cache(g, e1, e2, dst[i], src[i]) def compute_node_features(self, g, n, cache=None): if cache is None: cache = g.node[n][self.default_cache] features = [] for i, child in enumerate(self.children): features.append(child.compute_node_features(g, n, cache[i])) return np.concatenate(features) def compute_edge_features(self, g, n1, n2, cache=None): if cache is None: cache = g[n1][n2][self.default_cache] features = [] for i, child in enumerate(self.children): features.append(child.compute_edge_features(g, n1, n2, cache[i])) return np.concatenate(features) def compute_difference_features(self, g, n1, n2, cache1=None, cache2=None): if cache1 is None: cache1 = g.node[n1][self.default_cache] if cache2 is None: cache2 = g.node[n2][self.default_cache] features = [] for i, child in enumerate(self.children): features.append(child.compute_difference_features( g, n1, n2, cache1[i], cache2[i])) return np.concatenate(features) def _compute_delta_vi(ctable, fragments0, fragments1): c0 = np.sum(ctable[list(fragments0)], axis=0) c1 = np.sum(ctable[list(fragments1)], axis=0) cr = c0 + c1 p0 = np.sum(c0) p1 = np.sum(c1) pr = np.sum(cr) p0g = np.sum(ev.xlogx(c0)) p1g = np.sum(ev.xlogx(c1)) prg = np.sum(ev.xlogx(cr)) return (pr * np.log2(pr) - p0 * np.log2(p0) - p1 * np.log2(p1) - 2 * (prg - p0g - p1g)) class Mock(Null): ''' Mock feature manager to verify agglomerative learning works. This manager learns a different feature map for fragments vs agglomerated segments. It relies on knowing the ground truth for a given fragmentation. 
Parameters ---------- frag, gt : array of int, same shape The fragmentation and ground truth volumes. Must have same shape. ''' def __init__(self, frag, gt): super().__init__() self.ctable = ev.contingency_table(frag, gt, ignore_seg=[], ignore_gt=[]).toarray() self._std = 0.1 # standard deviation of feature computations def eps(self): return np.random.randn(2) * self._std def compute_features(self, g, n1, n2=None): if n2 is None: return np.array([]) f1, f2 = g.node[n1]['fragments'], g.node[n2]['fragments'] should_merge = _compute_delta_vi(self.ctable, f1, f2) < 0 if should_merge: return np.array([0., 0.]) + self.eps() else: if len(f1) + len(f2) == 2: # single-fragment merge return np.array([1., 0.]) + self.eps() else: # multi-fragment merge return np.array([0., 1.]) + self.eps() ```
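The `Mock` manager decides whether two fragments should be merged from the sign of the variation-of-information change computed by `_compute_delta_vi`. The self-contained sketch below mirrors that computation on a toy contingency table of raw voxel counts (in gala the table comes from `ev.contingency_table`): fragments lying in the same ground-truth segment give a negative change, fragments from different segments a positive one.

```python
import numpy as np


def xlogx(x):
    """Elementwise x * log2(x), treating 0 log 0 as 0 (as gala.evaluate.xlogx does)."""
    x = np.asarray(x, dtype=float)
    out = np.zeros_like(x)
    nz = x > 0
    out[nz] = x[nz] * np.log2(x[nz])
    return out


def delta_vi(ctable, frags0, frags1):
    """Change in split VI caused by merging two fragment sets; mirrors _compute_delta_vi."""
    c0 = ctable[list(frags0)].sum(axis=0)
    c1 = ctable[list(frags1)].sum(axis=0)
    cr = c0 + c1
    p0, p1, pr = c0.sum(), c1.sum(), cr.sum()
    p0g, p1g, prg = xlogx(c0).sum(), xlogx(c1).sum(), xlogx(cr).sum()
    return (pr * np.log2(pr) - p0 * np.log2(p0) - p1 * np.log2(p1)
            - 2 * (prg - p0g - p1g))


# Rows are fragments 0-2, columns are ground-truth segments A and B (voxel counts).
ctable = np.array([[50.,  0.],   # fragment 0 lies entirely in segment A
                   [30.,  0.],   # fragment 1 lies entirely in segment A
                   [ 0., 40.]])  # fragment 2 lies entirely in segment B

print(delta_vi(ctable, {0}, {1}) < 0)  # True: merging 0 and 1 lowers the VI
print(delta_vi(ctable, {0}, {2}) < 0)  # False: merging across segments raises it
```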
{ "source": "jni/gala-scripts", "score": 2 }
#### File: jni/gala-scripts/crossval4x.py ```python import pickle import os import numpy as np from gala import imio, agglo, features, classify fman = features.default.snemi3d() def train(index): out_fn = 'training-data-%i.h5' % index if os.path.exists(out_fn): data, labels = classify.load_training_data_from_disk(out_fn, names=['data', 'labels']) else: ws_tr = imio.read_image_stack('watershed-%i.lzf.h5' % index) pr_tr = imio.read_image_stack('probabilities-%i.lzf.h5' % index) / 255 gt_tr = imio.read_image_stack('ground-truth-%i.lzf.h5' % index) g = agglo.Rag(ws_tr, pr_tr, feature_manager=fman) data, labels = g.learn_agglomerate(gt_tr, fman, min_num_epochs=4)[0][:2] classify.save_training_data_to_disk([data, labels], fn='training-data-%i.h5' % index, names=['data', 'labels']) print('total training data:', data.shape) print('size in MB:', data.size * data.itemsize / 1e6) rf = classify.DefaultRandomForest() rf.fit(data, labels[:, 0]) policy = agglo.classifier_probability(fman, rf) return policy def test(index, policy): ws = imio.read_image_stack('watershed-%i.lzf.h5' % index) pr = imio.read_image_stack('probabilities-%i.lzf.h5' % index) / 255 g = agglo.Rag(ws, pr, merge_priority_function=policy, feature_manager=fman) g.agglomerate(np.inf) return g.tree if __name__ == '__main__': trees = {} for training_index in range(4): print('training %i' % training_index) policy = train(training_index) for testing_index in range(4): if testing_index == training_index: continue print('testing %i' % testing_index) tree = test(testing_index, policy) trees[(training_index, testing_index)] = tree with open('results-%i-%i.pickle' % (training_index, testing_index), 'wb') as fout: pickle.dump(tree, fout, protocol=-1) with open('results.pickle', 'wb') as fout: pickle.dump(trees, fout, protocol=-1) ```
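Once the script above has been run, the per-pair agglomeration trees it saves can be read back directly; a minimal sketch using the filenames the script itself writes:

```python
import pickle

# load the dictionary of agglomeration trees saved by the script above,
# keyed by (training_index, testing_index)
with open('results.pickle', 'rb') as f:
    trees = pickle.load(f)

for (train_idx, test_idx), tree in sorted(trees.items()):
    print('trained on %d, tested on %d:' % (train_idx, test_idx), type(tree).__name__)
```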
{ "source": "jni/gel", "score": 3 }
#### File: gel/gel/_geodesic.py ```python import heapq import itertools as it import numpy as np import scipy.ndimage as nd import scipy.spatial.distance as distance def gel(image, num_superpixels, use_channels=False, num_iter=20, mode='viscosity'): """Find GEL superpixels in an image. Parameters ---------- image : np.ndarray (arbitrary dimension) The image in which to find the superpixels num_superpixels : int The desired number of superpixels use_channels : bool (optional) Whether to treat the last dimension of `image` as the channels (default: False) num_iter : int (optional) The number of geodesic expansion and recentering iterations to run (default: 20) mode : string (optional) Whether to treat the image as a viscosity map (default). Returns ------- superpixels : np.ndarray (same shape as `image`) The superpixels found by GEL. """ ndim = float(image.ndim) spacing = int(np.floor((image.size / num_superpixels) ** (1 / ndim))) slices = [slice(spacing/2, None, spacing)] * image.ndim centers = np.zeros(image.shape, np.uint8) centers[slices] = 1 centers = nd.label(centers)[0] centers_old = None converged = False uniques = range(1, centers.max() + 1) for i in range(num_iter): if converged: break superpixels = geodesic_expansion(centers, image, mode) centers_new = label_centers_of_mass(superpixels, uniques) if i > 0 and centers_old == centers_new: break centers_old = centers_new centers = volume_of_labels(centers_new, image.shape) return superpixels def volume_of_labels(centers, shape): """Return a volume in which the given coordinates are point labels. Parameters ---------- centers : list of tuples A list of coordinates (possibly fractional). shape : tuple The shape of the volume containing the centers. Returns ------- volume : np.ndarray (shape given by `shape`) """ volume = np.zeros(shape, int) for i, center in enumerate(centers): center = [int(np.round(coord)) for coord in center] volume[tuple(center)] = i+1 return volume def label_centers_of_mass(labels, uniques=None): """Find the center of mass of each label assuming uniform weights. Parameters ---------- labels : np.ndarray, integer type A label field. uniques : list of int (optional) The labels for which to compute the centers of mass. Returns ------- centers : list of tuples Each tuple is a set of coordinates (of length labels.ndim) for the center of mass of the corresponding label. Notes ----- This function will be slow if called repeatedly without a `uniques` input, as it needs to run `np.unique` on the input array. """ if uniques is None: uniques = np.unique(labels) if uniques[0] == 0: uniques = uniques[1:] centers = nd.measurements.center_of_mass(labels, labels, uniques) return centers def neighbors(coords, shape, connectivity=1): """Return the neighbors of a set of coordinates. Parameters ---------- coords : array-like The coordinates for which we want neighbor coordinates. shape : array-like The shape of the array including coords. Used to check for borders. connectivity : int (optional), min=1, max=len(shape) The connectivity of the neighborhood. Returns ------- neighbors : np.ndarray (num_neighbors x len(shape)) The coordinates of neighboring array elements. 
""" coords = np.atleast_2d(coords) shape = np.asarray(shape) ndim = len(shape) n_elem = 3 ** ndim footprint = nd.generate_binary_structure(ndim, connectivity) footprint.ravel()[n_elem / 2] = 0 neighbors = coords + (np.asarray(footprint.nonzero()).T - np.ones(ndim)) not_border = True - ((neighbors < 0).any(axis=1) + (neighbors >= shape).any(axis=1)) return neighbors[not_border].astype(int) def geodesic_expansion(labels, image, mode='viscosity', connectivity=1): """Expand the location of labels into a geodesic space defined by image. Parameters ---------- labels : numpy array (integer type) The initial location of the labels (typically sparse) image : numpy array, dimensions = labels.ndim or labels.ndim + 1 The space along which labels must expand. It can either be single-channel (same dimension as `labels`) or multi-channel (one more dimension than `labels`; the last dimension is assumed to be the channels). mode : string (optional) Whether to treat the image values as a `viscosity` (default) or a `feature`. In the first case, the cost of expanding from pixel `x` to pixel `y` is `image[y]`. In the second, it is `d(image[x], image[y])`. connectivity : int (optional) The connectivity defining neighboring pixels. It is an int between 1 and `labels.ndim`, inclusive. (default: 1) Returns ------- labels : numpy array (integer type) The resulting label field """ label_locations = labels.nonzero() initial_labels = labels[label_locations] timer = it.count() distance_heap = [(0, time_added, coord, label) for time_added, coord, label in zip(timer, np.transpose(label_locations), initial_labels)] if mode == 'viscosity': def dist(img, src, dst): return img[dst] else: def dist(img, src, dst): return distance.euclidean(img[src], img[dst]) labels_out = np.zeros_like(labels) while len(distance_heap) > 0: nearest = heapq.heappop(distance_heap) d, t, loc, lab = nearest if labels_out[tuple(loc)] == 0: labels_out[tuple(loc)] = lab for n in neighbors(loc, labels_out.shape, connectivity): if labels_out[tuple(n)] == 0: next_d = d + dist(image, tuple(loc), tuple(n)) heapq.heappush(distance_heap, (next_d, timer.next(), n, lab)) return labels_out ```
{ "source": "jni/gobayes", "score": 3 }
#### File: gobayes/gobayes/hypertest.py ```python from scipy.stats import hypergeom def union(a, b): return a | b def unions(iterable): """Compute the set union of all items in a list. Parameters ---------- iterable : any iterable A list/tuple/generator of elements supporting set union. Returns ------- s : set A set of all the items represented within iterable. """ return reduce(union, iterable) def test(module, annots_dict, inverse_annots_dict, mode='standard'): """Use the hypergeometric test on functions in a gene module. The hypergeometric test is also known as Fisher's exact test. Parameters ---------- module : [string] The list of genes in a module. annots_dict : {string: [string]} dictionary A mapping of genes to functions inverse_annots_dict : {string: [string]} dictionary A mapping of functions to genes mode : {'standard', 'conditional'}, optional Whether to use the standard hypergeometric test or the conditional one (default: standard). Returns ------- d : {string: float} dictionary A mapping of functions to p-values. """ represented_functions = unions([annots_dict[gene] for gene in module]) d = {} num_genes = len(annots_dict) num_drawn = len(module) for function in represented_functions: num_labeled_total = len(inverse_annots_dict[function]) num_labeled_in_module = sum( [function in annots_dict[gene] for gene in module]) d[function] = hypergeom.sf(num_labeled_in_module - 1, num_genes, num_labeled_total, num_drawn) if mode.startswith('c'): d[function] /= hypergeom.sf(0, num_genes, num_labeled_total, num_drawn) return d ``` #### File: gobayes/parsers/obo.py ```python import itertools as it import networkx as nx def is_stanza_name(string): return string.startswith('[') and string.endswith(']') def is_transitive_relationship(typedef): return (typedef.has_key('is_transitive') and typedef['is_transitive'][0] == 'true') def obo2networkx(filename, parent_relationships=['is_a', 'part_of']): """Build a graph from an OBO file.""" header, stanzas = parse_obo_raw(filename) terms = stanzas['Term'] g = nx.DiGraph() g.add_nodes_from([term['id'] for term in terms]) for term in terms: g.node[term['id']].update(term) for rel in parent_relationships: if term.has_key(rel): for parent in term[rel]: g.add_edge(term['id'], parent['id'], kind=rel) return g def canonical_go_id(filename): """Return a mapping from synonymous GO IDs to their canonical ID. The Gene Ontology (GO) database maps more than one GO ID to the same GO term, probably for historical reasons. These appear as entries in a GO term stanza with the key `alt_id`. This function returns a Python dictionary mapping any ID to the canonical GO ID, that is, the one appearing under the `id` key in the OBO file. Parameters ---------- filename : str The name of the Gene Ontology OBO flat file. Returns ------- d : dict A python dictionary mapping IDs to canonical IDs. 
""" terms = parse_obo_raw(filename)[1]['Term'] d = {} for term in terms: term_id = term['id'] d[term_id] = term_id if term.has_key('alt_id'): for alt_id in term['alt_id']: d[alt_id] = term_id return d def parse_obo_raw(filename): """Parse an OBO file into list of stanzas.""" with open(filename, 'r') as f: lines_iter = (line.rstrip('\n') for line in f if line != '\n') lines_iter = it.groupby(lines_iter, is_stanza_name) lines_iter = (group[1] for group in lines_iter) header = get_header(lines_iter) stanzas = get_stanzas(lines_iter) return header, stanzas def get_header(lines_iter): """Return header dictionary and remove corresponding lines from input.""" header = {} header_lines = lines_iter.next() for line in header_lines: key, value = line.split(': ', 1) if header.has_key(key): existing_value = header[key] if type(existing_value) != list: header[key] = [existing_value, value] else: existing_value.append(value) else: header[key] = value return header def get_stanzas(lines_iter): """Return keyed lists of stanzas from OBO lines cleaned of the header.""" stanzas = {} while True: try: stanza_name, stanza = pop_stanza(lines_iter) stanzas.setdefault(stanza_name, []).append(stanza) except StopIteration: break return stanzas def pop_stanza(lines_iter): stanza = {} stanza_name = lines_iter.next().next()[1:-1] stanza_lines = lines_iter.next() for line in stanza_lines: key, value = line.split(': ', 1) if len(value.split(' ! ')) > 1: value_id, value_name = value.split(' ! ', 1) value = {'id': value_id, 'name': value_name} if key == 'id' or key == 'name': stanza[key] = value else: stanza.setdefault(key, []).append(value) return stanza_name, stanza ```
{ "source": "jni/gputools", "score": 2 }
#### File: gputools/transforms/transformations.py ```python from __future__ import print_function, unicode_literals, absolute_import, division import logging logger = logging.getLogger(__name__) import os import numpy as np import warnings from gputools import OCLArray, OCLImage, OCLProgram from gputools.core.ocltypes import cl_buffer_datatype_dict from gputools.utils import mat4_rotate, mat4_translate from ._abspath import abspath from mako.template import Template def affine(data, mat=np.identity(4), output_shape=None, mode="constant", interpolation="linear", res_g=None): """ affine transform data with matrix mat, which is the inverse coordinate transform matrix (similar to ndimage.affine_transform) Parameters ---------- data, ndarray or OCLImage 3d array to be transformed mat, ndarray or OCLArray 3x3 or 4x4 inverse coordinate transform matrix output_shape: tuple of ints shape of transformed array mode: string boundary mode, one of the following: 'constant' pads with zeros 'edge' pads with edge values 'wrap' pads with the repeated version of the input interpolation, string interpolation mode, one of the following 'linear' 'nearest' Returns ------- res: ndarray or openCL array transformed array (same shape as input) """ warnings.warn( "gputools.transform.affine: API change as of gputools>= 0.2.8: the inverse of the matrix is now used as in scipy.ndimage.affine_transform") if data.ndim != 3: raise ValueError("input data has to be a 3d array!") interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"], "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]} mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"], "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"], "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"] } if not interpolation in interpolation_defines: raise KeyError( "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys()))) if not mode in mode_defines: raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys()))) # reorder matrix, such that x,y,z -> z,y,x (as the kernel is assuming that) if output_shape is None: output_shape = data.shape if isinstance(data, OCLImage): d_im = data else: d_im = OCLImage.from_array(data.astype(np.float32, copy=False)) if res_g is None: res_g = OCLArray.empty(output_shape, np.float32) mat_inv_g = OCLArray.from_array(mat.astype(np.float32, copy=False)) prog = OCLProgram(abspath("kernels/affine.cl") , build_options=interpolation_defines[interpolation] + mode_defines[mode]) prog.run_kernel("affine3", output_shape[::-1], None, d_im, res_g.data, mat_inv_g.data) if isinstance(data, OCLImage): return res_g else: return res_g.get() def shift(data, shift=(0, 0, 0), mode="constant", interpolation="linear"): """ translates 3d data by given amount Parameters ---------- data: ndarray 3d array shift : float or sequence The shift along the axes. If a float, `shift` is the same for each axis. If a sequence, `shift` should contain one value for each axis. 
mode: string boundary mode, one of the following: 'constant' pads with zeros 'edge' pads with edge values 'wrap' pads with the repeated version of the input interpolation, string interpolation mode, one of the following 'linear' 'nearest' Returns ------- res: ndarray shifted array (same shape as input) """ if np.isscalar(shift): shift = (shift,) * 3 if len(shift) != 3: raise ValueError("shift (%s) should be of length 3!") shift = -np.array(shift) return affine(data, mat4_translate(*shift), mode=mode, interpolation=interpolation) def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"): """ rotates data around axis by a given angle Parameters ---------- data: ndarray 3d array axis: tuple axis to rotate by angle about axis = (x,y,z) angle: float center: tuple or None origin of rotation (cz,cy,cx) in pixels if None, center is the middle of data mode: string boundary mode, one of the following: 'constant' pads with zeros 'edge' pads with edge values 'wrap' pads with the repeated version of the input interpolation, string interpolation mode, one of the following 'linear' 'nearest' Returns ------- res: ndarray rotated array (same shape as input) """ if center is None: center = tuple([s // 2 for s in data.shape]) cx, cy, cz = center m = np.dot(mat4_translate(cx, cy, cz), np.dot(mat4_rotate(angle, *axis), mat4_translate(-cx, -cy, -cz))) m = np.linalg.inv(m) return affine(data, m, mode=mode, interpolation=interpolation) def map_coordinates(data, coordinates, interpolation="linear", mode='constant'): """ Map data to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the corresponding coordinates in the input. should correspond to scipy.ndimage.map_coordinates Parameters ---------- data coordinates output interpolation mode cval prefilter Returns ------- """ if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)): raise ValueError("input data has to be a 2d or 3d array!") coordinates = np.asarray(coordinates, np.int32) if not (coordinates.shape[0] == data.ndim): raise ValueError("coordinate has to be of shape (data.ndim,m) ") interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"], "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]} mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"], "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"], "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"] } if not interpolation in interpolation_defines: raise KeyError( "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys()))) if not mode in mode_defines: raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys()))) if not data.dtype.type in cl_buffer_datatype_dict: raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys()))) dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]] d_im = OCLImage.from_array(data) coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False)) res_g = OCLArray.empty(coordinates.shape[1], data.dtype) prog = OCLProgram(abspath("kernels/map_coordinates.cl") , build_options=interpolation_defines[interpolation] + mode_defines[mode] + dtype_defines) kernel = "map_coordinates{ndim}".format(ndim=data.ndim) prog.run_kernel(kernel, (coordinates.shape[-1],), None, d_im, res_g.data, coordinates_g.data) return res_g.get() def geometric_transform(data, mapping = "c0,c1", output_shape=None, 
mode='constant', interpolation="linear"): """ Apply an arbitrary geometric transform. The given mapping function is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. Parameters ---------- %(input)s mapping : {callable, scipy.LowLevelCallable} A callable object that accepts a tuple of length equal to the output array rank, and returns the corresponding input coordinates as a tuple of length equal to the input array rank. """ if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)): raise ValueError("input data has to be a 2d or 3d array!") interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"], "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]} mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"], "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"], "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"] } if not interpolation in interpolation_defines: raise KeyError( "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys()))) if not mode in mode_defines: raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys()))) if not data.dtype.type in cl_buffer_datatype_dict: raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys()))) dtype_defines = ["-D", "DTYPE={type}".format(type=cl_buffer_datatype_dict[data.dtype.type])] image_functions = {np.float32:"read_imagef", np.uint8: "read_imageui", np.uint16: "read_imageui", np.int32: "read_imagei"} image_read_defines = ["-D","READ_IMAGE=%s"%image_functions[data.dtype.type]] with open(abspath("kernels/geometric_transform.cl"), "r") as f: tpl = Template(f.read()) output_shape = tuple(output_shape) mappings = {"FUNC2": "c1,c0", "FUNC3": "c2,c1,c0"} mappings["FUNC%d" % data.ndim] = ",".join(reversed(mapping.split(","))) rendered = tpl.render(**mappings) d_im = OCLImage.from_array(data) res_g = OCLArray.empty(output_shape, data.dtype) prog = OCLProgram(src_str=rendered, build_options=interpolation_defines[interpolation] + mode_defines[mode] + dtype_defines+image_read_defines) kernel = "geometric_transform{ndim}".format(ndim=data.ndim) prog.run_kernel(kernel, output_shape[::-1], None, d_im, res_g.data) return res_g.get() if __name__ == '__main__': d = np.zeros((200, 200, 200), np.float32) d[20:-20, 20:-20, 20:-20] = 1. # res = translate(d, x = 10, y = 5, z= -10 ) res = rotate(d, center=(100, 100, 100), angle=.5) ```
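A short usage sketch of the transforms above, assuming `gputools` is installed with a working OpenCL device and that the functions are importable from `gputools.transforms` as the file path suggests:

```python
import numpy as np
from gputools.transforms import affine, rotate, shift  # assumed import path

data = np.zeros((64, 64, 64), np.float32)
data[16:-16, 16:-16, 16:-16] = 1.0

shifted = shift(data, shift=(0, 5, -3), mode='constant')
rotated = rotate(data, axis=(0, 0, 1), angle=np.pi / 6, interpolation='linear')

# affine() expects the *inverse* coordinate transform, as in scipy.ndimage.affine_transform;
# the identity matrix therefore returns the input (up to interpolation)
unchanged = affine(data, np.eye(4, dtype=np.float32))
```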
{ "source": "jni/HAPPy", "score": 3 }
#### File: HAPPy/HAPPY/branching.py ```python import numpy as np from skimage.morphology import skeletonize from skan import Skeleton, summarize import networkx as nx import toolz as tz def branch_classification(thres): """Predict the extent of branching. Parameters ---------- thres: array thresholded image to be analysed scale: the scale bar size in pixels/metre Returns ------- skel: array skeletonised image is_main: array whether the hydride identified is part of the main section or if it is a branch BLF: int/float branch length fraction """ skeleton = skeletonize(thres) skel = Skeleton(skeleton, source_image=thres) summary = summarize(skel) is_main = np.zeros(summary.shape[0]) us = summary['node-id-src'] vs = summary['node-id-dst'] ws = summary['branch-distance'] edge2idx = { (u, v): i for i, (u, v) in enumerate(zip(us, vs)) } edge2idx.update({ (v, u): i for i, (u, v) in enumerate(zip(us, vs)) }) g = nx.Graph() g.add_weighted_edges_from( zip(us, vs, ws) ) for conn in nx.connected_components(g): curr_val = 0 curr_pair = None h = g.subgraph(conn) p = dict(nx.all_pairs_dijkstra_path_length(h)) for src in p: for dst in p[src]: val = p[src][dst] if (val is not None and np.isfinite(val) and val > curr_val): curr_val = val curr_pair = (src, dst) for i, j in tz.sliding_window( 2, nx.shortest_path( h, source=curr_pair[0], target=curr_pair[1], weight='weight' ) ): is_main[edge2idx[(i, j)]] = 1 summary['main'] = is_main # Branch Length Fraction total_length = np.sum(skeleton) trunk_length = 0 for i in range(summary.shape[0]): if summary['main'][i]: trunk_length += summary['branch-distance'][i] branch_length = total_length - trunk_length BLF = branch_length/total_length return skel, is_main, BLF ``` #### File: HAPPy/HAPPY/cropping_functions.py ```python import numpy as np from skimage.morphology import binary_dilation, remove_small_objects, disk def cropImage(image, crop_bottom, crop_top, crop_left, crop_right): """Crop an image. Parameters ---------- image : numpy array The image that is to be cropped crop_bottom, crop_top, crop_left, crop : int How many pixels to crop in each of these directions Returns ------- image: array The image that has been cropped """ if crop_bottom == 0: image = image[crop_top:] else: image = image[crop_top:-crop_bottom] if crop_right == 0: image = image[:, crop_left:] else: image = image[:, crop_left:-crop_right] return image def cropping_tube(image, crop_param, size_param, dilation_param): """Crop tubes of an image. Parameters ---------- image : numpy array The original tubed image that needs to be cropped. crop_param : int Threshold for removing the dark edges i.e., tube ends in the image. size_param : int Make sure features below this size (i.e. hydrides) are not included in cropping. dilation_param : int Dilate the cropped boundary by a number of pixels. Returns ------- cropped image : numpy array The final cropped image cropped threshold : array of bool True/False array highlighting the cropped and not cropped regions """ crop_threshold = image < crop_param crop_threshold = remove_small_objects(crop_threshold, size_param) crop_threshold = binary_dilation( crop_threshold, selem=disk(dilation_param) ) cropped_image = np.copy(image) cropped_image[crop_threshold] = np.nan return cropped_image, crop_threshold ```
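A minimal call of `branch_classification` on a synthetic thresholded image; the import path is assumed from the file layout above, and skan, scikit-image, networkx and toolz must be installed:

```python
import numpy as np
from HAPPY.branching import branch_classification  # assumed import path

# synthetic "hydride": a horizontal trunk with one short branch hanging off it
thres = np.zeros((30, 60), dtype=bool)
thres[14:16, 5:55] = True   # trunk
thres[16:26, 30:32] = True  # branch

skel, is_main, blf = branch_classification(thres)
print(blf)  # branch length fraction: skeleton length lying off the main trunk
```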
{ "source": "jnihnat/Momentum-QQQ-GLD", "score": 3 }
#### File: lib/Backtrade/Financnik_SMO_PRO.py ```python from __future__ import (absolute_import, division, print_function, unicode_literals) import backtrader as bt import Backtrade.__main__ as main import pandas as pd import datetime import Backtrade.Strategy_Calculations as SDC import numpy as np import os.path home = os.path.expanduser("~") BT_home = str(home + '/Documents/Backtrade/') class IBCommision(bt.CommInfoBase): """A :class:`IBCommision` charges the way interactive brokers does. """ params = ( ('stocklike', True), ('commtype', bt.CommInfoBase.COMM_FIXED), #('percabs', True), # Float. The amount charged per share. Ex: 0.005 means $0.005 ('per_share', 0.005), # Float. The minimum amount that will be charged. Ex: 1.0 means $1.00 ('min_per_order', 1.0), # Float. The maximum that can be charged as a percent of the trade value. Ex: 0.005 means 0.5% ('max_per_order_abs_pct', 0.005), ) def _getcommission(self, size, price, pseudoexec): """ :param size: current position size. > 0 for long positions and < 0 for short positions (this parameter will not be 0) :param price: current position price :param pseudoexec: :return: the commission of an operation at a given price """ commission = size * self.p.per_share order_price = price * size commission_as_percentage_of_order_price = commission / order_price if commission < self.p.min_per_order: commission = self.p.min_per_order elif commission_as_percentage_of_order_price > self.p.max_per_order_abs_pct: commission = order_price * self.p.max_per_order_abs_pct return commission class Ranking(bt.Indicator): lines = ('RO',) params = dict(period1=10, period2=20, period3=60, period4=120) def __init__(self, dat): Var_pom = (bt.talib.ROC(dat, timeperiod=self.params.period1) + bt.talib.ROC(dat, timeperiod=self.params.period2) + bt.talib.ROC(dat, timeperiod=self.params.period3) + bt.talib.ROC(dat, timeperiod=self.params.period4)) / 4 self.lines.RO = bt.talib.EMA(Var_pom, timeperiod=60) class TestStrategy(bt.Strategy): lines = ('SMA', 'MoScore', 'final') params = dict(period_SMA=200, kontext_list=('QQQ',), kontext_data=('AAPL',), kontext_period=200) def __init__(self, **kwargs): # for arg in kwargs.keys(): self.last_date = self.datas[2].datetime.date(-1) self.logy = pd.DataFrame() self.order = None self.kontext = self.datas[:len(self.params.kontext_list)] self.stocks = self.datas[len(self.params.kontext_list)+1:] for d in self.kontext: d.lines.SMA = bt.talib.SMA(d.close, timeperiod=self.params.kontext_period) d.lines.SMA_comp = d.lines.SMA < d.close for i, d in enumerate(self.kontext): if i == len(self.kontext)-1: break else: self.konfil = bt.And(self.kontext[i].lines.SMA_comp, self.kontext[i+1].lines.SMA_comp) for d in self.stocks: d.lines.MoScore = Ranking(dat=d) d.lines.SMA = bt.indicators.SimpleMovingAverage(d.close, period=self.params.period_SMA) def log(self, txt, stockname=None, ordertype=None, stockprice=None, OrderPrice=None, Comm=None, Size=None, dt=None): ' Logging function for this strategy' dt = dt or self.datas[0].datetime.date(0) if isinstance(dt, float): dt = bt.num2date(dt) self.logy = self.logy.append({'Date': dt.isoformat(), 'Stock': stockname, 'Order type': ordertype ,'Stock Price': stockprice, 'Order Price': OrderPrice, 'Order Commission': Comm , 'Order Size': Size,'Order': txt, }, ignore_index=True) def notify_order(self, order): if order.status in [order.Expired]: self.log('BUY EXPIRED') elif order.status in [order.Completed]: if order.isbuy(): self.log('', order.data._name, 'BUY EXECUTED', order.executed.price, 
order.executed.value*order.executed.price, order.executed.comm, order.executed.size) else: # Sell self.log('', order.data._name,'SELL EXECUTED',order.executed.price,order.executed.value*order.executed.price, order.executed.comm,order.executed.size) # Sentinel to None: new orders allowed self.order = None def next(self): # Simply log the closing price of the series from the reference if self.datas[0].datetime.date(0).isoformat() < '2018-01-01' or self.datas[0].datetime.date(0).isoformat() > '2019-12-01': return if self.last_date == self.datas[0].datetime.date(0): return dta = self.datas[0].datetime.date(0) Rankings = self.stocks Rankings.sort(key=lambda d: float(d.lines.MoScore[0]), reverse=True) num_stocks = len(Rankings) if (self.datas[0].datetime.date(0).month < self.datas[0].datetime.date(+1).month) or ( self.datas[0].datetime.date(0).year < self.datas[0].datetime.date(+1).year): for d in self.stocks: if self.getposition(d).size != 0: self.order = self.close(data=d) self.log('SELL CREATE, %.2f' % d.open[1], d._name) if self.konfil[0] and ((self.datas[0].datetime.date(0).month > self.datas[0].datetime.date(-1).month) or (self.datas[0].datetime.date(0).year > self.datas[0].datetime.date(-1).year)): for i, d in enumerate(Rankings): if i < num_stocks * 0.15 and not np.isnan(d.lines.MoScore[0]): self.log('BUY CREATE, %.2f' % d.close[0], d._name) size_pom = round(self.broker.cash / d.close[0] - 1)*0.1 # self.order = self.order_target_percent(target=0.5) self.order = self.buy(data=d, size=size_pom, exectype=bt.Order.Market) else: break if self.order is None: if self.konfil[0]: for i, d in enumerate(Rankings): if i < num_stocks * 0.15 and not np.isnan(d.lines.MoScore[0]) and self.getposition(data=d).size == 0: self.log('BUY CREATE kontext, %.2f' % d.close[0], d._name) size_pom = round(self.broker.cash / d.close[0] - 1) *0.1 # self.order = self.order_target_percent(target=0.99) self.order = self.buy(data=d, size=size_pom, exectype=bt.Order.Market) else: break elif not self.konfil[0]: for d in self.stocks: if self.getposition(data=d).size != 0: self.log('SELL CREATE kontext, %.2f' % d.open[1], d._name) self.order = self.close(data=d) def run(): start = datetime.date(2018, 1, 1) cerebro = bt.Cerebro(cheat_on_open=True) kontext_list = ('QQQ', 'SPY', ) stock_list = ('NDX',) stock_data = SDC.DATA(*stock_list) kontext_data = SDC.DATA(*kontext_list) # Reload = input("Do you want to reload data from AV before running test? 
Y/N: ") # if Reload == "Y": # print("reloading") # # kontext_data.ReloadDataFromAV() # # stock_data.ReloadDataFromAV() # elif Reload == "N": # print("Not reload") # else: # print("wrong input") kontext_data.ReadDataCSV() kontext_data.data = kontext_data.data[((kontext_data.data.index.get_loc(key=str(start), method='backfill'))-200):-2] kontext_data.data.index = pd.to_datetime(kontext_data.data.index) for t in kontext_data.data.columns.levels[1]: data = kontext_data.data.loc[:,pd.IndexSlice[:, t]] data.columns = data.columns.get_level_values(0) data_pom = bt.feeds.PandasData(dataname=data) cerebro.adddata(data_pom, name=t) stock_data.ReadDataCSV() stock_data.data.index = pd.to_datetime(stock_data.data.index) stock_data.data = stock_data.data[((stock_data.data.index.get_loc(key=str(start), method='backfill'))-200):-2] for t in stock_data.tickers: data = stock_data.data.loc[:, pd.IndexSlice[:, t]] data.columns = data.columns.get_level_values(0) data_pom = bt.feeds.PandasData(dataname=data) cerebro.adddata(data_pom, name=t) kwargs = dict(kontext_list=kontext_list, kontext_data=kontext_data) cerebro.addstrategy(TestStrategy, **kwargs) cerebro.broker.setcommission(commission=0.005, commtype=bt.CommInfoBase.COMM_FIXED, leverage=2) # cerebro.broker.addcommissioninfo(IBCommision(leverage=2)) cerebro.broker.setcash(40000.0) print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue()) cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='mysharpe') cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name='TradeAn') cerebro.addanalyzer(bt.analyzers.AnnualReturn, _name='AnnualReturn') cerebro.addanalyzer(bt.analyzers.DrawDown, _name='drawdown') thestrats = cerebro.run() thestrat = thestrats[0] AnnualReturn = pd.DataFrame.from_dict(thestrat.analyzers.AnnualReturn.get_analysis(),orient='index') AnnualReturn.index=AnnualReturn.index.rename('Year') AnnualReturn.columns = ['Return'] AnnualReturn['Return'] *= 100 AverageReturn = AnnualReturn['Return'].mean() print('Sharpe Ratio:', thestrat.analyzers.mysharpe.get_analysis()['sharperatio']) print('DrowDown:') main.pretty(thestrat.analyzers.drawdown.get_analysis()) print('TradeAnalyzer:') main.pretty(thestrat.analyzers.TradeAn.get_analysis()) print(AnnualReturn) # logs = thestrat.logy # print(thestrat.logy) thestrat.logy.to_csv(path_or_buf=BT_home+'Data/Logs/logs.csv', index='False') print('Average Return: %.2f ' % AverageReturn) # print('Trade An:', thestrat.analyzers.TradeAn.get_analysis()) print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue()) #cerebro.plot() ```
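The `Ranking` indicator's momentum score is the 60-period EMA of the average of the 10-, 20-, 60- and 120-bar rates of change. A pandas restatement of that arithmetic on an invented price series (pandas' `ewm` is a close, but not bit-exact, stand-in for talib's EMA, which seeds with an SMA):

```python
import numpy as np
import pandas as pd

rng = np.random.RandomState(1)
close = pd.Series(100 * np.exp(np.cumsum(0.0005 + 0.01 * rng.randn(500))))

def roc(series, n):
    # talib-style ROC: percentage change over n periods
    return (series / series.shift(n) - 1) * 100

raw = (roc(close, 10) + roc(close, 20) + roc(close, 60) + roc(close, 120)) / 4
mo_score = raw.ewm(span=60, adjust=False).mean()
print(mo_score.tail())  # the value used to rank the stocks each month
```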
{ "source": "jni/magicgui", "score": 3 }
#### File: magicgui/magicgui/application.py ```python from __future__ import annotations import signal from contextlib import contextmanager from importlib import import_module from types import ModuleType from typing import TYPE_CHECKING, Callable, Iterator, Optional, Union from magicgui.backends import BACKENDS if TYPE_CHECKING: from magicgui.widgets._protocols import BaseApplicationBackend DEFAULT_BACKEND = "qt" APPLICATION_NAME = "magicgui" @contextmanager def event_loop(backend=None) -> Iterator: """Start an event loop in which to run the application.""" with Application(backend) as app: try: yield app except Exception as e: print("An error was encountered in the event loop:\n", str(e)) class Application: """Magicgui Application, wrapping a native BaseApplicationBackend implementation.""" _backend_module: ModuleType _backend: "BaseApplicationBackend" _instance: Optional[Application] = None def __init__(self, backend_name: Optional[str] = None): self._use(backend_name) @property def backend_name(self) -> str: """Return name of the GUI backend that this app wraps.""" if self._backend is not None: return self._backend._mgui_get_backend_name() else: return "" @property def backend_module(self) -> ModuleType: """Return module object that defines the backend.""" return self._backend_module def _use(self, backend_name=None): """Select a backend by name.""" if not backend_name: backend_name = DEFAULT_BACKEND if not backend_name or backend_name.lower() not in BACKENDS: raise ValueError( f"backend_name must be one of {set(BACKENDS)!r}, " f"not {backend_name!r}" ) module_name, native_module_name = BACKENDS[backend_name] self._backend_module = import_module(f"magicgui.backends.{module_name}") self._backend = self.get_obj("ApplicationBackend")() def get_obj(self, name: str): """Get the backend object for the given ``name`` (such as a widget).""" try: return getattr(self.backend_module, name) except AttributeError as e: raise AttributeError( f"Could not import object {name!r} from backend {self.backend_module}" ) from e def run(self): """Enter the native GUI event loop.""" return self._backend._mgui_run() @property def native(self): """Return the native GUI application instance.""" return self._backend._mgui_get_native_app() def quit(self): """Quit the native GUI event loop.""" return self._backend._mgui_quit() def create(self): """Create the native application.""" # Ensure that the native app exists self.native def process_events(self): """Process all pending GUI events.""" return self._backend._mgui_process_events() def __repr__(self): """Return repr for this instance.""" if not self.backend_name: return "<magicgui app with no backend>" else: return f"<magicgui app, wrapping the {self.backend_name} GUI toolkit>" def __enter__(self): """Context manager to start this application.""" self.create() return self def __exit__(self, *exc_details): """Exit context manager for this application.""" # enable ctrl-C signal.signal(signal.SIGINT, lambda *a: self.quit()) self._backend._mgui_start_timer(500, lambda: None) self._backend._mgui_run() self._backend._mgui_stop_timer() def start_timer( self, interval: int = 1000, on_timeout: Optional[Callable[[], None]] = None, single_shot: bool = False, ): """Start a timer with a given interval, optional callback, and single_shot.""" self._backend._mgui_start_timer(interval, on_timeout, single=single_shot) def _use_app(backend_name: str = None): """Get/create the default Application object. 
It is safe to call this function multiple times, as long as backend_name is None or matches the already selected backend. Parameters ---------- backend_name : str | None The name of the backend application to use. If not specified, Vispy tries to select a backend automatically. See ``vispy.use()`` for details. """ # If we already have a default_app, raise error or return current = Application._instance if current is not None: if backend_name: names = current.backend_name.lower().replace("(", " ").strip(") ") _nm = [n for n in names.split(" ") if n] if backend_name.lower() not in _nm: raise RuntimeError( f"Can only select a backend once, already using {_nm}." ) else: return current # Current backend matches backend_name # Create default app Application._instance = Application(backend_name) return Application._instance AppRef = Union[Application, str, None] def use_app(app: AppRef = None) -> Application: """Get/create the default Application object. See _use_app docstring.""" if app is None: return _use_app() elif isinstance(app, Application): return app elif isinstance(app, str): Application._instance = Application(app) return Application._instance raise TypeError(f"'app' must be string, Application, or None, got: {app!r}. ") ``` #### File: widgets/_bases/slider_widget.py ```python from magicgui.widgets import _protocols from .mixins import _OrientationMixin from .ranged_widget import RangedWidget class SliderWidget(RangedWidget, _OrientationMixin): """Widget with a contstrained value and orientation. Wraps SliderWidgetProtocol. Parameters ---------- orientation : str, {'horizontal', 'vertical'} The orientation for the slider, by default "horizontal" """ _widget: _protocols.SliderWidgetProtocol def __init__(self, orientation: str = "horizontal", **kwargs): super().__init__(**kwargs) self.orientation = orientation @property def options(self) -> dict: """Return options currently being used in this widget.""" d = super().options.copy() d.update({"orientation": self.orientation}) return d ```
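A tiny usage sketch of the application layer above, importing from `magicgui.application` where these names are defined (a Qt binding such as PyQt5 must be installed for the default backend):

```python
from magicgui.application import use_app

app = use_app("qt")      # select and wrap the Qt backend (the DEFAULT_BACKEND)
app.create()             # make sure the native application object exists
print(app.backend_name)  # -> "qt"
app.process_events()     # flush pending GUI events without entering the event loop
```

The `event_loop` context manager shown above would be used instead when a blocking run of the loop is wanted.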
{ "source": "jni/microscopium", "score": 2 }
#### File: microscopium/tests/test_features.py ```python import numpy as np import pytest from skimage.util import img_as_int, img_as_float from microscopium import features @pytest.fixture(scope="module", params=[img_as_int, img_as_float]) def haralick_image(request): haralick_image = np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 2, 2, 2], [2, 2, 3, 3]]) haralick_image = request.param(haralick_image) return haralick_image def test_haralick_features_8bit(haralick_image): fs, names = features.haralick_features(haralick_image, distances=[5], angles=[0]) expected_names = [ 'haralick-contrast-distance5-angle0', 'haralick-dissimilarity-distance5-angle0', 'haralick-homogeneity-distance5-angle0', 'haralick-ASM-distance5-angle0', 'haralick-energy-distance5-angle0', 'haralick-correlation-distance5-angle0'] expected_features = np.array([0., 0., 0., 0., 0., 1.]) assert np.allclose(fs, expected_features) assert names == expected_names ``` #### File: microscopium/tests/test_image_xpress.py ```python from microscopium.screens import image_xpress import collections as coll def test_ix_semantic_filename(): test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif" expected = coll.OrderedDict([('directory', './Week1_22123'), ('prefix', ''), ('plate', 22123), ('well', 'G10'), ('field', 1), ('channel', 0), ('suffix', 'tif')]) assert image_xpress.ix_semantic_filename(test_fn) == expected def test_ix_semantic_filename2(): test_fn = "./BBBC022_v1_images_20585w1/IXMtest_L09_s3_w1538679C9-F03A-" \ "4656-9A57-0D4A440C1C62.tif" expected = coll.OrderedDict([('directory', './BBBC022_v1_images_20585w1'), ('prefix', 'IXMtest'), ('plate', 20585), ('well', 'L09'), ('field', 2), ('channel', 0), ('suffix', 'tif')]) assert image_xpress.ix_semantic_filename(test_fn) == expected ``` #### File: microscopium/tests/test_pre.py ```python import os import tempfile import numpy as np from microscopium.screens.cellomics import SPIRAL_CLOCKWISE_RIGHT_25 from microscopium import preprocess as pre from microscopium import io as mio import pytest import warnings @pytest.fixture def image_files(): # for clarity we define images as integer arrays in [0, 11) and # divide by 10 later i = np.array([[7, 4, 1, 1, 0], [2, 5, 9, 6, 7], [2, 3, 3, 8, 5], [3, 0, 1, 7, 5], [6, 0, 10, 1, 6]], np.uint8) j = np.array([[1, 10, 0, 9, 0], [3, 10, 4, 1, 1], [4, 10, 0, 7, 4], [9, 3, 2, 0, 7], [1, 3, 3, 9, 3]], np.uint8) k = np.array([[9, 1, 7, 7, 3], [9, 1, 6, 2, 2], [2, 8, 2, 0, 3], [4, 3, 8, 9, 10], [6, 0, 2, 3, 10]], np.uint8) files = [] for im in [i, j, k]: f, fn = tempfile.mkstemp(suffix='.png') files.append(fn) mio.imsave(fn, im) yield files for fn in files: os.remove(fn) def test_illumination_mean(image_files): illum = pre.find_background_illumination(image_files, radius=1, quantile=0.5) illum_true = np.array([[161, 174, 188, 81, 94], [174, 174, 81, 161, 94], [174, 67, 161, 121, 161], [134, 107, 107, 161, 215], [134, 134, 134, 174, 215]], np.uint8) np.testing.assert_array_almost_equal(illum, illum_true, decimal=1) def test_color_stack(image_files): images = list(map(mio.imread, image_files)) stack = pre.stack_channels(images[:2], [None, 1, 0]) np.testing.assert_equal(stack[0, 0], [0, 1, 7]) np.testing.assert_equal(stack[..., 2], images[0]) def conv(im): return np.round(np.clip(im, 0, np.inf) * 255).astype(np.uint8) @pytest.fixture def image_files_noise(request): """Three sham images; one has no signal, one has an intensity artifact.""" r = np.random.RandomState(0) shape = (5, 5) # no signal i = conv(0.01 * np.ones(shape, dtype=float) + 
0.005 * r.randn(*shape)) # normal image j = conv(0.5 * r.rand(*shape)) # blown-out corner k = 0.5 * r.rand(*shape) k[3:, 3:] = 1.0 k = conv(k) files = [] for im in [i, j, k]: f, fn = tempfile.mkstemp(suffix='.png') files.append(fn) mio.imsave(fn, im) def cleanup(): for fn in files: os.remove(fn) request.addfinalizer(cleanup) illum = 0.01 * np.ones(shape, dtype=float) return files, illum def test_correct_multiimage_illum(image_files_noise): files, illum = image_files_noise with mio.temporary_file('.tif') as out_fn: ims = pre.correct_multiimage_illumination(files, illum, (2 / 25), 0) i, j, k = list(ims) # 1. check noise is not blown out in i assert not np.any(i > 10) # 2. check blown out corner in k has not suppressed all other values assert np.median(k) > 100 cellomics_pattern = "MFGTMP_150406100001_A01f{0:02d}d0.TIF" missing_test_fns = [ ([cellomics_pattern.format(i) for i in range(25)], []), ([cellomics_pattern.format(i) for i in range(25)], [1, 13]) ] # delete "images" with fields 1 and 13 from second set of # image filesnames missing_test_fns[1][0].remove(cellomics_pattern.format(1)) missing_test_fns[1][0].remove(cellomics_pattern.format(13)) @pytest.mark.parametrize("fns, expected", missing_test_fns) def test_find_missing_fields(fns, expected): actual = pre.find_missing_fields(fns) np.testing.assert_array_equal(actual, expected) # create a list of parameters for testing the create missing mask files # each entry in the tuple represents the fields: missing, order, rows, cols # and expected (the expected output from the function) missing_mask_test = [ ([], [[0, 1, 2]], 10, 5, np.ones((10, 15), dtype=np.bool)), ([0, 5], [[0, 1, 2], [4, 5, 6]], 5, 10, np.ones((10, 30), dtype=np.bool)), ([3, 4], [[0, 1], [2, 3], [4, 5]], 10, 5, np.ones((30, 10), dtype=np.bool)) ] # insert False to missing areas of expected output missing_mask_test[1][4][0:5, 0:10] = False missing_mask_test[1][4][5:10, 10:20] = False missing_mask_test[2][4][10:20, 5:10] = False missing_mask_test[2][4][20:30, 0:5] = False # pass the set of list parameters to the test_create_missing_mask # function. 
the test wil run against every of parameters in the # missing_mask_test list @pytest.mark.parametrize("missing, order, rows, cols, expected", missing_mask_test) def test_create_missing_mask(missing, order, rows, cols, expected): actual = pre.create_missing_mask(missing, order, rows, cols) np.testing.assert_array_equal(actual, expected) @pytest.fixture def test_image_files_montage(request): def make_test_montage_files(missing_fields): shape = (2, 2) fields = list(range(0, 25)) for missing_field in missing_fields: fields.remove(missing_field) ims = [np.ones(shape, np.uint8) * i for i in fields] files = [] for field, im in zip(fields, ims): prefix = "MFGTMP_140206180002_A01f{0:02d}d0".format(field) f, fn = tempfile.mkstemp(prefix=prefix, suffix=".tif") files.append(fn) with warnings.catch_warnings(): warnings.simplefilter("ignore") mio.imsave(fn, im) def cleanup(): for file in files: os.remove(file) request.addfinalizer(cleanup) return files return make_test_montage_files def test_montage_with_missing(test_image_files_montage): files = test_image_files_montage(missing_fields=[20]) montage, mask, number_missing = \ pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=r'.*_[A-P]\d{2}f(\d{2})d0', re_group=1) expect_montage = np.array([[0, 0, 21, 21, 22, 22, 23, 23, 24, 24], [0, 0, 21, 21, 22, 22, 23, 23, 24, 24], [19, 19, 6, 6, 7, 7, 8, 8, 9, 9], [19, 19, 6, 6, 7, 7, 8, 8, 9, 9], [18, 18, 5, 5, 0, 0, 1, 1, 10, 10], [18, 18, 5, 5, 0, 0, 1, 1, 10, 10], [17, 17, 4, 4, 3, 3, 2, 2, 11, 11], [17, 17, 4, 4, 3, 3, 2, 2, 11, 11], [16, 16, 15, 15, 14, 14, 13, 13, 12, 12], [16, 16, 15, 15, 14, 14, 13, 13, 12, 12]], np.uint8) np.testing.assert_array_equal(expect_montage, montage) def test_montage_with_missing_mask(test_image_files_montage): files = test_image_files_montage(missing_fields=[3, 8]) montage, mask, number_missing = \ pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=r'.*_[A-P]\d{2}f(\d{2})d0', re_group=1) expected_mask = np.ones((10, 10), np.bool) expected_mask[6:8, 4:6] = False expected_mask[2:4, 6:8] = False np.testing.assert_array_equal(expected_mask, mask) def test_montage_with_missing_number_missing(test_image_files_montage): files = test_image_files_montage(missing_fields=[10, 11, 12]) montage, mask, number_missing = \ pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25, re_string=r'.*_[A-P]\d{2}f(\d{2})d0', re_group=1) assert number_missing == 3 if __name__ == '__main__': pytest.main() ```
{ "source": "jnimmo/pyenvisalink", "score": 2 }
#### File: pyenvisalink/dsc_tests/test_keypad_update.py ```python import logging import json import re from pyenvisalink.dsc_envisalinkdefs import * from pyenvisalink import AlarmState _LOGGER = logging.getLogger(__name__) loggingconfig = {'level': 'DEBUG', 'format': '%(asctime)s %(levelname)s <%(name)s %(module)s %(funcName)s> %(message)s', 'datefmt': '%a, %d %b %Y %H:%M:%S'} logging.basicConfig(**loggingconfig) alarmState = AlarmState.get_initial_alarm_state(64, 8) def handle_keypad_update(code, data): """Handle general (non-partition-based) info.""" for part in alarmState['partition']: alarmState['partition'][part]['status'].update(evl_ResponseTypes[code]['status']) _LOGGER.debug(str.format("(All partitions) state has updated: {0}", json.dumps(evl_ResponseTypes[code]['status']))) _LOGGER.info('Alarm State before:') print(alarmState['partition']) handle_keypad_update('803','') _LOGGER.info('Alarm State after:') print(alarmState['partition']) ```
{ "source": "jni/networkx", "score": 3 }
#### File: networkx/generators/classic.py ```python import itertools from networkx.algorithms.bipartite.generators import complete_bipartite_graph __author__ ="""<NAME> (<EMAIL>)\<NAME> (<EMAIL>)""" __all__ = [ 'balanced_tree', 'barbell_graph', 'complete_graph', 'circular_ladder_graph', 'cycle_graph', 'dorogovtsev_goltsev_mendes_graph', 'empty_graph', 'full_rary_tree', 'grid_graph', 'grid_2d_graph', 'hypercube_graph', 'ladder_graph', 'lollipop_graph', 'null_graph', 'path_graph', 'star_graph', 'trivial_graph', 'wheel_graph'] #------------------------------------------------------------------- # Some Classic Graphs #------------------------------------------------------------------- import networkx as nx from networkx.utils import is_list_of_ints, flatten def _tree_edges(n,r): # helper function for trees # yields edges in rooted tree at 0 with n nodes and branching ratio r nodes=iter(range(n)) parents=[next(nodes)] # stack of max length r while parents: source=parents.pop(0) for i in range(r): try: target=next(nodes) parents.append(target) yield source,target except StopIteration: break def full_rary_tree(r, n, create_using=None): """Creates a full r-ary tree of n vertices. Sometimes called a k-ary, n-ary, or m-ary tree. "... all non-leaf vertices have exactly r children and all levels are full except for some rightmost position of the bottom level (if a leaf at the bottom level is missing, then so are all of the leaves to its right." [1]_ Parameters ---------- r : int branching factor of the tree n : int Number of nodes in the tree create_using : NetworkX graph type, optional Use specified type to construct graph (default = networkx.Graph) Returns ------- G : networkx Graph An r-ary tree with n nodes References ---------- .. [1] An introduction to data structures and algorithms, <NAME>, <NAME> 2001, (page 225). """ G=nx.empty_graph(n,create_using) G.add_edges_from(_tree_edges(n,r)) return G def balanced_tree(r, h, create_using=None): """Return the perfectly balanced r-tree of height h. Parameters ---------- r : int Branching factor of the tree h : int Height of the tree create_using : NetworkX graph type, optional Use specified type to construct graph (default = networkx.Graph) Returns ------- G : networkx Graph A tree with n nodes Notes ----- This is the rooted tree where all leaves are at distance h from the root. The root has degree r and all other internal nodes have degree r+1. Node labels are the integers 0 (the root) up to number_of_nodes - 1. Also refered to as a complete r-ary tree. """ # number of nodes is n=1+r+..+r^h if r==1: n=2 else: n = int((1-r**(h+1))/(1-r)) # sum of geometric series r!=1 G=nx.empty_graph(n,create_using) G.add_edges_from(_tree_edges(n,r)) return G return nx.full_rary_tree(r,n,create_using) def barbell_graph(m1,m2,create_using=None): """Return the Barbell Graph: two complete graphs connected by a path. For m1 > 1 and m2 >= 0. Two identical complete graphs K_{m1} form the left and right bells, and are connected by a path P_{m2}. The 2*m1+m2 nodes are numbered 0,...,m1-1 for the left barbell, m1,...,m1+m2-1 for the path, and m1+m2,...,2*m1+m2-1 for the right barbell. The 3 subgraphs are joined via the edges (m1-1,m1) and (m1+m2-1,m1+m2). If m2=0, this is merely two complete graphs joined together. This graph is an extremal example in <NAME> and <NAME> etext on Random Walks on Graphs. 
""" if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if m1<2: raise nx.NetworkXError(\ "Invalid graph description, m1 should be >=2") if m2<0: raise nx.NetworkXError(\ "Invalid graph description, m2 should be >=0") # left barbell G=complete_graph(m1,create_using) G.name="barbell_graph(%d,%d)"%(m1,m2) # connecting path G.add_nodes_from([v for v in range(m1,m1+m2-1)]) if m2>1: G.add_edges_from([(v,v+1) for v in range(m1,m1+m2-1)]) # right barbell G.add_edges_from( (u,v) for u in range(m1+m2,2*m1+m2) for v in range(u+1,2*m1+m2)) # connect it up G.add_edge(m1-1,m1) if m2>0: G.add_edge(m1+m2-1,m1+m2) return G def complete_graph(n,create_using=None): """ Return the complete graph K_n with n nodes. Node labels are the integers 0 to n-1. """ G=empty_graph(n,create_using) G.name="complete_graph(%d)"%(n) if n>1: if G.is_directed(): edges=itertools.permutations(range(n),2) else: edges=itertools.combinations(range(n),2) G.add_edges_from(edges) return G def circular_ladder_graph(n,create_using=None): """Return the circular ladder graph CL_n of length n. CL_n consists of two concentric n-cycles in which each of the n pairs of concentric nodes are joined by an edge. Node labels are the integers 0 to n-1 """ G=ladder_graph(n,create_using) G.name="circular_ladder_graph(%d)"%n G.add_edge(0,n-1) G.add_edge(n,2*n-1) return G def cycle_graph(n,create_using=None): """Return the cycle graph C_n over n nodes. C_n is the n-path with two end-nodes connected. Node labels are the integers 0 to n-1 If create_using is a DiGraph, the direction is in increasing order. """ G=path_graph(n,create_using) G.name="cycle_graph(%d)"%n if n>1: G.add_edge(n-1,0) return G def dorogovtsev_goltsev_mendes_graph(n,create_using=None): """Return the hierarchically constructed Dorogovtsev-Goltsev-Mendes graph. n is the generation. See: arXiv:/cond-mat/0112143 by Dorogovtsev, <NAME> Mendes. """ if create_using is not None: if create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if create_using.is_multigraph(): raise nx.NetworkXError("Multigraph not supported") G=empty_graph(0,create_using) G.name="Dorogovtsev-Goltsev-Mendes Graph" G.add_edge(0,1) if n==0: return G new_node = 2 # next node to be added for i in range(1,n+1): #iterate over number of generations. last_generation_edges = G.edges() number_of_edges_in_last_generation = len(last_generation_edges) for j in range(0,number_of_edges_in_last_generation): G.add_edge(new_node,last_generation_edges[j][0]) G.add_edge(new_node,last_generation_edges[j][1]) new_node += 1 return G def empty_graph(n=0,create_using=None): """Return the empty graph with n nodes and zero edges. Node labels are the integers 0 to n-1 For example: >>> G=nx.empty_graph(10) >>> G.number_of_nodes() 10 >>> G.number_of_edges() 0 The variable create_using should point to a "graph"-like object that will be cleaned (nodes and edges will be removed) and refitted as an empty "graph" with n nodes with integer labels. This capability is useful for specifying the class-nature of the resulting empty "graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.). The variable create_using has two main uses: Firstly, the variable create_using can be used to create an empty digraph, network,etc. For example, >>> n=10 >>> G=nx.empty_graph(n,create_using=nx.DiGraph()) will create an empty digraph on n nodes. Secondly, one can pass an existing graph (digraph, pseudograph, etc.) via create_using. For example, if G is an existing graph (resp. 
digraph, pseudograph, etc.), then empty_graph(n,create_using=G) will empty G (i.e. delete all nodes and edges using G.clear() in base) and then add n nodes and zero edges, and return the modified graph (resp. digraph, pseudograph, etc.). See also create_empty_copy(G). """ if create_using is None: # default empty graph is a simple graph G=nx.Graph() else: G=create_using G.clear() G.add_nodes_from(range(n)) G.name="empty_graph(%d)"%n return G def grid_2d_graph(m,n,periodic=False,create_using=None): """ Return the 2d grid graph of mxn nodes, each connected to its nearest neighbors. Optional argument periodic=True will connect boundary nodes via periodic boundary conditions. """ G=empty_graph(0,create_using) G.name="grid_2d_graph" rows=range(m) columns=range(n) G.add_nodes_from( (i,j) for i in rows for j in columns ) G.add_edges_from( ((i,j),(i-1,j)) for i in rows for j in columns if i>0 ) G.add_edges_from( ((i,j),(i,j-1)) for i in rows for j in columns if j>0 ) if G.is_directed(): G.add_edges_from( ((i,j),(i+1,j)) for i in rows for j in columns if i<m-1 ) G.add_edges_from( ((i,j),(i,j+1)) for i in rows for j in columns if j<n-1 ) if periodic: if n>2: G.add_edges_from( ((i,0),(i,n-1)) for i in rows ) if G.is_directed(): G.add_edges_from( ((i,n-1),(i,0)) for i in rows ) if m>2: G.add_edges_from( ((0,j),(m-1,j)) for j in columns ) if G.is_directed(): G.add_edges_from( ((m-1,j),(0,j)) for j in columns ) G.name="periodic_grid_2d_graph(%d,%d)"%(m,n) return G def grid_graph(dim,periodic=False): """ Return the n-dimensional grid graph. The dimension is the length of the list 'dim' and the size in each dimension is the value of the list element. E.g. G=grid_graph(dim=[2,3]) produces a 2x3 grid graph. If periodic=True then join grid edges with periodic boundary conditions. """ dlabel="%s"%dim if dim==[]: G=empty_graph(0) G.name="grid_graph(%s)"%dim return G if not is_list_of_ints(dim): raise nx.NetworkXError("dim is not a list of integers") if min(dim)<=0: raise nx.NetworkXError(\ "dim is not a list of strictly positive integers") if periodic: func=cycle_graph else: func=path_graph dim=list(dim) current_dim=dim.pop() G=func(current_dim) while len(dim)>0: current_dim=dim.pop() # order matters: copy before it is cleared during the creation of Gnew Gold=G.copy() Gnew=func(current_dim) # explicit: create_using=None # This is so that we get a new graph of Gnew's class. G=nx.cartesian_product(Gnew,Gold) # graph G is done but has labels of the form (1,(2,(3,1))) # so relabel H=nx.relabel_nodes(G, flatten) H.name="grid_graph(%s)"%dlabel return H def hypercube_graph(n): """Return the n-dimensional hypercube. Node labels are the integers 0 to 2**n - 1. """ dim=n*[2] G=grid_graph(dim) G.name="hypercube_graph_(%d)"%n return G def ladder_graph(n,create_using=None): """Return the Ladder graph of length n. This is two rows of n nodes, with each pair connected by a single edge. Node labels are the integers 0 to 2*n - 1. """ if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") G=empty_graph(2*n,create_using) G.name="ladder_graph_(%d)"%n G.add_edges_from([(v,v+1) for v in range(n-1)]) G.add_edges_from([(v,v+1) for v in range(n,2*n-1)]) G.add_edges_from([(v,v+n) for v in range(n)]) return G def lollipop_graph(m,n,create_using=None): """Return the Lollipop Graph; `K_m` connected to `P_n`. This is the Barbell Graph without the right barbell. For m>1 and n>=0, the complete graph K_m is connected to the path P_n. 
The resulting m+n nodes are labelled 0,...,m-1 for the complete graph and m,...,m+n-1 for the path. The 2 subgraphs are joined via the edge (m-1,m). If n=0, this is merely a complete graph. Node labels are the integers 0 to number_of_nodes - 1. (This graph is an extremal example in <NAME> and Jim Fill's etext on Random Walks on Graphs.) """ if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if m<2: raise nx.NetworkXError(\ "Invalid graph description, m should be >=2") if n<0: raise nx.NetworkXError(\ "Invalid graph description, n should be >=0") # the ball G=complete_graph(m,create_using) # the stick G.add_nodes_from([v for v in range(m,m+n)]) if n>1: G.add_edges_from([(v,v+1) for v in range(m,m+n-1)]) # connect ball to stick if m>0: G.add_edge(m-1,m) G.name="lollipop_graph(%d,%d)"%(m,n) return G def null_graph(create_using=None): """Return the Null graph with no nodes or edges. See empty_graph for the use of create_using. """ G=empty_graph(0,create_using) G.name="null_graph()" return G def path_graph(n,create_using=None): """Return the Path graph P_n of n nodes linearly connected by n-1 edges. Node labels are the integers 0 to n - 1. If create_using is a DiGraph then the edges are directed in increasing order. """ G=empty_graph(n,create_using) G.name="path_graph(%d)"%n G.add_edges_from([(v,v+1) for v in range(n-1)]) return G def star_graph(n,create_using=None): """ Return the Star graph with n+1 nodes: one center node, connected to n outer nodes. Node labels are the integers 0 to n. """ G=complete_bipartite_graph(1,n,create_using) G.name="star_graph(%d)"%n return G def trivial_graph(create_using=None): """ Return the Trivial graph with one node (with integer label 0) and no edges. """ G=empty_graph(1,create_using) G.name="trivial_graph()" return G def wheel_graph(n,create_using=None): """ Return the wheel graph: a single hub node connected to each node of the (n-1)-node cycle graph. Node labels are the integers 0 to n - 1. """ if n == 0: return nx.empty_graph(n, create_using=create_using) G=star_graph(n-1,create_using) G.name="wheel_graph(%d)"%n G.add_edges_from([(v,v+1) for v in range(1,n-1)]) if n>2: G.add_edge(1,n-1) return G ```
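A brief usage sketch of the generators defined above, assuming a standard NetworkX installation imported as `nx`; the node and edge counts follow directly from the constructions described in the docstrings.

```python
# Drive a few of the classic generators and sanity-check their sizes.
import networkx as nx

G = nx.barbell_graph(5, 3)      # two K_5 "bells" joined by a 3-node path
assert G.number_of_nodes() == 2 * 5 + 3          # 13 nodes
assert G.number_of_edges() == 2 * 10 + 2 + 2     # 24 edges

C = nx.cycle_graph(4)           # C_4: the 4-path with its ends joined
W = nx.wheel_graph(5)           # hub node 0 connected to a 4-cycle on 1..4
print(sorted(C.edges()))
print(W.degree(0))              # 4
```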
{ "source": "jni/notedown", "score": 3 }
#### File: jni/notedown/tests.py ```python import os import nose.tools as nt import IPython.nbformat as nbformat import notedown simple_backtick = """ ``` code1 space_indent more code ``` text1 `` ``` code2 tab_indent ~~~ ``` text2""" simple_tilde = """ ~~~ code1 space_indent more code ~~~ text1 `` ~~~~ code2 tab_indent ~~~ ~~~~ text2""" simple_indented = """ code1 space_indent more code text1 `` code2 tab_indent ~~~ text2""" simple_code_cells = ['code1\n space_indent\n\n\nmore code', 'code2\n tab_indent\n~~~'] # note: ipython markdown cells do not end with a newline unless # explicitly present. simple_markdown_cells = ['text1\n``', 'text2'] alt_lang = """ This is how you write a code block in another language: ```bash echo "This is bash ${BASH_VERSION}!" ``` """ alt_lang_code = '%%bash\necho "This is bash ${BASH_VERSION}!"' sample_markdown = u"""### Create IPython Notebooks from markdown This is a simple tool to convert markdown with code into an IPython Notebook. Usage: ``` notedown input.md > output.ipynb ``` It is really simple and separates your markdown into code and not code. Code goes into code cells, not-code goes into markdown cells. Installation: pip install notedown """ # Generate the sample notebook from the markdown using # # import notedown # reader = notedown.MarkdownReader() # sample_notebook = reader.reads(sample_markdown) # print nbformat.writes(sample_notebook) # # which is defined in create_json_notebook() below sample_notebook = r"""{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "### Create IPython Notebooks from markdown\n", "\n", "This is a simple tool to convert markdown with code into an IPython\n", "Notebook.\n", "\n", "Usage:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "notedown input.md > output.ipynb" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It is really simple and separates your markdown into code and not\n", "code. 
Code goes into code cells, not-code goes into markdown cells.\n", "\n", "Installation:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "pip install notedown" ] } ], "metadata": {}, "nbformat": 4, "nbformat_minor": 0 }""" roundtrip_markdown = u"""## A roundtrip test Here is a code cell: ```python a = 1 ``` and here is another one: ```python b = 2 ``` """ def create_json_notebook(markdown): reader = notedown.MarkdownReader() notebook = reader.reads(markdown) json_notebook = nbformat.writes(notebook) return json_notebook def test_notedown(): """Integration test the whole thing.""" from difflib import ndiff notebook = create_json_notebook(sample_markdown) diff = ndiff(sample_notebook.splitlines(1), notebook.splitlines(1)) print '\n'.join(diff) nt.assert_multi_line_equal(create_json_notebook(sample_markdown), sample_notebook) def parse_cells(text, regex): reader = notedown.MarkdownReader(code_regex=regex) return reader.parse_blocks(text) def separate_code_cells(cells): codetype = notedown.MarkdownReader.code code_cells = [c['content'] for c in cells if c['type'] == codetype] return code_cells def separate_markdown_cells(cells): markdowntype = notedown.MarkdownReader.markdown markdown_cells = [c['content'] for c in cells if c['type'] == markdowntype] return markdown_cells def test_parse_gfm(): """Test with GFM code blocks.""" all_cells = parse_cells(simple_backtick, 'fenced') code_cells = separate_code_cells(all_cells) markdown_cells = separate_markdown_cells(all_cells) print "out: ", code_cells print "ref: ", simple_code_cells print "out: ", markdown_cells print "ref: ", simple_markdown_cells assert(code_cells == simple_code_cells) assert(markdown_cells == simple_markdown_cells) def test_parse_tilde(): """Test with ~~~ delimited code blocks.""" all_cells = parse_cells(simple_tilde, 'fenced') code_cells = separate_code_cells(all_cells) markdown_cells = separate_markdown_cells(all_cells) assert(code_cells == simple_code_cells) assert(markdown_cells == simple_markdown_cells) def test_parse_indented(): """Test with indented code blocks.""" all_cells = parse_cells(simple_indented, 'indented') code_cells = separate_code_cells(all_cells) markdown_cells = separate_markdown_cells(all_cells) print "out: ", code_cells print "ref: ", simple_code_cells print "out: ", markdown_cells print "ref: ", simple_markdown_cells assert(code_cells == simple_code_cells) assert(markdown_cells == simple_markdown_cells) def test_alt_lang(): """Specifying a language that isn't python should generate code blocks using %%language magic.""" reader = notedown.MarkdownReader(code_regex='fenced') all_blocks = reader.parse_blocks(alt_lang) code_blocks = [b for b in all_blocks if b['type'] == reader.code] magic_block = code_blocks[0] reader.process_code_block(magic_block) assert(magic_block['content'] == alt_lang_code) def test_format_agnostic(): """Test whether we can process markdown with either fenced or indented blocks.""" fenced_cells = parse_cells(simple_backtick, None) indented_cells = parse_cells(simple_indented, None) fenced_code_cells = separate_code_cells(fenced_cells) indented_code_cells = separate_code_cells(indented_cells) fenced_markdown_cells = separate_markdown_cells(fenced_cells) indented_markdown_cells = separate_markdown_cells(indented_cells) assert(fenced_code_cells == indented_code_cells) assert(fenced_markdown_cells == indented_markdown_cells) def test_pre_process_text(): """test the stripping of blank lines""" block = {} ref = "\t \n\n \t\n\ntext \t \n\n\n" 
block['content'] = ref notedown.MarkdownReader.pre_process_text_block(block) expected = "text" print "---" print "in: " print ref print "---" print "out: " print block['content'] print "---" print "expected: " print expected print "---" assert(block['content'] == expected) def test_roundtrip(): """Run nbconvert using our custom markdown template to recover original markdown from a notebook. """ # create a notebook from the markdown mr = notedown.MarkdownReader() roundtrip_notebook = mr.to_notebook(roundtrip_markdown) # write the notebook into json notebook_json = nbformat.writes(roundtrip_notebook) # write the json back into notebook notebook = nbformat.reads(notebook_json, as_version=4) # convert notebook to markdown mw = notedown.MarkdownWriter(template_file='notedown/templates/markdown.tpl', strip_outputs=True) markdown = mw.writes(notebook) nt.assert_multi_line_equal(roundtrip_markdown, markdown) def test_template_load(): """MarkdownWriter should be able to load a template from an absolute path. IPython requires a relative path. """ template_abspath = os.path.abspath('notedown/templates/markdown.tpl') writer = notedown.MarkdownWriter(template_file=template_abspath) import jinja2 assert(isinstance(writer.exporter.template, jinja2.Template)) def test_markdown_markdown(): mr = notedown.MarkdownReader() mw = notedown.MarkdownWriter(notedown.markdown_template) nb = mr.reads(roundtrip_markdown) markdown = mw.writes(nb) nt.assert_multi_line_equal(markdown, roundtrip_markdown) def test_R(): """Check that the R notebook generated from Rmd looks the same as the reference (without output cells). """ knitr = notedown.Knitr() with open('r-examples/r-example.Rmd') as rmd: knitted_markdown_file = knitr.knit(rmd) reader = notedown.MarkdownReader(precode=r"%load_ext rpy2.ipython", magic=True) notebook = reader.read(knitted_markdown_file) with open('r-examples/r-example.ipynb') as f: reference_notebook = nbformat.read(f, as_version=4) notedown.main.strip(notebook) notedown.main.strip(reference_notebook) writer = nbformat nbjson = writer.writes(notebook) reference_nbjson = writer.writes(reference_notebook) nt.assert_multi_line_equal(nbjson, reference_nbjson) ```
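A minimal sketch of the conversion these tests exercise, using only calls that appear above (`MarkdownReader`, `to_notebook`, `nbformat.writes`); the markdown string is illustrative and uses indented code blocks so the example stays self-contained.

```python
# Convert a small markdown document into an IPython notebook.
import IPython.nbformat as nbformat

import notedown

md = "Some prose.\n\n    x = 1\n    print(x)\n\nMore prose.\n"

reader = notedown.MarkdownReader(code_regex='indented')
nb = reader.to_notebook(md)          # indented block becomes a code cell
print(nbformat.writes(nb))           # JSON for the resulting notebook
```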
{ "source": "jnin/TC20210924032", "score": 3 }
#### File: notebooks/06_22/functions.py ```python from sklearn import datasets import numpy as np import tensorflow as tf from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import os import csv import pandas as pd def rho_calculation(copy_model, X_train, y_train,d): y_pred_ohe = copy_model.predict(X_train) y_train_ohe=tf.one_hot(y_train, 2) avv_= 0 vec_rho = [] for y_t, t_p in zip(y_train_ohe,y_pred_ohe): temp=0 for dim in range(0,d): temp +=(y_t[dim]-t_p[dim])**2 temp = np.sqrt(temp) vec_rho.append(temp) avv_+=temp return avv_/len(y_train_ohe), vec_rho def remove_point_rho(X, y, threshold, vec_rho, d): X_train_ = np.empty((0, d)) y_train_ = np.empty((0), dtype=int) for i, rho in enumerate(vec_rho): if rho>=threshold: X_train_=np.append(X_train_,[X[i]]) y_train_=np.append(y_train_,y[i]) X_train_=X_train_.reshape((int(len(X_train_)/2), 2)) return X_train_ , y_train_ def define_compile_model(lr=0.01,seed=42): tf.random.set_seed(seed) copy_model = tf.keras.models.Sequential([ tf.keras.layers.Dense(64, input_shape=(2,), activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(32, activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(10, activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(2, activation='softmax') ]) opt = tf.keras.optimizers.Adam(learning_rate=lr) copy_model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) return copy_model def save_params(main_path,data_path,params): try: with open(main_path +'/' +data_path+ '/'+'params.txt', "w") as text_file: print("saving params in =",text_file) for key in params: text_file.write(str(key)+' = '+ str(params[key]) + '\n') except: with open(data_path+ '/'+'params.txt', "w") as text_file: print("saving params in =",text_file) for key in params: text_file.write(str(key)+' = '+ str(params[key]) + '\n') def fit_function_2(lr_, n_sampling, original, X_test, y_test, hot_start, plot=True, max_iter=15, n_epochs = 1000, max_subtraining = 2, deleting=False, threshold = 0.001, model_path='',step=0): d=2 t=0 X_train_ = np.empty((0, d)) y_train_ = np.empty((0), dtype=int) y_errors = np.empty((0), dtype=int) acc = [] avv = [] rho = [] nN = [] rho_vec = [] n_sub = [] ##DEFINING MODEL----------------------------------------------------------------------------------------- #os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if hot_start: r_seed = np.random.randint(1000) else: r_seed = 42 tf.random.set_seed(r_seed) print('1') copy_model = tf.keras.models.Sequential([ tf.keras.layers.Dense(64, input_shape=(2,), activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(32, activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(10, activation='relu', kernel_initializer='he_normal'), tf.keras.layers.Dense(2, activation='softmax') ]) opt = tf.keras.optimizers.Adam(learning_rate=lr_) copy_model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) ##\DEFINING MODEL----------------------------------------------------------------------------------------- print('2') n_subtraining = 0 #print('Step: {}'.format(i),'-----------------------------') t_rand=np.random.randint(15) while t < max_iter: print('Iteration: {}'.format(t)) #print('Subtraining: {}'.format(n_subtraining)) if len(y_errors)==0: # Generate new points and label them X_new = np.random.multivariate_normal(np.zeros((d,)), np.eye(d,d), size = n_sampling) y_new = original.predict(X_new) y_new_oh_mamma = tf.one_hot(y_new, 2) y_new = 
np.argmax(y_new_oh_mamma, axis=1) # Update synthetic trainign set with new points X_train_ = np.vstack((X_new, X_train_)) y_train_ = np.append(y_new, y_train_) rho_mean, vec_rho = rho_calculation(copy_model,X_train_,y_train_,d) rho.append(rho_mean) rho_vec.append(vec_rho) if deleting: X_train_, y_train_ = remove_point_rho(X_train_, y_train_, threshold, vec_rho,d) nN.append(len(X_train_)) else: # decrease of the lr weights = copy_model.get_weights() # save the weights copy_model = define_compile_model(lr=lr_*(n_subtraining+1.5)/(n_subtraining+0.5),seed=r_seed) # 0.5 , 0.75, ... copy_model.set_weights(weights) # set the weights X_train_, xx, y_train_, yy = train_test_split(X_train_, y_train_, test_size=1) X_train_ = np.vstack((xx, X_train_)) y_train_ = np.append(yy, y_train_) # Update copy y_train_ohe = tf.one_hot(y_train_, 2) copy_model.fit(X_train_, y_train_ohe, epochs=n_epochs, batch_size=32, verbose=0) # Compute copy accuracy on original test data acc_=copy_model.evaluate(X_test, tf.one_hot(y_test, 2), verbose=0)[1] #print('Accuracy: {}'.format(acc_)) # Identify errors y_pred_ohe = copy_model.predict(X_train_) #print(y_pred_ohe) y_pred_ = np.argmax(y_pred_ohe, axis=1) X_errors = X_train_[y_pred_!=y_train_,:] y_errors = y_train_[y_pred_!=y_train_] print('# Errors: {}'.format(len(y_errors)),' # nN: {}'.format(nN[-1]), ' Accuracy: {}'.format(acc_)) if len(y_errors)==0 or n_subtraining>max_subtraining: # Plot model----------------------- if plot and t==t_rand: x_range=3.0 xx,yy = np.meshgrid(np.linspace(-x_range,x_range,200),np.linspace(-x_range,x_range,200)) viz=np.c_[xx.ravel(),yy.ravel()] z = np.argmax(copy_model.predict(viz), axis=1) plt.scatter(X_train_[:, 0], X_train_[:, 1], c=y_train_, alpha=0.7) plt.scatter(X_errors[:, 0], X_errors[:, 1], c='red', alpha=0.2) plt.imshow(z.reshape((200,200)), origin='lower', extent=(-x_range,x_range,-x_range,x_range),alpha=0.3, vmin=0, vmax=1) plt.contour(xx,yy,z.reshape((200,200)),[0.5]) plt.autoscale(False) plt.gcf().set_size_inches((6,6)) plt.savefig(plot_path+'/iter='+str(t)+'_avg='+str(step)+'.pdf',bbox_inches='tight') #\ Plot model---------------------- if n_subtraining !=0: weights = copy_model.get_weights() # save the weights copy_model = define_compile_model(lr=lr_,seed=r_seed) # set original lr copy_model.set_weights(weights) # set the weights t += 1 n_sub.append(n_subtraining) n_subtraining = 0 y_errors = np.empty((0), dtype=int) acc.append(acc_) if not hot_start: copy_model = define_compile_model(lr=lr_,seed=r_seed) else: n_subtraining +=1 file_name=model_path+'/N=' + str(n_sampling) +'_step=' +str(step) +'_hot_start=' +str(hot_start) + '_acc.csv' with open(file_name, 'w', encoding='UTF8') as f: writer = csv.writer(f) # write the data writer.writerow(np.asarray(acc)) file_name=model_path+'/N=' + str(n_sampling) +'_step=' +str(step) +'_hot_start=' +str(hot_start) + '_nN.csv' with open(file_name, 'w', encoding='UTF8') as f: writer = csv.writer(f) # write the data writer.writerow(np.asarray(nN)) def moons(n_samples): X, y = datasets.make_moons(n_samples=n_samples, noise=0.05) return X,y,'moons' def yin_yang(n_samples): """ Returns the yin-yang dataset. 
""" r_max = 1 r = np.random.uniform(low=0, high=r_max**2, size=n_samples) theta = np.random.uniform(low=0, high=1, size=n_samples) * 2 * np.pi x = np.sqrt(r) * np.cos(theta) y = np.sqrt(r) * np.sin(theta) X = np.dstack([x, y])[0] y = np.empty((len(X),)) # Upper circle center_x_u = 0 center_y_u = 0.5 radius_u = 0.5 # Upper circle center_x_l = 0 center_y_l = -0.5 radius_l = 0.5 i = 0 for xi, yi in X: if ((xi > 0) & ((xi - center_x_u)**2 + (yi - center_y_u)**2 >= radius_u**2)) or ((xi < 0) & ((xi - center_x_l)**2 + (yi - center_y_l)**2 < radius_l**2)): y[i] = 1 else: y[i] = 0 if (xi - 0)**2 + (yi - 0.5)**2 < 0.15**2: y[i] = 1 if (xi - 0)**2 + (yi - (-0.5))**2 < 0.15**2: y[i] = 0 i += 1 return X, y, 'yinyang' def two_spirals(n_samples, noise=.5): """ Returns the two spirals dataset. """ n = np.sqrt(np.random.rand(n_samples,1)) * 780 * (2*np.pi)/360 d1x = -np.cos(n)*n + np.random.rand(n_samples,1) * noise d1y = np.sin(n)*n + np.random.rand(n_samples,1) * noise return (np.vstack((np.hstack((d1x,d1y)),np.hstack((-d1x,-d1y)))), np.hstack((np.zeros(n_samples),np.ones(n_samples))), 'spirals') ```
{ "source": "jni/ome-zarr-py", "score": 3 }
#### File: ome-zarr-py/ome_zarr/scale.py ```python import inspect import logging import os from collections.abc import MutableMapping from dataclasses import dataclass from typing import Callable, Iterator, List import cv2 import numpy as np import zarr from scipy.ndimage import zoom from skimage.transform import downscale_local_mean, pyramid_gaussian, pyramid_laplacian LOGGER = logging.getLogger("ome_zarr.scale") @dataclass class Scaler: """Helper class for performing various types of downsampling. A method can be chosen by name such as "nearest". All methods on this that do not begin with "_" and not either "methods" or "scale" are valid choices. These values can be returned by the :func:`~ome_zarr.scale.Scaler.methods` method. >>> import numpy as np >>> data = np.zeros((1, 1, 1, 64, 64)) >>> scaler = Scaler() >>> downsampling = scaler.nearest(data) >>> for x in downsampling: ... print(x.shape) (1, 1, 1, 64, 64) (1, 1, 1, 32, 32) (1, 1, 1, 16, 16) (1, 1, 1, 8, 8) (1, 1, 1, 4, 4) """ copy_metadata: bool = False downscale: int = 2 in_place: bool = False labeled: bool = False max_layer: int = 4 method: str = "nearest" @staticmethod def methods() -> Iterator[str]: """Return the name of all methods which define a downsampling. Any of the returned values can be used as the `methods` argument to the :func:`Scaler constructor <ome_zarr.scale.Scaler._init__>` """ funcs = inspect.getmembers(Scaler, predicate=inspect.isfunction) for name, func in funcs: if name in ("methods", "scale"): continue if name.startswith("_"): continue yield name def scale(self, input_array: str, output_directory: str) -> None: """Perform downsampling to disk.""" func = getattr(self, self.method, None) if not func: raise Exception store = self.__check_store(output_directory) base = zarr.open_array(input_array) pyramid = func(base) if self.labeled: self.__assert_values(pyramid) grp = self.__create_group(store, base, pyramid) if self.copy_metadata: print(f"copying attribute keys: {list(base.attrs.keys())}") grp.attrs.update(base.attrs) def __check_store(self, output_directory: str) -> MutableMapping: """Return a Zarr store if it doesn't already exist.""" assert not os.path.exists(output_directory) return zarr.DirectoryStore(output_directory) def __assert_values(self, pyramid: List[np.ndarray]) -> None: """Check for a single unique set of values for all pyramid levels.""" expected = set(np.unique(pyramid[0])) print(f"level 0 {pyramid[0].shape} = {len(expected)} labels") for i in range(1, len(pyramid)): level = pyramid[i] print(f"level {i}", pyramid[i].shape, len(expected)) found = set(np.unique(level)) if not expected.issuperset(found): raise Exception( f"{len(found)} found values are not " "a subset of {len(expected)} values" ) def __create_group( self, store: MutableMapping, base: np.ndarray, pyramid: List[np.ndarray] ) -> zarr.hierarchy.Group: """Create group and datasets.""" grp = zarr.group(store) grp.create_dataset("base", data=base) series = [] for i, dataset in enumerate(pyramid): if i == 0: path = "base" else: path = "%s" % i grp.create_dataset(path, data=pyramid[i]) series.append({"path": path}) return grp def nearest(self, base: np.ndarray) -> List[np.ndarray]: """ Downsample using :func:`cv2.resize`. The :const:`cvs2.INTER_NEAREST` interpolation method is used. 
""" return self._by_plane(base, self.__nearest) def __nearest(self, plane: np.ndarray, sizeY: int, sizeX: int) -> np.ndarray: """Apply the 2-dimensional transformation.""" return cv2.resize( plane, dsize=(sizeY // self.downscale, sizeX // self.downscale), interpolation=cv2.INTER_NEAREST, ) def gaussian(self, base: np.ndarray) -> List[np.ndarray]: """Downsample using :func:`skimage.transform.pyramid_gaussian`.""" return list( pyramid_gaussian( base, downscale=self.downscale, max_layer=self.max_layer, multichannel=False, ) ) def laplacian(self, base: np.ndarray) -> List[np.ndarray]: """Downsample using :func:`skimage.transform.pyramid_laplacian`.""" return list( pyramid_laplacian( base, downscale=self.downscale, max_layer=self.max_layer, multichannel=False, ) ) def local_mean(self, base: np.ndarray) -> List[np.ndarray]: """Downsample using :func:`skimage.transform.downscale_local_mean`.""" rv = [base] # FIXME: fix hard-coding rv = [base] for i in range(self.max_layer): rv.append( downscale_local_mean( rv[-1], factors=(1, 1, 1, self.downscale, self.downscale) ) ) return rv def zoom(self, base: np.ndarray) -> List[np.ndarray]: """Downsample using :func:`scipy.ndimage.zoom`.""" rv = [base] print(base.shape) for i in range(self.max_layer): print(i, self.downscale) rv.append(zoom(base, self.downscale ** i)) print(rv[-1].shape) return list(reversed(rv)) # # Helpers # def _by_plane( self, base: np.ndarray, func: Callable[[np.ndarray, int, int], np.ndarray], ) -> np.ndarray: """Loop over 3 of the 5 dimensions and apply the func transform.""" assert 5 == len(base.shape) rv = [base] for i in range(self.max_layer): fiveD = rv[-1] # FIXME: fix hard-coding of dimensions T, C, Z, Y, X = fiveD.shape smaller = None for t in range(T): for c in range(C): for z in range(Z): out = func(fiveD[t][c][z][:], Y, X) if smaller is None: smaller = np.zeros( (T, C, Z, out.shape[0], out.shape[1]), dtype=base.dtype ) smaller[t][c][z] = out rv.append(smaller) return rv ```
{ "source": "jni/performance-tests", "score": 4 }
#### File: performance-tests/function-calls/f.py ```python def f(): pass def g(*args, **kwargs): pass def loop_0_empty(n): """Run empty loop n times.""" for i in range(n): pass def loop_1_f(n): """Run loop with empty function n times.""" for i in range(n): f() def loop_2_f_twice(n): """Run loop calling empty function twice per loop, n times.""" for i in range(n): f() f() def loop_3_g(n): """Run loop with empty function taking args, n times.""" for i in range(n): g() def loop_4_g_twice(n): """Run loop with empty function taking args, twice per loop, n times.""" for i in range(n): g() g() def loop_5_g_arg(n): """Run loop with empty function passing an arg, n times.""" for i in range(n): g(n) def loop_6_g_kwarg(n): """Run loop with empty function passing a kwarg, n times.""" for i in range(n): g(n=n) ```
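The module above only defines the loops; a driver like the following (assuming the file is importable as `f`) can time them, though absolute numbers will vary by interpreter and machine.

```python
# Time each loop_* micro-benchmark with timeit.
import timeit

import f

n = 100_000
for name in sorted(dir(f)):
    if name.startswith("loop_"):
        func = getattr(f, name)
        seconds = timeit.timeit(lambda: func(n), number=10)
        print(f"{name:>16}: {seconds:.4f} s")
```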
{ "source": "jni/pia-tracking", "score": 2 }
#### File: jni/pia-tracking/data_io.py ```python from dask import delayed import dask.array as da from datetime import datetime import glob import json import napari from nd2reader import ND2Reader import numpy as np import os from _parser import custom_parser, get_paths from pathlib import Path import tensorstore as ts from tensorstore import TensorStore import toolz as tz import zarr # ND2 as Zarr # ----------- def nd2_2_zarr(path): """ Save ND2 data as a zarr (all channels) based on single path or inputted directory """ if os.path.isdir(path): expr = os.path.join(path, '*.nd2') files = glob.glob(expr) for f in files: _nd2_2_zarr(os.path.join(path, f)) else: _nd2_2_zarr(path) def _nd2_2_zarr(data_path): """ Save ND2 data as a zarr (all channels) """ nd2_data = ND2Reader(data_path) meta = nd2_data.metadata input_path = Path(data_path) save_path = os.path.join(str(input_path.parent), str(input_path.stem) + '.zarr') if not os.path.exists(save_path): t = meta['num_frames'] y = meta['height'] x = meta['width'] z = meta['z_levels'].stop n_channels = len(meta['channels']) shape = (n_channels, t, y, x, z) # open a zarr array of correct size z = zarr.open_array( save_path, mode='w', shape=shape, chunks=(1, 1, None, None, None), dtype='i4', fill_value=0) # iterate through channels, producing a dask array, then saving each time for c in range(n_channels): arr = get_stack(nd2_data, c=c, t_max=t) for i in range(t): a = arr[i, ...].compute() z[c, i, ...] = a # save metadata save_path = os.path.join(str(input_path.parent), str(input_path.stem) + '.json') if not os.path.exists(save_path): md = _dict_2_JSON_serializable(meta) with open(save_path, 'w') as outfile: json.dump(md, outfile, indent=True) # Read ND2 # -------- def get_nd2_vol(nd2_data, c, frame): """ Get single frame of ND2Reader object """ nd2_data.default_coords['c']=c nd2_data.bundle_axes = ('y', 'x', 'z') #nd2_data.iter_axes = 'c' #v = nd2_data[c] v = nd2_data.get_frame(frame) v = np.array(v) return v def get_stack(nd2_data, c=2, frame=0, t_max=193, w_shape=False): """ Get a single channel of an ND2Reader object as dask stack """ nd2vol = tz.curry(get_nd2_vol) fram = get_nd2_vol(nd2_data, c, frame) arr = da.stack( [da.from_delayed(delayed(nd2vol(nd2_data, c))(i), shape=fram.shape, dtype=fram.dtype) for i in range(t_max)] ) shape = [t_max, ] shape[1:] = fram.shape if w_shape: return arr, shape else: return arr # Metadata to JSON # ---------------- def _dict_2_JSON_serializable(meta): """"" Convert ND2 metadata dict items to JSON serialisable form Returns ------- md: dict JSON serialisable dict Notes ----- Type conversions - datetime: str - ndarray: list - range: list(range.stop) """"" md = meta.copy() for key in meta.keys(): i = meta[key] md = _fix_values(key, md) return md def _fix_values(key, md): """ Recursively search each item in metadata and reassign values. """ i = md[key] md = _set_val(i, key, md) if isinstance(i, list): new = [] for idx in range(len(i)): new.append(_fix_values(idx, i)[idx]) md[key] = new if isinstance(i, dict): new = {} for k in i.keys(): new[k] = _fix_values(k, i)[k] md[key] = new return md def _set_val(i, key, md): """ Reassign values, changing type where necessary. 
""" if isinstance(i, datetime): md[key] = i.strftime("%m/%d/%Y, %H:%M:%S") elif isinstance(i, range): md[key] = [i.stop] elif isinstance(i, np.ndarray): md[key] = i.tolist() else: md[key] = i return md # Zarr via dask # ---------------------- def single_zarr(input_path, c=2, idx=0): ''' Parameters ---------- c: int or tuple Index of indices to return in array idx: int or tuple which indicies of the dim to apply c to ''' assert type(c) == type(idx) arr = da.from_zarr(input_path) slices = [slice(None)] * arr.ndim if isinstance(idx, int): slices[idx] = c elif isinstance(idx, tuple): for i, ind in enumerate(idx): slices[ind] = c[i] else: raise TypeError('c and idx must be int or tuple with same type') slices = tuple(slices) arr = arr[slices] return arr def view_zarr(input_path, scale=(1, 1, 1, 1, 4)): arr = da.from_zarr(input_path) with napari.gui_qt(): viewer = napari.Viewer() viewer.add_image(arr, name='all_channels', scale=scale) # Zarr via tensorstore # -------------------- #def shape(tsobj): #open_spec = tsobj.spec().to_json() # Tensorstore input_exclusive_max may have mixed list and int elements #input_exc_max = flatten_list(open_spec['transform']['input_exclusive_max'], []) #input_exc_max = np.array(input_exc_max) #input_inc_min = np.array(open_spec['transform']['input_inclusive_min']) #s = input_exc_max - input_inc_min #return s #def flatten_list(x, final): #""" # Tensorstore input_exclusive_max may have mixed lists # """ #for item in x: # if isinstance(item, list): # flatten_list(item, final) # else: # final.append(item) #return final #def ndim(tsobj): # return len(shape(tsobj)) #def hacky_ts_zarr_open(open_spec): #TensorStore.shape = property(shape) #TensorStore.ndim = property(ndim) # TensorStore.copy = TensorStore.__array__ #arr = ts.open(open_spec, create=False, open=True).result() # return arr # Save ND2 2 Zarr # --------------- if __name__ == "__main__": # Parser # ------ parser = custom_parser() args = parser.parse_args() path = get_paths(args, 'view_segmentation_3D', get={'data_path':'image'}, by_name=True )['data_path'] # TODO: fix this crap # Save Zarrs # ---------- nd2_2_zarr(path) ``` #### File: jni/pia-tracking/fl.py ```python import glob import numpy as np import matplotlib.pyplot as plt import cv2 from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from datetime import datetime import time import yaml from pathlib import Path from nd2reader import ND2Reader import pandas as pd from scipy import ndimage as ndi from skimage.segmentation import watershed from skimage.feature import peak_local_max import math from scipy.spatial.transform import Rotation as Rot import os import zarr import dask.array as da def get_datetime(): return datetime.now().strftime("%Y%m%d-%H%M%S") def get_nd2_files(data_path): return list(Path(data_path).rglob('*.nd2')) def get_objectDF_files(data_path): return list(Path(data_path).rglob('*.df.pkl')) def get_nd2_info(nd2_data): metadata_dict = dict(file=str(nd2_data.filename), px_microns=nd2_data.metadata['pixel_microns']) metadata_to_save = ['x', 'y', 'c', 't', 'z'] metadata_dict.update(**{ m:nd2_data._sizes[m] for m in metadata_to_save}) metadata_dict.update(**{ 'channel_'+ str(i):c for i, c in enumerate(nd2_data.metadata['channels'])}) metadata_dict.update(frame_rate = float(nd2_data.frame_rate)) metadata_dict.update(roi_t = float(np.mean(nd2_data.metadata['rois'][0]['timepoints']))) metadata_dict.update(roi_x = int(nd2_data.metadata['rois'][0]['positions'][0][1])) metadata_dict.update(roi_y = 
int(nd2_data.metadata['rois'][0]['positions'][0][0])) metadata_dict.update(roi_size = float(nd2_data.metadata['rois'][0]['sizes'][0][0])) return metadata_dict def nd2_info_to_df(data_path): nd2_infos=[] nd2_files = get_nd2_files(data_path) for file in nd2_files: with ND2Reader(file) as nd2_data: nd2_infos.append(get_nd2_info(nd2_data)) return pd.DataFrame(nd2_infos) def get_nd2_vol(nd2_data, c, frame): nd2_data.default_coords['c']=c nd2_data.bundle_axes = ('y', 'x', 'z') v = nd2_data.get_frame(frame) v = np.array(v) return v def dict_fix_numpy(dict_in): dict_out = dict() for di in dict_in.keys(): if type(dict_in[di]) == np.float64: dict_out[di] = float(dict_in[di]) elif type(dict_in[di]) == np.int64: dict_out[di] = int(dict_in[di]) else: dict_out[di] = dict_in[di] return dict_out def create_dog_func(s1, s2): def dog_func(image): image_dog = cv2.GaussianBlur(image.astype('float'),(0,0), s1) - cv2.GaussianBlur(image.astype('float'), (0,0), s2) return image_dog return dog_func def denoise(image): res_im = cv2.fastNlMeansDenoising(image, None, 6, 7, 20) return res_im def array_to_int8(arr): arr8 = arr-arr.min() arr8 = ((arr8/arr8.max())*255).astype('uint8') return arr8 def vol_to_images(vol): im_list= [vol[...,i] for i in range(vol.shape[-1])] return im_list def images_to_vol(images): vol = np.stack(images, axis=2) return vol def image_func_to_vol(func): def vol_func(vol): images = vol_to_images(vol) res_images = list(map(func, images)) res_vol = images_to_vol(res_images) return res_vol return vol_func def imshow(image): plt.figure(figsize=(10,10)) plt.imshow(image, cmap='gray') def imshow_spectral(image, **kwargs): plt.figure(figsize=(10,10)) plt.imshow(image, cmap='nipy_spectral', **kwargs) def imshow_prism(image, **kwargs): plt.figure(figsize=(10,10)) plt.imshow(image, cmap='prism', **kwargs) def median_threshold(image, adjust=10): image_median = np.median(image) threshold = image_median + adjust return threshold, image>threshold def voting_threshold(image, adjust=10): voting = np.bincount(image.ravel()).argmax() threshold = voting + adjust return threshold, image>threshold def interpolate_volume(vol, zfactor=4): int_vol = ndi.zoom(thv, (1, 1, zfactor), order=0) return int_vol def get_object_labels(vol): labels, num_objects = ndi.label(vol, structure=np.ones((3,3,3))) return num_objects, labels def get_object_labels_watershed(vol): distance = ndi.distance_transform_edt(vol) local_maxi = peak_local_max(distance, indices=False, min_distance=3,labels=vol) markers, num_objects = ndi.label(local_maxi, structure=np.ones((3,3,3))) labels = watershed(-distance, markers, mask=vol) return num_objects, labels def get_object_labels_watershed2(vol, th_vol): distance = ndi.distance_transform_edt(th_vol) local_maxi = peak_local_max(vol, indices=False, min_distance=3,labels=th_vol) markers, num_objects = ndi.label(local_maxi, structure=np.ones((3,3,3))) labels = watershed(-vol, markers, mask=th_vol) return num_objects, labels def get_object_labels_watershed3(vol, th_vol): distance = distance_transform_xy(th_vol) local_maxi = peak_local_max(vol, indices=False, min_distance=3,labels=th_vol) markers, num_objects = ndi.label(local_maxi, structure=np.ones((3,3,3))) labels = watershed(-vol, markers, mask=th_vol) return num_objects, labels def get_object_labels_watershed4(vol, vol_th): image_list = vol_to_images(vol) vol_dg = images_to_vol(list(map(dog, image_list))) vol_dg_th = vol_dg >0.1 vol_dg = array_to_int8(vol_dg) local_maxi = peak_local_max(vol_dg, indices=False, min_distance=3,labels=vol_dg_th) markers, 
num_objects = ndi.label(local_maxi, structure=np.ones((3,3,3))) labels = watershed(-vol_dg, markers, mask=vol_th) return num_objects, labels def save_yaml(dict_to_yaml, filename): with open(filename, 'w') as file: _ = yaml.dump(dict_to_yaml, file) class IvmObjects: def __init__(self, conf): self.conf = conf self.inspect_steps = {} self.labels = {} self.labels_volume = None self.nd2_file = None def add_nd2info(self, nd2info): self.conf['nd2info'] = dict_fix_numpy(nd2info.to_dict()) def _process_frame(self, frame): # load volume #d2_data = ND2Reader(self.nd2_file) with ND2Reader(self.nd2_file) as nd2_data: v = get_nd2_vol(nd2_data, self.conf['object_channel'], frame) self.inspect_steps[0]=dict(name = 'original_volume', data = v) v = array_to_int8(v) # denoise images vi = vol_to_images(v) vi_dn = list(map(denoise, vi)) v_dn = images_to_vol(vi_dn) v_dn = array_to_int8(v_dn) self.inspect_steps[1]=dict(name = 'denoised_volume', data = v_dn) #th, v_th = voting_threshold(v, adjust=8) # difference of gaussian v_dni= vol_to_images(v_dn) dog = create_dog_func(self.conf['dog_sigma1'], self.conf['dog_sigma2']) v_dg = images_to_vol(list(map(dog, v_dni))) self.inspect_steps[2]=dict(name = 'dog_volume', data = v_dg) # threshold v_dg_th = v_dg > self.conf['threshold'] v_dg = array_to_int8(v_dg) self.inspect_steps[3] = dict(name = 'threshold_volume', data = v_dg_th) # watershed and create labels local_maxi = peak_local_max(v_dg, indices=False, min_distance=self.conf['peak_min_dist'],labels=v_dg_th) markers, num_objects = ndi.label(local_maxi, structure=np.ones((3,3,3))) #v_labels = watershed(-v_dg, markers, mask=v_dg_th) v_labels = watershed(-v_dg, markers, mask=v_dg_th,compactness=1) self.inspect_steps[4] = dict(name = 'labels_volume', data = v_labels) # add to labels dask array to list self.labels[frame] = da.array(v_labels) # extract info from labels labels_idx = np.arange(1, v_labels.max()) label_pos=ndi.measurements.center_of_mass(v_dg_th, v_labels, labels_idx) df=pd.DataFrame(label_pos) #collect data for inspection if self.conf['process_type'] == 'single_thread': self.inspect_steps[0] = dict(name = 'original_volume', data = v) self.inspect_steps[1] = dict(name = 'denoised_volume', data = v_dn) self.inspect_steps[2] = dict(name = 'dog_volume', data = v_dg) self.inspect_steps[3] = dict(name = 'threshold_volume', data = v_dg_th) self.inspect_steps[4] = dict(name = 'labels_volume', data = v_labels) #makes a dataframe with all coordinates df.columns=['x', 'y', 'z'] # adjust xs, ys to centrer roi if self.conf['center_roi']: adjust_x = self.conf['nd2info']['roi_x'] adjust_y = self.conf['nd2info']['roi_y'] else: adjust_x = 0 adjust_y = 0 df['xs'] = df['x'] * self.conf['nd2info']['px_microns'] - adjust_x df['ys'] = df['y'] * self.conf['nd2info']['px_microns'] - adjust_y df['zs'] = df['z'] * self.conf['z_dist'] if self.conf['rotate']: #theta = np.radians(self.conf['rotate_angle']) #df['xxs'] = df['xs']*np.cos(theta) + df['ys']*np.sin(theta) #df['yys'] = df['ys']*np.cos(theta) - df['xs']*np.sin(theta) rot = Rot.from_euler('z', -self.conf['rotate_angle'], degrees=True) xyz = df[['xs', 'ys', 'zs']].to_numpy() xyz_rot = rot.apply(xyz) df['xs'], df['ys'] = xyz_rot[:,0], xyz_rot[:,1] df.insert(0, 'frame',frame) df.insert(1, 'time', frame/self.conf['nd2info']['frame_rate']) df.insert(0, 'path', self.nd2_file) df['size']=ndi.measurements.sum(v_dg_th, v_labels, labels_idx) df['int_mean']=ndi.measurements.mean(v, v_labels, labels_idx) df['int_max']=ndi.measurements.maximum(v, v_labels, labels_idx) #df['c']=c 
intensity_channels = self.conf['intensity_channels'] for c in intensity_channels: v_int = get_nd2_vol(nd2_data, c, frame) #v_int=ndimage.zoom(imf.get_vol(t=t, c=c2), (1,1,4),order=0) df['c' + str(c) + '_mean']=ndi.measurements.mean(v_int, v_labels, labels_idx) df['c' + str(c) + '_max']=ndi.measurements.maximum(v_int, v_labels, labels_idx) return df def process_file(self, nd2_file, frames): starttime = time.time() print('Starting :', nd2_file, '...',end='') self.nd2_file = nd2_file # Process according to the specified (or not) method if self.conf['process_type'] == 'multi_thread': with ThreadPoolExecutor(max_workers=self.conf['multi_workers'] ) as executor: futures = executor.map(self._process_frame, frames) df_obj_frames = list(futures) elif self.conf['process_type'] == 'multi_process': with ProcessPoolExecutor(max_workers=self.conf['multi_workers']) as executor: futures = executor.map(self._process_frame, frames) df_obj_frames = list(futures) else: df_obj_frames = list(map(self._process_frame, frames)) df_obj = pd.concat(df_obj_frames, ignore_index=True, axis=0) # save out the labels self._save_labels(frames) #post process dataframe df_obj['zf'] = df_obj['zs'] - np.percentile(df_obj['zs'], 2) df_obj.insert(0, 'pid', df_obj.reset_index()['index']) print('OK') print('Processed in {0:.2f} seconds. Found {1:} platelets.'.format((time.time()-starttime), len(df_obj.index))) return df_obj def _save_labels(self, frames): """ Save the labels as a zarr file in the data directory """ # get file name and path name = Path(self.nd2_file).stem data_path = Path(self.nd2_file).parents[0] lab_path = os.path.join(data_path, name + '_labels.zarr') # get the shape of the first frame shape = self.labels[list(self.labels.keys())[0]].shape # get the the number of frames if isinstance(frames, range): # e.g., range(0, 193) --> 194 frames t = frames.stop + 1 - frames.start else: t = len(frames) # instantiate zarr array self.labels_volume = zarr.open_array(lab_path, mode='w', shape=(t, shape[0], shape[1], shape[2]), chunks=(1, shape[0], shape[1], shape[2]), dtype='i4', fill_value=0) # add frames to volume for frame in frames: self.labels_volume[frame, ...] = self.labels[frame] ``` #### File: pia-tracking/ipython_scripts/view_data.py ```python import napari from nd2reader import ND2Reader # viewing a single timepoint # get_ipython().run_line_magic('cd', '~/Dropbox/share-files/') nd2_data = ND2Reader('200519_IVMTR69_Inj4_dmso_exp3.nd2') object_channel = 2 def get_nd2_vol(nd2_data, c, frame): nd2_data.default_coords['c']=c nd2_data.bundle_axes = ('y', 'x', 'z') v = nd2_data.get_frame(frame) v = np.array(v) return v fram = get_nd2_vol(nd2_data, object_channel, 70) napari.view_image(fram, scale=[1, 1, 4], ndisplay=3) # adding all timepoints using dask and viewing the whole volume from dask import delayed import toolz as tz nd2vol = tz.curry(get_nd2_vol) arr = da.stack( [da.from_delayed(delayed(nd2vol(nd2_data, 2))(i), shape=fram.shape, dtype=fram.dtype) for i in range(193)] # note hardcoded n-timepoints ) napari.view_image(arr, scale=[1, 1, 1, 4]) ```
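A sketch of the intended ND2-to-zarr-to-napari flow using the helpers from `data_io.py` above; the file name matches the example used in `view_data.py`, and the z spacing of 4 matches the scale used elsewhere in the repository.

```python
# Convert an ND2 acquisition to zarr, then view one channel in napari.
import napari

from data_io import nd2_2_zarr, single_zarr

nd2_path = '200519_IVMTR69_Inj4_dmso_exp3.nd2'
nd2_2_zarr(nd2_path)       # writes <stem>.zarr and <stem>.json next to the ND2

arr = single_zarr(nd2_path.replace('.nd2', '.zarr'), c=2, idx=0)
with napari.gui_qt():
    viewer = napari.Viewer(ndisplay=3)
    viewer.add_image(arr, scale=(1, 1, 1, 4))
```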
{ "source": "jni/platelet-segmentation", "score": 3 }
#### File: jni/platelet-segmentation/plots.py ```python import matplotlib.pyplot as plt import numpy as np import os import pandas as pd from pathlib import Path import ptitprince as pt # ---------- # Loss Plots # ---------- def save_loss_plot(path, loss_function, v_path=None, show=True): df = pd.read_csv(path) if v_path is not None: vdf = pd.read_csv(v_path) else: vdf = None p = Path(path) n = p.stem d = p.parents[0] out_path = os.path.join(d, n + '_loss.png') fig, ax = plot_loss(df, vdf=vdf, x_lab='Iteration', y_lab=loss_function, save=out_path, show=show) def plot_loss(df, vdf=None, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True): x = df['Unnamed: 0'].values y = df['loss'].values epochs = len(df['epoch'].unique()) no_batches = int(len(x) / epochs) epoch_ends = np.array([((i + 1) * no_batches) - 1 for i in range(epochs)]) epoch_end_x = x[epoch_ends] epoch_end_y = y[epoch_ends] fig, ax = plt.subplots() leg = ['loss',] ax.plot(x, y, linewidth=2) ax.scatter(epoch_end_x, epoch_end_y) title = 'Training loss' if vdf is not None: if len(vdf) > epochs: vy = vdf.groupby('batch_id').mean()['validation_loss'].values vx = vdf['batch_id'].unique() else: vy = vdf['validation_loss'].values vx = epoch_end_x title = title + ' with validation loss' leg.append('validation loss') if len(vdf) > epochs: #vy_err = v_df.groupby('batch_id').sem()['validation_loss'].values #ax.errorbar(vx, vy, vy_err, marker='.') ax.plot(vx, vy, linewidth=2, marker='o') else: ax.plot(vx, vy, linewidth=2, marker='o') ax.set(xlabel=x_lab, ylabel=y_lab) ax.set_title(title) ax.legend(leg) fig.set_size_inches(13, 9) if save is not None: plt.savefig(save, dpi=300) if show: plt.show() return fig, ax def save_channel_loss_plot(path, show=True): df = pd.read_csv(path) p = Path(path) n = p.stem d = p.parents[0] out_path = os.path.join(d, n + '_channel-loss.png') fig, ax = plot_channel_losses(df, save=out_path, show=show) def plot_channel_losses(df, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True): cols = list(df.columns) x = df['Unnamed: 0'].values non_channel_cols = ['Unnamed: 0', 'epoch', 'batch_num', 'loss', 'data_id'] channel_losses = [col for col in cols if col not in non_channel_cols] fig, axs = plt.subplots(2, 2) zs, ys, xs, cs = [], [], [], [] for col in channel_losses: y = df[col].values if col.startswith('z'): ls = _get_linestyle(zs) axs[0, 0].plot(x, y, linewidth=1, linestyle=ls) zs.append(col) if col.startswith('y'): ls = _get_linestyle(ys) axs[0, 1].plot(x, y, linewidth=1, linestyle=ls) ys.append(col) if col.startswith('x'): ls = _get_linestyle(xs) axs[1, 0].plot(x, y, linewidth=1, linestyle=ls) xs.append(col) if col.startswith('centre'): ls = _get_linestyle(cs) axs[1, 1].plot(x, y, linewidth=1, linestyle=ls) cs.append(col) axs[0, 0].set_title('Z affinities losses') axs[0, 0].legend(zs) axs[0, 1].set_title('Y affinities losses') axs[0, 1].legend(ys) axs[1, 0].set_title('X affinities losses') axs[1, 0].legend(xs) axs[1, 1].set_title('Centreness losses') axs[1, 1].legend(cs) for ax in axs.flat: ax.set(xlabel=x_lab, ylabel=y_lab) fig.set_size_inches(13, 9) if save is not None: plt.savefig(save, dpi=300) if show: plt.show() return fig, axs def _get_linestyle(lis): if len(lis) == 0: ls = '-' elif len(lis) == 1: ls = '--' else: ls = ':' return ls # -------- # VI Plots # -------- def VI_plot( path, cond_ent_over="GT | Output", cond_ent_under="Output | GT", lab="", save=False, show=True): df = pd.read_csv(path) overseg = df[cond_ent_over].values o_groups = [cond_ent_over] * len(overseg) underseg = 
df[cond_ent_under].values u_groups = [cond_ent_under] * len(underseg) groups = o_groups + u_groups x = 'Variation of information' y = 'Conditional entropy' data = { x : groups, y : np.concatenate([overseg, underseg]) } data = pd.DataFrame(data) o = 'h' pal = 'Set2' sigma = .2 f, ax = plt.subplots(figsize=(12, 10)) pt.RainCloud(x = x, y = y, data = data, palette = pal, bw = sigma, width_viol = .6, ax = ax, orient = o) p = Path(path) plt.title(p.stem) if save: save_path = os.path.join(p.parents[0], p.stem + lab + '_VI_rainclout_plot.png') plt.savefig(save_path, bbox_inches='tight') if show: plt.show() def experiment_VI_plots( paths, names, title, out_name, out_dir, cond_ent_over="GT | Output", cond_ent_under="Output | GT", ): groups = [] ce0 = [] ce1 = [] for i, p in enumerate(paths): df = pd.read_csv(p) ce0.append(df[cond_ent_over].values) ce1.append(df[cond_ent_under].values) groups += [names[i]] * len(df) x = 'Experiment' data = { x : groups, cond_ent_over : np.concatenate(ce0), cond_ent_under : np.concatenate(ce1) } data = pd.DataFrame(data) f, axs = plt.subplots(1, 2, figsize=(12, 10)) ax0 = axs[0, 0] ax1 = axs[0, 1] o = 'h' pal = 'Set2' sigma = .2 pt.RainCloud(x = x, y = cond_ent_over, data = data, palette = pal, bw = sigma, width_viol = .6, ax = ax0, orient = o) pt.RainCloud(x = x, y = cond_ent_under, data = data, palette = pal, bw = sigma, width_viol = .6, ax = ax1, orient = o) plt.title(title) if save: save_path = os.path.join(out_dir, '_VI_rainclould_plots.png') plt.savefig(save_path, bbox_inches='tight') if show: plt.show() if __name__ == '__main__': #name = 'loss_z-1_z-2_y-1_y-2_y-3_x-1_x-2_x-3_c_cl.csv' name = 'loss_210401_150158_z-1_y-1_x-1__wBCE2-1-1.csv' #dir_ = '/Users/amcg0011/Data/pia-tracking/cang_training/210331_training_0' dir_ = '/Users/amcg0011/Data/pia-tracking/cang_training/210401_150158_z-1_y-1_x-1__wBCE2-1-1' path = os.path.join(dir_, name) save_channel_loss_plot(path) #v_name = 'validation-loss_z-1_z-2_y-1_y-2_y-3_x-1_x-2_x-3_c_cl.csv' v_name = 'validation-loss_210401_150158_z-1_y-1_x-1__wBCE2-1-1.csv' v_path = os.path.join(dir_, v_name) loss_function = 'Weighted BCE Loss (2, 1, 1)' save_loss_plot(path, loss_function, v_path) ``` #### File: jni/platelet-segmentation/train_io.py ```python from augment import augment_images from datetime import datetime from helpers import get_files, log_dir_or_None, write_log, LINE import numpy as np import os import pandas as pd from pathlib import Path import re import skimage.filters as filters from skimage.measure import regionprops from skimage.morphology._util import _offsets_to_raveled_neighbors from tifffile import TiffWriter, imread from time import time import torch from tqdm import tqdm import zarr # ------------------- # Generate Train Data # ------------------- def get_train_data( image_paths, labels_paths, out_dir, shape=(10, 256, 256), n_each=100, channels=('z-1', 'y-1', 'x-1', 'centreness'), scale=(4, 1, 1), log=True ): """ Generate training data from whole ground truth volumes. Parameters ---------- image_path: str path to the image zarr file, which must be in tczyx dim format (ome bioformats) labels_path: str path to the labels zarr file, which must be in tczyx dim format shape: tuple of int Shape of the test data to channels: tuple of str tuple of channels to be added to the training data. Affinities: 'axis-n' (pattern: r'[xyz]-\d+' e.g., 'z-1') Centreness: 'centreness' scale: tuple of numeric Scale of channels. This is used in calculating centreness score. 
log: bool Should print out be recorded in out_dir/log.txt? Returns ------- xs: list of torch.Tensor List of images for training ys: list of torch.Tensor List of affinities for training ids: list of str ID strings by which each image and label are named. Eventually used for correctly labeling network output Notes ----- It takes a very long time to obtain training data with sufficient information (as determined by min_affinity param). """ assert len(image_paths) == len(labels_paths) for i in range(len(image_paths)): print(LINE) s = f'Generating training data from image: {image_paths[i]}, labels: {labels_paths[i]}' print(s) if log: write_log(LINE, out_dir) write_log(s, out_dir) im = zarr.open_array(image_paths[i]) l = zarr.open_array(labels_paths[i]) if i == 0: xs, ys, ids = get_random_chunks(im, l, out_dir, shape=shape, n=n_each, channels=channels, scale=scale, log=log) else: xs_n, ys_n, ids_n = get_random_chunks(im, l, out_dir, shape=shape, n=n_each, channels=channels, scale=scale, log=log) for j in range(len(xs_n)): xs.append(xs_n[j]) ys.append(ys_n[j]) ids.append(ids_n[j]) return xs, ys, ids def get_random_chunks( image, labels, out_dir, shape=(10, 256, 256), n=25, min_affinity=300, channels=('z-1', 'y-1', 'x-1', 'centreness'), scale=(4, 1, 1), log=True ): ''' Obtain random chunks of data from whole ground truth volumes. Parameters ---------- image: array like same shape as labels labels: array like same shape as image shape: tuple of int shape of chunks to obtain n: int number of random chunks to obtain min_affinity: minimum cut off sum of affinities for an image. As affinities as belong to {0, 1}, this param is the number of voxels that boarder labels. Returns ------- xs: list of torch.Tensor List of images for training ys: list of torch.Tensor List of affinities for training ids: list of str ID strings by which each image and label are named. Eventually used for correctly labeling network output ''' im = np.array(image) l = np.array(labels) assert len(im.shape) == len(shape) a = get_training_labels(l, channels=channels, scale=scale) xs = [] ys = [] labs = [] i = 0 df = {'z_start' : [], 'y_start' : [], 'x_start' : []} while i < n: dim_randints = [] for j, dim in enumerate(shape): max_ = im.shape[j] - dim - 1 ri = np.random.randint(0, max_) dim_randints.append(ri) # Get the network output: affinities s_ = [slice(None, None),] # for j in range(len(shape)): s_.append(slice(dim_randints[j], dim_randints[j] + shape[j])) s_ = tuple(s_) y = a[s_] if y.sum() > min_affinity * len(channels): # if there are a sufficient number of boarder voxels # add coords to output df for j in range(len(shape)): _add_to_dataframe(j, dim_randints[j], df) # Get the network input: image s_ = [slice(dim_randints[j], dim_randints[j] + shape[j]) for j in range(len(shape))] s_ = tuple(s_) x = im[s_] x = normalise_data(x) # get the GT labels so that later quatitative comparison can be made with final # segmentation lab = l[s_] # that's right, be confused by my variable names!! 
# data augmentation for better generalisation x, y, lab = augment_images(x, y, lab) # add the affinities and image chunk to the training data y = torch.from_numpy(y.copy()) ys.append(y) x = torch.from_numpy(x.copy()) xs.append(x) labs.append(lab) # another successful addition, job well done you crazy mofo i += 1 print(LINE) s = f'Obtained {n} {shape} chunks of training data' print(s) if log: write_log(LINE, out_dir) write_log(s, out_dir) log_dir = log_dir_or_None(log, out_dir) print_labels_info(channels, out_dir=log_dir) ids = save_random_chunks(xs, ys, labs, out_dir) now = datetime.now() d = now.strftime("%y%m%d_%H%M%S") df['data_ids'] = ids df = pd.DataFrame(df) df.to_csv(os.path.join(out_dir, 'start_coords' + d + '.csv')) return xs, ys, ids def _add_to_dataframe(dim, start, df): if dim == 0: df['z_start'].append(start) if dim == 1: df['y_start'].append(start) if dim == 2: df['x_start'].append(start) # -------------------------- # Lable Generating Functions # -------------------------- def get_training_labels( l, channels=('z-1', 'y-1', 'x-1', 'centreness'), scale=(4, 1, 1)): labels = [] for chan in channels: if chan.startswith('z'): axis = 0 elif chan.startswith('y'): axis = 1 elif chan.startswith('x'): axis = 2 n = re.search(r'\d+', chan) if n is not None: # get the nth affinity n = int(n[0]) lab = nth_affinity(l, n, axis) elif chan == 'centreness': # get the centreness score lab = get_centreness(l, scale=scale) elif chan == 'centreness-log': lab = get_centreness(l, scale=scale, log=True) elif chan == 'centroid-gauss': lab = get_gauss_centroids(l) else: m = f'Unrecognised channel type: {chan} \n' m = m + 'Please enter str of form axis-n for nth affinity \n' m = m + 'or centreness for centreness score.' raise ValueError(m) if chan.endswith('-smooth'): lab = smooth(lab) labels.append(lab) labels = np.stack(labels, axis=0) return labels def nth_affinity(labels, n, axis): affinities = [] labs_pad = np.pad(labels, n, mode='reflect') for i in range(labels.shape[axis]): s_0 = [slice(None, None)] * len(labs_pad.shape) s_0[axis] = slice(i, i + 1) s_0 = tuple(s_0) s_n = [slice(None, None)] * len(labs_pad.shape) s_n[axis] = slice(i + n, i + n + 1) s_n = tuple(s_n) new = labs_pad[s_0] - labs_pad[s_n] new = np.squeeze(new) if len(new) > 0: affinities.append(new) affinities = np.stack(affinities, axis=axis) s_ = [slice(n, -n)] * len(labs_pad.shape) s_[axis] = slice(None, None) s_ = tuple(s_) affinities = affinities[s_] affinities = np.where(affinities != 0, 1., 0.) return affinities def get_centreness(labels, scale=(4, 1, 1), log=False, power=False): """ Obtains a centreness score for each voxel belonging to a labeled object. Values in each object sum to one. Values are inversely proportional to euclidian distance from the object centroid. Notes ----- Another possible implementation would involve the medioid, as in: Lalit, M., <NAME>. and <NAME>., 2021. Embedding-based Instance Segmentation of Microscopy Images. arXiv. Unfortunately, skimage doesn't yet have a method for finding the medioid (more dev, *sigh*). 
""" scale = np.array(scale) def dist_score(mask): output = np.zeros_like(mask, dtype=np.float32) c = np.mean(np.argwhere(mask), axis=0) indices, values = inverse_dist_score( mask, c, scale, log=log, power=power ) output[indices] = values return output t = time() props = regionprops(labels, extra_properties=(dist_score,)) new = np.zeros(labels.shape, dtype=np.float32) for i, prop in tqdm(enumerate(props), desc='Score centreness'): new[prop.slice] += prop.dist_score new = np.nan_to_num(new) print('------------------------------------------------------------') print(f'Obtained centreness scores in {time() - t} seconds') return new def inverse_dist_score(mask, centroid, scale, log, power): ''' Compute euclidian distances of each index from a mask representing a single object from the centroid of said object Uses scale to account for annisotropy in image ''' indices = np.argwhere(mask > 0) distances = [] centre = centroid for i in range(indices.shape[0]): ind = indices[i, ...] diff = (centre - ind) * scale dist = np.linalg.norm(diff) if log and abs(dist) > 0: m = f'Infinite value with distance of {dist}' dist = np.log(dist) assert not np.isinf(dist), m if power: dist = 2 ** dist distances.append(dist) distances = np.array(distances) if log: distances = distances + np.abs(distances.min()) # bring min value to 0 norm_distances = distances / distances.max() values = (1 - norm_distances) indices = tuple(indices.T.tolist()) return indices, values # not used def get_gauss_centroids(labels, sigma=1, z=0): centroids = [prop['centroid'] for prop in regionprops(labels)] centroids = tuple(np.round(np.stack(centroids).T).astype(int)) centroid_image = np.zeros(labels.shape, dtype=float) centroid_image[centroids] = 1. gauss_cent = [] for i in range(labels.shape[z]): s_ = [slice(None, None)] * labels.ndim s_[z] = slice(i, i+1) s_ = tuple(s_) plane = np.squeeze(centroid_image[s_]) gauss_cent.append(filters.gaussian(plane, sigma=sigma)) out = np.stack(gauss_cent, axis=z) out = out - out.min() out = out / out.max() #print(out.dtype, out.shape, out.max(), out.min()) return out def smooth(image, z=0, sigma=1): out = [] for i in range(image.shape[z]): s_ = [slice(None, None)] * image.ndim s_[z] = slice(i, i+1) s_ = tuple(s_) plane = np.squeeze(image[s_]) out.append(filters.gaussian(plane, sigma=sigma)) out = np.stack(out, axis=z) return out # not currently referenced, uses nth_affinity() for generality def get_affinities(image): """ Get short-range voxel affinities for a segmentation. Affinities are belonging to {0, 1} where 1 represents a segment boarder voxel in a particular direction. Affinities are produced for each dimension of the labels and each dim has its own channel (e.g, (3, z, y, x)). Note ---- Others may represent affinities with {-1, 0}, because technically... 
My network wasn't designed for this :) """ padded = np.pad(image, 1, mode='reflect') affinities = [] for i in range(len(image.shape)): a = np.diff(padded, axis=i) a = np.where(a != 0, 1.0, 0.0) a = a.astype(np.float32) s_ = [slice(1, -1)] * len(image.shape) s_[i] = slice(None, -1) s_ = tuple(s_) affinities.append(a[s_]) affinities = np.stack(affinities) return affinities # ------------- # Log and Print # ------------- def print_labels_info(channels, out_dir=None, log_name='log.txt'): print(LINE) s = f'Training labels have {len(channels)} output channels: \n' print(s) if out_dir is not None: write_log(LINE, out_dir, log_name) write_log(s, out_dir, log_name) for i, chan in enumerate(channels): affinity_match = re.search(r'[xyz]-\d*', chan) if affinity_match is not None: n = f'{affinity_match[0]} affinities' elif chan == 'centreness': n = 'centreness score' elif chan == 'centreness-log': n = 'log centreness score' elif chan == 'centroid-gauss': n = 'gaussian centroids' else: n = 'Unknown channel type' s = f'Channel {i}: {n}' print(s) if out_dir is not None: write_log(s, out_dir, log_name) # ----------- # Save Output # ----------- def save_random_chunks(xs, ys, labs, out_dir): ''' Save the random chunks as they are sampled ''' os.makedirs(out_dir, exist_ok=True) assert len(xs) == len(ys) ids = [] # iterate over the sample for i in range(len(xs)): # get the datetime to give the samples unique names now = datetime.now() d = now.strftime("%y%m%d_%H%M%S") + '_' + str(i) ids.append(d) # save the image i_name = d + '_image.tif' i_path = os.path.join(out_dir, i_name) with TiffWriter(i_path) as tiff: tiff.write(xs[i].numpy()) # save the labels l_name = d + '_labels.tif' l_path = os.path.join(out_dir, l_name) with TiffWriter(l_path) as tiff: tiff.write(ys[i].numpy()) # save the ground truth segmentation s_name = d + '_GT.tif' s_path = os.path.join(out_dir, s_name) with TiffWriter(s_path) as tiff: tiff.write(labs[i]) # this is already ndarray not tensor assert len(ids) == len(ys) print('------------------------------------------------------------') print('Training data saved at:') print(out_dir) return ids # --------------- # Load Train Data # --------------- def load_train_data( data_dir, id_regex=r'\d{6}_\d{6}_\d{1,3}', x_regex=r'\d{6}_\d{6}_\d{1,3}_image.tif', y_regex=r'\d{6}_\d{6}_\d{1,3}_labels.tif' ): ''' Load train data from a directory according to a naming convention Parameters ---------- data_dir: str Directory containing data id_regex: r string regex that will be used to extract IDs that will be used to label network output x_regex: r string regex that represents image file names, complete with extension (tiff please) y_regex: r string regex that represents training label files, complete with extension (tiff please) Returns ------- xs: list of torch.Tensor List of images for training ys: list of torch.Tensor List of affinities for training ids: list of str ID strings by which each image and label are named. Eventually used for correctly labeling network output ''' # Get file names for images and training labels x_paths, y_paths = get_files( data_dir, x_regex=x_regex, y_regex=x_regex ) # Get IDs id_pattern = re.compile(id_regex) ids = [] x_paths.sort() y_paths.sort() for i in range(len(x_paths)): xn = Path(x_paths[i]).stem # this could have been avoided yn = Path(y_paths[i]).stem # why would I bother now though?! 
        # assumes there will be a match for each
        xid = id_pattern.search(xn)[0]
        yid = id_pattern.search(yn)[0]
        m = 'There is a mismatch in image and label IDs'
        assert xid == yid, m
        ids.append(xid)
    # Get images and training labels in tensor form
    xs = []
    ys = []
    for i in range(len(x_paths)):
        xp = x_paths[i]
        yp = y_paths[i]
        x = imread(xp)
        x = normalise_data(x)
        y = imread(yp)
        xs.append(torch.from_numpy(x))
        ys.append(torch.from_numpy(y))
    # returns objects in the same manner as get_train_data()
    print('------------------------------------------------------------')
    print(f'Loaded {len(xs)} sets of training data')
    print_labels_info(ys[0].shape)
    return xs, ys, ids


# ------------------------
# General Helper Functions
# ------------------------
def normalise_data(image):
    '''
    Bring image values to between 0-1

    Parameters
    ----------
    image: np.array
        Image data. Dtype should be float.
    '''
    im = image / image.max()
    return im


if __name__ == "__main__":
    pass
```
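The `nth_affinity` and `get_affinities` helpers in the file above describe voxel affinities in prose: a voxel is marked 1 wherever the label value changes between it and a neighbour n steps further along a given axis. The toy sketch below is not part of the repository; it is a minimal, hedged illustration of the same idea that uses `np.roll` instead of the reflect-padding used above, so edge handling differs.

```python
# Minimal sketch (hypothetical, not repository code): first-order affinities
# mark voxels whose label differs from the neighbour n steps along `axis`.
import numpy as np

def toy_affinity(labels, n=1, axis=0):
    # compare labels[i] with labels[i + n] along the chosen axis
    shifted = np.roll(labels, -n, axis=axis)
    aff = (labels != shifted).astype(np.float32)
    # np.roll wraps around, so zero the last n planes for this toy example
    sl = [slice(None)] * labels.ndim
    sl[axis] = slice(labels.shape[axis] - n, None)
    aff[tuple(sl)] = 0.0
    return aff

labels = np.array([[1, 1, 2, 2],
                   [1, 1, 2, 2],
                   [0, 0, 0, 0]])
print(toy_affinity(labels, n=1, axis=1))  # 1s where the 1|2 boundary sits
print(toy_affinity(labels, n=1, axis=0))  # 1s on the row bordering background
```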
{ "source": "jnippula/agent_protocol_splitter", "score": 3 }
#### File: agent_protocol_splitter/test/rtps_reader.py ```python import argparse import socket import serial import sys SERIAL_PORT = '/dev/vcom_px4' UDP_ADDR = '127.0.0.1' UDP_PORT = 4801 MAVLINK1_HEADER_LEN = 6 MAVLINK1_CHECKSUM_LEN = 2 MAVLINK2_HEADER_LEN = 10 MAVLINK2_CHECKSUM_LEN = 2 MAVLINK2_SIGNATURE_LEN = 13 STATUS_NEED_MORE_DATA = -3; STATUS_NOT_FOUND = -2; STATUS_RECEIVING = -1; mavlink_packet_len = 0 parse_status = STATUS_NOT_FOUND storage = b'' msg = b'' def decode_bytes(data): string = '' for byte in data: string += '{:02x} '.format(byte) print(' bytes: ', string) print() def check_msgs(): global mavlink_packet_len global parse_status global storage if parse_status == STATUS_RECEIVING: return parse_status = STATUS_NOT_FOUND pos = 0 mavlink_packet_len = 0 while pos < len(storage): if storage[pos] == 0xFD: parse_status = STATUS_NEED_MORE_DATA if len(storage) >= 3: parse_status = pos payload_len = storage[pos+1] mavlink_packet_len = MAVLINK2_HEADER_LEN + payload_len + MAVLINK2_CHECKSUM_LEN incomp_flg = storage[pos+2] if incomp_flg & 1: mavlink_packet_len += MAVLINK2_SIGNATURE_LEN return elif storage[pos] == 0xFE: parse_status = STATUS_NEED_MORE_DATA if len(storage) > MAVLINK1_HEADER_LEN: payload_len = storage[pos+1] parse_status = pos mavlink_packet_len = MAVLINK1_HEADER_LEN + payload_len + MAVLINK1_CHECKSUM_LEN return else: pos += 1 def read_message(): global parse_status global storage global msg offset = 0 if parse_status > 0: storage = storage[parse_status:] if (len(msg) + len(storage)) < mavlink_packet_len: msg += storage parse_status = STATUS_RECEIVING else: remaining = mavlink_packet_len - len(msg) if remaining < 0: print("ERROR: msg is bigger than packet len!!!") msg += storage[:remaining] storage = storage[remaining:] decode_mavlink(msg) msg = b'' parse_status = STATUS_NOT_FOUND def decode_mavlink(data): if data[0] == 0xFE: print() print(' Mavlink1') payload_len = data[1] print(' [0] Magic : ' + hex(data[0])) print(' [1] Payload len : ' + str(data[1])) print(' [2] Sequence : ' + str(data[2])) print(' [3] SysId : ' + str(data[3])) print(' [4] CompId : ' + str(data[4])) print(' [5] MsgId : ' + str(data[5])) offset = MAVLINK1_HEADER_LEN pload = ' [n] Payload : [ ' for i in range(payload_len): pload += '{:02x} '.format(data[6+i]) pload += ']' print(pload) offset += payload_len print(' Checksum : ' + hex(data[offset] | data[offset+1]<<8)) offset += MAVLINK1_CHECKSUM_LEN elif data[0] == 0xFD: print() print(' Mavlink2') payload_len = data[1] incomp_flg = data[2] print(' [0] Magic : ' + hex(data[0])) print(' [1] Payload len : ' + str(data[1])) print(' [2] IncompatFlg : ' + hex(data[2])) print(' [3] CompatFlg : ' + hex(data[3])) print(' [4] Sequence : ' + str(data[4])) print(' [5] SysId : ' + str(data[5])) print(' [6] CompId : ' + str(data[6])) print(' [7-9] MsgId : ' + hex(data[7] | data[8] << 8 | data[9] << 16)) offset = MAVLINK2_HEADER_LEN pload = ' [n] Payload : [ ' for i in range(payload_len): pload += '{:02x} '.format(data[6+i]) pload += ']' print(pload) offset += payload_len print(' Checksum : ' + hex(data[offset] | data[offset+1]<<8)) offset += MAVLINK2_CHECKSUM_LEN if incomp_flg & 1: # signature exists in the message pload = ' Signature : [ ' for i in range(MAVLINK2_SIGNATURE_LEN): pload += '{:02x} '.format(data[offset+6+i]) pload += ']' print(pload) offset += MAVLINK2_SIGNATURE_LEN return offset def main(): global parse_status global storage global msg parse_status = 0 parser = argparse.ArgumentParser(description='Test protocol splitter') parser.add_argument('--serial', 
action='store', default="", help='Serial device to listen') parser.add_argument('--ip', action='store', default=UDP_ADDR, help='UDP address') parser.add_argument('--port', action='store', default=UDP_PORT, help='UDP port') args = parser.parse_args() source = None if args.serial != '': source = serial.Serial(args.serial, timeout=0.1) print('Listening to:', args.serial) else: source = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) addr = (args.ip, int(args.port)) source.bind(addr) print('Listening to:', args.ip, ':', args.port) storage = b'' msg = b'' while True: data = b'' if args.serial != '': while not len(data): data = source.read(256) else: data, address = source.recvfrom(256) print(' received', str(len(data)), 'bytes') decode_bytes(data) #storage += data #check_msgs() #if parse_status == STATUS_NOT_FOUND: # storage = b'' #elif parse_status != STATUS_NEED_MORE_DATA: # read_message() if __name__ == '__main__': main() ```
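The `check_msgs()` state machine in the reader above derives the expected packet length from the MAVLink framing constants (10-byte MAVLink 2 header, 2-byte checksum, optional 13-byte signature when bit 0 of the incompat flags is set). The snippet below is only a hedged sanity check of that arithmetic; the header bytes are invented for the example.

```python
# Hypothetical sanity check of the MAVLink 2 length calculation used above.
MAVLINK2_HEADER_LEN = 10
MAVLINK2_CHECKSUM_LEN = 2
MAVLINK2_SIGNATURE_LEN = 13

def mavlink2_packet_len(header_bytes):
    payload_len = header_bytes[1]        # byte 1: payload length
    incompat_flags = header_bytes[2]     # byte 2: incompat flags
    length = MAVLINK2_HEADER_LEN + payload_len + MAVLINK2_CHECKSUM_LEN
    if incompat_flags & 1:               # bit 0 set => message is signed
        length += MAVLINK2_SIGNATURE_LEN
    return length

# magic, payload_len, incompat, compat, seq, sysid, compid, msgid (3 bytes)
header = bytes([0xFD, 9, 0, 0, 7, 1, 1, 0, 0, 0])
print(mavlink2_packet_len(header))       # 10 + 9 + 2 = 21
```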
{ "source": "jnippula/satt", "score": 2 }
#### File: satt/common/initializer.py ```python import os import sys import glob import subprocess from satt.common import envstore class Satt: _usage_str = '' _sat_path = '' _satt_version = '' _variables = None _options = {} def __init__(self, sat_path, sat_venv_bin): self._sat_path = sat_path self._sat_venv_bin = sat_venv_bin envstore.get_instance().set_sat_home(sat_path) envstore.get_instance().set_sat_venv_bin(sat_venv_bin) def initialize(self): envstore.get_instance().load() self.check_version() def check_version(self): version_file = os.path.join(self._sat_path, '.version') self._satt_version = '' if os.path.isfile(version_file): self._satt_version = open(version_file).readline().rstrip() else: old_dir = os.getcwd() os.chdir(self._sat_path) try: self._satt_version = subprocess.check_output('git describe --tags --always', shell=True).rstrip() except: self._satt_version = '0.0.0' os.chdir(old_dir) envstore.get_instance().set_sat_version(self._satt_version) def parse_options(self): if len(sys.argv) > 1: if sys.argv[1] == "--version" or sys.argv[1] == "-v": print ("satt version: " + self._satt_version) sys.exit(0) if sys.argv[1] == '--completewords': print (' '.join(sorted(self._options))) sys.exit(0) if sys.argv[1] == '--home': print (self._sat_path) sys.exit(0) def get_commands(self): opt_desc = {} # glob all commands.py files for root, dirs, files in os.walk(os.path.join(self._sat_path, 'satt')): cmd_file = glob.glob(os.path.join(root, 'command.py')) if len(cmd_file) > 0: key = os.path.basename(os.path.dirname(cmd_file[0])) self._options[key] = 'satt.' + key f = open(cmd_file[0]) while True: line = f.readline() if not line or line.startswith('import'): break if line.startswith('# description: '): opt_desc[key] = line.split(':')[1] # Create USAGE string self._usage_str = ('\nUSAGE: satt [-v|--version] [command]\n Commands:\n') for k in sorted(self._options.keys()): self._usage_str += ' ' + k.ljust(15) + ': ' if k in opt_desc.keys(): self._usage_str += opt_desc[k] else: self._usage_str += '\n' # return commands return self._options, opt_desc def print_usage(self): print (self._usage_str) ``` #### File: common/sbparser/sbcombiner.py ```python from satparser import * from collections import defaultdict import sys import os if sys.platform.startswith('win'): import msvcrt class Enum(set): def __getattr__(self, name): if name in self: return name raise AttributeError class sideband_binary_dumper(sideband_parser_output): lines_ = {} def __init__(self, sb_lines): self.lines_ = sb_lines def header_and_binary(self, header, binary): if header['tscp'] in self.lines_.keys(): print "WARNING: duplicate timestamps in sideband streams!!!" 
while True: header['tscp'] += 1 if header['tscp'] not in self.lines_.keys(): break; self.lines_[header['tscp']] = binary class sideband_filein(sideband_parser_input): # c:\ type mydoc.txt | python.exe -u myscript.py eof_ = False bad_ = False input_file_ = None def __init__(self, input_file): self.input_file_ = open(input_file, 'rb') def read(self, size): try: buffer = self.input_file_.read(size) if not buffer: self.eof_ = True return False return buffer except: self.bad_ = True return False def eof(self): return self.eof_ def bad(self): # return ferror(stdin) return self.bad_ class sideband_combiner: input_paths_ = [] output_path_ = '' sb_lines_ = {} def __init__(self, input_paths, output_path): self.input_paths_ = input_paths self.output_path_ = output_path def combine(self): for path in self.input_paths_: sb_input = sideband_filein(path) sb_output = sideband_binary_dumper(self.sb_lines_) parser = sideband_parser(sb_input, sb_output) parsing_ok = parser.parse() f = open(self.input_paths_[0], 'rb') sat_ver_data = f.read(12) f.close() outf = open(self.output_path_, 'wb') outf.write(sat_ver_data) for i in sorted(self.sb_lines_.keys()): outf.write(self.sb_lines_[i]) outf.close() ``` #### File: common/targetos/targetos.py ```python """ Target OS awareness """ import os import sys from satt.common import helper from satt.common import envstore os_instance = None def get_instance(): global os_instance if os_instance is None: os = envstore.get_instance().get_variable('sat_os') if os < 0: print "\nNo target OS selected" print "Please run 'satt config'" sys.exit(-1) else: if os == OsHelper.Linux: from satt.common.targetos.os_linux import LinuxOs os_instance = LinuxOs() elif os == OsHelper.Android: from satt.common.targetos.os_android import AndroidOs os_instance = AndroidOs() elif os == OsHelper.ChromeOS: from satt.common.targetos.os_chromeos import ChromeOs os_instance = ChromeOs() elif os == OsHelper.YoctoOs: from satt.common.targetos.os_yocto import YoctoOs os_instance = YoctoOs() else: print "ERROR: Unsupported target OS type (" + os + ")" sys.exit(-1) return os_instance class OsData: Name = '' ConnMethods = [] SourcePathNeeded = False def __init__(self, name, connlist, sp): self.Name = name self.ConnMethods = connlist self.SourcePathNeeded = sp class OsHelper: Linux, Android, ChromeOS, YoctoOs = range(4) osdata = {Linux: OsData('Linux', ['SSH', 'SHELL'], False), Android: OsData('Android', ['ADB'], True), ChromeOS: OsData('ChromeOS', ['SSH', 'SHELL'], False), YoctoOs: OsData('Yocto', ['SSH'], False)} class TargetOs(object): """ OS specific base class """ _debug = False _control = None _trace_path = '' _trace_binary_path = '' _sat_module_paths = ['/tmp/sat.ko'] _sat_home_path = '' _readchar = None _helper = None def __init__(self, debug=False): self._sat_home_path = envstore.store.get_sat_home() from satt.common.control import control self._control = control.get_instance() self._debug = debug self._helper = helper.get_instance() self._readchar = self._helper.get_readchar_object() def get_os_data(self, trace_path): self.debug_print("TargetOs::get_os_data") self._trace_path = trace_path self._trace_binary_path = os.path.join(trace_path, 'binaries') def get_tmp_folder(self): return '/tmp' def get_vmlinux_path(self): ''' Virtual function ''' def print_path_type_hint(self, path_type): if path_type == 'sat_path_modules': print('\n Hint: folder which contains kernel module binaries under kernel sub folder') print(' dir - kernel') print(' file - modules.dep') print(' files - modules.*\n') elif path_type 
== 'sat_path_kernel': print('\n Hint: folder which contains vmlinux and System.map files') print(' dir - arch') print(' dir - drivers') print(' dir - include') print(' dir - kernel') print(' file - System.map') print(' file - vmlinux.*\n') elif path_type == 'sat_path_kernel': print('\n Hint: folder which contains vmlinux and System.map files') print(' dir - arch') print(' dir - drivers') print(' dir - include') print(' dir - kernel') print(' file - System.map') print(' file - vmlinux.*\n') elif path_type == 'sat_path_kernel_src': print('\n Hint: folder which contains kernel sources') print(' dir - arch') print(' dir - drivers') print(' dir - include') print(' file - Kbuild') print(' file - Kconfig') print(' dir - net') print(' dir - scripts') elif path_type == 'sat_target_build': print('\n Hint: folder which contains all other binaries e.g. *.so') print(' dir - etc') print(' dir - lib') print(' dir - usr') print(' dir - var') def validate_target_path(self, paths, path_type): if path_type in paths: paths[path_type] = os.path.abspath(os.path.expanduser(paths[path_type])) path_found = os.path.isdir(paths[path_type]) files_found = False if path_type == 'sat_path_modules': files_found = os.path.exists(os.path.join(paths[path_type], 'modules.dep')) elif path_type == 'sat_path_kernel': files_found = os.path.exists(os.path.join(paths[path_type], 'System.map')) elif path_type == 'sat_path_kernel_src': files_found = os.path.exists(os.path.join(paths[path_type], 'Kconfig')) elif path_type == 'sat_target_build': files_found = True if path_found: if files_found: print(' Path found and looks valid!') return True else: print(helper.color.BOLD + ' WARNING: Path found, but does not look a valid path!' + helper.color.END) else: print(helper.color.BOLD + ' ERROR: Path does not found?' + helper.color.END) # Pass trought, some path was wrong selection = raw_input(" Do you want to give that path again? 
[Y/n] ") if selection == 'Y' or selection == 'y' or selection == None or selection == '': return False return True else: print(' ERROR in validate_target_path validation') raise def get_system_map_path(self): ''' Virtual function ''' def is_os(self, os_name): self.debug_print("TargetOs::is_os") if self.get_name() == os_name: return True return False def copy_binaries(self): ''' Virtual function ''' def get_sat_module_paths(self): return self._sat_module_paths def get_debug_paths(self): return "" def debug_print(self, string): if self._debug: print string ``` #### File: satt/devel/command.py ```python import os import sys import argparse import subprocess from satt.common import envstore ################################## # Command file for satt build ################################## class_name = "SattDevel" # private satt_devel_help_scr_size = 18 satt_devel_help_indent_size = 22 class HelpTextFormatter(argparse.HelpFormatter): def _split_lines(self, text, width): if text.startswith('#'): return text[1:].splitlines() return argparse.HelpFormatter._split_lines(self, text, width) class ScriptData: Desc = '' Cmd = '' def __init__(self, desc, cmd): self.Desc = desc self.Cmd = cmd class SattDevel: _sat_home = '' _variables = {} _scripts = {} def __init__(self): self._sat_home = envstore.store.get_sat_home() self._variables = envstore.store.get_current() param1 = '' params = [] if len(sys.argv) > 2: param1 = sys.argv[2] if len(sys.argv) > 3: params = sys.argv[3:] self._scripts = {'compile-parser': ScriptData('Compile post-process parser fron source code\n', (os.path.join(self._sat_home, 'satt', 'devel', 'compile_parser.py') + ' ' + param1 + ' '.join(params) )), 'build-ui': ScriptData('Build SATT UI\n', (os.path.join(self._sat_home, 'satt', 'devel', 'build_ui.py') + ' ' + param1 + ' '.join(params) )), 'command': ScriptData('possibility to call sat-xxx commands directly\n', (os.path.join(self._sat_home, 'satt', 'process', 'bin', 'x86_64', param1) + ' ' + ' '.join(params))), 'pack-binaries': ScriptData('Pack object files executed in the target during\n' + ' ' * satt_devel_help_indent_size + ' the trace into tgz package\n', (os.path.join(self._sat_home, 'satt', 'devel', 'pack-binaries') + ' ' + param1 + ' '.join(params) )), 'pack-trace': ScriptData('Pack raw satt trace files into tgz package\n' + ' ' * satt_devel_help_indent_size + ' call sat-rtit-dump etc directly\n', (os.path.join(self._sat_home, 'satt', 'devel', 'pack-trace') + ' ' + param1 + ' '.join(params) ))} def action(self): # Complete words for bash autocomplete if len(sys.argv) > 1: if sys.argv[1] == '--completewords': print ' '.join(self._scripts.keys()) sys.exit(0) parser = argparse.ArgumentParser(description='satt devel', formatter_class=HelpTextFormatter) help_txt = '#Devel script to run:\n' for s in sorted(self._scripts.keys()): help_txt += ' ' + s.ljust(satt_devel_help_scr_size) + ': ' + self._scripts[s].Desc parser.add_argument('script', action='store', help=help_txt) self._args, additionals = parser.parse_known_args() if self._args.script in self._scripts.keys(): os.system(self._scripts[self._args.script].Cmd) else: print "Unknown command: '" + self._args.script + "'" sys.exit(-1) ``` #### File: satt/process/command.py ```python import os import re import sys import glob import shutil import argparse import subprocess import fnmatch from satt.common import envstore from satt.process.linkmodules import LinkModules from satt.process.binary_patch import BinaryPatch from satt.common.targetos import targetos 
################################## # Command file for satt Process ################################## class_name = "SattProcess" IGNORE_SAT_MODULE_ON_PROCESSING = False NO_PATCHING_IF_ALREADY_PATCHED = False def intermediate_work(params): import subprocess import os commands = params[0] rm_files = params[1] for c in commands: subprocess.call(c, shell=True) for f in rm_files: os.remove(f) class SattProcess: _bin_path = '' _trace_folder_path = '' _post_process_bin_path = '' _official_build = False _args = None _sat_home = '' _variables = {} _os = None # ===============================================# def __init__(self): self._sat_home = envstore.store.get_sat_home() self._satt_venv_bin = envstore.store.get_sat_venv_bin() self._post_process_bin_path = os.path.join(self._sat_home, 'satt', 'process', 'bin') self._variables = envstore.store.get_current() self._os = targetos.get_instance() self.ParseArguments() # ===============================================# def action(self): if not os.path.exists(self._os._trace_path): print "ERROR: SATT trace '" + self._os._trace_path + "' not found!" return if self._args.rtit: print ("*************************") print ("* Processing RTIT trace *") print ("*************************") else: print ("************************") print ("* Processing IPT trace *") print ("************************") self.RemoveHostFileCache() self.CopyBinariesToTraceFolder() self.PatchKernelBinaries() if self._os.is_os('Linux') or self._os.is_os('Yocto'): self.AdjustKernelVma() self.DecodeRawPtiData() retval = 0 debug = False if self._args.debug_level: debug = True self.generate_model(debug, self._args.debug_level) if retval == 0: self.DemangleSymbols() # Adding SatVersion into satstats requires changes into sat ui backend # self.SatVersionIntoSatstats() subprocess.call(os.path.join(self._post_process_bin_path, 'post') + ' ' + self._os._trace_path + " | tee -a " + os.path.join(self._os._trace_path, self._os._trace_path + '-process.log'), shell=True) subprocess.call(os.path.join(self._post_process_bin_path, 'pack') + ' ' + self._os._trace_path + " | tee -a " + os.path.join(self._os._trace_path, self._os._trace_path + '-process.log'), shell=True) else: print "**************************************" print "** Uups, Processing Failed **" print "** - please, try to trace again **" print "**************************************" # ===============================================# def ParseArguments(self): parser = argparse.ArgumentParser(description='satt process') parser.add_argument('-d', '--debug', action='store', dest="debug_level", type=int, help='Enable parser debug output. Level: 0 .. 
x', required=False) parser.add_argument('-i', '--ipt', action='store_true', help='Process IPT traces', required=False) parser.add_argument('-r', '--rtit', action='store_true', help='Process RTIT traces', required=False) parser.add_argument('-p', '--patching_disable', action='store_true', help='Do not patch modules in case already patched', required=False) parser.add_argument('TRACE_PATH', action='store', help='trace path') self._args = parser.parse_args() self._bin_path = os.path.join(self._sat_home, 'lib', 'post-process') self._os._trace_path = self._args.TRACE_PATH if self._os._trace_path[-1:] == "/" or self._os._trace_path[-1:] == "\\": self._os._trace_path = self._os._trace_path[:-1] # If called with absolute path if os.path.isabs(self._os._trace_path): savedPath = os.getcwd() self._trace_folder_path = os.path.realpath(os.path.join(self._os._trace_path, '..')) self._os._trace_path = os.path.basename(os.path.normpath(self._os._trace_path)) os.chdir(self._trace_folder_path) # ===============================================# def RemoveHostFileCache(self): if os.path.exists(os.path.join(self._os._trace_path, 'binaries', 'sat-path-cache', 'cache')): os.remove(os.path.join(self._os._trace_path, 'binaries', 'sat-path-cache', 'cache')) # ===============================================# def CopyBinariesToTraceFolder(self): kernel_path = envstore.store.get_variable('sat_path_kernel') modules_path = envstore.store.get_variable('sat_path_modules') if not os.path.exists(os.path.join(self._os._trace_path, 'binaries', 'kernel')): os.makedirs(os.path.join(self._os._trace_path, 'binaries', 'kernel')) # 32-bit if os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso32-sysenter.so.dbg')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso32-sysenter.so.dbg'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32-sysenter.so')) elif os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso32-sysenter.so')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso32-sysenter.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32-sysenter.so')) elif os.path.isfile(os.path.join(os.path.dirname(kernel_path), 'vdso', 'vdso32.so')): shutil.copyfile(os.path.join(os.path.dirname(kernel_path), 'vdso', 'vdso32.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32-sysenter.so')) # Yocto arch/x86/entry/vdso elif os.path.isfile(os.path.join(os.path.dirname(kernel_path), 'arch', 'x86', 'entry', 'vdso', 'vdso32.so.dbg')): shutil.copyfile(os.path.join(os.path.dirname(kernel_path), 'arch', 'x86', 'entry', 'vdso', 'vdso32.so.dbg'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32.so')) elif os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso32.so')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso32.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32.so')) # Ubuntu # TODO check build-id to get debug version # TODO check if shell is used elif os.path.isfile(os.path.join(modules_path, 'vdso', 'vdso32.so')): shutil.copyfile(os.path.join(modules_path, 'vdso', 'vdso32.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso32.so')) # 64-bit if os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso64.so.dbg')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso64.so.dbg'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) elif 
os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso64.so')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'vdso', 'vdso64.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) elif os.path.isfile(os.path.join(os.path.dirname(kernel_path), 'vdso', 'vdso64.so')): shutil.copyfile(os.path.join(os.path.dirname(kernel_path), 'vdso', 'vdso64.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) # Yocto elif os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso64.so.dbg')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso64.so.dbg'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) elif os.path.isfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso64.so')): shutil.copyfile(os.path.join(kernel_path, 'arch', 'x86', 'entry', 'vdso', 'vdso64.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) # Ubuntu # TODO check build-id to get debug version # TODO check if shell is used elif os.path.isfile(os.path.join(modules_path, 'vdso', 'vdso64.so')): shutil.copyfile(os.path.join(modules_path, 'vdso', 'vdso64.so'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vdso64.so')) # Ubuntu if os.path.isfile(self._os.get_system_map_path()): shutil.copyfile(self._os.get_system_map_path(), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'System.map')) # copy original vmlinux to trace/binaries folder if os.path.isfile(self._os.get_vmlinux_path()): # extract_vmlinux = os.path.join(kernel_path, 'scripts', 'extract-vmlinux') if os.path.isfile(extract_vmlinux) and 'vmlinuz' in os.path.basename(self._os.get_vmlinux_path()): subprocess.call(extract_vmlinux + ' ' + self._os.get_vmlinux_path() + " > " + os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vmlinux_'), shell=True) else: shutil.copyfile(self._os.get_vmlinux_path(), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vmlinux_')) # create modules dir if not os.path.exists(os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules')): os.makedirs(os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules')) # copy original kmods to trace/binaries folder modules_path = envstore.store.get_variable('sat_path_modules') if os.path.exists(modules_path): if self._os.is_os('Linux') or self._os.is_os('Yocto'): kmod_pattern = '*.ko' for root, dirs, files in os.walk(modules_path): for filename in fnmatch.filter(files, kmod_pattern): shutil.copyfile(os.path.join(root, filename), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules', os.path.basename(filename)+'_')) else: for f in glob.glob(os.path.join(modules_path, '*.ko')): shutil.copyfile(f, os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules', os.path.basename(f)+'_')) # move&rename sat.ko from trace binaries folder to binaries/kernel/modules/sat.ko_ if not self._official_build: if not os.path.exists(os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules', 'sat.ko_')): if os.path.exists(os.path.join(self._os._trace_path, 'binaries', 'sat.ko')): shutil.move(os.path.join(self._os._trace_path, 'binaries', 'sat.ko'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules')) os.rename(os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules', 'sat.ko'), os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules', 'sat.ko_')) self._os.copy_binaries() # ===============================================# def PatchKernelBinaries(self): if 
not self._args.patching_disable: bp = BinaryPatch(self._variables['sat_target_build'], os.path.realpath(self._os._trace_path)) retstr = bp.patchModules(self._official_build, NO_PATCHING_IF_ALREADY_PATCHED, IGNORE_SAT_MODULE_ON_PROCESSING) if retstr == "no_dump": # dump files not found, perform linking print "No dump files found, perform linking for modules" lm = LinkModules(self._variables['sat_target_build'], os.path.realpath(self._os._trace_path)) lm.linkModules(self._official_build, IGNORE_SAT_MODULE_ON_PROCESSING) # ===============================================# def AdjustKernelVma(self): print "AdjustKernelVma" kernel_address = 0 python_path = 'python' if self._satt_venv_bin: python_path = os.path.join(self._satt_venv_bin, python_path) sub_python = os.popen(python_path + ' ' + os.path.join(self._sat_home, 'satt', 'common', 'sbparser', 'sbdump.py') + ' -c < ' + os.path.join(self._os._trace_path, "sideband.bin")) if sub_python: while True: line = sub_python.readline() if not line: break match = re.search("codedump @ ([0-9a-fA-F]+),\s+(\d+): (\S+)", line) if match: if match.group(3).find("vmlinux") >= 0: kernel_address = "0x" + match.group(1) break if kernel_address > 0: # Adjust vma for vmlinux file vmlinux_path = os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vmlinux') if os.path.exists(vmlinux_path): data = subprocess.check_output(['objdump', '-h', vmlinux_path]) for line in data.splitlines(): match = re.search('\d+ \.text\s+(\S+)\s+(\S+)', line) if match: orig_address = match.group(2) offset = int(kernel_address, 16) - int(orig_address, 16) subprocess.call(['objcopy', '--adjust-vma', str(offset), vmlinux_path]) # Adjust vma for System.map file systemmap_path = os.path.join(self._os._trace_path, 'binaries', 'kernel', 'System.map') if os.path.exists(systemmap_path): os.rename(systemmap_path, systemmap_path + '_') offset = 0; inf = open(systemmap_path+'_', 'r') outf = open(systemmap_path, 'w') while True: line = inf.readline() if not line: break match = re.search('(\S+)\s+(\S)\s+(\S+)', line) if match: if match.group(2) == 'T' and match.group(3) == '_text': offset = int(kernel_address, 16) - int(match.group(1), 16) addr = format(int(match.group(1), 16) + offset, 'x') addr = addr.zfill(len(match.group(1))) line = addr + ' ' + match.group(2) + ' ' + match.group(3) + '\n' outf.write(line) # ===============================================# def DecodeRawPtiData(self): if not os.path.isfile(os.path.join(self._os._trace_path, 'cpu0.bin')): if os.path.isfile(os.path.join(self._os._trace_path, 'stma.raw')): print 'Decode cpu rtit streams from PTI stream' savedPath_ = os.getcwd() os.chdir(self._os._trace_path) size_ = os.path.getsize('stma.raw') subprocess.call(os.path.join(self._post_process_bin_path, 'sat-stp-dump') + ' -s ' + str(size_) + ' -m < stma.raw', shell=True) os.chdir(savedPath_) # ===============================================# def generate_model(self, debug, debug_level): # Make collection import multiprocessing max_procs = multiprocessing.cpu_count() command = '' collection_file = os.path.join(self._os._trace_path, self._os._trace_path + '.collection') # commands for rtit/ipt collection_make_version = '' collection_model_version = '' sat_collection_cbr_version = '' sat_collection_stats_version = '' sat_collection_tasks_version = '' if self._args.rtit: collection_make_version = 'sat-rtit-collection-make' collection_model_version = 'sat-rtit-collection-model' sat_collection_cbr_version = 'sat-rtit-collection-cbr' sat_collection_stats_version = 
'sat-rtit-collection-stats' sat_collection_tasks_version = 'sat-rtit-collection-tasks' else: collection_make_version = 'sat-ipt-collection-make' collection_model_version = 'sat-ipt-model' sat_collection_cbr_version = 'sat-ipt-collection-cbr' sat_collection_stats_version = 'sat-ipt-collection-stats' sat_collection_tasks_version = 'sat-ipt-collection-tasks' print 'MAKING COLLECTION ' + collection_file command = (os.path.join(self._post_process_bin_path, collection_make_version) + ' -s ' + os.path.join(self._os._trace_path, 'sideband.bin')) cpu_files = glob.glob(os.path.join(self._os._trace_path, 'cpu*.bin')) cpu_files.sort() for i in cpu_files: command += ' -r ' + i command += ' | grep -v "^#" > ' + collection_file #print "COMMAND=" + command # Execute: MAKE COLLECTION ret = subprocess.call(command, shell=True) # Building execution model print 'BUILDING EXECUTION MODEL ON COLLECTION ' + collection_file python_path = 'python' if self._satt_venv_bin: python_path = os.path.join(self._satt_venv_bin, python_path) path_helper = (python_path + " " + os.path.join(self._sat_home, 'satt', 'process', 'binary_server.py') + " '%s' " + "-p " + os.path.join(self._post_process_bin_path, 'sat-path-map') + " " + "-k " + os.path.join(self._os._trace_path, 'binaries', 'kernel', 'vmlinux') + " " + "-m " + os.path.join(self._os._trace_path, 'binaries', 'kernel', 'modules') + " ") path_helper += "-d " + self._os.get_debug_paths() + " " # Host tracing, local host files can be used for processing if envstore.get_instance().get_variable('sat_control_bus') == 'SHELL': path_helper += "--host_tracing " path_helper += (os.path.join(self._os._trace_path, 'binaries', 'symbols') + " " + os.path.join(self._os._trace_path, 'binaries') + " " + os.path.join(self._os._trace_path, 'binaries', 'sat-path-cache')) command = (os.path.join(self._post_process_bin_path, collection_model_version) + ' -C ' + collection_file + ' -m ' + os.path.join(self._os._trace_path, 'binaries', 'kernel', 'System.map') + ' -f "' + path_helper + '"' ' -F ' + os.path.join(self._os._trace_path, 'binaries', 'sat-path-cache') + ' -P ' + str(max_procs) + ' -o ' + os.path.join(self._os._trace_path, self._os._trace_path + '-%u.model') + ' -w ' + os.path.join(self._os._trace_path, self._os._trace_path + '-%u.lwm') + ' -n ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satsym') + ' -e ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satmod') + ' -h ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satmodh')) if debug: command += ' -d' command += ' -D' * debug_level # Execute: BUILD MODELS ret = subprocess.call(command, shell=True) print "INTERMEDIATE PROCESSING", print "AND SHRINKING MODEL" if not debug else '' tid_files = glob.glob(os.path.join(self._os._trace_path, self._os._trace_path + '-*.model')) tid_files.sort() for i, t in enumerate(tid_files): tid_files[i] = os.path.splitext(os.path.basename(t))[0] tid_string = '\n'.join(tid_files) command = ('echo "' + tid_string + '" | xargs -n 1 --max-procs=' + str(max_procs) + ' -I PER_TID bash -c ' + '"' + os.path.join(self._post_process_bin_path, 'sat-intermediate')) if debug: command += ' -d ' command += (' -w ' + os.path.join(self._os._trace_path, 'PER_TID.lwm') + ' -o ' + os.path.join(self._os._trace_path, 'PER_TID.sat') + ' ' + os.path.join(self._os._trace_path, 'PER_TID.model') + ';') if not debug: command += (' rm ' + os.path.join(self._os._trace_path, 'PER_TID.model') + ' ' + os.path.join(self._os._trace_path, 'PER_TID.lwm') + ';') command += (' ' + 
os.path.join(self._post_process_bin_path, 'sat-shrink-output') + ' ' + os.path.join(self._os._trace_path, 'PER_TID.sat')) command += ('" | ' + os.path.join(self._post_process_bin_path, 'sat-post') + ' -o ' + os.path.join(self._os._trace_path, self._os._trace_path + '.log')) # Execute: INTERMEDIATE subprocess.call(command, shell=True) if not debug: print "MERGING MODEL" command = (os.path.join(self._post_process_bin_path, 'sat-merge') + ' ' + os.path.join(self._os._trace_path, self._os._trace_path + '-*.sat') + ' > ' + os.path.join(self._os._trace_path, self._os._trace_path + '.sat0')) subprocess.call(command, shell=True) # Generate satcbr command = (os.path.join(self._post_process_bin_path, sat_collection_cbr_version) + ' ' + collection_file + ' | grep -v "^#" > ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satcbr')) subprocess.call(command, shell=True) # Generate satstats command = (os.path.join(self._post_process_bin_path, sat_collection_stats_version) + ' ' + collection_file + ' | grep -v "^#" > ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satstats')) subprocess.call(command, shell=True) # Generate satp command = (os.path.join(self._post_process_bin_path, sat_collection_tasks_version) + ' ' + collection_file + ' | grep -v "^#" > ' + os.path.join(self._os._trace_path, self._os._trace_path + '.satp')) subprocess.call(command, shell=True) print "REMOVING PER-PROCESS MODELS" sat_files = glob.glob(os.path.join(self._os._trace_path, self._os._trace_path + '-*.sat')) for f in sat_files: os.remove(f) # ===============================================# def DemangleSymbols(self): # demangle symbol names in satsyms file print "demangle symbols.." operators = ['<<=', '>>=', '->*', '<<', '>>', '<=', '>=', '->', '>', '<'] satsym_file = self._os._trace_path + '/' + self._os._trace_path + '.satsym' tmpfile = self._os._trace_path + '/' + self._os._trace_path + '.satsym_' if os.path.isfile(satsym_file): os.rename(satsym_file, tmpfile) fin = open(tmpfile, 'r') fout = open(satsym_file, 'w') while True: output = '' line = fin.readline() if not line: break demagle_success = False sid, sym = line.rstrip().split(';') plt = '' pre_dl = '' if sym.startswith('__dl_'): sym = sym[5:] pre_dl = '__dl_' if sym.startswith('_Z'): if sym.endswith('@plt'): sym = sym[:-4] plt = '@plt' dec = subprocess.check_output('c++filt -p ' + sym, shell=True).rstrip() # rip off possible template definitions to reduce symbol size if dec != '' and dec[:2] != '_Z': demagle_success = True # exclude operator having '<' or '>' blocked_area = [-9, -9] operidx = dec.find("::operator") if operidx >= 0: for op in operators: if dec[operidx+10:(operidx+10 + len(op))] == op: blocked_area[0] = operidx blocked_area[1] = operidx+10+len(op) idx = 0 while True: idx = dec.find('>', idx) if idx < 0: break if idx >= blocked_area[0] and idx <= blocked_area[1]: idx += 1 continue idx += 1 ridx = dec.rfind('<', 0, idx) while ridx >= blocked_area[0] and ridx <= blocked_area[1]: ridx = dec.rfind('<', 0, ridx) if ridx < 0: demagle_success = False print "Warning: template parenthesis does not match!!! 
(" + str(sid) + ")" break dec = dec[:ridx] + ';' * (idx - ridx) + dec[idx:] if demagle_success: output = sid + ';' + pre_dl + dec.replace(';', '') + plt + ';' + sym + '\n' if not demagle_success: output = sid + ';' + sym + ';' + sym + '\n' fout.write(output) os.remove(tmpfile) # ===============================================# def SatVersionIntoSatstats(self): satstats_file = self._os._trace_path + '/' + self._os._trace_path + '.satstats' ver = envstore.store.get_sat_version() f = open(satstats_file, 'w+') f.write('VERSION|' + ver + '|SATT tool version used for post-processing') ``` #### File: satt/process/linkmodules.py ```python import subprocess import os import sys import shutil import glob import platform class LinkModules: sat_home = '' trace_folder = '' sat_target_build = '' sideband_dump_bin = '' linux_kernel_path = '' kernel_modules_path = '' kernel_module_target_path = '' def __init__(self, target_build, trace_folder): # Set Paths # GET Environment variables self.sat_home = os.environ.get('SAT_HOME') self.sat_target_build = target_build self.trace_folder = trace_folder self.sideband_dump_bin = os.path.join(self.sat_home,"bin","bin","sat-sideband-dump") print "self.sideband_dump_bin = " + self.sideband_dump_bin self.linux_kernel_path = os.environ.get('SAT_PATH_KERNEL') self.kernel_modules_path = os.environ.get('SAT_PATH_MODULES') print "self.sat_target_build = " + self.sat_target_build print "os.environ.get('SAT_PATH_MODULES') = " + os.environ.get('SAT_PATH_MODULES') print "self.linux_kernel_path = " + self.linux_kernel_path print "self.kernel_modules_path = " + self.kernel_modules_path self.kernel_module_target_path = os.path.join(self.trace_folder,"binaries","ld-modules") def getModulesFromSb(self, trace_folder): # Get module info from Sideband p = subprocess.Popen(self.sideband_dump_bin + ' < ' + trace_folder + '/sideband.bin', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) modules = {} for line in iter(p.stdout.readline, ''): if "module" in line: row = line.split() modules[row[5]] = row[4][:-1] p.wait() return modules def createLinkingDir(self): # # Empty and Create Directory for linked kernel modules # if os.path.isdir(self.kernel_module_target_path): shutil.rmtree(self.kernel_module_target_path) os.mkdir(self.kernel_module_target_path) def getSatModuleFromDevice(self): # # Get sat.ko module from the device to the KERNEL_MODULE_PATH # curdir = os.getcwd() os.chdir(self.kernel_modules_path) subprocess.call(["adb", "pull", "/data/sat.ko"]) if os.path.isfile(self.kernel_modules_path + "sat.ko"): if os.path.isfile(self.sat_home + "/kernel-module/sat.ko"): shutil.copy(self.sat_home + "/kernel-module/sat.ko", self.kernel_modules_path) os.chdir(curdir) def getModulesLists(self): # Get list of modules from self.kernel_modules_path modules_in_fs = {} curdir = os.getcwd() os.chdir(self.kernel_modules_path) kernel_modules = glob.glob("*.ko") for km in kernel_modules: modules_in_fs[km.lower().replace('-', '_')[:-3]] = km os.chdir(curdir) return kernel_modules, modules_in_fs def createSystemMapLd(self): # # Create system.map.ld for the linking # system_map_ld_file = open(self.linux_kernel_path + "system.map.ld", "w") curdir = os.getcwd() os.chdir(self.linux_kernel_path) systen_map_file = open(self.linux_kernel_path + "System.map") for line in systen_map_file: items = line.split() system_map_ld_file.write("--defsym=" + items[2] + "=0x" + items[0] + "\n") systen_map_file.close() system_map_ld_file.close() os.chdir(curdir) def linkedModulesExists(self): if 
os.path.isdir(self.kernel_module_target_path): return True return False def linkModules(self, official, ignore_sat_module=False): # Check if build is official and linked modules already exist # if so, we are good to return if official and self.linkedModulesExists(): return self.createLinkingDir() # Check if SAT module fetch is needed if not official and not ignore_sat_module: self.getSatModuleFromDevice() modules = self.getModulesFromSb(self.trace_folder) # # Get Architecture from the vmlinux or first found kernel modules # kernel_modules, modules_in_fs = self.getModulesLists() arch = '64bit' if os.path.isfile(self.linux_kernel_path + "vmlinux"): arch = platform.architecture(self.linux_kernel_path + 'vmlinux')[0] elif os.path.isfile(self.kernel_modules_path + kernel_modules[0]): arch = platform.architecture(self.kernel_modules_path + kernel_modules[0])[0] self.createSystemMapLd() # # Link all the modules # curdir = os.getcwd() os.chdir(self.linux_kernel_path) ko_link_output = curdir + "/" + self.trace_folder + "/ko-link-output.log" print "Link kernel modules:" print "(Linker output written into '" + ko_link_output + "')" if os.path.isfile(ko_link_output): os.system("rm " + ko_link_output + " > /dev/null") index_count = 0 module_count = len(modules) for module, addr in modules.items(): index_count += 1 print "\rProcessing: " + str(index_count * 100 / module_count).rjust(3," ") + "%", sys.stdout.flush() if ignore_sat_module and module == "sat": continue if module in modules_in_fs.keys(): os.system("echo '\n============================================================================' >> " + ko_link_output + " 2>&1") os.system("echo '" + modules_in_fs[module] + " : " + module + " = " + addr + "' >> " + ko_link_output + " 2>&1") os.system("echo '============================================================================' >> " + ko_link_output + " 2>&1") if arch == '64bit': os.system("ld -static -z muldefs -m elf_x86_64 @system.map.ld --oformat=elf64-x86-64 --section-start=.text=" + addr + " " + self.kernel_modules_path + modules_in_fs[module] + " -o " + self.kernel_module_target_path + modules_in_fs[module] + " >> " + ko_link_output + " 2>&1") else: os.system("ld -static -z muldefs -m elf_i386 @system.map.ld --oformat=elf32-i386 --section-start=.text=" + addr + " " + self.kernel_modules_path + modules_in_fs[module] + " -o " + self.kernel_module_target_path + modules_in_fs[module] + " >> " + ko_link_output + " 2>&1") os.chdir(curdir) print "" ``` #### File: trace/logger/panic.py ```python """ PanicLogger RAM-tracing """ import sys import time from logger import Logger class PanicLogger(Logger): """ Panic logger """ def __init__(self, control): # Base class init call Logger.__init__(self, control) # Add default kernel module parameter for RAM-tracing self._kernel_module_parameters += " trace_method=1 sideband_log_method=1" # Add more option to command line input self._parser.add_argument('-p', '--panic', action='store', help='Panic tracing mode: 1=Normal, 2=Hooked(default)', required=False, default=2) self._parser.add_argument('-s', '--sideband', action='store', help='Panic tracing mode: 0=Off, 1=On(default)', required=False, default=1) self._parser.add_argument('-g', '--gbuffer', action='store', help='Dump trace data to gbuffer: 0=Off, 1=On(default)', required=False, default=1) self._parser.add_argument('-u', '--userspace', action='store', help='Exclude user space: 0=Off, 1=On(default)', required=False, default=1) self._parser.add_argument('-k', '--kernel', action='store', help='Exclude kernel: 
0=Off(default), 1=On', required=False, default=0) self._parser.add_argument('-d', '--dump', action='store', help='Dump kernel and kernel modules for processing: 0=Off, 1=On(default)', required=False, default=0) self.args = self._parser.parse_args() self._kernel_module_parameters += " panic_tracer=" + str(self.args.panic) self._kernel_module_parameters += " panic_sideband=" + str(self.args.sideband) self._kernel_module_parameters += " panic_gbuffer=" + str(self.args.gbuffer) self._kernel_module_parameters += " exclude_userspace=" + str(self.args.userspace) self._kernel_module_parameters += " exclude_kernel=" + str(self.args.kernel) def initialize(self): self._debug_print("PanicLogger::initialize") # Initialize Logger base class Logger.initialize(self) # Call start_tracing earlier to stop execution earlier self.start_tracing() def start_tracing(self): self._debug_print("start_tracing") trace_name, trace_path = self.get_trace_name("Enter <<trace name>> to start panic tracing? :") if trace_name: self.set_trace_path(trace_path, trace_name) self.get_build_info() # TODO Problem, there is no Sideband.bin info yet # Quick Fix # Start tracing, wait 100ms, Stop tracing, fetch sideband info Logger.start_tracing(self) time.sleep(0.2) Logger.stop_tracing(self) time.sleep(0.2) Logger.get_sideband_data(self) self.dump_kernel() self.dump_linux_gate() self.dump_kernel_modules() Logger.start_tracing(self) print "" print "Panic tracing activated" print "If panic happens, wait 10s and reboot device." print "" print "When device boot up run following command:" print "sat-panic-fetch " + self.trace_name sys.exit(0) else: print "Panic Tracer did not get started" def stop_tracing(self): return def get_data(self): return def get_trace_data(self): return ``` #### File: trace/logger/usb.py ```python """ Logger Class """ from logger import Logger from satt.common import envstore class UsbLogger(Logger): """ USB logger """ def __init__(self, control): Logger.__init__(control) self._sat_home_path = envstore.store.get_sat_home() print "init" def initialize(self): print "initialize" def start_tracing(self): print "start_tracing" def stop_tracing(self): print "stop_tracing" def get_sideband_data(self): print "get_sideband_data" def get_trace_data(self): print "get_trace_data" ``` #### File: visualize/backend/db_import.py ```python import psycopg2 import psycopg2.extras import colorsys from struct import pack from io import BytesIO import glob import os import argparse from time import gmtime, strftime import status as stat import pickle import sys status = stat.getStatus() conn = psycopg2.connect( dbname=status.getDbConfig('dbname'), user=status.getDbConfig('user'), password=status.getDbConfig('password')) curs = conn.cursor() named_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) cpu_count = 0 insert_id = 0 def prepare_text(dat): cpy = BytesIO() for row in dat: cpy.write('\t'.join([repr(x) for x in row]) + '\n') return(cpy) def get_separator(filename): separator = '|' with open(filename, 'r') as inF: for line in inF: if ';' in line: separator = ';' break return separator def importCSV(insert_id, fn, fpath): global cpu_count schema = 't' + str(insert_id) curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.tgid (id serial, tgid int4, pid int4, name varchar(256), color varchar(7), PRIMARY KEY(id))') filename = fn + '.satp' file_columns = ('id', 'tgid', 'pid', 'name') curs.copy_from(file=open(filename), sep='|', table=schema + '.tgid', columns=file_columns) conn.commit() # id | ts | call stack level | 
OoT | InT | ins count | call (c/r/e/u) | cpu | thread_id | mod | sym curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.ins (id serial, ts bigint, level smallint, ts_oot bigint, ts_int bigint, ins_count int, ' + 'call varchar(1), cpu smallint, thread_id int, module_id smallint, symbol_id int ) ' + 'with (fillfactor=100)') filename = fn + '.sat0' file_columns = ('ts', 'level', 'ts_oot', 'ts_int', 'ins_count', 'call', 'cpu', 'thread_id', 'module_id', 'symbol_id') curs.copy_from(file=open(filename), sep='|', table=schema + '.ins', columns=file_columns) conn.commit() filename = fn + '.satmod' separator = get_separator(filename) curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.module (id serial, module varchar(1024), PRIMARY KEY(id)) with (fillfactor=100)') filename = fn + '.satmod' file_columns = ('id', 'module') curs.copy_from(file=open(filename), sep=separator, table=schema + '.module', columns=file_columns) conn.commit() # for backward compatibility to be removed later filename = fn + '.satsym' separator = get_separator(filename) # Find longest symbol name sep_cnt = 0 longest_sym = 0 longest_fullsym = 0 with open(filename, 'r') as inF: for line in inF: if sep_cnt == 0: sep_cnt = line.count(separator) if sep_cnt == 1: id_, sym_ = line.split(separator) if longest_sym < len(sym_): longest_sym = len(sym_) else: id_, sym_, fsym_ = line.split(separator) if longest_sym < len(sym_): longest_sym = len(sym_) if longest_fullsym < len(fsym_): longest_fullsym = len(fsym_) file_colums = None if sep_cnt == 1: curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.symbol (id serial, symbol varchar(' + str(longest_sym) + '), PRIMARY KEY(id)) with (fillfactor=100)') file_columns = ('id', 'symbol') else: curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.symbol (id serial, symbol varchar(' + str(longest_sym) + '), fullsymbol varchar(' + str(longest_fullsym) + '), PRIMARY KEY(id)) with (fillfactor=100)') file_columns = ('id', 'symbol', 'fullsymbol') filename = fn + '.satsym' curs.copy_from(file=open(filename), sep=separator, table=schema + '.symbol', columns=file_columns) conn.commit() curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.cbr (ts bigint, cpu smallint, acbr smallint, ecbr smallint, PRIMARY KEY(ts, cpu)) ' + 'with (fillfactor=100)') filename = fn + '.satcbr' file_columns = ('ts', 'cpu', 'acbr', 'ecbr') curs.copy_from(file=open(filename), sep='|', table=schema + '.cbr', columns=file_columns) conn.commit() # Import extra trace info to db if os.path.isfile(fn + '.satstats'): curs.execute('CREATE TABLE IF NOT EXISTS ' + schema + '.info (key varchar(256), value bigint, info varchar(2048), PRIMARY KEY(key))') filename = fn + '.satstats' file_columns = ('key', 'value', 'info') curs.copy_from(file=open(filename), sep='|', table=schema + '.info', columns=file_columns) conn.commit() # Import screen shot from the device to DB if os.path.isfile(fpath + '/screen.png'): f = open(fpath + '/screen.png', 'rb') filedata = psycopg2.Binary(f.read()) f.close() curs.execute("INSERT INTO public.screenshots(id, file_data) VALUES (%s,%s)", (insert_id, filedata, )) conn.commit() curs.execute("""UPDATE public.traces SET screenshot = TRUE WHERE id = %s;""", (insert_id, )) conn.commit() # Calculate global CPU count curs.execute("""select max(cpu) from """ + schema + """.ins""") conn.commit() cpu_count = curs.fetchone()[0] + 1 curs.execute("""UPDATE public.traces SET cpu_count = %s WHERE id = %s;""", (cpu_count, insert_id)) conn.commit() return def bugFixHack1(schema): curs.execute('DELETE FROM ' + schema 
+ '.ins WHERE ts > 2147483646;') return def createIndexs(schema): curs.execute('ALTER TABLE ' + schema + '.ins ADD CONSTRAINT id_pk PRIMARY KEY(id);') curs.execute('CREATE INDEX ts_idx ON ' + schema + '.ins USING btree (ts) with (fillfactor=100);') conn.commit() curs.execute('CREATE INDEX ins_idx ON ' + schema + '.ins USING btree (thread_id, level, ts, module_id, symbol_id ) with (fillfactor=100);') conn.commit() def RGBToHTMLColor(rgb_tuple): """ convert an (R, G, B) tuple to #RRGGBB """ hexcolor = '#%02x%02x%02x' % rgb_tuple return hexcolor def createColors(schema): # Create field for the HTML-color # named_cur.execute("""SELECT * FROM """ + schema + """.tgid GROUP BY pid,tgid ORDER by pid,tgid""") # Get process and task id's named_cur.execute("""select pid, array_agg(id) as ids, array_agg(tgid) as tgids, array_agg(name) name_arr from ( select * from """ + schema + """.tgid order by pid, tgid ) as s1 group by pid order by pid""") rows = named_cur.fetchall() # Calculate the colors for processes # Every process should have own color # Every thead in same process should same color, but different lightning processMaxCount = named_cur.rowcount processCounter = 0 for row in rows: x = (1.0 / processMaxCount) * processCounter processCounter += 1 threadCount = len(row.tgids) for tc in range(0, threadCount): y = 0.0 + (0.4 / threadCount) * tc y = 1.0 - y - (0.05 / threadCount) z = 200 + (40 / threadCount) + (50 / threadCount * tc) # Push color back to DB curs.execute("""UPDATE """ + schema + """.tgid SET color = (%s) WHERE id = (%s);""", (RGBToHTMLColor(colorsys.hsv_to_rgb(x, y, z)), row.ids[tc],)) def helperCreateAvgGraphTable(schema, cpux): if cpux == 0: sql_action = """create table """ + schema + """.graph as """ else: sql_action = """insert into """ + schema + """.graph """ print "cpu =", cpux curs.execute(sql_action + """ select * from ( select gen_ts, thread_id, sum(part2)::int, cpu, count(*) over (partition by gen_ts/7980) as ts_count from ( select * , -- FINAL CALCULATION IS DONE HERE!!!! 
CASE WHEN row_number() over(partition by id ) = 1 AND (gen_ts + 7980) < (end_ts) THEN -- THIS NEED Percent calcutation to figure out exec size -- ( Amount * Multiplier ) / slice --Multiplier (ins_count * (7980 - ( great - gen_ts))::bigint) / (len + 1)::real ELSE -- Check if we are in row 2 or so -- ( Amount * Multiplier ) / slice -- OPTION 1 or --(ins_count * CASE WHEN end_ts - great + 1 >= 7980 THEN 7980 ELSE end_ts - great + 1 END) / (len + 1) -- OPTION 2 check witch one is faster CASE WHEN row_number() over(partition by id ) = 1 THEN ins_count::real ELSE (ins_count * CASE WHEN end_ts - great + 1 >= 7980 THEN 7980 ELSE end_ts - great + 1 END::bigint) / (len + 1)::real END END as part2 from ( select *, greatest(ts, gen_ts) as great from ( select end_ts - ts as len, *, generate_series(ts - (ts % 7980), end_ts, 7980) as gen_ts from ( --SELECT lead(ts) over (order by id) -1 as end_ts ,* SELECT lead(ts) over () -1 as end_ts ,* FROM """ + schema + """.ins WHERE cpu = """ + str(cpux) + """ ORDER BY ID ) s2 ) s3 ) s4 ) s5 group by 1,2, cpu order by gen_ts ) s6 where ts_count = 1 and sum <> 0;""") conn.commit() # This will create average Graph table which will help querying graph out of big ins traces # We will use 1us resultion which is now 1596 tics def createAvgGraphTable(schema): for x in range(0, cpu_count): helperCreateAvgGraphTable(schema, x) def getTscTick(schema): # Backward compatibility with old traces data = "1330000" # Backward compatibility check, if info table exists curs.execute("select exists(select * from information_schema.tables where table_name=%s and table_schema=%s)", ('info', schema,)) res = curs.fetchone() if res[0]: curs.execute("SELECT value from " + schema + ".info WHERE key = 'TSC_TICK'") data = curs.fetchone()[0] return int(data) # Find out different infos def digTraceInfo(schema, id, trace_path): curs.execute("select max(ts) from " + schema + ".ins") ts = curs.fetchone()[0] tsc_tick = getTscTick(schema) # Lenght in ms length = int(round(ts / tsc_tick)) curs.execute("UPDATE public.traces SET length = %s WHERE id = %s;", (length, id)) conn.commit() # get Build info build_info = getTraceBuildInfo(trace_path) if build_info: curs.execute("UPDATE public.traces SET build = %s, device=%s WHERE id = %s;", (build_info['version'] + '/' + build_info['type'], build_info['name'] + '/' + build_info['platform'], id)) conn.commit() # Load Trace Build info from the file def getTraceBuildInfo(trace_path): if os.path.exists(os.path.join(trace_path, "build_info.p")): build_info = pickle.load(open(os.path.join(trace_path, "build_info.p"), "rb")) else: build_info = None return build_info def main(): global insert_id # # Argument parsing and menu # parser = argparse.ArgumentParser(version='1.0', description='SAT db trace importer', add_help=True) parser.add_argument('-t', action='store', dest='device', help='Device under tracing?', default='VLV') parser.add_argument('-d', action='store', dest='description', help='Explain, what have you traced?', default='Explain, what have you traced?') parser.add_argument('-i', action='store', dest='traceid', help='TraceID?', default=False) parser.add_argument('trace_path', action="store", help='Trace folder path') results = parser.parse_args() print 'path_value =', results.trace_path if os.path.isabs(results.trace_path): TRACE_FOLDER_PATH = os.path.realpath(results.trace_path + '/..') results.trace_path = os.path.basename(os.path.normpath(results.trace_path)) os.chdir(TRACE_FOLDER_PATH) # Look for sat0 file files = glob.glob('./' + results.trace_path + 
'/*.sat0') if not len(files): print "\n Can't find SAT-files from ./" + results.trace_path + '/ folder' print "" parser.print_help() return trace_name = files[0][:-5] trace_path = results.trace_path # # Open Task switch file handles # cpuf = {} # # Create trace metadata row to DB # - id will be schema name 't' + id # try: status.createTracesTable() curs.execute('CREATE TABLE IF NOT EXISTS public.screenshots (id int, file_data bytea not null , PRIMARY KEY(id))') conn.commit() status.rollup_db_schema() if not results.traceid: curs.execute("INSERT INTO public.traces (name, cpu_count, device, created) " + "values (%s, %s, %s, now()) RETURNING id", (results.trace_path, len(cpuf), results.device)) conn.commit() insert_id = curs.fetchone()[0] else: curs.execute("UPDATE public.traces SET name = %s, cpu_count = %s, device = %s " + "WHERE id = %s;", (results.trace_path, len(cpuf), results.device, results.traceid)) conn.commit() insert_id = results.traceid status.update_status(insert_id, status.IMPORT) schema = 't' + str(insert_id) curs.execute('DROP SCHEMA IF EXISTS ' + schema + ';') curs.execute('CREATE SCHEMA ' + schema + ';') print "*************************************\n" print "Import Data" print strftime("%Y-%m-%d %H:%M:%S", gmtime()) importCSV(insert_id, trace_name, trace_path) print "*************************************\n" print "create Indexes" print strftime("%Y-%m-%d %H:%M:%S", gmtime()) createIndexs(schema) print "*************************************\n" print "Create colors for prosesses and threads" print strftime("%Y-%m-%d %H:%M:%S", gmtime()) createColors(schema) print "*************************************\n" print "Calculate avg graph table" print strftime("%Y-%m-%d %H:%M:%S", gmtime()) createAvgGraphTable(schema) print "*************************************\n" print "Dig some trace info for the trace" digTraceInfo(schema, insert_id, results.trace_path) print strftime("%Y-%m-%d %H:%M:%S", gmtime()) print "*************************************\n" print "All Done" print strftime("%Y-%m-%d %H:%M:%S", gmtime()) status.update_status(insert_id, status.READY, results.description) except Exception as e: print "Import Failed: " + str(e) status.update_status(insert_id, status.FAILED, 'Importing to DB Failed!') sys.exit(1) return if __name__ == "__main__": main() # All ok sys.exit(0) ```
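The avg-graph SQL above spreads each sample's instruction count over fixed 7980-tick slices in proportion to how much of the sample overlaps each slice (the "Amount * Multiplier / slice" comments). As a rough illustration only — not part of the importer, with a made-up function name — the same bucketing can be written in plain Python:

```python
# Illustrative sketch of the proportional time-slice bucketing done in SQL above.
SLICE = 7980

def bucket_samples(samples):
    """samples: iterable of (ts, end_ts, ins_count); returns {slice_start: weighted count}."""
    buckets = {}
    for ts, end_ts, ins_count in samples:
        length = end_ts - ts + 1                 # matches the (len + 1) divisor in the SQL
        slice_start = ts - (ts % SLICE)          # matches generate_series(ts - ts % 7980, ...)
        while slice_start <= end_ts:
            lo = max(ts, slice_start)            # greatest(ts, gen_ts)
            hi = min(end_ts, slice_start + SLICE - 1)
            share = ins_count * (hi - lo + 1) / float(length)
            buckets[slice_start] = buckets.get(slice_start, 0.0) + share
            slice_start += SLICE
    return buckets

print(bucket_samples([(7000, 9000, 1000)]))      # one sample split across slices 0 and 7980
```

The per-sample shares always sum back to the original instruction count, which is the property the averaged graph table relies on.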
{ "source": "jni/profile-regionprops", "score": 3 }
#### File: jni/profile-regionprops/regprops.py ```python import numpy as np from scipy import ndimage as nd from skimage import filter as imfilter, measure, io from line_profiler import LineProfiler # threshold and labeling number of objects, statistics about object size and # shape def intensity_object_features(im, sample_size=None): """Segment objects based on intensity threshold and compute properties. Parameters ---------- im : 2D np.ndarray of float or uint8. The input image. adaptive_t_radius : int, optional The radius to calculate background with adaptive threshold. sample_size : int, optional Sample this many objects randomly, rather than measuring all objects. Returns ------- f : 1D np.ndarray of float The feature vector. names : list of string The list of feature names. """ tim1 = im > imfilter.threshold_otsu(im) f, names = object_features(tim1, im, sample_size=sample_size) return f, names def object_features(bin_im, im, erode=2, sample_size=None): """Compute features about objects in a binary image. Parameters ---------- bin_im : 2D np.ndarray of bool The image of objects. im : 2D np.ndarray of float or uint8 The actual image. erode : int, optional Radius of erosion of objects. sample_size : int, optional Sample this many objects randomly, rather than measuring all objects. Returns ------- fs : 1D np.ndarray of float The feature vector. names : list of string The names of each feature. """ lab_im, n_objs = nd.label(bin_im) if sample_size is None: sample_size = n_objs sample_indices = np.arange(n_objs) else: sample_indices = np.random.randint(0, n_objs, size=sample_size) objects = measure.regionprops(lab_im, intensity_image=im) prop_names = measure._regionprops.PROPS.values() properties = [] for i, j in enumerate(sample_indices): properties.append([]) properties[i].append(objects[j].area()) properties[i].append(objects[j].bbox()) properties[i].append(objects[j].moments_central()) properties[i].append(objects[j].centroid()) properties[i].append(objects[j].convex_area()) properties[i].append(objects[j].convex_image()) properties[i].append(objects[j].coords()) properties[i].append(objects[j].eccentricity()) properties[i].append(objects[j].equivalent_diameter()) properties[i].append(objects[j].euler_number()) properties[i].append(objects[j].extent()) properties[i].append(objects[j].filled_area()) properties[i].append(objects[j].filled_image()) properties[i].append(objects[j].moments_hu()) properties[i].append(objects[j].image()) properties[i].append(objects[j].label) properties[i].append(objects[j].major_axis_length()) properties[i].append(objects[j].max_intensity()) properties[i].append(objects[j].mean_intensity()) properties[i].append(objects[j].min_intensity()) properties[i].append(objects[j].minor_axis_length()) properties[i].append(objects[j].moments()) properties[i].append(objects[j].moments_normalized()) properties[i].append(objects[j].orientation()) properties[i].append(objects[j].perimeter()) properties[i].append(objects[j].solidity()) properties[i].append(objects[j].weighted_moments_central()) properties[i].append(objects[j].weighted_centroid()) properties[i].append(objects[j].weighted_moments_hu()) properties[i].append(objects[j].weighted_moments()) properties[i].append(objects[j].weighted_moments_normalized()) return properties, prop_names if __name__ == '__main__': image = io.imread('test-image.png') green = image[..., 1].copy() lp = LineProfiler() lp.add_function(object_features) lp.run('intensity_object_features(green, 100)') lp.print_stats() 
lp.dump_stats('profile.lprof') print(__file__) ```
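The script above times lazy regionprops property access with line_profiler. A minimal self-contained variant of the same pattern, assuming a current scikit-image where properties are attributes rather than methods, and using a synthetic label image instead of 'test-image.png':

```python
# Sketch only: same profiling pattern as regprops.py, with assumed modern skimage API.
import numpy as np
from skimage import measure
from line_profiler import LineProfiler

def measure_areas(label_image):
    # properties are computed lazily; touching .area triggers the actual work
    return [region.area for region in measure.regionprops(label_image)]

labels = measure.label(np.random.RandomState(0).rand(256, 256) > 0.7)

lp = LineProfiler()
lp.add_function(measure_areas)
lp.run('measure_areas(labels)')   # executed in __main__, as in the script above
lp.print_stats()
```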
{ "source": "jni/ray", "score": 2 }
#### File: ray/ray/imio.py ```python import os import sys import argparse import re import json from os.path import split as split_path, join as join_path from fnmatch import filter as fnfilter import logging import json import itertools as it import subprocess import tempfile as tmp # libraries import h5py, Image, numpy from scipy.ndimage.measurements import label from numpy import array, asarray, uint8, uint16, uint32, uint64, zeros, \ zeros_like, squeeze, fromstring, ndim, concatenate, newaxis, swapaxes, \ savetxt, unique, double, ones, ones_like, prod, cumsum, ndarray import numpy as np # local files import evaluate import morpho ### Random utilities def tryint(s): try: return int(s) except ValueError: return s def alphanumeric_key(s): """Turn a string into a list of string and number chunks. "z23a" --> ["z", 23, "a"] Copied from http://stackoverflow.com/questions/4623446/how-do-you-sort-files-numerically/4623518#4623518 on 2011-09-01 """ return [tryint(c) for c in re.split('([0-9]+)', s)] ### Auto-detect file format supported_image_extensions = ['png', 'tif', 'tiff', 'jpg', 'jpeg'] def read_image_stack(fn, *args, **kwargs): """Read a 3D volume of images in image or .h5 format into a numpy.ndarray. The format is automatically detected from the (first) filename. A 'crop' keyword argument is supported, as a list of [xmax, xmin, ymax, ymin, zmax, zmin]. Use 'None' for no crop in that coordinate. If reading in .h5 format, keyword arguments are passed through to read_h5_stack(). """ if os.path.isdir(fn): fn += '/' d, fn = split_path(os.path.expanduser(fn)) if len(d) == 0: d = '.' crop = kwargs.get('crop', [None]*6) if len(crop) == 4: crop.extend([None]*2) elif len(crop) == 2: crop = [None]*4 + crop kwargs['crop'] = crop if any([fn.endswith(ext) for ext in supported_image_extensions]): # image types, such as a set of pngs or a multi-page tiff xmin, xmax, ymin, ymax, zmin, zmax = crop if len(args) > 0 and type(args[0]) == str and args[0].endswith(fn[-3:]): # input is a list of filenames fns = [fn] + [split_path(f)[1] for f in args] else: # input is a filename pattern to match fns = fnfilter(os.listdir(d), fn) if len(fns) == 1 and fns[0].endswith('.tif'): stack = read_multi_page_tif(join_path(d,fns[0]), crop) else: fns.sort(key=alphanumeric_key) # sort filenames numerically fns = fns[zmin:zmax] im0 = pil_to_numpy(Image.open(join_path(d,fns[0]))) ars = (pil_to_numpy(Image.open(join_path(d,fn))) for fn in fns) im0 = im0[xmin:xmax,ymin:ymax] dtype = im0.dtype stack = zeros((len(fns),)+im0.shape, dtype) for i, im in enumerate(ars): stack[i] = im[xmin:xmax,ymin:ymax] elif fn.endswith('_boundpred.h5') or fn.endswith('_processed.h5'): # Ilastik batch prediction output file stack = read_prediction_from_ilastik_batch(os.path.join(d,fn), **kwargs) elif fn.endswith('.h5'): # other HDF5 file stack = read_h5_stack(join_path(d,fn), *args, **kwargs) elif os.path.isfile(os.path.join(d, 'superpixel_to_segment_map.txt')): # Raveler export stack = raveler_to_labeled_volume(d, *args, **kwargs) return squeeze(stack) def single_arg_read_image_stack(fn): """Read an image stack and print exceptions as they occur. argparse.ArgumentParser() subsumes exceptions when they occur in the argument type, masking lower-level errors. This function prints out the error before propagating it up the stack. 
""" try: return read_image_stack(fn) except Exception as err: print err raise def write_image_stack(npy_vol, fn, **kwargs): """Write a numpy.ndarray 3D volume to a stack of images or an HDF5 file.""" fn = os.path.expanduser(fn) if fn.endswith('.png'): write_png_image_stack(npy_vol, fn, **kwargs) elif fn.endswith('.h5'): write_h5_stack(npy_vol, fn, **kwargs) elif fn.endswith('.vtk'): write_vtk(npy_vol, fn, **kwargs) else: raise ValueError('Image format not supported: ' + fn + '\n') ### Standard image formats (png, tiff, etc.) def pil_to_numpy(img): return squeeze(array(img.getdata()).reshape((img.size[1], img.size[0], -1))) def read_multi_page_tif(fn, crop=[None]*6): """Read a multi-page tif file and return a numpy array.""" xmin, xmax, ymin, ymax, zmin, zmax = crop img = Image.open(fn) pages = [] if zmin is not None and zmin > 0: img.seek(zmin) eof = False while not eof and img.tell() != zmax: pages.append(pil_to_numpy(img)[...,newaxis]) try: img.seek(img.tell()+1) except EOFError: eof = True return concatenate(pages, axis=-1) def write_png_image_stack(npy_vol, fn, **kwargs): """Write a numpy.ndarray 3D volume to a stack of .png images. Only 8-bit and 16-bit single-channel images are currently supported. """ axis = kwargs.get('axis', -1) bitdepth = kwargs.get('bitdepth', None) npy_vol = swapaxes(npy_vol, 0, axis) fn = os.path.expanduser(fn) if 0 <= npy_vol.max() <= 1 and npy_vol.dtype == double: bitdepth = 16 if None else bitdepth imdtype = uint16 if bitdepth == 16 else uint8 npy_vol = ((2**bitdepth-1)*npy_vol).astype(imdtype) if 1 < npy_vol.max() < 256 and bitdepth == None or bitdepth == 8: mode = 'L' mode_base = 'L' npy_vol = uint8(npy_vol) elif 256 <= numpy.max(npy_vol) < 2**16 and bitdepth == None or \ bitdepth == 16: mode = 'I;16' mode_base = 'I' npy_vol = uint16(npy_vol) else: mode = 'RGBA' mode_base = 'RGBA' npy_vol = uint32(npy_vol) for z, pl in enumerate(npy_vol): im = Image.new(mode_base, pl.T.shape) im.fromstring(pl.tostring(), 'raw', mode) im.save(fn % z) ### VTK structured points array format numpy_type_to_vtk_string = { np.uint8:'unsigned_char', np.int8:'char', np.uint16:'unsigned_short', np.int16:'short', np.uint32:'unsigned_int', np.int32:'int', np.uint64:'unsigned_long', np.int64:'long', np.float32:'float', np.float64:'double' } vtk_string_to_numpy_type = \ dict([(v,k) for k, v in numpy_type_to_vtk_string.items()]) def write_vtk(ar, fn, **kwargs): """Write volume to VTK structured points format file. Code adapted from <NAME>'s writeVTK.m Matlab implementation. """ # write header f = open(fn, 'w') f.write('# vtk DataFile Version 3.0\n') f.write('created by write_vtk (Python implementation by JNI)\n') f.write('BINARY\n') f.write('DATASET STRUCTURED_POINTS\n') f.write(' '.join(['DIMENSIONS'] + map(str, ar.shape[-1::-1])) + '\n') f.write(' '.join(['ORIGIN'] + map(str, zeros(3))) + '\n') f.write(' '.join(['SPACING'] + map(str, kwargs.get('spacing', ones(3)))) + '\n') f.write('POINT_DATA ' + str(ar.size) + '\n') f.write('SCALARS image_data ' + numpy_type_to_vtk_string[ar.dtype.type] + '\n') f.write('LOOKUP_TABLE default\n'); f.close() # write data as binary f = open(fn, 'ab') f.write(ar.data) f.close() def read_vtk(fin, **kwargs): """Read a numpy volume from a VTK structured points file. Code adapted from <NAME>'s readVTK.m Matlab implementation. 
""" f = open(fin, 'r') num_lines_in_header = 10 lines = [f.readline() for i in range(num_lines_in_header)] shape_line = [line for line in lines if line.startswith('DIMENSIONS')][0] type_line = [line for line in lines if line.startswith('SCALARS') or line.startswith('VECTORS')][0] ar_shape = map(int, shape_line.rstrip('\n').split(' ')[1:])[-1::-1] ar_type = vtk_string_to_numpy_type[type_line.rstrip('\n').split(' ')[2]] itemsize = np.dtype(ar_type).itemsize ar = squeeze(fromstring(f.read(), ar_type).reshape(ar_shape+[-1])) return ar ### HDF5 format def read_h5_stack(fn, *args, **kwargs): """Read a volume in HDF5 format into numpy.ndarray. Accepts keyword arguments 'group' (the group in the HDF5 file containing the array information; default: 'stack') and 'crop' (format as in read_image_stack()) """ fn = os.path.expanduser(fn) if len(args) > 0: group = args[0] elif kwargs.has_key('group'): group = kwargs['group'] else: group = 'stack' if kwargs.has_key('crop'): crop = kwargs['crop'] else: crop = [None,None,None,None,None,None] xmin, xmax, ymin, ymax, zmin, zmax = crop dset = h5py.File(fn, 'r') a = dset[group] if ndim(a) == 2: a = a[xmin:xmax,ymin:ymax] elif ndim(a) == 3: a = a[xmin:xmax,ymin:ymax,zmin:zmax] ar = array(a) dset.close() return ar def write_h5_stack(npy_vol, fn, **kwargs): """Write a numpy.ndarray 3D volume to an HDF5 file. The following keyword arguments are supported: - 'group': the group into which to write the array. (default: 'stack') - 'compression': The type of compression. (default: None) - 'chunks': Chunk size in the HDF5 file. (default: None) """ fn = os.path.expanduser(fn) if not kwargs.has_key('compression'): kwargs['compression'] = None if not kwargs.has_key('chunks'): kwargs['chunks'] = None try: group = kwargs['group'] del kwargs['group'] except KeyError: group = 'stack' fout = h5py.File(fn, 'a') if group in fout: del fout[group] fout.create_dataset(group, data=npy_vol, **kwargs) fout.close() ### Raveler format def ucm_to_raveler(ucm, sp_threshold=0, body_threshold=0.1, **kwargs): """Return Raveler map from a UCM.""" sps = label(ucm<sp_threshold)[0] bodies = label(ucm<=body_threshold)[0] return segs_to_raveler(sps, bodies, **kwargs) def segs_to_raveler(sps, bodies, min_size=0, do_conn_comp=False, sps_out=None): if sps_out is None: sps_out = raveler_serial_section_map(sps, min_size, do_conn_comp, False) segment_map = raveler_serial_section_map(bodies, min_size, do_conn_comp) segment_to_body = unique(zip(segment_map.ravel(), bodies.ravel())) segment_to_body = segment_to_body[segment_to_body[:,0] != 0] segment_to_body = concatenate((array([[0,0]]), segment_to_body), axis=0) sp_to_segment = [] for i, (sp_map_i, segment_map_i, body_map_i) in \ enumerate(zip(sps_out, segment_map, bodies)): segment_map_i *= sp_map_i.astype(bool) valid = (sp_map_i != 0) + (segment_map_i == 0) sp_to_segment.append( unique(zip(it.repeat(i), sp_map_i[valid], segment_map_i[valid]))) valid = segment_map != 0 logging.debug('plane %i done'%i) logging.info('total superpixels before: ' + str(len(unique(sps))) + ' total superpixels after: ' + str(len(unique(sps_out)))) sp_to_segment = concatenate(sp_to_segment, axis=0) return sps_out, sp_to_segment, segment_to_body def raveler_serial_section_map(nd_map, min_size=0, do_conn_comp=False, globally_unique_ids=True): nd_map = serial_section_map(nd_map, min_size, do_conn_comp, globally_unique_ids) if not (nd_map == 0).any(): nd_map[:,0,0] = 0 return nd_map def serial_section_map(nd_map, min_size=0, do_conn_comp=False, globally_unique_ids=True): if 
do_conn_comp: label_fct = label else: def label_fct(a): relabeled, fmap, imap = evaluate.relabel_from_one(a) return relabeled, len(imap) def remove_small(a): return morpho.remove_small_connected_components(a, min_size, False) mplanes = map(remove_small, nd_map) relabeled_planes, nids_per_plane = zip(*map(label_fct, mplanes)) start_ids = concatenate((array([0], int), cumsum(nids_per_plane)[:-1])) \ if globally_unique_ids else [0]*len(nids_per_plane) relabeled_planes = [(relabeled_plane + start_id)[newaxis, ...] for relabeled_plane, start_id in zip(relabeled_planes, start_ids)] return concatenate(relabeled_planes, axis=0) def write_to_raveler(sps, sp_to_segment, segment_to_body, directory, gray=None, raveler_dir='/usr/local/raveler-hdf', nproc_contours=16, body_annot=None): """Output a segmentation to Raveler format. Arguments: - sps: the superpixel map (nplanes * nx * ny numpy ndarray). Superpixels can only occur on one plane. - sp_to_segment: superpixel-to-segment map as a 3 column list of (plane number, superpixel id, segment id). Segments must be unique to a plane. - segment_to_body: the segment to body map. (nsegments * 2 numpy array) - directory: the directory in which to write the stack. This directory and all necessary subdirectories will be created. - [gray]: The grayscale images corresponding to the superpixel maps (nplanes * nx * ny numpy ndarray). - [raveler dir]: where Raveler is installed. - [nproc_contours]: how many processors to use when generating the Raveler contours. - [body_annot]: either a dictionary to write to JSON in Raveler body annotation format, or a numpy ndarray of the segmentation from which to compute orphans and non traversing bodies (which then get written out as body annotations). Value: None. Raveler is the EM segmentation proofreading tool developed in-house at Janelia for the FlyEM project. """ sp_path = os.path.join(directory, 'superpixel_maps') im_path = os.path.join(directory, 'grayscale_maps') # write conventional Raveler stack if not os.path.exists(directory): os.makedirs(directory) if not os.path.exists(sp_path): os.mkdir(sp_path) write_png_image_stack(sps, os.path.join(sp_path, 'sp_map.%05i.png'), bitdepth=16, axis=0) savetxt(os.path.join(directory, 'superpixel_to_segment_map.txt'), sp_to_segment, '%i') savetxt(os.path.join(directory, 'segment_to_body_map.txt'), segment_to_body, '%i') if gray is not None: if not os.path.exists(im_path): os.mkdir(im_path) write_png_image_stack(gray, os.path.join(im_path, 'img.%05d.png'), axis=0) # body annotations if body_annot is not None: if type(body_annot) == ndarray: orphans = morpho.orphans(body_annot) non_traversing = morpho.non_traversing_segments(body_annot) body_annot = raveler_body_annotations(orphans, non_traversing) write_json(body_annot, os.path.join(directory, 'annotations-body.json')) # make tiles, bounding boxes, and contours, and compile HDF5 stack info. with tmp.TemporaryFile() as tmp_stdout: try: def call(arglist): return subprocess.call(arglist, stdout=tmp_stdout) r1, r2, r3, r4 = [-1]*4 r1 = call(['python', os.path.join(raveler_dir, 'util/createtiles.py'), directory, '1024', '0']) r2 = call([os.path.join(raveler_dir, 'bin/bounds'), directory]) r3 = call([ os.path.join(raveler_dir, 'bin/compilestack'), directory]) except: logging.warning( 'Error during Raveler export post-processing step. 
' + 'Possible causes are that you do not have Raveler installed ' + 'or you did not specify the correct installation path.') logging.warning('Return codes: %i, %i, %i' % (r1, r2, r3)) # with sys.exc_info() as ex: # logging.warning('Exception info:\n' + '\n'.join(map(str, ex))) # make permissions friendly for proofreaders. try: subprocess.call(['chmod', '-R', 'go=u', directory]) except: logging.warning('Could not change Raveler export permissions.') def raveler_output_shortcut(svs, seg, gray, outdir, sps_out=None): """Compute the Raveler format and write to directory, all at once.""" sps_out, sp2seg, seg2body = segs_to_raveler(svs, seg, sps_out=sps_out) write_to_raveler(sps_out, sp2seg, seg2body, outdir, gray, body_annot=seg) return sps_out def raveler_body_annotations(orphans, non_traversing=None): data = [{'status': 'not sure', 'comment': 'orphan', 'body ID': int(o)} for o in orphans] if non_traversing is not None: data.extend([{'status': 'not sure', 'comment': 'does not traverse', 'body ID': int(n)} for n in non_traversing]) metadata = {'description': 'body annotations', 'file version': 2} return {'data': data, 'metadata': metadata} def write_json(annot, fn='annotations-body.json', directory=None): """Write an annotation dictionary in Raveler format to a JSON file. The annotation file format is described in: https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format and: https://wiki.janelia.org/wiki/display/flyem/generic+file+format """ if directory is not None: fn = join_path(directory, fn) with open(fn, 'w') as f: json.dump(annot, f, indent=2) def raveler_to_labeled_volume(rav_export_dir, get_glia=False, use_watershed=False, **kwargs): """Import a raveler export stack into a labeled segmented volume.""" import morpho spmap = read_image_stack( os.path.join(rav_export_dir, 'superpixel_maps', '*.png'), **kwargs) sp2seg_list = numpy.loadtxt( os.path.join(rav_export_dir, 'superpixel_to_segment_map.txt'), uint32) seg2bod_list = numpy.loadtxt( os.path.join(rav_export_dir, 'segment_to_body_map.txt'), uint32) sp2seg = {} max_sp = sp2seg_list[:,1].max() start_plane = sp2seg_list[:,0].min() for z, sp, seg in sp2seg_list: if not sp2seg.has_key(z): sp2seg[z] = zeros(max_sp+1, uint32) sp2seg[z][sp] = seg max_seg = seg2bod_list[:,0].max() seg2bod = zeros(max_seg+1, uint32) seg2bod[seg2bod_list[:,0]] = seg2bod_list[:,1] initial_output_volume = zeros_like(spmap) for i, m in enumerate(spmap): j = start_plane + i initial_output_volume[i] = seg2bod[sp2seg[j][m]] probs = kwargs.get('probability_map', ones_like(spmap)) output_volume = morpho.watershed(probs, seeds=initial_output_volume) \ if use_watershed else initial_output_volume if (output_volume[:, 0, 0] == 0).all() and \ (output_volume == 0).sum() == output_volume.shape[0]: output_volume[:, 0, 0] = output_volume[:, 0, 1] if get_glia: annots = json.load( open(os.path.join(rav_export_dir, 'annotations-body.json'), 'r')) glia = [a['body ID'] for a in annots['data'] if a.get('comment', None) == 'glia'] return output_volume, glia else: return output_volume ### Ilastik formats # obtained from Ilastik 0.5.4 ilastik_label_colors = \ [0xffff0000, 0xff00ff00, 0xffffff00, 0xff0000ff, 0xffff00ff, 0xff808000, 0xffc0c0c0, 0xfff2022d] def write_ilastik_project(images, labels, fn, label_names=None): """Write one or more image volumes and corresponding labels to Ilastik. Limitations: - Assumes the same labels are used for all images. - Supports only grayscale images and volumes, and a maximum of 8 labels. 
- Requires at least one unlabeled voxel in the label field. """ f = h5py.File(fn, 'w') if type(images) != list: images = [images] labels = [labels] ulbs = unique(concatenate(map(unique, labels)))[1:] colors = array(ilastik_label_colors[:len(ulbs)]) names = ['Label %i'%i for i in ulbs] names = array(names, '|S%i'%max(map(len, names))) label_attributes = {'color':colors, 'name':names, 'number':ulbs} for i, (im, lb) in enumerate(zip(images, labels)): if im.ndim == 2: new_shape = (1,1)+im.shape+(1,) elif im.ndim == 3: new_shape = (1,)+im.shape+(1,) else: raise ValueError('Unsupported number of dimensions in image.') im = im.reshape(new_shape) lb = lb.reshape(new_shape) root = 'DataSets/dataItem%02i/'%i f[root+'data'] = im f[root+'labels'] = lb for k, v in label_attributes.items(): f[root+'labels'].attrs[k] = v f[root].attrs['Name'] = '' f[root].attrs['fileName'] = '' for subgroup in ['Description', 'Labeler', 'Name']: f['Project/%s'%subgroup] = array('', dtype='|S1') f['ilastikVersion'] = array(0.5) f.close() def write_ilastik_batch_volume(im, fn): """Write a volume to an HDF5 file for Ilastik batch processing.""" if im.ndim == 2: im = im.reshape((1,1)+im.shape+(1,)) elif im.ndim == 3: im = im.reshape((1,)+im.shape+(1,)) else: raise ValueError('Unsupported number of dimensions in image.') write_h5_stack(im, fn, group='/volume/data') def read_prediction_from_ilastik_batch(fn, **kwargs): """Read the prediction produced by Ilastik from batch processing.""" if not kwargs.has_key('group'): kwargs['group'] = '/volume/prediction' a = squeeze(read_h5_stack(fn, **kwargs)) if kwargs.get('single_channel', True): a = a[...,0] return a ### Shiv Vitaladevuni's binary raw array format shiv_typecode_to_numpy_type = { 0:np.int8, 1:np.uint8, 2:np.int16, 3:np.uint16, 4:np.int32, 5:np.uint32, 6:np.int64, 7:np.uint64, 8:np.float32, 9:np.float64 } def read_shiv_raw_stack(ws_fn, sp2body_fn): ws_fn, sp2body_fn = map(os.path.expanduser, [ws_fn, sp2body_fn]) ws = read_shiv_raw_array(ws_fn) sp2b = read_shiv_raw_array(sp2body_fn)[1] ar = sp2b[ws] return remove_merged_boundaries(ar) def remove_merged_boundaries(ar, connectivity=1): import morpho arp = morpho.pad(ar, [0,ar.max()+1]) arpr = arp.ravel() zero_idxs = (arpr == 0).nonzero()[0] ns = arpr[morpho.get_neighbor_idxs(arp, zero_idxs, connectivity)] ns_compl = ns.copy() ns_compl[ns==0] = ns.max()+1 merged_boundaries = (ns.max(axis=1) == ns_compl.min(axis=1)).nonzero()[0] arpr[zero_idxs[merged_boundaries]] = ns.max(axis=1)[merged_boundaries] return morpho.juicy_center(arp, 2) def read_shiv_raw_array(fn): fin = open(fn, 'rb') typecode = fromstring(fin.read(4), uint8)[1] ar_type = shiv_typecode_to_numpy_type[typecode] ar_ndim = fromstring(fin.read(4), uint8)[0] ar_shape = fromstring(fin.read(ar_ndim*4), uint32) ar = fromstring(fin.read(), ar_type).reshape(ar_shape, order='F') return ar ```
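read_h5_stack and write_h5_stack above boil down to an h5py round trip under a configurable group (default 'stack'). A small sketch of that round trip, with a made-up file name and array:

```python
# Minimal HDF5 round trip illustrating the default 'stack' group used above.
import numpy as np
import h5py

vol = np.arange(2 * 3 * 4, dtype=np.uint8).reshape((2, 3, 4))

with h5py.File('example_stack.h5', 'w') as f:
    f.create_dataset('stack', data=vol)

with h5py.File('example_stack.h5', 'r') as f:
    restored = f['stack'][...]     # cropping would be ordinary slicing here, e.g. f['stack'][0:1]

assert (restored == vol).all()
```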
{ "source": "jnirschl/cookiecutter-data-science", "score": 3 }
#### File: models/networks/resnet50_dropout.py ```python import string import tensorflow as tf # Use batch normalization defaults from Pytorch. BATCH_NORM_DECAY = 0.9 BATCH_NORM_EPSILON = 1e-5 def apply_dropout(inputs, dropout_rate, filterwise_dropout): """Apply a dropout layer to the inputs.""" noise_shape = None if filterwise_dropout: noise_shape = [inputs.shape[0], 1, 1, inputs.shape[3]] return tf.keras.layers.Dropout(dropout_rate, noise_shape=noise_shape)( inputs, training=True ) def bottleneck_block( inputs, filters, stage, block, strides, dropout_rate, filterwise_dropout ): """Residual block with 1x1 -> 3x3 -> 1x1 convs in main path. Note that strides appear in the second conv (3x3) rather than the first (1x1). This is also known as "ResNet v1.5" as it differs from He et al. (2015) (http://torch.ch/blog/2016/02/04/resnets.html). Dropout is applied post-activation to every batch-normalized conv layer. Args: inputs: tf.Tensor. filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names strides: Strides for the second conv layer in the block. dropout_rate: Dropout rate. filterwise_dropout: Dropout whole convolutional filters instead of individual values in the feature map. Returns: tf.Tensor. """ filters1, filters2, filters3 = filters conv_name_base = "res" + str(stage) + block + "_branch" bn_name_base = "bn" + str(stage) + block + "_branch" x = tf.keras.layers.Conv2D( filters1, kernel_size=1, use_bias=False, kernel_initializer="he_normal", name=conv_name_base + "2a", )(inputs) x = tf.keras.layers.BatchNormalization( momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=bn_name_base + "2a" )(x) x = tf.keras.layers.Activation("relu")(x) x = apply_dropout(x, dropout_rate, filterwise_dropout) x = tf.keras.layers.Conv2D( filters2, kernel_size=3, strides=strides, padding="same", use_bias=False, kernel_initializer="he_normal", name=conv_name_base + "2b", )(x) x = tf.keras.layers.BatchNormalization( momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=bn_name_base + "2b" )(x) x = tf.keras.layers.Activation("relu")(x) x = apply_dropout(x, dropout_rate, filterwise_dropout) x = tf.keras.layers.Conv2D( filters3, kernel_size=1, use_bias=False, kernel_initializer="he_normal", name=conv_name_base + "2c", )(x) x = tf.keras.layers.BatchNormalization( momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=bn_name_base + "2c" )(x) shortcut = inputs if not x.shape.is_compatible_with(shortcut.shape): shortcut = tf.keras.layers.Conv2D( filters3, kernel_size=1, use_bias=False, strides=strides, kernel_initializer="he_normal", name=conv_name_base + "1", )(shortcut) shortcut = tf.keras.layers.BatchNormalization( momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name=bn_name_base + "1", )(shortcut) shortcut = apply_dropout(shortcut, dropout_rate, filterwise_dropout) x = tf.keras.layers.add([x, shortcut]) x = tf.keras.layers.Activation("relu")(x) return x def group( inputs, filters, num_blocks, stage, strides, dropout_rate, filterwise_dropout ): """Group of residual blocks.""" blocks = string.ascii_lowercase x = bottleneck_block( inputs, filters, stage, block=blocks[0], strides=strides, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) for i in range(num_blocks - 1): x = bottleneck_block( x, filters, stage, block=blocks[i + 1], strides=1, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) return x def 
resnet50_dropout( input_shape, num_classes: int, dropout_rate: float = 0.1, filterwise_dropout: bool = True, ) -> tf.keras.models.Model: """Builds ResNet50. Using strided conv, pooling, four groups of residual blocks, and pooling, the network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 -> 14x14 -> 7x7 (Table 1 of He et al. (2015)). Args: input_shape: Shape tuple of input excluding batch dimension. num_classes: Number of output classes. dropout_rate: Dropout rate. filterwise_dropout: Dropout whole convolutional filters instead of individual values in the feature map. Returns: tf.keras.Model. """ inputs = tf.keras.layers.Input(shape=input_shape) x = tf.keras.layers.ZeroPadding2D(padding=3, name="conv1_pad")(inputs) x = tf.keras.layers.Conv2D( 64, kernel_size=7, strides=2, padding="valid", use_bias=False, kernel_initializer="he_normal", name="conv1", )(x) x = tf.keras.layers.BatchNormalization( momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name="bn_conv1" )(x) x = tf.keras.layers.Activation("relu")(x) x = apply_dropout(x, dropout_rate, filterwise_dropout) x = tf.keras.layers.MaxPooling2D(3, strides=2, padding="same")(x) x = group( x, [64, 64, 256], stage=2, num_blocks=3, strides=1, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) x = group( x, [128, 128, 512], stage=3, num_blocks=4, strides=2, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) x = group( x, [256, 256, 1024], stage=4, num_blocks=6, strides=2, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) x = group( x, [512, 512, 2048], stage=5, num_blocks=3, strides=2, dropout_rate=dropout_rate, filterwise_dropout=filterwise_dropout, ) x = tf.keras.layers.GlobalAveragePooling2D(name="avg_pool")(x) x = tf.keras.layers.Dense( num_classes, activation=None, kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01), name="fc1000", )(x) return tf.keras.Model(inputs=inputs, outputs=x, name="resnet50_dropout") ``` #### File: models/networks/unet_xception.py ```python import tensorflow as tf from tensorflow.keras import layers def unet_xception( input_shape: tuple, num_classes: int, padding: str = "same", downsample: int = 2, )-> tf.keras.models.Model: """ From https://keras.io/examples/vision/oxford_pets_image_segmentation/#prepare-unet-xceptionstyle-model """ inputs = layers.Input(shape=input_shape) ### [First half of the network: downsampling inputs] ### # Entry block x = layers.Conv2D(32 / downsample, 3, strides=2, padding=padding)(inputs) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) previous_block_activation = x # Set aside residual # Blocks 1, 2, 3 are identical apart from the feature depth. 
filter_block_1 = [int(elem / downsample) for elem in [64, 128, 256]] filter_block_2 = [int(elem / downsample) for elem in [256, 128, 64, 32]] for filters in filter_block_1: x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding=padding)(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding=padding)(x) x = layers.BatchNormalization()(x) x = layers.MaxPooling2D(3, strides=2, padding=padding)(x) # Project residual residual = layers.Conv2D(filters, 1, strides=2, padding=padding)( previous_block_activation ) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual # [Second half of the network: upsampling inputs] for filters in filter_block_2: x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding=padding)(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding=padding)(x) x = layers.BatchNormalization()(x) x = layers.UpSampling2D(2)(x) # Project residual residual = layers.UpSampling2D(2)(previous_block_activation) residual = layers.Conv2D(filters, 1, padding=padding)(residual) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual # Add a per-pixel classification layer outputs = layers.Conv2D(num_classes, 3, activation=None, padding=padding)(x) # tf.keras.backend.clear_session() return tf.keras.Model(inputs, outputs, name="unet_xception") # def build_unet(input_shape): # inputs = Input(input_shape) # # s1, p1 = encoder_block(inputs, 64) # s2, p2 = encoder_block(p1, 128) # s3, p3 = encoder_block(p2, 256) # s4, p4 = encoder_block(p3, 512) # # b1 = conv_block(p4, 1024) # # d1 = decoder_block(b1, s4, 512) # d2 = decoder_block(d1, s3, 256) # d3 = decoder_block(d2, s2, 128) # d4 = decoder_block(d3, s1, 64) # # outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d4) # # model = Model(inputs, outputs, name="U-Net") # return model ``` #### File: src/tests/test_mapfile.py ```python import os from pathlib import Path import pandas as pd import pytest from src.data import mapfile @pytest.fixture def input_dir(): return "./src/tests/test_data/mnist_small" @pytest.fixture def output_dir(): return "./src/tests/test_data/mnist_small" @pytest.fixture def output_filename(): return "pytest_mapfile.csv" @pytest.fixture def mapfile_df(output_filename): return pd.read_csv( Path("./src/tests/test_data/mnist_small/").joinpath(output_filename) ) @pytest.fixture def params_filepath(): return str(Path("./src/tests/test_data/mnist_small/").joinpath("params.yaml")) class TestMapfile: # input_dir, output_dir, output_filename def test_create_mapfile(self, input_dir, output_dir, output_filename): """Tests for mapfile.create.create_mapfile Test the output of mapfile.create is a Pandas DataFrame Tests that output filename and label_encoding.yaml exists""" mapfile_df = mapfile.create(input_dir, output_dir, output_filename) assert type(mapfile_df) is pd.DataFrame assert Path(output_dir).joinpath(output_filename).exists() assert Path(output_dir).joinpath("label_encoding.yaml").exists() def test_split(self, mapfile_df, output_dir, params_filepath): """Tests for mapfile.split Test that each image is a test for one and only one cross- validation fold. 
Also tests file saving""" split_df = mapfile.split( mapfile_df, output_dir, params_filepath=params_filepath ) # each image should be a test for one and only one fold assert ( split_df.apply(lambda x: x == "test").sum(axis=1).all() ), "The same image is a 'test' for more than one fold" # test output file exists assert Path(output_dir).joinpath("split_train_dev.csv").exists() def test_split_rng(self, mapfile_df, input_dir, params_filepath): """Tests for mapfile.split Test that repeated calls to mapfile.split return the same stratified k-fold based on the random seed in params.yaml""" split_1 = mapfile.split(mapfile_df, params_filepath=params_filepath) split_2 = mapfile.split(mapfile_df, params_filepath=params_filepath) assert (split_1 == split_2).all().all() ``` #### File: src/tests/test_train_model.py ```python from pathlib import Path import pytest from click.testing import CliRunner from src.models import train @pytest.fixture def mapfile_path(): filepath = "./src/tests/test_data/mnist_small/pytest_mapfile.csv" return str(Path(filepath).resolve()) @pytest.fixture def mapfile_path_seg(): filepath = "./src/tests/test_data/mito_seg/pytest_mapfile.csv" return str(Path(filepath).resolve()) @pytest.fixture def cv_idx_path(): filepath = "./src/tests/test_data/mnist_small/split_train_dev.csv" return str(Path(filepath).resolve()) @pytest.fixture def cv_idx_path_seg(): filepath = "./src/tests/test_data/mito_seg/split_train_dev.csv" return str(Path(filepath).resolve()) @pytest.fixture def output_filename(): return "pytest_mapfile.csv" @pytest.fixture def mnist_params(): return "./src/tests/test_data/mnist_small/params.yaml" @pytest.fixture def mito_seg_params(): return "./src/tests/test_data/mito_seg/params.yaml" class TestTrainModel: def test_mnist_python(self, mapfile_path, cv_idx_path, mnist_params): """ """ expected_history = { "loss": 2.7250454425811768, "accuracy": 0.02901785634458065, } history = train.fit(mapfile_path, cv_idx_path, params_filepath=mnist_params, debug=True) # assert abs(history.history["loss"][-1] - expected_history["loss"]) < 0.0001 # assert ( # abs(history.history["accuracy"][-1] - expected_history["accuracy"]) < 0.0001 # ) def test_mnist_click(self, mapfile_path, cv_idx_path, mnist_params): """ """ runner = CliRunner() result = runner.invoke( train.main, [mapfile_path, cv_idx_path, "-p", mnist_params, "-d"] ) assert not result.exception assert result.exit_code == 0 def test_mito_seg(self, mapfile_path_seg, cv_idx_path_seg, mito_seg_params): """ """ pass # history = train_model.train( # mapfile_path_seg, # cv_idx_path_seg, # params_filepath=test_params_seg, # debug=True, # ) # assert history.history["loss"][-1] == expected_history["loss"] # assert ( # history.history["sparse_categorical_accuracy"][-1] # == expected_history["sparse_categorical_accuracy"] # ) def test_mito_seg_click(self, mapfile_path_seg, cv_idx_path_seg, mito_seg_params): """ """ pass # runner = CliRunner() # result = runner.invoke( # train_model.main, [mapfile_path_seg, cv_idx_path_seg, "-p", test_params] # ) # # assert not result.exception # assert result.exit_code == 0 ```
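test_split above checks two invariants: each image is a "test" sample in exactly one fold, and the split is reproducible for a fixed seed. A rough sketch of those invariants with scikit-learn's StratifiedKFold (illustrative only, not the project's mapfile.split):

```python
# Sketch of the two invariants checked by the tests above, using StratifiedKFold.
import numpy as np
from sklearn.model_selection import StratifiedKFold

y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
X = np.arange(len(y)).reshape(-1, 1)

def fold_assignments(seed):
    skf = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
    assignment = np.empty(len(y), dtype=int)
    for fold, (_, test_idx) in enumerate(skf.split(X, y)):
        assignment[test_idx] = fold        # each sample is a test index exactly once
    return assignment

a1, a2 = fold_assignments(42), fold_assignments(42)
assert (a1 == a2).all()                    # reproducible for a fixed seed
print(a1)
```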
{ "source": "jnirschl/risk-slim", "score": 3 }
#### File: risk-slim/batch/train_risk_slim.py ```python import os import sys import time import argparse import logging import pickle import json import numpy as np # add the source directory to search path to avoid module import errors if riskslim has not been installed sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from riskslim.helper_functions import load_data_from_csv, setup_logging from riskslim.coefficient_set import CoefficientSet from riskslim.lattice_cpa import run_lattice_cpa, DEFAULT_LCPA_SETTINGS # uncomment for debugging # TODO: run the following when building # with open(settings_json, 'w') as outfile: # json.dump(DEFAULT_LCPA_SETTINGS, outfile, sort_keys = False, indent=4) def setup_parser(): """ Create an argparse Parser object for RiskSLIM command line arguments. This object determines all command line arguments, handles input validation and default values. See https://docs.python.org/3/library/argparse.html for configuration """ #parser helper functions def is_positive_integer(value): parsed_value = int(value) if parsed_value <= 0: raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value) return parsed_value def is_positive_float(value): parsed_value = float(value) if parsed_value <= 0.0: raise argparse.ArgumentTypeError("%s must be a positive value" % value) return parsed_value def is_negative_one_or_positive_integer(value): parsed_value = int(value) if not (parsed_value == -1 or parsed_value >= 1): raise argparse.ArgumentTypeError("%s is an invalid value (must be -1 or >=1)" % value) else: return parsed_value def is_file_on_disk(file_name): if not os.path.isfile(file_name): raise argparse.ArgumentTypeError("the file %s does not exist!" % file_name) else: return file_name def is_file_not_on_disk(file_name): if os.path.isfile(file_name): raise argparse.ArgumentTypeError("the file %s already exists on disk" % file_name) else: return file_name def is_valid_fold(value): parsed_value = int(value) if parsed_value < 0: raise argparse.ArgumentTypeError("%s must be a positive integer" % value) return parsed_value parser = argparse.ArgumentParser( prog='train_risk_slim', description='Train a RiskSLIM classifier from the command shell', epilog='Copyright (C) 2017 <NAME>', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--data', type=str, required=True, help='csv file with training data') parser.add_argument('--results', type=str, required=True, help='name of results file (must not already exist)') parser.add_argument('--cvindices', type=is_file_on_disk, help='csv file with indices for K-fold CV') parser.add_argument('--fold', type=is_valid_fold, default=0, help='index of test fold; set as 0 to use all data for training') parser.add_argument('--weights', type=is_file_on_disk, help='csv file with non-negative weights for each point') parser.add_argument('--settings', type=is_file_on_disk, help='JSON file with additional settings for LCPA') parser.add_argument('--timelimit', type=is_negative_one_or_positive_integer, default=300, help='time limit on training (in seconds); set as -1 for no time limit') parser.add_argument('--max_size', type = is_negative_one_or_positive_integer, default=-1, help='maximum number of non-zero coefficients; set as -1 for no limit') parser.add_argument('--max_coef', type=is_positive_integer, default=5, help='value of upper and lower bounds for any coefficient') parser.add_argument('--max_offset', type=is_negative_one_or_positive_integer, default=-1, help='value of upper and 
lower bound on offset parameter; set as -1 to use a conservative value') parser.add_argument('--c0_value', type=is_positive_float, default=1e-6, help='l0 regularization parameter; set as a positive number between 0.00 and log(2)') parser.add_argument('--w_pos', type=is_positive_float, default=1.00, help='w_pos') parser.add_argument('--log', type=str, help='name of the log file') parser.add_argument('--silent', action='store_true', help='flag to suppress logging to stderr') return parser if __name__ == '__main__': parser = setup_parser() parsed = parser.parse_args() parsed_dict = vars(parsed) parsed_string = [key + ' : ' + str(parsed_dict[key]) + '\n' for key in parsed_dict] parsed_string.sort() # setup logging logger = logging.getLogger() logger = setup_logging(logger, log_to_console =(not parsed.silent), log_file = parsed.log) logger.setLevel(logging.INFO) logger.info("running 'train_risk_slim.py'") logger.info("working directory: %r" % os.getcwd()) logger.info("parsed the following variables:\n-%s" % '-'.join(parsed_string)) # check results_file does not exist if os.path.isfile(parsed.results): logger.error("results file %s already exists)" % parsed.results) logger.error("either delete %s or choose a different name" % parsed.results) sys.exit(1) # check settings_json exists / or use default settings settings = dict(DEFAULT_LCPA_SETTINGS) if parsed.settings is not None: with open(parsed.settings) as json_file: loaded_settings = json.load(json_file) loaded_settings = {str(key): loaded_settings[key] for key in loaded_settings if key in settings} settings.update(loaded_settings) #overwrite parameters specified by the user settings['max_runtime'] = float('inf') if parsed.timelimit == -1 else parsed.timelimit settings['c0_value'] = parsed.c0_value settings['w_pos'] = parsed.w_pos # check if sample weights file was specified, if not set as None logger.info("loading data and sample weights") data = load_data_from_csv(dataset_csv_file = parsed.data, sample_weights_csv_file = parsed.weights, fold_csv_file = parsed.cvindices, fold_num = parsed.fold) N, P = data['X'].shape # initialize coefficient set and offset parameter logger.info("creating coefficient set and constraints") max_coefficient = parsed.max_coef max_model_size = parsed.max_size if parsed.max_size >= 0 else float('inf') max_offset = parsed.max_offset if parsed.max_offset >= 0 else float('inf') coef_set = CoefficientSet(variable_names = data['variable_names'], lb = -max_coefficient, ub = max_coefficient, sign = 0) coef_set.update_intercept_bounds(X = data['X'], y = data['y'], max_offset = max_offset, max_L0_value = max_model_size) #print coefficient set if not parsed.silent: print(coef_set) constraints = { 'L0_min': 0, 'L0_max': max_model_size, 'coef_set': coef_set, } # fit RiskSLIM model using Lattice Cutting Plane Algorithm model_info, mip_info, lcpa_info = run_lattice_cpa(data, constraints, settings) # save output to disk results = { "date": time.strftime("%d/%m/%y", time.localtime()), "data_file": parsed.data, "fold_file": parsed.cvindices, "fold_num": parsed.settings, "results_file": parsed.results, } results.update(model_info) coef_set = results.pop('coef_set') results['coef_set_ub'] = coef_set.ub results['coef_set_lb'] = coef_set.lb results['coef_set_signs'] = coef_set.sign results['coef_set_c0'] = coef_set.c0 logger.info("saving results...") with open(parsed.results, 'wb') as outfile: pickle.dump(results, outfile, protocol=pickle.HIGHEST_PROTOCOL) logger.info("saved results as pickle file: %r" % parsed.results) logger.info('''to 
access results, use this snippet: \t\t\t import pickle \t\t\t f = open(results_file, 'rb') \t\t\t results = pickle.load(f) ''' ) logger.info("finished training") logger.info("quitting\n\n") sys.exit(0) ``` #### File: riskslim/tests/test_loss_functions.py ```python import numpy as np import riskslim.loss_functions.fast_log_loss as fast import riskslim.loss_functions.log_loss as normal import riskslim.loss_functions.log_loss_weighted as weighted import riskslim.loss_functions.lookup_log_loss as lookup from riskslim.setup_functions import _setup_training_weights np.random.seed(seed = 0) #initialize data matrix X and label vector Y n_rows = 1000000 n_cols = 20 rho_ub = 100 rho_lb = -100 #helper function s def generate_binary_data(n_rows = 1000000, n_cols = 20): X = np.random.randint(low=0, high=2, size=(n_rows, n_cols)) Y = np.random.randint(low=0, high=2, size=(n_rows, 1)) pos_ind = Y == 1 Y[~pos_ind] = -1 return X, Y def generate_integer_model(n_cols = 20, rho_ub = 100, rho_lb = -100, sparse_pct = 0.5): rho = np.random.randint(low=rho_lb, high=rho_ub, size=n_cols) rho = np.require(rho, dtype=Z.dtype, requirements=['F']) nnz_count = int(sparse_pct * np.floor(n_cols / 2)) set_to_zero = np.random.choice(range(0, n_cols), size=nnz_count, replace=False) rho[set_to_zero] = 0.0 return rho def get_score_bounds(Z_min, Z_max, rho): pos_ind = np.where(rho>0.0)[0] neg_ind = np.where(rho<0.0)[0] s_min, s_max = 0, 0 for j in pos_ind: s_max += rho[j] * Z_max[j] s_min += rho[j] * Z_min[j] for j in neg_ind: s_max += rho[j] * Z_min[j] s_min += rho[j] * Z_max[j] return s_min, s_max def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None): "global variables: L0_reg_ind" edge_values = np.vstack([Z_min * rho_lb, Z_max * rho_lb, Z_min * rho_ub, Z_max * rho_ub]) if L0_max is None or L0_max == Z_min.shape[0]: s_min = np.sum(np.min(edge_values, axis = 0)) s_max = np.sum(np.max(edge_values, axis = 0)) else: min_values = np.min(edge_values, axis = 0) s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max]) s_min_no_reg = np.sum(min_values[~L0_reg_ind]) s_min = s_min_reg + s_min_no_reg max_values = np.max(edge_values, axis = 0) s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max]) s_max_no_reg = np.sum(max_values[~L0_reg_ind]) s_max = s_max_reg + s_max_no_reg return s_min, s_max #generate data X, Y = generate_binary_data(n_rows, n_cols) Z = X * Y Z = np.require(Z, requirements=['F'], dtype=np.float64) rho = generate_integer_model(n_cols, rho_ub, rho_lb) L0_reg_ind = np.ones(n_cols, dtype='bool') L0_reg_ind[0] = False Z_min = np.min(Z, axis = 0) Z_max = np.max(Z, axis = 0) #setup weights weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0) #create lookup table min_score, max_score = get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = n_cols) loss_value_tbl, prob_value_tbl, loss_tbl_offset = lookup.get_loss_value_and_prob_tables(min_score, max_score) loss_tbl_offset = int(loss_tbl_offset) #assert correctnes of log_loss from scores function for s in range(int(min_score), int(max_score)+1): normal_value = normal.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1)) #loss_value_tbl[s+loss_tbl_offset] cython_value = fast.log_loss_value_from_scores(np.array(s, dtype = Z.dtype, ndmin = 1)) table_value = loss_value_tbl[s+loss_tbl_offset] lookup_value = lookup.log_loss_value_from_scores(np.array(s,dtype = Z.dtype, ndmin = 1), loss_value_tbl, loss_tbl_offset) assert(np.isclose(normal_value, cython_value, rtol = 1e-06)) 
assert(np.isclose(table_value, cython_value, rtol = 1e-06)) assert(np.isclose(table_value, normal_value, rtol = 1e-06)) assert(np.equal(table_value, lookup_value)) #python implementations need to be 'C' aligned instead of D aligned Z_py = np.require(Z, requirements = ['C']) rho_py = np.require(rho, requirements = ['C']) scores_py = Z_py.dot(rho_py) #define tests def normal_value_test(): return normal.log_loss_value(Z_py, rho_py) def fast_value_test(): return fast.log_loss_value(Z, rho) def lookup_value_test(): return lookup.log_loss_value(Z, rho, loss_value_tbl, loss_tbl_offset) def normal_cut_test(): return normal.log_loss_value_and_slope(Z_py, rho_py) def fast_cut_test(): return fast.log_loss_value_and_slope(Z, rho) def lookup_cut_test(): return lookup.log_loss_value_and_slope(Z, rho, loss_value_tbl, prob_value_tbl, loss_tbl_offset) # def dynamic_lookup_value_test(): # s_min_dynamic, s_max_dynamic = get_score_bounds(Z_min, Z_max, rho) # tbl, offset = lookup.get_loss_value_table(s_min_dynamic, s_max_dynamic) # return lookup.log_loss_value(Z, rho, tbl, offset) #check values and cuts normal_cut = normal_cut_test() cython_cut = fast_cut_test() lookup_cut = lookup_cut_test() assert(np.isclose(fast_value_test(), lookup_value_test())) assert(np.isclose(normal_cut[0], cython_cut[0])) assert(np.isclose(lookup_cut[0], cython_cut[0])) assert(all(np.isclose(normal_cut[1], cython_cut[1]))) assert(all(np.isclose(lookup_cut[1], cython_cut[1]))) print("passed cut tests") #weighted tests def weighted_value_test(weights): return weighted.log_loss_value(Z_py, weights, np.sum(weights), rho_py) def weighted_cut_test(weights): return weighted.log_loss_value_and_slope(Z_py, weights, np.sum(weights), rho_py) def weighted_scores_test(weights): return weighted.log_loss_value_from_scores(weights, np.sum(weights), scores_py) #w_pos = w_neg = 1.0 weights = _setup_training_weights(Y, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0) weights_match_unit_weights = all(weights == 1.0) if weights_match_unit_weights: print("tests for match between normal and weighted loss function") #value assert(np.isclose(normal_value_test(), weighted_value_test(weights))) assert(np.isclose(normal_value_test(), weighted_scores_test(weights))) #cut normal_cut = normal_cut_test() weighted_cut = weighted_cut_test(weights) assert(np.isclose(normal_cut[0], weighted_cut[0])) assert(all(np.isclose(normal_cut[1], weighted_cut[1]))) print("passed all tests for weighted implementations when w_pos = w_neg = 1.0") #w_pos = w_neg = 1.0 w_pos = 0.5 + np.random.rand() w_neg = 1.0 weights = _setup_training_weights(Y, w_pos = 0.5 + np.random.rand(), w_neg = 1.0, w_total_target = 2.0) weighted_value = weighted_value_test(weights) weighted_cut = weighted_cut_test(weights) weighted_value_from_scores = weighted_scores_test(weights) assert(np.isclose(weighted_value, weighted_value_from_scores)) assert(np.isclose(weighted_value, weighted_cut[0])) print("passed all tests for weighted loss functions when w_pos = %1.2f and w_neg = %1.2f" % (w_pos, w_neg)) # print 'timing for loss value computation \n' # %timeit -n 20 normal_value = normal_value_test() # %timeit -n 20 cython_value = fast_value_test() # %timeit -n 20 lookup_value = lookup_value_test() # # print 'timing for loss cut computation \n' # %timeit -n 20 normal_cut = normal_cut_test() # %timeit -n 20 cython_cut = fast_cut_test() # %timeit -n 20 lookup_cut = lookup_cut_test() ```
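The lookup tests above compare log(1 + exp(-s)) computed directly against a table precomputed over the integer score range. A stripped-down sketch of that equivalence; the names here are illustrative and not the riskslim API:

```python
# Sketch of the lookup-table idea: precompute the logistic loss for every integer score.
import numpy as np

def log_loss_from_score(s):
    return np.log1p(np.exp(-s))

s_min, s_max = -20, 20
offset = -s_min                                      # table index of score 0
loss_table = log_loss_from_score(np.arange(s_min, s_max + 1).astype(float))

for s in range(s_min, s_max + 1):
    assert np.isclose(loss_table[s + offset], log_loss_from_score(float(s)))
print("lookup table matches direct computation")
```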
{ "source": "jnirschl/titanic_dvc", "score": 2 }
#### File: src/data/replace_nan.py ```python import argparse import os from pathlib import Path import yaml from src.data import load_data, load_params, save_as_csv def main(train_path, test_path, output_dir): """Split data into train, dev, and test""" output_dir = Path(output_dir).resolve() assert (os.path.isdir(output_dir)), NotADirectoryError # load data train_df, test_df = load_data([train_path, test_path], sep=",", header=0, index_col="PassengerId") # load params params = load_params() # fill nans with column mean/mode on test set # TODO - switch to allow for different interpolation methods (e.g., mean, median, MICE) if params["imputation"]["method"].lower() == "mean": mean_age = float(round(train_df["Age"].mean(), 4)) mean_fare = float(round(train_df["Fare"].mean(), 4)) train_df["Age"].fillna(value=mean_age, inplace=True) test_df["Age"].fillna(value=mean_age, inplace=True) test_df["Fare"].fillna(value=mean_fare, inplace=True) # update params and save imputation scheme params["imputation"]["Age"] = mean_age params["imputation"]["Fare"] = mean_fare elif params["imputation"]["method"].lower() == "mice": # TODO MICE interpolation raise NotImplementedError else: raise NotImplementedError # update params new_params = yaml.safe_dump(params) with open("params.yaml", "w") as writer: writer.write(new_params) # save data save_as_csv([train_df, test_df], [train_path, test_path], output_dir, replace_text="_categorized.csv", suffix="_nan_imputed.csv", na_rep="nan") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-tr", "--train", dest="train_path", required=True, help="Train CSV file") parser.add_argument("-te", "--test", dest="test_path", required=True, help="Test CSV file") parser.add_argument("-o", "--out-dir", dest="output_dir", default=Path("./data/interim").resolve(), required=False, help="output directory") args = parser.parse_args() # convert categorical variables into integer codes main(args.train_path, args.test_path, args.output_dir) ```
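replace_nan.py above computes the fill values on the training set only and applies them to both splits. A small in-memory sketch of that mean-imputation step, with made-up frames instead of the project's CSV files:

```python
# Sketch of train-only mean imputation applied to both train and test frames.
import numpy as np
import pandas as pd

train = pd.DataFrame({"Age": [22.0, np.nan, 30.0], "Fare": [7.25, 71.28, 8.05]})
test = pd.DataFrame({"Age": [np.nan, 40.0], "Fare": [np.nan, 13.00]})

mean_age = round(train["Age"].mean(), 4)     # computed on the training set only
mean_fare = round(train["Fare"].mean(), 4)

train["Age"] = train["Age"].fillna(mean_age)
test["Age"] = test["Age"].fillna(mean_age)
test["Fare"] = test["Fare"].fillna(mean_fare)

print({"Age": mean_age, "Fare": mean_fare})  # the values one would record back into params.yaml
```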
{ "source": "jnis77diver/django-pipeline", "score": 2 }
#### File: django-pipeline/pipeline/decorator.py ```python from __future__ import unicode_literals """ This code is a part of django.utils.six on https://github.com/django/django/blob/stable/2.2.x/django/utils/six.py removed form Django 3.0 To keep the backward compatibility between python 2 and 3 the decorator need to be used as well, during the time we find a proper way to handle MetaClass overwright working on both versions (or dropping python 2 support). """ def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper ``` #### File: django-pipeline/pipeline/forms.py ```python from __future__ import unicode_literals from django.contrib.staticfiles.storage import staticfiles_storage from django.utils.functional import cached_property try: from django.utils.six import iteritems, add_metaclass except ImportError: from .decorator import add_metaclass def iteritems(dictionary): return dictionary.items() from .collector import default_collector from .conf import settings from .packager import Packager class PipelineFormMediaProperty(object): """A property that converts Pipeline packages to lists of files. This is used behind the scenes for any Media classes that subclass :py:class:`PipelineFormMedia`. When accessed, it converts any Pipeline packages into lists of media files and returns or forwards on lookups to that list. """ def __init__(self, get_media_files_func, media_cls, extra_files): """Initialize the property. Args: get_media_files_func (callable): The function to call to generate the media files. media_cls (type): The Media class owning the property. extra_files (object): Files listed in the original ``css`` or ``js`` attribute on the Media class. """ self._get_media_files_func = get_media_files_func self._media_cls = media_cls self._extra_files = extra_files @cached_property def _media_files(self): """The media files represented by the property.""" return self._get_media_files_func(self._media_cls, self._extra_files) def __get__(self, *args, **kwargs): """Return the media files when accessed as an attribute. This is called when accessing the attribute directly through the Media class (for example, ``Media.css``). It returns the media files directly. Args: *args (tuple, unused): Unused positional arguments. **kwargs (dict, unused): Unused keyword arguments. Returns: object: The list or dictionary containing the media files definition. """ return self._media_files def __getattr__(self, attr_name): """Return an attribute on the media files definition. This is called when accessing an attribute that doesn't otherwise exist in the property's dictionary. The call is forwarded onto the media files definition. Args: attr_name (unicode): The attribute name. Returns: object: The attribute value. Raises: AttributeError: An attribute with this name could not be found. """ return getattr(self._media_files, attr_name) def __iter__(self): """Iterate through the media files definition. This is called when attempting to iterate over this property. It iterates over the media files definition instead. Yields: object: Each entry in the media files definition. 
""" return iter(self._media_files) class PipelineFormMediaMetaClass(type): """Metaclass for the PipelineFormMedia class. This is responsible for converting CSS/JavaScript packages defined in Pipeline into lists of files to include on a page. It handles access to the :py:attr:`css` and :py:attr:`js` attributes on the class, generating a list of files to return based on the Pipelined packages and individual files listed in the :py:attr:`css`/:py:attr:`css_packages` or :py:attr:`js`/:py:attr:`js_packages` attributes. """ def __new__(cls, name, bases, attrs): """Construct the class. Args: name (bytes): The name of the class. bases (tuple): The base classes for the class. attrs (dict): The attributes going into the class. Returns: type: The new class. """ new_class = super(PipelineFormMediaMetaClass, cls).__new__( cls, name, bases, attrs) # If we define any packages, we'll need to use our special # PipelineFormMediaProperty class. We use this instead of intercepting # in __getattribute__ because Django does not access them through # normal property access. Instead, grabs the Media class's __dict__ # and accesses them from there. By using these special properties, we # can handle direct access (Media.css) and dictionary-based access # (Media.__dict__['css']). if 'css_packages' in attrs: new_class.css = PipelineFormMediaProperty( cls._get_css_files, new_class, attrs.get('css') or {}) if 'js_packages' in attrs: new_class.js = PipelineFormMediaProperty( cls._get_js_files, new_class, attrs.get('js') or []) return new_class def _get_css_files(cls, extra_files): """Return all CSS files from the Media class. Args: extra_files (dict): The contents of the Media class's original :py:attr:`css` attribute, if one was provided. Returns: dict: The CSS media types and files to return for the :py:attr:`css` attribute. """ packager = Packager() css_packages = getattr(cls, 'css_packages', {}) return dict( (media_target, cls._get_media_files(packager=packager, media_packages=media_packages, media_type='css', extra_files=extra_files.get(media_target, []))) for media_target, media_packages in iteritems(css_packages) ) def _get_js_files(cls, extra_files): """Return all JavaScript files from the Media class. Args: extra_files (list): The contents of the Media class's original :py:attr:`js` attribute, if one was provided. Returns: list: The JavaScript files to return for the :py:attr:`js` attribute. """ return cls._get_media_files( packager=Packager(), media_packages=getattr(cls, 'js_packages', {}), media_type='js', extra_files=extra_files) def _get_media_files(cls, packager, media_packages, media_type, extra_files): """Return source or output media files for a list of packages. This will go through the media files belonging to the provided list of packages referenced in a Media class and return the output files (if Pipeline is enabled) or the source files (if not enabled). Args: packager (pipeline.packager.Packager): The packager responsible for media compilation for this type of package. media_packages (list of unicode): The list of media packages referenced in Media to compile or return. extra_files (list of unicode): The list of extra files to include in the result. This would be the list stored in the Media class's original :py:attr:`css` or :py:attr:`js` attributes. Returns: list: The list of media files for the given packages. 
""" source_files = list(extra_files) if (not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED): default_collector.collect() for media_package in media_packages: package = packager.package_for(media_type, media_package) if settings.PIPELINE_ENABLED: source_files.append( staticfiles_storage.url(package.output_filename)) else: source_files += packager.compile(package.paths) return source_files @add_metaclass(PipelineFormMediaMetaClass) class PipelineFormMedia(object): """Base class for form or widget Media classes that use Pipeline packages. Forms or widgets that need custom CSS or JavaScript media on a page can define a standard :py:class:`Media` class that subclasses this class, listing the CSS or JavaScript packages in :py:attr:`css_packages` and :py:attr:`js_packages` attributes. These are formatted the same as the standard :py:attr:`css` and :py:attr:`js` attributes, but reference Pipeline package names instead of individual source files. If Pipeline is enabled, these will expand to the output files for the packages. Otherwise, these will expand to the list of source files for the packages. Subclasses can also continue to define :py:attr:`css` and :py:attr:`js` attributes, which will be returned along with the other output/source files. Example: from django import forms from pipeline.forms import PipelineFormMedia class MyForm(forms.Media): ... class Media(PipelineFormMedia): css_packages = { 'all': ('my-form-styles-package', 'other-form-styles-package'), 'print': ('my-form-print-styles-package',), } js_packages = ('my-form-scripts-package',) js = ('some-file.js',) """ ``` #### File: tests/tests/test_compiler.py ```python from __future__ import unicode_literals import sys from unittest import skipIf, skipUnless from django.conf import settings from django.contrib.staticfiles.storage import staticfiles_storage from django.test import TestCase from django.test.client import RequestFactory from django.utils.encoding import smart_bytes from pipeline.collector import default_collector from pipeline.compilers import Compiler, CompilerBase, SubProcessCompiler from pipeline.exceptions import CompilerError from pipeline.utils import to_class from tests.utils import _, pipeline_settings class FailingCompiler(SubProcessCompiler): output_extension = 'junk' def match_file(self, path): return path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): command = (("/usr/bin/env", "false",),) return self.execute_command(command) class InvalidCompiler(SubProcessCompiler): output_extension = 'junk' def match_file(self, path): return path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): command = ( ("this-exists-nowhere-as-a-command-and-should-fail",), infile, outfile ) return self.execute_command(command) class CompilerWithEmptyFirstArg(SubProcessCompiler): output_extension = 'junk' def match_file(self, path): return path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): command = ( ('', '/usr/bin/env', 'cat'), infile, ) return self.execute_command(command, stdout_captured=outfile) class CopyingCompiler(SubProcessCompiler): output_extension = 'junk' def match_file(self, path): return path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): command = ( "cp", infile, outfile ) return self.execute_command(command) class LineNumberingCompiler(SubProcessCompiler): output_extension = 'junk' def match_file(self, path): return 
path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): command = (("/usr/bin/env", "cat"), ("-n",), infile,) return self.execute_command(command, stdout_captured=outfile) class DummyCompiler(CompilerBase): output_extension = 'js' def match_file(self, path): return path.endswith('.coffee') def compile_file(self, infile, outfile, outdated=False, force=False): return @pipeline_settings(COMPILERS=['tests.tests.test_compiler.DummyCompiler']) class DummyCompilerTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_output_path(self): compiler_class = self.compiler.compilers[0] compiler = compiler_class(verbose=self.compiler.verbose, storage=self.compiler.storage) output_path = compiler.output_path("js/helpers.coffee", "js") self.assertEqual(output_path, "js/helpers.js") def test_compilers_class(self): compilers_class = self.compiler.compilers self.assertEqual(compilers_class[0], DummyCompiler) def test_compile(self): paths = self.compiler.compile([ _('pipeline/js/dummy.coffee'), _('pipeline/js/application.js'), ]) self.assertEqual([_('pipeline/js/dummy.js'), _('pipeline/js/application.js')], list(paths)) def tearDown(self): default_collector.clear() @skipIf(sys.platform.startswith("win"), "requires posix platform") @pipeline_settings(COMPILERS=['tests.tests.test_compiler.LineNumberingCompiler']) class CompilerStdoutTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_output_path(self): compiler_class = self.compiler.compilers[0] compiler = compiler_class(verbose=self.compiler.verbose, storage=self.compiler.storage) output_path = compiler.output_path("js/helpers.coffee", "js") self.assertEqual(output_path, "js/helpers.js") def test_compile(self): paths = self.compiler.compile([_('pipeline/js/dummy.coffee')]) self.assertEqual([_('pipeline/js/dummy.junk')], list(paths)) def tearDown(self): default_collector.clear() @skipIf(sys.platform.startswith("win"), "requires posix platform") @pipeline_settings(COMPILERS=['tests.tests.test_compiler.CopyingCompiler']) class CompilerSelfWriterTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_output_path(self): compiler_class = self.compiler.compilers[0] compiler = compiler_class(verbose=self.compiler.verbose, storage=self.compiler.storage) output_path = compiler.output_path("js/helpers.coffee", "js") self.assertEqual(output_path, "js/helpers.js") def test_compile(self): paths = self.compiler.compile([_('pipeline/js/dummy.coffee')]) default_collector.collect() self.assertEqual([_('pipeline/js/dummy.junk')], list(paths)) def tearDown(self): default_collector.clear() @pipeline_settings(COMPILERS=['tests.tests.test_compiler.CompilerWithEmptyFirstArg']) class CompilerWithEmptyFirstArgTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_compile(self): paths = self.compiler.compile([_('pipeline/js/dummy.coffee')]) default_collector.collect() self.assertEqual([_('pipeline/js/dummy.junk')], list(paths)) def tearDown(self): default_collector.clear() @pipeline_settings(COMPILERS=['tests.tests.test_compiler.InvalidCompiler']) class InvalidCompilerTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_compile(self): with self.assertRaises(CompilerError) as cm: self.compiler.compile([_('pipeline/js/dummy.coffee')]) e = cm.exception self.assertEqual( e.command, ['this-exists-nowhere-as-a-command-and-should-fail', 
'pipeline/js/dummy.coffee', 'pipeline/js/dummy.junk']) self.assertEqual(e.error_output, '') def tearDown(self): default_collector.clear() @skipIf(sys.platform.startswith("win"), "requires posix platform") @pipeline_settings(COMPILERS=['tests.tests.test_compiler.FailingCompiler']) class FailingCompilerTest(TestCase): def setUp(self): default_collector.collect() self.compiler = Compiler() def test_compile(self): with self.assertRaises(CompilerError) as cm: self.compiler.compile([_('pipeline/js/dummy.coffee')]) e = cm.exception self.assertEqual(e.command, ['/usr/bin/env', 'false']) self.assertEqual(e.error_output, '') def tearDown(self): default_collector.clear() @skipUnless(settings.HAS_NODE, "requires node") class CompilerImplementation(TestCase): def setUp(self): self.compiler = Compiler() default_collector.collect(RequestFactory().get('/')) def tearDown(self): default_collector.clear() def _test_compiler(self, compiler_cls_str, infile, expected): compiler_cls = to_class(compiler_cls_str) compiler = compiler_cls(verbose=False, storage=staticfiles_storage) infile_path = staticfiles_storage.path(infile) outfile_path = compiler.output_path(infile_path, compiler.output_extension) compiler.compile_file(_(infile_path), _(outfile_path), force=True) with open(outfile_path) as f: result = f.read() with staticfiles_storage.open(expected) as f: expected = f.read() self.assertEqual(smart_bytes(result), expected) def test_sass(self): self._test_compiler('pipeline.compilers.sass.SASSCompiler', 'pipeline/compilers/scss/input.scss', 'pipeline/compilers/scss/expected.css') def test_coffeescript(self): self._test_compiler('pipeline.compilers.coffee.CoffeeScriptCompiler', 'pipeline/compilers/coffee/input.coffee', 'pipeline/compilers/coffee/expected.js') def test_less(self): self._test_compiler('pipeline.compilers.less.LessCompiler', 'pipeline/compilers/less/input.less', 'pipeline/compilers/less/expected.css') def test_es6(self): self._test_compiler('pipeline.compilers.es6.ES6Compiler', 'pipeline/compilers/es6/input.es6', 'pipeline/compilers/es6/expected.js') def test_stylus(self): self._test_compiler('pipeline.compilers.stylus.StylusCompiler', 'pipeline/compilers/stylus/input.styl', 'pipeline/compilers/stylus/expected.css') def test_livescript(self): self._test_compiler('pipeline.compilers.livescript.LiveScriptCompiler', 'pipeline/compilers/livescript/input.ls', 'pipeline/compilers/livescript/expected.js') ```
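All of the test compilers above follow the same contract: subclass `SubProcessCompiler` (or `CompilerBase`), declare `output_extension`, decide which paths you own in `match_file()`, and shell out in `compile_file()` via `execute_command()`. A hedged sketch of a project-level compiler written against that same contract; the `tsc` command line is only an example, and the `PIPELINE['COMPILERS']` registration in the trailing comment is the usual django-pipeline setting, not something verified from this repository:

```python
# Minimal sketch of a custom compiler following the pattern used by the
# test compilers above (assumed project code, not part of django-pipeline).
from pipeline.compilers import SubProcessCompiler


class TypeScriptCompiler(SubProcessCompiler):
    output_extension = 'js'

    def match_file(self, path):
        # Only hand .ts files to this compiler.
        return path.endswith('.ts')

    def compile_file(self, infile, outfile, outdated=False, force=False):
        if not outdated and not force:
            return  # compiled output is already up to date
        command = (
            ('tsc', '--outFile', outfile),
            infile,
        )
        return self.execute_command(command)


# Registration would then go in settings.py, e.g. (assumed):
# PIPELINE = {
#     'COMPILERS': ['myapp.compilers.TypeScriptCompiler'],
# }
```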
{ "source": "jnisarg/django-scraper", "score": 3 }
#### File: management/commands/get_or_create_superuser.py
```python
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model

User = get_user_model()


class Command(BaseCommand):
    def handle(self, *args, **options):
        self.stdout.write("Checking if superuser exists...")
        try:
            admin = User.objects.get(username="admin")
            if admin.is_superuser:
                # admin.set_password("<PASSWORD>")
                # admin.save()
                self.stdout.write("Superuser found...")
            else:
                admin.delete()
        except User.DoesNotExist:
            admin = User.objects.create_superuser(username="admin", password="<PASSWORD>")
            self.stdout.write("Superuser created...")
```
#### File: app/core/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
from django.core.paginator import Paginator

from .models import NewsItem
from .tasks import scrape_dev_to_task, scrape_hacker_news_task


def news_list_ordering(request, qs):
    source = request.GET.get("source", None)
    author = request.GET.get("author", None)
    title = request.GET.get("title", None)
    date = request.GET.get("date", None)

    if source:
        qs = qs.order_by("-source") if source == "desc" else qs.order_by("source")
    elif author:
        qs = qs.order_by("-author") if author == "desc" else qs.order_by("author")
    elif title:
        qs = qs.order_by("-title") if title == "desc" else qs.order_by("title")
    elif date:
        qs = qs.order_by("-publish_date") if date == "desc" else qs.order_by("publish_date")
    else:
        qs = qs.order_by("-created")

    return qs


def news_list_view(request):
    query = request.GET.get("q", None)
    page_number = request.GET.get("page")

    articles = NewsItem.objects.all()

    if not articles.count():
        scrape_hacker_news_task.delay()
        scrape_dev_to_task.delay()

    if query:
        articles = articles.search(query)

    articles = news_list_ordering(request, articles)

    paginator = Paginator(articles, 15)

    context = {
        "object_list": paginator.get_page(page_number),
        "total_count": articles.count(),
    }

    return render(request, "news_list.html", context)
```
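`news_list_view` calls `articles.search(query)`, which implies `NewsItem` exposes a custom queryset. Since `core/models.py` is not included above, the following is only a guess at its shape, with the field names (`source`, `author`, `title`, `publish_date`, `created`) inferred from the ordering helper:

```python
# Hypothetical sketch of the queryset the view's .search() call implies.
# The real core/models.py is not shown; fields are inferred from the view code.
from django.db import models
from django.db.models import Q


class NewsItemQuerySet(models.QuerySet):
    def search(self, query):
        # Case-insensitive match against the fields the list view sorts on.
        return self.filter(
            Q(title__icontains=query)
            | Q(author__icontains=query)
            | Q(source__icontains=query)
        )


class NewsItem(models.Model):
    source = models.CharField(max_length=100)
    author = models.CharField(max_length=100)
    title = models.CharField(max_length=255)
    url = models.URLField()
    publish_date = models.DateTimeField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)

    objects = NewsItemQuerySet.as_manager()
```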
{ "source": "jnishii/midi-roll", "score": 3 }
#### File: midi-roll/midiroll/roll.py
```python
import streamlit as st
import librosa
import librosa.display
import os
import mido
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
import plotly.graph_objects as go
import plotly.express as px
import re
from pathlib import Path


class MidiFile(mido.MidiFile):
    def __init__(self, midifile, verbose=False):
        self.sr = 10  # down sampling rate from MIDI ticks to the time axis
        self.meta = {}
        self.max_nch = 16

        mido.MidiFile.__init__(self, midifile)
        self.fpath = Path(midifile)
        self.events, self.nch = self.get_events(verbose)
        self.roll, self.note_range, self.intensity_range = self.get_roll(self.events)
        self.length_ticks = self.get_total_ticks()
        self.length_seconds = mido.tick2second(
            self.length_ticks, self.ticks_per_beat, self.get_tempo())
        self.ticks_per_sec = self.length_ticks / self.length_seconds  # MIDI ticks/sec
        self.xticks_per_sec = self.ticks_per_sec / self.sr

        st.sidebar.write('## midi file')
        st.sidebar.write("Num. of tracks: ", len(self.tracks))
        st.sidebar.write("Num. of active channels: ", self.nch)
        st.sidebar.write("Intensity range [0, 100]: [{}, {}]".format(
            self.intensity_range[0], self.intensity_range[1]))
        st.sidebar.write("Note range [0, 127]: [{}, {}]".format(
            self.note_range[0], self.note_range[1]))
        st.sidebar.write("ticks/beat: ", self.ticks_per_beat)
        st.sidebar.write("ticks/second: ", self.ticks_per_sec)
        st.sidebar.write("Tick length: [ticks]", self.length_ticks)
        st.sidebar.write("Time length [s]: ", self.length_seconds)

    @st.cache
    def get_tempo(self):
        try:
            return self.meta["set_tempo"]["tempo"]
        except:
            return 500000

    @st.cache
    def get_total_ticks(self):
        max_ticks = 0
        for channel in range(self.nch):
            ticks = sum(msg.time for msg in self.events[channel])
            if ticks > max_ticks:
                max_ticks = ticks
        return max_ticks

    @st.cache
    def get_events(self, verbose=False):
        """
        Extract self.max_nch (default: 16) channel data from MIDI and return a list.
        Lyrics and meta data used in extra channels are not included in the list.

        Returns:
            list : [[ch1],[ch2]....[ch16]]  # Note that empty channels are removed!
""" if verbose: print(self) mid = self events = [[] for i in range(self.max_nch)] for track in mid.tracks: for msg in track: try: channel = msg.channel events[channel].append(msg) except AttributeError: try: if type(msg) != type(mido.UnknownMetaMessage): self.meta[msg.type] = msg.dict() else: pass except: print("error", type(msg)) events = list(filter(None, events)) # remove emtpy channel return events, len(events) @st.cache def get_roll(self, events, verbose=False): """ Convert event (channel) data to piano roll data """ intensity_range = [100,0] # [min, max] adjusted by get_roll() note_range = [127,0] # [min, max] adjusted by get_roll() length_ticks = self.get_total_ticks() # get total length in tick unit roll = np.zeros( (self.nch, 128, length_ticks // self.sr), dtype="int8") register_note = [int(-1)]*128 # register the state (on/off) of each key register_timbre = np.ones(self.nch) # register the state (program_change) of each channel for idx, channel in enumerate(events): time_counter = 0 volume = 100 if verbose: print("channel", idx, "start") for msg in channel: if msg.type == "control_change": if msg.is_cc(7): # if msg.control == 7: Main Volume volume = 100*msg.value //127 # [0, 100] if msg.is_cc(11): # if msg.control == 11: Expression Controller # volume[0,100] x expression[0,127]/127 volume *= msg.value // 127 if msg.type == "program_change": register_timbre[idx] = msg.program if verbose: print("channel", idx, "pc", msg.program, "time", time_counter, "duration", msg.time) if msg.type == "note_on": if verbose: print("on ", msg.note, "time", time_counter, "duration", msg.time, "velocity", msg.velocity) # note_on_start_time = time_counter // self.sr note_on_end_time = (time_counter + msg.time) // self.sr intensity = volume * msg.velocity // 127 if intensity_range[0] > intensity: # update minimum intensity intensity_range[0] = intensity if intensity_range[1] < intensity: # update maximum intensity intensity_range[1] = intensity if register_note[msg.note] != -1: # not after note_off last_end_time, last_intensity = register_note[msg.note] roll[idx, msg.note, last_end_time:note_on_end_time] = last_intensity register_note[msg.note] = (note_on_end_time, intensity) if note_range[0] > msg.note: # update minimum note note_range[0] = msg.note if note_range[1] < msg.note: # update maximum note note_range[1] = msg.note if msg.type == "note_off": if verbose: print("off", msg.note, "time", time_counter, "duration", msg.time, "velocity", msg.velocity) # note_off_start_time = time_counter // self.sr note_off_end_time = (time_counter + msg.time) // self.sr last_end_time, last_intensity = register_note[msg.note] roll[idx, msg.note, last_end_time:note_off_end_time] = last_intensity register_note[msg.note] = -1 # reinitialize register time_counter += msg.time # if there is a note not closed at the end of a channel, close it for key, data in enumerate(register_note): if data != -1: note_on_end_time = data[0] intensity = data[1] # note_off_start_time = time_counter // self.sr roll[idx, key, note_on_end_time:] = intensity register_note[idx] = -1 return roll, note_range, intensity_range def _grp_init(self, figsize=(15, 9), xlim=None, ylim=None, bgcolor='white'): """ Display basic information and initialize graphics. 
Called by draw_roll() """ dsp_len_seconds = xlim[1]-xlim[0] # x ticks xticks_interval_sec = dsp_len_seconds // 10 if dsp_len_seconds > 10 else dsp_len_seconds/10 xticks_interval = xticks_interval_sec * self.xticks_per_sec #xticks_interval = mido.second2tick( # xticks_interval_sec, self.ticks_per_beat, self.get_tempo()) / self.sr # [ticks/interval] #print("xticks_interval_sec: ", xticks_interval_sec) #print("xticks_interval: {} [ticks/label]".format(xticks_interval)) # Initialize graphics plt.rcParams["font.size"] = 20 fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) ax.axis("equal") ax.set_facecolor(bgcolor) nxticks = int(self.length_ticks//xticks_interval) plt.xticks( [int(x * xticks_interval) for x in range(nxticks)], [round(x * xticks_interval_sec, 2) for x in range(nxticks)] ) plt.yticks([y*12 for y in range(9)], [y*12 for y in range(9)]) ax.set_xlabel("time [s]") ax.set_ylabel("note") xlim_ticks=[0,self.length_ticks-1] if xlim != None: #xticks_per_sec = xticks_interval/xticks_interval_sec xlim_ticks=np.array(xlim)*self.xticks_per_sec ax.set_xlim(xlim_ticks) if ylim == None: ylim=[0, 127] elif ylim == "Auto" or ylim == "auto": ylim=[self.note_range[0]-1, self.note_range[1]+1] ax.set_ylim(ylim) ax.set_xlabel("time [s]") ax.set_ylabel("note") return fig, ax, xlim_ticks def get_colormap_selector(self, cmap_name=None, bgcolor='white'): """ Define color map for each channel """ cmap_list=(None,'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds', 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn') try: default_idx=cmap_list.index(cmap_name) except ValueError: default_idx=None cmap_name=st.sidebar.selectbox('colormap', cmap_list, index=default_idx) if cmap_name==None: transparent = colorConverter.to_rgba(bgcolor) colors = [ mpl.colors.to_rgba(mpl.colors.hsv_to_rgb( (i / self.nch, 1, 1)), alpha=1) for i in range(self.nch) ] cmaps = [ mpl.colors.LinearSegmentedColormap.from_list( 'my_cmap', [transparent, colors[i]], 128) for i in range(self.nch) ] else: cmap = plt.cm.get_cmap(cmap_name) cmaps = [ cmap for i in range(self.nch) ] """ make look up table (LUT) data, e.g., (K=3) array([[0. , 0. , 0. , 0. ], [0.5, 0. , 0. , 0.2], [1. , 0. , 0. , 0.4], [0. , 0. , 0. , 0.6], [1. , 0. , 0. , 0.8], [0. , 0. , 0. , 1. ]]) The first 3 rows are colormap, and the last 3 rows are the colours for data low and high out-of-range values and for masked values. https://stackoverflow.com/questions/18035411/meaning-of-the-colormap-lut-list-in-matplotlib-color """ for i in range(self.nch): cmaps[i]._init() alphas = np.linspace(0, 1, cmaps[i].N + 3) # about 3 extra rows, see the example above cmaps[i]._lut[:, -1] = alphas return cmaps def get_bgcolor_slider(self, bgcolor='white'): bgcolors=('white','black') default_idx=bgcolors.index(bgcolor) bgcolor=st.sidebar.selectbox('background color', bgcolors, index=default_idx) return bgcolor def get_xlim_slider(self,xlim): if xlim == None: xlim = [0, int(self.length_seconds)] xlim = st.sidebar.slider('Time range [s]: ', min_value=0, max_value=int( self.length_seconds), value=(xlim[0], xlim[1])) return xlim def draw_roll(self, figsize=(15, 9), xlim=None, ylim=None, cmaps=None, bgcolor='white', vlines=None, hlines=False, colorbar=False): """Create piano roll image. 
Args: figsize (tuple or list): figure size xlim: Time range to be displayed [s] None (not specified) : Full range tuple or list : (xmin, xmax) [s] ylim: Range of notes to be displayed in vertical axis None (not specified) : Full range of notes "Auto" or "auto" : automatic range adjustment tuple or list : range of notes to be displayed [s] bgcolor (string): name of background color colorbar (boolean): enable colorbar of intensity """ if xlim == None: xlim = [0,int(self.length_seconds)] fig, ax1, xlim_ticks = self._grp_init( figsize=figsize, xlim=xlim, ylim=ylim, bgcolor=bgcolor) if cmaps == None: self.get_colormap_selector('Purple') for i in range(self.nch): try: target_roll = self.roll[i, :, :int(xlim_ticks[1])] #target_roll = self.roll[i, :, :] max_intensity = np.max(np.array(target_roll)) #print("max_intensity:", max_intensity) im = ax1.imshow(self.roll[i], origin="lower", interpolation='nearest', cmap=cmaps[i], aspect='auto', clim=[0, max_intensity]) if hlines != False: ax1.hlines([12*(i+1) for i in range(9)], xlim_ticks[0], xlim_ticks[1], colors='b', linewidth=1, linestyles='dotted') if vlines != None: ax1.vlines(np.array(vlines)*self.xticks_per_sec, ylim[0], ylim[1], color='r', linewidth=1, colors='b') if colorbar: fig.colorbar(im) except IndexError: pass # draw color bar for channel color # if colorbar: # cmap = mpl.colors.LinearSegmentedColormap.from_list( # 'my_cmap', colors, self.nch) # ax2 = fig.add_axes([0.1, 0.9, 0.8, 0.05]) # cbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, # orientation='horizontal', # ticks=list(range(self.nch))) #ax1.set_title(self.fpath.name) #plt.draw() #plt.ion() with st.container(): st.pyplot(fig) plt.savefig("outputs/"+self.fpath.name+".png", bbox_inches="tight") #plt.show(block=True) def get_dirs(folder_path): dirs = [ f for f in os.listdir(folder_path) if os.path.isdir(folder_path+"/"+f) ] return (sorted(dirs)) # def file_selector(folder_path): # filenames = os.listdir(folder_path) # selected_filename = st.selectbox('Select a file', filenames) # return os.path.join(folder_path, selected_filename) def show_wav(file): wav,sr = librosa.load(file) wav_seconds=int(len(wav)/sr) st.sidebar.write('## audio file') st.sidebar.write('sampling rate [Hz]: ', sr) st.audio(file) def main(): dir = "data/pedb2_v0.0.1.b/" #target="bac-inv001-o-p2" target = "bac-wtc101-p-a-p1" st.set_page_config(layout='wide') dirs = get_dirs(dir) target = st.sidebar.selectbox('Select file to visualize', dirs) st.write(target) path_wav = "{0}/{1}/{1}.wav".format(dir, target) show_wav(path_wav) path_mid = "{0}/{1}/{1}.mid".format(dir, target) mid = MidiFile(path_mid) path_pdf = "{0}/{1}/{1}.pdf".format(dir, target) st.sidebar.write('[PDF]({})'.format(path_pdf)) # events = mid.get_events() # roll = mid.get_roll(verbose=False) st.sidebar.write("## parameters") # cmap_list: colormap name # https://matplotlib.org/stable/tutorials/colors/colormaps.html bgcolor = mid.get_bgcolor_slider(bgcolor='white') cmaps = mid.get_colormap_selector(cmap_name='Purples',bgcolor=bgcolor) hlines=st.sidebar.checkbox('Draw lines on C') xlim=[0,4] xlim=mid.get_xlim_slider(xlim) params={ 'figsize': (20, 4), 'ylim': [30,92], 'cmaps': cmaps, 'bgcolor': bgcolor, 'vlines': xlim, 'hlines': hlines, 'colorbar': None } mid.draw_roll(xlim=None, **params) mid.draw_roll(xlim=xlim, **params) st.sidebar.write("## MIDI database") st.sidebar.write("[PEDB: Music Performance Expression with Phrase Structure](https://crestmuse.jp/pedb_edition2/)") if __name__ == "__main__": main() ```
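Most of the x-axis bookkeeping in `MidiFile` above reduces to mido's tick/second conversion: `length_ticks` becomes `length_seconds` via the tempo (500000 µs per beat when no `set_tempo` event is found), and `xticks_per_sec` folds in the down-sampling factor `sr`. A small self-contained check of that arithmetic with made-up numbers:

```python
import mido

ticks_per_beat = 480   # typical MIDI resolution
tempo = 500000         # microseconds per quarter note (mido's default, 120 BPM)
length_ticks = 96000   # made-up total length of a file, in ticks
sr = 10                # the class's down-sampling factor

length_seconds = mido.tick2second(length_ticks, ticks_per_beat, tempo)
ticks_per_sec = length_ticks / length_seconds  # MIDI ticks per second
xticks_per_sec = ticks_per_sec / sr            # roll columns per second

print(length_seconds)   # 100.0 s at 120 BPM
print(ticks_per_sec)    # 960.0
print(xticks_per_sec)   # 96.0
```

At the default tempo this works out to two beats per second, which is why a 96000-tick file maps to 100 s and each second of audio spans 96 columns of the down-sampled piano roll.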