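# The classes and functions below follow the "dummy object" pattern used by Hugging Face
# Transformers (utils/dummy_pt_objects.py): when the torch backend is not installed, each
# model symbol resolves to a placeholder whose constructor raises a clear error naming the
# missing backend. DummyObject and requires_backends are defined elsewhere in the library;
# the sketch below only illustrates the idea and is not the library's actual implementation.
#
#     class DummyObject(type):
#         # Attribute access on the placeholder class itself also reports the missing backend.
#         def __getattr__(cls, key):
#             requires_backends(cls, cls._backends)
#
#     def requires_backends(obj, backends):
#         name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
#         raise ImportError(f"{name} requires the following backend(s): {', '.join(backends)}")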
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CamembertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CanineForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CaninePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_canine(*args, **kwargs):
requires_backends(load_tf_weights_in_canine, ["torch"])
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_convbert(*args, **kwargs):
requires_backends(load_tf_weights_in_convbert, ["torch"])
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvNextForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvNextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvNextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CTRLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaV2ForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DeiTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPRContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ElectraForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_electra(*args, **kwargs):
requires_backends(load_tf_weights_in_electra, ["torch"])
class EncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaubertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertWithLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FNetForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedFSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FunnelBaseModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_funnel(*args, **kwargs):
requires_backends(load_tf_weights_in_funnel, ["torch"])
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPT2DoubleHeadsModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2LMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt2(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt2, ["torch"])
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt_neo(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTJForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class HubertForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
#!/usr/bin/env python
'''
SPARTA - Network Infrastructure Penetration Testing Tool (http://sparta.secforce.com)
Copyright (c) 2015 SECFORCE (<NAME> and <NAME>)
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys, os, ntpath, signal, re, subprocess # for file operations, to kill processes, for regex, for subprocesses
import Queue
from PyQt4.QtGui import * # for filters dialog
from app.logic import *
from app.auxiliary import *
from app.settings import *
class Controller():
# initialisations that will happen once - when the program is launched
def __init__(self, view, logic):
self.version = 'SPARTA 1.0.3 (BETA)' # update this every time you commit!
self.logic = logic
self.view = view
self.view.setController(self)
self.loadSettings() # creation of context menu actions from settings file and set up of various settings
self.initNmapImporter()
self.initScreenshooter()
self.initBrowserOpener()
self.start() # initialisations (globals, etc)
self.initTimers()
# initialisations that will happen every time we create/open a project - can happen several times in the program's lifetime
def start(self, title='*untitled'):
self.processes = [] # to store all the processes we run (nmaps, niktos, etc)
self.fastProcessQueue = Queue.Queue() # to manage fast processes (banner, snmpenum, etc)
#self.slowProcessQueue = Queue.Queue() # to manage slow processes (dirbuster, hydra, etc)
self.fastProcessesRunning = 0 # counts the number of fast processes currently running
self.slowProcessesRunning = 0 # counts the number of slow processes currently running
self.nmapImporter.setDB(self.logic.db) # tell nmap importer which db to use
self.updateOutputFolder() # tell screenshooter where the output folder is
self.view.start(title)
def initNmapImporter(self):
self.nmapImporter = NmapImporter()
self.nmapImporter.tick.connect(self.view.importProgressWidget.setProgress) # update the progress bar
self.nmapImporter.done.connect(self.nmapImportFinished)
self.nmapImporter.schedule.connect(self.scheduler) # run automated attacks
def initScreenshooter(self):
self.screenshooter = Screenshooter(self.settings.general_screenshooter_timeout) # screenshot taker object (different thread)
self.screenshooter.done.connect(self.screenshotFinished)
def initBrowserOpener(self):
self.browser = BrowserOpener() # browser opener object (different thread)
def initTimers(self): # these timers are used to prevent from updating the UI several times within a short time period - which freezes the UI
self.updateUITimer = QTimer()
self.updateUITimer.setSingleShot(True)
self.updateUITimer.timeout.connect(self.view.updateProcessesTableView)
self.updateUITimer.timeout.connect(self.view.updateToolsTableView)
self.updateUI2Timer = QTimer()
self.updateUI2Timer.setSingleShot(True)
self.updateUI2Timer.timeout.connect(self.view.updateInterface)
# this function fetches all the settings from the conf file. Among other things it populates the actions lists that will be used in the context menus.
def loadSettings(self):
self.settingsFile = AppSettings()
self.settings = Settings(self.settingsFile) # load settings from conf file (create conf file first if necessary)
self.originalSettings = Settings(self.settingsFile) # save the original state so that we can know if something has changed when we exit SPARTA
self.logic.setStoreWordlistsOnExit(self.settings.brute_store_cleartext_passwords_on_exit=='True')
self.view.settingsWidget.setSettings(Settings(self.settingsFile))
def applySettings(self, newSettings): # call this function when clicking 'apply' in the settings menu (after validation)
print '[+] Applying settings!'
self.settings = newSettings
def cancelSettings(self): # called when the user presses cancel in the Settings dialog
self.view.settingsWidget.setSettings(self.settings) # resets the dialog's settings to the current application settings to forget any changes made by the user
def saveSettings(self):
if not self.settings == self.originalSettings:
print '[+] Settings have been changed.'
self.settingsFile.backupAndSave(self.settings)
else:
print '[+] Settings have NOT been changed.'
def getSettings(self):
return self.settings
#################### AUXILIARY ####################
def getCWD(self):
return self.logic.cwd
def getProjectName(self):
return self.logic.projectname
def getVersion(self):
return self.version
def getRunningFolder(self):
return self.logic.runningfolder
def getOutputFolder(self):
return self.logic.outputfolder
def getUserlistPath(self):
return self.logic.usernamesWordlist.filename
def getPasslistPath(self):
return self.logic.passwordsWordlist.filename
def updateOutputFolder(self):
self.screenshooter.updateOutputFolder(self.logic.outputfolder+'/screenshots') # update screenshot folder
def copyNmapXMLToOutputFolder(self, filename):
self.logic.copyNmapXMLToOutputFolder(filename)
def isTempProject(self):
return self.logic.istemp
def getDB(self):
return self.logic.db
def getRunningProcesses(self):
return self.processes
def getHostActions(self):
return self.settings.hostActions
def getPortActions(self):
return self.settings.portActions
def getPortTerminalActions(self):
return self.settings.portTerminalActions
#################### ACTIONS ####################
def createNewProject(self):
self.view.closeProject() # removes temp folder (if any)
self.logic.createTemporaryFiles() # creates new temp files and folders
self.start() # initialisations (globals, etc)
def openExistingProject(self, filename):
self.view.closeProject()
self.view.importProgressWidget.reset('Opening project..')
self.view.importProgressWidget.show() # show the progress widget
self.logic.openExistingProject(filename)
self.start(ntpath.basename(str(self.logic.projectname))) # initialisations (globals, signals, etc)
self.view.restoreToolTabs() # restores the tool tabs for each host
self.view.hostTableClick() # click on the first host to restore its host tool tabs
self.view.importProgressWidget.hide() # hide the progress widget
def saveProject(self, lastHostIdClicked, notes):
if not lastHostIdClicked == '':
self.logic.storeNotesInDB(lastHostIdClicked, notes)
def saveProjectAs(self, filename, replace=0):
success = self.logic.saveProjectAs(filename, replace)
if success:
self.nmapImporter.setDB(self.logic.db) # tell nmap importer which db to use
return success
def closeProject(self):
self.saveSettings() # backup and save config file, if necessary
self.screenshooter.terminate()
self.initScreenshooter()
self.logic.toggleProcessDisplayStatus(True)
self.view.updateProcessesTableView() # clear process table
self.logic.removeTemporaryFiles()
def addHosts(self, iprange, runHostDiscovery, runStagedNmap):
if iprange == '':
print '[-] No hosts entered..'
return
if runStagedNmap:
self.runStagedNmap(iprange, runHostDiscovery)
elif runHostDiscovery:
outputfile = self.logic.runningfolder+"/nmap/"+getTimestamp()+'-host-discover'
command = "nmap -n -sn -T4 "+iprange+" -oA "+outputfile
self.runCommand('nmap', 'nmap (discovery)', iprange, '','', command, getTimestamp(True), outputfile, self.view.createNewTabForHost(str(iprange), 'nmap (discovery)', True))
else:
outputfile = self.logic.runningfolder+"/nmap/"+getTimestamp()+'-nmap-list'
command = "nmap -n -sL "+iprange+" -oA "+outputfile
self.runCommand('nmap', 'nmap (list)', iprange, '','', command, getTimestamp(True), outputfile, self.view.createNewTabForHost(str(iprange), 'nmap (list)', True))
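# Illustrative example (hypothetical range and paths): with iprange = '10.0.0.0/24' and a
# running folder of /tmp/sparta-running, the host-discovery branch above issues something like
#   nmap -n -sn -T4 10.0.0.0/24 -oA /tmp/sparta-running/nmap/<timestamp>-host-discover
# where -sn skips port scanning (ping sweep only) and -oA writes normal/XML/grepable output,
# so the XML file can later be fed to the nmap importer thread.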
#################### CONTEXT MENUS ####################
def getContextMenuForHost(self, isChecked, showAll=True): # showAll exists because in some cases we only want to show host tools excluding portscans and 'mark as checked'
menu = QMenu()
self.nmapSubMenu = QMenu('Portscan')
actions = []
for a in self.settings.hostActions:
if "nmap" in a[1] or "unicornscan" in a[1]:
actions.append(self.nmapSubMenu.addAction(a[0]))
else:
actions.append(menu.addAction(a[0]))
if showAll:
actions.append(self.nmapSubMenu.addAction("Run nmap (staged)"))
menu.addMenu(self.nmapSubMenu)
menu.addSeparator()
if isChecked == 'True':
menu.addAction('Mark as unchecked')
else:
menu.addAction('Mark as checked')
return menu, actions
def handleHostAction(self, ip, hostid, actions, action):
if action.text() == 'Mark as checked' or action.text() == 'Mark as unchecked':
self.logic.toggleHostCheckStatus(ip)
self.view.updateInterface()
return
if action.text() == 'Run nmap (staged)':
print '[+] Purging previous portscan data for ' + str(ip) # if we are running nmap we need to purge previous portscan results
if self.logic.getPortsForHostFromDB(ip, 'tcp'):
self.logic.deleteAllPortsAndScriptsForHostFromDB(hostid, 'tcp')
if self.logic.getPortsForHostFromDB(ip, 'udp'):
self.logic.deleteAllPortsAndScriptsForHostFromDB(hostid, 'udp')
self.runStagedNmap(ip, False)
return
for i in range(0,len(actions)):
if action == actions[i]:
name = self.settings.hostActions[i][1]
invisibleTab = False
if 'nmap' in name: # to make sure different nmap scans appear under the same tool name
name = 'nmap'
invisibleTab = True
# remove all chars that are not alphanumeric from tool name (used in the outputfile's name)
outputfile = self.logic.runningfolder+"/"+re.sub("[^0-9a-zA-Z]", "", str(name))+"/"+getTimestamp()+"-"+re.sub("[^0-9a-zA-Z]", "", str(self.settings.hostActions[i][1]))+"-"+ip
command = str(self.settings.hostActions[i][2])
command = command.replace('[IP]', ip).replace('[OUTPUT]', outputfile)
# check if same type of nmap scan has already been made and purge results before scanning
if 'nmap' in command:
proto = 'tcp'
if '-sU' in command:
proto = 'udp'
if self.logic.getPortsForHostFromDB(ip, proto): # if we are running nmap we need to purge previous portscan results (of the same protocol)
self.logic.deleteAllPortsAndScriptsForHostFromDB(hostid, proto)
tabtitle = self.settings.hostActions[i][1]
self.runCommand(name, tabtitle, ip, '','', command, getTimestamp(True), outputfile, self.view.createNewTabForHost(ip, tabtitle, invisibleTab))
break
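# Note on the template expansion above: host actions come from the settings file as
# (label, tool name, command template) entries, and the template's [IP] / [OUTPUT] markers
# are expanded with plain str.replace. A hypothetical template such as
#   "nikto -h [IP] -output [OUTPUT].txt"
# would therefore become "nikto -h 10.0.0.5 -output <runningfolder>/nikto/<timestamp>-nikto-10.0.0.5.txt"
# before being handed to runCommand together with the tab created for that host.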
def getContextMenuForServiceName(self, serviceName='*', menu=None):
if menu == None: # if no menu was given, create a new one
menu = QMenu()
if serviceName == '*' or serviceName in self.settings.general_web_services.split(","):
menu.addAction("Open in browser")
menu.addAction("Take screenshot")
actions = []
for a in self.settings.portActions:
if serviceName is None or serviceName == '*' or serviceName in a[3].split(",") or a[3] == '': # if the service name exists in the portActions list show the command in the context menu
actions.append([self.settings.portActions.index(a), menu.addAction(a[0])]) # in actions list write the service and line number that corresponds to it in portActions
modifiers = QtGui.QApplication.keyboardModifiers() # if the user pressed SHIFT+Right-click show full menu
if modifiers == QtCore.Qt.ShiftModifier:
shiftPressed = True
else:
shiftPressed = False
return menu, actions, shiftPressed
def handleServiceNameAction(self, targets, actions, action, restoring=True):
if action.text() == 'Take screenshot':
for ip in targets:
url = ip[0]+':'+ip[1]
self.screenshooter.addToQueue(url)
self.screenshooter.start()
return
elif action.text() == 'Open in browser':
for ip in targets:
url = ip[0]+':'+ip[1]
self.browser.addToQueue(url)
self.browser.start()
return
for i in range(0,len(actions)):
if action == actions[i][1]:
srvc_num = actions[i][0]
for ip in targets:
tool = self.settings.portActions[srvc_num][1]
tabtitle = self.settings.portActions[srvc_num][1]+" ("+ip[1]+"/"+ip[2]+")"
outputfile = self.logic.runningfolder+"/"+re.sub("[^0-9a-zA-Z]", "", str(tool))+"/"+getTimestamp()+'-'+tool+"-"+ip[0]+"-"+ip[1]
command = str(self.settings.portActions[srvc_num][2])
command = command.replace('[IP]', ip[0]).replace('[PORT]', ip[1]).replace('[OUTPUT]', outputfile)
if 'nmap' in command and ip[2] == 'udp':
command=command.replace("-sV","-sVU")
if 'nmap' in tabtitle: # we don't want to show nmap tabs
restoring = True
self.runCommand(tool, tabtitle, ip[0], ip[1], ip[2], command, getTimestamp(True), outputfile, self.view.createNewTabForHost(ip[0], tabtitle, restoring))
break
def getContextMenuForPort(self, serviceName='*'):
menu = QMenu()
modifiers = QtGui.QApplication.keyboardModifiers() # if the user pressed SHIFT+Right-click show full menu
if modifiers == QtCore.Qt.ShiftModifier:
serviceName='*'
terminalActions = [] # custom terminal actions from settings file
for a in self.settings.portTerminalActions: # if wildcard or the command is valid for this specific service or if the command is valid for all services
if serviceName is None or serviceName == '*' or serviceName in a[3].split(",") or a[3] == '':
terminalActions.append([self.settings.portTerminalActions.index(a), menu.addAction(a[0])])
menu.addSeparator()
menu.addAction("Send to Brute")
menu.addSeparator()
# dummy is there because we don't need the third return value
menu, actions, dummy = self.getContextMenuForServiceName(serviceName, menu)
# menu.addSeparator()
# menu.addAction("Run custom command")
return menu, actions, terminalActions
def handlePortAction(self, targets, actions, terminalActions, action, restoring):
if action.text() == 'Send to Brute':
for ip in targets:
self.view.createNewBruteTab(ip[0], ip[1], ip[3]) # ip[0] is the IP, ip[1] is the port number and ip[3] is the service name
return
if action.text() == 'Run custom command':
print 'custom command'
return
terminal = self.settings.general_default_terminal # handle terminal actions
for i in range(0,len(terminalActions)):
if action == terminalActions[i][1]:
srvc_num = terminalActions[i][0]
for ip in targets:
command = str(self.settings.portTerminalActions[srvc_num][2])
command = command.replace('[IP]', ip[0]).replace('[PORT]', ip[1])
subprocess.Popen(terminal+" -e 'bash -c \""+command+"; exec bash\"'", shell=True)
return
self.handleServiceNameAction(targets, actions, action, restoring)
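# Example of the terminal-action branch above (hypothetical values): with the default
# terminal set to 'xterm' and a settings template of "ncat [IP] [PORT]", the Popen call
# launches roughly
#   xterm -e 'bash -c "ncat 10.0.0.5 80; exec bash"'
# i.e. the tool runs in its own terminal window and drops into a shell when it finishes,
# so the window stays open for the user to inspect the output.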
def getContextMenuForProcess(self):
menu = QMenu()
killAction = menu.addAction("Kill")
clearAction = menu.addAction("Clear")
return menu
def handleProcessAction(self, selectedProcesses, action): # selectedProcesses is a list of tuples (pid, status, procId)
if action.text() == 'Kill':
if self.view.killProcessConfirmation():
for p in selectedProcesses:
if p[1]!="Running":
if p[1]=="Waiting":
#print "\t[-] Process still waiting to start. Skipping."
if str(self.logic.getProcessStatusForDBId(p[2])) == 'Running':
self.killProcess(self.view.ProcessesTableModel.getProcessPidForId(p[2]), p[2])
self.logic.storeProcessCancelStatusInDB(str(p[2]))
else:
print "\t[-] This process has already been terminated. Skipping."
else:
self.killProcess(p[0], p[2])
self.view.updateProcessesTableView()
return
if action.text() == 'Clear': # hide all the processes that are not running
self.logic.toggleProcessDisplayStatus()
self.view.updateProcessesTableView()
#################### LEFT PANEL INTERFACE UPDATE FUNCTIONS ####################
def isHostInDB(self, host):
return self.logic.isHostInDB(host)
def getHostsFromDB(self, filters):
return self.logic.getHostsFromDB(filters)
def getServiceNamesFromDB(self, filters):
return self.logic.getServiceNamesFromDB(filters)
def getProcessStatusForDBId(self, dbId):
return self.logic.getProcessStatusForDBId(dbId)
def getPidForProcess(self,dbId):
return self.logic.getPidForProcess(dbId)
def storeCloseTabStatusInDB(self,pid):
return self.logic.storeCloseTabStatusInDB(pid)
def getServiceNameForHostAndPort(self, hostIP, port):
return self.logic.getServiceNameForHostAndPort(hostIP, port)
#################### RIGHT PANEL INTERFACE UPDATE FUNCTIONS ####################
def getPortsAndServicesForHostFromDB(self, hostIP, filters):
return self.logic.getPortsAndServicesForHostFromDB(hostIP, filters)
def getHostsAndPortsForServiceFromDB(self, serviceName, filters):
return self.logic.getHostsAndPortsForServiceFromDB(serviceName, filters)
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
from argparse import Namespace
import pytest
from six import StringIO
import llnl.util.filesystem as fs
import llnl.util.link_tree
import spack.cmd.env
import spack.environment as ev
import spack.hash_types as ht
import spack.modules
import spack.util.spack_json as sjson
from spack.cmd.env import _env_create
from spack.main import SpackCommand, SpackCommandError
from spack.spec import Spec
from spack.stage import stage_prefix
from spack.util.mock_package import MockPackageMultiRepo
from spack.util.path import substitute_path_variables
# everything here uses the mock_env_path
pytestmark = [
pytest.mark.usefixtures('mutable_mock_env_path', 'config', 'mutable_mock_repo'),
pytest.mark.maybeslow
]
env = SpackCommand('env')
install = SpackCommand('install')
add = SpackCommand('add')
remove = SpackCommand('remove')
concretize = SpackCommand('concretize')
stage = SpackCommand('stage')
uninstall = SpackCommand('uninstall')
find = SpackCommand('find')
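# Each SpackCommand wrapper invokes the corresponding `spack <name>` command in-process and
# returns its captured output as a string, so the tests below can drive the CLI directly,
# e.g. env('create', 'test') behaves like running `spack env create test` and
# find('--show-concretized') like `spack find --show-concretized`.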
def check_mpileaks_and_deps_in_view(viewdir):
"""Check that the expected install directories exist."""
assert os.path.exists(str(viewdir.join('.spack', 'mpileaks')))
assert os.path.exists(str(viewdir.join('.spack', 'libdwarf')))
def check_viewdir_removal(viewdir):
"""Check that the uninstall/removal worked."""
assert (not os.path.exists(str(viewdir.join('.spack'))) or
os.listdir(str(viewdir.join('.spack'))) == ['projections.yaml'])
@pytest.fixture()
def env_deactivate():
yield
ev._active_environment = None
os.environ.pop('SPACK_ENV', None)
def test_add():
e = ev.create('test')
e.add('mpileaks')
assert Spec('mpileaks') in e.user_specs
def test_env_add_virtual():
env('create', 'test')
e = ev.read('test')
e.add('mpi')
e.concretize()
hashes = e.concretized_order
assert len(hashes) == 1
spec = e.specs_by_hash[hashes[0]]
assert spec.satisfies('mpi')
def test_env_add_nonexistant_fails():
env('create', 'test')
e = ev.read('test')
with pytest.raises(ev.SpackEnvironmentError, match=r'no such package'):
e.add('thispackagedoesnotexist')
def test_env_list(mutable_mock_env_path):
env('create', 'foo')
env('create', 'bar')
env('create', 'baz')
out = env('list')
assert 'foo' in out
assert 'bar' in out
assert 'baz' in out
# make sure `spack env list` skips invalid things in var/spack/env
mutable_mock_env_path.join('.DS_Store').ensure(file=True)
out = env('list')
assert 'foo' in out
assert 'bar' in out
assert 'baz' in out
assert '.DS_Store' not in out
def test_env_remove(capfd):
env('create', 'foo')
env('create', 'bar')
out = env('list')
assert 'foo' in out
assert 'bar' in out
foo = ev.read('foo')
with foo:
with pytest.raises(spack.main.SpackCommandError):
with capfd.disabled():
env('remove', '-y', 'foo')
assert 'foo' in env('list')
env('remove', '-y', 'foo')
out = env('list')
assert 'foo' not in out
assert 'bar' in out
env('remove', '-y', 'bar')
out = env('list')
assert 'foo' not in out
assert 'bar' not in out
def test_concretize():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
env_specs = e._get_environment_specs()
assert any(x.name == 'mpileaks' for x in env_specs)
def test_env_uninstalled_specs(install_mockery, mock_fetch):
e = ev.create('test')
e.add('cmake-client')
e.concretize()
assert any(s.name == 'cmake-client' for s in e.uninstalled_specs())
e.install_all()
assert not any(s.name == 'cmake-client' for s in e.uninstalled_specs())
e.add('mpileaks')
e.concretize()
assert not any(s.name == 'cmake-client' for s in e.uninstalled_specs())
assert any(s.name == 'mpileaks' for s in e.uninstalled_specs())
def test_env_install_all(install_mockery, mock_fetch):
e = ev.create('test')
e.add('cmake-client')
e.concretize()
e.install_all()
env_specs = e._get_environment_specs()
spec = next(x for x in env_specs if x.name == 'cmake-client')
assert spec.package.installed
def test_env_install_single_spec(install_mockery, mock_fetch):
env('create', 'test')
install = SpackCommand('install')
e = ev.read('test')
with e:
install('cmake-client')
e = ev.read('test')
assert e.user_specs[0].name == 'cmake-client'
assert e.concretized_user_specs[0].name == 'cmake-client'
assert e.specs_by_hash[e.concretized_order[0]].name == 'cmake-client'
def test_env_roots_marked_explicit(install_mockery, mock_fetch):
install = SpackCommand('install')
install('dependent-install')
# Check one explicit, one implicit install
dependent = spack.store.db.query(explicit=True)
dependency = spack.store.db.query(explicit=False)
assert len(dependent) == 1
assert len(dependency) == 1
env('create', 'test')
with ev.read('test') as e:
# make implicit install a root of the env
e.add(dependency[0].name)
e.concretize()
e.install_all()
explicit = spack.store.db.query(explicit=True)
assert len(explicit) == 2
def test_env_modifications_error_on_activate(
install_mockery, mock_fetch, monkeypatch, capfd):
env('create', 'test')
install = SpackCommand('install')
e = ev.read('test')
with e:
install('cmake-client')
def setup_error(pkg, env):
raise RuntimeError("cmake-client had issues!")
pkg = spack.repo.path.get_pkg_class("cmake-client")
monkeypatch.setattr(pkg, "setup_run_environment", setup_error)
with e:
pass
_, err = capfd.readouterr()
assert "cmake-client had issues!" in err
assert "Warning: couldn't get environment settings" in err
def test_activate_adds_transitive_run_deps_to_path(
install_mockery, mock_fetch, monkeypatch):
env('create', 'test')
install = SpackCommand('install')
e = ev.read('test')
with e:
install('depends-on-run-env')
cmds = ev.activate(e)
assert 'DEPENDENCY_ENV_VAR=1' in cmds
def test_env_install_same_spec_twice(install_mockery, mock_fetch):
env('create', 'test')
e = ev.read('test')
with e:
# The first installation outputs the package prefix, updates the view
out = install('cmake-client')
assert 'Updating view at' in out
# The second installation reports all packages already installed
out = install('cmake-client')
assert 'already installed' in out
def test_env_install_two_specs_same_dep(
install_mockery, mock_fetch, tmpdir, capsys):
"""Test installation of two packages that share a dependency with no
connection and the second specifying the dependency as a 'build'
dependency.
"""
path = tmpdir.join('spack.yaml')
with tmpdir.as_cwd():
with open(str(path), 'w') as f:
f.write("""\
env:
  specs:
  - a
  - depb
""")
env('create', 'test', 'spack.yaml')
with ev.read('test'):
with capsys.disabled():
out = install()
# Ensure both packages reach install phase processing and are installed
out = str(out)
assert 'depb: Executing phase:' in out
assert 'a: Executing phase:' in out
depb = spack.repo.path.get_pkg_class('depb')
assert depb.installed, 'Expected depb to be installed'
a = spack.repo.path.get_pkg_class('a')
assert a.installed, 'Expected a to be installed'
def test_remove_after_concretize():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
e.add('python')
e.concretize()
e.remove('mpileaks')
assert Spec('mpileaks') not in e.user_specs
env_specs = e._get_environment_specs()
assert any(s.name == 'mpileaks' for s in env_specs)
e.add('mpileaks')
assert any(s.name == 'mpileaks' for s in e.user_specs)
e.remove('mpileaks', force=True)
assert Spec('mpileaks') not in e.user_specs
env_specs = e._get_environment_specs()
assert not any(s.name == 'mpileaks' for s in env_specs)
def test_remove_command():
env('create', 'test')
assert 'test' in env('list')
with ev.read('test'):
add('mpileaks')
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
remove('mpileaks')
assert 'mpileaks' not in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
add('mpileaks')
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' not in find('--show-concretized')
with ev.read('test'):
concretize()
assert 'mpileaks' in find()
assert 'mpileaks@' not in find()
assert 'mpileaks@' in find('--show-concretized')
with ev.read('test'):
remove('mpileaks')
assert 'mpileaks' not in find()
# removed but still in last concretized specs
assert 'mpileaks@' in find('--show-concretized')
with ev.read('test'):
concretize()
assert 'mpileaks' not in find()
assert 'mpileaks@' not in find()
# now the lockfile is regenerated and it's gone.
assert 'mpileaks@' not in find('--show-concretized')
def test_environment_status(capsys, tmpdir):
with tmpdir.as_cwd():
with capsys.disabled():
assert 'No active environment' in env('status')
with ev.create('test'):
with capsys.disabled():
assert 'In environment test' in env('status')
with ev.Environment('local_dir'):
with capsys.disabled():
assert os.path.join(os.getcwd(), 'local_dir') in env('status')
e = ev.Environment('myproject')
e.write()
with tmpdir.join('myproject').as_cwd():
with e:
with capsys.disabled():
assert 'in current directory' in env('status')
def test_env_status_broken_view(
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
install_mockery
):
with ev.create('test'):
install('trivial-install-test-package')
# switch to a new repo that doesn't include the installed package
# test that Spack detects the missing package and warns the user
new_repo = MockPackageMultiRepo()
with spack.repo.use_repositories(new_repo):
output = env('status')
assert 'In environment test' in output
assert 'Environment test includes out of date' in output
# Test that the warning goes away when it's fixed
output = env('status')
assert 'In environment test' in output
assert 'Environment test includes out of date' not in output
def test_env_activate_broken_view(
mutable_mock_env_path, mock_archive, mock_fetch, mock_packages,
install_mockery
):
with ev.create('test'):
install('trivial-install-test-package')
# switch to a new repo that doesn't include the installed package
# test that Spack detects the missing package and fails gracefully
new_repo = MockPackageMultiRepo()
with spack.repo.use_repositories(new_repo):
with pytest.raises(SpackCommandError):
env('activate', '--sh', 'test')
# test replacing repo fixes it
env('activate', '--sh', 'test')
def test_to_lockfile_dict():
e = ev.create('test')
e.add('mpileaks')
e.concretize()
context_dict = e._to_lockfile_dict()
e_copy = ev.create('test_copy')
e_copy._read_lockfile_dict(context_dict)
assert e.specs_by_hash == e_copy.specs_by_hash
def test_env_repo():
e = ev.create('test')
e.add('mpileaks')
e.write()
with ev.read('test'):
concretize()
package = e.repo.get('mpileaks')
assert package.name == 'mpileaks'
assert package.namespace == 'builtin.mock'
def test_user_removed_spec():
"""Ensure a user can remove from any position in the spack.yaml file."""
initial_yaml = StringIO("""\
env:
  specs:
  - mpileaks
  - hypre
  - libelf
""")
before = ev.create('test', initial_yaml)
before.concretize()
before.write()
# user modifies yaml externally to spack and removes hypre
with open(before.manifest_path, 'w') as f:
f.write("""\
env:
  specs:
  - mpileaks
  - libelf
""")
after = ev.read('test')
after.concretize()
after.write()
env_specs = after._get_environment_specs()
read = ev.read('test')
env_specs = read._get_environment_specs()
assert not any(x.name == 'hypre' for x in env_specs)
def test_init_from_lockfile(tmpdir):
"""Test that an environment can be instantiated from a lockfile."""
initial_yaml = StringIO("""\
env:
  specs:
  - mpileaks
  - hypre
  - libelf
""")
e1 = ev.create('test', initial_yaml)
e1.concretize()
e1.write()
e2 = ev.Environment(str(tmpdir), e1.lock_path)
for s1, s2 in zip(e1.user_specs, e2.user_specs):
assert s1 == s2
for h1, h2 in zip(e1.concretized_order, e2.concretized_order):
assert h1 == h2
assert e1.specs_by_hash[h1] == e2.specs_by_hash[h2]
for s1, s2 in zip(e1.concretized_user_specs, e2.concretized_user_specs):
assert s1 == s2
def test_init_from_yaml(tmpdir):
"""Test that an environment can be instantiated from a lockfile."""
initial_yaml = StringIO("""\
env:
  specs:
  - mpileaks
  - hypre
  - libelf
""")
e1 = ev.create('test', initial_yaml)
e1.concretize()
e1.write()
e2 = ev.Environment(str(tmpdir), e1.manifest_path)
for s1, s2 in zip(e1.user_specs, e2.user_specs):
assert s1 == s2
assert not e2.concretized_order
assert not e2.concretized_user_specs
assert not e2.specs_by_hash
@pytest.mark.usefixtures('config')
def test_env_view_external_prefix(
tmpdir_factory, mutable_database, mock_packages
):
fake_prefix = tmpdir_factory.mktemp('a-prefix')
fake_bin = fake_prefix.join('bin')
fake_bin.ensure(dir=True)
initial_yaml = StringIO("""\
env:
  specs:
  - a
  view: true
""")
external_config = StringIO("""\
packages:
  a:
    externals:
    - spec: [email protected]
      prefix:
p4.p4_parse_state):
next_parse_state = ("parse_state", next_state.name)
elif isinstance(next_state, p4.p4_conditional_node):
next_parse_state = ("conditional_table",
get_conditional_node_name(next_state))
elif isinstance(next_state, p4.p4_table):
next_parse_state = ("table",
get_table_name(next_state))
else:
assert(False)
branch_to += [(case_type, value, mask, next_parse_state)]
parse_info["branch_to"] = branch_to
render_dict["parse_states"][name] = parse_info
def render_dict_populate_actions(render_dict, hlir):
render_dict["action_info"] = {}
field_info = render_dict["field_info"]
# these are the only actions for which we have to generate C code, since we
# are using the flat sequence of primitive calls
table_actions_set = set()
for _, table in hlir.p4_tables.items():
for action in table.actions: table_actions_set.add(action)
# we need to get the size of the match data
for action in table_actions_set:
a_info = {}
param_names = []
param_byte_widths = []
param_bit_widths = []
for param, width in zip(action.signature, action.signature_widths):
if not width :
print "unused parameter discarded"
continue
param_names += [param]
param_byte_widths += [max(bytes_round_up(width), 4)]
param_bit_widths += [width]
a_info["param_names"] = param_names
a_info["param_byte_widths"] = param_byte_widths
a_info["param_bit_widths"] = param_bit_widths
# find out which fields / instances we need to copy for parallel
# execution semantics
field_access = defaultdict(set)
for call in action.flat_call_sequence:
primitive = call[0]
for index, arg in enumerate(call[1]):
if type(arg) is p4.p4_field or\
type(arg) is p4.p4_header_instance:
sig_arg_name = primitive.signature[index]
flags = primitive.signature_flags[sig_arg_name]
if "access" not in flags:
field_access[arg].add(p4.P4_WRITE)
field_access[arg].add(p4.P4_READ)
else:
access = flags["access"]
field_access[arg].add(access)
field_copies = set()
header_copies = set()
for arg, access in field_access.items():
if len(access) > 1: # read and write
if type(arg) is p4.p4_field:
field_copies.add(get_field_instance_name(arg))
else:
header_copies.add(get_header_instance_name(arg))
a_info["field_copies"] = field_copies
a_info["header_copies"] = header_copies
call_sequence = []
for call in action.flat_call_sequence:
primitive_name = get_action_name(call[0]).upper()
primitive_args = []
# is this outdated, do I really need to have the immediate value be
# the exact same size as the destination? Let's try to get rid of
# it.
# width = 0
# if call[0].name == "add_to_field" or\
# call[0].name == "modify_field" or\
# call[0].name == "modify_field_with_hash_based_offset":
# ref_arg = call[1][0]
# field_name = get_field_instance_name(ref_arg)
# width = field_info[field_name]["byte_width_phv"]
for arg in call[1]:
if type(arg) is int or type(arg) is long:
# assert(width > 0)
tmp = arg
nbytes = 0
while tmp > 0:
nbytes += 1
tmp /= 256
width = max(4, nbytes)
type_ = "immediate"
value = int_to_byte_array(arg, width)
elif type(arg) is p4.p4_field:
type_ = "field_ref"
value = get_field_instance_name(arg)
elif type(arg) is p4.p4_header_instance:
type_ = "header_ref"
value = get_header_instance_name(arg)
elif type(arg) is p4.p4_signature_ref:
type_ = "param"
value = arg.idx
elif type(arg) is p4.p4_parse_state:
type_ = "parse_state"
value = arg.name
elif type(arg) is p4.p4_field_list:
type_ = "field_list"
value = arg.name
elif type(arg) is p4.p4_field_list_calculation:
type_ = "p4_field_calculation"
value = arg.name
elif type(arg) is p4.p4_counter:
type_ = "p4_counter"
value = arg.name
elif type(arg) is p4.p4_meter:
type_ = "p4_meter"
value = arg.name
elif type(arg) is p4.p4_register:
type_ = "p4_register"
value = arg.name
else:
print type(arg)
assert(False)
primitive_args.append((type_, value))
call_sequence.append((primitive_name, primitive_args))
a_info["call_sequence"] = call_sequence
action_name = get_action_name(action)
render_dict["action_info"][action_name] = a_info
def render_dict_populate_conditional_tables(render_dict, hlir):
render_dict["conditional_table_info"] = OrderedDict()
for name, cnode in hlir.p4_conditional_nodes.items():
ct_info = {}
ct_info["expression"] = str(cnode.condition)
conditional_name = get_conditional_node_name(cnode)
ct_info["expression_computation"] = dump_p4_expression(cnode.condition)
ct_info["next_tables"] = {} # True, False
for b, next_ in cnode.next_.items():
if next_ is None:
next_name = None
if isinstance(next_, p4.p4_conditional_node):
next_name = get_conditional_node_name(next_)
elif isinstance(next_, p4.p4_table):
next_name = get_table_name(next_)
ct_info["next_tables"][b] = next_name
render_dict["conditional_table_info"][conditional_name] = ct_info
def render_dict_populate_tables(render_dict, hlir):
render_dict["table_info"] = OrderedDict()
field_info = render_dict["field_info"]
action_info = render_dict["action_info"]
select_tables = []
action_data_tables = []
for name, table in hlir.p4_tables.items():
t_info = {}
# can be None
t_info["min_size"] = table.min_size
t_info["max_size"] = table.max_size
t_info["support_timeout"] = table.support_timeout
t_info["action_profile"] = None
act_prof = table.action_profile
if act_prof is not None:
t_info["action_profile"] = act_prof.name
action_data_tables.append(name)
if act_prof.selector is not None:
select_tables.append(name)
# will be set in render_dict_populate_counters
t_info["bytes_counter"] = None
t_info["packets_counter"] = None
t_info["meter"] = None
t_info["registers"] = []
match_types = []
match_precedence = {
p4.p4_match_type.P4_MATCH_VALID: 0,
p4.p4_match_type.P4_MATCH_EXACT: 1,
p4.p4_match_type.P4_MATCH_TERNARY: 3,
p4.p4_match_type.P4_MATCH_LPM: 2,
}
for _, m_type, _ in table.match_fields:
if m_type not in match_precedence:
print m_type, "match not yet supported"
assert(False)
match_types.append(m_type)
# If no match fields, indicate exact match
if len(match_types) == 0:
match_type = p4.p4_match_type.P4_MATCH_EXACT
elif p4.p4_match_type.P4_MATCH_TERNARY in match_types:
match_type = p4.p4_match_type.P4_MATCH_TERNARY
elif match_types.count(p4.p4_match_type.P4_MATCH_LPM) >= 2:
print "cannot have 2 different lpm in a single table"
assert(False)
elif p4.p4_match_type.P4_MATCH_LPM in match_types:
match_type = p4.p4_match_type.P4_MATCH_LPM
else:
# that includes the case when we only have one valid match and
# nothing else
match_type = p4.p4_match_type.P4_MATCH_EXACT
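# In other words, the table-level match type is the "widest" of its fields' match types:
# any ternary field makes the whole table ternary, a single lpm field (at most one is
# allowed) makes it lpm, and tables with only exact/valid matches - or no match fields at
# all - are treated as exact.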
type_mappings = {
p4.p4_match_type.P4_MATCH_EXACT: "exact",
p4.p4_match_type.P4_MATCH_TERNARY: "ternary",
p4.p4_match_type.P4_MATCH_LPM: "lpm",
p4.p4_match_type.P4_MATCH_RANGE: "range",
p4.p4_match_type.P4_MATCH_VALID: "valid",
None: "none",
}
t_info["match_type"] = type_mappings[match_type]
# basically same code as for branch_on in parse functions, because the
# build_key function is going to be the same
match_fields = []
key_fields_bit_widths = []
big_mask = []
has_mask = False
key_byte_width = 0
reordered_fields_idx = sorted(
range(len(table.match_fields)),
key = lambda x: match_precedence[table.match_fields[x][1]]
)
for field_ref, m_type, mask in table.match_fields:
if m_type is p4.p4_match_type.P4_MATCH_VALID:
if type(field_ref) is p4.p4_header_instance:
header_ref = field_ref
elif type(field_ref) is p4.p4_field:
header_ref = field_ref.instance
else:
assert(False) # should not happen
header_instance_name = get_header_instance_name(header_ref)
field_bit_width = 1
key_fields_bit_widths.append(field_bit_width)
num_bytes = 1
key_byte_width += num_bytes # this will use only 1 byte, not 4
match_fields += [(header_instance_name, type_mappings[m_type])]
else:
field_instance_name = get_field_instance_name(field_ref)
field_bit_width = field_info[field_instance_name]["bit_width"]
key_fields_bit_widths.append(field_bit_width)
num_bytes = max(bytes_round_up(field_bit_width), 4)
key_byte_width += num_bytes
match_fields += [(field_instance_name, type_mappings[m_type])]
# common to all match types
if mask:
big_mask += int_to_byte_array(mask, num_bytes)
has_mask = True
else:
big_mask += [255 for i in xrange(num_bytes)]
t_info["match_fields"] = match_fields
t_info["reordered_match_fields_idx"] = reordered_fields_idx
t_info["reordered_match_fields"] = [match_fields[i] \
for i in reordered_fields_idx]
t_info["has_mask"] = has_mask
t_info["big_mask"] = big_mask
# will be useful for PD code
t_info["key_fields_bit_widths"] = key_fields_bit_widths
t_info["key_byte_width"] = key_byte_width
t_info["actions"] = [get_action_name(a) for a in table.actions]
t_info["next_tables"] = {}
with_hit_miss_spec = False
if "hit" in table.next_:
with_hit_miss_spec = True
t_info["with_hit_miss_spec"] = with_hit_miss_spec
if with_hit_miss_spec:
for event in {"hit", "miss"}:
t = table.next_[event]
if t:
t_info["next_tables"][event] = get_table_name(t)
else:
t_info["next_tables"][event] = None
else:
for a in table.actions:
t = table.next_[a]
if t:
t_info["next_tables"][get_action_name(a)] = get_table_name(t)
else:
t_info["next_tables"][get_action_name(a)] = None
actions_idx = {}
idx = 0
for action in t_info["actions"]:
actions_idx[action] = idx
idx += 1
t_info["actions_idx"] = actions_idx
if table.action_profile is None:
action_data_byte_width = 0
for action in t_info["actions"]:
a_info = action_info[action]
action_data_byte_width = max(action_data_byte_width,
sum(a_info["param_byte_widths"]))
t_info["action_data_byte_width"] = action_data_byte_width
else:
# with an action profile, the first bit is for group vs. member, the
# remaining 31 bits are for index value
t_info["action_data_byte_width"] = 4
table_name = get_table_name(table)
render_dict["table_info"][table_name] = t_info
render_dict["select_tables"] = select_tables
render_dict["action_data_tables"] = action_data_tables
def render_dict_populate_table_types(render_dict, hlir):
render_dict["table_types"] = ["lpm", "exact", "ternary", "range", "valid",
"none"]
def render_dict_populate_action_profiles(render_dict, hlir):
action_profiles = OrderedDict()
action_info = render_dict["action_info"]
for name, act_prof in hlir.p4_action_profiles.items():
act_prof_info = {}
act_prof_info["actions"] = [get_action_name(a) for a in act_prof.actions]
# actions_idx = {}
# idx = 0
# for action in act_prof_info["actions"]:
# actions_idx[action] = idx
# idx += 1
# act_prof_info["actions_idx"] = actions_idx
action_data_byte_width = 0
for action in act_prof_info["actions"]:
a_info = action_info[action]
action_data_byte_width = max(action_data_byte_width,
sum(a_info["param_byte_widths"]))
act_prof_info["action_data_byte_width"] = action_data_byte_width
act_prof_info["size"] = act_prof.size
act_prof_info["selection_key"] = None
if act_prof.selector is not None:
act_prof_info["selection_key"] = act_prof.selector.selection_key.name
action_profiles[name] = act_prof_info
render_dict["action_profiles"] = action_profiles
def dump_p4_expression(expression):
# This function generates a register name for an expression to store its
# result.
def get_next_register():
register_name = "reg[" + str(get_next_register.register_counter) + "]"
get_next_register.register_counter += 1
return register_name
get_next_register.register_counter = 0
# Assignment statements used to generate C code are stored in this array.
# An assignment statement is a tuple of the following format:
# (register_name, operator, operand1, optional operand2, ...)
register_assignments = []
dump_register_assignments(expression, get_next_register, register_assignments)
return register_assignments
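# Illustrative sketch (not part of the original code): for the simple cases
# visible above, dump_p4_expression returns a single assignment whose
# destination is always reg[0]. The field name below is a hypothetical
# placeholder.
#
#   dump_p4_expression(7)
#   # -> [("reg[0]", "assign_immediate", 7)]
#
#   dump_p4_expression(some_p4_field)      # a p4.p4_field of width <= 32
#   # -> [("reg[0]", "assign_field", "ipv4.ttl")]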
# Only supports expressions involving uint32_t fields
def dump_register_assignments(expression, get_next_register, register_assignments):
if expression is None: return None
# The result of an expression should be stored in the first register. While
# generating C code, we are assuming that the top-level expression stores
# its result in reg[0].
register = get_next_register()
if type(expression) is int:
register_assignments.append((register, "assign_immediate", expression))
elif type(expression) is p4.p4_header_instance:
assert(False)
elif type(expression) is p4.p4_field:
assert(expression.width <= 32)
register_assignments.append((register, "assign_field", get_field_instance_name(expression)))
elif type(expression) is p4.p4_expression:
left = expression.left
right = expression.right
op = expression.op
if op == "not":
operand_register = dump_register_assignments(right, get_next_register, register_assignments)
register_assignments.append((register, "not", operand_register))
class SkyUnits():
#{{{
'''
Transform units in the sky for a given cosmology
'''
from astropy.cosmology import Planck15 as cosmodel
from astropy import units as u
def __init__(self, theta=0, z=0., distance=0., glxsize=0.):
self.theta = theta
self.z = z
self.distance = distance
self.glxsize = glxsize
def ang2proj(self, theta, z):
from astropy.cosmology import Planck15 as cosmodel
from astropy import units as u
d_A = cosmodel.angular_diameter_distance(z)
distance_Mpc = (theta * d_A).to(u.kpc, u.dimensionless_angles())
return(distance_Mpc)
def ang2glxsize(self, theta, z, glxsize):
# in development (always compute??)
rglxsize=1.
return(rglxsize)
def glxsize2ang(self, glxsize):
# in development (always compute??)
theta=1.
return(theta)
def glxsize2proj(self, size):
# in development (always compute??)
Rp = 1.
return(Rp)
def proj2ang(self, proj):
# in development (always compute??)
theta=1.
return(theta)
def proj2glxsize(self, proj):
# in development (always compute??)
rglxsize=1.
return(rglxsize)
#}}}
def check_file(sys_args):
import sys
from os.path import isfile
if len(sys_args) == 2:
filename = sys_args[1]
if isfile(filename):
msg = "Loading configuration parameters from {}"
print(msg.format(filename) )
else:
print("Input argument is not a valid file")
raise SystemExit(1)
else:
print('Configuration file expected (just 1 argument)')
print('example: python run_correlation.py ../set/config.ini')
raise SystemExit(1)
return filename
# import configparser
# #class Config(configparser.ConfigParser):
# class Config():
#
# def __init__(self):
# #{{{
# #ConfigParser.ConfigParser.__init__(self)
#
# self.cfg = configparser.ConfigParser()
# #}}}
#
# def load_config(self, sys_args):
# # https://stackoverflow.com/questions/3609852/which-is-the-best-way-to-allow-configuration-options-be-overridden-at-the-comman
# # https://docs.python.org/3/library/configparser.html
# # https://tomassetti.me/parsing-in-python/#tools
class SkyMap():
# {{{
'''
class SkyMap: methods for computing angular correlations in the CMB
methods: load: loads a CMB map
'''
import healpy as hp
def __init__(self, nside=256, ordering='ring', frame='equatorial'):
import healpy as hp
self.nside = nside
self.ordering = 'ring'
self.frame = 'equatorial'
self.npixs = hp.nside2npix(self.nside)
# fac =
# Dsel: sample of galaxies
# self, skymap, nside,fac,rprof,Dsel,vec,hp_data_sel,hp_mask):
def __len__(self):
return self.npixs
def __repr__(self):
return 'Sky Map with {!s} pixels in {!s} order'.format(\
self.npixs, self.ordering)
def __str__(self):
return 'Sky Map with {!s} pixels in {!s} order'.format(\
self.npixs, self.ordering)
def load(self, filename, *args, **kwargs):
'''
Reads the CMB map
Args:
filename (str): the file name of the map to be read
Raises:
Returns:
readmap: a healpix map, class ?
'''
import healpy as hp
d = hp.read_map(filename, h=True, **kwargs)
self.data = d[0]
self.header = d[1]
return(True)
def apply_mask(self, mask):
import healpy as hp
# both self and mask are expected to be SkyMap instances with .data loaded
m = self.data.copy()
k = mask.data.copy()
m[k<0.5] = hp.UNSEEN
masked_map = hp.ma(m)
return(masked_map)
# }}}
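# Minimal usage sketch (commented out; the file names are hypothetical and the
# healpy maps must exist on disk for this to run):
#
#   cmb = SkyMap(nside=512)
#   cmb.load("cmb_map.fits", field=0)
#   msk = SkyMap(nside=512)
#   msk.load("cmb_mask.fits", field=0)
#   masked = cmb.apply_mask(msk)   # healpy masked array with UNSEEN pixels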
from joblib import Parallel, delayed
def unwrap_profile_self(arg, **kwarg):
return RadialProfile.radialprofile(*arg, **kwarg)
def unwrap_anisotropicprofile_self(arg, **kwarg):
return AnisotropicProfile.anisotropic_profile(*arg, **kwarg)
def unwrap_correlation_self(correlation, c, **kwarg):
return correlation.correlation(c, **kwarg)
class RadialProfile:
# {{{
'''
class RadialProfile
methods for computing angular correlations in the CMB
methods:
set_breaks: select bin scheme for the profile
radialprofile: computes the radial profile
radialprofile_II: computes the radial profile in parallel
'''
def __init__(self, breaks=[0], Nran=0):
#{{{
"""init(self, breaks, Nran) : sets the partition (binning
scheme) for the computing of the radial profile.
Tasks:
1. stores the bin edges and allocates the profile arrays.
Args:
breaks:
list of bin edges for the distance to the center
Raises:
errors?
Returns:
"""
import numpy as np
self.breaks = breaks
N = len(breaks)-1
self.N = N
self.max_centers = 0
self.signal = np.zeros(N)
self.sigma = np.zeros(N)
self.controlsample_mean = np.zeros(N)
self.controlsample_sigma = np.zeros(N)
#}}}
def set_breaks(self, unit, *args, **kwargs):
#{{{
"""set_breaks(self, unit) : sets the breaks for the binned
profile.
Tasks:
1. this works as a combination of np.linspace and units.
Args:
unit:
selected unit for the distance to the center
Raises:
errors?
Returns:
"""
import numpy as np
self.breaks = np.linspace(*args, **kwargs)
self.breaks = self.breaks * unit
self.N = len(self.breaks)-1
self.signal = np.zeros(self.N)
self.sigma = np.zeros(self.N)
#}}}
def radialprofile(self, center, skymap, skymask):
#{{{
"""radialprofile(self, skymap) : computes the radial profile of
CMB pixels around a selected center
Tasks:
1. traverse all centers (parallelize here)
2. traverse all radial bins
3. traverse all pixels in the ring
4. compute the mean
5. store the mean values for all the rings
Args:
skymap (class SkyMap):
Map of the cosmic background, including scalar and mask
centers_catalog (class Centers):
Catalog of the centers, including (x, y, z) position
in Healpix convention and position angle of the galaxy
disk.
Raises:
errors?
Returns:
profdata:
proferror:
uncertaintydata:
uncertaintyerror:
"""
# in the parallel version a single center is processed
# each time this function is entered
import numpy as np
import healpy as hp
import astropy.units as u
import time
radiifloat = self.breaks.to(u.rad)
listpixs_internal = []
listpixs_mask = []
profile = []
first = True
for radiusfloat in radiifloat:
listpixs_external = hp.query_disc(
skymap.nside,
center,
radiusfloat.value,
inclusive=True,
fact=4,
nest=False)
if(not first):
listpixs_ring = list(set(listpixs_external) -
set(listpixs_internal))
listpixs_mask = skymask.data[listpixs_ring]
mean_ring = np.nanmean(skymap.data[listpixs_ring])
profile.append(mean_ring)
first = False
listpixs_internal = listpixs_external.copy()
return(profile)
#}}}
def radialprofile_align(self, center, skymap, skymask):
#{{{
"""radialprofile(self, skymap) : computes the radial profile of
ACA INCORPORAR LO DE LA ROTACION
"""
# in the parallel version a single center is processed
# each time this function is entered
import numpy as np
import healpy as hp
import astropy.units as u
import time
radiifloat = self.breaks.to(u.rad)
listpixs_internal = []
listpixs_mask = []
profile = []
first = True
for radiusfloat in radiifloat:
listpixs_external = hp.query_disc(
skymap.nside,
center,
radiusfloat.value,
inclusive=True,
fact=4,
nest=False)
if(not first):
listpixs_ring = list(set(listpixs_external) -
set(listpixs_internal))
listpixs_mask = skymask.data[listpixs_ring]
mean_ring = np.nanmean(skymap.data[listpixs_ring])
profile.append(mean_ring)
first = False
listpixs_internal = listpixs_external.copy()
return(profile)
#}}}
def radialprofile_II(self, centers, skymap, skymask, njobs):
#{{{
"""radialprofile_II(self, skymap) : computes the radial profile of
CMB pixels around selected centers in parallel. Uses a wrapper
and the joblib library.
Tasks:
1. traverse all centers (parallelize here)
2. traverse all radial bins
3. traverse all pixels in the ring
4. compute the mean
5. store the mean values for all the rings
Args:
skymap (class SkyMap):
Map of the cosmic background, including scalar and mask
centers_catalog (class Centers):
Catalog of the centers, including (x, y, z) position
in Healpix convention and position angle of the galaxy
disk.
Raises:
errors?
Returns:
profdata:
proferror:
uncertaintydata:
uncertaintyerror:
"""
results = []
# threading? multiprocessing?
results = Parallel(n_jobs=njobs, verbose=5, backend="multiprocessing")\
(delayed(unwrap_profile_self)(i, skymap=skymap, skymask=skymask)
for i in zip([self]*len(centers), centers))
return(results)
#}}}
#}}}
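# Minimal usage sketch (commented out; assumes `cmb` and `msk` are loaded
# SkyMap instances as in the SkyMap example above, and `centers` is an
# iterable of unit vectors in the healpy convention):
#
#   import astropy.units as u
#   rp = RadialProfile()
#   rp.set_breaks(u.deg, 0., 2., 11)   # 11 edges -> 10 radial bins, in degrees
#   profiles = rp.radialprofile_II(centers, cmb, msk, njobs=4)
#   # `profiles` is a list with one mean-temperature profile per center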
class AnisotropicProfile:
# {{{
'''
class AnisotropicProfile
methods for computing angular correlations in the CMB, as a
function of the angle wrt the position angle of the galaxy and the
radial distance
methods:
set_breaks: select bin scheme for the profile
'''
def __init__(self, breaks=[0], Nran=0):
#{{{
"""init(self, breaks, Nran) : sets the partition (binning
scheme) for the computing of the radial profile.
Tasks:
1. stores the radial and angular bin edges.
Args:
breaks:
list of bin edges (used for both radius and angle)
Raises:
errors?
Returns:
"""
import numpy as np
self.breaks_rad = breaks
self.breaks_ang = breaks
self.Nrad = len(self.breaks_rad) - 1
self.Nang = len(self.breaks_ang) - 1
self.max_centers = 0
#}}}
def set_breaks_radial(self, unit, *args, **kwargs):
#{{{
"""set_breaks(self, unit) : sets the breaks for the binned
profile.
Tasks:
1. this works as a combination of np.linspace and units.
Args:
unit:
selected unit for the distance to the center
Raises:
errors?
Returns:
"""
import numpy as np
self.breaks_rad = np.linspace(*args, **kwargs)
self.breaks_rad = self.breaks_rad * unit
self.Nrad = len(self.breaks_rad)-1
#}}}
def set_breaks_angular(self, unit, *args, **kwargs):
#{{{
"""set_breaks(self, unit) : sets the breaks for the binned
profile.
Tasks:
1. this works as a combination of np.linspace and units.
Args:
unit:
selected unit for the distance to the center
Raises:
errors?
Returns:
"""
import numpy as np
self.breaks_ang = np.linspace(*args, **kwargs)
self.breaks_ang = self.breaks_ang * unit
self.Nang = len(self.breaks_ang)-1
#}}}
def anisotropic_profile(self, center, skymap, skymask):
#{{{
"""radialprofile(self, skymap) : computes the radial profile of
CMB pixels around a selected center
Pasos:
armar la matriz del perfil (bineado en r y o)
recorrer la lista de centros
rotar: armar la matriz de rotacion con angulos de Euler
recorrer lista de pixels
Objetivo:
calcular el angulo entre la direccion del disco y la direccion al pixel
calcular | |
#!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: na_santricity_host
short_description: NetApp E-Series manage eseries hosts
description: Create, update, remove hosts on NetApp E-series storage arrays
author:
- <NAME> (@hulquest)
- <NAME> (@ndswartz)
extends_documentation_fragment:
- netapp_eseries.santricity.santricity.santricity_doc
options:
name:
description:
- If the host doesn't yet exist, the label/name to assign at creation time.
- If the host already exists, this will be used to uniquely identify the host to make any required changes.
type: str
required: True
aliases:
- label
state:
description:
- Set to absent to remove an existing host
- Set to present to modify or create a new host definition
type: str
choices:
- absent
- present
default: present
host_type:
description:
- Host type includes operating system and multipath considerations.
- If not specified, the default host type will be utilized. Default host type can be set using M(netapp_eseries.santricity.na_santricity_global).
- For storage array specific options see M(netapp_eseries.santricity.na_santricity_facts).
- All values are case-insensitive.
- AIX MPIO - The Advanced Interactive Executive (AIX) OS and the native MPIO driver
- AVT 4M - Silicon Graphics, Inc. (SGI) proprietary multipath driver
- HP-UX - The HP-UX OS with native multipath driver
- Linux ATTO - The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
- Linux DM-MP - The Linux OS and the native DM-MP driver
- Linux Pathmanager - The Linux OS and the SGI proprietary multipath driver
- Mac - The Mac OS and the ATTO Technology, Inc. driver
- ONTAP - FlexArray
- Solaris 11 or later - The Solaris 11 or later OS and the native MPxIO driver
- Solaris 10 or earlier - The Solaris 10 or earlier OS and the native MPxIO driver
- SVC - IBM SAN Volume Controller
- VMware - ESXi OS
- Windows - Windows Server OS and Windows MPIO with a DSM driver
- Windows Clustered - Clustered Windows Server OS and Windows MPIO with a DSM driver
- Windows ATTO - Windows OS and the ATTO Technology, Inc. driver
type: str
required: False
aliases:
- host_type_index
ports:
description:
- A list of host ports you wish to associate with the host.
- Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
uniquely identified by a label and these must be unique.
type: list
required: False
suboptions:
type:
description:
- The interface type of the port to define.
- Acceptable choices depend on the capabilities of the target hardware/software platform.
required: true
choices:
- iscsi
- sas
- fc
- ib
- nvmeof
label:
description:
- A unique label to assign to this port assignment.
required: true
port:
description:
- The WWN or IQN of the hostPort to assign to this port definition.
required: true
force_port:
description:
- Allow ports that are already assigned to be re-assigned to your current host
required: false
type: bool
"""
EXAMPLES = """
- name: Define or update an existing host named "Host1"
na_santricity_host:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "<PASSWORD>"
validate_certs: true
name: "Host1"
state: present
host_type_index: Linux DM-MP
ports:
- type: "iscsi"
label: "PORT_1"
port: "iqn.1996-04.de.suse:01:56f86f9bd1fe"
- type: "fc"
label: "FC_1"
port: "fc00:db20:35b:7399::5"
- type: "fc"
label: "FC_2"
port: "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
- name: Ensure a host named "Host2" doesn"t exist
na_santricity_host:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "<PASSWORD>"
validate_certs: true
name: "Host2"
state: absent
"""
RETURN = """
msg:
description:
- A user-readable description of the actions performed.
returned: on success
type: str
sample: The host has been created.
id:
description:
- the unique identifier of the host on the E-Series storage-system
returned: on success when state=present
type: str
sample: 00000000600A098000AAC0C3003004700AD86A52
ssid:
description:
- the unique identifier of the E-Series storage-system with the current api
returned: on success
type: str
sample: 1
api_url:
description:
- the url of the API that this request was processed by
returned: on success
type: str
sample: https://webservices.example.com:8443
"""
import re
from ansible.module_utils._text import to_native
from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
class NetAppESeriesHost(NetAppESeriesModule):
PORT_TYPES = ["iscsi", "sas", "fc", "ib", "nvmeof"]
def __init__(self):
ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present"]),
ports=dict(type="list", required=False),
force_port=dict(type="bool", default=False),
name=dict(type="str", required=True, aliases=["label"]),
host_type=dict(type="str", required=False, aliases=["host_type_index"]))
super(NetAppESeriesHost, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
self.check_mode = self.module.check_mode
args = self.module.params
self.ports = args["ports"]
self.force_port = args["force_port"]
self.name = args["name"]
self.state = args["state"]
self.post_body = dict()
self.all_hosts = list()
self.host_obj = dict()
self.new_ports = list()
self.ports_for_update = list()
self.ports_for_removal = list()
# Update host type with the corresponding index
host_type = args["host_type"]
if host_type:
host_type = host_type.lower()
if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
elif host_type.isdigit():
self.host_type_index = int(args["host_type"])
else:
self.module.fail_json(msg="host_type must be either a host type name or host type index found integer the documentation.")
else:
self.host_type_index = None
if not self.url.endswith("/"):
self.url += "/"
# Fix port representation if they are provided with colons
if self.ports is not None:
for port in self.ports:
port["label"] = port["label"].lower()
port["type"] = port["type"].lower()
port["port"] = port["port"].lower()
if port["type"] not in self.PORT_TYPES:
self.module.fail_json(msg="Invalid port type! Port interface type must be one of [%s]." % ", ".join(self.PORT_TYPES))
# Determine whether the address is a 16-hex-digit (8-byte) WWPN and, if so, strip colons and any 0x prefix
if re.match(r"^(0x)?[0-9a-f]{16}$", port["port"].replace(":", "")):
port["port"] = port["port"].replace(":", '').replace("0x", "")
if port["type"] == "ib":
port["port"] = "0" * (32 - len(port["port"])) + port["port"]
@property
def default_host_type(self):
"""Return the default host type index."""
try:
rc, default_index = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/defaultHostTypeIndex" % self.ssid)
return default_index[0]
except Exception as error:
self.module.fail_json(msg="Failed to retrieve default host type index")
@property
def valid_host_type(self):
host_types = None
try:
rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
except Exception as err:
self.module.fail_json(msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
try:
match = list(filter(lambda host_type: host_type["index"] == self.host_type_index, host_types))[0]
return True
except IndexError:
self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
def check_port_types(self):
"""Check to see whether the port interface types are available on storage system."""
try:
rc, interfaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
for port in self.ports:
for interface in interfaces:
# Check for IB iSER
if port["type"] == "ib" and "iqn" in port["port"]:
if ((interface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
(interface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
interface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
port["type"] = "iscsi"
break
# Check for NVMe
elif (port["type"] == "nvmeof" and "commandProtocolPropertiesList" in interface and
"commandProtocolProperties" in interface["commandProtocolPropertiesList"] and
interface["commandProtocolPropertiesList"]["commandProtocolProperties"]):
if interface["commandProtocolPropertiesList"]["commandProtocolProperties"][0]["commandProtocol"] == "nvme":
break
# Check SAS, FC, iSCSI
elif ((port["type"] == "fc" and interface["ioInterfaceTypeData"]["interfaceType"] == "fibre") or
(port["type"] == interface["ioInterfaceTypeData"]["interfaceType"])):
break
else:
self.module.fail_json(msg="Invalid port type! Type [%s]. Port [%s]." % (port["type"], port["label"]))
except Exception as error:
# For older versions of web services
for port in self.ports:
if port["type"] == "ib" and "iqn" in port["port"]:
port["type"] = "iscsi"
break
def assigned_host_ports(self, apply_unassigning=False):
"""Determine if the hostPorts requested have already been assigned and return list of required used ports."""
used_host_ports = {}
for host in self.all_hosts:
if host["label"] != self.name.lower():
for host_port in host["hostSidePorts"]:
for port in self.ports:
if port["port"] == host_port["address"] or port["label"] == host_port["label"]:
if not self.force_port:
self.module.fail_json(msg="Port label or address is already used and force_port option is set to false!")
else:
# Determine port reference
port_ref = [port["hostPortRef"] for port in host["ports"]
if port["hostPortName"] == host_port["address"]]
port_ref.extend([port["initiatorRef"] for port in host["initiators"]
if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
# Create dictionary of hosts containing list of port references
if host["hostRef"] not in used_host_ports.keys():
used_host_ports.update({host["hostRef"]: port_ref})
else:
used_host_ports[host["hostRef"]].extend(port_ref)
# else:
# for host_port in host["hostSidePorts"]:
# for port in self.ports:
# if ((host_port["label"] == port["label"] and host_port["address"] != port["port"]) or
# (host_port["label"] != port["label"] and host_port["address"] == port["port"])):
# if not self.force_port:
# self.module.fail_json(msg="Port label or address is already used and force_port is false!")
# # self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")
# else:
# # Determine port reference
# port_ref = [port["hostPortRef"] for port in host["ports"]
# if port["hostPortName"] == host_port["address"]]
# port_ref.extend([port["initiatorRef"] for port in host["initiators"]
#                     if
# -*- coding: utf-8 -*-
# @Time : 2021/5/30
# @Author : <NAME>
import numpy as np
from scipy.sparse import csc_matrix, dok_matrix
from scipy.sparse.linalg import splu, cg
from sksparse.cholmod import cholesky
# from scipy.sparse import save_npz, load_npz
# import line_profiler
class Edge(object):
def __init__(self, sourceNodeNumber, sinkNodeNumber, branchValue, is_sourceNode=None):
self.sourceNodeNumber = sourceNodeNumber
self.sinkNodeNumber = sinkNodeNumber
self.branchValue = branchValue
self.is_sourceNode = is_sourceNode # when Edge in GraphOfNetwork.edgeResNodeDict, need it to indicate if Node is sourceNode of Edge
class GraphOfNetwork(object):
def __init__(self, method='LU', orderingMethod=None, nodes=0, edges=0, shorts=0, voltageSource=0, currentSource=0):
self.name = ""
self.nodes = nodes # Number of nodes in the network(include shorts)
self.edges = edges # Number of branches in the network(include shorts)
self.shorts = shorts # Number of shorts in the network(zero value voltage sources)
self.voltageSource = voltageSource # Number of Voltage Source(not include shorts)
self.currentSource = currentSource # Number of Current Source
self.method = method # Solve method
self.orderingMethod = orderingMethod # Ordering method——"Cholesky": ["natural","amd","metis","nesdis","colamd","default","best"], "LU": ["NATURAL","COLAMD","MMD_ATA","MMD_AT_PLUS_A"]
self.order = 0 # Order of matrix
self.sparseMatrix = None # Sparse matrix for MNA system of equations
self.currentVector = None # Right hand side of Ax = b
self.solVec = None # Solution Vector
self.nodeDict = {} # dict{nodeNumber: [NodeName,……]}
self.edgeResNodeDict = {} # dict{nodeNumber:[Edge]}, including edges of type "R"
self.currentList = [] # list[Edge], including edge of type "I"
self.voltNodeDict = {} # dict{voltNodeNumber: Voltage value}, for nodes of not short edge of type "V"
def add_edge_to_graph(self, sourceNodeNumber, sinkNodeNumber, branchValue, edgeType):
"""
Processing branch information by classification.
"""
if edgeType == "I":
self.currentSource += 1
self.currentList.append(Edge(sourceNodeNumber, sinkNodeNumber, branchValue))
elif edgeType == "R":
branchValue = 1.0 / branchValue
if sourceNodeNumber:
edge = Edge(sourceNodeNumber, sinkNodeNumber, branchValue, is_sourceNode=True)
self.edgeResNodeDict[sourceNodeNumber] = self.edgeResNodeDict.get(sourceNodeNumber, list()) + [edge]
if sinkNodeNumber:
edge = Edge(sourceNodeNumber, sinkNodeNumber, branchValue, is_sourceNode=False)
self.edgeResNodeDict[sinkNodeNumber] = self.edgeResNodeDict.get(sinkNodeNumber, list()) + [edge]
elif edgeType == 'V':
self.voltageSource += 1
if sinkNodeNumber == 0:
self.voltNodeDict[sourceNodeNumber] = branchValue
elif sourceNodeNumber == 0:
self.voltNodeDict[sinkNodeNumber] = -branchValue
else:
raise NotImplementedError("V should be grounded!")
def generate_nodeNumbers(self, sourceNodeName, sinkNodeName, nodesNumber, nodesList1, nodesList2, del_nodeNums, is_edgeShort=False):
"""
Generates initial ordinal number for nodes
"""
# 1: for short edge's nodes
if is_edgeShort:
self.shorts += 1
# (1)one node has ordinal number, the other don't: choose the existed number for two nodes
if sourceNodeName in nodesList1.keys() and sinkNodeName not in nodesList1.keys():
self.nodes += 1
sourceNodeNumber = nodesList1[sourceNodeName]
sinkNodeNumber = sourceNodeNumber
nodesList1[sinkNodeName] = sinkNodeNumber
nodesList2[sinkNodeNumber] = nodesList2.get(sinkNodeNumber, list()) + [sinkNodeName]
elif sourceNodeName not in nodesList1.keys() and sinkNodeName in nodesList1.keys():
self.nodes += 1
sinkNodeNumber = nodesList1[sinkNodeName]
sourceNodeNumber = sinkNodeNumber
nodesList1[sourceNodeName] = sourceNodeNumber
nodesList2[sourceNodeNumber] = nodesList2.get(sourceNodeNumber, list()) + [sourceNodeName]
# (2)both two nodes have ordinal number: delete the big number and choose the small number for two nodes
elif sourceNodeName in nodesList1.keys() and sinkNodeName in nodesList1.keys():
NodeNumber = min(nodesList1[sourceNodeName], nodesList1[sinkNodeName])
del_nodeNum = max(nodesList1[sourceNodeName], nodesList1[sinkNodeName])
del_nodeNums.append(del_nodeNum)
# print("{} is del".format(del_nodeNum))
if NodeNumber == nodesList1[sourceNodeName]:
nodesList2[NodeNumber] = nodesList2.get(NodeNumber, list()) + [sinkNodeName]
nodesList2[del_nodeNum].remove(sinkNodeName)
nodesList1[sinkNodeName] = NodeNumber
else:
nodesList2[NodeNumber] = nodesList2.get(NodeNumber, list()) + [sourceNodeName]
nodesList2[del_nodeNum].remove(sourceNodeName)
nodesList1[sourceNodeName] = NodeNumber
# (3)two nodes don't have ordinal number: generate one number for two nodes
else:
self.nodes += 2
nodesNumber += 1
nodesList1[sourceNodeName] = nodesNumber
nodesList1[sinkNodeName] = nodesNumber
nodesList2[nodesNumber] = nodesList2.get(nodesNumber, list()) + [sourceNodeName, sinkNodeName]
# 2: for not short edge's node, generate two number for two nodes
else:
for NodeName in [sourceNodeName, sinkNodeName]:
if NodeName not in nodesList1.keys():
self.nodes += 1
nodesNumber += 1
nodesList1[NodeName] = nodesNumber
nodesList2[nodesNumber] = nodesList2.get(nodesNumber, list()) + [NodeName]
return nodesNumber, nodesList1, nodesList2, del_nodeNums
def convert_network_into_graph(self, filename, filepath=""):
"""
Preprocessing of Spice files and build the grid data structure.
:return:
"""
print("\n-----------Convert powerGridFile into graph-----------")
print(filepath+filename)
file = open(filepath+filename, 'r')
self.name = filename.split(".")[0]
nodesNumber = 0
nodesList1 = {"0": 0} # {nodeName:nodeNumber}
nodesList2 = {0: ["0"]} # {nodeNumber:[nodeName1,nodeName2,……]}
del_nodeNums = [] # Some node numbers do not have corresponding nodes due to short circuits
temp = [] # Temporarily stores information for non-short-circuited "V" edge: [[sourceNodeName, sinkNodeName, branchValue, edgeType],[……],……]
for line in file.readlines():
is_edgeShort = False
if ".end" in line:
break
elif ".op" in line or "*" in line:
continue
else:
self.edges += 1
edgeName, sourceNodeName, sinkNodeName, branchValue = line.split()
edgeType = edgeName[0].upper()
branchValue = float(branchValue)
if branchValue == 0.0 and edgeType == "V":
# print('short edge:', line)
is_edgeShort = True
else:
temp.append([sourceNodeName, sinkNodeName, branchValue, edgeType])
nodesNumber, nodesList1, nodesList2, del_nodeNums = self.generate_nodeNumbers(sourceNodeName, sinkNodeName,
nodesNumber, nodesList1, nodesList2, del_nodeNums, is_edgeShort)
self.nodes += 1
file.close()
print("------------read powerGridFile over------------")
# generate nodeDict from nodeList2,nodeList1
if len(del_nodeNums):
print("length of deleted nodeNumbers is not 0, but {}".format(len(del_nodeNums)))
print("------------sort again for nodeNumber----------")
nodesList3 = list(nodesList2.items())
nodesList3.sort(key=lambda x: x[0], reverse=False)
i = 0
for num_nodeName in nodesList3:
if num_nodeName[1]:
if num_nodeName[0] != i:
for nodeName in num_nodeName[1]:
nodesList1[nodeName] = i
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
else:
for nodeName in num_nodeName[1]:
self.nodeDict[i] = self.nodeDict.get(i, list()) + [nodeName]
i += 1
else:
for nodeName in nodesList1:
self.nodeDict[nodesList1[nodeName]] = self.nodeDict.get(nodesList1[nodeName], list()) + [nodeName]
# add edge to graph
print("------------add edge to graph------------------")
for edge in temp:
sourceNodeName, sinkNodeName, branchValue, edgeType = edge
sourceNodeNumber = nodesList1[sourceNodeName]
sinkNodeNumber = nodesList1[sinkNodeName]
if edgeType not in "RIV":
raise TypeError("edge is not in proper format!")
else:
self.add_edge_to_graph(sourceNodeNumber, sinkNodeNumber, branchValue, edgeType)
# print(sourceNodeName, ":", sourceNodeNumber, sinkNodeName, ":", sinkNodeNumber, branchValue, edgeType)
print("Parse over!")
print("Total number of Nodes(include shorts):", self.nodes)
print("Total number of Edges(include shorts):", self.edges)
print("Total number of short Edges = ", self.shorts)
print("Total number of Current Source = ", self.currentSource)
print("Total number of Voltage Source(not include shorts):", self.voltageSource)
# print("nodesDict:\n", self.nodeDict)
print("-------------------------------------------------------\n")
def init_sparse_matrix(self):
"""
Initialize the sparse matrix in DOK format.
"""
print("-------------Initialize the sparse matrix--------------")
order = self.nodes - self.shorts - 1
if order > 0:
self.order = order
print("order:", order)
self.sparseMatrix = dok_matrix((order, order))
self.currentVector = np.zeros(order)
else:
raise ValueError("sparseMatrix's order is <= 0!")
# @profile
def fill_sparse_matrix(self):
"""
Convert it into a linear system solving problem: Ax=b.
Generate sparse matrix A in DOK format and then convert it to CSC format.
"""
self.init_sparse_matrix()
print("----------------fill the sparse matrix-----------------")
# "V" edgeType
print("for \"V\" edgeType")
for nodeNumber, nodeVolt in self.voltNodeDict.items():
self.sparseMatrix[nodeNumber - 1, nodeNumber - 1] = 1
self.currentVector[nodeNumber - 1] = nodeVolt
# 'I' edgeType
print("for \"I\" edgeType")
for edge in self.currentList:
sourceNodeNo = edge.sourceNodeNumber
sinkNodeNo = edge.sinkNodeNumber
if sinkNodeNo == 0:
self.currentVector[sourceNodeNo - 1] -= edge.branchValue
elif sourceNodeNo == 0:
self.currentVector[sinkNodeNo - 1] += edge.branchValue
else:
self.currentVector[sourceNodeNo - 1] -= edge.branchValue # current out from sourceNode of I: -
self.currentVector[sinkNodeNo - 1] += edge.branchValue # current in to sinkNode of I: +
# 'R' edgeType
print("for \"R\" edgeType")
for nodeNumber in self.edgeResNodeDict:
if nodeNumber in self.voltNodeDict.keys():
continue
else:
for edge in self.edgeResNodeDict[nodeNumber]:
if edge.is_sourceNode:
otherNodeNo = edge.sinkNodeNumber
else:
otherNodeNo = edge.sourceNodeNumber
self.sparseMatrix[nodeNumber - 1, nodeNumber - 1] += edge.branchValue
if otherNodeNo == 0:
continue
elif otherNodeNo in self.voltNodeDict.keys():
self.currentVector[nodeNumber - 1] += self.voltNodeDict[otherNodeNo] * edge.branchValue
else:
self.sparseMatrix[nodeNumber - 1, otherNodeNo - 1] -= edge.branchValue
self.sparseMatrix = csc_matrix(self.sparseMatrix)
# save_npz('sparse_matrix_{}.npz'.format(self.name), self.sparseMatrix)
# ----------Display MNA matrix and current vector-----------
# print("MNA sparseMatrix:")
# print(self.sparseMatrix)
# print("currentVector:")
# print(self.currentVector)
# @profile
def node_voltage_solver(self):
"""
Interface realization of solving module of linear equations
"""
print("\n-----------------Node voltage solution-----------------")
current_vector = self.currentVector
matrix = self.sparseMatrix
method = self.method
ordering_method = self.orderingMethod
if method == 'LU':
print("--------------1: LU Decomposition--------------")
if ordering_method is None:
LU = splu(matrix)
elif ordering_method == "MMD_AT_PLUS_A":
LU = splu(matrix, permc_spec=ordering_method, diag_pivot_thresh=0.0, options=dict(SymmetricMode=True))
else:
LU = splu(matrix, permc_spec=ordering_method)
self.solVec = LU.solve(current_vector)
elif method == 'CG':
print("------------2: Conjugate Gradient--------------")
self.solVec, exitCode = cg(matrix, current_vector)
if exitCode == 0:
print('0 : successful')
elif exitCode > 0:
print('>0 : convergence to tolerance not achieved, number of iterations:{}'.format(exitCode))
else:
print('<0 : illegal input or breakdown')
elif method == 'cholesky':
print("----------3: Cholesky Decomposition------------")
if ordering_method is None:
factor = cholesky(matrix)
else:
factor = cholesky(matrix, ordering_method=ordering_method)
self.solVec = factor(current_vector)
else:
raise NotImplementedError("no method \"{}\"".format(method))
def print_solution(self, filepath=""):
"""
Output node voltage values to the solution file
"""
print("\n--------------------Print solution---------------------")
print("---------------------Voltage-------------------")
if self.orderingMethod is None:
solution_file = open(filepath + self.name + '_' + self.method + ".solution", 'w')
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# <NAME> (<EMAIL>)
import py_trees as pt
import py_trees_ros as ptr
import time
import numpy as np
import rospy
import tf
import actionlib
# from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal
import actionlib_msgs.msg as actionlib_msgs
from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped
from nav_msgs.msg import Path
from std_msgs.msg import Float64, Header, Bool, Empty
from visualization_msgs.msg import MarkerArray
from sensor_msgs.msg import NavSatFix
from std_srvs.srv import SetBool
from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver
import bb_enums
import imc_enums
import common_globals
from mission_plan import MissionPlan
from mission_log import MissionLog
class A_PublishFinalize(pt.behaviour.Behaviour):
def __init__(self, topic):
super(A_PublishFinalize, self).__init__(name="A_PublishFinalize")
self.bb = pt.blackboard.Blackboard()
self.topic = topic
self.last_published_time = None
self.message_object = Empty()
def setup(self, timeout):
self.pub = rospy.Publisher(self.topic, Empty, queue_size=1)
return True
def update(self):
if self.last_published_time is not None:
time_since = time.time() - self.last_published_time
self.feedback_message = "Last pub'd:{:.2f}s ago".format(time_since)
else:
self.feedback_message = "Never published!"
finalized = self.bb.get(bb_enums.MISSION_FINALIZED)
if not finalized:
try:
self.pub.publish(self.message_object)
self.last_published_time = time.time()
self.feedback_message = "Just published"
self.bb.set(bb_enums.MISSION_FINALIZED, True)
return pt.Status.SUCCESS
except:
msg = "Couldn't publish"
rospy.logwarn_throttle(1, msg)
self.feedback_message = msg
return pt.Status.FAILURE
return pt.Status.SUCCESS
class A_ManualMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_ManualMissionLog, self).__init__(name="A_ManualMissionLog")
self.bb = pt.blackboard.Blackboard()
self.started_logs = 0
self.num_saved_logs = 0
def start_new_log(self):
save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
log = MissionLog(mission_plan = None,
save_location = save_location)
self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, log)
rospy.loginfo("Started new manual mission log")
self.started_logs += 1
return log
def update(self):
enabled = self.bb.get(bb_enums.ENABLE_MANUAL_MISSION_LOG)
log = self.bb.get(bb_enums.MANUAL_MISSION_LOG_OBJ)
if not enabled:
# if we have a log, we save it now
# and set it to None, so next time we are
# disabled we dont do anything
if log is not None:
log.save()
self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, None)
self.num_saved_logs += 1
self.feedback_message = "Disabled, {} logs saved".format(self.num_saved_logs)
return pt.Status.SUCCESS
if log is None:
log = self.start_new_log()
# first add the auv pose
world_trans = self.bb.get(bb_enums.WORLD_TRANS)
x,y = world_trans[0], world_trans[1]
z = -self.bb.get(bb_enums.DEPTH)
log.navigation_trace.append((x,y,z))
# then add the raw gps
gps = self.bb.get(bb_enums.RAW_GPS)
mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if gps is None or gps.status.status == -1 or mplan is None: # no fix, or no mission plan to convert with
gps_utm_point = None
else:
# translate the latlon to utm point using the same service as the mission plan
gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
if gps_utm_x is None:
gps_utm_point = None
else:
gps_utm_point = (gps_utm_x, gps_utm_y)
log.raw_gps_trace.append(gps_utm_point)
# then add the tree tip and its status
tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
log.tree_tip_trace.append((tree_tip, tip_status))
self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
return pt.Status.SUCCESS
class A_SaveMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_SaveMissionLog, self).__init__(name="A_SaveMissionLog")
self.bb = pt.blackboard.Blackboard()
self.num_saved_logs = 0
def update(self):
log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
if log is not None:
log.save()
self.num_saved_logs += 1
self.bb.set(bb_enums.MISSION_LOG_OBJ, None)
self.feedback_message = "Saved log #{}!".format(self.num_saved_logs)
else:
self.feedback_message = "#saved logs:{}".format(self.num_saved_logs)
return pt.Status.SUCCESS
class A_UpdateMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_UpdateMissionLog, self).__init__(name="A_UpdateMissionLog")
self.bb = pt.blackboard.Blackboard()
self.started_logs = 0
def start_new_log(self, mplan):
save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
log = MissionLog(mission_plan = mplan,
save_location = save_location)
self.bb.set(bb_enums.MISSION_LOG_OBJ, log)
rospy.loginfo("Started new mission log")
self.started_logs += 1
return log
def update(self):
# only update if there is an unfinalized mission that has been started
mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mplan is None:
rospy.loginfo("Mission plan is None, can't make a log of this?")
self.feedback_message = "No mission plan!"
return pt.Status.FAILURE
log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
if log is None:
log = self.start_new_log(mplan)
# check if the mission has changed in the meantime
# this can happen when the user starts a mission, stops it,
# and then starts a different one
# we dont wanna log the incomplete one
# did it change since we last got called?
if log.creation_time != mplan.creation_time:
# it changed!
# re-start a log
log = self.start_new_log(mplan)
# now we got a valid mission plan
# first add the auv pose
world_trans = self.bb.get(bb_enums.WORLD_TRANS)
x,y = world_trans[0], world_trans[1]
z = -self.bb.get(bb_enums.DEPTH)
log.navigation_trace.append((x,y,z))
# then add the raw gps
gps = self.bb.get(bb_enums.RAW_GPS)
if gps is None or gps.status.status == -1: # no fix
gps_utm_point = None
else:
# translate the latlon to utm point using the same service as the mission plan
gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
if gps_utm_x is None:
gps_utm_point = None
else:
gps_utm_point = (gps_utm_x, gps_utm_y)
log.raw_gps_trace.append(gps_utm_point)
# then add the tree tip and its status
tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
log.tree_tip_trace.append((tree_tip, tip_status))
self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
return pt.Status.SUCCESS
class A_SetDVLRunning(pt.behaviour.Behaviour):
def __init__(self, dvl_on_off_service_name, running, cooldown):
super(A_SetDVLRunning, self).__init__(name="A_SetDVLRunning")
self.switcher_service = rospy.ServiceProxy(dvl_on_off_service_name,
SetBool)
self.bb = pt.blackboard.Blackboard()
self.sb = SetBool()
self.sb.data = running
self.running = running
self.last_toggle = 0
self.cooldown = cooldown
self.service_name = dvl_on_off_service_name
def update(self):
# try not to call the service every tick...
dvl_is_running = self.bb.get(bb_enums.DVL_IS_RUNNING)
if dvl_is_running is not None:
if dvl_is_running == self.sb.data:
rospy.loginfo_throttle_identical(20, "DVL is already running:"+str(self.sb.data))
return pt.Status.SUCCESS
# check if enough time has passed since last call
t = time.time()
if t - self.last_toggle < self.cooldown:
# nope, return running while we wait
rospy.loginfo_throttle_identical(5, "Waiting on DVL toggle cooldown")
return pt.Status.RUNNING
try:
ret = self.switcher_service(self.running)
except rospy.service.ServiceException:
rospy.logwarn_throttle_identical(60, "DVL Start/stop service not found! Succeeding by default. namespace:{}".format(self.service_name))
return pt.Status.SUCCESS
if ret.success:
rospy.loginfo_throttle_identical(5, "DVL TOGGLED:"+str(self.sb.data))
self.last_toggle = time.time()
self.bb.set(bb_enums.DVL_IS_RUNNING, self.sb.data)
return pt.Status.SUCCESS
rospy.logwarn_throttle_identical(5, "DVL COULD NOT BE TOGGLED:{}, ret:{}".format(self.sb.data, ret))
return pt.Status.FAILURE
class A_EmergencySurface(ptr.actions.ActionClient):
def __init__(self, emergency_action_namespace):
"""
What to do when an emergency happens. This should be a very simple
action that is super unlikely to fail, ever. It should also 'just work'
without a goal.
Like surfacing.
"""
self.bb = pt.blackboard.Blackboard()
self.action_goal_handle = None
ptr.actions.ActionClient.__init__(
self,
name="A_EmergencySurface",
action_spec=GotoWaypointAction,
action_goal=None,
action_namespace= emergency_action_namespace,
override_feedback_message_on_running="EMERGENCY SURFACING"
)
self.action_server_ok = False
def setup(self, timeout):
"""
Overwriting the normal ptr action setup to stop it from failing the setup step
and instead handling this failure in the tree.
"""
self.logger.debug("%s.setup()" % self.__class__.__name__)
self.action_client = actionlib.SimpleActionClient(
self.action_namespace,
self.action_spec
)
if not self.action_client.wait_for_server(rospy.Duration(timeout)):
self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
self.action_client = None
self.action_server_ok = False
else:
self.action_server_ok = True
return True
def initialise(self):
if not self.action_server_ok:
rospy.logwarn_throttle_identical(5, "No Action Server found for emergency action, will just block the tree!")
return
self.feedback_message = "EMERGENCY SURFACING"
# construct the message
self.action_goal = GotoWaypointGoal()
self.sent_goal = False
def update(self):
if not self.action_server_ok:
self.feedback_message = "Action Server for emergency action can not be used!"
rospy.logerr_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if your action client is not valid
if not self.action_client:
self.feedback_message = "ActionClient for emergency action is invalid!"
rospy.logwarn_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if the action_goal is invalid
if not self.action_goal:
self.feedback_message = "No action_goal!"
rospy.logwarn(self.feedback_message)
return pt.Status.FAILURE
# if goal hasn't been sent yet
if not self.sent_goal:
self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
self.sent_goal = True
rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
self.feedback_message = "Emergency goal sent"
return pt.Status.RUNNING
# if the goal was aborted or preempted
if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
actionlib_msgs.GoalStatus.PREEMPTED]:
self.feedback_message = "Aborted emergency"
rospy.loginfo(self.feedback_message)
return pt.Status.FAILURE
result = self.action_client.get_result()
# if the goal was accomplished
if result:
self.feedback_message = "Completed emergency"
rospy.loginfo(self.feedback_message)
return pt.Status.SUCCESS
# if we're still trying to accomplish the goal
return pt.Status.RUNNING
def feedback_cb(self, msg):
pass
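# Illustrative sketch (not part of the original tree): these behaviours are
# meant to be composed with py_trees / py_trees_ros. The action namespace
# below is a hypothetical example.
#
#   set_next = A_SetNextPlanAction()
#   goto     = A_GotoWaypoint(action_namespace="/sam/ctrl/goto_waypoint")
#   mission  = pt.composites.Sequence(name="Mission", children=[set_next, goto])
#   tree     = ptr.trees.BehaviourTree(root=mission)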
class A_SetNextPlanAction(pt.behaviour.Behaviour):
def __init__(self, do_not_visit=False):
"""
Sets the current plan action to the next one
SUCCESS if it can set it to something that is not None
FAILURE otherwise
if do_not_visit=True, then this action will only get the current wp
and set it and wont actually advance the plan forward.
This is useful for when you want to set the current wp right after
you created a plan.
"""
self.bb = pt.blackboard.Blackboard()
super(A_SetNextPlanAction, self).__init__('A_SetNextPlanAction')
self.do_not_visit = do_not_visit
def update(self):
mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mission_plan is None:
rospy.logwarn_throttle(5, "Mission plan was None!")
return pt.Status.FAILURE
if not self.do_not_visit:
mission_plan.visit_wp()
next_action = mission_plan.get_current_wp()
if next_action is None:
self.feedback_message = "Next action was None"
rospy.logwarn_throttle(5, "Mission is complete:{}".format(mission_plan.is_complete()))
return pt.Status.FAILURE
rospy.loginfo_throttle_identical(5, "Set CURRENT_PLAN_ACTION {} to: {}".format(self.do_not_visit, str(next_action)))
self.bb.set(bb_enums.CURRENT_PLAN_ACTION, next_action)
return pt.Status.SUCCESS
class A_GotoWaypoint(ptr.actions.ActionClient):
def __init__(self,
action_namespace,
goal_tf_frame = 'utm',
node_name = "A_GotoWaypoint"):
"""
Runs an action server that will move the robot to the given waypoint
"""
self.bb = pt.blackboard.Blackboard()
self.node_name = node_name
list_of_maneuvers = self.bb.get(bb_enums.MANEUVER_ACTIONS)
if list_of_maneuvers is None:
list_of_maneuvers = [self.node_name]
else:
list_of_maneuvers.append(self.node_name)
self.bb.set(bb_enums.MANEUVER_ACTIONS, list_of_maneuvers)
self.action_goal_handle = None
# become action client
ptr.actions.ActionClient.__init__(
self,
name = self.node_name,
action_spec = GotoWaypointAction,
action_goal = None,
action_namespace = action_namespace,
override_feedback_message_on_running = "Moving to waypoint"
)
self.action_server_ok = False
self.goal_tf_frame = goal_tf_frame
def setup(self, timeout):
'Parameter' , 'theta' , 'theta_boot' , 'bias/sigma [%]' , 'error [%]' )
table = []
n = 0
for name in sorted ( stats ) :
if name in fitresult :
p = fitresult [ name ]
theta = p * 1.0
if not isinstance ( theta , VE ) or theta.cov2() <= 0 :
logger.warning ('print_bootstrap: parameter "%s" is invalid in ``fitresult'', skip %s' % ( name , theta ) )
continue
elif name in morevars :
theta = morevars [ name ]
if not isinstance ( theta , VE ) or theta.cov2() <= 0 :
logger.warning ('print_bootstrap: parameter "%s" is invalid in ``morevars'', skip %s' % ( name , theta ) )
continue
else :
continue
statistics = stats [ name ]
n = max ( n , statistics.nEntries() )
theta_boot = VE ( statistics.mean().value() , statistics.mu2() )
bias = theta_boot.value () - theta .value ()
scale = theta .error () / theta_boot.error ()
row = ( name ,
"%+13.6g +/- %-13.6g" % ( theta . value () , theta .error () ) ,
"%+13.6g +/- %-13.6g" % ( theta_boot . value () , theta_boot .error () ) ,
'%+6.2f' % ( bias / theta.error() * 100 ) ,
'%+6.2f' % ( scale * 100 - 100 ) )
table.append ( row )
for name in sorted ( stats ) :
if name in fitresult : continue
if name in morevars : continue
statistics = stats [ name ]
theta_boot = VE ( statistics.mean().value() , statistics.mu2() )
row = name , '', "%+13.6g +/- %-13.6g" % ( theta_boot . value () , theta_boot .error () ) , '' , ''
table.append ( row )
table = [ header ] + table
title = title if title else "Bootstrapping with #%d samples" % n
import ostap.logger.table as Table
table = Table.table ( table ,
title = title ,
alignment = 'lcccc' ,
prefix = "# " )
logger.info ( '%s:\n%s' % ( title , table ) )
# ==============================================================================
## Default function to generate the data
# - simple call for <code>PDF.generate</code>
def generate_data ( pdf , varset , **config ) :
"""Default function to generate the data
- simple call for `PDF.generate`
"""
return pdf.generate ( varset = varset , **config )
# ==============================================================================
## Default function to perform the actual fit
# - simple call for <code>PDF.fitTo</code>
def make_fit ( pdf , dataset , **config ) :
"""Default function to perform the actual fit
- simple call for `PDF.fitTo`
"""
result , _ = pdf.fitTo ( dataset , **config )
return result
# ==============================================================================
## Accept fit?
# Accept the fit result?
# - valid fit result
# - fit status is 0 (SUCCESS)
#  - covariance matrix quality is either 3 (full accurate matrix) or -1 (unknown/externally provided)
# @param result fit result
# @param pdf pdf
# @param dataset pdf
#
def accept_fit ( result , pdf = None , dataset = None ) :
"""Accept the fit result?
- valid fit result
- fit status is 0 (SUCCESS)
- covariance matrix quality is either 3 (full accurate matrix) or -1 (unknown/externally provided)
"""
return result and ( 0 == result.status () ) and ( result.covQual () in ( -1 , 3 ) )
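# ==============================================================================
## Illustrative sketch (not part of Ostap): a stricter, user-supplied accept
#  function can be passed to make_toys via the `accept_fun` argument. The EDM
#  threshold below is an arbitrary example value.
def accept_fit_strict ( result , pdf = None , dataset = None ) :
    """Example accept function: valid result, SUCCESS status, good covariance
    matrix quality and a small estimated distance to minimum (EDM)."""
    return accept_fit ( result , pdf , dataset ) and result.edm () <= 1.e-3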
# ==============================================================================
## make <code>nToys</code> pseudoexperiments
#
# Schematically:
# @code
# for toy in range ( nToys ) :
# ... dataset = gen_fun ( pdf , ... , **gen_config )
# ... result = fit_fun ( pdf , dataset , **fit_config )
# ... if not accept_fun ( result , pdf , dataset ) : continue
# .... < collect statistics here >
# @endcode
#
# For each experiment
# - generate dataset using <code>pdf</code> with variables specified
# in <code>data</code> and configuration specified via<code>gen_config</code>
# for each generation the parameters of <code>pdf</code> are reset
# for their initial values and values from <code>init_pars</code>
# - fit generated dataset with <code>pdf</code> using configuration
# specified via <code>fit_config</code>
#
# @code
# pdf = ...
# results , stats = make_toys ( pdf , ## PDF to use
# nToys = 1000 , ## Number of pseudoexperiments
# data = [ 'mass' ] , ## variables in dataset
# gen_config = { 'nEvents' : 5000 } , ## configuration of <code>pdf.generate</code>
# fit_config = { 'ncpus' : 2 } , ## configuration of <code>pdf.fitTo</code>
# init_pars = { 'mean' : 0.0 , 'sigma' : 1.0 } ) ## parameters to use for generation
# @endcode
#
# Derived parameters can be also retrived via <code>more_vars</code> argument:
# @code
# ratio = lambda res,pdf : res.ratio('x','y')
# more_vars = { 'Ratio' : ratio }
# r, s = make_toys ( .... , more_vars = more_vars , ... )
# @endcode
#
# @param pdf PDF to be used for generation and fitting
# @param nToys number of pseudoexperiments to generate
# @param data variable list of variables to be used for dataset generation
# @param gen_config configuration of <code>pdf.generate</code>
# @param fit_config configuration of <code>pdf.fitTo</code>
# @param init_pars redefine these parameters for each pseudoexperiment
# @param more_vars calculate more variables form fit-result
# @param get_fun specific generate action (if needed)
# @param fit_fun specific fitting action (if needed)
# @param accept_fun specific accept action (if needed)
# @param silent silent toys?
# @param progress show the progress?
# @param logger use this logger
# @param frequency how often to dump the intermediate results ?
# @return dictionary with fit results for the toys and the dictionary of statistics
#
# - If <code>gen_fun</code> is not specified <code>generate_data</code> is used
# - If <code>fit_fun</code> is not specified <code>make_fit</code> is used
# - If <code>accept_fun</code> is not specified <code>accept_fit</code> is used
def make_toys ( pdf ,
nToys ,
data , ## template for dataset/variables
gen_config , ## parameters for <code>pdf.generate</code>
fit_config = {} , ## parameters for <code>pdf.fitTo</code>
init_pars = {} ,
more_vars = {} ,
gen_fun = None , ## generator function ( pdf , varset , **config )
fit_fun = None , ## fit function ( pdf , dataset , **config )
accept_fun = None , ## accept function ( fit-result, pdf, dataset )
silent = True ,
progress = True ,
logger = logger ,
frequency = 1000 ) : ##
"""Make `nToys` pseudoexperiments
- Schematically:
>>> for toy in range ( nToys ) :
>>> ... dataset = gen_fun ( pdf , ... , **gen_config )
>>> ... result = fit_fun ( pdf , dataset , **fit_config )
>>> ... if not accept_fun ( result , pdf , dataset ) : continue
>>> .... < collect statistics here >
For each pseudoexperiment:
1. generate dataset using `pdf` with variables specified
in `data` and configuration specified via `gen_config`
for each generation the parameters of `pdf` are reset
for their initial values and values from `init_pars`
2. fit generated dataset with `pdf` using configuration
specified via `fit_config`
- `pdf` : PDF to be used for generation and fitting
- `nToys` : number of pseudoexperiments to generate
- `data` : variable list of variables to be used for dataset generation
- `gen_config` : configuration of <code>pdf.generate</code>
- `fit_config` : configuration of <code>pdf.fitTo</code>
- `init_pars` : redefine these parameters for each pseudoexperiment
- `more_vars` : dictionary of functions to define the additional results
- `gen_fun` : generator function
- `fit_fun` : fitting function
- `accept_fun` : accept function
- `silent` : silent toys?
- `progress` : show progress bar?
- `logger` : use this logger
- `frequency` : how often to dump the intermediate results ?
It returns a dictionary with fit results for the toys and a dictionary of statistics
>>> pdf = ...
... results, stats = make_toys ( pdf , ## PDF to use
... 1000 , ## number of toys
... [ 'mass' ] , ## variables in dataset
    ...                          { 'nEvents' : 5000 } , ## configuration of pdf.generate
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.display import display
from pixiedust.display.display import *
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils import Logger
from six import iteritems, string_types
from collections import OrderedDict, namedtuple
import base64
import inspect
import sys
from functools import partial
from IPython.utils.io import capture_output
def route(**kw):
def route_dec(fn):
fn.pixiedust_route = kw
if hasattr(fn, "fn"):
fn.fn.persist_args = kw.pop("persist_args", None)
return fn
return route_dec
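# A minimal, hypothetical usage sketch for @route (class and method names are
# placeholders, not part of this module): an empty @route() marks the default
# view, keyword arguments are matched against the current options, and "*"
# matches any value that is set.
#
#   @PixieApp
#   class MyApp():
#       @route()
#       def main_screen(self):
#           return "<button type='submit' pd_options='view=detail'>Show detail</button>"
#
#       @route(view="detail")
#       def detail_screen(self):
#           return "<div>Detail view</div>"
#
#   MyApp().run()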
@Logger()
class captureOutput(object):
"""
Decorator used for routes that allows using external libraries for generating
the html fragment.
When using this decorator the route doesn't need to return a string. If it does
it will be ignored.
    Must be declared after the route decorator.
captureOutput and templateArgs should not be used together
from pixiedust.display.app import *
import matplotlib.pyplot as plt
import numpy as np
@PixieApp
class Test():
@route()
@captureOutput
def mainScreen(self):
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2*np.pi*t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('About as simple as it gets, folks')
plt.grid(True)
plt.savefig("test.png")
plt.show()
Test().run()
"""
def __init__(self, fn):
self.fn = fn
def convert_html(self, output):
if "text/html" in output.data:
return output._repr_html_()
elif "image/png" in output.data:
            return """<img alt="image" src="data:image/png;base64,{}">""".format(
base64.b64encode(output._repr_png_()).decode("ascii")
)
elif "application/javascript" in output.data:
return """<script type="text/javascript">{}</script>""".format(output._repr_javascript_())
elif "text/markdown" in output.data:
import markdown
return markdown.markdown(output._repr_mime_("text/markdown"))
self.debug("Unused output: {}".format(output.data.keys()))
return ""
def __get__(self, instance, instance_type):
wrapper_fn = partial(self.wrapper, instance)
wrapper_fn.org_fn = self.fn
return wrapper_fn
def wrapper(self, instance, *args, **kwargs):
with capture_output() as buf:
self.fn(instance, *args, **kwargs)
return "\n".join([self.convert_html(output) for output in buf.outputs])
class templateArgs(object):
"""
    Decorator that enables using local variables in a Jinja template.
    Must be used in conjunction with the route decorator and declared after it.
from pixiedust.display.app import *
@PixieApp
class Test():
@route()
@templateArgs
def mainScreen(self):
var1 = 'something computed'
            return "<div>Accessing local variable {{var1}} from a Jinja template</div>"
Test().run()
"""
TemplateRetValue = namedtuple('TemplateRetValue', ['ret_value', 'locals'])
def __init__(self, fn):
self.fn = fn
def __get__(self, instance, instance_type):
wrapper_fn = partial(self.wrapper, instance)
wrapper_fn.org_fn = self.fn
return wrapper_fn
def wrapper(self, instance, *args, **kwargs):
locals = [{}]
def tracer(frame, event, arg):
if event == "return":
locals[0] = frame.f_locals.copy()
if 'self' in locals[0]:
del locals[0]['self']
sys.setprofile(tracer)
try:
ret_value = self.fn(instance, *args, **kwargs)
return templateArgs.TemplateRetValue(ret_value, locals[0])
finally:
sys.setprofile(None)
#Global object that enables system-wide customization of PixieApp run options
pixieAppRunCustomizer = None
def runPixieApp(app, parent_pixieapp=None, entity=None, **kwargs):
kwargs.get("options", {}).pop("prefix", None) #child pixieapp should have its own prefix
if isinstance(app, PixieDustApp):
app.run(entity, **kwargs)
elif isinstance(app, string_types):
parts = app.split('.')
instance_app = None
if len(parts) > 1:
instance_app = getattr(__import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0), parts[-1])()
else:
instance_app = ShellAccess[parts[-1]]()
if parent_pixieapp is not None:
instance_app.parent_pixieapp = ShellAccess[parent_pixieapp]
instance_app.parent_pixieapp.add_child(instance_app)
kwargs["is_running_child_pixieapp"] = True
instance_app.run(entity, **kwargs)
else:
raise ValueError("Invalid argument to runPixieApp. Only PixieApp or String allowed")
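# Hedged usage sketch: launch a child PixieApp from a route handler by name;
# "ChildApp" and "parent_app" are placeholder names assumed to exist in the
# notebook shell namespace.
#
#   runPixieApp("ChildApp", parent_pixieapp="parent_app")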
@Logger()
class PixieDustApp(Display):
routesByClass = {}
def __init__(self, options=None, entity=None, dataHandler=None):
super(PixieDustApp, self).__init__(options, entity, dataHandler)
self.parent_pixieapp = None
if not hasattr(self, "metadata"):
self.metadata = None
self.empty_metadata = False
def append_metadata(self, value):
if self.empty_metadata:
self.metadata = {}
self.empty_metadata = False
else:
self.metadata = self.metadata or {}
self.metadata.update(value)
def getOptionValue(self, optionName):
option = None
if self.metadata:
option = self.metadata.get(optionName, None)
if option is None:
            #check if the key is a field of the class
option = getattr(self.entity, optionName) if self.entity is not None and hasattr(self.entity, optionName) else None
#make sure we don't have a conflict with an existing function
if callable(option):
option = None
if option is None:
option = self.options.get(optionName, None)
return option
def matchRoute(self, route):
for key, value in iteritems(route):
option = self.getOptionValue(key)
if (option is None and value == "*") or (value != "*" and option != value):
return False
return True
def has_persist_args(self, method):
if isinstance(method, partial) and hasattr(method, "org_fn"):
method = method.org_fn
return getattr(method, "persist_args", None) is not None
def injectArgs(self, method, route):
if isinstance(method, partial) and hasattr(method, "org_fn"):
method = method.org_fn
argspec = inspect.getargspec(method)
args = argspec.args
if len(args) > 0:
args = args[1:] if hasattr(method, "__self__") or args[0] == 'self' else args
return OrderedDict(zip([a for a in args], [self.getOptionValue(arg) for arg in args]))
def invoke_route(self, class_method, **kwargs):
"Programmatically invoke a route from arguments"
        retValue = None
        try:
            injectedArgs = kwargs
retValue = class_method(*list(injectedArgs.values()))
finally:
if isinstance(retValue, templateArgs.TemplateRetValue):
injectedArgs.update(retValue.locals)
retValue = retValue.ret_value
if isinstance(retValue, string_types):
retValue = self.renderTemplateString(retValue, **injectedArgs)
return retValue
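    # Hedged sketch (names are placeholders): a route handler can be exercised
    # directly, e.g. from a unit test; keyword arguments are passed to the
    # handler in the order they are given.
    #
    #   app = MyApp()
    #   html = app.invoke_route(app.detail_screen, rowId="42")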
def __getattr__(self, name):
if ShellAccess[name] is not None:
return ShellAccess[name]
if name != "__pd_gateway_namespace__" and hasattr(self, "__pd_gateway_namespace__"):
name = self.__pd_gateway_namespace__ + name
if ShellAccess[name] is not None:
return ShellAccess[name]
raise AttributeError("{} attribute not found".format(name))
def hook_msg(self, msg):
msg['content']['metadata']['pixieapp_metadata'] = self.metadata
self.empty_metadata = True
return msg
def render(self):
from IPython.core.interactiveshell import InteractiveShell
display_pub = InteractiveShell.instance().display_pub
try:
display_pub.register_hook(self.hook_msg)
super(PixieDustApp, self).render()
finally:
display_pub.unregister_hook(self.hook_msg)
def doRender(self, handlerId):
if self.__class__.__name__ in PixieDustApp.routesByClass:
defRoute = None
retValue = None
injectedArgs = {}
try:
dispatchKey = "widgets" if "widget" in self.options else "routes"
for t in PixieDustApp.routesByClass[self.__class__.__name__][dispatchKey]:
if not t[0]:
defRoute = t[1]
elif self.matchRoute(t[0]):
self.debug("match found: {}".format(t[0]))
meth = getattr(self, t[1])
injectedArgs = self.injectArgs(meth, t[0])
self.debug("Injected args: {}".format(injectedArgs))
if self.metadata is None and self.has_persist_args(meth):
self.metadata = {key:self.getOptionValue(key) for key,_ in iteritems(t[0])}
retValue = meth(*list(injectedArgs.values()))
return
if defRoute:
retValue = getattr(self, defRoute)()
return
finally:
if isinstance(retValue, templateArgs.TemplateRetValue):
injectedArgs.update(retValue.locals)
retValue = retValue.ret_value
if isinstance(retValue, string_types):
if self.getBooleanOption("nostore_isrunningchildpixieapp", False):
self.options.pop("nostore_isrunningchildpixieapp", None)
retValue = """<div id="wrapperHTML{{prefix}}" pixiedust="{{pd_controls|htmlAttribute}}">""" + retValue + """</div>"""
self._addHTMLTemplateString(retValue, **injectedArgs)
elif isinstance(retValue, dict):
body = self.renderTemplateString(retValue.get("body", ""))
jsOnLoad = self.renderTemplateString(retValue.get("jsOnLoad", ""))
jsOK = self.renderTemplateString(retValue.get("jsOK", ""))
dialogRoot = retValue.get("dialogRoot", None)
if dialogRoot is not None:
jsOnLoad = """pixiedust.dialogRoot="{}";\n{}""".format(self.renderTemplateString(dialogRoot), jsOnLoad)
if body is not None:
self._addHTMLTemplateString("""
{{body}}
<pd_dialog>
<pd_onload>{{jsOnLoad|htmlAttribute}}</pd_onload>
<pd_ok>{{jsOK|htmlAttribute}}</pd_ok>
</pd_dialog>
""", body=body, jsOnLoad=jsOnLoad, jsOK=jsOK)
print("Didn't find any routes for {}. Did you forget to define a default route?".format(self))
pixieapp_child_prefix = "__pixieapp_child__"
@property
def pixieapp_children(self):
return {var:getattr(self, var) for var in dir(self) if var.startswith(PixieDustApp.pixieapp_child_prefix)}
def add_child(self, instance_app):
var_name = "{}{}".format(
PixieDustApp.pixieapp_child_prefix,
len([var for var in dir(self) if var.startswith(PixieDustApp.pixieapp_child_prefix)])
)
setattr(self, var_name, instance_app)
def get_custom_options(self):
return {}
def getDialogOptions(self):
return {}
@Logger()
def PixieApp(cls):
    #reset the class routing in case the cell is being run multiple times
clsName = "{}_{}_Display".format(inspect.getmodule(cls).__name__, cls.__name__)
PixieDustApp.routesByClass[clsName] = {"routes":[], "widgets":[]}
#put the routes that define a widget in a separate bucket
def walk(cl):
for name, method in iteritems(cl.__dict__):
if hasattr(method, "pixiedust_route"):
if "widget" in method.pixiedust_route:
PixieDustApp.routesByClass[clsName]["widgets"].append( (method.pixiedust_route,name) )
else:
PixieDustApp.routesByClass[clsName]["routes"].append( (method.pixiedust_route,name) )
        for c in cl.__bases__:
walk(c)
walk(cls)
#re-order the routes according to the number of constraints e.g. from more to less specific
p = PixieDustApp.routesByClass[clsName]["routes"]
PixieDustApp.routesByClass[clsName]["routes"] = [p[a[1]] for a in sorted([(len(a[0]), i) for i,a in enumerate(p)], reverse=True)]
def __init__(self, options=None, entity=None, dataHandler=None):
PixieDustApp.__init__(self, options or {}, entity, dataHandler)
def getPixieAppEntity(self):
return self.pixieapp_entity if hasattr(self, "pixieapp_entity") else None
def formatOptions(self,options):
"""Helper method that convert pd options from Json format to pixieApp html attribute compliant format"""
return ';'.join(["{}={}".format(key,value) for (key, value) in iteritems(options)])
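    # Illustrative example of the conversion performed by formatOptions:
    #   formatOptions(self, {"view": "detail", "rowId": 42})  ->  "view=detail;rowId=42"
    # (the key order follows the dictionary's iteration order)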
def decoName(cls, suffix):
return "{}_{}_{}".format(cls.__module__, cls.__name__, suffix)
def run_method_with_super_classes(cls, instance, method_name):
fctSet = set()
for cl in reversed(inspect.getmro(cls)):
            if hasattr(cl, method_name):
                f = getattr(cl, method_name)
if f not in fctSet and callable(f):
fctSet.add(f)
f(instance)
def run(self, entity=None, **kwargs):
is_running_child_pixieapp = kwargs.pop("is_running_child_pixieapp", False)
for key, value in iteritems(kwargs):
setattr(self, key, value)
if entity is not None:
self.pixieapp_entity = entity
var = None
if self.parent_pixieapp is not None:
parent_key = None
for key in ShellAccess.keys():
notebook_var = ShellAccess[key]
if notebook_var is self.parent_pixieapp and key != "self":
parent_key = key
break
for child_key, child in iteritems(notebook_var.pixieapp_children):
if child is self:
var = "{}.{}".format(parent_key, child_key)
break
else:
for key in ShellAccess.keys():
notebook_var = ShellAccess[key]
if notebook_var is self:
var = key
break
if not hasattr(self, "pd_initialized"):
run_method_with_super_classes(cls, self, "setup")
self.nostore_params = True
self.pd_initialized = True
instance_namespace = ""
if is_running_child_pixieapp:
            cell_id =
# -*- coding: utf-8 -*-
"""
Various dependencies required by file-metadata that need some
special handling.
"""
from __future__ import (division, absolute_import, unicode_literals,
print_function)
import ctypes.util
import hashlib
import os
import subprocess
import sys
from distutils import sysconfig
from distutils.errors import DistutilsSetupError
try:
from urllib.request import urlopen
except ImportError: # Python 2
from urllib2 import urlopen
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
def data_path():
name = os.path.join(PROJECT_PATH, 'file_metadata', 'datafiles')
if not os.path.exists(name):
os.makedirs(name)
return name
def which(cmd):
try:
from shutil import which
return which(cmd)
except ImportError: # For python 3.2 and lower
try:
output = subprocess.check_output(["which", cmd],
stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError):
return None
else:
output = output.decode(sys.getfilesystemencoding())
return output.strip()
def setup_install(packages):
"""
    Install packages using pip into the current folder. Useful for importing
    packages during setup itself.
"""
packages = list(packages)
if not packages:
return True
try:
        subprocess.check_call([sys.executable, "-m", "pip", "install",
"-t", PROJECT_PATH] + packages)
return True
except subprocess.CalledProcessError:
return False
def download(url, filename, overwrite=False, sha1=None):
"""
Download the given URL to the given filename. If the file exists,
    it won't be downloaded unless asked to overwrite. Both text data
    (html, txt, etc.) and binary data (images, audio, etc.) are
    acceptable.
:param url: A URL to download.
:param filename: The file to store the downloaded file to.
:param overwrite: Set to True if the file should be downloaded even if it
already exists.
    :param sha1: The sha1 checksum used to verify the downloaded file.
"""
blocksize = 16 * 1024
_hash = hashlib.sha1()
if os.path.exists(filename) and not overwrite:
# Do a pass for the hash if it already exists
with open(filename, "rb") as downloaded_file:
while True:
block = downloaded_file.read(blocksize)
if not block:
break
_hash.update(block)
else:
# If it doesn't exist, or overwrite=True, find hash while downloading
response = urlopen(url)
with open(filename, 'wb') as out_file:
while True:
block = response.read(blocksize)
if not block:
break
out_file.write(block)
_hash.update(block)
return _hash.hexdigest() == sha1
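# Hedged usage sketch: the URL, file name and checksum below are placeholders,
# not real artifacts of this package.
#
#   target = os.path.join(data_path(), 'model.dat')
#   if not download('https://example.org/model.dat', target,
#                   sha1='<expected sha1 hex digest>'):
#       raise DistutilsSetupError('Checksum mismatch while downloading model.dat')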
class CheckFailed(Exception):
"""
Exception thrown when a ``SetupPackage.check()`` fails.
"""
pass
class SetupPackage(object):
name = None
optional = False
pkg_names = {
"apt-get": None,
"yum": None,
"dnf": None,
"pacman": None,
"zypper": None,
"brew": None,
"port": None,
"windows_url": None
}
def check(self):
"""
Check whether the dependencies are met. Should raise a ``CheckFailed``
exception if the dependency was not found.
"""
pass
def get_install_requires(self):
"""
Return a list of Python packages that are required by the package.
pip / easy_install will attempt to download and install this
package if it is not installed.
"""
return []
def get_setup_requires(self):
"""
Return a list of Python packages that are required by the setup.py
itself. pip / easy_install will attempt to download and install this
package if it is not installed on top of the setup.py script.
"""
return []
def get_data_files(self):
"""
Perform required actions to add the data files into the directory
given by ``data_path()``.
"""
pass
def install_help_msg(self):
"""
The help message to show if the package is not installed. The help
message shown depends on whether some class variables are present.
"""
def _try_managers(*managers):
for manager in managers:
pkg_name = self.pkg_names.get(manager, None)
if pkg_name and which(manager) is not None:
pkg_note = None
if isinstance(pkg_name, (tuple, list)):
pkg_name, pkg_note = pkg_name
msg = ('Try installing {0} with `{1} install {2}`.'
.format(self.name, manager, pkg_name))
if pkg_note:
msg += ' Note: ' + pkg_note
return msg
message = ""
if sys.platform == "win32":
url = self.pkg_names.get("windows_url", None)
if url:
return ('Please check {0} for instructions to install {1}'
.format(url, self.name))
elif sys.platform == "darwin":
manager_message = _try_managers("brew", "port")
return manager_message or message
elif sys.platform.startswith("linux"):
try:
import distro
except ImportError:
setup_install(['distro'])
import distro
release = distro.id()
if release in ('debian', 'ubuntu', 'linuxmint', 'raspbian'):
manager_message = _try_managers('apt-get')
if manager_message:
return manager_message
elif release in ('centos', 'rhel', 'redhat', 'fedora',
'scientific', 'amazon', ):
manager_message = _try_managers('dnf', 'yum')
if manager_message:
return manager_message
elif release in ('sles', 'opensuse'):
manager_message = _try_managers('zypper')
if manager_message:
return manager_message
            elif release in ('arch',):
manager_message = _try_managers('pacman')
if manager_message:
return manager_message
return message
class PkgConfig(SetupPackage):
"""
This is a class for communicating with pkg-config.
"""
name = "pkg-config"
pkg_names = {
"apt-get": 'pkg-config',
"yum": None,
"dnf": None,
"pacman": None,
"zypper": None,
"brew": 'pkg-config',
"port": None,
"windows_url": None
}
def __init__(self):
if sys.platform == 'win32':
self.has_pkgconfig = False
else:
self.pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
self.set_pkgconfig_path()
try:
with open(os.devnull) as nul:
subprocess.check_call([self.pkg_config, "--help"],
stdout=nul, stderr=nul)
self.has_pkgconfig = True
except (subprocess.CalledProcessError, OSError):
self.has_pkgconfig = False
raise DistutilsSetupError("pkg-config is not installed. "
"Please install it to continue.\n" +
self.install_help_msg())
def set_pkgconfig_path(self):
pkgconfig_path = sysconfig.get_config_var('LIBDIR')
if pkgconfig_path is None:
return
pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
if not os.path.isdir(pkgconfig_path):
return
os.environ['PKG_CONFIG_PATH'] = ':'.join(
[os.environ.get('PKG_CONFIG_PATH', ""), pkgconfig_path])
def get_version(self, package):
"""
Get the version of the package from pkg-config.
"""
if not self.has_pkgconfig:
return None
try:
output = subprocess.check_output(
[self.pkg_config, package, "--modversion"],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return None
else:
output = output.decode(sys.getfilesystemencoding())
return output.strip()
# The PkgConfig class should be used through this singleton
pkg_config = PkgConfig()
class Distro(SetupPackage):
name = "distro"
def check(self):
return 'Will be installed with pip.'
def get_setup_requires(self):
try:
import distro # noqa (unused import)
return []
except ImportError:
return ['distro']
class SetupTools(SetupPackage):
name = 'setuptools'
def check(self):
return 'Will be installed with pip.'
def get_setup_requires(self):
try:
import setuptools # noqa (unused import)
return []
except ImportError:
return ['setuptools']
class PathLib(SetupPackage):
name = 'pathlib'
def check(self):
if sys.version_info < (3, 4):
return 'Backported pathlib2 will be installed with pip.'
else:
return 'Already installed in python 3.4+'
def get_install_requires(self):
if sys.version_info < (3, 4):
return ['pathlib2']
else:
return []
class AppDirs(SetupPackage):
name = 'appdirs'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['appdirs']
class LibMagic(SetupPackage):
name = 'libmagic'
pkg_names = {
"apt-get": 'libmagic-dev',
"yum": 'file',
"dnf": 'file',
"pacman": None,
"zypper": None,
"brew": 'libmagic',
"port": None,
"windows_url": None
}
def check(self):
file_path = which('file')
if file_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found "file" utility at {0}.'.format(file_path)
class PythonMagic(SetupPackage):
name = 'python-magic'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['python-magic']
class Six(SetupPackage):
name = 'six'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['six>=1.8.0']
class ExifTool(SetupPackage):
name = 'exiftool'
pkg_names = {
"apt-get": 'exiftool',
"yum": 'perl-Image-ExifTool',
"dnf": 'perl-Image-ExifTool',
"pacman": None,
"zypper": None,
"brew": 'exiftool',
"port": 'p5-image-exiftool',
"windows_url": 'http://www.sno.phy.queensu.ca/~phil/exiftool/'
}
def check(self):
exiftool_path = which('exiftool')
if exiftool_path is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found at {0}.'.format(exiftool_path)
class Pillow(SetupPackage):
name = 'pillow'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['pillow>=2.5.0']
class Numpy(SetupPackage):
name = 'numpy'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['numpy>=1.7.2']
class Dlib(SetupPackage):
name = 'dlib'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['dlib']
class ScikitImage(SetupPackage):
name = 'scikit-image'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
# For some reason some dependencies of scikit-image aren't installed
# by pip: https://github.com/scikit-image/scikit-image/issues/2155
return ['scipy', 'matplotlib', 'scikit-image>=0.12']
class MagickWand(SetupPackage):
name = 'magickwand'
pkg_names = {
"apt-get": 'libmagickwand-dev',
"yum": 'ImageMagick-devel',
"dnf": 'ImageMagick-devel',
"pacman": None,
"zypper": None,
"brew": 'imagemagick',
"port": 'imagemagick',
"windows_url": ("http://docs.wand-py.org/en/latest/guide/"
"install.html#install-imagemagick-on-windows")
}
def check(self):
# `wand` already checks for magickwand, but only when importing, not
# during installation. See https://github.com/dahlia/wand/issues/293
magick_wand = pkg_config.get_version("MagickWand")
if magick_wand is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found with pkg-config.'
class Wand(SetupPackage):
name = 'wand'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['wand']
class PyColorName(SetupPackage):
name = 'pycolorname'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['pycolorname']
class LibZBar(SetupPackage):
name = 'libzbar'
pkg_names = {
"apt-get": 'libzbar-dev',
"yum": 'zbar-devel',
"dnf": 'zbar-devel',
"pacman": None,
"zypper": None,
"brew": 'zbar',
"port": None,
"windows_url": None
}
def check(self):
libzbar = ctypes.util.find_library('zbar')
if libzbar is None:
raise CheckFailed('Needs to be installed manually.')
else:
return 'Found {0}.'.format(libzbar)
class ZBar(SetupPackage):
name = 'zbar'
def check(self):
return 'Will be installed with pip.'
def get_install_requires(self):
return ['zbar']
class JavaJRE(SetupPackage):
name = 'java'
pkg_names = {
"apt-get": 'default-jre',
"yum": 'java',
"dnf": 'java',
"pacman": None,
"zypper": None,
"brew": None,
"port": None,
"windows_url": "https://java.com/download/"
}
def check(self):
java_path = which('java')
if java_path is None:
raise CheckFailed('Needs to be installed manually.')
        else:
            return 'Found at {0}.'.format(java_path)
from keras import initializers, regularizers, activations
from keras.engine.topology import Layer
from keras import backend as K
import tensorflow as tf
class GraphEmbed(Layer):
def __init__(self, **kwargs):
super(GraphEmbed, self).__init__(**kwargs)
def build(self, input_shape):
super(GraphEmbed, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# atoms = (samples, max_atoms, atom_feat)
# distances = (samples, max_atoms, max_atoms, coor_dims)
atoms, distances = inputs
# Get parameters
max_atoms = int(atoms.shape[1])
atom_feat = int(atoms.shape[-1])
coor_dims = int(distances.shape[-1])
# Generate vector features filled with zeros
vector_features = tf.zeros_like(atoms)
vector_features = tf.reshape(vector_features, [-1, max_atoms, 1, atom_feat])
vector_features = tf.tile(vector_features, [1, 1, coor_dims, 1])
return [atoms, vector_features]
def compute_output_shape(self, input_shape):
return [input_shape[0], (input_shape[0][0], input_shape[0][1], input_shape[-1][-1], input_shape[0][-1])]
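# Hedged wiring sketch (shapes are illustrative, not required by the layer):
# with max_atoms=50, atom_feat=20 and 3-D coordinates, GraphEmbed turns the
# (atoms, distances) inputs into the initial scalar and vector feature tensors
# consumed by the layers below.
#
#   from keras.layers import Input
#   atoms = Input(shape=(50, 20))        # (samples, max_atoms, atom_feat)
#   dists = Input(shape=(50, 50, 3))     # (samples, max_atoms, max_atoms, coor_dims)
#   scalar_feats, vector_feats = GraphEmbed()([atoms, dists])
#   # scalar_feats -> (samples, 50, 20); vector_feats -> (samples, 50, 3, 20)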
class GraphSToS(Layer):
def __init__(self,
filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
super(GraphSToS, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphSToS, self).get_config()
base_config['filters'] = self.filters
return base_config
def build(self, input_shape):
atom_feat = input_shape[-1]
self.w_ss = self.add_weight(shape=(atom_feat * 2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_ss')
self.b_ss = self.add_weight(shape=(self.filters,),
name='b_ss',
initializer=self.bias_initializer)
super(GraphSToS, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# scalar_features = (samples, max_atoms, atom_feat)
scalar_features = inputs
# Get parameters
max_atoms = int(scalar_features.shape[1])
atom_feat = int(scalar_features.shape[-1])
# Expand scalar features to 4D
scalar_features = tf.reshape(scalar_features, [-1, max_atoms, 1, atom_feat])
scalar_features = tf.tile(scalar_features, [1, 1, max_atoms, 1])
# Combine between atoms
scalar_features_t = tf.transpose(scalar_features, perm=[0, 2, 1, 3])
scalar_features = tf.concat([scalar_features, scalar_features_t], -1)
# Linear combination
scalar_features = tf.reshape(scalar_features, [-1, atom_feat * 2])
scalar_features = tf.matmul(scalar_features, self.w_ss) + self.b_ss
scalar_features = tf.reshape(scalar_features, [-1, max_atoms, max_atoms, self.filters])
# Activation
scalar_features = self.activation(scalar_features)
return scalar_features
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1], input_shape[1], self.filters
class GraphSToV(Layer):
def __init__(self,
filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
super(GraphSToV, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphSToV, self).get_config()
base_config['filters'] = self.filters
return base_config
def build(self, input_shape):
atom_feat = input_shape[0][-1]
self.w_sv = self.add_weight(shape=(atom_feat * 2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_sv')
self.b_sv = self.add_weight(shape=(self.filters,),
name='b_sv',
initializer=self.bias_initializer)
super(GraphSToV, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# scalar_features = (samples, max_atoms, atom_feat)
# distances = (samples, max_atoms, max_atoms, coor_dims)
scalar_features, distances = inputs
# Get parameters
max_atoms = int(scalar_features.shape[1])
atom_feat = int(scalar_features.shape[-1])
coor_dims = int(distances.shape[-1])
# Expand scalar features to 4D
scalar_features = tf.reshape(scalar_features, [-1, max_atoms, 1, atom_feat])
scalar_features = tf.tile(scalar_features, [1, 1, max_atoms, 1])
# Combine between atoms
scalar_features_t = tf.transpose(scalar_features, perm=[0, 2, 1, 3])
scalar_features = tf.concat([scalar_features, scalar_features_t], -1)
# Apply weights
scalar_features = tf.reshape(scalar_features, [-1, atom_feat * 2])
scalar_features = tf.matmul(scalar_features, self.w_sv) + self.b_sv
scalar_features = tf.reshape(scalar_features, [-1, max_atoms, max_atoms, 1, self.filters])
scalar_features = tf.tile(scalar_features, [1, 1, 1, coor_dims, 1])
# Expand distances to 5D
distances = tf.reshape(distances, [-1, max_atoms, max_atoms, coor_dims, 1])
distances = tf.tile(distances, [1, 1, 1, 1, self.filters])
# Tensor product
vector_features = tf.multiply(scalar_features, distances)
# Activation
vector_features = self.activation(vector_features)
return vector_features
def compute_output_shape(self, input_shape):
return input_shape[0][0], input_shape[0][1], input_shape[0][1], input_shape[1][-1], self.filters
class GraphVToV(Layer):
def __init__(self,
filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
super(GraphVToV, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphVToV, self).get_config()
base_config['filters'] = self.filters
return base_config
def build(self, input_shape):
atom_feat = input_shape[-1]
self.w_vv = self.add_weight(shape=(atom_feat * 2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_vv')
self.b_vv = self.add_weight(shape=(self.filters,),
name='b_vv',
initializer=self.bias_initializer)
super(GraphVToV, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# vector_features = (samples, max_atoms, coor_dims, atom_feat)
vector_features = inputs
# Get parameters
max_atoms = int(vector_features.shape[1])
atom_feat = int(vector_features.shape[-1])
coor_dims = int(vector_features.shape[-2])
# Expand vector features to 5D
vector_features = tf.reshape(vector_features, [-1, max_atoms, 1, coor_dims, atom_feat])
vector_features = tf.tile(vector_features, [1, 1, max_atoms, 1, 1])
# Combine between atoms
vector_features_t = tf.transpose(vector_features, perm=[0, 2, 1, 3, 4])
vector_features = tf.concat([vector_features, vector_features_t], -1)
# Apply weights
vector_features = tf.reshape(vector_features, [-1, atom_feat * 2])
vector_features = tf.matmul(vector_features, self.w_vv) + self.b_vv
vector_features = tf.reshape(vector_features, [-1, max_atoms, max_atoms, coor_dims, self.filters])
# Activation
vector_features = self.activation(vector_features)
return vector_features
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1], input_shape[1], input_shape[-2], self.filters
class GraphVToS(Layer):
def __init__(self,
filters,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
super(GraphVToS, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphVToS, self).get_config()
base_config['filters'] = self.filters
return base_config
def build(self, input_shape):
atom_feat = input_shape[0][-1]
self.w_vs = self.add_weight(shape=(atom_feat * 2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_vs')
self.b_vs = self.add_weight(shape=(self.filters,),
name='b_vs',
initializer=self.bias_initializer)
super(GraphVToS, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# vector_features = (samples, max_atoms, coor_dims, atom_feat)
# distances = (samples, max_atoms, max_atoms, coor_dims)
vector_features, distances = inputs
# Get parameters
max_atoms = int(vector_features.shape[1])
atom_feat = int(vector_features.shape[-1])
coor_dims = int(vector_features.shape[-2])
# Expand vector features to 5D
vector_features = tf.reshape(vector_features, [-1, max_atoms, 1, coor_dims, atom_feat])
vector_features = tf.tile(vector_features, [1, 1, max_atoms, 1, 1])
# Combine between atoms
vector_features_t = tf.transpose(vector_features, perm=[0, 2, 1, 3, 4])
vector_features = tf.concat([vector_features, vector_features_t], -1)
# Apply weights
vector_features = tf.reshape(vector_features, [-1, atom_feat * 2])
vector_features = tf.matmul(vector_features, self.w_vs) + self.b_vs
vector_features = tf.reshape(vector_features, [-1, max_atoms, max_atoms, coor_dims, self.filters])
# # Calculate r^ = r / |r| and expand it to 5D
# distances_hat = tf.sqrt(tf.reduce_sum(tf.square(distances), axis=-1, keepdims=True))
# distances_hat = distances_hat + tf.cast(tf.equal(distances_hat, 0), tf.float32)
# distances_hat = tf.divide(distances, distances_hat)
# distances_hat = tf.reshape(distances_hat, [-1, max_atoms, max_atoms, coor_dims, 1])
# distances_hat = tf.tile(distances_hat, [1, 1, 1, 1, self.filters])
distances_hat = tf.reshape(distances, [-1, max_atoms, max_atoms, coor_dims, 1])
distances_hat = tf.tile(distances_hat, [1, 1, 1, 1, self.filters])
# Projection of v onto r = v (dot) r^
scalar_features = tf.multiply(vector_features, distances_hat)
scalar_features = tf.reduce_sum(scalar_features, axis=-2)
# Activation
scalar_features = self.activation(scalar_features)
return scalar_features
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1], input_shape[1], self.filters
class GraphConvS(Layer):
def __init__(self,
filters,
pooling='sum',
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
self.pooling = pooling
super(GraphConvS, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphConvS, self).get_config()
base_config['filters'] = self.filters
base_config['pooling'] = self.pooling
return base_config
def build(self, input_shape):
atom_feat_1 = input_shape[0][-1]
atom_feat_2 = input_shape[1][-1]
self.w_conv_scalar = self.add_weight(shape=(atom_feat_1 + atom_feat_2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_conv_scalar')
self.b_conv_scalar = self.add_weight(shape=(self.filters,),
name='b_conv_scalar',
initializer=self.bias_initializer)
super(GraphConvS, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# scalar_features_1 = (samples, max_atoms, max_atoms, atom_feat)
# scalar_features_2 = (samples, max_atoms, max_atoms, atom_feat)
# adjacency = (samples, max_atoms, max_atoms)
scalar_features_1, scalar_features_2, adjacency = inputs
# Get parameters
max_atoms = int(scalar_features_1.shape[1])
atom_feat_1 = int(scalar_features_1.shape[-1])
atom_feat_2 = int(scalar_features_2.shape[-1])
# Concatenate two features
scalar_features = tf.concat([scalar_features_1, scalar_features_2], axis=-1)
# Linear combination
scalar_features = tf.reshape(scalar_features, [-1, atom_feat_1 + atom_feat_2])
scalar_features = tf.matmul(scalar_features, self.w_conv_scalar) + self.b_conv_scalar
scalar_features = tf.reshape(scalar_features, [-1, max_atoms, max_atoms, self.filters])
# Adjacency masking
adjacency = tf.reshape(adjacency, [-1, max_atoms, max_atoms, 1])
adjacency = tf.tile(adjacency, [1, 1, 1, self.filters])
scalar_features = tf.multiply(scalar_features, adjacency)
# Integrate over second atom axis
if self.pooling == "sum":
scalar_features = tf.reduce_sum(scalar_features, axis=2)
elif self.pooling == "max":
scalar_features = tf.reduce_max(scalar_features, axis=2)
elif self.pooling == "mean":
scalar_features = tf.reduce_mean(scalar_features, axis=2)
# Activation
scalar_features = self.activation(scalar_features)
return scalar_features
def compute_output_shape(self, input_shape):
return input_shape[0][0], input_shape[0][1], self.filters
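# Hedged sketch of one scalar update step assembled from the layers above,
# continuing the hypothetical tensors of the GraphEmbed sketch (filter sizes and
# the adjacency input are illustrative):
#
#   adjacency = Input(shape=(50, 50))
#   ss = GraphSToS(32, activation='relu')(scalar_feats)           # (samples, 50, 50, 32)
#   vs = GraphVToS(32, activation='relu')([vector_feats, dists])  # (samples, 50, 50, 32)
#   new_scalar = GraphConvS(32, pooling='sum',
#                           activation='relu')([ss, vs, adjacency])
#   # new_scalar -> (samples, 50, 32), aggregated over neighbouring atoms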
class GraphConvV(Layer):
def __init__(self,
filters,
pooling='sum',
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
bias_initializer='zeros',
activation=None,
**kwargs):
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.filters = filters
self.pooling = pooling
super(GraphConvV, self).__init__(**kwargs)
def get_config(self):
base_config = super(GraphConvV, self).get_config()
base_config['filters'] = self.filters
base_config['pooling'] = self.pooling
return base_config
def build(self, input_shape):
atom_feat_1 = input_shape[0][-1]
atom_feat_2 = input_shape[1][-1]
self.w_conv_vector = self.add_weight(shape=(atom_feat_1 + atom_feat_2, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name='w_conv_vector')
self.b_conv_vector = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='b_conv_vector')
super(GraphConvV, self).build(input_shape)
def call(self, inputs, mask=None):
# Import graph tensors
# vector_features_1 = (samples, max_atoms, max_atoms, coor_dims, atom_feat)
# vector_features_2 = (samples, max_atoms, max_atoms, coor_dims, atom_feat)
# adjacency = (samples, max_atoms, max_atoms)
vector_features_1, vector_features_2, adjacency = inputs
# Get parameters
max_atoms = int(vector_features_1.shape[1])
atom_feat_1 = int(vector_features_1.shape[-1])
atom_feat_2 = int(vector_features_2.shape[-1])
coor_dims = int(vector_features_1.shape[-2])
# Concatenate two features
vector_features = tf.concat([vector_features_1, vector_features_2], axis=-1)
# Linear combination
vector_features = tf.reshape(vector_features, [-1, atom_feat_1 + atom_feat_2])
vector_features = tf.matmul(vector_features, self.w_conv_vector) + self.b_conv_vector
vector_features = tf.reshape(vector_features, [-1, max_atoms, max_atoms, coor_dims, self.filters])
# Adjacency masking
adjacency = tf.reshape(adjacency, [-1, max_atoms, max_atoms, 1, 1])
adjacency = tf.tile(adjacency, [1, 1, 1, coor_dims, self.filters])
        vector_features = tf.multiply(vector_features, adjacency)
if(periodic):
frame = reflection_pad2D(frame, int(kernel_size / 2), device)
frame = F.conv2d(frame, gaussian_kernel, groups=frame.shape[1])
frame = F.interpolate(frame, size = list(s), mode='bilinear', align_corners=False)
del gaussian_kernel
return frame
def laplace_pyramid_downscale3D(frame, level, downscale_per_level, device, periodic=False):
kernel_size = 5
sigma = 2 * (1 / downscale_per_level) / 6
xyz_grid = torch.zeros([kernel_size, kernel_size, kernel_size, 3])
for i in range(kernel_size):
for j in range(kernel_size):
for k in range(kernel_size):
xyz_grid[i, j, k, 0] = i
xyz_grid[i, j, k, 1] = j
xyz_grid[i, j, k, 2] = k
mean = (kernel_size - 1)/2.
variance = sigma**2.
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xyz_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
    # Reshape to 3d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size,
kernel_size, kernel_size).to(device)
gaussian_kernel = gaussian_kernel.repeat(frame.shape[1], 1, 1, 1, 1)
input_size = np.array(list(frame.shape[2:]))
with torch.no_grad():
for i in range(level):
s = (input_size * (downscale_per_level**(i+1))).astype(int)
if(periodic):
frame = reflection_pad3D(frame, int(kernel_size / 2), device)
frame = F.conv3d(frame, gaussian_kernel, groups=frame.shape[1])
frame = F.interpolate(frame, size = list(s), mode='trilinear', align_corners=False)
del gaussian_kernel
return frame
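# Hedged usage sketch: downscale a (1, channels, D, H, W) volume by two pyramid
# levels with a per-level ratio of 0.75; the tensor shape and device are
# illustrative assumptions.
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   vol = torch.randn(1, 1, 64, 64, 64, device=device)
#   coarse = laplace_pyramid_downscale3D(vol, 2, 0.75, device)
#   # coarse spatial size is int(64 * 0.75**2) = 36 per dimension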
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
def generate_padded_noise(size, pad_size, pad_with_noise, mode, device):
if(pad_with_noise):
for i in range(2,len(size)):
size[i] += 2*pad_size
noise = torch.randn(size, device=device)
else:
noise = torch.randn(size, device=device)
if mode == "2D":
required_padding = [pad_size, pad_size, pad_size, pad_size]
else:
required_padding = [pad_size, pad_size, pad_size, pad_size, pad_size, pad_size]
noise = F.pad(noise, required_padding)
return noise
def init_scales(opt, dataset):
ns = []
if(opt["spatial_downscale_ratio"] < 1.0):
for i in range(len(dataset.resolution)):
ns.append(round(math.log(opt["min_dimension_size"] / dataset.resolution[i]) / math.log(opt["spatial_downscale_ratio"]))+1)
opt["n"] = min(ns)
print("The model will have %i scales" % (opt["n"]))
for i in range(opt["n"]):
scaling = []
factor = opt["spatial_downscale_ratio"]**(opt["n"] - i - 1)
for j in range(len(dataset.resolution)):
x = int(dataset.resolution[j] * factor)
scaling.append(x)
#opt["resolutions"].insert(0, scaling)
opt["resolutions"].append(scaling)
print("Scale %i: %s" % (opt["n"] - 1 - i, str(scaling)))
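# Worked example with illustrative numbers: for a dataset resolution of 256,
# min_dimension_size = 32 and spatial_downscale_ratio = 0.75,
#   n = round(log(32/256) / log(0.75)) + 1 = round(7.23) + 1 = 8
# so 8 scales are created, the coarsest at int(256 * 0.75**7) = 34 voxels per side.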
def init_gen(scale, opt):
num_kernels = int( 2** ((math.log(opt["base_num_kernels"]) / math.log(2)) + (scale / 4)))
generator = SinGAN_Generator(opt["resolutions"][scale], opt["num_blocks"],
opt["num_channels"], num_kernels, opt["kernel_size"], opt["stride"],
opt["pre_padding"], opt["mode"], opt["physical_constraints"], opt['separate_chans'], scale,
opt['zero_noise'], opt["device"])
generator.apply(weights_init)
return generator, num_kernels
def init_discrim(scale, opt):
num_kernels = int(2 ** ((math.log(opt["base_num_kernels"]) / math.log(2)) + (scale / 4)))
discriminator = SinGAN_Discriminator(opt["resolutions"][scale],
opt["num_blocks"], opt["num_channels"], num_kernels, opt["kernel_size"],
opt["stride"], opt['regularization'], opt["mode"],
opt["device"])
return discriminator
def generate(generators, mode, opt, device, generated_image=None, start_scale=0):
with torch.no_grad():
if(generated_image is None):
generated_image = torch.zeros(generators[0].get_input_shape()).to(device)
for i in range(0, len(generators)):
generated_image = F.interpolate(generated_image,
size=generators[i].resolution, mode=opt["upsample_mode"], align_corners=False)
if(mode == "reconstruct"):
noise = generators[i].optimal_noise
elif(mode == "random"):
noise = torch.randn(generators[i].get_input_shape(),
device=device)
generated_image = generators[i](generated_image,
opt["noise_amplitudes"][i+start_scale]*noise)
return generated_image
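# Hedged usage sketch: draw a random sample from the full generator pyramid, or
# rebuild the training example from the stored optimal noise (assumes `opt` and
# `generators` were produced by the training code in this module).
#
#   random_sample = generate(generators, "random", opt, opt["device"])
#   reconstruction = generate(generators, "reconstruct", opt, opt["device"])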
def generate_by_patch(generators, mode, opt, device, patch_size,
generated_image=None, start_scale=0):
with torch.no_grad():
#seq = []
if(generated_image is None):
generated_image = torch.zeros(generators[0].get_input_shape()).to(device)
for i in range(start_scale, len(generators)):
#print("Gen " + str(i))
rf = int(generators[i].receptive_field() / 2)
LR = F.interpolate(generated_image,
size=generators[i].resolution, mode=opt["upsample_mode"], align_corners=False)
generated_image = torch.zeros(generators[i].get_input_shape()).to(device)
if(mode == "reconstruct"):
full_noise = generators[i].optimal_noise
elif(mode == "random"):
full_noise = torch.randn(generators[i].optimal_noise.shape, device=device)
if(opt['mode'] == "2D" or opt['mode'] == "3Dto2D"):
y_done = False
y = 0
y_stop = min(generated_image.shape[2], y + patch_size)
while(not y_done):
if(y_stop == generated_image.shape[2]):
y_done = True
x_done = False
x = 0
x_stop = min(generated_image.shape[3], x + patch_size)
while(not x_done):
if(x_stop == generated_image.shape[3]):
x_done = True
noise = full_noise[:,:,y:y_stop,x:x_stop]
#print("[%i:%i, %i:%i, %i:%i]" % (z, z_stop, y, y_stop, x, x_stop))
result = generators[i](LR[:,:,y:y_stop,x:x_stop],
opt["noise_amplitudes"][i]*noise)
x_offset = rf if x > 0 else 0
y_offset = rf if y > 0 else 0
generated_image[:,:,
y+y_offset:y+noise.shape[2],
x+x_offset:x+noise.shape[3]] = result[:,:,y_offset:,x_offset:]
x += patch_size - 2*rf
x = min(x, max(0, generated_image.shape[3] - patch_size))
x_stop = min(generated_image.shape[3], x + patch_size)
y += patch_size - 2*rf
y = min(y, max(0, generated_image.shape[2] - patch_size))
y_stop = min(generated_image.shape[2], y + patch_size)
elif(opt['mode'] == '3D'):
z_done = False
z = 0
z_stop = min(generated_image.shape[2], z + patch_size)
while(not z_done):
if(z_stop == generated_image.shape[2]):
z_done = True
y_done = False
y = 0
y_stop = min(generated_image.shape[3], y + patch_size)
while(not y_done):
if(y_stop == generated_image.shape[3]):
y_done = True
x_done = False
x = 0
x_stop = min(generated_image.shape[4], x + patch_size)
while(not x_done):
if(x_stop == generated_image.shape[4]):
x_done = True
noise = full_noise[:,:,z:z_stop,y:y_stop,x:x_stop]
#print("[%i:%i, %i:%i, %i:%i]" % (z, z_stop, y, y_stop, x, x_stop))
result = generators[i](LR[:,:,z:z_stop,y:y_stop,x:x_stop],
opt["noise_amplitudes"][i]*noise)
x_offset = rf if x > 0 else 0
y_offset = rf if y > 0 else 0
z_offset = rf if z > 0 else 0
generated_image[:,:,
z+z_offset:z+noise.shape[2],
y+y_offset:y+noise.shape[3],
x+x_offset:x+noise.shape[4]] = result[:,:,z_offset:,y_offset:,x_offset:]
x += patch_size - 2*rf
x = min(x, max(0, generated_image.shape[4] - patch_size))
x_stop = min(generated_image.shape[4], x + patch_size)
y += patch_size - 2*rf
y = min(y, max(0, generated_image.shape[3] - patch_size))
y_stop = min(generated_image.shape[3], y + patch_size)
z += patch_size - 2*rf
z = min(z, max(0, generated_image.shape[2] - patch_size))
z_stop = min(generated_image.shape[2], z + patch_size)
#seq.append(generated_image.detach().cpu().numpy()[0].swapaxes(0,2).swapaxes(0,1))
#seq = np.array(seq)
#seq -= seq.min()
#seq /= seq.max()
#seq *= 255
#seq = seq.astype(np.uint8)
#imageio.mimwrite("patches_good.gif", seq)
#imageio.imwrite("patch_good_ex0.png", seq[0,0:100, 0:100,:])
#imageio.imwrite("patch_good_ex1.png", seq[1,0:100, 0:100,:])
#imageio.imwrite("patch_good_ex2.png", seq[2,0:100, 0:100,:])
return generated_image
def super_resolution(generator, frame, factor, opt, device):
frame = frame.to(device)
full_size = list(frame.shape[2:])
for i in range(len(full_size)):
full_size[i] *= factor
r = 1 / opt["spatial_downscale_ratio"]
curr_r = 1.0
while(curr_r * r < factor):
frame = F.interpolate(frame, scale_factor=r,mode=opt["upsample_mode"], align_corners=False)
noise = torch.randn(frame.shape).to(device)
frame = generator(frame, opt["noise_amplitudes"][-1]*noise)
curr_r *= r
frame = F.interpolate(frame, size=full_size, mode=opt["upsample_mode"], align_corners=False)
noise = torch.randn(frame.shape).to(device)
noise = torch.zeros(frame.shape).to(device)
frame = generator(frame, opt["noise_amplitudes"][-1]*noise)
return frame
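# Hedged usage sketch: upscale a frame by a factor of 2 with the finest-scale
# generator; `frame` is assumed to be a (1, channels, ...) tensor matching the
# training data layout.
#
#   sr_frame = super_resolution(generators[-1], frame, 2, opt, opt["device"])
#   # each spatial dimension of `frame` is doubled in sr_frame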
def save_models(generators, discriminators, opt, optimizer=None):
folder = create_folder(opt["save_folder"], opt["save_name"])
path_to_save = os.path.join(opt["save_folder"], folder)
print_to_log_and_console("Saving model to %s" % (path_to_save),
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
if(opt["save_generators"]):
optimal_noises = {}
gen_states = {}
for i in range(len(generators)):
gen_states[str(i)] = generators[i].state_dict()
optimal_noises[str(i)] = generators[i].optimal_noise
torch.save(gen_states, os.path.join(path_to_save, "SinGAN.generators"))
torch.save(optimal_noises, os.path.join(path_to_save, "SinGAN.optimal_noises"))
if(opt["save_discriminators"]):
discrim_states = {}
for i in range(len(discriminators)):
discrim_states[str(i)] = discriminators[i].state_dict()
torch.save(discrim_states, os.path.join(path_to_save, "SinGAN.discriminators"))
save_options(opt, path_to_save)
def load_models(opt, device):
generators = []
discriminators = []
load_folder = os.path.join(opt["save_folder"], opt["save_name"])
if not os.path.exists(load_folder):
print_to_log_and_console("%s doesn't exist, load failed" % load_folder,
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
return
from collections import OrderedDict
if os.path.exists(os.path.join(load_folder, "SinGAN.generators")):
gen_params = torch.load(os.path.join(load_folder, "SinGAN.generators"),
map_location=device)
optimal_noises = torch.load(os.path.join(load_folder, "SinGAN.optimal_noises"),
map_location=device)
for i in range(opt["n"]):
if(str(i) in gen_params.keys()):
gen_params_compat = OrderedDict()
for k, v in gen_params[str(i)].items():
if("module" in k):
gen_params_compat[k[7:]] = v
else:
gen_params_compat[k] = v
generator, num_kernels = init_gen(i, opt)
generator.optimal_noise = optimal_noises[str(i)]
generator.load_state_dict(gen_params_compat)
generators.append(generator)
print_to_log_and_console("Successfully loaded SinGAN.generators",
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
else:
print_to_log_and_console("Warning: %s doesn't exists - can't load these model parameters" % "MVTVSSRGAN.generators",
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
if os.path.exists(os.path.join(load_folder, "SinGAN.discriminators")):
discrim_params = torch.load(os.path.join(load_folder, "SinGAN.discriminators"),
map_location=device)
for i in range(opt["n"]):
if(str(i) in discrim_params.keys()):
discrim_params_compat = OrderedDict()
for k, v in discrim_params[str(i)].items():
if(k[0:7] == "module."):
discrim_params_compat[k[7:]] = v
else:
discrim_params_compat[k] = v
discriminator = init_discrim(i, opt)
discriminator.load_state_dict(discrim_params_compat)
discriminators.append(discriminator)
print_to_log_and_console("Successfully loaded SinGAN.discriminators",
os.path.join(opt["save_folder"],opt["save_name"]), "log.txt")
else:
print_to_log_and_console("Warning: %s doesn't exists - can't load these model parameters" % "MVTVSSRGAN.s_discriminators",
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
return generators, discriminators
def train_single_scale_wrapper(generators, discriminators, opt):
with LineProfiler(train_single_scale, generate, generate_by_patch, SinGAN_Generator.forward) as prof:
g, d = train_single_scale(generators, discriminators, opt)
print(prof.display())
return g, d
def train_single_scale(generators, discriminators, opt):
start_t = time.time()
# Initialize the dataset
dataset = Dataset(os.path.join(input_folder, opt["data_folder"]), opt)
torch.manual_seed(0)
# Create the new generator and discriminator for this level
if(len(generators) == opt['scale_in_training']):
generator, num_kernels_this_scale = init_gen(len(generators), opt)
generator = generator.to(opt["device"])
discriminator = init_discrim(len(generators), opt).to(opt["device"])
else:
generator = generators[-1].to(opt['device'])
generators.pop(len(generators)-1)
discriminator = discriminators[-1].to(opt['device'])
discriminators.pop(len(discriminators)-1)
# Move all models to this GPU and make them distributed
for i in range(len(generators)):
generators[i].to(opt["device"])
generators[i].eval()
for param in generators[i].parameters():
param.requires_grad = False
#print_to_log_and_console(generator, os.path.join(opt["save_folder"], opt["save_name"]),
# "log.txt")
print_to_log_and_console("Training on %s" % (opt["device"]),
os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
#print_to_log_and_console("Kernels this scale: %i" % num_kernels_this_scale,
# os.path.join(opt["save_folder"], opt["save_name"]), "log.txt")
generator_optimizer = optim.Adam(generator.parameters(), lr=opt["learning_rate"],
betas=(opt["beta_1"],opt["beta_2"]))
generator_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=generator_optimizer,
milestones=[1600-opt['iteration_number']],gamma=opt['gamma'])
discriminator_optimizer = optim.Adam(filter(lambda p: p.requires_grad, discriminator.parameters()),
lr=opt["learning_rate"],
betas=(opt["beta_1"],opt["beta_2"]))
discriminator_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=discriminator_optimizer,
milestones=[1600-opt['iteration_number']],gamma=opt['gamma'])
writer = SummaryWriter(os.path.join('tensorboard',opt['save_name']))
start_time = time.time()
next_save = 0
images_seen = 0
# Get properly sized frame for this generator
real = dataset.__getitem__(0)
real = real.to(opt["device"])
print(str(len(generators)) + ": " + str(opt["resolutions"][len(generators)]))
    if(len(generators) + 1 != len(opt["resolutions"])):
        if(opt['mode'] == '2D' or opt['mode'] == '3Dto2D'):
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module implements functions used to produce evolution charts and other
plots based on the
output of estrangement confinement :mod:`estrangement.estrangement.ECA` ."""
__all__ = ['GetDeltas','plot_by_param','plot_function','ChoosingDelta','preprocess_temporal_communities','plot_temporal_communities','plot_with_lambdas']
__author__ = """\n""".join(['<NAME> (<EMAIL>)',
'<NAME> <<EMAIL>>',
'<NAME> <<EMAIL>>'])
# Copyright (C) 2012 by
# Raytheon BBN Technologies and Rensselaer Polytechnic Institute
# All rights reserved.
# BSD license.
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import pylab
import os
import numpy
import collections
import random
import logging
from utils import match_labels
markers = [
'o' ,
'v' ,
'^' ,
'<' ,
'>' ,
'1' ,
'2' ,
'3' ,
'4' ,
's' ,
'p' ,
'*' ,
'h' ,
'H' ,
'+' ,
'x' ,
'D' ,
'd' ,
'|' ,
'_' ,
]
def GetDeltas():
""" Function to scan for simulation folders in the current working directory and read
the values of delta used in ECA.
The :mod:`estrangement.estrangement.ECA` function creates a folder specific to the value
of delta used in each simulation (e.g. task_delta_0.01). Within each of these folders,
    an options log file (options.log) records the value of delta used in the simulation.
    This function reads the value of delta from each such log file and returns the values in
a list.
Alternatively, delta can be specified in the function calls, bypassing this
function. See :mod:`EstrangementDemo` for more details.
Returns
-------
deltas : list
A list of float, where each member denotes a value of delta used in :mod:`estrangement.estrangement.ECA`.
Examples
--------
>>> deltas = GetDeltas()
>>> print(deltas)
"""
deltas = []
dictOptions = {}
for dirname in os.listdir(os.getcwd()):
if not os.path.isdir(dirname):
continue
if not dirname.startswith("task"):
continue
infile = open(os.path.join(dirname, "options.log"), 'r')
for l in infile:
dictOptions = eval(l)
delta = dictOptions['delta']
deltas.append(delta)
deltas.sort()
return(deltas)
def plot_by_param(dictX, dictY, deltas=[], linewidth=2.0, markersize=15, label_fontsize=20, xfigsize=16.0, yfigsize=12.0, fontsize=28, fname=None, listLinestyles=None, xlabel="", ylabel="", title="", xscale='linear', yscale='linear', dictErr=None, display_on=False):
""" Given dictionaries, dictX with key=label, val = iterable of X values,
and dictY with key=label, val = iterable of Y values, this function
    plots lines for each of the labels specified on the same axes.
Parameters
----------
dictX : dictionary {label:[list of values]}
The X values of a set of lines to be plotted and their respective label.
    dictY : dictionary {label:[list of values]}
The Y values of a set of lines to be plotted and their respective label.
deltas : list of floats
The values of delta used for ECA for which there are results.
linewidth : float, optional
        The desired width of the lines to be plotted.
markersize : float, optional
The size of the markers used on each line.
label_fontsize : integer, optional
The size of the font used for the labels.
    xfigsize : float, optional
        The desired width of the figure in inches.
    yfigsize : float, optional
        The desired height of the figure in inches.
fontsize : integer, optional
The size of the font to be used in the figure.
fname : string, optional
The file is saved with this name.
    listLinestyles : list of strings, optional
A list consisting of the line styles to be used in the figures.
xlabel : string, optional
The label to appear on the x-axis.
ylabel : string, optional
The label to appear on the y-axis.
title : string, optional
The title of the figure.
xscale : string, optional
The type of scale to be used on the x-axis.
yscale : string, optional
The type of scale to be used on the y-axis.
    dictErr : dictionary {label:[list of values]}, optional
        The error bar values to be plotted on each line.
    display_on : boolean, optional
        If True, the figure is also displayed on the screen.
Examples
--------
>>> dictX = {'Estrangement:delta_0.05': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'Estrangement:delta_0.025': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'Estrangement:delta_0.01': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'Estrangement:delta_1.0': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
>>> dictY = {'Estrangement:delta_0.05': [0.0, 0.020202020202020204, 0.04040404040404041, 0.031578947368421054, 0.010309278350515464, 0.010101010101010102, 0.020202020202020204, 0.030612244897959183, 0.030303030303030304, 0.0103092783505154], 'Estrangement:delta_0.025': [0.0, 0.020202020202020204, 0.0, 0.021052631578947368, 0.0, 0.020202020202020204, 0.010101010101010102, 0.02040816326530612, 0.010101010101010102, 0.0], 'Estrangement:delta_0.01': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Estrangement:delta_1.0': [0.061224489795918366, 0.020202020202020204, 0.04040404040404041, 0.05263157894736842, 0.0, 0.020202020202020204, 0.06060606060606061, 0.02040816326530612, 0.04040404040404041, 0.041237113402061855]}
>>> plot_by_param(dictX, dictY,deltas=[],fname='estrangement.svg', listLinestyles=['bs--', 'ro-',], xlabel="Time", ylabel='Estrangement', title="Estrangement")
"""
pyplot.clf()
fig2 = pyplot.figure(figsize=(xfigsize,yfigsize))
ax2 = fig2.add_subplot(111)
ax2.set_title(title, fontsize=fontsize)
ax2.set_xlabel(xlabel, fontsize=fontsize)
ax2.set_ylabel(ylabel, fontsize=fontsize)
ax2.set_xscale(xscale)
ax2.set_yscale(yscale)
xticklabels = pyplot.getp(pyplot.gca(), 'xticklabels')
pyplot.setp(xticklabels, fontsize=label_fontsize)
yticklabels = pyplot.getp(pyplot.gca(), 'yticklabels')
pyplot.setp(yticklabels, fontsize=label_fontsize)
pyplot.hold(True)
line_dict = {} # key = label, val = pyplot line object
i=0
for label in sorted(dictX.keys()):
arrayX = dictX[label]
arrayY = dictY[label]
if listLinestyles is not None:
fmt = listLinestyles[i]
i += 1
else:
fmt = random.choice(markers)
if dictErr is not None: # plot with errorbars
arrayErr = dictErr[label]
line_dict[label] = pyplot.errorbar(
arrayX, arrayY, yerr=[arrayErr, numpy.zeros(len(arrayErr))],
fmt=fmt,
label="%s"%str(label), linewidth=linewidth,
elinewidth=linewidth / 2.0,
markersize=markersize)
else:
line_dict[label] = pyplot.plot(
arrayX, arrayY, fmt,
label="%s"%str(label), linewidth=linewidth,
markersize=markersize)
pyplot.legend()
if fname is not None:
pyplot.savefig('%s'%fname)
if display_on is True:
pyplot.show()
return ax2
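# --- Hedged usage sketch (not part of the published estrangement API) ---
# plot_by_param() also accepts per-point error bars through dictErr; the keys
# must match dictX/dictY and each value must be an iterable with one entry per
# Y value.  The label and file name below are made up for illustration.
def _example_plot_with_errorbars():
    dictX = {"delta_0.05": [1, 2, 3, 4]}
    dictY = {"delta_0.05": [0.0, 0.02, 0.04, 0.03]}
    dictErr = {"delta_0.05": [0.0, 0.005, 0.01, 0.005]}
    return plot_by_param(dictX, dictY, dictErr=dictErr,
                         fname="example_errorbars.svg",
                         listLinestyles=["bs--"],
                         xlabel="Time", ylabel="Estrangement",
                         title="Estrangement with error bars")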
def plot_function(listNames,image_extension="svg"):
""" Plots a graph with the attributes specified in *listNames*.
This function relies on the file output of :mod:`estrangement.estrangement.ECA`.
The value of the parameter *write_stats* should be set to *True* when calling
:mod:`estrangement.estrangement.ECA`.
Parameters
----------
listNames : list of strings
Each string is an attribute to be plotted e.g. 'Estrangement','Q','F' etc.
image_extension : string, optional
The extension of the plot file to be saved.
Returns
-------
Nothing : The image is displayed on the screen and/or written to file.
Notes
-----
The function reads the relevant input data from file and formats it.
The actual plotting is done in :mod:`estrangement.plots.plot_by_param`.
Examples
--------
>>> deltas = [0.01,0.025,0.05,1.0]
>>> for d in deltas:
... estrangement.ECA(dataset_dir='../data',delta=d,increpeats=opt.increpeats,minrepeats=opt.minrepeats)
>>> plot_function(['Q', 'F',])
>>> plot_function(['Estrangement'])
"""
task_list = []
for dirname in os.listdir(os.getcwd()):
if not os.path.isdir(dirname):
continue
if not dirname.startswith("task"):
continue
task_list.append(dirname)
dictX = collections.defaultdict(list)
dictY = collections.defaultdict(list)
concat_datadict = {}
avg_datadict = {}
for task in task_list:
for name in listNames:
label = name + ':' + task[5:]
concat_datadict[label] = collections.defaultdict(list)
avg_datadict[label] = collections.defaultdict(float)
with open(os.path.join(task,"%s.log"%name), 'r') as infile:
data_dict = eval(infile.read())
for t in data_dict.keys():
concat_datadict[label][t] = data_dict[t]
for k in sorted(concat_datadict[label].keys(),key=int):
dictX[label].append(int(k))
dictY[label].append(concat_datadict[label][k])
plot_by_param(dictX, dictY, fname='%s.%s'%('-'.join(listNames), image_extension),
listLinestyles=['bo-', 'ro-', 'go-', 'mo-', 'ko-', 'yo-', 'co-',
'bs-', 'rs-', 'gs-', 'ms-', 'ks-', 'ys-', 'cs-',
'b*-', 'r*-', 'g*-', 'm*-', 'k*-', 'y*-', 'c*-',],
xlabel="Time", ylabel=name, title="%s evolution"% ', '.join(listNames))
def ChoosingDelta(image_extension="svg",deltas=[]):
""" Function to plot avg(Q*-E) versus delta to get insights into the best delta for the given dataset.
    This function merely processes the data; the plotting is done by :mod:`estrangement.plots.plot_by_param`.
    It requires the results from :mod:`estrangement.estrangement.ECA` to be written to file.
Parameters
----------
image_extension : string
The extension of the plot file to be saved
deltas : list of floats, optional
        The values of delta used in the simulation. If deltas are not
        specified, :mod:`estrangement.plots.GetDeltas` is called to create the list.
Returns
-------
Nothing : The plot is displayed on the screen and/or written to file.
Notes
-----
To produce the necessary stat files, set 'write_stats=True' when calling :mod:`estrangement.estrangement.ECA`.
Examples
--------
>>> deltas = [0.01,0.025,0.05,1.0]
>>> for d in deltas:
... estrangement.ECA(dataset_dir='../data',delta=d,increpeats=opt.increpeats,minrepeats=opt.minrepeats)
    >>> ChoosingDelta()
"""
dictX = collections.defaultdict(list)
dictY = collections.defaultdict(list)
Qavg_dict = {} # {delta: Qavg}
Eavg_dict = {} # {delta: Eavg}
# Get the values of delta used in the simulations if it is not specified
if(len(deltas) == 0):
deltas = GetDeltas()
for delta in deltas:
with open("./task_delta_" + str(delta) + "/Q.log", 'r') as f:
Q_dict = eval(f.read()) # {time: Q}
        # remove the lowest time entry since the initial partition is a given
# this also keeps us consistent with Qstar and E below
del(Q_dict[sorted(Q_dict.keys())[0]])
with open("./task_delta_" + str(delta) +"/Qstar.log", 'r') as f:
Qstar_dict = eval(f.read()) # {time: Qstar}
with open("./task_delta_" + str(delta) +"/Estrangement.log", 'r') as f:
E_dict = eval(f.read()) # {time: E}
dictX["Average loss | |
already_processed):
value = find_attr_value_('LANG_REF', node)
if value is not None and 'LANG_REF' not in already_processed:
already_processed.add('LANG_REF')
self.LANG_REF = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class descMultiLangType
class propType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, NAME=None, valueOf_=None):
self.original_tagname_ = None
self.NAME = _cast(None, NAME)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, propType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if propType.subclass:
return propType.subclass(*args_, **kwargs_)
else:
return propType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_NAME(self): return self.NAME
def set_NAME(self, NAME): self.NAME = NAME
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='propType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('propType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='propType')
if self.hasContent_():
outfile.write('>')
outfile.write(self.convert_unicode(self.valueOf_))
self.exportChildren(outfile, level + 1, namespace_='', name_='propType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propType'):
if self.NAME is not None and 'NAME' not in already_processed:
already_processed.add('NAME')
outfile.write(' NAME=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.NAME), input_name='NAME')), ))
def exportChildren(self, outfile, level, namespace_='', name_='propType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('NAME', node)
if value is not None and 'NAME' not in already_processed:
already_processed.add('NAME')
self.NAME = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class propType
class extRefType(GeneratedsSuper):
"""A reference to the id of an ISO Data Category (url including id). A
reference to an external (closed) Controlled Vocabulary (url). A
reference to the id of an Entry in an external Controlled
Vocabulary (id). A reference to the id of an entry in a lexicon
(url, url+id or id) A reference or hyperlink to any type
document (url)"""
subclass = None
superclass = None
def __init__(self, EXT_REF_ID=None, TYPE=None, VALUE=None):
self.original_tagname_ = None
self.EXT_REF_ID = _cast(None, EXT_REF_ID)
self.TYPE = _cast(None, TYPE)
self.VALUE = _cast(None, VALUE)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, extRefType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if extRefType.subclass:
return extRefType.subclass(*args_, **kwargs_)
else:
return extRefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_EXT_REF_ID(self): return self.EXT_REF_ID
def set_EXT_REF_ID(self, EXT_REF_ID): self.EXT_REF_ID = EXT_REF_ID
def get_TYPE(self): return self.TYPE
def set_TYPE(self, TYPE): self.TYPE = TYPE
def get_VALUE(self): return self.VALUE
def set_VALUE(self, VALUE): self.VALUE = VALUE
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='extRefType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('extRefType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='extRefType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='extRefType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='extRefType'):
if self.EXT_REF_ID is not None and 'EXT_REF_ID' not in already_processed:
already_processed.add('EXT_REF_ID')
outfile.write(' EXT_REF_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.EXT_REF_ID), input_name='EXT_REF_ID')), ))
if self.TYPE is not None and 'TYPE' not in already_processed:
already_processed.add('TYPE')
outfile.write(' TYPE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TYPE), input_name='TYPE')), ))
if self.VALUE is not None and 'VALUE' not in already_processed:
already_processed.add('VALUE')
outfile.write(' VALUE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.VALUE), input_name='VALUE')), ))
def exportChildren(self, outfile, level, namespace_='', name_='extRefType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('EXT_REF_ID', node)
if value is not None and 'EXT_REF_ID' not in already_processed:
already_processed.add('EXT_REF_ID')
self.EXT_REF_ID = value
value = find_attr_value_('TYPE', node)
if value is not None and 'TYPE' not in already_processed:
already_processed.add('TYPE')
self.TYPE = value
value = find_attr_value_('VALUE', node)
if value is not None and 'VALUE' not in already_processed:
already_processed.add('VALUE')
self.VALUE = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class extRefType
class lexRefType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, LEX_REF_ID=None, NAME=None, TYPE=None, URL=None, LEXICON_ID=None, LEXICON_NAME=None, DATCAT_ID=None, DATCAT_NAME=None):
self.original_tagname_ = None
self.LEX_REF_ID = _cast(None, LEX_REF_ID)
self.NAME = _cast(None, NAME)
self.TYPE = _cast(None, TYPE)
self.URL = _cast(None, URL)
self.LEXICON_ID = _cast(None, LEXICON_ID)
self.LEXICON_NAME = _cast(None, LEXICON_NAME)
self.DATCAT_ID = _cast(None, DATCAT_ID)
self.DATCAT_NAME = _cast(None, DATCAT_NAME)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, lexRefType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if lexRefType.subclass:
return lexRefType.subclass(*args_, **kwargs_)
else:
return lexRefType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_LEX_REF_ID(self): return self.LEX_REF_ID
def set_LEX_REF_ID(self, LEX_REF_ID): self.LEX_REF_ID = LEX_REF_ID
def get_NAME(self): return self.NAME
def set_NAME(self, NAME): self.NAME = NAME
def get_TYPE(self): return self.TYPE
def set_TYPE(self, TYPE): self.TYPE = TYPE
def get_URL(self): return self.URL
def set_URL(self, URL): self.URL = URL
def get_LEXICON_ID(self): return self.LEXICON_ID
def set_LEXICON_ID(self, LEXICON_ID): self.LEXICON_ID = LEXICON_ID
def get_LEXICON_NAME(self): return self.LEXICON_NAME
def set_LEXICON_NAME(self, LEXICON_NAME): self.LEXICON_NAME = LEXICON_NAME
def get_DATCAT_ID(self): return self.DATCAT_ID
def set_DATCAT_ID(self, DATCAT_ID): self.DATCAT_ID = DATCAT_ID
def get_DATCAT_NAME(self): return self.DATCAT_NAME
def set_DATCAT_NAME(self, DATCAT_NAME): self.DATCAT_NAME = DATCAT_NAME
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='lexRefType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('lexRefType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='lexRefType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='lexRefType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='lexRefType'):
if self.LEX_REF_ID is not None and 'LEX_REF_ID' not in already_processed:
already_processed.add('LEX_REF_ID')
outfile.write(' LEX_REF_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LEX_REF_ID), input_name='LEX_REF_ID')), ))
if self.NAME is not None and 'NAME' not in already_processed:
already_processed.add('NAME')
outfile.write(' NAME=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.NAME), input_name='NAME')), ))
if self.TYPE is not None and 'TYPE' not in already_processed:
already_processed.add('TYPE')
outfile.write(' TYPE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TYPE), input_name='TYPE')), ))
if self.URL is not None and 'URL' not in already_processed:
already_processed.add('URL')
outfile.write(' URL=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.URL), input_name='URL')), ))
if self.LEXICON_ID is not None and 'LEXICON_ID' not in already_processed:
already_processed.add('LEXICON_ID')
outfile.write(' LEXICON_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LEXICON_ID), input_name='LEXICON_ID')), ))
if self.LEXICON_NAME is not None and 'LEXICON_NAME' not in already_processed:
already_processed.add('LEXICON_NAME')
outfile.write(' LEXICON_NAME=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LEXICON_NAME), input_name='LEXICON_NAME')), ))
if self.DATCAT_ID is not None and 'DATCAT_ID' not in already_processed:
already_processed.add('DATCAT_ID')
outfile.write(' DATCAT_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.DATCAT_ID), input_name='DATCAT_ID')), ))
if self.DATCAT_NAME is not None and 'DATCAT_NAME' not in already_processed:
already_processed.add('DATCAT_NAME')
outfile.write(' DATCAT_NAME=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.DATCAT_NAME), input_name='DATCAT_NAME')), ))
def exportChildren(self, outfile, level, namespace_='', name_='lexRefType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('LEX_REF_ID', node)
if value is not None and 'LEX_REF_ID' not in already_processed:
already_processed.add('LEX_REF_ID')
self.LEX_REF_ID = value
value = find_attr_value_('NAME', node)
if value is not None and 'NAME' not in already_processed:
already_processed.add('NAME')
self.NAME = value
value = find_attr_value_('TYPE', node)
if value is not None and 'TYPE' not in already_processed:
already_processed.add('TYPE')
self.TYPE = value
value = find_attr_value_('URL', node)
if value is not None and 'URL' not in already_processed:
already_processed.add('URL')
self.URL = value
value = find_attr_value_('LEXICON_ID', node)
if value is not None and 'LEXICON_ID' not in already_processed:
already_processed.add('LEXICON_ID')
self.LEXICON_ID = value
value = find_attr_value_('LEXICON_NAME', node)
if value is not None and 'LEXICON_NAME' not in already_processed:
already_processed.add('LEXICON_NAME')
self.LEXICON_NAME = value
value = find_attr_value_('DATCAT_ID', node)
if value is not None and 'DATCAT_ID' not in already_processed:
already_processed.add('DATCAT_ID')
self.DATCAT_ID = value
value = find_attr_value_('DATCAT_NAME', node)
if value is not None and 'DATCAT_NAME' not in already_processed:
already_processed.add('DATCAT_NAME')
self.DATCAT_NAME = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class lexRefType
"""This module contains wrappers around the ISIS campt and mappt that are
specific to understanding the relationship between pixels and projected
coordinates."""
# This is free and unencumbered software released into the public domain.
#
# The authors of autocnet do not claim copyright on the contents of this file.
# For more details about the LICENSE terms and the AUTHORS, you will
# find files of those names at the top level of this repository.
#
# SPDX-License-Identifier: CC0-1.0
import os
from collections import abc
from numbers import Number
import numpy as np
# KDTree (used in get_nodata_bounds below) is assumed to be scikit-learn's
# implementation, which is constructed with the metric keyword used there.
from sklearn.neighbors import KDTree
try:
import kalasiris as isis
except Exception as exception:
from autocnet.utils.utils import FailedImport
isis = FailedImport(exception)
import pvl
isis2np_types = {
"UnsignedByte" : "uint8",
"SignedWord" : "int16",
"Double" : "float64",
"Real" : "float32"
}
np2isis_types = {v: k for k, v in isis2np_types.items()}
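# --- Hedged illustration (not part of the original autocnet module) ---
# The two dictionaries above translate between ISIS pixel-type names and numpy
# dtype strings; the round trip below shows the intended usage.
def _example_dtype_round_trip():
    arr = np.zeros((2, 2), dtype=isis2np_types["SignedWord"])  # int16 array
    return np2isis_types[str(arr.dtype)]                       # -> "SignedWord"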
def get_isis_special_pixels(arr):
"""
    Returns the coordinates of any ISIS no-data pixels, i.e. the
    np.argwhere results for pixels that match the ISIS special pixel
    values (Null, Lrs, Lis, His, Hrs).
Parameters
----------
arr : np.array
Array to find special pixels in
    Returns
    -------
    sp : np.array
        Array of coordinates in (y, x) format containing the special pixel coordinates
"""
isis_dtype = np2isis_types[str(arr.dtype)]
sp_pixels = getattr(isis.specialpixels, isis_dtype)
null = np.argwhere(arr==sp_pixels.Null)
lrs = np.argwhere(arr==sp_pixels.Lrs)
lis = np.argwhere(arr==sp_pixels.Lis)
his = np.argwhere(arr==sp_pixels.His)
hrs = np.argwhere(arr==sp_pixels.Hrs)
sp = np.concatenate((null, lrs, lis, his, hrs))
return sp
def get_nodata_bounds(arr):
"""
    Get bounds for a region of an image that does not contain any ISIS special
    pixels, that is, ISIS Null, Lrs, Lis, His and Hrs pixels.
Parameters
----------
arr : np.array
2D array representing the image
    Returns
    -------
    left_x
        left x coordinate of the new bounds
    right_x
        right x coordinate of the new bounds
    top_y
        top y coordinate of the new bounds
    bottom_y
        bottom y coordinate of the new bounds
"""
sp = get_isis_special_pixels(arr)
if not sp.any():
return 0, arr.shape[1], 0, arr.shape[0]
cy, cx = arr.shape[1]//2, arr.shape[0]//2
tree = KDTree(sp, metric='euclidean')
# For finding K neighbors of P1 with shape (1, 3)
distances, indices = tree.query(np.array([cy, cx]).reshape(1,2), 1)
# these are slightly misshapen by being in nested arrays (e.g. [[n]], [[y,x]])
nearest_idx = indices.reshape(1,)
neary, nearx = sp[nearest_idx].reshape(2,)
# subtract 1 to exclude the special pixel
x_dist = abs(cx - nearx) - 1
y_dist = abs(cy - neary) - 1
# left_x, right_x, top_y, bottom_y
left_x = cx - x_dist
right_x = cx + x_dist
top_y = cy - y_dist
bottom_y = cy + y_dist
return left_x, right_x, top_y, bottom_y
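# --- Hedged usage sketch (not part of the original autocnet module) ---
# Both helpers above operate on plain numpy arrays.  The sketch marks a single
# pixel of a float32 image with the ISIS Null value and asks for the largest
# special-pixel-free bounds around the image centre; it assumes kalasiris
# imported successfully so that isis.specialpixels is available.
def _example_nodata_bounds():
    img = np.ones((64, 64), dtype="float32")
    img[0, 0] = isis.specialpixels.Real.Null   # inject one no-data pixel
    return get_nodata_bounds(img)              # -> (left_x, right_x, top_y, bottom_y)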
def point_info(
cube_path: os.PathLike,
x,
y,
point_type: str,
allowoutside=False
):
"""
Returns a pvl.collections.MutableMappingSequence object or a
Sequence of MutableMappingSequence objects which contain keys
and values derived from the output of ISIS campt or mappt on
the *cube_path*.
If x and y are single numbers, then a single MutableMappingSequence
object will be returned. If they are Sequences or Numpy arrays, then a
Sequence of MutableMappingSequence objects will be returned,
such that the first MutableMappingSequence object of the returned
Sequence will correspond to the result of *x[0]* and *y[0]*,
etc.
Raises subprocess.CalledProcessError if campt or mappt have failures.
May raise ValueError if campt completes, but reports errors.
Parameters
----------
cube_path : os.PathLike
Path to the input cube.
x : Number, Sequence of Numbers, or Numpy Array
Point(s) in the x direction. Interpreted as either a sample
or a longitude value determined by *point_type*.
y : Number, Sequence of Numbers, or Numpy Array
Point(s) in the y direction. Interpreted as either a line
or a latitude value determined by *point_type*.
point_type : str
Options: {"image", "ground"}
Pass "image" if x,y are in image space (sample, line) or
"ground" if in ground space (longitude, latitude)
allowoutside: bool
Defaults to False, this parameter is passed to campt
or mappt. Please read the ISIS documentation to
learn more about this parameter.
"""
point_type = point_type.casefold()
valid_types = {"image", "ground"}
if point_type not in valid_types:
raise ValueError(
f'{point_type} is not a valid point type, valid types are '
f'{valid_types}'
)
if isinstance(x, abc.Sequence) and isinstance(y, abc.Sequence):
if len(x) != len(y):
raise IndexError(
f"Sequences given to x and y must be of the same length."
)
x_coords = x
y_coords = y
elif isinstance(x, Number) and isinstance(y, Number):
x_coords = [x, ]
y_coords = [y, ]
elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
if not all((x.ndim == 1, y.ndim == 1)):
raise IndexError(
f"If they are numpy arrays, x and y must be one-dimensional, "
f"they were: {x.ndim} and {y.ndim}"
)
if x.shape != y.shape:
raise IndexError(
f"Numpy arrays given to x and y must be of the same shape."
)
x_coords = x
y_coords = y
else:
raise TypeError(
f"The values of x and y were neither Sequences nor individual "
f"numbers, they were: {x} and {y}"
)
results = []
if pvl.load(cube_path).get("IsisCube").get("Mapping"):
# We have a projected image, and must use mappt
mappt_common_args = dict(allowoutside=allowoutside, type=point_type)
for xx, yy in zip(x_coords, y_coords):
mappt_args = {
"ground": dict(
longitude=xx,
latitude=yy,
coordsys="UNIVERSAL"
),
"image": dict(
# Convert PLIO pixels to ISIS pixels
sample=xx+0.5,
line=yy+0.5
)
}
for k in mappt_args.keys():
mappt_args[k].update(mappt_common_args)
mapres = pvl.loads(isis.mappt(cube_path, **mappt_args[point_type]).stdout)["Results"]
# convert from ISIS pixels to PLIO pixels
mapres['Sample'] = mapres['Sample'] - 0.5
mapres['Line'] = mapres['Line'] - 0.5
results.append(mapres)
else:
# Not projected, use campt
if point_type == "ground":
# campt uses lat, lon for ground but sample, line for image.
# So swap x,y for ground-to-image calls
p_list = [f"{lat}, {lon}" for lon, lat in zip(x_coords, y_coords)]
else:
p_list = [
f"{samp+0.5}, {line+0.5}" for samp, line in zip(x_coords, y_coords)
]
# ISIS's campt needs points in a file
with isis.fromlist.temp(p_list) as f:
cp = isis.campt(
cube_path,
coordlist=f,
allowoutside=allowoutside,
usecoordlist=True,
coordtype=point_type
)
camres = pvl.loads(cp.stdout)
for r in camres.getall("GroundPoint"):
if r['Error'] is None:
# convert all pixels to PLIO pixels from ISIS
r["Sample"] -= .5
r["Line"] -= .5
results.append(r)
else:
raise ValueError(
f"ISIS campt completed, but reported an error: {r['Error']}"
)
if isinstance(x, (abc.Sequence, np.ndarray)):
return results
else:
return results[0]
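# --- Hedged usage sketch (not part of the original autocnet module) ---
# point_info() accepts scalars, Sequences, or 1-D numpy arrays for x and y; a
# scalar pair yields a single mapping, a pair of Sequences yields a list.  The
# cube path below is hypothetical.
def _example_point_info(cube="example.cub"):
    single = point_info(cube, 10.0, 20.0, "image")               # one mapping
    many = point_info(cube, [0.0, 1.0], [10.0, 11.0], "ground")  # list of mappings
    return single, many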
def image_to_ground(
cube_path: os.PathLike,
sample,
line,
lontype="PositiveEast360Longitude",
lattype="PlanetocentricLatitude",
):
"""
Returns a two-tuple of numpy arrays or a two-tuple of floats, where
the first element of the tuple is the longitude(s) and the second
element are the latitude(s) that represent the coordinate(s) of the
input *sample* and *line* in *cube_path*.
If *sample* and *line* are single numbers, then the returned two-tuple
will have single elements. If they are Sequences, then the returned
two-tuple will contain numpy arrays.
Raises the same exceptions as point_info().
Parameters
----------
cube_path : os.PathLike
Path to the input cube.
sample : Number or Sequence of Numbers
Sample coordinate(s).
line : Number or Sequence of Numbers
Line coordinate(s).
lontype: str
Name of key to query in the campt or mappt return to get the returned
longitudes. Defaults to "PositiveEast360Longitude", but other values
are possible. Please see the campt or mappt documentation.
lattype: str
Name of key to query in the campt or mappt return to get the returned
latitudes. Defaults to "PlanetocentricLatitude", but other values
are possible. Please see the campt or mappt documentation.
"""
res = point_info(cube_path, sample, line, "image")
if isinstance(sample, (abc.Sequence, np.ndarray)):
lon_list = list()
lat_list = list()
for r in res:
lon_list.append(_get_value(r[lontype]))
lat_list.append(_get_value(r[lattype]))
lons = np.asarray(lon_list)
lats = np.asarray(lat_list)
else:
lons = _get_value(res[lontype])
lats = _get_value(res[lattype])
return lons, lats
def _get_value(obj):
"""Returns *obj*, unless *obj* is of type pvl.collections.Quantity, in
which case, the .value component of the object is returned."""
if isinstance(obj, pvl.collections.Quantity):
return obj.value
else:
return obj
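# --- Hedged usage sketch (not part of the original autocnet module) ---
# image_to_ground() is a thin convenience wrapper over point_info(): scalar
# sample/line inputs give scalar lon/lat outputs, while Sequence or ndarray
# inputs give numpy arrays.  The cube path below is hypothetical.
def _example_image_to_ground(cube="example.cub"):
    lon, lat = image_to_ground(cube, 10.0, 20.0)                    # floats
    lons, lats = image_to_ground(cube, [10.0, 30.0], [20.0, 40.0])  # ndarrays
    return (lon, lat), (lons, lats)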
def ground_to_image(cube_path, lon, lat):
"""
Returns a two-tuple of numpy arrays or a two-tuple of floats, where
the first element of the tuple is the sample(s) and the second
element are the lines(s) that represent the coordinate(s) of the
input *lon* and *lat* in *cube_path*.
If *lon* and *lat* are single numbers, then the returned two-tuple
will have single elements. If they are Sequences, then the returned
two-tuple will contain numpy arrays.
    Raises the same exceptions as point_info().
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""Base Contrail Provisioning module."""
import os
import re
import sys
import shutil
import socket
import argparse
import tempfile
import platform
import ConfigParser
from fabric.api import *
from contrail_provisioning.common.templates import contrail_keystone_auth_conf
from contrail_provisioning.config.templates import vnc_api_lib_ini
class ContrailSetup(object):
def __init__(self):
(self.pdist, self.pdistversion, self.pdistrelease) = platform.dist()
self.hostname = socket.gethostname()
if self.pdist == 'Ubuntu':
local("ln -sf /bin/true /sbin/chkconfig")
self._temp_dir_name = tempfile.mkdtemp()
self.contrail_bin_dir = '/opt/contrail/bin'
self._fixed_qemu_conf = False
# Parser defaults
self.global_defaults = {
}
def _parse_args(self, args_str):
'''
Base parser.
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help = False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, self.remaining_argv = conf_parser.parse_known_args(args_str.split())
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
self.global_defaults.update(dict(config.items("GLOBAL")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**self.global_defaults)
return parser
def is_cql_supported(self):
if self.pdist == 'Ubuntu' and self.pdistversion.find('12.') == 0:
return False
elif self.pdist == 'centos' and self.pdistversion.find('6.') == 0:
return False
return True
def update_vips_in_ctrl_details(self, ctrl_infos):
if self._args.internal_vip:
ctrl_infos.append('INTERNAL_VIP=%s' % self._args.internal_vip)
if self._args.contrail_internal_vip:
ctrl_infos.append('CONTRAIL_INTERNAL_VIP=%s' % self._args.contrail_internal_vip)
if self._args.external_vip:
ctrl_infos.append('EXTERNAL_VIP=%s' % self._args.external_vip)
def _template_substitute(self, template, vals):
data = template.safe_substitute(vals)
return data
def _template_substitute_write(self, template, vals, filename):
data = self._template_substitute(template, vals)
outfile = open(filename, 'w')
outfile.write(data)
outfile.close()
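    # --- Hedged usage sketch (not part of the original provisioning code) ---
    # _template_substitute_write() fills a string.Template with a dict of
    # values and writes the result to a file.  The template and file name
    # below are made up for illustration; real callers pass the templates
    # imported at the top of this module.
    def _example_render_template(self):
        from string import Template
        demo = Template("server_ip = ${__server_ip__}\n")
        self._template_substitute_write(demo, {'__server_ip__': '127.0.0.1'},
                                        self._temp_dir_name + '/demo.conf')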
def _replaces_in_file(self, file, replacement_list):
rs = [ (re.compile(regexp), repl) for (regexp, repl) in replacement_list]
file_tmp = file + ".tmp"
with open(file, 'r') as f:
with open(file_tmp, 'w') as f_tmp:
for line in f:
for r, replace in rs:
match = r.search(line)
if match:
line = replace + "\n"
f_tmp.write(line)
shutil.move(file_tmp, file)
def replace_in_file(self, file, regexp, replace):
self._replaces_in_file(file, [(regexp, replace)])
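    # --- Hedged usage sketch (not part of the original provisioning code) ---
    # replace_in_file() rewrites a file in place: any line whose text matches
    # the regexp is replaced wholesale by the given replacement line.  The
    # file and pattern below are illustrative only.
    def _example_force_selinux_disabled(self):
        self.replace_in_file('/etc/selinux/config',
                             r'^\s*SELINUX=', 'SELINUX=disabled')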
def setup_crashkernel_params(self):
if self.pdistversion == '14.04':
with settings(warn_only=True):
local(r"sed -i 's/crashkernel=.*\([ | \"]\)/crashkernel=384M-2G:64M,2G-16G:128M,16G-:256M\1/g' /etc/default/grub.d/kexec-tools.cfg")
local("[ -f /etc/default/kdump-tools ] && sed -i 's/USE_KDUMP=0/USE_KDUMP=1/' /etc/default/kdump-tools")
else:
local(r"sed -i 's/crashkernel=.*\([ | \"]\)/crashkernel=384M-2G:64M,2G-16G:128M,16G-:256M\1/g' /etc/grub.d/10_linux")
local("update-grub")
def enable_kernel_core(self):
'''
enable_kernel_core:
update grub file
install grub2
enable services
'''
gcnf = ''
with open ('/etc/default/grub', 'r') as f:
gcnf = f.read ()
p = re.compile ('\s*GRUB_CMDLINE_LINUX')
el = ExtList (gcnf.split ('\n'))
try:
i = el.findex (p.match)
exec (el[i])
el[i] = 'GRUB_CMDLINE_LINUX="%s crashkernel=128M"' % (
' '.join (filter (lambda x: not x.startswith (
'crashkernel='), GRUB_CMDLINE_LINUX.split ())))
exec (el[i])
el[i] = 'GRUB_CMDLINE_LINUX="%s kvm-intel.nested=1"' % (
' '.join (filter (lambda x: not x.startswith (
'kvm-intel.nested='), GRUB_CMDLINE_LINUX.split ())))
with open ('%s/grub' % self._temp_dir_name, 'w') as f:
f.write ('\n'.join (el))
f.flush ()
local ('sudo mv %s/grub /etc/default/grub' % (self._temp_dir_name))
local ('sudo /usr/sbin/grub2-mkconfig -o /boot/grub2/grub.cfg')
except LookupError:
print 'Improper grub file, kernel crash not enabled'
def disable_selinux(self):
# Disable selinux
with lcd(self._temp_dir_name):
with settings(warn_only = True):
local("sudo sed 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config > config.new")
local("sudo mv config.new /etc/selinux/config")
local("setenforce 0")
# cleanup in case move had error
local("rm config.new")
def setup_sriov_grub(self):
if not self._args.sriov:
return
if self.pdist != 'Ubuntu':
print "Not configuring SRIOV Grub changes for ", self.pdist, " distribution"
return
with open ('/etc/default/grub', 'r') as f:
gcnf = f.read ()
p = re.compile ('\s*GRUB_CMDLINE_LINUX_DEFAULT')
el = gcnf.split ('\n')
for i, x in enumerate (el):
if not p.match(x):
continue
exec(el[i])
el[i] = 'GRUB_CMDLINE_LINUX_DEFAULT="%s intel_iommu=on"' % (
' '.join (filter (lambda x: not x.startswith (
'intel_iommu='), GRUB_CMDLINE_LINUX_DEFAULT.split ())))
exec(el[i])
el[i] = 'GRUB_CMDLINE_LINUX_DEFAULT="%s iommu=pt"' % (
' '.join (filter (lambda x: not x.startswith (
'iommu='), GRUB_CMDLINE_LINUX_DEFAULT.split ())))
exec(el[i])
with open ('%s/grub' % self._temp_dir_name, 'w') as f:
f.write ('\n'.join (el))
f.flush ()
local ('sudo mv %s/grub /etc/default/grub' % (self._temp_dir_name))
local ('sudo /usr/sbin/update-grub')
break
def setup_sriov_vfs(self):
# Set the required number of Virtual Functions for given interfaces
if self.pdist != 'Ubuntu':
print "Not configuring VF's for ", self.pdist, " distribution"
return
sriov_string = self._args.sriov
if sriov_string:
intf_list = sriov_string.split(",")
for intf_details in intf_list:
info = intf_details.split(":")
# Keep this command consistent with provision.py in fabric utils
str = 'echo %s > /sys/class/net/%s/device/sriov_numvfs; sleep 2; ifup -a' % (info[1], info[0])
# Do nothing if the entry already present in /etc/rc.local
with settings(warn_only = True):
if local('grep -w \'%s\' /etc/rc.local' % str).succeeded:
continue
sed = 'sudo sed -i \'/^\s*exit/i ' + str + '\' /etc/rc.local'
with settings(warn_only = True):
local(sed)
def disable_iptables(self):
# Disable iptables
with settings(warn_only = True):
local("sudo chkconfig iptables off")
local("sudo iptables --flush")
if self.pdist == 'redhat' or \
self.pdist == 'centos' and self.pdistversion.startswith('7'):
local("sudo service iptables stop")
local("sudo service ip6tables stop")
local("sudo systemctl stop firewalld")
local("sudo systemctl status firewalld")
local("sudo chkconfig firewalld off")
local("sudo /usr/libexec/iptables/iptables.init stop")
local("sudo /usr/libexec/iptables/ip6tables.init stop")
local("sudo service iptables save")
local("sudo service ip6tables save")
local("iptables -L")
def enable_kdump(self):
'''Enable kdump for centos based systems'''
with settings(warn_only=True):
status = local("chkconfig --list | grep kdump")
if status.failed:
print 'WARNING: Seems kexec-tools is not installed. Skipping enable kdump'
return False
local("chkconfig kdump on")
local("service kdump start")
local("service kdump status")
local("cat /sys/kernel/kexec_crash_loaded")
local("cat /proc/iomem | grep Crash")
def setup_coredump(self):
# usable core dump
initf = '/etc/sysconfig/init'
with settings(warn_only = True):
local("sudo sed '/DAEMON_COREFILE_LIMIT=.*/d' %s > %s.new" %(initf, initf))
local("sudo mv %s.new %s" %(initf, initf))
if self.pdist in ['centos', 'fedora', 'redhat']:
core_unlim = "echo DAEMON_COREFILE_LIMIT=\"'unlimited'\""
local("%s >> %s" %(core_unlim, initf))
#Core pattern
pattern= 'kernel.core_pattern = /var/crashes/core.%e.%p.%h.%t'
ip_fwd_setting = 'net.ipv4.ip_forward = 1'
sysctl_file = '/etc/sysctl.conf'
print pattern
with settings( warn_only= True) :
local('grep -q \'%s\' /etc/sysctl.conf || echo \'%s\' >> /etc/sysctl.conf' %(pattern, pattern))
local("sudo sed 's/net.ipv4.ip_forward.*/%s/g' %s > /tmp/sysctl.new" %(ip_fwd_setting,sysctl_file))
local("sudo mv /tmp/sysctl.new %s" %(sysctl_file))
local("rm /tmp/sysctl.new")
local('sysctl -p')
local('mkdir -p /var/crashes')
local('chmod 777 /var/crashes')
try:
if self.pdist in ['fedora', 'centos', 'redhat']:
self.enable_kernel_core ()
if self.pdist == 'Ubuntu':
self.setup_crashkernel_params()
except Exception as e:
print "Ignoring failure kernel core dump"
try:
if self.pdist in ['fedora', 'centos', 'redhat']:
self.enable_kdump()
except Exception as e:
print "Ignoring failure when enabling kdump"
print "Exception: %s" % str(e)
def _get_keystone_certs(self, ssl_path='/etc/contrail/ssl/certs/'):
cafile = os.path.join(ssl_path,
os.path.basename(self._args.keystone_cafile))
certfile = os.path.join(ssl_path,
os.path.basename(self._args.keystone_certfile))
keyfile = os.path.join(ssl_path,
os.path.basename(self._args.keystone_certfile))
return (certfile, cafile, keyfile)
def _get_apiserver_certs(self, ssl_path='/etc/contrail/ssl/certs/'):
cafile = os.path.join(ssl_path,
os.path.basename(self._args.apiserver_cafile))
certfile = os.path.join(ssl_path,
os.path.basename(self._args.apiserver_certfile))
keyfile = os.path.join(os.path.dirname(ssl_path).replace('certs', 'private'),
os.path.basename(self._args.apiserver_keyfile))
return (certfile, cafile, keyfile)
def fixup_keystone_auth_config_file(self, configure_memcache):
# Keystone auth config ini
template_vals = {
'__contrail_keystone_ip__': self._args.keystone_ip,
'__contrail_admin_user__': self._args.keystone_admin_user,
'__contrail_admin_password__': self._args.keystone_admin_passwd,
'__contrail_admin_tenant_name__': self._args.keystone_admin_tenant_name,
'__contrail_ks_auth_protocol__': self._args.keystone_auth_protocol,
'__contrail_ks_auth_port__': self._args.keystone_auth_port,
'__keystone_insecure_flag__': self._args.keystone_insecure,
'__contrail_memcached_opt__': 'memcache_servers=127.0.0.1:11211' if configure_memcache else '',
'__contrail_ks_auth_url__': '%s://%s:%s/%s' % (self._args.keystone_auth_protocol,
self._args.keystone_ip, self._args.keystone_auth_port, self._args.keystone_version),
'__keystone_cert_file_opt__': 'certfile=%s' % self._get_keystone_certs()[0] if self._args.keystone_certfile else '',
'__keystone_key_file_opt__': 'keyfile=%s' % self._get_keystone_certs()[2] if self._args.keystone_certfile else '',
'__keystone_ca_file_opt__': 'cafile=%s' % self._get_keystone_certs()[1] if self._args.keystone_cafile else '',
}
self._template_substitute_write(contrail_keystone_auth_conf.template,
template_vals, self._temp_dir_name + '/contrail-keystone-auth.conf')
local("sudo mv %s/contrail-keystone-auth.conf /etc/contrail/" %(self._temp_dir_name))
if self._args.keystone_version in ['v3']:
confs = {'auth_type':'password',
'user_domain_name': 'Default',
'domain_id': 'default'
}
auth_conf = '/etc/contrail/contrail-keystone-auth.conf'
for key, val in confs.items():
self.set_config(auth_conf, 'KEYSTONE', key, val)
def fixup_vnc_api_lib_ini(self):
if hasattr(self, 'contrail_internal_vip'):
api_server = self.contrail_internal_vip or self.cfgm_ip
else:
api_server = self._args.cfgm_ip
# vnc_api_lib.ini
authn_url = '/v3/auth/tokens' if 'v3' in self._args.keystone_version else '/v2.0/tokens'
template_vals = {
'__contrail_apiserver_ip__': api_server,
'__contrail_keystone_ip__': self._args.keystone_ip or '127.0.0.1',
'__contrail_authn_url__': authn_url,
'__auth_protocol__': self._args.keystone_auth_protocol,
}
self._template_substitute_write(vnc_api_lib_ini.template,
template_vals, self._temp_dir_name + '/vnc_api_lib.ini')
local("sudo mv %s/vnc_api_lib.ini /etc/contrail/" %(self._temp_dir_name))
conf_file = "/etc/contrail/vnc_api_lib.ini"
if self.api_ssl_enabled:
certfile, cafile, keyfile = self._get_apiserver_certs()
configs = {'certfile': certfile,
'keyfile': keyfile,
'cafile': cafile,
'insecure': self._args.apiserver_insecure}
for param, value in configs.items():
self.set_config(conf_file, 'global', param, value)
if self._args.orchestrator == 'vcenter':
# Remove the auth setion from /etc/contrail/vnc_api_lib.ini
# if orchestrator is not openstack
local("sudo contrail-config --del %s auth" % conf_file)
elif self._args.orchestrator == 'openstack' and self.keystone_ssl_enabled:
certfile, cafile, keyfile = self._get_keystone_certs()
configs = {'cafile': cafile,
'certfile': certfile,
'keyfile': keyfile,
'insecure': self._args.keystone_insecure}
for param, value in configs.items():
self.set_config(conf_file, 'auth', param, value)
local("sudo chown contrail:contrail %s" % conf_file)
def set_config(self, fl, sec, var, val=''):
with settings(warn_only=True):
local("contrail-config --set %s %s %s '%s'" % (
fl, sec, var, val))
def del_config(self, fl, sec, var=''):
with settings(warn_only=True):
local("contrail-config --del %s %s %s" % (
fl, sec, var))
def get_config(self, fl, sec, var=''):
output = None
with settings(warn_only=True):
output = local("openstack-config --get %s %s %s" % (
fl, sec, var), capture=True)
return output
def has_config(self, fl, sec, var=''):
has = False
with settings(warn_only=True):
output = local("openstack-config --has %s %s %s" % | |
SET EntityBridgeId=0 WHERE (EntityBridgeId = '%s') " % tmp[
"tblEntities"])
if ent["entitytype"] == "acl_group":
pass
# remove all old entities
order = 0
if "entities" in ent:
if not isinstance(ent["entities"], list):
LOG.critical(_("update attached_entities entities item for dbid %s is not a list: %s" % (
dbid, str(ent["entities"]))))
continue
for pri in ent["entities"]:
if not isinstance(pri, dict):
LOG.critical(_("update attached_entities entities item for dbid %s is not adict: %s" % (
dbid, str(pri))))
continue
if "AttachedSortSequenceId" in pri:
order = pri["AttachedSortSequenceId"]
else:
order += 1
pri["AttachedSortSequenceId"] = order
pri["tblEntities"] = dbid
if "entitytype" in options:
pri["entitytype"] = options["entitytype"]
else:
pri["entitytype"] = ""
pri["attachedentitytype"] = ent["entitytype"]
if "attachedentityid" in pri:
attach_entity = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": pri["attachedentityid"]},
order="ORDER BY id LIMIT 1"))
if not attach_entity:
continue
attach_entity_parent = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": attach_entity["parententityid"]},
order="ORDER BY id LIMIT 1"))
if not attach_entity_parent:
continue
pri["attachedentityname"] = attach_entity["name"]
pri["attachedentityuniqueid"] = attach_entity["uniqueid"]
pri["attachedentityparentname"] = attach_entity_parent["name"]
cloud_utils.update_or_insert(db, "tblAttachedEntities",
pri, {"tblentities": dbid,
"attachedentityid": pri["attachedentityid"],
})
# db.execute_db("INSERT INTO tblAttachedEntities (tblEntities, AttachedEntityId, AttachedSortSequenceId) VALUES ('%s', '%s', '%s')" %
# (dbid, i["dbid"], order))
except:
cloud_utils.log_exception(sys.exc_info())
externalnetwork_keys = []
def get_first_active_slice(db):
slice = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"EntityType": "slice", "EntityStatus": "active"},
order="ORDER BY id LIMIT 1"))
if not slice:
slice = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"EntityType": "slice"},
order="ORDER BY id LIMIT 1"))
return slice
def _externalnetwork_post_db_create(db, dbid, options, mode=None, **kwargs):
if mode != "create":
return
try:
net = None
if "network" in options and "name" in options["network"] and "slice" in options["network"]:
net = cloud_utils.lower_key(db.get_row_dict("tblEntities",
{"ParentEntityId": options["network"]["slice"],
"EntityType": "slice_attached_network",
"Name": options["network"]["name"]},
order="ORDER BY id LIMIT 1"))
elif "parententityid" in options:
parent_row, status = entity_utils.read_full_entity_status_tuple(db, options["parententityid"])
if "selectedsliceentityid" in parent_row:
if parent_row["selectedsliceentityid"] == 0:
slice = get_first_active_slice(db)
else:
slice = cloud_utils.lower_key(
db.get_row_dict("tblEntities", {"id": parent_row["selectedsliceentityid"]},
order="ORDER BY id LIMIT 1"))
if slice:
net = db.execute_db("SELECT * FROM tblEntities JOIN tblAttachedNetworkEntities "
" WHERE ( "
" tblEntities.EntityType = 'slice_attached_network' AND tblEntities.deleted=0 AND "
" tblAttachedNetworkEntities.NetworkType = 'external' AND "
" tblAttachedNetworkEntities.tblEntities = tblEntities.id AND tblEntities.ParentEntityId = '%s')"
" ORDER BY tblEntities.id LIMIT 1" % slice["id"])
if net and isinstance(net, (list, tuple)):
net = cloud_utils.lower_key(net[0])
if net:
attach_request = {
"attached_entities": [{"entitytype": net["entitytype"], "entities": [{"attachedentityid": net["id"]}]}]}
remove_and_add_attached_entities(db, dbid, attach_request, mode)
# cloud_utils.update_or_insert(db, "tblServices", {"SharedExternalNetworkEntityId": net["id"]}, {"tblEntities": dbid})
except:
cloud_utils.log_exception(sys.exc_info())
def _externalnetwork(db, dbid, row, keys, **kwargs):
j = _entity(db, dbid, row, entity_keys)
# j.update(_network_services_common(db, dbid, row))
net_row = cloud_utils.lower_key(
db.get_row_dict("tblAttachedEntities", {"tblEntities": dbid}, order="ORDER BY id LIMIT 1"))
if net_row:
j.update({"params": {"external_network": _get_entity_name(db, net_row["attachedentityid"], default="none")}})
j.update(_network_services_interfaces(db, dbid, row))
return j, None
entities = {
"slice": Entity("tblSlices", None, None, None, "home", None,
None, None, default_entity_name_prefix="Slice-"),
"system": Entity(None, None, None, None, "home", None,
None, None, default_entity_name_prefix="System-"),
"slice_attached_network": Entity("tblAttachedNetworkEntities", _slice_attached_network_post_db_create,
None, None, "home", "SliceAttachedNetwork",
slice_attached_network_keys, _slice_attached_network),
"slice_compute_entity": Entity("tblComputeEntities", _cfa_post_db_create, None,
post_rest_get_function_slice_entities, "home", "Host", None, _slice_physical_json),
"slice_storage_entity": Entity("tblStorageEntities", None, None, post_rest_get_function_slice_entities, "home",
"Stor", None, _slice_physical_json),
"slice_network_entity": Entity("tblNetworkEntities", None, None, post_rest_get_function_slice_entities, "home",
"NetworkDevice", None, _slice_physical_json),
"slice_ipsecvpn_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_fws_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_sslaccelerator_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_nat_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_lbs_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_ips_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_wan_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_rts_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"slice_nms_service": Entity("tblNetworkEntities", None, None, None, "home", "NetworkDevice", None,
_slice_physical_json),
"user_group": Entity(None, _group_post_db_create, _group_pre_delete,
None, None, None, None, None,
default_entity_name_prefix="UserGroup-",
pre_rest_status_check_function=_rest_disabled
),
"user": Entity("tblUsers", _user_post_db_create, _user_pre_delete,
None, None, None, None, None,
pre_db_create_function=_user_pre_db_create,
pre_rest_status_check_function=_rest_disabled
),
"widget": Entity("tblWidgets", None, None, None, "home", None,
None, None, default_entity_name_prefix="Widget-",
pre_rest_status_check_function=_rest_disabled),
"template": Entity("tblVDCTemplates", None, None, None, "home", None,
None, None, default_entity_name_prefix="Template-",
pre_rest_status_check_function=_rest_disabled),
"imagelibrary": Entity("tblImageLibrary", None, None, None, "home", "ImageLibrary",
image_library_keys, _image_library, default_entity_name_prefix="ImageLibrary-"),
"image": Entity("tblLibraryImages", _image_post_db_create, None, post_rest_get_function_image, "home", "Image",
image_keys, _image, default_entity_name_prefix="Image-"),
"organization": Entity("tblOrganizations", None, None, None, "home", "Organization",
organization_keys, _organization, default_entity_name_prefix="Organization-"),
"department": Entity("tblDepartments", None, None, None, "home", "Department",
department_keys, _department, default_entity_name_prefix="Department-"),
"vdc": Entity("tblVdcs", None, None, None, "home", "Vdc",
vdc_keys, _vdc, post_entity_final_status_function=vdc_post_final_status_function,
validate_entity_function=validate_entity.validate_vdc,
provision_entity_function=provision_entity.provision_vdc, default_entity_name_prefix="Vdc-"
),
"container": Entity("tblContainers", None, None, None, "storage", "Container",
container_keys, _container, default_entity_name_prefix="Container-"),
"volume": Entity("tblContainerVolumes", None, None, post_rest_get_function_volume, "home", "Volume",
volume_keys, _volume, default_entity_name_prefix="Volume-",
post_entity_final_status_function=volume_post_final_status_function,
pre_db_create_function=_volume_pre_db_create),
"snapshot": Entity("tblContainerVolumes", None, None, post_rest_get_function_volume, "home", "Volume",
volume_keys, _volume, ),
"archive": Entity("tblContainerVolumes", None, None, post_rest_get_function_volume, "home", "Volume",
volume_keys, _volume, ),
"backup": Entity("tblContainerVolumes", None, None, post_rest_get_function_volume, "home", "Volume",
volume_keys, _volume, ),
"virtual_network": Entity("tblVirtualNetworks", _virtual_networks_post_db_create, _virtual_networks_pre_db_delete,
None, "home",
"VirtualNetwork",
virtual_networks_keys, _virtual_networks, default_entity_name_prefix="VirtualNetwork-"),
"disk": Entity("tblDisks", None, None, None, "storage", "Disk",
disk_keys, _disk, default_entity_name_prefix="Disk-"),
"partition": Entity("tblDiskPartitions", None, None, None, "home", "Partition",
partition_keys, _partition, default_entity_name_prefix="Partition-"),
"bucket": Entity("tblBuckets", None, None, None, "storage", "Bucket",
bucket_keys, _bucket, default_entity_name_prefix="Bucket-"),
"object": Entity("tblBucketObjects", None, None, None, "home", "BucketObject",
object_keys, _object, default_entity_name_prefix="Object-"),
"serverfarm": Entity("tblServerFarms", _serverfarm_post_db_create, None, None, "compute", "ServerFarm",
serverfarm_keys, _serverfarm,
default_entity_name_prefix="Cluster-",
pre_rest_status_check_function=_check_profile_parent_status,
# statistics_manager=entity_resync.compute_statisics_manager
),
"server": Entity("tblServers", _server_post_db_create, None, None, "home", "Server",
server_keys, _server,
default_entity_name_prefix="Server-",
pre_db_create_function=_server_pre_db_create,
pre_rest_status_check_function=_check_profile_parent_status,
statistics_manager=entity_resync.compute_statisics_manager),
"security_group": Entity(None, None, None, None, "network", "SecurityGroup",
security_group_keys, _security_group,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="SecurityGroup-"),
"security_rule": Entity("tblSecurityRules", None, None, None, "home", "SecurityRule",
security_rule_keys, _security_rule,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="SecurityRule-"),
"lbs_group": Entity(None, None, None, None, "network", "LbsGroup",
lbs_group_keys, _lbs_group,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="LoadBalancerGroup-"),
"lbs_service": Entity("tblLBSServices", None, None, None, "home", "LbsService",
lbs_service_keys, _lbs_service,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="LBSService-"),
"acl_group": Entity(None, None, None, None, "network", "AccessGroup",
acl_group_keys, _acl_group,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="ACLGroup-"),
"acl_rule": Entity("tblACLRules", None, None, None, "home", "AccessRule",
acl_rule_keys, _acl_rule,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="ACLRule-"),
"vpn_group": Entity(None, None, None, None, "network", "VpnGroup",
vpn_group_keys, _vpn_group,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="VPNGroup-"),
"vpn_connection": Entity("tblVPNConnections", None, None, None, "home", "VpnConnection",
vpn_connection_keys,
_vpn_connection,
pre_rest_status_check_function=_check_profile_parent_status,
default_entity_name_prefix="VPNSession-"),
"switch_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, None, "home", "Subnet", switch_keys,
_switch,
validate_entity_function=validate_entity.validate_switch,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Switch-",
statistics_manager=entity_resync.network_service_statisics_manager),
"nat_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_nat, "home", "Nat",
nat_keys, _nat,
validate_entity_function=validate_entity.validate_nat,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Nat-",
statistics_manager=entity_resync.network_service_statisics_manager),
"lbs_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_lbs, "home",
"Loadbalancer", lbs_keys, _lbs,
validate_entity_function=validate_entity.validate_lbs,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="LoadBalancer-",
statistics_manager=entity_resync.network_service_statisics_manager),
"rts_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home", "Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Router-",
statistics_manager=entity_resync.network_service_statisics_manager),
"ipsecvpn_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_vpn, "home", "Vpn",
vpn_keys, _vpn,
validate_entity_function=validate_entity.validate_vpn,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Vpn-",
statistics_manager=entity_resync.network_service_statisics_manager),
"sslvpn_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_vpn, "home", "Vpn",
vpn_keys, _vpn,
validate_entity_function=validate_entity.validate_vpn,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Ssl-",
statistics_manager=entity_resync.network_service_statisics_manager),
"fws_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_fws, "home",
"Firewall", fws_keys, _fws,
validate_entity_function=validate_entity.validate_fws,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Firewall-",
statistics_manager=entity_resync.network_service_statisics_manager),
"amazon_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home", "Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Aws-",
statistics_manager=entity_resync.network_service_statisics_manager),
"rackspace_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home", "Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Rks-",
statistics_manager=entity_resync.network_service_statisics_manager),
"sslaccelerator_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home",
"Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Assl-",
statistics_manager=entity_resync.network_service_statisics_manager),
"wan_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home", "Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Wan-",
statistics_manager=entity_resync.network_service_statisics_manager),
"thirdparty_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home", "Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Tpy-",
statistics_manager=entity_resync.network_service_statisics_manager),
"cloudservice_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_rts, "home",
"Router",
rts_keys, _rts,
validate_entity_function=validate_entity.validate_rts,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Cls-",
statistics_manager=entity_resync.network_service_statisics_manager),
"compute_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_compute, "home",
"ComputeService", compute_keys, _compute,
validate_entity_function=validate_entity.validate_compute,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Compute-",
# statistics_manager=entity_resync.compute_statisics_manager
),
"storage_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_storage, "home",
"StorageService", storage_keys, _storage,
validate_entity_function=validate_entity.validate_storage,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Storage-",
statistics_manager=entity_resync.network_service_statisics_manager),
"nms_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_nms, "home",
"NetworkMonitor", nms_keys, _nms,
validate_entity_function=validate_entity.validate_nms,
provision_entity_function=provision_entity.provision_service,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Monitor-",
statistics_manager=entity_resync.network_service_statisics_manager),
"ips_network_service": Entity("tblServices", _network_service_post_db_create,
_network_service_pre_db_delete, _post_rest_get_function_ips, "home", "Ips",
ips_keys, _ips,
validate_entity_function=validate_entity.validate_ips,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="Ips-",
statistics_manager=entity_resync.network_service_statisics_manager),
"externalnetwork": Entity("tblServices",
_externalnetwork_post_db_create,
_ext_network_service_pre_db_delete,
None,
"home",
"ExternalNetwork",
externalnetwork_keys,
_externalnetwork,
validate_entity_function=validate_entity.validate_externalnetwork,
pre_rest_status_check_function=_check_vdc_status,
pre_db_create_function=_service_pre_db_create,
default_entity_name_prefix="ExternalNetwork-"),
"network_interface": Entity("tblServicesInterfaces", _interface_post_db_create, _interface_pre_db_delete,
None, None, "Interface",
None, None, pre_db_create_function=_interface_pre_db_create,
post_db_delete_function=_interface_post_db_delete,
default_entity_name_prefix="Interface-"),
"service_port": Entity("tblServicePorts",
_port_post_db_create,
None,
None,
"home",
None,
None,
_service_port,
default_entity_name_prefix="Port-",
statistics_manager=entity_resync.port_statisics_manager),
"tap_network_service": Entity("tblServices", _tap_service_post_db_create,
_tap_network_service_pre_db_delete, None, None, None,
None, None,
pre_db_create_function=_tap_service_pre_db_create,
default_entity_name_prefix="Tap-"),
"storage_class": Entity("tblStorageClasses", None, None, None, "home", "Class",
_class_keys, _class, default_entity_name_prefix="StorageClass-",
pre_rest_status_check_function=_rest_disabled,
post_db_delete_function=_storage_class_post_db_delete, ),
"compute_class": Entity("tblComputeClasses", None, None, None, "home", "Class",
_class_keys, _class, default_entity_name_prefix="ComputeClass-",
pre_rest_status_check_function=_rest_disabled,
post_db_delete_function=_compute_class_post_db_delete, ),
"network_class": Entity("tblNetworkClasses", None, None, None, "home", "Class",
_class_keys, _class, default_entity_name_prefix="NetworkClass-",
pre_rest_status_check_function=_rest_disabled
),
}
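# Hedged usage sketch (not part of the original module): assuming the registry
# above is bound to the module-level name ``entities`` -- as the commented-out
# helpers below suggest -- and that Entity keeps its keyword arguments as
# attributes, a caller could derive a default name for a new service like so:
#
#     descriptor = entities["switch_network_service"]
#     default_name = descriptor.default_entity_name_prefix + str(dbid)  # e.g. "Switch-42"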
'''
def get_next_service(db, dbid):
try:
for entitytype in topology_network_services:
if entitytype in entities:
for service in cloud_utils.entity_children(db, dbid, entitytype, entities[entitytype].child_table):
yield service
except GeneratorExit:
LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
except:
cloud_utils.log_exception(sys.exc_info())
'''
'''
def get_next_interface(db, dbid):
try:
for entitytype in topology_network_services:
if entitytype in entities:
for service in cloud_utils.entity_children(db, dbid, entitytype, entities[entitytype].child_table):
yield service
except GeneratorExit:
LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
except:
cloud_utils.log_exception(sys.exc_info())
'''
'''
def get_next_group(db, dbid):
try:
for profile in profile_groups_provision_order:
if profile["group"] in entities:
for group in cloud_utils.entity_members(db, dbid, profile["group"], child_table= entities[profile["group"]].child_table):
yield group
if profile["child"] and profile["child"] in entities:
for child in cloud_utils.entity_members(db, group["id"], profile["child"] , child_table= entities[profile["child"] ].child_table):
yield child
except GeneratorExit:
LOG.info(_("Ignoring Generator Error for dbid: %s" % dbid))
except:
cloud_utils.log_exception(sys.exc_info())
'''
"""
Pod API
This document refers to Symphony API calls that do not need encryption or decryption of content. - sessionToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - Actions are defined to be atomic, i.e. they will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have made no change to the system even if some subset of the request would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. # noqa: E501
The version of the OpenAPI document: 20.13.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from symphony.bdk.gen.api_client import ApiClient, Endpoint as _Endpoint
from symphony.bdk.gen.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from symphony.bdk.gen.pod_model.error import Error
from symphony.bdk.gen.pod_model.membership_list import MembershipList
from symphony.bdk.gen.pod_model.success_response import SuccessResponse
from symphony.bdk.gen.pod_model.user_id import UserId
class RoomMembershipApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __v1_admin_room_id_membership_add_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Add a member to an existing room. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_admin_room_id_membership_add_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_admin_room_id_membership_add_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/admin/room/{id}/membership/add',
'operation_id': 'v1_admin_room_id_membership_add_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_admin_room_id_membership_add_post
)
def __v1_admin_room_id_membership_list_get(
self,
id,
session_token,
**kwargs
):
"""Lists current and previous members of an existing room. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_admin_room_id_membership_list_get(id, session_token, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
Keyword Args:
include_past_members (bool): If true, the membership list will include past members of the stream. If false, the listing will only include current members. Defaults to false. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
MembershipList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
return self.call_with_http_info(**kwargs)
self.v1_admin_room_id_membership_list_get = _Endpoint(
settings={
'response_type': (MembershipList,),
'auth': [],
'endpoint_path': '/v1/admin/room/{id}/membership/list',
'operation_id': 'v1_admin_room_id_membership_list_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'include_past_members',
],
'required': [
'id',
'session_token',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'include_past_members':
(bool,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
'include_past_members': 'includePastMembers',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'include_past_members': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__v1_admin_room_id_membership_list_get
)
def __v1_admin_room_id_membership_remove_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Remove a member from a room. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_admin_room_id_membership_remove_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
id (str): Room streamId
session_token (str): Session authentication token.
payload (UserId):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['session_token'] = \
session_token
kwargs['payload'] = \
payload
return self.call_with_http_info(**kwargs)
self.v1_admin_room_id_membership_remove_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/admin/room/{id}/membership/remove',
'operation_id': 'v1_admin_room_id_membership_remove_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'session_token',
'payload',
],
'required': [
'id',
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'session_token':
(str,),
'payload':
(UserId,),
},
'attribute_map': {
'id': 'id',
'session_token': 'sessionToken',
},
'location_map': {
'id': 'path',
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_admin_room_id_membership_remove_post
)
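# Hedged usage sketch (not generated code): the endpoints bound above are
# invoked positionally, mirroring the docstrings; the UserId keyword shown
# here is illustrative:
#
#     room_api = RoomMembershipApi(ApiClient())
#     members = room_api.v1_admin_room_id_membership_list_get(
#         "STREAM_ID", "SESSION_TOKEN", include_past_members=True)
#     room_api.v1_admin_room_id_membership_remove_post(
#         "STREAM_ID", "SESSION_TOKEN", UserId(id=123456))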
def __v1_room_id_membership_add_post(
self,
id,
session_token,
payload,
**kwargs
):
"""Adds new member to an existing room. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_room_id_membership_add_post(id, session_token, payload, async_req=True)
>>> result = thread.get()
Args:
'''
SimpleCV Detection Library
This library includes classes for finding things in images
FYI -
All angles shalt be described in degrees with zero pointing east in the
plane of the image with all positive rotations going counter-clockwise.
Therefore a rotation from the x-axis to the y-axis is positive and follows
the right hand rule.
'''
#load required libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import *
from SimpleCV.Color import *
from SimpleCV.Features.Features import Feature, FeatureSet
class Corner(Feature):
"""
**SUMMARY**
The Corner feature is a point returned by the FindCorners function
Corners are used in machine vision as a very computationally efficient way
to find unique features in an image. These corners can be used in
conjunction with many other algorithms.
**SEE ALSO**
:py:meth:`findCorners`
"""
def __init__(self, i, at_x, at_y):
points = [(at_x-1,at_y-1),(at_x-1,at_y+1),(at_x+1,at_y+1),(at_x+1,at_y-1)]
super(Corner, self).__init__(i, at_x, at_y,points)
#can we look at the eigenbuffer and find direction?
def draw(self, color = (255, 0, 0),width=1):
"""
**SUMMARY**
Draw a small circle around the corner. Color tuple is single parameter, default is Red.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - if width is less than zero we draw the feature filled in, otherwise we draw the
contour using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawCircle((self.x, self.y), 4, color,width)
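# Usage sketch (assumes a SimpleCV Image; ``findCorners`` is the Image method
# that produces the Corner features defined above, and the keyword shown is
# illustrative):
#
#     img = Image("lenna")
#     corners = img.findCorners(maxnum=25)
#     if corners:
#         corners.draw(color=Color.GREEN)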
######################################################################
class Line(Feature):
"""
**SUMMARY**
The Line class is returned by the findLines function, but can also be initialized with any two points.
>>> l = Line(Image, (point1, point2))
Where point1 and point2 are (x,y) coordinate tuples.
>>> l.points
Returns a tuple of the two points
"""
#TODO - A nice feature would be to calculate the endpoints of the line.
def __init__(self, i, line):
self.image = i
self.vector = None
self.end_points = copy(line)
#print self.end_points[1][1], self.end_points[0][1], self.end_points[1][0], self.end_points[0][0]
if self.end_points[1][0] - self.end_points[0][0] == 0:
self.slope = float("inf")
else:
self.slope = float(self.end_points[1][1] - self.end_points[0][1])/float(self.end_points[1][0] - self.end_points[0][0])
#coordinate of the line object is the midpoint
at_x = (line[0][0] + line[1][0]) / 2
at_y = (line[0][1] + line[1][1]) / 2
xmin = int(np.min([line[0][0],line[1][0]]))
xmax = int(np.max([line[0][0],line[1][0]]))
ymax = int(np.min([line[0][1],line[1][1]]))
ymin = int(np.max([line[0][1],line[1][1]]))
points = [(xmin,ymin),(xmin,ymax),(xmax,ymax),(xmax,ymin)]
super(Line, self).__init__(i, at_x, at_y,points)
def draw(self, color = (0, 0, 255),width=1):
"""
Draw the line, default color is blue
**SUMMARY**
Draw the line on the source image. Color tuple is a single parameter, default is Blue.
**PARAMETERS**
* *color* - An RGB color triplet.
* *width* - Draw the line using the specified width.
**RETURNS**
Nothing - this is an inplace operation that modifies the source images drawing layer.
"""
self.image.drawLine(self.end_points[0], self.end_points[1], color,width)
def length(self):
"""
**SUMMARY**
This method returns the length of the line.
**RETURNS**
A floating point length value.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> lines = img.findLines()
>>> for l in lines:
>>> if l.length() > 100:
>>> print "OH MY! - WHAT A BIG LINE YOU HAVE!"
>>> print "---I bet you say that to all the lines."
"""
return float(spsd.euclidean(self.end_points[0], self.end_points[1]))
def crop(self):
"""
**SUMMARY**
This function crops the source image to the location of the feature and returns
a new SimpleCV image.
**RETURNS**
A SimpleCV image that is cropped to the feature position and size.
**EXAMPLE**
>>> img = Image("../sampleimages/EdgeTest2.png")
>>> l = img.findLines()
>>> myLine = l[0].crop()
"""
tl = self.topLeftCorner()
return self.image.crop(tl[0],tl[1],self.width(),self.height())
def meanColor(self):
"""
**SUMMARY**
Returns the mean color of pixels under the line. Note that when the line falls "between" pixels, each pixel's color contributes to the weighted average.
**RETURNS**
Returns an RGB triplet corresponding to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].meanColor()
"""
(pt1, pt2) = self.end_points
#we're going to walk the line, and take the mean color from all the px
#points -- there's probably a much more optimal way to do this
(maxx,minx,maxy,miny) = self.extents()
d_x = maxx - minx
d_y = maxy - miny
#orient the line so it is going in the positive direction
#if it's a straight one, we can just get mean color on the slice
if (d_x == 0.0):
return self.image[pt1[0]:pt1[0] + 1, miny:maxy].meanColor()
if (d_y == 0.0):
return self.image[minx:maxx, pt1[1]:pt1[1] + 1].meanColor()
error = 0.0
d_err = d_y / d_x #this is how much our "error" will increase in every step
px = []
weights = []
if (d_err < 1):
y = miny
#iterate over X
for x in range(minx, maxx):
#this is the pixel we would draw on, check the color at that px
#weight is reduced from 1.0 by the abs amount of error
px.append(self.image[x, y])
weights.append(1.0 - abs(error))
#if we have error in either direction, we're going to use the px
#above or below
if (error > 0): #
px.append(self.image[x, y+1])
weights.append(error)
if (error < 0):
px.append(self.image[x, y-1])
weights.append(abs(error))
error = error + d_err
if (error >= 0.5):
y = y + 1
error = error - 1.0
else:
#this is a "steep" line, so we iterate over Y
#copy and paste. Ugh, sorry.
x = minx
for y in range(miny, maxy):
#this is the pixel we would draw on, check the color at that px
#weight is reduced from 1.0 by the abs amount of error
px.append(self.image[x, y])
weights.append(1.0 - abs(error))
#if we have error in either direction, we're going to use the px
#above or below
if (error > 0): #
px.append(self.image[x + 1, y])
weights.append(error)
if (error < 0):
px.append(self.image[x - 1, y])
weights.append(abs(error))
error = error + (1.0 / d_err) #we use the reciprocal of error
if (error >= 0.5):
x = x + 1
error = error - 1.0
#once we have iterated over every pixel in the line, we avg the weights
clr_arr = np.array(px)
weight_arr = np.array(weights)
weighted_clrs = np.transpose(np.transpose(clr_arr) * weight_arr)
#multiply each color tuple by its weight
temp = sum(weighted_clrs) / sum(weight_arr) #return the weighted avg
return (float(temp[0]),float(temp[1]),float(temp[2]))
def findIntersection(self, line):
"""
**SUMMARY**
Returns the interesction point of two lines.
**RETURNS**
A point tuple.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].findIntersection(l[1])
TODO: THIS NEEDS TO RETURN A TUPLE OF FLOATS
"""
if self.slope == float("inf"):
x = self.end_points[0][0]
y = line.slope*(x-line.end_points[1][0])+line.end_points[1][1]
return (x, y)
if line.slope == float("inf"):
x = line.end_points[0][0]
y = self.slope*(x-self.end_points[1][0])+self.end_points[1][1]
return (x, y)
m1 = self.slope
x12, y12 = self.end_points[1]
m2 = line.slope
x22, y22 = line.end_points[1]
x = (m1*x12 - m2*x22 + y22 - y12)/float(m1-m2)
y = (m1*m2*(x12-x22) - m2*y12 + m1*y22)/float(m1-m2)
return (x, y)
def isParallel(self, line):
"""
**SUMMARY**
Checks whether two lines are parallel or not.
**RETURNS**
Bool. True or False
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].isParallel(l[1])
"""
if self.slope == line.slope:
return True
return False
def isPerpendicular(self, line):
"""
**SUMMARY**
Checks whether two lines are perpendicular or not.
**RETURNS**
Bool. True or False
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].isPerpendicular(l[1])
"""
if self.slope == float("inf"):
if line.slope == 0:
return True
return False
if line.slope == float("inf"):
if self.slope == 0:
return True
return False
if self.slope*line.slope == -1:
return True
return False
def imgIntersections(self, img):
"""
**SUMMARY**
Returns a set of pixels where the line intersects with the binary image.
**RETURNS**
list of points.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> c = l[0].imgIntersections(img.binarize())
"""
pixels = []
if self.slope == float("inf"):
for y in range(self.end_points[0][1], self.end_points[1][1]+1):
pixels.append((self.end_points[0][0], y))
else:
for x in range(self.end_points[0][0], self.end_points[1][0]+1):
pixels.append((x, int(self.end_points[1][1] + self.slope*(x-self.end_points[1][0]))))
for y in range(self.end_points[0][1], self.end_points[1][1]+1):
pixels.append((int(((y-self.end_points[1][1])/self.slope)+self.end_points[1][0]), y))
pixels = list(set(pixels))
matched_pixels=[]
for pixel in pixels:
if img[pixel[0], pixel[1]] == (255.0, 255.0, 255.0):
matched_pixels.append(pixel)
matched_pixels.sort()
return matched_pixels
def angle(self):
"""
**SUMMARY**
This is the angle of the line, from the leftmost point to the rightmost point
Returns angle (theta) in radians, with 0 = horizontal, -pi/2 = vertical positive slope, pi/2 = vertical negative slope
**RETURNS**
An angle
SETTINGS_FAILURE = [
"thisisntaparam=somevalue",
"thisisntaparam",
]
@pytest.mark.parametrize("arg", SETTINGS_FAILURE)
def test_do_set_fail(arg):
itm = tm.InteractiveTomcatManager()
itm.do_set(arg)
assert itm.exit_code == itm.EXIT_ERROR
PREFIXES = [
("--", "--"),
("*", "*"),
(">>>", ">>>"),
# with no prefix, we should see the connected message
("", "connected"),
]
@pytest.mark.parametrize("prefix, expected", PREFIXES)
def test_status_prefix(tomcat_manager_server, prefix, expected, capsys):
itm = tm.InteractiveTomcatManager()
itm.status_prefix = prefix
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert err.startswith(expected)
assert itm.exit_code == itm.EXIT_SUCCESS
###
#
# test connect and which commands
#
###
def test_connect(tomcat_manager_server, capsys):
itm = tm.InteractiveTomcatManager()
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
assert itm.exit_code == itm.EXIT_SUCCESS
assert_connected_to(itm, tomcat_manager_server.url, capsys)
def test_connect_noverify(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command + " --noverify")
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=False,
cert=None,
)
def test_connect_cacert(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command + " --cacert /tmp/ca")
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify="/tmp/ca",
cert=None,
)
def test_connect_cacert_noverify(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
get_mock = mocker.patch("requests.get")
cmd = tomcat_manager_server.connect_command + " --cacert /tmp/ca --noverify"
itm.onecmd_plus_hooks(cmd)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=False,
cert=None,
)
def test_connect_cert(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command + " --cert /tmp/cert")
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=True,
cert="/tmp/cert",
)
def test_connect_key_cert(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
get_mock = mocker.patch("requests.get")
cmd = tomcat_manager_server.connect_command + " --cert /tmp/cert --key /tmp/key"
itm.onecmd_plus_hooks(cmd)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=True,
cert=("/tmp/cert", "/tmp/key"),
)
def test_connect_fail_debug(tomcat_manager_server, mocker):
itm = tm.InteractiveTomcatManager()
itm.debug = True
mock_ok = mocker.patch(
"tomcatmanager.models.TomcatManagerResponse.ok",
new_callable=mock.PropertyMock,
)
mock_ok.return_value = False
raise_mock = mocker.patch(
"tomcatmanager.models.TomcatManagerResponse.raise_for_status"
)
raise_mock.side_effect = tm.TomcatError()
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
assert itm.exit_code == itm.EXIT_ERROR
# pylint: disable=too-few-public-methods
class MockResponse:
"""Simple class to help mock.patch"""
def __init__(self, code):
self.status_code = code
FAIL_MESSAGES = [
(requests.codes.ok, "tomcat manager not found"),
(requests.codes.not_found, "tomcat manager not found"),
(requests.codes.server_error, "http error"),
]
@pytest.mark.parametrize("code, errmsg", FAIL_MESSAGES)
def test_connect_fail_ok(tomcat_manager_server, mocker, code, errmsg, capsys):
itm = tm.InteractiveTomcatManager()
itm.debug = False
mock_ok = mocker.patch(
"tomcatmanager.models.TomcatManagerResponse.response",
new_callable=mock.PropertyMock,
)
qmr = MockResponse(code)
mock_ok.return_value = qmr
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert errmsg in err
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_fail_not_found(tomcat_manager_server, mocker, capsys):
itm = tm.InteractiveTomcatManager()
itm.debug = False
mock_ok = mocker.patch(
"tomcatmanager.models.TomcatManagerResponse.response",
new_callable=mock.PropertyMock,
)
qmr = MockResponse(requests.codes.not_found)
mock_ok.return_value = qmr
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert "tomcat manager not found" in err
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_fail_other(tomcat_manager_server, mocker, capsys):
itm = tm.InteractiveTomcatManager()
itm.debug = False
mock_ok = mocker.patch(
"tomcatmanager.models.TomcatManagerResponse.response",
new_callable=mock.PropertyMock,
)
qmr = MockResponse(requests.codes.server_error)
mock_ok.return_value = qmr
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert "http error" in err
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_password_prompt(tomcat_manager_server, capsys, mocker):
itm = tm.InteractiveTomcatManager()
mock_getpass = mocker.patch("getpass.getpass")
mock_getpass.return_value = tomcat_manager_server.password
# this should call getpass.getpass, which is now mocked to return the password
cmdline = f"connect {tomcat_manager_server.url} {tomcat_manager_server.user}"
itm.onecmd_plus_hooks(cmdline)
# make sure it got called
assert mock_getpass.call_count == 1
assert itm.exit_code == itm.EXIT_SUCCESS
assert_connected_to(itm, tomcat_manager_server.url, capsys)
def test_connect_config(tomcat_manager_server, capsys, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname}"
itm.onecmd_plus_hooks(cmdline)
assert itm.exit_code == itm.EXIT_SUCCESS
assert_connected_to(itm, tomcat_manager_server.url, capsys)
def test_connect_config_user_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} someotheruser"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=("someotheruser", tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=True,
cert=None,
)
def test_connect_config_user_password_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} someotheruser someotherpassword"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=("someotheruser", "someotherpassword"),
params=None,
timeout=itm.timeout,
verify=True,
cert=None,
)
def test_connect_config_cert(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
cert=/tmp/mycert"""
configstring = config.format(
configname,
tomcat_manager_server.url,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname}"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=None,
params=None,
timeout=itm.timeout,
verify=True,
cert="/tmp/mycert",
)
def test_connect_config_cert_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
cert=/tmp/mycert"""
configstring = config.format(
configname,
tomcat_manager_server.url,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} --cert /tmp/yourcert"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=None,
params=None,
timeout=itm.timeout,
verify=True,
cert="/tmp/yourcert",
)
def test_connect_config_cert_key(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
cert=/tmp/mycert
key=/tmp/mykey"""
configstring = config.format(
configname,
tomcat_manager_server.url,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname}"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=None,
params=None,
timeout=itm.timeout,
verify=True,
cert=("/tmp/mycert", "/tmp/mykey"),
)
def test_connect_config_cert_key_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
cert=/tmp/mycert
key=/tmp/mykey"""
configstring = config.format(
configname,
tomcat_manager_server.url,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} --cert /tmp/yourcert --key /tmp/yourkey"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=None,
params=None,
timeout=itm.timeout,
verify=True,
cert=("/tmp/yourcert", "/tmp/yourkey"),
)
def test_connect_config_cacert(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}
cacert=/tmp/cabundle"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname}"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify="/tmp/cabundle",
cert=None,
)
def test_connect_config_cacert_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}
cacert=/tmp/cabundle"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} --cacert /tmp/other"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify="/tmp/other",
cert=None,
)
def test_connect_config_noverify_override(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}
verify=True"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} --noverify"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=False,
cert=None,
)
def test_connect_config_noverify_override_cacert(tomcat_manager_server, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}
password={}
cacert=/tmp/cabundle"""
configstring = config.format(
configname,
tomcat_manager_server.url,
tomcat_manager_server.user,
tomcat_manager_server.password,
)
itm = itm_with_config(mocker, configstring)
cmdline = f"connect {configname} --noverify"
get_mock = mocker.patch("requests.get")
itm.onecmd_plus_hooks(cmdline)
url = tomcat_manager_server.url + "/text/serverinfo"
get_mock.assert_called_once_with(
url,
auth=(tomcat_manager_server.user, tomcat_manager_server.password),
params=None,
timeout=itm.timeout,
verify=False,
cert=None,
)
def test_connect_config_password_prompt(tomcat_manager_server, capsys, mocker):
configname = str(uuid.uuid1())
config = """[{}]
url={}
user={}"""
configstring = config.format(
configname, tomcat_manager_server.url, tomcat_manager_server.user
)
itm = itm_with_config(mocker, configstring)
mock_getpass = mocker.patch("getpass.getpass")
mock_getpass.return_value = tomcat_manager_server.password
# this will call getpass.getpass, which is now mocked to return the password
cmdline = f"connect {configname}"
itm.onecmd_plus_hooks(cmdline)
assert mock_getpass.call_count == 1
assert itm.exit_code == itm.EXIT_SUCCESS
assert_connected_to(itm, tomcat_manager_server.url, capsys)
def test_connect_with_connection_error(tomcat_manager_server, capsys, mocker):
connect_mock = mocker.patch("tomcatmanager.TomcatManager.connect")
connect_mock.side_effect = requests.exceptions.ConnectionError()
itm = tm.InteractiveTomcatManager()
itm.debug = False
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert connect_mock.call_count == 1
assert err == "connection error\n"
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_with_connection_error_debug(tomcat_manager_server, capsys, mocker):
connect_mock = mocker.patch("tomcatmanager.TomcatManager.connect")
connect_mock.side_effect = requests.exceptions.ConnectionError()
itm = tm.InteractiveTomcatManager()
itm.debug = True
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert connect_mock.call_count == 1
assert "requests.exceptions.ConnectionError" in err
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_with_timeout(tomcat_manager_server, capsys, mocker):
connect_mock = mocker.patch("tomcatmanager.TomcatManager.connect")
connect_mock.side_effect = requests.exceptions.Timeout()
itm = tm.InteractiveTomcatManager()
itm.debug = False
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert connect_mock.call_count == 1
assert err == "connection timeout\n"
assert itm.exit_code == itm.EXIT_ERROR
def test_connect_with_timeout_debug(tomcat_manager_server, capsys, mocker):
connect_mock = mocker.patch("tomcatmanager.TomcatManager.connect")
connect_mock.side_effect = requests.exceptions.Timeout()
itm = tm.InteractiveTomcatManager()
itm.debug = True
itm.onecmd_plus_hooks(tomcat_manager_server.connect_command)
out, err = capsys.readouterr()
assert not out
assert connect_mock.call_count == 1
assert "requests.exceptions.Timeout" in err
assert itm.exit_code == itm.EXIT_ERROR
def test_which(tomcat_manager_server, capsys):
itm = get_itm(tomcat_manager_server)
# force this to ensure `which` sets it to SUCCESS
itm.exit_code = itm.EXIT_ERROR
itm.onecmd_plus_hooks("which")
out, _ = capsys.readouterr()
assert itm.exit_code == itm.EXIT_SUCCESS
assert tomcat_manager_server.url in out
assert tomcat_manager_server.user in out
def test_which_cert(tomcat_manager_server, capsys, mocker):
# the mock tomcat server can't authenticate using a certificate
# so we connect as normal, then mock it so it appears
# like we authenticated with a certificate
itm = get_itm(tomcat_manager_server)
cert_mock = mocker.patch(
"tomcatmanager.TomcatManager.cert",
new_callable=mock.PropertyMock,
)
cert_mock.return_value = "/tmp/mycert"
itm.onecmd_plus_hooks("which")
out, err = capsys.readouterr()
assert "/tmp/mycert" in out
def test_which_cert_key(tomcat_manager_server, capsys, mocker):
# the mock tomcat server can't authenticate using a certificate
# so we connect as normal, then mock it so it appears
# like we authenticated with a certificate
itm = get_itm(tomcat_manager_server)
cert_mock = mocker.patch(
"tomcatmanager.TomcatManager.cert",
new_callable=mock.PropertyMock,
)
cert_mock.return_value = ("/tmp/mycert", "/tmp/mykey")
itm.onecmd_plus_hooks("which")
out, err = capsys.readouterr()
assert "/tmp/mykey" in out
REQUIRES_CONNECTION = [
"which",
"deploy",
def __init__(self, ast, indices, shape, summed, linked_lengths):
assert isinstance(indices, str)
self.ast = tuple(ast)
self.indices = indices
self.shape = tuple(shape)
self.summed = frozenset(summed)
self.linked_lengths = frozenset(linked_lengths)
self.ndim = len(self.indices)
def _join_shapes(self, other):
'''Verify ``self + other`` is valid and return the resulting shape and linked lengths.
Args
----
other : :class:`_Array`
Should have the same (order of) indices as this array.
Returns
-------
shape : :class:`tuple`
The simplified shape of ``self + other``.
linked_lengths : :class:`frozenset` of :class:`frozensets` of :class:`_Length`\\s and :class:`int`\\s
See :attr:`_Array.linked_lengths`. Updated with links resulting from
applying ``self + other``.
'''
assert self.indices == other.indices, 'unaligned'
groups = set(self.linked_lengths | other.linked_lengths)
for index, a, b in zip(self.indices, self.shape, other.shape):
if a == b:
continue
if not isinstance(a, _Length) and not isinstance(b, _Length):
raise _IntermediateError('Shapes at index {!r} differ: {}, {}.'.format(index, a, b))
groups.add(frozenset({a, b}))
linked_lengths = self._join_lengths(other, groups)
return self._simplify_shape(linked_lengths), linked_lengths
def _simplify_shape(self, linked_lengths):
'''Return simplified shape by replacing :class:`_Length`\\s with :class:`int`\\s according to the ``linked_lengths``.'''
shape = []
cache = {k: v for v in linked_lengths for k in v}
for length in self.shape:
if isinstance(length, _Length):
for l in cache[length]:
if not isinstance(l, _Length):
length = l
break
shape.append(length)
return shape
def _join_lengths(*args):
'''Return updated linked lengths resulting from ``self + other``.'''
groups = set()
for arg in args:
groups |= arg.linked_lengths if isinstance(arg, _Array) else arg
cache = {}
for g in groups:
# g = frozenset(itertools.chain.from_iterable(map(linked_lenghts.get, g)))
new_g = set()
for k in g:
new_g |= cache.get(k, frozenset([k]))
new_g = frozenset(new_g)
cache.update((k, new_g) for k in new_g)
linked_lengths = frozenset(cache.values())
# Verify.
for g in linked_lengths:
known = tuple(sorted(set(k for k in g if not isinstance(k, _Length))))
if len(known) > 1:
raise _IntermediateError('Axes have different lengths: {}.'.format(', '.join(map(str, known))))
return linked_lengths
@staticmethod
def _update_lengths(linked_lengths, index, a, b):
'''Add link ``a``, ``b`` to ``linked_lengths``.'''
cache = {l: g for g in linked_lengths for l in g}
if a != b:
if not isinstance(a, _Length) and not isinstance(b, _Length):
raise _IntermediateError('Shapes at index {!r} differ: {}, {}.'.format(index, a, b))
g = cache.get(a, frozenset([a])) | cache.get(b, frozenset([b]))
cache.update((k, g) for k in g)
# Verify.
known = tuple(sorted(set(k for k in g if not isinstance(k, _Length))))
if len(known) > 1:
raise _IntermediateError('Shapes at index {!r} differ: {}.'.format(index, ', '.join(map(str, known))))
elif isinstance(a, _Length):
cache.setdefault(a, frozenset([a]))
return frozenset(cache.values())
def __neg__(self):
'''Return -self.'''
return self.replace(ast=('neg', self.ast))
def _add_sub(self, other, op, name):
'''Return op(self, other).'''
if frozenset(self.indices) != frozenset(other.indices):
raise _IntermediateError('Cannot {} arrays with unmatched indices: {!r}, {!r}.'.format(name, self.indices, other.indices))
other = other.transpose(self.indices)
shape, linked_lengths = self._join_shapes(other)
return _Array((op, self.ast, other.ast), self.indices, shape, self.summed, linked_lengths)
def __add__(self, other):
'''Return self+other.'''
return self._add_sub(other, 'add', 'add')
def __sub__(self, other):
'''Return self-other.'''
return self._add_sub(other, 'sub', 'subtract')
def __mul__(self, other):
'''Return self*other.'''
for a, b in ((self, other), (other, self)):
for index in sorted(frozenset(a.indices) | a.summed):
if index in b.summed:
raise _IntermediateError('Index {!r} occurs more than twice.'.format(index))
common = []
for index, length in zip(self.indices, self.shape):
if index in other.indices:
common.append(index)
else:
other = other.append_axis(index, length)
for index, length in zip(other.indices, other.shape):
if index not in self.indices:
self = self.append_axis(index, length)
indices = self.indices
other = other.transpose(indices)
shape, linked_lengths = self._join_shapes(other)
ast = 'mul', self.ast, other.ast
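# contract each index the operands share: wrap the product in a 'sum' node per
# common index, processed right-to-left so earlier positions in ``indices`` and
# ``shape`` remain valid while they are trimmed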
for index in reversed(common):
i = self.indices.index(index)
ast = 'sum', ast, _(i)
indices = indices[:i] + indices[i+1:]
shape = shape[:i] + shape[i+1:]
return _Array(ast, indices, shape, self.summed | other.summed | frozenset(common), linked_lengths)
def __truediv__(self, other):
'''Return self/value.'''
if other.ndim > 0:
raise _IntermediateError('A denominator must have dimension 0.')
for index in sorted((self.summed | set(self.indices)) & other.summed):
raise _IntermediateError('Index {!r} occurs more than twice.'.format(index))
return _Array(('truediv', self.ast, other.ast), self.indices, self.shape, self.summed | other.summed, self._join_lengths(other))
def __pow__(self, other):
'''Return self**value.'''
if other.ndim > 0:
raise _IntermediateError('An exponent must have dimension 0.')
for index in sorted((self.summed | set(self.indices)) & other.summed):
raise _IntermediateError('Index {!r} occurs more than twice.'.format(index))
return _Array(('pow', self.ast, other.ast), self.indices, self.shape, self.summed | other.summed, self._join_lengths(other))
def grad(self, index, geom, type):
'''Return the gradient w.r.t. ``geom``.'''
assert geom.ndim == 1
assert not isinstance(geom.shape[0], _Length)
assert type in ('grad','surfgrad')
ast = type, self.ast, _(geom)
return _Array._apply_indices(ast, self.ndim, self.indices+index, self.shape+geom.shape, self.summed, self.linked_lengths)
def derivative(self, arg):
'Return the derivative to ``arg``.'
return _Array._apply_indices(('derivative', self.ast, arg.ast), self.ndim, self.indices+arg.indices, self.shape+arg.shape, self.summed, self.linked_lengths)
def append_axis(self, index, length):
'''Return an :class:`_Array` with one additional axis.'''
if index in self.indices or index in self.summed:
raise _IntermediateError('Duplicate index: {!r}.'.format(index))
linked_lengths = self.linked_lengths
if isinstance(length, _Length):
for group in linked_lengths:
if length in group:
break
else:
linked_lengths |= frozenset({frozenset({length})})
return _Array(('append_axis', self.ast, _(length)), self.indices+index, self.shape+(length,), self.summed, linked_lengths)
def transpose(self, indices):
'''Return an :class:`_Array` transposed according to ``indices``.'''
if len(indices) != len(set(indices)):
raise _IntermediateError('Cannot transpose from {!r} to {!r}: duplicate indices.'.format(self.indices, indices))
elif set(self.indices) != set(indices):
raise _IntermediateError('Cannot transpose from {!r} to {!r}: indices differ.'.format(self.indices, indices))
if self.indices == indices:
return self
else:
transpose = tuple(map(self.indices.index, indices))
shape = tuple(map(self.shape.__getitem__, transpose))
return _Array(('transpose', self.ast, _(transpose)), indices, shape, self.summed, self.linked_lengths)
def replace(self, **updates):
'''Return a copy of this :class:`_Array` with attributes replaced by ``updates``.'''
kwargs = dict(ast=self.ast, indices=self.indices, shape=self.shape, summed=self.summed, linked_lengths=self.linked_lengths)
kwargs.update(updates)
return _Array(**kwargs)
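# Standalone sketch (not used by the parser) of the group-merging idea behind
# ``_Array._join_lengths``: lengths that must be equal are kept in frozensets,
# and adding a group merges every existing group it touches.
def _sketch_merge_length_groups(groups):
    '''Merge overlapping frozensets, e.g. {{1, "i"}, {"i", "j"}} -> {{1, "i", "j"}}.'''
    cache = {}
    for group in groups:
        merged = set()
        for key in group:
            # pull in everything already linked to this key
            merged |= cache.get(key, frozenset([key]))
        merged = frozenset(merged)
        cache.update((key, merged) for key in merged)
    return frozenset(cache.values())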
class _ArrayOmittedIndices:
def __init__(self, ast, shape):
self.ast = tuple(ast)
self.shape = tuple(shape)
self.ndim = len(self.shape)
def __add__(self, other):
if self.shape != other.shape:
raise _IntermediateError('Cannot add arrays with omitted indices because the shapes differ: {}, {}.'.format(self.shape, other.shape))
return _ArrayOmittedIndices(('add', self.ast, other.ast), self.shape)
def __sub__(self, other):
if self.shape != other.shape:
raise _IntermediateError('Cannot subtract arrays with omitted indices because the shapes differ: {}, {}.'.format(self.shape, other.shape))
return _ArrayOmittedIndices(('sub', self.ast, other.ast), self.shape)
def __mul__(self, other):
if self.ndim != 0:
raise _IntermediateError('Arrays with omitted indices cannot be multiplied.')
self_ast = self.ast
for n in other.shape:
self_ast = ('append_axis', self_ast, _(n))
return _ArrayOmittedIndices(('mul', self_ast, other.ast), other.shape)
def __neg__(self):
return _ArrayOmittedIndices(('neg', self.ast), self.shape)
def __truediv__(self, other):
if other.ndim > 0:
raise _IntermediateError('A denominator must have dimension 0.')
return _ArrayOmittedIndices(('truediv', self.ast, other.ast), self.shape)
def __pow__(self, other):
if other.ndim > 0:
raise _IntermediateError('An exponent must have dimension 0.')
return _ArrayOmittedIndices(('pow', self.ast, other.ast), self.shape)
def replace(self, ast=None):
return _ArrayOmittedIndices(self.ast if ast is None else ast, self.shape)
class _ExpressionParser:
'''Expression parser
Args
----
expression : :class:`str`
See argument ``expression`` of :func:`parse`.
variables : :class:`dict` of :class:`str` and :class:`nutils.function.Array` pairs
See argument ``variables`` of :func:`parse`.
arg_shapes : :class:`dict` of :class:`str` and :class:`tuple` or :class:`int`\\s pairs
See argument ``arg_shapes`` of :func:`parse`.
default_geometry_name : class:`str`
See argument ``default_geometry_name`` of :func:`parse`.
fixed_lengths : :class:`dict` of :class:`str` and :class:`int`
See argument ``fixed_lengths`` of :func:`parse`.
'''
eye_symbols = '$', 'δ'
normal_symbols = 'n',
def __init__(self, expression, variables, arg_shapes, default_geometry_name, fixed_lengths):
self.expression = expression
self.variables = variables
self.arg_shapes = dict(arg_shapes)
self.default_geometry_name = default_geometry_name
self.fixed_lengths = fixed_lengths
def highlight(f):
'wrap ``f`` in a function that converts ``_IntermediateError`` objects'
def wrapper(self, *args, **kwargs):
if hasattr(self, '_tokens'):
pos = self._next.pos
else:
pos = 0
try:
return f(self, *args, **kwargs)
except _IntermediateError as e:
if e.at is None:
at = pos
count = self._next.pos - pos if self._next.pos > pos else len(self._next.data)
else:
at = e.at
count = 1 if e.count is None else e.count
raise ExpressionSyntaxError(e.msg + '\n' + self.expression + '\n' + ' '*at + '^'*count) from e
return wrapper
def _consume(self):
'advance to next token'
self._index += 1
if self._index >= len(self._tokens):
raise _IntermediateError('Unexpected end of expression.', at=len(self.expression))
return self._current
def _consume_if_whitespace(self):
'advance to next token if it is a whitespace'
if self._next.type == 'whitespace':
self._consume()
@highlight
def _consume_assert_whitespace(self):
'assert the next token is whitespace, skip it, and advance to next token'
if self._consume().type != 'whitespace':
raise _IntermediateError('Missing whitespace.', at=self._current.pos)
@highlight
def _consume_assert_equal(self, value, msg=None):
'assert the next token is equal to ``value``'
token = self._consume()
if token.type != value:
if msg is None:
msg = 'Expected {!r}.'.format(value)
raise _IntermediateError(msg, at=token.pos)
return token
@property
def _current(self):
'the current token'
return self._tokens[self._index]
@property
def _next(self):
'the next token'
return self._tokens[min(len(self._tokens)-1, self._index+1)]
@property
def _next_non_whitespace(self):
'the next non-whitespace token'
return self._tokens[self._index+2] if self._next.type == 'whitespace' else self._next
def _asarray(self, ast, indices_token, shape, omitted_indices):
indices =
if isinstance(param, str):
func_name = utils.get_value_by_insensitive_key_or_value(
param,
self.components._namespace) or param
if hasattr(self.components, func_name):
func = getattr(self.components, func_name)
else:
raise NameError(
"\n'%s' is not recognized as a model component."
% param)
else:
func = param
if not self.get_args(func):
value = func()
else:
value = func(0)
if isinstance(value, xr.DataArray):
dims = list(value.dims)
coords = {coord: list(value.coords[coord].values)
for coord in value.coords}
return coords, dims
else:
return None
def set_components(self, params):
""" Set the value of exogenous model elements.
Element values are passed as a dictionary of name: value pairs.
Values can be numeric or a pandas Series; a Series is interpolated
by the integrator.
Examples
--------
>>> model.set_components({'birth_rate': 10})
>>> model.set_components({'Birth Rate': 10})
>>> br = pandas.Series(index=range(30), data=np.sin(range(30)))
>>> model.set_components({'birth_rate': br})
"""
# TODO: allow the params argument to take a pandas dataframe, where
# column names are variable names. However some variables may be
# constant or have no values for some index. This should be processed.
for key, value in params.items():
func_name = utils.get_value_by_insensitive_key_or_value(
key,
self.components._namespace)
if isinstance(value, np.ndarray) or isinstance(value, list):
raise TypeError(
'When setting ' + key + '\n'
'Setting subscripted variables must be done using an xarray.DataArray'
' with the correct dimensions or a constant value '
'(https://pysd.readthedocs.io/en/master/basic_usage.html)')
if func_name is None:
raise NameError(
"\n'%s' is not recognized as a model component."
% key)
try:
func = getattr(self.components, func_name)
_, dims = self.get_coords(func) or (None, None)
args = self.get_args(func)
except (AttributeError, TypeError):
dims, args = None, None
if isinstance(value, pd.Series):
new_function, cache = self._timeseries_component(
value, dims, args)
elif callable(value):
new_function = value
cache = None
else:
new_function = self._constant_component(value, dims, args)
cache = 'run'
# this won't handle other statefuls...
if '_integ_' + func_name in dir(self.components):
warnings.warn("Replacing the equation of stock"
+ "{} with params".format(key),
stacklevel=2)
# add cache
new_function.__name__ = func_name
if cache == 'run':
new_function = self.components.cache.run(new_function)
elif cache == 'step':
new_function = self.components.cache.step(new_function)
setattr(self.components, func_name, new_function)
self.components.cache.clean()
def _timeseries_component(self, series, dims, args=[]):
""" Internal function for creating a timeseries model element """
# this is only called if the set_component function recognizes a
# pandas series
# TODO: raise a warning if extrapolating from the end of the series.
if isinstance(series.values[0], xr.DataArray) and args:
# the argument is already given in the model when the model
# is called
return lambda x: utils.rearrange(xr.concat(
series.values,
series.index).interp(concat_dim=x).reset_coords(
'concat_dim', drop=True),
dims, self.components._subscript_dict), 'lookup'
elif isinstance(series.values[0], xr.DataArray):
# the interpolation will be time dependent
return lambda: utils.rearrange(xr.concat(
series.values,
series.index).interp(concat_dim=self.time()).reset_coords(
'concat_dim', drop=True),
dims, self.components._subscript_dict), 'step'
elif args and dims:
# the argument is already given in the model when the model
# is called
return lambda x: utils.rearrange(
np.interp(x, series.index, series.values),
dims, self.components._subscript_dict), 'lookup'
elif args:
# the argument is already given in the model when the model
# is called
return lambda x:\
np.interp(x, series.index, series.values), 'lookup'
elif dims:
# the interpolation will be time dependent
return lambda: utils.rearrange(
np.interp(self.time(), series.index, series.values),
dims, self.components._subscript_dict), 'step'
else:
# the interpolation will be time dependent
return lambda:\
np.interp(self.time(), series.index, series.values), 'step'
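# Illustrative sketch (assumed values, not taken from any model): in the scalar
# branch above, the returned callable is a linear interpolation of the series
# at the current model time, e.g.
#     series = pd.Series(index=[0, 10], data=[0.0, 1.0])
#     np.interp(5, series.index, series.values)   # -> 0.5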
def _constant_component(self, value, dims, args=[]):
""" Internal function for creating a constant model element """
if args and dims:
# need to pass an argument to keep consistency with the calls
# to the function
return lambda x: utils.rearrange(
value, dims, self.components._subscript_dict)
elif args:
# need to pass an argument to keep consistency with the calls
# to the function
return lambda x: value
elif dims:
return lambda: utils.rearrange(
value, dims, self.components._subscript_dict)
else:
return lambda: value
def set_state(self, t, initial_value):
""" Old set_state method use set_initial_value"""
warnings.warn(
"\nset_state will be deprecated, use set_initial_value instead.",
FutureWarning)
self.set_initial_value(t, initial_value)
def set_initial_value(self, t, initial_value):
""" Set the system initial value.
Parameters
----------
t : numeric
The system time
initial_value : dict
A (possibly partial) dictionary of the system initial values.
The keys to this dictionary may be either pysafe names or
original model file names
"""
self.time.update(t)
self.components.cache.reset(t)
stateful_name = "_NONE"
# TODO make this more solid, link with builder or next TODO?
stateful_init = [
"_integ_", "_delay_", "_delayfixed_", "_delayn_",
"_sample_if_true_", "_smooth_", "_trend_", "_initial_"]
for key, value in initial_value.items():
component_name = utils.get_value_by_insensitive_key_or_value(
key, self.components._namespace)
if component_name is not None:
for element in self._stateful_elements:
# TODO make this more solid, add link between stateful
# objects and model vars
for init in stateful_init:
if init + component_name == element.py_name:
stateful_name = element.py_name
else:
component_name = key
stateful_name = key
try:
_, dims = self.get_coords(component_name)
except TypeError:
dims = None
if isinstance(value, xr.DataArray) and dims is not None\
and not set(value.dims).issubset(set(dims)):
raise ValueError(
f"\nInvalid dimensions for {component_name}."
f"It should be a subset of {dims}, "
f"but passed value has {list(value.dims)}")
if isinstance(value, np.ndarray) or isinstance(value, list):
raise TypeError(
'When setting ' + key + '\n'
'Setting subscripted variables must be done using an xarray.DataArray'
' with the correct dimensions or a constant value '
'(https://pysd.readthedocs.io/en/master/basic_usage.html)')
# Try to update stateful component
if hasattr(self.components, stateful_name):
element = getattr(self.components, stateful_name)
if dims:
value = utils.rearrange(
value, dims,
self.components._subscript_dict)
element.initialize(value)
self.components.cache.clean()
else:
# Try to override component
warnings.warn(
f"\nSetting {component_name} to a constant value with "
"initial_conditions will be deprecated. Use params={"
f"'{component_name}': {value}"+"} instead.",
FutureWarning)
setattr(self.components, component_name,
self._constant_component(
value, dims,
self.get_args(component_name)))
self.components.cache.clean()
def set_stateful(self, stateful_dict):
"""
Set stateful values.
Parameters
----------
stateful_dict: dict
Dictionary of the stateful elements and the attributes to change.
"""
for element, attrs in stateful_dict.items():
for attr, value in attrs.items():
setattr(getattr(self.components, element), attr, value)
def doc(self):
"""
Formats a table of documentation strings to help users remember
variable names, and understand how they are translated into
python safe names.
Returns
-------
docs_df: pandas dataframe
Dataframe with columns for the model components:
- Real names
- Python safe identifiers (as used in model.components)
- Units string
- Documentation strings from the original model file
"""
collector = []
for name, varname in self.components._namespace.items():
try:
# TODO correct this when Original Eqn is in several lines
docstring = getattr(self.components, varname).__doc__
lines = docstring.split('\n')
for unit_line in range(3, 9):
# this loop detects where Units: starts as
# sometimes eqn could be split in several lines
if re.findall('Units:', lines[unit_line]):
break
if unit_line == 3:
eqn = lines[2].replace("Original Eqn:", "").strip()
else:
eqn = '; '.join([l.strip() for l in lines[3:unit_line]])
collector.append(
{'Real Name': name,
'Py Name': varname,
'Eqn': eqn,
'Unit': lines[unit_line].replace("Units:", "").strip(),
'Lims': lines[unit_line+1].replace("Limits:", "").strip(),
'Type': lines[unit_line+2].replace("Type:", "").strip(),
'Subs': lines[unit_line+3].replace("Subs:", "").strip(),
'Comment': '\n'.join(lines[(unit_line+4):]).strip()})
except Exception:
pass
docs_df = pd.DataFrame(collector)
docs_df.fillna('None', inplace=True)
order = ['Real Name', 'Py Name', 'Unit', 'Lims',
'Type', 'Subs', 'Eqn', 'Comment']
return docs_df[order].sort_values(by='Real Name').reset_index(drop=True)
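# Illustrative sketch (hypothetical component; the layout is an assumption
# inferred from the parsing above): doc() expects builder-generated docstrings
# shaped roughly as
#
#     Real Name: Births
#     Original Eqn: birth_rate * population
#     Units: people/year
#     Limits: (0, None)
#     Type: component
#     Subs: None
#
#     Free-form comment text.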
def __str__(self):
""" Return model source files """
# JT: Might be helpful to return not only the source file, but
# also how the instance differs from that source file. This
# would give a more accurate view of the current model.
string = 'Translated Model File: ' + self.py_model_file
if hasattr(self, 'mdl_file'):
string += '\n Original Model File: ' + self.mdl_file
return string
class Time(object):
def __init__(self, t=None, dt=None):
self._t = t
self._step = dt
self.stage = None
def __call__(self):
return self._t
def step(self):
return self._step
def update(self, value):
if self._t is not None:
self._step = value - self._t
self._t = value
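# Illustrative usage sketch of the Time helper above (values are assumptions,
# not part of any model):
#     t = Time()
#     t.update(0)
#     t.update(0.25)
#     t()        # -> 0.25
#     t.step()   # -> 0.25, the difference between the last two updates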
class Model(Macro):
def __init__(self, py_model_file, initialize, missing_values):
""" Sets up the python objects """
super().__init__(py_model_file, None, None, Time())
self.time.stage = 'Load'
self.missing_values = missing_values
if initialize:
self.initialize()
def initialize(self):
""" Initializes the simulation model """
self.time.update(self.components.initial_time())
self.time.stage = 'Initialization'
External.missing = self.missing_values
super().initialize()
def _build_euler_timeseries(self, return_timestamps=None, final_time=None):
"""
- The integration steps need to include the return values.
- There is no point running the model past the last return value.
- The last timestep will be the last in that requested for return
- Spacing should be at maximum what is specified by the integration
time step.
- The initial time should be the one specified by the model file,
OR it should be the initial condition.
- This function needs to be called AFTER the model is set
import argparse
import os
import numpy as np
import scipy
import cv2
from functools import partial
from shapely.geometry import Polygon
import torch
import torch.optim as optim
import torch.utils.data as data
from torch import distributed
import grasp_det_seg.models as models
from grasp_det_seg.algos.detection import PredictionGenerator, ProposalMatcher, DetectionLoss
from grasp_det_seg.algos.fpn import DetectionAlgoFPN, RPNAlgoFPN
from grasp_det_seg.algos.rpn import AnchorMatcher, ProposalGenerator, RPNLoss
from grasp_det_seg.algos.semantic_seg import SemanticSegAlgo, SemanticSegLoss
from grasp_det_seg.config import load_config
from grasp_det_seg.data_OCID import iss_collate_fn, OCIDTestDataset, OCIDTestTransform,read_boxes_from_file,\
prepare_frcnn_format
from grasp_det_seg.data_OCID.OCID_class_dict import colors_list, cls_list
from grasp_det_seg.data_OCID.sampler import DistributedARBatchSampler
from grasp_det_seg.models.det_seg import DetSegNet, NETWORK_INPUTS
from grasp_det_seg.modules.fpn import FPN, FPNBody
from grasp_det_seg.modules.heads import RPNHead, FPNROIHead, FPNSemanticHeadDeeplab
from grasp_det_seg.utils import logging
from grasp_det_seg.utils.meters import AverageMeter
from grasp_det_seg.utils.misc import config_to_string, scheduler_from_config, norm_act_from_config, freeze_params, \
all_reduce_losses, NORM_LAYERS, OTHER_LAYERS
from grasp_det_seg.utils.parallel import DistributedDataParallel
from grasp_det_seg.utils.snapshot import resume_from_snapshot
from skimage.measure import regionprops
parser = argparse.ArgumentParser(description="OCID detection and segmentation test script")
parser.add_argument("--local_rank", type=int)
parser.add_argument("--log_dir", type=str, default=".", help="Write logs to the given directory")
parser.add_argument("config", metavar="FILE", type=str, help="Path to configuration file")
parser.add_argument("model", metavar="FILE", type=str, help="Path to model file")
parser.add_argument("data", metavar="DIR", type=str, help="Path to dataset")
parser.add_argument("out_dir", metavar="DIR", type=str, help="Path to output directory")
def save_param_file(writer, param_file):
data_sum = ''
with open(param_file) as fp:
Lines = fp.readlines()
for line in Lines:
data_sum += line + ' \n'
writer.add_text('dataset_parameters', data_sum)
return
def ensure_dir(dir_path):
try:
os.mkdir(dir_path)
except FileExistsError:
pass
def Rotate2D(pts, cnt, ang):
ang = np.deg2rad(ang)
return np.dot(pts - cnt, np.array([[np.cos(ang), np.sin(ang)], [-np.sin(ang),
np.cos(ang)]])) + cnt
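# Illustrative sketch (assumed values): Rotate2D rotates ``pts`` about ``cnt``
# by ``ang`` degrees with the sign convention above, e.g.
#     Rotate2D(np.array([[1.0, 0.0]]), np.array([0.0, 0.0]), 90.0)
#     # -> approximately [[0.0, 1.0]]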
def calc_jacc_IOU(gt_boxes,gt_theta_, r_bbox__best,theta_best):
ret_val = 0
for cnt in range(gt_boxes.shape[0]):
try:
gt_pts = gt_boxes[cnt].T
gt_theta = gt_theta_[cnt]
pts_rot = np.asarray(r_bbox__best)
# check angle difference
if np.abs(gt_theta-theta_best) < 30 or \
(np.abs(np.abs(gt_theta-theta_best) - 180.)) < 30:
pts_rot = Polygon(pts_rot)
gt_pts = Polygon(gt_pts)
intersect = gt_pts.intersection(pts_rot).area / gt_pts.union(pts_rot).area
if intersect > .25:
ret_val = 1
break
except Exception:
continue
return ret_val
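# Illustrative sketch (assumed rectangles, not OCID data): the overlap test in
# calc_jacc_IOU is the Jaccard index of two shapely polygons, accepted when it
# exceeds 0.25 and the angles differ by less than 30 degrees (mod 180), e.g.
#     a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
#     b = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])
#     a.intersection(b).area / a.union(b).area   # -> 1/3, above the 0.25 cut-off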
def save_prediction_image(raw_pred, img_abs_path, img_root_path, im_size, idx_, out_dir):
num_classes_theta = 18
threshold = 0.05
total_boxes = 0
correct_boxes = 0
total_IOU_seg = 0
IOU_count = 0
IOU_seg_threshold = 100 # in px
for i, (sem_pred, bbx_pred, cls_pred, obj_pred) in enumerate(zip(
raw_pred["sem_pred"], raw_pred["bbx_pred"], raw_pred["cls_pred"], raw_pred["obj_pred"])):
item = os.path.join(img_root_path[i], img_abs_path[i])
im_size_ = im_size[i]
ensure_dir(out_dir)
seq_path, im_name = item.split(',')
sem_pred = np.asarray(sem_pred.detach().cpu().numpy(), dtype=np.uint8)
seg_mask_vis = np.zeros((im_size_[0], im_size_[1], 3))
cls_labels = np.unique(sem_pred)
img_path = os.path.join(img_root_path[i], seq_path, 'rgb', im_name)
mask_path = os.path.join(img_root_path[i], seq_path, 'seg_mask_labeled_combi', im_name)
img = cv2.imread(img_path)
img_best_boxes = np.copy(img)
mask_gt = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
delta_xy = np.array([[int(img.shape[1] / 2 - int(im_size_[1] / 2))],
[int(img.shape[0] / 2 - int(im_size_[0] / 2))]])
# img_cls = np.copy(img)
img_all_boxes = np.copy(img)
img_IOU_seg = 0
img_IOU_count = 0
for cnt, label in enumerate(cls_labels):
if label == 0:
continue
seg_mask_vis[sem_pred == label] = colors_list[label]
mask_per_label = np.zeros_like(sem_pred)
mask_per_label_gt = np.zeros_like(sem_pred)
mask_per_label[sem_pred == label] = 1
mask_per_label_gt[mask_gt == label] = 1
if sum(map(sum, mask_per_label)) < IOU_seg_threshold:
continue
intersection = np.logical_and(mask_per_label_gt, mask_per_label)
union = np.logical_or(mask_per_label_gt, mask_per_label)
iou_score = np.sum(intersection) / np.sum(union)
img_IOU_seg += iou_score
img_IOU_count += 1
# # only for object detection, to draw axis aligned bounding boxes
# props = regionprops(mask_per_label)
# for prop in props:
# cv2.rectangle(img_cls, (prop.bbox[1], prop.bbox[0]), (prop.bbox[3], prop.bbox[2]),
# colors_list[label].tolist(), 2)
# cv2.putText(img_cls, cls_list[label], (prop.bbox[1], prop.bbox[0] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
# colors_list[label].tolist(), 1)
if img_IOU_count:
total_IOU_seg += img_IOU_seg/img_IOU_count
IOU_count += 1
ensure_dir(out_dir)
out_path = os.path.join(out_dir, im_name[:-4] + ".png")
img_mask = (img * 0.25 + seg_mask_vis * 0.75)
anno_per_class_dir = os.path.join(os.path.join(img_root_path[i], seq_path, 'Annotations_per_class',
im_name[:-4]))
for class_dir in os.listdir(anno_per_class_dir):
if not os.path.isdir(os.path.join(anno_per_class_dir, class_dir)):
continue
gt_boxes_path = os.path.join(anno_per_class_dir, class_dir,im_name[:-4] + '.txt')
gt_boxes = read_boxes_from_file(gt_boxes_path, delta_xy)
if len(gt_boxes) == 0:
#print('txt file empty')
continue
else:
total_boxes += 1
if bbx_pred is None:
continue
else:
(gt_boxes_, gt_theta, cls) = prepare_frcnn_format(gt_boxes,im_size_)
best_confidence = 0.
r_bbox__best = None
theta_best = None
cnt_best = None
for bbx_pred_i, cls_pred_i, obj_pred_i in zip(bbx_pred, cls_pred, obj_pred):
if obj_pred_i.item() > threshold:
pt1 = (int(bbx_pred_i[0]), int(bbx_pred_i[1]))
pt2 = (int(bbx_pred_i[2]), int(bbx_pred_i[3]))
cls = cls_pred_i.item()
theta = ((180 / num_classes_theta) * cls) + 5
pts = np.array([[pt1[0], pt1[1]], [pt2[0], pt1[1]], [pt2[0], pt2[1]], [pt1[0], pt2[1]]])
cnt = np.array([(int(bbx_pred_i[0]) + int(bbx_pred_i[2])) / 2,
(int(bbx_pred_i[1]) + int(bbx_pred_i[3])) / 2])
r_bbox_ = Rotate2D(pts, cnt, 90 - theta)
r_bbox_ = r_bbox_.astype('int16')
if (int(cnt[1]) >= im_size_[0]) or (int(cnt[0]) >= im_size_[1]):
continue
if sem_pred[int(cnt[1]), int(cnt[0])] == int(class_dir):
if obj_pred_i.item() >= best_confidence:
best_confidence = obj_pred_i.item()
r_bbox__best = r_bbox_
theta_best = theta
cnt_best = cnt
cv2.line(img_all_boxes, tuple(r_bbox_[0]), tuple(r_bbox_[1]), (255, 0, 0), 2)
cv2.line(img_all_boxes, tuple(r_bbox_[1]), tuple(r_bbox_[2]), (0, 0, 255), 2)
cv2.line(img_all_boxes, tuple(r_bbox_[2]), tuple(r_bbox_[3]), (255, 0, 0), 2)
cv2.line(img_all_boxes, tuple(r_bbox_[3]), tuple(r_bbox_[0]), (0, 0, 255), 2)
if r_bbox__best is not None:
img_best_boxes = cv2.circle(img_best_boxes, (int(cnt_best[0]), int(cnt_best[1])), radius=5, color=(0, 255, 0), thickness=-1)
cv2.line(img_best_boxes, tuple(r_bbox__best[0]), tuple(r_bbox__best[1]), (255, 0, 0), 3)
cv2.line(img_best_boxes, tuple(r_bbox__best[1]), tuple(r_bbox__best[2]), (0, 0, 255), 3)
cv2.line(img_best_boxes, tuple(r_bbox__best[2]), tuple(r_bbox__best[3]), (255, 0, 0), 3)
cv2.line(img_best_boxes, tuple(r_bbox__best[3]), tuple(r_bbox__best[0]), (0, 0, 255), 3)
ret_val = calc_jacc_IOU(gt_boxes_, gt_theta, r_bbox__best, theta_best)
correct_boxes = correct_boxes + ret_val
# visualization of results
res = np.hstack((img,img_all_boxes, img_best_boxes, img_mask))
scale_percent = 75 # % of original size
width = int(res.shape[1] * scale_percent / 100)
height = int(res.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(res, dim, interpolation=cv2.INTER_AREA)
cv2.imwrite(out_path, resized)
if IOU_count == 0:
return correct_boxes, total_boxes, 0
else:
total_IOU_seg_mean = total_IOU_seg / IOU_count
return correct_boxes, total_boxes, total_IOU_seg_mean
def log_debug(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().debug(msg, *args, **kwargs)
def log_info(msg, *args, **kwargs):
if distributed.get_rank() == 0:
logging.get_logger().info(msg, *args, **kwargs)
def make_config(args):
log_debug("Loading configuration from %s", args.config)
conf = load_config(args.config, args.config)
log_debug("\n%s", config_to_string(conf))
return conf
def make_dataloader(args, config, rank, world_size):
config = config["dataloader"]
log_debug("Creating dataloaders for dataset in %s", args.data)
# Validation dataloader
val_tf = OCIDTestTransform(config.getint("shortest_size"),
config.getint("longest_max_size"),
config.getstruct("rgb_mean"),
config.getstruct("rgb_std")
)
val_db = OCIDTestDataset(args.data, config["root_path"], config["test_set"], val_tf)
val_sampler = DistributedARBatchSampler(
val_db, config.getint("val_batch_size"), world_size, rank, False)
val_dl = data.DataLoader(val_db,
batch_sampler=val_sampler,
collate_fn=iss_collate_fn,
pin_memory=True,
num_workers=config.getint("num_workers"))
return val_dl
def make_model(config):
body_config = config["body"]
fpn_config = config["fpn"]
rpn_config = config["rpn"]
roi_config = config["roi"]
sem_config = config["sem"]
general_config = config["general"]
classes = {"total": int(general_config["num_things"]) + int(general_config["num_stuff"]), "stuff":
int(general_config["num_stuff"]), "thing": int(general_config["num_things"]),
"semantic": int(general_config["num_semantic"])}
# BN + activation
norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)
# Create backbone
log_debug("Creating backbone model %s", body_config["body"])
body_fn = models.__dict__["net_" + body_config["body"]]
body_params = body_config.getstruct("body_params") if body_config.get("body_params") else {}
body = body_fn(norm_act=norm_act_static, **body_params)
if body_config.get("weights"):
body.load_state_dict(torch.load(body_config["weights"], map_location="cpu"))
# Freeze parameters
for n, m in body.named_modules():
for mod_id in range(1, body_config.getint("num_frozen") + 1):
if ("mod%d" % mod_id) in n:
freeze_params(m)
body_channels = body_config.getstruct("out_channels")
# Create FPN
fpn_inputs = fpn_config.getstruct("inputs")
fpn = FPN([body_channels[inp] for inp in fpn_inputs],
fpn_config.getint("out_channels"),
fpn_config.getint("extra_scales"),
norm_act_static,
fpn_config["interpolation"])
body = FPNBody(body, fpn, fpn_inputs)
# Create RPN
proposal_generator = ProposalGenerator(rpn_config.getfloat("nms_threshold"),
rpn_config.getint("num_pre_nms_train"),
rpn_config.getint("num_post_nms_train"),
rpn_config.getint("num_pre_nms_val"),
rpn_config.getint("num_post_nms_val"),
rpn_config.getint("min_size"))
anchor_matcher = AnchorMatcher(rpn_config.getint("num_samples"),
rpn_config.getfloat("pos_ratio"),
rpn_config.getfloat("pos_threshold"),
rpn_config.getfloat("neg_threshold"),
rpn_config.getfloat("void_threshold"))
rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
rpn_algo = RPNAlgoFPN(
proposal_generator, anchor_matcher, rpn_loss,
rpn_config.getint("anchor_scale"), rpn_config.getstruct("anchor_ratios"),
fpn_config.getstruct("out_strides"), rpn_config.getint("fpn_min_level"), rpn_config.getint("fpn_levels"))
rpn_head = RPNHead(
fpn_config.getint("out_channels"), len(rpn_config.getstruct("anchor_ratios")), 1,
rpn_config.getint("hidden_channels"), norm_act_dynamic)
# Create detection network
prediction_generator = PredictionGenerator(roi_config.getfloat("nms_threshold"),
roi_config.getfloat("score_threshold"),
roi_config.getint("max_predictions"))
proposal_matcher = ProposalMatcher(classes,
roi_config.getint("num_samples"),
roi_config.getfloat("pos_ratio"),
roi_config.getfloat("pos_threshold"),
roi_config.getfloat("neg_threshold_hi"),
roi_config.getfloat("neg_threshold_lo"),
roi_config.getfloat("void_threshold"))
roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
roi_size = roi_config.getstruct("roi_size")
roi_algo = DetectionAlgoFPN(
prediction_generator, proposal_matcher, roi_loss, classes, roi_config.getstruct("bbx_reg_weights"),
roi_config.getint("fpn_canonical_scale"), roi_config.getint("fpn_canonical_level"), roi_size,
roi_config.getint("fpn_min_level"), roi_config.getint("fpn_levels"))
roi_head = FPNROIHead(fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic)
# Create semantic segmentation network
sem_loss = SemanticSegLoss(ohem=sem_config.getfloat("ohem"))
sem_algo = SemanticSegAlgo(sem_loss, classes["semantic"])
sem_head = FPNSemanticHeadDeeplab(fpn_config.getint("out_channels"),
sem_config.getint("fpn_min_level"),
sem_config.getint("fpn_levels"),
classes["semantic"],
pooling_size=sem_config.getstruct("pooling_size"),
norm_act=norm_act_static)
# Create final network
return DetSegNet(body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes)
def make_optimizer(config, model, epoch_length):
body_config = config["body"]
opt_config = config["optimizer"]
sch_config = config["scheduler"]
# Gather parameters from the network
norm_parameters = []
other_parameters = []
for m in model.modules():
if any(isinstance(m, layer) for layer in NORM_LAYERS):
norm_parameters += [p for p in m.parameters() if p.requires_grad]
elif any(isinstance(m, layer) for layer in OTHER_LAYERS):
other_parameters += [p for p in m.parameters() if p.requires_grad]
assert len(norm_parameters) + len(other_parameters) == len([p for p in model.parameters() if p.requires_grad]), \
"Not all parameters that require grad are accounted for in the optimizer"
# Set-up optimizer hyper-parameters
parameters = [
{
"params": norm_parameters,
"lr": opt_config.getfloat("lr") if not body_config.getboolean("bn_frozen") else 0.,
"weight_decay": opt_config.getfloat("weight_decay") if opt_config.getboolean("weight_decay_norm") else 0.
},
{
"params": other_parameters,
"lr": opt_config.getfloat("lr"),
"weight_decay": opt_config.getfloat("weight_decay")
}
]
optimizer = optim.SGD(
parameters, momentum=opt_config.getfloat("momentum"), nesterov=opt_config.getboolean("nesterov"))
scheduler = scheduler_from_config(sch_config, optimizer, epoch_length)
assert sch_config["update_mode"] in ("batch", "epoch")
batch_update = sch_config["update_mode"] == "batch"
total_epochs = sch_config.getint("epochs")
return optimizer, scheduler, batch_update, total_epochs
def evaluate(model, dataloader, **varargs):
model.eval()
dataloader.batch_sampler.set_epoch(0)
all_boxes = 0
all_correct = 0
total_IOU_seg = 0
for it, batch in enumerate(dataloader):
print('Batch no. : ' + str(it))
with torch.no_grad():
# Extract data
img = batch["img"].cuda(device=varargs["device"], non_blocking=True)
abs_paths = batch["abs_path"]
root_paths = batch["root_path"]
im_size = batch["im_size"]
img_idx
is_FreeModule(gens):
gens = gens.gens()
return FreeModule_submodule_field(self.ambient_vector_space(), gens, check=check)
def vector_space_span_of_basis(self, basis, check=True):
"""
Create the vector subspace of the ambient vector space with given
basis.
INPUT:
- ``basis`` - a list of linearly independent vectors
- ``check`` - whether or not to verify that each gen
is in the ambient vector space
OUTPUT: a vector subspace with user-specified basis
EXAMPLES::
sage: V = VectorSpace(QQ, 3)
sage: B = V.basis()
sage: W = V.vector_space_span_of_basis([B[0]+B[1], 2*B[1]-B[2]])
sage: W
Vector space of degree 3 and dimension 2 over Rational Field
User basis matrix:
[ 1 1 0]
[ 0 2 -1]
"""
return FreeModule_submodule_with_basis_field(self.ambient_vector_space(), basis, check=check)
def quotient(self, sub, check=True):
"""
Return the quotient of self by the given submodule sub.
INPUT:
- ``sub`` - a submodule of self, or something that can
be turned into one via self.submodule(sub).
- ``check`` - (default: True) whether or not to check
that sub is a submodule.
EXAMPLES::
sage: A = ZZ^3; V = A.span([[1,2,3], [4,5,6]])
sage: Q = V.quotient( [V.0 + V.1] ); Q
Finitely generated module V/W over Integer Ring with invariants (0)
"""
# Calling is_subspace may be way too slow and repeat work done below.
# It will be very desirable to somehow do this step better.
if check and (not is_FreeModule(sub) or not sub.is_submodule(self)):
try:
sub = self.submodule(sub)
except (TypeError, ArithmeticError):
raise ArithmeticError, "sub must be a subspace of self"
if self.base_ring() == sage.rings.integer_ring.ZZ:
from fg_pid.fgp_module import FGP_Module
return FGP_Module(self, sub, check=False)
else:
raise NotImplementedError, "quotients of modules over rings other than fields or ZZ are not fully implemented"
def __div__(self, sub, check=True):
"""
Return the quotient of self by the given submodule sub.
This just calls self.quotient(sub, check).
EXAMPLES::
sage: V1 = ZZ^2; W1 = V1.span([[1,2],[3,4]])
sage: V1/W1
Finitely generated module V/W over Integer Ring with invariants (2)
sage: V2 = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W2 = V2.span([2*V2.0+4*V2.1, 9*V2.0+12*V2.1, 4*V2.2])
sage: V2/W2
Finitely generated module V/W over Integer Ring with invariants (4, 12)
"""
return self.quotient(sub, check)
class FreeModule_generic_field(FreeModule_generic_pid):
"""
Base class for all free modules over fields.
"""
def __init__(self, base_field, dimension, degree, sparse=False):
"""
Creates a vector space over a field.
EXAMPLES::
sage: FreeModule(QQ, 2)
Vector space of dimension 2 over Rational Field
sage: FreeModule(FiniteField(2), 7)
Vector space of dimension 7 over Finite Field of size 2
We test that the issue at Trac #11166 is solved::
sage: from sage.modules.free_module import FreeModule_generic_field
sage: FreeModule_generic_field(QQ, 5, 5)
<class 'sage.modules.free_module.FreeModule_generic_field_with_category'>
"""
if not isinstance(base_field, field.Field):
raise TypeError, "The base_field (=%s) must be a field"%base_field
FreeModule_generic_pid.__init__(self, base_field, dimension, degree, sparse=sparse)
def _Hom_(self, Y, category):
r"""
Returns a homspace whose morphisms have this vector space as domain.
This is called by the general methods such as
:meth:`sage.structure.parent.Parent.Hom` and
:meth:`sage.structure.parent_base.ParentWithBase.Hom`.
INPUT:
- ``Y`` - a free module (or vector space) that will
be the codomain of the morphisms in returned homspace
- ``category`` - the category for the homspace
OUTPUT:
If ``Y`` is a free module over a field, in other words, a vector space,
then this returns a space of homomorphisms between vector spaces,
in other words a space of linear transformations.
If ``Y`` is a free module that is not a vector space, then
the returned space contains homomorphisms between free modules.
EXAMPLES::
sage: V = QQ^2
sage: W = QQ^3
sage: H = V._Hom_(W, category=None)
sage: type(H)
<class 'sage.modules.vector_space_homspace.VectorSpaceHomspace_with_category'>
sage: H
Set of Morphisms (Linear Transformations) from Vector space of dimension 2 over Rational Field to Vector space of dimension 3 over Rational Field
sage: V = QQ^2
sage: W = ZZ^3
sage: H = V._Hom_(W, category=None)
sage: type(H)
<class 'sage.modules.free_module_homspace.FreeModuleHomspace_with_category'>
sage: H
Set of Morphisms from Vector space of dimension 2 over Rational Field to Ambient free module of rank 3 over the principal ideal domain Integer Ring in Category of vector spaces over Rational Field
"""
if Y.base_ring().is_field():
import vector_space_homspace
return vector_space_homspace.VectorSpaceHomspace(self, Y, category)
import free_module_homspace
return free_module_homspace.FreeModuleHomspace(self, Y, category)
def scale(self, other):
"""
Return the product of self by the number other, which is the module
spanned by other times each basis vector. Since self is a vector
space this product equals self if other is nonzero, and is the zero
vector space if other is 0.
EXAMPLES::
sage: V = QQ^4
sage: V.scale(5)
Vector space of dimension 4 over Rational Field
sage: V.scale(0)
Vector space of degree 4 and dimension 0 over Rational Field
Basis matrix:
[]
::
sage: W = V.span([[1,1,1,1]])
sage: W.scale(2)
Vector space of degree 4 and dimension 1 over Rational Field
Basis matrix:
[1 1 1 1]
sage: W.scale(0)
Vector space of degree 4 and dimension 0 over Rational Field
Basis matrix:
[]
::
sage: V = QQ^4; V
Vector space of dimension 4 over Rational Field
sage: V.scale(3)
Vector space of dimension 4 over Rational Field
sage: V.scale(0)
Vector space of degree 4 and dimension 0 over Rational Field
Basis matrix:
[]
"""
if other == 0:
return self.zero_submodule()
return self
def __add__(self, other):
"""
Return the sum of self and other.
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: V0 = V.span([V.gen(0)])
sage: V2 = V.span([V.gen(2)])
sage: V0 + V2
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[1 0 0]
[0 0 1]
sage: QQ^3 + 0
Vector space of dimension 3 over Rational Field
"""
if not isinstance(other, FreeModule_generic_field):
if other == 0:
return self
raise TypeError, "other must be a Vector Space"
V = self.ambient_vector_space()
if V != other.ambient_vector_space():
raise ArithmeticError, "self and other must have the same ambient space"
return V.span(self.basis() + other.basis())
def echelonized_basis_matrix(self):
"""
Return basis matrix for self in row echelon form.
EXAMPLES::
sage: V = FreeModule(QQ, 3).span_of_basis([[1,2,3],[4,5,6]])
sage: V.basis_matrix()
[1 2 3]
[4 5 6]
sage: V.echelonized_basis_matrix()
[ 1 0 -1]
[ 0 1 2]
"""
try:
return self.__echelonized_basis_matrix
except AttributeError:
pass
self.__echelonized_basis_matrix = self.basis_matrix().echelon_form()
return self.__echelonized_basis_matrix
def intersection(self, other):
"""
Return the intersection of self and other, which must be
R-submodules of a common ambient vector space.
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: W1 = V.submodule([V.gen(0), V.gen(0) + V.gen(1)])
sage: W2 = V.submodule([V.gen(1), V.gen(2)])
sage: W1.intersection(W2)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[0 1 0]
sage: W2.intersection(W1)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[0 1 0]
sage: V.intersection(W1)
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[1 0 0]
[0 1 0]
sage: W1.intersection(V)
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[1 0 0]
[0 1 0]
sage: Z = V.submodule([])
sage: W1.intersection(Z)
Vector space of degree 3 and dimension 0 over Rational Field
Basis matrix:
[]
"""
if not isinstance(other, FreeModule_generic):
raise TypeError, "other must be a free module"
if self.ambient_vector_space() != other.ambient_vector_space():
raise ArithmeticError, "self and other must have the same ambient space."
if self.rank() == 0 or other.rank() == 0:
if self.base_ring().is_field():
return other.zero_submodule()
else:
return self.zero_submodule()
if self.base_ring() != other.base_ring():
# Now other is over a ring R whose fraction field K is the base field of V = self.
# We compute the intersection using the following algorithm:
# 1. By explicitly computing the nullspace of the matrix whose rows
# are a basis for self, we obtain the matrix of a linear map
# phi: K^n ----> W
# with kernel equal to V = self.
# 2. Compute the kernel over R of Phi restricted to other. Do this
# by clearing denominators, computing the kernel of a matrix with
# entries in R, then restoring denominators to the answer.
K = self.base_ring()
R = other.base_ring()
B = self.basis_matrix().transpose()
W = B.kernel()
phi = W.basis_matrix().transpose()
# To restrict phi to other, we multiply the basis matrix for other
# by phi, thus computing the image of each basis vector.
X
0xA4A7A4,
"Stepping Stones": 0xB2A18C,
"Sterling": 0xD1D4D1,
"Sterling Blue": 0xA2B9C2,
"Sterling Shadow": 0xE9EBDE,
"Sterling Silver": 0x9EAFC2,
"Stetson": 0x9E7A58,
"Steveareno Beige": 0xC5B5A4,
"Sticks & Stones": 0xBAA482,
"Sticky Black Tarmac": 0x112111,
"Sticky Toffee": 0xCC8149,
"Stieglitz Silver": 0x8D8F8E,
"Stil De Grain Yellow": 0xFADB5E,
"Stiletto": 0x323235,
"Stiletto Love": 0xB6453E,
"Still": 0xADAF9C,
"Still Fuchsia": 0xC154C0,
"Still Grey": 0xABA9A0,
"Still Moment": 0xCBC4B2,
"Still Morning": 0xFFF8E1,
"Still Water": 0x4A5D5F,
"Stillwater": 0x70A4B0,
"Stillwater Lake": 0xC2D0DF,
"Stilted Stalks": 0xA29A6A,
"Stinging Nettle": 0x495D39,
"Stinging Wasabi": 0xAEFD6C,
"Stingray Grey": 0xB0ABA3,
"Stinkhorn": 0x2A545C,
"Stirland Battlemire": 0xAE5A2C,
"Stirland Mud": 0x492B00,
"Stirring Orange": 0xF6B064,
"Stizza": 0x900910,
"Stock Horse": 0x806852,
"Stockade Green": 0x104F4A,
"Stocking White": 0xE9E5D8,
"Stockleaf": 0x647B72,
"Stoic White": 0xE0E0FF,
"Stolen Kiss": 0xEFDCD3,
"Stomy Shower": 0x0088B0,
"Stone": 0xADA587,
"Stone Blue": 0x829CA5,
"Stone Bridge": 0x52706C,
"Stone Brown": 0xB79983,
"Stone Craft": 0x7D867C,
"Stone Creek": 0x8F9183,
"Stone Cypress Green": 0x5F7D6C,
"Stone Fence": 0x929C9C,
"Stone Fruit": 0xF2A28C,
"Stone Golem": 0xC2CBD2,
"Stone Green": 0x658E67,
"Stone Grey": 0x9F9484,
"Stone Guardians": 0xCABA97,
"Stone Harbour": 0xE8E0D8,
"Stone Hearth": 0x636869,
"Stone Lion": 0xB3A491,
"Stone Mason": 0x7A7B75,
"Stone Mill": 0xB6B7AD,
"Stone Path": 0xE4EFE5,
"Stone Pillar": 0xEFE5D4,
"Stone Pine": 0x665C46,
"Stone Quarry": 0xECE4DC,
"Stone Silver": 0x8BA8AE,
"Stone Terrace": 0xA09484,
"Stone Violet": 0x4D404F,
"Stone Walkway": 0xB5B09E,
"Stone Wall": 0xEFE1D8,
"Stone Walls": 0xAFA791,
"Stone Wash": 0xE5D4C0,
"Stone's Throw": 0x605C58,
"Stonebread": 0xDDCEA7,
"Stonebriar": 0xCBA97E,
"Stonecrop": 0xA08F6F,
"Stonegate": 0x99917E,
"Stonehenge Greige": 0xA79D8D,
"Stonelake": 0xBAB1A3,
"Stonetalon Mountains": 0x8D7A4D,
"Stonewall": 0x807661,
"Stonewall Grey": 0xC1C1C1,
"Stonewash": 0x74809A,
"Stonewashed": 0xDDD7C5,
"Stonewashed Brown": 0xDCCCC0,
"Stonewashed Pink": 0xF4EEE4,
"Stonish Beige": 0xCCB49A,
"Stony Creek": 0x948F82,
"Stony Field": 0x615547,
"Stop": 0xC33A36,
"Storksbill": 0xE5E1DD,
"Storksbill White": 0xF2F2E2,
"Storm": 0x444400,
"Storm Blue": 0x507B9C,
"Storm Break": 0x938988,
"Storm Cloud": 0x808283,
"Storm Dust": 0x65645F,
"Storm Front": 0x787376,
"Storm Green": 0x113333,
"Storm Grey": 0x717486,
"Storm Lightning": 0xF9E69C,
"Storm Petrel": 0x7F95A5,
"Storm Red": 0xA28A88,
"Storm Warning": 0x696863,
"Storm's Coming": 0xCFC9BC,
"Stormeye": 0xE7B57F,
"Stormfang": 0x80A7C1,
"Stormhost Silver": 0xBBC6C9,
"Storms Mountain": 0x8D9390,
"Stormvermin Fur": 0x5C5954,
"Stormy": 0xB0BCC3,
"Stormy Bay": 0x9AAFAF,
"Stormy Grey": 0x7D7B7C,
"Stormy Horizon": 0x777799,
"Stormy Mauve": 0x71738C,
"Stormy Oceans": 0x70818E,
"Stormy Pink": 0xE3B5AD,
"Stormy Ridge": 0x507B9A,
"Stormy Sea": 0x6E8082,
"Stormy Strait Green": 0x0F9B8E,
"Stormy Strait Grey": 0x6B8BA4,
"Stormy Sunrise": 0xC8A2C8,
"Stormy Weather": 0x58646D,
"Stout": 0x0F0B0A,
"Stowaway": 0x7B8393,
"Straightforward Green": 0x52A550,
"Straken Green": 0x628026,
"Stranglethorn Ochre": 0xDBB060,
"Stratford Blue": 0x528A9A,
"Stratford Sage": 0x8C8670,
"Stratos": 0x000741,
"Stratos Blue": 0x3799C8,
"Stratosphere": 0x9EC1CC,
"Stratus": 0x8193AA,
"Stravinsky": 0x996E74,
"Stravinsky Pink": 0x77515A,
"Straw": 0xE4D96F,
"Straw Basket": 0xD9C69A,
"Straw Gold": 0xFCF679,
"Straw Harvest": 0xDBC8A2,
"Straw Hat": 0xF0D5A8,
"Straw Hut": 0xBDB268,
"Straw Yellow": 0xF0D696,
"Strawberry": 0xFB2943,
"Strawberry Blonde": 0xFFDADC,
"Strawberry Confection": 0xF4BFC6,
"Strawberry Cough": 0x990011,
"Strawberry Cream": 0xF4C3C4,
"Strawberry Daiquiri": 0xA23D50,
"Strawberry Dreams": 0xFF88AA,
"Strawberry Dust": 0xFFF0EA,
"Strawberry Frappe": 0xFFA2AA,
"Strawberry Freeze": 0xC677A8,
"Strawberry Frosting": 0xFF6FFC,
"Strawberry Glaze": 0xDAB7BE,
"Strawberry Ice": 0xE78B90,
"Strawberry Jam": 0x86423E,
"Strawberry Jubilee": 0xC08591,
"Strawberry Milkshake Red": 0xD47186,
"Strawberry Mousse": 0xA5647E,
"Strawberry Pink": 0xF57F8E,
"Strawberry Pop": 0xEE2255,
"Strawberry Rhubarb": 0xB96364,
"Strawberry Rose": 0xE29991,
"Strawberry Shortcake": 0xFA8E99,
"Strawberry Smash": 0xEE0055,
"Strawberry Smoothie": 0xE79EA6,
"Strawberry Soap": 0xF7879A,
"Strawberry Spinach Red": 0xFA4224,
"Strawberry Surprise": 0xB9758D,
"Strawberry Whip": 0xF9D7CD,
"Strawberry Wine": 0xCB6A6B,
"Strawberry Yogurt": 0xE9B3B4,
"Strawflower": 0xDDBDBA,
"Stream": 0x495E7B,
"Streetwise": 0xD8E2DF,
"Stretch Limo": 0x2B2C30,
"Streusel Cake": 0xD7AA60,
"Strike a Pose": 0x5A4659,
"Strike It Rich": 0xD7B55F,
"Strikemaster": 0x946A81,
"Striking": 0x00667B,
"Striking Purple": 0x944E87,
"Striking Red": 0xC03543,
"String": 0xAA9F96,
"String Ball": 0xF1E8D8,
"String Cheese": 0xFBF1DD,
"String Deep": 0x7F7860,
"String of Pearls": 0xEBE3D8,
"Stromboli": 0x406356,
"Strong Blue": 0x0C06F7,
"Strong Cerise": 0x960056,
"Strong Envy": 0x782E2C,
"Strong Iris": 0x5E5F7E,
"Strong Mocha": 0x6F372D,
"Strong Mustard": 0xA88905,
"Strong Olive": 0x646756,
"Strong Pink": 0xFF0789,
"Strong Sage": 0x2B6460,
"Strong Strawberry": 0x8A3E34,
"Strong Tone Wash": 0x454129,
"Strong Winds": 0xA3A59B,
"Stroopwafel": 0xA86F48,
"Struck by Lightning": 0xF0E1E8,
"Structural Blue": 0x0E9BD1,
"Stucco": 0xA58D7F,
"Stucco Tan": 0xE8DECE,
"Stucco Wall": 0xF1B19D,
"Stucco White": 0xE2D3B9,
"Studer Blue": 0x005577,
"Studio": 0x724AA1,
"Studio Beige": 0xC1B2A1,
"Studio Blue Green": 0x6D817B,
"Studio Clay": 0xD9CCB8,
"Studio Cream": 0xEBDBAA,
"Studio Mauve": 0xC6B9B8,
"Studio Taupe": 0xA59789,
"Studio White": 0xE8DCD5,
"Stuffed Olive": 0xADAC7C,
"Stuffing": 0xBF9B84,
"Stump Green": 0x5E5F4D,
"Stunning Gold": 0xDA9A5D,
"Stunning Sapphire": 0x185887,
"Stunning Shade": 0x676064,
"Sturdy Brown": 0x9B856F,
"Sturgis Grey": 0x57544D,
"Stylish": 0xCEC1A5,
"Su-Nezumi Grey": 0x9FA0A0,
"Suave Grey": 0xD1D8DD,
"Subaqueous": 0x00576F,
"Subdue Red": 0xCCB8B3,
"Subdued Hue": 0xC6B1AD,
"Subdued Sienna": 0xCC896C,
"Sublime": 0xECEDE0,
"Submarine": 0x7A7778,
"Submarine Base": 0x5566AA,
"Submarine Grey": 0x4D585C,
"Submerged": 0x4A7D82,
"Submersible": 0x00576E,
"Subpoena": 0xD8CCC6,
"Subterranean River": 0x1F3B4D,
"Subtle Blue": 0xD9E3E5,
"Subtle Green": 0xB5CBBB,
"Subtle Night Sky": 0x554B4F,
"Subtle Shadow": 0xD8D8D0,
"Subtle Suede": 0xD0BD94,
"Subtle Sunshine": 0xE4D89A,
"Subtle Touch": 0xDBDBD9,
"Subtle Turquoise": 0x7A9693,
"Subtle Violet": 0xB29E9E,
"Subway": 0x87857C,
"Succinct Violet": 0x513B6E,
"Succubus": 0x990022,
"Succulent": 0xDCDD65,
"Succulent Garden": 0xBCCBB2,
"Succulent Green": 0x5E9B86,
"Succulent Leaves": 0x658E64,
"Succulents": 0x007744,
"Such Melodrama": 0xC6C1C5,
"Sudan Brown": 0xAC6B29,
"Sudden Sapphire": 0x6376A9,
"Suddenly Sapphire": 0x1A5897,
"Suds": 0xA6B4C5,
"Suede Beige": 0xD9C7B9,
"Suede Grey": 0x857F7A,
"Suede Indigo": 0x585D6D,
"Suede Leather": 0x896757,
"Suede Vest": 0xD79043,
"Suffragette Yellow": 0xECD0A1,
"Sugar Almond": 0x935529,
"Sugar Beet": 0x834253,
"Sugar Berry": 0xE3D4CD,
"Sugar Cane": 0xEEEFDF,
"Sugar Cane Dahlia": 0xF7C2BF,
"Sugar Chic": 0xFFCCFF,
"Sugar Coated Almond": 0xBB6611,
"Sugar Cookie": 0xF2E2A4,
"Sugar Coral": 0xF56C73,
"Sugar Crystal": 0xF8F4FF,
"Sugar Dust": 0xF9EDE3,
"Sugar Glaze": 0xFFF0E1,
"Sugar Glazed Cashew": 0xCC9955,
"Sugar Grape": 0x9437FF,
"Sugar Honey Cashew": 0xDDAA66,
"Sugar Maple": 0x9C7647,
"Sugar Mint": 0xC0E2C5,
"Sugar Pie": 0xC7A77B,
"Sugar Pine": 0x73776E,
"Sugar Plum": 0x914E75,
"Sugar Pool": 0xAED6D4,
"Sugar Poppy": 0xE58281,
"Sugar Quill": 0xEBE5D7,
"Sugar Rush Peach Pepper": 0xCFB599,
"Sugar Shack": 0xEED5B6,
"Sugar Soap": 0xEFE8DC,
"Sugar Sweet": 0xECC4DC,
"Sugar Swizzle": 0xF3EEE7,
"Sugar Tooth": 0xD68F9F,
"Sugar Tree": 0xA2999A,
"Sugar-Candied Peanuts": 0x8B2E16,
"Sugared Almond": 0xB49D7B,
"Sugared Peach": 0xFDDCC6,
"Sugared Pears": 0xEBD5B7,
"Sugarloaf Brown": 0x554400,
"Sugarpills": 0xFFDDFF,
"Sugilite": 0xA2999F,
"Suit Blue": 0x2B3036,
"Suitable Brown": 0x645A4B,
"Sulfur Pit": 0xE5CC69,
"Sulfur Yellow": 0xDBC058,
"Sulfuric Yellow": 0xA79F5C,
"Sullen Gold": 0xA58B34,
"Sullivan's Heart": 0xF7C5D1,
"Sulphur": 0xDDB614,
"Sulphur Spring": 0xD5D717,
"Sulphur Water": 0xF2F3CF,
"Sulphur Yellow": 0xCCC050,
"Sultan Sand": 0xE3C9BE,
"Sultan's Silk": 0x134558,
"Sultana": 0x674668,
"Sultry Castle": 0x948D84,
"Sultry Sea": 0x506770,
"Sultry Smoke": 0x73696F,
"Sultry Spell": 0x716563,
"Sulu": 0xC6EA80,
"Sumac dyed": 0xE08A1E,
"Sumatra": 0xF6E8CC,
"Sumatra Chicken": 0x4F666A,
"Sumi Ink": 0x595857,
"Sumire Violet": 0x7058A3,
"Summer Air": 0x3FAFCF,
"Summer Beige": 0xDBC2B9,
"Summer Birthday": 0xBBD5EF,
"Summer Bliss": 0xFCF1CF,
"Summer Bloom": 0xD1BEB4,
"Summer Blue": 0x1880A1,
"Summer Blush": 0xF6DFD6,
"Summer Breeze": 0xD3E5DB,
"Summer Citrus": 0xF8822A,
"Summer Cloud": 0xBBFFEE,
"Summer Clover": 0xE5CFDE,
"Summer Concrete": 0x57595D,
"Summer Cosmos": 0xFAD1E0,
"Summer Crush": 0xF2D6DA,
"Summer Daffodil": 0xFFE078,
"Summer Day": 0xEAAA62,
"Summer Dragonfly": 0x83ADA3,
"Summer Field": 0xE2C278,
"Summer Fig": 0xBE4B3B,
"Summer Forest Green": 0x228B22,
"Summer Garden": 0x7AAC80,
"Summer Glow": 0xEEAA44,
"Summer Green": 0x8FB69C,
"Summer Harvest": 0xFFE69A,
"Summer Heat": 0xAA5939,
"Summer Hill": 0xC1A58D,
"Summer House": 0xC8EFE2,
"Summer Hue": 0xFFEFC2,
"Summer in the City": 0xCDA168,
"Summer Jasmine": 0xEEEBD6,
"Summer Lake": 0x0077A7,
"Summer Lily": 0xF8D374,
"Summer Melon": 0xEAD3AE,
"Summer Memory": 0xDF856E,
"Summer Mist": 0xCBEAEE,
"Summer Moon": 0xFDEDCF,
"Summer Night": 0x36576A,
"Summer Orange": 0xFFB653,
"Summer Pear": 0xF5F0D1,
"Summer Rain": 0xE1E8DB,
"Summer Resort": 0xF7EFBA,
"Summer Sandcastle": 0xECE4CE,
"Summer Sea": 0x66A9B1,
"Summer Shade": 0xD1D9D7,
"Summer Shower": 0xE5EBE3,
"Summer Sky": 0x38B0DE,
"Summer Soft Blue": 0x94D3D1,
"Summer Solstice": 0xDED1A3,
"Summer Storm": 0xB0C5DF,
"Summer Sun": 0xFFDC00,
"Summer Sunset": 0xD88167,
"Summer Sunshine": 0xF7E8C7,
"Summer Turquoise": 0x008572,
"Summer Turquoise Blue": 0x4B9CAB,
"Summer Waters": 0x215399,
"Summer Weasel": 0xBB8E55,
"Summer White": 0xF4E9D6,
"Summer's End": 0xDC9367,
"Summer's Eve": 0xA97069,
"Summer's Heat": 0xF9E699,
"Summerday Blue": 0x376698,
"Summertime": 0xF2D178,
"Summertown": 0x8CBC9E,
"Summerville Brown": 0x997651,
"Summerwood": 0xD4B28B,
"Summit": 0x8BB6B8,
"Summit Gray": 0x959491,
"Sumptuous Peach": 0xE5B99B,
"Sun": 0xEF8E38,
"Sun Baked": 0xD27F63,
"Sun Baked Earth": 0xA36658,
"Sun Bleached Mint": 0xE3EFE1,
"Sun Bleached Ochre": 0xE3AB7B,
"Sun Bleached Pink": 0xFADADD,
"Sun City": 0xFFFED9,
"Sun Crete": 0xFF8C00,
"Sun Dance": 0xC4AA4D,
"Sun Deck": 0xF0DCA0,
"Sun Dial": 0xC79B36,
"Sun Drenched": 0xFFE7A3,
"Sun Dried": 0xEABD5B,
"Sun Dried Tomato": 0x752329,
"Sun Drops": 0xEAAF11,
"Sun Dust": 0xF6E0A4,
"Sun Glare": 0xF1F4D1,
"Sun Glint": 0xFAF3D9,
"Sun God": 0xDFBA5A,
"Sun Kiss": 0xEBD1BB,
"Sun Kissed": 0xFFEEC2,
"Sun Orange": 0xF48037,
"Sun Ray": 0xFFB219,
"Sun Salutation": 0xE7C26F,
"Sun Shower": 0xFFDE73,
"Sun Song": 0xE9AD17,
"Sun Splashed": 0xFBD795,
"Sun Surprise": 0xFFF2A0,
"Sun Touched": 0xFAD675,
"Sun Valley": 0x698538,
"Sun Wukong's Crown": 0xECC033,
"Sun Yellow": 0xFFDF22,
"Sun-Kissed Brick": 0xB75E41,
"Sun's Glory": 0xF6F2E5,
"Sun's Rage": 0xA94E37,
"Suna White": 0xDCD3B2,
"Sunbaked Adobe": 0xAB9A6E,
"Sunbeam": 0xF5EDB2,
"Sunbeam Yellow": 0xF0D39D,
"Sunblast Yellow": 0xFEFF0F,
"Sunbleached": 0xE5E0D7,
"Sunbound": 0xF9D964,
"Sunburn": 0xB37256,
"Sunburnt Cyclops": 0xFF404C,
"Sunburnt Toes": 0xD79584,
"Sunburst": | |
import os
import sys
from configobj import ConfigObj, ConfigObjError
from logging import (getLevelName, getLogger,
FileHandler, StreamHandler, Formatter)
from optparse import OptionParser, SUPPRESS_HELP
from landscape import VERSION
from landscape.lib.persist import Persist
from landscape.upgraders import UPGRADE_MANAGERS
def init_logging(configuration, program_name):
"""Given a basic configuration, set up logging."""
handlers = []
if not os.path.exists(configuration.log_dir):
os.makedirs(configuration.log_dir)
log_filename = os.path.join(configuration.log_dir, program_name + ".log")
handlers.append(FileHandler(log_filename))
if not configuration.quiet:
handlers.append(StreamHandler(sys.stdout))
getLogger().setLevel(getLevelName(configuration.log_level.upper()))
for handler in handlers:
getLogger().addHandler(handler)
format = ("%(asctime)s %(levelname)-8s [%(threadName)-10s] "
"%(message)s")
handler.setFormatter(Formatter(format))
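# Illustrative sketch (hypothetical values, not shipped with the client):
# init_logging only needs an object exposing ``log_dir``, ``quiet`` and
# ``log_level`` attributes, e.g.
#     class FakeConfig(object):
#         log_dir = "/tmp/landscape-logs"
#         quiet = True
#         log_level = "debug"
#     init_logging(FakeConfig(), "broker")   # writes /tmp/landscape-logs/broker.log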
class ConfigSpecOptionParser(OptionParser):
_config_spec_definitions = {}
def __init__(self, unsaved_options=None):
OptionParser.__init__(self, unsaved_options)
def add_option(self, *args, **kwargs):
option = OptionParser.add_option(self, *args, **kwargs)
print dir(option)
print option.get_opt_string()
return option
class BaseConfiguration(object):
"""Base class for configuration implementations.
@cvar required_options: Optionally, a sequence of key names to require when
reading or writing a configuration.
@cvar unsaved_options: Optionally, a sequence of key names to never write
to the configuration file. This is useful when you want to provide
command-line options that should never end up in a configuration file.
@cvar default_config_filenames: A sequence of filenames to check when
reading or writing a configuration.
Default values for supported options are set as in L{make_parser}.
"""
required_options = ()
unsaved_options = ()
default_config_filenames = ["/etc/landscape/client.conf"]
if (os.path.dirname(os.path.abspath(sys.argv[0]))
== os.path.abspath("scripts")):
default_config_filenames.insert(0, "landscape-client.conf")
default_config_filenames = tuple(default_config_filenames)
config_section = "client"
def __init__(self):
self._set_options = {}
self._command_line_args = []
self._command_line_options = {}
self._config_filename = None
self._config_file_options = {}
self._parser = self.make_parser()
self._command_line_defaults = self._parser.defaults.copy()
# We don't want them mixed with explicitly given options,
# otherwise we can't define the precedence properly.
self._parser.defaults.clear()
def __getattr__(self, name):
"""Find and return the value of the given configuration parameter.
The following sources will be searched:
- The attributes that were explicitly set on this object,
- The parameters specified on the command line,
- The parameters specified in the configuration file, and
- The defaults.
If no values are found and the parameter does exist as a possible
parameter, C{None} is returned.
Otherwise C{AttributeError} is raised.
"""
for options in [self._set_options,
self._command_line_options,
self._config_file_options,
self._command_line_defaults]:
if name in options:
value = options[name]
break
else:
if self._parser.has_option("--" + name.replace("_", "-")):
value = None
else:
raise AttributeError(name)
if isinstance(value, basestring):
option = self._parser.get_option("--" + name.replace("_", "-"))
if option is not None:
value = option.convert_value(None, value)
return value
def clone(self):
"""
Return a new configuration object, with the same settings as this one.
"""
config = self.__class__()
config._set_options = self._set_options.copy()
config._command_line_options = self._command_line_options.copy()
config._config_filename = self._config_filename
config._config_file_options = self._config_file_options.copy()
return config
def get(self, name, default=None):
"""Return the value of the C{name} option or C{default}."""
try:
return self.__getattr__(name)
except AttributeError:
return default
def __setattr__(self, name, value):
"""Set a configuration parameter.
If the name begins with C{_}, it will only be set on this object and
not stored in the configuration file.
"""
if name.startswith("_"):
super(BaseConfiguration, self).__setattr__(name, value)
else:
self._set_options[name] = value
def reload(self):
"""Reload options using the configured command line arguments.
@see: L{load_command_line}
"""
self.load(self._command_line_args)
def load(self, args, accept_nonexistent_default_config=False):
"""
Load configuration data from command line arguments and a config file.
@param accept_nonexistent_default_config: If True, don't complain if
default configuration files aren't found
@raise: A SystemExit if the arguments are bad.
"""
self.load_command_line(args)
if self.config:
config_filenames = [self.config]
allow_missing = False
else:
config_filenames = self.default_config_filenames
allow_missing = accept_nonexistent_default_config
# Parse configuration file, if found.
for config_filename in config_filenames:
if (os.path.isfile(config_filename)
and os.access(config_filename, os.R_OK)):
self.load_configuration_file(config_filename)
break
else:
if not allow_missing:
if len(config_filenames) == 1:
message = (
"error: config file %s can't be read" %
config_filenames[0])
else:
message = "error: no config file could be read"
sys.exit(message)
self._load_external_options()
# Check that all needed options were given.
for option in self.required_options:
if not getattr(self, option):
sys.exit("error: must specify --%s "
"or the '%s' directive in the config file."
% (option.replace('_', '-'), option))
def _load_external_options(self):
"""Hook for loading options from elsewhere (e.g. for --import)."""
def load_command_line(self, args):
"""Load configuration data from the given command line."""
self._command_line_args = args
values = self._parser.parse_args(args)[0]
self._command_line_options = vars(values)
def load_configuration_file(self, filename):
"""Load configuration data from the given file name.
If any data has already been set on this configuration object,
then the old data will take precedence.
"""
self._config_filename = filename
config_obj = self._get_config_object()
try:
self._config_file_options = config_obj[self.config_section]
except KeyError:
pass
def _get_config_object(self, alternative_config=None):
"""Create a L{ConfigObj} consistent with our preferences.
@param alternative_config: Optional readable source to read from instead of
the default configuration file.
"""
config_source = alternative_config or self.get_config_filename()
# Setting list_values to False prevents ConfigObj from being "smart"
# about lists (it now treats them as strings). See bug #1228301 for
# more context.
# Setting raise_errors to False causes ConfigObj to batch all parsing
# errors into one ConfigObjError raised at the end of the parse instead
# of raising the first one and then exiting. This also allows us to
# recover the good config values in the error handler below.
# Setting write_empty_values to True prevents configObj writes
# from writing "" as an empty value, which get_plugins interprets as
# '""' which search for a plugin named "". See bug #1241821.
try:
config_obj = ConfigObj(config_source, list_values=False,
raise_errors=False, write_empty_values=True)
except ConfigObjError, e:
logger = getLogger()
logger.warn(str(e))
# Good configuration values are recovered here
config_obj = e.config
return config_obj
def write(self):
"""Write back configuration to the configuration file.
Values which match the default option in the parser won't be saved.
Options are considered in the following precedence:
1. Manually set options (C{config.option = value})
2. Options passed in the command line
3. Previously existent options in the configuration file
The filename picked for saving configuration options is the one
returned by L{get_config_filename}.
"""
# The filename we'll write to
filename = self.get_config_filename()
# Make sure we read the old values from the config file so that we
# don't remove *unrelated* values.
config_obj = self._get_config_object()
if self.config_section not in config_obj:
config_obj[self.config_section] = {}
all_options = self._config_file_options.copy()
all_options.update(self._command_line_options)
all_options.update(self._set_options)
section = config_obj[self.config_section]
for name, value in all_options.items():
if name != "config" and name not in self.unsaved_options:
if (value == self._command_line_defaults.get(name) and
name not in self._config_file_options and
name not in self._command_line_options):
# We don't want to write this value to the config file
# as it is default value and as not present in the
# config file
if name in config_obj[self.config_section]:
del config_obj[self.config_section][name]
else:
section[name] = value
config_obj[self.config_section] = section
config_obj.filename = filename
config_obj.write()
def make_parser(self):
"""Parser factory for supported options
@return: An L{OptionParser} preset with options that all
landscape-related programs accept. These include
- C{config} (C{None})
- C{data_path} (C{"/var/lib/landscape/client/"})
"""
parser = OptionParser(version=VERSION)
parser.add_option("-c", "--config", metavar="FILE",
help="Use config from this file (any command line "
"options override settings from the file) "
"(default: '/etc/landscape/client.conf').")
parser.add_option("-d", "--data-path", metavar="PATH",
default="/var/lib/landscape/client/",
help="The directory to store data files in "
"(default: '/var/lib/landscape/client/').")
return parser
def get_config_filename(self):
"""Pick the proper configuration file.
The picked filename is:
1. C{self.config}, if defined
2. The last loaded configuration file, if any
3. The first filename in C{self.default_config_filenames}
"""
if self.config:
return self.config
if self._config_filename:
return self._config_filename
if self.default_config_filenames:
for potential_config_file in self.default_config_filenames:
if os.access(potential_config_file, os.R_OK):
return potential_config_file
return self.default_config_filenames[0]
return None
def get_command_line_options(self):
"""Get currently loaded command line options.
@see: L{load_command_line}
"""
return self._command_line_options
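# Illustrative sketch (hypothetical values, not part of the library): the
# precedence rules documented in __getattr__ can be exercised without touching
# a config file, e.g.
#     config = BaseConfiguration()
#     config.data_path                                  # -> "/var/lib/landscape/client/" (parser default)
#     config.load_command_line(["--data-path", "/tmp/client"])
#     config.data_path                                  # -> "/tmp/client" (command line wins over default)
#     config.data_path = "/srv/client"
#     config.data_path                                  # -> "/srv/client" (explicitly set options win)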
class Configuration(BaseConfiguration):
"""Configuration data for Landscape client.
This contains all simple data, some of it calculated.
"""
DEFAULT_URL = "https://landscape.canonical.com/message-system"
def make_parser(self):
"""Parser factory for supported options.
@return: An L{OptionParser} preset for all options
from L{BaseConfiguration.make_parser} plus:
- C{quiet} (C{False})
- C{log_dir} (C{"/var/log/landscape"})
- C{log_level} (C{"info"})
- C{url} (C{"http://landscape.canonical.com/message-system"})
- C{ping_url} (C{"http://landscape.canonical.com/ping"})
- C{ssl_public_key}
- C{ignore_sigint} (C{False})
"""
parser = super(Configuration, self).make_parser()
parser.add_option("-q", "--quiet", default=False, action="store_true",
help="Do not log to the standard output.")
parser.add_option("-l", "--log-dir", metavar="FILE",
help="The directory to write log files to "
"(default: '/var/log/landscape').",
default="/var/log/landscape")
parser.add_option("--log-level", default="info",
help="One of debug, info, warning, error or "
"critical.")
parser.add_option("-u", "--url", default=self.DEFAULT_URL,
help="The server URL to connect to.")
parser.add_option("--ping-url",
help="The URL to perform lightweight exchange "
"initiation with.",
default="http://landscape.canonical.com/ping")
parser.add_option("-k", "--ssl-public-key",
help="The public SSL key to verify the server. "
"Only used if | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OptExp (c) University of Manchester 2018
OptExp is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>
Created on Tue Nov 27 16:01:46 2018
@author: pablo
"""
import numpy as np
import pandas as pd
import itertools, re
from scipy.stats import f as FDist, ncf as ncFDist
from .doebase import doeTemplate, promoterList, plasmidList, read_excel
def defineTemplate(parts, genes):
""" Generates the DoE template format from a list of parts and genes
- RefParts.csv: Name, Type, Part
- GeneParts.csv: Name, Type, Part, Step
Type: origin, resistance, promoter, gene
Step: Enzyme step in the pathway (eventually could be implemented
for the other genetic parts)
"""
prom = []
ori = []
for i in parts.index:
ptype = parts.loc[i,'Type']
name = parts.loc[i,'Name']
if ptype == 'promoter':
prom.append(name)
elif ptype == 'origin':
ori.append(name)
for i in range(0,len(prom)):
prom.append(None)
tree = []
gdict = {}
for i in genes.index:
name = genes.loc[i,'Name']
step = "gene%00d" % (int(genes.loc[i,'Step']),)
if step not in tree:
tree.append(step)
if step not in gdict:
gdict[step] = []
gdict[step].append(name)
doe = doeTemplate(tree, origins=ori, promoters=prom, genes=gdict, positional=False)
return doe, parts, genes
def mainDoe(doe,size):
""" Main DoE procedure
"""
fact, partinfo = read_excel( None, doedf=doe )
seed = np.random.randint(10000)
diagnostics = callDoE(fact, size, seed=seed)
return diagnostics
def getDoe(parts, genes,size=32):
""" DoE request from parts and genes files (see defineTemplate)
"""
doe,parts,genes = defineTemplate(parts, genes)
diagnostics = mainDoe(doe,size)
return diagnostics
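# A minimal input sketch (part and gene names below are hypothetical, not from
# any real reference set); the columns follow the RefParts.csv / GeneParts.csv
# layout described in defineTemplate:
#   parts = pd.DataFrame({'Name': ['ori1', 'prom1', 'prom2'],
#                         'Type': ['origin', 'promoter', 'promoter'],
#                         'Part': ['p0', 'p1', 'p2']})
#   genes = pd.DataFrame({'Name': ['geneA', 'geneB'],
#                         'Type': ['gene', 'gene'],
#                         'Part': ['g1', 'g2'],
#                         'Step': [1, 2]})
#   diagnostics = getDoe(parts, genes, size=32)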
def doeRequest(f, ftype, size):
""" DoE request from the template format
f: filename
ftype: csv or xlsx
size: lib size
"""
print('Received:',ftype)
if ftype == 'csv':
doe = pd.read_csv( f )
elif ftype == 'xlsx' or ftype == 'xls':
doe = pd.read_excel( f )
else:
doe = pd.read_table( f )
diagnostics = mainDoe(doe,size)
return diagnostics
def evaldes( steps, variants, npromoters, nplasmids, libsize, positional,
outfile=None, random=False ):
""" Generate and evaluate an optimal design of a pathway circuit following the template:
1. Vector: 1 to nplasmids
2. Promoter: 1 to npromoters
3. Gene: 1 to variants
4. Terminator + Promoter: None to npromoters, prob(None)= 0.5
5. Gene: 1 to variants
...
Parameters:
- steps: number of steps in the pathway
- variants: number of variants in each step (currently fixed)
- npromoters: number of promoters in each step
- nplasmids: number of plasmids in each step
- libsize: desired library size
- positional: gene rearrangement is allowed
- outfile: output the doe into outfile if given
- random: random DoE instead of optimal
"""
plasmids = plasmidList(nplasmids)
promoters = promoterList(npromoters)
tree = []
genes = {}
for i in np.arange(steps):
rid = "r%0d" % (i,)
tree.append(rid)
genes[rid] = []
for j in np.arange(variants):
gid = "g%0d_%0d" % (i,j)
genes[rid].append(gid)
doe = doeTemplate( tree, plasmids, promoters, genes, positional )
if outfile is not None:
doe.to_excel( outfile, index=False )
fact, partinfo = read_excel( outfile )
else:
fact, partinfo = read_excel( None, doedf=doe )
seed = np.random.randint(10000)
diagnostics = callDoE(fact, size=libsize, seed=seed)
diagnostics['steps'] = steps
diagnostics['variants'] = variants
diagnostics['npromoters'] = npromoters
diagnostics['nplasmids'] = nplasmids
return diagnostics
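# Illustrative call (numbers chosen arbitrarily): evaluate an optimal design for
# a 3-step pathway with 2 gene variants per step, 2 promoters and 1 plasmid,
# aiming at a 32-construct library without positional rearrangement:
#   diagnostics = evaldes(steps=3, variants=2, npromoters=2, nplasmids=1,
#                         libsize=32, positional=False)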
def callDoE(fact, size, seed, starts=1, RMSE=10,
alpha=0.05, random=False ):
starts = 1
RMSE = 10
alpha = 0.05
try:
factors, fnames, diagnostics = makeDoeOptDes(fact, size=size,
seed=seed, starts=starts,
RMSE= RMSE, alpha=alpha,
random=random )
except:
raise
diagnostics['libsize'] = size
return diagnostics
def makeDoeOptDes(fact, size, seed=None, starts=1040, makeFullFactorial=False, RMSE=1, alpha=0.05, verbose=False, random=False):
""" Full DoE script:
- fact: a dictionary containing the desired design
"""
# To Do: full factorial
factors = []
fnames = []
npos = 0
nfact = 0
for pos in sorted(fact):
name = fact[pos].component+str(pos)
if len(fact[pos].levels) > 1:
nfact += 1
# Currently only working with categorical
# if fact[pos]['component'] != 'gene' and '-' not in fact[pos]['levels']:
# varType = 'Discrete Numeric'
# theLevels = [ x for x in range(1, len(fact[pos]['levels'])+1 ) ]
# factors.append( theLevels )
# fnames.append(name)
# else:
# varType = 'Categorical'
theLevels = [ '"L{}"'.format(x) for x in range(1, len(fact[pos].levels)+1 ) ]
factors.append(set(theLevels))
fnames.append(name)
if fact[pos].positional is not None:
npos += 1
if npos > 1:
# Total possible arrangements in orthogonal latin squares
# varType = 'Categorical'
theLevels = ['"L{}"'.format(x) for x in range(1, npos*(npos-1)+1)]
factors.append( set( theLevels ) )
fnames.append('pos')
nfact += 1
if seed is not None:
np.random.seed( seed )
else:
seed = np.random.randint(100000, size=1)
np.random.seed( seed )
initGrid(factors)
if random:
# If set, perform a random design instead of a D-optimal design
M = randExp( factors, n=int(size) )
J = Deff2(M, factors)
else:
if np.product( [len(x) for x in factors] ) < size:
# raise Exception('Library size is too large!')
# TO DO: make a full factorial
M = fullFactorial( factors )
J = Deff2(M, factors)
size = M.shape[0]
ix = np.arange(size)
np.random.shuffle( ix )
M = M[ix,:]
else:
M, J = CoordExch(factors, n=int(size), runs=2, verb=verbose, mode='coordexch', seed=seed)
if M is None:
raise Exception('No solution')
M1 = MapDesign2(factors, M)
X = mapFactors2( M, factors )
df = pd.DataFrame(M1, columns=fnames)
pows = CatPower(X , factors, RMSE=RMSE, alpha=alpha)
rpvs = RPV(X)
diagnostics = {'J': J, 'pow': pows, 'rpv': rpvs, 'X': X,
'M': M, 'factors': factors, 'fact': fact,
'M1': M1, 'df': df, 'names': fnames, 'seed': seed}
return factors, fnames, diagnostics
def Deff(X):
# D-efficiency
return (100.0/X.shape[0]) * ( np.linalg.det( np.dot( np.transpose( X ), X ) )**(1.0/X.shape[1]))
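# In other words, for a model matrix X with n runs (rows) and p terms (columns),
# Deff computes 100 * det(X'X)**(1/p) / n, the usual D-efficiency criterion.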
def Deff2(M, factors):
X = mapFactors2(M, factors)
return (100.0/X.shape[0]) * ( np.linalg.det( np.dot( np.transpose( X ), X ) )**(1.0/X.shape[1]))
def Dopt(X):
# D-optimality
return np.linalg.det( np.dot( np.transpose( X ), X ) )
def Dopt2(M, factors):
# D-optimality
X = mapFactors2(M, factors)
return np.linalg.det( np.dot( np.transpose( X ), X ) )
def SE(X):
# Estimation efficiency
return np.diag( np.linalg.inv( np.dot( np.transpose( X ), X ) ) )
def RPV(X):
# Relative prediction variance
try:
XXi = np.linalg.inv( np.dot( np.transpose( X ), X ) )
except:
return [np.nan for i in np.arange(X.shape[0])]
return [np.dot( np.dot( np.transpose( X[i,:] ), XXi), X[i,:]) for i in np.arange(X.shape[0])]
def Contrib(X):
cn = []
for i in range(0, X.shape[0]):
cn.append( Dopt( np.vstack( [X[:i,:], X[(i+1):,:]] ) ) )
return cn
def VarAdd(X,xj):
# Variance of adding/removing one experiment
return np.dot( np.dot( np.transpose(xj) , np.linalg.inv( np.dot( np.transpose( X ), X) ) ), xj )
def randExp( factors, n ):
# Generate n random experiments
V = None
for levels in factors:
vnew = np.random.randint(0, len(levels), n)
if V is None:
V = vnew
else:
V = np.vstack( [V, vnew] )
if len(V.shape) == 1:
V = np.expand_dims(V, axis=0)
return np.transpose( V )
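# For instance (purely illustrative), randExp([set('ab'), set('xyz')], n=4)
# yields a 4x2 integer array: one row per random experiment, one column per
# factor, holding level indices in the range 0..len(levels)-1.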
#%%
def grid(n, weighted=True):
""" Provide normalized vectors of n-1 dummy variables
Useful for computing the model matrix (X) to
use pseudo-orthogonal terms in the n-1 hypercube.
(Experimental)
In JMP, grid(3) is multiplied by sqrt(2), grid(4) by
sqrt(3), which brings back the weight of the number of
factors
"""
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
base = np.eye(n)*2 - 1
pc = PCA(n-1, whiten=True, random_state=0)
bt = pc.fit_transform(base)
W = normalize(bt)
if weighted:
W = W*np.sqrt(n-1)
return W
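# Rough intuition (illustrative, not exact output): grid(2) collapses the two
# levels to the scalars -1 and +1, while grid(3), after the sqrt(2) weighting,
# places the three levels at equally spaced points of length sqrt(2) on a
# circle in the 2D plane, i.e. pseudo-orthogonal coordinates for a 3-level
# categorical factor.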
# Precompute the hypercube grids
gridList = {}
def initGrid(factors):
global gridList
vmax = set( [len(x) for x in factors] )
for i in vmax:
try:
if i < 2:
continue
except:
continue
gridList[i] = grid(i)
#%%
def mapFactors( factors, M ):
# Map a numerical factor into [-1,1] range, create dummy variables for categorical factors
Mn = np.transpose( [np.ones( M.shape[0] )] )
for i in np.arange( len(factors) ):
v = factors[i]
if type(v) == list:
if len(set(v)) > 1:
# Normalize between [-1,+1]
Vn = (2*(M[:,i] - M[:,i].min())/(M[:,i].max()-M[:,i].min()) - 1)
Vn = np.transpose( [Vn] )
else:
Vn = np.transpose( [np.ones( M.shape[0] )] )
else:
if len(v) > 1:
Vn = -np.ones( (M.shape[0],len(v)) )
j = np.arange(M.shape[0])
Vn[j,M[j,i]] = 1
Vn = Vn[:,:-1]
else:
Vn = np.transpose( [np.ones( M.shape[0] )] )
if Mn is None:
Mn = Vn
else:
Mn = np.hstack( [Mn, Vn])
return Mn
def mapFactors2( M, factors ):
# Map a numerical factor into [-1,1] range,
# create orthogonal coordinates for | |
external databases.
Notes
------
None
Returns
-------
flowdata : data-type
obtained streamflow observation dataframe between
Startyear and EndYear.
obtaindata : bool
True indicates data were successfully obtained, False indicates no data were
found for this gauge
obs_DA : float
The drainage area of this gauge read from HYDAT database
Examples
--------
>>> from WriteRavenInputs import DownloadStreamflowdata_CA
>>> Station_NM = '05PC019'
>>> StartYear = 2010
>>> EndYear = 2011
>>> CA_HYDAT = HYDAT_Path
>>> flowdata,obs_DA,obtaindata = DownloadStreamflowdata_CA(Station_NM,CA_HYDAT,StartYear,EndYear)
"""
obtaindata = True
con = sqlite3.connect(CA_HYDAT)
### obtain station info
sqlstat = "SELECT STATION_NUMBER, DRAINAGE_AREA_GROSS, DRAINAGE_AREA_EFFECT from STATIONS WHERE STATION_NUMBER=?"
Station_info = pd.read_sql_query(sqlstat, con, params=[Station_NM])
if len(Station_info) == 0:
flowdata = -1
obs_DA = -9999
obtaindata = False
return flowdata, obs_DA, obtaindata
DAS = np.array(
[
-1.2345,
Station_info["DRAINAGE_AREA_GROSS"].values[0],
Station_info["DRAINAGE_AREA_EFFECT"].values[0],
]
)
DAS = DAS[DAS != None]
if len(DAS) > 0:
obs_DA = np.nanmax(DAS)
else:
obs_DA = -1.2345
## obtain streamflow data
sqlstat = "select * from DLY_FLOWS WHERE STATION_NUMBER = ?"
Readed_Streamflow = pd.read_sql_query(sqlstat, con, params=[Station_NM])
Readed_Streamflow = Readed_Streamflow[Readed_Streamflow["YEAR"] >= StartYear]
Readed_Streamflow = Readed_Streamflow[Readed_Streamflow["YEAR"] <= EndYear]
## Initial dataframe
if len(Readed_Streamflow) == 0:
flowdata = -1
obs_DA = -9999
obtaindata = False
return flowdata, obs_DA, obtaindata
year_ini = Readed_Streamflow["YEAR"].values[0]
mon_ini = Readed_Streamflow["MONTH"].values[0]
year_end = Readed_Streamflow["YEAR"].values[len(Readed_Streamflow) - 1]
mon_end = Readed_Streamflow["MONTH"].values[len(Readed_Streamflow) - 1]
ndays_end = Readed_Streamflow["NO_DAYS"].values[len(Readed_Streamflow) - 1]
Date_ini = str(year_ini) + "-" + str(mon_ini) + "-" + "01"
Date_end = str(year_end) + "-" + str(mon_end) + "-" + str(ndays_end)
Date = pd.date_range(start=Date_ini, end=Date_end, freq="D")
flowdata = pd.DataFrame(
np.full((len(Date), 2), -1.2345), columns=["Flow", "QC"], index=Date
)
### loop read streamflow data
for index, row in Readed_Streamflow.iterrows():
NDays = row["NO_DAYS"]
for iday in range(1, NDays + 1):
cdate = pd.to_datetime(
{"year": [row["YEAR"]], "month": [row["MONTH"]], "day": [iday]}
).values
# cdates = pd.to_datetime(str(row['YEAR'])+'-'+str(row['MONTH'])+'-'+str(iday))
if (
row["FLOW" + str(iday)] != np.nan
and row["FLOW" + str(iday)] != None
and float(row["FLOW" + str(iday)]) > 0
):
flowdata.loc[cdate, "Flow"] = row["FLOW" + str(iday)]
flowdata.loc[cdate, "QC"] = row["FLOW_SYMBOL" + str(iday)]
return flowdata, obs_DA, obtaindata
def DownloadStreamflowdata_US(Station_NM, StartYear, EndYear):
"""Return streamflow data from USGS website
Function used to obtain streamflow data of a certain gauge from the USGS website
Parameters
----------
Station_NM : string
The name of the gauge, "05127000"
Startyear : integer
Start year of simulation. Used to
read streamflow observations from external databases.
EndYear : integer
End year of simulation. Used to
read streamflow observations from external databases.
Notes
------
None
Returns
-------
flowdata : data-type
obtained streamflow observation dataframe between
Startyear and EndYear.
obtaindata : bool
True indicates data were successfully obtained, False indicates no data were
found for this gauge
obs_DA : float
The drainage area of this gauge read from the USGS website
Examples
--------
>>> from WriteRavenInputs import DownloadStreamflowdata_US
>>> Station_NM = '05127000'
>>> StartYear = 2010
>>> EndYear = 2011
>>> flowdata,obs_DA,obtaindata = DownloadStreamflowdata_US(Station_NM,StartYear,EndYear)
"""
obtaindata = True
#### Obtain station info
urlstlist = "https://waterdata.usgs.gov/nwis/inventory/?format=rdb&site_no=" + str(
int(Station_NM)
).zfill(8)
Reslist = urllib.request.urlopen(urlstlist)
stlistdata = Reslist.read()
stlistdata = stlistdata.splitlines()
station_info_name = stlistdata[len(stlistdata) - 3].split()
station_info_value = stlistdata[len(stlistdata) - 1].split()
if (
station_info_name[len(station_info_name) - 1].decode("utf-8")
!= "contrib_drain_area_va"
):
obs_DA = -1.2345
else:
try:
obs_DA = (
float(station_info_value[len(station_info_value) - 1].decode("utf-8"))
* 2.58999
) # square miles to square km
except:
try:
obs_DA = (
float(station_info_value[len(station_info_value) - 2].decode("utf-8"))
* 2.58999
) # square miles to square km
except:
obs_DA = -1.2345
## try to obtain data with in this period
Date_ini = str(StartYear) + "-" + "01" + "-" + "01"
Date_end = str(EndYear) + "-" + "12" + "-" + "31"
urlstlist = (
"https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no="
+ str(int(Station_NM)).zfill(8)
+ "&referred_module=sw&begin_date="
+ Date_ini
+ "&end_date="
+ Date_end
)
# print(urlstlist)
Reslist = urllib.request.urlopen(urlstlist)
stlistdata = Reslist.read()
stlistdata = stlistdata.splitlines()
##obtain start of the data rows
datarow = -1
for i in range(0, len(stlistdata)):
istlistdata = stlistdata[i].split()
if len(istlistdata) == 0:
return -1.2345,-1.2345, False
if istlistdata[0] == "#" or len(istlistdata) != 5:
continue
if istlistdata[1].decode("utf-8") == str(int(Station_NM)).zfill(8):
datarow = i
break
Date_ini = stlistdata[datarow].split()[2].decode("utf-8")
Date_end = stlistdata[len(stlistdata) - 1].split()[2].decode("utf-8")
Date = pd.date_range(start=Date_ini, end=Date_end, freq="D")
flowdata = pd.DataFrame(
np.full((len(Date), 2), -1.2345), columns=["Flow", "QC"], index=Date
)
for i in range(datarow, len(stlistdata)):
istlistdata = stlistdata[i].split()
if len(istlistdata) < 5 or istlistdata[3].decode("utf-8") == "Ice":
continue
else:
date = istlistdata[2].decode("utf-8")
cdate = pd.to_datetime(
{"year": [date[0:4]], "month": [date[5:7]], "day": [date[8:10]]}
).values
flowdata.loc[cdate, "Flow"] = (
float(istlistdata[3].decode("utf-8")) * 0.0283168
) # cubic feet per second to cubic meters per second
flowdata.loc[cdate, "QC"] = istlistdata[4].decode("utf-8")
return flowdata, obs_DA, obtaindata
def Generate_Raven_Obsrvt_String(
flowdata, obsnm, outObsfileFolder
): # Writeobsrvtfile(flowdata,obsnm,outObsfileFolder):
"""Generate a string in Raven observation rvt input file format
Function that extracts the subbasin id and observation gauge name from obsnm,
reformats the streamflow observation data in flowdata, and
generates a string that follows the Raven observation rvt input file format
Parameters
----------
flowdata : data-type
Obtained streamflow observation dataframe between
Startyear and EndYear. The index of the dataframe should be Date in
'%Y-%m-%d' format, and the streamflow observation data in m3/s should
be in the 'Flow' column
obsnm : data-type
Dataframe of observation gauge information for this gauge including
at least the following two columns:
'Obs_NM': the name of the streamflow observation gauge
'SubId' : the subbasin Id where this streamflow gauge is located.
outObsfileFolder : string
Path and name of the output folder to save the observation rvt file
of each gauge
Notes
------
None
Returns
-------
obs_rvt_file_path : string
It is the file path, including the file name, of the Raven rvt input file
for this gauge
output_string : string
It is the string that contains the content of the raven rvt input
file of this gauge
See Also
--------
DownloadStreamflowdata_US : Generate flowdata inputs needed by this function
DownloadStreamflowdata_CA : Generate flowdata inputs needed by this function
Examples
--------
>>> from WriteRavenInputs import DownloadStreamflowdata_US,Generate_Raven_Obsrvt_String
>>> import pandas as pd
>>> Station_NM = '05127000'
>>> StartYear = 2010
>>> EndYear = 2011
>>> Subbasin_ID = 1
>>> flowdata_read, DA_obs_data,Finddata = DownloadStreamflowdata_US(Station_NM = Station_NM,StartYear = StartYear,EndYear = EndYear)
>>> Date = pd.date_range(start=str(StartYear)+'-'+'01'+'-'+'01', end=str(EndYear)+'-'+'12'+'-'+'31', freq='D')
>>> flowdata = pd.DataFrame(np.full((len(Date),2),-1.2345),columns = ['Flow','QC'],index = Date)
>>> flowdata.loc[flowdata.index.isin(flowdata_read.index), ['Flow', 'QC']] = flowdata_read[['Flow', 'QC']]
>>> obsnms = pd.DataFrame(data=[Subbasin_ID,Station_NM],columns=['SubId','Obs_NM'])
>>> Outputfolderrvt = 'c:/some_folder_to_store_raven_rvt_file'
>>> obs_rvt_file_path, output_string = Generate_Raven_Obsrvt_String(flowdata = flowdata,obsnm = obsnms,outObsfileFolder = Outputfolderrvt)
"""
output_string_list = []
obs_rvt_file_path = os.path.join(
outObsfileFolder, obsnm["Obs_NM"] + "_" + str(obsnm["SubId"]) + ".rvt"
)
output_string_list.append(
":ObservationData HYDROGRAPH " + str(obsnm["SubId"]) + " m3/s"
)
output_string_list.append(
flowdata.index[0].strftime("%Y-%m-%d")
+ " "
+ "00:00:00 "
+ "1 "
+ str(len(flowdata))
)
for id in range(0, len(flowdata)):
output_string_list.append(" " + str(flowdata["Flow"].values[id]))
output_string_list.append(":EndObservationData" + "\n")
output_string = "\n".join(output_string_list)
return obs_rvt_file_path, output_string
def Generate_Raven_Timeseries_rvt_String(
outFolderraven, outObsfileFolder, obsnm, Model_Name
): # Modify_template_rvt(outFolderraven,outObsfileFolder,obsnm):
"""Generate a string in Raven time series rvt input file format
Function used to modify the Raven model time series rvt file (Model_Name.rvt).
It adds ":RedirectToFile ./obs/gaugename_subbasinid.rvt"
for each gauge at the end of the model rvt file (Model_Name.rvt)
Parameters
----------
outFolderraven : String
Path and name of the output folder of Raven input files
outObsfileFolder : String
Path and name of the output folder to save the observation rvt file
of each gauge
obsnm : data-type
Dataframe of observation gauge information for this gauge including
at least the following two columns:
'Obs_NM': the name of the streamflow observation gauge
'SubId' : the subbasin Id where this streamflow gauge is located.
Model_Name : string
The Raven model base name. File name of the raven input will be
Model_Name.xxx.
Notes
------
None
See Also
--------
DownloadStreamflowdata_US : Generate flowdata inputs
needed by this function
DownloadStreamflowdata_CA : Generate flowdata inputs
needed by this function
Returns
-------
output_string : string
It is the string that contains the content that will be used to
modify the raven time series rvt input file of this gauge
Examples
--------
>>> from WriteRavenInputs import Generate_Raven_Timeseries_rvt_String
>>> outFolderraven = 'c:/path_to_the_raven_input_folder/'
>>> outObsfileFolder = 'c:/path_to_the_raven_streamflow_observation gauge_folder/'
>>> Subbasin_ID = 1
>>> Station_NM = '05127000'
>>> obsnms = pd.DataFrame(data=[Subbasin_ID,Station_NM],columns=['SubId','Obs_NM'])
>>> Model_Name = 'test'
>>> output_string = Generate_Raven_Timeseries_rvt_String(outFolderraven,outObsfileFolder,obsnms,Model_Name)
"""
toobsrvtfile = | |
[100, 120], [180, 230], [255, 255]])
>>> lc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> sc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyHLSCurve(hc,lc,sc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyRGBCurve`
"""
#TODO CHECK ROI
#TODO CHECK CURVE SIZE
#TODO CHECK COLORSPACE
#TODO CHECK CURVE SIZE
temp = cv.CreateImage(self.size(), 8, 3)
#Move to HLS space
cv.CvtColor(self._bitmap, temp, cv.CV_RGB2HLS)
tempMat = cv.GetMat(temp) #convert the bitmap to a matrix
#now apply the color curve correction
tempMat = np.array(self.getMatrix()).copy()
tempMat[:, :, 0] = np.take(hCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(sCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(lCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
cv.CvtColor(image, image, cv.CV_HLS2RGB)
return Image(image, colorSpace=self._colorSpace)
def applyRGBCurve(self, rCurve, gCurve, bCurve):
"""
**SUMMARY**
Apply a color correction curve in RGB space. This method can be used
to change values for each channel. The curves are :py:class:`ColorCurve` class objects.
**PARAMETERS**
* *rCurve* - the red ColorCurve object.
* *gCurve* - the green ColorCurve object.
* *bCurve* - the blue ColorCurve object.
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> rc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> gc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> bc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyRGBCurve(rc,gc,bc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
tempMat = np.array(self.getMatrix()).copy()
tempMat[:, :, 0] = np.take(bCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(gCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(rCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
return Image(image, colorSpace=self._colorSpace)
def applyIntensityCurve(self, curve):
"""
**SUMMARY**
Intensity applied to all three color channels
**PARAMETERS**
* *curve* - a ColorCurve object.
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> rc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> gc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> bc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyRGBCurve(rc,gc,bc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
return self.applyRGBCurve(curve, curve, curve)
def colorDistance(self, color = Color.BLACK):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from a given color
tuple, scaled between 0 (the given color) and 255. Pixels distant from the
given tuple will appear brighter and pixels closest to the target color
will be darker.
By default this will give image intensity (distance from pure black)
**PARAMETERS**
* *color* - Color object or Color Tuple
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.colorDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`findBlobsFromMask`
"""
pixels = np.array(self.getNumpy()).reshape(-1, 3) #reshape our matrix to 1xN
distances = spsd.cdist(pixels, [color]) #calculate the distance each pixel is
distances *= (255.0/distances.max()) #normalize to 0 - 255
return Image(distances.reshape(self.width, self.height)) #return an Image
def hueDistance(self, color = Color.BLACK, minsaturation = 20, minvalue = 20):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from the given hue
of a specific color. The hue is "wrapped" at 180, so we have to take the shorter
of the distances between them -- this gives a hue distance of max 90, which we'll
scale into a 0-255 grayscale image.
The minsaturation and minvalue are optional parameters to weed out very weak hue
signals in the picture, they will be pushed to max distance [255]
**PARAMETERS**
* *color* - Color object or Color Tuple.
* *minsaturation* - the minimum saturation value for color (from 0 to 255).
* *minvalue* - the minimum value (brightness) for the color (from 0 to 255).
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.hueDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
if isinstance(color, (float,int,long,complex)):
color_hue = color
else:
color_hue = Color.hsv(color)[0]
vsh_matrix = self.toHSV().getNumpy().reshape(-1,3) #again, gets transposed to vsh
hue_channel = np.cast['int'](vsh_matrix[:,2])
if color_hue < 90:
hue_loop = 180
else:
hue_loop = -180
#set whether we need to move back or forward on the hue circle
distances = np.minimum( np.abs(hue_channel - color_hue), np.abs(hue_channel - (color_hue + hue_loop)))
#take the minimum distance for each pixel
distances = np.where(
np.logical_and(vsh_matrix[:,0] > minvalue, vsh_matrix[:,1] > minsaturation),
distances * (255.0 / 90.0), #normalize 0 - 90 -> 0 - 255
255.0) #use the max value if it falls outside of our value/saturation tolerances
return Image(distances.reshape(self.width, self.height))
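# Worked example of the wrap-around above (assuming the pixel passes the
# saturation/value thresholds): for color_hue = 10, a pixel with hue 170 is
# min(|170 - 10|, |170 - (10 + 180)|) = min(160, 20) = 20 away, which maps to
# roughly 20 * 255 / 90 ~ 57 in the returned grayscale image.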
def erode(self, iterations=1):
"""
**SUMMARY**
Apply a morphological erosion. An erosion has the effect of removing small bits of noise
and smoothing blobs.
This implementation uses the default openCV 3X3 square kernel.
Erosion is effectively a local minima detector: the kernel moves over the image and
takes the minimum value inside the kernel.
iterations - this parameter is the number of times to apply/reapply the operation
* See: http://en.wikipedia.org/wiki/Erosion_(morphology).
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-erode
* Example Use: A threshold/blob image has 'salt and pepper' noise.
* Example Code: /examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the erosion operation.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.erode(3).show()
**SEE ALSO**
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
cv.Erode(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
def dilate(self, iterations=1):
"""
**SUMMARY**
Apply a morphological dilation. A dilation has the effect of smoothing blobs while
enlarging small noise blobs.
This implementation uses the default openCV 3X3 square kernel.
Dilation is effectively a local maxima detector: the kernel moves over the image and
takes the maximum value inside the kernel.
* See: http://en.wikipedia.org/wiki/Dilation_(morphology)
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-dilate
* Example Use: A part's blob needs to be smoother
* Example Code: ./examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the dilation operation.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.dilate(3).show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
cv.Dilate(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
def morphOpen(self):
"""
**SUMMARY**
morphologyOpen applies a morphological open operation which is effectively
an erosion operation followed by a morphological dilation. This operation
helps to 'break apart' or 'open' binary regions which are close together.
* `Morphological opening on Wikipedia <http://en.wikipedia.org/wiki/Opening_(morphology)>`_
* `OpenCV documentation <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: two part blobs are 'sticking' together.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphOpen().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_OPEN, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_OPEN, 1)
#OPENCV 2.2 vs 2.3 compatibility
return( Image(retVal) )
def morphClose(self):
"""
**SUMMARY**
morphologyClose applies a morphological close operation which is effectively
a dilation operation followed by a morphological erosion. This operation
helps to 'bring together' or 'close' binary regions which are close together.
* See: `Closing <http://en.wikipedia.org/wiki/Closing_(morphology)>`_
* See: `Morphology from OpenCV <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: Use when a part, which should be one blob, is really two blobs.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp
# script that controls the computation procedure
# first create the file params_gcc.txt and params.txt, which contain the preprocessor definitions and relevant parameters included in the Python script
# then create pre-processed interpretable source files in the given subdir
# then execute the Python program
import os
import shutil
import sys
import re
import glob
import platform
import time
#set working directory in which Python program is executed
FILEPATH = 'Sims1D/Verification/SediAgg/nz50_inst20_k05_dz10_py/'
iverboseoutput = 1
ilog_linux = 1
def intermediate_format(tmp):
tmp = re.sub('^.+#GCC', '#GCC', tmp,flags=re.MULTILINE)
# include the re.MULTILINE flag, so each line is tested; otherwise only the first line of the multiline string would match the pattern:
#"By default in python, the "^" and "$" special characters (these characters match the start and end of a line, respectively) only apply to the start and end of the entire string."
tmp = tmp.replace('#','!comment#')
tmp = tmp.replace('!comment#GCC','#')
return(tmp)
def py_format(tmp):
tmp = tmp.replace('!comment#','#')
return(tmp)
def py_format_file_inplace(fn):
f = open(fn,'r')
c = f.read()
f.close()
c_pyf = py_format(c)
f = open(fn,'w')
f.write(c_pyf)
f.close()
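# Sketch of the #GCC convention handled by the helpers above (example lines
# only): intermediate_format() turns '#GCCif (KERNEL == 0)' into
# '#if (KERNEL == 0)', a directive the C preprocessor understands, while an
# ordinary Python comment such as '# time step' becomes '!comment# time step'
# so the preprocessor does not choke on it; py_format() later restores the
# protected '#' characters.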
CurrDir = os.getcwd()
print(CurrDir)
output_to_file_params_gcc = """
#define COMP /* don't touch */
#/* the following PPDs (PreProcessor Directives) control how computations are performed.*/
#/* ----------Kernel------------- */
#define KERNEL 1 /* =0 Golovin kernel, =1 Long kernel, =2 Hall kernel, =3 product kernel */
#define LongK_Options 1 /* in case of Long kernel: =1 tabulated data, =2 computed data */
#define KERNEL_INTPOL 0 /* relevant for Hall/Long kernel: =1 kernel values are bilinearly interpolated; =0 no interpolation (a particular value is taken from the LookUp-Table), =2 no interpolation, but a linear instead of logarithmic mass grid is used */
#/* ---------- Initialization -------*/
#define INITV 1 /* = 1 Exponential distribution, =2 discrete distribution
# /* if computations are based on (small-size) discrete distributions, then no continuous size distributions are deduced. Instead, a discrete mass grid with multiples of the elemental mass is used. */
#/*----------- AON options ----------*/
#define AGG_MC 2 /* 0 no Multiple Collections, = 1 Integer Multiple Collection (conserves integer weights) = 2 Float Multiple Collection */
#define DISCRETE 0 /* = 0 continuous use case, = 1 and 2 discrete use case (specify INITV =2 and AGG_MC = 1), only multiples of a specified elemental mass are possible; = 1 weights are integer valued, = 2 only weights 1 */
#define LINEAR 0 /* =0 check all SIP combinations (quadratic sampling), = 1 linear sampling of SIP combinations */
#define LINEAR_LIMIT 0 /* options when nu_coll>nu_i,nu_j (the options are only active for LINEAR = 1):
# =0: no special treatment, can lead to negative weights, nu_coll can be larger than max(nu_i,nu_j):
# =1: limit nu_coll by 0.99*max(nu_i,j),
# =2: equal partition SIPs with nu_i,j=0.5 min(nu_i,j) (recommended by Shima during GMD,2020 review)
# in the present setup with nu-values being floats, equality of nu_i and nu_j is not tested,
# as this is an extremely rare event for the current implementation of SIP ensemble generation.
# Currently, a collection between two equal weights SIPs generates a zero weight SIP which is removed.
# =3: use an uneven splitting with 0.4 and 0.6 min(nu_i,j)
# NOTE: for LINEAR = 0, always the limiter 0.99*max(nu_i,j) is used */
#define WELLMIXED 0 /* 0: classical 3D well mixed assumption, 1: 2D well mixed assumption, consider overtakes in each gridbox, 2, 3: 2D wellmixed assumption, consider overtakes in full column, 3: additionally accounts for overtakes across lower boundary, only relevant for period BCs */
#define REVERSE 1 /* =1: reverse SIP processing: start from the highest SIP not the lowest, beneficial for the WM2D variant */
#define COUNT_COLLS 0 /* track number of collisions and output to log file*/
#define SPEED_ZERO 1 /* = 1 do not test for zero weight SIPs inside AON */
#define SPEED_VECTOR 0 /* = 1 more vector evaluations */
#define WARN 0 /* =1 warnings in the AON algorithm */
#/* ---------- 1D options ------------*/
#define COLUMN 1 /* = 0 classical box model, = 2 column model with additional sedimentation */
#define INFLUX_TOP 2 /* influx across top boundary: = 0 no influx , = 1 influx with given SD, =2 periodic BC, outfalling SIPs re-enter domain */
#define PROCESS 0 /* sedimentation and collisional growth are both active (=0), switch off sedimentation (=2) or collisional growth (=1) */
#define RANDOMZ 0 /* random placement of SIPs inside column after each time step */
#define TRACKCENTER 0 /* tracks SIPs centers in each grid box */
#define TRACKOUT 0 /* tracks all SIPs that cross lower boundary */
#/* ----------Plot options -----------*/
#define MOM_meanTD 1 /* plot mean moments, average over all instances and full column, TD = TotalDomain */
#define GV_meanTD 0 /* plot mean size distribution, average over all instances and full column, TD = TotalDomain */
#define MOM_prof 0 /* plot vertical profiles of mean moments, average over instances */
#define RZ_scatter 0 /* (r,z)-scatter plot */
#define FLUX_time 0 /* plot fluxes over time */
#/* -------- Reference Solution -----*/
#define IREF 1
# /* = 0 no reference solution,
# = 1 analytical Golovin solution or Long/Hall reference solution provided by Wang,
# = 2 Alfonso Hall kernel reference solution
# = 3 Alfonso product kernel reference solution
# = 9 read Bott/Wang(?) simulation data, set path fp_ref */
"""
f = open('params_gcc.txt','w')
f.write(output_to_file_params_gcc)
f.close()
output_to_file_params = """
import math
#----------- parameter definitions--------------------------
# time step in seconds
dt = 10.
# simulated time in seconds
Tsim = 3600. #3600
#grid box volume
dV = 1. #in m^-3
dV_skal = 1 #increase volume by this factor and call init routine multiple times (increase by factor dV_skal)
dV = dV*dV_skal
dVi = 1./dV
#number of realisations
nr_inst = 20
nr_ins_start = 0 # each realisation uses a different seed parameter. If a simulation is divided in several subsimulations, then provide a starting index of the current realisations range
#storage interval of output
#points in time, at which SIP data is saved and size disitributions can be produced
t_start_GVplot = 0 # in s
t_intervall_GVplot = 200 # in s
#points in time, at which moment data is saved (usually a finer time grid is used here)
t_start_MOMsave = 0 # in s
t_intervall_MOMsave = 50 # in s
#units of time axis in Moment and Flux plots
itime_legend = 2 # = 1 in seconds, =2 in minutes
#GCCif (KERNEL == 0)
b_original=1500 # in (cm^3)*(g^(-1))*(s^(-1))
b_golovin=b_original*(1e-6)*(1e3)*dt*dVi # SI units (m^3*kg^(-1)*(s^(-1)) * s/m^3
#GCCendif /* (KERNEL == 0) */
#GCCif (KERNEL == 3)
C_original=5.49e10 # in (cm^3)*(g^(-2))*(s^(-1)) bzw auch (m^3*kg^(-2)*(s^(-1))
C_prod=C_original*dt *dVi # SI units kg^(-2)
#GCCendif /* (KERNEL == 3) */
#GCCif (INITV == 1)
#Density of water in kg/m**3
rho_w=1e3
# conversion constant mass to radius
const_mass2rad=1./((4./3.)*math.pi*rho_w)
#Properties of initial size distribution
#the SingleSIP-method as described in Unterstrasser et al, 2017 is used to generate a SIP ensemble
#physical parameters
#Mean Droplet radius in m
r0=9.3e-6 # 9.3e-6 #50.e-6 #9.3e-6
#mass concentration in kg/m^3
LWC=1e-3 #1e-3
#numerical parameters
#number of bins per mass decade (called kappa in the GMD-papers)
n10=5
#number of mass decades
r10=18
#starting mass of bin grid (min10=log10(mass in kg))
min10=-18
#determines smallest mass/radius of SIP at which SIPs are eventually created
# = 0: min10 is natural lower threshold, = 1: use 1.5625um, = 2: use 0.6um;
imlow=2
#determines the smallest SIP weight relative to the maximum SIP weight of the SIP ensemble
eta_nu=1e-9
#upper bound for SIP number (determines the size of the SIP arrays)
nr_sip_max=15000
#normalisation constant for SIP droplet masses, usually relevant for the discrete case
skal_m= 1. # = 1 => no scaling of SIP droplet masses
#GCCendif /* (INITV == 1) */
#GCCif (INITV == 2)
#Density of water in kg/m**3
rho_w=1e3
# conversion constant mass to radius
const_mass2rad=1./((4./3.)*math.pi*rho_w)
iSIPinit_discrete = '1b'
if (iSIPinit_discrete == '1a'):
#Maximum number of SIPs
nr_sip_max=30
#normalisation constants for concentration and mass
skal_rad=17.0e-6 # represents radius of unit particle in m
skal_m=(skal_rad**3.0)/const_mass2rad # 1 represents an elemental mass, i.e mass of a 17um particle in SI units
nplot = 40
if (iSIPinit_discrete == '1b'):
#Maximum number of SIPs
nr_sip_max=20*dV_skal
#normalisation constants for concentration and mass
skal_rad=17.0e-6 # represents radius of unit particle in m
skal_m=(skal_rad**3.0)/const_mass2rad # 1 represents an elemental mass, i.e mass of a 17um particle in SI units
nplot = 40*dV_skal
if (iSIPinit_discrete == '2'):
#Maximum number of SIPs
nr_sip_max = 100
#normalisation constants for concentration and mass
skal_rad = 14.0e-6 # represents radius of unit particle
= Upgrade(args.ip, args.port)
tarver = ugObj.get_target_version()
if (tarver and 'target_version' in tarver):
viprver = tarver['target_version']
viprver = viprver[viprver.find("vipr-")+5:]
cliver = common.get_viprcli_version()
cliver = cliver[cliver.find("storageos-cli-")+14:]
if ( viprver != cliver):
print "ViPR appliance [ "+ args.ip + \
" ] is at version : "+ viprver + \
" , CLI installed is at version : "+ cliver + \
" . Some functionality might not be up to date" \
" [ Refer to Release Notes ] , We recommend " \
" upgrading to latest version [ Refer to CLI " \
" Reference for Installation ] "
except SOSError as e:
if ("HTTP code: 403" in e.err_text):
return
else:
raise e
except SOSError as e:
raise e
def authenticate_parser(parent_subparser, sos_ip, sos_port):
# main authentication parser
authenticate_parser = parent_subparser.add_parser(
'authenticate',
description='ViPR authenticate CLI usage',
conflict_handler='resolve',
help='Authenticate ViPR user')
authenticate_parser.add_argument(
'-cf', '-cookiefile',
metavar='<cookiefile>',
help='filename for storing cookie information',
dest='cookiefile')
authenticate_parser.add_argument(
'-hostname', '-hn',
metavar='<hostname>',
default=sos_ip,
dest='ip',
help='Hostname (fully qualified domain name) of ViPR')
authenticate_parser.add_argument(
'-port', '-po',
type=int,
metavar='<port_number>',
default=sos_port,
dest='port',
help='port number of ViPR')
mandatory_args = authenticate_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-u', '-username',
metavar='<username>',
help='username for login',
dest='username',
required=True)
mandatory_args.add_argument(
'-d', '-cookiedir',
metavar='<cookiedir>',
help='cookie directory to store cookie files',
dest='cookiedir',
required=True)
authenticate_parser.set_defaults(func=authenticate_user)
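# Hypothetical command-line usage (the 'viprcli' entry-point name and host are
# assumptions, not taken from this file):
#   viprcli authenticate -u root -d /tmp/cookies -hostname vipr.example.com -port 4443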
def logout_user(args):
obj = Authentication(args.ip, args.port)
try:
res = obj.logout_user()
except SOSError as e:
raise e
def logout_parser(parent_subparser, sos_ip, sos_port):
# main authentication parser
logout_parser = parent_subparser.add_parser(
'logout',
description='ViPR authentication CLI usage',
conflict_handler='resolve',
help='Logout ViPR user')
logout_parser.add_argument(
'-cf', '-cookiefile',
metavar='<cookiefile>',
help='filename for storing cookie information',
dest='cookiefile')
logout_parser.add_argument(
'-hostname', '-hn',
metavar='<hostname>',
default=sos_ip,
dest='ip',
help='Hostname (fully qualified domain name) of ViPR')
logout_parser.add_argument(
'-port', '-po',
type=int,
metavar='<port_number>',
default=sos_port,
dest='port',
help='port number of ViPR')
logout_parser.set_defaults(func=logout_user)
def add_auth_provider_parser(subcommand_parsers, common_parser):
# add command parser
add_auth_provider_parser = subcommand_parsers.add_parser(
'add-provider',
description='ViPR Authentication Provider Add CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Add a Authentication Provider')
mandatory_args = add_auth_provider_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-configfile',
metavar='<configfile>',
help='config file for authentication provider',
dest='configfile',
required=True)
add_auth_provider_parser.set_defaults(func=add_authentication_provider)
def show_auth_provider_parser(subcommand_parsers, common_parser):
# show command parser
show_auth_provider_parser = subcommand_parsers.add_parser(
'show-provider',
description='ViPR Authentication Provider Show CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Show an Authentication Provider')
mandatory_args = show_auth_provider_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-name',
metavar='<name>',
help='name of the authentication provider',
dest='name',
required=True)
show_auth_provider_parser.add_argument('-xml',
dest='xml',
action='store_true',
help='XML response')
show_auth_provider_parser.set_defaults(func=show_authentication_provider)
def update_auth_provider_parser(subcommand_parsers, common_parser):
# update command parser
update_auth_provider_parser = subcommand_parsers.add_parser(
'update',
description='ViPR Authentication Provider Update CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Update a Authentication Provider')
mandatory_args = update_auth_provider_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-configfile',
metavar='<configfile>',
help='config file for authentication provider',
dest='configfile',
required=True)
update_auth_provider_parser.set_defaults(
func=update_authentication_provider)
def delete_auth_provider_parser(subcommand_parsers, common_parser):
# delete command parser
delete_auth_provider_parser = subcommand_parsers.add_parser(
'delete-provider',
description='ViPR Authentication Provider delete CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Delete an Authentication Provider')
mandatory_args = delete_auth_provider_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-name',
metavar='<name>',
help='name of the authentication provider',
dest='name',
required=True)
delete_auth_provider_parser.set_defaults(
func=delete_authentication_provider)
def list_auth_provider_parser(subcommand_parsers, common_parser):
# update command parser
list_auth_provider_parser = subcommand_parsers.add_parser(
'list-providers',
description='ViPR Authentication Provider List CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='List Authentication Providers')
list_auth_provider_parser.add_argument(
'-verbose', '-v',
action='store_true',
help='List Authentication providers with details',
dest='verbose')
list_auth_provider_parser.add_argument(
'-long', '-l',
action='store_true',
help='List Authentication providers with more details',
dest='long')
list_auth_provider_parser.set_defaults(func=list_authentication_provider)
def add_vdc_role_parser(subcommand_parsers, common_parser):
# add command parser
add_vdc_role_parser = subcommand_parsers.add_parser(
'add-vdc-role',
description='ViPR Add vdc Role CLI usage.',
conflict_handler='resolve',
parents=[common_parser],
help='Add a vdc role to an user')
mandatory_args = add_vdc_role_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-role',
help='role to be added',
dest='role',
required=True,
choices=Authentication.ZONE_ROLES)
arggroup = add_vdc_role_parser.add_mutually_exclusive_group(required=True)
arggroup.add_argument('-subject-id', '-sb',
help='Subject ID',
dest='subjectid',
metavar='<subjectid>')
arggroup.add_argument('-group', '-g',
help='Group',
dest='group',
metavar='<group>')
add_vdc_role_parser.set_defaults(func=add_vdc_role)
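# Hypothetical usage (same assumed 'viprcli' entry point; the role name must be
# one of Authentication.ZONE_ROLES):
#   viprcli add-vdc-role -role SECURITY_ADMIN -subject-id admin@example.com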
def add_vdc_role(args):
obj = Authentication(args.ip, args.port)
try:
res = obj.add_vdc_role(args.role, args.subjectid, args.group)
except SOSError as e:
raise e
def list_vdc_role_parser(subcommand_parsers, common_parser):
# add command parser
list_vdc_role_parser = subcommand_parsers.add_parser(
'list-vdc-role',
description='ViPR List vdc Roles CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='List Vdc Roles')
list_vdc_role_parser.set_defaults(func=list_vdc_role)
def list_vdc_role(args):
obj = Authentication(args.ip, args.port)
try:
res = obj.list_vdc_role()
return common.format_json_object(res)
except SOSError as e:
raise e
def delete_role_parser(subcommand_parsers, common_parser):
# register command parser
delete_role_parser = subcommand_parsers.add_parser(
'delete-role',
description='ViPR delete Vdc role CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Delete a vdc role of an user')
mandatory_args = delete_role_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument('-role',
metavar='<role>',
help='role to be deleted',
dest='role',
required=True,
choices=Authentication.ZONE_ROLES)
arggroup = delete_role_parser.add_mutually_exclusive_group(required=True)
arggroup.add_argument('-subject-id', '-sb',
help='Subject ID',
dest='subjectid',
metavar='<subjectid>')
arggroup.add_argument('-group', '-g',
help='Group',
dest='group',
metavar='<group>')
delete_role_parser.set_defaults(func=delete_vdc_role)
def delete_vdc_role(args):
obj = Authentication(args.ip, args.port)
try:
res = obj.delete_vdc_role(args.role, args.subjectid, args.group)
except SOSError as e:
raise e
def add_user_group_parser(subcommand_parsers, common_parser):
# add command parser
add_user_group_parser = subcommand_parsers.add_parser(
'add-user-group',
description='ViPR User Group Add CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Add an User Group')
mandatory_args = add_user_group_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-name',
metavar='<name>',
help='name of the user group to be created',
dest='name',
required=True)
mandatory_args.add_argument(
'-domain',
metavar='<domain>',
help='domain to which this user group to be mapped',
dest='domain',
required=True)
mandatory_args.add_argument(
'-key',
metavar='<key>',
help='attribute key',
dest='key',
required=True)
mandatory_args.add_argument(
'-values',
metavar='<values>',
help='attribute values',
dest='values',
required=True)
add_user_group_parser.set_defaults(func=add_user_group)
def add_user_group(args):
obj = Authentication(args.ip, args.port)
try:
name = args.name
domain = args.domain
key = args.key
values = args.values
if((name is "") or (domain is "") or (key is "") or (values is "")):
raise SOSError(SOSError.VALUE_ERR, "name, " +
"domain, key, values," +
" can not be empty")
res = obj.add_user_group(name, domain, key, values)
except IOError as e:
common.format_err_msg_and_raise("add", "user group",
e[1], e.errno)
except SOSError as e:
common.format_err_msg_and_raise("add", "user group",
e.err_text, e.err_code)
except ConfigParser.NoOptionError as e:
common.format_err_msg_and_raise("add", "user group",
str(e), SOSError.NOT_FOUND_ERR)
except (ConfigParser.ParsingError, ConfigParser.Error) as e:
common.format_err_msg_and_raise("add", "user group",
str(e), SOSError.VALUE_ERR)
def user_group_add_attribute_parser(subcommand_parsers, common_parser):
# add command parser
user_group_add_attribute_parser = subcommand_parsers.add_parser(
'user-group-add-attribute',
description='ViPR User Group Add Attribute CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Add an attribute to an User Group')
mandatory_args = user_group_add_attribute_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-name',
metavar='<name>',
help='name of the user group to which the attribute will be added',
dest='name',
required=True)
mandatory_args.add_argument(
'-key',
metavar='<key>',
help='attribute key',
dest='key',
required=True)
mandatory_args.add_argument(
'-values',
metavar='<values>',
help='attribute values',
dest='values',
required=True)
user_group_add_attribute_parser.set_defaults(func=user_group_add_attribute)
def user_group_add_attribute(args):
obj = Authentication(args.ip, args.port)
try:
name = args.name
key = args.key
values = args.values
if((name is "") or (key is "") or (values is "")):
raise SOSError(SOSError.VALUE_ERR, "name, " +
"key, values," +
" can not be empty")
res = obj.user_group_add_attribute(name, key, values)
except IOError as e:
common.format_err_msg_and_raise("add attribute to", "user group",
e[1], e.errno)
except SOSError as e:
common.format_err_msg_and_raise("add attribute to", "user group",
e.err_text, e.err_code)
except ConfigParser.NoOptionError as e:
common.format_err_msg_and_raise("add attribute to", "user group",
str(e), SOSError.NOT_FOUND_ERR)
except (ConfigParser.ParsingError, ConfigParser.Error) as e:
common.format_err_msg_and_raise("add attribute to", "user group",
str(e), SOSError.VALUE_ERR)
def user_group_add_values_parser(subcommand_parsers, common_parser):
# add command parser
user_group_add_values_parser = subcommand_parsers.add_parser(
'user-group-add-values',
description='ViPR User Group Add Values to the existing Attribute CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Add values to an attribute of the User Group')
mandatory_args = user_group_add_values_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-name',
metavar='<name>',
help='name of the user group to which the attribute will be modified',
dest='name',
required=True)
mandatory_args.add_argument(
'-key',
metavar='<key>',
help='name of the attribute key to which the values will be added',
dest='key',
required=True)
mandatory_args.add_argument(
'-values',
metavar='<values>',
help='attribute values to add',
dest='values',
required=True)
user_group_add_values_parser.set_defaults(func=user_group_add_values)
def user_group_add_values(args):
obj = Authentication(args.ip, args.port)
try:
name = args.name
key = args.key
values = args.values
if((name is "") or (key is "") or (values is "")):
raise SOSError(SOSError.VALUE_ERR, "name, " +
"key, values," +
" can not be empty")
res = obj.user_group_add_values(name, key, values)
except IOError as e:
common.format_err_msg_and_raise("add values to", "user group attribute",
e[1], e.errno)
except SOSError as e:
common.format_err_msg_and_raise("add values to", "user group attribute",
e.err_text, e.err_code)
except ConfigParser.NoOptionError as e:
common.format_err_msg_and_raise("add values to", "user group attribute",
str(e), SOSError.NOT_FOUND_ERR)
except (ConfigParser.ParsingError, ConfigParser.Error) as e:
common.format_err_msg_and_raise("add values to", "user group attribute",
str(e), SOSError.VALUE_ERR)
def user_group_remove_attribute_parser(subcommand_parsers, common_parser):
# add command parser
user_group_remove_attribute_parser = subcommand_parsers.add_parser(
'user-group-remove-attribute',
description='ViPR User Group Remove Attribute CLI usage.',
parents=[common_parser],
conflict_handler='resolve',
help='Remove an attribute from User Group')
mandatory_args = user_group_remove_attribute_parser.add_argument_group(
'mandatory arguments')
mandatory_args.add_argument(
'-name',
metavar='<name>',
help='name of the user group from which the attribute will be removed',
dest='name',
required=True)
mandatory_args.add_argument(
'-keys',
metavar='<keys>',
help='attribute keys to remove',
dest='keys',
required=True)
user_group_remove_attribute_parser.set_defaults(func=user_group_remove_attribute)
def user_group_remove_attribute(args):
obj = Authentication(args.ip, args.port)
try:
name = args.name
keys = args.keys
if((name is "") or (keys is "")):
raise SOSError(SOSError.VALUE_ERR, "name, " +
"keys " +
" can not be empty")
res = obj.user_group_remove_attribute(name, keys)
except IOError as e:
common.format_err_msg_and_raise("remove attribute from", "user group",
e[1], e.errno)
except SOSError as e:
common.format_err_msg_and_raise("remove attribute from", "user group",
e.err_text, e.err_code)
except ConfigParser.NoOptionError as e:
common.format_err_msg_and_raise("remove attribute from", "user group",
str(e), SOSError.NOT_FOUND_ERR)
except (ConfigParser.ParsingError, ConfigParser.Error) as e:
common.format_err_msg_and_raise("remove attribute from", "user group",
str(e), SOSError.VALUE_ERR)
def user_group_remove_values_parser(subcommand_parsers, common_parser):
# add command parser
#!/usr/bin/env python3
#============================================================
# IMSNG Pipeline
# => Processing
# Data Monitoring => Processing => Transient Search
#============================================================
#%%
# Library
#------------------------------------------------------------
import os
import glob
import numpy as np
import warnings
warnings.filterwarnings(action='ignore')
import time
st = time.time()
start_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
import sys
sys.path.append('/home/paek/imsngpy')
# IMSNGpy modules
from tableutil import getccdinfo
from preprocess import *
from misc import *
from phot import *
from util import *
#
# Astropy
from astropy.io import ascii
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
from ccdproc import ImageFileCollection
from astropy.time import Time
from astropy.nddata import CCDData
# Multiprocess tools
from itertools import repeat
import multiprocessing
#------------------------------------------------------------
# My library
# from tableutil import *
#============================================================
# USER SETTING
#============================================================
# Input
#------------------------------------------------------------
"""
# [0] Folder to process
try:
path_raw = sys.argv[1]
except:
path_raw = input('''# Data folder to process : ''')
# [1] Observatory_ccd
try:
obs = (sys.argv[2]).upper()
except:
obs = input('''# Observatory(_ccd) to run
--------------------
LOAO
DOAO
SOAO
CBNUO
KCT_ASI1600MM
KCT_STX16803
KHAO
RASA36
LSGT
---------------------
:''').upper()
print('# Observatory : {}'.format(obs.upper()))
# [3] The number of cores
try:
ncore = int(sys.argv[3])
except:
ncore = 8
"""
# Test setting
# path_raw = '/data6/obsdata/LOAO/1994_1026'
# path_raw = '/data6/obsdata/LOAO/1994_1003'
# path_raw = '/data6/obsdata/LOAO/1969_0119'
# path_raw = '/data6/obsdata/LOAO/test'
# path_raw = '/data6/obsdata/LOAO/test_fast'
path_raw = '/data6/obsdata/LOAO/2021_1227'
obs = 'LOAO'
fast_mode4mframe = True
ncore = 4
#------------------------------------------------------------
# PATH
#------------------------------------------------------------
path_factory = '/data3/paek/factory'
path_gal = '/data6/IMSNG/IMSNGgalaxies'
# path_config = '/home/paek/config'
path_config = '/home/paek/imsngpy/config'
path_log = '/home/paek/log'
path_bkg = '/data6/bkgdata'
path_table = '/home/paek/imsngpy/table'
path_gppy = '/home/paek/imsngpy'
#------------------------------------------------------------
path_mframe = f'{path_factory}/master_frames'
path_ref = f'{path_factory}/ref_frames/{obs.upper()}'
path_obs = f'{path_factory}/{obs.lower()}'
path_default_gphot = f'{path_config}/gphot.{obs.lower()}.config'
#------------------------------------------------------------
path_save = f'{path_bkg}/{obs.upper()}'
#------------------------------------------------------------
ccddat = f'{path_table}/obs.dat'
#------------------------------------------------------------
# Codes
path_phot = f'{path_gppy}/imsngpy/gpphot.py'
path_find = f'{path_gppy}/imsngpy/gpsearch.py'
#------------------------------------------------------------
# Table
logtbl = ascii.read(f'{path_log}/{obs.lower()}.log')
hdrtbl = ascii.read(f'{path_table}/changehdr.dat')
alltbl = ascii.read(f'{path_table}/alltarget.dat')
frgtbl = ascii.read(f'{path_table}/fringe.dat')
# ccdtbl = ascii.read(f'{path_table}/ccd.dat')
ccdtbl = ascii.read(f'{path_table}/ccd.tsv')
#------------------------------------------------------------
path_data = f'{path_obs}/{os.path.basename(path_raw)}'
print(f"""{'-'*60}\n#\tCOPY DATA\n{'-'*60}""")
# Remove former data
if os.path.exists(path_data):
rmcom = f'rm -rf {path_data}'
print(rmcom)
os.system(rmcom)
# Copy to factory directory
cpcom = f'cp -r {path_raw} {path_data}'
print(cpcom)
os.system(cpcom)
#%%
# Identify CCD
ic0 = ImageFileCollection(path_data, keywords='*')
'''
print(f"""{'-'*60}\n#\tIDENTIFY CCD\n{'-'*60}""")
ic0 = ImageFileCollection(path_data, keywords='*')
for key, val, suf, ccd in zip((ccdtbl['key'][ccdtbl['obs']==obs]), (ccdtbl['value'][ccdtbl['obs']==obs]), (ccdtbl['suffix'][ccdtbl['obs']==obs]), (ccdtbl['ccd'][ccdtbl['obs']==obs])):
if (key.lower() in ic0.keywords) & (val == ic0.summary[key.lower()][0]):
ccdkey = key
ccdval = val
ccdtype = ccd
if suf.mask == True:
# No suffix
suffix = ''
obsccd = f'{obs}'
else:
suffix = suf
obsccd = f'{obs}_{suffix}'
print(f'OBSERVAT : {obs}\nCCD KEYWORD : {key}\nCCD HEADER VALUE : {val}\nCCD NAME : {ccdtype}\nSUFFIX : {suffix}\n==> OBS_CCD : {obsccd}')
'''
ccdkey, ccdval, ccdtype, obsccd = identify_ccdinfo(ic0, obs, ccdtbl)
# CCD INFO
indx_ccd = np.where(
(ccdtbl['obs']==obs) &
(ccdtbl['key']==ccdkey) &
(ccdtbl['value']==ccdval)
)
print(f"""{'-'*60}\n#\tCCD INFO\n{'-'*60}""")
gain = ccdtbl['gain'][indx_ccd][0]*(u.electron/u.adu)
rdnoise = ccdtbl['readnoise'][indx_ccd][0]*(u.electron)
pixscale = ccdtbl['pixelscale'][indx_ccd][0]*(u.arcsec/u.pixel)
fov = ccdtbl['foveff'][indx_ccd][0]*(u.arcmin)
print(f"""GAIN : {gain}\nREAD NOISE : {rdnoise}\nPIXEL SCALE : {pixscale}\nEffective FoV : {fov}""")
#------------------------------------------------------------
#%%
# Header correction
#------------------------------------------------------------
comment = f"""{'='*60}\n#\tHEADER CORRECTION\n{'='*60}"""
print(comment)
for i, inim in enumerate(ic0.summary['file']):
image = f'{path_data}/{inim}'
# CCD Type
'''
if i == 0:
for key in set(ccdtbl['key']):
if key.lower() in ic0.summary.keys():
ccdtype = ccdtbl[
(ccdtbl['value'] == ic0.summary[key.lower()][i]) &
(ccdtbl['obs'] == obs)
]['ccd'].item()
else:
ccdtype = 'UNKNOWN'
fits.setval(image, 'CCDNAME', value=ccdtype)
fits.setval(image, 'OBSERVAT', value=obs)'''
fits.setval(image, 'PATHPRC', value=path_data, comment='Path where data is processed')
fits.setval(image, 'CCDNAME', value=ccdtype)
fits.setval(image, 'OBSERVAT', value=obs)
fits.setval(image, 'OBSCCD', value=obsccd)
# Correction with table
for key, val, nval in zip(hdrtbl['key'], hdrtbl['val'], hdrtbl['newval']):
if ic0.summary[key.lower()][i] == val:
print(f'{inim} - {key} : {val} --> {nval}')
fits.setval(image, key, value=nval)
# DATE-OBS, JD, MJD
if 'T' not in ic0.summary['date-obs'][i]:
dateobs = f"{ic0.summary['date-obs']}'T'{ic0.summary['time-obs']}"
fits.setval(image, 'date-obs', value=dateobs)
del dateobs
else:
pass
t = Time(ic0.summary['date-obs'][i], format='isot')
fits.setval(image, 'JD', value=t.jd)
fits.setval(image, 'MJD', value=t.mjd)
del t
# OBJECT name
if 'ngc' in ic0.summary['object'][i]:
objectname = ic0.summary['object'][i]
while len(objectname)<7:
head = objectname[0:3]
tail = objectname[3:]
tail = f'0{tail}'
objectname = f'{head}{tail}'
fits.setval(image, 'OBJECT', value=objectname.upper())
del objectname
print()
ic1 = ImageFileCollection(path_data, keywords='*')
t_med = np.median(ic1.filter(imagetyp='object').summary['jd']) # [JD]
#------------------------------------------------------------
#%%
# Object Master Table
#
# Write the status of processing
# Pointing the original filename and updated filename
# Each dtype is set to 'strtype' variable
#------------------------------------------------------------
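# Columns built below: objtype, raw, now, preprocess, defringe,
# cosmic_ray_removal, astrometry, final. 'raw' keeps the original file name,
# 'now' always points to the latest product of each step, and 'final' holds
# the IMSNG-formatted name.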
strtype = 'U300'
omtbl = Table()
objtypelist = []
for obj in ic1.filter(imagetyp='object').summary['object']:
if obj in alltbl['obj']:
objtypelist.append('IMSNG')
elif 'GRB' in obj:
objtypelist.append('GRB')
elif ('GECKO' in obj) | ('GCK' in obj):
objtypelist.append('GECKO')
else:
objtypelist.append('NON-IMSNG')
omtbl['objtype'] = objtypelist
omtbl['raw'] = [inim for inim in ic1.filter(imagetyp='object').files]
omtbl['now'] = omtbl['raw']
omtbl['preprocess'] = ''
omtbl['defringe'] = ''
omtbl['cosmic_ray_removal'] = ''
omtbl['astrometry'] = ''
omtbl['final'] = [f'{path_data}/{fnamechange(inim)}' for inim in ic1.filter(imagetyp='object').files]
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#============================================================
#%%
# Pre-processing
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPREPARE FOR PRE-PROCESS\n{'='*60}""")
mframe = dict()
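# Master calibration frames: 'bias' -> master bias frame,
# 'dark' -> dict keyed by exposure time [s], 'flat' -> dict keyed by filter.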
#------------------------------------------------------------
# BIAS
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tBIAS\n{'-'*60}""")
if 'bias' in ic1.summary['imagetyp']:
biaslist = ic1.filter(imagetyp='bias').files
print(f"""{len(biaslist)} bias frames --> master bias""")
biasdata, biasim = master_bias(biaslist)
mframe['bias'] = biasdata
del biasdata
path_bias = f'{path_mframe}/{obs}/zero'
cpcom = f"cp {biasim} {path_bias}"
print(cpcom)
# os.system(cpcom)
else:
print('No bias frame. Borrow from previous data.')
mftype = 'zero'
biasim = get_nearest_bias(
mftype=mftype,
t_med=t_med,
ccdkey=ccdkey,
ccdval=ccdval,
keyword=f'{path_mframe}/{obs}/{mftype}/????????-{mftype}.fits',
keywords=[ccdkey.lower(), 'jd'],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(biasim)}')
mframe['bias'] = CCDData(fits.getdata(biasim), unit="adu", meta=fits.getheader(biasim))
'''
mftype = 'zero'
if fast_mode4mframe == True:
biaslist = np.array(
[os.path.basename(inim) for inim in sorted(glob.glob(f'{path_mframe}/{obs}/{mftype}/????????-{mftype}.fits'))]
)
deltarr = np.array(
[np.abs(date2jd(inim.split('-')[0]).jd-t_med) for inim in biaslist]
)
indx_bias = np.where(deltarr == np.min(deltarr))
biasim = f"{path_mframe}/{obs}/{mftype}/{biaslist[indx_bias].item()}"
else:
ic_bias = ImageFileCollection(
location=f'{path_mframe}/{obs}/{mftype}',
keywords=[
ccdkey.lower(),
# 'date-obs',
# 'imagetyp',
'jd',
# 'mjd',
]
)
ic_bias_avail = ic_bias.summary[
(ic_bias.summary['jd'].mask == False) &
(ic_bias.summary[ccdkey.lower()]==ccdval)
]
biaslist = ic_bias_avail['file']
deltarr = np.array(np.abs(ic_bias_avail['jd']-t_med))
indx_bias = np.where(deltarr == np.min(deltarr))
biasim = f"{path_mframe}/{obs}/{mftype}/{biaslist[indx_bias].item()}"
del ic_bias_avail
# Delete variables
del mftype
del deltarr
del indx_bias
del biasim'''
#------------------------------------------------------------
#%%
# DARK
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tDARK\n{'-'*60}""")
darkframe = dict()
if 'dark' in ic1.summary['imagetyp']:
darkexptime = np.array(list(set(ic1.filter(imagetyp='dark').summary['exptime'])))
for exptime in darkexptime:
darkdata, darkim = master_dark(ic1.filter(imagetyp='dark', exptime=exptime).files, mbias=mframe['bias'])
# darkframe[f'{int(exptime)}'] = master_dark(ic1.filter(imagetyp='dark', exptime=exptime).files, mbias=mframe['bias'])
darkframe[f'{int(exptime)}'] = darkdata
del darkdata
path_dark = f'{path_mframe}/{obs}/dark'
cpcom = f"cp {darkim} {path_dark}"
print(cpcom)
# os.system(cpcom)
else:
print('No dark frame. Borrow from previous data.')
mftype = 'dark'
darkexptime = []
for exptime in set(ic1.filter(imagetyp='object').summary['exptime']):
mftype = 'dark'
darkim, nexptime = get_nearest_dark(
t_med=t_med,
keyword=f'{path_mframe}/{obs}/{mftype}/*-????????-{mftype}.fits',
exptime=exptime,
ccdkey=ccdkey,
ccdval=ccdval,
keywords=[ccdkey.lower(), 'jd', 'exptime'],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(darkim)}')
# darkframe[f'{int(nexptime)}'] = CCDData(fits.getdata(darkim), unit="adu", meta=fits.getheader(darkim))
if str(nexptime) not in darkframe.keys():
darkframe[f'{int(nexptime)}'] = CCDData(fits.getdata(darkim), unit="adu", meta=fits.getheader(darkim))
darkexptime.append(int(nexptime))
else:
print(f'No available dark frame')
pass
darkexptime = np.array(darkexptime)
mframe['dark'] = darkframe
del darkframe
#------------------------------------------------------------
#%%
# FLAT
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFLAT\n{'-'*60}""")
flatframe = dict()
# Filter list
if 'object' in ic1.summary['imagetyp']:
objfilterlist = list(ic1.filter(imagetyp='object').summary['filter'])
print(f'OBJECT FILTERS : {list(set(objfilterlist))}')
else:
objfilterlist = []
if 'flat' in ic1.summary['imagetyp']:
flatfilterlist = list(ic1.filter(imagetyp='flat').summary['filter'])
print(f'FLAT FILTERS : {list(set(flatfilterlist))}')
else:
flatfilterlist = []
filterlist = set(objfilterlist+flatfilterlist)
print(f'==> ALL FILTERS : {list(set(filterlist))}')
if 'flat' in ic1.summary['imagetyp']:
# Dark exptime should be corrected!
indx_mindark = np.where(darkexptime == np.min(darkexptime))
mdark = mframe['dark'][str(int(darkexptime[indx_mindark].item()))]
for filte in filterlist:
# print(filte)
flatdata, flatim = master_flat(ic1.filter(imagetyp='flat', filter=filte).files, mbias=mframe['bias'], mdark=mdark, filte=filte)
# flatframe[filte] = master_flat(ic1.filter(imagetyp='flat', filter=filte).files, mbias=mframe['bias'], mdark=mdark, filte=filte)
flatframe[filte] = flatdata
path_flat = f'{path_mframe}/{obs}/flat'
cpcom = f"cp {flatim} {path_flat}"
print(cpcom)
# os.system(cpcom)
del flatdata
del mdark
else:
print('No Flat frame. Borrow from previous data.')
mftype = 'flat'
for filte in set(ic1.filter(imagetyp='object').summary['filter']):
flatim = get_nearest_flat(
t_med,
keyword=f'{path_mframe}/{obs}/{mftype}/????????-n{filte}.fits',
filte=filte,
ccdkey=ccdkey,
ccdval=ccdval,
keywords=[ccdkey.lower(), 'imagetyp', 'jd', 'filter',],
fast_mode4mframe=True,
)
print(f'Borrow {os.path.basename(flatim)}')
flatframe[filte] = CCDData(fits.getdata(flatim), unit="adu", meta=fits.getheader(flatim))
mframe['flat'] = flatframe
del flatframe
#------------------------------------------------------------
#%%
# OBJECT Correction
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tOBJECT CORRECTION ({len(ic1.filter(imagetyp='object').files)})\n{'-'*60}""")
# Function for multi-process
def obj_process4mp(inim, newim, filte, exptime, darkexptime, rdnoise, mframe,):
'''
Routine for multiprocess
'''
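# inim/newim : input and output image paths
# filte, exptime : filter and exposure time of the object frame
# darkexptime : available master-dark exposure times (closest one is used)
# rdnoise, mframe : read noise and the master-frame dictionary built above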
# Find the closest exposure time between the dark frames and the object frame
indx_closet = np.where(
np.abs(exptime-darkexptime) == np.min(np.abs(exptime-darkexptime))
)
bestdarkexptime = darkexptime[indx_closet].item()
print(f"{os.path.basename(inim)} {exptime} sec in {filte}-band <-- (scaled) DARK {int(darkexptime[indx_closet].item())} sec")
# Process
nccd = obj_process(
inim=inim,
# gain=ccdinfo['gain'],
gain=None,
readnoise=rdnoise,
mbias=mframe['bias'],
mdark=mframe['dark'][str(int(bestdarkexptime))],
mflat=mframe['flat'][filte],
)
# nccd.write(f'{os.path.dirname(inim)}/fdz{os.path.basename(inim)}', overwrite=True)
nccd.write(newim, overwrite=True)
# Run with multi-process
fdzimlist = add_prefix(ic1.filter(imagetyp='object').files, 'fdz')
if __name__ == '__main__':
# Fixed the number of cores (=4)
with multiprocessing.Pool(processes=4) as pool:
results = pool.starmap(
obj_process4mp,
zip(
ic1.filter(imagetyp='object').files,
fdzimlist,
ic1.filter(imagetyp='object').summary['filter'],
ic1.filter(imagetyp='object').summary['exptime'],
repeat(darkexptime),
repeat(rdnoise),
repeat(mframe),
)
)
# Image collection for pre-processed image
fdzic = ImageFileCollection(f'{path_data}', glob_include='fdzobj*', keywords='*')
omtbl['preprocess'] = fdzimlist
omtbl['now'] = fdzimlist
del fdzimlist
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# Defringe
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFRINGE CORRECTION\n{'-'*60}""")
# for filte in set(frgtbl[(frgtbl['ccd'] == ccdtype) & (frgtbl['obs'] == obs)]['filter']):
for filte in set(fdzic.summary['filter']):
frgtbl_ = frgtbl[
(frgtbl['filter']==filte) &
(frgtbl['ccd'] == ccdtype) &
(frgtbl['obs'] == obs)
]
if len(frgtbl_) > 0:
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
defringe,
zip(
fdzic.filter(filter=filte).files,
repeat(frgtbl_['image'][0]),
add_prefix(fdzic.filter(filter=filte).files, 'df'),
repeat(frgtbl_['table'][0]),
repeat(10)
)
)
for i, inim in enumerate(fdzic.filter(filter=filte).files):
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = results[i]
omtbl['defringe'][indx_tmp] = results[i]
del indx_tmp
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
else:
print(f'{filte} : N/A')
#------------------------------------------------------------
#%%
# FIX (TMP)
#------------------------------------------------------------
#------------------------------------------------------------
# COSMIC-RAY REMOVAL
#------------------------------------------------------------
# Seeing measurement w/ simple SE
prefix = 'simple'
path_conf = f'{path_config}/{prefix}.sex'
path_param = f'{path_config}/{prefix}.param'
path_nnw = f'{path_config}/{prefix}.nnw'
path_conv = f'{path_config}/{prefix}.conv'
# Single
'''
inim = omtbl['now'][0]
seeing, peeing = get_seeing(
inim,
gain,
pixscale,
fov,
path_conf,
path_param,
path_conv,
path_nnw,
seeing_assume=3*u.arcsec,
frac=0.68,
n_min_src=5
)
'''
if __name__ == '__main__':
# Seeing measurement
print(f"""{'-'*60}\n#\tSEEING MEASUREMENT\n{'-'*60}""")
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
get_seeing,
zip(
omtbl['now'],
repeat(gain),
repeat(pixscale),
repeat(fov),
repeat(path_conf),
repeat(path_param),
repeat(path_conv),
repeat(path_nnw),
repeat(3*u.arcsec),
repeat(0.68),
repeat(5),
)
)
print('DONE')
# Cosmic-ray removal
print(f"""{'-'*60}\n#\tCOSMIC-RAY REMOVAL\n{'-'*60}""")
cleantype = 'medmask'
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
cosmic_ray_removal,
zip(
omtbl['now'],
add_prefix(omtbl['now'], 'cr'),
add_suffix(omtbl['final'], 'mask'),
repeat(gain),
repeat(rdnoise),
[r[0] for r in results],
repeat(cleantype)
)
)
for i, inim in enumerate(omtbl['now']):
tmpim = add_prefix(omtbl['now'], 'cr')[i]
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = tmpim
omtbl['cosmic_ray_removal'][indx_tmp] = tmpim
del tmpim
del indx_tmp
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# ASTROMETRY
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tASTROMETRY\n{'-'*60}""")
# Classification IMSNG and non-IMSNG target
frac = 0.10 # Pixel scale interval ratio
tralist, tdeclist = [], []
for i, inim in enumerate(omtbl['now']):
if fits.getheader(inim)['OBJECT'] in alltbl['obj']:
indx_obj = np.where(fits.getheader(inim)['OBJECT']==alltbl['obj'])
# tra, tdec = alltbl['ra'][indx_obj].item(), alltbl['dec'][indx_obj].item()
tra, tdec = alltbl['ra'][indx_obj][0], alltbl['dec'][indx_obj][0]
else:
tra, tdec = None, None
tralist.append(tra)
tdeclist.append(tdec)
# Multi-processing
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncore) as pool:
results = pool.starmap(
astrometry,
zip(
omtbl['now'],
add_prefix(omtbl['now'], 'a'),
repeat(pixscale),
repeat(frac),
tralist,
tdeclist,
repeat(fov),
repeat(30)
)
)
# Check astrometry results
print(f"""{'-'*60}\n#\tCHECK ASTROMETRY RESULTS\n{'-'*60}""")
c_all = SkyCoord(alltbl['ra'], alltbl['dec'], unit=(u.hourangle, u.deg))
for i, inim in enumerate(add_prefix(omtbl['now'], 'a')):
hdr = fits.getheader(inim)
if os.path.exists(inim):
print(f'[{i}] {os.path.basename(inim)} : Astrometry Success')
c = SkyCoord(hdr['CRVAL1'], hdr['CRVAL2'], unit=u.deg)
indx_tmp, sep, _ = c.match_to_catalog_sky(c_all)
ra_offset, dec_offset = c.spherical_offsets_to(c_all)
# Put pointing offset info.
fits.setval(inim, keyword='CNTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTRAOFF', value=round(ra_offset.arcmin[indx_tmp], 3), comment='RA offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTDEOFF', value=round(dec_offset.arcmin[indx_tmp], 3), comment='Dec offset between pointing and galaxy position [arcmin]')
print('\tCalculate accuracy')
astrometry_analysis(
inim=omtbl['now'][i],
incor=f"{os.path.splitext(omtbl['now'][i])[0]}.corr",
# outpng=f'{os.path.splitext(inim)[0]}.astrm.png',
# outdat=f'{os.path.splitext(inim)[0]}.astrm.dat'
outpng=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.png",
outdat=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.dat"
)
# Update
# omtbl['now'][i] = inim
else:
print(f'{i} {os.path.basename(inim)} : Astrometry Fail')
# Suspect of wrong object name
if omtbl['objtype'][i] == 'IMSNG':
print('\tThis is an IMSNG target. Start re-astrometry.')
# Retry astrometry
astrometry(
inim=omtbl['now'][i],
outim=add_prefix(omtbl['now'], 'a')[i],
pixscale=pixscale,
frac=frac,
cpulimit=60
)
if os.path.exists(inim):
print('\tRe-astrometry SUCCESS!')
c = SkyCoord(hdr['CRVAL1'], hdr['CRVAL2'], unit=u.deg)
indx_tmp, sep, _ = c.match_to_catalog_sky(c_all)
if sep[0] < fov:
newobj = alltbl['obj'][indx_tmp]
print(f"\tCorrect OBJECT HEADER, {hdr['OBJECT']} --> {newobj} position.")
fits.setval(inim, keyword='OBJECT', value=newobj)
# Put pointing offset info.
fits.setval(inim, keyword='CENTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position')
ra_offset, dec_offset = c.spherical_offsets_to(c_all)
fits.setval(inim, keyword='CNTSEP', value=round(sep[0].arcmin, 3), comment='Offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTRAOFF', value=round(ra_offset.arcmin, 3), comment='RA offset between pointing and galaxy position [arcmin]')
fits.setval(inim, keyword='CNTDEOFF', value=round(dec_offset.arcmin, 3), comment='Dec offset between pointing and galaxy position [arcmin]')
astrometry_analysis(
inim=omtbl['now'][i],
incor=f"{os.path.splitext(omtbl['now'][i])[0]}.corr",
outpng=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.png",
outdat=f"{os.path.splitext(omtbl['final'][i])[0]}.astrm.dat"
)
# omtbl['now'][i] = inim
pass
else:
print('\tRe-astrometry Fail...')
pass
else:
print('\tNo other actions')
del hdr
#
for i, inim in enumerate(omtbl['now']):
tmpim = add_prefix(omtbl['now'], 'a')[i]
if os.path.exists(tmpim):
indx_tmp = np.where(omtbl['now'] == inim)
omtbl['now'][indx_tmp] = tmpim
omtbl['astrometry'][indx_tmp] = tmpim
del indx_tmp
del tmpim
for key in omtbl.keys(): omtbl[key] = omtbl[key].astype(strtype)
#------------------------------------------------------------
#%%
# File name change
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tFILENAME CHANGE to IMSNG FORMAT\n{'-'*60}""")
for i, inim in enumerate(omtbl['now']):
newim = f"{omtbl['final'][i]}"
com = f'cp {inim} {newim}'
os.system(com)
print(f'[{i}] {os.path.basename(inim)} --> {os.path.basename(newim)}')
fits.setval(newim, keyword='IMNAME', value=os.path.basename(newim), comment='Formatted file name by gpPy process')
ic_cal = ImageFileCollection(path_data, glob_include='Calib-*.f*', glob_exclude='*mask*')
#------------------------------------------------------------
#%%
# Photometry
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPHOTOMETRY FOR SINGLE IMAGEs\n{'-'*60}""")
photcom = f"python {path_phot} '{path_data}/Calib-*0.fits' {ncore}"
print(photcom)
os.system(photcom)
#------------------------------------------------------------
#%%
# IMAGE COMBINE
#------------------------------------------------------------
print(f"""{'='*60}\n#\tPHOTOMETRY FOR COMBINED IMAGEs\n{'-'*60}""")
def group_images(objtbl, tfrac):
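# Group back-to-back exposures: an image stays in the current group while its
# start time (relative to the first frame) is still within the accumulated
# exposure time scaled by tfrac; 'indx' selects the members and 'indx_inv'
# the remainder.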
delt = np.array(objtbl['jd'] - np.min(objtbl['jd']))*(24*60*60) # [days] --> [sec]
tsub = delt - (np.cumsum(objtbl['exptime']*tfrac) - objtbl['exptime'][0])
indx = np.where(tsub < 0)
indx_inv = np.where(tsub > 0)
return indx, indx_inv
#------------------------------------------------------------
print(f"""{'-'*60}\n#\tGROUP IMAGES\n{'-'*60}""")
tfrac = 1.5 # Time fraction for grouping
comimlist = []
for obj in set(ic_cal.summary['object']):
for filte | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: imagenet-resnet.py
import cv2
import sys
import argparse
import numpy as np
import os
import multiprocessing
import glob
import pickle
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack import *
from tensorpack.utils.stats import RatioCounter
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_nr_gpu
TOTAL_BATCH_SIZE = 64
INPUT_SHAPE = 224
DEPTH = None
test = False
mask_dict = None
use_mask = False
class Model(ModelDesc):
def __init__(self, data_format='NHWC'):
if data_format == 'NCHW':
assert tf.test.is_gpu_available()
self.data_format = data_format
def _get_inputs(self):
# uint8 instead of float32 is used as input type to reduce copy overhead.
# It might hurt the performance a liiiitle bit.
# The pretrained models were trained with float32.
return [InputDesc(tf.uint8, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
InputDesc(tf.int32, [None], 'label')]
def _build_graph(self, inputs):
image, label = inputs
image = tf.cast(image, tf.float32) * (1.0 / 255)
# Wrong mean/std are used for compatibility with pre-trained models.
# Should actually add a RGB-BGR conversion here.
image_mean = tf.constant([0.485, 0.456, 0.406], dtype=tf.float32)
image_std = tf.constant([0.229, 0.224, 0.225], dtype=tf.float32)
image = (image - image_mean) / image_std
if self.data_format == 'NCHW':
image = tf.transpose(image, [0, 3, 1, 2])
def shortcut(l, n_in, n_out, stride):
if n_in != n_out:
return Conv2D('convshortcut', l, n_out, 1, stride=stride)
else:
return l
def basicblock(l, ch_out, stride, preact):
ch_in = l.get_shape().as_list()[1]
if preact == 'both_preact':
l = BNReLU('preact', l)
input = l
elif preact != 'no_preact':
input = l
l = BNReLU('preact', l)
else:
input = l
l = Conv2D('conv1', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3)
return l + shortcut(input, ch_in, ch_out, stride)
def bottleneck(l, ch_out, stride, preact):
ch_in = l.get_shape().as_list()[1]
if preact == 'both_preact':
l = BNReLU('preact', l)
input = l
elif preact != 'no_preact':
input = l
l = BNReLU('preact', l)
else:
input = l
l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
l = Conv2D('conv3', l, ch_out * 4, 1)
return l + shortcut(input, ch_in, ch_out * 4, stride)
def layer(layername, l, block_func, features, count, stride, first=False):
with tf.variable_scope(layername):
with tf.variable_scope('block0'):
l = block_func(l, features, stride,
'no_preact' if first else 'both_preact')
for i in range(1, count):
with tf.variable_scope('block{}'.format(i)):
l = block_func(l, features, 1, 'default')
return l
cfg = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck)
}
defs, block_func = cfg[DEPTH]
with argscope(Conv2D, nl=tf.identity, use_bias=False,
W_init=variance_scaling_initializer(mode='FAN_OUT')), \
argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
l = Conv2D('conv1', image, 64, 7, stride=2, nl=tf.identity)
# l = BatchNorm('conv1_bn', l)
l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')
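# Each residual unit below follows the same pattern: the branch applies two
# BatchNorm -> Winograd input transform -> (optionally masked) Winograd
# convolution stages plus a final BatchNorm, while the main path is either
# passed through a ReLU or projected with a 1x1 convolution; the two paths
# are then summed.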
l_bra = BatchNorm('res2a_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_2a_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W2a_2a', l_bra, 64, 64, mask=mask_dict['Winograd_W2a_2a/W'] if use_mask else None)
l_bra = BatchNorm('res2a_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_2a_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W2a_2b', l_bra, 64, 64, mask=mask_dict['Winograd_W2a_2b/W'] if use_mask else None)
l_bra = BatchNorm('res2a_bn2c', l_bra)
# l = tf.nn.relu(l)
l = BNReLU('res2a_1_relu', l)
l = Conv2D('res2a_1', l, 64, 1, nl=tf.identity)
l = BatchNorm('res2a_bn1', l)
l = l + l_bra
l_bra = BatchNorm('res2b_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_2b_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W2b_2a', l_bra, 64, 64, mask=mask_dict['Winograd_W2b_2a/W'] if use_mask else None)
l_bra = BatchNorm('res2b_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_2b_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W2b_2b', l_bra, 64, 64, mask=mask_dict['Winograd_W2b_2b/W'] if use_mask else None)
l_bra = BatchNorm('res2b_bn2c', l_bra)
l = tf.nn.relu(l)
l = l + l_bra
l = MaxPooling('pool2', l, 3, stride=2, padding='SAME')
l_bra = BatchNorm('res3a_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_3a_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W3a_2a', l_bra, 64, 128, mask=mask_dict['Winograd_W3a_2a/W'] if use_mask else None)
l_bra = BatchNorm('res3a_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_3a_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W3a_2b', l_bra, 128, 128, mask=mask_dict['Winograd_W3a_2b/W'] if use_mask else None)
l_bra = BatchNorm('res3a_bn2c', l_bra)
l = tf.nn.relu(l)
l = Conv2D('res3a_1', l, 128, 1, nl=tf.identity)
l = BatchNorm('res3a_bn1', l)
l = l + l_bra
l_bra = BatchNorm('res3b_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_3b_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W3b_2a', l_bra, 128, 128, mask=mask_dict['Winograd_W3b_2a/W'] if use_mask else None)
l_bra = BatchNorm('res3b_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_3b_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W3b_2b', l_bra, 128, 128, mask=mask_dict['Winograd_W3b_2b/W'] if use_mask else None)
l_bra = BatchNorm('res3b_bn2c', l_bra)
l = tf.nn.relu(l)
l = l + l_bra
l = MaxPooling('pool3', l, 3, stride=2, padding='SAME')
l_bra = BatchNorm('res4a_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_4a_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W4a_2a', l_bra, 128, 256, mask=mask_dict['Winograd_W4a_2a/W'] if use_mask else None)
l_bra = BatchNorm('res4a_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_4a_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W4a_2b', l_bra, 256, 256, mask=mask_dict['Winograd_W4a_2b/W'] if use_mask else None)
l_bra = BatchNorm('res4a_bn2c', l_bra)
l = tf.nn.relu(l)
l = Conv2D('res4a_1', l, 256, 1, nl=tf.identity)
l = BatchNorm('res4a_bn1', l)
l = l + l_bra
l_bra = BatchNorm('res4b_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_4b_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W4b_2a', l_bra, 256, 256, mask=mask_dict['Winograd_W4b_2a/W'] if use_mask else None)
l_bra = BatchNorm('res4b_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_4b_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W4b_2b', l_bra, 256, 256, mask=mask_dict['Winograd_W4b_2b/W'] if use_mask else None)
l_bra = BatchNorm('res4b_bn2c', l_bra)
l = tf.nn.relu(l)
l = l + l_bra
# l = MaxPooling('pool4', l, 3, stride=2, padding='SAME')
l_bra = BatchNorm('res5a_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_5a_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W5a_2a', l_bra, 256, 512, mask=mask_dict['Winograd_W5a_2a/W'] if use_mask else None)
l_bra = BatchNorm('res5a_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_5a_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W5a_2b', l_bra, 512, 512, mask=mask_dict['Winograd_W5a_2b/W'] if use_mask else None)
l_bra = BatchNorm('res5a_bn2c', l_bra)
l = tf.nn.relu(l)
l = Conv2D('res5a_1', l, 512, 1, nl=tf.identity)
l = BatchNorm('res5a_bn1', l)
l = l + l_bra
l_bra = BatchNorm('res5b_bn2a', l)
l_bra = WinogradImTrans('WinogradImTrans_5b_2a', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W5b_2a', l_bra, 512, 512, mask=mask_dict['Winograd_W5b_2a/W'] if use_mask else None)
l_bra = BatchNorm('res5b_bn2b', l_bra)
l_bra = WinogradImTrans('WinogradImTrans_5b_2b', l_bra, tf.nn.relu)
l_bra = WinogradConv('Winograd_W5b_2b', l_bra, 512, 512, mask=mask_dict['Winograd_W5b_2b/W'] if use_mask else None)
l_bra = BatchNorm('res5b_bn2c', l_bra)
l = tf.nn.relu(l)
l = l + l_bra
l = tf.nn.relu(l)
l = GlobalAvgPooling('gap', l)
l = Dropout('drop_fc', l, 0.85)
# l = Dropout('drop_fc', l, 0.7)
logits = FullyConnected('linear', l, 1000, nl=tf.identity)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
loss = tf.reduce_mean(loss, name='xentropy-loss')
wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
wd_cost = regularize_cost('.*/W', l2_regularizer(1e-4), name='l2_regularize_loss')
add_moving_summary(loss, wd_cost)
self.cost = tf.add_n([loss, wd_cost], name='cost')
def _get_optimizer(self):
lr = get_scalar_var('learning_rate', 0.1, summary=True)
return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def get_data(train_or_test):
isTrain = train_or_test == 'train'
datadir = args.data
ds = dataset.ILSVRC12(datadir, train_or_test,
shuffle=True if isTrain else False, dir_structure='train')
if isTrain:
class Resize(imgaug.ImageAugmentor):
"""
crop 70%~100% of the original image (area and aspect ratio sampled in 0.7~1.0)
See `Going Deeper with Convolutions` by Google.
"""
def _augment(self, img, _):
h, w = img.shape[:2]
area = h * w
for _ in range(10):
targetArea = self.rng.uniform(0.7, 1.0) * area
aspectR = self.rng.uniform(0.7, 1.0)
ww = int(np.sqrt(targetArea * aspectR))
hh = int(np.sqrt(targetArea / aspectR))
if self.rng.uniform() < 0.5:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = 0 if w == ww else self.rng.randint(0, w - ww)
y1 = 0 if h == hh else self.rng.randint(0, h - hh)
out = img[y1:y1 + hh, x1:x1 + ww]
out = cv2.resize(out, (224, 224), interpolation=cv2.INTER_CUBIC)
return out
out = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
return out
augmentors = [
Resize(),
imgaug.RandomOrderAug(
[imgaug.Brightness(30, clip=False),
imgaug.Contrast((0.8, 1.2), clip=False),
imgaug.Saturation(0.4),
# rgb-bgr conversion
imgaug.Lighting(0.1,
eigval=[0.2175, 0.0188, 0.0045][::-1],
eigvec=np.array(
[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]],
dtype='float32')[::-1, ::-1]
)]),
imgaug.Clip(),
imgaug.Flip(horiz=True),
imgaug.ToUint8()
]
else:
augmentors = [
imgaug.ResizeShortestEdge(256),
imgaug.CenterCrop((224, 224)),
imgaug.ToUint8()
]
ds = AugmentImageComponent(ds, augmentors, copy=False)
if isTrain:
ds = PrefetchDataZMQ(ds, min(20, multiprocessing.cpu_count()))
ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
return ds
def get_config(fake=False, data_format='NHWC'):
if fake:
dataset_train = dataset_val = FakeData(
[[64, 224, 224, 3], [64]], 1000, random=False, dtype='uint8')
else:
dataset_train = get_data('train')
dataset_val = get_data('val')
eval_freq = 5
return TrainConfig(
model=Model(data_format=data_format),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(dataset_val, [
ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')]),
ScheduledHyperParamSetter('learning_rate',
[(0, 1e-2), (30, 1e-3), (60, 1e-4), (95, 1e-5)]),
HumanHyperParamSetter('learning_rate'),
],
steps_per_epoch=1280000 // TOTAL_BATCH_SIZE // eval_freq if not test else 0,
# steps_per_epoch=0,
max_epoch=110 if not test else 1,
# max_epoch=1,
)
def eval_on_ILSVRC12(model_file, data_dir):
ds = get_data('val')
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(model_file),
input_names=['input', 'label'],
output_names=['wrong-top1', 'wrong-top5']
)
pred = SimpleDatasetPredictor(pred_config, ds)
acc1, acc5 = RatioCounter(), RatioCounter()
for o in pred.get_result():
batch_size = o[0].shape[0]
acc1.feed(o[0].sum(), batch_size)
acc5.feed(o[1].sum(), batch_size)
print("Top1 Error: {}".format(acc1.ratio))
from __future__ import absolute_import, print_function
from unittest import TestCase
import pytest
from conda import text_type
from conda.base.context import context
from conda.cli.common import arg2spec, spec_from_line
from conda.common.path import expand
from conda.common.url import path_to_url
from conda.exceptions import CondaValueError
from conda.models.channel import Channel
from conda.models.dist import Dist
from conda.models.index_record import IndexRecord, RepodataRecord
from conda.models.match_spec import ChannelMatch, MatchSpec, _parse_spec_str
from conda.models.version import VersionSpec
def DPkg(s, **kwargs):
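# Test helper: expand a dist string such as 'defaults::foo-1.2.3-4.tar.bz2'
# into an IndexRecord so that MatchSpec.match() can be exercised against it.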
d = Dist(s)
return IndexRecord(
fn=d.to_filename(),
name=d.name,
version=d.version,
build=d.build_string,
build_number=int(d.build_string.rsplit('_', 1)[-1]),
channel=d.channel,
subdir=context.subdir,
md5="012345789",
**kwargs)
class MatchSpecTests(TestCase):
def test_match_1(self):
for spec, result in [
('numpy 1.7*', True), ('numpy 1.7.1', True),
('numpy 1.7', False), ('numpy 1.5*', False),
('numpy >=1.5', True), ('numpy >=1.5,<2', True),
('numpy >=1.8,<1.9', False), ('numpy >1.5,<2,!=1.7.1', False),
('numpy >1.8,<2|==1.7', False),('numpy >1.8,<2|>=1.7.1', True),
('numpy >=1.8|1.7*', True), ('numpy ==1.7', False),
('numpy >=1.5,>1.6', True), ('numpy ==1.7.1', True),
('numpy >=1,*.7.*', True), ('numpy *.7.*,>=1', True),
('numpy >=1,*.8.*', False), ('numpy >=2,*.7.*', False),
('numpy 1.6*|1.7*', True), ('numpy 1.6*|1.8*', False),
('numpy 1.6.2|1.7*', True), ('numpy 1.6.2|1.7.1', True),
('numpy 1.6.2|1.7.0', False), ('numpy 1.7.1 py27_0', True),
('numpy 1.7.1 py26_0', False), ('numpy >1.7.1a', True),
('python', False),
]:
m = MatchSpec(spec)
assert m.match(DPkg('numpy-1.7.1-py27_0.tar.bz2')) == result
assert 'name' in m
assert m.name == 'python' or 'version' in m
# both version numbers conforming to PEP 440
assert not MatchSpec('numpy >=1.0.1').match(DPkg('numpy-1.0.1a-0.tar.bz2'))
# both version numbers non-conforming to PEP 440
assert not MatchSpec('numpy >=1.0.1.vc11').match(DPkg('numpy-1.0.1a.vc11-0.tar.bz2'))
assert MatchSpec('numpy >=1.0.1*.vc11').match(DPkg('numpy-1.0.1a.vc11-0.tar.bz2'))
# one conforming, other non-conforming to PEP 440
assert MatchSpec('numpy <1.0.1').match(DPkg('numpy-1.0.1.vc11-0.tar.bz2'))
assert MatchSpec('numpy <1.0.1').match(DPkg('numpy-1.0.1a.vc11-0.tar.bz2'))
assert not MatchSpec('numpy >=1.0.1.vc11').match(DPkg('numpy-1.0.1a-0.tar.bz2'))
assert MatchSpec('numpy >=1.0.1a').match(DPkg('numpy-1.0.1z-0.tar.bz2'))
assert MatchSpec('numpy >=1.0.1a py27*').match(DPkg('numpy-1.0.1z-py27_1.tar.bz2'))
assert MatchSpec('blas * openblas_0').match(DPkg('blas-1.0-openblas_0.tar.bz2'))
assert MatchSpec('blas')._is_simple()
assert not MatchSpec('blas 1.0')._is_simple()
assert not MatchSpec('blas 1.0 1')._is_simple()
m = MatchSpec('blas 1.0', optional=True)
m2 = MatchSpec(m, optional=False)
m3 = MatchSpec(m2, target='blas-1.0-0.tar.bz2')
m4 = MatchSpec(m3, target=None, optional=True)
assert m.spec == m2.spec and m.optional != m2.optional
assert m2.spec == m3.spec and m2.optional == m3.optional and m2.target != m3.target
assert m == m4
self.assertRaises(ValueError, MatchSpec, (1, 2, 3))
def test_no_name_match_spec(self):
ms = MatchSpec(track_features="mkl")
assert str(ms) == "*[track_features=mkl]"
def test_to_filename(self):
m1 = MatchSpec(fn='foo-1.7-52.tar.bz2')
m2 = MatchSpec(name='foo', version='1.7', build='52')
m3 = MatchSpec(Dist('defaults::foo-1.7-52'))
assert m1._to_filename_do_not_use() == 'foo-1.7-52.tar.bz2'
assert m2._to_filename_do_not_use() == 'foo-1.7-52.tar.bz2'
assert m3._to_filename_do_not_use() == 'foo-1.7-52.tar.bz2'
for spec in 'bitarray', 'pycosat 0.6.0', 'numpy 1.6*':
ms = MatchSpec(spec)
assert ms._to_filename_do_not_use() is None
def test_hash(self):
a = MatchSpec('numpy 1.7*')
b = MatchSpec('numpy 1.7*')
c = MatchSpec(name='numpy', version='1.7*')
# optional should not change the hash
d = MatchSpec(c, optional=True)
assert d.optional
assert not c.optional
assert a is not b
assert a is not c
assert a is not d
assert a == b
assert a == c
assert a != d
assert hash(a) == hash(b)
assert hash(a) == hash(c)
assert hash(a) == hash(d)
c = MatchSpec('python')
d = MatchSpec('python 2.7.4')
e = MatchSpec('python', version='2.7.4')
f = MatchSpec('* 2.7.4', name='python')
assert d == e
assert d == f
assert a != c
assert hash(a) != hash(c)
assert c != d
assert hash(c) != hash(d)
# def test_string_mcg1969(self):
# a = MatchSpec("foo1 >=1.3 2", optional=True, target="burg")
# b = MatchSpec('* [name="foo1", version=">=1.3", build="2"]', optional=True, target="burg")
# assert a.optional and a.target == 'burg'
# assert a == b
# c = MatchSpec("^foo1$ >=1.3 2 ")
# d = MatchSpec("* >=1.3 2", name=re.compile(u'^foo1$'))
# e = MatchSpec("* >=1.3 2", name='^foo1$')
# assert c == d
# assert c == e
# # build_number is not the same as build!
# f = MatchSpec('foo1 >=1.3', build_number=2, optional=True, target='burg')
# g = MatchSpec('foo1 >=1.3[build_number=2]', optional=True, target='burg')
# assert a != f
# assert f == g
#
# assert a._to_string() == "foo1 >=1.3 2"
# # assert b._to_string() == ""
# assert g._to_string() == "foo1 >=1.3[build_number=2]"
def test_canonical_string_forms(self):
def m(string):
return text_type(MatchSpec(string))
assert m("numpy") == "numpy"
assert m("numpy=1.7") == "numpy=1.7"
assert m("numpy 1.7*") == "numpy=1.7"
assert m("numpy 1.7.*") == "numpy=1.7"
assert m("numpy[version='1.7*']") == "numpy=1.7"
assert m("numpy[version='1.7.*']") == "numpy=1.7"
assert m("numpy[version=1.7.*]") == "numpy=1.7"
assert m("numpy==1.7") == "numpy==1.7"
assert m("numpy[version='1.7']") == "numpy==1.7"
assert m("numpy[version=1.7]") == "numpy==1.7"
assert m("numpy 1.7") == "numpy==1.7"
assert m("numpy[version='1.7|1.8']") == "numpy[version='1.7|1.8']"
assert m('numpy[version="1.7,1.8"]') == "numpy[version='1.7,1.8']"
assert m('numpy >1.7') == "numpy[version='>1.7']"
assert m('numpy>=1.7') == "numpy[version='>=1.7']"
assert m("numpy=1.7=py3*_2") == "numpy==1.7[build=py3*_2]"
assert m("numpy=1.7.*=py3*_2") == "numpy=1.7[build=py3*_2]"
assert m("https://repo.continuum.io/pkgs/free::numpy") == "defaults::numpy"
assert m("numpy[channel=https://repo.continuum.io/pkgs/free]") == "defaults::numpy"
assert m("defaults::numpy") == "defaults::numpy"
assert m("numpy[channel=defaults]") == "defaults::numpy"
assert m("conda-forge::numpy") == "conda-forge::numpy"
assert m("numpy[channel=conda-forge]") == "conda-forge::numpy"
assert m("numpy[channel=defaults,subdir=osx-64]") == "defaults/osx-64::numpy"
assert m("numpy[channel=https://repo.continuum.io/pkgs/free/osx-64, subdir=linux-64]") == "defaults/linux-64::numpy"
assert m("https://repo.continuum.io/pkgs/free/win-32::numpy") == "defaults/win-32::numpy"
assert m("numpy[channel=https://repo.continuum.io/pkgs/free/osx-64]") == "defaults/osx-64::numpy"
assert m("defaults/win-32::numpy") == "defaults/win-32::numpy"
assert m("conda-forge/linux-64::numpy") == "conda-forge/linux-64::numpy"
assert m("numpy[channel=conda-forge,subdir=noarch]") == "conda-forge/noarch::numpy"
assert m("numpy[subdir=win-32]") == '*/win-32::numpy'
assert m("*/win-32::numpy") == '*/win-32::numpy'
assert m("*/win-32::numpy[subdir=\"osx-64\"]") == '*/osx-64::numpy'
# TODO: should the result in these example pull out subdir?
assert m("https://repo.continuum.io/pkgs/free/linux-32::numpy") == "defaults/linux-32::numpy"
assert m("numpy[channel=https://repo.continuum.io/pkgs/free/linux-32]") == "defaults/linux-32::numpy"
assert m("numpy[build=py3*_2, track_features=mkl]") == "numpy[build=py3*_2,track_features=mkl]"
assert m("numpy[build=py3*_2, track_features='mkl debug']") == "numpy[build=py3*_2,track_features='debug mkl']"
assert m("numpy[track_features='mkl,debug', build=py3*_2]") == "numpy[build=py3*_2,track_features='debug mkl']"
def test_tarball_match_specs(self):
def m(string):
return text_type(MatchSpec(string))
url = "https://conda.anaconda.org/conda-canary/linux-64/conda-4.3.21.post699+1dab973-py36h4a561cd_0.tar.bz2"
assert m(url) == "conda-canary/linux-64::conda==4.3.21.post699+1dab973[build=py36h4a561cd_0]"
def test_exact_values(self):
assert MatchSpec("*").get_exact_value('name') is None
assert MatchSpec("numpy").get_exact_value('name') == 'numpy'
assert MatchSpec("numpy=1.7").get_exact_value('version') is None
assert MatchSpec("numpy==1.7").get_exact_value('version') == '1.7'
assert MatchSpec("numpy[version=1.7]").get_exact_value('version') == '1.7'
assert MatchSpec("numpy=1.7=py3*_2").get_exact_value('version') == '1.7'
assert MatchSpec("numpy=1.7=py3*_2").get_exact_value('build') is None
assert MatchSpec("numpy=1.7=py3*_2").get_exact_value('version') == '1.7'
assert MatchSpec("numpy=1.7=py3*_2").get_exact_value('build') is None
assert MatchSpec("numpy=1.7.*=py37_2").get_exact_value('version') is None
assert MatchSpec("numpy=1.7.*=py37_2").get_exact_value('build') == 'py37_2'
def test_channel_matching(self):
# TODO: I don't know if this invariance for multi-channels should actually hold true
# it might have to for backward compatibility
# but more ideally, the first would be true, and the second would be false
# (or maybe it's the other way around)
assert ChannelMatch("https://repo.continuum.io/pkgs/free").match('defaults') is True
assert ChannelMatch("defaults").match("https://repo.continuum.io/pkgs/free") is True
assert ChannelMatch("https://conda.anaconda.org/conda-forge").match('conda-forge') is True
assert ChannelMatch("conda-forge").match("https://conda.anaconda.org/conda-forge") is True
assert ChannelMatch("https://repo.continuum.io/pkgs/free").match('conda-forge') is False
def test_matchspec_errors(self):
with pytest.raises(ValueError):
MatchSpec('blas [optional')
with pytest.raises(ValueError):
MatchSpec('blas [test=]')
with pytest.raises(ValueError):
MatchSpec('blas[invalid="1"]')
with pytest.raises(CondaValueError):
MatchSpec("/some/file/on/disk/package-1.2.3-2.tar.bz2")
def test_dist(self):
dst = Dist('defaults::foo-1.2.3-4.tar.bz2')
a = MatchSpec(dst)
b = MatchSpec(a)
c = MatchSpec(dst, optional=True, target='burg')
d = MatchSpec(a, build='5')
assert a == b
assert hash(a) == hash(b)
assert a is b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
p = MatchSpec(channel='defaults',name='python',version=VersionSpec('3.5*'))
assert p.match(Dist(channel='defaults', dist_name='python-3.5.3-1', name='python',
version='3.5.3', build_string='1', build_number=1, base_url=None,
platform=None))
assert not p.match(Dist(channel='defaults', dist_name='python-3.6.0-0', name='python',
version='3.6.0', build_string='0', build_number=0, base_url=None,
platform=None))
assert p.match(Dist(channel='defaults', dist_name='python-3.5.1-0', name='python',
version='3.5.1', build_string='0', build_number=0, base_url=None,
platform=None))
assert p.match(RepodataRecord(name='python', version='3.5.1', build='0', build_number=0,
depends=('openssl 1.0.2*', 'readline 6.2*', 'sqlite',
'tk 8.5*', 'xz 5.0.5', 'zlib 1.2*', 'pip'),
channel=Channel(scheme='https', auth=None,
location='repo.continuum.io', token=None,
name='pkgs/free', platform='osx-64',
package_filename=None),
subdir='osx-64', fn='python-3.5.1-0.tar.bz2',
md5='a813bc0a32691ab3331ac9f37125164c', size=14678857,
priority=0,
url='https://repo.continuum.io/pkgs/free/osx-64/python-3.5.1-0.tar.bz2'))
def test_index_record(self):
dst = Dist('defaults::foo-1.2.3-4.tar.bz2')
rec = DPkg(dst)
a = MatchSpec(dst)
b = MatchSpec(rec)
assert b.match(rec)
assert a.match(rec)
def test_strictness(self):
assert MatchSpec('foo').strictness == 1
assert MatchSpec('foo 1.2').strictness == 2
assert MatchSpec('foo 1.2 3').strictness == 3
assert MatchSpec('foo 1.2 3 [channel=burg]').strictness == 3
# Seems odd, but this is needed for compatibility
assert MatchSpec('test* 1.2').strictness == 3
assert MatchSpec('foo', build_number=2).strictness == 3
def test_build_number_and_filename(self):
ms = MatchSpec('zlib 1.2.7 0')
assert ms.get_exact_value('name') == 'zlib'
assert ms.get_exact_value('version') == '1.2.7'
assert ms.get_exact_value('build') == '0'
assert ms._to_filename_do_not_use() == 'zlib-1.2.7-0.tar.bz2'
def test_features(self):
dst = Dist('defaults::foo-1.2.3-4.tar.bz2')
a = MatchSpec(features='test')
assert a.match(DPkg(dst, features='test'))
assert not a.match(DPkg(dst, features='test2'))
assert a.match(DPkg(dst, features='test me'))
assert a.match(DPkg(dst, features='you test'))
assert a.match(DPkg(dst, features='you test me'))
assert a.get_exact_value('features') == {'test'}
class TestArg2Spec(TestCase):
def test_simple(self):
assert arg2spec('python') == 'python'
assert arg2spec('python=2.6') == 'python=2.6'
assert arg2spec('python=2.6*') == 'python=2.6'
assert arg2spec('ipython=0.13.2') == 'ipython=0.13.2'
assert arg2spec('ipython=0.13.0') == 'ipython=0.13.0'
assert arg2spec('ipython==0.13.0') == 'ipython==0.13.0'
assert arg2spec('foo=1.3.0=3') == 'foo==1.3.0[build=3]'
def test_pip_style(self):
assert arg2spec('foo>=1.3') == "foo[version='>=1.3']"
assert arg2spec('zope.int>=1.3,<3.0') == "zope.int[version='>=1.3,<3.0']"
assert arg2spec('numpy >=1.9') == "numpy[version='>=1.9']"
def test_invalid_arg2spec(self):
with pytest.raises(CondaValueError):
arg2spec('!xyz 1.3')
class TestSpecFromLine(TestCase):
def cb_form(self, spec_str):
return MatchSpec(spec_str).conda_build_form()
def test_invalid(self):
assert spec_from_line('=') is None
assert spec_from_line('foo 1.0') is None
def test_comment(self):
assert spec_from_line('foo # comment') == 'foo' == self.cb_form('foo # comment')
assert spec_from_line('foo ## comment') == 'foo' == self.cb_form('foo ## comment')
def test_conda_style(self):
assert spec_from_line('foo') == 'foo' == self.cb_form('foo')
assert spec_from_line('foo=1.0=2') == 'foo 1.0 2' == self.cb_form('foo=1.0=2')
# assert spec_from_line('foo=1.0*') == 'foo 1.0.*' == self.cb_form('foo=1.0*')
# assert spec_from_line('foo=1.0|1.2') == 'foo 1.0|1.2' == self.cb_form('foo=1.0|1.2')
# assert spec_from_line('foo=1.0') == 'foo 1.0' == self.cb_form('foo=1.0')
def test_pip_style(self):
assert spec_from_line('foo>=1.0') == 'foo >=1.0' == self.cb_form('foo>=1.0')
assert spec_from_line('foo >=1.0') == 'foo >=1.0' == self.cb_form('foo >=1.0')
assert spec_from_line('FOO-Bar >=1.0') == 'foo-bar >=1.0' == self.cb_form('FOO-Bar >=1.0')
assert spec_from_line('foo >= 1.0') == 'foo >=1.0' == self.cb_form('foo >= 1.0')
assert spec_from_line('foo >
# Try running this a few times.
# In[17]:
numberOfKittens = numberOfKittens + 10
numberOfKittens
# #### Floating points or floats
# Floats are similar to integers, but with more precision.
# "Float" is short for floating point: a number with a decimal point.
#
# This example starts at 0, but note that this is .0
# Adding the decimal tells Python that we should have a float value instead of an integer.
# In[18]:
aFloatVariable = .0
# Let's again, check the variable type.
# In[19]:
type( aFloatVariable )
# Looks good.
#
# And again, we will add 10 to this. There is something interesting going on here; see if you can spot it.
# aFloatVariable = aFloatVariable + 10
# aFloatVariable
# If you guessed "mixing a float and an integer," you got it. Let's see an example.
# ##### Mixing integers and floats
# In Python (3, more specifically), the result will always take the form with the most precision. So, by default, a float.
# In[20]:
letsSeeWhatHappens = numberOfKittens + aFloatVariable
letsSeeWhatHappens
# We can force variables to be a certain type. We call this a 'type-cast', and it can be used to:
#
# * make an integer into a float
# * a float to an integer
# * an integer to a string (we have not discussed this yet)
# * a float to a string (we have not discussed this yet)
# * etc...
# ##### type-cast
# ```{note}
# type-cast is temporary. If you do not use a type-cast, the variable will revert to its original variable type.
# ```
# Let's switch our numberOfKittens to a float using
# ```python
# float()
# ```
#
# and turn our aFloatVariable to an integer using
#
# ```python
# int()
# ```
# In[21]:
float(numberOfKittens)
# In[22]:
int(aFloatVariable)
# #### String or str
# So, up to this point, we started our conversation working with numbers. Well, what about the other things that are not numbers... like text? Well, for text, we use something called a String or str.
#
# Strings allow us to capture a single character up to thousands of characters (actually, much more than this). Let's go through a traditional example of "Hello, World!" but with my slight spin on it.
# In[23]:
helloStatement = "Hello, everyone!"
# As you can see, strings can capture text and other alphanumeric and special characters. There are several unique functions for strings, but first, let's double-check and see what type we get from our helloStatement.
# In[24]:
type( helloStatement )
# Not too surprising, we see this is type str or string.
# ##### String Indexing/String Slicing
# One of the first ways to interact with our string is to take a look at individual characters by using their **index**.
#
# The **index** is the position (or multiple positions) of each character in the string. So, if we look at our string, we have Hello, everyone! If we wanted to see the first letter *H*, we could reference it using the index, or the position where the letter is in the string.
# In[25]:
helloStatement[1]
# ohh.. wait a minute. We were expecting the letter *H*, but we got *e*. What happened?
# ```{note}
# For indexes, we always start at the number 0. So, 0 is the first thing, 1 is the second thing, and so on.
# ```
# Let's try this again.
# In[26]:
helloStatement[0]
# There we go!
# Visually, this is how the string looks to Python.
#
# 
# ###### Indexing Multiple Letters
# In[27]:
print( helloStatement[0:5] )
# Wait a second!
#
# 
# The way you should think of this is:
#
# ```python
# helloStatement[0 : 5 - 1]
# helloStatement[(starting number) to (ending number - 1)]
# ```
#
# There is also a shortcut way of writing this, without the 0.
# In[28]:
print( helloStatement[:5] )
# In[29]:
print( helloStatement[5:] )
# ##### String functions
# ###### Formatting
# In[30]:
print( helloStatement.capitalize() )
print( helloStatement.lower() )
# ###### Split
# In[31]:
print( helloStatement.split(" ") )
# ```{note}
# *.split()* will eventually become your best friend. *.split()* is a **great** function to use when working with consistently delimited data,
# such as comma separated values (CSV).
# ```
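# For example (the values here are made up for illustration), a comma separated line can be broken into its fields like this:
#
# ```python
# row = "kitten,4,tabby"
# row.split(",")   # ['kitten', '4', 'tabby']
# ```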
# ##### Concatenating Strings
#
# When you want to put two strings together, we say you *concatenate* the strings. There are multiple ways of doing this, but presented here are what I believe to be the three most common ways.
# ###### + Method
# This is the most straightforward method of the three, but there can be some issues. You simply add a plus sign *+* between your strings. Let's take a look at this.
# In[32]:
print ( "hello, " + "everyone!")
# This works fine, but when you add a number into the mix, we run into issues.
# ```python
# print ( "hello, " + "every" + 1 + "!")
#
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-41-1f53f06cad5c> in <module>
# ----> 1 print ( "hello, " + "every" + 1 + "!")
#
# TypeError: can only concatenate str (not "int") to str
# ```
# In this case we need to *type-cast* the integer as a string using
# ```python
# str()
# ```
# In[33]:
print ( "hello, " + "every" + str(1) + "!")
# ###### % Method
# This is my favorite method out of the three. Let's see how this works with the same example.
#
# In this case, we use a %s (s = string) for each string we want to embed in our overall string.
# In[34]:
print ( "%s, %s" % ("hello", "everyone") )
# There are three parts to this.
#
# *The format*
# * ```python
# "%s, %s"
# ```
#
# *The break*
# * ```python
# %
# ```
#
# *The fill*
# * ```python
# ("hello", "everyone")
# ```
#
# We have two %s, meaning we need to feed it with two strings.
# OK, but what about numbers?
# In[35]:
print ( "%s, %s%s%s" % ("hello","every",1,"!") )
# Still works! This is why I like this method. You pick the formatting and feed in the strings.
# ###### join() Method
# The .join() method uses a function called
# ```python
# .join()
# ```
# This is a great function to be aware of, as it gives you the ability to join strings with a specific, static format. What do I mean by static formatting? Well, unlike the % method, which can be formatted exactly how I want it, the .join() method requires a specific pattern. Example time!
# In[36]:
print ( " ".join(["hello, ", "everyone!"]) )
# There are two parts to this.
#
# *The splitter*
# * ```python
# " "
# ```
#
# *The fill*
# * ```python
# .join(["hello, ", "everyone!"])
# ```
#
# Notice that the join has the brackets around it. Technically, you are feeding this an array or list (we have not talked about this yet). This function again, like *.split()*, will be a great asset to you in the future.
#
# Let's show this with our number again.
# ```python
# print ( " ".join(["hello, ", "every", 1, "!"]) )
#
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-54-e926f0c4c025> in <module>
# ----> 1 print ( " ".join(["hello, ", "every", 1, "!"]) )
#
# TypeError: sequence item 2: expected str instance, int found
# ```
# The same issue as before, we need to type-cast.
# In[37]:
print ( " ".join(["hello, ", "every", str(1), "!"]) )
# Notice the spaces? Again, with *the splitter* we are saying what each string is going to be separated by, so in this case, everything will be joined with spaces.
# #### Booleans
# Booleans are used to do comparisons: (true/false), (1/0), (yes/no)
# In[38]:
someCondition = True
type( someCondition )
# ##### Boolean Logic
# We will talk about boolean logic more in the next section (Comparisons)
# In[39]:
(someCondition == False)
# In[40]:
if (False):
print( "yes for False!" )
if (True):
print( "yes for True!" )
# ```{note}
# A more "traditional" way to do booleans is to use 0 and 1. In Python, any number other than 0 is True. Including negative numbers and decimals.
# ```
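# For instance (these lines are illustrative, not part of the original cells):
#
# ```python
# if (-3): print( "yes for -3!" )    # prints, because -3 is truthy
# if (0.5): print( "yes for 0.5!" )  # prints, because 0.5 is truthy
# ```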
# In[41]:
if (0):
print( "yes for 0!" )
if (1):
print( "yes for 1!" )
if (2):
print( | |
"""Google Cloud function that uploads chunk of products to the Content API."""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
import csv
import io
import json
import os
import re
import sys
import traceback
from typing import Any, Dict, List, Tuple
from flask import Response
from google.cloud import pubsub_v1
from google.cloud import secretmanager
from google.cloud import storage
from google.oauth2 import service_account
import google_auth_httplib2
from googleapiclient import discovery
from googleapiclient import http
CONTENT_API_SCOPE = 'https://www.googleapis.com/auth/content'
APPLICATION_NAME = 'mc_invoker'
SERVICE_NAME = 'content'
SERVICE_VERSION = 'v2.1'
MAX_PAGE_SIZE = 50
BUCKET_NAME = 'bucket_name'
PROJECT_ID = 'project_id'
DEPLOYMENT_NAME = 'deployment_name'
SOLUTION_PREFIX = 'solution_prefix'
REPORTING_TOPIC = 'reporting_topic'
CACHE_TTL_IN_HOURS = 'cache_ttl_in_hours'
FULL_PATH_TOPIC = 'full_path_topic'
MAX_ATTEMPTS = 'max_attempts'
ITEM_NOT_FOUND_MSG = 'item not found'
DELETE_METHOD = 'delete'
class NonRetryableError(Exception):
"""Exception raised for errors that shouldn't be retried.
Attributes:
message: explanation of the error
code: return code
"""
def __init__(self, message):
self.message = message
self.code = 200
super().__init__(self.message)
class RetryableError(Exception):
"""Exception raised for errors that should be retried.
Attributes:
message: explanation of the error
code: return code
"""
def __init__(self, message):
self.message = message
self.code = 500
super().__init__(self.message)
def _count_partial_errors(response: str, entries_dict: Dict[int, Dict[str, Any]]):
"""Counts the partial errors in the Content API response.
Args:
response: Content API upload response.
    entries_dict: Dictionary keyed by entry batchId, where each value is a
      dictionary describing that entry's characteristics (e.g. its method).
Returns:
    An integer representing the total number of partial errors in the response.
A list containing the code, message and number of times that each unique
error code was returned by the API for one of the products uploaded.
"""
error_count = 0
error_stats = {}
error_array = []
if response['kind'] == 'content#productsCustomBatchResponse':
entries = response['entries']
for entry in entries:
errors = entry.get('errors')
batchId=entry['batchId']
if errors:
if len(errors['errors'])==1 and errors['errors'][0]['message']==ITEM_NOT_FOUND_MSG and entries_dict[batchId]['method']==DELETE_METHOD:
continue
else:
print('Errors for batch entry %d:' % entry['batchId'])
print('A partial failure for batch entry '
f'{entry["batchId"]} occurred. Error messages are shown below.')
for error in errors['errors']:
if error['message']==ITEM_NOT_FOUND_MSG and entries_dict[batchId]['method']==DELETE_METHOD:
continue
else:
error_count += 1
error_message = error['message']
domain = error['domain']
reason = error['reason']
error_code = f'{error_message}_{domain}_{reason}'
if error_code not in error_stats:
error_stats[error_code] = {'count': 0}
error_stats[error_code]['message'] = error_message
error_stats[error_code]['count'] += 1
print(f' Error message: {error["message"]}, '
f'domain: {error["domain"]}, '
f'reason: {error["reason"]}')
for code_key in error_stats:
error_array.append({
'code': code_key,
'message': error_stats[code_key]['message'],
'count': error_stats[code_key]['count']
})
else:
print('There was an error. Response: %s' % response)
error_count += 1
error_stats['major'] = {
'count':
1,
'message':
            (f'A major error occurred as an invalid response was captured. '
             f'Response: {response}')
}
return error_count, error_array
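# Hedged illustration (not part of the original Cloud Function): a hand-written, minimal
# example of the productsCustomBatchResponse shape that _count_partial_errors() consumes,
# together with a matching entries_dict. All field values below are invented purely for
# demonstration purposes.
def _count_partial_errors_example():
    sample_response = {
        'kind': 'content#productsCustomBatchResponse',
        'entries': [{
            'batchId': 0,
            'errors': {
                'errors': [{
                    'message': 'invalid price',
                    'domain': 'content',
                    'reason': 'invalid',
                }]
            },
        }],
    }
    sample_entries_dict = {0: {'method': 'insert'}}
    # Expected result for this sample: one partial error, reported once in the error array.
    return _count_partial_errors(sample_response, sample_entries_dict)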
def _add_errors_to_input_data(data: Dict[str, Any],
num_errors: int) -> Dict[str, Any]:
"""Includes the error count to the input data.
Args:
data: The input data received in the trigger invocation.
num_errors: The number of errors to add.
Returns:
The input data enriched with the num errors.
"""
data['child']['num_errors'] = num_errors
return data
def _read_csv_from_blob(bucket_name: str,
blob_name: str,
delimiter: str = '\t'):
"""Function to read a blob containing a CSV file and return it as an array.
Args:
bucket_name: The name of the source bucket.
blob_name: The name of the file to move.
delimiter: Optional, the CSV delimiter character (tab character if empty).
Returns:
A csv reader object to be used to iterate.
"""
storage_client = storage.Client()
print('Reading {} from {}'.format(blob_name, bucket_name))
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
downloaded_blob = blob.download_as_string()
decoded_blob = downloaded_blob.decode('utf-8')
return csv.reader(io.StringIO(decoded_blob), delimiter=delimiter)
def _read_json_from_blob(bucket_name: str, blob_name: str):
"""Function to read a blob containing a json file and return it as a list.
Args:
bucket_name: The name of the source bucket.
blob_name: The name of the file to move.
Returns:
A list of objects extracted from the file.
"""
products = []
storage_client = storage.Client()
print('Reading {} from {}'.format(blob_name, bucket_name))
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
downloaded_blob = blob.download_as_string()
decoded_blob = downloaded_blob.decode('utf-8')
lines = decoded_blob.split('\n')
for line in lines:
if line:
# Remove BOM if present
line = line.replace('\ufeff', '')
product = json.loads(line)
products.append(product)
return products
def _mv_blob(bucket_name, blob_name, new_bucket_name, new_blob_name):
"""Function for moving files between directories or buckets in GCP.
Args:
bucket_name: The name of the source bucket.
blob_name: The name of the file to move.
new_bucket_name: The name of target bucket (can be equal the source one).
new_blob_name: name of file in target bucket.
Returns:
None.
"""
storage_client = storage.Client()
source_bucket = storage_client.get_bucket(bucket_name)
source_blob = source_bucket.blob(blob_name)
destination_bucket = storage_client.get_bucket(new_bucket_name)
# copy to new destination
source_bucket.copy_blob(source_blob, destination_bucket, new_blob_name)
# delete from source
source_blob.delete()
print(f'File moved from {blob_name} to {new_blob_name}')
def _mv_blob_if_last_try(task_retries, max_attempts, input_json, bucket_name):
"""Checks if it is the last attempt and moves the chunk to the failed folder.
Args:
task_retries: Retry number passed from Cloud Tasks.
max_attempts: Max number of configured retries.
input_json: Configuration information.
bucket_name: Name of the GCS file storing the chunks.
Returns:
None.
"""
if task_retries + 1 >= max_attempts:
datestamp = input_json['date']
chunk_filename = input_json['child']['file_name']
full_chunk_path = datestamp + '/slices_processing/' + chunk_filename
new_file_name = full_chunk_path.replace('slices_processing/',
'slices_failed/')
_mv_blob(bucket_name, full_chunk_path, bucket_name, new_file_name)
def _upload_products(service: discovery.Resource, env_info: Dict[str, Any],
job_info: Dict[str, Any], products, task_retries: int,
full_chunk_path: str):
"""Loads a chunk of products from GCS and sends it to the Content API.
Args:
service: Initialized service of a Content API client.
    env_info: GCP configuration extracted from environment variables.
job_info: Job configuration derived from the input_json object.
products: Chunk of products prepared to be uploaded.
task_retries: Number of retries performed in Cloud Tasks.
full_chunk_path: Full path to the chunk being processed.
Returns:
The status of the operation: 200 represents success or a non-retryable
error, 500 is a retryable error.
"""
try:
entries_dict = batch_dictionary(products)
response = _send_products_in_batch(service, products)
if response:
num_partial_errors, error_array = _count_partial_errors(response, entries_dict)
pubsub_payload = _add_errors_to_input_data(job_info['input_json'],
num_partial_errors)
if error_array:
pubsub_payload['child']['errors'] = error_array
_send_pubsub_message(env_info[PROJECT_ID], env_info[FULL_PATH_TOPIC],
pubsub_payload)
# Move blob to /slices_processed after a successful execution
new_file_name = full_chunk_path.replace('slices_processing/',
'slices_processed/')
_mv_blob(env_info[BUCKET_NAME], full_chunk_path, env_info[BUCKET_NAME],
new_file_name)
return 200
# pylint: disable=broad-except
except Exception:
print('Unexpected error while uploading products:', sys.exc_info()[0])
str_traceback = traceback.format_exc()
print('Unexpected exception traceback follows:')
print(str_traceback)
input_json = job_info['input_json']
pubsub_payload = _add_errors_to_input_data(input_json,
input_json['child']['num_rows'])
_send_pubsub_message(env_info[PROJECT_ID], env_info[FULL_PATH_TOPIC],
pubsub_payload)
# If last try, move blob to /slices_failed
_mv_blob_if_last_try(task_retries, env_info[MAX_ATTEMPTS], input_json,
env_info[BUCKET_NAME])
return 500
def _send_pubsub_message(project_id, reporting_topic, pubsub_payload):
"""Sends a pubsub message.
Args:
project_id: ID of the Google Cloud Project where the solution is deployed.
reporting_topic: Pub/Sub topic to use in the message to be sent.
pubsub_payload: Payload of the Pub/Sub message to be sent.
Returns:
None.
"""
publisher = pubsub_v1.PublisherClient()
topic_path_reporting = publisher.topic_path(project_id, reporting_topic)
publisher.publish(
topic_path_reporting, data=bytes(json.dumps(pubsub_payload),
'utf-8')).result()
def _blob_exists(bucket_name, blob_name):
"""Checks if a blob exists in Google Cloud Storage.
Args:
bucket_name: Name of the bucket to read the blob from.
blob_name: Name of the blob to check.
Returns:
Boolean indicating if the blob exists or not.
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
return storage.Blob(bucket=bucket, name=blob_name).exists(storage_client)
def _check_products_blob(datestamp, bucket_name, chunk_filename):
"""Checks if a blob exists either in the processing or processed directories.
Args:
datestamp: timestamp used to build the directory name
bucket_name: Name of the bucket to read the blob from
chunk_filename: Name of the chunk to check
Returns:
Full path to the blob or None if it cannot be found
"""
processing_chunk_name = datestamp + '/slices_processing/' + chunk_filename
if _blob_exists(bucket_name, processing_chunk_name):
print('Found products blob in slices_processing folder')
return processing_chunk_name
else:
processed_chunk_name = datestamp + '/slices_processed/' + chunk_filename
print('Looking for {}'.format(processing_chunk_name))
if _blob_exists(bucket_name, processed_chunk_name):
print('Found products blob in slices_processed folder')
return processed_chunk_name
else:
print('ERROR: Blob not found')
return None
def _mc_invoker_worker(service: discovery.Resource, env_info: Dict[str, Any],
job_info: Dict[str, Any], task_retries: int):
"""Loads a chunk of products from GCS and sends it to the Content API.
Args:
service: Initialized service of a Content API client.
    env_info: GCP configuration extracted from environment variables.
job_info: Job configuration derived from the input_json object.
task_retries: Number of retries performed in Cloud Tasks.
# ---------------------------*-python-*----------------------------------------#
# file interactive_utils.py
# author <NAME>
# date February 2019
# note Copyright (C) 2019, Los Alamos National Security, LLC.
# All rights reserved.
# -----------------------------------------------------------------------------#
'''
Interactive utilities define a set of function for command line interaction
.. autosummary::
get_option_num
get_key_num_vec
interactive_output_parser
interactive_dump_parser
interactive_tally_parser
'''
import sys
import re
import pickle
from os import listdir, chdir
from os.path import isfile, getmtime
from matplotlib.pyplot import *
import argparse
from numpy import *
from opppy.version import __version__
from opppy.dump_utils import *
from opppy.plot_dictionary import *
from opppy.plot_dump_dictionary import *
from opppy.output import *
from opppy.plotting_help import *
from opppy.tally import *
def get_option_num(nmax):
'''
Interactive request for a valid number in a range from 1 to nmax provided
to the user
'''
while(1):
nvar = input('Enter the option number for the variable to plot - ')
if nvar == '-q' or nvar == 'q': sys.exit()
if nvar == '': nvar = '1'
try:
nvar = int(nvar)
if nvar < 1 or nvar > nmax: raise
except:
print("Must chose between 1 and " + str(nmax))
continue
break
return nvar
def get_option_series_value(series_name, series_data):
'''
Interactive request for a valid series value
arguments:
series_name - string
series_data - numpy.array of available series data
'''
while(1):
value = input('Enter the series location value ('+series_name+' min='+str(min(series_data))+' max='+str(max(series_data))+') [default uses max_value]: ')
if value == '-q' or value == 'q': sys.exit()
if value == 'max_value': value = max(series_data)
if value == '': value = max(series_data)
try:
value = float(value)
if value < min(series_data) or value > max(series_data): raise
except:
print("Must chose between"+' min='+str(min(series_data))+' max='+str(max(series_data))+' : ')
continue
break
return value
def get_key_num_vec(keys):
'''
Interactive request for a valid set of numbers (e.g. [1, 2, 5]) in a range
from 1 to nmax provided to the user
'''
nmax = len(keys)+1
while(1):
nvar = input('Enter the option number to plot (this can be a vector [1,2,3,..]) - ')
if nvar == 'q': sys.exit()
if nvar == '': nvar = '1'
        # split number vector
nvar = re.findall(r'\d+', nvar)
vals = []
for var in nvar:
try:
val = int(var)
if val < 1 or val > nmax: raise
vals.append(val)
except:
print("Must chose between 1 and " + str(nmax))
continue
break
return_keys = []
for val in vals:
return_keys.append(keys[val-1])
return return_keys
class interactive_output_parser:
'''
This is an interactive output parser and plotting class. It provides three
basic interactive options:
    pickle_output generates a pickled python dictionary to be used for plotting
    plot_output provides pre-formatted interactive plotting
    plot_dictionary provides command line dictionary plotting of pickled python dictionaries
    A user must supply:
    opppy_parser a user-defined parser to extract cycle dictionary data from a user-defined output file
    option_string a user-defined string that specifies dictionary plotting options
    argument_parser a user-defined argparse object to attach the subparser options to
'''
def __init__(self, opppy_parser, option_string, argument_parser):
self.opppy_parser = opppy_parser
self.option_string = option_string
self.parser = argument_parser
self.subparser = self.parser.add_subparsers(help="Output options")
self.pickle_output_parser(self.subparser)
self.plot_dictionary_parser(self.subparser)
self.plot_output_parser(self.subparser)
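    # Hedged usage sketch (the names below are placeholders, not part of OPPPY itself):
    #
    #   argument_parser = argparse.ArgumentParser(description='my interactive output tool')
    #   tool = interactive_output_parser(my_opppy_parser, my_option_string, argument_parser)
    #   args = argument_parser.parse_args()
    #   args.func(args)   # dispatches to append_pickle / plot_dictionary / plot_output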
def append_pickle(self, args):
'''
append_pickle -
        This function generates or appends an OPPPY pickle file of dictionary
data from an output file.
arguments:
args - Parsed input arguments
'''
data = {}
data['version'] = __version__
try:
data = pickle.load(open(args.pickle_name,'rb'))
print("Appending to the existing pickle file - ", args.pickle_name)
except:
print("Generating a new pickle file - ", args.pickle_name)
if not 'version' in data or not (data['version'] == __version__):
print('')
print("Error: pickle file does not match this version of OPPPY")
if 'version' in data:
print(args.pickle_name, "was build with version", data['version'])
else:
print("This ", args.pickle_name," has no version")
print("This version of OPPPY is ", __version__)
print("Delete the old ", args.pickle_name, "file and rebuild it")
sys.exit(0)
# append new dictionary data to the pickle file
append_output_dictionary(data, args.output_files, self.opppy_parser, args.append_date)
pickle.dump(data,open(args.pickle_name,"wb"))
print("Output Data Saved To: ", args.pickle_name)
def pickle_output_parser(self, subparser):
pickle_parser = subparser.add_parser('pickle', help=" A simple example: pickle_output --pickle_file your_output_pickle_file.p --output_files you_output_files_to_pickle ")
pickle_parser.add_argument('-of','--output_files', dest='output_files', help='output files to generate/append the pickle file', nargs='+', required=True )
pickle_parser.add_argument('-pf','--pickle_file', dest='pickle_name', help='Pickle file name to be created or appended to', required=True )
pickle_parser.add_argument('-ad','--append_date', dest='append_date', help='Append the date and time to the output file name', nargs='?', type=bool, const=True, default=False)
pickle_parser.set_defaults(func=self.append_pickle)
def plot_dictionary_parser(self, subparser):
'''
Add a parser for the dictionary plotter to a user provided subparser
'''
plot_parser = subparser.add_parser('plot', help=" A simple example: plot_dictionary -pf your_pickle_file.p --dictionary_name mat_eng --x_data time --y_data mat_name ")
input_type_parser = plot_parser.add_mutually_exclusive_group(required=True)
input_type_parser.add_argument('-pf','--pickle_files', dest='pickle_files', help='pickle files to be plotted (run1.p run2.p etc...)', nargs='+' )
input_type_parser.add_argument('-of','--output_files', dest='output_files', help='output files to be parsed and plotted (output_file1.txt output_file2.txt etc...)', nargs='+', action='append')
self.dict_ploter = plot_dictionary()
self.dict_ploter.setup_parser(plot_parser)
plot_parser.set_defaults(func=self.plot_dictionary)
def plot_dictionary(self, args):
'''
Command line based dictionary plotting tool.
arguments:
args parsed dictionary plotting arguments
'''
dictionaries = []
file_names = []
if args.output_files is not None:
dictionaries, file_names = build_output_dictionary_list(args.output_files, self.opppy_parser)
else:
# get the dictionaries from the pickle files
file_names = args.pickle_files
for filename in args.pickle_files:
pickle_data = pickle.load(open(filename,'rb'))
dictionaries.append(pickle_data)
# plot dictionaries based on input arguments
self.dict_ploter.plot_dict(args,dictionaries,file_names)
def plot_output_parser(self, subparser):
plot_output_parser = subparser.add_parser('iplot',help='Load a previously created pickle files (your_run.p) for interactive plotting or a set of output files to be parsed and plotted')
input_type_parser = plot_output_parser.add_mutually_exclusive_group(required=True)
input_type_parser.add_argument('-pf','--pickle_files', dest='pickle_files', help='pickle files to be plotted (run1.p run2.p etc...)', nargs='+' )
input_type_parser.add_argument('-of','--output_files', dest='output_files', help='output files to be parsed and plotted (output_file1.txt output_file2.txt etc...)', nargs='+', action='append')
plot_output_parser.set_defaults(func=self.plot_output)
def get_plot_option(self):
'''
Interactive request for valid plotting options
'''
while(1):
opt = input('Additional options (-h for list and -q to quit): ')
input_args = opt.split()
parser = self.get_interactive_plot_parser()
try:
args=parser.parse_args(input_args)
break
except:
parser.print_help()
if args.quit:
sys.exit(0)
return args
def get_interactive_plot_parser(self):
'''
return parser object that contains interactive plotting options
'''
parser = argparse.ArgumentParser(description=" Output Plotting options ",
epilog =" Specify the desired plotting options ", usage='')
parser.add_argument('-q','--quit', dest='quit', help='quit program', nargs='?', type=bool, const=True, default=False)
parser.add_argument('-n','--new', dest='new', help='generate a new plot', nargs='?', type=bool, const=True, default=False)
parser.add_argument('-bg','--background', dest='background', help='keep plot in background', nargs='?', type=bool, const=True, default=False)
parser.add_argument('-p','--plot', dest='plot', help='re-open plot', nargs='?', type=bool, const=True, default=False)
parser.add_argument('-l','--labels', dest='legend_labels', help='specify the legend labels [line1_label, line2_label,...]', type=str, nargs='+')
parser.add_argument('-rs','--resize', dest='plot_size', help='specify the plot size [x_size, y_size]', type=float, nargs=2)
add_plot_options(parser)
return parser
def plot_output(self, args):
'''
This is an interactive plotter for a python dictionary.
        The plotting options are specified via an option string.
        arguments:
            args - argparse data structure with pickle name info
option_string - a string to be passed to get_plot_options for pre-designed plots
'''
dictionary_data=[]
dictionary_names=[]
if args.output_files is not None:
dictionary_data, dictionary_names = build_output_dictionary_list(args.output_files, self.opppy_parser)
else:
# We no longer flatten this data
#file_list = []
#for sublist in args.pickle_files:
# for item in sublist:
# file_list.append(item)
# get the dictionaries from the pickle files
for pickle_file_name in args.pickle_files:
dictionary_names.append(pickle_file_name.split('/')[-1].split('.p')[0])
dictionary_data.append(pickle.load(open(pickle_file_name,'rb')))
option_parser = self.get_interactive_plot_parser()
option = option_parser.parse_args(["--new"])
ptype = []
while(1):
if option.new or option.background:
close()
plot_labels = self.get_plot_options(self.option_string)
xsize = 8
ysize = 5
try:
fig = figure(figsize=(xsize,ysize))
except:
PyPloter.switch_backend('agg')
fig = figure(figsize=(xsize,ysize))
xlog_flag = 0
ylog_flag = 0
counter = 1
labels = []
for i in range(len(plot_labels)):
if self.dict_ploter.is_data_available(plot_labels[i][-1],dictionary_data[0]):
labels.append(plot_labels[i])
counter = counter + 1
for i in range(0,len(labels),2):
if i+1<counter-1:
print('%3i %-50s %3i %-50s' %(i+1, labels[i][0], i+2, labels[i+1][0]))
else:
print('%3i %-50s' %(i+1, labels[i][0]))
plot_num = get_option_num(counter)-1
label = labels[plot_num][0]
plot_args = labels[plot_num][-1]
if plot_args.y_value_names[0] == "select_key":
keys = list(dictionary_data[-1][plot_args.dictionary_name].keys())
keys.remove('cycle')
keys.remove('time')
for i, key in zip(list(range(len(keys))),keys):
if (i & 1)==0:
print('%3i %-50s' %(i+1, key), end=' ')
else:
print('%3i %-50s' %(i+1, key))
print()
plot_args.y_value_names = get_key_num_vec(keys)
last_xmin = None
last_xmax = None
last_ymin = None
last_ymax = None
if len(plot_args.scale_x) != len(dictionary_data):
if len(plot_args.scale_x) == 0:
plot_args.scale_x = [1.0]*len(dictionary_data)
else:
plot_args.scale_x = [plot_args.scale_x[-1]]*len(dictionary_data)
if len(plot_args.scale_y) != len(dictionary_data):
if len(plot_args.scale_y) == 0:
plot_args.scale_y = [1.0]*len(dictionary_data)
else:
plot_args.scale_y = [plot_args.scale_y[-1]]*len(dictionary_data)
for dictionary, name, scale_x, scale_y in zip(dictionary_data, dictionary_names, plot_args.scale_x, plot_args.scale_y):
data = dictionary[plot_args.dictionary_name]
xmin = []
xmax = []
ymin = []
ymax = []
plabels = []
x = []
y = []
xmin.append(min(data[plot_args.x_value_name])*scale_x)
xmax.append(max(data[plot_args.x_value_name])*scale_x)
ymin.append(min(data[plot_args.y_value_names[0]])*scale_y)
ymax.append(max(data[plot_args.y_value_names[0]])*scale_y)
# material specific plot
            for yname
# src/aaindex_encoding_ResLikeCNN.py
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers,regularizers
import pandas as pd
from sklearn.metrics import roc_curve, auc, precision_recall_curve, confusion_matrix, f1_score,accuracy_score
import matplotlib.pyplot as plt
import numpy as np
class ResBlock(layers.Layer):
def __init__(self,in_channel,pool_size):
super(ResBlock,self).__init__()
intermediate_channel = in_channel
out_channel = in_channel * 2
self.conv1 = layers.Conv2D(filters=intermediate_channel,kernel_size=(1,1),strides=(1,1),padding='same')
self.bn1 = layers.BatchNormalization()
self.conv2 = layers.Conv2D(filters=intermediate_channel,kernel_size=(3,1),strides=(1,1),padding='same')
self.bn2 = layers.BatchNormalization()
self.conv3 = layers.Conv2D(filters=out_channel,kernel_size=(1,1),strides=(1,1),padding='same')
self.bn3 = layers.BatchNormalization()
self.identity = layers.Conv2D(filters=out_channel,kernel_size=(1,1),strides=(1,1))
self.maxpool = layers.MaxPool2D(pool_size=pool_size,strides=pool_size)
def call(self,x):
out = keras.activations.relu(self.bn1(self.conv1(x))) # (8,1,16)
out = keras.activations.relu(self.bn2(self.conv2(out))) # (8,1,16)
out = keras.activations.relu(self.bn3(self.conv3(out))) # (8,1,32)
identity_map = self.identity(x) # change original input (8,1,16) --> (8,1,32)
out = out + identity_map # (8,1,32)
out = self.maxpool(out) # (4,1,32)
return out
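# Hedged shape-check sketch (not part of the original training script): run a dummy
# peptide-branch tensor through one ResBlock to confirm the behaviour annotated in the
# comments above, i.e. the channels double and the spatial dimension is halved
# ((8,1,16) -> (4,1,32)). The batch size of 2 is arbitrary.
def _resblock_shape_check_example():
    x = tf.random.normal([2, 8, 1, 16])          # dummy input: batch of 2, shape (8,1,16)
    block = ResBlock(in_channel=16, pool_size=(2, 1))
    y = block(x)
    return y.shape                               # expected: TensorShape([2, 4, 1, 32])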
class CNN_peptide_aaindex(layers.Layer):
def __init__(self):
super(CNN_peptide_aaindex,self).__init__()
self.conv = layers.Conv2D(filters=16,kernel_size=(3,12),strides=(1,1))
self.block1 = ResBlock(16,(2,1))
self.block2 = ResBlock(32,(2,1))
self.block3 = ResBlock(64,(2,1))
    def call(self,x): # (10,12,1)
out = self.conv(x) # (8,1,16)
out = self.block1(out) # (4,1,32)
out = self.block2(out) # (2,1,64)
out = self.block3(out) # (1,1,128)
return out
class CNN_MHC_aaindex(layers.Layer):
def __init__(self):
super(CNN_MHC_aaindex,self).__init__()
self.conv = layers.Conv2D(filters=16,kernel_size=(15,12),strides=(1,1)) # (32,1,16)
self.block1 = ResBlock(16, (2, 1)) # (16,1,32)
self.block2 = ResBlock(32, (2, 1)) # (8,1,64)
self.block3 = ResBlock(64, (2, 1)) # (4,1,128)
self.conv_add = layers.Conv2D(filters=128,kernel_size=(4,1),strides=(1,1))
self.bn = layers.BatchNormalization()
def call(self, x):
out = self.conv(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = keras.activations.relu(self.bn(self.conv_add(out))) # (1,1,128)
return out
class model_aaindex(keras.Model):
def __init__(self):
super(model_aaindex,self).__init__()
self.br_pep = CNN_peptide_aaindex()
self.br_mhc = CNN_MHC_aaindex()
self.flatten = layers.Flatten()
self.fc1 = layers.Dense(128,activation='relu')
self.fc2 = layers.Dense(1,activation='sigmoid')
def call(self,input):
x1,x2 = input[0],input[1] # x1: (10,12,1) x2: (46,12,1)
out1 = self.flatten(self.br_pep(x1))
out2 = self.flatten(self.br_mhc(x2))
out = layers.concatenate([out1,out2])
out = self.fc1(out)
out = self.fc2(out)
return out
def model(self):
x1 = keras.Input(shape=(10,12,1))
x2 = keras.Input(shape=(46,12,1))
return keras.Model(inputs=[x1,x2],outputs=self.call([x1,x2]))
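# Hedged sketch (not in the original script): wire up the two-branch network with its
# documented input shapes ((10,12,1) peptide, (46,12,1) HLA pseudo-sequence) and return
# the functional Keras model so its layers and output shapes can be inspected, e.g. via
# .summary().
def _model_aaindex_graph_example():
    return model_aaindex().model()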
def aaindex(peptide,after_pca):
amino = 'ARNDCQEGHILKMFPSTWYV-'
matrix = np.transpose(after_pca) # [12,21]
encoded = np.empty([len(peptide), 12]) # (seq_len,12)
for i in range(len(peptide)):
query = peptide[i]
if query == 'X': query = '-'
query = query.upper()
encoded[i, :] = matrix[:, amino.index(query)]
return encoded
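# Hedged encoding sketch (not in the original script): the real `after_pca` matrix is
# loaded from after_pca.txt and has shape (21, 12), one row per amino acid (plus the gap
# character '-') and 12 PCA components. A random stand-in is used here purely to show the
# resulting encoding shape.
def _aaindex_encoding_example():
    dummy_after_pca = np.random.rand(21, 12)     # stand-in for the PCA-reduced AAindex table
    encoded = aaindex('ACDEFGHIK', dummy_after_pca)
    return encoded.shape                         # expected: (9, 12)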
# def aaindex(peptide,after_pca):
# amino = 'ARNDCQEGHILKMFPSTWYV-'
# encoded = np.empty([len(peptide),21])
# onehot = np.identity(21)
# for i in range(len(peptide)):
# query = peptide[i]
# if query == 'X': query = '-'
# query = query.upper()
# encoded[i,:] = onehot[:,amino.index(query)]
# return encoded
def pull_peptide_aaindex(dataset):
result = np.empty([len(dataset),10,12,1])
for i in range(len(dataset)):
result[i,:,:,:] = dataset[i][0]
return result
def pull_hla_aaindex(dataset):
result = np.empty([len(dataset),46,12,1])
for i in range(len(dataset)):
result[i,:,:,:] = dataset[i][1]
return result
def pull_label_aaindex(dataset):
result = np.empty([len(dataset),1])
for i in range(len(dataset)):
result[i,:] = dataset[i][2]
return result
def retain_910(ori):
cond = []
for i in range(ori.shape[0]):
peptide = ori['peptide'].iloc[i]
if len(peptide) == 9 or len(peptide) == 10:
cond.append(True)
else:
cond.append(False)
data = ori.loc[cond]
data = data.set_index(pd.Index(np.arange(data.shape[0])))
return data
def wrapper_train():
# train
reslike_model = model_aaindex()
reslike_model.compile(
loss=keras.losses.MeanSquaredError(),
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'])
callback_val = keras.callbacks.EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=False)
callback_train = keras.callbacks.EarlyStopping(monitor='loss', patience=2, restore_best_weights=False)
history = reslike_model.fit(
x=[train_input1, train_input2], # feed a list into
y=train_label,
validation_data=([test_input1, test_input2], test_label),
batch_size=128,
epochs=200,
class_weight={0: 0.5, 1: 0.5}, # I have 20% positive and 80% negative in my training data
callbacks=[callback_val, callback_train])
return reslike_model
def hla_df_to_dic(hla):
dic = {}
for i in range(hla.shape[0]):
col1 = hla['HLA'].iloc[i] # HLA allele
col2 = hla['pseudo'].iloc[i] # pseudo sequence
dic[col1] = col2
return dic
def dict_inventory(inventory):
dicA, dicB, dicC = {}, {}, {}
dic = {'A': dicA, 'B': dicB, 'C': dicC}
for hla in inventory:
type_ = hla[4] # A,B,C
first2 = hla[6:8] # 01
last2 = hla[8:] # 01
try:
dic[type_][first2].append(last2)
except KeyError:
dic[type_][first2] = []
dic[type_][first2].append(last2)
return dic
def peptide_data_aaindex(peptide,after_pca): # return numpy array [10,12,1]
length = len(peptide)
if length == 10:
encode = aaindex(peptide,after_pca)
elif length == 9:
peptide = peptide[:5] + '-' + peptide[5:]
encode = aaindex(peptide,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def dict_inventory(inventory):
dicA, dicB, dicC = {}, {}, {}
dic = {'A': dicA, 'B': dicB, 'C': dicC}
for hla in inventory:
type_ = hla[4] # A,B,C
first2 = hla[6:8] # 01
last2 = hla[8:] # 01
try:
dic[type_][first2].append(last2)
except KeyError:
dic[type_][first2] = []
dic[type_][first2].append(last2)
return dic
def rescue_unknown_hla(hla, dic_inventory):
type_ = hla[4]
first2 = hla[6:8]
last2 = hla[8:]
big_category = dic_inventory[type_]
#print(hla)
if not big_category.get(first2) == None:
small_category = big_category.get(first2)
distance = [abs(int(last2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(first2) + str(optimal)
else:
small_category = list(big_category.keys())
distance = [abs(int(first2) - int(i)) for i in small_category]
optimal = min(zip(small_category, distance), key=lambda x: x[1])[0]
return 'HLA-' + str(type_) + '*' + str(optimal) + str(big_category[optimal][0])
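# Hedged sketch (not in the original script): with a toy inventory, an HLA allele that is
# missing from the pseudo-sequence table gets mapped to its nearest available neighbour.
def _rescue_unknown_hla_example():
    toy_inventory = ['HLA-A*0101', 'HLA-A*0201', 'HLA-B*0702']
    toy_dic = dict_inventory(toy_inventory)
    return rescue_unknown_hla('HLA-A*0205', toy_dic)   # expected: 'HLA-A*0201'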
def hla_data_aaindex(hla_dic,hla_type,after_pca): # return numpy array [34,12,1]
try:
seq = hla_dic[hla_type]
except KeyError:
hla_type = rescue_unknown_hla(hla_type,dic_inventory)
seq = hla_dic[hla_type]
encode = aaindex(seq,after_pca)
encode = encode.reshape(encode.shape[0], encode.shape[1], -1)
return encode
def construct_aaindex(ori,hla_dic,after_pca):
series = []
for i in range(ori.shape[0]):
peptide = ori['peptide'].iloc[i]
hla_type = ori['HLA'].iloc[i]
immuno = np.array(ori['immunogenicity'].iloc[i]).reshape(1,-1) # [1,1]
encode_pep = peptide_data_aaindex(peptide,after_pca) # [10,12]
encode_hla = hla_data_aaindex(hla_dic,hla_type,after_pca) # [46,12]
series.append((encode_pep, encode_hla, immuno))
return series
def hla_df_to_dic(hla):
dic = {}
for i in range(hla.shape[0]):
col1 = hla['HLA'].iloc[i] # HLA allele
col2 = hla['pseudo'].iloc[i] # pseudo sequence
dic[col1] = col2
return dic
if __name__ == '__main__':
after_pca = np.loadtxt('immuno2/data/after_pca.txt')
ori = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/remove0123_sample100.csv')
ori = ori.sample(frac=1,replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
hla = pd.read_csv('immuno2/data/hla2paratopeTable_aligned.txt',sep='\t')
hla_dic = hla_df_to_dic(hla)
inventory = list(hla_dic.keys())
dic_inventory = dict_inventory(inventory)
ori['immunogenicity'],ori['potential'] = ori['potential'],ori['immunogenicity']
dataset = construct_aaindex(ori,hla_dic,after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
from sklearn.model_selection import KFold
kf = KFold(n_splits=10)
fold_indices = list(kf.split(np.arange(input1.shape[0])))
holding = {'validation':[],'dengue':[],'cell':[],'covid':[]}
i = 1
for fold in fold_indices:
# split
ori = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/remove0123_sample100.csv')
ori = ori.sample(frac=1, replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
ori['immunogenicity'], ori['potential'] = ori['potential'], ori['immunogenicity']
dataset = construct_aaindex(ori, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
train_input1, train_input2, train_label = input1[fold[0]],input2[fold[0]],label[fold[0]]
test_input1, test_input2, test_label = input1[fold[1]],input2[fold[1]],label[fold[1]]
print('round {}, split finished'.format(i))
# train
reslike_model = wrapper_train()
# predict in validation set
result = reslike_model.predict([test_input1,test_input2])
from sklearn.metrics import mean_squared_error
loss = mean_squared_error(test_label,result,squared=False)
holding['validation'].append(loss)
print('round {}, finished validation'.format(i))
# predict in dengue
ori_test_dengue = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/dengue_test.csv')
dataset = construct_aaindex(ori_test_dengue, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
prediction = reslike_model.predict([input1,input2])
from sklearn.metrics import accuracy_score,recall_score,precision_score
hard = [1 if item >= 0.5 else 0 for item in prediction]
result = accuracy_score(label, hard)
holding['dengue'].append(result)
print('round {}, finished dengue'.format(i))
# predict in cell
ori_test_cell = pd.read_csv(
'/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/complete_data_filter910.txt', sep='\t')
dataset = construct_aaindex(ori_test_cell, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
prediction = reslike_model.predict([input1,input2])
hard = [1 if item >= 0.5 else 0 for item in prediction]
result1 = recall_score(label, hard) # recall
ori_test_cell['result'] = prediction
ori_test_cell = ori_test_cell.sort_values(by='result', ascending=False).set_index(
pd.Index(np.arange(ori_test_cell.shape[0])))
result2 = np.count_nonzero(ori_test_cell['immunogenicity'].values[:20] == 1) # top20
result3 = np.count_nonzero(ori_test_cell['immunogenicity'].values[:50] == 1) # top50
holding['cell'].append((result1, result2, result3))
print('round {}, finished cell'.format(i))
# predict in covid
ori = pd.read_csv('/Users/ligk2e/Desktop/sars_cov_2.txt', sep='\t')
ori = ori.sample(frac=1, replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
ori_test_covid = retain_910(ori)
dataset = construct_aaindex(ori_test_covid, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
prediction = reslike_model.predict([input1,input2])
hard = [1 if item >= 0.5 else 0 for item in prediction]
result1 = recall_score(ori_test_covid['immunogenicity-con'], hard) # convalescent recall
result2 = recall_score(ori_test_covid['immunogenicity'], hard) # unexposed recall
result3 = precision_score(ori_test_covid['immunogenicity-con'], hard) # convalescent recall
result4 = precision_score(ori_test_covid['immunogenicity'], hard) # unexposed recall
holding['covid'].append((result1, result2, result3, result4))
print('round {}, finished covid'.format(i))
i += 1
# onehot + paratope
after_pca = np.loadtxt('immuno2/data/after_pca.txt')
ori = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/remove0123_sample100.csv')
ori = ori.sample(frac=1,replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
hla = pd.read_csv('immuno2/data/hla2paratopeTable_aligned.txt',sep='\t')
hla_dic = hla_df_to_dic(hla)
inventory = list(hla_dic.keys())
dic_inventory = dict_inventory(inventory)
ori['immunogenicity'],ori['potential'] = ori['potential'],ori['immunogenicity']
dataset = construct_aaindex(ori,hla_dic,after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
from sklearn.model_selection import KFold
kf = KFold(n_splits=10)
fold_indices = list(kf.split(np.arange(input1.shape[0])))
holding = {'validation':[],'dengue':[],'cell':[],'covid':[]}
i = 1
for fold in fold_indices:
# split
ori = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/remove0123_sample100.csv')
ori = ori.sample(frac=1, replace=False).set_index(pd.Index(np.arange(ori.shape[0])))
ori['immunogenicity'], ori['potential'] = ori['potential'], ori['immunogenicity']
dataset = construct_aaindex(ori, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
train_input1, train_input2, train_label = input1[fold[0]],input2[fold[0]],label[fold[0]]
test_input1, test_input2, test_label = input1[fold[1]],input2[fold[1]],label[fold[1]]
print('round {}, split finished'.format(i))
# train
reslike_model = wrapper_train()
# predict in validation set
result = reslike_model.predict([test_input1,test_input2])
from sklearn.metrics import mean_squared_error
loss = mean_squared_error(test_label,result,squared=False)
holding['validation'].append(loss)
print('round {}, finished validation'.format(i))
# predict in dengue
ori_test_dengue = pd.read_csv('/Users/ligk2e/Desktop/immuno3/data/remove_low_negative/dengue_test.csv')
dataset = construct_aaindex(ori_test_dengue, hla_dic, after_pca)
input1 = pull_peptide_aaindex(dataset)
input2 = pull_hla_aaindex(dataset)
label = pull_label_aaindex(dataset)
prediction = reslike_model.predict([input1,input2])
from sklearn.metrics import accuracy_score,recall_score,precision_score
hard = [1 if item >= 0.5 else 0 for item in prediction]
        result = accuracy_score(label, hard)
# Source repository: broadinstitute/celligner2
import inspect
from lib2to3.pgen2.token import OP
import os
import anndata
from numpy.lib.function_base import _percentile_dispatcher
import torch
import pickle
import numpy as np
import pandas as pd
from anndata import AnnData
from typing import Optional, Union
from .celligner2 import Celligner2
from celligner2.trainers.celligner2.semisupervised import Celligner2Trainer
from celligner2.othermodels.base._base import BaseMixin, SurgeryMixin, CVAELatentsMixin
from celligner2.dataset.celligner2._utils import label_encoder_2D
class CELLIGNER2(BaseMixin, SurgeryMixin, CVAELatentsMixin):
"""Model for scArches class. This class contains the implementation of Conditional Variational Auto-encoder.
Parameters
----------
adata: : `~anndata.AnnData`
Annotated data matrix. Has to be count data for 'nb' and 'zinb' loss and normalized log transformed data
for 'mse' loss.
condition_keys: String
column name of conditions in `adata.obs` data frame.
conditions: List
List of Condition names that the used data will contain to get the right encoding when used after reloading.
hidden_layer_sizes: List
A list of hidden layer sizes for encoder network. Decoder network will be the reversed order.
latent_dim: Integer
Bottleneck layer (z) size.
dr_rate: Float
        Dropout rate applied to all layers, if `dr_rate`==0 no dropout will be applied.
use_mmd: Boolean
If 'True' an additional MMD loss will be calculated on the latent dim. 'z' or the first decoder layer 'y'.
mmd_on: String
Choose on which layer MMD loss will be calculated on if 'use_mmd=True': 'z' for latent dim or 'y' for first
decoder layer.
mmd_boundary: Integer or None
Choose on how many conditions the MMD loss should be calculated on. If 'None' MMD will be calculated on all
conditions.
recon_loss: String
Definition of Reconstruction-Loss-Method, 'mse', 'nb' or 'zinb'.
beta: Float
Scaling Factor for MMD loss
use_bn: Boolean
If `True` batch normalization will be applied to layers.
use_ln: Boolean
If `True` layer normalization will be applied to layers.
mask: Array or List
if not None, an array of 0s and 1s from utils.add_annotations to create VAE with a masked linear decoder.
mask_key: String
A key in `adata.varm` for the mask if the mask is not provided.
soft_mask: Boolean
        Use soft mask option. If True, the model will enforce the mask with L1 regularization
        instead of multiplying the weight of the linear decoder by the binary mask.
n_unconstrained: Integer
Number of unconstrained terms in the latent layer.
use_hsic: Boolean
        If True, add HSIC regularization for unconstrained extension terms.
        Used for query mapping.
    hsic_one_vs_all: Boolean
        If True, calculates the sum of HSIC losses for each unconstrained term vs the other terms.
        If False, calculates HSIC for all unconstrained terms vs the other terms.
Used for query mapping."""
def __init__(
self,
adata: AnnData,
condition_keys: Optional[list] = None,
conditions: Optional[list] = None,
hidden_layer_sizes: list = [256, 64],
classifier_hidden_layer_sizes: list = [64, 32],
latent_dim: int = 10,
dr_rate: float = 0.05,
use_mmd: bool = True,
mmd_on: str = "z",
mmd_boundary: Optional[int] = None,
recon_loss: Optional[str] = "nb",
beta: float = 1,
betaclass: float = 0.8,
use_bn: bool = False,
use_ln: bool = True,
predictors: Optional[list] = None,
predictor_keys: Optional[list] = [],
use_own_kl: bool = False,
miss: str = "U",
apply_log: bool = True,
mask: Optional[Union[np.ndarray, list]] = None,
mask_key: str = "",
n_unconstrained: int = 0,
use_hsic: bool = False,
hsic_one_vs_all: bool = False,
main_dataset=None,
use_l_encoder: bool = False,
# only on load
n_expand: int = 0,
ext_mask: Optional[Union[np.ndarray, list]] = None,
ext_n_unconstrained: int = 0,
predictor_set={},
condition_set={},
):
self.adata = adata
self.goodloc = ~np.isnan(adata.X)
self.condition_keys_ = condition_keys
self.predictor_keys_ = predictor_keys
if conditions is None:
if condition_keys is not None:
myset = set()
for condition_key in condition_keys:
if len(set(adata.obs[condition_key]) & set(miss)) > 0:
raise ValueError(
"Condition key '{}' has missing values. the model can't deal \
with missing values in its condition keys for now, \
you can run them as predictor to impute them from the data".format(
condition_key
)
)
group = set(adata.obs[condition_key]) - set(miss)
overlap = group & myset
if len(overlap) > 0:
adata.obs.replace(
{
condition_key: {
val: condition_key + "_" + val for val in group
}
},
inplace=True,
)
myset = myset | set(adata.obs[condition_key])
self.conditions_ = list(
set(adata.obs[condition_keys].values.flatten()) - set(miss)
)
else:
self.conditions_ = []
else:
self.conditions_ = conditions
# TODO: add a version when no condition_keys are provided
if condition_keys is not None:
self.condition_set_ = {
key: set(adata.obs[key]) - set(miss) for key in condition_keys
}
# we only want the current's adata condition set.
if predictors is None:
if predictor_keys is not None:
myset = set()
for predictor_key in predictor_keys:
group = set(adata.obs[predictor_key]) - set(miss)
if len(group) == 0:
raise ValueError(
"Predictor key '{}' has no values. please check your obs".format(
predictor_key
)
)
overlap = group & myset
if len(overlap) > 0:
adata.obs.replace(
{
predictor_key: {
val: predictor_key + "_" + val for val in group
}
},
inplace=True,
)
myset = myset | set(adata.obs[predictor_key])
self.predictors_ = list(
set(adata.obs[predictor_keys].values.flatten()) - set(miss)
)
else:
self.predictors_ = []
else:
self.predictors_ = predictors
if predictor_keys is not None:
self.predictor_set_ = {
key: set(adata.obs[key]) - set(miss) for key in predictor_keys
}
else:
self.predictor_set_ = {}
for k, v in predictor_set.items():
self.predictor_set_[k] = set(v) & set(self.predictor_set_[k])
self.miss_ = miss
self.hidden_layer_sizes_ = hidden_layer_sizes
self.classifier_hidden_layer_sizes_ = classifier_hidden_layer_sizes
self.latent_dim_ = latent_dim
self.dr_rate_ = dr_rate
self.use_mmd_ = use_mmd
self.mmd_on_ = mmd_on
self.mmd_boundary_ = mmd_boundary
# expimap mode params
self.expimap_mode_ = False
if mask_key != "":
mask = adata.varm[mask_key].T
if mask is not None:
mask = mask if isinstance(mask, list) else mask.tolist()
self.mask_ = torch.tensor(mask).float()
self.expimap_mode_ = True
self.latent_dim_ = len(self.mask_) + n_unconstrained
else:
self.mask_ = None
self.n_unconstrained_ = n_unconstrained
self.use_hsic_ = use_hsic
self.hsic_one_vs_all_ = hsic_one_vs_all
self.ext_mask_ = ext_mask
# end of expimap mode params
self.recon_loss_ = recon_loss
self.beta_ = beta
self.betaclass_ = betaclass
self.use_bn_ = use_bn
self.use_ln_ = use_ln
self.use_own_kl_ = use_own_kl
self.n_expand_ = n_expand
self.use_l_encoder_ = use_l_encoder
self.ext_n_unconstrained_ = ext_n_unconstrained
self.input_dim_ = adata.n_vars
self.apply_log_ = apply_log
if main_dataset not in set(adata.obs[condition_keys].values.flatten()):
print("main dataset not in conditions, removing..")
self.main_dataset_ = None
else:
self.main_dataset_ = main_dataset
self.model = Celligner2(
self.input_dim_,
self.conditions_,
self.predictors_,
self.hidden_layer_sizes_,
self.classifier_hidden_layer_sizes_,
self.latent_dim_,
self.dr_rate_,
self.use_mmd_,
self.use_own_kl_,
self.mmd_on_,
self.mmd_boundary_,
self.recon_loss_,
self.beta_,
self.betaclass_,
self.use_bn_,
self.use_ln_,
self.apply_log_,
self.main_dataset_,
self.n_expand_,
# expimap mode params
self.expimap_mode_,
self.mask_,
self.ext_mask_,
self.n_unconstrained_,
self.ext_n_unconstrained_,
self.use_l_encoder_,
self.use_hsic_,
self.hsic_one_vs_all_,
)
self.is_trained_ = False
self.trainer = None
def train(self, n_epochs: int = 400, lr: float = 1e-3, eps: float = 0.01, **kwargs):
"""Train the model.
Parameters
----------
n_epochs
Number of epochs for training the model.
lr
Learning rate for training the model.
eps
torch.optim.Adam eps parameter
kwargs
kwargs for the TrVAE trainer.
"""
self.trainer = Celligner2Trainer(
self.model,
self.adata,
condition_keys=self.condition_keys_,
predictor_keys=self.predictor_keys_,
**kwargs
)
self.trainer.train(n_epochs, lr, eps)
self.is_trained_ = True
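    # Hedged usage sketch (keys and hyper-parameter values below are placeholders, not
    # defaults from this repository):
    #
    #   model = CELLIGNER2(adata, condition_keys=['dataset'], predictor_keys=['lineage'])
    #   model.train(n_epochs=100, lr=1e-3)
    #   latent = model.get_latent(mean=True)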
@classmethod
def _get_init_params_from_dict(cls, dct):
print(dct.keys())
init_params = {
"condition_keys": dct["condition_keys_"],
"conditions": dct["conditions_"],
"hidden_layer_sizes": dct["hidden_layer_sizes_"],
"classifier_hidden_layer_sizes": dct["classifier_hidden_layer_sizes_"]
if "classifier_hidden_layer_sizes_" in dct.keys()
else [],
"latent_dim": dct["latent_dim_"],
"dr_rate": dct["dr_rate_"],
"use_mmd": dct["use_mmd_"],
"mmd_on": dct["mmd_on_"],
"mmd_boundary": dct["mmd_boundary_"],
"recon_loss": dct["recon_loss_"],
"beta": dct["beta_"],
"betaclass": dct["betaclass_"],
"use_bn": dct["use_bn_"],
"use_ln": dct["use_ln_"],
"use_own_kl": dct["use_own_kl_"],
"predictors": dct["predictors_"],
"predictor_keys": dct["predictor_keys_"],
"miss": dct["miss_"],
"apply_log": dct["apply_log_"],
"predictor_set": dct["predictor_set_"]
if "predictor_set_" in dct.keys()
else {},
"condition_set": dct["condition_set_"]
if "condition_set_" in dct.keys()
else {},
# main dataset mode params
"main_dataset": dct["main_dataset_"]
if "main_dataset_" in dct.keys()
else None,
# expimap mode params
"mask": dct["mask_"] if "mask_" in dct.keys() else None,
"mask_key": dct["mask_key_"] if "mask_key_" in dct.keys() else "",
"n_unconstrained": dct["n_unconstrained_"]
if "n_unconstrained_" in dct.keys()
else 0,
"use_hsic": dct["use_hsic_"] if "use_hsic_" in dct.keys() else False,
"hsic_one_vs_all": dct["hsic_one_vs_all_"]
if "hsic_one_vs_all_" in dct.keys()
else False,
}
return init_params
@classmethod
def _validate_adata(cls, adata, dct):
if adata.n_vars != dct["input_dim_"]:
raise ValueError("Incorrect var dimension")
def get_latent(
self,
adata: Optional[AnnData] = None,
mean: bool = False,
add_classpred: bool = False,
get_fullpred: bool = False,
):
"""Map `x` in to the latent space. This function will feed data in encoder and return z for each sample in
data.
Parameters
----------
x
Numpy nd-array to be mapped to latent space. `x` has to be in shape [n_obs, input_dim].
If None, then `self.adata.X` is used.
c
`numpy nd-array` of original (unencoded) desired labels for each sample.
mean
return mean instead of random sample from the latent space
Returns
-------
Returns array containing latent space encoding of 'x'.
"""
device = next(self.model.parameters()).device
wasnull = False
if adata is None:
import os
import unittest
import uuid
import ast
import time
import threading
from wsgiref import simple_server
import urllib
import json
import requests
from server.gateway import app
from server.verbs import Visit
from server.doctor import register_doctor, get_doctor, edit_doctor
from server.patient import register_patient, get_patient, edit_patient
from server.appointment import make_appointment, get_appointment, check_appointment
from server.obj import upload_obj, get_obj, get_objs, delete_obj
from server.models import create_tables, DoctorModel, PatientModel, ObjectModel, LoginModel
from server.auth import authentication, get_token
from server import rediscli
from server.config import Config
from server.utils import logger
HOST = 'http://192.168.59.200:8080'
ENDPOINT = HOST + '/v1'
SUCCESS_STATUS_CODES = [200, 201, 202, 204]
FAILURE_STATUS_CODES = [400, 401, 403, 404, 405]
def runserver():
httpd = simple_server.make_server('192.168.59.200', 8080, app)
httpd.serve_forever()
def run_server():
thread = threading.Thread(target=runserver)
thread.daemon = True
thread.start()
# # Wait a moment for the thread to start up
time.sleep(0.5)
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.test_conf = Config('tests/configuration_test')
create_tables(self.test_conf)
def tearDown(self):
os.remove('{}.sqlite3'.format(self.test_conf.db_filename))
class TestApiv1(BaseTestCase):
"""
put an account
delete an account
TODO: test 40x situations
"""
def setUp(self):
self.test_conf = Config('tests/configuration_test')
create_tables(self.test_conf)
self.adminid = 'admin_{}'.format(str(uuid.uuid4()))
self.doctorid = <EMAIL>'.format(str(uuid.uuid4()))
self.patientid = <EMAIL>'.format(str(uuid.uuid4()))
self.admin_auth()
self.doctor_auth()
self.patient_auth()
def admin_auth(self):
LoginModel.create(
username=self.adminid,
password='<PASSWORD>',
role='admin'
)
headers = {
'content-type': 'application/json'}
adm_login = {
'username':self.adminid,
'password':'<PASSWORD>', }
visit = Visit(ENDPOINT)
# visit.get(headers=headers)
auth_code, resp_auth = visit.post(suffix_url='auth/admin', headers=headers,
data=adm_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
resp_auth = json.loads(resp_auth)
self.admin_token = resp_auth['token']
def doctor_auth(self):
visit = Visit(ENDPOINT)
logger.debug('before doctor_auth')
headers = {
'content-type': 'application/json'}
headers['token'] = self.admin_token
headers['role'] = 'admin'
regdoc_data = {
'email':self.doctorid,
'first_name':'intest',
'last_name':'intest',
'experience':10,
'patients': '["{}"]'.format(self.patientid)
}
doc_code, resp_doc = visit.post(suffix_url='doctor', headers=headers,
data=regdoc_data)
# logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_auth))
resp_doc = json.loads(resp_doc)
did = resp_doc['doctorid']
self.assertEqual(self.doctorid, did)
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
self.doctorpass = '<PASSWORD>'
LoginModel.create(
username=self.doctorid,
password=<PASSWORD>,
role='doctor'
)
headers = {
'content-type': 'application/json'}
doc_login = {
'username':self.doctorid,
'password':<PASSWORD>, }
auth_code, resp_auth = visit.post(suffix_url='auth/doctor', headers=headers,
data=doc_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
self.doctor_token = resp_auth['token']
def patient_auth(self):
visit = Visit(ENDPOINT)
logger.debug('before patient_auth')
headers = {
'content-type': 'application/json'}
headers['token'] = self.admin_token
headers['role'] = 'admin'
regpt_data = {
'email':self.patientid,
'first_name':'intest',
'last_name':'intest',
'height':'177'
}
doc_code, resp_doc = visit.post(suffix_url='patient', headers=headers,
data=regpt_data)
logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_doc))
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
self.patientpass = '<PASSWORD>'
LoginModel.create(
username=self.patientid,
password=<PASSWORD>,
role='patient'
)
headers = {
'content-type': 'application/json'}
pat_login = {
'username':self.patientid,
'password':<PASSWORD>, }
auth_code, resp_auth = visit.post(suffix_url='auth/patient', headers=headers,
data=pat_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
self.pat_token = resp_auth['token']
def test_reg_doctor(self):
adminid = 'admin_{}'.format(str(uuid.uuid4()))
LoginModel.create(
username=adminid,
password='<PASSWORD>',
role='admin'
)
headers = {
'content-type': 'application/json'}
adm_login = {
'username':adminid,
'password':'<PASSWORD>', }
visit = Visit(ENDPOINT)
# visit.get(headers=headers)
auth_code, resp_auth = visit.post(suffix_url='auth/admin', headers=headers,
data=adm_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
admin_token = resp_auth['token']
logger.debug('before admin requests')
headers['token'] = admin_token
headers['role'] = 'admin'
doctorid = <EMAIL>'.format(str(uuid.uuid4()))
regdoc_data = {
'email':doctorid,
'first_name':'intest',
'last_name':'intest',
'experience':10
}
doc_code, resp_doc = visit.post(suffix_url='doctor', headers=headers,
data=regdoc_data)
# logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_auth))
resp_doc = json.loads(resp_doc)
did = resp_doc['doctorid']
self.assertEqual(doctorid, did)
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
headers['token'] = 'wrong_token'
headers['role'] = 'admin'
doc_code, resp_doc = visit.post(suffix_url='doctor', headers=headers,
data=regdoc_data)
# logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_auth))
self.assertIn(doc_code, FAILURE_STATUS_CODES)
def test_put_doctor(self):
adminid = 'admin_{}'.format(str(uuid.uuid4()))
LoginModel.create(
username=adminid,
password='<PASSWORD>',
role='admin'
)
headers = {
'content-type': 'application/json'}
adm_login = {
'username':adminid,
'password':'<PASSWORD>', }
visit = Visit(ENDPOINT)
# visit.get(headers=headers)
auth_code, resp_auth = visit.post(suffix_url='auth/admin', headers=headers,
data=adm_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
admin_token = resp_auth['token']
logger.debug('before admin requests')
headers['token'] = admin_token
headers['role'] = 'admin'
doctorid = <EMAIL>'.format(str(uuid.uuid4()))
regdoc_data = {
'email':doctorid,
'first_name':'intest',
'last_name':'intest',
'experience':10
}
doc_code, resp_doc = visit.post(suffix_url='doctor', headers=headers,
data=regdoc_data)
# logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_auth))
resp_doc = json.loads(resp_doc)
did = resp_doc['doctorid']
self.assertEqual(doctorid, did)
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
LoginModel.create(
username=doctorid,
password='<PASSWORD>',
role='doctor'
)
headers = {
'content-type': 'application/json'}
doc_login = {
'username':doctorid,
'password':'<PASSWORD>', }
auth_code, resp_auth = visit.post(suffix_url='auth/doctor', headers=headers,
data=doc_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
doctor_token = resp_auth['token']
logger.debug('before doctor requests')
headers['token'] = doctor_token
headers['role'] = 'doctor'
putdoc_data = {
'email':doctorid,
'first_name':'intest_modi',
'last_name':'intest_modi',
'experience':11
}
doc_code, resp_doc = visit.put(suffix_url='doctor/{}'.format(doctorid), headers=headers,
data=putdoc_data)
logger.info('doc_code:{}, resp_doc:{}'.format(doc_code, resp_doc))
resp_doc = json.loads(resp_doc)
did = resp_doc['doctorid']
self.assertEqual(doctorid, did)
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
headers['role'] = 'admin'
putdoc_data = {
'email':doctorid,
'first_name':'intest_modi',
'last_name':'intest_modi',
'experience':11
}
doc_code, resp_doc = visit.put(suffix_url='doctor/{}'.format(doctorid), headers=headers,
data=putdoc_data)
logger.info('doc_code:{}, resp_doc:{}'.format(doc_code, resp_doc))
self.assertIn(doc_code, FAILURE_STATUS_CODES)
def test_get_patient(self):
adminid = 'admin_{}'.format(str(uuid.uuid4()))
LoginModel.create(
username=adminid,
password='<PASSWORD>',
role='admin'
)
headers = {
'content-type': 'application/json'}
adm_login = {
'username':adminid,
'password':'<PASSWORD>', }
visit = Visit(ENDPOINT)
# visit.get(headers=headers)
auth_code, resp_auth = visit.post(suffix_url='auth/admin', headers=headers,
data=adm_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
admin_token = resp_auth['token']
logger.debug('before admin requests')
headers['token'] = admin_token
headers['role'] = 'admin'
patientid = <EMAIL>'.<EMAIL>(str(uuid.uuid4()))
regpt_data = {
'email':patientid,
'first_name':'intest',
'last_name':'intest',
'height':'177'
}
doc_code, resp_doc = visit.post(suffix_url='patient', headers=headers,
data=regpt_data)
logger.info('doc_code:{}, resp_auth:{}'.format(doc_code, resp_auth))
resp_doc = json.loads(resp_doc)
did = resp_doc['patientid']
self.assertEqual(patientid, did)
self.assertIn(doc_code, SUCCESS_STATUS_CODES)
LoginModel.create(
username=patientid,
password='<PASSWORD>',
role='patient'
)
headers = {
'content-type': 'application/json'}
pat_login = {
'username':patientid,
'password':'<PASSWORD>', }
auth_code, resp_auth = visit.post(suffix_url='auth/patient', headers=headers,
data=pat_login)
logger.info('auth_code:{}, resp_auth:{}'.format(auth_code, resp_auth))
self.assertIn('token', resp_auth)
self.assertIn(auth_code, SUCCESS_STATUS_CODES)
# resp_auth = ast.literal_eval(resp_auth)
resp_auth = json.loads(resp_auth)
pat_token = resp_auth['token']
logger.debug('before patient get request')
headers['token'] = pat_token
headers['role'] = 'patient'
pat_code, resp_pat = visit.get(suffix_url='patient/{}'.format(patientid), headers=headers)
logger.info('pat_code:{}, resp_pat:{}'.format(pat_code, resp_pat))
resp_pat = json.loads(resp_pat)
did = resp_pat['email']
self.assertEqual(patientid, did)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
def test_prescription(self):
visit = Visit(ENDPOINT)
headers = {
'content-type': 'application/json'}
logger.debug('before test_post_prescription')
# headers['token'] = self.admin_token
# headers['role'] = 'admin'
headers['token'] = self.doctor_token
headers['role'] = 'doctor'
# logger.debug('before patient get request')
# headers['token'] = self.pat_token
# headers['role'] = 'patient'
regprescription_data = {
'datetime':'20160101',
'drug_name':'drug1',
'after_meal':'yes',
'amount':'60',
'dosage_per_day':'2',
'description':'with water'
}
pat_code, resp_presc = visit.post(suffix_url='prescription/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=regprescription_data)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
regprescription_data2 = {
'datetime':'20160102',
'drug_name':'drug2',
'after_meal':'yes',
'amount':'10',
'dosage_per_day':'1',
'description':'with water'
}
pat_code, resp_presc = visit.post(suffix_url='prescription/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=regprescription_data2)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
logger.debug('before test_get_prescriptions')
headers['token'] = self.pat_token
headers['role'] = 'patient'
pat_code, resp_prescs = visit.get(suffix_url='prescriptions/{}'.format(
self.patientid), headers=headers)
logger.info('pat_code:{}, resp_prescs:{}'.format(pat_code, resp_prescs))
# resp_prescs = json.loads(resp_prescs)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
self.assertIn(self.doctorid, resp_prescs)
self.assertIn('drug1', resp_prescs)
self.assertIn('drug2', resp_prescs)
def test_comment(self):
visit = Visit(ENDPOINT)
headers = {
'content-type': 'application/json'}
logger.debug('before test_comment')
# headers['token'] = self.admin_token
# headers['role'] = 'admin'
headers['token'] = self.doctor_token
headers['role'] = 'doctor'
# logger.debug('before patient get request')
# headers['token'] = self.pat_token
# headers['role'] = 'patient'
comment_data = {
'datetime':'20160101',
'comment':'drink water'
}
pat_code, resp_presc = visit.post(suffix_url='comment/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=comment_data)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
comment_data2 = {
'datetime':'20160102',
'comment':'eat drug'
}
pat_code, resp_presc = visit.post(suffix_url='comment/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=comment_data2)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
logger.debug('before test_get_comments')
headers['token'] = self.pat_token
headers['role'] = 'patient'
pat_code, resp_prescs = visit.get(suffix_url='comments/{}'.format(
self.patientid), headers=headers)
logger.info('pat_code:{}, resp_prescs:{}'.format(pat_code, resp_prescs))
# resp_prescs = json.loads(resp_prescs)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
self.assertIn(self.doctorid, resp_prescs)
self.assertIn('drink water', resp_prescs)
self.assertIn('eat drug', resp_prescs)
def test_discharge(self):
visit = Visit(ENDPOINT)
headers = {
'content-type': 'application/json'}
logger.debug('before test_discharge')
# headers['token'] = self.admin_token
# headers['role'] = 'admin'
headers['token'] = self.doctor_token
headers['role'] = 'doctor'
# logger.debug('before patient get request')
# headers['token'] = self.pat_token
# headers['role'] = 'patient'
comment_data = {
'datetime':'20160101',
'indate':'20151111',
"room":"301",
"bed":"2",
}
pat_code, resp_presc = visit.post(suffix_url='discharge/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=comment_data)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
comment_data2 = {
'datetime':'20160102',
'indate':'20151212',
"room":"402",
"bed":"3",
}
pat_code, resp_presc = visit.post(suffix_url='discharge/{}/{}'.format(
self.doctorid, self.patientid), headers=headers, data=comment_data2)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
comment_data3 = {
'datetime':'20160201',
'outdate':'20160202',
'description':'well',
'bed':'9'
}
pat_code, resp_presc = visit.put(suffix_url='discharge/{}/{}/{}'.format(
self.doctorid, self.patientid, '20151212'), headers=headers, data=comment_data3)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
logger.debug('before test_get_discharges')
headers['token'] = self.pat_token
headers['role'] = 'patient'
pat_code, resp_prescs = visit.get(suffix_url='discharges/{}'.format(
self.patientid), headers=headers)
logger.info('pat_code:{}, resp_prescs:{}'.format(pat_code, resp_prescs))
# resp_prescs = json.loads(resp_prescs)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
self.assertIn(self.doctorid, resp_prescs)
self.assertIn('20160202', resp_prescs)
self.assertIn('20151111', resp_prescs)
self.assertIn('9', resp_prescs)
def test_appointment(self):
visit = Visit(ENDPOINT)
headers = {
'content-type': 'application/json'}
logger.debug('before test_appointment')
# headers['token'] = self.admin_token
# headers['role'] = 'admin'
headers['token'] = self.doctor_token
headers['role'] = 'doctor'
# logger.debug('before patient get request')
# headers['token'] = self.pat_token
# headers['role'] = 'patient'
apmt_data = {
'doctorid':self.doctorid,
'patientid':self.patientid,
'datetimeslot':'201511111300',
'illness':'headache'
}
pat_code, resp_presc = visit.post(suffix_url='appointment', headers=headers, data=apmt_data)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
pat_code, resp_prescs = visit.get(suffix_url='appointment/{}/{}/{}'.format(
self.doctorid, '201511111300',self.patientid), headers=headers)
logger.info('pat_code:{}, resp_prescs:{}'.format(pat_code, resp_prescs))
# resp_prescs = json.loads(resp_prescs)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
self.assertIn('headache', resp_prescs)
apmt_data = {
'doctorid':self.doctorid,
'patientid':self.patientid,
'datetimeslot':'201511111430',
'illness':'cold'
}
pat_code, resp_presc = visit.post(suffix_url='appointment', headers=headers, data=apmt_data)
logger.info('pat_code:{}, resp_presc:{}'.format(pat_code, resp_presc))
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
# logger.debug('before test_appointment')
headers['token'] = self.pat_token
headers['role'] = 'patient'
pat_code, resp_prescs = visit.get(suffix_url='appointment/{}/{}'.format(
self.doctorid, '20151111'), headers=headers)
logger.info('pat_code:{}, resp_prescs:{}'.format(pat_code, resp_prescs))
resp_prescs = json.loads(resp_prescs)
self.assertIn(pat_code, SUCCESS_STATUS_CODES)
self.assertIn('1',
import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
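# Illustrative usage sketch (added for clarity; not part of the original file):
# LDS simply chains the three max-pools above, so for the 300x300 inputs that
# LRFNet expects, a [N, 3, 300, 300] image is reduced to [N, 3, 38, 38]
# (300 -> 150 -> 75 -> floor((75 + 2 - 2) / 2) + 1 = 38), which is the spatial
# size later consumed by LSN_init.
def _lds_shape_sketch():
    x = torch.randn(1, 3, 300, 300)
    out = LDS()(x)
    assert out.shape == (1, 3, 38, 38)
    return out.shape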
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
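# A minimal sketch of how ConvBlock is used throughout this file (added for
# illustration; not in the original source). The default flags give a
# Conv2d -> BatchNorm2d -> ReLU unit, while relu=False (as in the part_b
# projections below) drops the final activation.
def _convblock_sketch():
    body = ConvBlock(64, 128, kernel_size=3, stride=1, padding=1)    # conv + bn + relu
    proj = ConvBlock(128, 512, kernel_size=1, stride=1, relu=False)  # conv + bn only
    y = proj(body(torch.randn(1, 64, 38, 38)))
    assert y.shape == (1, 512, 38, 38)
    return y.shape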
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs to generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to adjust the channels of the concatenated projections from other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
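# Illustrative sketch (not in the original file): how the '300' configuration
# above is turned into the VGG trunk that LRFNet.forward indexes. vgg() returns
# a flat list of 35 modules, which is why forward() walks base[0:22] up to
# conv4_3, base[22:34] up to fc7, and then applies base[34].
def _vgg_trunk_sketch():
    layers = vgg(base['300'], 3)
    assert len(layers) == 35
    return nn.ModuleList(layers)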
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
"66e5a32f04cfcf9826296b3c053c22caa745fd890ccc6ea9199c34529507524a",
),
"rest-types":
struct(
version = "1.14.1.1",
sha256 =
"b7e08e65bbae20bd891f0905c9c785184182172094673ab13e66499e4fe3969a",
),
"rest-wai":
struct(
version = "0.2.0.1",
sha256 =
"38205eb7b85a4e052f11db099dd65e9d952b8533d1a35001f0b1958b443c0d02",
),
"result":
struct(
version = "0.2.6.0",
sha256 =
"f526d97cdab851f24e215e346f6d54d3a504a6ac5d9264f580c4f72d606178c5",
),
"rethinkdb-client-driver":
struct(
version = "0.0.25",
sha256 =
"0f9dc156cd61b866b847b1b1a60a2345b4b5556b8b75a9e8499b0514e7f98996",
),
"retry":
struct(
version = "0.7.6.0",
sha256 =
"f6cc3eb256f1ab523ebacad7ab9804fae77ff3f133b57e07707a33d36433dddc",
),
"rev-state":
struct(
version = "0.1.2",
sha256 =
"ee070e39d7f7d673593e2f356ab317bc2fdd0d8a283f8316c0e5b5adbdf0f919",
),
"rfc5051":
struct(
version = "0.1.0.3",
sha256 =
"e38dab28a5625774be60545c8c99e647b79bbc0ac0bc9c65fe6b2ebef160642b",
),
"riak":
struct(
version = "1.1.2.3",
sha256 =
"b60409a13902fb0097b7ba329ea7fb0cd673f16a6fc3d2f078f54c41d1db31e8",
),
"riak-protobuf":
struct(
version = "0.23.0.0",
sha256 =
"5dcbd06bdb66a1e43881a62a44d92e47d3f16f9ea1b4d53e4a92622faecdca33",
),
"rng-utils":
struct(
version = "0.3.0",
sha256 =
"0886acb1e0ae6c6ad5f594a9d4d57ea5af69c566ccc5763d0b7c690963e946ba",
),
"roles":
struct(
version = "0.2.0.0",
sha256 =
"e29d2f31b21b2d8ce3507e17211e70a61d2e434a8e19f80b2e4898bdabac34a0",
),
"rose-trees":
struct(
version = "0.0.4.4",
sha256 =
"2313133d29286e1e4f1f0b1d6ec0fba852bc5537d5b062c1b8fe0a6aa79b72cd",
),
"rot13":
struct(
version = "0.2.0.1",
sha256 =
"e026d418cc6a1ce83ba11e811387e62ad49ffb1cbd6ae7f58b72fd179fccd4dc",
),
"runmemo":
struct(
version = "1.0.0.1",
sha256 =
"ba5ef3177f8fe5f443808e44f62d03b23ac19bbef7f708e40532031a3505d689",
),
"rvar":
struct(
version = "0.2.0.3",
sha256 =
"d78aaf2ffdba182dda95d1692fec7abc5d77fa371120618a397b5675438c6bc0",
),
"s3-signer":
struct(
version = "0.3.0.0",
sha256 =
"89e957f81211a417c425214d3d7eafb0f15f695ffe0002f4198f2e34b43bc494",
),
"safe":
struct(
version = "0.3.15",
sha256 =
"a35e4ae609aabd568da7e7d220ab529c34040b71ae50df1ee353896445a66a2d",
),
"safe-exceptions":
struct(
version = "0.1.6.0",
sha256 =
"71d47ce1049465b02d89231f2931e7a1d22b6960e85fca5281162e979cf08d1c",
),
"safe-exceptions-checked":
struct(
version = "0.1.0",
sha256 =
"d807552b828de308d80805f65ee41f3e25571506b10e6b28b0b81de4aec0ca3f",
),
"safecopy":
struct(
version = "0.9.3.3",
sha256 =
"cb2272648a2e1e924b3f4f7f73c475ab70c661c8967246acae1b47f0fa57ba9e",
),
"safeio":
struct(
version = "0.0.5.0",
sha256 =
"d5799b6a6cd36e8f5442d991ed3a2076b10e0e3131269a2090b8c9c5c001e311",
),
"sample-frame":
struct(
version = "0.0.3",
sha256 =
"5baf301a4f7b2d52e6b9b9c06b10afd3938de0be6d09736d0188616cd9027247",
),
"sample-frame-np":
struct(
version = "0.0.4.1",
sha256 =
"b1db7621b07503f5fe49390bf1e1b4257c49f4760d617121a23d845278f93624",
),
"sampling":
struct(
version = "0.3.2",
sha256 =
"a66156e4600ffb15bde127a841251d49f2d0ff67a85e05961b91839b4769824e",
),
"sandi":
struct(
version = "0.4.1",
sha256 =
"722492c2db14a18ed643d5b10532c1a82787f6ab9a952e15a9389d8a7e48c623",
),
"sandman":
struct(
version = "0.2.0.1",
sha256 =
"407d283e1fc4a2a369615bac569683bf399ac14ddbce1331850bfe1d7837ce64",
),
"say":
struct(
version = "0.1.0.0",
sha256 =
"f26fdb94ed81a2ae503beca0dcea74da7ee37408ba2e41ab3fdcaa9a7622fc40",
),
"sbp":
struct(
version = "2.3.6",
sha256 =
"13da29c591f54cc958f1c25e26847ccc250c5be3577999394b83873c0e0dc816",
),
"sbv":
struct(
version = "7.4",
sha256 =
"ec3d3922a0da061513b1eedc96ede24a06a6202b2d7d5ae1641fb3dcbe78d47b",
),
"scalendar":
struct(
version = "1.2.0",
sha256 =
"f5c85e8da39e7eb22068032c4c5c32751ebbed61d0ee9679cadac904dde163ac",
),
"scalpel":
struct(
version = "0.5.1",
sha256 =
"20df66433570a2ca754f14058a47fb00519d9a75bb822fc3fd1769a83c608b0d",
),
"scalpel-core":
struct(
version = "0.5.1",
sha256 =
"8c05b86853b737fbed4144dc9c7bbb7743525c305f9529f59776df97bfe229a9",
),
"scanner":
struct(
version = "0.2",
sha256 =
"3a020d68a0372a5211c72e55eeb299738ea608d17184bc68f74d31ebe667a5e9",
),
"schematic":
struct(
version = "0.4.2.0",
sha256 =
"c48af3110e8d1f67011230a910abbc9ab445043fb6e8218c9de8c68ab6bdc34c",
),
"scientific":
struct(
version = "0.3.5.2",
sha256 =
"5ce479ff95482fb907267516bd0f8fff450bdeea546bbd1267fe035acf975657",
),
"scotty":
struct(
version = "0.11.0",
sha256 =
"892203c937ccf1279f5005ddb78ebea84629b80687a1e38fc118b38011a386ed",
),
"scrypt":
struct(
version = "0.5.0",
sha256 =
"3ec0a622393e2a4dbbce4c899602c848d924f8516688491b1162331b7093d9b2",
),
"sdl2":
struct(
version = "2.3.0",
sha256 =
"446ddadc9ed93138d5e69b15b8ba9dd2e9e40401e9b30b1279838d54cb25672b",
),
"sdl2-gfx":
struct(
version = "0.2",
sha256 =
"8c1e10b7a675d782cd650820c75c4ef9225718ad6aaa3f8db02e869b7720c50d",
),
"sdl2-image":
struct(
version = "2.0.0",
sha256 =
"399742b2b7e64fe4e58c9d8a44ad29b2c355589233535238f8c9b371de6c26df",
),
"sdl2-mixer":
struct(
version = "0.1",
sha256 =
"d924f31d9e1c87eed92d357ce20273dba44637861927188b8a44db2c0b2e2bc0",
),
"sdl2-ttf":
struct(
version = "2.0.2",
sha256 =
"0dc6ca8459c463a06e8a59a4cb2039a9a08bd62a04b59efc035a31554b950ae4",
),
"search-algorithms":
struct(
version = "0.3.0",
sha256 =
"4a9d03c97abfd83fae582e4c3425a105b8649b8e69a2c1e170dbbabd8820db10",
),
"securemem":
struct(
version = "0.1.9",
sha256 =
"feb60dc542ea3ce9cdb449093b85dc69e43df310aab4fd161e4cdaa3ba847036",
),
"selda":
struct(
version = "0.1.12",
sha256 =
"a9896a55fcd34e208495b77feb7e054bb8590e7334924f43924fc87103096edd",
),
"selda-postgresql":
struct(
version = "0.1.7.1",
sha256 =
"42b42c981d2734a569b9c558dea8576cbd8b5e4a5c7258ab848da6d8f811ecc7",
),
"selda-sqlite":
struct(
version = "0.1.6.0",
sha256 =
"c67ba89114a82ece42b7e478bcf480ae0241cefb41e2e9b340a268f9f08be390",
),
"semigroupoid-extras":
struct(
version = "5",
sha256 =
"102e33b55cc3b15a1b714825a3703f3fc2bb09d8038404af442d35c0ac0c3832",
),
"semigroupoids":
struct(
version = "5.2.1",
sha256 =
"79e41eb7cbcb4f152343b91243feac0a120375284c1207edaa73b23d8df6d200",
),
"semigroups":
struct(
version = "0.18.4",
sha256 =
"589e3042329a6bcffb5c0e85834143586db22eb7a2aae094d492cd004f685d27",
),
"semiring-simple":
struct(
version = "1.0.0.1",
sha256 =
"c08d1b533f4559fc55119f563a6cf3d74ad7c6f5916c2efe00b50d2a5169fd28",
),
"semver":
struct(
version = "0.3.3.1",
sha256 =
"36d3369706836d60f3bc517f30c6860734481866363723904b8768823b6bc8b1",
),
"sendfile":
struct(
version = "0.7.9",
sha256 =
"102fdf6db8c00f5a5981c6eed5acba1368a2d79b2970ce5b22ceb180aa0fdc42",
),
"sensu-run":
struct(
version = "0.4.0.4",
sha256 =
"bee065757a9c68d4a7863c8a003a57863fb19ce43fe2d359c1ee186d8d72ffdd",
),
"seqalign":
struct(
version = "0.2.0.4",
sha256 =
"4ea194658d865890157d3df882ed21b0c089cdff7f80ea613ae25c5f3d744305",
),
"seqloc":
struct(
version = "0.6.1.1",
sha256 =
"4435e76ba86417612b6bd6a173dc99444d5fe9184a9822b1edf13c808d4f55c3",
),
"serf":
struct(
version = "0.1.1.0",
sha256 =
"d6c9c6ddf99a2119c6686732caf9f04ef8e9c4df5519a8bbd4ac7f5531d4c067",
),
"servant":
struct(
version = "0.11",
sha256 =
"c5b3f7af140fdafd3f646dcea6720c1b3b8a376f1f19a020b200acde64846b03",
),
"servant-JuicyPixels":
struct(
version = "0.3.0.3",
sha256 =
"60f9c098c1f446338000dad50fb82ff914664d955c1964c09e940da0e81c654d",
),
"servant-auth-cookie":
struct(
version = "0.5.0.5",
sha256 =
"6a5b9ffabfc48a908bd91ade7c0b5ef7704eab033a4bb5abffdccd280a7187d6",
),
"servant-blaze":
struct(
version = "0.7.1",
sha256 =
"90ed1c7a22b83bee344ef3896203f3699b7633bf986ffa064752c3596c072646",
),
"servant-cassava":
struct(
version = "0.10",
sha256 =
"9b2c5d906f3a4bb2767b2ce91f12a74e24adceadd296220b5d7216c5e1f3560e",
),
"servant-checked-exceptions":
struct(
version = "0.4.1.0",
sha256 =
"6fbc80f2939ad2f9d6b728ca4d65edcf50f2f35944cd2b5b0d641948b9df00a6",
),
"servant-client":
struct(
version = "0.11",
sha256 =
"ea6d2ba8183a9cc721e944659fc175a1e81ecac11dfcea9544ef07f7ccc92afa",
),
"servant-docs":
struct(
version = "0.11",
sha256 =
"a03aa040ad19478c7f6fef28436c5e3c1fb523c6da5a364167a4b0cd56b87f09",
),
"servant-elm":
struct(
version = "0.4.0.1",
sha256 =
"69b3a5dcbb680fc1e923d76afa8255987d4613e0d4387eb493de071c9842ffc5",
),
"servant-exceptions":
struct(
version = "0.1.1",
sha256 =
"652b9fdc463200ebb8c2b2e0757f9d90662408bf45a657b3f719d0a36d34abe1",
),
"servant-foreign":
struct(
version = "0.10.1",
sha256 =
"88f2f5bc2293585e6dcf5f544bdbf871090389b0402ead403abf6013c3aec9c8",
),
"servant-generic":
struct(
version = "0.1.0.1",
sha256 =
"2ef213c2f72eb5d1c3da06f5b8e7537128ea96fe54bb086d5ade91ce872cfcfd",
),
"servant-js":
struct(
version = "0.9.3.1",
sha256 =
"31873c5c5eed6c0c306e36c6dd52da48d8e11844c528f8f93ecf4adb8d1e5605",
),
"servant-kotlin":
struct(
version = "0.1.0.3",
sha256 =
"ed9579d151a9ba420b1d67753f4c183d3ecb8a4a19d1dbac46f07ae1f689b3c5",
),
"servant-lucid":
struct(
version = "0.7.1",
sha256 =
"ec26ba7d159b09be10beacf6242f6ae1bd111e9c738bfbf3cf2f560f48e0fe40",
),
"servant-mock":
struct(
version = "0.8.3",
sha256 =
"b56080e50ec74f02b759b5ebd7f07f5ac34efc52475e85b4c728f54cf6f3933b",
),
"servant-pandoc":
struct(
version = "0.4.1.4",
sha256 =
"d2a42add37ea494542a951cf089ea02c7469efc5880a59b8e3eb9b786c5e5543",
),
"servant-purescript":
struct(
version = "0.9.0.2",
sha256 =
"f72839cd6b956b6b2ac2adfd2da237ffee63180c43281e3b109e59925526b2ab",
),
"servant-rawm":
struct(
version = "0.2.0.2",
sha256 =
"a266877a434f2177049f71ac4b7c4a5e4be77acafb2b779ce61075dea5897c5a",
),
"servant-ruby":
struct(
version = "0.5.1.0",
sha256 =
"d2145df940bc8cc6281e26c115d1b418e432661e81fe1364e4147d16a8473848",
),
"servant-server":
struct(
version = "0.11.0.1",
sha256 =
"e25c1cb6c55b9b5f66aa73f59fbcab25d94e4645256aed9b8bbf1edf63d02c7b",
),
"servant-static-th":
struct(
version = "0.1.0.6",
sha256 =
"5d45a91c2c9de7a4fa15354887c937cbc49ccd2aee19ecdfe44853eb6a3f2ba7",
),
"servant-subscriber":
struct(
version = "0.6.0.1",
sha256 =
"3da1856b47c03ffa1d1c107267e7f18ef5207e6bb2d104788f60b14f01ac7839",
),
"servant-swagger":
struct(
version = "1.1.4",
sha256 =
"710481116ef49a80cc0925a72073b6a38554245ebf04558c50aa4eb053009a75",
),
"servant-swagger-ui":
struct(
version = "0.2.4.3.4.0",
sha256 =
"316f6d5b5754615bb57dbcb27f2a16c716e0a7f847826e6e58d04b09cf5d61ed",
),
"servant-websockets":
struct(
version = "1.0.0",
sha256 =
"623112456c7095c38a43a0351997bafad299a96f8d7fe769eb29b1f2b2f7c917",
),
"servant-yaml":
struct(
version = "0.1.0.0",
sha256 =
"c917d9b046b06a9c4386f743a78142c27cf7f0ec1ad8562770ab9828f2ee3204",
),
"serversession":
struct(
version = "1.0.1",
sha256 =
"3ffbefd87017e8d46fbbe380f59e24672aa9c06b999da5f9ae0b052094d94822",
),
"serversession-backend-persistent":
struct(
version = "1.0.4",
sha256 =
"c7f2d6fe08d13269ed4834ccf186926dc6c3815011bc456e77ce481fb6eb971c",
),
"serversession-backend-redis":
struct(
version = "1.0.3",
sha256 =
"ce4b0a3741da3842fe4b5008b51894251ac59b3530babd5ce58b915ec2543615",
),
"serversession-frontend-wai":
struct(
version = "1.0",
sha256 =
"0b48130e3d3915dc46ec2392984e7862d066f6ddd454127a98b0c21c2574b167",
),
"serversession-frontend-yesod":
struct(
version = "1.0",
sha256 =
"063946df7bf693e26973f81dd72b3586115dfac6b358421e4a7ada48e47c6753",
),
"servius":
struct(
version = "1.2.0.3",
sha256 =
"47621f01e55cf4e69aeea80104a8a99e87c3a9ad13a5f144da7acd38370563f0",
),
"ses-html":
struct(
version = "0.4.0.0",
sha256 =
"cff76ee03b538e69a3d107cd63d577210cf0f9879d470bf55519e887e2a8a08f",
),
"set-cover":
struct(
version = "0.0.8",
sha256 =
"186d3a1b6e824e3bd1d479347d8310dba9f1cba98e90bc03d885c42558ea95d1",
),
"set-monad":
struct(
version = "0.2.0.0",
sha256 =
"eb2b4312c4a71024ea1c85683065c1052b0065b7d96df68cd1c4390c1ab2afdb",
),
"setenv":
struct(
version = "0.1.1.3",
sha256 =
"e358df39afc03d5a39e2ec650652d845c85c80cc98fe331654deafb4767ecb32",
),
"setlocale":
struct(
version = "1.0.0.5",
sha256 =
"57438491475004eda12d7a73eea0ab1c5fb28774027626e5bbcb142fe57d9ff0",
),
"sets":
struct(
version = "0.0.5.2",
sha256 =
"be20d5b7b4a5770b7089879f3ef7226c485f4d5bb17e87f979f3bb6475e48713",
),
"shake":
struct(
version = "0.16",
sha256 =
"8f35ea18ed8bffd11dcbadc17384948866c191631f2a0786f253db73b3472b0d",
),
"shake-language-c":
struct(
version = "0.11.0",
sha256 =
"2174ad269b5fc3bb09054b0289697ce052b1cd3fc3393f6ad00181f1870f931d",
),
"shakespeare":
struct(
version = "2.0.15",
sha256 =
"4354c6eebbfa89103c8090aae3bc7be5fceae6a0e327acf8adc4f3fd535864ee",
),
"shell-conduit":
struct(
version = "4.6.1",
sha256 =
"86d161f8b05ae72e5464fe4ade42443d750fc9ffbd5ba98d7d5587287076ad42",
),
"shell-escape":
struct(
version = "0.2.0",
sha256 =
"e23c9ba94a27e45430cb39e6bb236557e789d24129257c3def377f441b2cba4a",
),
"shelly":
struct(
version = "1.7.0.1",
sha256 =
"0343758a6f01472341eed2bfd38f8e43543c933bdfc75723c44c332c917f9628",
),
"shikensu":
struct(
version = "0.3.8",
sha256 =
"9043593a661b738752686e5d8c1e39db22104832b647ea67d212a91a380d516a",
),
"shortcut-links":
struct(
version = "0.4.2.0",
sha256 =
"1e6b75c5e94fddf9e2e665821ac70f5083e5d40d1fd55813e94943ce02335027",
),
"should-not-typecheck":
struct(
version = "2.1.0",
sha256 =
"f538ac70ce07679bc2e6c1651db82a86866664ab995665fdc78e6cb12bd8d591",
),
"show-prettyprint":
struct(
version = "0.2",
sha256 =
"5c1019ad399085e3175f15da98471276175176b0c2fdc11558b5a929b173d293",
),
"signal":
struct(
version = "0.1.0.4",
sha256 =
"c4bfdd92b75347e02759c1a7d75963fbc7052e948ec96e25299ca5262e5d76e5",
),
"silently":
struct(
version = "1.2.5",
sha256 =
"cef625635053a46032ca53b43d311921875a437910b6568ded17027fdca83839",
),
"simple":
struct(
version = "0.11.2",
sha256 =
"ef53672eded47626cd125dc0759628fcfead2f2e271a0cae1092d4ff244e0614",
),
"simple-log":
struct(
version = "0.9.3",
sha256 =
"f012ff48e7a93645f0952562e65455f57073552921a7ee8f6a2f3caddee8a844",
),
"simple-reflect":
struct(
version = "0.3.2",
sha256 =
"38224eb3d0d5eafc7101ad48fa92001c3e753a015d53bb12753a3836b871ecb6",
),
"simple-sendfile":
struct(
version = "0.2.27",
sha256 =
"f68572592099a2db3f7212ac7d133447ae5bbb2605285d3de1a29a52d9c79caf",
),
"simple-session":
struct(
version = "0.10.1.1",
sha256 =
"8a9c9cb7a80080b6440a80549919d3cee3409af6c516b3d10d1392708b48e7c1",
),
"simple-templates":
struct(
version = "0.8.0.1",
sha256 =
"28e10f916320bb5097d9ed323a1726d88d17a51b0ac0290a91806d97840bca8e",
),
"singleton-bool":
struct(
version = "0.1.2.0",
sha256 =
"33bbd0460a5363260f56b29b130babfc16921ba87cb4576569ecc0a0664d449d",
),
"singleton-nats":
struct(
version = "0.4.0.4",
sha256 =
"045e7880bf761ecaaed8b738ff5ecec62604925c354cc1845587c3b023de3fb2",
),
"singletons":
struct(
version = "2.3.1",
sha256 =
"ca8de4df85d50e9363b3f1715a23c9456d2a57e5e145343693714cecc4afaec4",
),
"siphash":
struct(
version = "1.0.3",
sha256 =
"cf81ce41c6ca40c4fec9add5dcebc161cb2d31f522f9ad727df23d30ac6a05f3",
),
"skein":
struct(
version = "1.0.9.4",
sha256 =
"f882ca0cc5ed336ef898fb3c89579e392900259296b2320edf968b9fc16cb8c9",
),
"skeletons":
struct(
version = "0.4.0",
sha256 =
"3dd5045d09131434a794b9452980b4a54da4312d2e1116ac455bbc9bdf6fbcc6",
),
"skylighting":
struct(
version = "0.5.1",
sha256 =
"afbda861a98bc1bc9e829b43452e9a5fd559c41a76d87bb40b58398a3184b450",
),
"slack-web":
struct(
version = "0.2.0.1",
sha256 =
"295f4958c55708b28597f1d90b9e9fd6117eeedca41c637a7b9878c9be363cec",
),
"slave-thread":
struct(
version = "1.0.2",
sha256 =
"e47120598dd65ebee33253911a31518021323a5ccfa52588e13c44fd5f5b4b13",
),
"slug":
struct(
version = "0.1.7",
sha256 =
"d76f8243fd8b45d02c0731962ceddcd96154473d6f7c5cbf36ab921bc5627dde",
),
"smallcheck":
struct(
version = "1.1.3.1",
sha256 =
"9ff5f16ffa4c4ab57c0f22fcada1825aa250c03f1559aae851d96738bb06bdd2",
),
"smoothie":
struct(
version = "0.4.2.7",
sha256 =
"84561c3463d870312fafb48680ef0122688814fcbb2eb605570c48cceb64deb2",
),
"smtp-mail":
struct(
version = "0.1.4.6",
sha256 =
"86dacbef87a2519222a1165b49401a437887a249f5bfd63a99702198dad214bc",
),
"snap-blaze":
struct(
version = "0.2.1.5",
sha256 =
"b36e35bd4ba3087b3de92702e488ba6570675719243b5dbdf4eae0b819988841",
),
"snap-core":
struct(
version = "192.168.127.12",
sha256 =
"0749f0d52e415627411adfa90d78ee04b63aa51f7aa19c0a9a94b692cf5f5754",
),
"snap-server":
struct(
version = "172.16.17.32",
sha256 =
"745adbc5f8966deff4e84c873f86ad1d19ca306dfd6ddd2a39892640d9bb4eee",
),
"snowflake":
struct(
version = "0.1.1.1",
sha256 =
"f156ca321ae17033fe1cbe7e676fea403136198e1c3a132924a080cd3145cddd",
),
"snowtify":
struct(
version = "0.1.0.3",
sha256 =
"588c86910eb26f551b9916aca4e60ed60d7ef9b850eb5a920caac67e2b487dd0",
),
"soap":
struct(
version = "0.2.3.5",
sha256 =
"ba0bf7d1d65a594cf4407e70da5baaa2a2ba341b7e1d01a9a2ea01ff32cbb707",
),
"soap-openssl":
struct(
version = "0.1.0.2",
sha256 =
"2008547f4fd22063479ce1cd1c483db926f5f08a2ff6fb0c60fb2f0f7d42830f",
),
"soap-tls":
struct(
version = "0.1.1.2",
sha256 =
"e43abafb0ed390b9f5f99cc957973367d40e91c8d3ae7e22b3250a08ebe7df76",
),
"socket":
struct(
version = "0.8.0.1",
sha256 =
"745f6d1ef2299e321ad646918b9b733c82b4ded51b3b6aab6755c85182ab09a2",
),
"socket-activation":
struct(
version = "0.1.0.2",
sha256 =
"b99e7b4f296cd462aac84e5bb61fb02953e2080d1351e9e10a63d35dc34eb43b",
),
"socks":
struct(
version = "0.5.6",
sha256 =
"fa63cd838025e18864c59755750c0cfc4ea76e140a542f07a5c682488ec78438",
),
"sort":
struct(
version = "1.0.0.0",
sha256 =
"cee3894879cb4b2150331eca96d5d27f51a6114bcb082d1d8dded55881f5770d",
),
"sorted-list":
struct(
version = "0.2.0.0",
sha256 =
"cc52c787b056f4d3a9ecc59f06701695602558a4233042ff8f613cdd4985d138",
),
"sourcemap":
struct(
version = "0.1.6",
sha256 =
"b9a04cccb4fe7eea8b37a2eaf2bc776eae5640038ab76fb948c5a3ea09a9ce7a",
),
"sox":
struct(
version = "0.2.2.7",
sha256 =
"7dcdf728381dc508640ea3d7c0c5d1024950205d4ebde2ee40c5187b6cc6d2fc",
),
ep_dicts = request.get('episodes', [])
num_photos = 0
for ep_dict in ep_dicts:
yield Episode.VerifyEpisodeId(client, user_id, device_id, ep_dict['new_episode_id'])
num_photos += len(ep_dict['photo_ids'])
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'SavePhotosOperation.Execute',
request)
logging.info('SAVE PHOTOS: user: %d, device: %d, %d viewpoints, %d episodes, %d photos' %
(user_id, device_id, len(vp_ids), len(ep_dicts), num_photos))
raise gen.Return({})
@gen.coroutine
def ShareExisting(client, obj_store, user_id, device_id, request):
"""Shares photos from existing episodes with the followers of an existing viewpoint."""
request['user_id'] = user_id
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
num_photos = 0
for ep_dict in request['episodes']:
yield Episode.VerifyEpisodeId(client, user_id, device_id, ep_dict['new_episode_id'])
num_photos += len(ep_dict['photo_ids'])
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'ShareExistingOperation.Execute',
request)
logging.info('SHARE EXISTING: user: %d, device: %d, viewpoint: %s, %d episodes, %d photos' %
(user_id, device_id, request['viewpoint_id'], len(request['episodes']), num_photos))
raise gen.Return({})
@gen.coroutine
def ShareNew(client, obj_store, user_id, device_id, request):
"""Shares a list of photos with each of a list of contacts, specified
by a contact identity key or a viewfinder user id. Creates a new
viewpoint and episodes, with the contacts as followers.
"""
request['user_id'] = user_id
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
vp_dict = request['viewpoint']
yield Viewpoint.VerifyViewpointId(client, user_id, device_id, vp_dict['viewpoint_id'])
num_photos = 0
for ep_dict in request['episodes']:
yield Episode.VerifyEpisodeId(client, user_id, device_id, ep_dict['new_episode_id'])
num_photos += len(ep_dict['photo_ids'])
# Validate contact identities.
_ValidateContacts(request['contacts'])
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'ShareNewOperation.Execute',
request)
logging.info('SHARE NEW: user: %d, device: %d, viewpoint: %s, %d episodes, %d photos' %
(user_id, device_id, vp_dict['viewpoint_id'], len(request['episodes']),
num_photos))
raise gen.Return({})
@gen.coroutine
def TerminateAccount(client, obj_store, user_id, device_id, request):
"""Terminate the calling user's account. Unlink all identities from the
user, mute all device alerts, and disable all sharing.
"""
request['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'User.TerminateAccountOperation',
request)
logging.info('TERMINATE ACCOUNT: user: %d, device: %d' % (user_id, device_id))
raise gen.Return({})
@gen.coroutine
def UnlinkIdentity(client, obj_store, user_id, device_id, request):
"""Unlink an existing identity from the requesting account."""
# Validate identity key.
Identity.ValidateKey(request['identity'])
unlink_ident = yield gen.Task(Identity.Query, client, request['identity'], None, must_exist=False)
# If the identity is missing, assume unlink is being re-called and do a no-op. If the
# user_id does not match, raise a permission error. Otherwise, if the request is for an
# authorized identity, we must query to ensure it is not the last one remaining.
if unlink_ident is not None:
if unlink_ident.user_id != user_id:
raise PermissionError('Identity "%s" not linked to this account' % request['identity'])
if unlink_ident.authority is not None:
query_expr = 'identity.user_id=%d' % user_id
all_identities = yield gen.Task(Identity.IndexQuery, client, query_expr, ['key', 'authority'])
# Verify there is at least one identity remaining with an authority.
if not any([one_identity.authority is not None and one_identity.key != unlink_ident.key
for one_identity in all_identities]):
raise PermissionError('Removing this identity, which was authorized by %s, would leave '
'you with no way to access your account' % unlink_ident.authority)
request['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'Identity.UnlinkIdentityOperation',
request)
logging.info('IDENTITY UNLINK: user: %d, device: %d, identity: %s' %
(user_id, device_id, request['identity']))
raise gen.Return({})
@gen.coroutine
def Unshare(client, obj_store, user_id, device_id, request):
"""Unshares photos from the episodes in the specified viewpoint, as
well as from all derived episodes to which the photos were shared.
"""
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UnshareOperation.Execute',
request)
logging.info('UNSHARE: user: %d, device: %d, viewpoint: %s, %d episodes, %d photos' %
(user_id, device_id, request['viewpoint_id'], len(request['episodes']),
sum([len(ep_dict['photo_ids']) for ep_dict in request['episodes']])))
raise gen.Return({})
@gen.coroutine
def UpdateDevice(client, obj_store, user_id, device_id, request):
"""Updates the device metadata. Sets a new secure client access cookie.
"""
device_dict = request['device_dict']
if device_dict.has_key('device_id') and device_dict['device_id'] != device_id:
raise web.HTTPError(400, 'bad auth cookie; device id mismatch %d != %d' %
(device_dict['device_id'], device_id))
request['user_id'] = user_id
request['device_id'] = device_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'Device.UpdateOperation',
request)
logging.info('UPDATE DEVICE: user: %d, device: %d' % (user_id, device_id))
raise gen.Return({})
@gen.coroutine
def UpdateEpisode(client, obj_store, user_id, device_id, request):
"""Updates episode metadata."""
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
headers = request.pop('headers')
activity = request.pop('activity')
request = {'headers': headers,
'user_id': user_id,
'activity': activity,
'episode': request}
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UpdateEpisodeOperation.Execute',
request)
logging.info('UPDATE EPISODE: user: %d, device: %d, episode: %s' %
(user_id, device_id, request['episode']['episode_id']))
raise gen.Return({})
@gen.coroutine
def UpdateFollower(client, obj_store, user_id, device_id, request):
"""Updates follower metadata."""
request['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UpdateFollowerOperation.Execute',
request)
logging.info('UPDATE FOLLOWER: user: %d, device: %d, viewpoint: %s' %
(user_id, device_id, request['follower']['viewpoint_id']))
raise gen.Return({})
@gen.coroutine
def UpdateFriend(client, obj_store, user_id, device_id, request):
"""Updates friend metadata."""
request['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'Friend.UpdateOperation',
request)
logging.info('UPDATE FRIEND: user: %d, device: %d, friend: %s' %
(user_id, device_id, request['friend']['user_id']))
raise gen.Return({})
@gen.coroutine
def UpdatePhoto(client, obj_store, user_id, device_id, request):
"""Updates photo metadata."""
request['user_id'] = user_id
# If activity header is required, then expect it to have device_id from cookie.
if request['headers']['original_version'] >= Message.ADD_OP_HEADER_VERSION:
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
else:
yield Activity.VerifyActivityId(client, user_id, 0, request['activity']['activity_id'])
request = {'headers': request.pop('headers'),
'act_dict': request.pop('activity'),
'ph_dict': request}
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'Photo.UpdateOperation',
request)
logging.info('UPDATE PHOTO: user: %d, device: %d, photo: %s' %
(user_id, device_id, request['ph_dict']['photo_id']))
raise gen.Return({})
@gen.coroutine
def UpdateUser(client, obj_store, user_id, device_id, request):
"""Updates user profile and settings metadata."""
password = request.pop('password', None)
if password is not None:
context = base.ViewfinderContext.current()
user = context.user
old_password = request.pop('old_password', None)
# Recently confirmed cookies can always set the password -- this is how we do password resets.
if not context.IsConfirmedUser():
# Cookie is not confirmed, so raise an error unless one of the following is true:
# 1. The old_password field is set and matches the user's current password.
# 2. The user currently has no password.
if old_password is None and user.pwd_hash is not None:
raise PermissionError(UPDATE_PWD_NOT_CONFIRMED)
if user.pwd_hash is not None:
yield password_util.ValidateUserPassword(client, user, old_password)
# Replace password with generated hash and salt.
pwd_hash, salt = password_util.GeneratePasswordHash(password)
request['pwd_hash'] = pwd_hash
request['salt'] = salt
request = {'headers': request.pop('headers'),
'user_dict': request,
'settings_dict': request.pop('account_settings', None)}
request['user_dict']['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'User.UpdateOperation',
request)
logging.info('UPDATE USER: user: %d, device: %d' % (user_id, device_id))
raise gen.Return({})
@gen.coroutine
def UpdateUserPhoto(client, obj_store, user_id, device_id, request):
op_request = {'headers': request.pop('headers'),
'up_dict': request}
op_request['up_dict']['user_id'] = user_id
yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id, 'UserPhoto.UpdateOperation', op_request)
logging.info('UPDATE USER PHOTO: user: %d, device:%d, photo:%s' %
(user_id, device_id, op_request['up_dict']['photo_id']))
raise gen.Return({})
@gen.coroutine
def UpdateViewpoint(client, obj_store, user_id, device_id, request):
"""Updates viewpoint metadata."""
yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])
headers = request.pop('headers')
activity = request.pop('activity')
viewpoint_id = request['viewpoint_id']
# We need to preserve backwards-compatibility with old clients that use update_viewpoint in
# order to make changes to follower attributes.
follower_columns = Follower._table.GetColumnNames()
viewpoint_columns = Viewpoint._table.GetColumnNames()
if all(attr in follower_columns for attr in request.keys()):
request = {'headers': headers,
'user_id': user_id,
'follower': request}
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UpdateFollowerOperation.Execute',
request)
elif all(attr in viewpoint_columns for attr in request.keys()):
request = {'headers': headers,
'user_id': user_id,
'activity': activity,
'viewpoint': request}
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UpdateViewpointOperation.Execute',
request)
else:
raise web.HTTPError(400, 'Viewpoint and follower attributes cannot be updated together ' +
'in the same call to update_viewpoint.')
logging.info('UPDATE VIEWPOINT: user: %d, device: %d, viewpoint: %s' %
(user_id, device_id, viewpoint_id))
raise gen.Return({})
@gen.coroutine
def UploadContacts(client, obj_store, user_id, device_id, request):
"""Creates/updates contacts metadata."""
request['user_id'] = user_id
contact_count = len(request['contacts'])
# Pre-process each contact and generate contact_id list to return as result.
result_contact_ids = []
for contact in request['contacts']:
canon_identities = set()
identities_properties = []
for contact_identity in contact['identities']:
identity_key = contact_identity['identity']
description = contact_identity.get('description', None)
if identity_key != Identity.Canonicalize(identity_key):
raise InvalidRequestError(IDENTITY_NOT_CANONICAL, identity_key=identity_key)
canon_identities.add(identity_key)
# Build identities_properties in the form that's expected for creating a contact.
identities_properties.append((identity_key, description))
# Set 'identities' and 'identities_properties' as is expected for creating the contact.
contact['identities'] = list(canon_identities)
contact['identities_properties'] = identities_properties
# Now, calculate the contact_id.
contact['contact_id'] = Contact.CalculateContactId(contact)
# Add contact_id to result list.
result_contact_ids.append(contact['contact_id'])
yield gen.Task(Operation.CreateAndExecute,
client,
user_id,
device_id,
'UploadContactsOperation.Execute',
request)
logging.info('UPLOAD CONTACTS: user: %d, device: %d, contact_count: %d' % (user_id, device_id, contact_count))
raise gen.Return({'contact_ids': result_contact_ids})
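def _upload_contacts_request_sketch():
  """Illustrative only (added; not part of the original handler): the minimal
  shape of request['contacts'] that the pre-processing loop above expects.
  Only the field names come from the code; the values are made up, and real
  identity keys must already be in the canonical form produced by
  Identity.Canonicalize."""
  return {
    'contacts': [
      {
        'identities': [
          {'identity': '<canonical identity key>', 'description': 'mobile'},
        ],
        # ...any additional contact fields used by Contact.CalculateContactId...
      },
    ],
  }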
@gen.coroutine
def UploadEpisode(client, obj_store, user_id, device_id, request):
"""Creates metadata for new photos and returns URLs where the client
should upload the photo images. Creates a new episode to group the
photos or adds the photos to an existing episode.
"""
def _GenerateUploadUrl(obj_store, ph_dict, suffix, md5_attr):
"""Create an S3 URL that is the target of a photo upload. "suffix" is
appended to the end of the photo id in order to distinguish different
photo sizes. The value of the "md5_attr" attribute is converted to a
| |
[ 2.0, 0.0, 0.0, -2.0, 1.0, 0.0004, 0.0, -0.0002, 0.0 ],
[ 0.0, 1.0, 2.0, -2.0, 1.0, 0.0004, 0.0, -0.0002, 0.0 ],
[ 1.0, 1.0, 0.0, 0.0, 0.0, -0.0003, 0.0, 0.0, 0.0 ],
[ 1.0, -1.0, 0.0, -1.0, 0.0, -0.0003, 0.0, 0.0, 0.0 ],
[ -1.0, -1.0, 2.0, 2.0, 2.0, -0.0003, 0.0, 0.0001, 0.0 ],
[ 0.0, -1.0, 2.0, 2.0, 2.0, -0.0003, 0.0, 0.0001, 0.0 ],
[ 1.0, -1.0, 2.0, 0.0, 2.0, -0.0003, 0.0, 0.0001, 0.0 ],
[ 3.0, 0.0, 2.0, 0.0, 2.0, -0.0003, 0.0, 0.0001, 0.0 ],
[ -2.0, 0.0, 2.0, 0.0, 2.0, -0.0003, 0.0, 0.0001, 0.0 ],
[ 1.0, 0.0, 2.0, 0.0, 0.0, 0.0003, 0.0, 0.0, 0.0 ],
[ -1.0, 0.0, 2.0, 4.0, 2.0, -0.0002, 0.0, 0.0001, 0.0 ],
[ 1.0, 0.0, 0.0, 0.0, 2.0, -0.0002, 0.0, 0.0001, 0.0 ],
[ -1.0, 0.0, 2.0, -2.0, 1.0, -0.0002, 0.0, 0.0001, 0.0 ],
[ 0.0, -2.0, 2.0, -2.0, 1.0, -0.0002, 0.0, 0.0001, 0.0 ],
[ -2.0, 0.0, 0.0, 0.0, 1.0, -0.0002, 0.0, 0.0001, 0.0 ],
[ 2.0, 0.0, 0.0, 0.0, 1.0, 0.0002, 0.0, -0.0001, 0.0 ],
[ 3.0, 0.0, 0.0, 0.0, 0.0, 0.0002, 0.0, 0.0, 0.0 ],
[ 1.0, 1.0, 2.0, 0.0, 2.0, 0.0002, 0.0, -0.0001, 0.0 ],
[ 0.0, 0.0, 2.0, 1.0, 2.0, 0.0002, 0.0, -0.0001, 0.0 ],
[ 1.0, 0.0, 0.0, 2.0, 1.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, 2.0, 2.0, 1.0, -0.0001, 0.0, 0.0001, 0.0 ],
[ 1.0, 1.0, 0.0, -2.0, 1.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 2.0, -2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, -2.0, 2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, -2.0, 2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, -2.0, -2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, 2.0, -2.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 0.0, 0.0, -4.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 2.0, 0.0, 0.0, -4.0, 0.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 2.0, 4.0, 2.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 2.0, -1.0, 2.0, -0.0001, 0.0, 0.0, 0.0 ],
[ -2.0, 0.0, 2.0, 4.0, 2.0, -0.0001, 0.0, 0.0001, 0.0 ],
[ 2.0, 0.0, 2.0, 2.0, 2.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, -1.0, 2.0, 0.0, 1.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, -2.0, 0.0, 1.0, -0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 4.0, -2.0, 2.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0, 2.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, 1.0, 2.0, -2.0, 2.0, 0.0001, 0.0, -0.0001, 0.0 ],
[ 3.0, 0.0, 2.0, -2.0, 2.0, 0.0001, 0.0, 0.0, 0.0 ],
[ -2.0, 0.0, 2.0, 2.0, 2.0, 0.0001, 0.0, -0.0001, 0.0 ],
[ -1.0, 0.0, 0.0, 0.0, 2.0, 0.0001, 0.0, -0.0001, 0.0 ],
[ 0.0, 0.0, -2.0, 2.0, 1.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 2.0, 0.0, 1.0, 0.0001, 0.0, 0.0, 0.0 ],
[ -1.0, 0.0, 4.0, 0.0, 2.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 2.0, 1.0, 0.0, -2.0, 0.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 2.0, 0.0, 0.0, 2.0, 0.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 2.0, 0.0, 2.0, -2.0, 1.0, 0.0001, 0.0, -0.0001, 0.0 ],
[ 2.0, 0.0, -2.0, 0.0, 1.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 1.0, -1.0, 0.0, -2.0, 0.0, 0.0001, 0.0, 0.0, 0.0 ],
[ -1.0, 0.0, 0.0, 1.0, 1.0, 0.0001, 0.0, 0.0, 0.0 ],
[ -1.0, -1.0, 0.0, 2.0, 1.0, 0.0001, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 1.0, 0.0, 0.0001, 0.0, 0.0, 0.0 ]]
# Get a degree-to-radian multiplier.
d2r = np.pi / 180.0
# Convert the current epoch in GPST to Terrestrial Time (TT = GPST + 51.184 s, approximated here as 51 s)
tt = t + datetime.timedelta(seconds=51)
# Get the J2000 reference epoch
j2000 = datetime.datetime(2000,1,1,12,0,0)
# Compute the time elapsed T in Julian centuries
T = ( ( (tt - j2000).days ) + ( (tt - j2000).seconds / 86400 ) ) / 36525
# Compute alpha-1 term corresponding to Moon's mean anomaly
a1 = (485868.2490360000) * 1.0
a1 += (715923.2177999020) * T
a1 += (1325.00 * 1296000) * T
a1 += (31.87920000000000) * T**2
a1 += (0.051635000000000) * T**3
a1 += (-0.00024470000000) * T**4
a1 = (a1 / 3600) * d2r
# Compute alpha-2 term corresponding to Sun's mean anomaly
a2 = (1287104.793050000) * 1.0
a2 += (1292581.048099995) * T
a2 += (99.0000 * 1296000) * T
a2 += (-0.55320000000000) * T**2
a2 += (0.000136000000000) * T**3
a2 += (-0.00001149000000) * T**4
a2 = (a2 / 3600) * d2r
# Compute alpha-3 term corresponding to Moon's mean argument of latitude
a3 = (335779.5262320000) * 1.0
a3 += (295262.8478000164) * T
a3 += (1342.00 * 1296000) * T
a3 += (-12.7512000000000) * T**2
a3 += (-0.00103700000000) * T**3
a3 += (0.000004170000000) * T**4
a3 = (a3 / 3600) * d2r
# Compute alpha-4 term corresponding to Moon's mean elongation from sun
a4 = (1072260.703690000) * 1.0
a4 += (1105601.209000110) * T
a4 += (1236.00 * 1296000) * T
a4 += (-6.37060000000000) * T**2
a4 += (0.006593000000000) * T**3
a4 += (-0.00003169000000) * T**4
a4 = (a4 / 3600) * d2r
# Compute alpha-5 term corresponding to longitude of ascending lunar node
a5 = (450160.3980360000) * 1.0
a5 += (-482890.543100000) * T
a5 += (-5.0000 * 1296000) * T
a5 += (7.472200000000000) * T**2
a5 += (0.007702000000000) * T**3
a5 += (-0.00005939000000) * T**4
a5 = (a5 / 3600) * d2r
# Number of rows in the nested list of IAU 1980 Nutation Model coefficients
N_len = len(N)
# Compute the first rotation angle, which is to be rotated about the X-axis.
# This angle is the mean obliquity of the ecliptic.
p = ( 23.439291 - 0.013004*T - 1.639E-7*(T**2) + 5.036E-7*(T**3) ) * d2r
# Compute the second rotation angle, which is to be rotated about Z-axis.
# This angle is the negative longitudinal nutation, which moves the X-axis
# into its final position pointing towards the true equinox of date.
q = 0.0
for n in range(0, N_len):
k1 = N[n][0]
k2 = N[n][1]
k3 = N[n][2]
k4 = N[n][3]
k5 = N[n][4]
AA0 = N[n][5] * d2r / (3600)
AA1 = N[n][6] * d2r / (3600*10000)
delaunay_products = np.dot([k1,k2,k3,k4,k5], [a1,a2,a3,a4,a5])
q += (AA0 + AA1*T) * math.sin(delaunay_products)
# Compute the third rotation angle, which is to be rotated about X-axis.
# This angle is the difference between the mean and true obliquity angle.
# This angle, combined with the negative of the mean obliquity, gives the
# rotation about Earth's new x-axis by the negative of the true obliquity.
r = 0.0
for n in range(0, N_len):
k1 = N[n][0]
k2 = N[n][1]
k3 = N[n][2]
k4 = N[n][3]
k5 = N[n][4]
BB0 = N[n][7] * d2r / (3600)
BB1 = N[n][8] * d2r / (3600*10000)
delaunay_products = np.dot([k1,k2,k3,k4,k5], [a1,a2,a3,a4,a5])
r += (BB0 + BB1*T) * math.cos(delaunay_products)
# Compute the final nutation matrix
nutation = _dcmX(-1*(p+r)) @ _dcmZ(-1*q) @ _dcmX(p)
return nutation
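# The elementary rotation helpers _dcmX and _dcmZ used above are defined
# elsewhere in the library and are not shown in this excerpt. A minimal sketch
# of the passive (frame) rotation matrices they are assumed to implement is
# given below purely for reference; the sign convention must match the
# library's own definitions, so treat this only as an illustration.
def _dcmX_sketch(t):
    import numpy as np  # numpy is already imported as np in this module
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, np.cos(t), np.sin(t)],
                     [0.0, -np.sin(t), np.cos(t)]])

def _dcmZ_sketch(t):
    import numpy as np
    return np.array([[np.cos(t), np.sin(t), 0.0],
                     [-np.sin(t), np.cos(t), 0.0],
                     [0.0, 0.0, 1.0]])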
##############################################################################
##############################################################################
### ###
### FUNCTION BELOW TO COMPUTE DIURNAL ROTATION MATRIX ###
### ###
##############################################################################
##############################################################################
def diurnal(t, N):
'''Computes the diurnal rotation (Earth's rotation about its own axis) to
correct for the sidereal time motion since the ITRF is a rotating frame
while the CEP is a non-rotating frame.
Parameters
----------
t : datetime.datetime
Current time of observation in GPST.
N : numpy.ndarray
A 3x3 DCM that rotates the ICRF by the nutation offset.
Returns
-------
diurnal_rotation : numpy.ndarray
A 3x3 DCM that converts the CEP frame into the rotating
Earth frame due to sidereal motion, using the Greenwich Mean Sidereal
Time (GMST) at 0-hours GPST (not UTC or UT1).
'''
# Note that the original formula in the ESA Navipedia uses UTC. However,
# LEOGPS will | |
= self
self.mpls_static_labels = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels()
self.mpls_static_labels.parent = self
self.source_address = None
self.tag_impose = None
class MplsStaticLabels(object):
"""
MPLS static labels
.. attribute:: local_static_label
Pseudowire local static label
**type**\: int
**range:** 16..1048575
.. attribute:: remote_static_label
Pseudowire remote static label
**type**\: int
**range:** 16..1048575
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.local_static_label = None
self.remote_static_label = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:mpls-static-labels'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.local_static_label is not None:
return True
if self.remote_static_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels']['meta_info']
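# Illustrative configuration sketch (not part of the generated bindings): the
# label values are placeholders within the documented 16..1048575 range, and
# attaching the object to its parent pseudowire plus pushing it to a device
# through a ydk CRUD service are omitted.
#
# labels = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect \
#     .Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels()
# labels.local_static_label = 100
# labels.remote_static_label = 200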
class BackupPseudowires(object):
"""
List of pseudowires
.. attribute:: backup_pseudowire
Backup pseudowire for the cross connect
**type**\: list of :py:class:`BackupPseudowire <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.backup_pseudowire = YList()
self.backup_pseudowire.parent = self
self.backup_pseudowire.name = 'backup_pseudowire'
class BackupPseudowire(object):
"""
Backup pseudowire for the cross connect
.. attribute:: neighbor <key>
Neighbor IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: pseudowire_id <key>
Pseudowire ID
**type**\: int
**range:** 1..4294967295
.. attribute:: backup_mpls_static_labels
MPLS static labels
**type**\: :py:class:`BackupMplsStaticLabels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels>`
.. attribute:: backup_pw_class
PW class template name to use for the backup PW
**type**\: str
**length:** 0..32
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.neighbor = None
self.pseudowire_id = None
self.backup_mpls_static_labels = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels()
self.backup_mpls_static_labels.parent = self
self.backup_pw_class = None
class BackupMplsStaticLabels(object):
"""
MPLS static labels
.. attribute:: local_static_label
Pseudowire local static label
**type**\: int
**range:** 16..1048575
.. attribute:: remote_static_label
Pseudowire remote static label
**type**\: int
**range:** 16..1048575
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.local_static_label = None
self.remote_static_label = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:backup-mpls-static-labels'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.local_static_label is not None:
return True
if self.remote_static_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.neighbor is None:
raise YPYModelError('Key property neighbor is None')
if self.pseudowire_id is None:
raise YPYModelError('Key property pseudowire_id is None')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:backup-pseudowire[Cisco-IOS-XR-l2vpn-cfg:neighbor = ' + str(self.neighbor) + '][Cisco-IOS-XR-l2vpn-cfg:pseudowire-id = ' + str(self.pseudowire_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.neighbor is not None:
return True
if self.pseudowire_id is not None:
return True
if self.backup_mpls_static_labels is not None and self.backup_mpls_static_labels._has_data():
return True
if self.backup_pw_class is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:backup-pseudowires'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.backup_pseudowire is not None:
for child_ref in self.backup_pseudowire:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires']['meta_info']
class L2TpStaticAttributes(object):
"""
L2TP Static Attributes
.. attribute:: l2tp_local_cookie
L2TP local cookie
**type**\: :py:class:`L2TpLocalCookie <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie>`
.. attribute:: l2tp_local_session_id
L2TP local session ID
**type**\: int
**range:** 1..65535
.. attribute:: l2tp_remote_cookie
L2TP remote cookie
**type**\: :py:class:`L2TpRemoteCookie <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie>`
.. attribute:: l2tp_remote_session_id
L2TP remote session ID
**type**\: int
**range:** 1..65535
.. attribute:: l2tp_secondary_local_cookie
L2TP secondary local cookie
**type**\: :py:class:`L2TpSecondaryLocalCookie <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.l2tp_local_cookie = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie()
self.l2tp_local_cookie.parent = self
self.l2tp_local_session_id = None
self.l2tp_remote_cookie = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie()
self.l2tp_remote_cookie.parent = self
self.l2tp_remote_session_id = None
self.l2tp_secondary_local_cookie = L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie()
self.l2tp_secondary_local_cookie.parent = self
class L2TpRemoteCookie(object):
"""
L2TP remote cookie
.. attribute:: higher_value
Higher remote cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: lower_value
Lower remote cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: size
Remote cookie size
**type**\: :py:class:`L2TpCookieSizeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2TpCookieSizeEnum>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.higher_value = None
self.lower_value = None
self.size = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:l2tp-remote-cookie'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.higher_value is not None:
return True
if self.lower_value is not None:
return True
if self.size is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie']['meta_info']
class L2TpSecondaryLocalCookie(object):
"""
L2TP secondary local cookie
.. attribute:: higher_value
Higher local cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: lower_value
Lower local cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: size
Local cookie size
**type**\: :py:class:`L2TpCookieSizeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2TpCookieSizeEnum>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.higher_value = None
self.lower_value = None
self.size = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:l2tp-secondary-local-cookie'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.higher_value is not None:
return True
if self.lower_value is not None:
return True
if self.size is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie']['meta_info']
class L2TpLocalCookie(object):
"""
L2TP local cookie
.. attribute:: higher_value
Higher local cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: lower_value
Lower local cookie value
**type**\: int
**range:** 0..4294967295
.. attribute:: size
Local cookie size
**type**\: :py:class:`L2TpCookieSizeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2TpCookieSizeEnum>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.higher_value = None
self.lower_value = None
self.size = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:l2tp-local-cookie'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.higher_value is not None:
return True
if self.lower_value is not None:
return True
if self.size is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:l2tp-static-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.l2tp_local_cookie is not None and self.l2tp_local_cookie._has_data():
return True
if self.l2tp_local_session_id is not None:
return True
if self.l2tp_remote_cookie is not None and self.l2tp_remote_cookie._has_data():
return True
if self.l2tp_remote_session_id is not None:
return True
if self.l2tp_secondary_local_cookie is not None and self.l2tp_secondary_local_cookie._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
return meta._meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes']['meta_info']
class L2TpStatic(object):
"""
Pseudowire L2TPv3 static configuration
.. attribute:: enable
Enable pseudowire L2TPv3 static configuration
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.enable = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive
22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90
]
self._classes = {
ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._cache_file = os.path.join(cache_dir, "line_real_{}.pkl".format(self._dataset))
self._load_data()
self._db_inds = np.arange(len(self._image_ids))
self._load_coco_data()
def _load_data(self):
if not os.path.exists("./cache"):
os.makedirs("./cache")
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_ids], f)
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_ids = pickle.load(f)
def _load_coco_data(self):
self._coco = COCO(self._label_file)
with open(self._label_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._coco = COCO(self._label_file)
self._cat_ids = self._coco.getCatIds()
coco_image_ids = self._coco.getImgIds()
self._image_ids = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in coco_image_ids
]
self._detections = {}
for ind, (coco_image_id, image_id) in enumerate(tqdm(zip(coco_image_ids, self._image_ids))):
image = self._coco.loadImgs(coco_image_id)[0]
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
max_len = 0
if len(annotations) == 0:
self._detections[image_id] = None
else:
bbox = np.array(annotations[0]["bbox"])
self._detections[image_id] = bbox
def detections(self, ind):
image_id = self._image_ids[ind]
detections = self._detections[image_id]
return copy.deepcopy(detections)
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_to_coco(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score))
}
detections.append(detection)
return detections
def convert_to_coco_points(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for info in all_bboxes[image_id][cls_ind]:
bbox = [info[3], info[4], 6, 6]
bbox = list(map(self._to_float, bbox[0:4]))
score = info[0]
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score))
}
detections.append(detection)
return detections
def convert_to_coco_points_pure(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for info in all_bboxes[image_id][cls_ind]:
bbox = [info[3], info[4], 6, 6]
bbox = list(map(self._to_float, bbox[0:4]))
score = info[0]
tag = info[1]
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score)),
"tag" : float(tag)
}
detections.append(detection)
return detections
def evaluate(self, result_json, cls_ids, image_ids, gt_json=None):
if self._split == "testdev":
return None
coco = self._coco if gt_json is None else COCO(gt_json)
eval_ids = [self._coco_eval_ids[image_id] for image_id in image_ids]
cat_ids = [self._classes[cls_id] for cls_id in cls_ids]
coco_dets = coco.loadRes(result_json)
coco_eval = COCOeval(coco, coco_dets, "bbox")
coco_eval.params.imgIds = eval_ids
coco_eval.params.catIds = cat_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats[0], coco_eval.stats[12:]
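# Typical end-to-end use of this database class (sketch; `db` is an instance
# created by the training/testing harness and `all_bboxes` is the detector
# output keyed by image id and class index):
#
# results = db.convert_to_coco(all_bboxes)
# with open("results.json", "w") as f:
#     json.dump(results, f)
# mean_ap, extra_stats = db.evaluate("results.json", cls_ids, image_ids)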
class Bar(DETECTION):
def __init__(self, db_config, split):
super(Bar, self).__init__(db_config)
data_dir = system_configs.data_dir
result_dir = system_configs.result_dir
cache_dir = system_configs.cache_dir
self._split = split
self._dataset = {
"trainchart": "train2019",
"valchart": "val2019",
"testchart": "test2019"
}[self._split]
self._coco_dir = os.path.join(data_dir, "bar")
self._label_dir = os.path.join(self._coco_dir, "annotations")
self._label_file = os.path.join(self._label_dir, "instancesBar(1031)_{}.json")
self._label_file = self._label_file.format(self._dataset)
self._image_dir = os.path.join(self._coco_dir, "images", self._dataset)
self._image_file = os.path.join(self._image_dir, "{}")
self._data = "bar"
self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
self._std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self._cat_ids = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90
]
self._classes = {
ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._cache_file = os.path.join(cache_dir, "chart_{}.pkl".format(self._dataset))
self._load_data()
self._db_inds = np.arange(len(self._image_ids))
self._load_coco_data()
def _load_data(self):
if not os.path.exists("./cache"):
os.makedirs("./cache")
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_ids], f)
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_ids = pickle.load(f)
def _load_coco_data(self):
self._coco = COCO(self._label_file)
with open(self._label_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._coco = COCO(self._label_file)
self._cat_ids = self._coco.getCatIds()
coco_image_ids = self._coco.getImgIds()
self._image_ids = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in coco_image_ids
]
self._detections = {}
for ind, (coco_image_id, image_id) in enumerate(tqdm(zip(coco_image_ids, self._image_ids))):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
bboxes = np.array(bboxes, dtype=float)
categories = np.array(categories, dtype=float)
if bboxes.size == 0 or categories.size == 0:
self._detections[image_id] = np.zeros((0, 5), dtype=np.float32)
else:
self._detections[image_id] = np.hstack((bboxes, categories[:, None]))
def detections(self, ind):
image_id = self._image_ids[ind]
detections = self._detections[image_id]
return detections.astype(float).copy()
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_to_coco(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score))
}
detections.append(detection)
return detections
def convert_to_coco_points(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for info in all_bboxes[image_id][cls_ind]:
bbox = [info[3], info[4], 6, 6]
bbox = list(map(self._to_float, bbox[0:4]))
score = info[0]
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score))
}
detections.append(detection)
return detections
def convert_to_coco_points_pure(self, all_bboxes):
detections = []
for image_id in all_bboxes:
coco_id = self._coco_eval_ids[image_id]
for cls_ind in all_bboxes[image_id]:
category_id = self._classes[cls_ind]
for info in all_bboxes[image_id][cls_ind]:
bbox = [info[2], info[3], 6, 6]
bbox = list(map(self._to_float, bbox[0:4]))
score = info[0]
detection = {
"image_id": coco_id,
"category_id": category_id,
"bbox": bbox,
"score": float("{:.2f}".format(score))
}
detections.append(detection)
return detections
def evaluate(self, result_json, cls_ids, image_ids, gt_json=None):
if self._split == "testdev":
return None
coco = self._coco if gt_json is None else COCO(gt_json)
eval_ids = [self._coco_eval_ids[image_id] for image_id in image_ids]
cat_ids = [self._classes[cls_id] for cls_id in cls_ids]
coco_dets = coco.loadRes(result_json)
coco_eval = COCOeval(coco, coco_dets, "bbox")
coco_eval.params.imgIds = eval_ids
coco_eval.params.catIds = cat_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats[0], coco_eval.stats[12:]
class Cls(DETECTION):
def __init__(self, db_config, split):
super(Cls, self).__init__(db_config)
data_dir = system_configs.data_dir
result_dir = system_configs.result_dir
cache_dir = system_configs.cache_dir
tar_data_type = system_configs.tar_data_dir
self._split = split
self._dataset = {
"trainchart": "train2019",
"valchart": "val2019",
"testchart": "test2019"
}[self._split]
self._coco_dir = os.path.join(data_dir, tar_data_type)
self._label_dir = os.path.join(self._coco_dir, "annotations")
self._label_file = os.path.join(self._label_dir, "instances%s(1031)_{}.json" %tar_data_type.capitalize())
self._label_file = self._label_file.format(self._dataset)
self._image_dir = os.path.join(self._coco_dir, "images", self._dataset)
self._image_file = os.path.join(self._image_dir, "{}")
self._data = tar_data_type
self._mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
self._std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self._cat_ids = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90
]
self._classes = {
ind + 1: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in
import emoji
import datetime
import itertools
from collections import Counter
from dateutil.relativedelta import relativedelta
MAX_MSG_LEN = 4096
def count_months(msgs):
"""Returns the number of months between first and last messages (calendar months)."""
r = relativedelta(msgs[-1].date, msgs[0].date)
return r.months + 12 * r.years
def get_filtered(msgs,
remove_empty=False,
remove_links=False,
remove_forwards=False,
except_patterns=None,
except_samples=None,
min_len=0,
max_len=MAX_MSG_LEN
):
"""Filters a list of messages by different parameters.
Notes:
Patterns and samples are lowered as well as the messages they are compared to.
Args:
msgs (list of MyMessage objects): Messages to filter.
remove_empty (bool): Skips/keeps messages with empty text component.
remove_links (bool): Skips/keeps messages which are links.
remove_forwards (bool): Skips/keeps messages which are forwarded.
except_patterns (list of sets of strings (characters)):
Skips messages which are made ONLY from the characters from any set in this list.
except_samples (list of strings):
Skips messages which are equal to any string in this list.
min_len (int): Skips/keeps messages shorter than min_len.
max_len (int): Skips/keeps messages longer than max_len.
Returns:
A list of MyMessage objects.
"""
if except_patterns is not None:
# normalise each pattern to a set of lowercase characters so it can be
# compared against set(msg.text.lower()) in the filter below
except_patterns = [set(c.lower() for c in pattern) for pattern in except_patterns]
if except_samples is not None:
except_samples = list(sample.lower() for sample in except_samples)
return list(filter(lambda msg:
(not remove_empty or msg.text != "")
and min_len <= len(msg.text) <= max_len
and not (remove_forwards and msg.is_forwarded)
and not (remove_links and msg.is_link)
and (except_patterns is None or not any(set(msg.text.lower()) == p for p in except_patterns))
and (except_samples is None or not any(sample == msg.text.lower() for sample in except_samples)),
msgs))
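# Example call (illustrative only; `msgs` is assumed to be a chronologically
# sorted list of MyMessage objects produced elsewhere in the project):
#
# meaningful = get_filtered(
#     msgs,
#     remove_empty=True,
#     remove_links=True,
#     remove_forwards=True,
#     except_samples=["ok", ":)"],
#     min_len=2,
# )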
def get_non_text_messages_grouped(groups):
"""Filters and structures messages for each group and non-text message type.
Args:
groups (list of lists of MyMessage objects): Messages grouped.
Returns:
A list of message types grouped:
[
{
"groups": [list of numbers of specific messages in each group],
"type": string type of these messages.
}
]
"""
return [
{"groups": [len(list(filter(lambda m: m.has_audio, group))) for group in groups],
"type": "audio"},
{"groups": [len(list(filter(lambda m: m.has_voice, group))) for group in groups],
"type": "voice"},
{"groups": [len(list(filter(lambda m: m.has_photo, group))) for group in groups],
"type": "photo"},
{"groups": [len(list(filter(lambda m: m.has_video, group))) for group in groups],
"type": "video"},
{"groups": [len(list(filter(lambda m: m.has_sticker, group))) for group in groups],
"type": "sticker"},
{"groups": [len(list(filter(lambda m: m.is_link, group))) for group in groups],
"type": "link"}
]
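# Illustrative pairing with the per-month grouping helper defined later in this
# module (this data flow is an assumption, not taken from the original code):
#
# per_month = get_messages_per_month(msgs)
# non_text_stats = get_non_text_messages_grouped(list(per_month.values()))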
def get_response_speed_per_timedelta(msgs, name):
"""Gets list of response time lengths of a certain person.
Notes:
This function is not used anywhere (at the time when this docstring was written) because it needs
a better algorithm for deciding whether a message is a response or not.
Args:
msgs (list of MyMessage objects): Messages.
name (str): The name of the person whose response time is calculated.
Returns:
A list of the person's (name) response time lengths.
"""
res = []
i = 0
if msgs[0].author == name:
while i < len(msgs) and msgs[i].author == name:
i += 1
while i < len(msgs):
while i < len(msgs) and msgs[i].author != name:
i += 1
if i < len(msgs) and (msgs[i].date - msgs[i - 1].date).seconds <= 4 * 3600: # because people sleep sometimes
res.append((msgs[i].date - msgs[i - 1].date).seconds / 60)
while i < len(msgs) and msgs[i].author == name:
i += 1
return res
def get_messages_per_timedelta(msgs, time_bin):
"""Gets lists of messages for each time interval with a given length. For example:
time_bin is 7, so we will get lists of messages for each week between the first and last messages.
Args:
msgs (list of MyMessage objects): Messages.
time_bin (int): The number of days in each bin (time interval).
Returns:
A dictionary such as:
{
day (datetime.date object): a list of messages within interval [day, day + time_bin)
}
"""
start_d = msgs[0].date.date()
current_date = start_d
end_d = msgs[-1].date.date()
res = dict()
while current_date <= end_d:
res[current_date] = []
current_date += relativedelta(days=time_bin)
for msg in msgs:
res[start_d + relativedelta(days=(msg.date.date() - start_d).days // time_bin * time_bin)].append(msg)
return res
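# Example (illustrative): bin the whole history into 7-day buckets counted from
# the date of the first message.
#
# weekly_bins = get_messages_per_timedelta(msgs, 7)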
def get_months(msgs):
"""Gets months (first day for each month) between the first and the last messages in a list.
Notes:
ATTENTION: the returned datetime objects have the day parameter set to 1 (first day of the month) for EACH month.
Args:
msgs (list of Mymessage objects): Messages.
Returns:
A list of datetime.date objects.
"""
start_d = msgs[0].date.date()
end_d = msgs[-1].date.date()
res = []
month, year = start_d.month, start_d.year
while (year < end_d.year or not month > end_d.month) and year <= end_d.year:
res.append(datetime.date(year, month, 1))
if month == 12:
year += 1
month = 0
month += 1
return res
def get_weeks(msgs):
"""Gets weeks (first day for each week) between the first and last messages in a list.
Notes:
First "week" is 7-days full.
This function returns calendar weeks, not just 7-days intervals.
Args:
msgs (list of Mymessage objects): Messages.
Returns:
A list of datetime.date objects.
"""
current_date = msgs[0].date.date()
end_d = msgs[-1].date.date()
res = []
if current_date.weekday() != 0:
current_date -= relativedelta(days=current_date.weekday())
while current_date <= end_d:
res.append(current_date)
current_date += relativedelta(days=7)
return res
def str_day(day):
"""Transforms datetime day object into a "%d/%m/%y" string.
Args:
day (datetime/datetime.date): Day.
Returns:
A "%d/%m/%y" string representation.
"""
return day.strftime("%d/%m/%y")
def date_days_to_str_days(days):
"""Transforms a list of datetime objects into a list of "%d/%m/%y" strings.
Args:
days (list of datetime objects): Days.
Returns:
A list of "%d/%m/%y" days representations.
"""
return [str_day(day) for day in days]
def str_month(month):
"""Transforms datetime month object into a "%m/%y" string.
Args:
month (datetime/datetime.date): Month.
Returns:
A "%m/%y" string representation.
"""
return month.strftime("%m/%y")
def date_months_to_str_months(months):
"""Transforms a list of datetime objects into a list of "%m/%y" strings.
Args:
months (list of datetime objects): Months.
Returns:
A list of "%m/%y" months representations.
"""
return [str_month(month) for month in months]
def get_messages_per_month(msgs):
"""Gets lists of messages for each month between the first and last message.
Notes:
Months keys are set to the first day of the month.
Args:
msgs (list of Mymessage objects): Messages.
Returns:
A dictionary such as:
{
month (datetime.date): list of messages within this month
}
"""
res = dict()
current_date = msgs[0].date.date().replace(day=1)
end_d = msgs[-1].date.date().replace(day=1)
while current_date <= end_d:
res[current_date] = []
current_date += relativedelta(months=1)
for msg in msgs:
res[msg.date.date().replace(day=1)].append(msg)
return res
def get_messages_per_week(msgs):
"""Gets lists of messages for each calendar week between the first and the last message.
Args:
msgs (list of Mymessage objects): Messages.
Returns:
A dictionary such as:
{
week (datetime.date): list of messages within this week
}
"""
res = dict()
current_date = msgs[0].date.date()
end_d = msgs[-1].date.date()
if current_date.weekday() != 0:
current_date -= relativedelta(days=current_date.weekday())
while current_date <= end_d:
res[current_date] = []
current_date += relativedelta(days=7)
for msg in msgs:
res[msg.date.date() - relativedelta(days=msg.date.date().weekday())].append(msg)
return res
def get_messages_per_minutes(msgs, minutes):
"""Gets lists of messages for each interval in minutes.
Args:
msgs (list of MyMessage objects): Messages.
minutes (int): The number of minutes in one interval.
Returns:
A dictionary such as:
{
minute: list of all messages sent within the interval [minute, minute + minutes).
}
"""
res = {i: [] for i in range(0, 24 * 60, minutes)}
for msg in msgs:
res[(msg.date.hour * 60 + msg.date.minute) // minutes * minutes].append(msg)
return res
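# Example (illustrative): 30-minute slots across the day, then pick the busiest
# slot; keys are minute offsets from midnight.
#
# per_slot = get_messages_per_minutes(msgs, 30)
# busiest_start = max(per_slot, key=lambda start: len(per_slot[start]))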
def get_messages_per_weekday(msgs):
"""Gets lists of messages for each day of the week (7 lists in a dictionary total).
Args:
msgs (list of MyMessage objects): Messages.
Returns:
A dictionary such as:
{
day_of_the_week (int 0-6): list of all messages sent on this day
}
"""
res = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
for msg in msgs:
res[msg.date.weekday()].append(msg)
# placing Sunday at the end of the week # turned out we don't need it...
# for i in [0, 1, 2, 3, 4, 5]:
# res[i], res[(i + 6) % 7] = res[(i + 6) % 7], res[i]
return res
def get_messages_per_day(msgs):
"""Gets lists of messages for each day between the first and the last message.
Notes:
Days are stored in a dictionary as integers (first day is 0, second is 1 etc).
Args:
msgs (list of MyMessage objects): Messages.
Returns:
A dictionary such as:
{
day (int): list of messages sent this day
}
"""
current_date = msgs[0].date.date()
end_d = msgs[-1].date.date()
res = dict()
| |
"""Objects representing API interface to Wikibase site."""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import uuid
from contextlib import suppress
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot.data import api
from pywikibot.exceptions import (
APIError,
EntityTypeUnknownError,
IsRedirectPageError,
NoPageError,
NoWikibaseEntityError,
)
from pywikibot.site._apisite import APISite
from pywikibot.site._decorators import need_extension, need_right, need_version
from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args
__all__ = ('DataSite', )
class DataSite(APISite):
"""Wikibase data capable site."""
def __init__(self, *args, **kwargs) -> None:
"""Initializer."""
super().__init__(*args, **kwargs)
self._item_namespace = None
self._property_namespace = None
self._type_to_class = {
'item': pywikibot.ItemPage,
'property': pywikibot.PropertyPage,
'mediainfo': pywikibot.MediaInfo,
'lexeme': pywikibot.LexemePage,
'form': pywikibot.LexemeForm,
'sense': pywikibot.LexemeSense,
}
def _cache_entity_namespaces(self) -> None:
"""Find namespaces for each known wikibase entity type."""
self._entity_namespaces = {}
for entity_type in self._type_to_class:
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == ('wikibase-' + entity_type):
self._entity_namespaces[entity_type] = namespace
break
def get_namespace_for_entity_type(self, entity_type):
"""
Return namespace for given entity type.
:return: corresponding namespace
:rtype: Namespace
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
if entity_type in self._entity_namespaces:
return self._entity_namespaces[entity_type]
raise EntityTypeUnknownError(
'{!r} does not support entity type "{}" '
"or it doesn't have its own namespace"
.format(self, entity_type))
@property
def item_namespace(self):
"""
Return namespace for items.
:return: item namespace
:rtype: Namespace
"""
if self._item_namespace is None:
self._item_namespace = self.get_namespace_for_entity_type('item')
return self._item_namespace
@property
def property_namespace(self):
"""
Return namespace for properties.
:return: property namespace
:rtype: Namespace
"""
if self._property_namespace is None:
self._property_namespace = self.get_namespace_for_entity_type(
'property')
return self._property_namespace
def get_entity_for_entity_id(self, entity_id):
"""
Return a new instance for given entity id.
:raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity
with the id
:return: a WikibaseEntity subclass
:rtype: WikibaseEntity
"""
for cls in self._type_to_class.values():
if cls.is_valid_id(entity_id):
return cls(self, entity_id)
entity = pywikibot.page.WikibaseEntity(self, entity_id)
raise NoWikibaseEntityError(entity)
@property
@need_version('1.28-wmf.3')
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
:return: sparql endpoint url
:rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version('1.28-wmf.23')
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
:return: concept base uri
:rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
url = self.siteinfo['general'].get('wikibase-geoshapestoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
url = self.siteinfo['general'].get(
'wikibase-tabulardatastoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
:param identification: Parameters used to identify the page(s)
:type identification: dict
:param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self.simple_request(**params)
data = req.submit()
if 'success' not in data:
raise APIError(data['errors'], '')
return data['entities']
def preload_entities(self, pagelist, groupsize: int = 50):
"""
Yield subclasses of WikibaseEntity's with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
:param pagelist: an iterable that yields either WikibaseEntity objects,
or Page objects linked to an ItemPage.
:param groupsize: how many pages to query at a time
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibaseEntity):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
if p.site == self and p.namespace() in (
self._entity_namespaces.values()):
req['ids'].append(p.title(with_ns=False))
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self.simple_request(action='wbgetentities', **req)
data = req.submit()
for entity in data['entities']:
if 'missing' in data['entities'][entity]:
continue
cls = self._type_to_class[data['entities'][entity]['type']]
page = cls(self, entity)
# No api call is made because item._content is given
page._content = data['entities'][entity]
with suppress(IsRedirectPageError):
page.get() # cannot provide get_redirect=True (T145971)
yield page
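# Illustrative bulk-fetch pattern (assumes a connection to a Wikibase
# repository such as Wikidata; the item IDs are placeholders):
#
# repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
# items = [pywikibot.ItemPage(repo, 'Q42'), pywikibot.ItemPage(repo, 'Q64')]
# for entity in repo.preload_entities(items, groupsize=50):
#     print(entity.id, entity.labels.get('en'))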
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@need_right('edit')
def editEntity(self, entity, data, bot: bool = True, **kwargs):
"""
Edit entity.
Note: This method is unable to create entities other than 'item'
if dict with API parameters was passed to 'entity' parameter.
:param entity: Page to edit, or dict with API parameters
to use for entity identification
:type entity: WikibaseEntity or dict
:param data: data updates
:type data: dict
:param bot: Whether to mark the edit as a bot edit
:return: New entity data
:rtype: dict
"""
# this changes the reference to a new object
data = dict(data)
if isinstance(entity, pywikibot.page.WikibaseEntity):
params = entity._defined_by(singular=True)
if 'id' in params and params['id'] == '-1':
del params['id']
if not params:
params['new'] = entity.entity_type
data_for_new_entity = entity.get_data_for_new_entity()
data.update(data_for_new_entity)
else:
if 'id' in entity and entity['id'] == '-1':
del entity['id']
params = dict(entity)
if not params: # If no identification was provided
params['new'] = 'item'
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self.simple_request(**params)
return req.submit()
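# Sketch of a typical `data` payload; the shape follows the wbeditentity API
# and the label text is a placeholder:
#
# data = {'labels': {'en': {'language': 'en', 'value': 'some label'}}}
# repo.editEntity(item, data, summary='Set English label')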
@need_right('edit')
def addClaim(self, entity, claim, bot: bool = True, summary=None) -> None:
"""
Add a claim.
:param entity: Entity to modify
:type entity: WikibaseEntity
:param claim: Claim to be added
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
claim.snak = entity.getID() + '$' + str(uuid.uuid4())
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'baserevid': entity.latest_revision_id,
'summary': summary,
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
# Update the item
if claim.getID() in entity.claims:
entity.claims[claim.getID()].append(claim)
else:
entity.claims[claim.getID()] = [claim]
entity.latest_revision_id = data['pageinfo']['lastrevid']
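# Minimal claim-creation sketch (property and target IDs are placeholders;
# `item` must already be loaded so its latest revision id is known):
#
# claim = pywikibot.Claim(repo, 'P31')
# claim.setTarget(pywikibot.ItemPage(repo, 'Q5'))
# repo.addClaim(item, claim, summary='add instance-of claim')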
@need_right('edit')
def changeClaimTarget(self, claim, snaktype: str = 'value',
bot: bool = True, summary=None):
"""
Set the claim target to the value of the provided claim target.
:param claim: The source of the claim target value
:type claim: pywikibot.Claim
:param snaktype: An optional snaktype ('value', 'novalue' or
'somevalue'). Default: 'value'
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def save_claim(self, claim, summary=None, bot: bool = True):
"""
Save the whole claim to the wikibase site.
:param claim: The claim to save
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editSource(self, claim, source,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a source.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the source to
:type claim: pywikibot.Claim
:param source: A Claim object to be used as a source
:type source: pywikibot.Claim
:param new: Whether to create a new one if the "source" already exists
:param bot:
from rpython.flowspace.model import FunctionGraph, Constant, Variable, c_last_exception
from rpython.rlib.rarithmetic import intmask, r_uint, ovfcheck, r_longlong, r_longlonglong
from rpython.rlib.rarithmetic import r_ulonglong, is_valid_int
from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation, llheap
from rpython.rtyper.lltypesystem import rclass
from rpython.rtyper.ootypesystem import ootype
from rpython.rlib.objectmodel import ComputedIntSymbolic, CDefinedIntSymbolic
from rpython.rlib.objectmodel import Symbolic
from rpython.rlib import rstackovf
import sys, os
import math
import py
import traceback, cStringIO
log = py.log.Producer('llinterp')
class LLException(Exception):
def __init__(self, *args):
"NOT_RPYTHON"
Exception.__init__(self, *args)
def __str__(self):
etype = self.args[0]
#evalue = self.args[1]
if len(self.args) > 2:
f = cStringIO.StringIO()
original_type, original_value, original_tb = self.args[2]
traceback.print_exception(original_type, original_value, original_tb,
file=f)
extra = '\n' + f.getvalue().rstrip('\n')
extra = extra.replace('\n', '\n | ') + '\n `------'
else:
extra = ''
return '<LLException %r%s>' % (type_name(etype), extra)
class LLFatalError(Exception):
def __str__(self):
return ': '.join([str(x) for x in self.args])
def type_name(etype):
if isinstance(lltype.typeOf(etype), lltype.Ptr):
return ''.join(etype.name).rstrip('\x00')
else:
# ootype!
return etype._INSTANCE._name.split(".")[-1]
class LLInterpreter(object):
""" low level interpreter working with concrete values. """
current_interpreter = None
def __init__(self, typer, tracing=True, exc_data_ptr=None):
self.bindings = {}
self.typer = typer
# 'heap' is module or object that provides malloc, etc for lltype ops
self.heap = llheap
self.exc_data_ptr = exc_data_ptr
self.frame_stack = []
self.tracer = None
self.frame_class = LLFrame
if tracing:
self.tracer = Tracer()
def eval_graph(self, graph, args=(), recursive=False):
llframe = self.frame_class(graph, args, self)
if self.tracer and not recursive:
global tracer1
tracer1 = self.tracer
self.tracer.start()
retval = None
self.traceback_frames = []
old_frame_stack = self.frame_stack[:]
prev_interpreter = LLInterpreter.current_interpreter
LLInterpreter.current_interpreter = self
try:
try:
retval = llframe.eval()
except LLException, e:
log.error("LLEXCEPTION: %s" % (e, ))
self.print_traceback()
if self.tracer:
self.tracer.dump('LLException: %s\n' % (e,))
raise
except Exception, e:
if getattr(e, '_go_through_llinterp_uncaught_', False):
raise
log.error("AN ERROR OCCURED: %s" % (e, ))
self.print_traceback()
if self.tracer:
line = str(e)
if line:
line = ': ' + line
line = '* %s' % (e.__class__.__name__,) + line
self.tracer.dump(line + '\n')
raise
finally:
LLInterpreter.current_interpreter = prev_interpreter
assert old_frame_stack == self.frame_stack
if self.tracer:
if retval is not None:
self.tracer.dump(' ---> %r\n' % (retval,))
if not recursive:
self.tracer.stop()
return retval
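# Typical driver usage (sketch; obtaining `typer` and `graph` from an RPython
# annotation/rtyping pass is outside this module):
#
# interp = LLInterpreter(typer)
# result = interp.eval_graph(graph, [arg0, arg1])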
def print_traceback(self):
frames = self.traceback_frames
frames.reverse()
self.traceback_frames = []
lines = []
for frame in frames:
logline = frame.graph.name + "()"
if frame.curr_block is None:
logline += " <not running yet>"
lines.append(logline)
continue
try:
logline += " " + self.typer.annotator.annotated[frame.curr_block].func.__module__
except (KeyError, AttributeError, TypeError):
logline += " <unknown module>"
lines.append(logline)
for i, operation in enumerate(frame.curr_block.operations):
if i == frame.curr_operation_index:
logline = "E %s"
else:
logline = " %s"
lines.append(logline % (operation, ))
if self.tracer:
self.tracer.dump('Traceback\n', bold=True)
for line in lines:
self.tracer.dump(line + '\n')
for line in lines:
log.traceback(line)
def find_roots(self):
"""Return a list of the addresses of the roots."""
#log.findroots("starting")
roots = []
for frame in self.frame_stack:
#log.findroots("graph", frame.graph.name)
frame.find_roots(roots)
return roots
def find_exception(self, exc):
assert isinstance(exc, LLException)
klass, inst = exc.args[0], exc.args[1]
for cls in enumerate_exceptions_top_down():
if hasattr(klass, 'name'): # lltype
if "".join(klass.name).rstrip("\0") == cls.__name__:
return cls
else: # ootype
if klass._INSTANCE._name.split('.')[-1] == cls.__name__:
return cls
raise ValueError("couldn't match exception, maybe it"
" has RPython attributes like OSError?")
def get_transformed_exc_data(self, graph):
if hasattr(graph, 'exceptiontransformed'):
return graph.exceptiontransformed
if getattr(graph, 'rgenop', False):
return self.exc_data_ptr
return None
def _store_exception(self, exc):
raise PleaseOverwriteStoreException("You just invoked ll2ctypes callback without overwriting _store_exception on llinterpreter")
class PleaseOverwriteStoreException(Exception):
pass
def checkptr(ptr):
assert isinstance(lltype.typeOf(ptr), lltype.Ptr)
def checkadr(addr):
assert lltype.typeOf(addr) is llmemory.Address
def is_inst(inst):
return isinstance(lltype.typeOf(inst), (ootype.Instance, ootype.BuiltinType, ootype.StaticMethod))
def checkinst(inst):
assert is_inst(inst)
class LLFrame(object):
def __init__(self, graph, args, llinterpreter):
assert not graph or isinstance(graph, FunctionGraph)
self.graph = graph
self.args = args
self.llinterpreter = llinterpreter
self.heap = llinterpreter.heap
self.bindings = {}
self.curr_block = None
self.curr_operation_index = 0
self.alloca_objects = []
def newsubframe(self, graph, args):
return self.__class__(graph, args, self.llinterpreter)
# _______________________________________________________
# variable setters/getters helpers
def clear(self):
self.bindings.clear()
def fillvars(self, block, values):
vars = block.inputargs
assert len(vars) == len(values), (
"block %s received %d args, expected %d" % (
block, len(values), len(vars)))
for var, val in zip(vars, values):
self.setvar(var, val)
def setvar(self, var, val):
if var.concretetype is not lltype.Void:
try:
val = lltype.enforce(var.concretetype, val)
except TypeError:
assert False, "type error: input value of type:\n\n\t%r\n\n===> variable of type:\n\n\t%r\n" % (lltype.typeOf(val), var.concretetype)
assert isinstance(var, Variable)
self.bindings[var] = val
def setifvar(self, var, val):
if isinstance(var, Variable):
self.setvar(var, val)
def getval(self, varorconst):
try:
val = varorconst.value
except AttributeError:
val = self.bindings[varorconst]
if isinstance(val, ComputedIntSymbolic):
val = val.compute_fn()
if varorconst.concretetype is not lltype.Void:
try:
val = lltype.enforce(varorconst.concretetype, val)
except TypeError:
assert False, "type error: %r val from %r var/const" % (lltype.typeOf(val), varorconst.concretetype)
return val
def getval_or_subop(self, varorsubop):
from rpython.translator.oosupport.treebuilder import SubOperation
if isinstance(varorsubop, SubOperation):
self.eval_operation(varorsubop.op)
resultval = self.getval(varorsubop.op.result)
del self.bindings[varorsubop.op.result] # XXX hack
return resultval
else:
return self.getval(varorsubop)
# _______________________________________________________
# other helpers
def getoperationhandler(self, opname):
ophandler = getattr(self, 'op_' + opname, None)
if ophandler is None:
# try to import the operation from opimpl.py
ophandler = lloperation.LL_OPERATIONS[opname].fold
setattr(self.__class__, 'op_' + opname, staticmethod(ophandler))
return ophandler
# _______________________________________________________
# evaling functions
def eval(self):
graph = self.graph
tracer = self.llinterpreter.tracer
if tracer:
tracer.enter(graph)
self.llinterpreter.frame_stack.append(self)
try:
try:
nextblock = graph.startblock
args = self.args
while 1:
self.clear()
self.fillvars(nextblock, args)
nextblock, args = self.eval_block(nextblock)
if nextblock is None:
for obj in self.alloca_objects:
obj._obj._free()
return args
except Exception:
self.llinterpreter.traceback_frames.append(self)
raise
finally:
leavingframe = self.llinterpreter.frame_stack.pop()
assert leavingframe is self
if tracer:
tracer.leave()
def eval_block(self, block):
""" return (nextblock, values) tuple. If nextblock
is None, values is the concrete return value.
"""
self.curr_block = block
catch_exception = block.exitswitch == c_last_exception
e = None
try:
for i, op in enumerate(block.operations):
self.curr_operation_index = i
self.eval_operation(op)
except LLException, e:
if not (catch_exception and op is block.operations[-1]):
raise
except RuntimeError, e:
rstackovf.check_stack_overflow()
# xxx fish fish fish for proper etype and evalue to use
rtyper = self.llinterpreter.typer
bk = rtyper.annotator.bookkeeper
classdef = bk.getuniqueclassdef(rstackovf._StackOverflow)
exdata = rtyper.getexceptiondata()
evalue = exdata.get_standard_ll_exc_instance(rtyper, classdef)
etype = exdata.fn_type_of_exc_inst(evalue)
e = LLException(etype, evalue)
if not (catch_exception and op is block.operations[-1]):
raise e
# determine nextblock and/or return value
if len(block.exits) == 0:
# return block
tracer = self.llinterpreter.tracer
if len(block.inputargs) == 2:
# exception
if tracer:
tracer.dump('raise')
etypevar, evaluevar = block.getvariables()
etype = self.getval(etypevar)
evalue = self.getval(evaluevar)
# watch out, these are _ptr's
raise LLException(etype, evalue)
resultvar, = block.getvariables()
result = self.getval(resultvar)
exc_data = self.llinterpreter.get_transformed_exc_data(self.graph)
if exc_data:
# re-raise the exception set by this graph, if any
etype = exc_data.exc_type
if etype:
evalue = exc_data.exc_value
if tracer:
tracer.dump('raise')
exc_data.exc_type = lltype.typeOf(etype )._defl()
exc_data.exc_value = lltype.typeOf(evalue)._defl()
from rpython.translator import exceptiontransform
T = resultvar.concretetype
errvalue = exceptiontransform.error_value(T)
# check that the exc-transformed graph returns the error
# value when it returns with an exception set
assert result == errvalue
raise LLException(etype, evalue)
if tracer:
tracer.dump('return')
return None, result
elif block.exitswitch is None:
# single-exit block
assert len(block.exits) == 1
link = block.exits[0]
elif catch_exception:
link = block.exits[0]
if e:
exdata = self.llinterpreter.typer.getexceptiondata()
cls = e.args[0]
inst = e.args[1]
for link in block.exits[1:]:
assert issubclass(link.exitcase, py.builtin.BaseException)
if self.op_direct_call(exdata.fn_exception_match,
cls, link.llexitcase):
self.setifvar(link.last_exception, cls)
self.setifvar(link.last_exc_value, inst)
break
else:
# no handler found, pass on
raise e
else:
llexitvalue = self.getval(block.exitswitch)
if block.exits[-1].exitcase == "default":
defaultexit = block.exits[-1]
nondefaultexits = block.exits[:-1]
assert defaultexit.llexitcase is None
else:
defaultexit = None
nondefaultexits = block.exits
for link in nondefaultexits:
if link.llexitcase == llexitvalue:
break # found -- the result is in 'link'
else:
if defaultexit is None:
raise ValueError("exit case %r not found in the exit links "
"of %r" % (llexitvalue, block))
else:
link = defaultexit
return link.target, [self.getval(x) for x in link.args]
def eval_operation(self, operation):
tracer = self.llinterpreter.tracer
if tracer:
tracer.dump(str(operation))
ophandler = self.getoperationhandler(operation.opname)
# XXX slightly unnice but an important safety check
if operation.opname == 'direct_call':
assert isinstance(operation.args[0], Constant)
elif operation.opname == 'indirect_call':
assert isinstance(operation.args[0], Variable)
if getattr(ophandler, 'specialform', False):
retval = ophandler(*operation.args)
else:
vals = [self.getval_or_subop(x) for x in operation.args]
if getattr(ophandler, 'need_result_type', False):
vals.insert(0, operation.result.concretetype)
try:
retval = ophandler(*vals)
except LLException, e:
# safety check: check that the operation is allowed to raise that
# exception
if operation.opname in lloperation.LL_OPERATIONS:
canraise = lloperation.LL_OPERATIONS[operation.opname].canraise
if Exception not in canraise:
exc = self.llinterpreter.find_exception(e)
for canraiseexc in canraise:
if issubclass(exc, canraiseexc):
break
else:
raise TypeError("the operation %s is not expected to raise %s" % (operation, exc))
# for exception-transformed graphs, | |
['Image'],
'ENCAPSULATED CDA IOD': ['Encapsulated Document'],
'ENHANCED SR IOD': ['Document'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'MR IMAGE IOD': ['Image'],
'BASIC IMAGE BOX IOD': ['Basic Image Box'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'STORAGE COMMITMENT IOD': ['Storage Commitment'],
},
# ExposureIndex
0x00181411L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# TargetExposureIndex
0x00181412L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# BluePaletteColorLookupTableData
0x00281203L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
},
# PatientGantryRelationshipCodeSequence
0x00540414L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
'PET IMAGE IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
},
# Columns
0x00280011L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Equipment', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# IconImageSequence
0x00880200L: {
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Presentation State', 'Image', 'Document', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'BASIC TEXT SR IOD': ['Document'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'COMPREHENSIVE SR IOD': ['Document'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'CHEST CAD SR IOD': ['Document'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'ENHANCED SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# AttenuationCorrected
0x00189759L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# ProcedureStepLabel
0x00741204L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
None: ['Unified Procedure Step'],
},
# ReferencedSegmentNumber
0x0062000BL: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# GeneralizedDefectSensitivityDeviationAlgorithmSequence
0x00240067L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# DetectorConfiguration
0x00187005L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# PixelPresentation
0x00089205L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# SOPAuthorizationDateTime
0x01000420L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT PLAN IOD': ['Plan'],
'FILM SESSION IOD': ['Film Session'],
'BASIC FILM BOX IOD': ['Basic Film Box'],
'CR IMAGE IOD': ['Image'],
'RAW DATA IOD': ['Raw Data'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
'ENHANCED MR IMAGE IOD': ['Image'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'RT TREATMENT SUMMARY RECORD IOD': ['Treatment Record'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Waveform'],
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'OPHTHALMIC | |
= menu(header, options, INVENTORY_WIDTH)
#if an item was chosen, return it
if index is None or len(inventory) == 0: return None
return inventory[index].item
def msgbox(text, width=50):
menu(text, [], width) #use menu() as a sort of "message box"
def handle_keys():
global key
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
return 'exit' #exit game
if game_state == 'playing':
#movement keys
if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
player_move_or_attack(0, -1)
elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
player_move_or_attack(0, 1)
elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
player_move_or_attack(-1, 0)
elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
player_move_or_attack(1, 0)
elif key.vk == libtcod.KEY_HOME or key.vk == libtcod.KEY_KP7:
player_move_or_attack(-1, -1)
elif key.vk == libtcod.KEY_PAGEUP or key.vk == libtcod.KEY_KP9:
player_move_or_attack(1, -1)
elif key.vk == libtcod.KEY_END or key.vk == libtcod.KEY_KP1:
player_move_or_attack(-1, 1)
elif key.vk == libtcod.KEY_PAGEDOWN or key.vk == libtcod.KEY_KP3:
player_move_or_attack(1, 1)
elif key.vk == libtcod.KEY_KP5:
pass #do nothing, i.e. wait for the monster to come to you
else:
#test for other keys
key_char = chr(key.c)
if key_char == 'g':
#pick up an item
for object in objects: #look for an item in the player's tile
if object.x == player.x and object.y == player.y and object.item:
object.item.pick_up()
break
if key_char == 'i':
#show the inventory; if an item is selected, use it
chosen_item = inventory_menu('Press the key next to an item to use it, or any other to cancel.\n')
if chosen_item is not None:
chosen_item.use()
if key_char == 'd':
#show the inventory; if an item is selected, drop it
chosen_item = inventory_menu('Press the key next to an item to drop it, or any other to cancel.\n')
if chosen_item is not None:
chosen_item.drop()
if key_char == 'c':
#show character information
level_up_xp = LEVEL_UP_BASE + player.level * LEVEL_UP_FACTOR
msgbox('Character Information\n\nLevel: ' + str(player.level) + '\nExperience: ' + str(player.fighter.xp) +
'\nExperience to level up: ' + str(level_up_xp) + '\n\nMaximum HP: ' + str(player.fighter.max_hp) +
'\nAttack: ' + str(player.fighter.power) + '\nDefense: ' + str(player.fighter.defense), CHARACTER_SCREEN_WIDTH)
if key_char == '<':
#go down stairs, if the player is on them
if stairs.x == player.x and stairs.y == player.y:
next_level()
return 'didnt-take-turn'
def check_level_up():
#see if the player's experience is enough to level-up
level_up_xp = LEVEL_UP_BASE + player.level * LEVEL_UP_FACTOR
if player.fighter.xp >= level_up_xp:
#it is! level up and ask to raise some stats
player.level += 1
player.fighter.xp -= level_up_xp
message('Your battle skills grow stronger! You reached level ' + str(player.level) + '!', libtcod.yellow)
choice = None
while choice == None: #keep asking until a choice is made
choice = menu('Level up! Choose a stat to raise:\n',
['Constitution (+20 HP, from ' + str(player.fighter.max_hp) + ')',
'Strength (+1 attack, from ' + str(player.fighter.power) + ')',
'Agility (+1 defense, from ' + str(player.fighter.defense) + ')'], LEVEL_SCREEN_WIDTH)
if choice == 0:
player.fighter.base_max_hp += 20
player.fighter.hp += 20
elif choice == 1:
player.fighter.base_power += 1
elif choice == 2:
player.fighter.base_defense += 1
def player_death(player):
#the game ended!
global game_state
message('You died!', libtcod.red)
game_state = 'dead'
#for added effect, transform the player into a corpse!
player.char = '%'
player.color = libtcod.dark_red
def monster_death(monster):
#transform it into a nasty corpse! it doesn't block, can't be
#attacked and doesn't move
message('The ' + monster.name + ' is dead! You gain ' + str(monster.fighter.xp) + ' experience points.', libtcod.orange)
monster.char = '%'
monster.color = libtcod.dark_red
monster.blocks = False
monster.fighter = None
monster.ai = None
monster.name = 'remains of ' + monster.name
monster.send_to_back()
def target_tile(max_range=None):
global key, mouse
#return the position of a tile left-clicked in player's FOV (optionally in a range), or (None,None) if right-clicked.
while True:
#render the screen. this erases the inventory and shows the names of objects under the mouse.
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
render_all()
(x, y) = (mouse.cx, mouse.cy)
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return (None, None) #cancel if the player right-clicked or pressed Escape
#accept the target if the player clicked in FOV, and in case a range is specified, if it's in that range
if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and
(max_range is None or player.distance(x, y) <= max_range)):
return (x, y)
def target_monster(max_range=None):
#returns a clicked monster inside FOV up to a range, or None if right-clicked
while True:
(x, y) = target_tile(max_range)
if x is None: #player cancelled
return None
#return the first clicked monster, otherwise continue looping
for obj in objects:
if obj.x == x and obj.y == y and obj.fighter and obj != player:
return obj
def closest_monster(max_range):
#find closest enemy, up to a maximum range, and in the player's FOV
closest_enemy = None
closest_dist = max_range + 1 #start with (slightly more than) maximum range
for object in objects:
if object.fighter and not object == player and libtcod.map_is_in_fov(fov_map, object.x, object.y):
#calculate distance between this object and the player
dist = player.distance_to(object)
if dist < closest_dist: #it's closer, so remember it
closest_enemy = object
closest_dist = dist
return closest_enemy
def cast_heal():
#heal the player
if player.fighter.hp == player.fighter.max_hp:
message('You are already at full health.', libtcod.red)
return 'cancelled'
message('Your wounds start to feel better!', libtcod.light_violet)
player.fighter.heal(HEAL_AMOUNT)
def cast_lightning():
#find closest enemy (inside a maximum range) and damage it
monster = closest_monster(LIGHTNING_RANGE)
if monster is None: #no enemy found within maximum range
message('No enemy is close enough to strike.', libtcod.red)
return 'cancelled'
#zap it!
message('A lightning bolt strikes the ' + monster.name + ' with a loud thunder! The damage is '
+ str(LIGHTNING_DAMAGE) + ' hit points.', libtcod.light_blue)
monster.fighter.take_damage(LIGHTNING_DAMAGE)
def cast_fireball():
#ask the player for a target tile to throw a fireball at
message('Left-click a target tile for the fireball, or right-click to cancel.', libtcod.light_cyan)
(x, y) = target_tile()
if x is None: return 'cancelled'
message('The fireball explodes, burning everything within ' + str(FIREBALL_RADIUS) + ' tiles!', libtcod.orange)
for obj in objects: #damage every fighter in range, including the player
if obj.distance(x, y) <= FIREBALL_RADIUS and obj.fighter:
message('The ' + obj.name + ' gets burned for ' + str(FIREBALL_DAMAGE) + ' hit points.', libtcod.orange)
obj.fighter.take_damage(FIREBALL_DAMAGE)
def cast_confuse():
#ask the player for a target to confuse
message('Left-click an enemy to confuse it, or right-click to cancel.', libtcod.light_cyan)
monster = target_monster(CONFUSE_RANGE)
if monster is None: return 'cancelled'
#replace the monster's AI with a "confused" one; after some turns it will restore the old AI
old_ai = monster.ai
monster.ai = ConfusedMonster(old_ai)
monster.ai.owner = monster #tell the new component who owns it
message('The eyes of the ' + monster.name + ' look vacant, as he starts to stumble around!', libtcod.light_green)
def cast_tempest():
#ask the player for a target tile to unleash a tempest on
message('Left-click a target tile for the tempest, or right-click to cancel.', libtcod.light_red)
(x, y) = target_tile()
if x is None: return 'cancelled'
message('The wind rises, wiping out every monster within ' + str(TEMPEST_RADIUS) + ' tiles!', libtcod.light_gray)
for obj in objects: #damage every fighter in range, including the player
if obj.distance(x, y) <= TEMPEST_RADIUS and obj.fighter:
message('The ' + obj.name + ' got swung by the wind for ' + str(TEMPEST_DAMAGE) + ' hit points.', libtcod.orange)
obj.fighter.take_damage(TEMPEST_DAMAGE)
if obj.name == 'fire demon':
message('The flame of the ' + obj.name + ' got tamed by the wind for ' + str(4*TEMPEST_DAMAGE) + ' hit points.', libtcod.orange)
obj.fighter.take_damage(4*TEMPEST_DAMAGE)
if obj.name == 'water spirit':
message('The water skin of the ' + obj.name + ' got stirred for ' + str(2*TEMPEST_DAMAGE) + ' hit points.', libtcod.orange)
obj.fighter.take_damage(2*TEMPEST_DAMAGE)
def cast_frostbite():
#ask the player for | |
<gh_stars>1-10
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import numbers
import random
from causality.dseparation import AbstractGroundGraph
from causality.model.Model import Model
from causality.model.RelationalDependency import RelationalVariable, RelationalDependency
from causality.model.Schema import Schema
from causality.modelspace import RelationalSpace
# An Improved RCD-Light algorithm
# based on "On Learning Causal Models from Relational Data (In Proc. of AAAI-2016)"
# <NAME> & <NAME>
#
# This algorithm is compatible with Relational Causal Discovery (RCD) from
# "A Sound and Complete Algorithm for Learning Causal Models from Relational Data" (In Proc. of UAI-2013)
#
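# A minimal usage sketch (not part of the original source; `schema` and
# `ci_tester` stand for a Schema and a conditional-independence tester
# constructed elsewhere, and hop_threshold=2 is just an example value):
#
#     rcdl = RCDLight(schema, ci_tester, hop_threshold=2)
#     undirected = rcdl.identifyUndirectedDependencies()   # Phase I
#     oriented = rcdl.orientDependencies()                 # Phase II
#     print(rcdl.ciRecord['total'])   # total number of CI tests performed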
class RCDLight(object):
def __init__(self, schema, ci_tester, hop_threshold):
if not isinstance(hop_threshold, numbers.Integral) or hop_threshold < 0:
raise Exception("Hop threshold must be a non-negative integer: found {}".format(hop_threshold))
self._schema = schema
self._ci_tester = ci_tester
self._hop_threshold = hop_threshold
self._ci_cache = dict()
self._sepsets = dict()
self._causes = None
self.undirectedDependencies = None
self.orientedDependencies = None
self.ciRecord = collections.defaultdict(lambda: 0)
def identifyUndirectedDependencies(self):
'''
This is Phase I of RCD-Light.
'''
potential_deps = RelationalSpace.getRelationalDependencies(self._schema, self._hop_threshold)
keyfunc = lambda dep: dep.relVar2
self._causes = {effect: set(cause.relVar1 for cause in causes)
for effect, causes in
itertools.groupby(sorted(potential_deps, key=keyfunc), key=keyfunc)}
to_be_tested = set(potential_deps)
for d in itertools.count():
for dep in list(to_be_tested): # remove-safe loop
if dep not in to_be_tested:
continue
cause, effect = dep.relVar1, dep.relVar2
sepset, tested = self._find_sepset_with_size(cause, effect, d, 'Phase I')
if not tested:
to_be_tested.remove(dep)
if sepset is not None:
dep_reversed = dep.reverse()
to_be_tested -= {dep, dep_reversed}
self._causes[dep.relVar2].remove(dep.relVar1)
self._causes[dep_reversed.relVar2].remove(dep_reversed.relVar1)
if not to_be_tested:
break
self.undirectedDependencies = {RelationalDependency(c, e) for e, cs in self._causes.items() for c in cs}
return set(self.undirectedDependencies)
def _enumerate_RUTs(self):
'''
This enumerates all representative unshielded triples.
'''
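# (An unshielded triple is a pattern X -- Y -- Z where X and Z are themselves
# not adjacent; the representative triples yielded below are built from pairs
# of undirected dependencies whose relational paths can be extended.)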
def two_dependencies():
for d_yx in self.undirectedDependencies:
for d_zy in self.undirectedDependencies:
if d_zy.relVar2.attrName == d_yx.relVar1.attrName:
yield d_yx, d_zy
for d_yx, d_zy in two_dependencies():
Vx = d_yx.relVar2 # this is a canonical relational variable
Qy, Rz = d_yx.relVar1, d_zy.relVar1
for QR in AbstractGroundGraph.extendPath(self._schema, Qy.path, Rz.path):
QRz = RelationalVariable(QR, Rz.attrName)
if QRz != Vx and QRz not in self._causes[Vx]:
yield QRz, Qy, Vx
def orientDependencies(self, background_knowledge=None):
'''
This is Phase II of RCD-Light.
This orients dependencies based on both
(i) CI-based orientation and
(ii) constraint-based orientation.
'''
assert self.undirectedDependencies is not None
# initialize attribute class level non-colliders
non_colliders = set()
# initialize class dependency graph
cdg = PDAG((c.attrName, e.attrName) for e, cs in self._causes.items() for c in cs)
ancestrals = Ancestral(cdg.vertices())
if background_knowledge is not None:
cdg.orients(background_knowledge)
RCDLight._apply_rules(cdg, non_colliders, ancestrals)
# enumerate all representative unshielded triples
ruts = list(set(self._enumerate_RUTs()))
random.shuffle(ruts)
# take advantage of cached CIs
ruts.sort(key=lambda ut: frozenset({ut[0], ut[2]}) in self._sepsets,
reverse=True)
for rv1, rv2, crv3 in ruts:
z, y, x = rv1.attrName, rv2.attrName, crv3.attrName
# Check skippable tests
if cdg.is_oriented(z, y) and cdg.is_oriented(x, y): # already oriented
continue
if (y, frozenset({x, z})) in non_colliders: # already non-collider
continue
if z in cdg.de(x): # delegate to its complement UT.
continue
if cdg.is_oriented_as(y, x) or cdg.is_oriented_as(y, z): # an inactive non-collider
continue
sepset = self._find_sepset(rv1, crv3, 'Phase II')
if sepset is not None:
if rv2 not in sepset: # collider
cdg.orients(((z, y), (x, y)))
elif x == z: # non-collider, RBO
cdg.orient(y, x)
else:
non_colliders.add((y, frozenset({x, z})))
else:
# The original version of RCD-Light orients (or adds) an edge as x-->z, and
# takes advantage of Rule 2.
# The improved version explicitly represents ancestral relationships, and can
# orient more edges.
cdg.orient(x, z) if cdg.is_adj(x, z) else ancestrals.add(x, z)
RCDLight._apply_rules(cdg, non_colliders, ancestrals)
#
self._reflect_orientations(cdg)
self._update_oriented_dependencies()
return set(self.orientedDependencies)
def _reflect_orientations(self, cdg):
for effect, causes in self._causes.items():
for cause in list(causes):
if cdg.is_oriented_as(effect.attrName, cause.attrName):
causes.remove(cause)
def _update_oriented_dependencies(self):
self.orientedDependencies = set()
for effect, causes in self._causes.items():
for cause in causes:
dep = RelationalDependency(cause, effect)
rev = dep.reverse()
if rev.relVar1 not in self._causes[rev.relVar2]:
self.orientedDependencies.add(dep)
@staticmethod
def _apply_rules(pdag, non_colliders, ancestral):
'''
Orients unoriented edges in a PDAG given an explicit, but possibly incomplete, list of non-colliders
and additional ancestral relationships among vertices.
'''
# colliders are not all oriented.
# non-colliders are imperfect.
# ancestral relationships are imperfect.
changed = True
while changed:
changed = False
changed |= MeekRules.rule_2(pdag)
for y, (x, z) in non_colliders:
changed |= MeekRules.rule_1(pdag, x, y, z)
changed |= MeekRules.rule_3(pdag, x, y, z)
changed |= MeekRules.rule_4(pdag, x, y, z)
if (x, z) in ancestral:
changed |= pdag.orient(y, z)
elif (z, x) in ancestral:
changed |= pdag.orient(y, x)
def _find_sepset_with_size(self, rv1, rv2, size, record='unknown'):
assert len(rv2.path) == 1
is_ci = self._ci_tester.isConditionallyIndependent
neighbors = set(self._causes[rv2]) - {rv1}
if size > len(neighbors):
return None, False
for condition in itertools.combinations(neighbors, size):
ci_key = (rv1, rv2, tuple(sorted(list(condition))))
if ci_key not in self._ci_cache:
self.ciRecord[record] += 1
self.ciRecord['total'] += 1
self._ci_cache[ci_key] = is_ci(rv1, rv2, condition)
if self._ci_cache[ci_key]:
self._sepsets[frozenset({rv1, rv2})] = set(condition)
return set(condition), True
return None, True
def _find_sepset(self, rv1, rv2, record='unknown'):
assert len(rv2.path) == 1
key = frozenset({rv1, rv2})
if key in self._sepsets:
return self._sepsets[key]
for d in itertools.count():
sepset, tested = self._find_sepset_with_size(rv1, rv2, d, record)
if sepset is not None:
self._sepsets[key] = sepset
return sepset
if not tested:
return None
class Ancestral:
'''
Record ancestral relationships (or equivalently, a partial order over vertices)
'''
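# For example (illustrative only): after add('a', 'b') and add('b', 'c'),
# ('a', 'c') is in the structure and related('a', 'c') is True, since the
# ancestor/descendant sets are closed transitively on each add().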
def __init__(self, vs):
self.vs = set(vs)
self.ans = collections.defaultdict(set)
self.des = collections.defaultdict(set)
def related(self, x, y):
'''
check whether two vertices are partially ordered (i.e., one is an ancestor of the other)
:param x:
:param y:
:return:
'''
assert x != y
return y in self.ans[x] or y in self.des[x]
def adds(self, ancs):
for anc, x in ancs:
self.add(anc, x)
def add(self, ancestor, x):
assert ancestor != x
assert x not in self.ans[ancestor]
if ancestor in self.ans[x]:
return
dedes = self.des[x] | {x}      # include the endpoints themselves so the
anans = self.ans[ancestor] | {ancestor}  # new relation is recorded, not just its closure
for dede in dedes:
self.ans[dede] |= anans
self.ans[x] |= anans
for anan in anans:
self.des[anan] |= dedes
self.des[ancestor] |= dedes
def __contains__(self, item):
x, y = item # x-...->y
return x in self.ans[y]
class PDAG:
'''
A Partially Directed Acyclic Graph.
'''
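# Edges are stored as ordered pairs in E: a directed edge x --> y is just
# ('x', 'y'), while an undirected edge x -- y keeps both ('x', 'y') and
# ('y', 'x') (see add_undirected_edge() and unoriented() below).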
def __init__(self, edges=None):
self.E = set()
self._Pa = collections.defaultdict(set)
self._Ch = collections.defaultdict(set)
if edges is not None:
self.add_edges(edges)
def vertices(self):
return set(self._Pa.keys()) | set(self._Ch.keys())
def __contains__(self, item):
return item in self.E
# Ancestors
def an(self, x, at=None):
if at is None:
at = set()
for p in self.pa(x):
if p not in at:
at.add(p)
self.an(p, at)
return at
# Descendants
def de(self, x, at=None):
if at is None:
at = set()
for p in self.ch(x):
if p not in at:
at.add(p)
self.de(p, at)
return at
# get all oriented edges
def oriented(self):
ors = set()
for x, y in self.E:
if (y, x) not in self.E:
ors.add((x, y))
return ors
def unoriented(self):
uors = set()
for x, y in self.E:
if (y, x) in self.E:
uors.add(frozenset({x, y}))
return uors
# remove a vertex
def remove_vertex(self, v):
for x, y in list(self.E):
if x == v or y == v:
self.E.remove((x, y))
self._Pa.pop(v, None)
self._Ch.pop(v, None)
for k, values in self._Pa.items():
if v in values:
values.remove(v)
for k, values in self._Ch.items():
if v in values:
values.remove(v)
def copy(self):
new_copy = PDAG()
new_copy.E = set(self.E)
new_copy._Pa = collections.defaultdict(set)
new_copy._Ch = collections.defaultdict(set)
for k, vs in self._Pa.items():
new_copy._Pa[k] = set(vs)
for k, vs in self._Ch.items():
new_copy._Ch[k] = set(vs)
return new_copy
# Adjacent
def is_adj(self, x, y):
return (x, y) in self.E or (y, x) in self.E
def add_edges(self, xys):
for x, y in xys:
self.add_edge(x, y)
def add_edge(self, x, y):
'''
if y-->x exists, adding x-->y makes x -- y.
:param x:
:param y:
:return:
'''
assert x != y
self.E.add((x, y))
self._Pa[y].add(x)
self._Ch[x].add(y)
def add_undirected_edge(self, x, y):
# will override any existing directed edge
assert x != y
self.add_edge(x, y)
self.add_edge(y, x)
def orients(self, xys):
return any([self.orient(x, y) for x, y in xys])
def | |
<filename>source/MulensModel/magnificationcurve.py
import numpy as np
import math
import warnings
from MulensModel.trajectory import Trajectory
from MulensModel.pointlens import PointLens, get_pspl_magnification
from MulensModel.binarylens import BinaryLens
from MulensModel.modelparameters import ModelParameters
class MagnificationCurve(object):
"""
The magnification curve calculated from the model light curve.
The key function is :py:func:`set_magnification_methods`, which
specifies the method used to calculate the finite source
magnification and when to use it.
Arguments :
times: iterable of *floats*
the times at which to generate the magnification curve
parameters: :py:class:`~MulensModel.modelparameters.ModelParameters`
specifies the microlensing parameters
parallax: *dict*, optional
dictionary specifying what parallax effects should be
used, e.g., ``{'earth_orbital': True, 'satellite': False,
'topocentric': False}``
coords: :py:class:`~MulensModel.coordinates.Coordinates`, optional
sky coordinates of the event
satellite_skycoord: *Astropy.coordinates.SkyCoord*, optional
sky coordinates of the satellite specified by the
ephemerides file. See
:py:obj:`MulensModel.mulensdata.MulensData.satellite_skycoord`.
gamma: *float*, optional
limb darkening coefficient in gamma convention; defaults to 0
Attributes :
trajectory: :py:class:`~MulensModel.Trajectory.trajectory`
Trajectory used to calculate positions of
the source that are used to calculate magnification values.
"""
def __init__(self, times, parameters, parallax=None,
coords=None, satellite_skycoord=None, gamma=0.):
# Set times
if isinstance(times, (list, tuple, np.ndarray)):
self.times = times
else:
self.times = np.array(times)
# Check for ModelParameters and set.
if isinstance(parameters, ModelParameters):
self.parameters = parameters
else:
raise ValueError(
'parameters is a required keyword and must be a ' +
'ModelParameters object')
# Calculate the source trajectory (i.e. u(t))
self.trajectory = Trajectory(
self.times, parameters=parameters, parallax=parallax,
coords=coords, satellite_skycoord=satellite_skycoord)
# Initialize the magnification vector
self._magnification = None
# Set methods' variables:
self._methods_epochs = None
self._methods_names = []
self._default_method = None
self._methods_parameters = None
self._gamma = gamma
def set_magnification_methods(self, methods, default_method):
"""
Sets methods used for magnification calculation.
For available methods, see:
:py:func:`get_point_lens_magnification`
and
:py:func:`get_binary_lens_magnification`
Parameters :
methods: *list*
List that specifies which methods (*str*) should be
used when (*float* values for Julian dates). Given
method will be used for times between the times
between which it is on the list, e.g.,
.. code-block:: python
methods = [
2455746., 'Quadrupole', 2455746.6, 'Hexadecapole',
2455746.7, 'VBBL', 2455747., 'Hexadecapole',
2455747.15, 'Quadrupole', 2455748.]
default_method: *str*
Name of the method to be used for epochs outside the ranges
specified in *methods*.
"""
self._default_method = default_method
if methods is None:
self._methods_epochs = None
self._methods_names = []
return
if not isinstance(methods, list):
msg = ('MagnificationCurve.set_magnification_methods() ' +
'requires a list as a parameter')
raise TypeError(msg)
epochs = methods[0::2]
names = methods[1::2]
for epoch in epochs:
if not isinstance(epoch, (float, int)):
raise TypeError('Wrong epoch: {:}'.format(epoch))
for method in names:
if not isinstance(method, str):
raise TypeError('Wrong method: {:}'.format(method))
for (e_beg, e_end) in zip(epochs[::2], epochs[1::2]):
if e_beg >= e_end:
msg = ('Incorrect epochs provided: {:} and {:} (first should' +
' be earlier)')
raise ValueError(msg.format(e_beg, e_end))
self._methods_epochs = np.array(epochs)
self._methods_names = names
def set_magnification_methods_parameters(self, methods_parameters):
"""
Set additional parameters for magnification calculation methods.
Parameters :
methods_parameters: *dict*
Dictionary that for method names (keys) returns dictionary
in the form of ``**kwargs`` that are passed to given method,
e.g., ``{'VBBL': {'accuracy': 0.005}}``.
"""
self._methods_parameters = methods_parameters
def get_magnification(self):
"""
Calculate magnification.
Returns :
magnification: *np.ndarray*
Vector of magnifications.
"""
if self.parameters.rho is not None:
self._check_for_finite_source_method()
if self.parameters.n_lenses == 1:
magnification = self.get_point_lens_magnification()
elif self.parameters.n_lenses == 2:
magnification = self.get_binary_lens_magnification()
else:
raise NotImplementedError(
"magnification for more than 2 lenses not handled yet")
self._magnification = magnification
return self._magnification
def _check_for_finite_source_method(self):
"""
check if there is method defined that uses finite source
calculations and warn if not
"""
methods = self._methods_names + [self._default_method]
set_ = set(['point_source', 'point_source_point_lens'])
if len(set(methods)-set_) == 0:
warnings.warn('no finite-source method is set', UserWarning)
return
def get_point_lens_magnification(self):
"""
Calculate the Point Lens magnification.
Allowed magnification methods :
``point_source``:
standard Paczynski equation for a point source/point lens.
``finite_source_uniform_Gould94``:
Uses the `Gould 1994 ApJ, 421L, 71
<https://ui.adsabs.harvard.edu/abs/1994ApJ...421L..71G/abstract>`_
prescription assuming a
*uniform* (and circular) source. This method interpolates
pre-computed tables. The relative interpolation
errors are smaller than 10^-4.
``finite_source_uniform_Gould94_direct``:
Same as ``finite_source_uniform_Gould94``, but calculates
the underlying functions directly
(i.e., without interpolation).
``finite_source_uniform_WittMao94``:
Uses the `Witt and Mao 1994 ApJ, 430, 505
<https://ui.adsabs.harvard.edu/abs/1994ApJ...430..505W/abstract>`_
method assuming a *uniform* (and circular) source. This method
interpolates pre-computed tables. The relative interpolation
errors are smaller than 10^-4.
``finite_source_LD_WittMao94``:
Uses the `Witt and Mao 1994 ApJ, 430, 505`_ method and
integrates multiple annuli to obtain magnification for
a circular source *including limb-darkening*. For description
of integration of multiple annuli see, e.g.,
`Bozza et al. 2018 MNRAS, 479, 5157
<https://ui.adsabs.harvard.edu/abs/2018MNRAS.479.5157B/abstract>`_.
This method interpolates pre-computed tables. The relative
interpolation errors are smaller than 10^-4.
``finite_source_LD_Yoo04``:
Uses the `Yoo et al. 2004 ApJ, 603, 139
<https://ui.adsabs.harvard.edu/abs/2004ApJ...603..139Y/abstract>`_
prescription for
a circular source *including limb-darkening*. This method
interpolates pre-computed tables. The relative interpolation
errors are smaller than 10^-4.
``finite_source_LD_Yoo04_direct``:
Same as ``finite_source_LD_Yoo04``, but calculates
the underlying functions directly
(i.e., without interpolation), hence can be slow.
``finite_source_uniform_Lee09``:
Uses the `Lee et al. 2009 ApJ, 695, 200
<https://ui.adsabs.harvard.edu/abs/2009ApJ...695..200L/abstract>`_
method for a circular and *uniform* source. This method
works well for large sources (rho ~ 1).
``finite_source_LD_Lee09``:
Uses the `Lee et al. 2009 ApJ, 695, 200`_ method for
a circular source *including limb-darkening*. This method
works well for large sources (rho ~ 1) but can be slow
compared to other methods.
Returns :
magnification: *np.ndarray*
Vector of magnifications.
"""
pspl_magnification = get_pspl_magnification(self.trajectory)
if self._methods_epochs is None:
return pspl_magnification
point_lens = PointLens(self.parameters)
magnification = pspl_magnification
u2 = self.trajectory.x**2 + self.trajectory.y**2
u_all = np.sqrt(u2)
methods = np.array(self._methods_for_epochs())
for method in set(methods):
kwargs = {}
if self._methods_parameters is not None:
if method in self._methods_parameters.keys():
kwargs = self._methods_parameters[method]
if kwargs != {}:
raise ValueError(
'Methods parameters passed, but currently ' +
'no point lens method accepts the parameters')
selection = (methods == method)
if method.lower() == 'point_source':
pass # These cases are already taken care of.
elif method.lower() == 'finite_source_uniform_Gould94'.lower():
magnification[selection] = (
point_lens.get_point_lens_finite_source_magnification(
u=u_all[selection],
pspl_magnification=pspl_magnification[selection]))
elif (method.lower() ==
'finite_source_uniform_Gould94_direct'.lower()):
magnification[selection] = (
point_lens.get_point_lens_finite_source_magnification(
u=u_all[selection],
pspl_magnification=pspl_magnification[selection],
direct=True))
elif method.lower() == 'finite_source_uniform_WittMao94'.lower():
pl = point_lens
magnification[selection] = (
pl.get_point_lens_large_finite_source_magnification(
u=u_all[selection]))
elif method.lower() == 'finite_source_LD_WittMao94'.lower():
pl = point_lens
magnification[selection] = (
pl.get_point_lens_large_LD_integrated_magnification(
u=u_all[selection], gamma=self._gamma))
elif method.lower() == 'finite_source_LD_Yoo04'.lower():
magnification[selection] = (
point_lens.get_point_lens_limb_darkening_magnification(
u=u_all[selection],
pspl_magnification=pspl_magnification[selection],
gamma=self._gamma))
elif method.lower() == 'finite_source_LD_Yoo04_direct'.lower():
magnification[selection] = (
point_lens.get_point_lens_limb_darkening_magnification(
u=u_all[selection],
pspl_magnification=pspl_magnification[selection],
gamma=self._gamma,
direct=True))
elif method.lower() == 'finite_source_uniform_Lee09'.lower():
magnification[selection] = (
point_lens.get_point_lens_uniform_integrated_magnification(
u=u_all[selection],
rho=self.parameters.rho))
elif method.lower() == 'finite_source_LD_Lee09'.lower():
magnification[selection] = (
point_lens.get_point_lens_LD_integrated_magnification(
u=u_all[selection],
rho=self.parameters.rho,
gamma=self._gamma))
else:
msg = 'Unknown method specified for single lens: {:}'
raise ValueError(msg.format(method))
return magnification
def get_binary_lens_magnification(self):
"""
Calculate the binary lens magnification.
Allowed magnification methods :
``point_source``:
standard point source magnification calculation.
``quadrupole``:
From `Gould 2008 ApJ, 681, 1593
<https://ui.adsabs.harvard.edu/abs/2008ApJ...681.1593G/abstract>`_.
See
:py:func:`~MulensModel.binarylens.BinaryLens.hexadecapole_magnification()`
``hexadecapole``:
From `Gould 2008 ApJ, 681, 1593`_ See
:py:func:`~MulensModel.binarylens.BinaryLens.hexadecapole_magnification()`
``VBBL``:
Uses VBBinaryLensing (a Stokes theorem/contour
integration code) by <NAME>
(`Bozza 2010 MNRAS, 408, 2188
<https://ui.adsabs.harvard.edu/abs/2010MNRAS.408.2188B/abstract>`_).
See
:py:func:`~MulensModel.binarylens.BinaryLens.vbbl_magnification()`
``Adaptive_Contouring``:
Uses AdaptiveContouring (a Stokes theorem/contour
integration code) by <NAME>
(`Dominik 2007 MNRAS, 377, 1679
<https://ui.adsabs.harvard.edu/abs/2007MNRAS.377.1679D/abstract>`_).
See
:py:func:`~MulensModel.binarylens.BinaryLens.adaptive_contouring_magnification()`
``point_source_point_lens``:
Uses point-source _point_-_lens_ approximation; useful when you
consider binary lens but need magnification very far from
the lens (e.g. at separation u = 100).
Returns :
magnification: *np.ndarray*
Vector of magnifications.
"""
# Set up the binary lens system
q = self.parameters.q
m_1 = 1. / (1. + q)
m_2 = q / (1. + q)
is_static = self.parameters.is_static()
if is_static:
binary_lens = BinaryLens(
mass_1=m_1, mass_2=m_2, separation=self.parameters.s)
methods = self._methods_for_epochs()
# Calculate the magnification
magnification = []
for index in range(len(self.times)):
x = self.trajectory.x[index]
y = self.trajectory.y[index]
method = methods[index].lower()
if not is_static:
binary_lens = BinaryLens(
mass_1=m_1, mass_2=m_2,
separation=self.parameters.get_s(self.times[index]))
kwargs = {}
if self._methods_parameters is not None:
if method in self._methods_parameters.keys():
kwargs = self._methods_parameters[method]
if method not in ['vbbl', 'adaptive_contouring']:
msg = ('Methods parameters passed for method {:}' +
' which does not accept any parameters')
raise ValueError(msg.format(method))
if method == 'point_source':
try:
m = binary_lens.point_source_magnification(x, y)
except Exception as e:
text = "Model parameters for above exception:\n"
text += str(self.parameters)
raise ValueError(text) from e
# The code above is based on
# https://stackoverflow.com/questions/6062576/
# adding-information-to-an-exception/6062799
elif method == 'quadrupole':
m = binary_lens.hexadecapole_magnification(
x, y, rho=self.parameters.rho, quadrupole=True,
gamma=self._gamma)
elif method == 'hexadecapole':
m = binary_lens.hexadecapole_magnification(
x, y, rho=self.parameters.rho, | |
max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return p
def func_32f92da5b8064374bbff3ae1a95a9ccc(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return queue
def func_9b1e6967e6ab42848f1d3ed27cd1d047(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return cc
def func_5ced77ee2f2a43a78a7907b12c0d5ae7(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return partial
def func_6a3b7b272d6c4056b67e8a9055559007(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return next_larger
def func_76573fbebf97469498b38fb7d8c4f128(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
return budget
def func_35821b1e41b24d3ebfa0142a6ddf5e9f(infile):
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for | |
<filename>manabi/apps/flashcards/models/cardmanager.py
from datetime import datetime, timedelta
from itertools import chain
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Min, Count, Q
from manabi.apps.flashcards.models.constants import GRADE_NONE, MATURE_INTERVAL_MIN
from manabi.apps.flashcards.models.burying import with_siblings_buried
class SchedulerMixin:
'''
Methods for retrieving the next cards that are ready to be reviewed.
'''
def _next_failed_due_cards(
self, initial_query, count, review_time, buried_facts,
**kwargs
):
if not count:
return []
cards = initial_query.failed().due(
review_time=review_time,
order_by='due_at',
)
return cards[:count]
def _next_not_failed_due_cards(
self,
initial_query,
count,
review_time,
buried_facts,
**kwargs
):
'''
Returns the first [count] cards from initial_query which are due
and weren't failed on their last review, taking spacing of cards
from the same fact into account.
Disregards fact burial status.
review_time should be datetime.utcnow()
'''
if not count:
return []
cards = initial_query.exclude(
last_review_grade=GRADE_NONE,
).due(review_time=review_time, order_by='-interval')
#TODO-OLD Also get cards that aren't quite due yet, but will be soon,
# and depending on their maturity
# (i.e. only mature cards due soon).
# Figure out some kind of way to prioritize these too.
return cards[:count]
def _next_failed_not_due_cards(
self, initial_query, count, review_time, buried_facts,
**kwargs
):
'''
Disregards fact burial status.
'''
if not count:
return []
#TODO-OLD prioritize certain failed cards, not just by due date
# We'll show failed cards even if they've been reviewed recently.
# This is because failed cards are set to be shown 'soon' and not
# just in 10 minutes. Special rules.
#TODO-OLD we shouldn't show mature failed cards so soon though!
#TODO-OLD randomize the order (once we fix the Undo)
cards = initial_query.failed().not_due(review_time=review_time)
# FIXME: This is excluding the card itself. Scenario: one card in the fact; fail this card; now this card doesn't show up here due to being counted as a sibling of itself having been reviewed recently! HMMMM....
# Maybe to fix this, add a failed-and-buried card func? I think that works.
# For now, I'm going to ignore buried facts here instead.
cards = with_siblings_buried(cards, 'due_at')
return cards[:count]
def _next_new_cards(
self,
initial_query,
count,
review_time,
buried_facts,
include_new_buried_siblings=False,
learn_more=False, # DEPRECATED?
new_cards_limit=None,
**kwargs
):
from manabi.apps.flashcards.models.cards import CARD_TEMPLATE_CHOICES
from manabi.apps.flashcards.models.facts import Fact
count = min(count, new_cards_limit)
if not count:
return []
cards = initial_query.filter(due_at__isnull=True)
cards = cards.exclude(fact__in=buried_facts)
cards = with_siblings_buried(cards, 'new_card_ordinal')
cards = list(cards[:count])
# Add spaced cards if in early review/learn more mode and we haven't
# supplied enough.
if (include_new_buried_siblings or learn_more) and len(cards) < count:
buried_cards = initial_query.filter(due_at__isnull=True)
buried_cards = buried_cards.exclude(pk__in=cards)
buried_cards = buried_cards.order_by('new_card_ordinal')
cards.extend(list(buried_cards[:count - len(cards)]))
return cards
def _next_due_soon_cards(
self,
initial_query,
count,
review_time,
buried_facts,
early_review_began_at=None,
**kwargs
):
'''
Used for early review. Ordered by due date.
'''
if not count:
return []
cards = initial_query.exclude(last_review_grade=GRADE_NONE)
cards = cards.not_due(review_time=review_time)
if early_review_began_at is not None:
cards = cards.exclude(last_reviewed_at__gte=early_review_began_at)
priority_cutoff = review_time - timedelta(minutes=60)
staler_cards = cards.filter(last_reviewed_at__gt=priority_cutoff)
staler_cards = staler_cards.exclude(fact__in=buried_facts)
staler_cards = staler_cards.order_by('due_at')
return staler_cards[:count]
def _next_due_soon_cards2(
self,
initial_query,
count,
review_time,
buried_facts,
early_review_began_at=None,
**kwargs
):
'''
Due soon, not yet, but next in the future.
'''
if not count:
return []
cards = initial_query.exclude(last_review_grade=GRADE_NONE)
cards = cards.filter(due_at__gt=review_time)
if early_review_began_at is not None:
cards = cards.exclude(last_reviewed_at__gte=early_review_began_at)
priority_cutoff = review_time - timedelta(minutes=60)
fresher_cards = cards.filter(
last_reviewed_at__isnull=False,
last_reviewed_at__lte=priority_cutoff)
fresher_cards = fresher_cards.exclude(fact__in=buried_facts)
fresher_cards = fresher_cards.order_by('due_at')
return fresher_cards[:count]
def _next_buried_cards(
self,
initial_query,
count,
review_time,
buried_facts,
early_review_began_at=None,
**kwargs
):
'''
Cards buried due to sibling review.
'''
if not count:
return []
cards = initial_query.filter(fact__in=buried_facts)
if early_review_began_at is not None:
cards = cards.exclude(last_reviewed_at__gte=early_review_began_at)
return cards[:count]
def _next_card_funcs(
self,
early_review=False,
learn_more=False,
):
if early_review and learn_more:
raise ValueError("Cannot set both early_review and learn_more together.")
card_funcs = [
self._next_failed_due_cards, # due, failed
self._next_not_failed_due_cards, # due, not failed
self._next_failed_not_due_cards, # failed, not due
]
if early_review:
card_funcs.extend([
self._next_due_soon_cards,
self._next_due_soon_cards2, # Due soon, not yet, but next in the future.
self._next_buried_cards,
])
elif learn_more:
# TODO: Only new cards, and ignore spacing.
# Unless we don't need learn_more anymore...
card_funcs = [self._next_new_cards]
else:
card_funcs.extend([self._next_new_cards]) # New cards at end.
return card_funcs
def next_cards(
self,
user,
count,
deck=None,
new_cards_limit=None,
        excluded_ids=(),
early_review=False,
early_review_began_at=None,
include_new_buried_siblings=False,
learn_more=False,
is_for_manabi_reader=False,
jmdict_ids=None,
words_without_jmdict_ids=None,
):
'''
Returns `count` cards to be reviewed, in order.
        `count` should be no more than a short review session's worth of cards.
        Set `early_review` to True to review cards before they are due
        (these are shown after any currently due cards).
`early_review_began_at` is used to avoid early review of the same
cards twice in the same review session.
`include_new_buried_siblings` is used to allow learning
cards whose siblings have already been learned recently.
DEPRECATED:
If learn_more is True, only new cards will be chosen,
even if they were spaced due to sibling reviews.
"Due soon" cards won't be chosen in this case,
contrary to early_review's normal behavior.
(#TODO-OLD consider changing this to have a separate option)
`new_cards_limit` is an integer.
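        Illustrative usage (a sketch only; the `Card.objects` manager name is an
        assumption, as this mixin may be attached to a differently named manager):
            cards = Card.objects.next_cards(user, 10, deck=deck)
            for card in cards:
                ...  # present each card for review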
'''
from manabi.apps.flashcards.models.facts import Fact
#TODO-OLD somehow spread some new cards into the early review
# cards if early_review==True
#TODO-OLD use args instead, like *kwargs etc for these funcs
now = datetime.utcnow()
card_funcs = self._next_card_funcs(
early_review=early_review,
learn_more=learn_more,
)
user_cards = (
self.common_filters(
user,
deck=deck,
excluded_ids=excluded_ids,
).manabi_reader_filters(
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
).select_related('fact')
)
buried_facts = Fact.objects.buried(
user, review_time=now, excluded_card_ids=excluded_ids)
if deck is not None:
buried_facts = buried_facts.deck_facts(deck)
cards_left = count
card_queries = []
for card_func in card_funcs:
if not cards_left:
break
cards = card_func(
user_cards,
cards_left,
now,
early_review=early_review,
early_review_began_at=early_review_began_at,
include_new_buried_siblings=include_new_buried_siblings,
learn_more=learn_more,
buried_facts=buried_facts,
new_cards_limit=new_cards_limit,
jmdict_ids=jmdict_ids,
words_without_jmdict_ids=words_without_jmdict_ids,
)
cards_left -= len(cards)
if len(cards):
card_queries.append(cards)
#FIXME add new cards into the mix when there's a defined
# new card per day limit
#for now, we'll add new ones to the end
return chain(*card_queries)
class SchedulerFiltersMixin:
def failed(self):
return self.filter(last_review_grade=GRADE_NONE)
def excluding_failed(self):
return self.exclude(last_review_grade=GRADE_NONE)
def young(self):
return self.filter(
last_reviewed_at__isnull=False,
interval__isnull=False,
interval__lt=MATURE_INTERVAL_MIN,
)
def mature(self):
return self.filter(interval__gte=MATURE_INTERVAL_MIN)
def due(self, review_time=None, order_by=None):
# TODO: Include buried facts by default.
due_cards = self.filter(
due_at__isnull=False,
due_at__lte=(review_time or datetime.utcnow()),
)
return with_siblings_buried(due_cards, order_by=order_by)
def not_due(self, review_time=None):
return self.filter(due_at__gt=(review_time or datetime.utcnow()))
class CommonFiltersMixin:
'''
Provides filters for decks, maturity level, etc.
This is particularly useful with view URLs which take query params for
these things.
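    A typical chain might look like this (illustrative only; the `Card.objects`
    manager name is an assumption, the methods are the ones defined below):
        Card.objects.of_user(user).of_deck(deck).available()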
'''
def available(self):
''' Cards which are active and unsuspended. '''
return self.filter(active=True).unsuspended()
def of_deck(self, deck):
return self.filter(deck=deck)
def of_user(self, user):
if not user.is_authenticated:
return self.none()
return self.filter(owner=user)
def excluding_ids(self, excluded_ids):
return self.exclude(id__in=excluded_ids)
def unsuspended(self):
return self.filter(suspended=False)
def common_filters(
self,
user,
deck=None,
excluded_ids=None,
):
cards = self.of_user(user).unsuspended().filter(active=True)
if deck:
cards = cards.of_deck(deck)
else:
cards = cards.filter(owner=user).exclude(deck_suspended=True)
if excluded_ids:
cards = cards.excluding_ids(excluded_ids)
return cards
def manabi_reader_filters(
self,
jmdict_ids=None,
words_without_jmdict_ids=None,
):
if (
jmdict_ids is not None
or words_without_jmdict_ids is not None
):
reader_filters = Q()
if jmdict_ids is not None:
reader_filters |= Q(jmdict_id__in=jmdict_ids)
if words_without_jmdict_ids is not None:
reader_filters |= Q(
fact__reading__in=words_without_jmdict_ids)
return self.filter(reader_filters)
return self
def new(self, user):
user_cards = self.available().of_user(user)
return user_cards.filter(last_reviewed_at__isnull=True)
def new_count(self, user, including_buried=True, buried_fact_ids=None):
'''
Use this rather than `new(user).count()` for future-proofing.
'''
from manabi.apps.flashcards.models.facts import Fact
new_cards = self.new(user)
if not including_buried:
new_cards = with_siblings_buried(new_cards)
if buried_fact_ids is None:
buried_fact_ids = Fact.objects.buried(
                user, excluded_card_ids=(),
).values_list('id', flat=True)
new_cards = new_cards.exclude(fact_id__in=buried_fact_ids)
return new_cards.count()
def approx_new_count(self, user=None, deck=None):
'''
Approximates how many new cards are actually available to review.
Will be between what new_count and unspaced_new_count return,
but much faster than the latter.
'''
cards = self.available()
if deck:
cards = cards.of_deck(deck)
return cards.new(user).values_list('fact_id').distinct().count()
# def unspaced_new_count(self, user):
# '''
# Same as `new_count`, except it subtracts new cards that
# will be delayed due to sibling spacing (cards which haven't
# been spaced.)
# '''
# local_query = self.new(user).available()
# desired_count = 999999 #TODO-OLD use more elegant solution.
# now = datetime.utcnow()
# new_cards = self.new(user)
# return self._next_new_cards(user, local_query, desired_count, now).count()
def count_of_cards_due_tomorrow(self, user):
'''
Returns the number of cards due by tomorrow at the same time
as now. Doesn't take future spacing into account though, so it's
a somewhat rough estimate.
No longer includes new cards in its count.
'''
#from manabi.apps.flashcards.models.facts import Fact
#cards = self.of_user(user)
#if deck:
# cards = cards.filter(fact__deck=deck)
#if tags:
# facts = usertagging.models.UserTaggedItem.objects.get_by_model(
# Fact, tags)
# cards = cards.filter(fact__in=facts)
this_time_tomorrow = datetime.utcnow() + timedelta(days=1)
cards = self.filter(
import urllib,urllib2,re,xbmcplugin,xbmcgui,urlresolver,xbmc,xbmcaddon,os,sys
from addon.common.addon import Addon
from addon.common.net import Net
import silent
#www.movie25.so - by The_Silencer 2013 v0.10
addon_id = 'plugin.video.moviestwentyfive'
local = xbmcaddon.Addon(id=addon_id)
movie25path = local.getAddonInfo('path')
addon = Addon(addon_id, sys.argv)
datapath = addon.get_profile()
art = movie25path+'/art'
net = Net()
custurl = local.getSetting('custurl')
custurl1 = custurl+'/'
custurltv = local.getSetting('custurltv')
#Kid Mode Menu
def KIDMODE():
addDir('Parent View',custurl1,46,os.path.join(art,'PARENT_VIEW.png'),None,None)
addDir('Kid Movies',custurl1,44,os.path.join(art,'KIDS_MOVIES.png'),None,None)
addDir('Kid TV Shows',custurl1,45,os.path.join(art,'KIDS_TV.png'),None,None)
def PARENTS():
Password = local.getSetting('parent-password')
keyb = xbmc.Keyboard('', 'Please Enter the Parent Password')
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
if encode != Password:
            addDir('Sorry wrong password please try again',custurl1,46,'',None,None)
else:
            addDir('Movies',custurl1,16,os.path.join(art,'MOVIES.png'),None,None)
            addDir('TV',custurl1,17,os.path.join(art,'TV_SHOWS.png'),None,None)
            addDir('Settings',custurl1,18,os.path.join(art,'SETTINGS.png'),None,None)
def KIDMOVIES():
MYFAVS = silent.getFavoritesKIDMOVIE()
try:
for name,url,year in MYFAVS:
addFAVDir(name,url,year)
except:
pass
def KIDTV():
MYFAVS = silent.getFavoritesKIDTV()
try:
for name,url,types in MYFAVS:
addFAVDirTV(name,url,types)
except:
pass
#Urlresolver settings
def ResolverSettings():
urlresolver.display_settings()
def SETTINGS():
addDir('Add-on Settings',custurl1,42,os.path.join(art,'ADDON.png'),None,None)
addDir('Resolver Settings',custurl1,43,os.path.join(art,'RESOLVER.png'),None,None)
#Main menu
def CATEGORIES():
KidMode = local.getSetting('kid-mode')
if KidMode == 'true':
KIDMODE()
else:
addDir('Movies',custurl1,16,os.path.join(art,'MOVIES.png'),None,None)
addDir('TV',custurl1,17,os.path.join(art,'TV_SHOWS.png'),None,None)
addDir('Settings',custurl1,18,os.path.join(art,'SETTINGS.png'),None,None)
#Menu for Movies
def MOVIES():
MYFAVS = silent.getFavoritesKIDMOVIE2()
addDir('Top 9',custurl1,51,os.path.join(art,'featured1.png'),None,None)
addDir('Featured',custurl1+'featured-movies/',1,os.path.join(art,'featured1.png'),None,None)
addDir('New Releases',custurl1+'movies/new-releases/',1,os.path.join(art,'NEW_RELEASES.png'),None,None)
addDir('Latest Added',custurl1+'/movies/latest-added/',1,os.path.join(art,'LATEST_ADDED.png'),None,None)
addDir('Latest HD',custurl1+'movies/latest-hd-movies/',1,os.path.join(art,'latesthd.png'),None,None)
addDir('Most Viewed',custurl1+'movies/most-viewed/',1,os.path.join(art,'MOST_VIEWED.png'),None,None)
addDir('Most Voted',custurl1+'movies/most-viewed/',1,os.path.join(art,'MOST_VOTED.png'),None,None)
addDir('A-Z',custurl1,5,os.path.join(art,'A_Z.png'),None,None)
addDir('Genres',custurl1,8,os.path.join(art,'GENRE.png'),None,None)
addDir('Year',custurl1,13,os.path.join(art,'year1.png'),None,None)
addDir('Search',custurl1,6,os.path.join(art,'search1.png'),None,None)
addDir('Favorites',custurl1,7,os.path.join(art,'FAVORITE.png'),None,None)
if MYFAVS:
addDir('Kid Movies',custurl1,44,os.path.join(art,'KIDS_MOVIES.png'),None,None)
#List of Years
def YEAR():
    for year in range(2015, 1969, -1):
        addDir(str(year), custurl1 + 'search.php?year=' + str(year), 12, '', None, None)
########
#TV Menu
########
def TV():
MYFAVS = silent.getFavoritesKIDTV2()
addDirTV('Most Popular',custurltv+'/new',36,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Newest Episodes',custurltv+'/latest',36,os.path.join(art,'NEW_EPISODES.png'),None)
addDirTV('A-Z',custurltv+'/letters/09',33,os.path.join(art,'A_Z.png'),None)
addDirTV('Genres',custurltv+'/genres/drama',35,os.path.join(art,'GENRE.png'),None)
addDirTV('TV Schedule',custurltv+'/tvschedule/-1',38,os.path.join(art,'TV_SCHEDULE.png'),None)
addDirTV('Search',custurltv,37,os.path.join(art,'search1.png'),None)
addDirTV('Favorites',custurltv,39,os.path.join(art,'FAVORITE.png'),None)
if MYFAVS:
addDir('Kid TV Shows',custurl1+'',45,os.path.join(art,'KIDS_TV.png'),None,None)
#Popular TV menu
def POPULAR():
    addDirTV('Popular this week',custurltv+'/new',22,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Popular Series',custurltv,26,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Popular Cartoons',custurltv,27,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Popular Documentaries',custurltv,28,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Popular Shows',custurltv,29,os.path.join(art,'MOST_POPULAR.png'),None)
addDirTV('Popular Sports',custurltv,30,os.path.join(art,'MOST_POPULAR.png'),None)
#TV Schedule menu
def SCHEDULE(url):
match=re.compile('<a href="(.+?)".+?>(.+?)</a></li>').findall(net.http_GET(url).content)
for url,name in match:
ok = '/tvschedule/'
nono = 'TV Schedule'
name = name.replace('<b>','')
name = name.replace('</b>','')
if ok in url:
if name not in nono:
addDirTV(name,custurltv+url,52,'',None)
#A-Z TV list
def AZTV(url):
match=re.compile('<li><a href="(.+?)".+?>(.+?)</a></li>').findall(net.http_GET(url).content)
for url,name in match:
icon = os.path.join(art,name+'.png')
ok = ['09','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']
if name in ok:
addDirTV(name,custurltv+url,34,icon,None)
#Genres TV list
def GENRESTV(url):
match=re.compile('<a href="(.+?)" class="sr-header" title=".+?">(.+?)</a>').findall(net.http_GET(url).content)
for url,name in match:
nono = ['How To Watch', 'DMCA', 'Contact Us', 'Home', 'New Releases', 'Latest Added', 'Featured Movies', 'Latest HD Movies', 'Most Viewed', 'Most Viewed', 'Most Voted', 'Genres', 'Submit Links']
if name not in nono:
addDirTV(name,custurltv+url,34,'',None)
#Search for TV Shows
def SEARCHTV(url):
EnableMeta = local.getSetting('Enable-Meta')
keyb = xbmc.Keyboard('', 'Search TV Shows')
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
print encode
url = custurltv+'/search/'+encode
print url
match=re.compile('<div valign="top" style="padding-left: 10px;">.+?<a href="(.+?)" title="(.+?)"',re.DOTALL).findall(net.http_GET(url).content)
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def LIST(url):
EnableMeta = local.getSetting('Enable-Meta')
match=re.compile('<li><a href="(.+?)" title=".+?">(.+?)<span class="epnum">(.+?)</span></a></li>').findall(net.http_GET(url).content)
for url,name,year in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def NEWLINKS(url):
EnableMeta = local.getSetting('Enable-Meta')
match=re.compile('<li><a href="(.+?)">(.+?) Seas. (.+?) Ep. (.+?) \(.+?\).+?</li>',re.DOTALL).findall(net.http_GET(url).content)
for url,name,season,episode in match:
nono = '</a>'
name = silent.CLEAN(name)
if nono not in name:
if EnableMeta == 'true':
addDirTV('%s - Seas. %s : Ep. %s' %(name,season,episode),custurltv+url+'@'+name+'@'+season+'@'+episode,23,'','new')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,23,'',None)
def NEWLINKS2(url):
EnableMeta = local.getSetting('Enable-Meta')
match=re.compile('<li title="(.+?) - Season (.+?) Episode (.+?), .+?"><a href="(.+?)"><img src=".+?">',re.DOTALL).findall(net.http_GET(url).content)
match2=re.compile('<li style=".+?" title="(.+?) - Season (.+?) Episode (.+?),.+?"><a href="(.+?)"',re.DOTALL).findall(net.http_GET(url).content)
for name,season,episode,url in match:
nono = '</a>'
name = silent.CLEAN(name)
if nono not in name:
if EnableMeta == 'true':
addDirTV('%s - Seas. %s : Ep. %s' %(name,season,episode),custurltv+url+'@'+name+'@'+season+'@'+episode,31,'','new')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
for name,season,episode,url in match2:
nono = '</a>'
name = silent.CLEAN(name)
if nono not in name:
if EnableMeta == 'true':
addDirTV('%s - Seas. %s : Ep. %s' %(name,season,episode),custurltv+url+'@'+name+'@'+season+'@'+episode,31,'','new')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def INDEXTV(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<ul class="listings">(.+?)</ul>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<li><a href="(.+?)">(.+?) Seas..+?</a></li>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,23,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,23,'',None)
def INDEX2(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<img src=".+?"/> Most Popular Series\n\t\t\t\t</div>(.+?)</div>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<a href="(.+?)" title="watch online (.+?)">.+?</a>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def INDEX3(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<img src=".+?"/> Most Popular Cartoons\n\t\t\t\t</div>(.+?)</div>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<a href="(.+?)" title="watch online (.+?)">.+?</a>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def INDEX4(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<img src=".+?"/> Most Popular Documentaries\n\t\t\t\t</div>(.+?)</div>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<a href="(.+?)" title="watch online (.+?)">.+?</a>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def INDEX5(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<img src=".+?"/> Most Popular Shows\n\t\t\t\t</div>(.+?)</div>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<a href="(.+?)" title="watch online (.+?)">.+?</a>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
def INDEX6(url):
EnableMeta = local.getSetting('Enable-Meta')
data=re.compile('<img src=".+?"/> Most Popular Sports\n\t\t\t\t</div>(.+?)</div>',re.DOTALL).findall(net.http_GET(url).content)
pattern = '<a href="(.+?)" title="watch online (.+?)">.+?</a>'
match = re.findall(pattern,str(data))
for url,name in match:
name = silent.CLEAN(name)
if EnableMeta == 'true':
addDirTV(name,custurltv+url,31,'','tvshow')
if EnableMeta == 'false':
addDirTV(name,custurltv+url,31,'',None)
#Find Seasons for shows
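# NOTE: The TV functions below pass state between plugin modes by packing several
# values into a single string separated by '@' (url@title@season@episode) and
# splitting them back out with url.split('@').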
def SEASONS(name,url):
try:
episode = url.split('@')[3]
except:
        episode = ''
try:
season = url.split('@')[2]
except:
season = ''
try:
title = url.split('@')[1]
except:
title = ''
try:
url = url.split('@')[0]
except:
url = ''
match=re.compile('<h2 class="lists".+?><a href="(.+?)".+?><span itemprop="name">(.+?)</span></a>',re.DOTALL).findall(net.http_GET(url).content)
for url,name in match:
addDirTV(name,url+'@'+title,32,'','seasons')
#Find Episodes for shows
def EPISODES(name,url):
title = url.split('@')[1]
url = url.split('@')[0]
season = name
print url
match=re.compile('<a href="(.+?)".+?<span class="" itemprop=".+?">(.+?) (.+?) </span>\n\t\t\t\t\t\t\t<span class=".+?" style=".+?"><b>.+?</b> <span itemprop=".+?">(.+?)</span>',re.DOTALL).findall(net.http_GET(url).content)
for url,episode,name,date in match:
print episode
name = silent.CLEAN(name)
addDirTV('%s : %s : %s' %(episode,date,name),custurltv+url+'@'+title+'@'+season+'@'+episode,23,'','new')
#First page with Hosters
def VIDEOLINKSTV(url):
episode = url.split('@')[3]
print 'Episode = '+episode
season = url.split('@')[2]
print 'Season = '+season
title = url.split('@')[1]
print 'Title = '+title
url = url.split('@')[0]
print 'Url = '+url
url = url.replace('https://twitter.com/share','')
print 'Url ='+url
match=re.compile('<a target="_blank" href="(.+?)" class="buttonlink" title="(.+?)"').findall(net.http_GET(url).content)
match2=re.compile('<p><strong>Sorry, there are no links available for this (.+?).</strong></p>').findall(net.http_GET(url).content)
for url,name in match:
nono = ['Sponsored']
if name not in nono:
addDirTV(name,custurltv+url+'@'+title+'@'+season+'@'+episode,24,'',None)
for name in match2:
addDirTV('[B][COLOR yellow]Sorry, no links available yet[/COLOR][/B]',custurltv+url,24,'',None)
#Get the Final Hoster link
def VIDEOLINKS2TV(name,url):
episode = url.split('@')[3]
season = url.split('@')[2]
title = url.split('@')[1]
url = url.split('@')[0]
print 'URL = '+url
match=re.compile('<a href="(.+?)" class="push_button blue"').findall(net.http_GET(url).content)
for url in match:
STREAMTV(name,url+'@'+title+'@'+season+'@'+episode)
#Pass url to urlresolver
def STREAMTV(name,url):
    EnableMeta = local.getSetting('Enable-Meta')
    img = ''
    if EnableMeta == 'true':
        infoLabels = silent.GRABMETATV(name,url,'new')
        try: img = infoLabels['cover_url']
        except: pass
episode = url.split('@')[3]
season = url.split('@')[2]
title = url.split('@')[1]
url = url.split('@')[0]
try:
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
streamlink = urlresolver.resolve(urllib2.urlopen(req).url)
print streamlink
addLinkTV(name,streamlink+'@'+title+'@'+season+'@'+episode,img)
except:
silent.Notify('small','Sorry Link Removed:', 'Please try another one.',9000)
def addLinkTV(name,url,iconimage):
episode = url.split('@')[3]
season = url.split('@')[2]
title = url.split('@')[1]
url = url.split('@')[0]
season = season.replace('Season ', '')
episode = episode.replace('Episode ', '')
ok=True
liz=xbmcgui.ListItem('%s (%sx%s)' %(title,season,episode), iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": '%s (%sx%s)' %(title,season,episode) } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz,isFolder=False)
return ok
def addDirTV(name,url,mode,iconimage,types):
EnableFanArt = local.getSetting('Enable-Fanart')
ok=True
type = types
fimg = addon.get_fanart()
if type != None:
infoLabels = silent.GRABMETATV(name,url,types)
else: infoLabels = {'title':name}
try: img = infoLabels['cover_url']
except: img= iconimage
if EnableFanArt == 'true':
try: fimg = infoLabels['backdrop_url']
except: fimg = addon.get_fanart()
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=img)
liz.setInfo( type="Video", infoLabels= infoLabels)
liz.setProperty( "Fanart_Image", fimg )
contextMenuItems = []
if mode == 31:
contextMenuItems = []
contextMenuItems.append(('TV Show Information', 'XBMC.Action(Info)'))
contextMenuItems.append(('Add to Favorites', 'XBMC.RunPlugin(%s?mode=40&name=%s&url=%s&types=%s)' % (sys.argv[0], name, urllib.quote_plus(url), types)))
contextMenuItems.append(('Add to Kids TV', 'XBMC.RunPlugin(%s?mode=49&name=%s&url=%s&types=%s)' % (sys.argv[0], name, urllib.quote_plus(url), types)))
liz.addContextMenuItems(contextMenuItems, replaceItems=False)
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addFAVDirTV(name,url,types):
EnableFanArt = local.getSetting('Enable-Fanart')
mode = 31
iconimage = ''
ok=True
type = types
fimg = addon.get_fanart()
if type != None:
infoLabels = silent.GRABMETATV(name,url,types)
else: infoLabels = {'title':name}
try: img = infoLabels['cover_url']
except: img = iconimage
if EnableFanArt == 'true':
try: fimg = infoLabels['backdrop_url']
except: fimg = addon.get_fanart()
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=img)
liz.setInfo( type="Video", infoLabels= infoLabels)
liz.setProperty( "Fanart_Image", fimg )
###Add to Library Context Menu
contextMenuItems = []
contextMenuItems.append(('TV Show Information', 'XBMC.Action(Info)'))
contextMenuItems.append(('Remove from Favorites', 'XBMC.RunPlugin(%s?mode=41&name=%s&url=%s&types=%s)' % (sys.argv[0], name, urllib.quote_plus(url), types)))
contextMenuItems.append(('Remove from Kids TV', 'XBMC.RunPlugin(%s?mode=50&name=%s&url=%s&types=%s)' % (sys.argv[0], name, urllib.quote_plus(url), types)))
liz.addContextMenuItems(contextMenuItems, replaceItems=True)
##############################
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
########
########
########
#Return Favorites List *temp need to fix in silent*
def GETMYFAVS():
MYFAVS = silent.getFavorites()
try:
for name,url,year in MYFAVS:
addFAVDir(name,url,year)
except:
pass
def GETMYFAVSTV():
    MYFAVS
"""Generated message classes for datamigration version v1alpha2.
Manage Cloud Database Migration Service resources on Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'datamigration'
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
"ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
"audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
"DATA_WRITE", "exempted_members": [ "user:<EMAIL>" ] } ] } ] } For
sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
logging. It also exempts <EMAIL> from DATA_READ logging, and
<EMAIL> from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example: {
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting <EMAIL> from
DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Binding(_messages.Message):
r"""Associates `members`, or principals, with a `role`.
Fields:
condition: The condition that is associated with this binding. If the
condition evaluates to `true`, then this binding applies to the current
request. If the condition evaluates to `false`, then this binding does
not apply to the current request. However, a different role binding
might grant the same role to one or more of the principals in this
binding. To learn which resources support conditions in their IAM
policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
members: Specifies the principals requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `<EMAIL>` .
* `serviceAccount:{emailid}`: An email address that represents a service
account. For example, `<EMAIL>`. *
`group:{emailid}`: An email address that represents a Google group. For
example, `<EMAIL>`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `<EMAIL>?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `<EMAIL>-
<EMAIL>?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group that
has been recently deleted. For example,
`<EMAIL>?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G Suite
domain (primary) that represents all the users of that domain. For
example, `google.com` or `example.com`.
role: Role that is assigned to the list of `members`, or principals. For
example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CancelOperationRequest(_messages.Message):
r"""The request message for Operations.CancelOperation."""
class CloudSqlConnectionProfile(_messages.Message):
r"""Specifies required connection parameters, and, optionally, the
parameters required to create a Cloud SQL destination database instance.
Fields:
cloudSqlId: Output only. The Cloud SQL instance ID that this connection
profile is associated with.
privateIp: Output only. The Cloud SQL database instance's private IP.
publicIp: Output only. The Cloud SQL database instance's public IP.
settings: Immutable. Metadata used to create the destination Cloud SQL
database.
"""
cloudSqlId = _messages.StringField(1)
privateIp = _messages.StringField(2)
publicIp = _messages.StringField(3)
settings = _messages.MessageField('CloudSqlSettings', 4)
class CloudSqlSettings(_messages.Message):
r"""Settings for creating a Cloud SQL database instance.
Enums:
ActivationPolicyValueValuesEnum: The activation policy specifies when the
instance is activated; it is applicable only when the instance state is
'RUNNABLE'. Valid values: 'ALWAYS': The instance is on, and remains so
even in the absence of connection requests. `NEVER`: The instance is
off; it is not activated, even if a connection request arrives.
DataDiskTypeValueValuesEnum: The type of storage: `PD_SSD` (default) or
`PD_HDD`.
DatabaseVersionValueValuesEnum: The database engine type and version.
Messages:
DatabaseFlagsValue: The database flags passed to the Cloud SQL instance at
startup. An object containing a list of "key": value pairs. Example: {
"name": "wrench", "mass": "1.3kg", "count": "3" }.
UserLabelsValue: The resource labels for a Cloud SQL instance to use to
annotate any related underlying resources such as Compute Engine VMs. An
object containing a list of "key": "value" pairs. Example: `{ "name":
"wrench", "mass": "18kg", "count": "3" }`.
Fields:
activationPolicy: The activation policy specifies when the instance is
activated; it is applicable only when the instance state is 'RUNNABLE'.
Valid values: 'ALWAYS': The instance is on, and remains so even in the
absence of connection requests. `NEVER`: The instance is off; it is not
activated, even if a connection request arrives.
autoStorageIncrease: [default: ON] If you enable this setting, Cloud SQL
checks your available storage every 30 seconds. If the available storage
falls below a threshold size, Cloud SQL automatically adds additional
storage capacity. If the available storage repeatedly falls below the
threshold size, Cloud SQL continues to add storage until it reaches the
maximum of 30 TB.
dataDiskSizeGb: The storage capacity available to the database, in GB. The
minimum (and default) size is 10GB.
dataDiskType: The type of storage: `PD_SSD` (default) or `PD_HDD`.
databaseFlags: The database flags passed to the Cloud SQL instance at
startup. An object containing a list of "key": value pairs. Example: {
"name": "wrench", "mass": "1.3kg", "count": "3" }.
databaseVersion: The database engine type and version.
hasRootPassword: Output only. Indicates If this connection profile root
password is stored.
ipConfig: The settings for IP Management. This allows to enable or disable
the instance IP and manage which external networks can connect to the
instance. The IPv4 address cannot be disabled.
rootPassword: Input only. Initial root password.
sourceId: The Database Migration Service source connection profile ID, in
the format: `projects/my_project_name/locations/us-
central1/connectionProfiles/connection_profile_ID`
storageAutoResizeLimit: The maximum size to which storage capacity can be
automatically increased. The default value is 0, which specifies that
there is no limit.
tier: The tier (or machine type) for this instance, for example:
`db-n1-standard-1` (MySQL instances). For more information, see [Cloud
SQL Instance Settings](https://cloud.google.com/sql/docs/mysql/instance-
settings).
userLabels: The resource labels for a Cloud SQL instance to use to
annotate any related underlying resources such as Compute Engine VMs. An
object containing a list of "key": "value" pairs. Example: `{ "name":
"wrench", "mass": "18kg", "count": "3" }`.
    zone: The
and `std()` work on entire columns. We can run our own functions across all values in a column (or row) using `apply()`.
#
# To give you an idea of how this works, let's consider the "date" column in our DataFrame (formally "EDT").
# In[29]:
data.date.head()
# We can use the `values` property of the column to get a list of values for the column. Inspecting the first value reveals that these are strings with a particular format.
# In[30]:
first_date = data.date.values[0]
first_date
# The `strptime` function from the `datetime` module will make quick work of this date string. There are many [more shortcuts available](http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior) for `strptime`.
# In[31]:
# Import the datetime class from the datetime module
from datetime import datetime
# Convert date string to datetime object
datetime.strptime(first_date, "%Y-%m-%d")
# Using the `apply()` method, which takes a function (**without** the parentheses), we can apply `strptime` to each value in the column. We'll overwrite the string date values with their Python `datetime` equivalents.
# In[32]:
# Define a function to convert strings to dates
def string_to_date(date_string):
return datetime.strptime(date_string, "%Y-%m-%d")
# Run the function on every date string and overwrite the column
data.date = data.date.apply(string_to_date)
data.date.head()
# Let's go one step futher. Each row in our DataFrame represents the weather from a single day. Each row in a DataFrame is associated with an *index*, which is a label that uniquely identifies a row.
#
# Our row indices up to now have been auto-generated by pandas, and are simply integers from 0 to 365. If we use dates instead of integers for our index, we will get some extra benefits from pandas when plotting later on. Overwriting the index is as easy as assigning to the `index` property of the DataFrame.
# In[33]:
data.index = data.date
data.info()
# Now we can quickly look up a row by its date with the `loc[]` property \[[see docs](http://pandas.pydata.org/pandas-docs/stable/indexing.html)], which locates records by label.
# In[34]:
data.loc[datetime(2012, 8, 19)]
# We can also access a row (or range of rows) with the `iloc[]` property, which locates records by integer index.
# In[35]:
data.max_temp.iloc[7:15]
# With all of the dates in the index now, we no longer need the "date" column. Let's drop it.
# In[36]:
data = data.drop("date", axis=1)
data.columns
# Note that we need to pass in `axis=1` in order to drop a column. For more details, check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html) for `drop`. The index values can now be accessed as `data.index.values`.
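# As a quick, illustrative check, the first few index values can be inspected directly:
# In[ ]:
data.index.values[:5]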
# ## Handling Missing Values
# Pandas considers values like `NaN` and `None` to represent missing data. The `count()` function [[see docs](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.count.html)] can be used to tell whether values are missing. We use the parameter `axis=0` to indicate that we want to perform the count by rows, rather than columns.
# In[38]:
data.count(axis=0)
# It is pretty obvious that there are a lot of `NaN` entries for the `events` column; 204 to be exact. Let's take a look at a few values from the `events` column:
# In[39]:
data.events.head(10)
# This isn't exactly what we want. One option is to drop all rows in the DataFrame with missing "events" values using the `dropna()` function \[[see docs](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html)].
# In[40]:
data.dropna(subset=["events"]).info()
# Note that this didn't affect `data`; we're just looking at a copy.
#
# Instead of dropping rows with missing values, let's fill them with empty strings (you'll see why in a moment). This is easily done with the `fillna()` function. We'll go ahead and overwrite the "events" column with empty string missing values instead of `NaN`.
# In[41]:
data.events = data.events.fillna("")
data.events.head(10)
# Now we repeat the `count` function for the `events` column:
# In[42]:
data.events.count()
# As desired, there are no longer any missing entries in the `events` column. Why did we not need the `axis=0` parameter this time?
# ## Iteratively Accessing Rows
# You can iterate over each row in the DataFrame with `iterrows()`. Note that this function returns **both** the index and the row. Also, you must access columns in the row you get back from `iterrows()` with the dictionary syntax.
# In[43]:
num_rain = 0
for idx, row in data.iterrows():
if "Rain" in row["events"]:
num_rain += 1
"Days with rain: {0}".format(num_rain)
# ## Filtering
# Most of your time using pandas will likely be devoted to selecting rows of interest from a DataFrame. In addition to column-name strings, the dictionary-style indexing syntax accepts boolean expressions like:
# In[45]:
freezing_days = data[data.max_temp <= 32]
freezing_days.info()
# We get back another DataFrame with fewer rows (21 in this case). This DataFrame can be filtered down even more by adding a constraint that the temperature be greater than 20 degrees, in addition to being below freezing.
# In[ ]:
cold_days = freezing_days[freezing_days.min_temp >= 20]
cold_days.info()
# To see the high and low temperatures for the selected days:
# In[ ]:
cold_days[["max_temp","min_temp"]]
# Using boolean operations, we could have chosen to apply both filters to the original DataFrame at the same time.
# In[46]:
data[(data.max_temp <= 32) & (data.min_temp >= 20)]
# It's important to understand what's really going on underneath with filtering. Let's look at what kind of object we actually get back when creating a filter.
# In[ ]:
temp_max = data.max_temp <= 32
type(temp_max)
# This is a pandas `Series` object, which is the one-dimensional equivalent of a DataFrame. Because our DataFrame uses datetime objects for the index, we have a specialized `TimeSeries` object.
#
# What's inside the filter?
# In[ ]:
temp_max
# Our filter is nothing more than a `Series` with a *boolean value for every item in the index*. When we "run the filter" like so:
# In[ ]:
data[temp_max].info()
# pandas lines up the rows of the DataFrame and the filter using the index, and then keeps the rows with a `True` filter value. That's it.
#
# Let's create another filter.
# In[ ]:
temp_min = data.min_temp >= 20
temp_min
# Now we can see what the boolean operations are doing. Something like `&` (**not** `and`)...
# In[ ]:
temp_min & temp_max
# ...is just lining up the two filters using the index, performing a boolean AND operation, and returning the result as another `Series`.
#
# We can do other boolean operations too, like OR:
# In[ ]:
temp_min | temp_max
# Because the result is just another `Series`, we have all of the regular pandas functions at our disposal. The `any()` function returns `True` if any value in the `Series` is `True`.
# In[ ]:
temp_both = temp_min & temp_max
temp_both.any()
# Sometimes filters aren't so intuitive. This (sadly) doesn't work:
# In[ ]:
try:
data["Rain" in data.events]
except:
pass # "KeyError: no item named False"
# We can wrap it up in an `apply()` call fairly easily, though:
# In[ ]:
data[data.events.apply(lambda e: "Rain" in e)].info()
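# As an aside, pandas also provides vectorized string methods, so the same substring filter can likely be written without `apply()` (a small sketch):
# In[ ]:
data[data.events.str.contains("Rain")].info()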
# We'll replace "T" with a very small number, and convert the rest of the strings to floats:
# In[47]:
# Convert precipitation to floating point number
# "T" means "trace of precipitation"
def precipitation_to_float(precip_str):
if precip_str == "T":
return 1e-10 # Very small value
return float(precip_str)
data.precipitation = data.precipitation.apply(precipitation_to_float)
data.precipitation.head()
# ---
# ## Ordering: Sorting data
# Sort by max_temp, then by mean_temp, ascending
# In[62]:
get_ipython().magic('pinfo data.sort')
# In[64]:
data.sort(['max_temp', 'mean_temp'])
# In[ ]:
# ---
# # Data Transformation
# ---
# ## Grouping
# Besides `apply()`, another great DataFrame function is `groupby()`.
# It will group a DataFrame by one or more columns, and let you iterate through each group.
#
# As an example, let's group our DataFrame by the "cloud_cover" column (a value ranging from 0 to 8).
# In[49]:
cover_temps = {}
for cover, cover_data in data.groupby("cloud_cover"):
cover_temps[cover] = cover_data.mean_temp.mean() # The mean mean temp!
cover_temps
# When you iterate through the result of `groupby()`, you will get a tuple.
# The first item is the column value, and the second item is a filtered DataFrame (where the column equals the first tuple value).
#
# You can group by more than one column as well.
# In this case, the first tuple item returned by `groupby()` will itself be a tuple with the value of each column.
# In[50]:
for (cover, events), group_data in data.groupby(["cloud_cover", "events"]):
print("Cover: {0}, Events: {1}, Count: {2}".format(cover, events, len(group_data)))
# ## Reshaping: Creating New Columns
# Weather events in our DataFrame are stored in strings like "Rain-Thunderstorm" to represent that it rained and there was a thunderstorm that day. Let's split them out into boolean "rain", "thunderstorm", etc. columns.
#
# First, let's discover the different kinds of weather events we have.
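# A minimal sketch of the split described above might look like the following (the event names listed here are illustrative guesses):
# In[ ]:
for event_kind in ["Rain", "Thunderstorm", "Fog", "Snow"]:
    # Create a boolean column, e.g. data["rain"], for each kind of event
    data[event_kind.lower()] = data.events.apply(lambda e: event_kind in e)
data.rain.head()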
which the item could not be unshared.
"""
path = 'content/items/' + item_id + '/unshare'
postdata = self._postdata()
postdata['groups'] = groups
resp = self.con.post(path, postdata)
if resp:
return resp
def share_item(self, item_id, owner, folder=None, everyone=False, org=False, groups="", allow_members_to_edit=False):
""" Shares an item with the specified list of groups
================ ========================================================
**Argument** **Description**
---------------- --------------------------------------------------------
item_id required string, unique identifier for the item
---------------- --------------------------------------------------------
owner required string, owner of the item currently
---------------- --------------------------------------------------------
folder optional string, folder containing the item. Defaults to the root folder.
---------------- --------------------------------------------------------
everyone optional boolean, share with everyone
---------------- --------------------------------------------------------
org optional boolean, share with the organization
---------------- --------------------------------------------------------
groups optional string,
comma-separated list of group IDs with which the item will be shared.
---------------- --------------------------------------------------------
allow_members_to_edit optional boolean to allow item to be shared with groups that allow shared update
================ ========================================================
:return:
dict with key "notSharedWith" containing array of groups with which the item could not be shared.
"""
path = 'content/users/' + owner
if folder :
path += '/' + folder
path += '/items/' + item_id + '/share'
#print(path)
postdata = self._postdata()
postdata['everyone'] = everyone
postdata['org'] = org
postdata['groups'] = groups
if allow_members_to_edit:
postdata['confirmItemControl'] = True
resp = self.con.post(path, postdata)
if resp:
return resp
def unshare_item(self, item_id, owner, folder=None, groups=""):
""" Stops sharing the item with the specified list of groups
================ ========================================================
**Argument** **Description**
---------------- --------------------------------------------------------
item_id required string, unique identifier for the item
---------------- --------------------------------------------------------
owner required string, owner of the item currently
---------------- --------------------------------------------------------
folder optional string, folder containing the item. Defaults to the root folder.
---------------- --------------------------------------------------------
groups optional string,
comma-separated list of group IDs with which the item will be unshared.
================ ========================================================
:return:
dict with key "notUnsharedFrom" containing array of groups from which the item could not be unshared.
"""
path = 'content/users/' + owner
if folder :
path += '/' + folder
path += '/items/' + item_id + '/unshare'
postdata = self._postdata()
postdata['groups'] = groups
resp = self.con.post(path, postdata)
if resp:
return resp
def delete_user(self, username, reassign_to=None):
""" Deletes a user from the portal, optionally deleting or reassigning groups and items.
.. note::
You can not delete a user in Portal if that user owns groups or items. If you
specify someone in the reassign_to argument then items and groups will be
transferred to that user. If that argument is not set then the method
will fail if the user has items or groups that need to be reassigned.
================ ========================================================
**Argument** **Description**
---------------- --------------------------------------------------------
username required string, the name of the user
---------------- --------------------------------------------------------
reassign_to optional string, new owner of items and groups
================ ========================================================
:return:
a boolean indicating whether the operation succeeded or failed.
"""
if reassign_to :
self.reassign_user(username, reassign_to)
resp = self.con.post('community/users/' + username + '/delete',self._postdata())
if resp:
return resp.get('success')
else:
return False
def generate_token(self, username, password, expiration=60):
""" Generates and returns a new token, but doesn't re-login.
.. note::
This method is not needed when using the Portal class
to make calls into Portal. It's provided for the benefit
of making calls into Portal outside of the Portal class.
Portal uses a token-based authentication mechanism where
a user provides their credentials and a short-term token
is used for calls. Most calls made to the Portal REST API
require a token and this can be appended to those requests.
================ ========================================================
**Argument** **Description**
---------------- --------------------------------------------------------
username required string, name of the user
---------------- --------------------------------------------------------
        password         required string, password of the user
---------------- --------------------------------------------------------
expiration optional integer, number of minutes until the token expires
================ ========================================================
:return:
a string with the token
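        Example (illustrative; the credentials shown are placeholders)
        .. code-block:: python
            token = portal.generate_token('jsmith', 'mypassword', expiration=120)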
"""
return self.con.generate_token(username, password, expiration)
def get_group(self, group_id):
""" Returns group information for the specified group group_id.
Arguments
group_id : required string, indicating group.
:return:
a dictionary object with the group's information. The keys in
the dictionary object will often include:
================ ========================================================
**Key** **Value**
---------------- --------------------------------------------------------
title: the name of the group
---------------- --------------------------------------------------------
isInvitationOnly if set to true, users can't apply to join the group.
---------------- --------------------------------------------------------
owner: the owner username of the group
---------------- --------------------------------------------------------
description: explains the group
---------------- --------------------------------------------------------
snippet: a short summary of the group
---------------- --------------------------------------------------------
tags: user-defined tags that describe the group
---------------- --------------------------------------------------------
phone: contact information for group.
---------------- --------------------------------------------------------
thumbnail: File name relative to http://<community-url>/groups/<groupId>/info
---------------- --------------------------------------------------------
created: When group created, ms since 1 Jan 1970
---------------- --------------------------------------------------------
modified: When group last modified. ms since 1 Jan 1970
---------------- --------------------------------------------------------
access: Can be private, org, or public.
---------------- --------------------------------------------------------
userMembership: A dict with keys username and memberType.
---------------- --------------------------------------------------------
memberType: provides the calling user's access (owner, admin, member, none).
================ ========================================================
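        Example (illustrative; the group ID reuses the placeholder from the other examples)
        .. code-block:: python
            group = portal.get_group('67e1761068b7453693a0c68c92a62e2e')
            print group['title']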
"""
return self.con.post('community/groups/' + group_id, self._postdata())
def get_group_thumbnail(self, group_id):
""" Returns the bytes that make up the thumbnail for the specified group group_id.
Arguments
group_id: required string, specifies the group's thumbnail
Returns
            bytes that represent the image.
Example
.. code-block:: python
response = portal.get_group_thumbnail("67e1761068b7453693a0c68c92a62e2e")
f = open(filename, 'wb')
f.write(response)
"""
thumbnail_file = self.get_group(group_id).get('thumbnail')
if thumbnail_file:
thumbnail_url_path = 'community/groups/' + group_id + '/info/' + thumbnail_file
if thumbnail_url_path:
return self.con.get(thumbnail_url_path, try_json=False, force_bytes=True)
def get_group_members(self, group_id):
""" Returns members of the specified group.
Arguments
group_id: required string, specifies the group
Returns
a dictionary with keys: owner, admins, and users.
================ ========================================================
**Key** **Value**
---------------- --------------------------------------------------------
owner string value, the group's owner
---------------- --------------------------------------------------------
admins list of strings, typically this is the same as the owner.
---------------- --------------------------------------------------------
users list of strings, the members of the group
================ ========================================================
Example (to print users in a group)
.. code-block:: python
response = portal.get_group_members("67e1761068b7453693a0c68c92a62e2e")
for user in response['users'] :
print user
"""
return self.con.post('community/groups/' + group_id + '/users',
self._postdata())
def get_org_roles(self, max_roles=1000):
""" Returns all roles within the portal organization.
Arguments
            max_roles : optional int, the maximum number of roles to return.
        :return:
            a list of dicts, one per role.
"""
# Execute the search and get back the results
count = 0
resp = self._roles_page(1, min(max_roles, 100))
resp_roles = resp.get('roles')
results = resp_roles
count += int(resp['num'])
nextstart = int(resp['nextStart'])
while count < max_roles and nextstart > 0:
resp = self._roles_page(nextstart, min(max_roles - count, 100))
resp_roles = resp.get('roles')
results.extend(resp_roles)
count += int(resp['num'])
nextstart = int(resp['nextStart'])
return results
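# Illustrative sketch (not part of the original source): paging through the
# organization's roles. The 'name' key on each role dict is an assumption,
# since the docstring above does not list the returned keys.
#
#   for role in portal.get_org_roles(max_roles=50):
#       print role.get('name')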
def get_org_users(self, max_users=1000, exclude_system=True, user_type=None, role=None):
""" Returns all users within the portal organization.
Arguments
max_users : optional int, the maximum number of users to return.
:return:
a list of dicts. Each dict has the following keys:
================ ========================================================
**Key** **Value**
---------------- --------------------------------------------------------
username: string
---------------- --------------------------------------------------------
storageUsage: int
---------------- --------------------------------------------------------
storageQuota: int
---------------- --------------------------------------------------------
description: string
---------------- --------------------------------------------------------
tags: list of strings
---------------- --------------------------------------------------------
region: string
---------------- --------------------------------------------------------
created: int, when account created, ms since 1 Jan 1970
---------------- --------------------------------------------------------
modified: int, when account last modified, ms since 1 Jan 1970
---------------- --------------------------------------------------------
email: string
---------------- --------------------------------------------------------
culture: string
---------------- --------------------------------------------------------
orgId: string
---------------- --------------------------------------------------------
preferredView: string
---------------- --------------------------------------------------------
groups: list of strings
---------------- --------------------------------------------------------
role: string (org_user, org_publisher, org_admin)
---------------- --------------------------------------------------------
fullName: string
---------------- --------------------------------------------------------
thumbnail: string
---------------- --------------------------------------------------------
idpUsername: string
================ ========================================================
Example (print all usernames in portal):
.. code-block:: python
resp = portalAdmin.get_org_users()
for user in resp:
print user['username']
"""
# Execute the search and get back the results
count = 0
resp = self._org_users_page(1, min(max_users, 100),
exclude_system=exclude_system,
user_type=user_type,
role=role)
resp_users = resp.get('users')
results = resp_users
count += int(resp['num'])
nextstart = int(resp['nextStart'])
while count < max_users and nextstart > 0:
resp = self._org_users_page(nextstart, min(max_users - count, 100),
exclude_system=exclude_system,
user_type=user_type,
role=role)
resp_users = resp.get('users')
results.extend(resp_users)
count += int(resp['num'])
nextstart = int(resp['nextStart'])
return results
def get_properties(self, force=False):
""" Returns the portal properties (using cache unless force=True). """
# If we've never retrieved the properties before, or the caller is
# forcing a check of the server, then check the server
if not self._properties or force:
path = 'accounts/self' if self._is_pre_162 else 'portals/self'
try:
<reponame>jjon/rdflib
import collections
import datetime
import itertools
import typing as t
from typing import Any, Container, Dict, Iterable, List, Optional, Tuple, Union
import isodate
import rdflib.plugins.sparql
from rdflib.compat import Mapping, MutableMapping
from rdflib.graph import ConjunctiveGraph, Graph
from rdflib.namespace import NamespaceManager
from rdflib.plugins.sparql.parserutils import CompValue
from rdflib.term import BNode, Identifier, Literal, Node, URIRef, Variable
class SPARQLError(Exception):
def __init__(self, msg: Optional[str] = None):
Exception.__init__(self, msg)
class NotBoundError(SPARQLError):
def __init__(self, msg: Optional[str] = None):
SPARQLError.__init__(self, msg)
class AlreadyBound(SPARQLError):
"""Raised when trying to bind a variable that is already bound!"""
def __init__(self):
SPARQLError.__init__(self)
class SPARQLTypeError(SPARQLError):
def __init__(self, msg: Optional[str]):
SPARQLError.__init__(self, msg)
class Bindings(MutableMapping):
"""
A single level of a stack of variable-value bindings.
Each dict keeps a reference to the dict below it,
any failed lookup is propagated back
In python 3.3 this could be a collections.ChainMap
"""
def __init__(self, outer: Optional["Bindings"] = None, d=[]):
self._d: Dict[str, str] = dict(d)
self.outer = outer
def __getitem__(self, key: str) -> str:
if key in self._d:
return self._d[key]
if not self.outer:
raise KeyError()
return self.outer[key]
def __contains__(self, key: Any) -> bool:
try:
self[key]
return True
except KeyError:
return False
def __setitem__(self, key: str, value: Any) -> None:
self._d[key] = value
def __delitem__(self, key: str) -> None:
raise Exception("DelItem is not implemented!")
def __len__(self) -> int:
i = 0
d: Optional[Bindings] = self
while d is not None:
i += len(d._d)
d = d.outer
return i
def __iter__(self):
d = self
while d is not None:
yield from d._d
d = d.outer
def __str__(self) -> str:
# Format each key/value pair as a string; joining raw tuples would raise a TypeError.
return "Bindings({" + ", ".join("%s: %s" % (k, self[k]) for k in self) + "})"
def __repr__(self) -> str:
return str(self)
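# Illustrative sketch (not part of the original module): expected behaviour of
# the Bindings stack defined above.
#
#   outer = Bindings(d=[("x", "1")])
#   inner = Bindings(outer=outer, d=[("y", "2")])
#   inner["x"]     # -> "1", the lookup falls through to the outer level
#   len(inner)     # -> 2, counts the bindings on every level
#   "y" in outer   # -> False, outer levels never see inner bindings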
class FrozenDict(Mapping):
"""
An immutable hashable dict
Taken from http://stackoverflow.com/a/2704866/81121
"""
def __init__(self, *args: Any, **kwargs: Any):
self._d: Dict[Identifier, Identifier] = dict(*args, **kwargs)
self._hash: Optional[int] = None
def __iter__(self):
return iter(self._d)
def __len__(self) -> int:
return len(self._d)
def __getitem__(self, key: Identifier) -> Identifier:
return self._d[key]
def __hash__(self) -> int:
# It would have been simpler and maybe more obvious to
# use hash(tuple(sorted(self._d.items()))) from this discussion
# so far, but this solution is O(n). I don't know what kind of
# n we are going to run into, but sometimes it's hard to resist the
# urge to optimize when it will gain improved algorithmic performance.
if self._hash is None:
self._hash = 0
for key, value in self.items():
self._hash ^= hash(key)
self._hash ^= hash(value)
return self._hash
def project(self, vars: Container[Variable]) -> "FrozenDict":
return FrozenDict((x for x in self.items() if x[0] in vars))
def disjointDomain(self, other: t.Mapping[Identifier, Identifier]) -> bool:
return not bool(set(self).intersection(other))
def compatible(self, other: t.Mapping[Identifier, Identifier]) -> bool:
for k in self:
try:
if self[k] != other[k]:
return False
except KeyError:
pass
return True
def merge(self, other: t.Mapping[Identifier, Identifier]) -> "FrozenDict":
res = FrozenDict(itertools.chain(self.items(), other.items()))
return res
def __str__(self) -> str:
return str(self._d)
def __repr__(self) -> str:
return repr(self._d)
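# Illustrative sketch (not part of the original module): how two FrozenDict
# solutions combine; the variables and literals are made up for the example.
#
#   a = FrozenDict({Variable("x"): Literal("1")})
#   b = FrozenDict({Variable("y"): Literal("2")})
#   a.disjointDomain(b)   # True: no variable is bound in both
#   a.compatible(b)       # True: no shared variable is bound to different values
#   a.merge(b)            # new FrozenDict carrying both bindings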
class FrozenBindings(FrozenDict):
def __init__(self, ctx: "QueryContext", *args, **kwargs):
FrozenDict.__init__(self, *args, **kwargs)
self.ctx = ctx
def __getitem__(self, key: Union[Identifier, str]) -> Identifier:
if not isinstance(key, Node):
key = Variable(key)
if not isinstance(key, (BNode, Variable)):
return key
if key not in self._d:
# type error: Value of type "Optional[Dict[Variable, Identifier]]" is not indexable
# type error: Invalid index type "Union[BNode, Variable]" for "Optional[Dict[Variable, Identifier]]"; expected type "Variable"
return self.ctx.initBindings[key] # type: ignore[index]
else:
return self._d[key]
def project(self, vars: Container[Variable]) -> "FrozenBindings":
return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in vars))
def merge(self, other: t.Mapping[Identifier, Identifier]) -> "FrozenBindings":
res = FrozenBindings(self.ctx, itertools.chain(self.items(), other.items()))
return res
@property
def now(self) -> datetime.datetime:
return self.ctx.now
@property
def bnodes(self) -> t.Mapping[Identifier, BNode]:
return self.ctx.bnodes
@property
def prologue(self) -> Optional["Prologue"]:
return self.ctx.prologue
def forget(
self, before: "QueryContext", _except: Optional[Container[Variable]] = None
):
"""
return a frozen dict only of bindings made in self
since before
"""
if not _except:
_except = []
# bindings from initBindings are never forgotten
return FrozenBindings(
self.ctx,
(
x
for x in self.items()
if (
x[0] in _except
# type error: Unsupported right operand type for in ("Optional[Dict[Variable, Identifier]]")
or x[0] in self.ctx.initBindings # type: ignore[operator]
or before[x[0]] is None
)
),
)
def remember(self, these):
"""
return a frozen dict only of bindings in these
"""
return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
class QueryContext(object):
"""
Query context - passed along when evaluating the query
"""
def __init__(
self,
graph: Optional[Graph] = None,
bindings: Optional[Union[Bindings, FrozenBindings, List[Any]]] = None,
initBindings: Optional[Dict[Variable, Identifier]] = None,
):
self.initBindings = initBindings
self.bindings = Bindings(d=bindings or [])
if initBindings:
self.bindings.update(initBindings)
self.graph: Optional[Graph]
self._dataset: Optional[ConjunctiveGraph]
if isinstance(graph, ConjunctiveGraph):
self._dataset = graph
if rdflib.plugins.sparql.SPARQL_DEFAULT_GRAPH_UNION:
self.graph = self.dataset
else:
self.graph = self.dataset.default_context
else:
self._dataset = None
self.graph = graph
self.prologue: Optional[Prologue] = None
self._now: Optional[datetime.datetime] = None
self.bnodes: t.MutableMapping[Identifier, BNode] = collections.defaultdict(
BNode
)
@property
def now(self) -> datetime.datetime:
if self._now is None:
self._now = datetime.datetime.now(isodate.tzinfo.UTC)
return self._now
def clone(
self, bindings: Optional[Union[FrozenBindings, Bindings, List[Any]]] = None
) -> "QueryContext":
r = QueryContext(
self._dataset if self._dataset is not None else self.graph,
bindings or self.bindings,
initBindings=self.initBindings,
)
r.prologue = self.prologue
r.graph = self.graph
r.bnodes = self.bnodes
return r
@property
def dataset(self) -> ConjunctiveGraph:
""" "current dataset"""
if self._dataset is None:
raise Exception(
"You performed a query operation requiring "
+ "a dataset (i.e. ConjunctiveGraph), but "
+ "operating currently on a single graph."
)
return self._dataset
def load(self, source: URIRef, default: bool = False, **kwargs):
def _load(graph, source):
try:
return graph.parse(source, format="turtle", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="xml", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="n3", **kwargs)
except Exception:
pass
try:
return graph.parse(source, format="nt", **kwargs)
except Exception:
raise Exception(
"Could not load %s as either RDF/XML, N3 or NTriples" % source
)
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
# we are not loading - if we already know the graph
# being "loaded", just add it to the default-graph
if default:
# Unsupported left operand type for + ("None")
self.graph += self.dataset.get_context(source) # type: ignore[operator]
else:
if default:
_load(self.graph, source)
else:
_load(self.dataset, source)
def __getitem__(self, key) -> Any:
# in SPARQL BNodes are just labels
if not isinstance(key, (BNode, Variable)):
return key
try:
return self.bindings[key]
except KeyError:
return None
def get(self, key: Variable, default: Optional[Any] = None):
try:
return self[key]
except KeyError:
return default
def solution(self, vars: Optional[Iterable[Variable]] = None) -> FrozenBindings:
"""
Return a static copy of the current variable bindings as dict
"""
if vars:
return FrozenBindings(
self, ((k, v) for k, v in self.bindings.items() if k in vars)
)
else:
return FrozenBindings(self, self.bindings.items())
def __setitem__(self, key: Identifier, value: Identifier) -> None:
if key in self.bindings and self.bindings[key] != value:
raise AlreadyBound()
self.bindings[key] = value
def pushGraph(self, graph: Optional[Graph]) -> "QueryContext":
r = self.clone()
r.graph = graph
return r
def push(self) -> "QueryContext":
r = self.clone(Bindings(self.bindings))
return r
def clean(self) -> "QueryContext":
return self.clone([])
def thaw(self, frozenbindings: FrozenBindings) -> "QueryContext":
"""
Create a new read/write query context from the given solution
"""
c = self.clone(frozenbindings)
return c
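# Illustrative sketch (not part of the original module): typical use of
# QueryContext during evaluation; the URIs are hypothetical.
#
#   ctx = QueryContext(graph=Graph())
#   ctx[Variable("s")] = URIRef("http://example.org/a")   # bind once
#   ctx[Variable("s")] = URIRef("http://example.org/b")   # raises AlreadyBound
#   row = ctx.solution()    # frozen snapshot of the current bindings
#   branch = ctx.push()     # clone with a fresh binding level on top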
class Prologue:
"""
A class for holding prefixing bindings and base URI information
"""
def __init__(self):
self.base: Optional[str] = None
self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store
def resolvePName(self, prefix: Optional[str], localname: Optional[str]) -> URIRef:
ns = self.namespace_manager.store.namespace(prefix or "")
if ns is None:
raise Exception("Unknown namespace prefix : %s" % prefix)
return URIRef(ns + (localname or ""))
def bind(self, prefix: Optional[str], uri: Any) -> None:
self.namespace_manager.bind(prefix, uri, replace=True)
def absolutize(
self, iri: Optional[Union[CompValue, str]]
) -> Optional[Union[CompValue, str]]:
"""
Apply BASE / PREFIXes to URIs
(and to datatypes in Literals)
TODO: Move resolving URIs to pre-processing
"""
if isinstance(iri, CompValue):
if iri.name == "pname":
return self.resolvePName(iri.prefix, iri.localname)
if iri.name == "literal":
# type error: Argument "datatype" to "Literal" has incompatible type "Union[CompValue, Identifier, None]"; expected "Optional[str]"
return Literal(
iri.string, lang=iri.lang, datatype=self.absolutize(iri.datatype) # type: ignore[arg-type]
)
elif isinstance(iri, URIRef) and ":" not in iri:
return URIRef(iri, base=self.base)
return iri
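# Illustrative sketch (not part of the original module): prefix and BASE
# resolution with the Prologue above; the namespace URI is hypothetical.
#
#   p = Prologue()
#   p.bind("ex", URIRef("http://example.org/"))
#   p.resolvePName("ex", "thing")   # -> URIRef("http://example.org/thing")
#   p.base = "http://example.org/"
#   p.absolutize(URIRef("doc"))     # resolved against the BASE URI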
class Query:
"""
A parsed and translated query
"optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Gas detection status", "unsigned char", 1, "optional", "-", "mandatory"))
obj.add_property(Property(0xE0, "Measured value of gas concentration", "unsigned short", 2, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x1C)] = obj
# VOC sensor (0x001D)
obj = Object("VOC sensor", 0x00, 0x1D)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "VOC detection status", "unsigned char", 1, "optional", "-", "mandatory"))
obj.add_property(Property(0xE0, "Measured value of VOC concentration", "unsigned short", 2, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x1D)] = obj
# Differential pressure sensor (0x001E)
obj = Object("Differential pressure sensor", 0x00, 0x1E)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Measured value of differential pressure", "signed short", 2, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x1E)] = obj
# Air speed sensor (0x001F)
obj = Object("Air speed sensor", 0x00, 0x1F)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Measured value of air speed", "unsigned short", 2, "mandatory", "-", "-"))
obj.add_property(Property(0xE1, "Air flow direction", "unsigned short", 2, "optional", "-", "-"))
__std_scsl_objects[(0x00, 0x1F)] = obj
# Odor sensor (0x0020)
obj = Object("Odor sensor", 0x00, 0x20)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Odor detection status", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xE0, "Measured odor value", "unsigned char", 1, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x20)] = obj
# Flame sensor (0x0021)
obj = Object("Flame sensor", 0x00, 0x21)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Flame detection status", "unsigned char", 1, "mandatory", "-", "mandatory"))
obj.add_property(Property(0xBF, "Flame detection status resetting", "unsigned char", 1, "-", "optional", "-"))
__std_scsl_objects[(0x00, 0x21)] = obj
# Electric energy sensor (0x0022)
obj = Object("Electric energy sensor", 0x00, 0x22)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Integral electric energy", "unsigned long", 4, "mandatory", "-", "-"))
obj.add_property(Property(0xE1, "Medium-capacity sensor instantaneous electric energy", "signed long", 4, "optional", "-", "-"))
obj.add_property(Property(0xE2, "Small-capacity sensor instantaneous electric energy", "signed short", 2, "optional", "-", "-"))
obj.add_property(Property(0xE3, "Large-capacity sensor instantaneous electric energy", "signed short", 2, "optional", "-", "-"))
obj.add_property(Property(0xE4, "Integral electric energy measurement log", "unsigned long × 48", 192, "optional", "-", "-"))
obj.add_property(Property(0xE5, "Effective voltage value", "unsigned short", 2, "optional", "-", "-"))
__std_scsl_objects[(0x00, 0x22)] = obj
# Current value sensor (0x0023)
obj = Object("Current value sensor", 0x00, 0x23)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Measured current value 1", "unsigned long", 4, "mandatory", "-", "-"))
obj.add_property(Property(0xE1, "Rated voltage to be measured", "unsigned short", 2, "optional", "-", "-"))
obj.add_property(Property(0xE2, "Measured current value 2", "unsigned long", 4, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x23)] = obj
# Water flow rate sensor (0x0025)
obj = Object("Water flow rate sensor", 0x00, 0x25)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Integral flow rate", "unsigned long", 4, "optional", "-", "-"))
obj.add_property(Property(0xE2, "Flow rate", "unsigned long", 4, "mandatory", "-", "-"))
__std_scsl_objects[(0x00, 0x25)] = obj
# Micromotion sensor (0x0026)
obj = Object("Micromotion sensor", 0x00, 0x26)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Micromotion detection status", "unsigned char", 1, "mandatory", "-", "mandatory"))
obj.add_property(Property(0xB2, "Detection counter", "unsigned short", 2, "optional", "optional", "-"))
obj.add_property(Property(0xBC, "Sampling count", "unsigned short", 2, "optional", "optional", "-"))
obj.add_property(Property(0xBD, "Sampling cycle", "unsigned short", 2, "optional", "optional", "-"))
__std_scsl_objects[(0x00, 0x26)] = obj
# Passage sensor (0x0027)
obj = Object("Passage sensor", 0x00, 0x27)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xBE, "Passage detection hold time", "unsigned char", 2, "optional", "optional", "-"))
obj.add_property(Property(0xE0, "Passage detection direction", "unsigned char", 1, "mandatory", "-", "mandatory"))
__std_scsl_objects[(0x00, 0x27)] = obj
# Bed presence sensor (0x0028)
obj = Object("Bed presence sensor", 0x00, 0x28)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Bed presence detection status", "unsigned char", 1, "mandatory", "-", "mandatory"))
__std_scsl_objects[(0x00, 0x28)] = obj
# Open/close sensor (0x0029)
obj = Object("Open/close sensor", 0x00, 0x29)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Degree-of-openi ng detection status 1", "unsigned char", 1, "mandatory", "-", "-"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Degree-of-openi ng detection status 2", "unsigned char", 1, "mandatory", "-", "mandatory"))
__std_scsl_objects[(0x00, 0x29)] = obj
# Activity amount sensor (0x002A)
obj = Object("Activity amount sensor", 0x00, 0x2A)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Activity amount level 1", "unsigned char × max 128", 0, "mandatory", "-", "-"))
obj.add_property(Property(0xE1, "Maximum number of human body ID's", "unsigned short", 2, "optional", "-", "-"))
obj.add_property(Property(0xE2, "Activity amount level 2", "unsigned char", 1, "mandatory", "-", "-"))
obj.add_property(Property(0xE3, "Human body existence information", "unsigned char × 16", 16, "optional", "-", "-"))
__std_scsl_objects[(0x00, 0x2A)] = obj
# Human body location sensor (0x002B)
obj = Object("Human body location sensor", 0x00, 0x2B)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xE0, "Human body detection location 1", "unsigned char × 3 x max 128", 0, "mandatory", "-", "-"))
obj.add_property(Property(0xE1, "Maximum number of human body ID's", "unsigned short", 2, "optional", "-", "-"))
obj.add_property(Property(0xE2, "Human body detection location 2", "unsigned char × 3", 3, "mandatory", "-", "-"))
obj.add_property(Property(0xE3, "Human body existence information", "unsigned char × 16", 16, "optional", "-", "-"))
__std_scsl_objects[(0x00, 0x2B)] = obj
# Snow sensor (0x002C)
obj = Object("Snow sensor", 0x00, 0x2C)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "optional", "mandatory"))
obj.add_property(Property(0xB0, "Detection threshold level", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB1, "Snow detection status", "unsigned char", 1, "mandatory", "-", "mandatory"))
__std_scsl_objects[(0x00, 0x2C)] = obj
# Home air conditioner (0x0130)
obj = Object("Home air conditioner", 0x01, 0x30)
obj.add_property(Property(0x80, "Operation status", "unsigned char", 1, "mandatory", "mandatory", "mandatory"))
obj.add_property(Property(0xB0, "Operation mode setting", "unsigned char", 1, "mandatory", "mandatory", "mandatory"))
obj.add_property(Property(0xB1, "Automatic temperature control setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB2, "Normal/high- speed/silent operation setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB3, "Set temperature value", "unsigned char", 1, "mandatory", "mandatory", "-"))
obj.add_property(Property(0xB4, "Set value of relative humidity in dehumidifying mode", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB5, "Set temperature value in cooling mode", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB6, "Set temperature value in heating mode", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB7, "Set temperature value in dehumidifying mode", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xB8, "Rated power consumption", "unsigned short × 4", 8, "optional", "-", "-"))
obj.add_property(Property(0xB9, "Measured value of current consumption", "unsigned short", 2, "optional", "-", "-"))
obj.add_property(Property(0xBA, "Measured value of room relative humidity", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xBB, "Measured value of room temperature", "signed char", 1, "optional", "-", "-"))
obj.add_property(Property(0xBC, "Set temperature value of user remote control", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xBD, "Measured cooled air temperature", "signed char", 1, "optional", "-", "-"))
obj.add_property(Property(0xBE, "Measured outdoor air temperature", "signed char", 1, "optional", "-", "-"))
obj.add_property(Property(0xBF, "Relative temperature setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xA0, "Air flow rate setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xA1, "Automatic control of air flow direction setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xA3, "Automatic swing of air flow setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xA4, "Air flow direction (vertical) setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xA5, "Air flow direction (horizontal) setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xAA, "Special state", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xAB, "Non-priority state", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xC0, "Ventilation function setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xC1, "Humidifier function setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xC2, "Ventilation air flow rate setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xC4, "Degree of humidification setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xC6, "Mounted air cleaning method", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xC7, "Air purifier function setting", "unsigned char × 8", 8, "optional", "optional", "-"))
obj.add_property(Property(0xC8, "Mounted air refresh method", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xC9, "Air refresher function setting", "unsigned char × 8", 8, "optional", "optional", "-"))
obj.add_property(Property(0xCA, "Mounted self-cleaning method", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xCB, "Self-cleaning function setting", "unsigned char × 8", 8, "optional", "optional", "-"))
obj.add_property(Property(0xCC, "Special function setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0xCD, "Operation status of components", "unsigned char", 1, "optional", "-", "-"))
obj.add_property(Property(0xCE, "Thermostat setting override function", "unsigned char", 1, "-", "optional", "-"))
obj.add_property(Property(0xCF, "Air purification mode setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0x90, "ON timer-based reservation setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0x91, "ON timer setting (time)", "unsigned char × 2", 2, "optional", "optional", "-"))
obj.add_property(Property(0x92, "ON timer setting (relative time)", "unsigned char × 2", 2, "optional", "optional", "-"))
obj.add_property(Property(0x94, "OFF timer-based reservation setting", "unsigned char", 1, "optional", "optional", "-"))
obj.add_property(Property(0x95, "OFF timer setting (time)", "unsigned char × 2", 2, "optional", "optional", "-"))
obj.add_property(Property(0x96, "OFF timer setting (relative time)", "unsigned char × 2", 2, "optional", "optional",
<reponame>illicitonion/buck
from __future__ import with_statement
import __builtin__
import __future__
import contextlib
from collections import namedtuple
from pathlib import _Accessor, Path, PureWindowsPath, PurePath, PosixPath
from pywatchman import bser
from contextlib import contextmanager, nested
import copy
import StringIO
import cProfile
import functools
import hashlib
import imp
import inspect
import itertools
import json
import optparse
import os
import os.path
import pstats
import re
import subprocess
import sys
import traceback
import types
try:
# Python 2.6, 2.7, use iterator filter from Python 3
from future_builtins import filter
except ImportError:
# use standard filter (Python 3, Python < 2.6)
pass
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is an object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
BUILD_FUNCTIONS = []
# Wait this many seconds on recv() or send() in the pywatchman client
# if not otherwise specified in .buckconfig
DEFAULT_WATCHMAN_QUERY_TIMEOUT = 5.0
ORIGINAL_IMPORT = __builtin__.__import__
class SyncCookieState(object):
"""
Process-wide state used to enable Watchman sync cookies only on
the first query issued.
"""
def __init__(self):
self.use_sync_cookies = True
class BuildContextType(object):
"""
Identifies the type of input file to the processor.
"""
BUILD_FILE = 'build_file'
INCLUDE = 'include'
class BuildFileContext(object):
"""
The build context used when processing a build file.
"""
type = BuildContextType.BUILD_FILE
def __init__(self, project_root, base_path, dirname, autodeps, allow_empty_globs, ignore_paths,
watchman_client, watchman_watch_root, watchman_project_prefix,
sync_cookie_state, watchman_error, watchman_glob_stat_results,
watchman_use_glob_generator, use_mercurial_glob):
self.globals = {}
self.includes = set()
self.used_configs = {}
self.used_env_vars = {}
self.project_root = project_root
self.base_path = base_path
self.dirname = dirname
self.autodeps = autodeps
self.allow_empty_globs = allow_empty_globs
self.ignore_paths = ignore_paths
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_error = watchman_error
self.watchman_glob_stat_results = watchman_glob_stat_results
self.watchman_use_glob_generator = watchman_use_glob_generator
self.use_mercurial_glob = use_mercurial_glob
self.diagnostics = set()
self.rules = {}
class IncludeContext(object):
"""
The build context used when processing an include.
"""
type = BuildContextType.INCLUDE
def __init__(self):
self.globals = {}
self.includes = set()
self.used_configs = {}
self.used_env_vars = {}
self.diagnostics = set()
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, to match the build
environment currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
return self.func(*args, **updated_kwargs)
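# Illustrative sketch (not part of the original file): binding and invoking a
# LazyBuildEnvPartial; `my_library` is hypothetical.
#
#   partial = LazyBuildEnvPartial(my_library)
#   partial.build_env = build_env     # assigned per build file being processed
#   partial.invoke('foo', deps=[])    # build_env is injected as a keyword arg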
Diagnostic = namedtuple('Diagnostic', ['message', 'level', 'source'])
def provide_for_build(func):
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `{}()` at the top-level of an included file."
.format(rule['buck.type']))
# Include the base path of the BUCK file so the reader consuming this
# output will know which BUCK file the rule came from.
if 'name' not in rule:
raise ValueError(
'rules must contain the field \'name\'. Found %s.' % rule)
rule_name = rule['name']
if not isinstance(rule_name, basestring):
raise ValueError(
'rules \'name\' field must be a string. Found %s.' % rule_name)
if rule_name in build_env.rules:
raise ValueError('Duplicate rule definition found. Found %s and %s' %
(rule, build_env.rules[rule_name]))
rule['buck.base_path'] = build_env.base_path
# It is possible that the user changed the rule from autodeps=True to autodeps=False
# without re-running `buck autodeps` (this is common when resolving merge conflicts).
# When this happens, the deps in BUCK.autodeps should be ignored because autodeps is
# set to False.
if rule_name in build_env.autodeps:
if rule.get('autodeps', False):
# TODO(bolinfest): One major edge case that exists right now when using a set to de-dupe
# elements is that the same target may be referenced in two different ways:
# 1. As a fully-qualified target: //src/com/facebook/buck/android:packageable
# 2. As a local target: :packageable
# Because of this, we may end up with two entries for the same target even though we
# are trying to use a set to remove duplicates.
# Combine all of the deps into a set to eliminate duplicates. Although we would prefer
# it if each dep were exclusively in BUCK or BUCK.autodeps, that is not always
# possible. For example, if a user-defined macro creates a library that hardcodes a dep
# and the tooling to produce BUCK.autodeps also infers the need for that dep and adds
# it to BUCK.autodeps, then it will appear in both places.
auto_deps = build_env.autodeps[rule_name].get('deps', None)
if auto_deps:
explicit_deps = rule.get('deps', [])
deps = set(explicit_deps)
deps.update(auto_deps)
rule['deps'] = list(deps)
auto_exported_deps = build_env.autodeps[rule_name].get('exported_deps', None)
if auto_exported_deps:
explicit_exported_deps = rule.get('exportedDeps', [])
exported_deps = set(explicit_exported_deps)
exported_deps.update(auto_exported_deps)
rule['exportedDeps'] = list(exported_deps)
else:
# If there is an entry in the .autodeps file for the rule, but the rule has autodeps
# set to False, then the .autodeps file is likely out of date. Ideally, we would warn
# the user to re-run `buck autodeps` in this scenario. Unfortunately, we do not have
# a mechanism to relay warnings from buck.py at the time of this writing.
pass
build_env.rules[rule_name] = rule
def memoized(deepcopy=True, keyfunc=None):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
Makes a defensive copy of the cached value each time it's returned,
so callers mutating the result do not poison the cache, unless
deepcopy is set to False.
'''
def decorator(func):
cache = {}
@functools.wraps(func)
def wrapped(*args, **kwargs):
# poor-man's cache key; the keyword args are sorted to avoid dictionary ordering
# issues (insertion and deletion order matters). Nested dictionaries will still cause
# cache misses.
if keyfunc is None:
cache_key = repr(args) + repr(sorted(kwargs.items()))
else:
cache_key = keyfunc(*args, **kwargs)
_sentinel = object()
value = cache.get(cache_key, _sentinel)
if value is _sentinel:
value = func(*args, **kwargs)
cache[cache_key] = value
# Return a copy to ensure callers mutating the result don't poison the cache.
if deepcopy:
value = copy.deepcopy(value)
return value
wrapped._cache = cache
return wrapped
return decorator
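# Illustrative usage sketch (not part of the original file); `load_manifest`
# is a hypothetical expensive function.
#
#   @memoized(deepcopy=False)
#   def load_manifest(path):
#       ...
#
# Repeated calls with the same arguments return the cached value; with the
# default deepcopy=True each caller receives a defensive copy, so mutating
# the result cannot poison the cache.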
def glob(includes, excludes=None, include_dotfiles=False, build_env=None, search_base=None,
allow_safe_import=None):
if excludes is None:
excludes = []
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `glob()` at the top-level of an included file.")
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(includes, basestring), \
"The first argument to glob() must be a list of strings."
assert not isinstance(excludes, basestring), \
"The excludes argument must be a list of strings."
if search_base is None:
search_base = Path(build_env.dirname)
mercurial_repo_info = load_mercurial_repo_info(build_env, search_base, allow_safe_import)
results = None
if not includes:
results = []
elif mercurial_repo_info is not None:
results = glob_mercurial_manifest(
includes, excludes, build_env.ignore_paths, include_dotfiles, search_base,
build_env.project_root, mercurial_repo_info)
elif build_env.watchman_client:
try:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client,
build_env.diagnostics,
build_env.watchman_glob_stat_results,
build_env.watchman_use_glob_generator)
except build_env.watchman_error as e:
build_env.diagnostics.add(
Diagnostic(
message=str(e),
level='error',
source='watchman'))
try:
build_env.watchman_client.close()
except:
pass
build_env.watchman_client = None
if results is None:
results = glob_internal(
includes,
excludes,
build_env.ignore_paths,
include_dotfiles,
search_base,
build_env.project_root)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) " +
"returned no results. (allow_empty_globs is set to false in the Buck " +
"configuration)").format(
includes=includes,
excludes=excludes,
include_dotfiles=include_dotfiles)
return results
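# Illustrative sketch (not part of the original file): how a build file would
# call glob(); the patterns are hypothetical. Inside this module build_env is
# passed explicitly, while in a build file it is injected for the caller.
#
#   srcs = glob(['src/**/*.java'], excludes=['src/**/*Test.java'],
#               build_env=build_env)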
def merge_maps(*header_maps):
result = {}
for header_map in header_maps:
for key in header_map:
if key in result and result[key] != header_map[key]:
assert False, 'Conflicting header files in header search paths. ' + \
'"%s" maps to both "%s" and "%s".' \
% (key, result[key], header_map[key])
result[key] = header_map[key]
return result
def single_subdir_glob(dirpath, glob_pattern, excludes=None, prefix=None, build_env=None,
search_base=None, allow_safe_import=None):
if excludes is None:
excludes = []
results = {}
files = glob([os.path.join(dirpath, glob_pattern)],
excludes=excludes,
build_env=build_env,
search_base=search_base,
allow_safe_import=allow_safe_import)
for f in files:
if dirpath:
key = f[len(dirpath) + 1:]
else:
key = f
if prefix:
# `f` is a string, but we need to create correct platform-specific Path.
# This method is called by
-
"""
self.__merge__(source, True)
def empty_copy(self):
"""
Performs copy of instance of Yang
:param: -
:return: instance copy (of Yang)
"""
return self.__class__(self._tag)
def full_copy(self):
"""
Performs deepcopy of instance of Yang
:param: -
:return: instance copy (of Yang)
"""
return copy.deepcopy(self)
def delete(self): # FIXME: if referred by a LeafRef?
"""
Remove element when ListYang and set to None when Leaf
:param: -
:return: -
"""
if self.get_parent() is not None:
if isinstance(self, ListedYang):
self.get_parent().remove(self)
else:
self.get_parent().__dict__[self.get_tag()] = None # FIXME: the tag is not necessarily a valid Python attribute name!
def set_referred(self, leaf_ref):
"""
Append in referred names of leafs referred (children of) by instance of Yang
:param leaf_ref: LeafRef
:return: -
"""
if leaf_ref not in self._referred:
self._referred.append(leaf_ref)
def unset_referred(self, leaf_ref):
"""
Append in referred names of leafs referred (children of) by instance of Yang
:param leaf_ref: LeafRef
:return: -
"""
if leaf_ref in self._referred:
self._referred.remove(leaf_ref)
def bind(self, relative=False):
"""
Binds all elements of self attributes
:param: relative: Boolean
:return: -
"""
if len(self._sorted_children) > 0:
for c in self._sorted_children:
if self.__dict__[c] is not None:
self.__dict__[c].bind(relative)
return
def _parse(self, parent, root):
"""
Abstract method to create classes from XML string
:param parent: Yang
:param root: ElementTree
:return: -
"""
for key, item in self.__dict__.items():
if key != "_parent":
if isinstance(item, Leaf):
item.parse(root)
elif isinstance(item, ListYang):
object_ = root.find(key)
itemClass = item.get_type()
while object_ is not None:
itemparsed = itemClass.parse(self, object_)
if "operation" in object_.attrib.keys():
itemparsed.set_operation(object_.attrib["operation"])
self.__dict__[key].add(itemparsed)
root.remove(object_)
object_ = root.find(key)
elif isinstance(item, Yang):
object_ = root.find(key)
if object_ is not None:
item._parse(self, object_)
if "operation" in object_.attrib.keys():
self.set_operation(object_.attrib["operation"])
def diff(self, target):
"""
Method to return an independent changeset between target and the instance (neither the instance nor the target is modified).
:param target: Yang
:return: Yang
"""
diff = copy.deepcopy(target)
diff.reduce(self)
return diff
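# Illustrative sketch (not part of the original source): computing a changeset
# between two configuration trees; `base` and `target` stand for two Yang
# instances of the same type.
#
#   change = base.diff(target)   # neither base nor target is modified
#   # `change` is a deep copy of `target` reduced by `base`, i.e. it keeps
#   # only the parts of `target` that differ from `base`.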
class Leaf(Yang):
"""
Class defining Leaf basis with attributes and methods
"""
def __init__(self, tag, parent=None):
super(Leaf, self).__init__(tag, parent)
self.data = None
""":type: ???"""
self.mandatory = False
""":type: boolean"""
self.units = ""
""":type: string"""
def get_as_text(self):
"""
Abstract method to get data as text
"""
pass
def get_value(self):
"""
Abstract method to get data value
"""
pass
def set_value(self, value):
"""
Abstract method to set data value
"""
pass
def get_units(self):
"""
Return self.units
:return: string
"""
return self.units
def set_units(self, units):
"""
Set self.units
:param units:
:return: -
"""
self.units = units
def get_mandatory(self):
"""
Return self.mandatory
:return: string
"""
return self.mandatory
def set_mandatory(self, mandatory):
"""
Set self.mandatory
:param mandatory:
:return: -
"""
self.mandatory = mandatory
def is_initialized(self):
"""
Overrides Yang method to check if data contains a value
:param: -
:return: boolean
"""
if self.data is not None:
return True
return False
def _et(self, parent, inherited=False, ordered=True):
"""
Overrides Yang method; returns parent with a subelement for the leaf's tag and its data as text if it is initialized
:param parent: ElementTree
:return: Element of ElementTree
"""
if self.is_initialized():
if type(self.data) is ET.Element:
parent.append(self.data)
else:
e_data = ET.SubElement(parent, self.get_tag())
e_data.text = self.get_as_text()+self.get_units()
return parent
def clear_data(self):
"""
Erases data defining it as None
:param: -
:return: -
"""
self.data = None
def delete(self):
"""
Erases data defining it as None
:param: -
:return: -
"""
self.data = None
def reduce(self, reference):
"""
Overrides Yang method; returns False if the data differs from the reference's data, if the _operation attributes mismatch, or if a 'delete' operation is present; otherwise returns True
:param reference: instance of Yang
:return: boolean
"""
if isinstance(self.data, ET.Element):
if (ET.tostring(self.data) != ET.tostring(reference.data)) \
or (self.get_operation() != reference.get_operation()) \
or self.contains_operation("delete"):
return False
else:
if (self.data != reference.data) \
or (self.get_operation() != reference.get_operation()) \
or self.contains_operation("delete"):
return False
return True
def __eq__(self, other):
"""
Check if other leaf has the same attributes and values, returns True if yes
:param other: instance
:return: boolean
"""
eq = True
for k, v in self.__dict__.items():
if k != "_parent":
eq = eq and (hasattr(other, k)) and (v == other.__dict__[k])
return eq
def patch(self, candidate):
# not sure if all attributes have to be overwritten, or just self.data
for k, v in self.__dict__.items():
if k != "_parent":
for k_, v_ in candidate.__dict__.items():
if k == k_:
self.__dict__[k] = candidate.__dict__[k]
break
class StringLeaf(Leaf):
"""
Class defining Leaf with string extensions
"""
def __init__(self, tag, parent=None, value=None, units="", mandatory=False): # FIXME: why having units for StringLeaf?
super(StringLeaf, self).__init__(tag, parent=parent)
self.set_value(value)
""":type: string"""
self.set_units(units)
""":type: string"""
self.set_mandatory(mandatory) # FIXME: Mandatory should be handled in the Leaf class!
""":type: boolean"""
def parse(self, root):
"""
Abstract method to create instance class StringLeaf from XML string
:param root: ElementTree
:return: -
"""
e_data = root.find(self.get_tag())
if e_data is not None:
if len(e_data._children) > 0:
for i in e_data.iter():
i.tail = None
e_data.text = None
self.data = e_data
else:
self.set_value(e_data.text)
root.remove(e_data)
def get_as_text(self):
"""
Returns data value as text
:param: -
:return: string
"""
if isinstance(self.data, ET.Element):
return ET.tostring(self.data, encoding="us-ascii", method="text")
return self.data
def get_value(self):
"""
Returns data value
:param: -
:return: string
"""
return self.data
def set_value(self, value):
"""
Sets data value
:param value: string
:return: -
"""
self.data = value
class IntLeaf(Leaf):
"""
Class defining Leaf with integer extensions (e.g., range)
"""
def __init__(self, tag, parent=None, value=None, int_range=[], units="", mandatory=False):
super(IntLeaf, self).__init__(tag, parent=parent)
self.int_range = int_range
self.data = None
""":type: int"""
if value is not None:
self.set_value(value)
self.set_units(units)
""":type: string"""
self.set_mandatory(mandatory)
""":type: boolean"""
def parse(self, root):
"""
Creates instance IntLeaf setting its value from XML string
:param root: ElementTree
:return: -
"""
def check_int(s):
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
e_data = root.find(self.get_tag())
if e_data is not None:
if len(e_data._children) > 0:
for i in e_data.iter():
i.tail = None
e_data.text = None
self.data = e_data # ?? don't know if need to replace as others
else:
if self.units != "":
for c in range(0, len(e_data.text)):
v = len(e_data.text)-c
st = e_data.text[:v]
if check_int(st):
self.set_value(st)
self.set_units(e_data.text[v:len(e_data.text)])
break
else:
self.set_value(e_data.text)
root.remove(e_data)
self.initialized = True
def get_as_text(self):
"""
Returns data value as text
:param: -
:return: string
"""
if isinstance(self.data, ET.Element):
return ET.tostring(self.data, encoding="us-ascii", method="text")
return str(self.data)
def get_value(self):
"""
Returns data value
:param: -
:return: int
"""
return self.data
def set_value(self, value):
"""
Sets data value as int
:param value: int
:return: -
"""
if type(value) is not int:
try:
value = int(value)
except (TypeError, ValueError):
print "Cannot cast to integer!"
if self.check_range(value):
self.data = value
else:
print "Out of range!"
def check_range(self, value):
"""
Check if value is inside range limits
:param value: int
:return: boolean
"""
for i in self.int_range:
if type(i) is tuple:
if value in range(i[0], i[1]):
return True
else:
if value == i:
return True
return False
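# Illustrative note (not part of the original source): with
# int_range=[(0, 100), 255] the check_range() above accepts 0..99 and exactly
# 255; tuples are expanded with range(), which excludes the upper bound.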
class Decimal64Leaf(Leaf):
"""
Class defining Leaf with decimal extensions (e.g., dec_range)
"""
def __init__(self, tag, parent=None, value=None, dec_range=[], fraction_digits=1, units="", mandatory=False):
super(Decimal64Leaf, self).__init__(tag, parent=parent)
self.dec_range = dec_range
self.fraction_digits = fraction_digits
self.data = None
""":type: Decimal"""
if value is not None:
self.set_value(value)
self.set_units(units)
""":type: string"""
self.set_mandatory(mandatory)
""":type: boolean"""
def parse(self, root):
"""
Abstract method to instance class Decimal64Leaf from XML string
:param root: ElementTree
:return: -
"""
e_data = root.find(self.get_tag())
if e_data is not None:
if len(e_data._children) > 0:
for i in e_data.iter():
i.tail = None
e_data.text = None
self.data = e_data # ?? don't know if need to replace as others
else:
self.set_value(e_data.text)
root.remove(e_data)
self.initialized = True
def get_as_text(self):
"""
Returns data value as text
:param: -
:return: string
"""
if isinstance(self.data, ET.Element):
return ET.tostring(self.data, encoding="us-ascii", method="text")
return str(self.data)
def get_value(self):
"""
Returns data value
:param: -
:return: decimal
"""
return self.data
def set_value(self, value):
"""
Sets data value as decimal
:param value: decimal
:return: -
"""
if type(value) is not Decimal:
try:
value = Decimal(value)
except (TypeError, ArithmeticError):
print "Cannot cast to Decimal!"
if self.check_range(value):
self.data = value
else:
print "Out of range!"
def check_range(self, value):
"""
Check if value is inside range limits
:param value: decimal
:return: boolean
"""
for i in self.dec_range:
if type(i) is
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
# Run the Testcases:
test = test_gbp_nsp_func()
if test.test_gbp_nsp_func_1() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_1')
if test.test_gbp_nsp_func_2() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_2')
if test.test_gbp_nsp_func_3() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_3')
test.cleanup()
utils_libs.report_results('test_gbp_nsp_func', 'test_results.txt')
sys.exit(1)
class test_gbp_nsp_func(object):
# Initialize logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_nsp_func.log'
commands.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_nsp_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_log.addHandler(hdlr)
_log.setLevel(logging.INFO)
_log.setLevel(logging.DEBUG)
def __init__(self):
"""
Init def
"""
self._log.info(
"\n## START OF GBP NETWORK_SERVICE_POLICY FUNCTIONALITY "
"TESTSUITE\n")
self.gbpcfg = config_libs.Gbp_Config()
self.gbpverify = verify_libs.Gbp_Verify()
self.nsp_name = 'demo_nsp'
def cleanup(self, tc_name=''):
if tc_name != '':
self._log.info('%s: FAILED' % (tc_name))
for obj in ['group', 'nsp']:
self.gbpcfg.gbp_del_all_anyobj(obj)
def test_gbp_nsp_func_1(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_1: TO CREATE/REFER/DELETE/VERIFY "
"NTK-SVC-POLICY in PTG\n"
"TEST_STEPS::\n"
"Create two NSPs one with type:ip-pool & ip-single, "
"value:self_subnet and self_subnet\n"
"Verify the attributes & values\n"
"Create two PTGs and reference each one of the above "
"NSP in one of the PTG\n"
"Verify the NSP reference in the PTGs\n"
"Delete the PTG and the NSP\n"
"Verify that NSP got deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create and Verify NSPolicy with type=ip_single & ip-single,
# name:self_subnet & self_subnet
self._log.info(
'\n## Step 1: Create NSPolicy with type=ip_single & '
'name:self_subnet ##\n')
nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_1',
network_service_params="type=ip_single,name=vip_ip1,"
"value=self_subnet")
if nsp1_uuid == 0:
self._log.info(
"\n## Step 1A: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_2',
network_service_params="type=ip_single,name=vip_ip2,"
"value=self_subnet")
if nsp2_uuid == 0:
self._log.info(
"\n## Step 1B: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
# Verify
self._log.info(
"\n## Step 2: Verify NSPolicies are successfully created")
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'nsp',
nsp1_uuid,
name='demo_nsp_1',
network_service_params='{"type": "ip_single", "name": '
'"vip_ip1", "value": '
'"self_subnet"}') == 0:
self._log.info(
"\n## Step 2A: Verify NSPolicy demo_nsp_1 with valued "
"attributes, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'nsp',
nsp2_uuid,
name='demo_nsp_2',
network_service_params='{"type": "ip_single", '
'"name": "vip_ip2", "value": '
'"self_subnet"}') == 0:
self._log.info(
"\n## Step 2A: Verify NSPolicy demo_nsp_2 with "
"valued attributes, Failed")
return 0
# Create two PTGs, each referencing one of the two NSPs
self._log.info(
"\n## Step 3: Create and Verify two PTGs each "
"referencing one of the two NSPs")
uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
if uuid == 0:
self._log.info(
"\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
return 0
else:
ptg1_uuid = uuid[0]
_uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_2', network_service_policy=nsp2_uuid)
if _uuid == 0:
self._log.info(
"\n## Step 3B: Create PTG using NSP demo_nsp_2,Failed")
return 0
else:
ptg2_uuid = _uuid[0]
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_1 seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg2_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_2 seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 3D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg2_uuid,
network_service_policy_id=nsp2_uuid) == 0:
self._log.info(
"\n## Step 3D: Verify PTG demo_ptg_2 references NSP "
"demo_nsp_2, Failed")
return 0
# Delete PTGs & NSPs
self._log.info(
"\n## Step 4: Delete and Verify two PTGs each referencing "
"one of the two NSPs")
ptg_list = [ptg1_uuid, ptg2_uuid]
nsp_list = [nsp1_uuid, nsp2_uuid]
for i in range(len(ptg_list)):
if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg_list[i]) == 0:
self._log.info(
"\n## Step 4A: Deletion of PTG %s, Failed" %
(ptg_list[i]))
return 0
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_list[i]) == 0:
self._log.info(
"\n## Step 4B: Deletion of NSP %s, Failed" %
(nsp_list[i]))
return 0
# Verify
for n in range(len(nsp_list)):
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp_list[n]) != 0:
self._log.info("\n## Step 4C: Verify deletion of NSP, Failed")
return 0
self._log.info("\n## TESTCASE_GBP_NSP_FUNC_1: PASSED")
return 1
def test_gbp_nsp_func_2(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_2: TO CREATE/UPDATE/DELETE/VERIFY a PTG "
"with NTK-SVC-POLICY with MULTIPLE PTGs\n"
"TEST_STEPS::\n"
"Create two NSPolicy Object with non-default params\n"
"Create PTG using one of the two NSPs\n"
"Verify the PTG and NSP are reflecting in each other in the DB\n"
"Update the PTG to use the second NSP\n"
"Verify the PTG and NSP are reflecting in each other in the DB\n"
"Update/Revert the PTG so that it refers to the initial NSP\n"
"Delete all PTG, NSP\n"
"Verify that PTG and NSPs got deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create NSPolicy with non-default attrs
self._log.info('\n## Step 1: Create two NSPolicy ##\n')
nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_1',
network_service_params="type=ip_single,name=vip_ip1,"
"value=self_subnet")
if nsp1_uuid == 0:
self._log.info(
"\n## Step 1A: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_2',
network_service_params="type=ip_single,name=vip_ip2,"
"value=self_subnet")
if nsp2_uuid == 0:
self._log.info(
"\n## Step 1B: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
# Create PTG, referencing one of the two NSPs
self._log.info(
"\n## Step 3: Create and Verify PTG referencing one of "
"the two NSPs")
uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
if uuid == 0:
self._log.info(
"\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
return 0
else:
ptg1_uuid = uuid[0]
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 3B: Verify PTG demo_ptg_1 seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_1 references "
"NSP demo_nsp_1, Failed")
return 0
self._log.info(
"\n## Step 4: Update and Verify the PTG with the second NSP")
# Update the PTG with second NSP and Verify
if self.gbpcfg.gbp_policy_cfg_all(
2, 'group', ptg1_uuid, network_service_policy=nsp2_uuid) == 0:
self._log.info(
"\n## Step 4A: Updating NSP attribute of PTG, Failed")
return 0
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) != 0:
self._log.info(
"\n## Step 4B: Verify PTG demo_ptg_1 is NOT seen "
"in NSP demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 4C: Verify PTG demo_ptg_1 is seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp2_uuid) == 0:
self._log.info(
"\n## Step 4D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_2, Failed")
return 0
self._log.info(
"\n## Step 5: Update/Revert the NSP attr of PTG and Verify")
# Update the PTG by reverting the NSP to its initial one
if self.gbpcfg.gbp_policy_cfg_all(
2, 'group', ptg1_uuid, network_service_policy=nsp1_uuid) == 0:
self._log.info(
"\n## Step 5A: Reverting the NSP attribute of PTG by "
"update action, Failed")
return 0
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) != 0:
self._log.info(
"\n## Step 5B: Verify PTG demo_ptg_1 is NOT seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 5C: Verify PTG demo_ptg_1 is seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 5D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_1, Failed")
return 0
self._log.info(
"\n## Step 6: Delete and Verify two PTGs each referencing "
"one of the two NSPs")
# Delete PTG & NSP
if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg1_uuid) == 0:
self._log.info("\n## Step 6A: Deletion of PTG,Failed")
return 0
nsp_list = [nsp1_uuid, nsp2_uuid]
for i in range(len(nsp_list)):
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_list[i]) == 0:
self._log.info(
"\n## Step 6B: Deletion of NSP %s, Failed" %
(nsp_list[i]))
return 0
# Verify
for n in range(len(nsp_list)):
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp_list[n]) != 0:
self._log.info("\n## Step 6C: Verify deletion of NSP, Failed")
return 0
self._log.info("\n## TESTCASE_GBP_NSP_FUNC_2: PASSED")
return 1
def test_gbp_nsp_func_3(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_3: TO CREATE/DELETE/VERIFY "
"NTK-SVC-POLICY while REFERENCED IN PTG\n"
"TEST_STEPS::\n"
"Create | |
"monistic",
"monition",
"monitive",
"monitory",
"monkeyed",
"monkhood",
"monoacid",
"monocarp",
"monocled",
"monocles",
"monocots",
"monocrat",
"monodies",
"monodist",
"monofils",
"monofuel",
"monogeny",
"monogerm",
"monoglot",
"monogyny",
"monohull",
"monokine",
"monologs",
"monology",
"monopode",
"monopody",
"monosome",
"monosomy",
"monotint",
"monsoons",
"monstera",
"montaged",
"montanes",
"monteros",
"monurons",
"moochers",
"mooching",
"moodiest",
"moonbows",
"mooncalf",
"moondust",
"mooneyes",
"moonfish",
"mooniest",
"moonless",
"moonlets",
"moonlike",
"moonport",
"moonsail",
"moonseed",
"moonsets",
"moonshot",
"moonward",
"moonwort",
"moorages",
"moorfowl",
"moorhens",
"mooriest",
"moorwort",
"mootness",
"mopboard",
"moperies",
"mopiness",
"mopingly",
"mopishly",
"moquette",
"morainal",
"moraines",
"morainic",
"moralise",
"moralism",
"moralist",
"moralize",
"morasses",
"moratory",
"morbific",
"morbilli",
"morceaux",
"mordancy",
"mordants",
"mordents",
"morelles",
"morellos",
"moreness",
"moresque",
"moroccos",
"moronism",
"moronity",
"morosely",
"morosity",
"morphias",
"morphins",
"morrions",
"morrises",
"morseled",
"mortared",
"morticed",
"mortices",
"mortised",
"mortiser",
"mortises",
"mortmain",
"mosasaur",
"moschate",
"moseying",
"moshavim",
"moshings",
"mossback",
"mossiest",
"mosslike",
"mostests",
"mothball",
"mothered",
"mothiest",
"mothlike",
"motional",
"motioner",
"motiving",
"motivity",
"motleyer",
"motliest",
"motorbus",
"motorcar",
"motordom",
"motorise",
"motorize",
"motorman",
"motormen",
"mottlers",
"mottling",
"mouching",
"mouchoir",
"moufflon",
"mouflons",
"moulages",
"mouldier",
"moulters",
"moulting",
"mounding",
"mounters",
"mousakas",
"mousiest",
"mousings",
"moussaka",
"moussing",
"mouthers",
"mouthier",
"mouthily",
"movables",
"moveably",
"moveless",
"moviedom",
"movieola",
"movingly",
"moviolas",
"mozettas",
"mozzetta",
"mozzette",
"mridanga",
"muchacho",
"muchness",
"mucidity",
"mucilage",
"mucinoid",
"mucinous",
"muckiest",
"muckluck",
"muckrake",
"muckworm",
"mucoidal",
"mucosity",
"mucrones",
"muddiest",
"muddlers",
"muddling",
"muddying",
"mudflaps",
"mudflats",
"mudflows",
"mudguard",
"mudholes",
"mudlarks",
"mudpacks",
"mudpuppy",
"mudrocks",
"mudrooms",
"mudsills",
"mudslide",
"mueddins",
"muezzins",
"muffling",
"muggiest",
"muggings",
"mugworts",
"mugwumps",
"mulattos",
"mulcting",
"muleteer",
"mulishly",
"mulleins",
"mullions",
"mullites",
"mullocks",
"mullocky",
"multicar",
"multiday",
"multifid",
"multijet",
"multiped",
"multiton",
"multiuse",
"multures",
"mumblers",
"mummying",
"mundungo",
"mungoose",
"muniment",
"munnions",
"muntings",
"muntjacs",
"muntjaks",
"muoniums",
"muraenid",
"muralist",
"muralled",
"murderee",
"muriated",
"muriates",
"muricate",
"murkiest",
"murmurer",
"murphies",
"murrains",
"murrelet",
"murrhine",
"murthers",
"muscadel",
"muscadet",
"muscatel",
"muscling",
"musettes",
"mushiest",
"musicked",
"musingly",
"musketry",
"muskiest",
"muskoxen",
"muskrats",
"muskroot",
"muspikes",
"musquash",
"mussiest",
"mustardy",
"mustelid",
"mustiest",
"mutative",
"mutchkin",
"muteness",
"muticous",
"mutilate",
"mutineer",
"mutinied",
"mutinies",
"mutining",
"mutinous",
"mutterer",
"muzziest",
"muzzlers",
"muzzling",
"myalgias",
"mycelial",
"mycelian",
"mycelium",
"myceloid",
"mycetoma",
"myelines",
"myelinic",
"myelitis",
"myelomas",
"mylonite",
"mynheers",
"myoblast",
"myogenic",
"myograph",
"myologic",
"myoscope",
"myositis",
"myosotes",
"myosotis",
"myotomes",
"myotonia",
"myotonic",
"myriapod",
"myriopod",
"myrmidon",
"mystagog",
"mysticly",
"mythiest",
"myxameba",
"myxedema",
"myxocyte",
"myxomata",
"nabobery",
"nabobess",
"nabobish",
"nabobism",
"nacelles",
"nacreous",
"naething",
"naggiest",
"nailfold",
"nailhead",
"nailsets",
"nainsook",
"naivetes",
"nakedest",
"nameable",
"nametags",
"nandinas",
"nankeens",
"nannyish",
"nanogram",
"nanowatt",
"napalmed",
"naperies",
"naphthas",
"naphthol",
"naphthyl",
"naphtols",
"napiform",
"nappiest",
"narceine",
"narceins",
"narcisms",
"narcissi",
"narcists",
"narcomas",
"narcoses",
"narcosis",
"narghile",
"nargileh",
"nargiles",
"narrater",
"narwhale",
"narwhals",
"nasalise",
"nasalism",
"nasality",
"nasalize",
"nascence",
"nascency",
"natality",
"natantly",
"natation",
"natatory",
"nathless",
"nativism",
"nativist",
"natriums",
"nattered",
"nattiest",
"naumachy",
"nauplial",
"nauplius",
"nauseant",
"nauseate",
"nautches",
"navettes",
"navicert",
"naysayer",
"nazified",
"nazifies",
"nearlier",
"nearside",
"neatened",
"neatherd",
"neatniks",
"nebbishy",
"nebulise",
"nebulize",
"nebulose",
"neckband",
"neckings",
"neckless",
"necklike",
"necrosed",
"necroses",
"needfuls",
"neediest",
"needlers",
"needling",
"negaters",
"negatons",
"negators",
"negatron",
"negligee",
"negliges",
"negroids",
"negronis",
"neighing",
"nektonic",
"nelumbos",
"neoliths",
"neologic",
"neomorph",
"neotenic",
"neoteric",
"neotypes",
"nepenthe",
"nephrism",
"nephrite",
"nephrons",
"nepotist",
"nerdiest",
"nereides",
"nerviest",
"nervines",
"nervings",
"nervules",
"nervures",
"nescient",
"nestable",
"nestlers",
"nestlike",
"netizens",
"netsukes",
"nettable",
"nettiest",
"nettings",
"nettlers",
"nettlier",
"nettling",
"neumatic",
"neurally",
"neuraxon",
"neurines",
"neuritic",
"neuromas",
"neuronic",
"neurosal",
"neuroses",
"neurulae",
"neurular",
"neurulas",
"neustons",
"newsbeat",
"newsgirl",
"newshawk",
"newsiest",
"newsless",
"newspeak",
"newwaver",
"niblicks",
"niceness",
"nickeled",
"nickelic",
"nickered",
"nickling",
"nicknack",
"nicotins",
"nictated",
"nictates",
"nidating",
"nidation",
"nidering",
"nidified",
"nidifies",
"niellist",
"nielloed",
"niffered",
"niftiest",
"nigellas",
"niggards",
"nigglers",
"nigglier",
"niggling",
"nighness",
"nighties",
"nightjar",
"nigrosin",
"nihilist",
"nihility",
"nilghais",
"nilghaus",
"nimblest",
"nimbused",
"nimbuses",
"ninebark",
"ninefold",
"ninepins",
"ninnyish",
"niobates",
"niobites",
"niobiums",
"nippiest",
"nirvanas",
"nirvanic",
"nitchies",
"niteries",
"nitinols",
"nitpicks",
"nitpicky",
"nitrated",
"nitrator",
"nitrided",
"nitrides",
"nitriles",
"nitrolic",
"nitrosyl",
"nittiest",
"nizamate",
"nobbiest",
"nobblers",
"nobbling",
"nobelium",
"noblemen",
"noblesse",
"nobodies",
"noctuids",
"noctules",
"noctuoid",
"nocturns",
"nodality",
"noddling",
"nodosity",
"nodulose",
"nodulous",
"noesises",
"noggings",
"noisette",
"noisiest",
"nomadism",
"nomarchs",
"nomarchy",
"nombrils",
"nominals",
"nomistic",
"nomogram",
"nomology",
"nonacids",
"nonactor",
"nonadult",
"nonagons",
"nonbanks",
"nonbasic",
"nonbeing",
"nonblack",
"nonbooks",
"nonbrand",
"nonclass",
"noncling",
"noncolas",
"noncolor",
"noncrime",
"nondairy",
"nondance",
"nonelect",
"nonelite",
"nonentry",
"nonequal",
"nonevent",
"nonfacts",
"nonfatty",
"nonfinal",
"nonfluid",
"nonfocal",
"nonglare",
"nongreen",
"nonguest",
"nonguilt",
"nonhardy",
"nonideal",
"nonimage",
"noninert",
"nonionic",
"nonissue",
"nonjuror",
"nonlabor",
"nonleafy",
"nonlegal",
"nonlevel",
"nonlives",
"nonloyal",
"nonlyric",
"nonmajor",
"nonmetal",
"nonmetro",
"nonmodal",
"nonmoney",
"nonmoral",
"nonmusic",
"nonnasal",
"nonnaval",
"nonnoble",
"nonnovel",
"nonobese",
"nonohmic",
"nonowner",
"nonpagan",
"nonpapal",
"nonparty",
"nonpasts",
"nonplays",
"nonpolar",
"nonprint",
"nonquota",
"nonrated",
"nonrigid",
"nonrival",
"nonroyal",
"nonrural",
"nonskeds",
"nonskier",
"nonsolar",
"nonsolid",
"nonstops",
"nonstory",
"nonstyle",
"nonsugar",
"nonsuits",
"nontaxes",
"nontidal",
"nontitle",
"nontonal",
"nontonic",
"nontrump",
"nontruth",
"nonuples",
"nonurban",
"nonusers",
"nonusing",
"nonvalid",
"nonviral",
"nonvital",
"nonvocal",
"nonvoter",
"nonwhite",
"nonwoody",
"nonwords",
"noodging",
"noodling",
"nooklike",
"noondays",
"noonings",
"noontide",
"noontime",
"nopalito",
"norlands",
"normande",
"normless",
"northers",
"nosebags",
"noseband",
"nosedive",
"nosedove",
"nosegays",
"noseless",
"noselike",
"nosiness",
"nosology",
"nostrums",
"notarize",
"notating",
"notchers",
"notecase",
"noteless",
"noticers",
"notornis",
"notturni",
"notturno",
"noumenal",
"noumenon",
"nounally",
"nounless",
"novalike",
"novelise",
"novelize",
"novercal",
"nowheres",
"nubbiest",
"nubblier",
"nubility",
"nubilose",
"nubilous",
"nucellar",
"nucellus",
"nucleate",
"nucleins",
"nucleoid",
"nucleole",
"nucleoli",
"nucleons",
"nuclidic",
"nudeness",
"nudicaul",
"nudities",
"nudnicks",
"nudzhing",
"nugatory",
"numberer",
"numbfish",
"numchuck",
"numerary",
"numerate",
"numinous",
"nummular",
"numskull",
"nunataks",
"nunchaku",
"nursings",
"nursling",
"nurtural",
"nurturer",
"nutating",
"nutation",
"nutbrown",
"nutcases",
"nutgalls",
"nutgrass",
"nuthouse",
"nutmeats",
"nutpicks",
"nutsedge",
"nutsiest",
"nuttiest",
"nuttings",
"nutwoods",
"nuzzlers",
"nuzzling",
"nylghais",
"nylghaus",
"nymphean",
"oafishly",
"oarlocks",
"oatcakes",
"oatmeals",
"obduracy",
"obdurate",
"obeahism",
"obeisant",
"obelised",
"obelises",
"obelisks",
"obelisms",
"obelized",
"obelizes",
"obeyable",
"oblately",
"oblation",
"oblatory",
"obligati",
"obligato",
"obligees",
"obligers",
"obligors",
"obliqued",
"obliques",
"oblongly",
"obscener",
"obscurer",
"obsesses",
"obsessor",
"obtainer",
"obtected",
"obtested",
"obtruded",
"obtruder",
"obtrudes",
"obtunded",
"obturate",
"obtusely",
"obtusest",
"obtusity",
"obverses",
"obverted",
"obviable",
"obviated",
"obviates",
"obviator",
"obvolute",
"ocarinas",
"occident",
"occipita",
"occiputs",
"occludes",
"occulted",
"occulter",
"occultly",
"oceanaut",
"ocellate",
"ochering",
"ocherous",
"ochreous",
"ocotillo",
"octagons",
"octangle",
"octanols",
"octantal",
"octarchy",
"octettes",
"octonary",
"octopods",
"octoroon",
"octupled",
"octuples",
"octuplet",
"octuplex",
"ocularly",
"oculists",
"odalisks",
"oddballs",
"oddments",
"odiously",
"odograph",
"odometry",
"odonates",
"odontoid",
"odorants",
"odorized",
"odorizes",
"odourful",
"odysseys",
"oecology",
"oedemata",
"oedipean",
"oeillade",
"oenology",
"oenomels",
"oersteds",
"oestrins",
"oestriol",
"oestrone",
"oestrous",
"oestrums",
"offbeats",
"offcasts",
"offerers",
"offishly",
"offloads",
"offprint",
"offramps",
"offstage",
"offtrack",
"oftenest",
"ofttimes",
"oghamist",
"ogreisms",
"ogresses",
"ogrishly",
"ohmmeter",
"oilbirds",
"oilcamps",
"oilcloth",
"oilholes",
"oiliness",
"oilpaper",
"oilproof",
"oilskins",
"oilstone",
"oiltight",
"oinology",
"oinomels",
"oiticica",
"okeydoke",
"oldsquaw",
"oldsters",
"oldstyle",
"oldwives",
"oleaster",
"olefines",
"olefinic",
"olestras",
"olibanum",
"olicooks",
"oligarch",
"oligomer",
"oliguria",
"olivines",
"olivinic",
"ologists",
"olorosos",
"omentums",
"omicrons",
"omikrons",
"omissive",
"omitters",
"omniarch",
"omniform",
"omnimode",
"omnivora",
"omnivore",
"omophagy",
"omphalos",
"onanisms",
"onanists",
"oncidium",
"ondogram",
"oneriest",
"onloaded",
"onlooker",
"onrushes",
"onstream",
"oogamete",
"oogamies",
"oogamous",
"oogenies",
"oogonial",
"oogonium",
"oolachan",
"oologies",
"oologist",
"oomiacks",
"oompahed",
"oophytes",
"oophytic",
"oosperms",
"oosphere",
"oospores",
"oosporic",
"oothecae",
"oothecal",
"ooziness",
"opalesce",
"opalines",
"opaquely",
"opaquest",
"opaquing",
"openable",
"opencast",
"openwork",
"operably",
"operants",
"opercele",
"opercula",
"opercule",
"ophidian",
"opiating",
"opiumism",
"opossums",
"oppidans",
"oppilant",
"oppilate",
"opposers",
"oppugned",
"oppugner",
"opsonify",
"opsonins",
"opsonize",
"optative",
"opticist",
"optimums",
"opulency",
"opuntias",
"opuscula",
"opuscule",
"oquassas",
"oracular",
"oralisms",
"oralists",
"orangery",
"orangier",
"orangish",
"orations",
"oratress",
"orbiters",
"orchises",
"orchitic",
"orchitis",
"orcinols",
"ordainer",
"orderers",
"ordinals",
"ordinand",
"ordurous",
"orective",
"oreganos",
"oreodont",
"organdie",
"organons",
"organums",
"organzas",
"orgasmed",
"orgastic",
"orgiasts",
"orgulous",
"oribatid",
"orienter",
"origamis",
"origanum",
"orinasal",
"ornately",
"ornerier",
"ornithes",
"ornithic",
"orogenic",
"orometer",
"orphical",
"orphisms",
"orphreys",
"orpiment",
"orreries",
"orthicon",
"orthoepy",
"orthoses",
"orthosis",
"ortolans",
"oscinine",
"oscitant",
"osculant",
"osculate",
"osmosing",
"osmundas",
"osnaburg",
"ossature",
"ossetras",
"ossicles",
"ossified",
"ossifier",
"ossifies",
"osteitic",
"osteitis",
"osteoids",
"osteomas",
"osteoses",
"osteosis",
"ostinati",
"ostinato",
"ostiolar",
"ostioles",
"ostmarks",
"ostomate",
"ostomies",
"ostracod",
"ostracon",
"ostrakon",
"otalgias",
"otalgies",
"otiosely",
"otiosity",
"otitides",
"otitises",
"otocysts",
"otoliths",
"otoscope",
"otoscopy",
"ototoxic",
"ouabains",
"oughting",
"ouguiyas",
"ouistiti",
"outacted",
"outadded",
"outargue",
"outasked",
"outbacks",
"outbaked",
"outbakes",
"outbarks",
"outbawls",
"outbeams",
"outbitch",
"outblaze",
"outbleat",
"outbless",
"outbloom",
"outbluff",
"outblush",
"outboast",
"outboxed",
"outboxes",
"outbrags",
"outbrave",
"outbrawl",
"outbreed",
"outbribe",
"outbuild",
"outbuilt",
"outbulge",
"outbulks",
"outbully",
"outburns",
"outburnt",
"outcalls",
"outcaper",
"outcaste",
"outcatch",
"outcavil",
"outcharm",
"outcheat",
"outchide",
"outclass",
"outclimb",
"outclomb",
"outcoach",
"outcooks",
"outcount",
"outcrawl",
"outcried",
"outcries",
"outcross",
"outcrowd",
"outcrows",
"outcurse",
"outcurve",
"outdance",
"outdared",
"outdares",
"outdates",
"outdodge",
"outdoers",
"outdoing",
"outdrags",
"outdrank",
"outdrawn",
"outdraws",
"outdream",
"outdress",
"outdrink",
"outdrive",
"outdrops",
"outdrove",
"outdrunk",
"outduels",
"outearns",
"outeaten",
"outfable",
"outfaced",
"outfaces",
"outfasts",
"outfawns",
"outfeast",
"outfeels",
"outfence",
"outfight",
"outfinds",
"outfired",
"outfires",
"outflank",
"outflies",
"outfloat",
"outflown",
"outfools",
"outfoots",
"outfound",
"outfoxed",
"outfoxes",
"outfrown",
"outgains",
"outgazed",
"outgazes",
"outgiven",
"outgives",
"outglare",
"outgleam",
"outglows",
"outgnawn",
"outgnaws",
"outgrins",
"outgross",
"outgroup",
"outgrows",
"outguess",
"outguide",
"outhauls",
"outheard",
"outhears",
"outhomer",
"outhowls",
"outhumor",
"outhunts",
"outjumps",
"outkeeps",
"outkicks",
"outkills",
"outlands",
"outlasts",
"outlaugh",
"outlawry",
"outleads",
"outleaps",
"outleapt",
"outlearn",
"outliver",
"outlives",
"outloved",
"outloves",
"outmarch",
"outmatch",
"outmodes",
"outmoved",
"outmoves",
"outpaces",
"outpaint",
"outpitch",
"outplace",
"outplans",
"outplays",
"outplods",
"outplots",
"outpoint",
"outpolls",
"outports",
"outpours",
"outpower",
"outprays",
"outpreen",
"outpress",
"outprice",
"outpulls",
"outpunch",
"outpupil",
"outquote",
"outraced",
"outraces",
"outraise",
"outrance",
"outrange",
"outranks",
"outrated",
"outrates",
"outraved",
"outraves",
"outreads",
"outrider",
"outrides",
"outrings",
"outrival",
"outroars",
"outrocks",
"outrolls",
"outroots",
"outrowed",
"outsails",
"outsavor",
"outscold",
"outscoop",
"outscore",
"outscorn",
"outsells",
"outserts",
"outserve",
"outshame",
"outshine",
"outshone",
"outshoot",
"outshout",
"outsides",
"outsight",
"outsings",
"outsized",
"outsizes",
"outskate",
"outskirt",
"outsleep",
"outslept",
"outslick",
"outsmell",
"outsmelt",
"outsmile",
"outsmoke",
"outsnore",
"outsoars",
"outsoles",
"outspans",
"outspeak",
"outspeed",
"outspell",
"outspelt",
"outspend",
"outspent",
"outspoke",
"outstand",
"outstare",
"outstart",
"outstate",
"outstays",
"outsteer",
"outstood",
"outstrip",
"outstudy",
| |
# rsqueakvm/test/test_squeakimage.py
# -*- coding: utf-8 -*-
import pytest
import py
import StringIO
from struct import pack
from rsqueakvm import squeakimage, error
from rsqueakvm.model.character import W_Character
from rsqueakvm.model.compiled_methods import W_CompiledMethod
from rsqueakvm.model.numeric import W_SmallInteger
from rsqueakvm.model.pointers import W_PointersObject
from rsqueakvm.model.variable import W_BytesObject, W_WordsObject
from rsqueakvm.util.stream import chrs2int, chrs2long, swapped_chrs2long
from .util import create_space
# ----- helpers ----------------------------------------------
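# ints2str/longs2str pack their integer arguments as big-endian 32-bit/64-bit
# words, e.g. ints2str(2) == "\x00\x00\x00\x02".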
def ints2str(*ints):
return pack(">" + "I" * len(ints), *ints)
def longs2str(*longs):
return pack(">" + "Q" * len(longs), *longs)
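" + "Q"">
# joinbits packs `values` into adjacent bit fields of the given `lengths`, with
# the first value in the least significant field, e.g.
# joinbits([4, 3, 2, 1], [8, 8, 8, 8]) == 0x01020304.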
def joinbits(values, lengths):
result = 0
for each, length in reversed(zip(values, lengths)):
result = result << length
result += each
return result
def imagestream_mock(string):
f = StringIO.StringIO(string)
return squeakimage.Stream(inputfile=f)
def imagereader_mock(string):
stream = imagestream_mock(string)
r = squeakimage.ImageReader(create_space(), stream)
f = r.choose_reader_strategy
def fun():
rstrat = f()
rstrat.special_g_objects = [squeakimage.GenericObject()]
return rstrat
r.choose_reader_strategy = fun
return r
@pytest.fixture
def space():
return create_space()
SIMPLE_VERSION_HEADER = pack(">i", 6502)
SIMPLE_VERSION_HEADER_LE = pack("<i", 6502)
SPUR_VERSION_HEADER = pack(">i", 6521)
SPUR_VERSION_HEADER_LE = pack("<i", 6521)
# ----- tests ------------------------------------------------
def test_chrs2int():
assert 1 == chrs2int('\x00\x00\x00\x01')
assert -1 == chrs2int('\xFF\xFF\xFF\xFF')
def test_chrs2long():
assert 1 == chrs2long('\x00\x00\x00\x00\x00\x00\x00\x01')
assert -1 == chrs2long('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF')
assert 68002 == chrs2long(pack(">Q", 68002))
assert 68002 == swapped_chrs2long(pack("<Q", 68002))
def test_stream():
stream = imagestream_mock(SIMPLE_VERSION_HEADER)
n = stream.peek()
assert n == 6502
n = stream.next()
assert n == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_little_endian():
stream = imagestream_mock('\x66\x19\x00\x00')
stream.big_endian = False
first = stream.next()
assert first == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_many():
stream = imagestream_mock(SIMPLE_VERSION_HEADER * 5)
for each in range(5):
first = stream.peek()
assert first == 6502
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_skipbytes():
stream = imagestream_mock('\xFF\xFF\xFF' + SIMPLE_VERSION_HEADER)
stream.skipbytes(3)
value = stream.next()
assert value == 6502
py.test.raises(IndexError, lambda: stream.next())
def test_stream_count():
stream = imagestream_mock('\xFF' * 20)
stream.next()
stream.next()
stream.reset_count()
assert stream.count == 0
stream.next()
assert stream.count == 4
stream.next()
assert stream.count == 8
def test_stream_next_short():
s = imagestream_mock('\x01\x02\x03\x04\x05\x06\x07\x08')
s.be_32bit()
assert s.next_short() == 0x0102
assert s.next_short() == 0x0304
assert s.next() == 0x05060708
def test_stream_next_short_64b(monkeypatch):
from rsqueakvm.util import system
monkeypatch.setattr(system, 'IS_64BIT', True)
s = imagestream_mock('\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c')
s.be_64bit()
assert s.next_short() == 0x0102
assert s.next_short() == 0x0304
assert s.next() == 0x05060708090a0b0c
def test_stream_next_qword():
s = imagestream_mock('\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c')
s.be_32bit()
assert s.next_qword() == 0x0102030405060708
assert s.next() == 0x090a0b0c
def test_stream_next_qword_is_unsigned():
s = imagestream_mock('\xFF' * 8)
max_uint64 = s.next_qword()
assert max_uint64 == 2**64 - 1
assert max_uint64 > 0
def test_simple_joinbits():
assert 0x01010101 == joinbits(([1] * 4), [8,8,8,8])
assert 0xFfFfFfFf == joinbits([255] * 4, [8,8,8,8])
def test_fancy_joinbits():
assert 0x01020304 == joinbits([4,3,2,1], [8,8,8,8])
assert 0x3Ff == joinbits([1,3,7,15], [1,2,3,4])
def test_ints2str():
assert "\x00\x00\x00\x02" == ints2str(2)
assert SIMPLE_VERSION_HEADER + '\x00\x00\x00\x02' == ints2str(6502,2)
def test_freeblock():
r = imagereader_mock(SIMPLE_VERSION_HEADER + "\x00\x00\x00\x02")
r.read_version()
py.test.raises(error.CorruptImageError, lambda: r.readerStrategy.read_object())
def test_1wordobjectheader():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(SIMPLE_VERSION_HEADER + s)
r.read_version()
l = len(SIMPLE_VERSION_HEADER)
assert (squeakimage.ImageChunk(1, 2, 3, 4), 0 + l) == r.readerStrategy.read_1wordobjectheader()
def test_1wordobjectheader2():
s = ints2str(joinbits([3, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(SIMPLE_VERSION_HEADER + (s * 3))
r.read_version()
l = len(SIMPLE_VERSION_HEADER)
assert (squeakimage.ImageChunk(1, 2, 3, 4), 0 + l) == r.readerStrategy.read_1wordobjectheader()
assert (squeakimage.ImageChunk(1, 2, 3, 4), 4 + l) == r.readerStrategy.read_1wordobjectheader()
assert (squeakimage.ImageChunk(1, 2, 3, 4), 8 + l) == r.readerStrategy.read_1wordobjectheader()
def test_2wordobjectheader():
s = ints2str(4200 + 1, joinbits([1, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(SIMPLE_VERSION_HEADER + s)
r.read_version()
l = len(SIMPLE_VERSION_HEADER)
assert (squeakimage.ImageChunk(1, 2, 4200, 4), 4 + l) == r.readerStrategy.read_2wordobjectheader()
def test_3wordobjectheader():
s = ints2str(1701 << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(SIMPLE_VERSION_HEADER + s)
r.read_version()
l = len(SIMPLE_VERSION_HEADER)
assert (squeakimage.ImageChunk(1701, 2, 4200, 4), 8 + l) == r.readerStrategy.read_3wordobjectheader()
def test_read3wordheaderobject():
size = 42
s = ints2str(size << 2, 4200 + 0, joinbits([0, 1, 2, 3, 4], [2,6,4,5,12]))
r = imagereader_mock(SIMPLE_VERSION_HEADER + s + SIMPLE_VERSION_HEADER * (size - 1))
r.read_version()
l = len(SIMPLE_VERSION_HEADER)
chunk, pos = r.readerStrategy.read_object()
chunk0 = squeakimage.ImageChunk(size, 2, 4200, 4)
chunk0.data = [6502] * (size - 1)
assert pos == 8 + l
assert chunk0 == chunk
def test_object_format_v3(monkeypatch):
g_class_mock = squeakimage.GenericObject()
from rpython.rlib import objectmodel
from rsqueakvm.storage_classes import ClassShadow
w_class_mock = objectmodel.instantiate(W_PointersObject)
w_class_mock.strategy = ClassShadow(None, w_class_mock, 3, None)
w_class_mock.strategy._instance_size = 0
g_class_mock.w_object = w_class_mock
def assert_w_object_type(format, expected_type, length=0,
compact_class_index=0, body="", assert_is_weak=False):
objbytes = ints2str(joinbits([3, length + 1, format, compact_class_index, 0],
[2,6,4,5,12])) + body
r = imagereader_mock(SIMPLE_VERSION_HEADER + objbytes)
r.read_version()
monkeypatch.setattr(r.readerStrategy, 'g_class_of',
lambda chunk: g_class_mock)
chunk, pos = r.readerStrategy.read_object()
g_object = squeakimage.GenericObject()
g_object.initialize(chunk, r.readerStrategy, r.space)
w_object = g_object.init_w_object(r.space)
g_object.fillin(r.space)
g_object.fillin_weak(r.space)
assert w_object is g_object.w_object
assert isinstance(w_object, expected_type)
if assert_is_weak:
assert w_object.is_weak()
return w_object, r.space
""" 0 no fields
1 fixed fields only (all containing pointers)
2 indexable fields only (all containing pointers)
3 both fixed and indexable fields (all containing pointers)
4 both fixed and indexable weak fields (all containing pointers).
5 unused
6 indexable word fields only (no pointers)
7 indexable long (64-bit) fields (only in 64-bit images)
8-11 indexable byte fields only (no pointers) (low 2 bits are low 2 bits of size)
12-15 compiled methods:
# of literal oops specified in method header,
followed by indexable bytes (same interpretation of low 2 bits as above)
"""
w_obj, _ = assert_w_object_type(0, W_PointersObject)
assert w_obj.size() == 0
body_42_and_1 = ints2str(joinbits([1, 42], [1, 31]), joinbits([1, 1], [1, 31]))
w_obj, space = assert_w_object_type(1, W_PointersObject, length=2, body=body_42_and_1)
assert w_obj.size() == 2
assert space.unwrap_int(w_obj.fetch(space, 0)) == 42
assert space.unwrap_int(w_obj.fetch(space, 1)) == 1
w_obj, space = assert_w_object_type(2, W_PointersObject, length=2,
body=body_42_and_1)
assert w_obj.size() == 2
assert space.unwrap_int(w_obj.fetch(space, 0)) == 42
assert space.unwrap_int(w_obj.fetch(space, 1)) == 1
assert_w_object_type(3, W_PointersObject, length=2, body=body_42_and_1)
w_obj, _ = assert_w_object_type(4, W_PointersObject, length=2,
body=body_42_and_1)
assert w_obj.is_weak()
assert w_obj.size() == 2
assert space.unwrap_int(w_obj.fetch(space, 0)) == 42
assert space.unwrap_int(w_obj.fetch(space, 1)) == 1
w_obj, space = assert_w_object_type(6, W_WordsObject, length=2,
body=body_42_and_1)
assert w_obj.size() == 2
assert w_obj.getword(0) == 42 << 1 | 1
assert w_obj.getword(1) == 1 << 1 | 1
w_obj, space = assert_w_object_type(8, W_BytesObject, length=2,
body=body_42_and_1)
assert w_obj.size() == 8 # 2 * 32 bit == 8 * 8 bit
assert space.unwrap_string(w_obj) == body_42_and_1
w_obj, space = assert_w_object_type(12, W_CompiledMethod, length=2,
body=body_42_and_1)
assert w_obj.size() == 8
def test_read_normal_spur_header():
# Array of pointers
n_slots = 42
objbytes = ints2str(joinbits([48, 0, n_slots], [22, 2, 8]),
joinbits([10, 0, 2, 0], [22, 2, 5, 3])) + ints2str(0) * n_slots
r = imagereader_mock(SPUR_VERSION_HEADER + objbytes)
stream = r.stream
r.read_version()
r.readerStrategy.oldbaseaddress = 0
stream.reset_count()
actualChunk, pos = r.readerStrategy.read_object()
expectedChunk = squeakimage.ImageChunk(size=n_slots, format=2, classid=10,
hash=48, data=[0] * n_slots)
assert expectedChunk == actualChunk
assert pos == 0
def test_read_long_spur_header():
n_slots = 3000
objbytes = longs2str(joinbits([n_slots, 255], [56, 8])) + ints2str(
joinbits([55, 0, 255], [22, 2, 8]),
joinbits([10, 0, 2, 0], [22, 2, 5, 3])) + ints2str(0) * n_slots
r = imagereader_mock(SPUR_VERSION_HEADER + objbytes)
stream = r.stream
r.read_version()
r.readerStrategy.oldbaseaddress = 0
stream.reset_count()
actualChunk, pos = r.readerStrategy.read_object()
expectedChunk = squeakimage.ImageChunk(size=n_slots, format=2, classid=10,
hash=55, data=[0] * n_slots)
assert expectedChunk == actualChunk
assert pos == 8
def test_object_format_spur(monkeypatch):
g_class_mock = squeakimage.GenericObject()
from rpython.rlib import objectmodel
from rsqueakvm.storage_classes import ClassShadow
w_class_mock = objectmodel.instantiate(W_PointersObject)
w_class_mock.strategy = ClassShadow(None, w_class_mock, 3, None)
w_class_mock.strategy._instance_size = 0
g_class_mock.w_object = w_class_mock
def assert_w_object_type(format, expected_type, length=0, classid=0, body=""):
objbytes = ints2str(joinbits([0, 0, length], [22, 2, 8]),
joinbits([classid, 0, format, 0], [22, 2, 5, 3])) + body
r = imagereader_mock(SPUR_VERSION_HEADER + objbytes)
stream = r.stream
r.read_version()
monkeypatch.setattr(r.readerStrategy, 'g_class_of',
lambda chunk: g_class_mock)
stream.reset_count()
chunk, pos = r.readerStrategy.read_object()
g_object = squeakimage.GenericObject()
g_object.initialize(chunk, r.readerStrategy, r.space)
w_object = g_object.init_w_object(r.space)
g_object.fillin(r.space)
g_object.fillin_weak(r.space)
assert w_object is g_object.w_object
assert isinstance(w_object, expected_type)
return w_object, r.space
# 0 zero sized object
w_obj, space = assert_w_object_type(0, W_PointersObject, body=("\x00"*3+"\x01")*2)
# 1 fixed-size object with inst-vars
body_1_to_9 = ints2str(*(joinbits([1, n], [1, 31]) for n in range(1, 10)))
w_obj, space = assert_w_object_type(1, W_PointersObject, length=2, body=body_1_to_9)
assert w_obj.size() == 2
assert w_obj.fetch(space, 0) == space.wrap_int(1)
assert w_obj.fetch(space, 1) == space.wrap_int(2)
# 2 variable sized object without inst vars
w_obj, space = assert_w_object_type(2, W_PointersObject, length=2, body=body_1_to_9)
assert w_obj.size() == 2
assert w_obj.fetch(space, 0) == space.wrap_int(1)
assert w_obj.fetch(space, 1) == space.wrap_int(2)
# 3 variable sized object with inst vars
w_obj, space = assert_w_object_type(3, W_PointersObject, length=2, body=body_1_to_9)
assert w_obj.size() == 2
assert w_obj.fetch(space, 0) == space.wrap_int(1)
assert w_obj.fetch(space, 1) == space.wrap_int(2)
# 4 weak variable sized object with inst vars
w_obj, space = assert_w_object_type(4, W_PointersObject, length=2, body=body_1_to_9)
assert w_obj.is_weak()
assert w_obj.size() == 2
assert w_obj.fetch(space, 0) == space.wrap_int(1)
assert w_obj.fetch(space, 1) == space.wrap_int(2)
# 5 weak fixed sized object with | |
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt._compat import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
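# A minimal usage sketch of the SMinion class defined below (assuming a minion
# config that can be loaded and, for a remote file_client, a reachable master):
#
#     import salt.config
#     opts = salt.config.minion_config('/etc/salt/minion')
#     sminion = SMinion(opts)
#     sminion.functions['test.ping']()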
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
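# Returns a dict such as {'master_ip': '192.0.2.10',
# 'master_uri': 'tcp://192.0.2.10:4506'}, built from the configured
# master_port (the address shown here is illustrative).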
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
err = 'Master address: {0} could not be resolved. Invalid or unresolvable address.'.format(
opts.get('master', 'Unknown'))
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def get_proc_dir(cachedir):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
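# For example, for a target function that accepts **kwargs, an input of
# args=['foo', {'__kwarg__': True, 'bar': 1}] yields _args == ['foo'] and
# _kwargs == {'bar': 1}.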
argspec = salt.utils.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_input
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in arg.items():
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
else:
_args.append(arg)
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in data.items():
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules(initial_load=True)
else:
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
self.function_errors = self.functions['_errors']
self.functions.pop('_errors') # Keep the funcs clean
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
funcs=self.functions
).compile_pillar()
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
def _init_context_and_poller(self):
self.context = zmq.Context()
self.poller = zmq.Poller()
def _prepare_minion_event_system(self):
# Prepare the minion event system
#
# Start with the publish socket
self._init_context_and_poller()
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
# Only use the first 10 chars to keep longer hashes from exceeding the
# max socket path length.
id_hash = hash_type(self.opts['id']).hexdigest()[:10]
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
if os.path.exists(epub_sock_path):
os.unlink(epub_sock_path)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
if os.path.exists(epull_sock_path):
os.unlink(epull_sock_path)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Check to make sure the sock_dir is available, create if not
default_minion_sock_dir = os.path.join(
salt.syspaths.SOCK_DIR,
'minion'
)
minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
if not os.path.isdir(minion_sock_dir):
# Let's try to create the directory defined on the configuration
# file
try:
os.makedirs(minion_sock_dir, 0o755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's not fail yet and try using the default path
if minion_sock_dir == default_minion_sock_dir:
# We're already trying the default system path, stop now!
raise
if not os.path.isdir(default_minion_sock_dir):
try:
os.makedirs(default_minion_sock_dir, 0o755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's stop at this stage
raise
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Securely bind the event sockets
if self.opts.get('ipc_mode', '') != 'tcp':
old_umask = os.umask(0o177)
try:
log.info('Starting pub socket on {0}'.format(epub_uri))
self.epub_sock.bind(epub_uri)
log.info('Starting pull socket on {0}'.format(epull_uri))
self.epull_sock.bind(epull_uri)
finally:
if self.opts.get('ipc_mode', '') != 'tcp':
os.umask(old_umask)
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(
self.opts,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embedding for state representation learning."""
import typing
from dm_env import specs as dm_env_specs
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from rl_repr.batch_rl import keras_utils
from rl_repr.batch_rl import policies
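# Polyak averaging of network weights: target_net <- tau * net + (1 - tau) * target_net.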
def soft_update(net, target_net, tau=0.005):
for var, target_var in zip(net.variables, target_net.variables):
new_value = var * tau + target_var * (1 - tau)
target_var.assign(new_value)
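# Huber-style penalty scaled by 1/kappa: quadratic for |x| <= kappa, linear beyond.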
def huber(x, kappa=0.1):
return (0.5 * tf.square(x) * tf.cast(tf.abs(x) <= kappa, x.dtype) +
kappa * (tf.abs(x) - 0.5 * kappa) * tf.cast(tf.abs(x) > kappa, x.dtype)
) / kappa
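# KL divergence between diagonal Gaussians N(mean1, exp(logvar1)) and
# N(mean2, exp(logvar2)), summed over the last axis; the second distribution
# defaults to a standard normal.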
def gaussian_kl(mean1, logvar1, mean2=None, logvar2=None):
if mean2 is None:
mean2 = tf.zeros_like(mean1)
if logvar2 is None:
logvar2 = tf.zeros_like(logvar1)
kl = -0.5 * tf.reduce_sum(
1.0 + logvar1 - logvar2
- tf.exp(-1 * logvar2) * tf.pow(mean1 - mean2, 2)
- tf.exp(logvar1 - logvar2), -1)
return kl
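# KL divergence between categorical distributions given as probability vectors;
# when probs2 is omitted it defaults to a uniform distribution (assuming each
# probs1 vector is normalized).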
def categorical_kl(probs1, probs2=None):
if probs2 is None:
probs2 = tf.ones_like(probs1) * tf.reduce_sum(probs1) / tf.reduce_sum(tf.ones_like(probs1))
kl = tf.reduce_sum(
probs1 * (-tf.math.log(1e-8 + probs2) + tf.math.log(1e-8 + probs1)), -1)
return kl
def transformer_module(query,
key,
value,
embedding_dim=256,
num_heads=4,
key_dim=128,
ff_dim=256,
output_dim=None,
last_layer=False,
attention_mask=None):
"""From https://keras.io/examples/nlp/masked_language_modeling/"""
# Multi headed self-attention
attention_output = tf.keras.layers.MultiHeadAttention(
num_heads=num_heads, key_dim=key_dim)(
query, key, value, attention_mask=attention_mask)
attention_output = tf.keras.layers.Dropout(0.1)(
attention_output
)
attention_output = tf.keras.layers.LayerNormalization(
epsilon=1e-6,
)(query + attention_output)
# Feed-forward layer
ffn = tf.keras.Sequential(
[
tf.keras.layers.Dense(ff_dim, activation="relu"),
tf.keras.layers.Dense(output_dim or embedding_dim),
],
)
ffn_output = ffn(attention_output)
if last_layer:
sequence_output = ffn_output
else:
ffn_output = tf.keras.layers.Dropout(0.1)(
ffn_output
)
sequence_output = tf.keras.layers.LayerNormalization(
epsilon=1e-6
)(attention_output + ffn_output)
return sequence_output
def transformer(embeddings,
num_layers=1,
embedding_dim=256,
num_heads=4,
key_dim=128,
ff_dim=256,
output_dim=None,
attention_mask=None):
output_dim = output_dim or embedding_dim
encoder_output = embeddings
for i in range(num_layers):
last_layer = i == num_layers - 1
encoder_output = transformer_module(
encoder_output,
encoder_output,
encoder_output,
embedding_dim=embedding_dim,
num_heads=num_heads,
key_dim=key_dim,
ff_dim=ff_dim,
output_dim=output_dim if last_layer else None,
last_layer=last_layer,
attention_mask=attention_mask)
return encoder_output
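# Builds an MLP whose final layer appends a constant-one feature and applies a
# parameter-free LayerNormalization, i.e. outputs are normalized to zero mean
# and unit variance across the last axis.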
def create_mlp(
input_dim,
output_dim,
hidden_dims = (256, 256)):
relu_gain = tf.math.sqrt(2.0)
relu_orthogonal = tf.keras.initializers.Orthogonal(relu_gain)
near_zero_orthogonal = tf.keras.initializers.Orthogonal(1e-2)
layers = []
for hidden_dim in hidden_dims:
layers.append(
tf.keras.layers.Dense(
hidden_dim,
activation=tf.nn.relu,
kernel_initializer=relu_orthogonal))
if isinstance(input_dim, int):
input_shape = (input_dim,)
else:
input_shape = input_dim
inputs = tf.keras.Input(shape=input_shape)
outputs = tf.keras.Sequential(
layers + [tf.keras.layers.Dense(
output_dim - 1, kernel_initializer=near_zero_orthogonal),
tf.keras.layers.Lambda(
lambda x: tf.concat([x, tf.ones_like(x[Ellipsis, :1])], -1)),
tf.keras.layers.LayerNormalization(
epsilon=0.0, center=False, scale=False)]
)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
class EmbedNet(tf.keras.Model):
"""An embed network."""
def __init__(self,
state_dim,
embedding_dim = 256,
num_distributions = None,
hidden_dims = (256, 256)):
"""Creates a neural net.
Args:
state_dim: State size.
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
hidden_dims: List of hidden dimensions.
"""
super().__init__()
inputs = tf.keras.Input(shape=(state_dim,))
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
assert not num_distributions or embedding_dim % num_distributions == 0
self.embedder = keras_utils.create_mlp(
inputs.shape[-1], self.embedding_dim, hidden_dims=hidden_dims,
activation=tf.nn.swish,
near_zero_last_layer=bool(num_distributions))
@tf.function
def call(self,
states,
stop_gradient = True):
"""Returns embeddings of states.
Args:
states: A batch of states.
stop_gradient: Whether to put a stop_gradient on embedding.
Returns:
Embeddings of states.
"""
if not self.num_distributions:
out = self.embedder(states)
else:
all_logits = self.embedder(states)
all_logits = tf.split(all_logits, num_or_size_splits=self.num_distributions, axis=-1)
all_probs = [tf.nn.softmax(logits, -1) for logits in all_logits]
joined_probs = tf.concat(all_probs, -1)
all_samples = [tfp.distributions.Categorical(logits=logits).sample()
for logits in all_logits]
all_onehot_samples = [tf.one_hot(samples, self.embedding_dim // self.num_distributions)
for samples in all_samples]
joined_onehot_samples = tf.concat(all_onehot_samples, -1)
# Straight-through gradients.
out = joined_onehot_samples + joined_probs - tf.stop_gradient(joined_probs)
if stop_gradient:
return tf.stop_gradient(out)
return out
class RNNEmbedNet(tf.keras.Model):
"""An RNN embed network."""
def __init__(self,
input_dim,
embedding_dim,
num_distributions=None,
return_sequences=False):
"""Creates a neural net.
Args:
input_dim: Shape of the input state sequence (excluding the batch dimension).
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
return_sequences: Whether to return the entire sequence embedding.
"""
super().__init__()
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
assert not num_distributions or embedding_dim % num_distributions == 0
inputs = tf.keras.Input(shape=input_dim)
outputs = tf.keras.layers.LSTM(
embedding_dim, return_sequences=return_sequences)(
inputs)
self.embedder = tf.keras.Model(inputs=inputs, outputs=outputs)
self.embedder.call = tf.function(self.embedder.call)
@tf.function
def call(self, states, stop_gradient = True):
"""Returns embeddings of states.
Args:
states: A batch of sequence of states].
stop_gradient: Whether to put a stop_gradient on embedding.
Returns:
Auto-regressively computed Embeddings of the last states.
"""
assert (len(states.shape) == 3)
if not self.num_distributions:
out = self.embedder(states)
else:
all_logits = self.embedder(states)
all_logits = tf.split(all_logits, num_or_size_splits=self.num_distributions, axis=-1)
all_probs = [tf.nn.softmax(logits, -1) for logits in all_logits]
joined_probs = tf.concat(all_probs, -1)
all_samples = [tfp.distributions.Categorical(logits=logits).sample()
for logits in all_logits]
all_onehot_samples = [tf.one_hot(samples, self.embedding_dim // self.num_distributions)
for samples in all_samples]
joined_onehot_samples = tf.concat(all_onehot_samples, -1)
# Straight-through gradients.
out = joined_onehot_samples + joined_probs - tf.stop_gradient(joined_probs)
if stop_gradient:
return tf.stop_gradient(out)
return out
class StochasticEmbedNet(tf.keras.Model):
"""A stochastic embed network."""
def __init__(self,
state_dim,
embedding_dim = 256,
hidden_dims = (256, 256),
num_distributions = None,
logvar_min = -4.0,
logvar_max = 15.0):
"""Creates a neural net.
Args:
state_dim: State size.
embedding_dim: Embedding size.
hidden_dims: List of hidden dimensions.
num_distributions: Number of categorical distributions
for discrete embedding.
logvar_min: Minimum allowed logvar.
logvar_max: Maximum allowed logvar.
"""
super().__init__()
inputs = tf.keras.Input(shape=(state_dim,))
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
assert not num_distributions or embedding_dim % num_distributions == 0
distribution_dim = (2 if not num_distributions else 1) * self.embedding_dim
self.embedder = keras_utils.create_mlp(
inputs.shape[-1], distribution_dim, hidden_dims=hidden_dims,
activation=tf.nn.swish,
near_zero_last_layer=False)
self.logvar_min = logvar_min
self.logvar_max = logvar_max
@tf.function
def call(self,
states,
stop_gradient = True,
sample = True,
sample_and_raw_output = False):
"""Returns embeddings of states.
Args:
states: A batch of states.
stop_gradient: Whether to put a stop_gradient on embedding.
sample: Whether to sample an embedding.
sample_and_raw_output: Whether to return the original
probability in addition to sampled embeddings.
Returns:
Embeddings of states.
"""
if not self.num_distributions:
mean_and_logvar = self.embedder(states)
mean, logvar = tf.split(mean_and_logvar, 2, axis=-1)
logvar = tf.clip_by_value(logvar, self.logvar_min, self.logvar_max)
sample_out = mean + tf.random.normal(tf.shape(mean)) * tf.exp(0.5 * logvar)
raw_out = tf.concat([mean, logvar], -1)
else:
all_logits = self.embedder(states)
all_logits = tf.split(all_logits, num_or_size_splits=self.num_distributions, axis=-1)
all_probs = [tf.nn.softmax(logits, -1) for logits in all_logits]
joined_probs = tf.concat(all_probs, -1)
all_samples = [tfp.distributions.Categorical(logits=logits).sample()
for logits in all_logits]
all_onehot_samples = [tf.one_hot(samples, self.embedding_dim // self.num_distributions)
for samples in all_samples]
joined_onehot_samples = tf.concat(all_onehot_samples, -1)
# Straight-through gradients.
sample_out = joined_onehot_samples + joined_probs - tf.stop_gradient(joined_probs)
raw_out = joined_probs
if sample_and_raw_output:
out = (sample_out, raw_out)
elif sample:
out = sample_out
else:
out = raw_out
if stop_gradient:
if hasattr(out, '__len__'):
return tuple(map(tf.stop_gradient, out))
return tf.stop_gradient(out)
return out
class TransformerNet(tf.keras.Model):
"""An embed network based on transformer."""
def __init__(self,
state_dim,
embedding_dim = 256,
num_distributions = None,
input_embedding_dim = 256,
num_heads = 4,
key_dim = 256):
"""Creates a neural net.
Args:
state_dim: State size.
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
input_embedding_dim: embedding dim for inputs to the transformer.
num_heads: Number of attention heads.
key_dim: Key dimension used by the multi-head attention layer.
"""
super().__init__()
self.state_dim = state_dim
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
assert not num_distributions or embedding_dim % num_distributions == 0
self.input_embedding_dim = input_embedding_dim
self.component_embedder = keras_utils.create_mlp(
(state_dim, state_dim + 1),
self.input_embedding_dim,
hidden_dims=(256,),
activation=tf.nn.swish,
near_zero_last_layer=False)
attention = tf.keras.layers.MultiHeadAttention(
num_heads, key_dim=key_dim,
output_shape=(self.embedding_dim,))
inputs = tf.keras.Input(shape=(state_dim, self.input_embedding_dim))
outputs = attention(inputs, inputs)
self.transformer = tf.keras.Model(inputs=inputs, outputs=outputs)
self.missing_x = tf.Variable(tf.zeros([self.input_embedding_dim]))
def process_inputs(self,
states,
stop_gradient = True):
one_hot_index = tf.zeros_like(states)[Ellipsis, None] + tf.eye(self.state_dim)
state_inputs = tf.concat([one_hot_index, states[Ellipsis, None]], -1)
components = self.component_embedder(state_inputs)
return components
@tf.function
def call(self,
states,
stop_gradient = True,
missing_mask = None):
"""Returns embeddings of states.
Args:
states: A batch of states.
stop_gradient: Whether to put a stop_gradient on embedding.
Returns:
Embeddings of states.
"""
processed_inputs = self.process_inputs(states, stop_gradient=stop_gradient)
if missing_mask is not None:
attention_inputs = tf.where(missing_mask[Ellipsis, None],
tf.ones_like(states)[Ellipsis, None] * self.missing_x,
processed_inputs)
else:
attention_inputs = processed_inputs
attention_out = self.transformer(attention_inputs, training=not stop_gradient)
if not self.num_distributions:
out = tf.reduce_mean(attention_out, -2)
else:
all_logits = tf.reduce_mean(attention_out, -2)
all_logits = tf.split(all_logits, num_or_size_splits=self.num_distributions, axis=-1)
all_probs = [tf.nn.softmax(logits, -1) for logits in all_logits]
joined_probs = tf.concat(all_probs, -1)
all_samples = [tfp.distributions.Categorical(logits=logits).sample()
for logits in all_logits]
all_onehot_samples = [tf.one_hot(samples, self.embedding_dim // self.num_distributions)
for samples in all_samples]
joined_onehot_samples = tf.concat(all_onehot_samples, -1)
# Straight-through gradients.
out = joined_onehot_samples + joined_probs - tf.stop_gradient(joined_probs)
if stop_gradient:
return | |
Workplane object with the current point unchanged
"""
diff = stop - start
allPoints = self._toVectors(
(func(start + diff * t / N) for t in range(N + 1)), False
)
e = Edge.makeSplineApprox(
allPoints, tol=tol, smoothing=smoothing, minDeg=minDeg, maxDeg=maxDeg
)
if makeWire:
rv_w = Wire.assembleEdges([e])
self._addPendingWire(rv_w)
else:
self._addPendingEdge(e)
return self.newObject([rv_w if makeWire else e])
def parametricSurface(
self: T,
func: Callable[[float, float], VectorLike],
N: int = 20,
start: float = 0,
stop: float = 1,
tol: float = 1e-2,
minDeg: int = 1,
maxDeg: int = 6,
smoothing: Optional[Tuple[float, float, float]] = (1, 1, 1),
) -> T:
"""
Create a spline surface approximating the provided function.
:param func: function f(u,v) that will generate (x,y,z) pairs
:type func: (float,float) --> (float,float,float)
:param N: number of points for discretization in one direction
:param start: starting value of the parameters u,v
:param stop: final value of the parameters u,v
        :param tol: tolerance used by the approximation algorithm (default: 1e-2)
        :param minDeg: minimum spline degree (default: 1)
        :param maxDeg: maximum spline degree (default: 6)
:param smoothing: optional parameters for the variational smoothing algorithm (default: (1,1,1))
:return: a Workplane object with the current point unchanged
This method might be unstable and may require tuning of the tol parameter.
"""
diff = stop - start
allPoints = []
for i in range(N + 1):
generator = (
func(start + diff * i / N, start + diff * j / N) for j in range(N + 1)
)
allPoints.append(self._toVectors(generator, False))
f = Face.makeSplineApprox(
allPoints, tol=tol, smoothing=smoothing, minDeg=minDeg, maxDeg=maxDeg
)
return self.newObject([f])
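    # Usage sketch (assumes the usual ``import cadquery as cq`` and ``import math``;
    # the wavy test function and N=24 are illustrative values only):
    #
    #   surf = cq.Workplane("XY").parametricSurface(
    #       lambda u, v: (u, v, 0.2 * math.sin(3 * u) * math.cos(3 * v)),
    #       N=24, start=0, stop=1, tol=1e-2,
    #   )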
def ellipseArc(
self: T,
x_radius: float,
y_radius: float,
angle1: float = 360,
angle2: float = 360,
rotation_angle: float = 0.0,
sense: Literal[-1, 1] = 1,
forConstruction: bool = False,
startAtCurrent: bool = True,
makeWire: bool = False,
) -> T:
"""Draw an elliptical arc with x and y radiuses either with start point at current point or
or current point being the center of the arc
:param x_radius: x radius of the ellipse (along the x-axis of plane the ellipse should lie in)
:param y_radius: y radius of the ellipse (along the y-axis of plane the ellipse should lie in)
:param angle1: start angle of arc
        :param angle2: end angle of arc (angle2 == angle1 produces the closed ellipse, which is the default)
        :param rotation_angle: angle to rotate the created ellipse / arc
        :param sense: clockwise (-1) or counterclockwise (1)
:param startAtCurrent: True: start point of arc is moved to current point; False: center of
arc is on current point
        :param makeWire: convert the resulting arc edge to a wire
        :param forConstruction: mark the arc as construction geometry, so it is not added to the pending edges/wires
"""
# Start building the ellipse with the current point as center
center = self._findFromPoint(useLocalCoords=False)
e = Edge.makeEllipse(
x_radius,
y_radius,
center,
self.plane.zDir,
self.plane.xDir,
angle1,
angle2,
sense,
)
# Rotate if necessary
if rotation_angle != 0.0:
e = e.rotate(center, center.add(self.plane.zDir), rotation_angle)
# Move the start point of the ellipse onto the last current point
if startAtCurrent:
startPoint = e.startPoint()
e = e.translate(center.sub(startPoint))
if makeWire:
rv_w = Wire.assembleEdges([e])
if not forConstruction:
self._addPendingWire(rv_w)
else:
if not forConstruction:
self._addPendingEdge(e)
return self.newObject([rv_w if makeWire else e])
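    # Usage sketch: a quarter ellipse starting at the current point, returned as a
    # pending wire (radii and angles are illustrative values only):
    #
    #   arc = cq.Workplane("XY").ellipseArc(
    #       x_radius=4.0, y_radius=2.0, angle1=0, angle2=90,
    #       startAtCurrent=True, makeWire=True,
    #   )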
def threePointArc(
self: T, point1: VectorLike, point2: VectorLike, forConstruction: bool = False,
) -> T:
"""
Draw an arc from the current point, through point1, and ending at point2
:param point1: point to draw through
:type point1: 2-tuple, in workplane coordinates
:param point2: end point for the arc
:type point2: 2-tuple, in workplane coordinates
:return: a workplane with the current point at the end of the arc
Future Enhancements:
provide a version that allows an arc using relative measures
provide a centerpoint arc
provide tangent arcs
"""
gstartPoint = self._findFromPoint(False)
gpoint1 = self.plane.toWorldCoords(point1)
gpoint2 = self.plane.toWorldCoords(point2)
arc = Edge.makeThreePointArc(gstartPoint, gpoint1, gpoint2)
if not forConstruction:
self._addPendingEdge(arc)
return self.newObject([arc])
def sagittaArc(
self: T, endPoint: VectorLike, sag: float, forConstruction: bool = False,
) -> T:
"""
Draw an arc from the current point to endPoint with an arc defined by the sag (sagitta).
:param endPoint: end point for the arc
:type endPoint: 2-tuple, in workplane coordinates
:param sag: the sagitta of the arc
:type sag: float, perpendicular distance from arc center to arc baseline.
:return: a workplane with the current point at the end of the arc
The sagitta is the distance from the center of the arc to the arc base.
Given that a closed contour is drawn clockwise;
A positive sagitta means convex arc and negative sagitta means concave arc.
See "https://en.wikipedia.org/wiki/Sagitta_(geometry)" for more information.
"""
startPoint = self._findFromPoint(useLocalCoords=True)
endPoint = Vector(endPoint)
midPoint = endPoint.add(startPoint).multiply(0.5)
sagVector = endPoint.sub(startPoint).normalized().multiply(abs(sag))
if sag > 0:
sagVector.x, sagVector.y = (
-sagVector.y,
sagVector.x,
) # Rotate sagVector +90 deg
else:
sagVector.x, sagVector.y = (
sagVector.y,
-sagVector.x,
) # Rotate sagVector -90 deg
sagPoint = midPoint.add(sagVector)
return self.threePointArc(sagPoint, endPoint, forConstruction)
def radiusArc(
self: T, endPoint: VectorLike, radius: float, forConstruction: bool = False,
) -> T:
"""
Draw an arc from the current point to endPoint with an arc defined by the radius.
:param endPoint: end point for the arc
:type endPoint: 2-tuple, in workplane coordinates
:param radius: the radius of the arc
:type radius: float, the radius of the arc between start point and end point.
:return: a workplane with the current point at the end of the arc
Given that a closed contour is drawn clockwise;
A positive radius means convex arc and negative radius means concave arc.
"""
startPoint = self._findFromPoint(useLocalCoords=True)
endPoint = Vector(endPoint)
# Calculate the sagitta from the radius
length = endPoint.sub(startPoint).Length / 2.0
try:
sag = abs(radius) - math.sqrt(radius ** 2 - length ** 2)
except ValueError:
raise ValueError("Arc radius is not large enough to reach the end point.")
# Return a sagittaArc
if radius > 0:
return self.sagittaArc(endPoint, sag, forConstruction)
else:
return self.sagittaArc(endPoint, -sag, forConstruction)
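    # Worked example of the radius-to-sagitta conversion above:
    # sag = |radius| - sqrt(radius**2 - (chord/2)**2), so for a chord of length 6
    # and radius 5, sag = 5 - sqrt(25 - 9) = 1, meaning radiusArc((6, 0), 5)
    # drawn from the origin is equivalent to sagittaArc((6, 0), 1).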
def tangentArcPoint(
self: T,
endpoint: VectorLike,
forConstruction: bool = False,
relative: bool = True,
) -> T:
"""
Draw an arc as a tangent from the end of the current edge to endpoint.
:param endpoint: point for the arc to end at
:type endpoint: 2-tuple, 3-tuple or Vector
:param relative: True if endpoint is specified relative to the current point, False if endpoint is in workplane coordinates
:type relative: Bool
:return: a Workplane object with an arc on the stack
        Requires that the first object on the stack is an Edge, as would
        be the case after a lineTo operation or similar.
"""
if not isinstance(endpoint, Vector):
endpoint = Vector(endpoint)
if relative:
endpoint = endpoint + self._findFromPoint(useLocalCoords=True)
endpoint = self.plane.toWorldCoords(endpoint)
previousEdge = self._findFromEdge()
arc = Edge.makeTangentArc(
previousEdge.endPoint(), previousEdge.tangentAt(1), endpoint
)
if not forConstruction:
self._addPendingEdge(arc)
return self.newObject([arc])
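    # Usage sketch: continue a straight segment with a tangent arc; the endpoint is
    # relative to the current point by default (lineTo is part of the same
    # Workplane API, defined elsewhere in this class):
    #
    #   path = cq.Workplane("XY").lineTo(2, 0).tangentArcPoint((1, 1))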
def mirrorY(self: T) -> T:
"""
Mirror entities around the y axis of the workplane plane.
:return: a new object with any free edges consolidated into as few wires as possible.
All free edges are collected into a wire, and then the wire is mirrored,
and finally joined into a new wire
Typically used to make creating wires with symmetry easier. This line of code::
s = Workplane().lineTo(2,2).threePointArc((3,1),(2,0)).mirrorX().extrude(0.25)
Produces a flat, heart shaped object
"""
# convert edges to a wire, if there are pending edges
n = self.wire(forConstruction=False)
# attempt to consolidate wires together.
consolidated = n.consolidateWires()
mirroredWires = self.plane.mirrorInPlane(consolidated.wires().vals(), "Y")
for w in mirroredWires:
consolidated.objects.append(w)
consolidated._addPendingWire(w)
# attempt again to consolidate all of the wires
return consolidated.consolidateWires()
def mirrorX(self: T) -> T:
"""
Mirror entities around the x axis of the workplane plane.
:return: a new object with any free edges consolidated into as few wires as possible.
All free edges are collected into a wire, and then the wire is mirrored,
and finally joined into a new wire
Typically used to make creating wires with symmetry easier.
"""
# convert edges to a wire, if there are pending edges
n = self.wire(forConstruction=False)
# attempt to consolidate wires together.
consolidated = n.consolidateWires()
        mirroredWires = self.plane.mirrorInPlane(consolidated.wires().vals(), "X")
        for w in mirroredWires:
            consolidated.objects.append(w)
            consolidated._addPendingWire(w)
        # attempt again to consolidate all of the wires
        return consolidated.consolidateWires()
poi_getAllSubscriptionResults = _libsumo.poi_getAllSubscriptionResults
def poi_getSubscriptionResults(objID):
return _libsumo.poi_getSubscriptionResults(objID)
poi_getSubscriptionResults = _libsumo.poi_getSubscriptionResults
def poi_getAllContextSubscriptionResults():
return _libsumo.poi_getAllContextSubscriptionResults()
poi_getAllContextSubscriptionResults = _libsumo.poi_getAllContextSubscriptionResults
def poi_getContextSubscriptionResults(objID):
return _libsumo.poi_getContextSubscriptionResults(objID)
poi_getContextSubscriptionResults = _libsumo.poi_getContextSubscriptionResults
def poi_getTree():
return _libsumo.poi_getTree()
poi_getTree = _libsumo.poi_getTree
def poi_storeShape(id, shape):
return _libsumo.poi_storeShape(id, shape)
poi_storeShape = _libsumo.poi_storeShape
def poi_makeWrapper():
return _libsumo.poi_makeWrapper()
poi_makeWrapper = _libsumo.poi_makeWrapper
def poi_handleVariable(objID, variable, wrapper):
return _libsumo.poi_handleVariable(objID, variable, wrapper)
poi_handleVariable = _libsumo.poi_handleVariable
class polygon(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, polygon, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, polygon, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_getmethods__["getIDList"] = lambda x: _libsumo.polygon_getIDList
if _newclass:
getIDList = staticmethod(_libsumo.polygon_getIDList)
__swig_getmethods__["getIDCount"] = lambda x: _libsumo.polygon_getIDCount
if _newclass:
getIDCount = staticmethod(_libsumo.polygon_getIDCount)
__swig_getmethods__["getType"] = lambda x: _libsumo.polygon_getType
if _newclass:
getType = staticmethod(_libsumo.polygon_getType)
__swig_getmethods__["getShape"] = lambda x: _libsumo.polygon_getShape
if _newclass:
getShape = staticmethod(_libsumo.polygon_getShape)
__swig_getmethods__["getColor"] = lambda x: _libsumo.polygon_getColor
if _newclass:
getColor = staticmethod(_libsumo.polygon_getColor)
__swig_getmethods__["getFilled"] = lambda x: _libsumo.polygon_getFilled
if _newclass:
getFilled = staticmethod(_libsumo.polygon_getFilled)
__swig_getmethods__["getLineWidth"] = lambda x: _libsumo.polygon_getLineWidth
if _newclass:
getLineWidth = staticmethod(_libsumo.polygon_getLineWidth)
__swig_getmethods__["getParameter"] = lambda x: _libsumo.polygon_getParameter
if _newclass:
getParameter = staticmethod(_libsumo.polygon_getParameter)
__swig_getmethods__["setType"] = lambda x: _libsumo.polygon_setType
if _newclass:
setType = staticmethod(_libsumo.polygon_setType)
__swig_getmethods__["setShape"] = lambda x: _libsumo.polygon_setShape
if _newclass:
setShape = staticmethod(_libsumo.polygon_setShape)
__swig_getmethods__["setColor"] = lambda x: _libsumo.polygon_setColor
if _newclass:
setColor = staticmethod(_libsumo.polygon_setColor)
__swig_getmethods__["add"] = lambda x: _libsumo.polygon_add
if _newclass:
add = staticmethod(_libsumo.polygon_add)
__swig_getmethods__["remove"] = lambda x: _libsumo.polygon_remove
if _newclass:
remove = staticmethod(_libsumo.polygon_remove)
__swig_getmethods__["setFilled"] = lambda x: _libsumo.polygon_setFilled
if _newclass:
setFilled = staticmethod(_libsumo.polygon_setFilled)
__swig_getmethods__["setLineWidth"] = lambda x: _libsumo.polygon_setLineWidth
if _newclass:
setLineWidth = staticmethod(_libsumo.polygon_setLineWidth)
__swig_getmethods__["setParameter"] = lambda x: _libsumo.polygon_setParameter
if _newclass:
setParameter = staticmethod(_libsumo.polygon_setParameter)
__swig_getmethods__["subscribe"] = lambda x: _libsumo.polygon_subscribe
if _newclass:
subscribe = staticmethod(_libsumo.polygon_subscribe)
__swig_getmethods__["subscribeContext"] = lambda x: _libsumo.polygon_subscribeContext
if _newclass:
subscribeContext = staticmethod(_libsumo.polygon_subscribeContext)
__swig_getmethods__["getAllSubscriptionResults"] = lambda x: _libsumo.polygon_getAllSubscriptionResults
if _newclass:
getAllSubscriptionResults = staticmethod(_libsumo.polygon_getAllSubscriptionResults)
__swig_getmethods__["getSubscriptionResults"] = lambda x: _libsumo.polygon_getSubscriptionResults
if _newclass:
getSubscriptionResults = staticmethod(_libsumo.polygon_getSubscriptionResults)
__swig_getmethods__["getAllContextSubscriptionResults"] = lambda x: _libsumo.polygon_getAllContextSubscriptionResults
if _newclass:
getAllContextSubscriptionResults = staticmethod(_libsumo.polygon_getAllContextSubscriptionResults)
__swig_getmethods__["getContextSubscriptionResults"] = lambda x: _libsumo.polygon_getContextSubscriptionResults
if _newclass:
getContextSubscriptionResults = staticmethod(_libsumo.polygon_getContextSubscriptionResults)
__swig_getmethods__["getTree"] = lambda x: _libsumo.polygon_getTree
if _newclass:
getTree = staticmethod(_libsumo.polygon_getTree)
__swig_getmethods__["storeShape"] = lambda x: _libsumo.polygon_storeShape
if _newclass:
storeShape = staticmethod(_libsumo.polygon_storeShape)
__swig_getmethods__["makeWrapper"] = lambda x: _libsumo.polygon_makeWrapper
if _newclass:
makeWrapper = staticmethod(_libsumo.polygon_makeWrapper)
__swig_getmethods__["handleVariable"] = lambda x: _libsumo.polygon_handleVariable
if _newclass:
handleVariable = staticmethod(_libsumo.polygon_handleVariable)
__swig_destroy__ = _libsumo.delete_polygon
__del__ = lambda self: None
polygon_swigregister = _libsumo.polygon_swigregister
polygon_swigregister(polygon)
def polygon_getIDList():
return _libsumo.polygon_getIDList()
polygon_getIDList = _libsumo.polygon_getIDList
def polygon_getIDCount():
return _libsumo.polygon_getIDCount()
polygon_getIDCount = _libsumo.polygon_getIDCount
def polygon_getType(polygonID):
return _libsumo.polygon_getType(polygonID)
polygon_getType = _libsumo.polygon_getType
def polygon_getShape(polygonID):
return _libsumo.polygon_getShape(polygonID)
polygon_getShape = _libsumo.polygon_getShape
def polygon_getColor(polygonID):
return _libsumo.polygon_getColor(polygonID)
polygon_getColor = _libsumo.polygon_getColor
def polygon_getFilled(polygonID):
return _libsumo.polygon_getFilled(polygonID)
polygon_getFilled = _libsumo.polygon_getFilled
def polygon_getLineWidth(polygonID):
return _libsumo.polygon_getLineWidth(polygonID)
polygon_getLineWidth = _libsumo.polygon_getLineWidth
def polygon_getParameter(polygonID, key):
return _libsumo.polygon_getParameter(polygonID, key)
polygon_getParameter = _libsumo.polygon_getParameter
def polygon_setType(polygonID, setType):
return _libsumo.polygon_setType(polygonID, setType)
polygon_setType = _libsumo.polygon_setType
def polygon_setShape(polygonID, shape):
return _libsumo.polygon_setShape(polygonID, shape)
polygon_setShape = _libsumo.polygon_setShape
def polygon_setColor(polygonID, c):
return _libsumo.polygon_setColor(polygonID, c)
polygon_setColor = _libsumo.polygon_setColor
def polygon_add(*args, **kwargs):
return _libsumo.polygon_add(*args, **kwargs)
polygon_add = _libsumo.polygon_add
def polygon_remove(polygonID, layer=0):
return _libsumo.polygon_remove(polygonID, layer)
polygon_remove = _libsumo.polygon_remove
def polygon_setFilled(polygonID, filled):
return _libsumo.polygon_setFilled(polygonID, filled)
polygon_setFilled = _libsumo.polygon_setFilled
def polygon_setLineWidth(polygonID, lineWidth):
return _libsumo.polygon_setLineWidth(polygonID, lineWidth)
polygon_setLineWidth = _libsumo.polygon_setLineWidth
def polygon_setParameter(polygonID, key, value):
return _libsumo.polygon_setParameter(polygonID, key, value)
polygon_setParameter = _libsumo.polygon_setParameter
def polygon_subscribe(*args, **kwargs):
return _libsumo.polygon_subscribe(*args, **kwargs)
polygon_subscribe = _libsumo.polygon_subscribe
def polygon_subscribeContext(*args, **kwargs):
return _libsumo.polygon_subscribeContext(*args, **kwargs)
polygon_subscribeContext = _libsumo.polygon_subscribeContext
def polygon_getAllSubscriptionResults():
return _libsumo.polygon_getAllSubscriptionResults()
polygon_getAllSubscriptionResults = _libsumo.polygon_getAllSubscriptionResults
def polygon_getSubscriptionResults(objID):
return _libsumo.polygon_getSubscriptionResults(objID)
polygon_getSubscriptionResults = _libsumo.polygon_getSubscriptionResults
def polygon_getAllContextSubscriptionResults():
return _libsumo.polygon_getAllContextSubscriptionResults()
polygon_getAllContextSubscriptionResults = _libsumo.polygon_getAllContextSubscriptionResults
def polygon_getContextSubscriptionResults(objID):
return _libsumo.polygon_getContextSubscriptionResults(objID)
polygon_getContextSubscriptionResults = _libsumo.polygon_getContextSubscriptionResults
def polygon_getTree():
return _libsumo.polygon_getTree()
polygon_getTree = _libsumo.polygon_getTree
def polygon_storeShape(id, shape):
return _libsumo.polygon_storeShape(id, shape)
polygon_storeShape = _libsumo.polygon_storeShape
def polygon_makeWrapper():
return _libsumo.polygon_makeWrapper()
polygon_makeWrapper = _libsumo.polygon_makeWrapper
def polygon_handleVariable(objID, variable, wrapper):
return _libsumo.polygon_handleVariable(objID, variable, wrapper)
polygon_handleVariable = _libsumo.polygon_handleVariable
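# Usage sketch for the module-level polygon helpers above (requires a running
# libsumo simulation; the RGBA colour tuple format is an assumption, not shown
# in this binding file):
#
#   for pid in polygon_getIDList():
#       shape = polygon_getShape(pid)
#       polygon_setFilled(pid, True)
#       polygon_setColor(pid, (255, 0, 0, 255))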
class route(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, route, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, route, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_getmethods__["getIDList"] = lambda x: _libsumo.route_getIDList
if _newclass:
getIDList = staticmethod(_libsumo.route_getIDList)
__swig_getmethods__["getIDCount"] = lambda x: _libsumo.route_getIDCount
if _newclass:
getIDCount = staticmethod(_libsumo.route_getIDCount)
__swig_getmethods__["getEdges"] = lambda x: _libsumo.route_getEdges
if _newclass:
getEdges = staticmethod(_libsumo.route_getEdges)
__swig_getmethods__["getParameter"] = lambda x: _libsumo.route_getParameter
if _newclass:
getParameter = staticmethod(_libsumo.route_getParameter)
__swig_getmethods__["add"] = lambda x: _libsumo.route_add
if _newclass:
add = staticmethod(_libsumo.route_add)
__swig_getmethods__["setParameter"] = lambda x: _libsumo.route_setParameter
if _newclass:
setParameter = staticmethod(_libsumo.route_setParameter)
__swig_getmethods__["subscribe"] = lambda x: _libsumo.route_subscribe
if _newclass:
subscribe = staticmethod(_libsumo.route_subscribe)
__swig_getmethods__["subscribeContext"] = lambda x: _libsumo.route_subscribeContext
if _newclass:
subscribeContext = staticmethod(_libsumo.route_subscribeContext)
__swig_getmethods__["getAllSubscriptionResults"] = lambda x: _libsumo.route_getAllSubscriptionResults
if _newclass:
getAllSubscriptionResults = staticmethod(_libsumo.route_getAllSubscriptionResults)
__swig_getmethods__["getSubscriptionResults"] = lambda x: _libsumo.route_getSubscriptionResults
if _newclass:
getSubscriptionResults = staticmethod(_libsumo.route_getSubscriptionResults)
__swig_getmethods__["getAllContextSubscriptionResults"] = lambda x: _libsumo.route_getAllContextSubscriptionResults
if _newclass:
getAllContextSubscriptionResults = staticmethod(_libsumo.route_getAllContextSubscriptionResults)
__swig_getmethods__["getContextSubscriptionResults"] = lambda x: _libsumo.route_getContextSubscriptionResults
if _newclass:
getContextSubscriptionResults = staticmethod(_libsumo.route_getContextSubscriptionResults)
__swig_getmethods__["makeWrapper"] = lambda x: _libsumo.route_makeWrapper
if _newclass:
makeWrapper = staticmethod(_libsumo.route_makeWrapper)
__swig_getmethods__["handleVariable"] = lambda x: _libsumo.route_handleVariable
if _newclass:
handleVariable = staticmethod(_libsumo.route_handleVariable)
__swig_destroy__ = _libsumo.delete_route
__del__ = lambda self: None
route_swigregister = _libsumo.route_swigregister
route_swigregister(route)
def route_getIDList():
return _libsumo.route_getIDList()
route_getIDList = _libsumo.route_getIDList
def route_getIDCount():
return _libsumo.route_getIDCount()
route_getIDCount = _libsumo.route_getIDCount
def route_getEdges(routeID):
return _libsumo.route_getEdges(routeID)
route_getEdges = _libsumo.route_getEdges
def route_getParameter(routeID, param):
return _libsumo.route_getParameter(routeID, param)
route_getParameter = _libsumo.route_getParameter
def route_add(routeID, edgeIDs):
return _libsumo.route_add(routeID, edgeIDs)
route_add = _libsumo.route_add
def route_setParameter(routeID, key, value):
return _libsumo.route_setParameter(routeID, key, value)
route_setParameter = _libsumo.route_setParameter
def route_subscribe(*args, **kwargs):
return _libsumo.route_subscribe(*args, **kwargs)
route_subscribe = _libsumo.route_subscribe
def route_subscribeContext(*args, **kwargs):
return _libsumo.route_subscribeContext(*args, **kwargs)
route_subscribeContext = _libsumo.route_subscribeContext
def route_getAllSubscriptionResults():
return _libsumo.route_getAllSubscriptionResults()
route_getAllSubscriptionResults = _libsumo.route_getAllSubscriptionResults
def route_getSubscriptionResults(objID):
return _libsumo.route_getSubscriptionResults(objID)
route_getSubscriptionResults = _libsumo.route_getSubscriptionResults
def route_getAllContextSubscriptionResults():
return _libsumo.route_getAllContextSubscriptionResults()
route_getAllContextSubscriptionResults = _libsumo.route_getAllContextSubscriptionResults
def route_getContextSubscriptionResults(objID):
return _libsumo.route_getContextSubscriptionResults(objID)
route_getContextSubscriptionResults = _libsumo.route_getContextSubscriptionResults
def route_makeWrapper():
return _libsumo.route_makeWrapper()
route_makeWrapper = _libsumo.route_makeWrapper
def route_handleVariable(objID, variable, wrapper):
return _libsumo.route_handleVariable(objID, variable, wrapper)
route_handleVariable = _libsumo.route_handleVariable
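# Usage sketch for the module-level route helpers above (the route and edge ids
# are placeholders; a network containing them must already be loaded):
#
#   route_add("newRoute", ["e1", "e2"])
#   print(route_getEdges("newRoute"))
#   print(route_getIDList())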
class simulation(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, simulation, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, simulation, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_getmethods__["load"] = lambda x: _libsumo.simulation_load
if _newclass:
load = staticmethod(_libsumo.simulation_load)
__swig_getmethods__["isLoaded"] = lambda x: _libsumo.simulation_isLoaded
if _newclass:
isLoaded = staticmethod(_libsumo.simulation_isLoaded)
__swig_getmethods__["close"] = lambda x: _libsumo.simulation_close
if _newclass:
close = staticmethod(_libsumo.simulation_close)
__swig_getmethods__["step"] = lambda x: _libsumo.simulation_step
if _newclass:
step = staticmethod(_libsumo.simulation_step)
__swig_getmethods__["getCurrentTime"] = lambda x: _libsumo.simulation_getCurrentTime
if _newclass:
getCurrentTime = staticmethod(_libsumo.simulation_getCurrentTime)
__swig_getmethods__["getTime"] = lambda x: _libsumo.simulation_getTime
if _newclass:
getTime = staticmethod(_libsumo.simulation_getTime)
__swig_getmethods__["getLoadedNumber"] = lambda x: _libsumo.simulation_getLoadedNumber
if _newclass:
getLoadedNumber = staticmethod(_libsumo.simulation_getLoadedNumber)
__swig_getmethods__["getLoadedIDList"] = lambda x: _libsumo.simulation_getLoadedIDList
if _newclass:
getLoadedIDList = staticmethod(_libsumo.simulation_getLoadedIDList)
__swig_getmethods__["getDepartedNumber"] = lambda x: _libsumo.simulation_getDepartedNumber
if _newclass:
getDepartedNumber = staticmethod(_libsumo.simulation_getDepartedNumber)
__swig_getmethods__["getDepartedIDList"] = lambda x: _libsumo.simulation_getDepartedIDList
if _newclass:
getDepartedIDList = staticmethod(_libsumo.simulation_getDepartedIDList)
__swig_getmethods__["getArrivedNumber"] = lambda x: _libsumo.simulation_getArrivedNumber
if _newclass:
getArrivedNumber = staticmethod(_libsumo.simulation_getArrivedNumber)
__swig_getmethods__["getArrivedIDList"] = lambda x: _libsumo.simulation_getArrivedIDList
if _newclass:
getArrivedIDList = staticmethod(_libsumo.simulation_getArrivedIDList)
__swig_getmethods__["getParkingStartingVehiclesNumber"] = lambda x: _libsumo.simulation_getParkingStartingVehiclesNumber
if _newclass:
getParkingStartingVehiclesNumber = staticmethod(_libsumo.simulation_getParkingStartingVehiclesNumber)
__swig_getmethods__["getParkingStartingVehiclesIDList"] = lambda x: _libsumo.simulation_getParkingStartingVehiclesIDList
if _newclass:
getParkingStartingVehiclesIDList = staticmethod(_libsumo.simulation_getParkingStartingVehiclesIDList)
__swig_getmethods__["getParkingEndingVehiclesNumber"] = lambda x: _libsumo.simulation_getParkingEndingVehiclesNumber
if _newclass:
getParkingEndingVehiclesNumber = staticmethod(_libsumo.simulation_getParkingEndingVehiclesNumber)
__swig_getmethods__["getParkingEndingVehiclesIDList"] = lambda x: _libsumo.simulation_getParkingEndingVehiclesIDList
if _newclass:
getParkingEndingVehiclesIDList = staticmethod(_libsumo.simulation_getParkingEndingVehiclesIDList)
__swig_getmethods__["getStopStartingVehiclesNumber"] = lambda x: _libsumo.simulation_getStopStartingVehiclesNumber
if _newclass:
getStopStartingVehiclesNumber = staticmethod(_libsumo.simulation_getStopStartingVehiclesNumber)
__swig_getmethods__["getStopStartingVehiclesIDList"] = lambda x: _libsumo.simulation_getStopStartingVehiclesIDList
if _newclass:
getStopStartingVehiclesIDList = staticmethod(_libsumo.simulation_getStopStartingVehiclesIDList)
__swig_getmethods__["getStopEndingVehiclesNumber"] = lambda x: _libsumo.simulation_getStopEndingVehiclesNumber
if _newclass:
getStopEndingVehiclesNumber = staticmethod(_libsumo.simulation_getStopEndingVehiclesNumber)
__swig_getmethods__["getStopEndingVehiclesIDList"] = lambda x: _libsumo.simulation_getStopEndingVehiclesIDList
if _newclass:
getStopEndingVehiclesIDList = staticmethod(_libsumo.simulation_getStopEndingVehiclesIDList)
__swig_getmethods__["getCollidingVehiclesNumber"] = lambda x: _libsumo.simulation_getCollidingVehiclesNumber
if _newclass:
getCollidingVehiclesNumber = staticmethod(_libsumo.simulation_getCollidingVehiclesNumber)
__swig_getmethods__["getCollidingVehiclesIDList"] = lambda x: _libsumo.simulation_getCollidingVehiclesIDList
if _newclass:
getCollidingVehiclesIDList = staticmethod(_libsumo.simulation_getCollidingVehiclesIDList)
__swig_getmethods__["getEmergencyStoppingVehiclesNumber"] = lambda x: _libsumo.simulation_getEmergencyStoppingVehiclesNumber
if _newclass:
getEmergencyStoppingVehiclesNumber = staticmethod(_libsumo.simulation_getEmergencyStoppingVehiclesNumber)
__swig_getmethods__["getEmergencyStoppingVehiclesIDList"] = lambda x: _libsumo.simulation_getEmergencyStoppingVehiclesIDList
if _newclass:
getEmergencyStoppingVehiclesIDList = staticmethod(_libsumo.simulation_getEmergencyStoppingVehiclesIDList)
__swig_getmethods__["getStartingTeleportNumber"] = lambda x: _libsumo.simulation_getStartingTeleportNumber
if _newclass:
getStartingTeleportNumber = staticmethod(_libsumo.simulation_getStartingTeleportNumber)
__swig_getmethods__["getStartingTeleportIDList"] = lambda x: _libsumo.simulation_getStartingTeleportIDList
if _newclass:
getStartingTeleportIDList = staticmethod(_libsumo.simulation_getStartingTeleportIDList)
__swig_getmethods__["getEndingTeleportNumber"] = lambda x: _libsumo.simulation_getEndingTeleportNumber
if _newclass:
getEndingTeleportNumber = staticmethod(_libsumo.simulation_getEndingTeleportNumber)
__swig_getmethods__["getEndingTeleportIDList"] = lambda x: _libsumo.simulation_getEndingTeleportIDList
if _newclass:
getEndingTeleportIDList = staticmethod(_libsumo.simulation_getEndingTeleportIDList)
__swig_getmethods__["getBusStopWaiting"] = lambda x: _libsumo.simulation_getBusStopWaiting
if _newclass:
getBusStopWaiting = staticmethod(_libsumo.simulation_getBusStopWaiting)
__swig_getmethods__["getDeltaT"] = lambda x: _libsumo.simulation_getDeltaT
if _newclass:
getDeltaT = staticmethod(_libsumo.simulation_getDeltaT)
__swig_getmethods__["getNetBoundary"] = lambda x: _libsumo.simulation_getNetBoundary
if _newclass:
getNetBoundary = staticmethod(_libsumo.simulation_getNetBoundary)
__swig_getmethods__["convert2D"] = lambda x: _libsumo.simulation_convert2D
if _newclass:
convert2D = staticmethod(_libsumo.simulation_convert2D)
__swig_getmethods__["convert3D"] = lambda x: _libsumo.simulation_convert3D
if _newclass:
convert3D = staticmethod(_libsumo.simulation_convert3D)
__swig_getmethods__["convertRoad"] = lambda x: _libsumo.simulation_convertRoad
if _newclass:
convertRoad = staticmethod(_libsumo.simulation_convertRoad)
__swig_getmethods__["convertGeo"] = lambda x: _libsumo.simulation_convertGeo
if _newclass:
convertGeo = staticmethod(_libsumo.simulation_convertGeo)
__swig_getmethods__["getDistance2D"] = lambda x: _libsumo.simulation_getDistance2D
if _newclass:
getDistance2D = staticmethod(_libsumo.simulation_getDistance2D)
__swig_getmethods__["getDistanceRoad"] = lambda x: _libsumo.simulation_getDistanceRoad
if _newclass:
getDistanceRoad = staticmethod(_libsumo.simulation_getDistanceRoad)
__swig_getmethods__["getMinExpectedNumber"] = lambda x: _libsumo.simulation_getMinExpectedNumber
if _newclass:
getMinExpectedNumber = staticmethod(_libsumo.simulation_getMinExpectedNumber)
__swig_getmethods__["findRoute"] = lambda x: _libsumo.simulation_findRoute
if _newclass:
findRoute = staticmethod(_libsumo.simulation_findRoute)
__swig_getmethods__["findIntermodalRoute"] = lambda x: _libsumo.simulation_findIntermodalRoute
if _newclass:
findIntermodalRoute = staticmethod(_libsumo.simulation_findIntermodalRoute)
__swig_getmethods__["getParameter"] = lambda x: _libsumo.simulation_getParameter
if _newclass:
getParameter = staticmethod(_libsumo.simulation_getParameter)
__swig_getmethods__["clearPending"] = lambda x: _libsumo.simulation_clearPending
if _newclass:
clearPending = staticmethod(_libsumo.simulation_clearPending)
__swig_getmethods__["saveState"] = lambda x: _libsumo.simulation_saveState
if _newclass:
saveState = staticmethod(_libsumo.simulation_saveState)
__swig_getmethods__["subscribeContext"] = lambda x: _libsumo.simulation_subscribeContext
if _newclass:
subscribeContext = staticmethod(_libsumo.simulation_subscribeContext)
__swig_getmethods__["getAllSubscriptionResults"] = lambda x: _libsumo.simulation_getAllSubscriptionResults
if _newclass:
getAllSubscriptionResults = staticmethod(_libsumo.simulation_getAllSubscriptionResults)
__swig_getmethods__["getAllContextSubscriptionResults"] = lambda x: _libsumo.simulation_getAllContextSubscriptionResults
if _newclass:
getAllContextSubscriptionResults = staticmethod(_libsumo.simulation_getAllContextSubscriptionResults)
__swig_getmethods__["getContextSubscriptionResults"] = lambda | |
# main.py COPYRIGHT Fujitsu Limited 2022
from argparse import ArgumentParser
from collections import OrderedDict
import copy
import os
import torch
from torchvision import transforms
import torchvision.datasets as datasets
from tqdm import tqdm
import sys
sys.path.append('../../')
from auto_prune_cifar10 import auto_prune
from resnet110 import ResNet110
#===================================================================================
parser = ArgumentParser()
parser.add_argument('--workers', default=8, type=int,
help='number of data loading workers')
parser.add_argument('--use_gpu', action='store_true',
help='use gpu')
parser.add_argument('--use_DataParallel', action='store_true',
help='use DataParallel')
# for training
parser.add_argument('--data', type=str, default='./data',
help='path to dataset')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--learning_rate', type=float, default=1e-1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--nesterov', default=False)
parser.add_argument('--scheduler_timing', type=str, default='epoch',
help="set LR change timing by LR_scheduler. 'epoch': execute scheduler.step() for each epoch. 'iter' : Execute scheduler.step() for each iteration")
# for stepLR scheduler
parser.add_argument('--lr-milestone', type=list, default=[100, 150, 200])
parser.add_argument('--lr-gamma', type=float, default=0.1)
# for auto pruning
parser.add_argument('--acc_control', type=float, default=0.1,
help='control parameter for pruned model accuracy')
parser.add_argument('--loss_margin', type=float, default=0.1,
help='control parameter for loss function margin to derive threshold')
parser.add_argument('--rates', nargs='*', type=float, default=[0.5, 0.4, 0.3, 0.2, 0.1, 0.0],
help='candidates for pruning rates')
parser.add_argument('--max_search_times', type=int, default=1000,
help='maximum number of times for pruning rate search')
parser.add_argument('--epochs', type=int, default=250,
help='re-training epochs')
parser.add_argument('--pretrained_model_path', type=str, default='./pretrained_cifar10_resnet110.pt',
help='pre-trained model filepath')
parser.add_argument('--pruned_model_path', type=str, default='./pruned_cifar10_resnet110.pt',
help='pruned model filepath')
#===================================================================================
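# Example invocation (flag values mirror the defaults defined above):
#
#   python main.py --data ./data --use_gpu --epochs 250 \
#       --acc_control 0.1 --rates 0.5 0.4 0.3 0.2 0.1 0.0 \
#       --pretrained_model_path ./pretrained_cifar10_resnet110.pt \
#       --pruned_model_path ./pruned_cifar10_resnet110.pt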
def main():
args = parser.parse_args()
args.rates = ([float(f) for f in args.rates])
print(f'args: {args}')
device = 'cpu'
if args.use_gpu:
torch.backends.cudnn.benchmark = True
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device: ', device)
print('===== load data ===================')
norm_mean = (0.485, 0.456, 0.406)
norm_std = (0.229, 0.224, 0.225)
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std),
]
)
val_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std),
]
)
# get cifar10 datasets
dataset_path = args.data
train_dataset = datasets.CIFAR10(
root=dataset_path, train=True, download=True, transform=train_transform)
val_dataset = datasets.CIFAR10(
root=dataset_path, train=False, download=True, transform=val_transform)
# make DataLoader
batch_size = args.batch_size
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=args.workers,
pin_memory=True,
)
# load model
model = ResNet110()
model.load_state_dict(torch.load(
args.pretrained_model_path, map_location=device), strict=True)
if torch.cuda.device_count() > 1 and args.use_DataParallel:
print('use {} GPUs.'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
model.to(device)
print('===== model: before pruning ==========')
print(model)
# Model information for pruning
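    # How to read the entries below (inferred from how auto_prune consumes this
    # mapping, so treat it as a reading of the code rather than official
    # documentation): each key names a layer of ResNet110, 'arg' is the
    # constructor channel argument that must shrink together with that layer,
    # and 'prev' lists every BatchNorm output that reaches the layer's input
    # through the residual additions, e.g.
    #
    #   model_info['l11_conv1'] = {'arg': 'ch_l11conv1', 'prev': ['bn1', 'l10_bn2']}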
model_info = OrderedDict()
model_info['conv1'] = {'arg': 'ch_conv1'}
model_info['bn1'] = {'arg': 'ch_conv1'}
model_info['l10_conv1'] = {'arg': 'ch_l10conv1', 'prev': ['bn1']}
model_info['l10_bn1'] = {'arg': 'ch_l10conv1'}
model_info['l10_conv2'] = {'arg': 'ch_l10conv2'}
model_info['l10_bn2'] = {'arg': 'ch_l10conv2'}
model_info['l11_conv1'] = {'arg': 'ch_l11conv1', 'prev': ['bn1', 'l10_bn2']}
model_info['l11_bn1'] = {'arg': 'ch_l11conv1'}
model_info['l11_conv2'] = {'arg': 'ch_l11conv2'}
model_info['l11_bn2'] = {'arg': 'ch_l11conv2'}
model_info['l12_conv1'] = {'arg': 'ch_l12conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2']}
model_info['l12_bn1'] = {'arg': 'ch_l12conv1'}
model_info['l12_conv2'] = {'arg': 'ch_l12conv2'}
model_info['l12_bn2'] = {'arg': 'ch_l12conv2'}
model_info['l13_conv1'] = {'arg': 'ch_l13conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2']}
model_info['l13_bn1'] = {'arg': 'ch_l13conv1'}
model_info['l13_conv2'] = {'arg': 'ch_l13conv2'}
model_info['l13_bn2'] = {'arg': 'ch_l13conv2'}
model_info['l14_conv1'] = {'arg': 'ch_l14conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2']}
model_info['l14_bn1'] = {'arg': 'ch_l14conv1'}
model_info['l14_conv2'] = {'arg': 'ch_l14conv2'}
model_info['l14_bn2'] = {'arg': 'ch_l14conv2'}
model_info['l15_conv1'] = {'arg': 'ch_l15conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2']}
model_info['l15_bn1'] = {'arg': 'ch_l15conv1'}
model_info['l15_conv2'] = {'arg': 'ch_l15conv2'}
model_info['l15_bn2'] = {'arg': 'ch_l15conv2'}
model_info['l16_conv1'] = {'arg': 'ch_l16conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2']}
model_info['l16_bn1'] = {'arg': 'ch_l16conv1'}
model_info['l16_conv2'] = {'arg': 'ch_l16conv2'}
model_info['l16_bn2'] = {'arg': 'ch_l16conv2'}
model_info['l17_conv1'] = {'arg': 'ch_l17conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2']}
model_info['l17_bn1'] = {'arg': 'ch_l17conv1'}
model_info['l17_conv2'] = {'arg': 'ch_l17conv2'}
model_info['l17_bn2'] = {'arg': 'ch_l17conv2'}
model_info['l18_conv1'] = {'arg': 'ch_l18conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2']}
model_info['l18_bn1'] = {'arg': 'ch_l18conv1'}
model_info['l18_conv2'] = {'arg': 'ch_l18conv2'}
model_info['l18_bn2'] = {'arg': 'ch_l18conv2'}
model_info['l19_conv1'] = {'arg': 'ch_l19conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2']}
model_info['l19_bn1'] = {'arg': 'ch_l19conv1'}
model_info['l19_conv2'] = {'arg': 'ch_l19conv2'}
model_info['l19_bn2'] = {'arg': 'ch_l19conv2'}
model_info['l110_conv1'] = {'arg': 'ch_l110conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2']}
model_info['l110_bn1'] = {'arg': 'ch_l110conv1'}
model_info['l110_conv2'] = {'arg': 'ch_l110conv2'}
model_info['l110_bn2'] = {'arg': 'ch_l110conv2'}
model_info['l111_conv1'] = {'arg': 'ch_l111conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2']}
model_info['l111_bn1'] = {'arg': 'ch_l111conv1'}
model_info['l111_conv2'] = {'arg': 'ch_l111conv2'}
model_info['l111_bn2'] = {'arg': 'ch_l111conv2'}
model_info['l112_conv1'] = {'arg': 'ch_l112conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2']}
model_info['l112_bn1'] = {'arg': 'ch_l112conv1'}
model_info['l112_conv2'] = {'arg': 'ch_l112conv2'}
model_info['l112_bn2'] = {'arg': 'ch_l112conv2'}
model_info['l113_conv1'] = {'arg': 'ch_l113conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2']}
model_info['l113_bn1'] = {'arg': 'ch_l113conv1'}
model_info['l113_conv2'] = {'arg': 'ch_l113conv2'}
model_info['l113_bn2'] = {'arg': 'ch_l113conv2'}
model_info['l114_conv1'] = {'arg': 'ch_l114conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2']}
model_info['l114_bn1'] = {'arg': 'ch_l114conv1'}
model_info['l114_conv2'] = {'arg': 'ch_l114conv2'}
model_info['l114_bn2'] = {'arg': 'ch_l114conv2'}
model_info['l115_conv1'] = {'arg': 'ch_l115conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2']}
model_info['l115_bn1'] = {'arg': 'ch_l115conv1'}
model_info['l115_conv2'] = {'arg': 'ch_l115conv2'}
model_info['l115_bn2'] = {'arg': 'ch_l115conv2'}
model_info['l116_conv1'] = {'arg': 'ch_l116conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2']}
model_info['l116_bn1'] = {'arg': 'ch_l116conv1'}
model_info['l116_conv2'] = {'arg': 'ch_l116conv2'}
model_info['l116_bn2'] = {'arg': 'ch_l116conv2'}
model_info['l117_conv1'] = {'arg': 'ch_l117conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2','l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2']}
model_info['l117_bn1'] = {'arg': 'ch_l117conv1'}
model_info['l117_conv2'] = {'arg': 'ch_l117conv2'}
model_info['l117_bn2'] = {'arg': 'ch_l117conv2'}
model_info['l20_conv1'] = {'arg': 'ch_l20conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2']}
model_info['l20_bn1'] = {'arg': 'ch_l20conv1'}
model_info['l20_conv2'] = {'arg': 'ch_l20conv2'}
model_info['l20_bn2'] = {'arg': 'ch_l20conv2'}
model_info['l21_conv1'] = {'arg': 'ch_l21conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2']}
model_info['l21_bn1'] = {'arg': 'ch_l21conv1'}
model_info['l21_conv2'] = {'arg': 'ch_l21conv2'}
model_info['l21_bn2'] = {'arg': 'ch_l21conv2'}
model_info['l22_conv1'] = {'arg': 'ch_l22conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2']}
model_info['l22_bn1'] = {'arg': 'ch_l22conv1'}
model_info['l22_conv2'] = {'arg': 'ch_l22conv2'}
model_info['l22_bn2'] = {'arg': 'ch_l22conv2'}
model_info['l23_conv1'] = {'arg': 'ch_l23conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2']}
model_info['l23_bn1'] = {'arg': 'ch_l23conv1'}
model_info['l23_conv2'] = {'arg': 'ch_l23conv2'}
model_info['l23_bn2'] = {'arg': 'ch_l23conv2'}
model_info['l24_conv1'] = {'arg': 'ch_l24conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2']}
model_info['l24_bn1'] = {'arg': 'ch_l24conv1'}
model_info['l24_conv2'] = {'arg': 'ch_l24conv2'}
model_info['l24_bn2'] = {'arg': 'ch_l24conv2'}
model_info['l25_conv1'] = {'arg': 'ch_l25conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2']}
model_info['l25_bn1'] = {'arg': 'ch_l25conv1'}
model_info['l25_conv2'] = {'arg': 'ch_l25conv2'}
model_info['l25_bn2'] = {'arg': 'ch_l25conv2'}
model_info['l26_conv1'] = {'arg': 'ch_l26conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2']}
model_info['l26_bn1'] = {'arg': 'ch_l26conv1'}
model_info['l26_conv2'] = {'arg': 'ch_l26conv2'}
model_info['l26_bn2'] = {'arg': 'ch_l26conv2'}
model_info['l27_conv1'] = {'arg': 'ch_l27conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2']}
model_info['l27_bn1'] = {'arg': 'ch_l27conv1'}
model_info['l27_conv2'] = {'arg': 'ch_l27conv2'}
model_info['l27_bn2'] = {'arg': 'ch_l27conv2'}
model_info['l28_conv1'] = {'arg': 'ch_l28conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2']}
model_info['l28_bn1'] = {'arg': 'ch_l28conv1'}
model_info['l28_conv2'] = {'arg': 'ch_l28conv2'}
model_info['l28_bn2'] = {'arg': 'ch_l28conv2'}
model_info['l29_conv1'] = {'arg': 'ch_l29conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2']}
model_info['l29_bn1'] = {'arg': 'ch_l29conv1'}
model_info['l29_conv2'] = {'arg': 'ch_l29conv2'}
model_info['l29_bn2'] = {'arg': 'ch_l29conv2'}
model_info['l210_conv1'] = {'arg': 'ch_l210conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2']}
model_info['l210_bn1'] = {'arg': 'ch_l210conv1'}
model_info['l210_conv2'] = {'arg': 'ch_l210conv2'}
model_info['l210_bn2'] = {'arg': 'ch_l210conv2'}
model_info['l211_conv1'] = {'arg': 'ch_l211conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2','l210_bn2']}
model_info['l211_bn1'] = {'arg': 'ch_l211conv1'}
model_info['l211_conv2'] = {'arg': 'ch_l211conv2'}
model_info['l211_bn2'] = {'arg': 'ch_l211conv2'}
model_info['l212_conv1'] = {'arg': 'ch_l212conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2','l210_bn2','l211_bn2']}
model_info['l212_bn1'] = {'arg': 'ch_l212conv1'}
model_info['l212_conv2'] = {'arg': 'ch_l212conv2'}
model_info['l212_bn2'] = {'arg': 'ch_l212conv2'}
model_info['l213_conv1'] = {'arg': 'ch_l213conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2','l210_bn2','l211_bn2','l212_bn2']}
model_info['l213_bn1'] = {'arg': 'ch_l213conv1'}
model_info['l213_conv2'] = {'arg': 'ch_l213conv2'}
model_info['l213_bn2'] = {'arg': 'ch_l213conv2'}
model_info['l214_conv1'] = {'arg': 'ch_l214conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2','l210_bn2','l211_bn2','l212_bn2','l213_bn2']}
model_info['l214_bn1'] = {'arg': 'ch_l214conv1'}
model_info['l214_conv2'] = {'arg': 'ch_l214conv2'}
model_info['l214_bn2'] = {'arg': 'ch_l214conv2'}
model_info['l215_conv1'] = {'arg': 'ch_l215conv1', 'prev': ['bn1', 'l10_bn2', 'l11_bn2', 'l12_bn2', 'l13_bn2', 'l14_bn2', 'l15_bn2', 'l16_bn2', 'l17_bn2', 'l18_bn2',
'l19_bn2','l110_bn2','l111_bn2','l112_bn2','l113_bn2','l114_bn2','l115_bn2','l116_bn2','l117_bn2',
'l20_bn2', 'l21_bn2', 'l22_bn2', 'l23_bn2', 'l24_bn2', 'l25_bn2', 'l26_bn2', 'l27_bn2','l28_bn2',
'l29_bn2','l210_bn2','l211_bn2','l212_bn2','l213_bn2','l214_bn2']}
    model_info['l215_bn1'] = {'arg': 'ch_l215conv1'}
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Commonly used utility functions."""
# mainly backports from future numpy here
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import numpy as np
import networkx as nx
from scipy.stats import spearmanr
from .utilities import tril_indices, triu_indices, convert_to_graph, \
fill_diagonal
def adj_static(ts, measure='corr', pval=False, TR=2, fq_l=None, fq_u=None,
order=1, scale=2, w=None, idp=None, excl_zero_cov=False):
"""returns a *static* graph representation (adjacency matrix)
Parameters
----------
ts : ndarray, shape(n_rois, n_tps)
Pre-processed timeseries information
measure : string
Similarity measure for the adjacency matrix calculation.
* ``corr``: Pearson product momement correlation coefficient [-1,1].
* ``cov``: Covariance.
* ``coh``: Coherence [0,1]
* ``delay``: Coherence phase delay estimates. The metric was used in
[3]_ and [4]_
* ``granger``: Granger-causality. As suggested by [3]_ and [4]_ this
returns the difference `F(x-->y)` and `F(y-->x)`.
* ``wtc``: Correlation of the wavelet coefficients [-1,1]. The metric
was used in [1]_
* ``pcorr``: Partial correlation. Calculated using the inverse
covariance matrix (precision matrix) [0,1]
* ``pcoh``: Partial coherence in the range [0,1]. The metric was used
in [2]_ #not impl yet
* ``spcoh`` : Semi-partial coherence in the range [0,1]. The metric was
used in [7]_. The correlation between two time-series is conditioned
on a third time-series given by ``idp``.
* ``ktau``: Kendall's tau, a correlation measure for ordinal data.
* ``rho``: Spearman rank-order correlation coefficient rho [-1,1]. This
is a nonparametric measure of the linear relationship between two
datasets. Unlike e.g. the Pearson correlation, the Spearman
correlation does not assume that both datasets are normally
distributed.
        * ``mic``: Maximal information coefficient [0,1]
* ``non_linearity``: Non-linearity of the relationship [0,1]
* ``mi``: Mutual information. The metric was used in [5]_
* ``nmi``: Normalized mutual information [0,1]
* ``ami``: Adjusted mutual information [0,1]
* ``cmi`` Conditional mutual information. The metric was used in [6]_
# not impl yet
* ``graph_lasso``: Sparse inverse covariance matrix estimation with l1
penalization using the GraphLasso. The connection of two nodes is
estimated by conditioning on all other nodes [-1,1].
* ``ledoit_wolf``: Sparse inverse covariance matrix estimation with l2
shrinkage using Ledoit-Wolf [-1,1].
* ``dcorr``: Distance correlation [0,1]. This metric can capture
non-linear relationships.
* ``dcov``: Distance covariance.
* ``eu``: Euclidean distance.
pval : boolean, optional
return p-values, only available for ``corr``, ``wtc``, ``ktau``,``rho``
and ``mic`` so far (default=False).
TR : float
Time to repeat: the sampling interval (only for ``coh``, ``pcoh``,
``delay`` and ``granger``).
fq_l : float
lower frequency bound (only for ``coh``, ``pcoh``, ``delay`` and
``granger``).
fq_u : float
upper frequency bound (only for ``coh``, ``pcoh``, ``delay`` and
``granger``).
order : integer
time-lag (only for ``measure='granger'``)
scale : integer [1,2]
Wavelet scale (only for ``measure='wtc'``)
w : pywt.wavelet object
default is pywt.Wavelet('db4')
idp : integer
Index of the timeseries to condition the semi-partial coherence on
(only if ``measure='spcoh'``)
excl_zero_cov : boolean (default: False)
Automatically exclude node timeseries with zero covariance. Values in
the adjacency matrix are set to zero.
Returns
-------
A : ndarray, shape(n_rois, n_rois)
Adjacency matrix of the graph.
P : ndarray, shape(n_rois, n_rois)
Statistical p-values (2-tailed) for the similarity measure. Only if
``pval=True``
Notes
-----
The calculation runs faster if ``pval=False`` (default). The diagonal is
always zero.
See Also
--------
adj_dynamic: for a dynamic graph representation/adjacency matrix
nt.timeseries.utils.cross_correlation_matrix: cross-correlation matrix
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., Carlson,
<NAME>., & <NAME>. (2011). Dynamic reconfiguration of human
brain networks during learning. Proceedings of the National Academy
of Sciences, 108(18), 7641–7646. doi:10.1073/pnas.1018985108
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>. (2005).
Undirected graphs of frequency-dependent functional connectivity in
whole brain networks. Philosophical transactions of the Royal
Society of London Series B, Biological sciences, 360(1457), 937–946.
doi:10.1098/rstb.2005.1645
.. [3] <NAME>., <NAME>., & <NAME>. (2009). A comparison of
Granger causality and coherency in fMRI-based analysis of the motor
system. Human Brain Mapping,30(11), 3475–3494. doi:10.1002/hbm.20771
.. [4] <NAME>., <NAME>., & <NAME>. (2005). Mapping directed
influence over the brain using Granger causality and fMRI.
NeuroImage, 25(1), 230–242. doi:10.1016/j.neuroimage.2004.11.017
.. [5] <NAME>., <NAME>., & <NAME>. (2010). Cortical hubs form
a module for multisensory integration on top of the hierarchy of
cortical networks. Frontiers in neuroinformatics, 4.
.. [6] <NAME>., <NAME>., <NAME>., <NAME>., &
<NAME>. (2010). Conditional Mutual Information Maps as
Descriptors of Net Connectivity Levels in the Brain. Frontiers in
neuroinformatics, 4. doi:10.3389/fninf.2010.00115
.. [7] <NAME>., <NAME>., & <NAME>. (2004). Measuring
interregional functional connectivity using coherence and partial
coherence analyses of fMRI data. NeuroImage, 21(2), 647–658.
doi:10.1016/j.neuroimage.2003.09.056
Examples
--------
>>> data = get_fmri_data()
>>> d = percent_signal_change(data) # normalize data
    >>> print(data.shape)
(31, 250) # 31 nodes and 250 time-points
>>> # adjacency matrix based on correlation metric
>>> A = adj_static(data, measure='corr')
    >>> print(A.shape)
(31, 31) # node-wise connectivity matrix
>>> # get adjacency matrix and p-values
>>> A, P = adj_static(data, measure='corr', pval=True)
    >>> print(P.shape)
(31, 31) # p-value for every edge in the adjacency matrix
"""
data = deepcopy(ts)
n_channel = data.shape[0]
# n_tps = data.shape[1]
    # TODO think about option to automatically exclude zero covariance nodes
# especially important for granger
n_nodes = data.shape[0]
if excl_zero_cov:
# test for zero covariance to exclude
std = np.std(data, axis=1)
idx = np.where(std != 0.0)[0]
data = data[idx, :]
# this performs just the wavelet transformation, the correlation part
# is identical to measure='corr'
# if measure == 'wtc':
# data = wavelet_transform(data, w=w, scale=scale)
# measure = 'corr' # perform correlation of wavelet transformed ts
if measure == 'corr':
# correlation = np.dot(x,y)/(np.dot(x,x) * np.dot(y,y))
ADJ = np.corrcoef(data)
if pval:
# ADJ = np.zeros((data.shape[0], data.shape[0]))
# P = np.zeros((data.shape[0], data.shape[0]))
#
# idx = tril_indices(data.shape[0], -1)
# ADJ[idx] = -99 # save some running time by calculating only
# for i in range(data.shape[0]): # the lower traingle
# for j in range(data.shape[0]):
# if ADJ[i,j] == -99:
# ADJ[i,j], P[i,j] = pearsonr(data[i,:], data[j,:])
#
# ADJ = ADJ + ADJ.T
# P = P + P.T
# P = pearsonr_2pval(ADJ, n=n_tps)
# fill_diagonal(P,1)
P = np.ones((n_channel, n_channel))
elif measure == 'cov':
# d = data.copy()
# mean = d.mean(axis=1)
# std = d.std(axis=1)
# d -= mean.reshape(mean.shape[0], 1)
# d /= std.reshape(mean.shape[0], 1)
ADJ = np.cov(data)
elif measure == 'pcorr':
# data needs to be normalized?!
# inv vs. pinv vs pinv2:
# http://blog.vene.ro/2012/08/18/inverses-pseudoinverses-numerical
# issues-speed-symmetry/
ADJ = np.linalg.inv(np.cov(data)) # or pinv?
d = 1 / np.sqrt(np.diag(ADJ))
ADJ *= d
ADJ *= d[:, np.newaxis]
# TODO: this might be much faster
# from scipy.linalg.lapack import get_lapack_funcs
# getri, getrf = get_lapack_funcs(('getri', 'getrf'),
# (np.empty((), dtype=np.float64),
# np.empty((), dtype=np.float64)))
#
# covariance = np.cov(data)
# lu, piv, _ = getrf(np.dot(covariance.T, covariance), True)
# precision, _ = getri(lu, piv, overwrite_lu=True)
# precision = np.dot(covariance, precision)
# elif measure == 'ktau':
#
# ADJ = np.zeros((data.shape[0], data.shape[0]))
# P = np.zeros((data.shape[0], data.shape[0]))
#
# idx = tril_indices(data.shape[0], -1)
# ADJ[idx] = -99
# for i in range(data.shape[0]):
# for j in range(data.shape[0]):
# if ADJ[i, j] == -99:
# ADJ[i, j], P[i, j] = kendalltau(data[i,:], data[j,:])
ADJ = ADJ + ADJ.T
# P = P + P.T
# fill_diagonal(P, 1)
elif measure == 'rho':
ADJ = np.zeros((data.shape[0], data.shape[0]))
P = np.zeros((data.shape[0], data.shape[0]))
idx = tril_indices(data.shape[0], -1)
# save some running time by calculating only the lower triangle
ADJ[idx] = -99
for i in range(data.shape[0]):
for j in range(data.shape[0]):
if ADJ[i, j] == -99:
ADJ[i, j], P[i, j] = spearmanr(data[i, :], data[j, :])
ADJ = ADJ + ADJ.T
P = P + P.T
fill_diagonal(P, 1)
elif measure == 'coh':
from nitime import TimeSeries
from nitime.analysis.coherence import CoherenceAnalyzer
T = TimeSeries(data, sampling_interval=TR)
# Initialize the coherence analyzer
C = CoherenceAnalyzer(T)
COH = C.coherence
# remove Nan's
COH[np.isnan(COH)] = 0.
freq_idx = np.where((C.frequencies > fq_l) * (C.frequencies < fq_u))[0]
# averaging over the last dimension (=frequency)
ADJ = np.mean(COH[:, :, freq_idx], -1)
if excl_zero_cov:
# re-embed the reduced matrix into the full-size matrix; excluded
# zero-covariance nodes keep zero connectivity
ADJ_full = np.zeros((n_nodes, n_nodes))
ADJ_full[np.ix_(idx, idx)] = ADJ
ADJ = ADJ_full
fill_diagonal(ADJ, 0)
ADJ[np.isnan(ADJ)] = 0.
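# --- Illustrative sketch (not part of the original module) -----------------
# The 'pcorr' branch above scales the precision matrix (inverse covariance)
# by one over the square root of its diagonal. A minimal, self-contained
# version of that step, assuming only NumPy and synthetic data (all names
# below are illustrative, not taken from this module):
import numpy as np

def precision_based_connectivity(ts):
    """Scaled precision matrix of a (n_nodes, n_timepoints) array."""
    precision = np.linalg.inv(np.cov(ts))  # np.linalg.pinv is safer if np.cov(ts) is singular
    d = 1.0 / np.sqrt(np.diag(precision))
    # NOTE: the textbook partial correlation additionally negates the
    # off-diagonal entries (-P_ij / sqrt(P_ii * P_jj)); the branch above
    # omits that sign flip.
    return precision * d * d[:, np.newaxis]

# e.g. precision_based_connectivity(np.random.rand(31, 250)).shape -> (31, 31)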
import abc
from collections import OrderedDict
from functools import reduce
from operator import mul
from cached_property import cached_property
from sympy import Expr
from devito.ir.support.vector import Vector, vmin, vmax
from devito.tools import (PartialOrderTuple, as_list, as_tuple, filter_ordered,
frozendict, toposort, is_integer)
from devito.types import Dimension
__all__ = ['NullInterval', 'Interval', 'IntervalGroup', 'IterationSpace', 'DataSpace',
'Forward', 'Backward', 'Any']
class AbstractInterval(object):
"""
An abstract representation of an iterated closed interval on Z.
"""
__metaclass__ = abc.ABCMeta
is_Null = False
is_Defined = False
def __init__(self, dim, stamp=0):
self.dim = dim
self.stamp = stamp
def __eq__(self, o):
return (type(self) == type(o) and
self.dim is o.dim and
self.stamp == o.stamp)
is_compatible = __eq__
def __hash__(self):
return hash(self.dim.name)
@abc.abstractmethod
def _rebuild(self):
return
@abc.abstractproperty
def relaxed(self):
return
def intersection(self, o):
return self._rebuild()
@abc.abstractmethod
def union(self, o):
return self._rebuild()
def add(self, o):
return self._rebuild()
subtract = add
def negate(self):
return self._rebuild()
zero = negate
flip = negate
lift = negate
reset = negate
switch = negate
translate = negate
class NullInterval(AbstractInterval):
"""
A degenerate iterated closed interval on Z.
"""
is_Null = True
def __repr__(self):
return "%s[Null]<%d>" % (self.dim, self.stamp)
def __hash__(self):
return hash(self.dim)
def _rebuild(self):
return NullInterval(self.dim, self.stamp)
@property
def relaxed(self):
return NullInterval(self.dim.root, self.stamp)
def union(self, o):
if self.dim is o.dim:
return o._rebuild()
else:
raise ValueError("Cannot compute union of Intervals over "
"different Dimensions")
def switch(self, d):
return NullInterval(d, self.stamp)
class Interval(AbstractInterval):
"""
Interval(dim, lower, upper)
A concrete iterated closed interval on Z.
An Interval defines the compact region
``[dim.symbolic_min + lower, dim.symbolic_max + upper]``
The size of the Interval is defined as the number of points iterated over
through ``dim``, namely
``(dim.symbolic_max + upper - dim.symbolic_min - lower + 1) / dim.symbolic_incr``
"""
is_Defined = True
def __init__(self, dim, lower, upper, stamp=0):
assert is_integer(lower) or isinstance(lower, Expr)
assert is_integer(upper) or isinstance(upper, Expr)
super(Interval, self).__init__(dim, stamp)
self.lower = lower
self.upper = upper
def __repr__(self):
return "%s[%s,%s]<%d>" % (self.dim, self.lower, self.upper, self.stamp)
def __hash__(self):
return hash((self.dim, self.offsets))
def __eq__(self, o):
if self is o:
return True
return (super(Interval, self).__eq__(o) and
self.lower == o.lower and
self.upper == o.upper)
def _rebuild(self):
return Interval(self.dim, self.lower, self.upper, self.stamp)
@cached_property
def size(self):
upper_extreme = self.dim.symbolic_max + self.upper
lower_extreme = self.dim.symbolic_min + self.lower
return (upper_extreme - lower_extreme + 1) / self.dim.symbolic_incr
@property
def relaxed(self):
return Interval(self.dim.root, self.lower, self.upper, self.stamp)
@property
def offsets(self):
return (self.lower, self.upper)
def intersection(self, o):
if self.is_compatible(o):
svl, svu = Vector(self.lower, smart=True), Vector(self.upper, smart=True)
ovl, ovu = Vector(o.lower, smart=True), Vector(o.upper, smart=True)
return Interval(self.dim, vmax(svl, ovl)[0], vmin(svu, ovu)[0], self.stamp)
else:
return NullInterval(self.dim)
def union(self, o):
if o.is_Null and self.dim is o.dim:
return self._rebuild()
elif self.is_compatible(o):
svl, svu = Vector(self.lower, smart=True), Vector(self.upper, smart=True)
ovl, ovu = Vector(o.lower, smart=True), Vector(o.upper, smart=True)
return Interval(self.dim, vmin(svl, ovl)[0], vmax(svu, ovu)[0], self.stamp)
else:
raise ValueError("Cannot compute union of non-compatible Intervals (%s, %s)" %
(self, o))
def add(self, o):
if not self.is_compatible(o):
return self._rebuild()
else:
return Interval(self.dim, self.lower + o.lower, self.upper + o.upper,
self.stamp)
def subtract(self, o):
if not self.is_compatible(o):
return self._rebuild()
else:
return Interval(self.dim, self.lower - o.lower, self.upper - o.upper,
self.stamp)
def negate(self):
return Interval(self.dim, -self.lower, -self.upper, self.stamp)
def zero(self):
return Interval(self.dim, 0, 0, self.stamp)
def flip(self):
return Interval(self.dim, self.upper, self.lower, self.stamp)
def lift(self, v=None):
if v is None:
v = self.stamp + 1
return Interval(self.dim, self.lower, self.upper, v)
def reset(self):
return Interval(self.dim, self.lower, self.upper, 0)
def switch(self, d):
return Interval(d, self.lower, self.upper, self.stamp)
def translate(self, v):
return Interval(self.dim, self.lower + v, self.upper + v, self.stamp)
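# --- Illustrative sketch (not part of the original module) -----------------
# How the Interval operations above compose, shown doctest-style (assumes a
# working devito installation; the expected values follow directly from the
# definitions of intersection/union/add/translate):
#
#     >>> from devito import dimensions
#     >>> x, y, z = dimensions('x y z')
#     >>> i0, i1 = Interval(x, 1, -1), Interval(x, 2, -2)
#     >>> i0.intersection(i1)   # tightest common region
#     x[2,-2]<0>
#     >>> i0.union(i1)          # loosest covering region
#     x[1,-1]<0>
#     >>> i0.add(i1)            # offsets are summed component-wise
#     x[3,-3]<0>
#     >>> i0.translate(2)       # both extremes shifted by the same amount
#     x[3,1]<0>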
class IntervalGroup(PartialOrderTuple):
"""
A partially-ordered sequence of Intervals equipped with set-like
operations.
"""
@classmethod
def reorder(cls, items, relations):
if not all(isinstance(i, AbstractInterval) for i in items):
raise ValueError("Cannot create an IntervalGroup from objects of type [%s]" %
', '.join(str(type(i)) for i in items))
# The relations are between dimensions, not intervals. So we take
# care of that here
ordering = filter_ordered(toposort(relations) + [i.dim for i in items])
return sorted(items, key=lambda i: ordering.index(i.dim))
def __eq__(self, o):
# No need to look at the relations -- if the partial ordering is the same,
# then the IntervalGroups are considered equal
return len(self) == len(o) and all(i == j for i, j in zip(self, o))
def __contains__(self, d):
return any(i.dim is d for i in self)
def __hash__(self):
return hash(tuple(self))
def __repr__(self):
return "IntervalGroup[%s]" % (', '.join([repr(i) for i in self]))
@cached_property
def dimensions(self):
return filter_ordered([i.dim for i in self])
@property
def size(self):
if self:
return reduce(mul, [i.size for i in self])
else:
return 0
@cached_property
def is_well_defined(self):
"""
True if all Intervals are over different Dimensions,
False otherwise.
"""
return len(self.dimensions) == len(set(self.dimensions))
@classmethod
def generate(self, op, *interval_groups):
"""
Create a new IntervalGroup from the iterative application of an
operation to some IntervalGroups.
Parameters
----------
op : str
Any legal Interval operation, such as 'intersection' or
'union'.
*interval_groups
Input IntervalGroups.
Examples
--------
>>> from devito import dimensions
>>> x, y, z = dimensions('x y z')
>>> ig0 = IntervalGroup([Interval(x, 1, -1)])
>>> ig1 = IntervalGroup([Interval(x, 2, -2), Interval(y, 3, -3)])
>>> ig2 = IntervalGroup([Interval(y, 2, -2), Interval(z, 1, -1)])
>>> IntervalGroup.generate('intersection', ig0, ig1, ig2)
IntervalGroup[x[2,-2]<0>, y[3,-3]<0>, z[1,-1]<0>]
"""
mapper = {}
for ig in interval_groups:
for i in ig:
mapper.setdefault(i.dim, []).append(i)
intervals = []
for v in mapper.values():
# Create a new Interval through the concatenation v0.key(v1).key(v2)...
interval = v[0]
for i in v[1:]:
interval = getattr(interval, op)(i)
intervals.append(interval)
relations = set().union(*[ig.relations for ig in interval_groups])
return IntervalGroup(intervals, relations=relations)
@cached_property
def relaxed(self):
return IntervalGroup.generate('union', IntervalGroup(i.relaxed for i in self))
def is_compatible(self, o):
"""
Two IntervalGroups are compatible iff they can be ordered according
to some common partial ordering.
"""
if set(self) != set(o):
return False
if all(i == j for i, j in zip(self, o)):
# Same input ordering, definitely compatible
return True
try:
self.add(o)
return True
except ValueError:
# Cyclic dependence detected, there is no common partial ordering
return False
def _normalize(func):
"""
A simple decorator to normalize the input of operator methods that
expect an IntervalGroup as an operand.
"""
def wrapper(self, o):
if not isinstance(o, IntervalGroup):
o = IntervalGroup(as_tuple(o))
return func(self, o)
return wrapper
@_normalize
def intersection(self, o):
mapper = OrderedDict([(i.dim, i) for i in o])
intervals = [i.intersection(mapper.get(i.dim, i)) for i in self]
return IntervalGroup(intervals, relations=(self.relations | o.relations))
@_normalize
def add(self, o):
mapper = OrderedDict([(i.dim, i) for i in o])
intervals = [i.add(mapper.get(i.dim, NullInterval(i.dim))) for i in self]
return IntervalGroup(intervals, relations=(self.relations | o.relations))
@_normalize
def subtract(self, o):
mapper = OrderedDict([(i.dim, i) for i in o])
intervals = [i.subtract(mapper.get(i.dim, NullInterval(i.dim))) for i in self]
return IntervalGroup(intervals, relations=(self.relations | o.relations))
def drop(self, d):
# Drop all Intervals over the given Dimension(s), taking derived Dimensions into account
dims = set().union(*[i._defines for i in as_tuple(d)])
intervals = [i._rebuild() for i in self if not i.dim._defines & dims]
# Clean up relations
relations = [tuple(i for i in r if i in intervals) for r in self.relations]
return IntervalGroup(intervals, relations=relations)
def negate(self):
return IntervalGroup([i.negate() for i in self], relations=self.relations)
def zero(self, d=None):
d = self.dimensions if d is None else as_tuple(d)
return IntervalGroup([i.zero() if i.dim in d else i for i in self],
relations=self.relations)
def lift(self, d, v=None):
d = set(self.dimensions if d is None else as_tuple(d))
return IntervalGroup([i.lift(v) if i.dim._defines & d else i for i in self],
relations=self.relations)
def reset(self):
return IntervalGroup([i.reset() for i in self], relations=self.relations)
def __getitem__(self, key):
if is_integer(key):
return super(IntervalGroup, self).__getitem__(key)
elif isinstance(key, slice):
retval = super(IntervalGroup, self).__getitem__(key)
return IntervalGroup(retval, relations=self.relations)
if not self.is_well_defined:
raise ValueError("Cannot fetch Interval from ill defined Space")
if not isinstance(key, Dimension):
return NullInterval(key)
for i in self:
if i.dim is key:
return i
if key.is_NonlinearDerived and i.dim is key.parent:
# NonlinearDerived Dimensions cannot appear in iteration Intervals,
# but their parent can
return i
return NullInterval(key)
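# --- Illustrative sketch (not part of the original module) -----------------
# IntervalGroup.__getitem__ accepts integers, slices and Dimensions; a
# Dimension that is not covered falls back to a NullInterval (doctest-style,
# assumes a working devito installation):
#
#     >>> from devito import dimensions
#     >>> x, y, z = dimensions('x y z')
#     >>> ig = IntervalGroup([Interval(x, 1, -1), Interval(y, 2, -2)])
#     >>> ig[0]
#     x[1,-1]<0>
#     >>> ig[y]
#     y[2,-2]<0>
#     >>> ig[z]
#     z[Null]<0>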
class IterationDirection(object):
"""
A representation of the direction in which an iteration space is traversed.
"""
def __init__(self, name):
self._name = name
def __eq__(self, other):
return isinstance(other, IterationDirection) and self._name == other._name
def __repr__(self):
return self._name
def __hash__(self):
return hash(self._name)
Forward = IterationDirection('++')
"""Forward iteration direction ('++')."""
Backward = IterationDirection('--')
"""Backward iteration direction ('--')."""
Any = IterationDirection('*')
"""Wildcard direction (both '++' and | |
)
runcmd("brew", "config")
runcmd("brew", "doctor")
raise
### TODO: Handle variations in this path (Is it "$(brew --prefix)/bin"?)
log.debug("Installed program directory: /usr/local/bin")
return Path("/usr/local/bin")
def assert_supported_system(self) -> None:
if shutil.which("brew") is None:
raise MethodNotSupportedError("brew command not found")
@DataladComponent.register_installer
class PipInstaller(Installer):
"""
Installs via pip, either at the system level or into a given virtual
environment
"""
NAME = "pip"
OPTIONS = [
Option("--devel", is_flag=True, help="Install from GitHub repository"),
Option("-E", "--extras", metavar="EXTRAS", help="Install package extras"),
EXTRA_ARGS_OPTION,
]
PACKAGES = {
"datalad": ("datalad", ["datalad"]),
}
DEVEL_PACKAGES = {
"datalad": "git+https://github.com/datalad/datalad.git",
}
def __init__(
self, manager: DataladInstaller, venv_path: Optional[Path] = None
) -> None:
super().__init__(manager)
#: The path to the virtual environment in which to install, or `None`
#: if installation should be done at the system level
self.venv_path: Optional[Path] = venv_path
@property
def python(self) -> Union[str, Path]:
if self.venv_path is None:
return sys.executable
elif ON_WINDOWS:
return self.venv_path / "Scripts" / "python.exe"
else:
return self.venv_path / "bin" / "python"
def install_package(
self,
package: str,
version: Optional[str] = None,
devel: bool = False,
extras: Optional[str] = None,
extra_args: Optional[List[str]] = None,
**kwargs: Any,
) -> Path:
log.info("Installing %s via pip", package)
log.info("Venv path: %s", self.venv_path)
log.info("Version: %s", version)
log.info("Devel: %s", devel)
log.info("Extras: %s", extras)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
urlspec: Optional[str]
if devel:
try:
urlspec = self.DEVEL_PACKAGES[package]
except KeyError:
raise ValueError(f"No source repository known for {package}")
else:
urlspec = None
cmd = [self.python, "-m", "pip", "install"]
if extra_args is not None:
cmd.extend(extra_args)
cmd.append(
compose_pip_requirement(
package, version=version, urlspec=urlspec, extras=extras
)
)
runcmd(*cmd)
user = extra_args is not None and "--user" in extra_args
with tempfile.NamedTemporaryFile("w+", delete=False) as script:
# Passing this code to Python with `input` doesn't work for some
# reason, so we need to save it as a script instead.
print(
"try:\n"
" from pip._internal.locations import get_scheme\n"
f" path = get_scheme({package!r}, user={user!r}).scripts\n"
"except ImportError:\n"
" from pip._internal.locations import distutils_scheme\n"
f" path = distutils_scheme({package!r}, user={user!r})['scripts']\n"
"print(path, end='')\n",
file=script,
flush=True,
)
# We need to close before passing to Python for Windows
# compatibility
script.close()
binpath = Path(readcmd(self.python, script.name))
os.unlink(script.name)
log.debug("Installed program directory: %s", binpath)
return binpath
def assert_supported_system(self) -> None:
### TODO: Detect whether pip is installed in the current Python,
### preferably without importing it
pass
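# --- Illustrative sketch (not part of the original script) -----------------
# PipInstaller.install_package() above delegates the requirement string to
# compose_pip_requirement(), defined elsewhere in this script. The PEP 508
# shapes involved look roughly like this (hypothetical helper, shown only to
# make the version/urlspec/extras inputs concrete; the real function may
# format things differently):
def sketch_pip_requirement(
    package: str,
    version: Optional[str] = None,
    urlspec: Optional[str] = None,
    extras: Optional[str] = None,
) -> str:
    req = package
    if extras is not None:
        req += f"[{extras}]"  # e.g. "datalad[full]"
    if urlspec is not None:
        req += f" @ {urlspec}"  # direct reference, e.g. a git+https URL
    elif version is not None:
        req += f"=={version}"  # pinned release
    return req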
@GitAnnexComponent.register_installer
class NeurodebianInstaller(AptInstaller):
"""Installs via apt-get and the NeuroDebian repositories"""
NAME = "neurodebian"
PACKAGES = {
"git-annex": ("git-annex-standalone", ["git-annex"]),
}
def assert_supported_system(self) -> None:
super().assert_supported_system()
if "l=NeuroDebian" not in readcmd("apt-cache", "policy"):
raise MethodNotSupportedError("Neurodebian not configured")
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class DebURLInstaller(Installer):
"""Installs a ``*.deb`` package by URL"""
NAME = "deb-url"
OPTIONS = [
Option("--url", metavar="URL", help="URL from which to download `*.deb` file"),
Option(
"--install-dir",
converter=Path,
metavar="DIR",
help="Directory in which to unpack the `*.deb`",
),
EXTRA_ARGS_OPTION,
]
PACKAGES = {
"git-annex": ("git-annex", ["git-annex"]),
"datalad": ("datalad", ["datalad"]),
}
def install_package(
self,
package: str,
url: Optional[str] = None,
install_dir: Optional[Path] = None,
extra_args: Optional[List[str]] = None,
**kwargs: Any,
) -> Path:
log.info("Installing %s via deb-url", package)
if url is None:
raise RuntimeError("deb-url method requires URL")
log.info("URL: %s", url)
if install_dir is not None:
if package != "git-annex":
raise RuntimeError("--install-dir is only supported for git-annex")
install_dir = untmppath(install_dir)
log.info("Install dir: %s", install_dir)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
with tempfile.TemporaryDirectory() as tmpdir:
debpath = os.path.join(tmpdir, f"{package}.deb")
download_file(url, debpath)
if install_dir is not None and "{version}" in str(install_dir):
deb_version = readcmd(
"dpkg-deb", "--showformat", "${Version}", "-W", debpath
)
install_dir = Path(str(install_dir).format(version=deb_version))
log.info("Expanded install dir to %s", install_dir)
binpath = install_deb(
debpath,
self.manager,
Path("usr/bin"),
install_dir=install_dir,
extra_args=extra_args,
)
log.debug("Installed program directory: %s", binpath)
return binpath
def assert_supported_system(self) -> None:
if shutil.which("dpkg") is None:
raise MethodNotSupportedError("dpkg command not found")
class AutobuildSnapshotInstaller(Installer):
OPTIONS: ClassVar[List[Option]] = []
PACKAGES = {
"git-annex": ("git-annex", ["git-annex"]),
}
def _install_linux(self, path: str) -> Path:
tmpdir = mktempdir("dl-build-")
annex_bin = tmpdir / "git-annex.linux"
log.info("Downloading and extracting under %s", annex_bin)
gzfile = tmpdir / "git-annex-standalone-amd64.tar.gz"
download_file(
f"https://downloads.kitenet.net/git-annex/{path}"
"/git-annex-standalone-amd64.tar.gz",
gzfile,
)
runcmd("tar", "-C", tmpdir, "-xzf", gzfile)
self.manager.addpath(annex_bin)
return annex_bin
def _install_macos(self, path: str) -> Path:
with tempfile.TemporaryDirectory() as tmpdir:
dmgpath = os.path.join(tmpdir, "git-annex.dmg")
download_file(
f"https://downloads.kitenet.net/git-annex/{path}/git-annex.dmg",
dmgpath,
)
return install_git_annex_dmg(dmgpath, self.manager)
def assert_supported_system(self) -> None:
if not ON_POSIX:
raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
@GitAnnexComponent.register_installer
class AutobuildInstaller(AutobuildSnapshotInstaller):
"""Installs the latest official build of git-annex from kitenet.net"""
NAME = "autobuild"
def install_package(self, package: str, **kwargs: Any) -> Path:
log.info("Installing %s via autobuild", package)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
assert package == "git-annex"
if ON_LINUX:
binpath = self._install_linux("autobuild/amd64")
elif ON_MACOS:
binpath = self._install_macos("autobuild/x86_64-apple-yosemite")
else:
raise AssertionError("Method should not be called on unsupported platforms")
log.debug("Installed program directory: %s", binpath)
return binpath
@GitAnnexComponent.register_installer
class SnapshotInstaller(AutobuildSnapshotInstaller):
"""
Installs the latest official snapshot build of git-annex from kitenet.net
"""
NAME = "snapshot"
def install_package(self, package: str, **kwargs: Any) -> Path:
log.info("Installing %s via snapshot", package)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
assert package == "git-annex"
if ON_LINUX:
binpath = self._install_linux("linux/current")
elif ON_MACOS:
binpath = self._install_macos("OSX/current/10.15_Catalina")
else:
raise AssertionError("Method should not be called on unsupported platforms")
log.debug("Installed program directory: %s", binpath)
return binpath
@GitAnnexComponent.register_installer
@DataladComponent.register_installer
class CondaInstaller(Installer):
"""Installs via conda"""
NAME = "conda"
OPTIONS = [
EXTRA_ARGS_OPTION,
]
PACKAGES = {
"datalad": ("datalad", ["datalad"]),
"git-annex": ("git-annex", ["git-annex"]),
}
def __init__(
self, manager: DataladInstaller, conda_instance: Optional[CondaInstance] = None
) -> None:
super().__init__(manager)
self.conda_instance: Optional[CondaInstance] = conda_instance
def install_package(
self,
package: str,
version: Optional[str] = None,
extra_args: Optional[List[str]] = None,
**kwargs: Any,
) -> Path:
if package == "git-annex" and not ON_LINUX:
raise MethodNotSupportedError(
"Conda only supports installing git-annex on Linux"
)
log.info("Installing %s via conda", package)
if self.conda_instance is not None:
conda = self.conda_instance
else:
conda = self.manager.get_conda()
log.info("Environment: %s", conda.name)
log.info("Version: %s", version)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
cmd = [conda.conda_exe, "install"]
if conda.name is not None:
cmd.append("--name")
cmd.append(conda.name)
cmd += ["-q", "-c", "conda-forge", "-y"]
if extra_args is not None:
cmd.extend(extra_args)
if version is None:
cmd.append(package)
else:
cmd.append(f"{package}={version}")
i = 0
while True:
try:
runcmd(*cmd)
except subprocess.CalledProcessError as e:
if i < 3:
log.error(
"Command failed with exit status %d; sleeping and retrying",
e.returncode,
)
i += 1
sleep(5)
else:
raise
else:
break
binpath = conda.bindir
log.debug("Installed program directory: %s", binpath)
return binpath
def assert_supported_system(self) -> None:
if not self.manager.conda_stack and shutil.which("conda") is None:
raise MethodNotSupportedError("Conda installation not found")
@GitAnnexComponent.register_installer
class DataladGitAnnexBuildInstaller(Installer):
"""
Installs git-annex via the artifact from the latest successful build of
datalad/git-annex
"""
NAME = "datalad/git-annex:tested"
OPTIONS = [
Option(
"--install-dir",
converter=Path,
metavar="DIR",
help="Directory in which to unpack the `*.deb`",
),
]
PACKAGES = {
"git-annex": ("git-annex", ["git-annex"]),
}
def install_package(
self, package: str, install_dir: Optional[Path] = None, **kwargs: Any
) -> Path:
log.info("Installing %s via %s", package, self.NAME)
if install_dir is not None:
if not ON_LINUX:
raise RuntimeError("--install-dir is only supported on Linux")
install_dir = untmppath(install_dir)
log.info("Install dir: %s", install_dir)
if kwargs:
log.warning("Ignoring extra installer arguments: %r", kwargs)
assert package == "git-annex"
with tempfile.TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
if ON_LINUX:
self.download("ubuntu", tmpdir)
(debpath,) = tmpdir.glob("*.deb")
binpath = install_deb(
debpath,
self.manager,
Path("usr", "bin"),
install_dir=install_dir,
)
elif ON_MACOS:
self.download("macos", tmpdir)
(dmgpath,) = tmpdir.glob("*.dmg")
binpath = install_git_annex_dmg(dmgpath, self.manager)
elif ON_WINDOWS:
self.download("windows", tmpdir)
(exepath,) = tmpdir.glob("*.exe")
self.manager.run_maybe_elevated(exepath, "/S")
binpath = Path("C:/Program Files", "Git", "usr", "bin")
self.manager.addpath(binpath)
else:
raise AssertionError(
"Method should not be called on unsupported platforms"
)
log.debug("Installed program directory: %s", binpath)
return binpath
def assert_supported_system(self) -> None:
if not (ON_LINUX or ON_MACOS or ON_WINDOWS):
raise MethodNotSupportedError(f"{SYSTEM} OS not supported")
@staticmethod
def download(ostype: str, target_dir: Path) -> None:
"""
Download & unzip the artifact from the latest successful build of
datalad/git-annex for the given OS in the given directory
"""
GitHubArtifactDownloader().download_last_successful_artifact(
target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
)
@GitAnnexComponent.register_installer
class DataladGitAnnexLatestBuildInstaller(DataladGitAnnexBuildInstaller):
"""
Installs git-annex via the artifact from the latest artifact-producing
build (successful or unsuccessful) of datalad/git-annex
"""
NAME = "datalad/git-annex"
@staticmethod
def download(ostype: str, target_dir: Path) -> None:
"""
Download & unzip the artifact from the latest build of
datalad/git-annex for the given OS in the given directory
"""
GitHubArtifactDownloader().download_latest_artifact(
target_dir, repo="datalad/git-annex", workflow=f"build-{ostype}.yaml"
)
class GitHubArtifactDownloader:
def __init__(self) -> None:
token = os.environ.get("GITHUB_TOKEN")
klustakwik) or as a result of manual sorting"),
"const": True}},
"times": {
"description": ("Times of clustered events, in seconds. This may be a link to "
"times field in associated FeatureExtraction module."),
"dimensions": ["num_events"],
"data_type": "float64!"},
"num": {
"description": "Cluster number of each event",
"dimensions": ["num_events"],
"data_type": "int32"},
"description": {
"data_type": "text",
"description": ("Description of clusters or clustering, (e.g. cluster 0 is noise, "
"clusters curated using Klusters, etc)")},
"cluster_nums": {
"description": ("List of cluster number that are a part of this set (cluster "
"numbers can be non- continuous)"),
"data_type": "int32",
"dimensions": ["num_clusters"],
"autogen": {
"type": "values",
"target":"num",
# "trim": True,
"qty": "*",
"include_empty": True}
},
"peak_over_rms": {
"description": ("Maximum ratio of waveform peak to RMS on any channel in the cluster "
"(provides a basic clustering metric)."),
"data_type": "float32",
"dimensions": ["num_clusters"]}
},
"ClusterWaveforms/": {
"merge": ["<Interface>/", ],
"description": ("The mean waveform shape, including standard deviation, of the different "
"clusters. Ideally, the waveform analysis should be performed on data that is only "
"high-pass filtered. This is a separate module because it is expected to require "
"updating. For example, IMEC probes may require different storage requirements to "
"store/display mean waveforms, requiring a new interface or an extension of this one."),
"attributes": {
"help?": {
"data_type":"text",
"value":("Mean waveform shape of clusters. Waveforms should be high-pass "
"filtered (ie, not the same bandpass filter used waveform analysis and "
"clustering)"),
"const": True}},
"waveform_mean": {
"description": ("The mean waveform for each cluster, using the same indices for each "
"wave as cluster numbers in the associated Clustering module (i.e, cluster 3 is "
"in array slot [3]). Waveforms corresponding to gaps in cluster sequence should "
"be empty (e.g., zero- filled)"),
"data_type": "float32",
"dimensions": ["num_clusters", "num_samples"]},
"waveform_sd": {
"description": "Stdev of waveforms for each cluster, using the same indices as in mean",
"data_type": "float32",
"dimensions": ["num_clusters", "num_samples"]},
"waveform_filtering": {
"description": "Filtering applied to data before generating mean/sd",
"data_type": "text"},
"clustering_interface/": {
"description": ("HDF5 link to Clustering interface that was the source of "
"the clustered data"),
"link": {"target_type": "Clustering/", "allow_subclasses": False } },
"clustering_interface_path": {
"description": "Path to linked clustering interface",
"data_type": "text",
"autogen": {
"type": "link_path",
"target":"clustering_interface/",
"trim": False,
"qty": "!",
"format": "path is $t"}},
},
"CompassDirection/": {
"merge": ["<Interface>/", ],
"description": ("With a CompassDirection interface, a module publishes a SpatialSeries "
"object representing a floating point value for theta. The SpatialSeries::reference_frame "
"field should indicate what direction corresponds to ""0"" and which is the direction "
"of rotation (this should be ""clockwise""). The si_unit for the SpatialSeries should "
"be ""radians"" or ""degrees""."),
"attributes": {
"help?": {
"data_type":"text",
"value":("Direction as measured radially. Spatial series reference frame should "
"indicate which direction corresponds to zero and what is the direction of positive "
"rotation"),
"const": True}},
"include": {"<SpatialSeries>/*": {} }, # One of possibly many SpatialSeries storing direction. Name should be informative
},
"DfOverF/": {
"merge": ["<Interface>/", ],
"description": ("dF/F information about a region of interest (ROI). Storage hierarchy"
" of dF/F should be the same as for segmentation (ie, same names for ROIs and for"
" image planes)."),
"attributes": {
"help?": {
"data_type":"text",
"value":("Df/f over time of one or more ROIs. TimeSeries names should correspond "
"to imaging plane names"),
"const": True}},
"include": {"<RoiResponseSeries>/*": {} }, # One of possibly many RoiResponseSeries, one for each
#imaging plane. Name should match entry in /general/optophysiology
},
"EventDetection/": {
"merge": ["<Interface>/", ],
"description": "Detected spike events from voltage trace(s).",
"times": {
"description": "Timestamps of events, in Seconds",
"dimensions": ["num_events"],
"data_type": "float64!"},
"detection_method": {
"description": ("Description of how events were detected, such as voltage"
" threshold, or dV/dT threshold, as well as relevant values."),
"data_type": "text"},
"source_idx": {
"description": ("Indices (zero-based) into source ElectricalSeries::data array "
"corresponding to time of event. Module description should define what is "
"meant by time of event (e.g., .25msec before action potential peak, "
"zero-crossing time, etc). The index points to each event from the raw data"),
"dimensions": ["num_events"],
"data_type": "int32",
"references": "source_electrical_series/data.num_times"},
"source_electricalseries/": {
"description": ("HDF5 link to ElectricalSeries that this data was calculated from. "
"Metadata about electrodes and their position can be read from that "
"ElectricalSeries so it's not necessary to mandate that information be "
"stored here"),
"link": {"target_type": "<ElectricalSeries>/", "allow_subclasses": False } },
"source_electricalseries_path": {
"description": "Path to linked ElectricalSeries.",
"data_type": "text",
"autogen": {
"type": "link_path",
"target": "source_electricalseries/",
"trim": False,
"qty": "!",
"format": "path is $t"}},
},
"EventWaveform/" : {
"merge": ["<Interface>/", ],
"description": ("Represents either the waveforms of detected events, as extracted from a raw data "
"trace in /acquisition, or the event waveforms that were stored during experiment acquisition."),
"attributes": {
"help?": {
"data_type":"text",
"value":("Waveform of detected extracellularly recorded spike events"),
"const": True}},
"include": {"<SpikeEventSeries>/*": {} },
},
"EyeTracking/" : {
"merge": ["<Interface>/", ],
"description": "Eye-tracking data, representing direction of gaze.",
"attributes": {
"help?": {
"data_type":"text",
"value": ("Eye-tracking data, representing direction of gaze"),
"const": True}},
"include": {"<SpatialSeries>/*": {} },
},
"FeatureExtraction/" : {
"merge": ["<Interface>/", ],
"_description": ("Features, such as PC1 and PC2, that are extracted from signals stored "
"in a SpikeEvent TimeSeries or other source."),
"attributes": {
"help?": {
"data_type":"text",
"value": ("Container for salient features of detected events"),
"const": True}},
"features": {
"description": "Multi-dimensional array of features extracted from each event.",
"dimensions": ["num_events", "num_channels", "num_features"],
"data_type": "float32"},
"times": {
"description": "Times of events that features correspond to (can be a link).",
"dimensions": ["num_events"],
"data_type": "float64!"},
"description": {
"description": "Description of features (eg, \"PC1\") for each of the extracted features.",
"dimensions": ["num_features"],
"data_type": "text"},
"electrode_idx": {
"description": ("Indices (zero-based) to electrodes described in the experiment's "
"electrode map array (under /general/extracellular_ephys)."),
"dimensions": ["num_channels"],
"data_type": "int32",
"references": "/general/extracellular_ephys/electrode_map.num_electrodes"},
},
"FilteredEphys/" : {
"merge": ["<Interface>/", ],
"description": ("Ephys data from one or more channels that has been subjected to filtering. "
"Examples of filtered data include Theta and Gamma (LFP has its own interface). FilteredEphys "
"modules publish an ElectricalSeries for each filtered channel or set of channels. The name of "
"each ElectricalSeries is arbitrary but should be informative. The source of the filtered data, "
"whether this is from analysis of another time series or as acquired by hardware, should be noted "
"in each's TimeSeries::description field. There is no assumed fdf8:f53e:61e4::18 correspondence between filtered "
"ephys signals and electrodes, as a single signal can apply to many nearby electrodes, and one "
"electrode may have different filtered (e.g., theta and/or gamma) signals represented."),
"attributes": {
"help?": {
"data_type":"text",
"value": ("Ephys data from one or more channels that is subjected to filtering, such as "
"for gamma or theta oscillations (LFP has its own interface). Filter properties should "
"be noted in the ElectricalSeries"),
"const": True}},
"include": {"<ElectricalSeries>/+": {} },
},
"Fluorescence/" : {
"merge": ["<Interface>/", ],
"description": ("Fluorescence information about a region of interest (ROI). Storage hierarchy of "
"fluorescence should be the same as for segmentation (ie, same names for ROIs and for image "
"planes)."),
"attributes": {
"help?": {
"data_type":"text",
"value": ("Fluorescence over time of one or more ROIs. TimeSeries names should correspond "
"to imaging plane names"),
"const": True}},
"include": {"<RoiResponseSeries>/+": {} }
},
"ImageSegmentation/" : {
"merge": ["<Interface>/", ],
"description": ("Stores pixels in an image that represent different regions of interest (ROIs) or "
"masks. All segmentation for a given imaging plane is stored together, with storage for "
"multiple imaging planes (masks) supported. Each ROI is stored in its own subgroup, with the "
"ROI group containing both a 2D mask and a list of pixels that make up this mask. Segments can "
"also be used for masking neuropil. If segmentation is allowed to change with time, a new "
"imaging plane (or module) is required and ROI names should remain consistent | |
assert_equal(myDisasm.Argument3.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument3.ArgSize, 8)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'cmpnltsd xmm0, qword ptr [rax]')
Buffer = b'\xF2\x0F\xC2\x00\x06\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG0)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 64)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument3.ArgSize, 8)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'cmpnlesd xmm0, qword ptr [rax]')
Buffer = b'\xF2\x0F\xC2\x00\x07\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG0)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 64)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument3.ArgSize, 8)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'cmpordsd xmm0, qword ptr [rax]')
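# --- Illustrative sketch (not part of the original test suite) -------------
# Every block in these tests repeats the same steps: build a DISASM, point
# EIP at a ctypes buffer, call Disasm(), then inspect the Argument* fields
# and CompleteInstr. A hypothetical helper factoring that out (using only
# names the test module already imports) could look like this:
def disasm64(code):
    d = DISASM()
    d.Archi = 64
    target = create_string_buffer(code, len(code))
    d.EIP = addressof(target)
    Disasm(addressof(d))  # fills d.CompleteInstr and the Argument* fields
    return d

# e.g. disasm64(b'\xF2\x0F\xC2\x00\x07' + b'\x11' * 10).CompleteInstr
# -> 'cmpordsd xmm0, qword ptr [rax]'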
Buffer = b'\xc4\x01\x83\xc2\x00\x00\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'vcmpeqsd xmm8, xmm15, qword ptr [r8]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
Buffer = b'\xc4\x01\x83\xc2\x00\x01\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'vcmpltsd xmm8, xmm15, qword ptr [r8]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
Buffer = b'\xc4\x01\x83\xc2\x00\x02\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmplesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x03\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpunordsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x04\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x05\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnltsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x06\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnlesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x07\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpordsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x08\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpeq_uqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x09\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpngesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0a\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpngtsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpfalsesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0c\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_oqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0d\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0e\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgtsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x0f\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmptruesd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x10\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpeq_ossd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmplt_oqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x12\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmple_oqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x13\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpunord_ssd xmm8, xmm15, qword ptr [r8]')
:param str id: Id (required)
:return: SyncSyncJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sync_jobs_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_sync_jobs_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['Id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Sync/Jobs/{Id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SyncSyncJob', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sync_options(self, user_id, **kwargs): # noqa: E501
"""Gets a list of available sync targets. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sync_options(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: UserId (required)
:param str item_ids: ItemIds
:param str parent_id: ParentId
:param str target_id: TargetId
:param str category: Category
:return: SyncModelSyncDialogOptions
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sync_options_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_sync_options_with_http_info(user_id, **kwargs) # noqa: E501
return data
def get_sync_options_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Gets a list of available sync targets. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sync_options_with_http_info(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: UserId (required)
:param str item_ids: ItemIds
:param str parent_id: ParentId
:param str target_id: TargetId
:param str category: Category
:return: SyncModelSyncDialogOptions
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id', 'item_ids', 'parent_id', 'target_id', 'category'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sync_options" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_sync_options`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
if 'item_ids' in params:
query_params.append(('ItemIds', params['item_ids'])) # noqa: E501
if 'parent_id' in params:
query_params.append(('ParentId', params['parent_id'])) # noqa: E501
if 'target_id' in params:
query_params.append(('TargetId', params['target_id'])) # noqa: E501
if 'category' in params:
query_params.append(('Category', params['category'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Sync/Options', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SyncModelSyncDialogOptions', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_sync_targets(self, user_id, **kwargs): # noqa: E501
"""Gets a list of available sync targets. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sync_targets(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: UserId (required)
:return: list[SyncSyncTarget]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sync_targets_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_sync_targets_with_http_info(user_id, **kwargs) # noqa: E501
return data
def get_sync_targets_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Gets a list of available sync targets. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sync_targets_with_http_info(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: UserId (required)
:return: list[SyncSyncTarget]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sync_targets" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params or
params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_sync_targets`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Sync/Targets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SyncSyncTarget]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_sync_by_itemid_status(self, body, item_id, **kwargs): # noqa: E501
"""Gets sync status for an item. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_sync_by_itemid_status(body, item_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SyncModelSyncedItemProgress body: SyncedItemProgress: (required)
:param str item_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_sync_by_itemid_status_with_http_info(body, item_id, **kwargs) # noqa: E501
else:
(data) = self.post_sync_by_itemid_status_with_http_info(body, item_id, **kwargs) # noqa: E501
return data
def post_sync_by_itemid_status_with_http_info(self, body, item_id, **kwargs): # noqa: E501
"""Gets sync status for an item. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_sync_by_itemid_status_with_http_info(body, item_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SyncModelSyncedItemProgress body: SyncedItemProgress: (required)
:param str item_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'item_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_sync_by_itemid_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_sync_by_itemid_status`") # noqa: E501
# verify the required parameter 'item_id' is set
if ('item_id' not in params or
params['item_id'] is None):
raise ValueError("Missing the required parameter `item_id` when calling `post_sync_by_itemid_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'item_id' in params:
path_params['ItemId'] = params['item_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Sync/{ItemId}/Status', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_sync_data(self, body, **kwargs): # noqa: E501
"""Syncs data between device and server # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_sync_data(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SyncModelSyncDataRequest body: SyncDataRequest: (required)
:return: SyncModelSyncDataResponse
If the method is called asynchronously,
return [self._space]
return [action._space for action in self._actions]
@property
def space(self):
"""
Get the corresponding space.
"""
if self.has_space():
# return gym.spaces.Tuple([self._space])
return self._space
# return [action._space for action in self._actions]
return gym.spaces.Tuple([action._space for action in self._actions])
@space.setter
def space(self, space):
"""
Set the corresponding space. This can only be used one time!
"""
if self.has_data() and not self.has_space() and \
isinstance(space, (gym.spaces.Box, gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
self._space = space
@property
def merged_space(self):
"""
Get the corresponding merged space. Note that all the spaces have to be of the same type.
"""
if self.has_space():
return self._space
spaces = self.spaces
result = []
dtype, prev_dtype = None, None
for space in spaces:
if isinstance(space, gym.spaces.Box):
dtype = 'box'
result.append([space.low, space.high])
elif isinstance(space, gym.spaces.Discrete):
dtype = 'discrete'
result.append(space.n)
else:
raise NotImplementedError
if prev_dtype is not None and dtype != prev_dtype:
return self.space
prev_dtype = dtype
if dtype == 'box':
low = np.concatenate([res[0] for res in result])
high = np.concatenate([res[1] for res in result])
return gym.spaces.Box(low=low, high=high, dtype=np.float32)
elif dtype == 'discrete':
return gym.spaces.Discrete(n=np.sum(result))
return self.space
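# Illustrative sketch of the merge rule above (spaces and values are hypothetical):
#   a = gym.spaces.Box(low=-1., high=1., shape=(2,), dtype=np.float32)
#   b = gym.spaces.Box(low=0., high=2., shape=(3,), dtype=np.float32)
#   merged low  -> [-1., -1., 0., 0., 0.],  merged high -> [1., 1., 2., 2., 2.]
# Two Discrete(3) and Discrete(4) actions merge into Discrete(7); mixed space types fall
# back to the tuple space returned by `space`.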
@property
def name(self):
"""
Return the name of the action.
"""
if self._name is None:
return self.__class__.__name__
return self._name
@name.setter
def name(self, name):
"""
Set the name of the action.
"""
if name is None:
name = self.__class__.__name__
if not isinstance(name, str):
raise TypeError("Expecting the name to be a string.")
self._name = name
@property
def shape(self):
"""
Return the shape of each action. Some actions, such as camera actions have more than 1 dimension.
"""
# if self.has_actions():
return [data.shape for data in self.data]
# return [self.data.shape]
@property
def merged_shape(self):
"""
Return the shape of each merged action.
"""
return [data.shape for data in self.merged_data]
@property
def size(self):
"""
Return the size of each action.
"""
# if self.has_actions():
return [data.size for data in self.data]
# return [len(self.data)]
@property
def merged_size(self):
"""
Return the size of each merged action.
"""
return [data.size for data in self.merged_data]
@property
def dimension(self):
"""
Return the dimension (length of shape) of each action.
"""
return [len(data.shape) for data in self.data]
@property
def merged_dimension(self):
"""
Return the dimension (length of shape) of each merged state.
"""
return [len(data.shape) for data in self.merged_data]
@property
def num_dimensions(self):
"""
Return the number of different dimensions (length of shape).
"""
return len(np.unique(self.dimension))
# @property
# def distribution(self):
# """
# Get the current distribution used when sampling the action
# """
# return None
#
# @distribution.setter
# def distribution(self, distribution):
# """
# Set the distribution to the action.
# """
# # check if distribution is discrete/continuous
# pass
###########
# Methods #
###########
def is_combined_actions(self):
"""
Return a boolean value depending if the action is a combination of actions.
Returns:
bool: True if the action is a combination of actions, False otherwise.
"""
return len(self._actions) > 0
# alias
has_actions = is_combined_actions
def has_data(self):
return self._data is not None
def has_space(self):
return self._space is not None
def add(self, action):
"""
Add an action or a list of actions to the list of internal actions. Useful when combining different actions
together. This shouldn't be called if this action has some data set to it.
Args:
action (Action, list/tuple of Action): action(s) to add to the internal list of actions
"""
if self.has_data():
raise AttributeError("Undefined behavior: a action should be a combination of actions or should contain "
"some kind of data, but not both.")
if isinstance(action, Action):
self._actions.add(action)
elif isinstance(action, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
for i, s in enumerate(action):
if not isinstance(s, Action):
raise TypeError("The item {} in the given list is not an instance of Action".format(i))
self._actions.add(s)
else:
raise TypeError("The 'other' argument should be an instance of Action, or an iterator over actions.")
# alias
append = add
extend = add
def _write(self, data):
pass
def write(self, data=None):
"""
Write the action values to the simulator for each action.
This has to be overwritten by the child class.
"""
# if time to write
if self.cnt % self.ticks == 0:
if self.has_data(): # write the current action
if data is None:
data = self._data
self._write(data)
else: # write each action
if self.actions:
if data is None:
data = [None] * len(self.actions)
for action, d in zip(self.actions, data):
if d is None:
d = action._data
action._write(d)
self.cnt += 1
# return the data
# return self.data
# def _reset(self):
# pass
#
# def reset(self):
# """
# Some actions need to be reset. It returns the initial action.
# This needs to be overwritten by the child class.
#
# Returns:
# initial action
# """
# if self.has_data(): # reset the current action
# self._reset()
# else: # reset each action
# for action in self.actions:
# action._reset()
#
# # return the first action data
# return self.write()
# def shape(self):
# """
# Return the shape of each action. Some actions, such as camera actions have more than 1 dimension.
# """
# return [d.shape for d in self.data]
#
# def dimension(self):
# """
# Return the dimension (length of shape) of each action.
# """
# return [len(d.shape) for d in self.data]
def max_dimension(self):
"""
Return the maximum dimension.
"""
return max(self.dimension)
# def size(self):
# """
# Return the size of each action.
# """
# return [d.size for d in self.data]
def total_size(self):
"""
Return the total size of the combined action.
"""
return sum(self.size)
def has_discrete_values(self):
"""
Does the action have discrete values?
"""
if self._data is None:
return [isinstance(action._space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete))
for action in self._actions]
if isinstance(self._space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
return [True]
return [False]
def is_discrete(self):
"""
If all the actions are discrete, then it is discrete.
"""
values = self.has_discrete_values()
if len(values) == 0:
return False
return all(values)
def has_continuous_values(self):
"""
Does the action have continuous values?
"""
if self._data is None:
return [isinstance(action._space, gym.spaces.Box) for action in self._actions]
if isinstance(self._space, gym.spaces.Box):
return [True]
return [False]
def is_continuous(self):
"""
If one of the actions is continuous, then the whole action is considered to be continuous.
"""
return any(self.has_continuous_values())
def bounds(self):
"""
If the action is continuous, it returns the lower and higher bounds of the action.
If the action is discrete, it returns the maximum number of discrete values that the action can take.
If the action is multi-discrete, it returns the maximum number of discrete values that each subaction can take.
Returns:
list/tuple: list of bounds if multiple actions, or bounds of this action
"""
if self._data is None:
return [action.bounds() for action in self._actions]
if isinstance(self._space, gym.spaces.Box):
return (self._space.low, self._space.high)
elif isinstance(self._space, gym.spaces.Discrete):
return (self._space.n,)
elif isinstance(self._space, gym.spaces.MultiDiscrete):
return (self._space.nvec,)
raise NotImplementedError
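# Illustrative examples of the bounds above (hypothetical spaces):
#   Box(low=-1., high=1., shape=(3,))  -> (array([-1., -1., -1.]), array([1., 1., 1.]))
#   Discrete(5)                        -> (5,)
#   MultiDiscrete([3, 2])              -> (array([3, 2]),)
# A combined action returns one such bounds tuple per inner action.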
def apply(self, fct):
"""
Apply the given fct to the data of the action, and set it to the action.
"""
self.data = fct(self.data)
def contains(self, x): # parameter dependent of the action
"""
Check if the argument is within the range/bound of the action.
"""
return self._space.contains(x)
def sample(self, distribution=None): # parameter dependent of the action (discrete and continuous distributions)
"""
Sample some values from the action based on the given distribution.
If no distribution is specified, it samples from a uniform distribution (default value).
"""
if self.is_combined_actions():
return [action.sample() for action in self._actions]
if self._distribution is None:
return
else:
pass
raise NotImplementedError
def add_noise(self, noise=None, replace=True): # parameter dependent of the action
"""
Add some noise to the action, and returns it.
Args:
noise (np.ndarray, fct): array to be added or function to be applied on the data
"""
if self._data is None:
# apply noise
for action in self._actions:
action.add_noise(noise=noise)
else:
# add noise to the data
noisy_data = self.data + noise
# clip such that the data is within the bounds
self.data = noisy_data
def normalize(self, normalizer=None, replace=True): # parameter dependent of the action
"""
Normalize the action data using the given normalizer.
# functions.py
"""
"""
import copy
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
import argparse
import random
import warnings
from ast import literal_eval
import tez
from tez import enums
from tez.callbacks import Callback
import torch
import torch.nn as nn
from sklearn import metrics
from transformers import AdamW, AutoConfig, AutoModel, AutoTokenizer, get_cosine_schedule_with_warmup
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from config import *
def split(df):
"""
Split using multilabel stratification. Presently scikit-learn provides several cross validators with stratification.
However, these cross validators do not offer the ability to stratify multilabel data. This iterative-stratification
project offers implementations of MultilabelStratifiedKFold, MultilabelRepeatedStratifiedKFold, and
MultilabelStratifiedShuffleSplit with a base algorithm for stratifying multilabel data.
https://github.com/trent-b/iterative-stratification. This is largely unchanged from Abhishek's code.
:param df: the training data to be split
:return: multilabel stratified Kfolds
"""
df = Parameters.TRAIN_DF
dfx = pd.get_dummies(df, columns=["discourse_type"]).groupby(["id"], as_index=False).sum() #
cols = [c for c in dfx.columns if (c.startswith("discourse_type_") or c == "id") and c != "discourse_type_num"]  # keep the dummy columns and id, excluding discourse_type_num
dfx = dfx[cols]
mskf = MultilabelStratifiedKFold(n_splits=HyperParameters.N_FOLDS, shuffle=True, random_state=42) #
labels = [c for c in dfx.columns if c != "id"]
dfx_labels = dfx[labels] #
dfx["kfold"] = -1 #
for fold, (trn_, val_) in enumerate(mskf.split(dfx, dfx_labels)):
print(len(trn_), len(val_)) #
dfx.loc[val_, "kfold"] = fold #
df = df.merge(dfx[["id", "kfold"]], on="id", how="left") #
print(df.kfold.value_counts()) #
df.to_csv(f"{HyperParameters.N_FOLDS}_train_folds.csv", index=False) #
return df
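# Minimal sketch of the multilabel stratification used above (toy arrays, not project data):
#   from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
#   X = np.arange(8).reshape(8, 1)
#   y = np.array([[0, 1], [1, 0], [1, 1], [0, 0], [0, 1], [1, 0], [1, 1], [0, 0]])
#   mskf = MultilabelStratifiedKFold(n_splits=2, shuffle=True, random_state=42)
#   for fold, (trn_idx, val_idx) in enumerate(mskf.split(X, y)):
#       print(fold, trn_idx, val_idx)  # each fold keeps the per-label distribution balanced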
#
# def train_text_dataframe():
# """
# # https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
# :return:
# """
# test_names, train_texts = [], []
# for f in tqdm(list(os.listdir('data/train'))):
# test_names.append(f.replace('.txt', ''))
# train_texts.append(open('data/train/' + f, 'r').read())
# train_text_df = pd.DataFrame({'id': test_names, 'text': train_texts})
# return train_text_df
#
#
# def NER_labels(train_text_df):
# """
# <NAME> https://www.kaggle.com/michaelkingston/pytorch-bigbird-ner-cv-0-615
# :return:
# """
# if not Parameters.LOAD_TOKENS_FROM:
# all_entities = []
# for ii, i in enumerate(train_text_df.iterrows()):
# if ii % 100 == 0: print(ii, ', ', end='')
# total = i[1]['text'].split().__len__()
# entities = ["O"] * total
# for j in Parameters.TRAIN_DF[Parameters.TRAIN_DF['id'] == i[1]['id']].iterrows():
# discourse = j[1]['discourse_type']
# list_ix = [int(x) for x in j[1]['predictionstring'].split(' ')]
# entities[list_ix[0]] = f"B-{discourse}"
# for k in list_ix[1:]: entities[k] = f"I-{discourse}"
# all_entities.append(entities)
# train_text_df['entities'] = all_entities
# train_text_df.to_csv('train_NER.csv', index=False)
#
# else:
# train_text_df = pd.read_csv(f'{Parameters.LOAD_TOKENS_FROM}/train_NER.csv')
# # pandas saves lists as string, we must convert back
# train_text_df.entities = train_text_df.entities.apply(lambda x: literal_eval(x))
def _prepare_training_data_helper(args, tokenizer, df, train_ids):
"""
:param args:
:param tokenizer:
:param df:
:param train_ids:
:return:
"""
training_samples = []
for idx in tqdm(train_ids):
filename = os.path.join(args.input, "train", f'{idx}.txt')
with open(filename, "r") as f:
text = f.read()
encoded_text = tokenizer.encode_plus(text, add_special_tokens=False, return_offsets_mapping=True)
input_ids = encoded_text["input_ids"]
input_labels = copy.deepcopy(input_ids)
offset_mapping = encoded_text["offset_mapping"]
for k in range(len(input_labels)):
input_labels[k] = "O"
sample = {"id": idx, "input_ids": input_ids, "text": text, "offset_mapping": offset_mapping}
temp_df = df[df["id"] == idx]
for _, row in temp_df.iterrows():
text_labels = [0] * len(text)
discourse_start = int(row["discourse_start"])
discourse_end = int(row["discourse_end"])
prediction_label = row["discourse_type"]
text_labels[discourse_start:discourse_end] = [1] * (discourse_end - discourse_start)
target_idx = []
for map_idx, (offset1, offset2) in enumerate(encoded_text["offset_mapping"]):
if sum(text_labels[offset1:offset2]) > 0:
if len(text[offset1:offset2].split()) > 0:
target_idx.append(map_idx)
targets_start = target_idx[0]
targets_end = target_idx[-1]
pred_start = "B-" + prediction_label
pred_end = "I-" + prediction_label
input_labels[targets_start] = pred_start
input_labels[targets_start + 1: targets_end + 1] = [pred_end] * (targets_end - targets_start)
sample["input_ids"] = input_ids
sample["input_labels"] = input_labels
training_samples.append(sample)
return training_samples
def prepare_training_data(df, tokenizer, args, num_jobs):
"""
:param df:
:param tokenizer:
:param args:
:param num_jobs:
:return:
"""
training_samples = []
train_ids = df["id"].unique()
train_ids_splits = np.array_split(train_ids, num_jobs)
results = Parallel(n_jobs=num_jobs, backend="multiprocessing")(
delayed(_prepare_training_data_helper)(args, tokenizer, df, idx) for idx in train_ids_splits)
for result in results:
training_samples.extend(result)
return training_samples
def calc_overlap(row):
"""
Calculates the overlap between prediction and ground truth and overlap percentages used for determining true
positives. This code is from <NAME>'s @robikscube notebook,
https://www.kaggle.com/robikscube/student-writing-competition-twitch
:param row:
:return:
"""
set_pred = set(row.predictionstring_pred.split(" "))
set_gt = set(row.predictionstring_gt.split(" "))
# Length of each and intersection
len_gt = len(set_gt)
len_pred = len(set_pred)
inter = len(set_gt.intersection(set_pred))
overlap_1 = inter / len_gt
overlap_2 = inter / len_pred
return [overlap_1, overlap_2]
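# Worked example (hypothetical row, not project data):
#   predictionstring_pred = "1 2 3 4", predictionstring_gt = "3 4 5 6 7"
#   len_gt = 5, len_pred = 4, intersection = {"3", "4"} -> inter = 2
#   overlap_1 = 2 / 5 = 0.4 (share of the ground truth covered)
#   overlap_2 = 2 / 4 = 0.5 (share of the prediction that overlaps the ground truth)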
def score_feedback_comp_micro(pred_df, gt_df):
"""
A function that scores for the kaggle Student Writing Competition. Uses the steps in the evaluation page here:
https://www.kaggle.com/c/feedback-prize-2021/overview/evaluation This code is from <NAME>'s Kaggle kernel.
:param pred_df:
:param gt_df:
:return:
"""
gt_df = gt_df[["id", "discourse_type", "predictionstring"]].reset_index(drop=True).copy()
pred_df = pred_df[["id", "class", "predictionstring"]].reset_index(drop=True).copy()
pred_df["pred_id"] = pred_df.index
gt_df["gt_id"] = gt_df.index
# Step 1. All ground truths and predictions for a given class are compared.
joined = pred_df.merge(gt_df, left_on=["id", "class"], right_on=["id", "discourse_type"], how="outer",
suffixes=("_pred", "_gt"))
joined["predictionstring_gt"] = joined["predictionstring_gt"].fillna(" ")
joined["predictionstring_pred"] = joined["predictionstring_pred"].fillna(" ")
joined["overlaps"] = joined.apply(calc_overlap, axis=1)
# 2. If the overlap between the ground truth and prediction is >= 0.5, and the overlap between the prediction and
# the ground truth >= 0.5, the prediction is a match and considered a true positive. If multiple matches exist,
# the match with the highest pair of overlaps is taken.
joined["overlap1"] = joined["overlaps"].apply(lambda x: eval(str(x))[0])
joined["overlap2"] = joined["overlaps"].apply(lambda x: eval(str(x))[1])
joined["potential_TP"] = (joined["overlap1"] >= 0.5) & (joined["overlap2"] >= 0.5)
joined["max_overlap"] = joined[["overlap1", "overlap2"]].max(axis=1)
tp_pred_ids = joined.query("potential_TP").sort_values("max_overlap", ascending=False).groupby(["id", "predictionstring_gt"]).first()["pred_id"].values
# 3. Any unmatched ground truths are false negatives and any unmatched predictions are false positives.
fp_pred_ids = [p for p in joined["pred_id"].unique() if p not in tp_pred_ids]
matched_gt_ids = joined.query("potential_TP")["gt_id"].unique()
unmatched_gt_ids = [c for c in joined["gt_id"].unique() if c not in matched_gt_ids]
# Get numbers of each type
tp = len(tp_pred_ids)
fp = len(fp_pred_ids)
fn = len(unmatched_gt_ids)
# calc micro f1
my_f1_score = tp / (tp + 0.5 * (fp + fn))
return my_f1_score
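# Worked example of the micro F1 above: with tp = 8, fp = 2, fn = 4,
#   my_f1_score = 8 / (8 + 0.5 * (2 + 4)) = 8 / 11 ≈ 0.727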
def score_feedback_comp(pred_df, gt_df, return_class_scores=False):
"""
A function that scores for the kaggle Student Writing Competition. Uses the steps in the evaluation page here:
https://www.kaggle.com/c/feedback-prize-2021/overview/evaluation
:param pred_df:
:param gt_df:
:param return_class_scores:
:return:
"""
class_scores = {}
pred_df = pred_df[["id", "class", "predictionstring"]].reset_index(drop=True).copy()
for discourse_type, gt_subset in gt_df.groupby("discourse_type"):
pred_subset = pred_df.loc[pred_df["class"] == discourse_type].reset_index(drop=True).copy()
class_score = score_feedback_comp_micro(pred_subset, gt_subset)
class_scores[discourse_type] = class_score
f1 = np.mean([v for v in class_scores.values()])
if return_class_scores:
return f1, class_scores
return f1
class FeedbackDatasetValid:
def __init__(self, samples, max_len, tokenizer):
self.samples = samples
self.max_len = max_len
self.tokenizer = tokenizer
self.length = len(samples)
def __len__(self):
return self.length
def __getitem__(self, idx):
input_ids = self.samples[idx]["input_ids"]
input_ids = [self.tokenizer.cls_token_id] + input_ids
if len(input_ids) > self.max_len - 1:
input_ids = input_ids[: self.max_len - 1]
# add end token id to the input_ids
input_ids = input_ids + [self.tokenizer.sep_token_id]
attention_mask = [1] * len(input_ids)
return {"ids": input_ids, "mask": attention_mask}
class Collate:
def __init__(self, tokenizer):
"""
:param tokenizer:
"""
self.tokenizer = tokenizer
def __call__(self, batch):
"""
:param batch:
:return:
"""
output = dict()
output["ids"] = [sample["ids"] for sample in batch]
output["mask"] = [sample["mask"] for sample in batch]
# calculate max token length of this batch
batch_max = max([len(ids) for ids in output["ids"]])
# add padding
if self.tokenizer.padding_side == "right":
output["ids"] = [s + (batch_max - len(s)) * [self.tokenizer.pad_token_id] for s in output["ids"]]
output["mask"] = [s + (batch_max - len(s)) * [0] for s in output["mask"]]
else:
output["ids"] = [(batch_max - len(s)) * [self.tokenizer.pad_token_id] + s for s in output["ids"]]
output["mask"] = [(batch_max - len(s)) * [0] + s for s in output["mask"]]
# convert to tensors
output["ids"] = torch.tensor(output["ids"], dtype=torch.long)
output["mask"] = torch.tensor(output["mask"], dtype=torch.long)
return output
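# Illustrative sketch of the dynamic padding above, assuming pad_token_id = 0 and
# right-side padding (values are hypothetical):
#   batch  = [{"ids": [101, 7, 8], "mask": [1, 1, 1]},
#             {"ids": [101, 7, 8, 9, 10], "mask": [1, 1, 1, 1, 1]}]
#   output = {"ids": tensor([[101, 7, 8, 0, 0], [101, 7, 8, 9, 10]]),
#             "mask": tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])}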
class EarlyStopping(Callback):
def __init__(self, model_path, valid_df, valid_samples, batch_size, tokenizer, patience=5, mode="max", delta=0.001, save_weights_only=True):
self.patience = patience
self.counter = 0
self.mode = mode
self.best_score = None
self.early_stop = False
self.delta = delta
self.save_weights_only = save_weights_only
self.model_path = model_path
self.valid_samples = valid_samples
self.batch_size = batch_size
self.valid_df = valid_df
self.tokenizer = tokenizer
if self.mode == "min":
self.val_score = np.Inf
else:
self.val_score = -np.Inf
def on_epoch_end(self, model):
model.eval()
valid_dataset = FeedbackDatasetValid(self.valid_samples, 4096, self.tokenizer)
collate = Collate(self.tokenizer)
preds_iter = model.predict(
valid_dataset,
batch_size=self.batch_size,
n_jobs=-1,
collate_fn=collate
)
final_preds = []
final_scores = []
for preds in preds_iter:
pred_class = np.argmax(preds, axis=2)
pred_scrs = np.max(preds, axis=2)
for pred, pred_scr in zip(pred_class, pred_scrs):
final_preds.append(pred.tolist())
final_scores.append(pred_scr.tolist())
for j in range(len(self.valid_samples)):
tt = [Targets.id_target_map[p] for p in final_preds[j][1:]]
tt_score = final_scores[j][1:]
self.valid_samples[j]["preds"] = tt
self.valid_samples[j]["pred_scores"] = tt_score
submission = []
for _, sample in enumerate(self.valid_samples):
preds = sample["preds"]
offset_mapping = sample["offset_mapping"]
sample_id = sample["id"]
sample_text = sample["text"]
sample_pred_scores = sample["pred_scores"]
# pad preds to same length as offset_mapping
if len(preds) < len(offset_mapping):
preds = preds + ["O"] * (len(offset_mapping) - len(preds))
sample_pred_scores = sample_pred_scores + [0] * (len(offset_mapping) - len(sample_pred_scores))
idx = 0
phrase_preds = []
while idx < len(offset_mapping):
start, _ = offset_mapping[idx]
if preds[idx] != "O":
label = preds[idx][2:]
#!/usr/bin/env python
import datetime
import glob
import gzip
import hashlib
import json
import os
import subprocess
import requests
from logzero import logger
#BASEURL = 'http://localhost:5000'
ERROR_TIMER = 0
TOKENS = {
'AAA': '<PASSWORD>'
}
ANSIBLE_PROJECT_ID = u'573f79d02a8192902e20e34b'
SHIPPABLE_URL = u'https://api.shippable.com'
ANSIBLE_PROVIDER_ID = u'562dbd9710c5980d003b0451'
ANSIBLE_RUNS_URL = u'%s/runs?projectIds=%s&isPullRequest=True' % (
SHIPPABLE_URL,
ANSIBLE_PROJECT_ID
)
DEFAULT_ETAG = 'a00049ba79152d03380c34652f2cb612'
# https://elasticread.eng.ansible.com/ansible-issues/_search
# https://elasticread.eng.ansible.com/ansible-pull-requests/_search
# ?q=lucene_syntax_here
# _search accepts POST
########################################################
# MOCK
########################################################
class RequestNotCachedException(Exception):
pass
def get_timestamp():
# 2018-10-15T21:21:48.150184
# 2018-10-10T18:25:49Z
ts = datetime.datetime.now().isoformat()
ts = ts.split('.')[0]
ts += 'Z'
return ts
def run_command(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(so, se) = p.communicate()
return (p.returncode, so, se)
def read_gzip_json(cfile):
try:
with gzip.open(cfile, 'r') as f:
jdata = json.loads(f.read())
except json.decoder.JSONDecodeError as e:
logger.error(e)
import epdb; epdb.st()
return jdata
def write_gzip_json(cfile, data):
with gzip.open(cfile, 'wb') as f:
f.write(json.dumps(data).encode('utf-8'))
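# Example round trip (sketch, using a throwaway path):
#   write_gzip_json('/tmp/sample.json.gz', {'numbers': [1, 2, 3]})
#   read_gzip_json('/tmp/sample.json.gz')  # -> {'numbers': [1, 2, 3]}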
class ProxyCacher:
BASEURL = 'http://localhost:5000'
TOKEN = None
SHIPPABLE_TOKEN = None
# make remote calls to github for uncached data
proxy = False
# use local ondisk cache from fixtures+deltas
usecache = False
# where to store and load the data fetched from github
fixturedir = '/tmp/bot.fixtures'
# where to store the new events created by POST
deltadir = '/tmp/bot.deltas'
def __init__(self):
pass
@property
def is_proxy(self):
if self.proxy:
return True
return False
def tokenized_request(
self,
url,
data=None,
method='GET',
headers=None,
pages=None,
paginate=True,
pagecount=0
):
logger.info('(FETCH) [%s] %s' % (method, url))
_headers = {}
if self.TOKEN:
_headers['Authorization'] = 'token %s' % self.TOKEN
# reactions
accepts = [
u'application/json',
u'application/vnd.github.mockingbird-preview',
u'application/vnd.github.sailor-v-preview+json',
u'application/vnd.github.starfox-preview+json',
u'application/vnd.github.v3+json',
u'application/vnd.github.squirrel-girl-preview+json'
]
_headers['Accept'] = ','.join(accepts)
if headers is not None:
for k, v in headers.items():
_headers[k] = v
if method == 'GET':
logger.info('GET %s' % url)
rr = requests.get(url, headers=_headers)
elif method == 'POST':
logger.info('POST %s' % url)
rr = requests.post(url, data=data, headers=_headers)
if rr.headers.get('Status') == '204 No Content':
data = None
else:
try:
data = rr.json()
except json.decoder.JSONDecodeError as e:
logger.error(e)
import epdb; epdb.st()
rheaders = dict(rr.headers)
if not paginate:
return (rheaders, data)
# exit early if enough pages were collected
pagecount += 1
if pages and pagecount >= pages:
return (rheaders, data)
if 'Link' in rheaders:
links = self.extract_header_links(rheaders)
if links.get('next'):
logger.debug('NEXT: %s' % links.get('next'))
(_headers, _data) = self.tokenized_request(links['next'], pagecount=pagecount)
data += _data
return (rheaders, data)
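# Example (sketch): fetching an issues listing that spans three pages
#   headers, issues = self.tokenized_request('https://api.github.com/repos/o/r/issues')
#   -> the Link header's rel="next" entries are followed recursively, so `issues` holds all
#      three pages; pages=1 stops after the first page and paginate=False skips the Link
#      handling entirely.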
# CACHED PROXY
def cached_tokenized_request(
self,
url,
data=None,
method='GET',
headers=None,
pages=None,
pagecount=0,
context='api.github.com'
):
'''fetch a raw github api url, cache the result, munge it and send it back'''
rdata = None
loaded = False
path = url.replace('https://%s/' % context, '')
path = path.split('/')
if path[-1] != 'graphql':
dtype = path[-1]
path = '/'.join(path[:-1])
fixdir = os.path.join(self.fixturedir, context, path)
else:
fixdir = os.path.join(self.fixturedir, context, 'graphql')
m = hashlib.md5()
m.update(data)
dtype = m.hexdigest()
if self.usecache:
try:
rheaders, rdata = self.read_fixture(fixdir, dtype)
loaded = True
except RequestNotCachedException:
pass
# add new data locally
if method in ['POST', 'UPDATE', 'DELETE'] and path[-1] != 'graphql':
jdata = data
try:
jdata = json.loads(data)
except json.decoder.JSONDecodeError:
pass
self.handle_change(context, url, headers, data, method=method)
#import epdb; epdb.st()
return {}, {}
if not loaded and method == 'GET' and not self.is_proxy:
# issues without labels won't have a labels file, so we have to return empty data
# get the issue data for verification
if url.endswith('/labels'):
iheaders, idata = self.get_cached_issue_data(url=url)
if not idata['labels']:
return {}, []
else:
print('HUH?')
import epdb; epdb.st()
# merge in the deltas
#if loaded and not self.is_proxy and method == 'GET':
# rdata = self.get_changes(context, url, rdata)
if self.usecache:
rdata = self.get_changes(context, url, rdata)
if not loaded and self.is_proxy:
rheaders, rdata = self.tokenized_request(
url,
data=data,
method=method,
headers=headers,
pages=pages,
pagecount=pagecount,
paginate=False
)
if not os.path.exists(fixdir):
os.makedirs(fixdir)
self.write_fixture(fixdir, dtype, rdata, rheaders, compress=True)
loaded = True
if not loaded:
raise Exception(
'%s was not cached and the server is not in proxy mode' % url
)
new_headers = self.replace_data_urls(rheaders)
new_data = self.replace_data_urls(rdata)
logger.debug('returning from cached_tokenized_request')
return new_headers, new_data
def get_cached_issue_data(self, namespace=None, repo=None, number=None, url=None):
# https://api.github.com/repos/ansible/ansible/issues/55062/labels
urlparts = url.split('/')
numix = None
for idx, urlpart in enumerate(urlparts):
if urlpart.isdigit():
numix = idx
break
diskpath = urlparts[2:numix]
fixdir = os.path.join(self.fixturedir, '/'.join(diskpath))
(headers, data) = self.read_fixture(fixdir, urlparts[numix])
return (headers, data)
def get_changes(self, context, url, data):
path = url.replace('https://%s/' % context, '')
path = path.split('/')
if not 'issues' in path and not 'issue' in path and not 'pull' in path and not 'pulls' in path:
return data
numix = None
for idx, _path in enumerate(path):
if _path.isdigit():
numix = idx
break
if numix is None:
import epdb; epdb.st()
inumber = path[numix]
dtype = path[-1]
_path = '/'.join(path[:numix+1])
fixdir = os.path.join(self.deltadir, context, _path)
if not os.path.exists(fixdir):
return data
efile = os.path.join(fixdir, 'events.json')
if not os.path.exists(efile):
return data
with open(efile, 'r') as f:
events = json.loads(f.read())
dtype = None
if url.endswith(inumber):
dtype = 'issue'
elif url.endswith('events'):
dtype = 'events'
elif url.endswith('comments'):
dtype = 'comments'
for event in events:
if dtype == 'events':
data.append(event)
continue
if dtype == 'comments' and event['event'] == 'commented':
data.append(event)
continue
if dtype == 'comments' and event['event'] != 'commented':
continue
if dtype == 'issue':
data['updated_at'] = event['created_at']
if event['event'] == 'labeled':
found = False
for label in data['labels']:
if label['name'] == event['label']['name']:
found = True
break
if not found:
data['labels'].append({'name': event['label']['name']})
elif event['event'] == 'unlabeled':
found = False
for label in data['labels']:
if label['name'] == event['label']['name']:
found = label
break
if found:
data['labels'].remove(found)
elif event['event'] == 'commented':
data['comments'] += 1
#else:
# import epdb; epdb.st()
continue
#import epdb; epdb.st()
#import epdb; epdb.st()
return data
def handle_change(self, context, url, headers, data, method=None):
# GET POST UPDATE DELETE
path = url.replace('https://%s/' % context, '')
path = path.split('/')
jdata = None
try:
jdata = json.loads(data)
except Exception:
pass
if method.lower() == 'delete':
if path[-2] == 'labels':
jdata = [path[-1]]
path = path[:-1]
else:
import epdb; epdb.st()
dtype = path[-1]
_path = '/'.join(path[:-1])
fixdir = os.path.join(self.deltadir, context, _path)
if not os.path.exists(fixdir):
os.makedirs(fixdir)
#fixfile = os.path.join(fixdir, '%s.json' % path[-1])
efile = os.path.join(fixdir, 'events.json')
#ldata = []
#if os.path.exists(fixfile):
# with open(fixfile, 'r') as f:
# ldata = json.loads(f.read())
edata = []
if os.path.exists(efile):
with open(efile, 'r') as f:
edata = json.loads(f.read())
if path[-1] == 'labels':
#jdata = json.loads(data)
if isinstance(jdata, dict) and 'labels' in jdata:
labels = jdata['labels']
else:
labels = jdata[:]
for label in labels:
thisevent = self.get_new_event()
thisevent['actor']['login'] = 'ansibot'
thisevent['actor']['url'] = 'https://api.github.com/users/ansibot'
thisevent['user']['login'] = 'ansibot'
thisevent['user']['url'] = 'https://api.github.com/users/ansibot'
if method.lower() == 'post':
thisevent['event'] = 'labeled'
elif method.lower() == 'delete':
thisevent['event'] = 'unlabeled'
thisevent['label'] = {'name': label}
edata.append(thisevent)
elif path[-1] == 'comments':
#jdata = json.loads(data)
thisevent = self.get_new_event()
thisevent['actor']['login'] = 'ansibot'
thisevent['actor']['url'] = 'https://api.github.com/users/ansibot'
thisevent['user']['login'] = 'ansibot'
thisevent['user']['url'] = 'https://api.github.com/users/ansibot'
thisevent['event'] = 'commented'
thisevent['body'] = jdata['body']
edata.append(thisevent)
else:
import epdb; epdb.st()
with open(efile, 'w') as f:
f.write(json.dumps(edata, indent=2))
def get_new_event(self):
thisevent = {
'id': None,
'node_id': None,
'url': None,
'actor': {
'login': None,
'url': None,
},
'user': {
'login': None,
'url': None
},
'event': None,
'commit_id': None,
'commit_url': None,
'created_at': datetime.datetime.now().isoformat(),
}
return thisevent
def extract_header_links(self, headers):
links = {}
for line in headers['Link'].split(','):
parts = line.split(';')
rel = parts[-1].split('"')[1]
link = parts[0].replace('<', '').replace('>', '').strip()
links[rel] = link
#import epdb; epdb.st()
return links
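# Example (sketch) with a typical GitHub pagination header:
#   headers['Link'] = '<https://api.github.com/repos/o/r/issues?page=2>; rel="next", ' \
#                     '<https://api.github.com/repos/o/r/issues?page=9>; rel="last"'
#   extract_header_links(headers)
#   -> {'next': 'https://api.github.com/repos/o/r/issues?page=2',
#       'last': 'https://api.github.com/repos/o/r/issues?page=9'}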
def fetch_first_issue_number(self, org, repo):
iurl = 'https://api.github.com/repos/%s/%s/issues' % (org, repo)
(issues_headers, issues) = self.tokenized_request(iurl, pages=1)
return issues[0]['number']
def get_issue_fixture(self, org, repo, number, ftype=None):
'''Read the fixture(s) from disk and send them back'''
logger.info('load %s %s %s' % (org, repo, number))
number = int(number)
bd = os.path.join(self.fixturedir, 'repos', org, repo, str(number))
fns = sorted(glob.glob('%s/*' % bd))
fns = [x for x in fns if ftype in os.path.basename(x)]
result = None
headers = None
for fn in fns:
if fn.endswith('.gz'):
data = read_gzip_json(fn)
else:
with open(fn, 'r') as f:
try:
data = json.loads(f.read())
except ValueError as e:
logger.error('unable to parse %s' % fn)
raise Exception(e)
data = self.replace_data_urls(data)
if '.headers' in fn:
headers = data.copy()
else:
result = data.copy()
return headers, result
def replace_data_urls(self, data):
'''Point ALL urls back to this instance instead of the origin'''
data = json.dumps(data)
data = data.replace('https://api.github.com', self.BASEURL)
data = data.replace('https://github.com', self.BASEURL)
data = data.replace('https://api.shippable.com', self.BASEURL)
data = data.replace('https://app.shippable.com', self.BASEURL)
data = json.loads(data)
return data
def read_fixture(self, directory, fixture_type):
hfn = os.path.join(directory, '%s.headers.json' % fixture_type)
if not os.path.exists(hfn):
hfn
### Team6 main.py ###
### author: tanahashi, kurita, ito ###
import os
import eel
import csv
import datetime
from datetime import datetime as dt
import numpy
import random
import matplotlib.pyplot as plt
import japanize_matplotlib  # needed to render Japanese text in matplotlib plots
from typing import Counter
# import importer
# import exporter
# the initial password for P000 is 000b
japanize_matplotlib.japanize()  # apply the Japanese font settings to matplotlib
print("バックグラウンドで port=8000 が使用されていると正常に動作しません。")
cwd = os.getcwd()
xcwd = cwd.replace('\\','/')
vcwd = xcwd + "/view"
dcwd = xcwd + "/data"
IOcwd = xcwd + "/IOList"
print(vcwd)
eel.init(vcwd)
eel.start("login.html", size=(800, 480), block=False)
@eel.expose
def registtData():
#print(registtDatatoPy())
try:
if registtDatatoPy() == True:
return "tomato"
else:
return "onion"
except(KeyError):
return "onion"
def gettData():
tData = eel.sendtDatatoPy()()
gtID = tData[0]
gtPW = tData[1]
return gtID, gtPW
tID, tPW = "xxxx", "yyyy"
# processing that runs after the tID and tPW entered on main.html are verified
def registtDatatoPy():
global tID, tPW
tID, tPW = gettData()
print("tID: {0} tPW: {1}".format(tID, tPW))
if tIDtPWverify(tID,tPW):
print("Yeeeeeeeeee")
return True
else:
print("Noooooooooo")
return False
# load the teacher file / verify tID and tPW
def tIDtPWverify(tID,tPW):
global dcwd
ktcwd = dcwd + "/教員・担当科目リスト.csv"
tPWcwd = dcwd + "/tPW.csv"
tID, tPW = gettData()
tnamecsv = {}
with open(ktcwd, "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
print(tnamecsv[tID])
tpwcsv = {}
with open(tPWcwd,"r")as p:
reader = csv.DictReader(p)
for prow in reader:
tpwcsv[prow["tID"]] = prow["tPW"]
tPWoncsv = tpwcsv[tID]
#print(tPWoncsv)
if tPW == tPWoncsv:
return True
else:
return False
# display the teacher's name in admin mode
@eel.expose
def picktName():
global dcwd
ktcwd = dcwd + "/教員・担当科目リスト.csv"
try:
global tID
tnamecsv = {}
with open(ktcwd, "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
#print(tnamecsv[tID])
tName = str(tnamecsv[tID])
print("user: " + tName)
eel.printtName(tName)
except(FileNotFoundError):
os.getcwd()
os.chdir(xcwd)
picktName()
# reader = "x"
tcName = ["xx", "xx"]
tcDay = [0, 0]
tcPeriod = [0, 0]
@eel.expose
def pickcName():
global tID
global tcName
global tcDay
global tcPeriod
global dcwd
ktcwd = dcwd + "/教員・担当科目リスト.csv"
crcwd = dcwd + "/講義科目ルール.csv"
# tccsv = [[0] * 5 for i in range(4)]
# print(tccsv)
# tcName = [[0] * 5 for i in range(4)]
# tccsvx = []
# for i in range(5):
# with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
# reader = csv.DictReader(f)
# for row in reader:
# print(row)
# tanto = str('担当科目' + str(i+1))
# print(tanto)
# tccsvx[row["ID"]] = row["担当科目1"]
# tcName[i] = str(tccsvx[tID])
# print("calss1: " + tcName[i])
tc1csv = {}
tc2csv = {}
tcName = ["name", "name"]
with open(ktcwd, "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tc1csv[row["ID"]] = row["担当科目1"]
tc2csv[row["ID"]] = row["担当科目2"]
tcName[0] = str(tc1csv[tID])
tcName[1] = str(tc2csv[tID])
print("calss1: " + tcName[0])
print("calss2: " + tcName[1])
# tcID = [[0] * 5 for i in range(4)]
# tcxID = [[0] * 5 for i in range(4)]
# for j in range(5):
# with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
# reader = csv.DictReader(p)
# for row in reader:
# tcxID[j][row["科目名"]] = row["講義ID"]
# tcID[j] = str(tcxID[tc1Name])
# print("classID: " + tcID[j])
tc1xID = {}
tc2xID = {}
with open(crcwd, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tc1xID[row["科目名"]] = row["講義ID"]
tc2xID[row["科目名"]] = row["講義ID"]
tc1ID = str(tc1xID[tcName[0]])
try:
tc2ID = str(tc2xID[tcName[1]])
except(KeyError):
tc2ID = "X0_"
print("calss1ID: " + tc1ID)
print("calss2ID: " + tc2ID)
tcDay = [0, 0]
tcPeriod = [0, 0]
cID = [tc1ID, tc2ID]
for n in range(0, len(cID)):
# print(n)
# print(len(cID))
if('M' in cID[n]):
tcDay[n] = '月'
elif('Tu' in cID[n]):
tcDay[n] = '火'
elif('W' in cID[n]):
tcDay[n] = '水'
elif('Th' in cID[n]):
tcDay[n] = '木'
elif('F' in cID[n]):
tcDay[n] = '金'
else:
tcDay[n] = ''
tcName[1] = "undefined"
print('Day config error')
if('12_' in cID[n]):
tcPeriod[n] = '1,2限'
elif('23_' in cID[n]):
tcPeriod[n] = '2,3限'
elif('34_' in cID[n]):
tcPeriod[n] = '3,4限'
elif('45_' in cID[n]):
tcPeriod[n] = '4,5限'
elif('1_' in cID[n]):
tcPeriod[n] = '1限'
elif('2_' in cID[n]):
tcPeriod[n] = '2限'
elif('3_' in cID[n]):
tcPeriod[n] = '3限'
elif('4_' in cID[n]):
tcPeriod[n] = '4限'
elif('5_' in cID[n]):
tcPeriod[n] = '5限'
else:
tcPeriod[n] = ''
print('Class period config error')
try:
print(tcDay[n] + tcPeriod[n])
except(TypeError):
pass
except(IndexError):
pass
n = n+1
tc1Name = tcName[0]
tc2Name = tcName[1]
tclen = len(tcName)
tclen = 5
eel.addcData(tcName, tclen, tcDay, tcPeriod)
# used for branching in admin mode
@eel.expose
def clidSet(clid):
global tcName
global tcDay
global tcPeriod
global dcwd
crcwd = dcwd + "/講義科目ルール.csv"
print(clid)
print(tcName)
cDay = "0"
cPeriod = "0"
try:
if clid == "101":
cConfig = tcName[0]
cDay = tcDay[0]
cPeriod = tcPeriod[0]
elif clid == "102":
cConfig = tcName[1]
cDay = tcDay[1]
cPeriod = tcPeriod[1]
elif clid == "103":
cConfig = tcName[2]
cDay = tcDay[2]
cPeriod = tcPeriod[2]
elif clid == "104":
cConfig = tcName[3]
cDay = tcDay[3]
cPeriod = tcPeriod[4]
elif clid == "105":
cConfig = tcName[4]
cDay = tcDay[4]
cPeriod = tcPeriod[4]
except(IndexError):
pass
print(cConfig)
tcxID = {}
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open(crcwd, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxID[row["科目名"]] = row["講義ID"]
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccID = str(tcxID[cConfig])
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxLT1[cConfig])
tccLT2 = str(tcxLT2[cConfig])
print("ID: " + tccID)
print("Day: " + cDay)
print("Period:" + cPeriod)
print("Start: " + tccCT1)
print("End: " + tccCT2)
print("Limit1:" + tccLT1)
print("Limit2:" + tccLT2)
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxCT1[cConfig][0:5])
tccLT2 = str(tcxCT1[cConfig][0:5])
tcxLT1m = int(tcxLT1[cConfig])
tcxLT2m = int(tcxLT2[cConfig])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M')
tccCT2t = dt.strptime(tccCT2, '%H:%M')
tccLT1t = dt.strptime(tccLT1, '%H:%M')
tccLT2t = dt.strptime(tccLT2, '%H:%M')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1 = str(tccCT1t.time())
tccCT2 = str(tccCT2t.time())
tccLT1 = str(tccLT1t.time())
tccLT2 = str(tccLT2t.time())
tccCT1 = tccCT1[0:5]
tccCT2 = tccCT2[0:5]
tccLT1 = tccLT1[0:5]
tccLT2 = tccLT2[0:5]
print("授業開始: " + tccCT1)
print("授業終了: " + tccCT2)
print("以降遅刻: " + tccLT1)
print("以降欠席: " + tccLT2)
eel.initialID(cConfig, tccID, cDay, cPeriod, tccCT1, tccCT2, tccLT1, tccLT2)
# eel.initialCT(tccCT1, tccCT2)
# eel.initialLT(tccLT1, tccLT2)
# return tccCT1, tccCT2, tccLT1, tccLT2
datew = datetime.date.today()
datew = datew.strftime("%Y_%m_%d")
print(datew)
# simulated attendees (dummy attendance data)
# main author: ito
def stdSim(cID):
global dcwd
rcwd = dcwd + "/履修者-"
number=range(1,101)
rnumber = random.sample(number, len(number))  # generate the student ID numbers in random order
temlist=[]
for i in rnumber:
temNo= "S{:0>3}".format(i) #"S001" "S012"のように3桁表示
temlist.append(temNo) #temlistはS001からS100の100個の要素からなるリスト
#講義IDに一致した履修者csvを開く
stdIDmx = {} #辞書型
stdIDm = [] #配列
stdcsvName = rcwd + cID + ".csv"
with open(stdcsvName, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
stdIDmx[row["学籍番号"]] = row["IDm"]
for i in range(len(temlist)):
try:
IDm = str(stdIDmx[temlist[i]])
stdIDm.append(IDm)
except KeyError:
pass
# print(stdcsvName)
# print(len(stdIDm))
return stdIDm
IOcsvName = "xx"
# attendance-list CSV handling, doubling as an attendance simulator
@eel.expose
def openIOcsv(cID, cName):
global datew
global IOcsvName
global dcwd
global IOcwd
crcwd = dcwd + "/講義科目ルール.csv"
rcwd = dcwd + "/履修者-"
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open(crcwd, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccCT1 = str(tcxCT1[cName]) + ":00"
tccCT2 = str(tcxCT2[cName]) + ":00"
tccLT1 = str(tcxCT1[cName][0:5]) + ":00"
tccLT2 = str(tcxCT1[cName][0:5]) + ":00"
tcxLT1m = int(tcxLT1[cName])
tcxLT2m = int(tcxLT2[cName])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M:%S')
tccCT2t = dt.strptime(tccCT2, '%H:%M:%S')
tccLT1t = dt.strptime(tccLT1, '%H:%M:%S')
tccLT2t = dt.strptime(tccLT2, '%H:%M:%S')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1t = tccCT1t.time()
tccCT2t = tccCT2t.time()
tccLT1t = tccLT1t.time()
tccLT2t = tccLT2t.time()
print("授業開始: " + str(tccCT1t))
print("授業終了: " + str(tccCT2t))
print("以降遅刻: " + str(tccLT1t))
print("以降欠席: " + str(tccLT2t))
LimitTime = [tccCT1t, tccCT2t, tccLT1t, tccLT2t]
stdIDm = stdSim(cID)
# print(stdIDm)
stdIDx = {}
stdNamex = {}
stdID = []
stdName = []
print("Preparations are underway: " + cName)
dirName = IOcwd + "/" + cName
IOcsvName = IOcwd + "/" + cName + "/" + cName + datew + "出欠リスト.csv"
stdcsvName = rcwd + cID + ".csv"
if(os.path.exists(dirName) == False):
os.mkdir(dirName)
# get the list of enrolled students
with open(stdcsvName, "r", encoding="utf_8", errors="") as stdcsv:
reader = csv.DictReader(stdcsv)
for row in reader:
stdIDx[row["IDm"]] = row["学籍番号"]
stdNamex[row["IDm"]] = row["名前"]
stdlen = len(stdIDm)
print("履修者数: " + str(stdlen))
for i in range(len(stdIDm)):
try:
try:
stdID.append(str(stdIDx[stdIDm[i]]))
stdName.append(str(stdNamex[stdIDm[i]]))
except(KeyError):
stdID.append("S000")
stdName.append("名無ノ権兵衛")
except(IndexError):
pass
# create the initial attendance-list csv
if(os.path.exists(IOcsvName) == False):
with open(IOcsvName, "w", encoding="utf_8", newline="") as IOcsv:
writer = csv.writer(IOcsv)
writer.writerow(["学籍番号", "名前", "IDm", "入室時刻", "出欠"])
# read the number of output classes from the last layer b_fc2 shape
_variables = tf.get_default_graph().get_collection_ref(tf.GraphKeys.VARIABLES)
print(_variables)
bias_before_output_layer_name = 'model/fc/b_fc2/b_fc2:0'
b_fc2 = tf.get_default_graph().get_tensor_by_name(bias_before_output_layer_name)
# Reset the graph to restore after model construction
tf.reset_default_graph()
self.output_classes = int(b_fc2.shape[0]) # have to cast from string to integer
return self.output_classes
def reload_setting(self, setting_file_path=None):
# usage: reload_hyper_param = nn_model_ins.reload_setting()
try:
if setting_file_path is None:
setting_file_path = self.hparams['setting_file_path']
assert os.path.isfile(setting_file_path)
reload_hparams_ins = st.Hyperparameters(hparams=None, setting_file_path=setting_file_path)
reload_hyper_param = reload_hparams_ins.get_as_dict()
return reload_hyper_param
except AssertionError as e:
print('Could not reload setting with error:{}'.format(e))
return None
def read_learning_rate_from_setting_file(self, setting_file_path=None):
# usage: nn_model_ins.read_learning_rate_from_setting_file()
# TODO make it possible to set update_learning_rate_frequency with hyper parameter
DEFAULT_UPDATE_LEARNING_FREQUENCY = 100
update_learning_rate_frequency = DEFAULT_UPDATE_LEARNING_FREQUENCY
is_iter_to_update_learning_rate = (
self.global_iter % update_learning_rate_frequency == (update_learning_rate_frequency - 1))
# print('self.global_iter:{}, is_iter_to_update_learning_rate:{}'.format(self.global_iter, is_iter_to_update_learning_rate))
if not is_iter_to_update_learning_rate:
return None
reload_hyper_param = self.reload_setting(setting_file_path)
try:
new_learning_rate = float(reload_hyper_param['learning_rate'])
assert isinstance(new_learning_rate, float)
print('new_learning_rate:{}'.format(new_learning_rate))
return new_learning_rate
except AssertionError as e:
print('Could not update learning_rate with error:{}'.format(e))
return None
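# Example of the reload cadence (sketch): with the default frequency of 100, the learning
# rate is re-read from the setting file only when global_iter is 99, 199, 299, ...; on all
# other iterations this method returns None and the caller keeps the current rate.
#   nn_model_ins.global_iter = 99
#   new_lr = nn_model_ins.read_learning_rate_from_setting_file()  # float from file, or None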
def train(self, iter_to=10000, learning_rate=1e-4, batch_size=128, dropout_ratio=0.5, l1_norm_reg_ratio=0.0, save_file_path=None, report_dir_path=None):
from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
last_time = time.time()
print('train with iter_to:{}, batch_size:{}, dropout_ratio:{}'.format(iter_to, batch_size, dropout_ratio))
# TODO
train_index = 0
# input_data = self.data_set.input_data
# output_data = self.data_set.output_data
# train_index_list = self.data_set.train_index_list
# test_index_list = self.data_set.test_index_list
# test_size = 31 + 30 # 2015/9, 10
# test_size = int(len(output_data) * 0.1)
# setup each test data
# _input_data = self.data_set.input_data
test_data = self.data_set.get_test_input_data()
if (self.mask_rate is not None) and self.mask_rate > 0:
# masked_test_data = self.data_set.masked_input_data[test_index_list].astype(np.float32)
masked_test_data = self.data_set.get_masked_test_input_data()
# test_values = np.asarray(output_data[test_index_list], dtype=np.float32)
test_values = self.data_set.get_test_output_data()
if self.model_type == 'CLASSIFICATION':
test_values_laveled = np.argmax(test_values, axis=1)
elif self.model_type == 'REGRESSION':
test_values = test_values.reshape(-1) # TODO
# print('test_index_list:{}'.format(test_index_list))
print('test_data.shape:{}'.format(test_data.shape))
print('test_values.shape:{}'.format(test_values.shape))
print('self.prediction_mode:{}'.format(self.prediction_mode))
print('---------- time:{}'.format(time.time() - last_time))
last_time = time.time()
assert (test_data.shape[0] > 0)
test_data_id_set = None
if self.data_set.data_id_set is not None:
test_data_id_set = self.data_set.get_test_data_id_set()
print('test_data_id_set.shape:{}'.format(test_data_id_set.shape))
test_annotation_data = None
if self.data_set.annotation_data is not None:
# test_annotation_data = self.data_set.annotation_data[test_index_list]
test_annotation_data = self.data_set.get_test_annotation_data()
print('test_annotation_data.shape:{}'.format(test_annotation_data.shape))
# setup each train data set
train_data_set = self.data_set
# remove test data from train data
# train_data_set.input_data = input_data[:-test_size].astype(np.float32)
# train_data_set.output_data = output_data[:-test_size].astype(np.float32)
# print('train_data_set.input_data.shape:{}'.format(train_data_set.input_data.shape))
print('train_data_set.input_data.shape:{}'.format(self.data_set.get_train_input_data_shape()))
# plot input and output data
if self.model_type == 'CLASSIFICATION':
_output_data = test_values_labeled
else:
_output_data = test_values
print('test_input_data:{}'.format(test_data[:15, -1, 0]))
print('test_output_data:{}'.format(test_values[:3]))
print('---------- time:{}'.format(time.time() - last_time))
last_time = time.time()
plot_data(input_data=test_data, output_data=test_values_labeled if self.model_type == 'CLASSIFICATION' else test_values,
y_max=None, series_range=None,
report_dir_path=report_dir_path)
print('---------- time:{} DONE plot_data'.format(time.time() - last_time))
last_time = time.time()
if self.debug_mode:
if (not self.prediction_mode) and (not self.test_only_mode):
index_to_export = 0
self.data_set.export_data(data_kind='train_data', index=index_to_export, report_dir_path=report_dir_path)
index_to_export = -1
self.data_set.export_data(data_kind='train_data', index=index_to_export, report_dir_path=report_dir_path)
index_to_export = 0
self.data_set.export_data(data_kind='test_data', index=index_to_export, report_dir_path=report_dir_path)
index_to_export = -1
self.data_set.export_data(data_kind='test_data', index=index_to_export, report_dir_path=report_dir_path)
# save all_variables names
all_variables = [var.name for var in tf.get_default_graph().get_collection_ref('variables')]
_report_path = os.path.join(report_dir_path, 'all_variables_names.csv')
with open(_report_path, 'w') as f:
for name in all_variables: f.write('{}\n'.format(name))
print('---------- time:{} DONE save all_variables names'.format(time.time() - last_time))
last_time = time.time()
# save trainable_variables names
trainable_variables_names = [var.name for var in self.get_trainable_variables()]
_report_path = os.path.join(report_dir_path, 'trainable_variables_names.csv')
with open(_report_path, 'w') as f:
for name in trainable_variables_names: f.write('{}\n'.format(name))
if self.cloud_root: upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
print('---------- time:{} DONE upload_to_cloud'.format(time.time() - last_time))
last_time = time.time()
# if self.prediction_mode:
# # TODO
# return
errors_history = None
for i in range(iter_to):
if (not self.test_only_mode) and (not self.prediction_mode):
input_batch, output_batch = train_data_set.next_batch(batch_size)
# print('i:{}'.format(i))
if self.global_iter == 0:
print('====================')
print('step %d, start training' % (self.global_iter))
print('input_batch.dtype:{}'.format(input_batch.dtype))
print('output_batch.dtype:{}'.format(output_batch.dtype))
print('input_batch.shape:{}'.format(input_batch.shape))
print('output_batch.shape:{}'.format(output_batch.shape))
# train
self.train_step.run(
feed_dict={self.x: input_batch, self.y_: output_batch, self.keep_prob: (1 - dropout_ratio),
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: True})
summary, train_total_loss = self.sess.run([self.merged, self.total_loss]
, feed_dict={self.x: input_batch, self.y_: output_batch,
self.keep_prob: (1 - dropout_ratio),
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: True
})
if self.global_iter % 100 == 99:
# train_accuracy = accuracy.test(feed_dict={
# train_total_loss = self.total_loss.test(feed_dict={
# self.x: input_batch, self.y_: output_batch, self.keep_prob: 1.0, self.learning_rate: learning_rate})
print('========================================')
print('step %d, training loss %g' % (self.global_iter, train_total_loss))
print('========================================')
self.train_writer.add_summary(summary, self.global_iter)
# print('min and max of normed train date_block_num:{}, {}'.format(min(input_batch[:,0,0]), max(input_batch[:,0,0])))
# _test_and_report = (self.test_only_mode or self.global_iter == 9 or self.global_iter % 100 == 99)
_test_and_report = (self.test_only_mode or self.prediction_mode or self.global_iter == 9 or self.global_iter % 100 == 99)
# _test_and_report = (self.test_only_mode or self.global_iter % 10 == 9)
if _test_and_report:
# calc error
if self.model_type == 'REGRESSION':
y_estimated = self.y.eval(feed_dict={
self.x: test_data, self.y_: test_values, self.keep_prob: 1.0, self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False
})
y_label_estimated = None
if self.mask_rate is not None and self.mask_rate > 0:
y_estimated_masked = self.y.eval(feed_dict={
self.x: masked_test_data, self.y_: test_values, self.keep_prob: 1.0,
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False
})
y_label_estimated_masked = None
else:
y_label_estimated, y_estimated = self.sess.run([self.y_label, self.y]
, feed_dict={self.x: test_data, self.y_: test_values,
self.keep_prob: 1.0,
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False})
summary, test_total_loss = self.sess.run([self.merged, self.total_loss]
, feed_dict={self.x: test_data, self.y_: test_values,
self.keep_prob: 1.0,
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False})
root_mean_squared_error = None
mean_absolute_error = None
if self.model_type == 'REGRESSION':
root_mean_squared_error, mean_absolute_error = self.sess.run([self.root_mean_squared_error, self.mean_absolute_error]
, feed_dict={self.x: test_data, self.y_: test_values,
self.keep_prob: 1.0,
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False})
if self.mask_rate is not None and self.mask_rate > 0:
root_mean_squared_error_masked, mean_absolute_error_masked = self.sess.run([self.root_mean_squared_error, self.mean_absolute_error]
, feed_dict={self.x: masked_test_data, self.y_: test_values,
self.keep_prob: 1.0,
self.learning_rate: learning_rate,
self.l1_norm_reg_ratio: l1_norm_reg_ratio,
self.is_train: False})
print('========================================')
print('step:{}, testing root_mean_squared_error:{}, mean_absolute_error:{}'.format(self.global_iter, root_mean_squared_error, mean_absolute_error))
print('========================================')
assert (root_mean_squared_error is not None)
new_errors = pd.DataFrame([[self.global_iter, root_mean_squared_error, mean_absolute_error]], columns=(['global_iter', 'root_mean_squared_error', 'mean_absolute_error']))
errors_history = pd.concat([errors_history, new_errors], ignore_index=True) if errors_history is not None else new_errors
min_rmse_index = errors_history['root_mean_squared_error'].idxmin()
min_root_mean_squared_error = errors_history.iloc[min_rmse_index]['root_mean_squared_error']
min_global_iter = errors_history.iloc[min_rmse_index]['global_iter']
at_min_mean_absolute_error = errors_history.iloc[min_rmse_index]['mean_absolute_error']
print('min_global_iter:{}, min of root_mean_squared_error:{}, with mean_absolute_error:{}'.format(min_global_iter, min_root_mean_squared_error, at_min_mean_absolute_error))
if report_dir_path:
_report_path = os.path.join(report_dir_path, 'errors_history.csv')
errors_history.to_csv(_report_path, index=False)
if self.cloud_root: upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
# log_scalar(writer=self.test_writer, tag='rmse', value=rmse, step=self.global_iter)
if report_dir_path:
error_to_plot = None
error_name = None
if self.plot_errors is not None:
# TODO plot more than a single error
for plot_error in self.plot_errors:
calc_range = [0, 9.0] if len(plot_error.split('DROP')) > 1 else None
if plot_error == 'accuracy':
error_to_plot = calc_accuracy_with_drop(test_values, y_estimated, rank_boundary_list=self.rank_boundary_list)
naive_error = None
else:
error_to_plot = calc_error_with_drop(plot_error, test_values, y_estimated, calc_range=calc_range)
naive_error = calc_error_with_drop(plot_error, test_values[:-1], y_estimated[1:], calc_range=calc_range)
error_name = 'error({})'.format(plot_error)
# report naive error TODO standardize
print('{}, error:{}, naive_error:{}'.format(error_name, error_to_plot, naive_error))
_offset_column_index = train_data_set.offset_column_index
# print('_offset_column_index:{}'.format(_offset_column_index))
if _offset_column_index > 0:
offset_values = test_data[:, 0, _offset_column_index]
offset_values = np.reshape(offset_values, (-1))
offset_value_unique_list = np.unique(offset_values)
else:
# offset_values = train_data_set.input_output_ts_offset
offset_value_unique_list = [train_data_set.input_output_ts_offset]
for _offset in offset_value_unique_list:
# print('_offset:{}'.format(_offset))
# print('offset_values:{}'.format(offset_values))
# print('len of offset_values:{}'.format(len(offset_values)))
if _offset_column_index > 0:
all_index_to_plot = [i for i, x in enumerate(offset_values) if math.fabs(x - _offset) < 1e-3]
else:
all_index_to_plot = list(range(len(test_data)))
# calc cc errors
input_target_value_column_index = 0 # TODO Enable to set with hyper param
cc_error = None
if self.calc_cc_errors and self.op_errors is not None:
true_y_to_plot_cc = _output_data[all_index_to_plot]
estimated_y_to_plot_cc = test_data[all_index_to_plot, -1, input_target_value_column_index]
for op_error in self.op_errors:
calc_range = [0, 9.0] if len(op_error.split('DROP')) > 1 else None
if op_error != 'accuracy':
cc_error = calc_error_with_drop(op_error, true_y_to_plot_cc, estimated_y_to_plot_cc,
calc_range=calc_range)
cc_error_name = 'cc error({})'.format(op_error)
print('_offset:{}, error_name:{}, error_to_plot:{}, cc_error_name:{}, cc_error:{}'.format(_offset, error_name, error_to_plot, cc_error_name, cc_error))
x_to_plot_cc = list(range(len(estimated_y_to_plot_cc)))
_group_value = None
_plot_iter = None
title = 'Plot Ground truth and CC\nwith input-output offset:{} for group:{}'.format(
_offset, _group_value) if self.plot_title is None else self.plot_title.format(_offset,
_group_value)
_report_path = plot_estmated_true(x=x_to_plot_cc, estimated_y=estimated_y_to_plot_cc, estimated_label=None, model_type=self.model_type,
true_y=true_y_to_plot_cc, y_max=None, series_range=None, error=cc_error, error_name=cc_error_name, report_dir_path=report_dir_path,
xlabel=self.plot_x_label, ylabel=self.plot_y_label, title=title, postfix='o{}_{}_cc'.format(_offset, _group_value), iter=_plot_iter,
x_range=self.plot_x_range, y_range=self.plot_y_range)
if self.cloud_root: upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
self.calc_cc_errors = False # TODO
# plot in group
index_to_plot_group_dict = {'all':all_index_to_plot}
if self.plot_group_data_name_in_annotation is not None:
index_to_plot_group_dict = {}
_group_values = test_annotation_data[:, 2 + self.annotation_col_names.index(self.plot_group_data_name_in_annotation)]
_group_unique_values = list(set(_group_values))
for group_value in _group_unique_values:
# print('group_value:{}'.format(group_value))
index_to_plot = [i for i, x in enumerate(_group_values) if (x == group_value and i in all_index_to_plot)]
# print('index_to_plot:{}'.format(index_to_plot))
# print('test_annotation_data:{}'.format(test_annotation_data[index_to_plot]))
index_to_plot_group_dict[group_value] = index_to_plot
report_plot_file_list = []
for group_value, index_to_plot in index_to_plot_group_dict.items():
# print('_offset:{}, index_to_plot[:5]:{}'.format(_offset, index_to_plot[:5]))
estimated_y_to_plot = y_estimated[index_to_plot]
estimated_label_to_plot = y_label_estimated[index_to_plot] if y_label_estimated is not None else None
if self.mask_rate is not None and self.mask_rate > 0:
estimated_y_to_plot_masked = y_estimated_masked[index_to_plot]
estimated_label_to_plot_masked = y_label_estimated_masked[index_to_plot] if y_label_estimated_masked is not None else None
true_y_to_plot = _output_data[index_to_plot]
data_id_set_to_plot = None
if test_data_id_set is not None:
data_id_set_to_plot = test_data_id_set[index_to_plot]
elif test_annotation_data is not None:
data_id_set_to_plot = test_annotation_data[index_to_plot, 0]
test_annotation_data_dt_to_export = None
if test_annotation_data is not None:
test_annotation_data_dt_to_export = test_annotation_data[index_to_plot]
# print('len(estimated_y_to_plot):{}'.format(len(estimated_y_to_plot)))
x_to_plot = list(range(len(estimated_y_to_plot)))
if test_annotation_data is not None and self.plot_x_data_name_in_annotation is not None :
| |
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Defines a Monitor class for monitoring the rate at which data is saved
to Distilleries. Monitors can generate Alerts if data is not being saved
at the expected rate.
"""
# standard library
from datetime import timedelta
import json
# third party
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# local
from alarms.models import Alarm, AlarmManager
from alerts.models import Alert
from cyphon.choices import (
ALERT_LEVEL_CHOICES,
MONITOR_STATUS_CHOICES,
TIME_UNIT_CHOICES,
)
from cyphon.fieldsets import QueryFieldset
from distilleries.models import Distillery
from engines.queries import EngineQuery
from engines.sorter import SortParam, Sorter
import utils.dateutils.dateutils as dt
class MonitorManager(AlarmManager):
"""
"""
def find_relevant(self, distillery):
"""
"""
active_monitors = self.find_enabled()
return active_monitors.filter(distilleries=distillery)
class Monitor(Alarm):
"""
A Monitor monitors one or more Distilleries for saved data.
It can be used to generate Alerts if data is not being saved
to the Distilleries at an expected rate.
Attributes
----------
name : str
The name of the |Monitor|.
enabled : bool
If True, the Monitor will be included in Monitor status updates.
distilleries : Distilleries
One or more Distilleries that the Monitor should watch.
time_interval : int
Maximum length of time that the Monitor's Distilleries can have
no activity before the Monitor status changes to unhealthy.
time_unit : str
The time units for the time_interval. Possible values are
constrained to TIME_UNIT_CHOICES.
alerts_enabled : bool
If True, the Monitor is allowed to generate Alerts.
repeating_alerts : bool
If True, the Monitor will generate an Alert at every time
interval when its status is unhealthy. If False, the Monitor
will only generate an Alert when its status changes from healthy
to unhealthy.
alert_level : str
The level to use when generating Alerts. Possible values are
constrained to MONITOR_STATUS_CHOICES.
last_alert_date : datetime
A |datetime| indicating the created_date for the last Alert
generated by the monitor.
last_alert_id : int
A positive integer indicating the id of the last Alert
generated by the monitor.
status : str
The current status of the Monitor. Possible values are
constrained to MONITOR_STATUS_CHOICES.
created_date : datetime
A |datetime| indicating when the Monitor was created.
last_updated : datetime
A |datetime| indicating when the Monitor status was last
updated (though the status may not have changed).
last_healthy : datetime
A |datetime| indicating when the Monitor last had a healthy
status.
last_active_distillery : Distillery
The last Distillery that was saved to among the Distilleries
being monitored.
last_saved_doc : str
The document id of the last document that was saved among the
Distilleries being monitored.
"""
distilleries = models.ManyToManyField(
Distillery,
related_name='+', # do not create backwards relation
)
time_interval = models.IntegerField()
time_unit = models.CharField(max_length=3, choices=TIME_UNIT_CHOICES)
alerts_enabled = models.BooleanField(default=True)
repeating_alerts = models.BooleanField(default=False)
alert_level = models.CharField(
max_length=20,
choices=ALERT_LEVEL_CHOICES
)
last_alert_date = models.DateTimeField(blank=True, null=True)
last_alert_id = models.PositiveIntegerField(blank=True, null=True)
status = models.CharField(
max_length=20,
choices=MONITOR_STATUS_CHOICES
)
created_date = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
last_healthy = models.DateTimeField(blank=True, null=True)
last_active_distillery = models.ForeignKey(
Distillery,
blank=True,
null=True,
verbose_name=_('distillery')
)
last_saved_doc = models.CharField(
max_length=255,
blank=True,
null=True,
verbose_name=_('document id')
)
_HEALTHY = 'GREEN'
_UNHEALTHY = 'RED'
objects = MonitorManager()
def __str__(self):
return self.name
def save(self, *args, **kwargs):
"""
Overrides the save() method to validate distilleries and update
the status of the Monitor.
"""
self._update_fields()
super(Monitor, self).save(*args, **kwargs)
def _get_interval_in_seconds(self):
"""
Returns the number of seconds in the Monitor's time interval.
"""
return dt.convert_time_to_seconds(self.time_interval, self.time_unit)
def _get_inactive_seconds(self):
"""
Returns the number of seconds since a document was saved to one
of the Monitor's distilleries.
"""
if self.last_healthy is not None or self.created_date is not None:
if self.last_healthy is not None:
time_delta = timezone.now() - self.last_healthy
else:
time_delta = timezone.now() - self.created_date
return time_delta.total_seconds()
else:
return 0
def _get_last_alert_seconds(self):
"""
Returns the number of seconds since the Monitor last generated
an Alert.
"""
if self.last_alert_date is not None:
time_delta = timezone.now() - self.last_alert_date
return time_delta.total_seconds()
def _get_inactive_interval(self):
"""
Returns a string with the approximate time since a document was
saved to one of the Monitor's distilleries (e.g., '35 s', '6 m',
'2 h', '1 d'). The time is rounded down to the nearest integer.
"""
seconds = self._get_inactive_seconds()
return dt.convert_seconds(seconds)
def _is_overdue(self):
"""
Returns a Boolean indicating whether the time since a document
was last saved to one of the Monitor's distilleries exceeds the
Monitor's interval.
"""
return self._get_inactive_seconds() > self._get_interval_in_seconds()
def _get_interval_start(self):
"""
Returns a DateTime representing the start of the monitoring
interval.
"""
seconds = self._get_interval_in_seconds()
return timezone.now() - timedelta(seconds=seconds)
def _get_query_start_time(self):
"""
Returns either the last_healthy datetime or the start of the
monitoring interval, whichever is older.
"""
interval_start = self._get_interval_start()
if self.last_healthy and self.last_healthy < interval_start:
return self.last_healthy
else:
return interval_start
def _get_query(self, date_field):
"""
Takes the name of a date field and returns an |EngineQuery| for
documents with dates later than the last_healthy date (if there
is one) or the start of the monitoring interval (if there isn't).
"""
start_time = self._get_query_start_time()
query = QueryFieldset(
field_name=date_field,
field_type='DateTimeField',
operator='gt',
value=start_time
)
return EngineQuery([query])
@staticmethod
def _get_sorter(date_field):
"""
Takes the name of a date field and returns a |Sorter| for
sorting results in descending order of date.
"""
sort = SortParam(
field_name=date_field,
field_type='DateTimeField',
order='DESC',
)
return Sorter(sort_list=[sort])
def _get_most_recent_doc(self, distillery):
"""
Takes a Distillery and returns the most recent document from the
monitoring interval, if one exists. Otherwise, returns None.
"""
date_field = distillery.get_searchable_date_field()
if date_field:
query = self._get_query(date_field)
sorter = self._get_sorter(date_field)
results = distillery.find(query, sorter, page=1, page_size=1)
if results['results']:
return results['results'][0]
def _update_doc_info(self):
"""
Looks for the most recently saved doc among the Distilleries
being monitored, and updates the relevant field in the Monitor.
"""
for distillery in self.distilleries.all():
doc = self._get_most_recent_doc(distillery)
if doc:
date = distillery.get_date(doc)
if self.last_healthy is None or date > self.last_healthy:
self.last_healthy = date
self.last_active_distillery = distillery
self.last_saved_doc = doc.get('_id')
def _set_current_status(self):
"""
Updates and returns the Monitor's current status.
"""
is_overdue = self._is_overdue()
if is_overdue:
self.status = self._UNHEALTHY
else:
self.status = self._HEALTHY
return self.status
def _get_title(self):
"""
Returns a title for an Alert.
"""
downtime = self._get_inactive_interval()
return 'Health monitor "%s" has seen no activity for over %s.' \
% (self.name, downtime)
def _alert_due(self):
"""
If the Monitor has previously created an Alert, returns a
Boolean indicating whether the last time an Alert was generated
exceeds the Monitor's interval. If the Monitor has never created
an Alert, returns True. For Monitors with repeating Alerts, this
is used to determine whether enough time has passed to generate
another Alert.
"""
last_alert_time = self._get_last_alert_seconds()
if last_alert_time:
return last_alert_time > self._get_interval_in_seconds()
else:
return True
def _create_alert(self):
"""
Generates an Alert based on the Monitor's alert_level. Returns
the saved Alert.
"""
title = self._get_title()
alert = Alert(
title=title,
level=self.alert_level,
alarm=self,
distillery=self.last_active_distillery,
doc_id=self.last_saved_doc
)
alert.save()
return alert
def _alert(self, old_status):
"""
Takes a string representing the Monitor's status prior to its
last update. Determines whether an Alert should be generated,
and, if so, creates the Alert and saves the Alert's created_date
to the Monitor's last_alert_date field and the Alert's id to the
last_alert_id field. Returns None.
"""
repeat_alert = self.repeating_alerts and self._alert_due()
status_changed = old_status != self._UNHEALTHY
if self.alerts_enabled and (repeat_alert or status_changed):
alert = self._create_alert()
self.last_alert_date = alert.created_date
self.last_alert_id = alert.pk
self.save()
def _find_last_doc(self):
"""
Returns the last document saved in the last active Distillery,
if one exists. Otherwise, returns None.
"""
return self.last_active_distillery.find_by_id(self.last_saved_doc)
def _update_fields(self):
"""
Updates the Monitor's fields relating to its status and last
saved document.
"""
if self.id:
self._update_doc_info()
self._set_current_status()
| |
import errno
import json
import logging
import os
import shutil
import uuid
import zipfile
import re
import subprocess
import pandas as pd
from kb_Amplicon.Utils.DataUtil import DataUtil
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
class MDSUtils:
R_BIN = '/kb/deployment/bin'
MDS_OUT_DIR = 'mds_output'
PARAM_IN_WS = 'workspace_name'
PARAM_IN_MATRIX = 'input_obj_ref'
PARAM_OUT_MATRIX = 'mds_matrix_name'
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_run_mds_params(self, params):
"""
_validate_run_mds_params:
validates params passed to run_mds method
"""
logging.info('start validating run_mds params')
# check for required parameters
for p in [self.PARAM_IN_MATRIX, self.PARAM_IN_WS, self.PARAM_OUT_MATRIX]:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _build_rMDS_script(self, params):
"""
_build_rMDS_script: build a sequence of R command calls according to params
Note: To run the NMDS, we will use the function metaMDS from the vegan package.
The metaMDS function requires only a community-by-species matrix.
"""
data_file_path = params.get('datafile', None)
if not data_file_path:
return ''
exists = os.path.isfile(os.path.join(self.output_dir, os.path.basename(data_file_path)))
if not exists:
shutil.copyfile(data_file_path,
os.path.join(self.output_dir, os.path.basename(data_file_path)))
n_components = params.get('n_components', 2)
max_iter = params.get('max_iter', 300)
run_metric = True if params.get('metric', 0) else False
dist_metric = params.get('distance_metric', 'bray')
mds_cfg = 'distance="' + dist_metric + '",try=20,trymax=' + str(max_iter) + \
',autotransform=TRUE,noshare=0.1,expand=TRUE,trace=1,' + \
'plot=FALSE,engine=c("monoMDS","isoMDS"),k=' + str(n_components)
if run_metric:
mds_cfg += ',metric=TRUE'
mds_scrpt = 'library(vegan)\n'
mds_scrpt += 'library(jsonlite)\n'
mds_scrpt += 'vg_data <- read.table("' + data_file_path + \
'",header=TRUE,row.names=1,sep="")\n'
# remove the last (taxonomy) column
# mds_scrpt += 'vg_data<-vg_data[,1:dim(vg_data)[2]-1]\n'
# Function metaMDS returns an object of class metaMDS.
mds_scrpt += 'vg_data.mds <- metaMDS(vg_data,' + mds_cfg + ')\n'
mds_scrpt += 'vg_data.mds\n'
# save the results in the memory
# 1) store species ordination
mds_scrpt += 'variableScores <- vg_data.mds$species\n'
# 2) store site ordination
mds_scrpt += 'sampleScores <- vg_data.mds$points\n'
# 3) store other ordination results
mds_scrpt += 'stress <- vg_data.mds$stress\n'
mds_scrpt += 'dist_metric <- vg_data.mds$distance\n'
mds_scrpt += 'dist_matrix <- vg_data.mds$diss\n'
mds_scrpt += 'dist_call <- vg_data.mds$distcall\n'
mds_scrpt += 'converged <- vg_data.mds$converged\n'
mds_scrpt += 'dims <- vg_data.mds$ndim\n'
mds_scrpt += 'tries <- vg_data.mds$tries\n'
mds_scrpt += 'maxits <- vg_data.mds$maxits\n'
mds_scrpt += 'func_call <- vg_data.mds$call\n'
mds_scrpt += 'mds_data <- vg_data.mds$data\n'
# save the results to the current dir
# Write CSV in R
mds_scrpt += 'write.csv(dist_matrix,file="dist_matrix.csv",row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(variableScores,file="species_ordination.csv",' + \
'row.names=TRUE,na="")\n'
mds_scrpt += 'write.csv(sampleScores,file="site_ordination.csv",row.names=TRUE,na="")\n'
# Write JSON in R
mds_scrpt += 'write_json(toJSON(dist_matrix),path="dist_matrix.json",pretty=TRUE,' + \
'auto_unbox=FALSE)\n'
mds_scrpt += 'write_json(toJSON(variableScores),path="species_ordination.json",' + \
'pretty=TRUE,auto_unbox=FALSE)\n'
mds_scrpt += 'write_json(toJSON(sampleScores),path="site_ordination.json",' + \
'pretty=TRUE,auto_unbox=FALSE)\n'
mds_scrpt += 'item_name=c("stress","distance_metric","dist_call","converged",' + \
'"dimesions","trials","maxits")\n'
mds_scrpt += 'item_value=c(stress,dist_metric,dist_call,converged,dims,tries,maxits)\n'
mds_scrpt += 'df <- data.frame(item_name,item_value,stringsAsFactors=FALSE)\n'
mds_scrpt += 'write_json(toJSON(df),path="others.json",pretty=TRUE,auto_unbox=FALSE)\n'
# save mds plots
mds_scrpt += 'bmp(file="saving_mds_plot.bmp",width=580,height=580,units="px",' + \
'res=100, pointsize=12)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'points(vg_data.mds)\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="saving_mds_plot.pdf",width=6,height=6)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'points(vg_data.mds)\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="mds_plot_withlabel.pdf",width=6,height=6)\n'
mds_scrpt += 'plot(vg_data.mds,type="n",display="sites")\n'
mds_scrpt += 'ordilabel(vg_data.mds,dis="sites",cex=1.2,font=3,fill="hotpink",col="blue")\n'
mds_scrpt += 'dev.off()\n'
mds_scrpt += 'pdf(file="mds_plot_withcolor.pdf",width=6,height=6)\n'
mds_scrpt += 'fig <- ordiplot(vg_data.mds,type="none")\n'
mds_scrpt += 'points(fig,"sites",pch=21,col="red",bg="yellow")\n'
mds_scrpt += 'points(fig,"species",pch=21,col="green",bg="blue")\n'
# mds_scrpt += 'text(fig, "species", col="blue", cex=0.9)\n'
mds_scrpt += 'dev.off()\n'
# If there is user input plotting script:
plt_scrpt = params.get('plot_script', '').lower()
if plt_scrpt and re.match(r"^plot\(\s*[a-zA-Z]+.*\)$", plt_scrpt):
arr_plt = plt_scrpt.split(',')
arr_plt[0] = 'plot(vg_data.mds' # make sure to pass the correct data
plt_scrpt = (',').join(arr_plt)
if len(arr_plt) == 1:
plt_scrpt += ')'
plt_type = params.get('plot_type', 'pdf').lower()
if not plt_type:
plt_type = 'pdf'
plt_name = params.get('plot_name', 'usr_plt_name').lower()
if not plt_name:
plt_name = 'usr_plt_name'
plt_name += '.' + plt_type
if plt_type == 'jpg':
plt_type = 'jpeg'
if plt_type == 'ps':
plt_type = 'postscript'
if plt_type == 'tiff':
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '",width=4,height=4,units="in",' + \
'compression="lzw",res=300)\n'
elif plt_type in ['jpeg', 'bmp', 'png']:
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '",width=580,height=580,units="px",' + \
'res=100, pointsize=12)\n'
else:
mds_scrpt += plt_type
mds_scrpt += '(file="' + plt_name + '")\n'
mds_scrpt += plt_scrpt + '\n'
mds_scrpt += 'dev.off()\n'
logging.info('R script: {}'.format(mds_scrpt))
mds_rscript = 'mds_script.R'
rscrpt_file_path = os.path.join(self.output_dir, mds_rscript)
with open(rscrpt_file_path, 'w') as r_file:
r_file.write(mds_scrpt)
return rscrpt_file_path
def _execute_r_script(self, rfile_name):
"""
_execute_r_script: Calling the Rscript executable to run the R script in rfile_name
"""
logging.info('Calling R......')
result_dir = os.path.dirname(rfile_name)
if not result_dir:
result_dir = self.working_dir
rcmd = [os.path.join(self.R_BIN, 'Rscript')]
rcmd.append(rfile_name)
logging.info('Running metaMDS script in current working directory: {}'.format(result_dir))
exitCode = 0
try:
complete_proc = subprocess.run(rcmd, cwd=result_dir, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
exitCode = complete_proc.returncode
if (exitCode == 0):
logging.info('\n{}'.format(complete_proc.stdout))
logging.info('\n{} was executed successfully, exit code was: {}'.format(
' '.join(rcmd), str(exitCode)))
logging.info("Finished calling R.")
else:
logging.info('Error running command: {} Exit Code: {}'.format(
' '.join(rcmd), str(exitCode)))
logging.info('\n{}'.format(complete_proc.stderr))
except subprocess.CalledProcessError as sub_e:
exitCode = -99
logging.info('Caught subprocess.CalledProcessError {}'.format(sub_e))
return exitCode
def _df_to_list(self, df):
"""
_df_to_list: convert Dataframe to FloatMatrix2D matrix data
"""
df.index = df.index.astype('str')
df.columns = df.columns.astype('str')
df.fillna(0, inplace=True)
matrix_data = {'row_ids': df.index.tolist(),
'col_ids': df.columns.tolist(),
'values': df.values.tolist()}
return matrix_data
def _mds_df_to_excel(self, mds_df, distance_df, result_dir, mds_matrix_ref):
"""
write MDS matrix df into excel
"""
logging.info('writing mds data frame to excel file')
mds_matrix_obj = self.dfu.get_objects({'object_refs': [mds_matrix_ref]})['data'][0]
mds_matrix_info = mds_matrix_obj['info']
mds_matrix_name = mds_matrix_info[1]
file_path = os.path.join(result_dir, mds_matrix_name + ".xlsx")
writer = pd.ExcelWriter(file_path)
mds_df.to_excel(writer, "mds_matrix", index=True)
if distance_df is not None:
distance_df.to_excel(writer, "mds_distance_matrix", index=True)
writer.close()
def _Matrix2D_to_df(self, Matrix2D):
"""
_Matrix2D_to_df: transform a FloatMatrix2D to data frame
"""
index = Matrix2D.get('row_ids')
columns = Matrix2D.get('col_ids')
values = Matrix2D.get('values')
df = pd.DataFrame(values, index=index, columns=columns)
return df
def _mds_to_df(self, mds_matrix_ref):
"""
retrieve MDS matrix ws object to mds_df
"""
logging.info('converting mds matrix to data frame')
mds_data = self.dfu.get_objects({'object_refs': [mds_matrix_ref]})['data'][0]['data']
rotation_matrix_data = mds_data.get('rotation_matrix')
distance_matrix_data = mds_data.get('distance_matrix')
original_matrix_ref = mds_data.get('original_matrix_ref')
dimension = mds_data.get('mds_parameters').get('n_components')
mds_df = self._Matrix2D_to_df(rotation_matrix_data)
distance_df = None
if distance_matrix_data:
distance_df = self._Matrix2D_to_df(distance_matrix_data)
if original_matrix_ref:
logging.info('appending instance group information to mds data frame')
obj_data = self.dfu.get_objects(
{'object_refs': [original_matrix_ref]})['data'][0]['data']
attributemapping_ref = obj_data.get('{}_attributemapping_ref'.format(dimension))
am_data = self.dfu.get_objects(
{'object_refs': [attributemapping_ref]})['data'][0]['data']
attributes = am_data.get('attributes')
instances = am_data.get('instances')
am_df = pd.DataFrame(data=list(instances.values()),
columns=list(map(lambda x: x.get('attribute'), attributes)),
index=instances.keys())
mds_df = mds_df.merge(am_df, left_index=True, right_index=True, how='left',
validate='one_to_one')
return mds_df, distance_df
def _save_mds_matrix(self, workspace_name, input_obj_ref, mds_matrix_name,
distance_df, mds_params_df, site_ordin_df, species_ordin_df):
logging.info('Saving MDSMatrix...')
if not isinstance(workspace_name, int):
ws_name_id = self.dfu.ws_name_to_id(workspace_name)
else:
ws_name_id = workspace_name
mds_data = {}
mds_data.update({'distance_matrix': self._df_to_list(distance_df)})
mds_data.update({'site_ordination': self._df_to_list(site_ordin_df)})
mds_data.update({'species_ordination': self._df_to_list(species_ordin_df)})
mds_data.update({'mds_parameters': self._df_to_list(mds_params_df)})
mds_data.update({'original_matrix_ref': input_obj_ref})
mds_data.update({'rotation_matrix': self._df_to_list(distance_df)})
obj_type = 'KBaseExperiments.PCAMatrix'
info = self.dfu.save_objects({
"id": ws_name_id,
"objects": [{
"type": obj_type,
"data": mds_data,
"name": mds_matrix_name
}]
})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _zip_folder(self, folder_path, output_path):
"""
_zip_folder: Zip the contents of an entire folder (with that folder included in the
archive). Empty subfolders are included in the archive as well, via the
'Include all subfolders, including empty ones' loop below.
"""
with zipfile.ZipFile(output_path, 'w',
zipfile.ZIP_DEFLATED,
allowZip64=True) as ziph:
for root, folders, files in os.walk(folder_path):
# Include all subfolders, including empty ones.
for folder_name in folders:
absolute_fpath = os.path.join(root, folder_name)
relative_fpath = os.path.join(os.path.basename(root), folder_name)
logging.info("Adding folder {} to archive.".format(absolute_fpath))
ziph.write(absolute_fpath, relative_fpath)
for f in files:
absolute_path = os.path.join(root, f)
relative_path = os.path.join(os.path.basename(root), f)
logging.info("Adding file {} to archive.".format(absolute_path))
ziph.write(absolute_path, relative_path)
logging.info("{} created successfully.".format(output_path))
def _generate_output_file_list(self, out_dir):
"""
_generate_output_file_list: zip result files and generate file_links for report
"""
logging.info('Start packing result files from MDS...')
output_files = list()
output_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(output_dir)
mds_output = os.path.join(output_dir, 'metaMDS_output.zip')
self._zip_folder(out_dir, mds_output)
output_files.append({'path': mds_output,
'name': os.path.basename(mds_output),
'label': os.path.basename(mds_output),
'description': 'Output file(s) generated by metaMDS'})
return output_files
def _generate_mds_html_report(self, mds_outdir, n_components):
logging.info('Start generating html report for MDS results...')
html_report = list()
result_dir = os.path.join(self.working_dir, str(uuid.uuid4()))
self._mkdir_p(result_dir)
result_file_path = os.path.join(result_dir, 'mds_result.html')
mds_plots = list()
for root, folders, files in os.walk(mds_outdir):
# Find the image files by their extensions.
for f in files:
if re.match(r'^[a-zA-Z]+.*\.(jpeg|jpg|bmp|png|tiff|pdf|ps)$', f):
absolute_path = os.path.join(root, f)
logging.info("Adding file {} to plot archive.".format(absolute_path))
mds_plots.append(absolute_path)
visualization_content = ''
for mds_plot in mds_plots:
shutil.copy2(mds_plot,
os.path.join(result_dir, os.path.basename(mds_plot)))
visualization_content += '<iframe height="900px" width="100%" '
visualization_content += 'src="{}" '.format(os.path.basename(mds_plot))
visualization_content += 'style="border:none;"></iframe>\n<p></p>\n'
with open(result_file_path, 'w') as result_file:
with open(os.path.join(os.path.dirname(__file__), 'templates', 'mds_template.html'),
'r') as report_template_file:
report_template = report_template_file.read()
report_template = report_template.replace('<p>Visualization_Content</p>',
visualization_content)
report_template = report_template.replace('n_components',
'{} Components'.format(n_components))
result_file.write(report_template)
report_shock_id = self.dfu.file_to_shock({'file_path': result_dir,
'pack': 'zip'})['shock_id']
html_report.append({'shock_id': report_shock_id,
'name': os.path.basename(result_file_path),
'label': os.path.basename(result_file_path),
'description': 'HTML summary report for MDS Matrix App'
})
return html_report
def _generate_mds_report(self, mds_ref, output_dir, workspace_name, n_components):
logging.info('Creating MDS report...')
output_files = self._generate_output_file_list(output_dir)
output_html_files = self._generate_mds_html_report(output_dir, | |
to do
if type(steps) == str:
steps = [steps]
do_merge = "merge" in steps
do_post_merge = "post" in steps
self.merge_stats = {}
self.stats = {}
self.mapping = {}
# try to identify root document sources amongst the list to first
# process them (if any)
defined_root_sources = self.get_root_document_sources()
root_sources = list(
set(source_names).intersection(set(defined_root_sources)))
other_sources = list(set(source_names).difference(set(root_sources)))
# got root doc sources but not part of the merge ? that's weird...
if defined_root_sources and not root_sources:
self.logger.warning(
"Root document sources found (%s) but not part of the merge..."
% defined_root_sources)
source_names = sorted(source_names)
root_sources = sorted(root_sources)
other_sources = sorted(other_sources)
self.logger.info("Sources to be merged: %s" % source_names)
self.logger.info("Root sources: %s" % root_sources)
self.logger.info("Other sources: %s" % other_sources)
got_error = False
async def merge(src_names):
jobs = []
for i, src_name in enumerate(src_names):
await asyncio.sleep(0.0)
job = self.merge_source(src_name,
batch_size=batch_size,
ids=ids,
job_manager=job_manager)
job = asyncio.ensure_future(job)
def merged(f, name, stats):
try:
res = f.result()
stats.update(res)
except Exception as e:
self.logger.exception(
"Failed merging source '%s': %s" % (name, e))
nonlocal got_error
got_error = e
job.add_done_callback(
partial(merged, name=src_name, stats=self.merge_stats))
jobs.append(job)
await asyncio.wait([job])
# raise error as soon as we know something went wrong
if got_error:
raise got_error
tasks = asyncio.gather(*jobs)
await tasks
if do_merge:
if root_sources:
self.register_status("building",
transient=True,
init=True,
job={
"step": "merge-root",
"sources": root_sources
})
self.logger.info("Merging root document sources: %s" %
root_sources)
await merge(root_sources)
self.register_status("success",
job={
"step": "merge-root",
"sources": root_sources
})
if other_sources:
self.register_status("building",
transient=True,
init=True,
job={
"step": "merge-others",
"sources": other_sources
})
self.logger.info("Merging other resources: %s" % other_sources)
await merge(other_sources)
self.register_status("success",
job={
"step": "merge-others",
"sources": other_sources
})
self.register_status("building",
transient=True,
init=True,
job={"step": "finalizing"})
self.logger.info("Finalizing target backend")
self.target_backend.finalize()
self.register_status("success", job={"step": "finalizing"})
else:
self.logger.info("Skip data merging")
if do_post_merge:
self.logger.info("Running post-merge process")
self.register_status("building",
transient=True,
init=True,
job={"step": "post-merge"})
pinfo = self.get_pinfo()
pinfo["step"] = "post-merge"
job = await job_manager.defer_to_thread(
pinfo,
partial(self.post_merge, source_names, batch_size,
job_manager))
job = asyncio.ensure_future(job)
def postmerged(f):
try:
self.logger.info("Post-merge completed [%s]" % f.result())
self.register_status("success", job={"step": "post-merge"})
except Exception as e:
self.logger.exception("Failed post-merging source: %s" % e)
nonlocal got_error
got_error = e
job.add_done_callback(postmerged)
await job
if got_error:
raise got_error
else:
self.logger.info("Skip post-merge process")
await asyncio.sleep(0.0)
return self.merge_stats
def document_cleaner(self, src_name, *args, **kwargs):
"""
Return a function taking a document as argument, cleaning the doc
as needed, and returning that doc. If no function is needed, None.
Note: the returned function must be pickleable, careful with lambdas
and closures.
"""
return None
async def merge_source(self,
src_name,
batch_size=100000,
ids=None,
job_manager=None):
# it's actually not optional
assert job_manager
_query = self.generate_document_query(src_name)
# Note: no need to check if there's an existing document with _id (we want to merge only with an existing document)
# if the document doesn't exist then the update() call will silently fail.
# That being said... if no root documents, then there won't be any previously inserted
# documents, and this update() would just do nothing. So if no root docs, then upsert
# (update or insert, but do something)
defined_root_sources = self.get_root_document_sources()
upsert = not defined_root_sources or src_name in defined_root_sources
if not upsert:
self.logger.debug(
"Documents from source '%s' will be stored only if a previous document exists with same _id"
% src_name)
jobs = []
total = self.source_backend[src_name].count()
btotal = math.ceil(total / batch_size)
bnum = 1
cnt = 0
got_error = False
# grab ids only, so we can get more, let's say 10 times more
id_batch_size = batch_size * 10
if ids:
self.logger.info(
"Merging '%s' specific list of _ids, create merger job with batch_size=%d"
% (src_name, batch_size))
id_provider = [ids]
else:
self.logger.info(
"Fetch _ids from '%s' with batch_size=%d, and create merger job with batch_size=%d"
% (src_name, id_batch_size, batch_size))
id_provider = id_feeder(self.source_backend[src_name],
batch_size=id_batch_size)
if _query and ids is not None:
self.logger.info(
"Query/filter involved, but also specific list of _ids. Ignoring query and use _ids"
)
if _query and ids is None:
self.logger.info(
"Query/filter involved, can't use cache to fetch _ids")
# use doc_feeder but post-process doc to keep only the _id
id_provider = map(
lambda docs: [d["_id"] for d in docs],
doc_feeder(self.source_backend[src_name],
query=_query,
step=batch_size,
inbatch=True,
fields={"_id": 1}))
else:
# when passing a list of _ids, IDs will be sent to the query, so we need to reduce the batch size
id_provider = ids and iter_n(ids, int(
batch_size / 100)) or id_feeder(self.source_backend[src_name],
batch_size=id_batch_size,
logger=self.logger)
src_master = self.source_backend.master
meta = src_master.find_one({"_id": src_name}) or {}
merger = meta.get("merger", "upsert")
self.logger.info("Documents from source '%s' will be merged using %s" %
(src_name, merger))
doc_cleaner = self.document_cleaner(src_name)
for big_doc_ids in id_provider:
for doc_ids in iter_n(big_doc_ids, batch_size):
# try to put some async here to give control back
# (but everybody knows it's a blocking call: doc_feeder)
await asyncio.sleep(0.1)
cnt += len(doc_ids)
pinfo = self.get_pinfo()
pinfo["step"] = src_name
pinfo["description"] = "#%d/%d (%.1f%%)" % (bnum, btotal,
(cnt / total * 100))
self.logger.info("Creating merger job #%d/%d, to process '%s' %d/%d (%.1f%%)" %
(bnum, btotal, src_name, cnt, total, (cnt/total*100.)))
job = await job_manager.defer_to_process(
pinfo,
partial(merger_worker, self.source_backend[src_name].name,
self.target_backend.target_name, doc_ids,
self.get_mapper_for_source(src_name, init=False),
doc_cleaner, upsert, merger, bnum))
def batch_merged(f, batch_num):
nonlocal got_error
if type(f.result()) != int:
got_error = Exception(
"Batch #%s failed while merging source '%s' [%s]" %
(batch_num, src_name, f.result()))
job.add_done_callback(partial(batch_merged, batch_num=bnum))
jobs.append(job)
bnum += 1
# raise error as soon as we know
if got_error:
raise got_error
self.logger.info("%d jobs created for merging step" % len(jobs))
tasks = asyncio.gather(*jobs)
def done(f):
nonlocal got_error
if None in f.result():
got_error = Exception("Some batches failed")
return
# compute overall inserted/updated records (consume result() and check summable)
_ = sum(f.result())
tasks.add_done_callback(done)
await tasks
if got_error:
raise got_error
else:
return {"%s" % src_name: cnt}
def post_merge(self, source_names, batch_size, job_manager):
pass
class LinkDataBuilder(DataBuilder):
"""
LinkDataBuilder creates a link to the original datasource to be merged, without
actually copying the data (merged collection remains empty). This builder is
only valid when using only one datasource (thus no real merge) is declared in
the list of sources to be merged, and is useful to prevent data duplication between
the datasource itself and the resulting merged collection.
"""
def __init__(self, build_name, source_backend, target_backend, *args,
**kwargs):
super().__init__(build_name,
source_backend,
target_backend=partial(LinkTargetDocMongoBackend),
*args,
**kwargs)
conf = self.source_backend.get_build_configuration(self.build_name)
assert len(conf["sources"]) == 1, \
"Found more than one source to link, not allowed: %s" % conf["sources"]
assert hasattr(self.target_backend, "datasource_name")
self.target_backend.datasource_name = conf["sources"][0]
self.target_backend.source_db = self.source_backend
async def merge_source(self, src_name, *args, **kwargs):
total = self.source_backend[src_name].count()
return {"%s" % src_name: total}
def fix_batch_duplicates(docs, fail_if_struct_is_different=False):
"""
Remove duplicates from docs based on _id. If _id's the same but
structure is different (not real "duplicates", but different documents
with the same _ids), merge docs all together (dict.update)
or raise an error if fail_if_struct_is_different.
"""
dids = {}
# docs per _id
for d in docs:
dids.setdefault(d["_id"], []).append(d)
# now check doc structure for each duplicates
# if same structure, replace with one occurrence of the docs
# if not the same, log all the docs as warning, and merge them all
# as we would do if we were upserting doc one-by-one (no batch)
# note: dicts are unhashable (no set), so either compare them to each other (n^2-ish)
# or use json strings (let's try json...)
for _id in dids:
jl = set([json.dumps(e, sort_keys=True) for e in dids[_id]])
if len(jl) > 1:
# different structure
if fail_if_struct_is_different:
raise ValueError(
"Found duplicated with different document structure: %s" %
dids[_id])
else:
logging.warning(
"Found duplicated with different document structure, merging them altogether: %s"
% dids[_id])
# merge docs on top of each other
dupdocs = dids[_id]
merged = {}
[merged.update(d) for d in dupdocs]
dids[_id] = merged
else:
assert len(jl) == 1
# normalize to scalar
dids[_id] = dids[_id][0]
return list(dids.values())
def merger_worker(col_name, dest_name, ids, mapper, cleaner, upsert, merger,
batch_num):
try:
src = mongo.get_src_db()
tgt = mongo.get_target_db()
col = src[col_name]
dest = DocMongoBackend(tgt, tgt[dest_name])
cur = doc_feeder(col,
step=len(ids),
inbatch=False,
query={'_id': {
'$in': ids
}})
if cleaner:
cur = map(cleaner, cur)
mapper.load()
docs = [d for d in mapper.process(cur)]
# while documents from cursor "cur" are unique, at this point, due to the use
# a mapper, documents can be | |
for item in items] for items in
[self._lc_comb_created,self._comp_comb_created,self._manual_created, self._info_created]]
self._lc_comb_created, self._comp_comb_created, self._manual_created, self._info_created= [], [], [], []
if self._line_to_struc[self._active_line][0].get_structure_type() == '':
self._info_created.append(tk.Label(self._main_fr, text='No structure type selected',
font=self._text_size["Text 10 bold"], bg = self._general_color))
self._info_created[0].place(relx=lc_x , y = lc_y + 3*lc_y_delta)
else:
# creating new label, checkbox and entry. creating new list of created items.
# finding loads applied to lines
counter = 0
if len(self._load_dict) != 0 and combination !='manual':
for load, data in self._load_dict.items():
if self._active_line in self._load_dict[load][1] and data[0].get_limit_state() == 'ULS':
name = (combination,self._active_line,str(load)) #tuple to identify combinations on line
self._lc_comb_created.append(tk.Label(self._main_fr, text = load,
font = self._text_size['Text 8 bold'],
bg = self._general_color))
self._lc_comb_created.append(tk.Entry(self._main_fr,
textvariable =self._new_load_comb_dict[name][0],
width=5, bg = self._entry_color,
fg = self._entry_text_color))
self._lc_comb_created.append(tk.Entry(self._main_fr,
textvariable=self._new_load_comb_dict[name][1],
width=5, bg = self._entry_color,
fg = self._entry_text_color))
self._lc_comb_created.append(tk.Checkbutton(self._main_fr,
variable =self._new_load_comb_dict[name][2]))
for load_no in range(int(len(self._lc_comb_created)/4)):
self._lc_comb_created[0+load_no*4].place(relx=lc_x, rely=lc_y+lc_y_delta*load_no)
self._lc_comb_created[1+load_no*4].place(relx=lc_x+5*lc_x_delta, rely=lc_y+lc_y_delta*load_no)
self._lc_comb_created[2+load_no*4].place(relx=lc_x+6*lc_x_delta, rely=lc_y+lc_y_delta*load_no)
self._lc_comb_created[3+load_no*4].place(relx=lc_x+7*lc_x_delta, rely=lc_y+lc_y_delta*load_no)
counter += 1
# finding tank loads applied to line (automatically created compartments).
lc_y += 0.023148148*counter
counter = 0
if len(self._tank_dict) != 0 and combination !='manual':
for compartment in self.get_compartments_for_line(self._active_line):
name = (combination,self._active_line,'comp' + str(compartment)) #tuple to identify combinations on line
self._comp_comb_created.append(tk.Label(self._main_fr, text='Compartment'+str(compartment),
font=self._text_size['Text 8 bold']))
self._comp_comb_created.append(tk.Entry(self._main_fr,
textvariable=self._new_load_comb_dict[name][0],
width=5, bg = self._entry_color,
fg = self._entry_text_color))
self._comp_comb_created.append(tk.Entry(self._main_fr,
textvariable=self._new_load_comb_dict[name][1],
width=5, bg = self._entry_color,
fg = self._entry_text_color))
self._comp_comb_created.append(tk.Checkbutton(self._main_fr,
variable = self._new_load_comb_dict[name][2]))
for comp_no in range(int(len(self._comp_comb_created)/4)):
self._comp_comb_created[0+comp_no*4].place(relx=lc_x, rely=lc_y+lc_y_delta*comp_no)
self._comp_comb_created[1+comp_no*4].place(relx=lc_x+5*lc_x_delta, rely=lc_y+lc_y_delta*comp_no)
self._comp_comb_created[2+comp_no*4].place(relx=lc_x+6*lc_x_delta, rely=lc_y+lc_y_delta*comp_no)
self._comp_comb_created[3+comp_no*4].place(relx=lc_x+7*lc_x_delta, rely=lc_y+lc_y_delta*comp_no)
counter += 1
lc_y += 0.027777778*counter
# finding manual loads applied to the line
name = ('manual', self._active_line, 'manual') # tuple to identify combinations on line
if name in self._new_load_comb_dict.keys():
self._manual_created.append(tk.Label(self._main_fr, text='Manual (pressure/LF)',
font=self._text_size['Text 8 bold'],
bg = self._general_color))
self._manual_created.append(
tk.Entry(self._main_fr, textvariable=self._new_load_comb_dict[name][0], width=15,
bg = self._entry_color, fg = self._entry_text_color))
self._manual_created.append(
tk.Entry(self._main_fr, textvariable=self._new_load_comb_dict[name][1], width=6,
bg = self._entry_color, fg = self._entry_text_color))
self._manual_created.append(tk.Checkbutton(self._main_fr, variable=self._new_load_comb_dict[name][2]))
self._manual_created[0].place(relx=lc_x, rely=lc_y)
self._manual_created[1].place(relx=lc_x + 4 * lc_x_delta, rely=lc_y)
self._manual_created[2].place(relx=lc_x + 6 * lc_x_delta, rely=lc_y)
self._manual_created[3].place(relx=lc_x + 7 * lc_x_delta, rely=lc_y)
#printing the results
#try:
# TODO the reason manual does not show is that the others do not exist in line_comb_dict. FIX.
results = self.calculate_all_load_combinations_for_line(self._active_line)
self._result_label_dnva.config(text = 'DNV a [Pa]: ' + str(results['dnva']),
font = self._text_size['Text 8'])
self._result_label_dnvb.config(text = 'DNV b [Pa]: ' + str(results['dnvb']),
font = self._text_size['Text 8'])
self._result_label_tanktest.config(text = 'TT [Pa]: ' + str(results['tanktest']),
font = self._text_size['Text 8'])
self._result_label_manual.config(text = 'Manual [Pa]: ' + str(results['manual']))
lc_y = self.results_gui_start+0.018518519
self._result_label_dnva.place(relx = lc_x+0*lc_x_delta, rely = lc_y+lc_y_delta*1.5)
self._result_label_dnvb.place(relx=lc_x+4*lc_x_delta, rely=lc_y+lc_y_delta*1.5)
self._result_label_tanktest.place(relx=lc_x+0*lc_x_delta, rely=lc_y+2.4*lc_y_delta)
self._result_label_manual.place(relx=lc_x+4*lc_x_delta, rely=lc_y+2.4*lc_y_delta)
# except KeyError:
# pass
def slider_used(self, event):
'''
Action when slider is activated.
:return:
'''
self._canvas_scale = self._slider.get()
self.update_frame()
def grid_operations(self, line, coordinates):
'''
Creating a grid in the canvas used for various caluclations
:return:
'''
try:
if self._line_to_struc[line][0].get_structure_type() not in ('GENERAL_INTERNAL_NONWT','FRAME'):
self._pending_grid_draw[line] = coordinates
except KeyError:
pass
def grid_find_tanks(self, animate = False):
'''
Printing the grid in a separate window
:return:
'''
if self._line_to_struc == {}:
tk.messagebox.showerror('Search error','No geometry with properties exist.')
return
#setting the button to red
try:
img_file_name = 'img_int_pressure_button_search.gif'
if os.path.isfile('images/' + img_file_name):
file_path = 'images/' + img_file_name
else:
file_path = self._root_dir + '/images/' + img_file_name
photo = tk.PhotoImage(file=file_path)
self._int_button.config(image = photo)
self._int_button.image = photo
except TclError:
pass
animate = tk.messagebox.askquestion('Search for compartments','Searching for compartments will use a large matrix to '
'identify watertight members and consequently the '
'enclosed compartments. \n'
'You may animate the search for visualization and '
'increased understanding purposes.\n '
'However, this will take some more time than just '
'showing the final result.\n'
'\n'
'Yes - Show search animation\n'
'No - Draw final result only\n'
'\n'
'Choose yes or no.' )
animate = True if animate == 'yes' else False
self._main_grid.clear()
self._tank_dict = {}
self._pending_grid_draw={}
self._compartments_listbox.delete(0,'end')
for line, points in self._line_dict.items():
# adding the lines made by the user to the grid
p1 = self._point_dict['point'+str(points[0])]
p2 = self._point_dict['point'+str(points[1])]
self.grid_operations(line, [self.get_grid_coord_from_points_coords(p1),
self.get_grid_coord_from_points_coords(p2)])
self._grid_calc = grid_window.CreateGridWindow(self._main_grid,self._canvas_dim,
self._pending_grid_draw,self._canvas_base_origo)
compartment_search_return = self._grid_calc.search_bfs(animate=animate)
for comp_no, properties in compartment_search_return['compartments'].items():
# finding actual max min elevation from grid
min_el = (float('inf'), float('inf'))
max_el = (-float('inf'),-float('inf'))
if comp_no > 1:
self._compartments_listbox.insert('end', comp_no)
for corner in properties[1]:
corner_real = self.get_point_coords_from_grid_coords(corner)
if self.get_point_coords_from_grid_coords(corner)[1] < min_el[1]:
min_el = self.get_closest_point(corner_real)[1]
if self.get_point_coords_from_grid_coords(corner)[1] > max_el[1]:
max_el = self.get_closest_point(corner_real)[1]
self.new_tank(int(comp_no),properties[0], min_el, max_el)
comp_name = 'comp'+str(int(comp_no))
for combination in self._load_factors_dict.keys():
#creating the load factor combinations for tanks.
for line in self._line_dict.keys():
if comp_no in self.get_compartments_for_line(line):
name = (combination, line, comp_name)
self._new_load_comb_dict[name] = [tk.DoubleVar(), tk.DoubleVar(), tk.IntVar()]
self._new_load_comb_dict[name][0].set(self._load_factors_dict[combination][1])
self._new_load_comb_dict[name][1].set(self._load_factors_dict[combination][2])
self._new_load_comb_dict[name][2].set(1)
try:
img_file_name = 'img_int_pressure_button.gif'
if os.path.isfile('images/' + img_file_name):
file_path = 'images/' + img_file_name
else:
file_path = self._root_dir + '/images/' + img_file_name
photo = tk.PhotoImage(file=file_path)
self._int_button.config(image = photo)
self._int_button.image = photo
except TclError:
pass
if not animate:
tank_count = None if len(self._tank_dict)==0 else len(self._tank_dict)
if tank_count is not None:
self._grid_calc.draw_grid(tank_count=tank_count)
else:
tank_count = None if len(self._tank_dict) == 0 else len(self._tank_dict)
if tank_count is not None:
self._grid_calc.animate_grid(grids_to_animate=compartment_search_return['grids'],
tank_count = None if len(self._tank_dict)==0 else len(self._tank_dict))
self.get_cob() # Calculating COB
self.update_frame()
def grid_display_tanks(self, save = False):
'''
Opening matplotlib grid illustration
:return:
'''
try:
if self._grid_calc is not None:
self._grid_calc.draw_grid(save = save,
tank_count=None if len(self._tank_dict)==0 else len(self._tank_dict) )
except RecursionError:
pass
def add_to_combinations_dict(self,line):
'''
When a new line is created and tanks exist, the combinations dict must be updated.
:param line:
:return:
'''
if len(self._tank_dict) != 0:
for compartment in self.get_compartments_for_line(line):
for combination in self._load_factors_dict.keys():
name = (combination, line, 'comp'+str(compartment))
self._new_load_comb_dict[name] = [tk.DoubleVar(), tk.DoubleVar(), tk.IntVar()]
self._new_load_comb_dict[name][0].set(self._load_factors_dict[combination][1])
self._new_load_comb_dict[name][1].set(self._load_factors_dict[combination][2])
self._new_load_comb_dict[name][2].set(1)
else:
pass
name = ('manual', line, 'manual')
self._new_load_comb_dict[name] = [tk.DoubleVar(), tk.DoubleVar(), tk.IntVar()]
self._new_load_comb_dict[name][0].set(0)
self._new_load_comb_dict[name][1].set(0)
self._new_load_comb_dict[name][2].set(0)
def trace_shift_change(self, *args):
try:
self.update_frame()
except (TclError, ZeroDivisionError):
pass
def trace_acceptance_change(self, *args):
try:
self.update_frame()
for key, val in self._line_to_struc.items():
val[1].need_recalc = True
except (TclError, ZeroDivisionError):
pass
def update_frame(self, event = None, *args):
state = self.get_color_and_calc_state()
self.draw_results(state=state)
self.draw_canvas(state=state)
self.draw_prop()
self.trace_puls_up_or_sp()
return state
def get_color_and_calc_state(self, current_line = None, active_line_only = False):
''' Return calculations and colors for line and results. '''
return_dict = {'colors': {}, 'section_modulus': {}, 'thickness': {}, 'shear_area': {}, 'buckling': {},
'fatigue': {}, 'pressure_uls': {}, 'pressure_fls': {},
'struc_obj': {}, 'scant_calc_obj': {}, 'fatigue_obj': {}, 'utilization': {}, 'slamming': {},
'color code': {}, 'PULS colors': {}, 'ML buckling colors' : {}, 'ML buckling class' : {},
'weights': {}}
return_dict['slamming'][current_line] = {}
if current_line is None and active_line_only:
line_iterator = [self._active_line, ]
elif current_line is None and not active_line_only and len(self._line_dict) != 0:
line_iterator = self._line_dict.keys()
elif current_line is not None:
line_iterator = [current_line, ]
elif current_line not in self._line_to_struc.keys() and active_line_only:
return return_dict
else:
return return_dict
rec_for_color = {}
for current_line in line_iterator:
rec_for_color[current_line] = {}
slamming_pressure = 0
if current_line in self._line_to_struc.keys():
obj_structure = self._line_to_struc[current_line][0]
obj_scnt_calc = self._line_to_struc[current_line][1]
if obj_scnt_calc.need_recalc is False:
return self._state_logger[current_line]
try:
norm_and_slam = self.get_highest_pressure(current_line)
design_pressure = norm_and_slam['normal'] / 1000
if norm_and_slam['slamming'] is None:
pass
else:
slamming_dict = self.get_highest_pressure(current_line)
slamming_pressure = slamming_dict['slamming']
slamming_red_fac_pl = slamming_dict['slamming plate reduction factor']
slamming_red_fac_stf = slamming_dict['slamming stf reduction factor']
except KeyError:
design_pressure = 0
sec_mod = [obj_scnt_calc.get_section_modulus()[0],
obj_scnt_calc.get_section_modulus()[1]]
shear_area = obj_scnt_calc.get_shear_area()
min_shear = obj_scnt_calc.get_minimum_shear_area(design_pressure)
min_sec_mod = obj_scnt_calc.get_dnv_min_section_modulus(design_pressure)
min_thk = obj_scnt_calc.get_dnv_min_thickness(design_pressure)
buckling = [round(res, 2) for res in obj_scnt_calc.calculate_buckling_all(
design_lat_press=design_pressure,
checked_side=obj_scnt_calc.get_side())]
rec_for_color[current_line]['section modulus'] = min_sec_mod/min(sec_mod)
rec_for_color[current_line]['plate thickness'] = (min_thk/1000)/obj_scnt_calc.get_pl_thk()
rec_for_color[current_line]['rp buckling'] = max(buckling)
rec_for_color[current_line]['shear'] = min_shear/shear_area
return_dict['slamming'][current_line] = dict()
if slamming_pressure is not None and slamming_pressure > 0:
return_dict['slamming'][current_line]['state'] = True
else:
return_dict['slamming'][current_line]['state'] = False
try:
fatigue_obj = self._line_to_struc[current_line][2]
p_int = self.get_fatigue_pressures(current_line, fatigue_obj.get_accelerations())['p_int']
p_ext = self.get_fatigue_pressures(current_line, fatigue_obj.get_accelerations())['p_ext']
damage = fatigue_obj.get_total_damage(int_press=(p_int['loaded'], p_int['ballast'],
p_int['part']), ext_press=(p_ext['loaded'],
p_ext['ballast'],
p_ext['part']))
dff = fatigue_obj.get_dff()
color_fatigue = 'green' if damage * dff <= 1 else 'red'
except AttributeError:
fatigue_obj, p_int, p_ext, damage, dff = [None for dummy in range(5)]
color_fatigue = 'green'
color_sec = 'green' if obj_scnt_calc.is_acceptable_sec_mod(sec_mod, design_pressure) else 'red'
color_shear = 'green' if obj_scnt_calc.is_acceptable_shear_area(shear_area, design_pressure) else 'red'
color_thk = 'green' if obj_scnt_calc.is_acceptable_pl_thk(design_pressure) else 'red'
color_buckling = 'green' if all([uf <= 1 for uf in buckling]) \
else 'red'
if slamming_pressure is not None and slamming_pressure > 0:
slamming_res = obj_scnt_calc.calculate_slamming_stiffener(slamming_pressure,
red_fac=slamming_red_fac_pl)
min_pl_slamming = obj_scnt_calc.calculate_slamming_plate(slamming_pressure,
red_fac=slamming_red_fac_stf)
if slamming_res['Zp_req'] is not None:
zpl = obj_scnt_calc.get_net_effective_plastic_section_modulus()
zpl_req = slamming_res['Zp_req']
color_sec = 'green' if zpl >= zpl_req else 'red'
else:
zpl = obj_scnt_calc.get_net_effective_plastic_section_modulus()
zpl_req = None
color_sec = 'red'
color_shear = 'green' if round(obj_scnt_calc.get_web_thk()* 1000,1) | |
import numpy as np
from random import shuffle
import multiprocessing as mp
import itertools
from astrodash.helpers import temp_list, div0
from astrodash.sn_processing import PreProcessing
from astrodash.combine_sn_and_host import training_template_data
from astrodash.preprocessing import ProcessingTools
from astrodash.array_tools import zero_non_overlap_part, normalise_spectrum
try:
from imblearn import over_sampling
IMBLEARN_EXISTS = True
except ImportError:
IMBLEARN_EXISTS = False
class AgeBinning(object):
def __init__(self, minAge, maxAge, ageBinSize):
self.minAge = minAge
self.maxAge = maxAge
self.ageBinSize = ageBinSize
def age_bin(self, age):
ageBin = int(round(age / self.ageBinSize)) - int(round(self.minAge / self.ageBinSize))
return ageBin
def age_labels(self):
ageLabels = []
ageBinPrev = 0
ageLabelMin = self.minAge
for age in np.arange(self.minAge, self.maxAge, 0.5):
ageBin = self.age_bin(age)
if ageBin != ageBinPrev:
ageLabelMax = int(round(age))
ageLabels.append(str(int(ageLabelMin)) + " to " + str(ageLabelMax))
ageLabelMin = ageLabelMax
ageBinPrev = ageBin
ageLabels.append(str(int(ageLabelMin)) + " to " + str(int(self.maxAge)))
return ageLabels
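# Illustrative note (not from the original source): with hypothetical settings
# minAge=-20, maxAge=50, ageBinSize=4, age_bin() maps an age to a zero-based
# bin index via int(round(age/4)) - int(round(-20/4)), e.g. age_bin(-20) == 0
# and age_bin(0) == 5; age_labels() then produces one "low to high" string per
# bin. The parameter values here are assumptions chosen only for this example.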
class CreateLabels(object):
def __init__(self, nTypes, minAge, maxAge, ageBinSize, typeList, hostList, nHostTypes):
self.nTypes = nTypes
self.minAge = minAge
self.maxAge = maxAge
self.ageBinSize = ageBinSize
self.typeList = typeList
self.ageBinning = AgeBinning(self.minAge, self.maxAge, self.ageBinSize)
self.numOfAgeBins = self.ageBinning.age_bin(self.maxAge - 0.1) + 1
self.nLabels = self.nTypes * self.numOfAgeBins
self.ageLabels = self.ageBinning.age_labels()
self.hostList = hostList
self.nHostTypes = nHostTypes
def label_array(self, ttype, age, host=None):
ageBin = self.ageBinning.age_bin(age)
try:
typeIndex = self.typeList.index(ttype)
except ValueError as err:
raise Exception("INVALID TYPE: {0}".format(err))
if host is None:
labelArray = np.zeros((self.nTypes, self.numOfAgeBins))
labelArray[typeIndex][ageBin] = 1
labelArray = labelArray.flatten()
typeName = ttype + ": " + self.ageLabels[ageBin]
else:
hostIndex = self.hostList.index(host)
labelArray = np.zeros((self.nHostTypes, self.nTypes, self.numOfAgeBins))
labelArray[hostIndex][typeIndex][ageBin] = 1
labelArray = labelArray.flatten()
typeName = "{}: {}: {}".format(host, ttype, self.ageLabels[ageBin])
labelIndex = np.argmax(labelArray)
return labelIndex, typeName
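# Note added for clarity (not in the original source): because the one-hot
# label array is flattened in C order, the returned labelIndex equals
#   typeIndex * numOfAgeBins + ageBin                          (no host), or
#   hostIndex * nTypes * numOfAgeBins + typeIndex * numOfAgeBins + ageBin
# when a host galaxy type is included.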
def type_names_list(self):
typeNamesList = []
if self.hostList is None:
for tType in self.typeList:
for ageLabel in self.ageBinning.age_labels():
typeNamesList.append("{}: {}".format(tType, ageLabel))
else:
for host in self.hostList:
for tType in self.typeList:
for ageLabel in self.ageBinning.age_labels():
typeNamesList.append("{}: {}: {}".format(host, tType, ageLabel))
return np.array(typeNamesList)
class ReadSpectra(object):
def __init__(self, w0, w1, nw, snFilename, galFilename=None):
self.w0 = w0
self.w1 = w1
self.nw = nw
self.snFilename = snFilename
self.galFilename = galFilename
if galFilename is None:
self.data = PreProcessing(snFilename, w0, w1, nw)
def sn_plus_gal_template(self, snAgeIdx, snCoeff, galCoeff, z):
wave, flux, minIndex, maxIndex, nCols, ages, tType = training_template_data(snAgeIdx, snCoeff, galCoeff, z,
self.snFilename, self.galFilename,
self.w0, self.w1, self.nw)
return wave, flux, nCols, ages, tType, minIndex, maxIndex
def input_spectrum(self, z, smooth, minWave, maxWave):
wave, flux, minIndex, maxIndex, z = self.data.two_column_data(z, smooth, minWave, maxWave)
return wave, flux, int(minIndex), int(maxIndex), z
class ArrayTools(object):
def __init__(self, nLabels, nw):
self.nLabels = nLabels
self.nw = nw
def shuffle_arrays(self, memmapName='', **kwargs):
""" Must take images and labels as arguments with the keyword specified.
Can optionally take filenames and typeNames as arguments """
arraySize = len(kwargs['labels'])
if arraySize == 0:
return kwargs
kwargShuf = {}
self.randnum = np.random.randint(10000)
for key in kwargs:
if key == 'images':
arrayShuf = np.memmap('shuffled_{}_{}_{}.dat'.format(key, memmapName, self.randnum), dtype=np.float16,
mode='w+', shape=(arraySize, int(self.nw)))
elif key == 'labels':
arrayShuf = np.memmap('shuffled_{}_{}_{}.dat'.format(key, memmapName, self.randnum), dtype=np.uint16,
mode='w+', shape=arraySize)
else:
arrayShuf = np.memmap('shuffled_{}_{}_{}.dat'.format(key, memmapName, self.randnum), dtype=object,
mode='w+', shape=arraySize)
kwargShuf[key] = arrayShuf
print("Shuffling...")
# Randomise order
p = np.random.permutation(len(kwargs['labels']))
for key in kwargs:
assert len(kwargs[key]) == arraySize
print(key, "shuffling...")
print(len(p))
kwargShuf[key] = kwargs[key][p]
return kwargShuf
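# Descriptive note (added comment, not original code): every array passed in
# **kwargs is indexed with the same permutation p, so images, labels,
# filenames and typeNames stay aligned row-for-row after shuffling.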
def count_labels(self, labels):
counts = np.zeros(self.nLabels)
for i in range(len(labels)):
counts[labels[i]] += 1
return counts
def augment_data(self, flux, stdDevMean=0.05, stdDevStdDev=0.05):
minIndex, maxIndex = ProcessingTools().min_max_index(flux, outerVal=0.5)
noise = np.zeros(self.nw)
stdDev = abs(np.random.normal(stdDevMean, stdDevStdDev)) # randomised standard deviation
noise[minIndex:maxIndex] = np.random.normal(0, stdDev, maxIndex - minIndex)
# # Add white noise to regions outside minIndex to maxIndex
# noise[0:minIndex] = np.random.uniform(0.0, 1.0, minIndex)
# noise[maxIndex:] = np.random.uniform(0.0, 1.0, self.nw-maxIndex)
augmentedFlux = flux + noise
augmentedFlux = normalise_spectrum(augmentedFlux)
augmentedFlux = zero_non_overlap_part(augmentedFlux, minIndex, maxIndex, outerVal=0.5)
return augmentedFlux
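# Summary of the augmentation above (descriptive comment, not original code):
# a per-spectrum noise level is drawn as abs(N(stdDevMean, stdDevStdDev)),
# Gaussian noise with that standard deviation is added only between minIndex
# and maxIndex (the region containing real data), and the spectrum is then
# re-normalised with the outer, non-overlapping part reset to 0.5.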
class OverSampling(ArrayTools):
def __init__(self, nLabels, nw, **kwargs):
""" Must take images and labels as arguments with the keyword specified.
Can optionally take filenames and typeNames as arguments """
ArrayTools.__init__(self, nLabels, nw)
self.kwargs = kwargs
counts = self.count_labels(self.kwargs['labels'])
print("Before OverSample") #
print(counts) #
self.overSampleAmount = np.rint(div0(1 * max(counts), counts)) # ignore zeros in counts
self.overSampleArraySize = int(sum(np.array(self.overSampleAmount, int) * counts))
print(np.array(self.overSampleAmount, int) * counts)
print(np.array(self.overSampleAmount, int))
print(self.overSampleArraySize, len(self.kwargs['labels']))
self.kwargOverSampled = {}
self.randnum = np.random.randint(10000)
for key in self.kwargs:
if key == 'images':
arrayOverSampled = np.memmap('oversampled_{}_{}.dat'.format(key, self.randnum), dtype=np.float16,
mode='w+',
shape=(self.overSampleArraySize, int(self.nw)))
elif key == 'labels':
arrayOverSampled = np.memmap('oversampled_{}_{}.dat'.format(key, self.randnum), dtype=np.uint16,
mode='w+',
shape=self.overSampleArraySize)
else:
arrayOverSampled = np.memmap('oversampled_{}_{}.dat'.format(key, self.randnum), dtype=object, mode='w+',
shape=self.overSampleArraySize)
self.kwargOverSampled[key] = arrayOverSampled
self.kwargShuf = self.shuffle_arrays(memmapName='pre-oversample_{}'.format(self.randnum), **self.kwargs)
print(len(self.kwargShuf['labels']))
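# Descriptive note (added comment, not original code): overSampleAmount holds,
# per label, roughly max(counts) / counts[label] (div0 guards empty classes),
# i.e. how many noisy copies of each example are generated so that all classes
# end up approximately balanced.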
def oversample_mp(self, i_in, offset_in, std_in, labelIndex_in):
print('oversampling', i_in, len(self.kwargShuf['labels']))
oversampled = {key: [] for key in self.kwargs}
repeatAmount = int(self.overSampleAmount[labelIndex_in])
for r in range(repeatAmount):
for key in self.kwargs:
if key == 'images':
oversampled[key].append(
self.augment_data(self.kwargShuf[key][i_in], stdDevMean=0.05, stdDevStdDev=std_in))
else:
oversampled[key].append(self.kwargShuf[key][i_in])
return oversampled, offset_in, repeatAmount
def collect_results(self, result):
"""Uses apply_async's callback to setup up a separate Queue for each process"""
oversampled_in, offset_in, repeatAmount = result
for key in self.kwargs:
rlength_array = np.array(oversampled_in[key])
self.kwargOverSampled[key][offset_in:repeatAmount + offset_in] = rlength_array[:]
def over_sample_arrays(self, smote=False):
if smote:
return self.smote_oversample()
else:
return self.minority_oversample_with_noise()
def minority_oversample_with_noise(self):
offset = 0
# pool = mp.Pool()
for i in range(len(self.kwargShuf['labels'])):
labelIndex = self.kwargShuf['labels'][i]
if self.overSampleAmount[labelIndex] < 10:
std = 0.03
else:
std = 0.05
# pool.apply_async(self.oversample_mp, args=(i, offset, std, labelIndex), callback=self.collect_results)
self.collect_results(self.oversample_mp(i, offset, std, labelIndex))
offset += int(self.overSampleAmount[labelIndex])
# pool.close()
# pool.join()
# for i, output in enumerate(outputs):
# self.collect_results(output)
# print('combining results...', i, len(outputs))
print("Before Shuffling")
self.kwargOverSampledShuf = self.shuffle_arrays(memmapName='oversampled_{}'.format(self.randnum),
**self.kwargOverSampled)
print("After Shuffling")
return self.kwargOverSampledShuf
def smote_oversample(self):
sm = over_sampling.SMOTE(random_state=42, n_jobs=30)
images, labels = sm.fit_sample(X=self.kwargShuf['images'], y=self.kwargShuf['labels'])
self.kwargOverSampledShuf = self.shuffle_arrays(memmapName='oversampled_smote_{}'.format(self.randnum),
images=images, labels=labels)
return self.kwargOverSampledShuf
class CreateArrays(object):
def __init__(self, w0, w1, nw, nTypes, minAge, maxAge, ageBinSize, typeList, minZ, maxZ, numOfRedshifts,
hostTypes=None, nHostTypes=None):
self.w0 = w0
self.w1 = w1
self.nw = nw
self.nTypes = nTypes
self.minAge = minAge
self.maxAge = maxAge
self.ageBinSize = ageBinSize
self.typeList = typeList
self.minZ = minZ
self.maxZ = maxZ
self.numOfRedshifts = numOfRedshifts
self.ageBinning = AgeBinning(minAge, maxAge, ageBinSize)
self.numOfAgeBins = self.ageBinning.age_bin(maxAge - 0.1) + 1
self.nLabels = nTypes * self.numOfAgeBins * nHostTypes
self.createLabels = CreateLabels(self.nTypes, self.minAge, self.maxAge, self.ageBinSize, self.typeList,
hostTypes, nHostTypes)
self.hostTypes = hostTypes
def combined_sn_gal_templates_to_arrays(self, args):
snTemplateLocation, snTempList, galTemplateLocation, galTempList, snFractions, ageIndexes = args
images = np.empty((0, int(self.nw)), np.float16) # Number of pixels
labelsIndexes = []
filenames = []
typeNames = []
for j, gal in enumerate(galTempList):
galFilename = galTemplateLocation + gal if galTemplateLocation is not None else None
for i, sn in enumerate(snTempList):
nCols = 15
readSpectra = ReadSpectra(self.w0, self.w1, self.nw, snTemplateLocation + sn, galFilename)
for ageidx in ageIndexes[sn]:
if ageidx >= nCols:
break
for snCoeff in snFractions:
galCoeff = 1 - snCoeff
if self.numOfRedshifts == 1:
redshifts = [self.minZ]
else:
redshifts = np.random.uniform(low=self.minZ, high=self.maxZ, size=self.numOfRedshifts)
for z in redshifts:
tempWave, tempFlux, nCols, ages, tType, tMinIndex, tMaxIndex = readSpectra.sn_plus_gal_template(
ageidx, snCoeff, galCoeff, z)
if tMinIndex == tMaxIndex or not tempFlux.any():
print("NO DATA for {} {} ageIdx:{} z>={}".format(galTempList[j], snTempList[i], ageidx,
z))
break
if self.minAge < float(ages[ageidx]) < self.maxAge:
if self.hostTypes is None: # Checks if we are classifying by host as well
labelIndex, typeName = self.createLabels.label_array(tType, ages[ageidx], host=None)
else:
labelIndex, typeName = self.createLabels.label_array(tType, ages[ageidx],
host=galTempList[j])
if tMinIndex > (self.nw - 1):
continue
nonzeroflux = tempFlux[tMinIndex:tMaxIndex + 1]
newflux = (nonzeroflux - min(nonzeroflux)) / (max(nonzeroflux) - min(nonzeroflux))
newflux2 = np.concatenate((tempFlux[0:tMinIndex], newflux, tempFlux[tMaxIndex + 1:]))
images = np.append(images, np.array([newflux2]), axis=0)
labelsIndexes.append(
labelIndex) # labels = np.append(labels, np.array([label]), axis=0)
filenames.append(
"{0}_{1}_{2}_{3}_snCoeff{4}_z{5}".format(snTempList[i], tType, str(ages[ageidx]),
galTempList[j], snCoeff, (z)))
typeNames.append(typeName)
print(snTempList[i], nCols, galTempList[j])
return images, np.array(labelsIndexes).astype(int), np.array(filenames), np.array(typeNames)
def collect_results(self, result):
"""Uses apply_async's callback to setup up a separate Queue for each process"""
imagesPart, labelsPart, filenamesPart, typeNamesPart = result
self.images.extend(imagesPart)
self.labelsIndexes.extend(labelsPart)
self.filenames.extend(filenamesPart)
self.typeNames.extend(typeNamesPart)
def combined_sn_gal_arrays_multiprocessing(self, snTemplateLocation, snTempFileList, galTemplateLocation,
galTempFileList):
# TODO: Maybe do memory mapping for these arrays
self.images = []
self.labelsIndexes = []
self.filenames = []
self.typeNames = []
if galTemplateLocation is None or galTempFileList is None:
galTempList = [None]
galTemplateLocation = None
snFractions = [1.0]
else:
galTempList = temp_list(galTempFileList)
snFractions = [0.99, 0.98, 0.95, 0.93, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
if isinstance(snTempFileList, dict):
snTempList = list(snTempFileList.keys())
ageIndexesDict = snTempFileList
else:
snTempList = temp_list(snTempFileList)
ageIndexesDict = None
galAndSnTemps = list(itertools.product(galTempList, snTempList))
argsList = []
for gal, sn in galAndSnTemps:
if ageIndexesDict is not None:
ageIdxDict = {k: ageIndexesDict[k] for k in (sn,)}
else:
ageIdxDict = {k: range(0, 1000) | |
'svn:needs-lock' set
## wc->wc copied file
svntest.main.run_svn(None, 'copy', mu_path, mu2_path)
is_writable(mu2_path)
sbox.simple_commit('A/mu2')
is_readonly(mu2_path)
## URL->wc copied file
svntest.main.run_svn(None, 'copy', mu_URL, mu3_path)
is_writable(mu3_path)
sbox.simple_commit('A/mu3')
is_readonly(mu3_path)
#----------------------------------------------------------------------
# Issue #3525: Locked file which is scheduled for delete causes tree
# conflict
@Issue(3525)
def update_locked_deleted(sbox):
"updating locked scheduled-for-delete file"
sbox.build()
wc_dir = sbox.wc_dir
iota_path = sbox.ospath('iota')
mu_path = sbox.ospath('A/mu')
alpha_path = sbox.ospath('A/B/E/alpha')
svntest.main.run_svn(None, 'lock', '-m', 'locked', mu_path, iota_path,
alpha_path)
sbox.simple_rm('iota')
sbox.simple_rm('A/mu')
sbox.simple_rm('A/B/E')
# Create expected output tree for an update.
expected_output = svntest.wc.State(wc_dir, {
})
# Create expected status tree for the update.
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/E', status='D ')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='K')
expected_status.tweak('A/B/E/beta', status='D ')
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
# Now we steal the lock of iota and A/mu via URL and retry
svntest.main.run_svn(None, 'lock', '-m', 'locked', sbox.repo_url + '/iota',
'--force', sbox.repo_url + '/A/mu',
sbox.repo_url + '/A/B/E/alpha')
expected_status.tweak('iota', 'A/mu', 'A/B/E/alpha',
status='D ', writelocked='O')
expected_output = svntest.wc.State(wc_dir, {
'A/mu' : Item(status='B '),
'A/B/E/alpha' : Item(status='B '),
'iota' : Item(status='B '),
})
svntest.actions.run_and_verify_update(wc_dir, expected_output,
None, expected_status)
#----------------------------------------------------------------------
def block_unlock_if_pre_unlock_hook_fails(sbox):
"block unlock operation if pre-unlock hook fails"
sbox.build()
wc_dir = sbox.wc_dir
repo_dir = sbox.repo_dir
svntest.actions.create_failing_hook(repo_dir, "pre-unlock", "error text")
# lock a file.
pi_path = sbox.ospath('A/D/G/pi')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
'-m', '', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Make sure the unlock operation fails as pre-unlock hook blocks it.
expected_unlock_fail_err_re = ".*error text|.*500 Internal Server Error"
svntest.actions.run_and_verify_svn2(None, None, expected_unlock_fail_err_re,
1, 'unlock', pi_path)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def lock_invalid_token(sbox):
"verify pre-lock hook returning invalid token"
sbox.build()
hook_path = os.path.join(sbox.repo_dir, 'hooks', 'pre-lock')
svntest.main.create_python_hook_script(hook_path,
'# encoding=utf-8\n'
'import sys\n'
'sys.stdout.write("тест")\n'
'sys.exit(0)\n')
fname = 'iota'
file_path = os.path.join(sbox.wc_dir, fname)
svntest.actions.run_and_verify_svn2(None, None,
"svn: warning: W160037: " \
".*scheme.*'opaquelocktoken'", 0,
'lock', '-m', '', file_path)
@Issue(3105)
def lock_multi_wc(sbox):
"obtain locks in multiple working copies in one go"
sbox.build()
sbox2 = sbox.clone_dependent(copy_wc=True)
wc_name = os.path.basename(sbox.wc_dir)
wc2_name = os.path.basename(sbox2.wc_dir)
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
expected_output = svntest.verify.UnorderedOutput([
'\'%s\' unlocked.\n' % os.path.join(wc_name, 'iota'),
'\'%s\' unlocked.\n' % os.path.join(wc2_name, 'A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('iota'),
sbox2.ospath('A/mu'))
@Issue(3378)
def locks_stick_over_switch(sbox):
"locks are kept alive over switching"
sbox.build()
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.ospath('A'), repo_url + '/AA',
'-m', '')
expected_output = svntest.verify.UnorderedOutput([
'\'iota\' locked by user \'jrandom\'.\n',
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'D', 'H', 'chi'),
'\'%s\' locked by user \'jrandom\'.\n' % os.path.join('A', 'mu'),
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/D/H/chi'),
sbox.ospath('A/mu'),
sbox.ospath('iota'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/H/chi', 'A/mu', 'iota', writelocked='K')
# Make sure the file is still locked
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = svntest.wc.State(wc_dir, {
})
expected_status.tweak(wc_rev=2)
expected_status.tweak('', wc_rev=1)
expected_status.tweak('iota', writelocked='K', wc_rev=1)
switched_status = expected_status.copy()
switched_status.tweak(writelocked=None)
switched_status.tweak('iota', writelocked='K')
switched_status.tweak('A', switched='S')
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/AA',
expected_output, None, switched_status)
# And now switch back to verify that the locks reappear
expected_output = svntest.wc.State(wc_dir, {
})
svntest.actions.run_and_verify_switch(wc_dir, sbox.ospath('A'),
repo_url + '/A',
expected_output, None, expected_status)
@Issue(4304)
def lock_unlock_deleted(sbox):
"lock/unlock a deleted file"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'rm', sbox.ospath('A/mu'))
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='D ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
expected_output = '\'mu\' unlocked.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'unlock', sbox.ospath('A/mu'))
expected_status.tweak('A/mu', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(4369)
def commit_stolen_lock(sbox):
"commit with a stolen lock"
sbox.build()
wc_dir = sbox.wc_dir
sbox.simple_append('A/mu', 'zig-zag')
sbox.simple_lock('A/mu')
expected_output = '\'mu\' locked by user \'jrandom\'.'
svntest.actions.run_and_verify_svn(None, expected_output, [],
'lock', '--force',
sbox.repo_url + '/A/mu')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', status='M ', writelocked='T')
err_re = "(.*E160037: Cannot verify lock on path '/A/mu')|" + \
"(.*E160038: '/.*/A/mu': no lock token available)"
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
err_re,
wc_dir)
# When removing directories, the locks of contained files were not
# correctly removed from the working copy database, thus they later
# magically reappeared when new files or directories with the same
# paths were added.
@Issue(4364)
def drop_locks_on_parent_deletion(sbox):
"drop locks when the parent is deleted"
sbox.build()
wc_dir = sbox.wc_dir
# lock some files, and remove them.
sbox.simple_lock('A/B/lambda')
sbox.simple_lock('A/B/E/alpha')
sbox.simple_lock('A/B/E/beta')
sbox.simple_rm('A/B')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove_subtree('A/B')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
# now re-add entities to the deleted paths.
sbox.simple_mkdir('A/B')
sbox.simple_add_text('new file replacing old file', 'A/B/lambda')
sbox.simple_add_text('file replacing former dir', 'A/B/F')
# The bug also resurrected locks on directories when their path
# matched a former file.
sbox.simple_mkdir('A/B/E', 'A/B/E/alpha')
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B',
'A/B/E',
'A/B/E/alpha',
'A/B/F',
'A/B/lambda',
wc_rev='3')
expected_status.remove('A/B/E/beta')
svntest.actions.run_and_verify_commit(wc_dir,
[],
expected_status,
None,
wc_dir)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_timeout(sbox):
"unlock a lock with timeout"
import httplib
from urlparse import urlparse
import base64
sbox.build()
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_body = '<?xml version="1.0" encoding="utf-8" ?>' \
'<D:lockinfo xmlns:D="DAV:">' \
' <D:lockscope><D:exclusive/></D:lockscope>' \
' <D:locktype><D:write/></D:locktype>' \
' <D:owner>' \
' <D:href>http://a/test</D:href>' \
' </D:owner>' \
'</D:lockinfo>'
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jconstant:rayjandom'),
'Timeout': 'Second-86400'
}
# Enabling the following line makes this test easier to debug
h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', lock_body, lock_headers)
r = h.getresponse()
# Verify that there is a lock, by trying to obtain one
svntest.actions.run_and_verify_svn2(None, None, ".*locked by user", 0,
'lock', '-m', '', sbox.ospath('iota'))
# Before this patch, this used to fail with a parse error on the timeout
svntest.actions.run_and_verify_svn2(None, None, ".*W160039.*Unlock.*403", 0,
'unlock', sbox.repo_url + '/iota')
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.ospath('iota'), '--force')
def non_root_locks(sbox):
"locks for working copies not at repos root"
sbox.build()
wc_dir = sbox.wc_dir
svntest.actions.run_and_verify_svn(None, None, [],
'cp', sbox.repo_url, sbox.repo_url + '/X',
'-m', 'copy greek tree')
sbox.simple_switch(sbox.repo_url + '/X')
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Lock a file
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Updates don't break the lock
sbox.simple_update('A/D')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
sbox.simple_update('')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Break the lock
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Subdir update reports the break
sbox.simple_update('A/D')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# Relock and break
svntest.actions.run_and_verify_svn(None, ".*locked by user", [],
'lock', sbox.ospath('A/D/G/pi'),
'-m', '')
expected_status.tweak('A/D/G/pi', writelocked='K')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
svntest.actions.run_and_verify_svn(None, None, [],
'unlock', sbox.repo_url + '/X/A/D/G/pi')
# Root update reports the break
sbox.simple_update('')
expected_status.tweak('A/D/G/pi', writelocked=None)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
@Issue(3515)
@SkipUnless(svntest.main.is_ra_type_dav)
def dav_lock_refresh(sbox):
"refresh timeout of DAV lock"
import httplib
from urlparse import urlparse
import base64
sbox.build(create_wc = False)
# Acquire lock on 'iota'
svntest.actions.run_and_verify_svn(None, ".*locked by user", [], 'lock',
sbox.repo_url + '/iota')
# Try to refresh lock using 'If' header
loc = urlparse(sbox.repo_url)
if loc.scheme == 'http':
h = httplib.HTTPConnection(loc.hostname, loc.port)
else:
h = httplib.HTTPSConnection(loc.hostname, loc.port)
lock_token = svntest.actions.run_and_parse_info(sbox.repo_url + '/iota')[0]['Lock Token']
lock_headers = {
'Authorization': 'Basic ' + base64.b64encode('jrandom:rayjandom'),
'If': '(<' + lock_token + '>)',
'Timeout': 'Second-7200'
}
# Enabling the following line makes this test easier to debug
h.set_debuglevel(9)
h.request('LOCK', sbox.repo_url + '/iota', '', lock_headers)
# XFAIL Refreshing of DAV lock fails with error '412 Precondition Failed'
r = h.getresponse()
if r.status != httplib.OK:
raise svntest.Failure('Lock refresh failed: %d %s' % (r.status, r.reason))
@SkipUnless(svntest.main.is_ra_type_dav)
def delete_locked_file_with_percent(sbox):
"lock and delete a file called 'a %( ) .txt'"
sbox.build()
locked_filename = 'a %( ) .txt'
locked_path = sbox.ospath(locked_filename)
svntest.main.file_write(locked_path, "content\n")
sbox.simple_add(locked_filename)
sbox.simple_commit()
sbox.simple_lock(locked_filename)
sbox.simple_rm(locked_filename)
# XFAIL: With a 1.8.x client, this commit fails with:
# svn: E175002: Unexpected HTTP status 400 'Bad Request' on '/svn-test-work/repositories/lock_tests-52/!svn/txr/2-2/a%20%25(%20)%20.txt'
# and the following error in the httpd error log:
# Invalid percent encoded URI in tagged If-header [400, #104]
sbox.simple_commit()
@Issue(4557)
@XFail(svntest.main.is_ra_type_dav)
def delete_dir_with_lots_of_locked_files(sbox):
"delete a directory containing lots of locked files"
sbox.build()
wc_dir = sbox.wc_dir
# A lot of paths.
nfiles = 75 # NOTE: test XPASSES with 50 files!!!
locked_paths = []
for i in range(nfiles):
locked_paths.append(sbox.ospath("A/locked_files/file-%i" % i))
# Create files at these paths
os.mkdir(sbox.ospath("A/locked_files"))
for file_path in locked_paths:
svntest.main.file_write(file_path, "This is '%s'.\n" % (file_path,))
sbox.simple_add("A/locked_files")
sbox.simple_commit()
sbox.simple_update()
# lock all the files
svntest.actions.run_and_verify_svn(None, None, [], 'lock',
'-m', 'All locks',
*locked_paths)
# Locally delete A (regression against earlier versions, which
# always used a special non-standard request)
sbox.simple_rm("A")
# But a further replacement never worked
sbox.simple_mkdir("A")
# And an additional propset didn't work either
# (but doesn't require all lock tokens recursively)
sbox.simple_propset("k", "v", "A")
# Commit the deletion
# XFAIL: As of 1.8.10, this commit fails with:
# | |
32, 3)
assert np.allclose(observed.arr_0to1, expected)
def test_segmaps__pad_mode_should_be_ignored(self):
# basic segmaps test
# pad_mode should be ignored for segmaps
aug = iaa.PadToFixedSize(
height=3, width=3, pad_mode="edge", position="center")
segmaps_arr = np.ones((1, 1, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(1, 1, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.int32([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
])
expected = expected[..., np.newaxis]
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.arr, expected)
def test_segmaps_smaller_than_image__pad_mode_should_be_ignored(self):
# segmaps with size unequal to image
# pad_mode should be ignored for segmaps
aug = iaa.PadToFixedSize(
height=32, width=32, pad_mode="edge", position="left-top")
segmaps_arr = np.ones((15, 15, 1), dtype=np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(30, 30, 3))
observed = aug.augment_segmentation_maps([segmaps])[0]
expected = np.ones((16, 16, 1), dtype=np.int32)
expected[:, 0, 0] = 0
expected[0, :, 0] = 0
assert observed.shape == (32, 32, 3)
assert np.array_equal(observed.arr, expected)
def test_get_parameters(self):
aug = iaa.PadToFixedSize(width=20, height=10, pad_mode="edge",
pad_cval=10, position="center")
params = aug.get_parameters()
assert params[0] == 20
assert params[1] == 10
assert params[2].value == "edge"
assert params[3].value == 10
assert np.isclose(params[4][0].value, 0.5)
assert np.isclose(params[4][1].value, 0.5)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PadToFixedSize(height=1, width=1)
image_aug = aug(image=image)
expected_height = 1
expected_width = 1
expected_shape = tuple([expected_height, expected_width]
+ list(shape[2:]))
assert image_aug.shape == expected_shape
def test_other_dtypes_bool(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [
1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [
1, 5, 10, 100, int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (4, 3)
assert np.all(image_aug[~mask] == 0)
assert np.all(image_aug[mask] == value)
def test_other_dtypes_float(self):
aug = iaa.PadToFixedSize(height=4, width=3, position="center-top")
dtypes = ["float16", "float32", "float64", "float128"]
mask = np.zeros((4, 3), dtype=bool)
mask[2, 1] = True
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert image_aug.shape == (4, 3)
assert np.all(_isclose(image_aug[~mask], 0))
assert np.all(_isclose(image_aug[mask],
np.float128(value)))
def test_pickleable(self):
aug = iaa.PadToFixedSize(20, 20, position="uniform", seed=1)
runtest_pickleable_uint8_img(aug, iterations=5, shape=(10, 10, 1))
class TestCenterPadToFixedSize(unittest.TestCase):
def setUp(self):
reseed()
def test_image2d(self):
for _ in np.arange(10):
image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
aug = iaa.CenterPadToFixedSize(height=5, width=5)
observed = aug(image=image)
expected = iaa.pad(image, right=1, bottom=1)
assert np.array_equal(observed, expected)
def test_pickleable(self):
aug = iaa.CenterPadToFixedSize(height=20, width=15)
runtest_pickleable_uint8_img(aug, shape=(10, 10, 3))
class TestCropToFixedSize(unittest.TestCase):
def setUp(self):
reseed()
def test_image2d_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1)
def test_image3d_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
image3d = np.atleast_3d(image)
observed = aug.augment_image(image3d)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1, 1)
def test_image3d_rgb_that_needs_to_be_cropped_on_both_sides(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
image3d_rgb = np.tile(
np.atleast_3d(image),
(1, 1, 3)
)
observed = aug.augment_image(image3d_rgb)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 1, 3)
def test_image2d_with_other_dtypes(self):
aug = iaa.CropToFixedSize(height=1, width=1)
image = np.uint8([
[128, 129, 130],
[131, 132, 133],
[134, 135, 136]
])
for dtype in ["float32", "float64", "int32"]:
with self.subTest(dtype=dtype):
observed = aug.augment_image(image.astype(dtype))
assert observed.dtype.name == dtype
assert observed.shape == (1, 1)
def test_image_with_height_being_too_large(self):
# change only one side when the other side already has the desired size
aug = iaa.CropToFixedSize(height=1, width=5)
image = np.zeros((3, 5, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (1, 5, 3)
def test_image_with_width_being_too_large(self):
aug = iaa.CropToFixedSize(height=5, width=1)
image = np.zeros((5, 3, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 1, 3)
def test_image_fullfills_exactly_max_shape(self):
# change no side when all sides already have exactly the desired size
aug = iaa.CropToFixedSize(height=5, width=5)
img5x5 = np.zeros((5, 5, 3), dtype=np.uint8)
img5x5[2, 2, :] = 255
observed = aug.augment_image(img5x5)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 5, 3)
assert np.array_equal(observed, img5x5)
def test_image_that_is_smaller_than_max_shape(self):
# change no side when all sides are smaller than the desired size
aug = iaa.CropToFixedSize(height=5, width=5)
img4x4 = np.zeros((4, 4, 3), dtype=np.uint8)
img4x4[2, 2, :] = 255
observed = aug.augment_image(img4x4)
assert observed.dtype.name == "uint8"
assert observed.shape == (4, 4, 3)
assert np.array_equal(observed, img4x4)
def test_too_large_image_with_width_none(self):
aug = iaa.CropToFixedSize(height=5, width=None)
image = np.zeros((6, 6, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (5, 6, 3)
def test_too_large_image_with_height_none(self):
aug = iaa.CropToFixedSize(height=None, width=5)
image = np.zeros((6, 6, 3), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.dtype.name == "uint8"
assert observed.shape == (6, 5, 3)
def test_image_crop_at_left_top(self):
# explicit non-center position test
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
expected = img5x5[2:, 2:]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_crop_at_right_bottom(self):
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
expected = img5x5[:3, :3]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_image_crop_at_bottom_center_given_as_tuple_of_floats(self):
aug = iaa.CropToFixedSize(height=3, width=3, position=(0.5, 1.0))
img5x5 = np.arange(25, dtype=np.uint8).reshape((5, 5))
observed = aug.augment_image(img5x5)
expected = img5x5[:3, 1:4]
assert observed.dtype.name == "uint8"
assert observed.shape == (3, 3)
assert np.array_equal(observed, expected)
def test_keypoints__image_already_fullfills_max_shape(self):
# keypoint test with shape not being changed
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints(kpsoi)
expected = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def test_keypoints_crop_at_center(self):
# basic keypoint test
aug = iaa.CropToFixedSize(height=1, width=1, position="center")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=(3, 3))
observed = aug.augment_keypoints(kpsoi)
expected = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 1))
assert_cbaois_equal(observed, expected)
def test_keypoints_crop_at_left_top(self):
# keypoint test with explicit non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(5, 5))
observed = aug.augment_keypoints(kpsoi)
expected = ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def test_keypoints_crop_at_right_bottom(self):
aug = iaa.CropToFixedSize(height=3, width=3, position="right-bottom")
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(5, 5))
observed = aug.augment_keypoints(kpsoi)
expected = ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def test_keypoints_empty(self):
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
kpsoi = ia.KeypointsOnImage([], shape=(5, 4))
observed = aug.augment_keypoints(kpsoi)
expected = ia.KeypointsOnImage([], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def test_polygons__image_already_fullfills_max_shape(self):
# polygons test with shape not being changed
aug = iaa.CropToFixedSize(height=3, width=3, position="center")
psoi = ia.PolygonsOnImage([
ia.Polygon([(1, 1), (3, 1), (3, 3)])
], shape=(3, 3))
observed = aug.augment_polygons(psoi)
expected = ia.PolygonsOnImage([
ia.Polygon([(1, 1), (3, 1), (3, 3)])
], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def test_polygons_crop_at_center(self):
# basic polygons test
aug = iaa.CropToFixedSize(height=1, width=1, position="center")
psoi = ia.PolygonsOnImage([
ia.Polygon([(1, 1), (3, 1), (3, 3)])
], shape=(3, 3))
observed = aug.augment_polygons(psoi)
expected = ia.PolygonsOnImage([
ia.Polygon([(1-1, 1-1), (3-1, 1-1), (3-1, 3-1)])
], shape=(1, 1))
assert_cbaois_equal(observed, expected)
def test_polygons_crop_at_left_top(self):
# polygons test with explicit non-center position
aug = iaa.CropToFixedSize(height=3, width=3, position="left-top")
psoi = ia.PolygonsOnImage([
ia.Polygon([(1, 1), (3, 1), (3, 3)])
], shape=(5, 5))
observed = aug.augment_polygons(psoi)
expected = ia.PolygonsOnImage([
ia.Polygon([(1-2, 1-2), (3-2, 1-2), (3-2, 3-2)])
], shape=(3, 3))
assert_cbaois_equal(observed, expected)
def | |
Optional[Dict[Text, Any]] = None,
ignore_action_unlikely_intent: bool = False,
) -> List[List[Dict[Text, List[Features]]]]:
"""Creates state features for prediction.
Args:
trackers: A list of state trackers
domain: The domain
precomputations: Contains precomputed features and attributes.
use_text_for_last_user_input: Indicates whether to use text or intent label
for featurizing last user input.
ignore_rule_only_turns: If True ignore dialogue turns that are present
only in rules.
rule_only_data: Slots and loops,
which only occur in rules but not in stories.
ignore_action_unlikely_intent: Whether to remove any states containing
`action_unlikely_intent` from state features.
Returns:
Dictionaries of state type (INTENT, TEXT, ACTION_NAME, ACTION_TEXT,
ENTITIES, SLOTS, ACTIVE_LOOP) to a list of features for all dialogue
turns in all trackers.
"""
trackers_as_states = self.prediction_states(
trackers,
domain,
use_text_for_last_user_input,
ignore_rule_only_turns,
rule_only_data,
ignore_action_unlikely_intent=ignore_action_unlikely_intent,
)
return self._featurize_states(trackers_as_states, precomputations)
def persist(self, path: Union[Text, Path]) -> None:
"""Persists the tracker featurizer to the given path.
Args:
path: The path to persist the tracker featurizer to.
"""
featurizer_file = Path(path) / FEATURIZER_FILE
rasa.shared.utils.io.create_directory_for_file(featurizer_file)
# entity tags are persisted in TED policy, they are not needed for prediction
if self.state_featurizer is not None:
self.state_featurizer.entity_tag_specs = None
# noinspection PyTypeChecker
rasa.shared.utils.io.write_text_file(
str(jsonpickle.encode(self)), featurizer_file
)
@staticmethod
def load(path: Union[Text, Path]) -> Optional[TrackerFeaturizer]:
"""Loads the featurizer from file.
Args:
path: The path to load the tracker featurizer from.
Returns:
The loaded tracker featurizer.
"""
featurizer_file = Path(path) / FEATURIZER_FILE
if featurizer_file.is_file():
return jsonpickle.decode(rasa.shared.utils.io.read_file(featurizer_file))
logger.error(
f"Couldn't load featurizer for policy. "
f"File '{featurizer_file}' doesn't exist."
)
return None
@staticmethod
def _remove_action_unlikely_intent_from_states(states: List[State]) -> List[State]:
return [
state
for state in states
if not _is_prev_action_unlikely_intent_in_state(state)
]
@staticmethod
def _remove_action_unlikely_intent_from_events(events: List[Event]) -> List[Event]:
return [
event
for event in events
if (
not isinstance(event, ActionExecuted)
or event.action_name != ACTION_UNLIKELY_INTENT_NAME
)
]
class FullDialogueTrackerFeaturizer(TrackerFeaturizer):
"""Creates full dialogue training data for time distributed architectures.
Creates training data that uses each time output for prediction.
"""
def training_states_labels_and_entities(
self,
trackers: List[DialogueStateTracker],
domain: Domain,
omit_unset_slots: bool = False,
ignore_action_unlikely_intent: bool = False,
) -> Tuple[
List[List[State]], List[List[Optional[Text]]], List[List[Dict[Text, Any]]]
]:
"""Transforms trackers to states, action labels, and entity data.
Args:
trackers: The trackers to transform.
domain: The domain.
omit_unset_slots: If `True` do not include the initial values of slots.
ignore_action_unlikely_intent: Whether to remove `action_unlikely_intent`
from training states.
Returns:
Trackers as states, action labels, and entity data.
"""
trackers_as_states = []
trackers_as_actions = []
trackers_as_entities = []
logger.debug(
"Creating states and action examples from "
"collected trackers (by {}({}))..."
"".format(type(self).__name__, type(self.state_featurizer).__name__)
)
pbar = tqdm(
trackers,
desc="Processed trackers",
disable=rasa.shared.utils.io.is_logging_disabled(),
)
for tracker in pbar:
states = self._create_states(
tracker, domain, omit_unset_slots=omit_unset_slots
)
events = tracker.applied_events()
if ignore_action_unlikely_intent:
states = self._remove_action_unlikely_intent_from_states(states)
events = self._remove_action_unlikely_intent_from_events(events)
delete_first_state = False
actions = []
entities = []
entity_data = {}
for event in events:
if isinstance(event, UserUttered):
entity_data = self._entity_data(event)
if not isinstance(event, ActionExecuted):
continue
if not event.unpredictable:
# only actions which can be
# predicted at a stories start
actions.append(event.action_name or event.action_text)
entities.append(entity_data)
else:
# unpredictable actions can be
# only the first in the story
if delete_first_state:
raise InvalidStory(
f"Found two unpredictable actions in one story "
f"'{tracker.sender_id}'. Check your story files."
)
delete_first_state = True
# reset entity_data for the next turn
entity_data = {}
if delete_first_state:
states = states[1:]
trackers_as_states.append(states[:-1])
trackers_as_actions.append(actions)
trackers_as_entities.append(entities)
self._remove_user_text_if_intent(trackers_as_states)
return trackers_as_states, trackers_as_actions, trackers_as_entities
def prediction_states(
self,
trackers: List[DialogueStateTracker],
domain: Domain,
use_text_for_last_user_input: bool = False,
ignore_rule_only_turns: bool = False,
rule_only_data: Optional[Dict[Text, Any]] = None,
ignore_action_unlikely_intent: bool = False,
) -> List[List[State]]:
"""Transforms trackers to states for prediction.
Args:
trackers: The trackers to transform.
domain: The domain.
use_text_for_last_user_input: Indicates whether to use text or intent label
for featurizing last user input.
ignore_rule_only_turns: If True ignore dialogue turns that are present
only in rules.
rule_only_data: Slots and loops,
which only occur in rules but not in stories.
ignore_action_unlikely_intent: Whether to remove any states containing
`action_unlikely_intent` from prediction states.
Returns:
Trackers as states for prediction.
"""
trackers_as_states = [
self._create_states(
tracker,
domain,
ignore_rule_only_turns=ignore_rule_only_turns,
rule_only_data=rule_only_data,
)
for tracker in trackers
]
if ignore_action_unlikely_intent:
trackers_as_states = [
self._remove_action_unlikely_intent_from_states(states)
for states in trackers_as_states
]
self._choose_last_user_input(trackers_as_states, use_text_for_last_user_input)
return trackers_as_states
class MaxHistoryTrackerFeaturizer(TrackerFeaturizer):
"""Truncates the tracker history into `max_history` long sequences.
Creates training data from trackers where actions are the output prediction
labels. Tracker state sequences which represent policy input are truncated
to not exceed `max_history` states.
"""
LABEL_NAME = "action"
def __init__(
self,
state_featurizer: Optional[SingleStateFeaturizer] = None,
max_history: Optional[int] = None,
remove_duplicates: bool = True,
) -> None:
"""Initializes the tracker featurizer.
Args:
state_featurizer: The state featurizer used to encode the states.
max_history: The maximum length of an extracted state sequence.
remove_duplicates: Keep only unique training state sequence/label pairs.
"""
super().__init__(state_featurizer)
self.max_history = max_history
self.remove_duplicates = remove_duplicates
@staticmethod
def slice_state_history(
states: List[State], slice_length: Optional[int]
) -> List[State]:
"""Slices states from the trackers history.
Args:
states: The states
slice_length: The slice length
Returns:
The sliced states.
"""
if not slice_length:
return states
return states[-slice_length:]
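# Example for clarity (comment added by the editor, not original code):
# slice_state_history([s1, s2, s3, s4], 2) returns [s3, s4], while a
# slice_length of None or 0 returns the full, untruncated state list.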
@staticmethod
def _hash_example(states: List[State], labels: Optional[List[Text]] = None) -> int:
"""Hashes states (and optionally label).
Produces a hash of the tracker state sequence (and optionally the labels).
If `labels` is `None`, labels don't get hashed.
Args:
states: The tracker state sequence to hash.
labels: Label strings associated with this state sequence.
Returns:
The hash of the states and (optionally) the label.
"""
frozen_states = tuple(
s if s is None else DialogueStateTracker.freeze_current_state(s)
for s in states
)
if labels is not None:
frozen_labels = tuple(labels)
return hash((frozen_states, frozen_labels))
else:
return hash(frozen_states)
def training_states_labels_and_entities(
self,
trackers: List[DialogueStateTracker],
domain: Domain,
omit_unset_slots: bool = False,
ignore_action_unlikely_intent: bool = False,
) -> Tuple[List[List[State]], List[List[Text]], List[List[Dict[Text, Any]]]]:
"""Transforms trackers to states, action labels, and entity data.
Args:
trackers: The trackers to transform.
domain: The domain.
omit_unset_slots: If `True` do not include the initial values of slots.
ignore_action_unlikely_intent: Whether to remove `action_unlikely_intent`
from training states.
Returns:
Trackers as states, labels, and entity data.
"""
example_states = []
example_labels = []
example_entities = []
# Store of example hashes for removing duplicate training examples.
hashed_examples = set()
logger.debug(
f"Creating states and {self.LABEL_NAME} label examples from "
f"collected trackers "
f"(by {type(self).__name__}({type(self.state_featurizer).__name__}))..."
)
pbar = tqdm(
trackers,
desc="Processed trackers",
disable=rasa.shared.utils.io.is_logging_disabled(),
)
for tracker in pbar:
for states, label, entities in self._extract_examples(
tracker,
domain,
omit_unset_slots=omit_unset_slots,
ignore_action_unlikely_intent=ignore_action_unlikely_intent,
):
if self.remove_duplicates:
hashed = self._hash_example(states, label)
if hashed in hashed_examples:
continue
hashed_examples.add(hashed)
example_states.append(states)
example_labels.append(label)
example_entities.append(entities)
pbar.set_postfix({f"# {self.LABEL_NAME}": f"{len(example_labels):d}"})
self._remove_user_text_if_intent(example_states)
logger.debug(f"Created {len(example_states)} {self.LABEL_NAME} examples.")
return example_states, example_labels, example_entities
def _extract_examples(
self,
tracker: DialogueStateTracker,
domain: Domain,
omit_unset_slots: bool = False,
ignore_action_unlikely_intent: bool = False,
) -> Iterator[Tuple[List[State], List[Text], List[Dict[Text, Any]]]]:
"""Creates an iterator over training examples from a tracker.
Args:
tracker: The tracker from which to extract training examples.
domain: The domain of the training data.
omit_unset_slots: If `True` do not include the initial values of slots.
ignore_action_unlikely_intent: Whether to remove `action_unlikely_intent`
from training states.
Returns:
An iterator over example states, labels, and entity data.
"""
tracker_states = self._create_states(
tracker, domain, omit_unset_slots=omit_unset_slots
)
events = tracker.applied_events()
if ignore_action_unlikely_intent:
tracker_states = self._remove_action_unlikely_intent_from_states(
tracker_states
)
events = self._remove_action_unlikely_intent_from_events(events)
label_index = 0
entity_data = {}
for event in events:
if isinstance(event, UserUttered):
entity_data = self._entity_data(event)
elif isinstance(event, ActionExecuted):
label_index += 1
# use only actions which can be predicted at a story's start
if event.unpredictable:
continue
sliced_states = self.slice_state_history(
tracker_states[:label_index], self.max_history
)
label = [event.action_name or event.action_text]
entities = [entity_data]
yield sliced_states, label, entities
# reset entity_data for the next turn
entity_data = {}
def prediction_states(
self,
trackers: List[DialogueStateTracker],
domain: Domain,
use_text_for_last_user_input: bool = False,
ignore_rule_only_turns: bool = False,
rule_only_data: Optional[Dict[Text, Any]] = None,
ignore_action_unlikely_intent: bool = False,
) -> List[List[State]]:
"""Transforms trackers to states for prediction.
Args:
trackers: The trackers to transform.
domain: The domain.
use_text_for_last_user_input: Indicates whether to use text or intent label
for featurizing last user input.
ignore_rule_only_turns: If True ignore dialogue turns that are present
only in rules.
rule_only_data: Slots and loops,
which only occur in rules but not in stories.
ignore_action_unlikely_intent: Whether to remove any states containing
`action_unlikely_intent` | |
. You can't create a new ``SettingId`` , but you can overwrite the default value if you have the ``ssm:UpdateServiceSetting`` permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.
Query the current service setting for the account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetServiceSetting>`_
**Request Syntax**
::
response = client.get_service_setting(
SettingId='string'
)
**Response Syntax**
::
{
'ServiceSetting': {
'SettingId': 'string',
'SettingValue': 'string',
'LastModifiedDate': datetime(2015, 1, 1),
'LastModifiedUser': 'string',
'ARN': 'string',
'Status': 'string'
}
}
**Response Structure**
- *(dict) --*
The query result body of the GetServiceSetting API action.
- **ServiceSetting** *(dict) --*
The query result of the current service setting.
- **SettingId** *(string) --*
The ID of the service setting.
- **SettingValue** *(string) --*
The value of the service setting.
- **LastModifiedDate** *(datetime) --*
The last time the service setting was modified.
- **LastModifiedUser** *(string) --*
The ARN of the last modified user. This field is populated only if the setting value was overwritten.
- **ARN** *(string) --*
The ARN of the service setting.
- **Status** *(string) --*
The status of the service setting. The value can be Default, Customized or PendingUpdate.
* Default: The current setting uses a default value provisioned by the AWS service team.
* Customized: The current setting uses a custom value specified by the customer.
* PendingUpdate: The current setting uses a default or custom value, but a setting change request is pending approval.
:type SettingId: string
:param SettingId: **[REQUIRED]**
The ID of the service setting to get.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def label_parameter_version(self, Name: str, Labels: List, ParameterVersion: int = None) -> Dict:
"""
A parameter label is a user-defined alias to help you manage different versions of a parameter. When you modify a parameter, Systems Manager automatically saves a new version and increments the version number by one. A label can help you remember the purpose of a parameter when there are multiple versions.
Parameter labels have the following requirements and restrictions.
* A version of a parameter can have a maximum of 10 labels.
* You can't attach the same label to different versions of the same parameter. For example, if version 1 has the label Production, then you can't attach Production to version 2.
* You can move a label from one version of a parameter to another.
* You can't create a label when you create a new parameter. You must attach a label to a specific version of a parameter.
* You can't delete a parameter label. If you no longer want to use a parameter label, then you must move it to a different version of a parameter.
* A label can have a maximum of 100 characters.
* Labels can contain letters (case sensitive), numbers, periods (.), hyphens (-), or underscores (_).
* Labels can't begin with a number, "aws," or "ssm" (not case sensitive). If a label fails to meet these requirements, then the label is not associated with a parameter and the system displays it in the list of InvalidLabels.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/LabelParameterVersion>`_
**Request Syntax**
::
response = client.label_parameter_version(
Name='string',
ParameterVersion=123,
Labels=[
'string',
]
)
**Response Syntax**
::
{
'InvalidLabels': [
'string',
]
}
**Response Structure**
- *(dict) --*
- **InvalidLabels** *(list) --*
The label does not meet the requirements. For information about parameter label requirements, see `Labeling Parameters <http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
:type Name: string
:param Name: **[REQUIRED]**
The parameter name on which you want to attach one or more labels.
:type ParameterVersion: integer
:param ParameterVersion:
The specific version of the parameter on which you want to attach one or more labels. If no version is specified, the system attaches the label to the latest version.
:type Labels: list
:param Labels: **[REQUIRED]**
One or more labels to attach to the specified parameter version.
- *(string) --*
:rtype: dict
:returns:
"""
pass
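# Hedged usage sketch (not part of the generated stub); the call shape mirrors
# the Request Syntax above, while the client construction is an assumption:
#
#   import boto3
#   ssm = boto3.client('ssm')
#   resp = ssm.label_parameter_version(
#       Name='/my/parameter',      # hypothetical parameter name
#       ParameterVersion=3,        # hypothetical version
#       Labels=['Production'],
#   )
#   if resp['InvalidLabels']:
#       print('Rejected labels:', resp['InvalidLabels'])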
def list_association_versions(self, AssociationId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Retrieves all versions of an association for a specific association ID.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ListAssociationVersions>`_
**Request Syntax**
::
response = client.list_association_versions(
AssociationId='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'AssociationVersions': [
{
'AssociationId': 'string',
'AssociationVersion': 'string',
'CreatedDate': datetime(2015, 1, 1),
'Name': 'string',
'DocumentVersion': 'string',
'Parameters': {
'string': [
'string',
]
},
'Targets': [
{
'Key': 'string',
'Values': [
'string',
]
},
],
'ScheduleExpression': 'string',
'OutputLocation': {
'S3Location': {
'OutputS3Region': 'string',
'OutputS3BucketName': 'string',
'OutputS3KeyPrefix': 'string'
}
},
'AssociationName': 'string',
'MaxErrors': 'string',
'MaxConcurrency': 'string',
'ComplianceSeverity': 'CRITICAL'|'HIGH'|'MEDIUM'|'LOW'|'UNSPECIFIED'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **AssociationVersions** *(list) --*
Information about all versions of the association for the specified association ID.
- *(dict) --*
Information about the association version.
- **AssociationId** *(string) --*
The ID created by the system when the association was created.
- **AssociationVersion** *(string) --*
The association version.
- **CreatedDate** *(datetime) --*
The date the association version was created.
- **Name** *(string) --*
The name specified when the association was created.
- **DocumentVersion** *(string) --*
The version of a Systems Manager document used when the association version was created.
- **Parameters** *(dict) --*
Parameters specified when the association version was created.
- *(string) --*
- *(list) --*
- *(string) --*
- **Targets** *(list) --*
The targets specified for the association when the association version was created.
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don't provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
- **ScheduleExpression** *(string) --*
The cron or rate schedule specified for the association when the association version was created.
- **OutputLocation** *(dict) --*
The location in Amazon S3 specified for the association when the association version was created.
- **S3Location** *(dict) --*
An Amazon S3 bucket where you want to store the results of this request.
- **OutputS3Region** *(string) --*
(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.
- **OutputS3BucketName** *(string) --*
The name of the Amazon S3 bucket.
- **OutputS3KeyPrefix** *(string) --*
The Amazon S3 bucket subfolder.
- **AssociationName** *(string) --* | |
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset main class."""
import dataclasses
import os
import re
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
import einops
from etils import etqdm as tqdm
import jax
import jax.numpy as jnp
import jax3d.projects.nesf as j3d
from jax3d.projects.nesf.nerfstatic.datasets import dataset_utils
from jax3d.projects.nesf.nerfstatic.datasets import klevr # pylint: disable=unused-import
from jax3d.projects.nesf.nerfstatic.datasets import scene_understanding # pylint: disable=unused-import
from jax3d.projects.nesf.nerfstatic.datasets.dataset_utils import ExampleType
from jax3d.projects.nesf.nerfstatic.models import model_utils
from jax3d.projects.nesf.nerfstatic.utils import gin_utils
from jax3d.projects.nesf.nerfstatic.utils import types
from jax3d.projects.nesf.utils.typing import Tree
import numpy as np
import sunds
import tensorflow as tf
import tensorflow_datasets as tfds
# pylint: disable=unused-import,g-bad-import-order
# pylint: enable=unused-import,g-bad-import-order
# Import datasets for registration
# State of the dataset pipeline (in in-memory mode).
# Internally, this corresponds to the numpy random state used to generate
# batches. Currently this is two 128-bit unsigned integers from `np.random.PCG64()`.
DsState = Tree[Any]
# Scene selector can be:
# * int: Select scene `[0, n-1]`
# * str: np-like indexing (e.g. ':10', '5:', '3:10')
# * seq: List or tuple of ints `[1, 2, 5]`
SceneSelection = Union[int, str, Sequence[int]]
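# Illustrative examples of scene selectors (values assumed, semantics taken
# from the `DatasetParams` docstring below):
#   train_scenes=4         -> scenes [0, 1, 2, 3]   (half-open `[0, 4[`)
#   train_scenes=':10'     -> scenes 0..9 (np-like slicing)
#   train_scenes=[1, 2, 5] -> exactly those scenes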
@dataclasses.dataclass(eq=True, frozen=True)
class BatchSize:
"""Batch size util.
Attributes:
total: The total batch size across all host/devices
per_process: The batch size for a single host
per_device: The batch size for a single device in a single host
"""
total: int
per_process: int
per_device: int
def __init__(self, global_batch_size: int):
# Use setattr to bypass the frozen=True
super().__setattr__('total', global_batch_size)
super().__setattr__('per_process', global_batch_size // jax.process_count())
super().__setattr__('per_device',
self.per_process // jax.local_device_count())
if self.total and self.total % jax.device_count() != 0:
raise ValueError(
f'Batch size ({self.total}) must be divisible by the number '
f'of devices ({jax.device_count()}).')
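# Rough numeric sketch (assuming 2 processes with 4 local devices each):
#   BatchSize(4096) -> total=4096, per_process=2048, per_device=512
# The check above guarantees `total % jax.device_count() == 0`.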
class BatchSizeField(j3d.utils.DataclassField[int, BatchSize]):
"""Field which normalize batch size."""
def __init__(self, total: int):
"""Constructor.
Args:
total: The total batch size across all host/devices. This is the default
value used if `DatasetParams.batch_size` is not provided.
"""
self._total = total
super().__init__()
def _default(self) -> int:
# Lazy construct the field to avoid `jax` calls at import time, before
# `absl.main` is called.
return self._total
def _validate(self, value: int) -> BatchSize:
return BatchSize(value)
@gin_utils.dataclass_configurable
@dataclasses.dataclass
class DatasetParams:
"""Dataset Params.
Attributes:
data_dir: Input data directory.
dataset: The type of dataset to feed nerf.
batch_size: The number of rays in a mini-batch (for training).
specific_params: Params specific to the selected dataset (e.g. KlevrParams)
factor: The downsample factor of images, 0 for no downsample.
ray_shuffle_buffer_size: size of ray shuffle buffer
num_input_ray_samples: If set, sample this many rays from input view for
RAY mode. Will be ignored in IMAGE mode.
crop_views: if specified, crop the input or target images by this
amount. Format is (top, bottom, left, right).
Value in pixels to be cropped, e.g. (0, 0, 0, 0) is no cropping.
train_scenes: Number of scenes to use for training. If `None`, data_dir
should point to a specific scene; otherwise, data_dir should contain
`0/`, `1/`,... sub-dirs. If `int`, scenes `[0, train_scenes[` will be used
for training.
If `str`, should be np-like indexing (e.g. ':10', '3:7').
eval_scenes: Number of scenes to use for eval. If `None`, the same value
as train_scenes will be used. If `int`, scenes `[0, eval_scenes[` will be
used for eval.
If `str`, should be np-like indexing (e.g. ':10', '5:', '3:10').
novel_scenes: Set of scenes used in the semantic generalization mode.
Like `train_scenes`, can be `int` or `str`. Scenes selected here will be
used for both split='train' and 'eval' when
`get_dataset(is_novel_scenes=True)`.
ignore_scenes: Set of scenes to ignore in train, eval, and novel scenes.
Use this if some scenes should be ignored due to upstream errors.
num_scenes_per_batch: When set, a set of scenes are sampled at random, then
rays from those scenes are sampled.
max_num_train_images_per_scene: When set, limits the maximum number of
images to load from each scene for model training and "eval_train".
max_num_test_images_per_scene: When set, limits the maximum number of
test images to load for "eval_test".
eval_novel_train_image_idxs: Which image indices to use for
"eval_novel_train". Index is with respect to
metadata["split_ids"]["train"].
eval_novel_test_image_idxs: Which image indices to use for
"eval_novel_test". Index is with respect to
metadata["split_ids"]["test"].
enable_sqrt2_buffer: If set, the scene's bounding box will be increased by
a factor of sqrt(2) along the x-axis and y-axis. For use with random
scene rotations.
enable_mipnerf: Enables Mip-NeRF mode. Currently, this will only populate
the rays.base_radius fields of the data points.
pseudo_semantic_labels_path: If set, loads the semantic labels for training
from the given path.
"""
data_dir: j3d.Path = j3d.utils.PathField() # pytype: disable=annotation-type-mismatch
dataset: str = 'klevr'
batch_size: BatchSize = BatchSizeField(4096) # pytype: disable=annotation-type-mismatch
specific_params: Optional[gin_utils.ConfigurableDataclass] = None
# TODO(epot): Should migrate dataset specific fields into their subclass.
factor: int = 4
ray_shuffle_buffer_size: int = 684 * 456 * 400
num_input_ray_samples: Optional[int] = None
crop_views: Optional[Tuple[int, int, int, int]] = None
# Args to control the scenes to use in _examples_loader:
train_scenes: Optional[SceneSelection] = None
eval_scenes: Optional[SceneSelection] = None
novel_scenes: Optional[SceneSelection] = None
ignore_scenes: Optional[SceneSelection] = None
num_scenes_per_batch: Optional[int] = None
max_num_train_images_per_scene: Optional[int] = None
max_num_test_images_per_scene: Optional[int] = None
# Which image indices to use for each scene. This index is with respect to
# the "split_ids" field in metadata.json.
eval_novel_train_image_idxs: Optional[Sequence[int]] = None
eval_novel_test_image_idxs: Optional[Sequence[int]] = None
enable_sqrt2_buffer: bool = False
enable_mipnerf: bool = False # Whether to compute ray radii for MipNeRF.
# If set, uses pseudo labels for semantic training.
pseudo_semantic_labels_path: Optional[str] = None
def __post_init__(self):
# Maybe a cleaner way would be to have each dataset be a subclass of
# DatasetParams and use `ConfigModel.dataset = 'SundsParams'`
# inside gin (like for the model class).
# An advantage of this solution is that we are guaranteed that
# `DatasetParams` is never used in dataset-specific functions.
# Initialize the dataset specific params
registered_dataset = dataset_utils.find_registered_dataset(self.dataset)
if self.specific_params is None and registered_dataset.config_cls:
self.specific_params = registered_dataset.config_cls()
def get_dataset(
split: str,
args: DatasetParams,
model_args: model_utils.ModelParams,
example_type: ExampleType,
ds_state: Optional[DsState] = None,
*, # TODO(epot): Make other args kwargs
is_novel_scenes: bool = False,
) -> 'DatasetIterable':
"""Returns the dataset.
Args:
split: Split to load (train or test)
args: Dataset arguments
model_args: Model arguments
example_type: ExampleType.RAY or ExampleType.IMAGE
ds_state: State used to set e.g. np.random.RandomState instance state (When
set to None, a default state value is used.)
is_novel_scenes: Whether or not in novel scene evaluation mode.
Returns:
DatasetIterable object, initialized according to provided arguments.
"""
registered_dataset = dataset_utils.find_registered_dataset(args.dataset)
# TODO(epot): When the dataset won't fit in memory, we could batch multiple
# images, flatten the batch dimension, then apply ds.shuffle on the flat_map
# dataset? Need to be careful about determinism on multi-TPU workers.
# Select train/test examples from the different scenes
if registered_dataset.in_memory:
all_exs, all_metadata = _in_memory_examples_loader(
registered_dataset=registered_dataset,
split=split,
args=args,
# Blender-specific kwargs
background=model_args.background,
factor=args.factor,
is_novel_scenes=is_novel_scenes,
)
ds = _make_in_memory_dataset( # Create the tf.data.Dataset object
all_exs,
batch_size=args.batch_size,
example_type=example_type,
ds_state=ds_state,
args=args,
)
else:
assert not is_novel_scenes
assert not args.max_num_train_images_per_scene
assert not args.max_num_test_images_per_scene
ds, all_metadata = _streaming_example_loader(
registered_dataset=registered_dataset,
split=split,
example_type=example_type,
args=args,
)
# Create the dataset iterable
return DatasetIterable(
ds=ds,
all_metadata=all_metadata,
example_type=example_type,
)
def _in_memory_examples_loader(
*,
registered_dataset: dataset_utils.RegisteredDataset,
split: str,
args: DatasetParams,
is_novel_scenes: bool,
**load_scene_kwargs,
) -> Tuple[List[types.Batch], List[dataset_utils.DatasetMetadata]]:
"""Load examples from the requested scenes.
Args:
registered_dataset: The dataset to load
split: Split to load (train or test)
args: Dataset arguments
is_novel_scenes: Whether or not in novel scene evaluation mode.
**load_scene_kwargs: Kwargs forwarded to the `_DS_TO_LOADER` function.
Returns:
examples: List of all examples as a single Batch per scene, all in-order.
dataset_metadata: List of all dataset metadata (e.g. semantic labels), one
per scene.
"""
if split not in ('train', 'test'):
raise ValueError(f'Unknown split: {split}')
# Get the number of scenes
scene_range = _get_scene_range(
eval_scenes=args.eval_scenes,
train_scenes=args.train_scenes,
novel_scenes=args.novel_scenes,
ignore_scenes=args.ignore_scenes,
split=split,
is_novel_scenes=is_novel_scenes,
)
# Load all scenes
all_exs: List[types.Batch] = []
all_metadata: List[dataset_utils.DatasetMetadata] = []
if scene_range is | |
4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if NrScenario == "6400c":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [20, 20, 16]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [10, 10, 8, 8]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [10, 10, 8, 8, 1]
if nrtimebucketstochastic == 6:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1]
if nrtimebucketstochastic == 7:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1]
if nrtimebucketstochastic == 8:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1]
if nrtimebucketstochastic == 9:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 10:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 11:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 12:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 13:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 14:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 15:
stochasticparttreestructure = [10, 10, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if NrScenario == "6400d":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [16, 20, 20]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [8, 8, 10, 10]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [4, 5, 5, 8, 8]
if NrScenario == "6400e":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [8, 20, 40]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [4, 8, 10, 20]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [2, 5, 5, 8, 16]
if NrScenario == "500":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [500, 1, 1]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [500, 1, 1, 1]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [500, 1, 1, 1, 1]
if NrScenario == "800":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [25, 8, 4]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [25, 4, 4, 2]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [25, 4, 4, 2, 1]
if NrScenario == "1600":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [25, 16, 4]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [25, 4, 4, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [25, 4, 4, 2, 2]
if NrScenario == "3200":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [50, 16, 4]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [25, 8, 4, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [25, 8, 4, 4, 1]
if nrtimebucketstochastic == 6:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1]
if nrtimebucketstochastic == 7:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1]
if nrtimebucketstochastic == 8:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1]
if nrtimebucketstochastic == 9:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 10:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 11:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 12:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 13:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 14:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if nrtimebucketstochastic == 15:
stochasticparttreestructure = [25, 8, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
if NrScenario == "3200c":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [20, 16, 10]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [10, 8, 8, 5]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [8, 5, 5, 4, 4]
if NrScenario == "12800":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [50, 32, 8]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [50, 8, 8, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 8, 8, 2, 2]
if NrScenario == "12800c":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [32, 20, 20]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [16, 10, 10, 8]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [8, 8, 8, 5, 5]
if NrScenario == "12800e":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [10, 10, 10]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [10, 10, 10, 10]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [10, 10, 10, 10, 10]
if NrScenario == "25600":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [50, 32, 16]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [50, 16, 8, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 8, 8, 4, 2]
if NrScenario == "25600c":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [40, 32, 20]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [16, 16, 10, 10]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [10, 8, 8, 8, 5]
if NrScenario == "25600e":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [15, 15, 15]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [15, 15, 15, 15]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [15, 15, 15, 15, 15]
if NrScenario == "51200b":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [50, 32, 32]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [50, 32, 8, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 16, 8, 4, 2]
if NrScenario == "51200c":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [40, 40, 32]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [20, 16, 16, 10]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [10, 10, 8, 8, 8]
if NrScenario == "102400b":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [100, 32, 32]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [50, 32, 8, 8]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 16, 8, 4, 4]
if NrScenario == "153600":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [100, 48, 32]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [75, 32, 8, 8]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [75, 16, 8, 4, 4]
if NrScenario == "204800":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [100, 64, 32]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [50, 32, 16, 8]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 16, 8, 8, 4]
if NrScenario == "102400":
if nrtimebucketstochastic == 3:
stochasticparttreestructure = [100, 64, 16]
if nrtimebucketstochastic == 4:
stochasticparttreestructure = [100, 32, 8, 4]
if nrtimebucketstochastic == 5:
stochasticparttreestructure = [50, 16, 8, 4, 4]
if PolicyGeneration != Constants.RollingHorizon:
k = 0
for i in range(Instance.NrTimeBucketWithoutUncertaintyBefore + 1,
nrtimebucketconsidered - Instance.NrTimeBucketWithoutUncertaintyAfter + 1):
treestructure[i] = stochasticparttreestructure[k]
k += 1
else:
treestructure = stochasticparttreestructure
return treestructure
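# Possible refactor (sketch only, not part of the original code): the long
# if-chains above amount to a lookup from (NrScenario, nrtimebucketstochastic)
# to a branching structure, so a dictionary could replace them, e.g.:
#
#   _TREE_STRUCTURES = {
#       ("800", 3): [25, 8, 4],
#       ("800", 4): [25, 4, 4, 2],
#       ("800", 5): [25, 4, 4, 2, 1],
#       # ... remaining (NrScenario, nr time buckets) pairs ...
#   }
#   stochasticparttreestructure = _TREE_STRUCTURES[(NrScenario, nrtimebucketstochastic)]
#
# The entries shown are copied from the "800" branches above; a full table
# would need every pair currently handled by the if-statements.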
def EvaluateSingleSol():
# ComputeInSampleStatistis()
global OutOfSampleTestResult
global Model
tmpmodel = Model
# solutions = GetPreviouslyFoundSolution()
filedescription = GetTestDescription()
yeuristicyfix = False
MIPModel = Model
if Model == Constants.Average or Model == Constants.AverageSS or Model == Constants.AverageSSGrave:
MIPModel = Constants.ModelYQFix
if Model == Constants.ModelHeuristicYFix:
MIPModel = Constants.ModelYFix
Model = Constants.ModelYFix
yeuristicyfix = True
solution = MRPSolution()
if not EVPI and PolicyGeneration != Constants.RollingHorizon:  # In EVPI mode, a solution is computed for each scenario
if Constants.RunEvaluationInSeparatedJob:
solution.ReadFromFile(filedescription)
else:
solution = LastFoundSolution
if not solution.IsPartialSolution:
solution.ComputeCost()
if Model != Constants.ModelYQFix:
solution.ScenarioTree.FillQuantityToOrderFromMRPSolution(solution)
evaluator = Evaluator( Instance, [solution], [], PolicyGeneration, evpi=EVPI,
scenariogenerationresolve=ScenarioGeneration, treestructure=GetTreeStructure(),
nearestneighborstrategy=NearestNeighborStrategy,
evaluateaverage=(Model == Constants.Average or Model == Constants.AverageSS or Model == Constants.AverageSSGrave),
usesafetystock = (Model == Constants.AverageSS),
usesafetystockGrave =(Model == Constants.AverageSSGrave),
evpiseed=SeedArray[0],
model = MIPModel,
timehorizon=TimeHorizon, yeuristicyfix = yeuristicyfix, startseedresolve=ScenarioSeed )
OutOfSampleTestResult = evaluator.EvaluateYQFixSolution( TestIdentifier, EvaluatorIdentifier, saveevaluatetab= True, filename = GetEvaluationFileName(), evpi=EVPI )
Model = tmpmodel
GatherEvaluation()
def GatherEvaluation():
global ScenarioSeed
currentseedvalue = ScenarioSeed
evaluator = Evaluator(Instance, [], [], "", ScenarioGeneration, treestructure=GetTreeStructure(), model = Model, startseedresolve=ScenarioSeed )
EvaluationTab = []
ProbabilitiesTab =[]
KPIStats = []
nrfile = 0
# Create the evaluation table
for seed in [ScenarioSeed]:#SeedArray:
try:
ScenarioSeed = seed
filename = GetEvaluationFileName()
TestIdentifier[5] = seed
#print "open file %rEvaluator.txt"%filename
with open(filename + "Evaluator.txt", 'r') as f:
list = pickle.load(f)
EvaluationTab.append( list )
with open(filename + "Probabilities.txt", 'r') as f:
list = pickle.load(f)
ProbabilitiesTab.append(list)
with open(filename | |
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init as nninit
from torchvision.transforms import Resize, Grayscale
_ARCH_REGISTRY = {}
def architecture(name, sample_shape):
"""
Decorator to register an architecture.
Use like so:
>>> @architecture('my_architecture', (3, 32, 32))
... class MyNetwork(nn.Module):
... def __init__(self, n_classes):
... # Build network
... pass
"""
def decorate(fn):
_ARCH_REGISTRY[name] = (fn, sample_shape)
return fn
return decorate
def get_net_and_shape_for_architecture(arch_name):
"""
Get the network building function and expected sample shape for a registered architecture.
For example:
>>> net_class, shape = get_net_and_shape_for_architecture('my_architecture')
>>> if shape != expected_shape:
... raise Exception('Incorrect shape')
"""
return _ARCH_REGISTRY[arch_name]
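# Hedged usage sketch (not in the original file): building a registered
# network and checking the expected input shape before use.
#
#   net_class, sample_shape = get_net_and_shape_for_architecture('mnist-bn-32-64-256')
#   net = net_class(n_classes=10)
#   x = torch.zeros((1,) + sample_shape)   # dummy batch of shape (1, 1, 28, 28)
#   logits = net(x)                        # -> tensor of shape (1, 10)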
@architecture('mnist-bn-32-32-64-256', (1, 32, 32))
class MNIST_BN_32_32_64_256(nn.Module):
def __init__(self, n_classes):
super(MNIST_BN_32_32_64_256, self).__init__()
self.resize = Resize((28, 28))
self.conv1_1 = nn.Conv2d(1, 32, (5, 5))
self.conv1_1_bn = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3))
self.conv2_1_bn = nn.BatchNorm2d(64)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3))
self.conv2_2_bn = nn.BatchNorm2d(64)
self.pool2 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc3 = nn.Linear(1024, 256)
self.fc4 = nn.Linear(256, n_classes)
def forward(self, x):
x = self.resize(x)
x = self.pool1(F.relu(self.conv1_1_bn(self.conv1_1(x))))
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = self.pool2(F.relu(self.conv2_2_bn(self.conv2_2(x))))
x = x.view(-1, 1024)
x = self.drop1(x)
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
@architecture('mnist-bn-32-64-256', (1, 28, 28))
class MNIST_BN_32_64_256(nn.Module):
def __init__(self, n_classes):
super(MNIST_BN_32_64_256, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, (5, 5))
self.conv1_1_bn = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3))
self.conv2_1_bn = nn.BatchNorm2d(64)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3))
self.conv2_2_bn = nn.BatchNorm2d(64)
self.pool2 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc3 = nn.Linear(1024, 256)
self.fc4 = nn.Linear(256, n_classes)
def forward(self, x):
x = self.pool1(F.relu(self.conv1_1_bn(self.conv1_1(x))))
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = self.pool2(F.relu(self.conv2_2_bn(self.conv2_2(x))))
x = x.view(-1, 1024)
x = self.drop1(x)
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
@architecture('mnist-bn-32-32-64-256-rgb', (3, 32, 32))
class MNIST_BN_32_32_64_256_rgb(nn.Module):
def __init__(self, n_classes):
super(MNIST_BN_32_32_64_256_rgb, self).__init__()
self.resize = Resize((28, 28))
self.gray_scale = Grayscale()
self.conv1_1 = nn.Conv2d(1, 32, (5, 5))
self.conv1_1_bn = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3))
self.conv2_1_bn = nn.BatchNorm2d(64)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3))
self.conv2_2_bn = nn.BatchNorm2d(64)
self.pool2 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc3 = nn.Linear(1024, 256)
self.fc4 = nn.Linear(256, n_classes)
def forward(self, x):
x = self.gray_scale(x)
x = self.resize(x)
x = self.pool1(F.relu(self.conv1_1_bn(self.conv1_1(x))))
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = self.pool2(F.relu(self.conv2_2_bn(self.conv2_2(x))))
x = x.view(-1, 1024)
x = self.drop1(x)
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
@architecture('grey-32-64-128-gp', (1, 32, 32))
class Grey_32_64_128_gp(nn.Module):
def __init__(self, n_classes):
super(Grey_32_64_128_gp, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, (3, 3), padding=1)
self.conv1_1_bn = nn.BatchNorm2d(32)
self.conv1_2 = nn.Conv2d(32, 32, (3, 3), padding=1)
self.conv1_2_bn = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3), padding=1)
self.conv2_1_bn = nn.BatchNorm2d(64)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.conv2_2_bn = nn.BatchNorm2d(64)
self.conv2_3 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.conv2_3_bn = nn.BatchNorm2d(64)
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3_1 = nn.Conv2d(64, 128, (3, 3), padding=1)
self.conv3_1_bn = nn.BatchNorm2d(128)
self.conv3_2 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv3_2_bn = nn.BatchNorm2d(128)
self.conv3_3 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv3_3_bn = nn.BatchNorm2d(128)
self.pool3 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc4 = nn.Linear(128, 128)
self.fc5 = nn.Linear(128, n_classes)
def forward(self, x):
x = F.relu(self.conv1_1_bn(self.conv1_1(x)))
x = self.pool1(F.relu(self.conv1_2_bn(self.conv1_2(x))))
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = F.relu(self.conv2_2_bn(self.conv2_2(x)))
x = self.pool2(F.relu(self.conv2_3_bn(self.conv2_3(x))))
x = F.relu(self.conv3_1_bn(self.conv3_1(x)))
x = F.relu(self.conv3_2_bn(self.conv3_2(x)))
x = self.pool3(F.relu(self.conv3_3_bn(self.conv3_3(x))))
x = F.avg_pool2d(x, 4)
x = x.view(-1, 128)
x = self.drop1(x)
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
@architecture('grey-32-64-128-gp-wn', (1, 32, 32))
class Grey_32_64_128_gp_wn(nn.Module):
def __init__(self, n_classes):
super(Grey_32_64_128_gp_wn, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, (3, 3), padding=1)
self.conv1_2 = nn.Conv2d(32, 32, (3, 3), padding=1)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3), padding=1)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.conv2_3 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3_1 = nn.Conv2d(64, 128, (3, 3), padding=1)
self.conv3_2 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv3_3 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.pool3 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc4 = nn.Linear(128, 128)
self.fc5 = nn.Linear(128, n_classes)
nninit.xavier_normal(self.conv1_1.weight)
nninit.xavier_normal(self.conv1_2.weight)
nninit.xavier_normal(self.conv2_1.weight)
nninit.xavier_normal(self.conv2_2.weight)
nninit.xavier_normal(self.conv2_3.weight)
nninit.xavier_normal(self.conv3_1.weight)
nninit.xavier_normal(self.conv3_2.weight)
nninit.xavier_normal(self.conv3_3.weight)
nninit.xavier_normal(self.fc4.weight)
nninit.xavier_normal(self.fc5.weight)
nn.utils.weight_norm(self.conv1_1, 'weight')
nn.utils.weight_norm(self.conv1_2, 'weight')
nn.utils.weight_norm(self.conv2_1, 'weight')
nn.utils.weight_norm(self.conv2_2, 'weight')
nn.utils.weight_norm(self.conv2_3, 'weight')
nn.utils.weight_norm(self.conv3_1, 'weight')
nn.utils.weight_norm(self.conv3_2, 'weight')
nn.utils.weight_norm(self.conv3_3, 'weight')
nn.utils.weight_norm(self.fc4, 'weight')
def forward(self, x):
x = F.relu(self.conv1_1(x))
x = self.pool1(F.relu(self.conv1_2(x)))
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = self.pool2(F.relu(self.conv2_3(x)))
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = self.pool3(F.relu(self.conv3_3(x)))
x = F.avg_pool2d(x, 4)
x = x.view(-1, 128)
x = self.drop1(x)
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
@architecture('grey-32-64-128-gp-nonorm', (1, 32, 32))
class Grey_32_64_128_gp_nonorm(nn.Module):
def __init__(self, n_classes):
super(Grey_32_64_128_gp_nonorm, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, (3, 3), padding=1)
self.conv1_2 = nn.Conv2d(32, 32, (3, 3), padding=1)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(32, 64, (3, 3), padding=1)
self.conv2_2 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.conv2_3 = nn.Conv2d(64, 64, (3, 3), padding=1)
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3_1 = nn.Conv2d(64, 128, (3, 3), padding=1)
self.conv3_2 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv3_3 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.pool3 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc4 = nn.Linear(128, 128)
self.fc5 = nn.Linear(128, n_classes)
nninit.xavier_normal(self.conv1_1.weight)
nninit.xavier_normal(self.conv1_2.weight)
nninit.xavier_normal(self.conv2_1.weight)
nninit.xavier_normal(self.conv2_2.weight)
nninit.xavier_normal(self.conv2_3.weight)
nninit.xavier_normal(self.conv3_1.weight)
nninit.xavier_normal(self.conv3_2.weight)
nninit.xavier_normal(self.conv3_3.weight)
nninit.xavier_normal(self.fc4.weight)
nninit.xavier_normal(self.fc5.weight)
def forward(self, x):
x = F.relu(self.conv1_1(x))
x = self.pool1(F.relu(self.conv1_2(x)))
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = self.pool2(F.relu(self.conv2_3(x)))
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = self.pool3(F.relu(self.conv3_3(x)))
x = F.avg_pool2d(x, 4)
x = x.view(-1, 128)
x = self.drop1(x)
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
@architecture('rgb-48-96-192-gp', (3, 32, 32))
class RGB_48_96_192_gp(nn.Module):
def __init__(self, n_classes):
super(RGB_48_96_192_gp, self).__init__()
self.conv1_1 = nn.Conv2d(3, 48, (3, 3), padding=1)
self.conv1_1_bn = nn.BatchNorm2d(48)
self.conv1_2 = nn.Conv2d(48, 48, (3, 3), padding=1)
self.conv1_2_bn = nn.BatchNorm2d(48)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(48, 96, (3, 3), padding=1)
self.conv2_1_bn = nn.BatchNorm2d(96)
self.conv2_2 = nn.Conv2d(96, 96, (3, 3), padding=1)
self.conv2_2_bn = nn.BatchNorm2d(96)
self.conv2_3 = nn.Conv2d(96, 96, (3, 3), padding=1)
self.conv2_3_bn = nn.BatchNorm2d(96)
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3_1 = nn.Conv2d(96, 192, (3, 3), padding=1)
self.conv3_1_bn = nn.BatchNorm2d(192)
self.conv3_2 = nn.Conv2d(192, 192, (3, 3), padding=1)
self.conv3_2_bn = nn.BatchNorm2d(192)
self.conv3_3 = nn.Conv2d(192, 192, (3, 3), padding=1)
self.conv3_3_bn = nn.BatchNorm2d(192)
self.pool3 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.fc4 = nn.Linear(192, 192)
self.fc5 = nn.Linear(192, n_classes)
def forward(self, x):
x = F.relu(self.conv1_1_bn(self.conv1_1(x)))
x = self.pool1(F.relu(self.conv1_2_bn(self.conv1_2(x))))
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = F.relu(self.conv2_2_bn(self.conv2_2(x)))
x = self.pool2(F.relu(self.conv2_3_bn(self.conv2_3(x))))
x = F.relu(self.conv3_1_bn(self.conv3_1(x)))
x = F.relu(self.conv3_2_bn(self.conv3_2(x)))
x = self.pool3(F.relu(self.conv3_3_bn(self.conv3_3(x))))
x = F.avg_pool2d(x, 4)
x = x.view(-1, 192)
x = self.drop1(x)
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
@architecture('rgb-128-256-down-gp', (3, 32, 32))
class RGB_128_256_down_gp(nn.Module):
def __init__(self, n_classes):
super(RGB_128_256_down_gp, self).__init__()
self.conv1_1 = nn.Conv2d(3, 128, (3, 3), padding=1)
self.conv1_1_bn = nn.BatchNorm2d(128)
self.conv1_2 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv1_2_bn = nn.BatchNorm2d(128)
self.conv1_3 = nn.Conv2d(128, 128, (3, 3), padding=1)
self.conv1_3_bn = nn.BatchNorm2d(128)
self.pool1 = nn.MaxPool2d((2, 2))
self.drop1 = nn.Dropout()
self.conv2_1 = nn.Conv2d(128, 256, (3, 3), padding=1)
self.conv2_1_bn = nn.BatchNorm2d(256)
self.conv2_2 = nn.Conv2d(256, 256, (3, 3), padding=1)
self.conv2_2_bn = nn.BatchNorm2d(256)
self.conv2_3 = nn.Conv2d(256, 256, (3, 3), padding=1)
self.conv2_3_bn = nn.BatchNorm2d(256)
self.pool2 = nn.MaxPool2d((2, 2))
self.drop2 = nn.Dropout()
self.conv3_1 = nn.Conv2d(256, 512, (3, 3), padding=0)
self.conv3_1_bn = nn.BatchNorm2d(512)
self.nin3_2 = nn.Conv2d(512, 256, (1, 1), padding=1)
self.nin3_2_bn = nn.BatchNorm2d(256)
self.nin3_3 = nn.Conv2d(256, 128, (1, 1), padding=1)
self.nin3_3_bn = nn.BatchNorm2d(128)
self.fc4 = nn.Linear(128, n_classes)
def forward(self, x):
x = F.relu(self.conv1_1_bn(self.conv1_1(x)))
x = F.relu(self.conv1_2_bn(self.conv1_2(x)))
x = self.pool1(F.relu(self.conv1_3_bn(self.conv1_3(x))))
x = self.drop1(x)
x = F.relu(self.conv2_1_bn(self.conv2_1(x)))
x = F.relu(self.conv2_2_bn(self.conv2_2(x)))
x = self.pool2(F.relu(self.conv2_3_bn(self.conv2_3(x))))
x = self.drop2(x)
x = F.relu(self.conv3_1_bn(self.conv3_1(x)))
x = F.relu(self.nin3_2_bn(self.nin3_2(x)))
x = F.relu(self.nin3_3_bn(self.nin3_3(x)))
x = F.avg_pool2d(x, 6)
x = x.view(-1, 128)
x = self.fc4(x)
return x
@architecture('rgb40-48-96-192-384-gp', (3, 40, 40))
class RGB40_48_96_192_384_gp(nn.Module):
def __init__(self, n_classes):
super(RGB40_48_96_192_384_gp, self).__init__()
self.conv1_1 = nn.Conv2d(3, 48, (3, 3), padding=1)
self.conv1_1_bn = nn.BatchNorm2d(48)
self.conv1_2 = nn.Conv2d(48, 48, (3, 3), padding=1)
self.conv1_2_bn = nn.BatchNorm2d(48)
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2_1 = nn.Conv2d(48, 96, (3, 3), padding=1)
self.conv2_1_bn = nn.BatchNorm2d(96)
self.conv2_2 = nn.Conv2d(96, 96, (3, 3), padding=1)
self.conv2_2_bn = nn.BatchNorm2d(96)
self.conv2_3 = nn.Conv2d(96, 96, (3, 3), padding=1)
self.conv2_3_bn = nn.BatchNorm2d(96)
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3_1 = nn.Conv2d(96, 192, (3, 3), padding=1)
self.conv3_1_bn = nn.BatchNorm2d(192)
self.conv3_2 = nn.Conv2d(192, 192, (3, 3), padding=1)
self.conv3_2_bn = nn.BatchNorm2d(192)
self.conv3_3 = nn.Conv2d(192, 192, (3, 3), padding=1)
self.conv3_3_bn = nn.BatchNorm2d(192)
self.pool3 = nn.MaxPool2d((2, 2))
self.conv4_1 = nn.Conv2d(192, 384, (3, 3), padding=1)
self.conv4_1_bn = nn.BatchNorm2d(384)
self.conv4_2 = nn.Conv2d(384, 384, (3, 3))
self.conv4_2_bn = nn.BatchNorm2d(384)
self.drop1 = nn.Dropout()
self.fc5 = nn.Linear(384, 384)
self.fc6 = nn.Linear(384, n_classes)
def forward(self, x):
x = | |
#!/usr/bin/env python
"""
Represent connectivity pattern using pandas DataFrame.
"""
from collections import OrderedDict
import itertools
import re
from future.utils import iteritems
from past.builtins import basestring
import networkx as nx
import numpy as np
import pandas as pd
from .plsel import Selector, SelectorMethods
from .pm import BasePortMapper
class Interface(object):
"""
Container for a set of ports comprising one or more interfaces.
This class contains information about a set of ports specified by
path-like identifiers and the attributes associated with them.
By default, each port must have at least the following attributes;
other attributes may be added:
- interface - indicates which interface a port is associated with.
- io - indicates whether the port receives input ('in') or
emits output ('out').
- type - indicates whether the port emits/receives spikes or
graded potentials.
All port identifiers in an interface must be unique. For two interfaces
to be deemed compatible, they must contain the same port identifiers and
their identifiers' 'io' attributes must be the inverse of each other
(i.e., every 'in' port in one interface must be mirrored by an 'out' port
in the other interface).
Examples
--------
>>> i = Interface('/foo[0:4],/bar[0:3]')
>>> i['/foo[0:2]', 'interface', 'io', 'type'] = [0, 'in', 'spike']
>>> i['/foo[2:4]', 'interface', 'io', 'type'] = [1, 'out', 'spike']
Attributes
----------
data : pandas.DataFrame
Port attribute data.
index : pandas.MultiIndex
Index of port identifiers.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., 'foo[0:2]') or sequence of token
sequences (e.g., [['foo', (0, 2)]]) describing the port
identifiers comprised by the interface.
columns : list, default = ['interface', 'io', 'type']
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, selector='', columns=['interface', 'io', 'type']):
# All ports in an interface must contain at least the following
# attributes:
assert set(columns).issuperset(['interface', 'io', 'type'])
self.sel = SelectorMethods()
assert not(self.sel.is_ambiguous(selector))
self.num_levels = self.sel.max_levels(selector)
names = [i for i in range(self.num_levels)]
idx = self.sel.make_index(selector, names)
self.__validate_index__(idx)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
# Dictionary containing mappers for different port types:
self.pm = {}
def __validate_index__(self, idx):
"""
Raise an exception if the specified index will result in an invalid interface.
"""
if idx.duplicated().any():
raise ValueError('Duplicate interface index entries detected.')
def __getitem__(self, key):
if type(key) == tuple and len(key) > 1:
return self.sel.select(self.data[list(key[1:])], key[0])
else:
return self.sel.select(self.data, key)
def __setitem__ambiguous__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Ensure that the specified selector can actually be used against the
# Interface's internal DataFrame:
try:
idx = self.sel.get_index(self.data, selector,
names=self.data.index.names)
except ValueError:
raise ValueError('cannot create index with '
'selector %s and column names %s' \
% (selector, str(self.data.index.names)))
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
for k, v in iteritems(data):
self.data[k].loc[idx] = v
def __setitem__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Fall back to slower method if the selector is ambiguous:
if self.sel.is_ambiguous(selector):
self.__setitem__ambiguous__(key, value)
return
else:
selector = Selector(selector)
# Don't waste time trying to do anything if the selector is empty:
if not selector.nonempty:
return
# If the number of specified identifiers doesn't exceed the size of the
# data array, enlargement by specifying identifiers that are not in
# the index will not occur:
assert len(selector) <= len(self.data)
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
if selector.max_levels == 1:
s = [i for i in itertools.chain(*selector.expanded)]
else:
s = self.sel.pad_selector(selector.expanded,
len(self.index.levshape))
for k, v in iteritems(data):
self.data[k].loc[s] = v
@property
def index(self):
"""
Interface index.
"""
return self.data.index
@index.setter
def index(self, i):
self.data.index = i
@property
def interface_ids(self):
"""
Interface identifiers.
"""
return set(self.data['interface'])
@property
def io_inv(self):
"""
Returns new Interface instance with inverse input-output attributes.
Returns
-------
i : Interface
Interface instance whose 'io' attributes are the inverse of those of
the current instance.
"""
data_inv = self.data.copy()
f = lambda x: 'out' if x == 'in' else \
('in' if x == 'out' else x)
data_inv['io'] = data_inv['io'].apply(f)
return self.from_df(data_inv)
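# Illustrative note (mirrors the class-level example above): for an interface
# whose '/foo[0:2]' ports are 'in' and '/foo[2:4]' ports are 'out', `io_inv`
# returns a copy in which those 'io' values are swapped, which is the form
# needed when checking compatibility against a partner interface.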
@property
def idx_levels(self):
"""
Number of levels in Interface index.
"""
if isinstance(self.data.index, pd.MultiIndex):
return len(self.index.levels)
else:
return 1
def clear(self):
"""
Clear all ports in class instance.
"""
self.data.drop(self.data.index, inplace=True)
def data_select(self, f, inplace=False):
"""
Restrict Interface data with a selection function.
Returns an Interface instance containing only those rows
whose data is passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single dict argument whose keys
are the Interface's data column names.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing data selected by `f`.
"""
assert callable(f)
result = self.data[f(self.data)]
if inplace:
self.data = result
return self
else:
return Interface.from_df(result)
@classmethod
def from_df(cls, df):
"""
Create an Interface from a properly formatted DataFrame.
Examples
--------
>>> import plsel, pattern
>>> import pandas
>>> idx = plsel.SelectorMethods.make_index('/foo[0:2]')
>>> data = [[0, 'in', 'spike'], [1, 'out', 'gpot']]
>>> columns = ['interface', 'io', 'type']
>>> df = pandas.DataFrame(data, index=idx, columns=columns)
>>> i = pattern.Interface.from_df(df)
Parameters
----------
df : pandas.DataFrame
DataFrame with a MultiIndex and data columns 'interface',
'io', and 'type' (additional columns may also be present).
Returns
-------
i : Interface
Generated Interface instance.
Notes
-----
The contents of the specified DataFrame instance are copied into the
new Interface instance.
"""
assert set(df.columns).issuperset(['interface', 'io', 'type'])
if isinstance(df.index, pd.MultiIndex):
if len(df.index):
i = cls(df.index.tolist(), df.columns)
else:
i = cls([()], df.columns)
elif isinstance(df.index, pd.Index):
if len(df.index):
i = cls([(s,) for s in df.index.tolist()], df.columns)
else:
i = cls([()], df.columns)
else:
raise ValueError('invalid index type')
i.data = df.copy()
i.__validate_index__(i.index)
return i
@classmethod
def from_csv(cls, file_name, **kwargs):
"""
Create an Interface from a properly formatted CSV file.
Parameters
----------
file_name : str
File name of CSV file containing interface data.
kwargs : dict
Options to pass to `DataFrame.from_csv()`
Returns
-------
i : Interface
Generated Interface instance.
"""
df = pd.DataFrame.from_csv(file_name, **kwargs)
return cls.from_df(df)
@classmethod
def from_dict(cls, d):
"""
Create an Interface from a dictionary of selectors and data values.
Examples
--------
>>> d = {'/foo[0]': [0, 'in', 'gpot'], '/foo[1]': [1, 'in', 'gpot']}
>>> i = Interface.from_dict(d)
Parameters
----------
d : dict
Dictionary that maps selectors to the data that should be associated
with the corresponding ports. If a scalar, the data is assigned to
the first attribute; if an iterable, the data is assigned to the
attributes in order.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(','.join(d.keys()))
for k, v in iteritems(d):
i[k] = v
i.data.sort_index(inplace=True)
return i
@classmethod
def from_graph(cls, g):
"""
Create an Interface from a NetworkX graph.
Examples
--------
>>> import networkx as nx
>>> g = nx.Graph()
>>> g.add_node('/foo[0]', interface=0, io='in', type='gpot')
>>> g.add_node('/foo[1]', interface=0, io='in', type='gpot')
>>> i = Interface.from_graph(g)
Parameters
----------
g : networkx.Graph
Graph whose node IDs are path-like port identifiers. The node attributes
are assigned to the ports.
Returns
-------
i : Interface
Generated interface instance.
"""
assert | |
1,
f"Pearson:{pearson:0.3f} r^2:{r2:0.3f} R^2:{big_r2:0.3f}",
verticalalignment="bottom",
transform=axes[row, col].transAxes,
)
row += 1
if row == rows:
row = 0
col += 1
if col >= cols:
break
figure_path = os.path.join(plot_path, f"scatters_together_{data_split}{image_ext}")
if not os.path.exists(os.path.dirname(figure_path)):
os.makedirs(os.path.dirname(figure_path))
plt.savefig(figure_path)
plt.close()
logging.info(f"{data_split} split: saved scatters together at: {figure_path}")
def _plot_ecg_text(
data: Dict[str, Union[np.ndarray, str, Dict]],
fig: plt.Figure,
w: float,
h: float,
) -> None:
# top text
dt = datetime.strptime(data["datetime"], ECG_DATETIME_FORMAT)
dob = data["dob"]
if dob != "":
dob = datetime.strptime(dob, ECG_DATE_FORMAT)
dob = f"{dob:%d-%b-%Y}".upper()
age = -1
if not np.isnan(data["age"]):
age = int(data["age"])
fig.text(
0.17 / w,
8.04 / h,
f"{data['lastname']}, {data['firstname']}",
weight="bold",
)
fig.text(3.05 / w, 8.04 / h, f"ID:{data['patientid']}", weight="bold")
fig.text(4.56 / w, 8.04 / h, f"{dt:%d-%b-%Y %H:%M:%S}".upper(), weight="bold")
fig.text(6.05 / w, 8.04 / h, f"{data['sitename']}", weight="bold")
fig.text(0.17 / w, 7.77 / h, f"{dob} ({age} yr)", weight="bold")
fig.text(0.17 / w, 7.63 / h, f"{data['sex']}".title(), weight="bold")
fig.text(0.17 / w, 7.35 / h, "Room: ", weight="bold")
fig.text(0.17 / w, 7.21 / h, f"Loc: {data['location']}", weight="bold")
fig.text(2.15 / w, 7.77 / h, "Vent. rate", weight="bold")
fig.text(2.15 / w, 7.63 / h, "PR interval", weight="bold")
fig.text(2.15 / w, 7.49 / h, "QRS duration", weight="bold")
fig.text(2.15 / w, 7.35 / h, "QT/QTc", weight="bold")
fig.text(2.15 / w, 7.21 / h, "P-R-T axes", weight="bold")
fig.text(3.91 / w, 7.77 / h, f"{int(data['rate_md'])}", weight="bold", ha="right")
fig.text(3.91 / w, 7.63 / h, f"{int(data['pr_md'])}", weight="bold", ha="right")
fig.text(3.91 / w, 7.49 / h, f"{int(data['qrs_md'])}", weight="bold", ha="right")
fig.text(
3.91 / w,
7.35 / h,
f"{int(data['qt_md'])}/{int(data['qtc_md'])}",
weight="bold",
ha="right",
)
fig.text(
3.91 / w,
7.21 / h,
f"{int(data['paxis_md'])} {int(data['raxis_md'])}",
weight="bold",
ha="right",
)
fig.text(4.30 / w, 7.77 / h, "BPM", weight="bold", ha="right")
fig.text(4.30 / w, 7.63 / h, "ms", weight="bold", ha="right")
fig.text(4.30 / w, 7.49 / h, "ms", weight="bold", ha="right")
fig.text(4.30 / w, 7.35 / h, "ms", weight="bold", ha="right")
fig.text(4.30 / w, 7.21 / h, f"{int(data['taxis_md'])}", weight="bold", ha="right")
fig.text(4.75 / w, 7.21 / h, f"{data['read_md']}", wrap=True, weight="bold")
fig.text(1.28 / w, 6.65 / h, f"Technician: {''}", weight="bold")
fig.text(1.28 / w, 6.51 / h, f"Test ind: {''}", weight="bold")
fig.text(4.75 / w, 6.25 / h, f"Referred by: {''}", weight="bold")
fig.text(7.63 / w, 6.25 / h, f"Electronically Signed By: {''}", weight="bold")
def _plot_ecg_full(voltage: Dict[str, np.ndarray], ax: plt.Axes) -> None:
full_voltage = np.full((12, 2500), np.nan)
for i, lead in enumerate(voltage):
full_voltage[i] = voltage[lead]
# convert voltage to millivolts
full_voltage /= 1000
# calculate space between leads
min_y, max_y = ax.get_ylim()
y_offset = (max_y - min_y) / len(voltage)
text_xoffset = 5
text_yoffset = -0.01
# plot signal and add labels
for i, lead in enumerate(voltage):
this_offset = (len(voltage) - i - 0.5) * y_offset
ax.plot(full_voltage[i] + this_offset, color="black", linewidth=0.375)
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
lead,
ha="left",
va="top",
weight="bold",
)
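# Quick worked example of the lane-offset math above (assuming the ~14.1 mV y-limit
# that _plot_ecg_figure below sets before calling this function): with 12 leads,
# y_offset = 14.1 / 12 = 1.175 mV per lane, so lead 0 ("I") is drawn around
# (12 - 0 - 0.5) * 1.175 = 13.5 mV and the last lead around 0.5 * 1.175 = 0.59 mV.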
def _plot_ecg_clinical(voltage: Dict[str, np.ndarray], ax: plt.Axes) -> None:
# get voltage in clinical chunks
clinical_voltage = np.full((6, 2500), np.nan)
halfgap = 5
clinical_voltage[0][0 : 625 - halfgap] = voltage["I"][0 : 625 - halfgap]
clinical_voltage[0][625 + halfgap : 1250 - halfgap] = voltage["aVR"][
625 + halfgap : 1250 - halfgap
]
clinical_voltage[0][1250 + halfgap : 1875 - halfgap] = voltage["V1"][
1250 + halfgap : 1875 - halfgap
]
clinical_voltage[0][1875 + halfgap : 2500] = voltage["V4"][1875 + halfgap : 2500]
clinical_voltage[1][0 : 625 - halfgap] = voltage["II"][0 : 625 - halfgap]
clinical_voltage[1][625 + halfgap : 1250 - halfgap] = voltage["aVL"][
625 + halfgap : 1250 - halfgap
]
clinical_voltage[1][1250 + halfgap : 1875 - halfgap] = voltage["V2"][
1250 + halfgap : 1875 - halfgap
]
clinical_voltage[1][1875 + halfgap : 2500] = voltage["V5"][1875 + halfgap : 2500]
clinical_voltage[2][0 : 625 - halfgap] = voltage["III"][0 : 625 - halfgap]
clinical_voltage[2][625 + halfgap : 1250 - halfgap] = voltage["aVF"][
625 + halfgap : 1250 - halfgap
]
clinical_voltage[2][1250 + halfgap : 1875 - halfgap] = voltage["V3"][
1250 + halfgap : 1875 - halfgap
]
clinical_voltage[2][1875 + halfgap : 2500] = voltage["V6"][1875 + halfgap : 2500]
clinical_voltage[3] = voltage["V1"]
clinical_voltage[4] = voltage["II"]
clinical_voltage[5] = voltage["V5"]
voltage = clinical_voltage
# convert voltage to millivolts
voltage /= 1000
# calculate space between leads
min_y, max_y = ax.get_ylim()
y_offset = (max_y - min_y) / len(voltage)
text_xoffset = 5
text_yoffset = -0.1
# plot signal and add labels
for i, _ in enumerate(voltage):
this_offset = (len(voltage) - i - 0.5) * y_offset
ax.plot(voltage[i] + this_offset, color="black", linewidth=0.375)
if i == 0:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"I",
ha="left",
va="top",
weight="bold",
)
ax.text(
625 + text_xoffset,
this_offset + text_yoffset,
"aVR",
ha="left",
va="top",
weight="bold",
)
ax.text(
1250 + text_xoffset,
this_offset + text_yoffset,
"V1",
ha="left",
va="top",
weight="bold",
)
ax.text(
1875 + text_xoffset,
this_offset + text_yoffset,
"V4",
ha="left",
va="top",
weight="bold",
)
elif i == 1:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"II",
ha="left",
va="top",
weight="bold",
)
ax.text(
625 + text_xoffset,
this_offset + text_yoffset,
"aVL",
ha="left",
va="top",
weight="bold",
)
ax.text(
1250 + text_xoffset,
this_offset + text_yoffset,
"V2",
ha="left",
va="top",
weight="bold",
)
ax.text(
1875 + text_xoffset,
this_offset + text_yoffset,
"V5",
ha="left",
va="top",
weight="bold",
)
elif i == 2:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"III",
ha="left",
va="top",
weight="bold",
)
ax.text(
625 + text_xoffset,
this_offset + text_yoffset,
"aVF",
ha="left",
va="top",
weight="bold",
)
ax.text(
1250 + text_xoffset,
this_offset + text_yoffset,
"V3",
ha="left",
va="top",
weight="bold",
)
ax.text(
1875 + text_xoffset,
this_offset + text_yoffset,
"V6",
ha="left",
va="top",
weight="bold",
)
elif i == 3:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"V1",
ha="left",
va="top",
weight="bold",
)
elif i == 4:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"II",
ha="left",
va="top",
weight="bold",
)
elif i == 5:
ax.text(
0 + text_xoffset,
this_offset + text_yoffset,
"V5",
ha="left",
va="top",
weight="bold",
)
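# Summary of the clinical layout assembled above: the first three lanes each hold four
# 2.5 s segments separated by 2*halfgap blank samples (lane 0: I, aVR, V1, V4;
# lane 1: II, aVL, V2, V5; lane 2: III, aVF, V3, V6), and the last three lanes are
# full 10 s rhythm strips of V1, II, and V5.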
def _plot_ecg_figure(
patient_id: int,
data: Dict[str, Union[np.ndarray, str, float]],
plot_signal_function: Callable[[Dict[str, np.ndarray], plt.Axes], None],
plot_mode: str,
output_folder: str,
image_ext: str,
) -> str:
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = 9.5
w, h = 11, 8.5
fig = plt.figure(figsize=(w, h), dpi=100)
# patient info and ecg text
_plot_ecg_text(data, fig, w, h)
# define plot area in inches
left = 0.17
bottom = h - 7.85
width = w - 2 * left
height = h - bottom - 2.3
# ecg plot area
ax = fig.add_axes([left / w, bottom / h, width / w, height / h])
# voltage is in microvolts
# the entire plot area is 5.55 inches tall, 10.66 inches wide (141 mm, 271 mm)
# the resolution on the y-axis is 10 mm/mV
# the resolution on the x-axis is 25 mm/s
inch2mm = lambda inches: inches * 25.4
# 1. set y-limit to max 14.1 mV
y_res = 10 # mm/mV
max_y = inch2mm(height) / y_res
min_y = 0
ax.set_ylim(min_y, max_y)
# 2. set x-limit to max 10.8 s, center 10 s leads
sampling_frequency = 250 # Hz
x_res = 25 # mm/s
max_x = inch2mm(width) / x_res
x_buffer = (max_x - 10) / 2
max_x -= x_buffer
min_x = -x_buffer
max_x *= sampling_frequency
min_x *= sampling_frequency
ax.set_xlim(min_x, max_x)
# 3. set ticks for every 0.1 mV or every 1/25 s
y_tick = 1 / y_res
x_tick = 1 / x_res * sampling_frequency
x_major_ticks = np.arange(min_x, max_x, x_tick * 5)
x_minor_ticks = np.arange(min_x, max_x, x_tick)
y_major_ticks = np.arange(min_y, max_y, y_tick * 5)
y_minor_ticks = np.arange(min_y, max_y, y_tick)
ax.set_xticks(x_major_ticks)
ax.set_xticks(x_minor_ticks, minor=True)
ax.set_yticks(y_major_ticks)
ax.set_yticks(y_minor_ticks, minor=True)
ax.tick_params(
which="both",
left=False,
bottom=False,
labelleft=False,
labelbottom=False,
)
ax.grid(b=True, color="r", which="major", lw=0.5)
ax.grid(b=True, color="r", which="minor", lw=0.2)
# signal plot
voltage = data["2500"]
plot_signal_function(voltage, ax)
# bottom text
fig.text(
0.17 / w,
0.46 / h,
f"{x_res}mm/s {y_res}mm/mV {sampling_frequency}Hz",
ha="left",
va="center",
weight="bold",
)
# save both pdf and image
title = re.sub(r"[:/. ]", "", f'{patient_id}_{data["datetime"]}_{plot_mode}')
fpath = os.path.join(output_folder, f"{title}{image_ext}")
plt.savefig(fpath)
plt.close(fig)
return fpath
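# Worked example of the axis scaling above (approximate values): with w, h = 11, 8.5 in,
# bottom = 8.5 - 7.85 = 0.65 in and height = 8.5 - 0.65 - 2.3 = 5.55 in = ~141 mm,
# so max_y = 141 / 10 mm/mV = ~14.1 mV; width = 11 - 2*0.17 = 10.66 in = ~270.8 mm,
# so max_x = 270.8 / 25 mm/s = ~10.83 s, x_buffer = ~0.42 s, and after multiplying by
# the 250 Hz sampling rate the x-limits are roughly [-104, 2604] samples, which centers
# the 2500-sample (10 s) strips on the grid.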
def plot_ecg(args):
plot_tensors = [
"ecg_patientid",
"ecg_firstname",
"ecg_lastname",
"ecg_sex",
"ecg_dob",
"ecg_age",
"ecg_datetime",
"ecg_sitename",
"ecg_location",
"ecg_read_md",
"ecg_taxis_md",
"ecg_rate_md",
"ecg_pr_md",
"ecg_qrs_md",
"ecg_qt_md",
"ecg_paxis_md",
"ecg_raxis_md",
"ecg_qtc_md",
]
voltage_tensor = "12_lead_ecg_2500"
needed_tensors = plot_tensors + [voltage_tensor]
tmaps = {}
for needed_tensor in needed_tensors:
tmaps = update_tmaps(needed_tensor, tmaps)
tensor_maps_in = [tmaps[it] for it in needed_tensors]
if args.plot_mode == "clinical":
plot_signal_function = _plot_ecg_clinical
elif args.plot_mode == "full":
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import queue
import threading
import subprocess
import datetime
import time
import codecs
import glob
#print(os.path.dirname(__file__))
#print(os.path.basename(__file__))
#print(sys.version_info)
qApiCV = 'google'
qApiOCR = qApiCV
qApiTrn = qApiCV
qLangCV = 'ja'
qLangOCR = qLangCV
qLangTrn = 'en'
qPathDetect= 'temp/v3_1detect/'
qPathPhoto = 'temp/v3_2photo/'
qPathCv = 'temp/v3_3cv_txt/'
qPathRec = 'temp/v3_8recorder/'
qPathWork = 'temp/v3_9work/'
qPathTTS = 'temp/a3_5tts_txt/'
qCtrlResultCV = 'temp/temp_resultCV.txt'
qCtrlResultOCR = 'temp/temp_resultOCR.txt'
qCtrlResultTrn = 'temp/temp_resultTrn.txt'
# google image recognition and OCR
import vision_api_google as google_api
import vision_api_google_key as google_key
# google machine translation
import speech_api_google as a_google_api
import speech_api_google_key as a_google_key
# azure image recognition and OCR
import vision_api_azure as azure_api
import vision_api_azure_key as azure_key
# azure translation
import speech_api_azure as a_azure_api
import speech_api_azure_key as a_azure_key
def qVisionCV(useApi='google', inpLang='ja', inpFile='vision__cv_photo.jpg', tmpFile='temp_cv_photo.jpg', apiRecovery=False,):
resText = ''
resApi = ''
resAry = None
resLM = ''
api = useApi
if (api != 'free') and (api != 'google') \
and (api != 'azure'):
api = 'google'
if (resText == '') and (api == 'azure'):
azureAPI = azure_api.VisionAPI()
res = azureAPI.authenticate('cv' ,
azure_key.getkey('cv' ,'url'),
azure_key.getkey('cv' ,'key'), )
if (res == True):
res = azureAPI.convert(inpImage=inpFile, outImage=tmpFile, bw=False, )
if (res == True):
res, api = azureAPI.cv(inpImage=tmpFile, inpLang=inpLang, )
if (not res is None):
#print(res)
if (res['captions'] != ''):
resText = res['captions']
resLM = resText
else:
resText = res['categories']
resApi = api
resAry = []
if (res['captions'] != ''):
resAry.append('[captions] ' + inpLang + ' (' + api + ')')
resAry.append(' ' + res['captions'])
resAry.append('')
if (res['categories'] != ''):
resAry.append('[categories] ' + inpLang + ' (' + api + ')')
resAry.append(' ' + res['categories'])
resAry.append('')
if (res['description'] != ''):
resAry.append('[description] ' + inpLang + ' (' + api + ')')
resAry.append(' ' + res['description'])
resAry.append('')
if (resText == '') and (apiRecovery == True):
api = 'free'
if (resText == '') and ((api == 'free') or (api == 'google')):
googleAPI = google_api.VisionAPI()
res = googleAPI.authenticate('cv' ,
google_key.getkey('cv' ,'url'),
google_key.getkey('cv' ,'key'), )
if (res == True):
res = googleAPI.convert(inpImage=inpFile, outImage=tmpFile, bw=False, )
if (res == True):
res, api = googleAPI.cv(inpImage=tmpFile, inpLang=inpLang, )
if (not res is None):
#print(res)
if (res['landmark'] != ''):
resText = res['landmark']
resLM = resText
else:
resText = res['label']
resApi = api
resAry = []
if (res['landmark'] != ''):
resAry.append('[landmark] ' + inpLang + ' (' + api + ')')
resAry.append(' ' + res['landmark'])
resAry.append('')
if (res['label'] != ''):
resAry.append('[label] ' + inpLang + ' (' + api + ')')
resAry.append(' ' + res['label'])
resAry.append('')
if (resText != ''):
return resText, resApi, resAry, resLM
return '', '', None, ''
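# A minimal usage sketch of qVisionCV (file names are the function defaults; the real
# caller in the __main__ block below takes them from sys.argv instead):
#
#   text, api, lines, landmark = qVisionCV('google', 'ja', 'vision__cv_photo.jpg', 'temp_cv_photo.jpg')
#   if text != '':
#       print('CV result (' + api + '): ' + text)
#       for line in (lines or []):
#           print(line)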
def qVisionOCR(useApi='google', inpLang='ja', inpFile='vision__ocr_photo.jpg', tmpFile='temp_ocr_photo.jpg', apiRecovery=False,):
resText = ''
resApi = ''
resAry = None
api = useApi
if (api != 'free') and (api != 'google') \
and (api != 'azure'):
api = 'google'
if (resText == '') and (api == 'azure'):
azureAPI = azure_api.VisionAPI()
res = azureAPI.authenticate('ocr' ,
azure_key.getkey('ocr' ,'url'),
azure_key.getkey('ocr' ,'key'), )
if (res == True):
res = azureAPI.convert(inpImage=inpFile, outImage=tmpFile, bw=True, )
if (res == True):
res, api = azureAPI.ocr(inpImage=tmpFile, inpLang=inpLang, )
if (not res is None):
#print(res)
resText = ''
resApi = api
resAry = []
if (len(res) > 0):
resAry.append('[OCR] ' + inpLang + ' (' + api + ')')
for text in res:
resAry.append(' ' + text)
resText += ' ' + text
resText = str(resText).strip()
if (resText == '') and ((api == 'free') or (api == 'google')):
googleAPI = google_api.VisionAPI()
res = googleAPI.authenticate('ocr' ,
google_key.getkey('ocr' ,'url'),
google_key.getkey('ocr' ,'key'), )
if (res == True):
res = googleAPI.convert(inpImage=inpFile, outImage=tmpFile, bw=True, )
if (res == True):
res, api = googleAPI.ocr(inpImage=tmpFile, inpLang=inpLang, )
if (not res is None):
#print(res)
resText = ''
resApi = api
resAry = []
if (len(res) > 0):
resAry.append('[OCR] ' + inpLang + ' (' + api + ')')
for text in res:
resAry.append(' ' + text)
resText += ' ' + text
resText = str(resText).strip()
if (resText != ''):
return resText, resApi, resAry
return '', '', None
def qOCR2Trn(useApi='google', inpLang='ja', inpAry=['Hallo'], trnLang='en', apiRecovery=False,):
resText = ''
resApi = ''
resAry = None
api = useApi
if (api != 'free') and (api != 'google') \
and (api != 'azure'):
api = 'google'
if (resText == '') and (api == 'azure'):
a_azureAPI = a_azure_api.SpeechAPI()
ver, key = a_azure_key.getkey('tra')
        a_res = a_azureAPI.authenticate('tra', ver, key, )
if (a_res == True):
resAry = []
resAry.append('[Translate] ' + trnLang + ' (' + api + ')')
l = 0
for text in inpAry:
l+=1
if ( l>1 ):
outText, api = a_azureAPI.translate(inpText=text, inpLang=inpLang, outLang=trnLang, )
if (outText != ''):
text = outText
resApi = api
resAry.append(text)
resText += str(text) + ','
if (resText == '') and ((api == 'free') or (api == 'google')):
a_googleAPI = a_google_api.SpeechAPI()
a_res = a_googleAPI.authenticate('tra', a_google_key.getkey('tra'), )
if (a_res == True):
resAry = []
resAry.append('[Translate] ' + trnLang + ' (' + api + ')')
l = 0
for text in inpAry:
l+=1
if ( l>1 ):
outText, api = a_googleAPI.translate(inpText=text, inpLang=inpLang, outLang=trnLang, )
if (outText != ''):
text = outText
resApi = api
resAry.append(text)
resText += str(text) + ','
if (resText != ''):
return resText, resApi, resAry
return '', '', None
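# A minimal sketch of chaining OCR and translation with the functions above (same
# default file names). Note that the first element of the OCR result list is the
# '[OCR] ...' header line, which is why qOCR2Trn skips l == 1 when translating:
#
#   ocr_text, ocr_api, ocr_lines = qVisionOCR('google', 'ja', 'vision__ocr_photo.jpg', 'temp_ocr_photo.jpg')
#   if ocr_text != '':
#       trn_text, trn_api, trn_lines = qOCR2Trn('google', 'ja', ocr_lines, 'en')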
def qMakeDirs(pPath, pRemove=False):
#try:
if (len(pPath) > 0):
path=pPath.replace('\\', '/')
if (path[-1:] != '/'):
path += '/'
if (not os.path.isdir(path[:-1])):
os.makedirs(path[:-1])
else:
if (pRemove == True):
files = glob.glob(path + '*')
for f in files:
try:
os.remove(f)
except:
pass
#except:
#pass
qLogNow=datetime.datetime.now()
qLogFile = 'temp/_log/' + qLogNow.strftime('%Y%m%d-%H%M%S') + '_' + os.path.basename(__file__) + '.log'
def qLogOutput(pLogText='', pDisplay=False, pOutfile=True):
#try:
if (pDisplay == True):
print(str(pLogText))
if (pOutfile == True):
        w = codecs.open(qLogFile, 'a', 'utf-8')
w.write(str(pLogText) + '\n')
w.close()
w = None
#except:
#pass
if (__name__ == '__main__'):
qMakeDirs('temp/_log/', False)
qLogOutput('___main___:init')
qLogOutput('___main___:exsample.py runMode, api..., lang..., etc..., ')
#runMode debug, ...
#api google, azure,
#lang ja, en, fr, kr...
runMode = 'debug'
camDev = '0'
procId = '00'
fileId = 'temp_sample'
inpCV = 'vision__cv_photo.jpg'
tmpCV = 'temp_cv_photo.jpg'
outCV = 'temp_cv_ja.txt'
inpOCR = 'vision__ocr_photo.jpg'
tmpOCR = 'temp_ocr_photo.jpg'
outOCR = 'temp_ocr_ja.txt'
outTrn = 'temp_ocr_en.txt'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
if (len(sys.argv) >= 3):
camDev = str(sys.argv[2]).lower()
if (len(sys.argv) >= 4):
qApiCV = str(sys.argv[3]).lower()
qApiOCR = qApiCV
qApiTrn = qApiCV
if (len(sys.argv) >= 5):
qApiOCR = str(sys.argv[4]).lower()
if (len(sys.argv) >= 6):
qApiTrn = str(sys.argv[5]).lower()
if (len(sys.argv) >= 7):
qLangCV = str(sys.argv[6]).lower()
qLangOCR = qLangCV
if (len(sys.argv) >= 8):
qLangOCR = str(sys.argv[7]).lower()
if (len(sys.argv) >= 9):
qLangTrn = str(sys.argv[8]).lower()
if (len(sys.argv) >= 10):
procId = sys.argv[9]
if (len(sys.argv) >= 11):
fileId = sys.argv[10]
if (len(sys.argv) >= 12):
inpCV = sys.argv[11]
if (len(sys.argv) >= 13):
tmpCV = sys.argv[12]
if (len(sys.argv) >= 14):
outCV = sys.argv[13]
if (len(sys.argv) >= 15):
inpOCR = sys.argv[14]
if (len(sys.argv) >= 16):
tmpOCR = sys.argv[15]
if (len(sys.argv) >= 17):
outOCR = sys.argv[16]
if (len(sys.argv) >= 18):
outTrn = sys.argv[17]
qLogOutput('')
qLogOutput('___main___:runMode =' + str(runMode ))
qLogOutput('___main___:camDev =' + str(camDev ))
qLogOutput('___main___:qApiCV =' + str(qApiCV ))
qLogOutput('___main___:qApiOCR =' + str(qApiOCR ))
qLogOutput('___main___:qApiTrn =' + str(qApiTrn ))
qLogOutput('___main___:qLangCV =' + str(qLangCV ))
qLogOutput('___main___:qLangOCR =' + str(qLangOCR ))
qLogOutput('___main___:qLangTrn =' + str(qLangTrn ))
qLogOutput('___main___:procId =' + str(procId ))
qLogOutput('___main___:fileId =' + str(fileId ))
qLogOutput('___main___:inpCV =' + str(inpCV ))
qLogOutput('___main___:tmpCV =' + str(tmpCV ))
qLogOutput('___main___:outCV =' + str(outCV ))
qLogOutput('___main___:inpOCR =' + str(inpOCR ))
qLogOutput('___main___:tmpOCR =' + str(tmpOCR ))
qLogOutput('___main___:outOCR =' + str(outOCR ))
qLogOutput('___main___:outTrn =' + str(outTrn ))
qLogOutput('')
qLogOutput('___main___:start')
if (inpCV != ''):
res,api,ary,landmark = qVisionCV(qApiCV, qLangCV, inpCV, tmpCV)
if (api == qApiCV) or (api == 'free' and qApiCV == 'google'):
qLogOutput(' ' + procId + ' Vision CV [' + res + '] ' + qLangCV + ' (' + api + ')', True)
else:
if (api != ''):
qLogOutput(' ' + procId + ' Vision CV [' + res + '] ' + qLangCV + ' (!' + api + ')', True)
else:
qLogOutput(' ' + procId + ' Vision CV [' + res + '] ' + qLangCV + ' (!' + qApiCV + ')', True)
if (res != ''):
if (outCV != ''):
try:
w = codecs.open(outCV, 'w', 'utf-8')
for text in ary:
w.write(str(text) + '\n')
#qLogOutput(str(text), True)
w.close()
w = None
except:
pass
if (qCtrlResultCV != ''):
try:
w = codecs.open(qCtrlResultCV, 'w', 'utf-8')
for text in ary:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Implementation of elliptic curves, for cryptographic applications.
#
# This module doesn't provide any way to choose a random elliptic
# curve, nor to verify that an elliptic curve was chosen randomly,
# because one can simply use NIST's standard curves.
#
# Notes from X9.62-1998 (draft):
# Nomenclature:
# - Q is a public key.
# The "Elliptic Curve Domain Parameters" include:
# - q is the "field size", which in our case equals p.
# - p is a big prime.
# - G is a point of prime order (5.1.1.1).
# - n is the order of G (5.1.1.1).
# Public-key validation (5.2.2):
# - Verify that Q is not the point at infinity.
# - Verify that X_Q and Y_Q are in [0,p-1].
# - Verify that Q is on the curve.
# - Verify that nQ is the point at infinity.
# Signature generation (5.3):
# - Pick random k from [1,n-1].
# Signature checking (5.4.2):
# - Verify that r and s are in [1,n-1].
#
# Version of 2008.11.25.
#
# Revision history:
# 2005.12.31 - Initial version.
# 2008.11.25 - Change CurveFp.is_on to contains_point.
#
# Written in 2005 by <NAME> and placed in the public domain.
def inverse_mod(a, m):
"""Inverse of a mod m."""
if a < 0 or m <= a:
a = a % m
# From <NAME> Schneier, roughly:
c, d = a, m
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod(d, c) + (c,)
uc, vc, ud, vd = ud - q * uc, vd - q * vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*m = d.
    # If d == 1, this means that ud is an inverse.
assert d == 1
if ud > 0:
return ud
else:
return ud + m
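# Quick worked example of the extended-Euclid loop above (not part of the original
# module): inverse_mod(3, 11) == 4, since 3 * 4 == 12 == 1 (mod 11).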
def modular_sqrt(a, p):
# from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
    0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
        return a % p  # a mod 2 is its own square root modulo 2
elif p % 4 == 3:
return pow(a, (p + 1) // 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
        s //= 2  # integer division keeps s an int for the pow() calls below
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to <NAME>" by <NAME> for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) // 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in range(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
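# Worked example of the branches above: for the p % 4 == 3 shortcut,
# modular_sqrt(2, 7) returns pow(2, (7 + 1) // 4, 7) == 4, and indeed
# 4 * 4 == 16 == 2 (mod 7); the other root is 7 - 4 == 3.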
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
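# Example: the squares mod 7 are {1, 2, 4}, so legendre_symbol(2, 7) == 1,
# while legendre_symbol(3, 7) == -1 because pow(3, 3, 7) == 6 == 7 - 1.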
def jacobi_symbol(n, k):
"""Compute the Jacobi symbol of n modulo k
See http://en.wikipedia.org/wiki/Jacobi_symbol
For our application k is always prime, so this is the same as the Legendre symbol."""
assert k > 0 and k & 1, "jacobi symbol is only defined for positive odd k"
n %= k
t = 0
while n != 0:
while n & 1 == 0:
n >>= 1
r = k & 7
t ^= (r == 3 or r == 5)
n, k = k, n
t ^= (n & k & 3 == 3)
n = n % k
if k == 1:
return -1 if t else 1
return 0
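# For prime k this reproduces the Legendre symbol, e.g. jacobi_symbol(2, 7) == 1.
# The reciprocity step (t ^= n & k & 3 == 3) flips the sign only when both n and k
# are congruent to 3 mod 4, matching quadratic reciprocity.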
class CurveFp(object):
"""Elliptic Curve over the field of integers modulo a prime."""
def __init__(self, p, a, b):
"""The curve of points satisfying y^2 = x^3 + a*x + b (mod p)."""
self.__p = p
self.__a = a
self.__b = b
def p(self):
return self.__p
def a(self):
return self.__a
def b(self):
return self.__b
def contains_point(self, x, y):
"""Is the point (x,y) on this curve?"""
return (y * y - (x * x * x + self.__a * x + self.__b)) % self.__p == 0
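# A small illustrative check (textbook toy parameters, not taken from this module):
# for the curve y^2 = x^3 + 2x + 2 over GF(17), the point (5, 1) satisfies
# 1^2 == 125 + 10 + 2 == 137 == 1 (mod 17), so:
#
#   toy_curve = CurveFp(17, 2, 2)
#   assert toy_curve.contains_point(5, 1)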
class Point(object):
""" A point on an elliptic curve. Altering x and y is forbidding,
but they can be read by the x() and y() methods."""
def __init__(self, curve, x, y, order=None):
"""curve, x, y, order; order (optional) is the order of this point."""
self.__curve = curve
self.__x = x
self.__y = y
self.__order = order
# self.curve is allowed to be None only for INFINITY:
if self.__curve:
assert self.__curve.contains_point(x, y)
if order:
assert self * order == INFINITY
def __eq__(self, other):
"""Return 1 if the points are identical, 0 otherwise."""
if self.__curve == other.__curve \
and self.__x == other.__x \
and self.__y == other.__y:
return 1
else:
return 0
def __add__(self, other):
"""Add one point to another point."""
# X9.62 B.3:
if other == INFINITY:
return self
if self == INFINITY:
return other
assert self.__curve == other.__curve
if self.__x == other.__x:
if (self.__y + other.__y) % self.__curve.p() == 0:
return INFINITY
else:
return self.double()
p = self.__curve.p()
l = ((other.__y - self.__y) * inverse_mod(other.__x - self.__x, p)) % p
x3 = (l * l - self.__x - other.__x) % p
y3 = (l * (self.__x - x3) - self.__y) % p
return Point(self.__curve, x3, y3)
def __sub__(self, other):
#The inverse of a point P=(xP,yP) is its reflexion across the x-axis : P′=(xP,−yP).
#If you want to compute Q−P, just replace yP by −yP in the usual formula for point addition.
# X9.62 B.3:
if other == INFINITY:
return self
if self == INFINITY:
return other
assert self.__curve == other.__curve
p = self.__curve.p()
#opi = inverse_mod(other.__y, p)
opi = -other.__y % p
#print(opi)
#print(-other.__y % p)
if self.__x == other.__x:
if (self.__y + opi) % self.__curve.p() == 0:
return INFINITY
else:
                return self.double()
l = ((opi - self.__y) * inverse_mod(other.__x - self.__x, p)) % p
x3 = (l * l - self.__x - other.__x) % p
y3 = (l * (self.__x - x3) - self.__y) % p
return Point(self.__curve, x3, y3)
def __mul__(self, e):
if self.__order:
e %= self.__order
if e == 0 or self == INFINITY:
return INFINITY
result, q = INFINITY, self
while e:
if e & 1:
result += q
e, q = e >> 1, q.double()
return result
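# The loop above is binary double-and-add: for e == 5 (0b101) it adds q into result
# on the low bit, skips the middle bit, and doubles q on every pass, so 5 * P is
# accumulated as P + 4P.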
"""
    def
be defined
"""
return pulumi.get(self, "optional")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRef(dict):
"""
The Secret to select from
"""
def __init__(__self__, *,
name: Optional[str] = None,
optional: Optional[bool] = None):
"""
The Secret to select from
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param bool optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def optional(self) -> Optional[bool]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFrom(dict):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
def __init__(__self__, *,
config_map_key_ref: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRef'] = None,
field_ref: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRef'] = None,
resource_field_ref: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRef'] = None,
secret_key_ref: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRef'] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs' config_map_key_ref: Selects a key of a ConfigMap.
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs' field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs' resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs' secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRef']:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRef']:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRef']:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRef']:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
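# For reference (not part of the generated schema): these outputs mirror the Kubernetes
# `env[].valueFrom` stanza, e.g. in YAML
#
#   env:
#     - name: MY_SETTING          # hypothetical variable name
#       valueFrom:
#         configMapKeyRef:
#           name: my-config       # hypothetical ConfigMap name
#           key: my-key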
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRef(dict):
"""
Selects a key of a ConfigMap.
"""
def __init__(__self__, *,
key: str,
name: Optional[str] = None,
optional: Optional[bool] = None):
"""
Selects a key of a ConfigMap.
:param str key: The key to select.
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param bool optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> str:
"""
The key to select.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def optional(self) -> Optional[bool]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRef(dict):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
def __init__(__self__, *,
field_path: str,
api_version: Optional[str] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param str field_path: Path of the field to select in the specified API version.
:param str api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> str:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[str]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRef(dict):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
def __init__(__self__, *,
resource: str,
container_name: Optional[str] = None,
divisor: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisor'] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param str resource: Required: resource to select
:param str container_name: Container name: required for volumes, optional for env vars
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs' divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> str:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[str]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter
def divisor(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisor']:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisor(dict):
def __init__(__self__):
pass
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRef(dict):
"""
Selects a key of a secret in the pod's namespace
"""
def __init__(__self__, *,
key: str,
name: Optional[str] = None,
optional: Optional[bool] = None):
"""
Selects a key of a secret in the pod's namespace
:param str key: The key of the secret to select from. Must be a valid secret key.
:param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param bool optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> str:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def optional(self) -> Optional[bool]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycle(dict):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
def __init__(__self__, *,
post_start: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStart'] = None,
pre_stop: Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStop'] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs' post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
        :param 'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs' pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStart']:
"""
PostStart is called immediately after a container is created. If the handler |