Dataset schema, one row per source file:

| Column | Type | Range / values |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
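Each record below is one row of this table: the metadata columns in the order listed above, then the raw `content` of the file, then the trailing numeric statistics (avg_line_length through score_documentation). A minimal sketch of how such rows could be consumed, assuming the dump has been exported to a JSON Lines file with these field names; the `rows.jsonl` filename, the export step, and the `iter_rows`/`min_doc_score` helper are assumptions for illustration, not part of this dump:

```python
import json

def iter_rows(path="rows.jsonl", min_doc_score=0.0):
    """Yield (repo name, file path, source text) for each row whose
    score_documentation value clears the given threshold."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)  # one JSON object per table row
            if row["score_documentation"] >= min_doc_score:
                yield (row["max_stars_repo_name"],
                       row["max_stars_repo_path"],
                       row["content"])

if __name__ == "__main__":
    # Example: list the better-documented files with their line counts.
    for repo, file_path, source in iter_rows(min_doc_score=0.5):
        print(f"{repo}/{file_path}: {len(source.splitlines())} lines")
```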
98e0601566ba652e64eedad746be214634e5e438 | 17,357 | py | Python | MrWorldwide.py | AnonymousHacker1279/MrWorldwide | a782194e1ebe3a1cd73409e3d4dc9946700bcc0e | ["MIT"] | null | null | null | MrWorldwide.py | AnonymousHacker1279/MrWorldwide | a782194e1ebe3a1cd73409e3d4dc9946700bcc0e | ["MIT"] | null | null | null | MrWorldwide.py | AnonymousHacker1279/MrWorldwide | a782194e1ebe3a1cd73409e3d4dc9946700bcc0e | ["MIT"] | null | null | null |
from PyQt6.QtWidgets import QApplication, QWidget, QFileDialog
import PyQt6.QtCore as QtCore
import PyQt6.QtGui as QtGui
import sys, time, json, requests, traceback, configparser, os
import MrWorldwideUI, ConfigurationUI, UpdateManagerUI
version = "v1.0.0"
class LangTypes:
ENGLISH = "English"
ARABIC = "Arabic"
CHINESE = "Chinese"
DUTCH = "Dutch"
FRENCH = "French"
GERMAN = "German"
HINDI = "Hindi"
INDONESIAN = "Indonesian"
IRISH = "Irish"
ITALIAN = "Italian"
JAPANESE = "Japanese"
KOREAN = "Korean"
POLISH = "Polish"
PORTUGUESE = "Portuguese"
RUSSIAN = "Russian"
SPANISH = "Spanish"
TURKISH = "Turkish"
UKRANIAN = "Ukranian"
VIETNAMESE = "Vietnamese"
class WorkerSignals(QtCore.QObject):
callback = QtCore.pyqtSignal(str)
class Worker(QtCore.QRunnable):
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progressCallback'] = self.signals.callback
@QtCore.pyqtSlot()
def run(self):
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
print(traceback.print_exc())
else:
self.signals.callback.emit(result)
def readConfigurationFile(config):
try:
configFile = open("config.ini")
configFile.close()
return config.read("config.ini")
except:
config['general'] = {}
config['general']['libretranslate_mirror'] = 'https://translate.astian.org/translate'
config['defaults'] = {}
config['defaults']['default_source_language'] = LangTypes.ENGLISH
config['defaults']['default_target_language'] = LangTypes.SPANISH
with open('config.ini', 'w') as configFile:
config.write(configFile)
configFile.close()
return config
class MrWorldwide(QWidget, MrWorldwideUI.Ui_Dialog, QtCore.QThread):
selectedFile = ""
selectedTargetLocation = ""
sourceFileKeys = []
sourceFileValues = []
totalLangFileLines = 0
shouldAbort = False
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/MrWorldwide.png"))
icon = QtGui.QIcon(resource_path("gui_resources/MrWorldwide.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.abortButton.clicked.connect(self.abortEvent)
self.startButton.clicked.connect(self.preTranslate)
self.openFileButton.clicked.connect(self.openFileEvent)
self.targetLocationButton.clicked.connect(self.selectFileLocationEvent)
self.configButton.clicked.connect(self.openConfiguration)
# Setup dropdown boxes
self.sourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.targetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Open the configuration GUI
def openConfiguration(self, event):
self.configurationDialog = ConfigurationDialog()
self.configurationDialog.setup(self)
self.configurationDialog.show()
# Refresh the configuration
def refreshConfiguration(self):
readConfigurationFile(self.config)
self.sourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.targetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
self.apiMirror = self.config["general"]["libretranslate_mirror"]
# Close event, for handling closing of the program
def closeEvent(self, event):
global app
self.close()
app.exit()
# Abort event, for shutting down translation functions
def abortEvent(self, event):
global shouldAbort
global totalLangFileLines
self.shouldAbort = True
self.progressBar.setValue(0)
self.progressBarLabel.setText("Idle")
self.logAction("ABORT: Translation process canceled.")
# Open file event, for selecting a language file and starting the read process
def openFileEvent(self, event):
self.totalLangFileLines = 0
self.selectedFile = QFileDialog.getOpenFileName(self, 'Select a Minecraft language file', '','JSON Files (*.json)')[0]
self.fileSelectionBox.setText(str(self.selectedFile))
self.readLangFile()
# Select output file location event, for setting the target location
def selectFileLocationEvent(self, event):
self.selectedTargetLocation = QFileDialog.getSaveFileName(self, 'Select an output location', 'target.json','JSON Files (*.json)')[0]
self.targetLocationBox.setText(str(self.selectedTargetLocation))
# Read a language file and get the keys, values, and set various content on the GUI
def readLangFile(self):
global sourceFileValues
global totalLangFileLines
self.sourceFileValues = []
self.sourceFileKeys = []
# Read input JSON and make it usable
startReadInputTime = time.time()
if self.selectedFile != "":
with open(self.selectedFile, 'r') as f:
data = json.load(f)
self.sourceFileKeys = data.keys()
for item in data:
if self.shouldAbort:
return
self.sourceFileValues.append(data[item])
self.totalLangFileLines = self.totalLangFileLines + 1
self.logAction("Reading input file took " + str(((time.time() - startReadInputTime) * 1000).__round__(3)) + " ms.")
self.langFileEntryCounter.display(self.totalLangFileLines)
self.logAction("Found " + str(self.totalLangFileLines) + " entries.")
def preTranslate(self, event):
global totalLangFileLines
global selectedFile
global selectedTargetLocation
canProceed = True
self.shouldAbort = False
if self.selectedFile == "":
self.logAction("ERROR: No language file selected.")
canProceed = False
elif self.totalLangFileLines == 0:
self.logAction("ERROR: The selected language file is empty.")
canProceed = False
elif self.selectedTargetLocation == "":
self.logAction("ERROR: No target location specified.")
canProceed = False
elif self.sourceLangBox.currentText() == self.targetLangBox.currentText():
self.logAction("ERROR: Target language is the same as the source")
canProceed = False
if canProceed:
self.logAction("Beginning translations with a source language of " + self.sourceLangBox.currentText() + " and a target language of " + self.targetLangBox.currentText())
self.logAction("Using LibreTranslate mirror: " + self.config["general"]["libretranslate_mirror"])
self.disableButtonsDuringTranslations()
self.threadpool = QtCore.QThreadPool()
self.worker = Worker(self.startTranslations)
self.worker.signals.callback.connect(self.threadCallbackHandler)
self.threadpool.start(self.worker)
def disableButtonsDuringTranslations(self):
self.startButton.setDisabled(True)
self.openFileButton.setDisabled(True)
self.targetLocationButton.setDisabled(True)
self.closeButton.setDisabled(True)
self.configButton.setDisabled(True)
def enableButtonsAfterTranslations(self):
self.startButton.setDisabled(False)
self.openFileButton.setDisabled(False)
self.targetLocationButton.setDisabled(False)
self.closeButton.setDisabled(False)
self.configButton.setDisabled(False)
def threadCallbackHandler(self, callback):
try:
exec(callback)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
exctype, value, traceback.format_exc()
app.exit()
def startTranslations(self, progressCallback):
global sourceFileValues
global totalLangFileLines
global shouldAbort
progressCallback.emit('self.progressBarLabel.setText("Starting translations")')
# Set query headers
headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
# Really inefficient but it works ¯\_(ツ)_/¯
startQueryTime = time.time()
responseJSON = []
progressCallback.emit('self.progressBarLabel.setText("Translating...")')
itemLoopIteration = 1
try:
requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=None)
hasFailedResolve = False
except:
requests.post('https://translate.astian.org/translate', headers=headers, data=None)
progressCallback.emit('self.logAction("Failed to resolve LibreTranslate mirror. Defaulting to https://translate.astian.org/translate")')
hasFailedResolve = True
for item in self.sourceFileValues:
if self.shouldAbort:
return
# Setup the progress bar, by mapping the total translation count to 100
progressCallback.emit('self.progressBar.setValue(int(((' + str(itemLoopIteration) + ' / self.totalLangFileLines) * 100).__round__(0)))')
# Set query data
data = {
'q': item,
'source': self.getLangIdentifier(self.sourceLangBox.currentText()),
'target': self.getLangIdentifier(self.targetLangBox.currentText())
}
# Send the query and get the response
if hasFailedResolve == True:
response = requests.post('https://translate.astian.org/translate', headers=headers, data=data)
else:
response = requests.post(self.config["general"]["libretranslate_mirror"], headers=headers, data=data)
responseData = json.loads(response.content.decode(response.encoding))["translatedText"]
responseJSON.append(str(responseData).rstrip('"').replace('\u00ab', '').lstrip('"').replace('\u00bb', ''))
itemLoopIteration = itemLoopIteration + 1
progressCallback.emit('self.logAction("Query time was " + str(time.time() - ' + str(startQueryTime) + ') + " seconds.")')
progressCallback.emit('self.progressBarLabel.setText("Translations complete")')
progressCallback.emit('self.saveToFile(' + str(responseJSON) + ')')
# Save the JSON data to file
def saveToFile(self, responseJSON):
global sourceFileKeys
global shouldAbort
self.progressBarLabel.setText("Writing to file...")
self.progressBar.setValue(0)
with open(self.targetLocationBox.text(), 'w', encoding="UTF-8") as f:
compiledDict = dict()
responseJSONList = list(responseJSON)
currentIteration = 0
for item in self.sourceFileKeys:
if self.shouldAbort:
return
compiledDict.update({item: str(responseJSONList[currentIteration])})
currentIteration = currentIteration + 1
progBarVal = int(((currentIteration / self.totalLangFileLines) * 100).__round__(0))
self.progressBar.setValue(progBarVal)
json.dump(compiledDict, f, separators=(',', ': '), indent=" ", ensure_ascii=False)
self.enableButtonsAfterTranslations()
self.logAction("Translations written to file.")
self.progressBarLabel.setText("All tasks completed.")
# Log information to the console
def logAction(self, text: str):
if self.logBox.text() == "No log information available. ":
self.logBox.setText("")
preparedLogText = ">> " + text
else:
preparedLogText = self.logBox.text() + "\n>> " + text
self.logBox.setText(preparedLogText)
self.logBoxScrollArea.verticalScrollBar().setValue(self.logBoxScrollArea.verticalScrollBar().maximum())
def getLangIdentifier(self, lang):
if lang == LangTypes.ENGLISH:
return "en"
if lang == LangTypes.ARABIC:
return "ar"
if lang == LangTypes.CHINESE:
return "zh"
if lang == LangTypes.DUTCH:
return "nl"
if lang == LangTypes.FRENCH:
return "fr"
if lang == LangTypes.GERMAN:
return "de"
if lang == LangTypes.HINDI:
return "hi"
if lang == LangTypes.INDONESIAN:
return "id"
if lang == LangTypes.IRISH:
return "ga"
if lang == LangTypes.ITALIAN:
return "it"
if lang == LangTypes.JAPANESE:
return "ja"
if lang == LangTypes.KOREAN:
return "ko"
if lang == LangTypes.POLISH:
return "pl"
if lang == LangTypes.PORTUGUESE:
return "pt"
if lang == LangTypes.RUSSIAN:
return "ru"
if lang == LangTypes.SPANISH:
return "es"
if lang == LangTypes.TURKISH:
return "tr"
if lang == LangTypes.UKRANIAN:
return "uk"
if lang == LangTypes.VIETNAMESE:
return "vi"
# Initialize the program
def __init__(self, parent=None):
global app
super(MrWorldwide, self).__init__(parent)
self.setupUi(self)
self.run()
class ConfigurationDialog(QWidget, ConfigurationUI.Ui_Dialog):
def __init__(self, parent=None):
super(ConfigurationDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Configuration.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Configuration.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Read configuration
self.config = configparser.ConfigParser()
readConfigurationFile(self.config)
# Setup dropdown boxes
self.defaultSourceLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
self.defaultTargetLangBox.addItems([LangTypes.ENGLISH, LangTypes.ARABIC, LangTypes.CHINESE, LangTypes.DUTCH, LangTypes.FRENCH, LangTypes.GERMAN, LangTypes.HINDI, LangTypes.INDONESIAN, LangTypes.IRISH, LangTypes.ITALIAN, LangTypes.JAPANESE, LangTypes.KOREAN, LangTypes.POLISH, LangTypes.PORTUGUESE, LangTypes.RUSSIAN, LangTypes.SPANISH, LangTypes.TURKISH, LangTypes.UKRANIAN, LangTypes.VIETNAMESE])
# Apply current configuration
self.apiMirror.setText(self.config["general"]["libretranslate_mirror"])
self.defaultSourceLangBox.setCurrentText(self.config["defaults"]["default_source_language"])
self.defaultTargetLangBox.setCurrentText(self.config["defaults"]["default_target_language"])
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.applyButton.clicked.connect(self.applyEvent)
self.updateButton.clicked.connect(self.openUpdateManager)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Update event, for opening the update manager
# Open the configuration GUI
def openUpdateManager(self, event):
self.updateManagerDialog = UpdateManagerDialog()
self.updateManagerDialog.setup(self)
self.updateManagerDialog.show()
# Apply event, for handling applying of configurations
def applyEvent(self, event):
self.config = configparser.ConfigParser()
self.config['general'] = {}
self.config['general']['libretranslate_mirror'] = self.apiMirror.text()
self.config['defaults'] = {}
self.config['defaults']['default_source_language'] = self.defaultSourceLangBox.currentText()
self.config['defaults']['default_target_language'] = self.defaultTargetLangBox.currentText()
with open('config.ini', 'w') as configFile:
self.config.write(configFile)
configFile.close()
self.parent.refreshConfiguration()
self.close()
class UpdateManagerDialog(QWidget, UpdateManagerUI.Ui_Dialog):
def __init__(self, parent=None):
super(UpdateManagerDialog, self).__init__(parent)
self.setupUi(self)
self.run()
def run(self):
# Setup resources
logo = QtGui.QPixmap(resource_path("gui_resources/Updates.png"))
icon = QtGui.QIcon(resource_path("gui_resources/Updates.png"))
# Set the logos and images
self.setWindowIcon(icon) # TODO: Custom icon
self.logo.setPixmap(logo)
# Setup button actions
self.closeButton.clicked.connect(self.closeEvent)
self.checkUpdatesButton.clicked.connect(self.checkForUpdatesEvent)
global version
self.currentVersionBox.setText(version)
# Setup variables
def setup(self, parent):
self.parent = parent
# Close event, for handling closing of the program
def closeEvent(self, event):
self.close()
# Check for updates event
def checkForUpdatesEvent(self, event):
self.updateData = json.loads(requests.get("https://raw.githubusercontent.com/AnonymousHacker1279/MrWorldwide/master/update.json").text)
self.latestVersionBox.setText(self.updateData["latest"])
self.changelogBox.setText(self.updateData["changelog"] + "\n\nDownload the update here: " + self.updateData["link"])
def main():
global app
app = QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
app.setStyle("Fusion")
form = MrWorldwide()
form.show()
app.exec()
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath('.'), relative_path)
if __name__ == '__main__':
    main()
| 37.008529 | 399 | 0.75157 | 16,182 | 0.932089 | 0 | 0 | 239 | 0.013766 | 0 | 0 | 4,154 | 0.239272 |
98e15c2d42b427bf4ffb23842980cd80d4cd57bf | 7,429 | py | Python | tools/az_cli.py | google/cloud-forensics-utls | 719093b4a229e5e97c30d93faabb1ccf3b6ee422 | ["Apache-2.0"] | null | null | null | tools/az_cli.py | google/cloud-forensics-utls | 719093b4a229e5e97c30d93faabb1ccf3b6ee422 | ["Apache-2.0"] | null | null | null | tools/az_cli.py | google/cloud-forensics-utls | 719093b4a229e5e97c30d93faabb1ccf3b6ee422 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for Azure."""
import os
from datetime import datetime
from typing import TYPE_CHECKING
from Crypto.PublicKey import RSA
from libcloudforensics import logging_utils
from libcloudforensics.providers.azure.internal import account
from libcloudforensics.providers.azure.internal import monitoring
from libcloudforensics.providers.azure import forensics
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import argparse
def ListInstances(args: 'argparse.Namespace') -> None:
"""List instances in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
instances = az_account.compute.ListInstances(
resource_group_name=args.resource_group_name)
logger.info('Instances found:')
for instance in instances.values():
boot_disk = instance.GetBootDisk()
logger.info(
'Name: {0:s}, Boot disk: {1:s}'.format(instance.name, boot_disk.name))
def ListDisks(args: 'argparse.Namespace') -> None:
"""List disks in Azure subscription.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
disks = az_account.compute.ListDisks(
resource_group_name=args.resource_group_name)
logger.info('Disks found:')
for disk_name, disk in disks.items():
logger.info('Name: {0:s}, Region: {1:s}'.format(disk_name, disk.region))
def CreateDiskCopy(args: 'argparse.Namespace') -> None:
"""Create an Azure disk copy.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
logger.info('Starting disk copy...')
disk_copy = forensics.CreateDiskCopy(args.default_resource_group_name,
instance_name=args.instance_name,
disk_name=args.disk_name,
disk_type=args.disk_type,
region=args.region,
src_profile=args.src_profile,
dst_profile=args.dst_profile)
logger.info(
'Done! Disk {0:s} successfully created. You will find it in '
'your Azure subscription under the name {1:s}.'.format(
disk_copy.resource_id, disk_copy.name))
def StartAnalysisVm(args: 'argparse.Namespace') -> None:
"""Start forensic analysis VM.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
attach_disks = []
if args.attach_disks:
attach_disks = args.attach_disks.split(',')
# Check if attach_disks parameter exists and if there
# are any empty entries.
if not (attach_disks and all(elements for elements in attach_disks)):
logger.error('error: parameter --attach_disks: {0:s}'.format(
args.attach_disks))
return
ssh_public_key = args.ssh_public_key
if not ssh_public_key:
# According to https://docs.microsoft.com/cs-cz/samples/azure-samples/
# resource-manager-python-template-deployment/resource-manager-python-
# template-deployment/ there's no API to generate a new SSH key pair in
# Azure, so we do this manually...
ssh_public_key = _GenerateSSHKeyPair(args.instance_name)
logger.info('Starting analysis VM...')
vm = forensics.StartAnalysisVm(args.default_resource_group_name,
args.instance_name,
int(args.disk_size),
ssh_public_key,
cpu_cores=int(args.cpu_cores),
memory_in_mb=int(args.memory_in_mb),
region=args.region,
attach_disks=attach_disks,
dst_profile=args.dst_profile)
logger.info('Analysis VM started.')
logger.info('Name: {0:s}, Started: {1:s}'.format(vm[0].name, str(vm[1])))
def _GenerateSSHKeyPair(vm_name: str) -> str:
"""Generate a SSH key pair and returns its public key.
Both public and private keys will be saved in the current directory.
Args:
vm_name (str): The VM name for which to generate the key pair.
Returns:
str: The public key for the generated SSH key pair.
Raises:
ValueError: If vm_name is None.
"""
if not vm_name:
raise ValueError('Parameter vm_name must not be None.')
logger.info('Generating a new SSH key pair for VM: {0:s}'.format(vm_name))
key = RSA.generate(2048)
key_name = '{0:s}-ssh'.format(vm_name)
public_key = key.publickey().exportKey('OpenSSH')
path_public_key = os.path.join(os.getcwd(), key_name + '.pub')
private_key = key.exportKey('PEM')
path_private_key = os.path.join(os.getcwd(), key_name + '.pem')
with open(path_private_key, 'wb') as f:
f.write(private_key)
with open(path_public_key, 'wb') as f:
f.write(public_key)
logger.info('SSH key pair generated. Public key saved in {0:s}, private key '
'saved in {1:s}'.format(path_public_key, path_private_key))
return public_key.decode('utf-8')
def ListMetrics(args: 'argparse.Namespace') -> None:
"""List Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
metrics = az_monitoring.ListAvailableMetricsForResource(args.resource_id)
for metric in metrics:
logger.info('Available metric: {0:s}'.format(metric))
def QueryMetrics(args: 'argparse.Namespace') -> None:
"""Query Azure Monitoring metrics for a resource.
Args:
args (argparse.Namespace): Arguments from ArgumentParser.
Raises:
RuntimeError: If from_date or to_date could not be parsed.
"""
az_account = account.AZAccount(args.default_resource_group_name)
az_monitoring = monitoring.AZMonitoring(az_account)
from_date, to_date = args.from_date, args.to_date
if from_date and to_date:
try:
from_date = datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%SZ')
to_date = datetime.strptime(to_date, '%Y-%m-%dT%H:%M:%SZ')
except ValueError as exception:
raise RuntimeError(
'Cannot parse date: {0!s}'.format(exception)) from exception
metrics = az_monitoring.GetMetricsForResource(
args.resource_id,
metrics=args.metrics,
from_date=from_date,
to_date=to_date,
interval=args.interval,
aggregation=args.aggregation or 'Total',
qfilter=args.qfilter)
for metric, metric_value in metrics.items():
logger.info('Metric: {0:s}'.format(metric))
for timestamp, value in metric_value.items():
logger.info(' Timestamp: {0:s}, value: {1:s}'.format(timestamp, value))
| 35.208531 | 79 | 0.679768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,852 | 0.383901 |
98e581895367116db85fb5bcc24f1ed7b42ed751 | 2,181 | py | Python | bbio/bbio.py | timgates42/PyBBIO | 0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a | ["MIT"] | 102 | 2015-01-29T04:28:49.000Z | 2022-01-03T18:27:50.000Z | bbio/bbio.py | timgates42/PyBBIO | 0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a | ["MIT"] | 62 | 2015-01-29T11:05:13.000Z | 2019-12-03T04:30:34.000Z | bbio/bbio.py | timgates42/PyBBIO | 0d46115059ed7ec0c17afb6dd7ed2f507b4f2b8a | ["MIT"] | 58 | 2015-02-10T14:31:18.000Z | 2022-03-29T13:24:03.000Z |
"""
PyBBIO - bbio.py
Copyright (c) 2012-2015 - Alexander Hiam <[email protected]>
Released under the MIT license
https://github.com/graycatlabs/PyBBIO
"""
import sys, atexit
from .platform import platform_init, platform_cleanup
from .common import ADDITIONAL_CLEANUP, util_init
def bbio_init():
""" Pre-run initialization, i.e. starting module clocks, etc. """
util_init()
platform_init()
def bbio_cleanup():
""" Post-run cleanup, i.e. stopping module clocks, etc. """
# Run user cleanup routines:
for cleanup in ADDITIONAL_CLEANUP:
try:
cleanup()
except Exception as e:
# Something went wrong with one of the cleanup routines, but we
# want to keep going; just print the error and continue
print "*Exception raised trying to call cleanup routine '%s':\n %s" %\
(cleanup, e)
platform_cleanup()
# The following code detects if Python is running interactively,
# and if so initializes PyBBIO on import and registers PyBBIO's
# cleanup to be called at exit, otherwise it defines the run() and
# stop() methods for the file based control flow:
import __main__
if not hasattr(__main__, '__file__'):
# We're in the interpreter, see:
# http://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode
bbio_init()
print "PyBBIO initialized"
def interactive_cleanup():
bbio_cleanup()
print "Finished PyBBIO cleanup"
atexit.register(interactive_cleanup)
else:
bbio_init()
atexit.register(bbio_cleanup)
# Imported in a Python file, define run() and stop():
def run(setup, loop):
""" The main loop; must be passed a setup and a loop function.
First the setup function will be called once, then the loop
function wil be called continuously until a stop signal is
raised, e.g. CTRL-C or a call to the stop() function from
within the loop. """
try:
setup()
while (True):
loop()
except KeyboardInterrupt:
# Manual exit signal, clean up and exit happy
exit(0)
def stop():
""" Preferred way for a program to stop itself. """
raise KeyboardInterrupt # Expected happy stop condition in run()
| 32.073529 | 85 | 0.692343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,384 | 0.634571 |
98e5e44eba98b059fc30bc12fb7cf43b26e82f78 | 365 | py | Python | app/models/endeavors.py | theLaborInVain/kdm-manager-api | fa8744c9b8a739262d1b94900648254cc69d16e1 | ["MIT"] | 2 | 2020-03-04T13:43:45.000Z | 2020-11-03T20:34:21.000Z | app/models/endeavors.py | theLaborInVain/kdm-manager-api | fa8744c9b8a739262d1b94900648254cc69d16e1 | ["MIT"] | 64 | 2019-07-19T19:19:50.000Z | 2022-03-03T21:19:28.000Z | app/models/endeavors.py | theLaborInVain/kdm-manager-api | fa8744c9b8a739262d1b94900648254cc69d16e1 | ["MIT"] | null | null | null |
"""
The Endeavors asset collection has a number of irregular assets. Be careful
writing any custom code here.
"""
from app.assets import endeavors
from app import models
class Assets(models.AssetCollection):
def __init__(self, *args, **kwargs):
self.root_module = endeavors
models.AssetCollection.__init__(self, *args, **kwargs)
| 20.277778 | 79 | 0.706849 | 180 | 0.493151 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.334247 |
98e710a1b1cb3e42d4cbdb66250958e21888c440 | 804 | py | Python | interface/inter5.py | CeciliaDornelas/Python | 883959ed2e10cd8e8ace2b640e1944edc0c1d8a3 | ["MIT"] | null | null | null | interface/inter5.py | CeciliaDornelas/Python | 883959ed2e10cd8e8ace2b640e1944edc0c1d8a3 | ["MIT"] | null | null | null | interface/inter5.py | CeciliaDornelas/Python | 883959ed2e10cd8e8ace2b640e1944edc0c1d8a3 | ["MIT"] | null | null | null |
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
from PyQt5.QtCore import QSize
class HelloWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setMinimumSize(QSize(280, 120))
self.setWindowTitle("Olá, Mundo! Exemplo PyQT5")
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
gridLayout = QGridLayout(self)
centralWidget.setLayout(gridLayout)
title = QLabel("Olá Mundo para PyQt", self)
title.setAlignment(QtCore.Qt.AlignCenter)
gridLayout.addWidget(title, 0, 0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = HelloWindow()
mainWin.show()
sys.exit( app.exec_() )
| 26.8 | 69 | 0.691542 | 508 | 0.630273 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.074442 |
98e753afbcdb25feef4bb770897b167108c721b5 | 1,523 | py | Python | setup.py | notwa/scipybiteopt | 62e1510789b680483ad867984849af215a9848c5 | ["MIT"] | null | null | null | setup.py | notwa/scipybiteopt | 62e1510789b680483ad867984849af215a9848c5 | ["MIT"] | null | null | null | setup.py | notwa/scipybiteopt | 62e1510789b680483ad867984849af215a9848c5 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
import numpy
from setuptools import setup, Extension
#include markdown description in pip page
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# https://github.com/pypa/packaging-problems/issues/84
# no sensible way to include header files by default
headers = ['scipybiteopt/biteopt.h',
'scipybiteopt/biteoptort.h',
'scipybiteopt/spheropt.h',
'scipybiteopt/biteaux.h',
'scipybiteopt/nmsopt.h']
def get_c_sources(files, include_headers=False):
return files + (headers if include_headers else [])
module1 = Extension('scipybiteopt.biteopt',
sources=get_c_sources(['scipybiteopt/biteopt_py_ext.cpp'], include_headers=(sys.argv[1] == "sdist")),
language="c++",
include_dirs=[numpy.get_include()],
extra_compile_args=['-std=c++11', '-O3'] if os.name != 'nt' else ['-O3'])
setup(name='scipybiteopt',
version='1.1.1',
description="Scipy style wrapper for Aleksey Vaneev's BiteOpt",
author='dschmitz89',
author_email='[email protected]',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url = 'https://github.com/dschmitz89/scipybiteopt',
packages = ['scipybiteopt'],
ext_modules = [module1],
install_requires=[
'numpy']
)
| 35.418605 | 119 | 0.670387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 601 | 0.394616 |
98e97be18c63f8ef9e8f59a9c1da5ea5229f6454 | 2,619 | py | Python | qiskit_experiments/data_processing/__init__.py | yoshida-ryuhei/qiskit-experiments | 82561acf86b407dcda0a9ec69fe18de2b0a592a2 | [
"Apache-2.0"
]
| null | null | null | qiskit_experiments/data_processing/__init__.py | yoshida-ryuhei/qiskit-experiments | 82561acf86b407dcda0a9ec69fe18de2b0a592a2 | [
"Apache-2.0"
]
| null | null | null | qiskit_experiments/data_processing/__init__.py | yoshida-ryuhei/qiskit-experiments | 82561acf86b407dcda0a9ec69fe18de2b0a592a2 | [
"Apache-2.0"
]
| null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===========================================================
Data Processing (:mod:`qiskit_experiments.data_processing`)
===========================================================
.. currentmodule:: qiskit_experiments.data_processing
Data processing is the act of taking the data returned by the backend and
converting it into a format that can be analyzed.
It is implemented as a chain of data processing steps that transform various input data,
e.g. IQ data, into a desired format, e.g. population, which can be analyzed.
These data transformations may consist of multiple steps, such as kerneling and discrimination.
Each step is implemented by a :class:`~qiskit_experiments.data_processing.data_action.DataAction`
also called a `node`.
The data processor implements the :meth:`__call__` method. Once initialized, it
can thus be used as a standard python function:
.. code-block:: python
processor = DataProcessor(input_key="memory", [Node1(), Node2(), ...])
out_data = processor(in_data)
The data input to the processor is a sequence of dictionaries each representing the result
of a single circuit. The output of the processor is a numpy array whose shape and data type
depend on the combination of the nodes in the data processor.
Uncertainties that arise from quantum measurements or finite sampling can be taken into account
in the nodes: a standard error can be generated in a node and can be propagated
through the subsequent nodes in the data processor.
Correlation between computed values is also considered.
Classes
=======
.. autosummary::
:toctree: ../stubs/
DataProcessor
DataAction
TrainableDataAction
Data Processing Nodes
=====================
.. autosummary::
:toctree: ../stubs/
Probability
MarginalizeCounts
ToImag
ToReal
SVD
AverageData
BasisExpectationValue
MinMaxNormalize
"""
from .data_action import DataAction, TrainableDataAction
from .nodes import (
Probability,
MarginalizeCounts,
ToImag,
ToReal,
SVD,
AverageData,
BasisExpectationValue,
MinMaxNormalize,
)
from .data_processor import DataProcessor
| 31.178571 | 97 | 0.71974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,345 | 0.89538 |
98e9db17617d3ce2f8dbdda50ebfbe93ce11f25b | 10,064 | py | Python | models/pointnet2_sem_seg_msg_haptic.py | yufeiwang63/Pointnet_Pointnet2_pytorch | f9078a71b973c13ae7ffa897e142dc7b1e8e88be | [
"MIT"
]
| null | null | null | models/pointnet2_sem_seg_msg_haptic.py | yufeiwang63/Pointnet_Pointnet2_pytorch | f9078a71b973c13ae7ffa897e142dc7b1e8e88be | [
"MIT"
]
| null | null | null | models/pointnet2_sem_seg_msg_haptic.py | yufeiwang63/Pointnet_Pointnet2_pytorch | f9078a71b973c13ae7ffa897e142dc7b1e8e88be | [
"MIT"
]
| null | null | null | import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation
class get_shared_model(nn.Module):
def __init__(self, use_batch_norm, num_classes, num_input_channel=7):
super(get_shared_model, self).__init__()
self.sa1 = PointNetSetAbstractionMsg(1024, [0.05, 0.1], [16, 32], num_input_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(256, [0.1, 0.2], [16, 32], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(64, [0.2, 0.4], [16, 32], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(16, [0.4, 0.8], [16, 32], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
# for normal prediction
self.conv_normal = nn.Conv1d(128, 3, 1)
# for force prediction
self.conv_force = nn.Conv1d(128, 1, 1)
self.use_batch_norm = use_batch_norm
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.conv1(l0_points))
contact = self.conv2(x)
normal = self.conv_normal(x)
normal = F.normalize(normal, dim=1)
force = self.conv_force(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
contact = contact.permute(0, 2, 1)
normal = normal.permute(0, 2, 1)
force = force.permute(0, 2, 1)
return (contact, normal, force), l4_points
class get_model(nn.Module):
def __init__(self, use_batch_norm, num_out_channel, num_in_channel=7, target='contact',
radius_list=[[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]],
npoint_list=[1024, 256, 64, 16],
sample_point_1_list=[16, 16, 16, 16],
sample_point_2_list=[32, 32, 32, 32],
layer=4,
downsample=True,
dropout=True,
track_running_stats=True,
mlp1_size=[16, 16, 32],
mlp2_size=[32, 32, 64],
interpolation_mlp_size=[128, 128, 128]
):
print("using layer: ", layer)
super(get_model, self).__init__()
self.layer = layer
if self.layer == 4:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(npoint_list[3], radius_list[3], [sample_point_1_list[3], sample_point_2_list[3]], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 3:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(npoint_list[2], radius_list[2], [sample_point_1_list[2], sample_point_2_list[2]], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 2:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(npoint_list[1], radius_list[1], [sample_point_1_list[1], sample_point_2_list[1]], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+128+128, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
elif self.layer == 1:
self.sa1 = PointNetSetAbstractionMsg(npoint_list[0], radius_list[0], [sample_point_1_list[0], sample_point_2_list[0]], num_in_channel, [mlp1_size, mlp2_size], use_batch_norm=use_batch_norm,
downsample=downsample, track_running_stats=track_running_stats)
self.fp1 = PointNetFeaturePropagation(mlp1_size[-1] + mlp2_size[-1], interpolation_mlp_size, use_batch_norm=use_batch_norm, track_running_stats=track_running_stats)
self.drop_out = dropout
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128, track_running_stats=track_running_stats)
if self.drop_out:
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_out_channel, 1)
self.use_batch_norm = use_batch_norm
self.target = target
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
if self.layer == 4:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 3:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 2:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
elif self.layer == 1:
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
if self.drop_out:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.bn1(self.conv1(l0_points)))
else:
x = F.relu(self.conv1(l0_points))
x = self.conv2(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
if self.target == 'normal':
x = F.normalize(x, dim=1)
x = x.permute(0, 2, 1)
# return x, l4_points
return x, None
class get_loss_original(nn.Module):
def __init__(self):
super(get_loss_original, self).__init__()
def forward(self, pred, target, trans_feat, weight):
total_loss = F.nll_loss(pred, target, weight=weight)
return total_loss
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
self.loss = nn.BCEWithLogitsLoss()
def forward(self, pred, target, trans_feat, weight):
total_loss = self.loss(pred, target)
return total_loss
if __name__ == '__main__':
import torch
model = get_model(13)
xyz = torch.rand(6, 9, 2048)
    (model(xyz))
| 54.695652 | 207 | 0.64527 | 9,757 | 0.969495 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.024245 |
98eaf0ff524a7491427b7b19f617c3c6aaefc6a4 | 100 | py | Python | backend/src/notifications/admin.py | YujithIsura/request-management | 3c683274881ef7798779e03a24042034edcd941c | ["MIT"] | 3 | 2021-11-21T20:46:00.000Z | 2021-12-02T14:47:18.000Z | notification/admin.py | lautarianoo/django_social_network | ec83af7267f830a2463cb591138dae1a088f9a4e | ["BSD-3-Clause"] | 169 | 2020-04-09T08:39:25.000Z | 2021-09-03T01:07:01.000Z | notification/admin.py | lautarianoo/django_social_network | ec83af7267f830a2463cb591138dae1a088f9a4e | ["BSD-3-Clause"] | 13 | 2020-04-05T20:53:11.000Z | 2022-02-28T14:52:17.000Z |
from django.contrib import admin
from .models import Notification
admin.site.register(Notification)
| 25 | 33 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
98eb89e6efe4554abbe1506f10c8ccfbcb3dedf8 | 2,234 | py | Python | HoverSlam.py | GiantWaffleCode/WafflePython | d3e85ce6d9c792e7338eb825307f7bb48113742a | ["MIT"] | 13 | 2020-10-13T00:19:21.000Z | 2020-12-31T02:38:58.000Z | HoverSlam.py | GiantWaffleCode/WafflePython | d3e85ce6d9c792e7338eb825307f7bb48113742a | ["MIT"] | null | null | null | HoverSlam.py | GiantWaffleCode/WafflePython | d3e85ce6d9c792e7338eb825307f7bb48113742a | ["MIT"] | 10 | 2020-10-13T00:19:52.000Z | 2020-12-31T02:39:42.000Z |
import krpc
import time
import math
from simple_pid import PID
conn = krpc.connect(name="UI Test")
vessel = conn.space_center.active_vessel
kerbin_frame = vessel.orbit.body.reference_frame
orb_frame = vessel.orbital_reference_frame
srf_frame = vessel.surface_reference_frame
surface_gravity = vessel.orbit.body.surface_gravity
current_met = conn.add_stream(getattr, vessel, 'met')
current_roll = conn.add_stream(getattr, vessel.flight(), 'roll')
current_pitch = conn.add_stream(getattr, vessel.flight(), 'pitch')
current_heading = conn.add_stream(getattr, vessel.flight(), 'heading')
current_alt = conn.add_stream(getattr, vessel.flight(), 'surface_altitude')
lowest = conn.add_stream(vessel.bounding_box, srf_frame)
current_drag = conn.add_stream(getattr, vessel.flight(), 'drag')
current_aero = conn.add_stream(getattr, vessel.flight(), 'aerodynamic_force')
current_speed = conn.add_stream(getattr, vessel.flight(kerbin_frame), 'speed')
vessel.control.activate_next_stage()
vessel.control.sas = True
time.sleep(.2)
vessel.control.sas_mode = conn.space_center.SASMode.retrograde
def bottom_altitude():
return max(0, current_alt() - abs(lowest()[0][0]))
for engine in vessel.parts.engines:
engine.gimbal_locked = True
while True:
aero_amp = math.sqrt(current_aero()[0] ** 2
+ current_aero()[1] ** 2
+ current_aero()[2] ** 2)
time_to_zero = current_speed() / ((((vessel.max_thrust * .9) + aero_amp) / vessel.mass)
+ vessel.orbit.body.surface_gravity)
if (time_to_zero * current_speed()) >= bottom_altitude() - current_speed():
print(current_speed())
print(f"Start Hover Slam Burn")
vessel.control.throttle = .9
break
while current_speed() > 50:
print(current_speed())
time.sleep(.01)
pass
print(f"Switch to Stab")
for leg in vessel.parts.legs:
leg.deployed = True
pid1 = PID(.15, 0, .5, setpoint=0)
pid1.output_limits = (0, 1)
pid1.sample_time = 0.01
while bottom_altitude() > 1:
vessel.control.throttle = pid1(bottom_altitude())
# pid1.setpoint *= .98
time.sleep(.01)
vessel.control.sas_mode = conn.space_center.SASMode.radial
vessel.control.throttle = 0
| 33.848485 | 91 | 0.705461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.066697 |
98ee2fa044a20258e55e590fef0af310684f4e34 | 433 | py | Python | tests/unit_tests/cx_core/integration/integration_test.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | ["MIT"] | 204 | 2020-01-18T10:12:13.000Z | 2022-03-27T09:40:17.000Z | tests/unit_tests/cx_core/integration/integration_test.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | ["MIT"] | 329 | 2020-01-17T17:18:53.000Z | 2022-03-29T11:30:30.000Z | tests/unit_tests/cx_core/integration/integration_test.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | ["MIT"] | 66 | 2020-01-19T20:17:21.000Z | 2022-03-13T15:03:41.000Z |
from cx_core import integration as integration_module
from cx_core.controller import Controller
def test_get_integrations(fake_controller: Controller):
integrations = integration_module.get_integrations(fake_controller, {})
inteagration_names = {i.name for i in integrations}
assert inteagration_names == {
"z2m",
"zha",
"deconz",
"state",
"mqtt",
"lutron_caseta",
}
| 27.0625 | 75 | 0.678984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.106236 |
98ee487f9a2345f91b85bcae94f9855580455dc1 | 478 | py | Python | asystem-adoc/src/main/template/python/script_util.py | ggear/asystem_archive | b97f67218e8aa60991fba386c9e73d27d20d6c47 | [
"Apache-2.0"
]
| null | null | null | asystem-adoc/src/main/template/python/script_util.py | ggear/asystem_archive | b97f67218e8aa60991fba386c9e73d27d20d6c47 | [
"Apache-2.0"
]
| 2 | 2021-03-25T21:27:09.000Z | 2022-02-11T03:38:48.000Z | asystem-adoc/src/main/template/python/script_util.py | ggear/asystem_archive | b97f67218e8aa60991fba386c9e73d27d20d6c47 | [
"Apache-2.0"
]
| null | null | null | ###############################################################################
#
# Python script utilities as included from the cloudera-framework-assembly,
# do not edit directly
#
###############################################################################
import os
import re
def qualify(path):
return path if (re.match(r'[.]*://[.]*', path)
or 'CF_HADOOP_DEFAULT_FS' not in os.environ) \
else os.environ['CF_HADOOP_DEFAULT_FS'] + path
| 29.875 | 79 | 0.433054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.658996 |
98ee7596428318903272a404f3751220eec8a490 | 11,760 | py | Python | datapackage_pipelines/web/server.py | gperonato/datapackage-pipelines | 72b98918db1c19590586a3a85c5b087227cbbc3b | [
"MIT"
]
| 109 | 2016-09-01T08:41:55.000Z | 2021-11-10T10:08:35.000Z | datapackage_pipelines/web/server.py | gperonato/datapackage-pipelines | 72b98918db1c19590586a3a85c5b087227cbbc3b | [
"MIT"
]
| 144 | 2016-08-30T16:26:50.000Z | 2021-04-18T09:06:12.000Z | datapackage_pipelines/web/server.py | gperonato/datapackage-pipelines | 72b98918db1c19590586a3a85c5b087227cbbc3b | [
"MIT"
]
| 34 | 2016-09-05T12:46:53.000Z | 2022-03-05T01:53:49.000Z | import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(lvl):
pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
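# Minimal sketch (not part of the original module), showing how the dashboard could
# be served locally with Flask's built-in development server; the host and port
# below are illustrative assumptions, not values defined by this project.
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)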
| 33.696275 | 79 | 0.631463 | 0 | 0 | 0 | 0 | 8,090 | 0.687925 | 0 | 0 | 2,074 | 0.176361 |
98eec9960afb05f934f3e80b57d22d6b3147c3f1 | 1,425 | py | Python | MoveSim/code/models/losses.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | MoveSim/code/models/losses.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | MoveSim/code/models/losses.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | # coding: utf-8
import numpy as np
import torch  # needed for torch.Tensor / torch.index_select below
import torch.nn as nn
class distance_loss(nn.Module):
    def __init__(self):
        # initialise nn.Module before assigning tensor attributes
        super(distance_loss, self).__init__()
        with open('../data/raw/Cellular_Baselocation_baidu') as f:
gpss = f.readlines()
self.X = []
self.Y = []
for gps in gpss:
x, y = float(gps.split()[0]), float(gps.split()[1])
self.X.append(x)
self.Y.append(y)
self.X = torch.Tensor(np.array(self.X)).float()
self.Y = torch.Tensor(np.array(self.Y)).float()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
        # reshape(-1) is used because the column slices are non-contiguous, so .view(-1)
        # would fail; x1/y1 are the current points, x2/y2 the following points
        x1 = torch.index_select(self.X, 0, x[:, :-1].reshape(-1))
        x2 = torch.index_select(self.X, 0, x[:, 1:].reshape(-1))
        y1 = torch.index_select(self.Y, 0, x[:, :-1].reshape(-1))
        # index the next position (1:) here as well so dy measures the step distance
        y2 = torch.index_select(self.Y, 0, x[:, 1:].reshape(-1))
dx = x1 - x2
dy = y1 - y2
loss = dx**2 + dy**2
return loss
class period_loss(nn.Module):
    def __init__(self, time_interval):
        # initialise nn.Module before assigning attributes
        super(period_loss, self).__init__()
self.time_interval = time_interval
self.mse = nn.MSELoss()
def forward(self, x):
"""
:param x: generated sequence, batch_size * seq_len
:return:
"""
loss = 0.
for i in range(0, x.size(1) - self.time_interval):
loss += self.mse(x[:, i], x[:, i + self.time_interval])
return loss
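if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file): exercise period_loss on a
    # random dummy batch. The batch size, sequence length and time interval below are
    # illustrative assumptions rather than values used by MoveSim itself.
    dummy_batch = torch.rand(4, 48)            # 4 generated sequences of 48 time steps
    criterion = period_loss(time_interval=24)  # penalise differences one "period" apart
    print(criterion(dummy_batch))              # accumulated MSE-style periodicity loss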
| 27.403846 | 67 | 0.523509 | 1,362 | 0.955789 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.168421 |
98ef6a5aa62915725ae521746cef94f51adfcf47 | 1,316 | py | Python | board/game.py | petthauk/chess_ml | 2a66ca8511fd4eef71607a7f56417d039d94dbf9 | [
"MIT"
]
| null | null | null | board/game.py | petthauk/chess_ml | 2a66ca8511fd4eef71607a7f56417d039d94dbf9 | [
"MIT"
]
| null | null | null | board/game.py | petthauk/chess_ml | 2a66ca8511fd4eef71607a7f56417d039d94dbf9 | [
"MIT"
]
| null | null | null | import pygame as pg
from pygame.locals import *
import sys
import board.chess_board as board
w = 60 * 8
h = 60 * 8
class Game:
"""
Class to setup and start a game
"""
def __init__(self):
self.b = board.Board(w, h)
def get_board(self):
"""
Returns board
:return: Board-class
"""
return self.b
def run(self):
"""
        Run the main event loop: handle quitting and mouse clicks on board squares
        :return: None
"""
# While loop to show display
while True:
for event in pg.event.get():
# Quitting game
if event.type == QUIT:
pg.quit()
sys.exit()
# If game can continue
if self.b.get_status() == "-":
# Pressing mouse
if event.type == MOUSEBUTTONDOWN:
pos = pg.mouse.get_pos()
for r in self.b.get_board_array():
for square in r:
if square.get_visual().collidepoint(pos):
square.click()
self.b.update_board()
if __name__ == "__main__":
# Launch main-function if running this script
game = Game()
game.run()
| 24.830189 | 73 | 0.458207 | 1,084 | 0.823708 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.25152 |
98efb4404db7ca8bc8ddf99fbe40494ec2e70aa1 | 2,515 | py | Python | pix2pix/Dataset_util.py | Atharva-Phatak/Season-Tranfer | d6a0d4d42e396677920ffb81ab0086b0aa05d3c3 | [
"MIT"
]
| 2 | 2019-07-02T14:00:15.000Z | 2019-07-11T15:50:41.000Z | pix2pix/Dataset_util.py | Atharva-Phatak/Season-Tranfer | d6a0d4d42e396677920ffb81ab0086b0aa05d3c3 | [
"MIT"
]
| null | null | null | pix2pix/Dataset_util.py | Atharva-Phatak/Season-Tranfer | d6a0d4d42e396677920ffb81ab0086b0aa05d3c3 | [
"MIT"
]
| null | null | null | #importing libraries
import torch
import torch.utils.data as data
import os
import random
from PIL import Image
class CreateDataset(data.Dataset):
def __init__(self , imagedir , subfolder='train' , direction = 'AtoB' , flip = False , transform = None ,resize_scale = None , crop_size = None):
super(CreateDataset , self).__init__()
self.images_path = os.path.join(imagedir , subfolder)
self.image_filenames = [name for name in sorted(os.listdir(self.images_path))]
self.flip = flip
self.transform = transform
self.resize_scale = resize_scale
self.crop_size = crop_size
self.direction = direction
def __getitem__(self , index):
image_path = os.path.join(self.images_path , self.image_filenames[index])
img = Image.open(image_path)
if self.direction == 'AtoB':
inp_img = img.crop((0,0,img.width//2 , img.height))
target_img = img.crop((img.width//2 , 0 , img.width , img.height))
elif self.direction == 'BtoA':
inp_img = img.crop((img.width//2 , 0 , img.width , img.height))
target_img = img.crop((0,0,img.width//2 , img.height))
if self.resize_scale:
inp_img = inp_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
target_img = target_img.resize((self.resize_scale , self.resize_scale) , Image.BILINEAR)
if self.crop_size:
x = random.randint(0 , self.resize_scale - self.crop_size + 1)
y = random.randint(0 , self.resize_scale - self.crop_size + 1)
inp_img = inp_img.crop((x , y , x + self.crop_size , y + self.crop_size))
target_img = target_img.crop((x , y , x + self.crop_size , y + self.crop_size))
if self.flip:
if random.random() < 0.5:
inp_img = inp_img.transpose(Image.FLIP_LEFT_RIGHT)
target_img = target_img.transpose(Image.FLIP_LEFT_RIGHT)
if self.transform is not None:
inp_img = self.transform(inp_img)
target_img = self.transform(target_img)
return inp_img , target_img
def __len__(self):
return len(self.image_filenames)
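if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file). The directory name,
    # resize/crop sizes and normalisation constants are illustrative assumptions for a
    # pix2pix-style dataset of side-by-side A|B image pairs.
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    dataset = CreateDataset('data/facades', subfolder='train', direction='AtoB',
                            flip=True, transform=transform,
                            resize_scale=286, crop_size=256)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    inp, target = next(iter(loader))
    print(inp.shape, target.shape)  # e.g. torch.Size([4, 3, 256, 256]) for both halves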
| 36.985294 | 150 | 0.553082 | 2,360 | 0.93837 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.01829 |
98efd5c91e56c42872a45ff29528b847156d1400 | 20,126 | py | Python | crslab/system/C2CRS_System.py | Zyh716/WSDM2022-C2CRS | 8ef2fa7c44bdba1799ab79f379ae7394bd468c02 | [
"MIT"
]
| 4 | 2022-03-24T02:14:50.000Z | 2022-03-30T02:28:19.000Z | crslab/system/C2CRS_System.py | RUCAIBox/WSDM2022-C2CRS | 8ef2fa7c44bdba1799ab79f379ae7394bd468c02 | [
"MIT"
]
| null | null | null | crslab/system/C2CRS_System.py | RUCAIBox/WSDM2022-C2CRS | 8ef2fa7c44bdba1799ab79f379ae7394bd468c02 | [
"MIT"
]
| 2 | 2022-03-23T02:24:24.000Z | 2022-03-28T12:45:43.000Z | # @Time : 2022/1/1
# @Author : Yuanhang Zhou
# @email : [email protected]
import os
from math import floor
import torch
from loguru import logger
from typing import List, Dict
from copy import copy, deepcopy
import pickle
import os
import numpy
import ipdb
from crslab.config import PRETRAIN_PATH, SAVE_PATH
from crslab.data import get_dataloader, dataset_language_map
from crslab.evaluator.metrics.base import AverageMetric
from crslab.evaluator.metrics.gen import PPLMetric
from crslab.system.base import BaseSystem
from crslab.system.utils.functions import ind2txt, ind2txt2
import random
from tqdm import tqdm
class C2CRS_System(BaseSystem):
"""This is the system for TGReDial model"""
def __init__(self, opt, train_dataloader, valid_dataloader, test_dataloader, vocab, side_data, restore_system=False,
interact=False, debug=False):
"""
Args:
opt (dict): Indicating the hyper parameters.
train_dataloader (BaseDataLoader): Indicating the train dataloader of corresponding dataset.
valid_dataloader (BaseDataLoader): Indicating the valid dataloader of corresponding dataset.
test_dataloader (BaseDataLoader): Indicating the test dataloader of corresponding dataset.
vocab (dict): Indicating the vocabulary.
side_data (dict): Indicating the side data.
restore_system (bool, optional): Indicating if we store system after training. Defaults to False.
interact (bool, optional): Indicating if we interact with system. Defaults to False.
debug (bool, optional): Indicating if we train in debug mode. Defaults to False.
"""
super(C2CRS_System, self).__init__(opt, train_dataloader, valid_dataloader,
test_dataloader, vocab, side_data, restore_system, interact, debug)
self._init_token_attribute(vocab)
self._init_rec_attribute(side_data, vocab)
self._init_conv_attribute(side_data, vocab)
self._init_pretrain_attribute(side_data, vocab)
self.language = dataset_language_map[self.opt['dataset']]
self.pertrain_save_epoches = [epoch-1 for epoch in eval(opt['pertrain_save_epoches'])]
def _init_token_attribute(self, vocab):
self.ind2tok = vocab['rec']['ind2tok']
self.end_token_idx = vocab['rec']['end']
self.unk_token_idx = vocab['rec']['unk']
self.unk = self.ind2tok.get(self.unk_token_idx, '<unk>')
def _init_rec_attribute(self, side_data, vocab):
self.item_ids = side_data['rec']['item_entity_ids']
self.id2entity = side_data['rec']['entity_kg']['id2entity']
self.dpath = side_data['rec']['dpath']
self.rec_ind2tok = vocab['rec']['ind2tok']
self.rec_optim_opt = deepcopy(self.opt['rec'])
self.rec_batch_size = self.opt['rec_batch_size'] if self.opt['rec_batch_size'] != -1 else self.rec_optim_opt['batch_size']
self.rec_epoch = self.opt['rec_epoch'] if self.opt['rec_epoch'] != -1 else self.rec_optim_opt['epoch']
def _init_conv_attribute(self, side_data, vocab):
self.conv_optim_opt = self.opt['conv']
if self.conv_optim_opt.get('lr_scheduler', None) and 'Transformers' in self.conv_optim_opt['lr_scheduler']['name']:
batch_num = 0
for _ in self.train_dataloader['rec'].get_conv_data(batch_size=self.conv_batch_size, shuffle=False):
batch_num += 1
conv_training_steps = self.conv_epoch * floor(batch_num / self.conv_optim_opt.get('update_freq', 1))
self.conv_optim_opt['lr_scheduler']['training_steps'] = conv_training_steps
self.conv_batch_size = self.opt['conv_batch_size'] if self.opt['conv_batch_size'] != -1 else self.conv_optim_opt['batch_size']
self.conv_epoch = self.opt['conv_epoch'] if self.opt['conv_epoch'] != -1 else self.conv_optim_opt['epoch']
def _init_pretrain_attribute(self, side_data, vocab):
if 'pretrain' in self.opt:
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
self.pretrain_epoch = self.opt['pretrain_epoch'] if self.opt['pretrain_epoch'] != -1 else self.pretrain_optim_opt['pretrain_epoch']
self.pretrain_batch_size = self.opt['pretrain_batch_size'] if self.opt['pretrain_batch_size'] != -1 else self.pretrain_optim_opt['batch_size']
def rec_evaluate(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
item_label = item_label.tolist()
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
self.evaluator.rec_evaluate(rec_rank, item)
def rec_evaluate_and_return_score(self, rec_predict, item_label):
rec_predict = rec_predict.cpu()
rec_predict = rec_predict[:, self.item_ids]
_, rec_ranks = torch.topk(rec_predict, 50, dim=-1)
_, fully_rec_ranks = torch.topk(rec_predict, 50, dim=-1)
rec_ranks = rec_ranks.tolist()
fully_rec_ranks = fully_rec_ranks.tolist()
item_label = item_label.tolist()
scores = []
for rec_rank, item in zip(rec_ranks, item_label):
item = self.item_ids.index(item)
scores.append(self.evaluator.rec_evaluate_and_return_score(rec_rank, fully_rec_ranks, item, self.opt['score_type']))
return scores, rec_ranks
def conv_evaluate(self, prediction, response):
"""
Args:
prediction: torch.LongTensor, shape=(bs, response_truncate-1)
response: torch.LongTensor, shape=(bs, response_truncate)
the first token in response is <|endoftext|>, it is not in prediction
"""
prediction = prediction.tolist()
response = response.tolist()
for p, r in zip(prediction, response):
p_str, p_ListStr = ind2txt2(p, self.ind2tok, self.end_token_idx)
r_str, r_ListStr = ind2txt2(r[1:], self.ind2tok, self.end_token_idx)
self.evaluator.gen_evaluate(p_str, [r_str], p_ListStr, [r_ListStr])
def step(self, batch, stage, mode, epoch=-1):
batch, unbatchify_batch = batch
self.step_default(batch, stage, mode, epoch)
def step_default(self, batch, stage, mode, epoch=-1):
"""
stage: ['policy', 'rec', 'conv']
        mode: ['train', 'val', 'test']
"""
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
if stage == 'pretrain_rec':
loss = self.rec_model.pretrain(batch, mode, epoch)
if loss:
if mode == "train":
self.backward(loss)
loss = loss.item()
self.evaluator.optim_metrics.add("loss", AverageMetric(loss))
elif stage == 'policy':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
policy_loss, policy_predict = self.rec_model.guide(batch, mode)
if mode == "train" and policy_loss is not None:
self.backward(policy_loss)
else:
self.policy_evaluate(policy_predict, batch[-1])
if isinstance(policy_loss, torch.Tensor):
policy_loss = policy_loss.item()
self.evaluator.optim_metrics.add("policy_loss",
AverageMetric(policy_loss))
elif stage == 'rec':
if mode == 'train':
self.rec_model.train()
else:
self.rec_model.eval()
rec_loss, rec_predict = self.rec_model.recommend(batch, mode)
if mode == "train":
self.backward(rec_loss)
else:
self.rec_evaluate(rec_predict, batch['movie_to_rec'])
rec_loss = rec_loss.item()
self.evaluator.optim_metrics.add("rec_loss",
AverageMetric(rec_loss))
elif stage == "conv":
if mode != "test":
gen_loss, pred = self.rec_model.converse(batch, mode)
if mode == 'train':
self.backward(gen_loss)
else:
self.conv_evaluate(pred, batch['response'])
gen_loss = gen_loss.item()
self.evaluator.optim_metrics.add("gen_loss",
AverageMetric(gen_loss))
self.evaluator.gen_metrics.add("ppl", PPLMetric(gen_loss))
else:
# generate response in rec_model.step
_, pred = self.rec_model.converse(batch, mode)
response = batch['response']
self.conv_evaluate(pred, response)
self.record_conv_gt_pred(response, pred, epoch)
self.record_conv_gt(response, pred)
self.record_conv_pred(response, pred, epoch)
else:
raise
def record_conv_gt_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_gt_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_gt(self, batch_response, batch_pred):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer('record_conv_gt', '.txt')
for response, pred in zip(batch_response, batch_pred):
response_tok_list = self.convert_tensor_ids_to_tokens(response)
file_writer.writelines(' '.join(response_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def record_conv_pred(self, batch_response, batch_pred, epoch):
# (bs, response_truncate), (bs, response_truncate)
file_writer = self.get_file_writer(f'{epoch}_record_conv_pred', '.txt')
for response, pred in zip(batch_response, batch_pred):
pred_tok_list = self.convert_tensor_ids_to_tokens(pred)
file_writer.writelines(' '.join(pred_tok_list) + '\n')
file_writer.writelines('\n')
file_writer.close()
def get_file_writer(self, file_keywords: str, file_type: str):
file_name = file_keywords + file_type
file_path = os.path.join(self.opt['LOG_PATH'], file_name)
if os.path.exists(file_path):
file_writer = open(file_path, 'a', encoding='utf-8')
else:
file_writer = open(file_path, 'w', encoding='utf-8')
return file_writer
def convert_tensor_ids_to_tokens(self, token_ids):
tokens = []
token_ids = token_ids.tolist() # List[int]
if not token_ids:
return tokens
for token_id in token_ids:
if token_id == self.end_token_idx:
return tokens
tokens.append(self.ind2tok.get(token_id, self.unk))
return tokens
def is_early_stop(self, valid_metric, epoch):
early_stop_result = self.early_stop(valid_metric)
# logger.info(f'valid_metric = {valid_metric}, early_stop_result = {early_stop_result}, stop_mode = {self.stop_mode}')
if early_stop_result == 'Stop':
return True
elif early_stop_result == 'New Model':
self.save_model(epoch=epoch, valid_metric=valid_metric)
elif early_stop_result == 'Patience':
pass
return False
def fit(self):
self.extend_datasets()
self.pre_training()
self.train_recommender_default()
self.train_conversation_using_rec_model()
def extend_datasets(self):
extend_train_dataset = self.train_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.train_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.valid_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.valid_dataloader['rec'].replace_dataset(extend_train_dataset)
extend_train_dataset = self.test_dataloader['rec'].add_avi_info_to_init_dataset_u()
self.test_dataloader['rec'].replace_dataset(extend_train_dataset)
def pre_training(self):
self.init_pretrain_optim()
self.pretrain_recommender_convergence()
def init_pretrain_optim(self):
self.pretrain_optim_opt = deepcopy(self.opt['pretrain'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.pretrain_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.pretrain_optim_opt), params)
def pretrain_recommender_convergence(self):
for epoch in range(self.pretrain_epoch):
self.pretrain_recommender_one_epoch(epoch)
valid_metric = self.valid_pretrain_recommender(epoch)
if epoch in self.pertrain_save_epoches:
self.save_model(post_fix='epoch_{}'.format(epoch), epoch=epoch, valid_metric=valid_metric)
if self.is_early_stop(valid_metric, epoch):
break
def pretrain_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Pretrain | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=True):
self.step(batch, stage='pretrain_rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_pretrain_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.pretrain_batch_size,
shuffle=False):
self.step(batch, stage='pretrain_rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.optim_metrics['loss']
return metric
def train_recommender_default(self):
self.init_rec_optim()
self.train_recommender_convergence()
# test
if self.rec_epoch != 0:
self.restore_model_from_save()
self.test_recommender('final')
def init_rec_optim(self):
self.rec_optim_opt = deepcopy(self.opt['rec'])
# get params and training setting
bert_param = [p for n, p in self.rec_model.named_parameters() if 'bert' in n]
other_param = [p for n, p in self.rec_model.named_parameters() if 'bert' not in n]
params = [{'params': bert_param, 'lr': self.rec_optim_opt['lr_bert']},
{'params': other_param}]
logger.info('There are {} bert parameters unit, {} other parameters unit'
.format(len(bert_param), len(other_param)))
self.init_optim(deepcopy(self.rec_optim_opt), params)
def train_recommender_convergence(self) -> float:
for epoch in range(self.rec_epoch):
self.train_recommender_one_epoch(epoch)
valid_metric = self.valid_recommender(epoch)
if self.is_early_stop(valid_metric, epoch):
break
def train_recommender_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Train | Epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=True):
self.step(batch, stage='rec', mode='train', epoch=epoch)
self.evaluator.report()
def valid_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Valid | Epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='val', epoch=epoch)
self.evaluator.report()
metric = self.evaluator.rec_metrics['hit@1'] + self.evaluator.rec_metrics['hit@50']
return metric
def test_recommender(self, epoch):
logger.info(f'[{self.log_prefix}][Recommender | Test ]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_rec_data(self.rec_batch_size,
shuffle=False):
self.step(batch, stage='rec', mode='test', epoch=epoch)
self.evaluator.report()
def train_conversation_using_rec_model(self):
self.init_optim(deepcopy(self.conv_optim_opt), self.rec_model.parameters())
if self.opt['freeze_parameters']:
self.rec_model.freeze_parameters()
self.train_conversation_convergence()
if self.conv_epoch != 0:
self.restore_model_from_save()
self.test_conversation('final')
def train_conversation_convergence(self):
for epoch in range(self.conv_epoch):
self.train_conversation_one_epoch(epoch)
valid_metric = self.valid_conversation(epoch)
self.test_conversation('final')
if self.is_early_stop(valid_metric, epoch):
break
def train_conversation_one_epoch(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Train | epoch {str(epoch)}]')
self.evaluator.reset_metrics()
for batch in self.train_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=True):
self.step(batch, stage='conv', mode='train', epoch=epoch)
self.evaluator.report()
def valid_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Valid | epoch {str(epoch)}]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.valid_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='val', epoch=epoch)
self.evaluator.report()
valid_metric = self.get_sum_dist_metric()
# early stop
return valid_metric
def get_sum_dist_metric(self):
sum_dist = 0
for k in range(1, 5):
try:
sum_dist += self.evaluator.gen_metrics[f'dist@{k}']
except:
pass
return sum_dist
def test_conversation(self, epoch):
logger.info(f'[{self.log_prefix}][Conversation | Test]')
with torch.no_grad():
self.evaluator.reset_metrics()
for batch in self.test_dataloader['rec'].get_conv_data(
batch_size=self.conv_batch_size, shuffle=False):
self.step(batch, stage='conv', mode='test', epoch=epoch)
self.evaluator.report()
def interact(self):
pass | 42.549683 | 154 | 0.622131 | 19,494 | 0.968598 | 0 | 0 | 0 | 0 | 0 | 0 | 3,529 | 0.175345 |
98efe77eec76324cc9234c09e5f48bc8417b9d98 | 1,198 | py | Python | morepath/__init__.py | hugovk/morepath | 5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28 | [
"BSD-3-Clause"
]
| 314 | 2015-01-01T01:42:52.000Z | 2022-01-07T21:46:15.000Z | morepath/__init__.py | hugovk/morepath | 5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28 | [
"BSD-3-Clause"
]
| 369 | 2015-01-02T19:10:40.000Z | 2021-07-03T04:37:27.000Z | morepath/__init__.py | hugovk/morepath | 5596f9ce43ee4e5cd73eaa2ab9ef37825f88ae28 | [
"BSD-3-Clause"
]
| 37 | 2015-01-11T09:22:02.000Z | 2021-07-02T20:48:20.000Z | # flake8: noqa
"""This is the main public API of Morepath.
Additional public APIs can be imported from the :mod:`morepath.error`
and :mod:`morepath.pdbsupport` modules. For custom directive
implementations that interact with core directives for grouping or
subclassing purposes, or that need to use one of the Morepath
registries, you may need to import from :mod:`morepath.directive`.
The other submodules are considered private. If you find yourself
needing to import from them in application or extension code, please
report an issue about it on the Morepath issue tracker.
"""
from dectate import commit
from .app import App, dispatch_method
from .core import (
excview_tween_factory as EXCVIEW,
poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION,
model_predicate,
name_predicate,
request_method_predicate,
)
from .core import request_method_predicate as LAST_VIEW_PREDICATE
from .view import render_json, render_html, redirect
from .request import Request, Response
from .autosetup import scan, autoscan
from .authentication import Identity, IdentityPolicy, NO_IDENTITY
from .converter import Converter
from .reify import reify
from .run import run
| 37.4375 | 76 | 0.810518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.484975 |
98f2204e0eeff6cafe4a1031fc879a4bec0db151 | 37 | py | Python | src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py | GosthMan/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
]
| 46 | 2015-03-04T14:19:47.000Z | 2021-12-09T02:58:46.000Z | src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py | aytida23/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
]
| 2 | 2015-06-05T10:29:04.000Z | 2015-12-06T16:54:10.000Z | src/AuShadha/demographics/email_and_fax/dijit_fields_constants.py | aytida23/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
]
| 24 | 2015-03-23T01:38:11.000Z | 2022-01-24T16:23:42.000Z | EMAIL_AND_FAX_FORM_CONSTANTS = {
} | 12.333333 | 32 | 0.756757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
98f428d0ea0b7f44539193898ee9647b5c6c689f | 2,242 | py | Python | marketDataRetrieval.py | amertx/Monte-Carlo-Simulation | 6c3a616bc67e668d80a73247ca279e10f6d46cd5 | [
"MIT"
]
| null | null | null | marketDataRetrieval.py | amertx/Monte-Carlo-Simulation | 6c3a616bc67e668d80a73247ca279e10f6d46cd5 | [
"MIT"
]
| null | null | null | marketDataRetrieval.py | amertx/Monte-Carlo-Simulation | 6c3a616bc67e668d80a73247ca279e10f6d46cd5 | [
"MIT"
]
| null | null | null | #Prediction model using an instance of the Monte Carlo simulation and Brownian Motion equation
#import of libraries
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
from scipy.stats import norm
#ticker selection
def mainFunction(tradingSymbol):
data = pd.DataFrame()
data[tradingSymbol] = wb.DataReader(tradingSymbol, data_source='yahoo', start='2019-1-1')['Adj Close']
#percent change of asset price
log_returns = np.log(1+ data.pct_change())
    #graph showing growth of the adjusted close price over time, beginning from 2019
data.plot(figsize = (10,6));
plt.show()
#graph of log returns of input ticker
#returns are normally distributed and have a consistent mean
log_returns.plot(figsize = (10,6))
plt.show()
#calculations
averageDailyReturn = log_returns.mean()
variance = log_returns.var()
drift = averageDailyReturn-(variance/2)
standardDeviation = log_returns.std()
    #Geometric Brownian Motion: each daily return is computed as exp(drift + standardDeviation * Z)
    #where Z is a standard normal draw obtained via norm.ppf of a uniform random number
#prediction of future stock price based on simulation below using numpy for storing data into array
np.array(drift)
drift.values
standardDeviation.values
#Brownian motion variable correlating to the distance between the mean and the number of standard deviation
norm.ppf(0.95)
#10 x 2 Matrix
x = np.random.rand(10,2)
norm.ppf(x)
#stores distances from the mean value, 0, into the 10 x 2 matrix
Z = norm.ppf(np.random.rand(10,2))
#time interval for the stock price forecast
timeInterval = 365
iterations = 5
    #daily return = exp(drift + standardDeviation * Z), drawn independently for every day and path
    #5 simulated paths (iterations) of 365 daily returns for the ticker symbol
dailyReturns = np.exp(drift.values + standardDeviation.values * norm.ppf(np.random.rand(timeInterval,iterations)))
#returns into price points
presentPrice = data.iloc[-1]
priceList = np.zeros_like(dailyReturns)
priceList[0] = presentPrice
    #iterate over the 365-day time interval, compounding each day's simulated return
for t in range(1, timeInterval):
priceList[t] = priceList[t-1] * dailyReturns[t]
    #showcases the 5 simulated paths of the future stock price
plt.figure(figsize =(10,6))
plt.plot(priceList)
plt.show()
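if __name__ == "__main__":
    # Minimal entry point (not part of the original script) so the simulation can be run
    # directly; the ticker symbol below is an illustrative assumption.
    mainFunction('AAPL')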
| 29.116883 | 118 | 0.711864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.424175 |
98f43fcd4c7844a9b69d2baa890a95f4841f18e8 | 31,716 | py | Python | HelloDeepSpeed/train_bert_ds.py | mrwyattii/DeepSpeedExamples | 6bd444a7c62e9d7d320dd4c1e1142062f50c861d | [
"MIT"
]
| null | null | null | HelloDeepSpeed/train_bert_ds.py | mrwyattii/DeepSpeedExamples | 6bd444a7c62e9d7d320dd4c1e1142062f50c861d | [
"MIT"
]
| null | null | null | HelloDeepSpeed/train_bert_ds.py | mrwyattii/DeepSpeedExamples | 6bd444a7c62e9d7d320dd4c1e1142062f50c861d | [
"MIT"
]
| null | null | null | """
Modified version of train_bert.py that adds DeepSpeed
"""
import os
import datetime
import json
import pathlib
import re
import string
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union
import random
import datasets
import fire
import logging
import loguru
import numpy as np
import pytz
import sh
import torch
import torch.nn as nn
import deepspeed
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.models.roberta import RobertaConfig, RobertaModel
from transformers.models.roberta.modeling_roberta import (
RobertaLMHead,
RobertaPreTrainedModel,
)
def is_rank_0() -> bool:
return int(os.environ.get("RANK", "0")) == 0
######################################################################
####################### Logging Functions ############################
######################################################################
logger = loguru.logger
def log_dist(message: str,
ranks: List[int] = [],
level: int = logging.INFO) -> None:
"""Log messages for specified ranks only"""
my_rank = int(os.environ.get("RANK", "0"))
if my_rank in ranks:
if level == logging.INFO:
logger.info(f'[Rank {my_rank}] {message}')
if level == logging.ERROR:
logger.error(f'[Rank {my_rank}] {message}')
if level == logging.DEBUG:
logger.debug(f'[Rank {my_rank}] {message}')
######################################################################
############### Dataset Creation Related Functions ###################
######################################################################
TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
def collate_function(batch: List[Tuple[List[int], List[int]]],
pad_token_id: int) -> Dict[str, torch.Tensor]:
"""Collect a list of masked token indices, and labels, and
batch them, padding to max length in the batch.
"""
max_length = max(len(token_ids) for token_ids, _ in batch)
padded_token_ids = [
token_ids +
[pad_token_id for _ in range(0, max_length - len(token_ids))]
for token_ids, _ in batch
]
padded_labels = [
labels + [pad_token_id for _ in range(0, max_length - len(labels))]
for _, labels in batch
]
src_tokens = torch.LongTensor(padded_token_ids)
tgt_tokens = torch.LongTensor(padded_labels)
attention_mask = src_tokens.ne(pad_token_id).type_as(src_tokens)
return {
"src_tokens": src_tokens,
"tgt_tokens": tgt_tokens,
"attention_mask": attention_mask,
}
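# Small worked example of the collation above (illustrative values, assuming
# pad_token_id=1): collate_function([([5, 8, 9], [1, 8, 1]), ([5, 8], [1, 8])], 1)
# pads both pairs to length 3 and yields src_tokens=[[5, 8, 9], [5, 8, 1]],
# tgt_tokens=[[1, 8, 1], [1, 8, 1]] and attention_mask=[[1, 1, 1], [1, 1, 0]].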
def masking_function(
text: str,
tokenizer: TokenizerType,
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
max_length: int,
) -> Tuple[List[int], List[int]]:
"""Given a text string, randomly mask wordpieces for Bert MLM
training.
Args:
text (str):
The input text
tokenizer (TokenizerType):
The tokenizer for tokenization
mask_prob (float):
What fraction of tokens to mask
random_replace_prob (float):
Of the masked tokens, how many should be replaced with
random tokens (improves performance)
unmask_replace_prob (float):
Of the masked tokens, how many should be replaced with
the original token (improves performance)
max_length (int):
The maximum sequence length to consider. Note that for
Bert style models, this is a function of the number of
positional embeddings you learn
Returns:
Tuple[List[int], List[int]]:
The masked token ids (based on the tokenizer passed),
and the output labels (padded with `tokenizer.pad_token_id`)
"""
# Note: By default, encode does add the BOS and EOS token
# Disabling that behaviour to make this more clear
tokenized_ids = ([tokenizer.bos_token_id] +
tokenizer.encode(text,
add_special_tokens=False,
truncation=True,
max_length=max_length - 2) +
[tokenizer.eos_token_id])
seq_len = len(tokenized_ids)
tokenized_ids = np.array(tokenized_ids)
subword_mask = np.full(len(tokenized_ids), False)
# Masking the BOS and EOS token leads to slightly worse performance
low = 1
high = len(subword_mask) - 1
mask_choices = np.arange(low, high)
num_subwords_to_mask = max(
int((mask_prob * (high - low)) + np.random.rand()), 1)
subword_mask[np.random.choice(mask_choices,
num_subwords_to_mask,
replace=False)] = True
# Create the labels first
labels = np.full(seq_len, tokenizer.pad_token_id)
labels[subword_mask] = tokenized_ids[subword_mask]
tokenized_ids[subword_mask] = tokenizer.mask_token_id
# Now of the masked tokens, choose how many to replace with random and how many to unmask
rand_or_unmask_prob = random_replace_prob + unmask_replace_prob
if rand_or_unmask_prob > 0:
rand_or_unmask = subword_mask & (np.random.rand(len(tokenized_ids)) <
rand_or_unmask_prob)
if random_replace_prob == 0:
unmask = rand_or_unmask
rand_mask = None
elif unmask_replace_prob == 0:
unmask = None
rand_mask = rand_or_unmask
else:
unmask_prob = unmask_replace_prob / rand_or_unmask_prob
decision = np.random.rand(len(tokenized_ids)) < unmask_prob
unmask = rand_or_unmask & decision
rand_mask = rand_or_unmask & (~decision)
if unmask is not None:
tokenized_ids[unmask] = labels[unmask]
if rand_mask is not None:
weights = np.ones(tokenizer.vocab_size)
weights[tokenizer.all_special_ids] = 0
probs = weights / weights.sum()
num_rand = rand_mask.sum()
tokenized_ids[rand_mask] = np.random.choice(tokenizer.vocab_size,
num_rand,
p=probs)
return tokenized_ids.tolist(), labels.tolist()
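# Rough illustration of the masking above (numbers are indicative only): with
# mask_prob=0.15 a 100-subword sequence gets roughly 15 interior positions selected;
# each selected position keeps its true id in `labels` (all other label positions are
# pad_token_id), while in the returned token ids it becomes the mask token, a random
# non-special token, or stays unchanged according to random_replace_prob and
# unmask_replace_prob.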
class WikiTextMLMDataset(Dataset):
"""A [Map style dataset](https://pytorch.org/docs/stable/data.html)
for iterating over the wikitext dataset. Note that this assumes
the dataset can fit in memory. For larger datasets
you'd want to shard them and use an iterable dataset (eg: see
[Infinibatch](https://github.com/microsoft/infinibatch))
Args:
Dataset (datasets.arrow_dataset.Dataset):
The wikitext dataset
masking_function (Callable[[str], Tuple[List[int], List[int]]])
The masking function. To generate one training instance,
the masking function is applied to the `text` of a dataset
record
"""
def __init__(
self,
dataset: datasets.arrow_dataset.Dataset,
masking_function: Callable[[str], Tuple[List[int], List[int]]],
) -> None:
self.dataset = dataset
self.masking_function = masking_function
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[List[int], List[int]]:
tokens, labels = self.masking_function(self.dataset[idx]["text"])
return (tokens, labels)
T = TypeVar("T")
class InfiniteIterator(object):
def __init__(self, iterable: Iterable[T]) -> None:
self._iterable = iterable
self._iterator = iter(self._iterable)
def __iter__(self):
return self
def __next__(self) -> T:
next_item = None
try:
next_item = next(self._iterator)
except StopIteration:
self._iterator = iter(self._iterable)
next_item = next(self._iterator)
return next_item
def create_data_iterator(
mask_prob: float,
random_replace_prob: float,
unmask_replace_prob: float,
batch_size: int,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
) -> InfiniteIterator:
"""Create the dataloader.
Args:
mask_prob (float):
Fraction of tokens to mask
random_replace_prob (float):
Fraction of masked tokens to replace with random token
unmask_replace_prob (float):
Fraction of masked tokens to replace with the actual token
batch_size (int):
The batch size of the generated tensors
max_seq_length (int, optional):
The maximum sequence length for the MLM task. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
Returns:
InfiniteIterator:
The torch DataLoader, wrapped in an InfiniteIterator class, to
be able to continuously generate samples
"""
wikitext_dataset = datasets.load_dataset("wikitext",
"wikitext-2-v1",
split="train")
wikitext_dataset = wikitext_dataset.filter(
lambda record: record["text"] != "").map(
lambda record: {"text": record["text"].rstrip("\n")})
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
masking_function_partial = partial(
masking_function,
tokenizer=tokenizer,
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
max_length=max_seq_length,
)
dataset = WikiTextMLMDataset(wikitext_dataset, masking_function_partial)
collate_fn_partial = partial(collate_function,
pad_token_id=tokenizer.pad_token_id)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn_partial)
return InfiniteIterator(dataloader)
######################################################################
############### Model Creation Related Functions #####################
######################################################################
class RobertaLMHeadWithMaskedPredict(RobertaLMHead):
def __init__(self,
config: RobertaConfig,
embedding_weight: Optional[torch.Tensor] = None) -> None:
super(RobertaLMHeadWithMaskedPredict, self).__init__(config)
if embedding_weight is not None:
self.decoder.weight = embedding_weight
def forward( # pylint: disable=arguments-differ
self,
features: torch.Tensor,
masked_token_indices: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""The current `transformers` library does not provide support
for masked_token_indices. This function provides the support, by
running the final forward pass only for the masked indices. This saves
memory
Args:
features (torch.Tensor):
The features to select from. Shape (batch, seq_len, h_dim)
masked_token_indices (torch.Tensor, optional):
The indices of masked tokens for index select. Defaults to None.
Shape: (num_masked_tokens,)
Returns:
torch.Tensor:
The index selected features. Shape (num_masked_tokens, h_dim)
"""
if masked_token_indices is not None:
features = torch.index_select(
features.view(-1, features.shape[-1]), 0, masked_token_indices)
return super().forward(features)
class RobertaMLMModel(RobertaPreTrainedModel):
def __init__(self, config: RobertaConfig, encoder: RobertaModel) -> None:
super().__init__(config)
self.encoder = encoder
self.lm_head = RobertaLMHeadWithMaskedPredict(
config, self.encoder.embeddings.word_embeddings.weight)
self.lm_head.apply(self._init_weights)
def forward(
self,
src_tokens: torch.Tensor,
attention_mask: torch.Tensor,
tgt_tokens: torch.Tensor,
) -> torch.Tensor:
"""The forward pass for the MLM task
Args:
src_tokens (torch.Tensor):
The masked token indices. Shape: (batch, seq_len)
attention_mask (torch.Tensor):
The attention mask, since the batches are padded
to the largest sequence. Shape: (batch, seq_len)
tgt_tokens (torch.Tensor):
The output tokens (padded with `config.pad_token_id`)
Returns:
torch.Tensor:
The MLM loss
"""
# shape: (batch, seq_len, h_dim)
sequence_output, *_ = self.encoder(input_ids=src_tokens,
attention_mask=attention_mask,
return_dict=False)
pad_token_id = self.config.pad_token_id
# (labels have also been padded with pad_token_id)
# filter out all masked labels
# shape: (num_masked_tokens,)
masked_token_indexes = torch.nonzero(
(tgt_tokens != pad_token_id).view(-1)).view(-1)
# shape: (num_masked_tokens, vocab_size)
prediction_scores = self.lm_head(sequence_output, masked_token_indexes)
# shape: (num_masked_tokens,)
target = torch.index_select(tgt_tokens.view(-1), 0,
masked_token_indexes)
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), target)
return masked_lm_loss
def create_model(num_layers: int, num_heads: int, ff_dim: int, h_dim: int,
dropout: float) -> RobertaMLMModel:
"""Create a Bert model with the specified `num_heads`, `ff_dim`,
`h_dim` and `dropout`
Args:
num_layers (int):
The number of layers
num_heads (int):
The number of attention heads
ff_dim (int):
The intermediate hidden size of
the feed forward block of the
transformer
h_dim (int):
The hidden dim of the intermediate
representations of the transformer
dropout (float):
The value of dropout to be used.
Note that we apply the same dropout
to both the attention layers and the
FF layers
Returns:
RobertaMLMModel:
A Roberta model for MLM task
"""
roberta_config_dict = {
"attention_probs_dropout_prob": dropout,
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": dropout,
"hidden_size": h_dim,
"initializer_range": 0.02,
"intermediate_size": ff_dim,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "roberta",
"num_attention_heads": num_heads,
"num_hidden_layers": num_layers,
"pad_token_id": 1,
"type_vocab_size": 1,
"vocab_size": 50265,
}
roberta_config = RobertaConfig.from_dict(roberta_config_dict)
roberta_encoder = RobertaModel(roberta_config)
roberta_model = RobertaMLMModel(roberta_config, roberta_encoder)
return roberta_model
######################################################################
########### Experiment Management Related Functions ##################
######################################################################
def get_unique_identifier(length: int = 8) -> str:
"""Create a unique identifier by choosing `length`
random characters from list of ascii characters and numbers
"""
alphabet = string.ascii_lowercase + string.digits
uuid = "".join(alphabet[ix]
for ix in np.random.choice(len(alphabet), length))
return uuid
def create_experiment_dir(checkpoint_dir: pathlib.Path,
all_arguments: Dict[str, Any]) -> pathlib.Path:
"""Create an experiment directory and save all arguments in it.
Additionally, also store the githash and gitdiff. Finally create
a directory for `Tensorboard` logs. The structure would look something
like
checkpoint_dir
`-experiment-name
|- hparams.json
|- githash.log
|- gitdiff.log
`- tb_dir/
Args:
checkpoint_dir (pathlib.Path):
The base checkpoint directory
all_arguments (Dict[str, Any]):
The arguments to save
Returns:
pathlib.Path: The experiment directory
"""
# experiment name follows the following convention
# {exp_type}.{YYYY}.{MM}.{DD}.{HH}.{MM}.{SS}.{uuid}
current_time = datetime.datetime.now(pytz.timezone("US/Pacific"))
expname = "bert_pretrain.{0}.{1}.{2}.{3}.{4}.{5}.{6}".format(
current_time.year,
current_time.month,
current_time.day,
current_time.hour,
current_time.minute,
current_time.second,
get_unique_identifier(),
)
exp_dir = checkpoint_dir / expname
if not is_rank_0():
return exp_dir
exp_dir.mkdir(exist_ok=False)
hparams_file = exp_dir / "hparams.json"
with hparams_file.open("w") as handle:
json.dump(obj=all_arguments, fp=handle, indent=2)
# Save the git hash
try:
gitlog = sh.git.log("-1", format="%H", _tty_out=False, _fg=False)
with (exp_dir / "githash.log").open("w") as handle:
handle.write(gitlog.stdout.decode("utf-8"))
except sh.ErrorReturnCode_128:
log_dist(
"Seems like the code is not running from"
" within a git repo, so hash will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# And the git diff
try:
gitdiff = sh.git.diff(_fg=False, _tty_out=False)
with (exp_dir / "gitdiff.log").open("w") as handle:
handle.write(gitdiff.stdout.decode("utf-8"))
except sh.ErrorReturnCode_129:
log_dist(
"Seems like the code is not running from"
" within a git repo, so diff will"
" not be stored. However, it"
" is strongly advised to use"
" version control.",
ranks=[0],
level=logging.INFO)
# Finally create the Tensorboard Dir
tb_dir = exp_dir / "tb_dir"
tb_dir.mkdir(exist_ok=False)
return exp_dir
######################################################################
################ Checkpoint Related Functions ########################
######################################################################
def load_model_checkpoint(
load_checkpoint_dir: pathlib.Path,
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
) -> Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
"""Loads the optimizer state dict and model state dict from the load_checkpoint_dir
into the passed model and optimizer. Searches for the most recent checkpoint to
load from
Args:
load_checkpoint_dir (pathlib.Path):
The base checkpoint directory to load from
model (torch.nn.Module):
The model to load the checkpoint weights into
optimizer (torch.optim.Optimizer):
            The optimizer to load the checkpoint weights into
Returns:
Tuple[int, torch.nn.Module, torch.optim.Optimizer]:
The checkpoint step, model with state_dict loaded and
optimizer with state_dict loaded
"""
log_dist(
f"Loading model and optimizer checkpoint from {load_checkpoint_dir}",
ranks=[0],
level=logging.INFO)
checkpoint_files = list(
filter(
lambda path: re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name) is
not None,
load_checkpoint_dir.glob("*.pt"),
))
assert len(checkpoint_files) > 0, "No checkpoints found in directory"
checkpoint_files = sorted(
checkpoint_files,
key=lambda path: int(
re.search(r"iter_(?P<iter_no>\d+)\.pt", path.name).group("iter_no")
),
)
latest_checkpoint_path = checkpoint_files[-1]
checkpoint_step = int(
re.search(r"iter_(?P<iter_no>\d+)\.pt",
latest_checkpoint_path.name).group("iter_no"))
state_dict = torch.load(latest_checkpoint_path)
model.load_state_dict(state_dict["model"], strict=True)
optimizer.load_state_dict(state_dict["optimizer"])
log_dist(
f"Loading model and optimizer checkpoints done. Loaded from {latest_checkpoint_path}",
ranks=[0],
level=logging.INFO)
return checkpoint_step, model, optimizer
######################################################################
######################## Driver Functions ############################
######################################################################
def train(
checkpoint_dir: str = None,
load_checkpoint_dir: str = None,
# Dataset Parameters
mask_prob: float = 0.15,
random_replace_prob: float = 0.1,
unmask_replace_prob: float = 0.1,
max_seq_length: int = 512,
tokenizer: str = "roberta-base",
# Model Parameters
num_layers: int = 6,
num_heads: int = 8,
ff_dim: int = 512,
h_dim: int = 256,
dropout: float = 0.1,
# Training Parameters
batch_size: int = 8,
num_iterations: int = 10000,
checkpoint_every: int = 1000,
log_every: int = 10,
local_rank: int = -1,
) -> pathlib.Path:
"""Trains a [Bert style](https://arxiv.org/pdf/1810.04805.pdf)
(transformer encoder only) model for MLM Task
Args:
checkpoint_dir (str):
The base experiment directory to save experiments to
mask_prob (float, optional):
The fraction of tokens to mask. Defaults to 0.15.
random_replace_prob (float, optional):
The fraction of masked tokens to replace with random token.
Defaults to 0.1.
unmask_replace_prob (float, optional):
The fraction of masked tokens to leave unchanged.
Defaults to 0.1.
max_seq_length (int, optional):
The maximum sequence length of the examples. Defaults to 512.
tokenizer (str, optional):
The tokenizer to use. Defaults to "roberta-base".
num_layers (int, optional):
The number of layers in the Bert model. Defaults to 6.
num_heads (int, optional):
Number of attention heads to use. Defaults to 8.
ff_dim (int, optional):
Size of the intermediate dimension in the FF layer.
Defaults to 512.
h_dim (int, optional):
Size of intermediate representations.
Defaults to 256.
dropout (float, optional):
            Amount of dropout to use. Defaults to 0.1.
batch_size (int, optional):
The minibatch size. Defaults to 8.
num_iterations (int, optional):
Total number of iterations to run the model for.
Defaults to 10000.
checkpoint_every (int, optional):
Save checkpoint after these many steps.
..note ::
You want this to be frequent enough that you can
resume training in case it crashes, but not so much
that you fill up your entire storage !
Defaults to 1000.
log_every (int, optional):
Print logs after these many steps. Defaults to 10.
local_rank (int, optional):
Which GPU to run on (-1 for CPU). Defaults to -1.
Returns:
pathlib.Path: The final experiment directory
"""
device = (torch.device("cuda", local_rank) if (local_rank > -1)
and torch.cuda.is_available() else torch.device("cpu"))
################################
###### Create Exp. Dir #########
################################
if checkpoint_dir is None and load_checkpoint_dir is None:
log_dist(
"Need to specify one of checkpoint_dir"
" or load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir is not None and load_checkpoint_dir is not None:
log_dist(
"Cannot specify both checkpoint_dir"
" and load_checkpoint_dir",
ranks=[0],
level=logging.ERROR)
return
if checkpoint_dir:
log_dist("Creating Experiment Directory",
ranks=[0],
level=logging.INFO)
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(exist_ok=True)
all_arguments = {
# Dataset Params
"mask_prob": mask_prob,
"random_replace_prob": random_replace_prob,
"unmask_replace_prob": unmask_replace_prob,
"max_seq_length": max_seq_length,
"tokenizer": tokenizer,
# Model Params
"num_layers": num_layers,
"num_heads": num_heads,
"ff_dim": ff_dim,
"h_dim": h_dim,
"dropout": dropout,
# Training Params
"batch_size": batch_size,
"num_iterations": num_iterations,
"checkpoint_every": checkpoint_every,
}
exp_dir = create_experiment_dir(checkpoint_dir, all_arguments)
log_dist(f"Experiment Directory created at {exp_dir}",
ranks=[0],
level=logging.INFO)
else:
log_dist("Loading from Experiment Directory",
ranks=[0],
level=logging.INFO)
load_checkpoint_dir = pathlib.Path(load_checkpoint_dir)
assert load_checkpoint_dir.exists()
with (load_checkpoint_dir / "hparams.json").open("r") as handle:
hparams = json.load(handle)
# Set the hparams
# Dataset Params
mask_prob = hparams.get("mask_prob", mask_prob)
tokenizer = hparams.get("tokenizer", tokenizer)
random_replace_prob = hparams.get("random_replace_prob",
random_replace_prob)
unmask_replace_prob = hparams.get("unmask_replace_prob",
unmask_replace_prob)
max_seq_length = hparams.get("max_seq_length", max_seq_length)
# Model Params
ff_dim = hparams.get("ff_dim", ff_dim)
h_dim = hparams.get("h_dim", h_dim)
dropout = hparams.get("dropout", dropout)
num_layers = hparams.get("num_layers", num_layers)
num_heads = hparams.get("num_heads", num_heads)
# Training Params
batch_size = hparams.get("batch_size", batch_size)
_num_iterations = hparams.get("num_iterations", num_iterations)
num_iterations = max(num_iterations, _num_iterations)
checkpoint_every = hparams.get("checkpoint_every", checkpoint_every)
exp_dir = load_checkpoint_dir
# Tensorboard writer
if is_rank_0():
tb_dir = exp_dir / "tb_dir"
assert tb_dir.exists()
summary_writer = SummaryWriter(log_dir=tb_dir)
################################
###### Create Datasets #########
################################
log_dist("Creating Datasets", ranks=[0], level=logging.INFO)
data_iterator = create_data_iterator(
mask_prob=mask_prob,
random_replace_prob=random_replace_prob,
unmask_replace_prob=unmask_replace_prob,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
batch_size=batch_size,
)
log_dist("Dataset Creation Done", ranks=[0], level=logging.INFO)
################################
###### Create Model ############
################################
log_dist("Creating Model", ranks=[0], level=logging.INFO)
model = create_model(
num_layers=num_layers,
num_heads=num_heads,
ff_dim=ff_dim,
h_dim=h_dim,
dropout=dropout,
)
log_dist("Model Creation Done", ranks=[0], level=logging.INFO)
################################
###### DeepSpeed engine ########
################################
log_dist("Creating DeepSpeed engine", ranks=[0], level=logging.INFO)
ds_config = {
"train_micro_batch_size_per_gpu": batch_size,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
}
}
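    # The config above trains with `batch_size` samples per GPU, a vanilla Adam
    # optimizer (lr 1e-4), fp16 mixed precision, and ZeRO stage 1, which shards the
    # optimizer states across ranks and offloads them to CPU memory.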
model, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
log_dist("DeepSpeed engine created", ranks=[0], level=logging.INFO)
################################
#### Load Model checkpoint #####
################################
start_step = 1
if load_checkpoint_dir is not None:
_, client_state = model.load_checkpoint(load_dir=load_checkpoint_dir)
checkpoint_step = client_state['checkpoint_step']
start_step = checkpoint_step + 1
################################
####### The Training Loop ######
################################
log_dist(
f"Total number of model parameters: {sum([p.numel() for p in model.parameters()]):,d}",
ranks=[0],
level=logging.INFO)
model.train()
losses = []
for step, batch in enumerate(data_iterator, start=start_step):
if step >= num_iterations:
break
# Move the tensors to device
for key, value in batch.items():
batch[key] = value.to(device)
# Forward pass
loss = model(**batch)
# Backward pass
model.backward(loss)
# Optimizer Step
model.step()
losses.append(loss.item())
if step % log_every == 0:
log_dist("Loss: {0:.4f}".format(np.mean(losses)),
ranks=[0],
level=logging.INFO)
if is_rank_0():
summary_writer.add_scalar(f"Train/loss", np.mean(losses), step)
if step % checkpoint_every == 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
# Save the last checkpoint if not saved yet
if step % checkpoint_every != 0:
model.save_checkpoint(save_dir=exp_dir,
client_state={'checkpoint_step': step})
log_dist("Saved model to {0}".format(exp_dir),
ranks=[0],
level=logging.INFO)
return exp_dir
if __name__ == "__main__":
torch.manual_seed(42)
np.random.seed(0)
random.seed(0)
fire.Fire(train)
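    # Typical launch (assuming the DeepSpeed launcher is installed and this file is
    # saved as train_bert_ds.py):
    #   deepspeed train_bert_ds.py --checkpoint_dir ./experiments
    # Fire exposes each keyword argument of train() as a corresponding --flag.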
| 36.836237 | 95 | 0.576334 | 5,177 | 0.16323 | 0 | 0 | 0 | 0 | 0 | 0 | 13,426 | 0.423319 |
98f5a9225473ea31a925278ee4add1b0f458f788 | 825 | py | Python | programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
]
| 1 | 2021-01-03T06:42:58.000Z | 2021-01-03T06:42:58.000Z | programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
]
| null | null | null | programming/leetcode/linkedLists/PalindromeLinkedList/PalindromeLinkedList.py | vamsitallapudi/Coderefer-Python-Projects | a7acc682251661e296c64533f4a85d47e6eedda2 | [
"Apache-2.0"
]
| null | null | null | # Given a singly linked list, determine if it is a palindrome.
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
fast = slow = head
# find the mid node
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# reverse the second half
node = None
while slow:
nxt = slow.next
slow.next = node
node = slow
slow = nxt
# compare first and second half of nodes
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
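# Quick self-check (an illustrative addition, not part of the original solution):
# 1 -> 2 -> 2 -> 1 is a palindrome, 1 -> 2 is not.
if __name__ == "__main__":
    palindrome = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
    print(Solution().isPalindrome(palindrome))                # True
    print(Solution().isPalindrome(ListNode(1, ListNode(2))))  # False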
| 22.916667 | 62 | 0.530909 | 715 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.249697 |
98f70d5ddc8fc406905d54058613214bd95d62ce | 290 | py | Python | __init__.py | CloudCIX/rolly | 8fafd655cb82881ae2cf75a475904cddc39e2f9a | [
"Apache-2.0"
]
| 6 | 2019-12-09T16:13:21.000Z | 2020-07-16T11:42:33.000Z | __init__.py | CloudCIX/rolly | 8fafd655cb82881ae2cf75a475904cddc39e2f9a | [
"Apache-2.0"
]
| null | null | null | __init__.py | CloudCIX/rolly | 8fafd655cb82881ae2cf75a475904cddc39e2f9a | [
"Apache-2.0"
]
| 1 | 2021-01-02T09:44:39.000Z | 2021-01-02T09:44:39.000Z | """
Rocky is a CLI based provisioning and management tool for CloudCIX Cloud software.
Rocky is designed to operate in an out-of-band (OOB) network, separated from other CloudCIX networks.
Rocky's purpose is to facilitate monitoring, testing, debugging and recovery.
"""
__version__ = '0.3.5'
| 32.222222 | 101 | 0.772414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.941379 |
98f808b42f55c190413c10c0ee75bee408ae97c6 | 1,671 | py | Python | calculator.py | harshitbansal373/Python-Games | 4e879b0a97b4b420ed6d440cd2d6a0332a2109b7 | [
"MIT"
]
| null | null | null | calculator.py | harshitbansal373/Python-Games | 4e879b0a97b4b420ed6d440cd2d6a0332a2109b7 | [
"MIT"
]
| null | null | null | calculator.py | harshitbansal373/Python-Games | 4e879b0a97b4b420ed6d440cd2d6a0332a2109b7 | [
"MIT"
]
| null | null | null | from tkinter import *
import time
root=Tk()
root.title('Calculator')
root.config(bg='wheat')
def display(x):
global s
s=s+x
text.set(s)
def solve():
global s
try:
s=str(eval(text.get()))
except Exception as e:
text.set(e)
s=''
else:
text.set(s)
def clear():
global s
s=''
text.set(s)
def clear1():
global s
s=text.get()
s=s[:len(s)-1]
text.set(s)
def con():
label['text']=time.ctime()
label.after(1000,con)
s=''
text=StringVar()
f=Frame(root,bg='#dcdde1')
e=Entry(f,textvariable=text,bg='#f5f6fa',fg='#353b48',font='roboto 34 bold',justify='right',relief=RAISED)
e.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
l=['#aabbcc','#bbccdd','#ccddee','#ddeeff']
for i in ['789/','456*','123+','.0-=']:
f=Frame(root,bg=l.pop())
for j in i:
b=Button(f,text=j,bg='#00a8ff',fg='#353b48',font='roboto 34 italic',command=(lambda x=j:display(x)) if j!='=' else solve)
b.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
f1=Frame(root,bg='#dcdde1')
clear=Button(f1,text='C',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear)
clear.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
clear1=Button(f1,text='CE',bg='#00a8ff',fg='#353b48',font='Roboto 34',command=clear1)
clear1.pack(side=LEFT,padx=10,pady=10,expand=YES,fill=BOTH)
f1.pack(side=TOP,padx=10,pady=10,expand=YES,fill=BOTH)
f2=Frame(root,bg='#dcdde1')
label=Label(f2,bg='#00a8ff',fg='#353b48',font='roboto 34')
label.pack(padx=10,pady=10,expand=YES,fill=BOTH)
f2.pack(padx=10,pady=10,expand=YES,fill=BOTH)
con()
root.mainloop()
| 25.318182 | 125 | 0.668462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.174746 |
98f8ea06315deb6bd9599f36bf3f99bf2965db61 | 8,280 | py | Python | src/Main.py | OlavH96/Master | f98476063e579b7b2a80b81a2c0ca4005f5fce80 | [
"MIT"
]
| null | null | null | src/Main.py | OlavH96/Master | f98476063e579b7b2a80b81a2c0ca4005f5fce80 | [
"MIT"
]
| null | null | null | src/Main.py | OlavH96/Master | f98476063e579b7b2a80b81a2c0ca4005f5fce80 | [
"MIT"
]
| null | null | null | import glob
import os
import keras
import tensorflow as tf
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import src.util.Files as Files
from src.util.ImageLoader import load_images_generator, resize_image, load_images_generator_with_filename
import numpy as np
import logging as log
import random
from src.util.Arguments import anomaly_arguments, get_model_choice
import src.util.Arguments as Arguments
from scipy.stats import norm
from PIL import Image
from src.train.Models import autoencoder, conv_autoencoder, vae_autoencoder, vae_loss, get_dummy_loss, from_argument_choice
import src.train.Models as Models
import src.util.Filenames as Filenames
import math
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_generator(path, max_x, max_y, color_mode="RGB"):
for i in load_images_generator(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, i)
def image_generator_with_filename(path, max_x, max_y, color_mode="RGB"):
for i, f in load_images_generator_with_filename(path, color_mode=color_mode):
i = resize_image(i, max_x, max_y)
i = np.array(i)
i = np.expand_dims(i, axis=0)
i = i / 255
yield (i, f)
def centered_image_generator(path, max_x, max_y, color_mode="RGB"):
while True:
for i, o in image_generator(path, max_x, max_y, color_mode=color_mode):
yield (i, o)
def train_on_images(epochs, max_x, max_y, path, model_type, model_name, arg_steps, validation_path, color_mode="RGB"):
sess = tf.Session()
keras.backend.set_session(sess)
# max_x = max([i.shape[0] for i in images])
# max_y = max([i.shape[1] for i in images])
# max_x, max_y = find_max_min_image_size(path = 'detected_images/*.png')
# print(max_x, max_y) # 304, 298
epochs = epochs
shape = (max_y, max_x, 3)
model = Models.from_argument_choice(model_type, shape)
steps = len(glob.glob(path))
if arg_steps != 0:
steps = arg_steps
model.summary()
# define the checkpoint
checkpoint = ModelCheckpoint(model_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
log.info('Fitting model...')
if validation_path:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
validation_data=centered_image_generator(validation_path, max_x, max_y, color_mode=color_mode),
validation_steps=100,
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
else:
history = model.fit_generator(generator=centered_image_generator(path, max_x, max_y, color_mode=color_mode),
epochs=epochs,
steps_per_epoch=steps,
callbacks=callbacks_list)
model.save(model_name)
loss = history.history['loss']
try:
plt.plot(loss)
if validation_path:
val_loss = history.history['val_loss']
plt.plot(val_loss, color='g')
plt.title(model_name)
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.savefig(f'training_loss_{model_name}.png')
except:
log.info("Failed to create loss graph")
log.info('Finished fitting model')
return model
def load_model_and_predict(model_path, num_predictions, path, max_x, max_y, model_type, model=None, color_mode="RGB", template_only=False):
# vae_loss(image_shape=(max_x, max_y, 3), log_var=0.5, mu=0.5)
im_shape = (max_x, max_y, 3)
if model_type == get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
mu = model.get_layer('mu').output
log_var = model.get_layer('log').output
model.summary()
print(mu, log_var)
model.compile(optimizer='rmsprop', loss=vae_loss(im_shape, log_var, mu))
if model_type == get_model_choice(Arguments.CONVVAE) and not model:
model = load_model(model_path, compile=False)#custom_objects={'custom_vae_loss': vae_loss(im_shape, log_var, mu)})
encoder = model.get_layer('encoder')
decoder = model.get_layer('decoder')
mu = encoder.get_layer('mu').output
log_var = encoder.get_layer('log').output
model.compile(optimizer='adam', loss=vae_loss(im_shape, log_var, mu))
if model_type != get_model_choice(Arguments.VAE) and not model:
model = load_model(model_path)
model.summary()
print("Loaded Model", model, model.input_shape)
max_x = model.input_shape[1]
max_y = model.input_shape[2]
images = list(image_generator_with_filename(path, max_x, max_y, color_mode=color_mode))
random.shuffle(images)
index = 0
print(f'Loaded {len(images)} images')
model_name = model_path.split('.')[0]
save_dir = Files.makedir_else_cleardir(f'./predictions/{model_name}_{Filenames.remove_path(Filenames.strip_path_modifier(path))}')
for i, filename in images: # centered_image_generator(path, max_x, max_y):
hashed = Filenames.md5hash(filename)
anomaly = "anomaly" in filename
extra = "_anomaly_" if anomaly else "_normal_"
pred = model.predict(i)
print(pred.shape)
for ii in i:
if color_mode == 'HSV':
ii = Image.fromarray((ii * 255).astype(np.uint8), 'HSV')
ii = ii.convert("RGB")
ii = np.array(ii)
plt.imsave(str(save_dir / f'orig{extra}{hashed}_{index}.png'), ii)
#plt.imsave(str(save_dir / f'temp.png'), pred[0], vmin=0, vmax=1)
print("input shape",i.shape)
evaluate = model.evaluate(i, i)
if type(evaluate) is list:
evaluate = evaluate[0]
print(index, evaluate)
for p in pred:
#print("prediction",p)
p = p / np.max(p)
if color_mode == 'HSV':
p = Image.fromarray((p * 255).astype(np.uint8), 'HSV')
p = p.convert('RGB')
p = np.array(p)
if template_only:
# Hacky solution, oh well
template_path = './src/sign_detection/image_generation/images/signs/png/362.50/362_5.png'
im = Image.open(template_path)
im = im.convert('RGB')
im = im.resize(size=(64,64))
im = np.array(im)
score = image_mse(i[0], im)
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{score}.png'), im)
else:
plt.imsave(str(save_dir / f'pred{extra}{index}_{hashed}_{str(evaluate)}.png'), p)
index += 1
if index == num_predictions:
break
def image_mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
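# Worked example (illustrative): for two 8x8x3 images that differ by 1 everywhere,
# image_mse sums 8*8*3 unit squared errors and divides by the 8*8 pixel count,
# giving 3.0; identical images give 0.0.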
if __name__ == '__main__':
args = anomaly_arguments()
log.info('Arguments', args)
print("Arguments", args)
model = None
if args.do_training:
model = train_on_images(
epochs=args.epochs,
path=args.path,
max_x=args.max_x,
max_y=args.max_y,
model_type=args.model_type,
model_name=args.model,
arg_steps=args.steps,
color_mode=args.color,
validation_path=args.validation_path
)
if args.do_predict:
load_model_and_predict(
model_path=args.model,
num_predictions=args.num_predictions,
max_x=args.max_x,
max_y=args.max_y,
path=args.pred_path if args.pred_path else args.path,
model_type=args.model_type,
model=model,
color_mode=args.color,
template_only=args.template
)
| 35.844156 | 139 | 0.620411 | 0 | 0 | 755 | 0.091184 | 0 | 0 | 0 | 0 | 1,286 | 0.155314 |
98f921edc0f4676c7070bf0e769ce5e1dab739bb | 1,353 | py | Python | daproli/manipulation.py | ermshaua/daproli | c1f7aeec431d9c60ae06eeac23455c1a03bc82cf | [
"BSD-3-Clause"
]
| null | null | null | daproli/manipulation.py | ermshaua/daproli | c1f7aeec431d9c60ae06eeac23455c1a03bc82cf | [
"BSD-3-Clause"
]
| null | null | null | daproli/manipulation.py | ermshaua/daproli | c1f7aeec431d9c60ae06eeac23455c1a03bc82cf | [
"BSD-3-Clause"
]
| null | null | null | from .utils import _get_return_type
def windowed(data, size, step=1, ret_type=None):
'''
dp.windowed applies a window function to a collection of data items.
Parameters
-----------
:param data: an iterable collection of data
:param size: the window size
:param step: the window step
:param ret_type: if provided the used return type, otherwise ret_type(data)
:return: the windowed data list
Examples
-----------
>>> import daproli as dp
>>> numbers = range(10)
>>> dp.windowed(numbers, 2, step=2)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
'''
if ret_type is None: ret_type = _get_return_type(data)
return [ret_type(data[i:i+size]) for i in range(0, len(data)-(size-1), step)]
def flatten(data, ret_type=None):
'''
dp.flatten applies a flatten function to a collection of data items.
Parameters
-----------
:param data: an iterable collection of data
:param ret_type: if provided the used return type, otherwise ret_type(data)
:return: the flattened data collection
Examples
-----------
>>> import daproli as dp
>>> dp.flatten([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
if ret_type is None: ret_type = _get_return_type(data)
return ret_type([item for sub in data for item in sub])
| 30.066667 | 81 | 0.610495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 960 | 0.709534 |
98fc678951f86f4c4317fc775c6ba763f66da302 | 8,717 | py | Python | ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py | gcxtx/ambari | 133d9c4661b21182482c25f96c3f0bf0a9740a9f | [
"Apache-2.0"
]
| 1 | 2021-05-06T06:24:04.000Z | 2021-05-06T06:24:04.000Z | ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py | gcxtx/ambari | 133d9c4661b21182482c25f96c3f0bf0a9740a9f | [
"Apache-2.0"
]
| null | null | null | ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py | gcxtx/ambari | 133d9c4661b21182482c25f96c3f0bf0a9740a9f | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
import sys
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestMetadataServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
STACK_VERSION = "2.3"
def configureResourcesCalled(self):
self.assertResourceCalled('Directory', '/var/run/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/etc/atlas/conf',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/var/log/atlas',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/hbase/logs',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0755
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/data',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('Directory', '/usr/hdp/current/atlas-server/server/webapp',
owner='atlas',
group='hadoop',
create_parents = True,
cd_access='a',
mode=0644
)
self.assertResourceCalled('File', '/usr/hdp/current/atlas-server/server/webapp/atlas.war',
content = StaticFile('/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
)
appprops = dict(self.getConfig()['configurations'][
'application-properties'])
appprops['atlas.server.bind.address'] = 'c6401.ambari.apache.org'
self.assertResourceCalled('PropertiesFile',
'/etc/atlas/conf/application.properties',
properties=appprops,
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-env.sh',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-env']['content']),
owner='atlas',
group='hadoop',
mode=0755,
)
self.assertResourceCalled('File', '/etc/atlas/conf/atlas-log4j.xml',
content=InlineTemplate(
self.getConfig()['configurations'][
'atlas-log4j']['content']),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/users-credentials.properties',
content=StaticFile('users-credentials.properties'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', '/etc/atlas/conf/policy-store.txt',
content=StaticFile('policy-store.txt'),
owner='atlas',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'atlas',
group = 'hadoop',
conf_dir = '/usr/hdp/current/atlas-server/hbase/conf',
configurations = self.getConfig()['configurations']['atlas-hbase-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['atlas-hbase-site']
)
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertNoMoreResources()
def test_configure_secure(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "configure",
config_file="secure.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('TemplateConfig', '/etc/atlas/conf/atlas_jaas.conf',
owner = 'atlas',
)
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh ; /usr/hdp/current/atlas-server/bin/atlas_start.py',
not_if = 'ls /var/run/atlas/atlas.pid >/dev/null 2>&1 && ps -p `cat /var/run/atlas/atlas.pid` >/dev/null 2>&1',
user = 'atlas',
)
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
classname = "MetadataServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh; /usr/hdp/current/atlas-server/bin/atlas_stop.py',
user = 'atlas',
)
self.assertResourceCalled('File', '/var/run/atlas/atlas.pid',
action = ['delete'],
)
| 46.367021 | 141 | 0.494207 | 7,718 | 0.885396 | 0 | 0 | 7,754 | 0.889526 | 0 | 0 | 2,688 | 0.308363 |
98fd965b02157810b02af85a0eee51f0f9a9f9e1 | 5,040 | py | Python | Udacity P3 Additional Files/model.py | sayeayed/Udacity-Project4 | da39d0013d35d90818f9aa24ef097e185e705489 | [
"MIT"
]
| null | null | null | Udacity P3 Additional Files/model.py | sayeayed/Udacity-Project4 | da39d0013d35d90818f9aa24ef097e185e705489 | [
"MIT"
]
| null | null | null | Udacity P3 Additional Files/model.py | sayeayed/Udacity-Project4 | da39d0013d35d90818f9aa24ef097e185e705489 | [
"MIT"
]
| null | null | null | import os
import csv
import numpy as np
from sklearn.utils import shuffle
## Read in frame data
samples = []
with open('/../opt/carnd_p3/data/driving_log.csv') as csvfile: #open the log file
reader = csv.reader(csvfile) #as a readable csv
for line in reader:
samples.append(line) #add each line of the log file to samples
samples = samples[1:] # to remove table header
samples = shuffle(samples) # shuffle entire sample set before splitting into training and validation so that training isn't biased
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2) #split samples into 80% training, 20% validation
from scipy import ndimage #because cv2.imread() imports the image as BGR, and we want RGB
## Define generator to handle small portions of images at a time so that training is not as memory-heavy
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
# shuffle(samples) #shuffle within the training/validation sets, NOT NECESSARY SINCE SHUFFLING ALREADY SHUFFLED
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size] #collect the images for this batch
images = []
angles = []
for batch_sample in batch_samples:
path = '/../opt/carnd_p3/data/IMG/' #assign the location from which to read images
# read in images from all 3 cameras MAKING SURE TO READ IN AS RGB
center_image = ndimage.imread(path+batch_sample[0].split('/')[-1])
left_image = ndimage.imread(path+batch_sample[1].split('/')[-1])
right_image = ndimage.imread(path+batch_sample[2].split('/')[-1])
# read in steering angle
center_angle = float(batch_sample[3]) #read the steering angle
# apply a steering correction for the left and right images, in a way to generate "new" samples
correction = 0.2
left_angle = center_angle + correction
right_angle = center_angle - correction
# add images and angles to batch set
images.extend([center_image, left_image, right_image])
angles.extend([center_angle, left_angle, right_angle])
# copy all batches' images to final numpy array
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train) #shuffle before yielding result
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 160, 320 # Full image format
#import Keras model layers
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# BUILD MODEL
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x/127.5 - 1.0, input_shape=(row,col,ch)))
# Crop incoming data (training, validation, and autonomous so that everything is consistent)
model.add(Cropping2D(cropping=((60,20), (0,0)))) # could be first layer to reduce memory used in Lambda calculation, and thus faster training
# Begin CNN (similar to NVIDIA architecture)
# Convolution layer 1-3, kernel size 5 with stride of 2
model.add(Conv2D(24,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36,(5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48,(5,5),strides=(2,2),activation='relu'))
# Convolution layers 4-5, kernel size 3 wth stride of 1
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
# Flatten convolution output to yield single numerical result
model.add(Flatten())
# Fully connected layers to complete computations, gradually decreasing in parameters until final value
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
## Training hyper parameters to play with
## Stop training checkpoints...
# save_path = 'model{epoch:02d}-{val_loss:.2f}.h5'
# checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
## OR
batch_size = 32
epochs = 5 #***
## Compile and train the model
model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) #use Mean Squared Error to measure loss, use Adam optimizer for tuning
model.fit_generator(train_generator, steps_per_epoch= len(train_samples)/batch_size,validation_data=validation_generator, validation_steps=len(validation_samples)/batch_size, epochs=5, verbose = 1) # train using generators
#save the trained model
model.save('model.h5')
| 48.461538 | 222 | 0.709127 | 0 | 0 | 1,761 | 0.349405 | 0 | 0 | 0 | 0 | 2,288 | 0.453968 |
98fe28b6ed135c40a04274c069f20df97e941299 | 2,357 | py | Python | utils/wavelengthfit_prim.py | GeminiDRSoftware/GHOSTDR | 79cd1ac81a7458e06668d6dac51fc6f9c9c61b31 | [
"BSD-3-Clause"
]
| 1 | 2019-09-05T15:29:25.000Z | 2019-09-05T15:29:25.000Z | utils/wavelengthfit_prim.py | GeminiDRSoftware/GHOSTDR | 79cd1ac81a7458e06668d6dac51fc6f9c9c61b31 | [
"BSD-3-Clause"
]
| null | null | null | utils/wavelengthfit_prim.py | GeminiDRSoftware/GHOSTDR | 79cd1ac81a7458e06668d6dac51fc6f9c9c61b31 | [
"BSD-3-Clause"
]
| 2 | 2017-10-10T23:23:53.000Z | 2022-02-15T23:28:22.000Z | #!/usr/bin/env python3
""" A script containing the basic principles of the extraction primitive inner
workings"""
from __future__ import division, print_function
from ghostdr import polyfit
import numpy as np
import astropy.io.fits as pyfits
# Firstly, let's find all the needed files
fitsdir='/Users/mireland/data/ghost/cal_frames/'
#Define the files in use (NB xmod.txt and wavemod.txt should be correct)
arc_file = fitsdir+"arc_extracted.fits"
# load it in now:
extracted_flux,extracted_vars=pyfits.getdata(arc_file)
# Where is the default location for the model? By default it is a parameter
# in the ghost class. If this needs to be overwritten, go ahead.
# This is the xmod file. Wherever it is saved from the flat reduction.
xmodel_file=fitsdir+'GHOST_1_1_blue_std_xmodPolyfit.fits'
# All the other models... which are currently in the "test" directory.
# NB: test_files_dir is not defined in this snippet -- point it at that directory before running.
wmodel_file=test_files_dir+'wparams_blue_std.fits'
spatmod_file=test_files_dir+'spatmod.fits'
specmod_file=test_files_dir+'specmod.fits'
rotmod_file=test_files_dir+'rotmod2.fits'
# Find the arc line list file
arclinefile='/home/jbento/code/ghostdr/ghostdr/ADCONFIG_GHOST/lookups/GHOST/Polyfit/mnras0378-0221-SD1.txt'
arcwaves, arcfluxes= np.loadtxt(arclinefile,usecols=[1,2]).T
#Get the initial default model parameters from the lookup location
xpars=pyfits.getdata(xmodel_file)
wpars=pyfits.getdata(wmodel_file)
spatpars=pyfits.getdata(spatmod_file)
specpars=pyfits.getdata(specmod_file)
rotpars=pyfits.getdata(rotmod_file)
#instantiate the ghost arm (the parameters above must be loaded first)
arm = polyfit.GhostArm('blue',mode='std')
arm.spectral_format_with_matrix(xpars,wpars,spatpars,specpars,rotpars)
# NB: image_array and flat_image_array are the slit-viewer and slit-flat frames;
# they must be loaded (e.g. with pyfits.getdata) before this point.
slitview = polyfit.SlitView(image_array, flat_image_array, mode='std')
# The extractor is given the polyfit "arm" object, and a slitview object which has
# been instantiated with the slit viewer data.
extractor = polyfit.Extractor(arm, slitview)
#Now find the other lines, after first re-loading into the extractor.
# the inspect parameter is a verbose option for visualising the line
# finding results
lines_out=extractor.find_lines(extracted_flux, arcwaves, inspect=False)
#Now finally do the wavelength fit!
fitted_params, wave_and_resid = arm.read_lines_and_fit(wpars,lines_out,ydeg=3,xdeg=3)
# Optionally show residuals?
#Now write the output to a file, in whatever format suits the recipe system best.
pyfits.writeto('outputs.fits',fitted_params)
| 35.712121 | 107 | 0.801018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,347 | 0.571489 |
98ff5d19bcbfb3d13ae61a0ad4df7649e741ec52 | 1,506 | py | Python | time_management/test/kronos_test.py | AyushRawal/time-management | a8876f7b681da837c41f17cf896eaa895017f17f | [
"MIT"
]
| 1 | 2021-11-15T19:35:51.000Z | 2021-11-15T19:35:51.000Z | time_management/test/kronos_test.py | AyushRawal/time-management | a8876f7b681da837c41f17cf896eaa895017f17f | [
"MIT"
]
| null | null | null | time_management/test/kronos_test.py | AyushRawal/time-management | a8876f7b681da837c41f17cf896eaa895017f17f | [
"MIT"
]
| null | null | null | import unittest
import datetime
import kronos
string_format_time = "%Y-%m-%d %H:%M:%S"
date_time_str = "2020-07-19 18:14:21"
class KronosTest(unittest.TestCase):
def test_get_day_of_week(self):
for i in range(len(kronos.week_days)):
date = kronos.get_date_time_from_string(f"2020-08-{10 + i} 13:00:00")
self.assertEqual(kronos.week_days.get(i), kronos.get_day_of_week(date))
def test_is_yesterday(self):
date_time = kronos.get_date_time_from_string("2020-07-20 18:14:21")
self.assertTrue(kronos.is_yesterday(date_time_str, today=date_time))
date_time = kronos.get_date_time_from_string("2020-07-19 18:14:21")
self.assertFalse(kronos.is_yesterday(date_time_str, today=date_time))
def test_is_previous_friday(self):
last_friday = "2020-08-14 13:00:00"
last_monday = kronos.get_date_time_from_string("2020-08-17 13:00:00")
self.assertTrue(kronos.is_previous_friday(last_friday, last_monday))
last_tuesday = kronos.get_date_time_from_string("2020-08-18 13:00:00")
self.assertFalse(kronos.is_previous_friday(last_friday, last_tuesday))
def test_is_overdue_checks_correctly(self):
creation_date = "2020-08-10 13:00:00"
completion_goal = 5
self.assertTrue(kronos.is_overdue(creation_date, completion_goal))
on_time_date = kronos.get_date_time_as_string()
on_time_goal = 100
self.assertFalse(kronos.is_overdue(on_time_date, on_time_goal))
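# Optional runner (an illustrative addition): lets `python kronos_test.py`
# execute the suite directly, assuming the `kronos` module is importable.
if __name__ == "__main__":
    unittest.main()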
| 41.833333 | 83 | 0.717131 | 1,377 | 0.914343 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.128818 |
98ffa0f6e3c8edf444c1fa0391cb1792a90df5ec | 1,368 | py | Python | mfc/mfc.py | FuelCellUAV/FC_datalogger | 1b4b4fecb6a842f3ba685115db01a50cca7596c7 | [
"CC0-1.0"
]
| null | null | null | mfc/mfc.py | FuelCellUAV/FC_datalogger | 1b4b4fecb6a842f3ba685115db01a50cca7596c7 | [
"CC0-1.0"
]
| null | null | null | mfc/mfc.py | FuelCellUAV/FC_datalogger | 1b4b4fecb6a842f3ba685115db01a50cca7596c7 | [
"CC0-1.0"
]
| null | null | null | ##!/usr/bin/env python3
# Mass Flow Controller Arduino driver
# Copyright (C) 2015 Simon Howroyd, Jason James
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# Import libraries
from time import sleep
#from quick2wire.i2c import I2CMaster, reading
# Define class
class mfc:
@staticmethod
def _getRaw(fun, ch):
return fun(ch)
# External getter
def get(self, fun, ch):
raw = self._getRaw(fun, ch)
rate = raw/5.0*1.5
return rate
# External getter
def getMoles(self, fun, ch):
rate = self.get(fun,ch)*(7.0/6280.0) # TODO should be *125.718/134.82 (density H2 at 1.5bar)
return rate
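# Usage sketch (an illustrative addition): `read_adc` is a hypothetical stand-in
# for the real ADC read function normally supplied by the data logger.
if __name__ == "__main__":
    def read_adc(channel):
        return 2.5  # pretend the ADC reports 2.5 V on this channel

    sensor = mfc()
    print(sensor.get(read_adc, 0))       # flow rate from the raw reading
    print(sensor.getMoles(read_adc, 0))  # molar flow rate (see TODO above)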
| 32.571429 | 101 | 0.638889 | 390 | 0.285088 | 0 | 0 | 62 | 0.045322 | 0 | 0 | 1,018 | 0.744152 |
c70208d0f7ec90b8fef96ebe7d673c28540df5bc | 2,558 | py | Python | odm/dialects/postgresql/green.py | quantmind/pulsar-odm | 5955c20beca0a89270c2b390335838deb7d5915e | [
"BSD-3-Clause"
]
| 16 | 2015-02-17T22:23:48.000Z | 2020-08-08T09:35:53.000Z | odm/dialects/postgresql/green.py | quantmind/pulsar-odm | 5955c20beca0a89270c2b390335838deb7d5915e | [
"BSD-3-Clause"
]
| 11 | 2015-02-25T11:37:09.000Z | 2016-03-04T12:08:11.000Z | odm/dialects/postgresql/green.py | quantmind/pulsar-odm | 5955c20beca0a89270c2b390335838deb7d5915e | [
"BSD-3-Clause"
]
| 3 | 2017-02-27T10:24:31.000Z | 2020-10-08T05:43:15.000Z | from asyncio import Future
from greenlet import getcurrent
import psycopg2
from psycopg2 import * # noqa
from psycopg2 import extensions, OperationalError
__version__ = psycopg2.__version__
def psycopg2_wait_callback(conn):
"""A wait callback to allow greenlet to work with Psycopg.
The caller must be from a greenlet other than the main one.
:param conn: psycopg2 connection or file number
    This function must be invoked from a greenlet that has a parent; invoking
    it from the main greenlet will raise an exception.
"""
while True:
state = conn.poll()
if state == extensions.POLL_OK:
# Done with waiting
break
elif state == extensions.POLL_READ:
_wait_fd(conn)
elif state == extensions.POLL_WRITE:
_wait_fd(conn, read=False)
else: # pragma nocover
raise OperationalError("Bad result from poll: %r" % state)
# INTERNALS
def _wait_fd(conn, read=True):
'''Wait for an event on file descriptor ``fd``.
:param conn: file descriptor
:param read: wait for a read event if ``True``, otherwise a wait
for write event.
    This function must be invoked from a greenlet that has a parent; invoking
    it from the main greenlet will raise an exception.
'''
current = getcurrent()
parent = current.parent
assert parent, '"_wait_fd" must be called by greenlet with a parent'
try:
fileno = conn.fileno()
except AttributeError:
fileno = conn
future = Future()
# When the event on fd occurs switch back to the current greenlet
if read:
future._loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
else:
future._loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
# switch back to parent greenlet
parent.switch(future)
# Back on the child greenlet. Raise error if there is one
future.result()
def _done_wait_fd(fd, future, read):
try:
if read:
future._loop.remove_reader(fd)
else:
future._loop.remove_writer(fd)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(None)
try:
extensions.POLL_OK
except AttributeError: # pragma nocover
from pulsar import ImproperlyConfigured
raise ImproperlyConfigured(
'Psycopg2 does not have support for asynchronous connections. '
'You need at least version 2.2.0 of Psycopg2.')
extensions.set_wait_callback(psycopg2_wait_callback)
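# Usage sketch (illustrative, not part of this module): with the callback
# registered, psycopg2 waits cooperatively, but only when queries run inside a
# child greenlet whose parent drives the asyncio event loop (for example through
# a green-thread helper; `green_pool` below stands for any such helper):
#
#     def blocking_query(dsn):
#         conn = psycopg2.connect(dsn)   # suspends via psycopg2_wait_callback
#         cur = conn.cursor()
#         cur.execute('SELECT 1')
#         return cur.fetchone()
#
#     # result = await green_pool.submit(blocking_query, 'dbname=test')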
| 29.744186 | 76 | 0.67631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,060 | 0.414386 |
c70375d862917fab136e0bc4321aa240c2c6c44e | 27,984 | py | Python | test/test_replica_set_connection.py | h4ck3rm1k3/mongo-python-driver | dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c | [
"Apache-2.0"
]
| 1 | 2019-04-27T20:15:11.000Z | 2019-04-27T20:15:11.000Z | test/test_replica_set_connection.py | h4ck3rm1k3/mongo-python-driver | dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c | [
"Apache-2.0"
]
| null | null | null | test/test_replica_set_connection.py | h4ck3rm1k3/mongo-python-driver | dfaadd53e86a62c72ca8a7564fdacb30cd0ac01c | [
"Apache-2.0"
]
| null | null | null | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import copy
import datetime
import os
import signal
import socket
import sys
import time
import thread
import traceback
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.read_preferences import ReadPreference
from pymongo.replica_set_connection import ReplicaSetConnection
from pymongo.replica_set_connection import _partition_node
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure)
from test import version
from test.utils import delay, assertReadFrom, assertReadFromAll, read_from_which_host
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
class TestReplicaSetConnectionAgainstStandalone(unittest.TestCase):
"""This is a funny beast -- we want to run tests for ReplicaSetConnection
but only if the database at DB_IP and DB_PORT is a standalone.
"""
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
raise SkipTest()
def test_connect(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='anything',
connectTimeoutMS=600)
class TestConnectionReplicaSetBase(unittest.TestCase):
def setUp(self):
conn = Connection(pair)
response = conn.admin.command('ismaster')
if 'setName' in response:
self.name = str(response['setName'])
self.w = len(response['hosts'])
self.hosts = set([_partition_node(h)
for h in response["hosts"]])
self.arbiters = set([_partition_node(h)
for h in response.get("arbiters", [])])
repl_set_status = conn.admin.command('replSetGetStatus')
primary_info = [
m for m in repl_set_status['members']
if m['stateStr'] == 'PRIMARY'
][0]
self.primary = _partition_node(primary_info['name'])
self.secondaries = [
_partition_node(m['name']) for m in repl_set_status['members']
if m['stateStr'] == 'SECONDARY'
]
else:
raise SkipTest()
def _get_connection(self, **kwargs):
return ReplicaSetConnection(pair,
replicaSet=self.name,
**kwargs)
class TestConnection(TestConnectionReplicaSetBase):
def test_connect(self):
self.assertRaises(ConnectionFailure, ReplicaSetConnection,
"somedomainthatdoesntexist.org:27017",
replicaSet=self.name,
connectTimeoutMS=600)
self.assertRaises(ConfigurationError, ReplicaSetConnection,
pair, replicaSet='fdlksjfdslkjfd')
self.assertTrue(ReplicaSetConnection(pair, replicaSet=self.name))
def test_repr(self):
connection = self._get_connection()
self.assertEqual(repr(connection),
"ReplicaSetConnection(%r)" % (["%s:%d" % n
for n in
self.hosts],))
def test_properties(self):
c = ReplicaSetConnection(pair, replicaSet=self.name)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 10)
self.assertEqual(c.document_class, dict)
self.assertEqual(c.tz_aware, False)
# Make sure RSC's properties are copied to Database and Collection
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.PRIMARY)
self.assertEqual(obj.tag_sets, [{}])
self.assertEqual(obj.secondary_acceptable_latency_ms, 15)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, False)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.PRIMARY, cursor._Cursor__read_preference)
self.assertEqual([{}], cursor._Cursor__tag_sets)
self.assertEqual(15, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
c.close()
tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}]
c = ReplicaSetConnection(pair, replicaSet=self.name, max_pool_size=25,
document_class=SON, tz_aware=True,
slaveOk=False, safe=True,
read_preference=ReadPreference.SECONDARY,
tag_sets=copy.deepcopy(tag_sets),
secondary_acceptable_latency_ms=77)
c.admin.command('ping')
self.assertEqual(c.primary, self.primary)
self.assertEqual(c.hosts, self.hosts)
self.assertEqual(c.arbiters, self.arbiters)
self.assertEqual(c.max_pool_size, 25)
self.assertEqual(c.document_class, SON)
self.assertEqual(c.tz_aware, True)
for obj in c, c.pymongo_test, c.pymongo_test.test:
self.assertEqual(obj.read_preference, ReadPreference.SECONDARY)
self.assertEqual(obj.tag_sets, tag_sets)
self.assertEqual(obj.secondary_acceptable_latency_ms, 77)
self.assertEqual(obj.slave_okay, False)
self.assertEqual(obj.safe, True)
cursor = c.pymongo_test.test.find()
self.assertEqual(
ReadPreference.SECONDARY, cursor._Cursor__read_preference)
self.assertEqual(tag_sets, cursor._Cursor__tag_sets)
self.assertEqual(77, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
cursor = c.pymongo_test.test.find(
read_preference=ReadPreference.NEAREST,
tag_sets=[{'dc':'ny'}, {}],
secondary_acceptable_latency_ms=123)
self.assertEqual(
ReadPreference.NEAREST, cursor._Cursor__read_preference)
self.assertEqual([{'dc':'ny'}, {}], cursor._Cursor__tag_sets)
self.assertEqual(123, cursor._Cursor__secondary_acceptable_latency_ms)
self.assertEqual(False, cursor._Cursor__slave_okay)
if version.at_least(c, (1, 7, 4)):
self.assertEqual(c.max_bson_size, 16777216)
else:
self.assertEqual(c.max_bson_size, 4194304)
c.close()
def test_get_db(self):
connection = self._get_connection()
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, connection, "")
self.assertRaises(InvalidName, make_db, connection, "te$t")
self.assertRaises(InvalidName, make_db, connection, "te.t")
self.assertRaises(InvalidName, make_db, connection, "te\\t")
self.assertRaises(InvalidName, make_db, connection, "te/t")
self.assertRaises(InvalidName, make_db, connection, "te st")
self.assertTrue(isinstance(connection.test, Database))
self.assertEqual(connection.test, connection["test"])
self.assertEqual(connection.test, Database(connection, "test"))
connection.close()
def test_auto_reconnect_exception_when_read_preference_is_secondary(self):
c = self._get_connection()
db = c.pymongo_test
def raise_socket_error(*args, **kwargs):
raise socket.error
old_sendall = socket.socket.sendall
socket.socket.sendall = raise_socket_error
try:
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
self.assertRaises(AutoReconnect, cursor.next)
finally:
socket.socket.sendall = old_sendall
def test_operations(self):
c = self._get_connection()
# Check explicitly for a case we've commonly hit in tests:
# a replica set is started with a tiny oplog, a previous
# test does a big insert that leaves the secondaries
# permanently "RECOVERING", and our insert(w=self.w) hangs
# forever.
rs_status = c.admin.command('replSetGetStatus')
members = rs_status['members']
self.assertFalse(
[m for m in members if m['stateStr'] == 'RECOVERING'],
"Replica set is recovering, try a larger oplogSize next time"
)
db = c.pymongo_test
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.insert({'foo': 'x'}, safe=True, w=self.w, wtimeout=10000)
self.assertEqual(1, db.test.count())
cursor = db.test.find()
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we read from the primary
self.assertEqual(c.primary, cursor._Cursor__connection_id)
cursor = db.test.find(read_preference=ReadPreference.SECONDARY)
doc = cursor.next()
self.assertEqual('x', doc['foo'])
# Ensure we didn't read from the primary
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
self.assertEqual(1, db.test.count())
db.test.remove({}, safe=True)
self.assertEqual(0, db.test.count())
db.test.drop()
c.close()
def test_database_names(self):
connection = self._get_connection()
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_mike.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
connection.close()
def test_drop_database(self):
connection = self._get_connection()
self.assertRaises(TypeError, connection.drop_database, 5)
self.assertRaises(TypeError, connection.drop_database, None)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database("pymongo_test")
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.pymongo_test.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
connection.drop_database(connection.pymongo_test)
dbs = connection.database_names()
self.assertTrue("pymongo_test" not in dbs)
connection.close()
def test_copy_db(self):
c = self._get_connection()
self.assertTrue(c.in_request())
self.assertRaises(TypeError, c.copy_database, 4, "foo")
self.assertRaises(TypeError, c.copy_database, "foo", 4)
self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
c.pymongo_test.test.drop()
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.pymongo_test.test.insert({"foo": "bar"})
self.assertFalse("pymongo_test1" in c.database_names())
self.assertFalse("pymongo_test2" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1")
# copy_database() didn't accidentally end the request
self.assertTrue(c.in_request())
self.assertTrue("pymongo_test1" in c.database_names())
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.end_request()
self.assertFalse(c.in_request())
c.copy_database("pymongo_test", "pymongo_test2", pair)
# copy_database() didn't accidentally restart the request
self.assertFalse(c.in_request())
time.sleep(1)
self.assertTrue("pymongo_test2" in c.database_names())
self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
if version.at_least(c, (1, 3, 3, 1)):
c.drop_database("pymongo_test1")
c.pymongo_test.add_user("mike", "password")
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="foo", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
self.assertRaises(OperationFailure, c.copy_database,
"pymongo_test", "pymongo_test1",
username="mike", password="bar")
self.assertFalse("pymongo_test1" in c.database_names())
c.copy_database("pymongo_test", "pymongo_test1",
username="mike", password="password")
self.assertTrue("pymongo_test1" in c.database_names())
time.sleep(2)
self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
c.close()
def test_iteration(self):
connection = self._get_connection()
def iterate():
[a for a in connection]
self.assertRaises(TypeError, iterate)
connection.close()
def test_disconnect(self):
c = self._get_connection()
coll = c.foo.bar
c.disconnect()
c.disconnect()
coll.count()
c.disconnect()
c.disconnect()
coll.count()
def test_fork(self):
"""Test using a connection before and after a fork.
"""
if sys.platform == "win32":
raise SkipTest()
try:
from multiprocessing import Process, Pipe
except ImportError:
raise SkipTest()
db = self._get_connection().pymongo_test
# Failure occurs if the connection is used before the fork
db.test.find_one()
#db.connection.end_request()
def loop(pipe):
while True:
try:
db.test.insert({"a": "b"}, safe=True)
for _ in db.test.find():
pass
except:
traceback.print_exc()
pipe.send(True)
os._exit(1)
cp1, cc1 = Pipe()
cp2, cc2 = Pipe()
p1 = Process(target=loop, args=(cc1,))
p2 = Process(target=loop, args=(cc2,))
p1.start()
p2.start()
p1.join(1)
p2.join(1)
p1.terminate()
p2.terminate()
p1.join()
p2.join()
cc1.close()
cc2.close()
# recv will only have data if the subprocess failed
try:
cp1.recv()
self.fail()
except EOFError:
pass
try:
cp2.recv()
self.fail()
except EOFError:
pass
db.connection.close()
def test_document_class(self):
c = self._get_connection()
db = c.pymongo_test
db.test.insert({"x": 1})
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.document_class = SON
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.close()
c = self._get_connection(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
c.document_class = dict
self.assertEqual(dict, c.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c.close()
def test_network_timeout(self):
no_timeout = self._get_connection()
timeout_sec = 1
timeout = self._get_connection(socketTimeoutMS=timeout_sec*1000)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
# A $where clause that takes a second longer than the timeout
where_func = delay(1 + timeout_sec)
def get_x(db):
doc = db.test.find().where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)
def get_x_timeout(db, t):
doc = db.test.find(network_timeout=t).where(where_func).next()
return doc["x"]
self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
self.assertRaises(ConnectionFailure, get_x_timeout,
no_timeout.pymongo_test, 0.1)
no_timeout.close()
timeout.close()
def test_tz_aware(self):
self.assertRaises(ConfigurationError, ReplicaSetConnection,
tz_aware='foo', replicaSet=self.name)
aware = self._get_connection(tz_aware=True)
naive = self._get_connection()
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert({"x": now}, safe=True)
time.sleep(1)
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
def test_ipv6(self):
try:
connection = ReplicaSetConnection("[::1]:%d" % (port,),
replicaSet=self.name)
except:
# Either mongod was started without --ipv6
# or the OS doesn't support it (or both).
raise SkipTest()
# Try a few simple things
connection = ReplicaSetConnection("mongodb://[::1]:%d" % (port,),
replicaSet=self.name)
connection = ReplicaSetConnection("mongodb://[::1]:%d/?safe=true;"
"replicaSet=%s" % (port, self.name))
connection = ReplicaSetConnection("[::1]:%d,localhost:"
"%d" % (port, port),
replicaSet=self.name)
connection = ReplicaSetConnection("localhost:%d,[::1]:"
"%d" % (port, port),
replicaSet=self.name)
connection.pymongo_test.test.save({"dummy": u"object"})
connection.pymongo_test_bernie.test.save({"dummy": u"object"})
dbs = connection.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
connection.close()
def _test_kill_cursor_explicit(self, read_pref):
c = self._get_connection(read_preference=read_pref)
db = c.pymongo_test
db.drop_collection("test")
test = db.test
test.insert([{"i": i} for i in range(20)], w=1 + len(c.secondaries))
# Partially evaluate cursor so it's left alive, then kill it
cursor = test.find().batch_size(10)
cursor.next()
self.assertNotEqual(0, cursor.cursor_id)
connection_id = cursor._Cursor__connection_id
writer = c._ReplicaSetConnection__writer
if read_pref == ReadPreference.PRIMARY:
msg = "Expected cursor's connection_id to be %s, got %s" % (
writer, connection_id)
self.assertEqual(connection_id, writer, msg)
else:
self.assertNotEqual(connection_id, writer,
"Expected cursor's connection_id not to be primary")
cursor_id = cursor.cursor_id
# Cursor dead on server - trigger a getMore on the same cursor_id and
# check that the server returns an error.
cursor2 = cursor.clone()
cursor2._Cursor__id = cursor_id
if (sys.platform.startswith('java') or
'PyPy' in sys.version):
# Explicitly kill cursor.
cursor.close()
else:
# Implicitly kill it in CPython.
del cursor
self.assertRaises(OperationFailure, lambda: list(cursor2))
def test_kill_cursor_explicit_primary(self):
self._test_kill_cursor_explicit(ReadPreference.PRIMARY)
def test_kill_cursor_explicit_secondary(self):
self._test_kill_cursor_explicit(ReadPreference.SECONDARY)
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure Connection closes its
# socket if it gets an interrupt while waiting to recv() from it.
c = self._get_connection()
db = c.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert({'_id': 1}, safe=True)
old_signal_handler = None
try:
# Platform-specific hacks for raising a KeyboardInterrupt on the main
# thread while find() is in-progress: On Windows, SIGALRM is unavailable
# so we use second thread. In our Bamboo setup on Linux, the thread
# technique causes an error in the test at sock.recv():
# TypeError: 'int' object is not callable
# We don't know what causes this in Bamboo, so we hack around it.
if sys.platform == 'win32':
def interrupter():
time.sleep(0.25)
# Raises KeyboardInterrupt in the main thread
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
else:
# Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT for one
# second in the future, but easy to schedule SIGALRM.
def sigalarm(num, frame):
raise KeyboardInterrupt
old_signal_handler = signal.signal(signal.SIGALRM, sigalarm)
signal.alarm(1)
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
db.foo.find({'$where': where}).next()
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected ConnectionFailure")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
db.foo.find().next()
)
finally:
if old_signal_handler:
signal.signal(signal.SIGALRM, old_signal_handler)
def test_auto_start_request(self):
for bad_horrible_value in (None, 5, 'hi!'):
self.assertRaises(
(TypeError, ConfigurationError),
lambda: self._get_connection(auto_start_request=bad_horrible_value)
)
# auto_start_request should default to True
conn = self._get_connection()
pools = [mongo.pool for mongo in
conn._ReplicaSetConnection__members.values()]
self.assertTrue(conn.auto_start_request)
self.assertTrue(conn.in_request())
# Trigger the RSC to actually start a request
conn.test.test.find_one()
for pool in pools:
self.assertTrue(pool.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
for pool in pools:
self.assertFalse(pool.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.close()
conn = self._get_connection(auto_start_request=False)
self.assertFalse(conn.in_request())
conn.start_request()
self.assertTrue(conn.in_request())
conn.end_request()
self.assertFalse(conn.in_request())
conn.close()
def test_schedule_refresh(self):
# Monitor thread starts waiting for _refresh_interval, 30 seconds
conn = self._get_connection()
# Reconnect if necessary
conn.pymongo_test.test.find_one()
secondaries = conn.secondaries
for secondary in secondaries:
conn._ReplicaSetConnection__members[secondary].up = False
conn._ReplicaSetConnection__members[conn.primary].up = False
# Wake up monitor thread
conn._ReplicaSetConnection__schedule_refresh()
# Refresh interval is 30 seconds; scheduling a refresh tells the
# monitor thread / greenlet to start a refresh now. We still need to
# sleep a few seconds for it to complete.
time.sleep(5)
for secondary in secondaries:
self.assertTrue(conn._ReplicaSetConnection__members[secondary].up,
"ReplicaSetConnection didn't detect secondary is up")
self.assertTrue(conn._ReplicaSetConnection__members[conn.primary].up,
"ReplicaSetConnection didn't detect primary is up")
conn.close()
def test_pinned_member(self):
latency = 1000 * 1000
conn = self._get_connection(
auto_start_request=False, secondary_acceptable_latency_ms=latency)
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
# No pinning since we're not in a request
assertReadFromAll(
self, conn, conn.secondaries,
ReadPreference.SECONDARY, None, latency)
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
conn.start_request()
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Repin
primary = read_from_which_host(conn, ReadPreference.PRIMARY)
self.assertEqual(conn.primary, primary)
assertReadFrom(self, conn, primary, ReadPreference.NEAREST)
# Repin again
host = read_from_which_host(conn, ReadPreference.SECONDARY)
self.assertTrue(host in conn.secondaries)
assertReadFrom(self, conn, host, ReadPreference.SECONDARY)
# Unpin
conn.end_request()
assertReadFromAll(
self, conn, list(conn.secondaries) + [conn.primary],
ReadPreference.NEAREST, None, latency)
if __name__ == "__main__":
unittest.main()
| 36.966975 | 85 | 0.611885 | 26,339 | 0.941216 | 0 | 0 | 0 | 0 | 0 | 0 | 5,044 | 0.180246 |
c703e56a113105edf215384785217acba5d2eb75 | 2,177 | py | Python | jqi/cmd.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | ["CC-BY-3.0", "Apache-2.0"] | 3 | 2020-04-15T13:40:59.000Z | 2021-06-30T10:09:33.000Z | jqi/cmd.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | ["CC-BY-3.0", "Apache-2.0"] | null | null | null | jqi/cmd.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | ["CC-BY-3.0", "Apache-2.0"] | null | null | null |
import argparse_helper as argparse
import config_dir
import sys
from .editor import Editor
def main(*args):
if len(args) > 0:
args = [args]
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="cfg_file", help="query save name")
parser.add_argument("-x", default=False, action="store_true", dest="run", help="run immediately")
parser.add_argument("-l", default=False, action="count", dest="list", help="list saved queries")
parser.add_argument("-p", default=False, action="store_true", dest="previous", help="use previous query")
parser.add_argument("pattern", nargs="?", help="override saved pattern")
parser.add_argument("file", nargs="?", help="file to operate on")
args = parser.parse_args(*args)
if args.cfg_file is None and args.previous:
args.cfg_file = "previous"
if args.cfg_file is not None and args.file is None:
args.file = args.pattern
args.pattern = None
editor = Editor(file=args.cfg_file, pattern=args.pattern)
if args.list > 0:
if args.cfg_file is not None:
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=args.cfg_file, create=False)
print(cfg["pattern"])
else:
list_stored(args.list > 1)
return
if args.file is None:
text = sys.stdin.read()
else:
with open(args.file) as f:
text = f.read()
if args.run:
editor.jq(text, stdio=True)
else:
result = editor.run(text)
if result == 0:
editor.save()
editor.save("previous")
else:
sys.exit(result)
def list_stored(long=False):
d = config_dir.config_dir(name=".jqi", sub_dir="query")
for f in d.iterdir():
name = f.name
cfg = config_dir.load_config(name=".jqi", sub_dir="query", sub_name=name, create=False)
if long:
print(name)
for line in cfg["pattern"].splitlines():
print("\t{}".format(line))
else:
print("{}\t{}".format(name, cfg["pattern"].splitlines()[0]))
if __name__ == '__main__':
main("-f", "foo", "/tmp/x")
| 31.550725 | 109 | 0.601746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.158016 |
c704254fb8b8187007babe4836f7f7b5682b3b65 | 888 | py | Python | setup.py | ASKBOT/python-import-utils | 9cc317cc2a42dd46d41d53e8209203ccfe528c11 | ["BSD-2-Clause"] | 1 | 2015-07-19T10:36:42.000Z | 2015-07-19T10:36:42.000Z | setup.py | ASKBOT/python-import-utils | 9cc317cc2a42dd46d41d53e8209203ccfe528c11 | ["BSD-2-Clause"] | null | null | null | setup.py | ASKBOT/python-import-utils | 9cc317cc2a42dd46d41d53e8209203ccfe528c11 | ["BSD-2-Clause"] | null | null | null |
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import import_utils
setup(
name = "import-utils",
version = import_utils.__version__,
description = 'A module that supports simple programmatic module imports',
packages = find_packages(),
author = 'Evgeny.Fadeev',
author_email = '[email protected]',
license = 'BSD',
keywords = 'import, module',
url = 'http://askbot.org',
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
long_description = import_utils.__doc__
)
| 30.62069 | 78 | 0.647523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.462838 |
c704a4dc1d06546eaf240da05c092e6fa0ab7b9d | 1,704 | py | Python | visual_dynamics/policies/random_offset_camera_target_policy.py | alexlee-gk/visual_dynamics | 90227bb0d0aebb1989117b5c25ca311655ca7cc7 | ["MIT"] | 30 | 2017-04-05T12:55:09.000Z | 2022-03-14T14:31:31.000Z | visual_dynamics/policies/random_offset_camera_target_policy.py | alexlee-gk/visual_dynamics | 90227bb0d0aebb1989117b5c25ca311655ca7cc7 | ["MIT"] | 1 | 2017-06-19T02:39:03.000Z | 2017-06-19T02:39:03.000Z | visual_dynamics/policies/random_offset_camera_target_policy.py | alexlee-gk/visual_dynamics | 90227bb0d0aebb1989117b5c25ca311655ca7cc7 | ["MIT"] | 13 | 2017-04-05T12:55:09.000Z | 2021-03-16T01:59:12.000Z |
import numpy as np
from visual_dynamics.policies import CameraTargetPolicy
class RandomOffsetCameraTargetPolicy(CameraTargetPolicy):
def __init__(self, env, target_env, camera_node_name, agent_node_name, target_node_name,
height=12.0, radius=16.0, angle=(-np.pi/4, np.pi/4), tightness=0.1, hra_interpolation=True):
self.height = height
self.radius = radius
self.angle = angle
offset = self.sample_offset()
super(RandomOffsetCameraTargetPolicy, self).__init__(env, target_env, camera_node_name, agent_node_name,
target_node_name, offset, tightness=tightness,
hra_interpolation=hra_interpolation)
def reset(self):
self.offset = self.sample_offset()
state = super(RandomOffsetCameraTargetPolicy, self).reset()
# self.offset = self.sample_offset()
return state
def sample_offset(self):
height = np.random.uniform(*self.height) if isinstance(self.height, (list, tuple)) else self.height
radius = np.random.uniform(*self.radius) if isinstance(self.radius, (list, tuple)) else self.radius
angle = np.random.uniform(*self.angle) if isinstance(self.angle, (list, tuple)) else self.angle
return np.array([radius * np.sin(angle), -radius * np.cos(angle), height])
def _get_config(self):
config = super(RandomOffsetCameraTargetPolicy, self)._get_config()
config.pop('offset')
config.update({'height': self.height,
'radius': self.radius,
'angle': self.angle})
return config
| 47.333333 | 112 | 0.629695 | 1,625 | 0.953638 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.039319 |
c70662701931e0df30976bfadaca0ac6c230e738 | 1,401 | py | Python | Day3/Day3.py | ErAgOn-AmAnSiRoHi/Advent-of-Code-2021 | 0f0d59483d93f6fce4aa06fb36101aea08b02fc3 | ["MIT"] | null | null | null | Day3/Day3.py | ErAgOn-AmAnSiRoHi/Advent-of-Code-2021 | 0f0d59483d93f6fce4aa06fb36101aea08b02fc3 | ["MIT"] | null | null | null | Day3/Day3.py | ErAgOn-AmAnSiRoHi/Advent-of-Code-2021 | 0f0d59483d93f6fce4aa06fb36101aea08b02fc3 | ["MIT"] | null | null | null |
with open("inputday3.txt") as f:
data = [x for x in f.read().split()]
gamma = ""
epsilon = ""
for b in range(0, len(data[0])):
one = 0
zero = 0
for c in range(0, len(data)):
if data[c][b] == '0':
zero += 1
else:
one += 1
if zero > one:
gamma += '0'
epsilon += '1'
else:
gamma += '1'
epsilon += '0'
g = int(gamma, 2)
e = int(epsilon, 2)
print("PART 1", g * e)
gamma = ""
epsilon = ""
data2 = data.copy()
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == "0":
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if zero > one:
data = zeroes
else:
data = ones
index += 1
oxygen = int(data[0], 2)
data = data2
index = 0
while len(data) > 1:
one = 0
zero = 0
ones = []
zeroes = []
for c in range(0, len(data)):
if data[c][index] == '0':
zero += 1
zeroes.append(data[c])
else:
one += 1
ones.append(data[c])
if one < zero:
data = ones
else:
data = zeroes
index += 1
co2 = int(data[0], 2)
print("PART 2", oxygen * co2)
| 18.932432 | 41 | 0.417559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.042827 |
c706f98a7ed12b68d12a292394d4a9f058dbea40 | 12,449 | py | Python | keras2pytorch_dataset.py | MPCAICDM/MPCA | c996435a0578ea4160f934bc01041c2ef23468f3 | ["MIT"] | null | null | null | keras2pytorch_dataset.py | MPCAICDM/MPCA | c996435a0578ea4160f934bc01041c2ef23468f3 | ["MIT"] | null | null | null | keras2pytorch_dataset.py | MPCAICDM/MPCA | c996435a0578ea4160f934bc01041c2ef23468f3 | ["MIT"] | null | null | null |
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
from misc import AverageMeter
from eval_accuracy import simple_accuracy
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch
from multiprocessing import Value
def softmax(input_tensor):
act = torch.nn.Softmax(dim=1)
return act(input_tensor).numpy()
class dataset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, test_data, test_labels, train=True,
transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.test_data = test_data
self.test_labels = test_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
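# --- Usage sketch (added for illustration, not part of the original repo). ---
# It shows how dataset_pytorch above can be fed to a torch DataLoader; the fake
# array shapes and the ToTensor transform are assumptions for the example.
def _demo_dataset_pytorch_loader():
    import numpy as np
    import torch.utils.data
    from torchvision import transforms

    # 100 fake 32x32 RGB images with integer class labels
    images = np.random.randint(0, 255, size=(100, 32, 32, 3), dtype=np.uint8)
    labels = np.random.randint(0, 10, size=(100,))
    dataset = dataset_pytorch(train_data=images, train_labels=labels,
                              test_data=images, test_labels=labels,
                              train=True, transform=transforms.ToTensor())
    loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
    imgs, targets = next(iter(loader))
    return imgs.shape, targets.shape  # torch.Size([16, 3, 32, 32]), torch.Size([16])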
class transformer_score_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
class transformer_dataset(data.Dataset):
def __init__(self, train_data, train_labels, data_transformer, aux_labels=None, transform=None,
target_transform=None, train_sequential=False, is_padding=False):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data
self.train_labels = train_labels
self.aux_labels = aux_labels
self.transfomer = data_transformer
self.n_transforms = self.transfomer.n_transforms
self.train_sequential = train_sequential
self.is_padding = is_padding
if train_sequential:
self.length = self.train_data.shape[0]
self.transform_idx = 0
self.iter_count = Value('i', 0)
else:
self.length = self.train_data.shape[0] * self.transfomer.n_transforms
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.train_sequential:
with self.iter_count.get_lock():
self.iter_count.value += 1
if self.iter_count.value == self.length:
self.transform_idx = (self.transform_idx + 1) % self.n_transforms
self.iter_count.value = 0
image_idx, transform_idx = idx, self.transform_idx
nidx = image_idx * self.n_transforms + transform_idx
else:
image_idx, transform_idx = idx // self.n_transforms, idx % self.n_transforms
nidx = idx
if self.is_padding:
img = np.pad(self.train_data[image_idx].copy(), ((2, 2), (2, 2), (0, 0)), 'constant')
#print(img.shape)
img, target = self.transfomer.transform_one(img, transform_idx).copy(), self.train_labels[nidx]
else:
img, target = self.transfomer.transform_one(self.train_data[image_idx], transform_idx).copy(), self.train_labels[nidx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
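# --- Illustration of the index arithmetic used above (added sketch, not part of
# the original repo). When train_sequential is False, a flat dataset index maps
# to (image_idx, transform_idx) via idx // n_transforms and idx % n_transforms,
# so consecutive indices walk through every transform of one image before moving
# to the next image.
def _demo_transformer_index_mapping(n_images=3, n_transforms=4):
    pairs = [(idx // n_transforms, idx % n_transforms)
             for idx in range(n_images * n_transforms)]
    # e.g. [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), ...]
    return pairs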
class h5idx_dataset(data.Dataset):
def __init__(self, train_index, train_labels, total_data, aux_labels=None, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_index = train_index # just a index
self.train_labels = train_labels
self.aux_labels = aux_labels
self.total_data = total_data
self.length = self.train_index.shape[0] * self.total_data.shape[1]
self.n_transform = self.total_data.shape[1]
assert self.length == len(self.train_labels)
def __len__(self):
return self.length
def __getitem__(self, idx):
image_idx, transform_idx = idx // self.n_transform, idx % self.n_transform
img, target = np.array(self.total_data[self.train_index[image_idx], transform_idx, :]), self.train_labels[idx]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[idx])
return img, target
class trainset_pytorch(data.Dataset):
def __init__(self, train_data, train_labels, aux_labels=None,transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.train_data = train_data # ndarray
self.train_labels = train_labels
self.aux_labels = aux_labels
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.train_data[index], self.train_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.aux_labels is not None:
return img, (target, self.aux_labels[index])
return img, target
def __len__(self):
return len(self.train_data)
class testset_pytorch(data.Dataset):
def __init__(self, test_data, transform=None):
self.transform = transform
self.test_data = test_data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img = self.test_data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.test_data)
class dataset_reorganized(data.Dataset):
def __init__(self, data, transform=None):
self.transform = transform
self.data = data # ndarray
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
imgs = self.data[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img) # used if the img is [H, W, C] and the dtype is uint8
if self.transform is not None:
new_imgs = []
for i in range(imgs.shape[0]):
img = imgs[i]
img = self.transform(img)
new_imgs.append(img.unsqueeze(0))
new_imgs = torch.cat(new_imgs, dim=0)
else:
raise NotImplementedError
return new_imgs
def __len__(self):
return len(self.data)
def train_reorganized(trainloader, model, criterion, optimizer, epochs):
# train the model
model.train()
top1 = AverageMeter()
losses = AverageMeter()
for epoch in range(epochs):
for batch_idx, (inputs) in enumerate(trainloader):
targets = torch.LongTensor(np.tile(np.arange(inputs.size(1)), inputs.size(0)))
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs, targets = torch.autograd.Variable(inputs.cuda()), torch.autograd.Variable(targets.cuda())
outputs, _ = model(inputs)
loss = criterion(outputs, targets)
prec1 = simple_accuracy(outputs.data.cpu(), targets.data.cpu())
top1.update(prec1, inputs.size(0))
losses.update(loss.data.cpu(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Epoch: [{} | {}], batch: {}, loss: {}, Accuracy: {}'.format(epoch + 1, epochs, batch_idx + 1, losses.avg, top1.avg))
def test_reorganized(testloader, model):
model.eval()
res = torch.Tensor()
for batch_idx, (inputs) in enumerate(testloader):
inputs = inputs.reshape(-1, inputs.size(-3), inputs.size(-2), inputs.size(-1))
inputs = torch.autograd.Variable(inputs.cuda())
outputs, _ = model(inputs)
res = torch.cat((res, outputs.data.cpu()), dim=0)
return res
def get_scores(outputs, targets):
scores = []
for i in range(outputs.shape[0]):
scores.append(outputs[i, targets[i]])
    return np.array(scores)
| 34.969101 | 139 | 0.618684 | 10,292 | 0.826733 | 0 | 0 | 0 | 0 | 0 | 0 | 1,569 | 0.126034 |
c7075ad8e2a1229e14b617586ca8b05a9f86dd2f | 1,920 | py | Python | mir/tools/mir_repo_utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | ["Apache-2.0"] | 1 | 2022-01-12T03:12:47.000Z | 2022-01-12T03:12:47.000Z | mir/tools/mir_repo_utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | ["Apache-2.0"] | null | null | null | mir/tools/mir_repo_utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | ["Apache-2.0"] | null | null | null |
import json
import logging
import os
from typing import Optional
from mir import scm
from mir.tools import mir_storage
def mir_check_repo_dvc_dirty(mir_root: str = ".") -> bool:
names = [name for name in mir_storage.get_all_mir_paths() if os.path.isfile(os.path.join(mir_root, name))]
if names:
dvc_cmd_args = ["--show-json", "--targets"]
dvc_cmd_args.extend(names)
dvc_scm = scm.Scm(mir_root, scm_executable="dvc")
dvc_result = dvc_scm.diff(dvc_cmd_args)
json_object = json.loads(dvc_result)
keys = ['added', 'deleted', 'modified', 'renamed', 'not in cache']
dvc_dirty = False
for key in keys:
dirty_value = json_object.get(key, None)
if dirty_value:
logging.info(f"{key}: {dirty_value}")
dvc_dirty = True
return dvc_dirty
else:
# if no mir files in this mir repo, it's clean
return False
def mir_check_repo_git_dirty(mir_root: str = ".") -> bool:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_result = git_scm.status("-s") # if clean, returns nothing
if (git_result or len(git_result) > 0):
logging.info(f"{git_result}")
return True
return False # clean
def mir_check_repo_dirty(mir_root: str = '.') -> bool:
return mir_check_repo_dvc_dirty(mir_root) or mir_check_repo_git_dirty(mir_root)
def mir_check_branch_exists(mir_root: str, branch: str) -> bool:
try:
git_scm = scm.Scm(mir_root, scm_executable="git")
git_scm.rev_parse(branch)
return True
except Exception:
# git rev-parse will return non-zero code when can not find branch
# and cmd.py packs non-zero return code as an error
return False
def work_dir_to_monitor_file(work_dir: Optional[str]) -> Optional[str]:
return os.path.join(work_dir, 'out', 'monitor.txt') if work_dir else None
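# --- Illustrative sketch (not part of the original module): the dirty check in
# mir_check_repo_dvc_dirty() only asks whether any of these keys hold non-empty
# values in the parsed `dvc diff --show-json` output. The payload below is an
# assumed stand-in, shaped after the keys the function inspects.
def _demo_dvc_dirty_check():
    sample = {"added": [], "deleted": [], "modified": [{"path": "annotations.mir"}],
              "renamed": [], "not in cache": []}
    keys = ['added', 'deleted', 'modified', 'renamed', 'not in cache']
    return any(sample.get(key) for key in keys)  # True -> repo counts as dirty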
| 32.542373 | 110 | 0.655729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.184375 |
c70864a5d3c270e78a0bc9da8738245a6e27664f | 3,624 | py | Python | utils/edit_utils.py | ermekaitygulov/STIT | 93dca8d589b555fa99a5c5438a8517a52d8898c3 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 6 | 2022-03-11T23:42:12.000Z | 2022-03-28T09:39:25.000Z | utils/edit_utils.py | bycloudai/STIT-Windows | cadb2a01457bfd1c90bcd8d220587b48e1c2327a | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | null | null | null | utils/edit_utils.py | bycloudai/STIT-Windows | cadb2a01457bfd1c90bcd8d220587b48e1c2327a | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | null | null | null |
import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def add_texts_to_image_vertical(texts, pivot_images):
images_height = pivot_images.height
images_width = pivot_images.width
text_height = 256 + 16 - images_height % 16
num_images = len(texts)
image_width = images_width // num_images
text_image = Image.new('RGB', (images_width, text_height), (255, 255, 255))
draw = ImageDraw.Draw(text_image)
font_size = int(math.ceil(24 * image_width / 256))
try:
font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
except OSError:
font = ImageFont.load_default()
for i, text in enumerate(texts):
draw.text((image_width // 2 + i * image_width, text_height // 2), text, fill='black', anchor='ms', font=font)
out_image = Image.new('RGB', (pivot_images.width, pivot_images.height + text_image.height))
out_image.paste(text_image, (0, 0))
out_image.paste(pivot_images, (0, text_image.height))
return out_image
def get_affine_layers(synthesis):
blocks: List[SynthesisBlock] = [getattr(synthesis, f'b{res}') for res in synthesis.block_resolutions]
affine_layers = []
for block in blocks:
if hasattr(block, 'conv0'):
affine_layers.append((block.conv0.affine, True))
affine_layers.append((block.conv1.affine, True))
affine_layers.append((block.torgb.affine, False))
return affine_layers
def load_stylespace_std():
with open(paths_config.stylespace_mean_std, 'rb') as f:
_, s_std = pickle.load(f)
s_std = [torch.from_numpy(s).cuda() for s in s_std]
return s_std
def to_styles(edit: torch.Tensor, affine_layers):
idx = 0
styles = []
for layer, is_conv in affine_layers:
layer_dim = layer.weight.shape[0]
if is_conv:
styles.append(edit[idx:idx + layer_dim].clone())
idx += layer_dim
else:
styles.append(torch.zeros(layer_dim, device=edit.device, dtype=edit.dtype))
return styles
def w_to_styles(w, affine_layers):
w_idx = 0
styles = []
for affine, is_conv in affine_layers:
styles.append(affine(w[:, w_idx]))
if is_conv:
w_idx += 1
return styles
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
image_masked = image.copy().convert('RGBA')
pasted_image = dst_image.copy().convert('RGBA')
if radius != 0:
mask_np = np.array(mask)
kernel_size = (radius * 2 + 1, radius * 2 + 1)
kernel = np.ones(kernel_size)
eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
blurred_mask = Image.fromarray(blurred_mask)
image_masked.putalpha(blurred_mask)
else:
image_masked.putalpha(mask)
projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
Image.BILINEAR)
pasted_image.alpha_composite(projected)
return pasted_image
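# --- Usage sketch (illustrative, not from the original repo): pasting an edited
# crop back onto a frame with a feathered mask. The solid test images, identity
# perspective coefficients and blur settings are assumptions for the example.
def _demo_paste_image_mask():
    src = Image.new('RGB', (256, 256), (255, 0, 0))   # edited crop
    dst = Image.new('RGB', (256, 256), (0, 0, 255))   # original frame
    mask = Image.new('L', (256, 256), 255)            # keep the whole crop
    identity = (1, 0, 0, 0, 1, 0, 0, 0)               # 8 PERSPECTIVE coefficients
    return paste_image_mask(identity, src, dst, mask, radius=5, sigma=2.0)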
def paste_image(inverse_transform, img, orig_image):
pasted_image = orig_image.copy().convert('RGBA')
projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
pasted_image.paste(projected, (0, 0), mask=projected)
return pasted_image
| 32.648649 | 117 | 0.683499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.026766 |
c7087550ae8556b1933bc7961a3ed0e9783aaa07 | 6,845 | py | Python | conll_df/conll_df.py | interrogator/conll-df | 35611f295e3f8230f574142151e3a19098edfdca | ["MIT"] | 27 | 2017-03-17T15:39:16.000Z | 2021-11-23T09:10:10.000Z | conll_df/conll_df.py | interrogator/conll-df | 35611f295e3f8230f574142151e3a19098edfdca | ["MIT"] | 2 | 2017-11-21T05:33:04.000Z | 2018-09-22T13:05:06.000Z | conll_df/conll_df.py | interrogator/conll-df | 35611f295e3f8230f574142151e3a19098edfdca | ["MIT"] | 8 | 2017-03-17T14:59:34.000Z | 2022-02-25T19:09:27.000Z |
import pandas as pd
# UD 1.0
CONLL_COLUMNS = ['i', 'w', 'l', 'p', 'n', 'm', 'g', 'f', 'd', 'c']
# UD 2.0
CONLL_COLUMNS_V2 = ['i', 'w', 'l', 'x', 'p', 'm', 'g', 'f', 'e', 'o']
# possible morphological attributes
MORPH_ATTS = ['type',
'animacy',
#'gender',
              'number',
"Abbr",
"Animacy",
"Aspect",
"Case",
"Definite",
"Degree",
"Evident",
"Foreign",
"Gender",
"Mood",
"NumType",
"Number",
"Person",
"Polarity",
"Polite",
"Poss",
"PronType",
"Reflex",
"Tense",
"VerbForm",
"Voice",
"Type"]
def _make_sent_csv(sentstring, fname, meta, splitter, i, skip_meta=False):
"""
Take one CONLL-U sentence and add all metadata to each row
Return: str (CSV data) and dict (sent level metadata)
"""
fixed_lines = []
raw_lines = sentstring.splitlines()
for line in raw_lines:
if not line:
continue
if line.startswith('#'):
if not skip_meta:
try:
k, v = line.lstrip('# ').split(splitter, 1)
except ValueError:
k, v = line.lstrip('# ').split(splitter.strip(), 1)
meta[k.lower().strip()] = v.strip()
else:
line = '%s\t%s\t%s' % (fname, i, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines), meta
def _add_governors_to_df(df):
"""
Add governor info to a DF. Increases memory usage quite a bit.
"""
# save the original index
i = df.index.get_level_values('i')
# add g
dfg = df.set_index('g', append=True)
# remove i
dfg = dfg.reset_index('i')
dfg = df.loc[dfg.index]
dfg = dfg[['w', 'l', 'p', 'f']]
dfg['i'] = i
dfg = dfg.set_index('i', append=True)
dfg.index.names = ['file', 's', 'g', 'i']
dfg = dfg.reset_index('g', drop=True)
for c in list(dfg.columns):
try:
dfg[c] = dfg[c].cat.add_categories(['ROOT'])
except (AttributeError, ValueError):
pass
dfg = dfg.fillna('ROOT')
dfg.columns = ['gw', 'gl', 'gp', 'gf']
dfg = df.join(dfg, how="inner")
return dfg
def conll_df(path,
corpus_name=False,
corp_folder=False,
v2="auto",
skip_morph=False,
skip_meta=False,
add_gov=False,
drop=['text', 'newdoc id'],
file_index=True,
categories=True,
extra_fields='auto',
drop_redundant=True,
**kwargs):
"""
Optimised CONLL-U reader for v2.0 data
Args:
path (str): the file to prepare
Returns:
pd.DataFrame: 2d array representation of file data
"""
import os
import re
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
splitter = ' = ' if v2 else '='
with open(path, 'r') as fo:
data = fo.read().strip('\n')
if v2 == 'auto':
v2 = 'sent_id = ' in data[:9999]
fname = os.path.basename(path)
# metadata that applies filewide
# a little bonus for those with annual data
basedict = {}
if not skip_meta:
year = re.search(r'[12][0-9][0-9][0-9]', fname)
if year:
basedict['year'] = year.group(0)
sents = data.split('\n\n')
sents_meta = [_make_sent_csv(sstring, fname, dict(basedict), splitter, i, skip_meta=skip_meta) \
for i, sstring in enumerate(sents, start=1)]
sents, metadata = zip(*sents_meta)
# make the sent df
sents = '\n\n'.join(sents)
sents = StringIO(sents)
if v2:
cols = ['file', 's'] + CONLL_COLUMNS_V2
else:
cols = ['file', 's'] + CONLL_COLUMNS
df = pd.read_csv(sents, sep="\t", header=None, names=cols, quoting=kwargs.pop('quoting', 3),
index_col=[0, 1, 2], engine='c', na_filter=False, **kwargs)
if v2 and not skip_morph:
df['m'] = df['m'].fillna('')
df['o'] = df['o'].fillna('')
if extra_fields == 'auto':
# evil line to get all possible keys in the final column
extra_fields = list(df['o'].str.extractall(r'(?:^|\|)([^=]+?)=')[0].unique())
cats = MORPH_ATTS + extra_fields
if 'SpaceAfter' not in cats:
cats.append('SpaceAfter')
cats = list(set(cats))
om = df['o'].str.cat(df['m'], sep='|').str.strip('|_')
# this is a very slow list comp, but i can't think of a better way to do it.
# the 'extractall' solution makes columns for not just the value, but the key...
extra = [om.str.extract('%s=([^|$]+)' % cat.title(), expand=True) for cat in cats]
extra = pd.concat(extra, axis=1)
extra.columns = cats
df = pd.concat([df, extra], axis=1)
# make and join the meta df
if not skip_meta:
metadata = {i: d for i, d in enumerate(metadata, start=1)}
metadata = pd.DataFrame(metadata).T
metadata.index.name = 's'
df = metadata.join(df, how='inner')
# we never want these to show up as a dataframe column
badcols = ['sent_id', 's', 'i', 'file']
# if we aren't parsing morph and extra columns, we should at least keep them
if not skip_morph:
badcols += ['o', 'm']
if drop:
badcols = badcols + drop
df = df.drop(badcols, axis=1, errors='ignore')
# some evil code to handle conll-u files where g col could be a string
if 'g' in df.columns:
df['g'] = df['g'].fillna(0)
if df['g'].dtype in [object, str]:
df['g'] = df['g'].str.replace('_', '0').astype(int)
df['g'] = df['g'].astype(int)
df = df.fillna('_')
# attempt to categorise data
if categories:
for c in list(df.columns):
if c in ['g', 'date']:
continue
try:
df[c] = df[c].astype('category')
except:
pass
if add_gov:
df = _add_governors_to_df(df)
if not file_index:
df.index = df.index.droplevel('file')
if drop_redundant:
empty_cols = []
for c in df.columns:
if len(df[c].unique()) == 1:
empty_cols.append(c)
df = df.drop(empty_cols, axis=1)
#reorder columns so that important things are first
firsts = CONLL_COLUMNS_V2 if v2 else CONLL_COLUMNS
firsts = [i for i in firsts if i in list(df.columns)]
lasts = [i for i in list(df.columns) if i not in firsts]
df = df[firsts + lasts]
return df
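# --- Usage sketch (added for illustration; the .conllu path is an assumption).
# conll_df() returns a token-level DataFrame indexed by (file, s, i) -- or (s, i)
# when file_index=False -- holding the CONLL-U columns plus any parsed metadata
# and morphology fields.
def _demo_conll_df():
    df = conll_df('en_ewt-ud-dev.conllu', file_index=False)
    return df[['w', 'l', 'p', 'g', 'f']].head()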
| 30.154185 | 100 | 0.512491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,804 | 0.26355 |
c708da26fb5e59e5b2a82edc62ad3d6177cc9df2 | 2,491 | py | Python | scripts/postgres_to_lmdb_bars_60m.py | alexanu/atpy | 3f4b5cfe7de7633ef053d2feaddae421806a9799 | ["MIT"] | 24 | 2018-03-22T06:22:11.000Z | 2022-03-14T09:04:44.000Z | scripts/postgres_to_lmdb_bars_60m.py | alexanu/atpy | 3f4b5cfe7de7633ef053d2feaddae421806a9799 | ["MIT"] | null | null | null | scripts/postgres_to_lmdb_bars_60m.py | alexanu/atpy | 3f4b5cfe7de7633ef053d2feaddae421806a9799 | ["MIT"] | 9 | 2018-03-22T06:22:11.000Z | 2020-09-19T16:47:13.000Z |
#!/bin/python3
import argparse
import datetime
import functools
import logging
import os
import psycopg2
from dateutil.relativedelta import relativedelta
from atpy.data.cache.lmdb_cache import *
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.cache.postgres_cache import request_adjustments
from atpy.data.splits_dividends import adjust_df
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="PostgreSQL to LMDB configuration")
parser.add_argument('-lmdb_path', type=str, default=None, help="LMDB Path")
parser.add_argument('-delta_back', type=int, default=8, help="Default number of years to look back")
parser.add_argument('-adjust_splits', action='store_true', default=True, help="Adjust splits before saving")
parser.add_argument('-adjust_dividends', action='store_true', default=False, help="Adjust dividends before saving")
args = parser.parse_args()
lmdb_path = args.lmdb_path if args.lmdb_path is not None else os.environ['ATPY_LMDB_PATH']
con = psycopg2.connect(os.environ['POSTGRESQL_CACHE'])
adjustments = None
if args.adjust_splits and args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends')
elif args.adjust_splits:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='split')
elif args.adjust_dividends:
adjustments = request_adjustments(conn=con, table_name='splits_dividends', adj_type='dividend')
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - args.delta_back, 1, 1)
bgn_prd = bgn_prd + relativedelta(days=7 - bgn_prd.weekday())
cache_read = functools.partial(read_pickle, lmdb_path=lmdb_path)
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table='bars_60m', bgn_prd=bgn_prd, delta=relativedelta(days=7),
overlap=relativedelta(microseconds=-1), cache=cache_read)
for i, df in enumerate(bars_in_period):
if cache_read(bars_in_period.current_cache_key()) is None:
if adjustments is not None:
adjust_df(df, adjustments)
write(bars_in_period.current_cache_key(), df, lmdb_path)
logging.info('Saving ' + bars_in_period.current_cache_key())
else:
logging.info('Cache hit on ' + bars_in_period.current_cache_key())
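# --- Illustrative sketch (not part of the original script): a period that has
# been written can be read back from LMDB with the same cache key the writer
# used; the key value passed in here is a placeholder assumption.
def _demo_read_cached_period(cache_key, lmdb_path):
    return read_pickle(cache_key, lmdb_path=lmdb_path)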
| 43.701754 | 158 | 0.733842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.158169 |
c7091f356e0452faea68f2b17a6227d31b0f1d34 | 746 | py | Python | src/download_pdf.py | luccanunes/class-url-automation | 6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4 | ["MIT"] | 1 | 2020-10-17T02:08:10.000Z | 2020-10-17T02:08:10.000Z | src/download_pdf.py | luccanunes/class-url-automation | 6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4 | ["MIT"] | null | null | null | src/download_pdf.py | luccanunes/class-url-automation | 6ccb77feaa9aede4c8475d9f79149cc8c2c31cc4 | ["MIT"] | 1 | 2020-12-20T23:53:30.000Z | 2020-12-20T23:53:30.000Z |
def download_pdf(URL):
from selenium import webdriver
from time import sleep
URL = URL
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', {
# Change default directory for downloads
"download.default_directory": r"E:\coding\other\class-url-automation\src\pdfs",
"download.prompt_for_download": False, # To auto download the file
"download.directory_upgrade": True,
# It will not show PDF directly in chrome
"plugins.always_open_pdf_externally": True
})
options.add_argument("--headless")
driver = webdriver.Chrome(
executable_path=r'E:\coding\python\chromedriver.exe', chrome_options=options
)
driver.get(URL)
    sleep(5)
| 35.52381 | 87 | 0.687668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.446381 |
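# --- Usage sketch (illustrative; the URL below is a placeholder assumption).
# download_pdf() drives a headless Chrome that saves the PDF straight into the
# download.default_directory configured in the prefs above instead of opening it
# in the built-in viewer.
if __name__ == "__main__":
    download_pdf("https://example.com/some-lecture-notes.pdf")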
c709a775fc2c2a745cb1ed61a6cbd8778daaee06 | 609 | py | Python | datadog_checks_dev/datadog_checks/dev/tooling/commands/env/__init__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | ["BSD-3-Clause"] | 1 | 2021-01-28T01:45:37.000Z | 2021-01-28T01:45:37.000Z | datadog_checks_dev/datadog_checks/dev/tooling/commands/env/__init__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | ["BSD-3-Clause"] | 3 | 2021-01-27T04:56:40.000Z | 2021-02-26T06:29:22.000Z | datadog_checks_dev/datadog_checks/dev/tooling/commands/env/__init__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | ["BSD-3-Clause"] | 1 | 2021-04-07T16:58:27.000Z | 2021-04-07T16:58:27.000Z |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ..console import CONTEXT_SETTINGS
from .check import check_run
from .ls import ls
from .prune import prune
from .reload import reload_env
from .shell import shell
from .start import start
from .stop import stop
from .test import test
ALL_COMMANDS = (check_run, ls, prune, reload_env, shell, start, stop, test)
@click.group(context_settings=CONTEXT_SETTINGS, short_help='Manage environments')
def env():
pass
for command in ALL_COMMANDS:
env.add_command(command)
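# --- Illustrative sketch (not part of the original module): each submodule
# imported above exposes a click command object built roughly like the stand-in
# below, which is what ALL_COMMANDS collects and env.add_command() attaches.
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Example subcommand (illustration only)')
def _example():
    click.echo('hello from the env group')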
| 23.423077 | 81 | 0.771757 | 0 | 0 | 0 | 0 | 101 | 0.165846 | 0 | 0 | 133 | 0.218391 |
c709d0df6d7c96b0dace86ff6283e481bd4f3000 | 8,584 | py | Python | sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/marketplace/private_store_offer.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateStoreOffer']
class PrivateStoreOffer(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
e_tag: Optional[pulumi.Input[str]] = None,
icon_file_uris: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
offer_id: Optional[pulumi.Input[str]] = None,
plans: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]]] = None,
private_store_id: Optional[pulumi.Input[str]] = None,
specific_plan_ids_limitation: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
update_suppressed_due_idempotence: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The privateStore offer data structure.
API Version: 2020-01-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] e_tag: Identifier for purposes of race condition
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] icon_file_uris: Icon File Uris
:param pulumi.Input[str] offer_id: The offer ID to update or delete
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PlanArgs']]]] plans: Offer plans
:param pulumi.Input[str] private_store_id: The store ID - must use the tenant ID
:param pulumi.Input[Sequence[pulumi.Input[str]]] specific_plan_ids_limitation: Plan ids limitation for this offer
:param pulumi.Input[bool] update_suppressed_due_idempotence: Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['e_tag'] = e_tag
__props__['icon_file_uris'] = icon_file_uris
__props__['offer_id'] = offer_id
__props__['plans'] = plans
if private_store_id is None and not opts.urn:
raise TypeError("Missing required property 'private_store_id'")
__props__['private_store_id'] = private_store_id
__props__['specific_plan_ids_limitation'] = specific_plan_ids_limitation
__props__['update_suppressed_due_idempotence'] = update_suppressed_due_idempotence
__props__['created_at'] = None
__props__['modified_at'] = None
__props__['name'] = None
__props__['offer_display_name'] = None
__props__['publisher_display_name'] = None
__props__['type'] = None
__props__['unique_offer_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:marketplace/latest:PrivateStoreOffer"), pulumi.Alias(type_="azure-nextgen:marketplace/v20200101:PrivateStoreOffer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateStoreOffer, __self__).__init__(
'azure-nextgen:marketplace:PrivateStoreOffer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateStoreOffer':
"""
Get an existing PrivateStoreOffer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PrivateStoreOffer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Private store offer creation date
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Identifier for purposes of race condition
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter(name="iconFileUris")
def icon_file_uris(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Icon File Uris
"""
return pulumi.get(self, "icon_file_uris")
@property
@pulumi.getter(name="modifiedAt")
def modified_at(self) -> pulumi.Output[str]:
"""
Private store offer modification date
"""
return pulumi.get(self, "modified_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="offerDisplayName")
def offer_display_name(self) -> pulumi.Output[str]:
"""
It will be displayed prominently in the marketplace
"""
return pulumi.get(self, "offer_display_name")
@property
@pulumi.getter
def plans(self) -> pulumi.Output[Optional[Sequence['outputs.PlanResponse']]]:
"""
Offer plans
"""
return pulumi.get(self, "plans")
@property
@pulumi.getter(name="privateStoreId")
def private_store_id(self) -> pulumi.Output[str]:
"""
Private store unique id
"""
return pulumi.get(self, "private_store_id")
@property
@pulumi.getter(name="publisherDisplayName")
def publisher_display_name(self) -> pulumi.Output[str]:
"""
Publisher name that will be displayed prominently in the marketplace
"""
return pulumi.get(self, "publisher_display_name")
@property
@pulumi.getter(name="specificPlanIdsLimitation")
def specific_plan_ids_limitation(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Plan ids limitation for this offer
"""
return pulumi.get(self, "specific_plan_ids_limitation")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueOfferId")
def unique_offer_id(self) -> pulumi.Output[str]:
"""
Offers unique id
"""
return pulumi.get(self, "unique_offer_id")
@property
@pulumi.getter(name="updateSuppressedDueIdempotence")
def update_suppressed_due_idempotence(self) -> pulumi.Output[Optional[bool]]:
"""
Indicating whether the offer was not updated to db (true = not updated). If the allow list is identical to the existed one in db, the offer would not be updated.
"""
return pulumi.get(self, "update_suppressed_due_idempotence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
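# --- Usage sketch (illustrative, not generator output): creating the resource
# inside a Pulumi program. The store and offer identifiers are placeholder
# assumptions; private_store_id is the only required input besides the name.
def _demo_private_store_offer():
    return PrivateStoreOffer("examplePrivateStoreOffer",
                             private_store_id="00000000-0000-0000-0000-000000000000",
                             offer_id="example-offer-id")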
| 40.11215 | 230 | 0.646086 | 8,165 | 0.951188 | 0 | 0 | 3,773 | 0.439539 | 0 | 0 | 3,569 | 0.415774 |
c70a49b112aadc6ae32c90aac8b9581dc39ca540 | 1,491 | py | Python | examples/custom_shape/stages.py | oksumoron/locust | fddfefe7ef1082bc5284cd2dd8477221484dfb0c | ["MIT"] | 18,336 | 2015-01-03T20:38:40.000Z | 2022-03-31T16:02:35.000Z | examples/custom_shape/stages.py | oksumoron/locust | fddfefe7ef1082bc5284cd2dd8477221484dfb0c | ["MIT"] | 1,779 | 2015-01-01T02:09:30.000Z | 2022-03-31T09:58:10.000Z | examples/custom_shape/stages.py | oksumoron/locust | fddfefe7ef1082bc5284cd2dd8477221484dfb0c | ["MIT"] | 2,689 | 2015-01-05T02:01:50.000Z | 2022-03-31T13:13:09.000Z |
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
class UserTasks(TaskSet):
@task
def get_root(self):
self.client.get("/")
class WebsiteUser(HttpUser):
wait_time = constant(0.5)
tasks = [UserTasks]
class StagesShape(LoadTestShape):
"""
    A simple load test shape class that uses a different user count and spawn_rate at
different stages.
Keyword arguments:
stages -- A list of dicts, each representing a stage with the following keys:
duration -- When this many seconds pass the test is advanced to the next stage
users -- Total user count
spawn_rate -- Number of users to start/stop per second
stop -- A boolean that can stop that test at a specific stage
stop_at_end -- Can be set to stop once all stages have run.
"""
stages = [
{"duration": 60, "users": 10, "spawn_rate": 10},
{"duration": 100, "users": 50, "spawn_rate": 10},
{"duration": 180, "users": 100, "spawn_rate": 10},
{"duration": 220, "users": 30, "spawn_rate": 10},
{"duration": 230, "users": 10, "spawn_rate": 10},
{"duration": 240, "users": 1, "spawn_rate": 1},
]
def tick(self):
run_time = self.get_run_time()
for stage in self.stages:
if run_time < stage["duration"]:
tick_data = (stage["users"], stage["spawn_rate"])
return tick_data
return None
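# --- Illustrative note (not part of the original example): tick() is polled by
# the Locust runner; returning (users, spawn_rate) keeps the test running and
# returning None stops it. A quick way to inspect the schedule without running
# Locust is to replay the same logic over the class-level stages list:
def _demo_stage_at(run_time, stages=StagesShape.stages):
    for stage in stages:
        if run_time < stage["duration"]:
            return stage["users"], stage["spawn_rate"]
    return None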
| 29.82 | 90 | 0.602951 | 1,396 | 0.936284 | 0 | 0 | 58 | 0.0389 | 0 | 0 | 766 | 0.513749 |
c70b23f1cce14640f16607fb8ec77754089292bc | 2,115 | py | Python | db/seed_ids.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | ["MIT"] | 7 | 2017-08-19T22:36:29.000Z | 2018-06-03T07:02:04.000Z | db/seed_ids.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | ["MIT"] | 2 | 2021-04-30T20:37:14.000Z | 2021-12-13T19:46:29.000Z | db/seed_ids.py | xtuyaowu/jtyd_python_spider | ca5c3efd5519f592c0d587c22f03812e7756c8ea | ["MIT"] | 4 | 2017-09-06T03:00:11.000Z | 2017-12-10T08:04:21.000Z |
# coding:utf-8
from sqlalchemy import text
from db.basic_db import db_session
from db.models import SeedIds
from decorators.decorator import db_commit_decorator
def get_seed():
"""
    Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds).filter(text('status=0')).all()
def get_seed_ids():
"""
    Get all user ids to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('is_crawled=0')).all()
def get_home_ids():
"""
    Get all user ids whose home pages need to be crawled
:return: user ids
"""
return db_session.query(SeedIds.uid).filter(text('home_crawled=0')).all()
@db_commit_decorator
def set_seed_crawled(uid, result):
"""
:param uid: user id that is crawled
:param result: crawling result
:return: None
"""
seed = db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
if seed:
if seed.is_crawled == 0:
seed.is_crawled = result
else:
seed = SeedIds(uid=uid, is_crawled=result)
db_session.add(seed)
db_session.commit()
def get_seed_by_id(uid):
return db_session.query(SeedIds).filter(SeedIds.uid == uid).first()
@db_commit_decorator
def insert_seeds(ids):
db_session.execute(SeedIds.__table__.insert().prefix_with('IGNORE'), [{'uid': i} for i in ids])
db_session.commit()
@db_commit_decorator
def set_seed_other_crawled(uid):
"""
update it if user id already exists, else insert
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=1, other_crawled=1, home_crawled=1)
db_session.add(seed)
else:
seed.other_crawled = 1
db_session.commit()
@db_commit_decorator
def set_seed_home_crawled(uid):
"""
:param uid: user id
:return: None
"""
seed = get_seed_by_id(uid)
if seed is None:
seed = SeedIds(uid=uid, is_crawled=0, other_crawled=0, home_crawled=1)
db_session.add(seed)
else:
seed.home_crawled = 1
db_session.commit()
| 24.593023 | 99 | 0.659102 | 0 | 0 | 0 | 0 | 1,323 | 0.625532 | 0 | 0 | 553 | 0.261466 |
c70b35ed30f0bbb93f6ab0a59185f9e44b410fce | 16,745 | py | Python | tobler/area_weighted/area_interpolate.py | sjsrey/tobler | 8e3ebd5d01de459e4387fabd57cbb12cb6735596 | ["BSD-3-Clause"] | 1 | 2019-06-21T19:32:22.000Z | 2019-06-21T19:32:22.000Z | tobler/area_weighted/area_interpolate.py | sjsrey/tobler | 8e3ebd5d01de459e4387fabd57cbb12cb6735596 | ["BSD-3-Clause"] | null | null | null | tobler/area_weighted/area_interpolate.py | sjsrey/tobler | 8e3ebd5d01de459e4387fabd57cbb12cb6735596 | ["BSD-3-Clause"] | null | null | null |
"""
Area Weighted Interpolation
"""
import numpy as np
import geopandas as gpd
from ._vectorized_raster_interpolation import _fast_append_profile_in_gdf
import warnings
from scipy.sparse import dok_matrix, diags, coo_matrix
import pandas as pd
from tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs
def _area_tables_binning(source_df, target_df, spatial_index):
"""Construct area allocation and source-target correspondence tables using a spatial indexing approach
...
NOTE: this currently relies on Geopandas' spatial index machinery
Parameters
----------
source_df : geopandas.GeoDataFrame
GeoDataFrame containing input data and polygons
target_df : geopandas.GeoDataFramee
GeoDataFrame defining the output geometries
spatial_index : str
Spatial index to use to build the allocation of area from source to
target tables. It currently support the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
tables : scipy.sparse.dok_matrix
"""
if _check_crs(source_df, target_df):
pass
else:
return None
df1 = source_df.copy()
df2 = target_df.copy()
# it is generally more performant to use the longer df as spatial index
if spatial_index == "auto":
if df1.shape[0] > df2.shape[0]:
spatial_index = "source"
else:
spatial_index = "target"
if spatial_index == "source":
ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate="intersects")
elif spatial_index == "target":
ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate="intersects")
else:
raise ValueError(
f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'."
)
areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
table = coo_matrix(
(areas, (ids_src, ids_tgt),),
shape=(df1.shape[0], df2.shape[0]),
dtype=np.float32,
)
table = table.todok()
return table
def _area_tables(source_df, target_df):
"""
Construct area allocation and source-target correspondence tables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
Returns
-------
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
    target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union = gpd.overlay(source_df, target_df, how="union")
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row[row.geometry.name].area
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
def _area_interpolate_binning(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
table=None,
allocate_total=True,
spatial_index="auto",
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame
target_df : geopandas.GeoDataFrame
extensive_variables : list
[Optional. Default=None] Columns in dataframes for extensive variables
intensive_variables : list
[Optional. Default=None] Columns in dataframes for intensive variables
table : scipy.sparse.dok_matrix
[Optional. Default=None] Area allocation source-target correspondence
table. If not provided, it will be built from `source_df` and
`target_df` using `tobler.area_interpolate._area_tables_binning`
allocate_total : boolean
[Optional. Default=True] True if total value of source area should be
allocated. False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is exhausted by
intersections. See Notes for more details.
spatial_index : str
[Optional. Default="auto"] Spatial index to use to build the
allocation of area from source to target tables. It currently support
the following values:
- "source": build the spatial index on `source_df`
- "target": build the spatial index on `target_df`
- "auto": attempts to guess the most efficient alternative.
Currently, this option uses the largest table to build the
index, and performs a `bulk_query` on the shorter table.
Returns
-------
estimates : geopandas.GeoDataFrame
        new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
For an extensive variable, the estimate at target polygon j (default case) is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
If the area of the source polygon is not exhausted by intersections with
target polygons and there is reason to not allocate the complete value of
an extensive attribute, then setting allocate_total=False will use the
following weights:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / a_i
where a_i is the total area of source polygon i.
For an intensive variable, the estimate at target polygon j is:
.. math::
v_j = \\sum_i v_i w_{i,j}
w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if table is None:
table = _area_tables_binning(source_df, target_df, spatial_index)
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = np.asarray(table.sum(axis=1))
den = den + (den == 0)
den = 1.0 / den
n = den.shape[0]
den = den.reshape((n,))
den = diags([den], [0])
weights = den.dot(table) # row standardize table
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = diags([vals], [0]).dot(weights)
estimates = estimates.sum(axis=0)
extensive.append(estimates.tolist()[0])
extensive = np.asarray(extensive)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
area = np.asarray(table.sum(axis=0))
den = 1.0 / (area + (area == 0))
n, k = den.shape
den = den.reshape((k,))
den = diags([den], [0])
weights = table.dot(den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
n = vals.shape[0]
vals = vals.reshape((n,))
estimates = diags([vals], [0])
estimates = estimates.dot(weights).sum(axis=0)
intensive.append(estimates.tolist()[0])
intensive = np.asarray(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
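# --- Usage sketch (illustrative, not part of the module): interpolating a count
# (extensive) and a rate (intensive) from one polygon layer onto another. The
# GeoDataFrames and column names are assumptions for the example; both inputs
# must share the same CRS.
def _demo_area_interpolate_binning(tracts, hexgrid):
    return _area_interpolate_binning(
        tracts, hexgrid,
        extensive_variables=["population"],
        intensive_variables=["pct_owner_occupied"],
    )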
def _area_interpolate(
source_df,
target_df,
extensive_variables=None,
intensive_variables=None,
tables=None,
allocate_total=True,
):
"""
Area interpolation for extensive and intensive variables.
Parameters
----------
source_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
target_df : geopandas.GeoDataFrame (required)
geodataframe with polygon geometries
extensive_variables : list, (optional)
columns in dataframes for extensive variables
intensive_variables : list, (optional)
columns in dataframes for intensive variables
tables : tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
allocate_total : boolean
True if total value of source area should be allocated.
False if denominator is area of i. Note that the two cases
would be identical when the area of the source polygon is
exhausted by intersections. See Notes for more details.
Returns
-------
estimates : geopandas.GeoDataFrame
        new geodataframe with interpolated variables as columns and target_df geometry
as output geometry
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
    For an extensive variable, the estimate at target polygon j (default case) is:
    .. math::
    v_j = \\sum_i v_i w_{i,j}
    w_{i,j} = a_{i,j} / \\sum_k a_{i,k}
    If the area of the source polygon is not exhausted by intersections with
    target polygons and there is reason to not allocate the complete value of
    an extensive attribute, then setting allocate_total=False will use the
    following weights:
    .. math::
    v_j = \\sum_i v_i w_{i,j}
    w_{i,j} = a_{i,j} / a_i
    where a_i is the total area of source polygon i.
    For an intensive variable, the estimate at target polygon j is:
    .. math::
    v_j = \\sum_i v_i w_{i,j}
    w_{i,j} = a_{i,j} / \\sum_k a_{k,j}
"""
source_df = source_df.copy()
target_df = target_df.copy()
if _check_crs(source_df, target_df):
pass
else:
return None
if tables is None:
SU, UT = _area_tables(source_df, target_df)
else:
SU, UT = tables
den = source_df[source_df.geometry.name].area.values
if allocate_total:
den = SU.sum(axis=1)
den = den + (den == 0)
weights = np.dot(np.diag(1 / den), SU)
dfs = []
extensive = []
if extensive_variables:
for variable in extensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
estimates = np.dot(np.diag(vals), weights)
estimates = np.dot(estimates, UT)
estimates = estimates.sum(axis=0)
extensive.append(estimates)
extensive = np.array(extensive)
extensive = pd.DataFrame(extensive.T, columns=extensive_variables)
ST = np.dot(SU, UT)
area = ST.sum(axis=0)
den = np.diag(1.0 / (area + (area == 0)))
weights = np.dot(ST, den)
intensive = []
if intensive_variables:
for variable in intensive_variables:
vals = _nan_check(source_df, variable)
vals = _inf_check(source_df, variable)
vals.shape = (len(vals), 1)
est = (vals * weights).sum(axis=0)
intensive.append(est)
intensive = np.array(intensive)
intensive = pd.DataFrame(intensive.T, columns=intensive_variables)
if extensive_variables:
dfs.append(extensive)
if intensive_variables:
dfs.append(intensive)
df = pd.concat(dfs, axis=1)
df["geometry"] = target_df[target_df.geometry.name].reset_index(drop=True)
df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
return df
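# Illustrative sketch (not part of the original module): a typical call to
# _area_interpolate.  The shapefile paths and column names below are hypothetical
# placeholders; any two polygon GeoDataFrames sharing a CRS would do.
def _example_area_interpolate_usage():  # documentation sketch only, never called
    tracts = gpd.read_file("tracts.shp")  # hypothetical source polygons
    blocks = gpd.read_file("blocks.shp")  # hypothetical target polygons
    return _area_interpolate(
        tracts,
        blocks,
        extensive_variables=["population"],  # counts are split by area share
        intensive_variables=["median_income"],  # rates are averaged with area weights
    )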
def _area_tables_raster(
source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True
):
"""
Construct area allocation and source-target correspondence tables according to a raster 'populated' areas
Parameters
----------
source_df : geopandas.GeoDataFrame
        geodataframe with geometry column of polygon type
target_df : geopandas.GeoDataFrame
geodataframe with geometry column of polygon type
raster_path : str
the path to the associated raster image.
codes : list
list of integer code values that should be considered as 'populated'.
        Since this draws inspiration from the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).
        The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html
        Only taken into consideration for raster-based harmonization.
force_crs_match : bool (default is True)
Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.
        It is recommended to leave this argument as True.
Returns
-------
tables: tuple (optional)
two 2-D numpy arrays
SU: area of intersection of source geometry i with union geometry j
UT: binary mapping of union geometry j to target geometry t
Notes
-----
The assumption is both dataframes have the same coordinate reference system.
Union geometry is a geometry formed by the intersection of a source geometry and a target geometry
SU Maps source geometry to union geometry, UT maps union geometry to target geometry
"""
if _check_crs(source_df, target_df):
pass
else:
return None
source_df = source_df.copy()
target_df = target_df.copy()
n_s = source_df.shape[0]
n_t = target_df.shape[0]
_left = np.arange(n_s)
_right = np.arange(n_t)
source_df.loc[:, "_left"] = _left # create temporary index for union
target_df.loc[:, "_right"] = _right # create temporary index for union
res_union_pre = gpd.overlay(source_df, target_df, how="union")
# Establishing a CRS for the generated union
warnings.warn(
"The CRS for the generated union will be set to be the same as source_df."
)
res_union_pre.crs = source_df.crs
# The 'append_profile_in_gdf' function is present in nlcd.py script
res_union = _fast_append_profile_in_gdf(
res_union_pre, raster_path, force_crs_match=force_crs_match
)
str_codes = [str(i) for i in codes]
str_list = ["Type_" + i for i in str_codes]
# Extract list of code names that actually appear in the appended dataset
str_list_ok = [col for col in res_union.columns if col in str_list]
res_union["Populated_Pixels"] = res_union[str_list_ok].sum(axis=1)
n_u, _ = res_union.shape
SU = np.zeros(
(n_s, n_u)
) # holds area of intersection of source geom with union geom
UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom
for index, row in res_union.iterrows():
# only union polygons that intersect both a source and a target geometry matter
if not np.isnan(row["_left"]) and not np.isnan(row["_right"]):
s_id = int(row["_left"])
t_id = int(row["_right"])
SU[s_id, index] = row["Populated_Pixels"]
UT[index, t_id] = 1
source_df.drop(["_left"], axis=1, inplace=True)
target_df.drop(["_right"], axis=1, inplace=True)
return SU, UT
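# Illustrative sketch (not part of the original module): feeding the raster-masked
# (SU, UT) tables into _area_interpolate, so that populated pixels rather than raw
# polygon area drive the allocation.  Paths and column names are hypothetical.
def _example_raster_weighted_interpolation():  # documentation sketch only, never called
    tracts = gpd.read_file("tracts.shp")
    blocks = gpd.read_file("blocks.shp")
    SU, UT = _area_tables_raster(tracts, blocks, raster_path="nlcd_2016.tif")
    return _area_interpolate(
        tracts, blocks, extensive_variables=["population"], tables=(SU, UT)
    )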
| 33.828283 | 225 | 0.657928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,059 | 0.540997 |
c70bc413822aaad70486fa31ce67b5a7d9e44d76 | 49,568 | py | Python | cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| null | null | null | cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| null | null | null | cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
]
| 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headlines Timing
#
# Author:
# ----------------------------------------------------------------------------
#set up to test area names and part of states
# without locationName defined
areaT1 = """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
"""
#with location name defined
areaT2= """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ050']['locationName'] = 'Clearfield'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ057']['locationName'] = 'Clearfield'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ160']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ151']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ043']['locationName'] = 'Adams'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ162']['locationName'] = 'Adams'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
#AreaDictionary['FLZ165']['locationName'] = 'western'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ056']['locationName'] = 'Tampa'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ052']['locationName'] = 'Tampa'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ155']['locationName'] = 'Atlanta'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ061']['locationName'] = 'Beach'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['locationName'] = 'Beach'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ142']['locationName'] = 'South Park'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
AreaDictionary['FLZ043']['locationName'] = 'South Park'
"""
#for testing of parishes, counties, and areas
areaT3 = """
AreaDictionary['FLC017']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC017']['partOfState'] = 'western'
AreaDictionary['FLC017']['independentCity'] = 1
AreaDictionary['FLC105']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC105']['partOfState'] = 'western'
AreaDictionary['FLC027']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC027']['partOfState'] = 'western'
AreaDictionary['FLC053']['fullStateName'] = 'Florida'
AreaDictionary['FLC053']['partOfState'] = 'western'
"""
areaT3FIPS0= '#Definition["areaType"] = "FIPS"'
areaT3FIPS1= 'Definition["areaType"] = "FIPS"'
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "Hazard_FFA_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "NEW FFA",
"name": "Hazard_FFA_1",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Coastal Pasco-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CON FFA",
"name": "Hazard_FFA_2",
"drtTime": "20100101_0530",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "EXA FFA",
"name": "Hazard_FFA_3",
"drtTime": "20100101_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149","FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXA.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has expanded the",
"* Flood Watch to include a portion of south central Florida, including the following area, Highlands.",
"* Until 3 AM EST early this morning",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CAN FFA, NEW FFA",
"name": "Hazard_FFA_4",
"drtTime": "20100101_0720",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IJ '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 8, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/X.NEW.KTBW.FF.A.0001.100101T0720Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLOOD WATCH IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The Flood Watch for a portion of south central Florida has been cancelled.",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled."
],
},
{
"commentary": "EXP FFA, 2 NEW FFA",
"name": "Hazard_FFA_5",
"drtTime": "20100101_1300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'FS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0001.000000T0000Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"...FLASH FLOOD WATCH HAS EXPIRED...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"The Flash Flood Watch for a portion of south central Florida has expired.",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "CON test of multiple events",
"name": "Hazard_FFA_6",
"drtTime": "20100102_0300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'RS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST SATURDAY...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST Saturday",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "middle of 1st event",
"name": "Hazard_FFA_7",
"drtTime": "20100102_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From this evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "joining two events",
"name": "Hazard_FFA_8",
"drtTime": "20100102_1200",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IC '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.EXT.KTBW.FF.A.0003.100102T1200Z-100103T1900Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH NOW IN EFFECT THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch is now in effect for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "into the tail end of the events",
"name": "Hazard_FFA_9",
"drtTime": "20100103_1100",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 2 PM EST this afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT THROUGH THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Through this evening",
],
},
{
"commentary": "exp 1st event, continue 2nd event",
"name": "Hazard_FFA_10",
"drtTime": "20100103_1855",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DR '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH WILL EXPIRE AT 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch for a portion of south central Florida will expire at 2 PM EST this afternoon.",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 8 PM EST THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 8 PM EST this evening",
],
},
{
"commentary": "cancel 2nd event",
"name": "Hazard_FFA_11",
"drtTime": "20100104_0000",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'GO '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.GO.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled.",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_FFA_12",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# Begin detailed phrasing of location tests
{
"commentary": "one state, single area, w/o location",
"name": "Hazard_FFA_13a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, single area, w location",
"name": "Hazard_FFA_13b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w/o location",
"name": "Hazard_FFA_14a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
# "Including the cities of St. Petersburg, Clearwater, Largo, ",
# "Lakeland, Winter Haven, Bradenton, Bayshore Gardens, ",
# "Palmetto, Sebring, Avon Park, Placid Lakes",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Highlands and Pinellas. In western Georgia, Coastal Manatee and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w location",
"name": "Hazard_FFA_14b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Clearfield. In western Georgia, Atlanta and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w/o location",
"name": "Hazard_FFA_15a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Coastal Hillsborough and Coastal Sarasota. In southern Florida, Hardee. In western Florida, Highlands and Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w location",
"name": "Hazard_FFA_15b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Aunt Ruby. In southern Florida, Tampa. In western Florida, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w/o location",
"name": "Hazard_FFA_16a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Pinellas. In Georgia, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w location",
"name": "Hazard_FFA_16b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w/o location",
"name": "Hazard_FFA_17a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Coastal Hillsborough, Coastal Sarasota, Highlands, and Pinellas. In Georgia, Coastal Hernando, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w location",
"name": "Hazard_FFA_17b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Aunt Ruby and Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 1, independent 1, counties 1",
"name": "Hazard_FFA_18a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parish, in western Florida, Hernando. In western Louisiana, Citrus and DeSoto.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 2, independent 1, counties 1",
"name": "Hazard_FFA_18b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053","FLC105"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-105-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-Polk-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parishes, in western Florida, Hernando. In western Louisiana, Citrus, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
]
import TestScript
def testScript(self, dataMgr):
defaults = {
"database": "<site>_GRID__Fcst_00000000_0000",
"publishGrids": 0,
"decodeVTEC": 1,
"gridsStartTime": "20100101_0500",
"orderStrings": 1,
"vtecMode": "X",
"deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
| 47.342884 | 261 | 0.588162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37,653 | 0.759623 |
c70bf8219d2bb2dabd3039c6feeeaba05de046c4 | 1,701 | py | Python | main.py | hasanzadeh99/mapna_test_2021 | 1e2e50a9aff32e2d730bf3d0fd20393e5aea0872 | [
"MIT"
]
| null | null | null | main.py | hasanzadeh99/mapna_test_2021 | 1e2e50a9aff32e2d730bf3d0fd20393e5aea0872 | [
"MIT"
]
| null | null | null | main.py | hasanzadeh99/mapna_test_2021 | 1e2e50a9aff32e2d730bf3d0fd20393e5aea0872 | [
"MIT"
]
| null | null | null | import time
old_input_value = False  # previous input sample, used for edge detection
flag_falling_edge = None  # set when a falling edge is seen, cleared once the delay elapses
start = None  # timestamp of the most recent falling edge
flag_output_mask = False  # while set, the output is held high instead of following the input
DELAY_CONST = 10  # delay in seconds: how long the output stays high after a falling edge
output = None  # current output value; starts undefined
def response_function():
global old_input_value, flag_falling_edge, start, flag_output_mask, output
    # After a falling edge, hold the output high until DELAY_CONST seconds have elapsed.
    if flag_falling_edge:
output = True
end = time.perf_counter()
if end - start > DELAY_CONST:
output = 0
flag_falling_edge = 0
flag_output_mask = False
input_value = bool(int(input('Please Enter your Input Value: ')))
if old_input_value == False and input_value == True:
if not flag_output_mask: output = input_value
old_input_value = input_value
print('Input Rising Edge detected ... ')
print(f'output is: {output}')
elif old_input_value == False and input_value == False:
if not flag_output_mask: output = input_value
old_input_value = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == True:
old_input_value = input_value
if not flag_output_mask: output = input_value
print(f'output is: {output}')
elif old_input_value == True and input_value == False:
start = time.perf_counter()
print('Input Falling Edge detected ... ')
flag_falling_edge = True
flag_output_mask = True
old_input_value = input_value
print(f'output is: {output}')
if __name__ == '__main__':
DELAY_CONST=int(input("Hello \nPlease Enter Your delay value here :"))
while True:
response_function()
| 25.772727 | 79 | 0.621399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.165197 |
c70c23e78ecc9c77169196b937ad121dbbab19c4 | 1,345 | py | Python | ansiblemetrics/playbook/num_deprecated_modules.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
]
| 1 | 2020-04-24T16:09:14.000Z | 2020-04-24T16:09:14.000Z | ansiblemetrics/playbook/num_deprecated_modules.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
]
| null | null | null | ansiblemetrics/playbook/num_deprecated_modules.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
]
| null | null | null | from ansiblemetrics.ansible_modules import DEPRECATED_MODULES_LIST
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumDeprecatedModules(AnsibleMetric):
""" This class measures the number of times tasks use deprecated modules."""
def count(self):
"""Return the deprecated modules occurrence.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_deprecated_modules import NumDeprecatedModules
playbook = '''
- name: Include unique username from register.yml
include_vars: # non deprecated module
file: username_info.yml
- name: Create a service
oc: # deprecated module
state: present
name: myservice
namespace: mynamespace
kind: Service
'''
NumDeprecatedModules(playbook).count()
>> 1
Returns
-------
int
deprecated modules occurrence
"""
modules = []
for task in self.tasks:
if not task:
continue
for key in task:
if key in DEPRECATED_MODULES_LIST:
modules.append(key)
return len(modules)
| 25.377358 | 90 | 0.553903 | 1,219 | 0.90632 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.658736 |
c70c9127731c0e67539a6749c14a06e75f1c3481 | 789 | py | Python | app/api/v1/validators/validators.py | GraceKiarie/iReporter | 1011f878f9fb643798192aeed1b68c3e6de4dedc | [
"MIT"
]
| 1 | 2018-12-14T09:52:39.000Z | 2018-12-14T09:52:39.000Z | app/api/v1/validators/validators.py | GraceKiarie/iReporter | 1011f878f9fb643798192aeed1b68c3e6de4dedc | [
"MIT"
]
| 6 | 2018-12-08T11:15:46.000Z | 2018-12-15T11:04:36.000Z | app/api/v1/validators/validators.py | GraceKiarie/iReporter | 1011f878f9fb643798192aeed1b68c3e6de4dedc | [
"MIT"
]
| 5 | 2018-12-04T11:00:54.000Z | 2019-06-13T12:53:50.000Z | """ This module does validation for data input in incidents """
import re
class Validate():
"""
    methods for validating incidents input data
"""
def valid_email(self, email):
self.vemail = re.match(
r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if not self.vemail:
return None
return True
def valid_password(self, password):
self.password = re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', password)
if self.password is None:
return None
return True
def valid_string(self, value):
"""
        checks that the given value is a string
"""
self.value = value
if not isinstance(self.value, str):
return None
return True
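# Illustrative sketch (not part of the original module): how these validators are
# typically called from a request handler.  The sample values are made up.
def _example_usage():  # documentation sketch only, never called
    validator = Validate()
    if not validator.valid_email("grace@example.com"):
        return "invalid email address"
    if not validator.valid_password("S3cret@pass"):
        return "password must be at least 8 characters from the allowed set"
    if not validator.valid_string("Pothole on Moi Avenue"):
        return "value must be a string"
    return "ok"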
| 24.65625 | 70 | 0.532319 | 707 | 0.896071 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.334601 |
c70da4e644f1e748e2087d4c879dc99b2751ebd0 | 2,710 | py | Python | bin/find_latest_versions.py | ebreton/ghost-in-a-shell | 8b3382d60a86322c74c6ee1b52f068dfcfc3d79e | [
"MIT"
]
| 2 | 2018-05-31T08:56:16.000Z | 2020-01-23T15:12:44.000Z | bin/find_latest_versions.py | ebreton/ghost-in-a-shell | 8b3382d60a86322c74c6ee1b52f068dfcfc3d79e | [
"MIT"
]
| null | null | null | bin/find_latest_versions.py | ebreton/ghost-in-a-shell | 8b3382d60a86322c74c6ee1b52f068dfcfc3d79e | [
"MIT"
]
| null | null | null | #!/usr/bin/python
from distutils.version import LooseVersion
import argparse
import logging
import requests
import re
session = requests.Session()
# authorization token
TOKEN_URL = "https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull"
# find all tags
TAGS_URL = "https://index.docker.io/v2/%s/tags/list"
TAG_RE = re.compile(r"^[\d]+(\.[\d]+)*$")
# get image digest for target
TARGET_DIGEST = "https://index.docker.io/v2/%(repository)s/manifests/%(tag)s"
class Fetcher:
DIGEST_HEADER = {}
def __init__(self, repository):
self.repository = repository
self.token = self.get_token()
self.headers = {"Authorization": "Bearer %s"% self.token}
self.headers_for_tags = {
"Authorization": "Bearer %s"% self.token,
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}
logging.debug("initialized fetcher for %s", self.repository)
def get_token(self):
response = session.get(TOKEN_URL % self.repository)
response.raise_for_status()
token = response.json().get("token")
logging.debug("got token: %s", token)
return token
def get_versions(self):
response = session.get(TAGS_URL % self.repository, headers=self.headers_for_tags)
response.raise_for_status()
all_tags = response.json().get("tags")
numbered_tags = filter(lambda x: TAG_RE.match(x), all_tags)
        versions = [LooseVersion(tag) for tag in numbered_tags]
logging.debug("got tags: %s", versions)
return versions
def find_latest(repository):
fetcher = Fetcher(repository)
all_tags = fetcher.get_versions()
return max(all_tags)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage="""Version checker script
This file retrieves the latest version of the ghost container image from Docker Hub
It can be run with both python 2.7 and 3.6""")
parser.add_argument("repository", nargs='?',
help="repository name [default:library/ghost]",
default="library/ghost")
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('-q', '--quiet', action='store_true')
args = parser.parse_args()
# set up level of logging
level = logging.INFO
if args.quiet:
level = logging.WARNING
elif args.debug:
level = logging.DEBUG
# set up logging to console
logging.basicConfig(format='%(levelname)s - %(funcName)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(level)
logging.debug(args)
# version needs to be print to output in order to be retrieved by Makefile
print(find_latest(args.repository))
| 30.449438 | 94 | 0.667897 | 1,090 | 0.402214 | 0 | 0 | 0 | 0 | 0 | 0 | 922 | 0.340221 |
c70ef8c2db16a8357afdb58004c2cb5a69fd6d01 | 326 | py | Python | tests/conftest.py | badarsebard/terraform-pytest | 58c8096f0405ec1d0061723fc1dd2d099655c3c5 | [
"MIT"
]
| null | null | null | tests/conftest.py | badarsebard/terraform-pytest | 58c8096f0405ec1d0061723fc1dd2d099655c3c5 | [
"MIT"
]
| null | null | null | tests/conftest.py | badarsebard/terraform-pytest | 58c8096f0405ec1d0061723fc1dd2d099655c3c5 | [
"MIT"
]
| 1 | 2021-11-19T16:36:31.000Z | 2021-11-19T16:36:31.000Z | from .terraform import TerraformManager
import pytest
from _pytest.tmpdir import TempPathFactory
@pytest.fixture(scope='session')
def tfenv(tmp_path_factory: TempPathFactory):
env_vars = {
}
with TerraformManager(path_factory=tmp_path_factory, env_vars=env_vars) as deployment:
yield deployment
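# Illustrative sketch (not part of the original conftest): a test that would normally
# live in a separate test module and consume the session-scoped fixture above.  It
# only assumes the fixture yields a non-None deployment object; TerraformManager's
# real API is not exercised here.
def test_deployment_is_available(tfenv):  # documentation sketch only
    assert tfenv is not None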
| 25.076923 | 90 | 0.760736 | 0 | 0 | 193 | 0.592025 | 226 | 0.693252 | 0 | 0 | 9 | 0.027607 |
c70f068d9386d59199952ccdcd03582e192c0909 | 2,933 | py | Python | pelicanconf.py | myrle-krantz/treasurer-site | e0beca3d0d724ae09300974f7020a5611fbd3034 | [
"Apache-2.0"
]
| 1 | 2021-11-09T21:42:44.000Z | 2021-11-09T21:42:44.000Z | pelicanconf.py | myrle-krantz/treasurer-site | e0beca3d0d724ae09300974f7020a5611fbd3034 | [
"Apache-2.0"
]
| 1 | 2021-11-01T11:14:10.000Z | 2021-11-01T11:14:10.000Z | pelicanconf.py | isabella232/treasurer-site | 9a2e33c85e040183df049d63814ef6b1b0bb7a46 | [
"Apache-2.0"
]
| 3 | 2021-06-04T09:07:48.000Z | 2021-11-09T21:42:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# vim: encoding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import date
# import os
# import sys
PATH = 'content'
TIMEZONE = 'UTC'
DEFAULT_LANG = u'en'
AUTHOR = u'Treasurer Team'
SITENAME = u'Apache Treasurer'
SITEDOMAIN = 'treasurer.apache.org'
SITEURL = 'https://treasurer.apache.org'
# SITELOGO = 'https://treasurer.apache.org/images/logo.png'
# SITEDESC = u'<blank>'
SITEREPOSITORY = 'https://github.com/apache/treasurer-site/blob/main/content/pages/'
TRADEMARKS = u'Apache and the Apache feather logo are trademarks or registered trademarks'
CURRENTYEAR = date.today().year
# Save pages using full directory preservation
PAGES_PATHS = ['content']
# PATH_METADATA= '(?P<path_no_ext>.*)\..*'
# PAGE_SAVE_AS= '{path_no_ext}.html'
PAGE_URL = '{slug}.html'
SLUGIFY_SOURCE = 'basename'
PAGE_SAVE_AS = '{slug}.html'
# We want to serve any images
STATIC_PATHS = ['.htaccess', 'images']
# We don't use articles, but we don't want pelican to think
# that content/ contains articles.
ARTICLE_PATHS = ['articles']
# Disable these pages
ARCHIVES_SAVE_AS = ''
ARTICLE_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
INDEX_SAVE_AS = ''
TAGS_SAVE_AS = ''
# Enable ATOM feed and Disable other feeds
FEED_DOMAIN = SITEURL
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Pelican Plugins
# The provided location. If the buildbot does not have a new plugin then look into requirements.txt
PLUGIN_PATHS = ['./theme/plugins']
PLUGINS = ['toc', 'pelican-gfm', 'sitemap']
# TOC Generator
TOC_HEADERS = r"h[1-6]"
# Sitemap Generator
SITEMAP = {
"exclude": ["tag/", "category/"],
"format": "xml",
"priorities": {
"articles": 0.1,
"indexes": 0.1,
"pages": 0.8
},
"changefreqs": {
"articles": "never",
"indexes": "never",
"pages": "monthly"
}
}
# Unused links
LINKS = ( )
SOCIAL = ( )
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
| 27.411215 | 99 | 0.715309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,064 | 0.703716 |
c70f37923d6264953c0f43a70aaafcb143563524 | 10,935 | py | Python | TurtleArt/taturtle.py | sugar-activities/4585-activity | 38e6efd7b4fcb9cf820efaf7406ce7abde92406e | [
"MIT"
]
| null | null | null | TurtleArt/taturtle.py | sugar-activities/4585-activity | 38e6efd7b4fcb9cf820efaf7406ce7abde92406e | [
"MIT"
]
| null | null | null | TurtleArt/taturtle.py | sugar-activities/4585-activity | 38e6efd7b4fcb9cf820efaf7406ce7abde92406e | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
#Copyright (c) 2010,12 Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from random import uniform
from math import sin, cos, pi, sqrt
from gettext import gettext as _
import gtk
import cairo
from taconstants import TURTLE_LAYER, DEFAULT_TURTLE_COLORS
from tasprite_factory import SVG, svg_str_to_pixbuf
from tacanvas import wrap100, COLOR_TABLE
from sprites import Sprite
from tautils import debug_output
SHAPES = 36
def generate_turtle_pixbufs(colors):
""" Generate pixbufs for generic turtles """
shapes = []
svg = SVG()
svg.set_scale(1.0)
for i in range(SHAPES):
svg.set_orientation(i * 10)
shapes.append(svg_str_to_pixbuf(svg.turtle(colors)))
return shapes
class Turtles:
def __init__(self, sprite_list):
""" Class to hold turtles """
self.dict = dict()
self.sprite_list = sprite_list
self.default_pixbufs = []
def get_turtle(self, k, append=False, colors=None):
""" Find a turtle """
if k in self.dict:
return self.dict[k]
elif not append:
return None
else:
if colors == None:
Turtle(self, k)
elif type(colors) in [list, tuple]:
Turtle(self, k, colors)
else:
Turtle(self, k, colors.split(','))
return self.dict[k]
def get_turtle_key(self, turtle):
""" Find a turtle's name """
for k in iter(self.dict):
if self.dict[k] == turtle:
return k
return None
def turtle_count(self):
""" How many turtles are there? """
return(len(self.dict))
def add_to_dict(self, k, turtle):
""" Add a new turtle """
self.dict[k] = turtle
def remove_from_dict(self, k):
""" Delete a turtle """
if k in self.dict:
del(self.dict[k])
def show_all(self):
""" Make all turtles visible """
for k in iter(self.dict):
self.dict[k].show()
def spr_to_turtle(self, spr):
""" Find the turtle that corresponds to sprite spr. """
for k in iter(self.dict):
if spr == self.dict[k].spr:
return self.dict[k]
return None
def get_pixbufs(self):
""" Get the pixbufs for the default turtle shapes. """
if self.default_pixbufs == []:
self.default_pixbufs = generate_turtle_pixbufs(
["#008000", "#00A000"])
return(self.default_pixbufs)
class Turtle:
def __init__(self, turtles, key, turtle_colors=None):
""" The turtle is not a block, just a sprite with an orientation """
self.x = 0
self.y = 0
self.hidden = False
self.shapes = []
self.custom_shapes = False
self.type = 'turtle'
self.name = key
self.heading = 0
self.pen_shade = 50
self.pen_color = 0
self.pen_gray = 100
self.pen_size = 5
self.pen_state = True
self.label_block = None
self._prep_shapes(key, turtles, turtle_colors)
# Choose a random angle from which to attach the turtle label.
if turtles.sprite_list is not None:
self.spr = Sprite(turtles.sprite_list, 0, 0, self.shapes[0])
angle = uniform(0, pi * 4 / 3.0) # 240 degrees
w = self.shapes[0].get_width()
r = w * 0.67
            # Restrict angle to the sides 30-150; 210-330
if angle > pi * 2 / 3.0:
angle += pi / 2.0 # + 90
self.label_xy = [int(r * sin(angle)),
int(r * cos(angle) + w / 2.0)]
else:
angle += pi / 6.0 # + 30
self.label_xy = [int(r * sin(angle) + w / 2.0),
int(r * cos(angle) + w / 2.0)]
else:
self.spr = None
turtles.add_to_dict(key, self)
def _prep_shapes(self, name, turtles=None, turtle_colors=None):
# If the turtle name is an int, we'll use a palette color as the
# turtle color
try:
int_key = int(name)
use_color_table = True
except ValueError:
use_color_table = False
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
elif use_color_table:
fill = wrap100(int_key)
stroke = wrap100(fill + 10)
self.colors = ['#%06x' % (COLOR_TABLE[fill]),
'#%06x' % (COLOR_TABLE[stroke])]
self.shapes = generate_turtle_pixbufs(self.colors)
else:
if turtles is not None:
self.colors = DEFAULT_TURTLE_COLORS
self.shapes = turtles.get_pixbufs()
def set_turtle_colors(self, turtle_colors):
''' reset the colors of a preloaded turtle '''
if turtle_colors is not None:
self.colors = turtle_colors[:]
self.shapes = generate_turtle_pixbufs(self.colors)
self.set_heading(self.heading)
def set_shapes(self, shapes, i=0):
""" Reskin the turtle """
n = len(shapes)
if n == 1 and i > 0: # set shape[i]
if i < len(self.shapes):
self.shapes[i] = shapes[0]
elif n == SHAPES: # all shapes have been precomputed
self.shapes = shapes[:]
else: # rotate shapes
if n != 1:
debug_output("%d images passed to set_shapes: ignoring" % (n),
self.tw.running_sugar)
if self.heading == 0: # rotate the shapes
images = []
w, h = shapes[0].get_width(), shapes[0].get_height()
nw = nh = int(sqrt(w * w + h * h))
for i in range(SHAPES):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, nw, nh)
context = cairo.Context(surface)
context = gtk.gdk.CairoContext(context)
context.translate(nw / 2., nh / 2.)
context.rotate(i * 10 * pi / 180.)
context.translate(-nw / 2., -nh / 2.)
context.set_source_pixbuf(shapes[0], (nw - w) / 2.,
(nh - h) / 2.)
context.rectangle(0, 0, nw, nh)
context.fill()
images.append(surface)
self.shapes = images[:]
else: # associate shape with image at current heading
j = int(self.heading + 5) % 360 / (360 / SHAPES)
self.shapes[j] = shapes[0]
self.custom_shapes = True
self.show()
def reset_shapes(self):
""" Reset the shapes to the standard turtle """
if self.custom_shapes:
self.shapes = generate_turtle_pixbufs(self.colors)
self.custom_shapes = False
def set_heading(self, heading):
""" Set the turtle heading (one shape per 360/SHAPES degrees) """
self.heading = heading
i = (int(self.heading + 5) % 360) / (360 / SHAPES)
if not self.hidden and self.spr is not None:
try:
self.spr.set_shape(self.shapes[i])
except IndexError:
self.spr.set_shape(self.shapes[0])
def set_color(self, color):
""" Set the pen color for this turtle. """
self.pen_color = color
def set_gray(self, gray):
""" Set the pen gray level for this turtle. """
self.pen_gray = gray
def set_shade(self, shade):
""" Set the pen shade for this turtle. """
self.pen_shade = shade
def set_pen_size(self, pen_size):
""" Set the pen size for this turtle. """
self.pen_size = pen_size
def set_pen_state(self, pen_state):
""" Set the pen state (down==True) for this turtle. """
self.pen_state = pen_state
def hide(self):
""" Hide the turtle. """
if self.spr is not None:
self.spr.hide()
if self.label_block is not None:
self.label_block.spr.hide()
self.hidden = True
def show(self):
""" Show the turtle. """
if self.spr is not None:
self.spr.set_layer(TURTLE_LAYER)
self.hidden = False
self.move((self.x, self.y))
self.set_heading(self.heading)
if self.label_block is not None:
self.label_block.spr.move((self.x + self.label_xy[0],
self.y + self.label_xy[1]))
self.label_block.spr.set_layer(TURTLE_LAYER + 1)
def move(self, pos):
""" Move the turtle. """
self.x, self.y = int(pos[0]), int(pos[1])
if not self.hidden and self.spr is not None:
self.spr.move(pos)
if self.label_block is not None:
self.label_block.spr.move((pos[0] + self.label_xy[0],
pos[1] + self.label_xy[1]))
return(self.x, self.y)
def get_name(self):
''' return turtle name (key) '''
return self.name
def get_xy(self):
""" Return the turtle's x, y coordinates. """
return(self.x, self.y)
def get_heading(self):
""" Return the turtle's heading. """
return(self.heading)
def get_color(self):
""" Return the turtle's color. """
return(self.pen_color)
def get_gray(self):
""" Return the turtle's gray level. """
return(self.pen_gray)
def get_shade(self):
""" Return the turtle's shade. """
return(self.pen_shade)
def get_pen_size(self):
""" Return the turtle's pen size. """
return(self.pen_size)
def get_pen_state(self):
""" Return the turtle's pen state. """
return(self.pen_state)
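# Illustrative sketch (not part of the original file): typical creation and
# use of the classes above, assuming a sprites sprite list is available; the
# turtle key 'Yertle' is a made-up example.
#
#   turtles = Turtles(sprite_list)
#   t = turtles.get_turtle('Yertle', append=True, colors=['#008000', '#00A000'])
#   t.move((100, 50))
#   t.set_heading(90)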
| 34.936102 | 78 | 0.561225 | 9,194 | 0.840786 | 0 | 0 | 0 | 0 | 0 | 0 | 2,695 | 0.246456 |
c71003847371f17bbe96951b791e894ed7483c4a | 1,384 | py | Python | django_backend/group.py | holg/django_backend | 6cef76a378664e6621619862e6db476788a58992 | [
"BSD-3-Clause"
]
| null | null | null | django_backend/group.py | holg/django_backend | 6cef76a378664e6621619862e6db476788a58992 | [
"BSD-3-Clause"
]
| null | null | null | django_backend/group.py | holg/django_backend | 6cef76a378664e6621619862e6db476788a58992 | [
"BSD-3-Clause"
]
| null | null | null | try:
from django.forms.utils import pretty_name
except ImportError:
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string
from .compat import context_flatten
class Group(list):
"""
A simplistic representation of backends that are related and should be
displayed as one "group" in the backend (e.g. as one box in the sidebar).
"""
template_name = 'django_backend/_group.html'
def __init__(self, id, name=None, position=0, template_name=None):
self.id = id
if name is None:
name = pretty_name(id)
self.template_name = template_name or self.template_name
self.name = name
self.position = position
super(Group, self).__init__()
@property
def backends(self):
return list(self)
def get_context_data(self, context, **kwargs):
data = {
'group': self,
}
data.update(kwargs)
return data
def get_template_name(self):
return self.template_name
def render(self, context):
context_data = {}
if isinstance(context, Context):
context_data.update(context_flatten(context))
context_data = self.get_context_data(context, **context_data)
return render_to_string(self.get_template_name(), context_data)
| 28.833333 | 77 | 0.66474 | 1,137 | 0.821532 | 0 | 0 | 59 | 0.04263 | 0 | 0 | 199 | 0.143786 |
c7102803d3080f23edcd56ddbfc0360cc305ab8a | 971 | py | Python | src/eodc_openeo_bindings/map_comparison_processes.py | eodcgmbh/eodc-openeo-bindings | 4e80eba036771a0c81359e1ac66862f1eead407b | [
"MIT"
]
| null | null | null | src/eodc_openeo_bindings/map_comparison_processes.py | eodcgmbh/eodc-openeo-bindings | 4e80eba036771a0c81359e1ac66862f1eead407b | [
"MIT"
]
| 7 | 2020-02-18T17:12:31.000Z | 2020-09-24T07:19:04.000Z | src/eodc_openeo_bindings/map_comparison_processes.py | eodcgmbh/eodc-openeo-bindings | 4e80eba036771a0c81359e1ac66862f1eead407b | [
"MIT"
]
| null | null | null | """
"""
from eodc_openeo_bindings.map_utils import map_default
def map_lt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lt', 'apply', param_dict)
def map_lte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'lte', 'apply', param_dict)
def map_gt(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gt', 'apply', param_dict)
def map_gte(process):
"""
"""
param_dict = {'y': 'float'}
return map_default(process, 'gte', 'apply', param_dict)
def map_eq(process):
"""
"""
param_dict = {'y': 'numpy.array'}
# NOTE: how to map type dynamically to support strings?
if 'delta' in process['arguments']:
param_dict['delta'] = 'int'
if 'case_sensitive' in process['arguments']:
param_dict['case_sensitive'] = 'bool'
return map_default(process, 'eq', 'apply', param_dict)
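# Illustrative example (not part of the original module): the rough shape of a
# `process` argument, inferred from the keys accessed above; values are
# hypothetical.
#
#   process = {'arguments': {'y': 0.5}}
#   mapped = map_lt(process)  # delegates to map_default(process, 'lt', 'apply', {'y': 'float'})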
| 15.918033 | 59 | 0.589083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.324408 |
c711129f24117223c3e97558213be4cfb18083e6 | 38 | py | Python | scripts/flow_tests/__init__.py | rombie/contrail-test | a68c71d6f282142501a7e2e889bbb232fdd82dc3 | [
"Apache-2.0"
]
| 5 | 2020-09-29T00:36:57.000Z | 2022-02-16T06:51:32.000Z | serial_scripts/system_test/flow_tests/__init__.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
]
| 27 | 2019-11-02T02:18:34.000Z | 2022-02-24T18:49:08.000Z | serial_scripts/system_test/flow_tests/__init__.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
]
| 20 | 2019-11-28T16:02:25.000Z | 2022-01-06T05:56:58.000Z | """FLOW RELATED SYSTEM TEST CASES."""
| 19 | 37 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.973684 |
c711b732931b1daa135dbab87c710f6b0e8237b0 | 1,444 | py | Python | server/main.py | KejiaQiang/Spicy_pot_search | 72aaa9618e54178da513371802c2bcb751037bb0 | [
"MIT"
]
| 1 | 2021-03-04T09:02:05.000Z | 2021-03-04T09:02:05.000Z | server/main.py | yanansong0930/Spicy_pot_search | 72aaa9618e54178da513371802c2bcb751037bb0 | [
"MIT"
]
| null | null | null | server/main.py | yanansong0930/Spicy_pot_search | 72aaa9618e54178da513371802c2bcb751037bb0 | [
"MIT"
]
| 1 | 2021-03-04T08:59:02.000Z | 2021-03-04T08:59:02.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, render_template
from datetime import timedelta
import pymysql
from search import start_search, decorate
page_dir = "E:/WEBPAGES_RAW"
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
connection = pymysql.connect(host="localhost",port=3306,user="root",db="spicy_pot")
cursor = connection.cursor()
@app.route('/')
def homepage():
return render_template("root.html")
@app.route('/search')
def search():
word = request.args.get('s')
page = int(request.args.get('p'))
all_res = start_search(word,cursor)
if len(all_res) == 0:
return render_template("result.html",result={"word":word,"pages":-1,"currentPage":1,"res":[]})
pages = ((len(all_res)-1)//10) + 1
res = decorate(all_res[(page-1)*10:page*10])
content = {"word":word,"pages":pages,"currentPage":page,"res":res}
return render_template("result.html",result=content)
@app.route('/cache')
def cache():
p = request.args.get('p')
c = request.args.get('c')
read = open(page_dir+"/"+p+"/"+c,'r',encoding="utf-8")
save = open("templates/temp.html",'w',encoding="utf-8")
for line in read:
save.write(line)
read.close()
save.close()
return render_template("temp.html")
app.run(host='0.0.0.0',port=80,debug=True)
| 29.469388 | 103 | 0.637812 | 0 | 0 | 0 | 0 | 923 | 0.639197 | 0 | 0 | 319 | 0.220914 |
c711e0dd9090b2b45a4e1e0eca15dbcffe106551 | 5,355 | py | Python | examples/3d/subduction/viz/plot_dispwarp.py | cehanagan/pylith | cf5c1c34040460a82f79b6eb54df894ed1b1ee93 | [
"MIT"
]
| 93 | 2015-01-08T16:41:22.000Z | 2022-02-25T13:40:02.000Z | examples/3d/subduction/viz/plot_dispwarp.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
]
| 277 | 2015-02-20T16:27:35.000Z | 2022-03-30T21:13:09.000Z | examples/3d/subduction/viz/plot_dispwarp.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
]
| 71 | 2015-03-24T12:11:08.000Z | 2022-03-03T04:26:02.000Z | #!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the deformed
# domain, colored by the value of the x-displacemenet.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step02",
"WARP_SCALE": 10.0e+3,
"FIELD": "displacement",
"FIELD_COMPONENT": "Magnitude",
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read data
filename = os.path.join(parameters.output_dir, "%s-domain.xmf" % parameters.sim)
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
dataDomain = XDMFReader(FileNames=[filename])
RenameSource("%s-domain" % parameters.sim, dataDomain)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
if parameters.timestep == -1:
scene.GoToLast()
view = GetActiveViewOrCreate('RenderView')
# Gray wireframe for undeformed domain.
domainDisplay = Show(dataDomain, view)
domainDisplay.Representation = 'Wireframe'
domainDisplay.AmbientColor = [0.5, 0.5, 0.5]
# Warp domain to show deformation
warp = WarpByVector(Input=dataDomain)
warp.Vectors = ['POINTS', 'displacement']
warp.ScaleFactor = parameters.warp_scale
warpDisplay = Show(warp, view)
ColorBy(warpDisplay, ('POINTS', parameters.field, parameters.field_component))
warpDisplay.RescaleTransferFunctionToDataRange(True)
warpDisplay.SetScalarBarVisibility(view, True)
warpDisplay.SetRepresentationType('Surface With Edges')
# Rescale color bar to exactly fit the current data range
warpDisplay.RescaleTransferFunctionToDataRange(False, False)
# Customize colorbar
displacementLUT = GetColorTransferFunction(parameters.field)
colorbar = GetScalarBar(displacementLUT, view)
if parameters.field_component.lower() == "magnitude":
colorbar.Title = "Displacement Mag. (m)"
else:
colorbar.Title = "%s-displacement (m)" % parameters.field_component.lower()
colorbar.ComponentTitle = ""
# Annotate time
tstamp = AnnotateTimeFilter(warp)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "WARP_SCALE", "FIELD", "FIELD_COMPONENT", "TIMESTEP")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", action="store", dest="output_dir", default=DEFAULTS["OUTPUT_DIR"])
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--warp-scale", action="store", type=float, dest="warp_scale", default=DEFAULTS["WARP_SCALE"])
parser.add_argument("--field", action="store", dest="field", default=DEFAULTS["FIELD"])
parser.add_argument("--component", action="store", dest="field_component", default=DEFAULTS["FIELD_COMPONENT"])
parser.add_argument("--timestep", action="store", dest="timestep", default=-1)
parser.add_argument("--screenshot", action="store", dest="screenshot")
args = parser.parse_args()
visualize(args)
view = GetRenderView()
view.CameraPosition = [78002.89373974672, -1531813.1739094853, 595774.2094961794]
view.CameraFocalPoint = [-45014.6313325238, 149523.68421156122, -335271.271063906]
view.CameraViewUp = [0.0, 0.0, 1.0]
view.ViewSize = [960, 540]
view.Update()
if args.screenshot:
WriteImage(args.screenshot)
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
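# Illustrative invocation from a terminal (the screenshot path is an example):
#
#   pvpython plot_dispwarp.py --sim=step02 --output-dir=output \
#       --field=displacement --component=Magnitude --screenshot=step02-disp.png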
| 35 | 118 | 0.651727 | 400 | 0.074697 | 0 | 0 | 0 | 0 | 0 | 0 | 2,213 | 0.413259 |
c713402fab437e2023ffb914ab06de89a1b21a69 | 220 | py | Python | src/spaceone/inventory/manager/rds_manager.py | jean1042/plugin-aws-cloud-services | 1cf192557b03478af33ae81f40b2a49f735716bb | [
"Apache-2.0"
]
| 4 | 2020-06-22T01:48:07.000Z | 2020-08-24T00:51:09.000Z | src/spaceone/inventory/manager/rds_manager.py | jean1042/plugin-aws-cloud-services | 1cf192557b03478af33ae81f40b2a49f735716bb | [
"Apache-2.0"
]
| 2 | 2020-07-20T01:58:32.000Z | 2020-08-04T07:41:37.000Z | src/spaceone/inventory/manager/rds_manager.py | jean1042/plugin-aws-cloud-services | 1cf192557b03478af33ae81f40b2a49f735716bb | [
"Apache-2.0"
]
| 6 | 2020-06-22T09:19:40.000Z | 2020-09-17T06:35:37.000Z | from spaceone.inventory.libs.manager import AWSManager
# todo: change this so it can all be specified at once in __init__
# For now, because the locator logic pulls values from globals, an error occurs if the separate file does not exist
class RDSConnectorManager(AWSManager):
connector_name = 'RDSConnector'
| 24.444444 | 54 | 0.777273 | 74 | 0.229814 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.624224 |
c714251263633c1447c106182ffec957c2c483cc | 1,775 | py | Python | script/upload-checksums.py | fireball-x/atom-shell | d229338e40058a9b4323b2544f62818a3c55748c | [
"MIT"
]
| 4 | 2016-04-02T14:53:54.000Z | 2017-07-26T05:47:43.000Z | script/upload-checksums.py | cocos-creator/atom-shell | d229338e40058a9b4323b2544f62818a3c55748c | [
"MIT"
]
| null | null | null | script/upload-checksums.py | cocos-creator/atom-shell | d229338e40058a9b4323b2544f62818a3c55748c | [
"MIT"
]
| 2 | 2015-07-18T09:31:03.000Z | 2019-12-24T09:55:03.000Z | #!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.config import s3_config
from lib.util import download, rm_rf, s3put
DIST_URL = 'https://atom.io/download/atom-shell/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'iojs-{0}.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
'win-x86/iojs.lib',
'win-x64/iojs.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
lines = []
for path in files:
h = hashlib.new(algorithm)
with open(path, 'r') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
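# Illustrative note (not part of the original script): each line written above
# is the hex digest followed by the file's path relative to the download
# directory, e.g. (hash and version are made up):
#
#   0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33  node-v1.2.3.tar.gz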
if __name__ == '__main__':
import sys
sys.exit(main())
| 23.666667 | 75 | 0.668169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.179155 |
c71481b1ca69523b36b0345fe995b27fb6d37535 | 2,533 | py | Python | pythoncode/kmeansimage.py | loganpadon/PokemonOneShot | 22f9904250c8c90b4fe4573d6ca060fd9f95c1d3 | [
"MIT"
]
| null | null | null | pythoncode/kmeansimage.py | loganpadon/PokemonOneShot | 22f9904250c8c90b4fe4573d6ca060fd9f95c1d3 | [
"MIT"
]
| 1 | 2019-04-04T20:40:20.000Z | 2019-04-04T20:40:20.000Z | pythoncode/kmeansimage.py | loganpadon/PokemonOneShot | 22f9904250c8c90b4fe4573d6ca060fd9f95c1d3 | [
"MIT"
]
| null | null | null | # import the necessary packages
from sklearn.cluster import KMeans
import skimage
import matplotlib.pyplot as plt
import argparse
import cv2
def mean_image(image,clt):
image2=image
for x in range(len(image2)):
classes=clt.predict(image2[x])
for y in range(len(classes)):
image2[x,y]=clt.cluster_centers_[classes[y]]
image2=skimage.color.lab2rgb(image2)
return image2
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
print color
c = skimage.color.lab2rgb([[color]])
print c*255
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
c[0][0]*255, -1)
startX = endX
# return the bar chart
return bar
# import the necessary packages
import numpy as np
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
ap.add_argument("-c", "--clusters", required = True, type = int,
help = "# of clusters")
args = vars(ap.parse_args())
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread(args["image"])
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = skimage.color.rgb2lab(image2)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image2)
# reshape the image to be a list of pixels
imagedata = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters = args["clusters"])
clt.fit(imagedata)
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar
plt.figure()
plt.axis("off")
plt.imshow(bar)
imagek=mean_image(image,clt)
plt.figure()
plt.axis("off")
plt.imshow(imagek)
plt.show()
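# Illustrative invocation (the image path is an example):
#
#   python kmeansimage.py -i pokemon.png -c 5
#
# This clusters the image's pixels (in L*a*b* space) into 5 colors and shows
# the original image, a bar of cluster color percentages, and the quantized
# image.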
| 28.460674 | 78 | 0.696802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.339913 |
c716271a9b4b9b525bfcb14f8c07170e7179b37f | 134 | py | Python | tests/encode.py | EddieBreeg/C_b64 | d49b155d1ae889c2ab779f54e6215f9d5e1031e6 | [
"MIT"
]
| null | null | null | tests/encode.py | EddieBreeg/C_b64 | d49b155d1ae889c2ab779f54e6215f9d5e1031e6 | [
"MIT"
]
| null | null | null | tests/encode.py | EddieBreeg/C_b64 | d49b155d1ae889c2ab779f54e6215f9d5e1031e6 | [
"MIT"
]
| null | null | null | from sys import argv
from base64 import b64encode
with open("data", 'rb') as fIn:
b = fIn.read()
print(b64encode(b).decode()) | 22.333333 | 32 | 0.671642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.074627 |
c7162d1c243872610bbf29a5583204c35093859d | 1,691 | py | Python | src/json_sort/lib.py | cdumay/json-sort | a76fe2deaad649264e8ca0d1cc096d9741c60a04 | [
"Apache-2.0"
]
| 3 | 2017-01-03T14:36:25.000Z | 2021-03-06T05:42:08.000Z | src/json_sort/lib.py | cdumay/json-sort | a76fe2deaad649264e8ca0d1cc096d9741c60a04 | [
"Apache-2.0"
]
| null | null | null | src/json_sort/lib.py | cdumay/json-sort | a76fe2deaad649264e8ca0d1cc096d9741c60a04 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <[email protected]>
"""
import logging
import sys, os, json
from cdumay_rest_client.client import RESTClient
from cdumay_rest_client.exceptions import NotFound, HTTPException
class NoSuchFile(NotFound):
"""NoSuchFile"""
def oncritical(exc):
"""description of oncritical"""
if isinstance(exc, HTTPException):
logging.critical(exc.message)
else:
logging.critical(str(exc))
sys.exit(1)
def file_exists(filename):
"""description of file_exists"""
filename = os.path.realpath(filename)
logging.debug("Checking file: {}".format(filename))
if not os.path.exists(filename):
raise NoSuchFile(
message="No such file '{}'".format(filename),
extra=dict(filename=filename)
)
return filename
def file_write(dst, data):
"""description of file_write"""
if dst:
dst = os.path.realpath(dst)
logging.debug("Saving to: {}".format(dst))
out = open(dst, "w")
else:
logging.debug("Current std will be used")
out = sys.stdout
json.dump(
data, out, ensure_ascii=False, sort_keys=True, indent=2,
separators=(',', ': ')
)
def from_local(src, dst=None):
"""description of from_local"""
try:
file_write(dst, json.load(open(file_exists(src), "r")))
except Exception as exc:
oncritical(exc)
def from_remote(src, dst=None):
"""description of fromurl"""
try:
file_write(
dst, RESTClient(server=src).do_request(method="GET", path="")
)
except Exception as exc:
oncritical(exc)
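# Illustrative usage (not part of the original module; the paths and URL are
# examples only):
#
#   from_local("data.json", "data.sorted.json")     # sort a local file
#   from_remote("https://example.org/api/item")     # print sorted JSON to stdout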
| 23.486111 | 73 | 0.622708 | 48 | 0.028369 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.222222 |
c7165074ee0affcd71c302a41edf2c2139ea9a06 | 4,484 | py | Python | test/test_create_dataset.py | gregstarr/ttools | fc8dcbf094370e9885311126724697830167d931 | [
"MIT"
]
| null | null | null | test/test_create_dataset.py | gregstarr/ttools | fc8dcbf094370e9885311126724697830167d931 | [
"MIT"
]
| null | null | null | test/test_create_dataset.py | gregstarr/ttools | fc8dcbf094370e9885311126724697830167d931 | [
"MIT"
]
| null | null | null | import numpy as np
import pytest
import apexpy
import tempfile
import os
import h5py
from ttools import create_dataset, config, io, utils
map_periods = [np.timedelta64(10, 'm'), np.timedelta64(30, 'm'), np.timedelta64(1, 'h'), np.timedelta64(2, 'h')]
@pytest.fixture
def times():
yield np.datetime64('2010-01-01T00:00:00') + np.arange(100) * np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_assemble_args(times, map_period):
mlat = np.arange(10)
mlt = np.arange(10)
ssmlon = np.random.rand(times.shape[0])
mlt, mlat = np.meshgrid(mlt, mlat)
mlat = mlat[None, :, :] * np.ones((times.shape[0], 1, 1))
mlt = mlt[None, :, :] * np.ones((times.shape[0], 1, 1))
tec = np.random.rand(*mlat.shape)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges, bin_edges]
args = create_dataset.assemble_binning_args(mlat, mlt, tec, times, ssmlon, bins, map_period)
assert len(args) == np.ceil((times[-1] - times[0]) / map_period)
assert args[0][3][0] == times[0]
assert args[-1][3][0] + map_period >= times[-1]
assert args[-1][3][0] < times[-1]
assert args[-1][3][-1] == times[-1]
for i in range(len(args) - 1):
assert args[i][3][-1] == args[i + 1][3][0] - np.timedelta64(5, 'm')
@pytest.mark.parametrize('map_period', map_periods)
def test_process_file(madrigal_data_dir, map_period):
"""not that good of a test: wait for bugs and add asserts
"""
start_date = np.datetime64('2012-06-08')
end_date = np.datetime64('2012-06-13')
converter = apexpy.Apex()
mlat, mlon = create_dataset.get_mag_grid(config.madrigal_lat, config.madrigal_lon, converter)
bin_edges = np.arange(-.5, 10)
bins = [bin_edges + 30, bin_edges]
times, tec, ssmlon, n, std = create_dataset.process_file(start_date, end_date, mlat, mlon, converter, bins,
map_period, madrigal_data_dir)
assert times.shape[0] == tec.shape[0] == n.shape[0] == std.shape[0] == ssmlon.shape[0]
assert np.isnan(tec[times < np.datetime64('2012-06-10')]).all()
assert np.isnan(tec[times >= np.datetime64('2012-06-11')]).all()
assert np.isfinite(tec[(times >= np.datetime64('2012-06-10')) * (times < np.datetime64('2012-06-11'))]).any()
assert not np.isnan(tec).all(axis=(0, 1)).any()
assert not np.isnan(tec).all(axis=(0, 2)).any()
def test_calculate_bins():
mlat = np.arange(10)[None, :, None] * np.ones((1, 1, 10))
mlt = np.arange(10)[None, None, :] * np.ones((1, 10, 1))
tec = np.zeros((1, 10, 10))
tec[0, 0, 0] = 10
tec[0, 0, -1] = 20
tec[0, -1, 0] = 30
times = ssmlon = np.ones(1) * np.nan
be = np.array([-.5, 4.5, 9.5])
bins = [be, be]
out_t, out_tec, out_ssm, out_n, out_std = create_dataset.calculate_bins(mlat.ravel(), mlt.ravel(), tec.ravel(),
times, ssmlon, bins)
assert np.isnan(out_t)
assert np.isnan(out_ssm)
assert out_tec.shape == (2, 2)
assert out_tec[0, 0] == 10 / 25
assert out_tec[0, 1] == 20 / 25
assert out_tec[1, 0] == 30 / 25
assert out_tec[1, 1] == 0
assert np.all(out_n == 25)
def test_process_dataset():
start_date = np.datetime64("2012-03-07")
end_date = np.datetime64("2012-03-08")
file_dt = np.timedelta64(12, 'h')
mlat_bins = np.array([35, 45, 55, 65])
mlt_bins = np.array([-1.5, -.5, .5, 1.5])
def fn_pattern(date):
return f"{date.astype('datetime64[h]')}.h5"
dates = np.arange(start_date, end_date, file_dt)
with tempfile.TemporaryDirectory() as tempdir:
files = [os.path.join(tempdir, fn_pattern(d)) for d in dates]
create_dataset.process_dataset(start_date, end_date, mlat_bins, mlt_bins, apex_dt=np.timedelta64(365, 'D'),
file_dt=file_dt, output_dir=tempdir, file_name_pattern=fn_pattern)
grid_fn = os.path.join(tempdir, 'grid.h5')
assert os.path.exists(grid_fn)
with h5py.File(grid_fn, 'r') as f:
mlt_vals = f['mlt'][()]
mlat_vals = f['mlat'][()]
assert np.all(mlt_vals == [-1, 0, 1])
assert np.all(mlat_vals == [40, 50, 60])
for f, d in zip(files, dates):
assert os.path.exists(f)
tec, times, ssmlon, n, std = io.open_tec_file(f)
assert tec.shape == (12, 3, 3)
assert utils.datetime64_to_timestamp(d) == times[0]
| 40.396396 | 115 | 0.599242 | 0 | 0 | 101 | 0.022525 | 2,131 | 0.475245 | 0 | 0 | 289 | 0.064451 |
c717ca8a8d1e158509ebb8f364af201eeca89e64 | 296 | py | Python | docs_src/options/callback/tutorial001.py | madkinsz/typer | a1520dcda685220a9a796288f5eaaebd00d68845 | [
"MIT"
]
| 7,615 | 2019-12-24T13:08:20.000Z | 2022-03-31T22:07:53.000Z | docs_src/options/callback/tutorial001.py | madkinsz/typer | a1520dcda685220a9a796288f5eaaebd00d68845 | [
"MIT"
]
| 351 | 2019-12-24T22:17:54.000Z | 2022-03-31T15:35:08.000Z | docs_src/options/callback/tutorial001.py | jina-ai/typer | 8b5e14b25ddf0dd777403015883301b17bedcee0 | [
"MIT"
]
| 360 | 2019-12-24T15:29:59.000Z | 2022-03-30T20:33:10.000Z | import typer
def name_callback(value: str):
if value != "Camila":
raise typer.BadParameter("Only Camila is allowed")
return value
def main(name: str = typer.Option(..., callback=name_callback)):
typer.echo(f"Hello {name}")
if __name__ == "__main__":
typer.run(main)
| 18.5 | 64 | 0.658784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.192568 |
c719c2fbf99902f8dda33cce99ae748883db934d | 3,276 | py | Python | qft-client-py2.py | bocajspear1/qft | 7a8f3bb5d24bf173489dc4ad6159021e9365e9c4 | [
"MIT"
]
| null | null | null | qft-client-py2.py | bocajspear1/qft | 7a8f3bb5d24bf173489dc4ad6159021e9365e9c4 | [
"MIT"
]
| null | null | null | qft-client-py2.py | bocajspear1/qft | 7a8f3bb5d24bf173489dc4ad6159021e9365e9c4 | [
"MIT"
]
| null | null | null | import socket
import threading
from time import sleep
from threading import Thread
import json
import sys
def display_test(address, port,text_result, test):
if (text_result == "QFT_SUCCESS" and test == True) or (text_result != "QFT_SUCCESS" and test == False):
# Test is correct
print "PASSED: Test for " + str(address) + ":" + str(port) + " resulted in " + str(test)
else:
print "FAILED: Test for " + str(address) + ":" + str(port) + " did not result in " + str(test)
def TCPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_socket.settimeout(2)
my_socket.connect((address, port))
fileobj = my_socket.makefile("rw")
fileobj.write('QFT_REQUEST\n')
fileobj.flush()
result = fileobj.readline().strip()
display_test(address, port, result, test)
    except (socket.timeout, socket.error) as e:
        # socket.timeout is a subclass of socket.error, so a single handler
        # covers both; a separate socket.timeout branch would be unreachable.
        #print(e)
        display_test(address, port, "FAILED", test)
my_socket.close()
def UDPTest(address, port, test):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_socket.settimeout(2)
my_socket.sendto("QFT_REQUEST".encode('utf-8'), (address, port))
# receive data from client (data, addr)
d = my_socket.recvfrom(1024)
reply = d[0]
addr = d[1]
result = d[0].decode('utf-8').strip()
display_test(address, port, result, test)
except socket.timeout as e:
display_test(address, port, "FAILED", test)
try:
timeout = 5
if len(sys.argv) > 1:
if (len(sys.argv) -1 ) % 2 != 0:
print "\nInvalid number of arguments\n\n-t Time between tests in seconds\n"
sys.exit()
else:
if sys.argv[1] == "-t" and sys.argv[2].isdigit() and int(sys.argv[2]) > 2:
timeout = int(sys.argv[2])
else:
print "\nInvalid arguments\n\n-t Time between tests in seconds\n"
sys.exit()
print "\nqft-client.py v1.s\n\n"
json_cfg = json.loads(open("client.cfg").read())
print "Config loaded. Starting tests in 1 second...\n\n"
sleep(1)
while True:
for item in json_cfg:
if item["type"] == "tcp":
t = Thread(target=TCPTest, args=( item["remote_address"], item["port"], item["test_for"]))
elif item["type"] == "udp":
t = Thread(target=UDPTest, args=( item["remote_address"], item["port"], item["test_for"]))
else:
print "Invalid Type!"
t.start()
sleep(timeout)
print "\n=======================================================\n"
except IOError as e:
print("Config file, client.cfg, not found")
sys.exit()
except ValueError as e:
print("Error in config JSON")
sys.exit()
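# Illustrative client.cfg content (not part of the original file), based on
# the keys read above; addresses and ports are examples:
#
# [
#     {"type": "tcp", "remote_address": "192.168.1.10", "port": 8080, "test_for": true},
#     {"type": "udp", "remote_address": "192.168.1.10", "port": 9090, "test_for": false}
# ]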
| 30.616822 | 108 | 0.514042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.209402 |
c719cc42bfa09eeceed2d7963f0cd71faeceedf7 | 14,277 | py | Python | mdemanipulation/src/mdeoperation.py | modelia/ai-for-model-manipulation | 0b15b9d59b0f6009a5709b20db4e55b7d511ac38 | [
"BSD-3-Clause"
]
| null | null | null | mdemanipulation/src/mdeoperation.py | modelia/ai-for-model-manipulation | 0b15b9d59b0f6009a5709b20db4e55b7d511ac38 | [
"BSD-3-Clause"
]
| 1 | 2022-01-10T14:16:48.000Z | 2022-01-10T14:16:48.000Z | mdemanipulation/src/mdeoperation.py | modelia/ai-for-model-manipulation | 0b15b9d59b0f6009a5709b20db4e55b7d511ac38 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python2
import math
import os
import random
import sys
import time
import logging
import argparse
import numpy as np
from six.moves import xrange
import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch import cuda
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
import data_utils
import network
import cPickle as pickle
import datetime
def create_model(source_vocab_size, target_vocab_size, source_vocab_list, target_vocab_list, dropout_rate,
max_source_len, max_target_len):
model = network.Tree2TreeModel(
source_vocab_size,
target_vocab_size,
source_vocab_list,
target_vocab_list,
args.max_depth,
args.embedding_size,
args.hidden_size,
args.num_layers,
args.max_gradient_norm,
args.batch_size,
args.learning_rate,
dropout_rate,
args.no_pf,
args.no_attention)
if cuda.is_available():
model.cuda()
if args.load_model:
print("Reading model parameters from %s" % args.load_model)
pretrained_model = torch.load(args.load_model)
model.load_state_dict(pretrained_model)
else:
print("Created model with fresh parameters.")
model.init_weights(args.param_init)
return model
def step_tree2tree(model, encoder_inputs, init_decoder_inputs, feed_previous=False):
if feed_previous == False:
model.dropout_rate = args.dropout_rate
else:
model.dropout_rate = 0.0
predictions_per_batch, prediction_managers = model(encoder_inputs, init_decoder_inputs, feed_previous=feed_previous)
total_loss = None
for (predictions, target) in predictions_per_batch:
loss = model.loss_function(predictions, target)
if total_loss is None:
total_loss = loss
else:
total_loss += loss
total_loss /= len(encoder_inputs)
if feed_previous:
output_predictions = []
for prediction_manager in prediction_managers:
output_predictions.append(model.tree2seq(prediction_manager, 1))
if feed_previous == False:
model.optimizer.zero_grad()
total_loss.backward()
if args.max_gradient_norm > 0:
clip_grad_norm(model.parameters(), args.max_gradient_norm)
model.optimizer.step()
for idx in range(len(encoder_inputs)):
encoder_inputs[idx].clear_states()
if feed_previous:
return total_loss.data[0], output_predictions
else:
return total_loss.data[0]
def evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
test_loss = 0
acc_tokens = 0
tot_tokens = 0
acc_programs = 0
tot_programs = len(test_set)
res = []
for idx in xrange(0, len(test_set), args.batch_size):
encoder_inputs, decoder_inputs = model.get_batch(test_set, start_idx=idx)
eval_loss, raw_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
test_loss += len(encoder_inputs) * eval_loss
for i in xrange(len(encoder_inputs)):
if idx + i >= len(test_set):
break
current_output = []
for j in xrange(len(raw_outputs[i])):
current_output.append(raw_outputs[i][j])
current_source, current_target, current_source_manager, current_target_manager = test_set[idx + i]
current_target_print = data_utils.serialize_tree_with_vocabulary(current_target, target_vocab)
current_target = data_utils.serialize_tree(current_target)
current_source_print = data_utils.serialize_tree_with_vocabulary(current_source, source_vocab)
current_source = data_utils.serialize_tree(current_source)
# print("Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
# print((datetime.datetime.now() - start_evaluation_datetime))
res.append((current_source, current_target, current_output))
current_output_print = data_utils.serialize_seq_with_vocabulary(current_output, target_vocab)
# print("--Current source / Current target / Current output--")
print(current_source_print)
print(current_target_print)
print(current_output_print)
# print(source_vocab)
print("---")
tot_tokens += len(current_target)
all_correct = 1
wrong_tokens = 0
for j in xrange(len(current_output)):
if j >= len(current_target):
break
if current_output[j] == current_target[j]:
acc_tokens += 1
else:
all_correct = 0
wrong_tokens += 1
acc_programs += all_correct
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
test_loss /= tot_programs
print(" eval: loss %.2f" % test_loss)
print(" eval: accuracy of tokens %.2f" % (acc_tokens * 1.0 / tot_tokens))
print(" eval: accuracy of programs %.2f" % (acc_programs * 1.0 / tot_programs))
print(acc_tokens, tot_tokens, acc_programs, tot_programs)
def train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, no_train):
    train_model = not no_train
    time_training = 0
# build_from_scratch = True;
# pretrained_model_path = "/home/lola/nn/neuralnetwork.pth";
if (train_model):
print ("Reading training and val data :")
train_set = data_utils.prepare_data(training_dataset, source_vocab, target_vocab)
val_set = data_utils.prepare_data(validation_dataset, source_vocab, target_vocab)
if not os.path.isdir(args.train_dir_checkpoints):
os.makedirs(args.train_dir_checkpoints)
start_time = time.time()
start_datetime = datetime.datetime.now()
# if (build_from_scratch):
print("Creating %d layers of %d units." % (args.num_layers, args.hidden_size))
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, args.dropout_rate,
args.max_source_len, args.max_target_len)
# else:
# print("Loading pretrained model")
# pretrained_model = torch.load(pretrained_model_path)
# model.load_state_dict(pretrained_model)
print("Training model")
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
training_dataset_size = len(train_set)
for epoch in range(args.num_epochs):
print("epoch: %s/%s" % (epoch+1, args.num_epochs))
batch = 0
random.shuffle(train_set)
for batch_idx in range(0, training_dataset_size, args.batch_size):
batch += 1
start_time = time.time()
encoder_inputs, decoder_inputs = model.get_batch(train_set, start_idx=batch_idx)
step_loss = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=False)
step_time += (time.time() - start_time) / args.steps_per_checkpoint
loss += step_loss / args.steps_per_checkpoint
current_step += 1
print(" batch: %s/%s" % (batch, training_dataset_size/args.batch_size))
if current_step % args.learning_rate_decay_steps == 0 and model.learning_rate > 0.0001:
model.decay_learning_rate(args.learning_rate_decay_factor)
if current_step % args.steps_per_checkpoint == 0:
print ("learning rate %.4f step-time %.2f loss "
"%.2f" % (model.learning_rate, step_time, loss))
previous_losses.append(loss)
ckpt_path = os.path.join(args.train_dir_checkpoints, "translate_" + str(current_step) + ".ckpt")
ckpt = model.state_dict()
torch.save(ckpt, ckpt_path)
step_time, loss = 0.0, 0.0
encoder_inputs, decoder_inputs = model.get_batch(val_set, start_idx=0)
eval_loss, decoder_outputs = step_tree2tree(model, encoder_inputs, decoder_inputs, feed_previous=True)
print(" eval: loss %.2f" % eval_loss)
sys.stdout.flush()
time_training = (datetime.datetime.now() - start_datetime)
print("Saving model")
torch.save(model.state_dict(), "/home/lola/nn/neuralnetwork.pth")
    else:  # not train_model
print("Loading the pretrained model")
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list,
args.dropout_rate,
args.max_source_len, args.max_target_len)
print("Evaluating model")
start_evaluation_datetime = datetime.datetime.now()
test_dataset = json.load(open(args.test_dataset, 'r'))
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
if (train_model):
print("Training time: %s seconds" % time_training)
print("Total Evaluation time: %s seconds" % (datetime.datetime.now() - start_evaluation_datetime))
def test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list):
model = create_model(len(source_vocab), len(target_vocab), source_vocab_list, target_vocab_list, 0.0,
args.max_source_len, args.max_target_len)
test_set = data_utils.prepare_data(test_dataset, source_vocab, target_vocab)
evaluate(model, test_set, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
parser = argparse.ArgumentParser()
parser.add_argument('--param_init', type=float, default=0.1,
help='Parameters are initialized over uniform distribution in (-param_init, param_init)')
parser.add_argument('--num_epochs', type=int, default=30, help='number of training epochs') #default 30
parser.add_argument('--learning_rate', type=float, default=0.005, # default 0.005
help='learning rate')
parser.add_argument('--learning_rate_decay_factor', type=float, default=0.8,
help='learning rate decays by this much')
parser.add_argument('--learning_rate_decay_steps', type=int, default=2000, # default=2000
help='decay the learning rate after certain steps')
parser.add_argument('--max_gradient_norm', type=float, default=5.0,
help='clip gradients to this norm')
parser.add_argument('--batch_size', type=int, default=64, #default 100
help='batch size')
parser.add_argument('--max_depth', type=int, default=100,
help='max depth for tree models')
parser.add_argument('--hidden_size', type=int, default=256,
help='size of each model layer')
parser.add_argument('--embedding_size', type=int, default=256,
help='size of the embedding')
parser.add_argument('--dropout_rate', type=float, default=0.75, # default=0.5
help='dropout rate')
parser.add_argument('--num_layers', type=int, default=1, # default=1,
help='number of layers in the model')
parser.add_argument('--source_vocab_size', type=int, default=0,
help='source vocabulary size (0: no limit)')
parser.add_argument('--target_vocab_size', type=int, default=0,
help='target vocabulary size (0: no limit)')
parser.add_argument('--train_dir_checkpoints', type=str, default='/home/lola/nn/checkpoints', # default='../model_ckpts/tree2tree/',
help='training directory - checkpoints')
parser.add_argument('--training_dataset', type=str, default='/home/lola/nn/models_train.json', # default='../data/CS-JS/BL/preprocessed_progs_train.json',
help='training dataset path')
parser.add_argument('--validation_dataset', type=str, default='/home/lola/nn/models_valid.json', #default='../data/CS-JS/BL/preprocessed_progs_valid.json',
help='validation dataset path')
parser.add_argument('--test_dataset', type=str, default='/home/lola/nn/models_test.json', #default='../data/CS-JS/BL/preprocessed_progs_test.json',
help='test dataset path')
parser.add_argument('--load_model', type=str, default='/home/lola/nn/neuralnetwork.pth', # default=None
help='path to the pretrained model')
parser.add_argument('--vocab_filename', type=str, default=None,
help='filename for the vocabularies')
parser.add_argument('--steps_per_checkpoint', type=int, default=500,
help='number of training steps per checkpoint')
parser.add_argument('--max_source_len', type=int, default=115,
help='max length for input')
parser.add_argument('--max_target_len', type=int, default=315,
help='max length for output')
parser.add_argument('--test', action='store_true', help='set to true for testing')
parser.add_argument('--no_attention', action='store_true', help='set to true to disable attention')
parser.add_argument('--no_pf', action='store_true', help='set to true to disable parent attention feeding')
parser.add_argument('--no_train', help='set to true to prevent the network from training', action='store_true')
args = parser.parse_args()
def main():
if args.no_attention:
args.no_pf = True
training_dataset = json.load(open(args.training_dataset, 'r'))
source_vocab, target_vocab, source_vocab_list, target_vocab_list = data_utils.build_vocab(training_dataset, args.vocab_filename)
if args.test:
test_dataset = json.load(open(args.test_dataset, 'r'))
test(test_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list)
else:
validation_dataset = json.load(open(args.validation_dataset, 'r'))
# print("Val data %s" % validation_dataset)
train(training_dataset, validation_dataset, source_vocab, target_vocab, source_vocab_list, target_vocab_list, args.no_train)
main()
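# Illustrative invocations (dataset and model paths are examples):
#
#   python mdeoperation.py --training_dataset models_train.json \
#       --validation_dataset models_valid.json --num_epochs 30
#   python mdeoperation.py --test --test_dataset models_test.json \
#       --load_model neuralnetwork.pth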
| 43.794479 | 155 | 0.665826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,982 | 0.208867 |
c71a546240f7c071174fd45a93cc36d20aa838b4 | 5,388 | py | Python | barbican/common/resources.py | stanzikratel/barbican-2 | 10fae57c1cae3e140c19069a48f562d62ca53663 | [
"Apache-2.0"
]
| null | null | null | barbican/common/resources.py | stanzikratel/barbican-2 | 10fae57c1cae3e140c19069a48f562d62ca53663 | [
"Apache-2.0"
]
| null | null | null | barbican/common/resources.py | stanzikratel/barbican-2 | 10fae57c1cae3e140c19069a48f562d62ca53663 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared business logic.
"""
from barbican.common import exception
from barbican.common import utils
from barbican.common import validators
from barbican.model import models
LOG = utils.getLogger(__name__)


def get_or_create_tenant(keystone_id, tenant_repo):
    """Returns tenant with matching keystone_id.

    Creates it if it does not exist.

    :param keystone_id: The external-to-Barbican ID for this tenant.
    :param tenant_repo: Tenant repository.
    :return: Tenant model instance
    """
    tenant = tenant_repo.find_by_keystone_id(keystone_id,
                                             suppress_exception=True)
    if not tenant:
        LOG.debug('Creating tenant for {0}'.format(keystone_id))
        tenant = models.Tenant()
        tenant.keystone_id = keystone_id
        tenant.status = models.States.ACTIVE
        tenant_repo.create_from(tenant)
    return tenant


def create_secret(data, tenant, crypto_manager,
                  secret_repo, tenant_secret_repo, datum_repo, kek_repo,
                  ok_to_generate=False):
    """Common business logic to create a secret."""
    time_keeper = utils.TimeKeeper('Create Secret Resource')
    new_secret = models.Secret(data)
    time_keeper.mark('after Secret model create')

    new_datum = None
    content_type = data.get('payload_content_type',
                            'application/octet-stream')

    if 'payload' in data:
        payload = data.get('payload')
        content_encoding = data.get('payload_content_encoding')
        LOG.debug('Encrypting payload...')
        new_datum = crypto_manager.encrypt(payload,
                                           content_type,
                                           content_encoding,
                                           new_secret,
                                           tenant,
                                           kek_repo,
                                           enforce_text_only=True)
        time_keeper.mark('after encrypt')
    elif ok_to_generate:
        LOG.debug('Generating new secret...')
        # TODO(atiwari): With new typed Order API proposal
        # we need to translate new_secret to meta
        # currently it is working as meta will have same attributes
        new_datum = crypto_manager. \
            generate_symmetric_encryption_key(new_secret,
                                              content_type,
                                              tenant,
                                              kek_repo)
        time_keeper.mark('after secret generate')
    else:
        LOG.debug('Creating metadata only for the new secret. '
                  'A subsequent PUT is required')

    # Create Secret entities in datastore.
    secret_repo.create_from(new_secret)
    time_keeper.mark('after Secret datastore create')
    new_assoc = models.TenantSecret()
    time_keeper.mark('after TenantSecret model create')
    new_assoc.tenant_id = tenant.id
    new_assoc.secret_id = new_secret.id
    new_assoc.role = "admin"
    new_assoc.status = models.States.ACTIVE
    tenant_secret_repo.create_from(new_assoc)
    time_keeper.mark('after TenantSecret datastore create')

    if new_datum:
        new_datum.secret_id = new_secret.id
        datum_repo.create_from(new_datum)
        time_keeper.mark('after Datum datastore create')

    time_keeper.dump()

    return new_secret


def create_encrypted_datum(secret, payload,
                           content_type, content_encoding,
                           tenant, crypto_manager, datum_repo, kek_repo):
    """Modifies the secret to add the plain_text secret information.

    :param secret: the secret entity to associate the secret data to
    :param payload: secret data to store
    :param content_type: payload content mime type
    :param content_encoding: payload content encoding
    :param tenant: the tenant (entity) who owns the secret
    :param crypto_manager: the crypto plugin manager
    :param datum_repo: the encrypted datum repository
    :param kek_repo: the KEK metadata repository
    :retval The response body, None if N/A
    """
    if not payload:
        raise exception.NoDataToProcess()

    if validators.secret_too_big(payload):
        raise exception.LimitExceeded()

    if secret.encrypted_data:
        raise ValueError('Secret already has encrypted data stored for it.')

    # Encrypt payload
    LOG.debug('Encrypting secret payload...')
    new_datum = crypto_manager.encrypt(payload,
                                       content_type,
                                       content_encoding,
                                       secret,
                                       tenant,
                                       kek_repo)
    datum_repo.create_from(new_datum)

    return new_datum
| 37.416667 | 76 | 0.625464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,188 | 0.406088 |
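A minimal usage sketch for get_or_create_tenant from the resources.py record above, assuming a Barbican checkout is importable so that barbican.common.resources (and the barbican.model.models it uses) resolves. InMemoryTenantRepo is a hypothetical stand-in exposing only the two repository calls the helper makes; it is an illustration of the find-or-create flow, not Barbican's real repository API.

# Hedged sketch: illustrates the find-or-create flow only.
from barbican.common import resources


class InMemoryTenantRepo(object):
    """Hypothetical stub with the two methods get_or_create_tenant calls."""

    def __init__(self):
        self.by_keystone_id = {}

    def find_by_keystone_id(self, keystone_id, suppress_exception=False):
        # The real repository raises on a miss unless suppress_exception=True;
        # this stand-in simply returns None for a miss.
        return self.by_keystone_id.get(keystone_id)

    def create_from(self, tenant):
        self.by_keystone_id[tenant.keystone_id] = tenant


repo = InMemoryTenantRepo()
first = resources.get_or_create_tenant('keystone-123', repo)   # creates
second = resources.get_or_create_tenant('keystone-123', repo)  # finds existing
assert first is second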
c71ac734d6782f901c4c5400d878122dd11ea416 | 567 | py | Python | 7/prime.py | redfast00/euler | 98fc49a1fcb8b49415cc4384952a6447378bd4f4 | [
"MIT"
]
| null | null | null | 7/prime.py | redfast00/euler | 98fc49a1fcb8b49415cc4384952a6447378bd4f4 | [
"MIT"
]
| null | null | null | 7/prime.py | redfast00/euler | 98fc49a1fcb8b49415cc4384952a6447378bd4f4 | [
"MIT"
]
| null | null | null | from math import sqrt
def stream_primes(num):
    primes = []
    candidate = 2
    for i in range(num):
        prime = next_prime(primes, candidate)
        primes.append(prime)
        candidate = prime + 1
        yield prime


def next_prime(primes, candidate):
    while True:
        for prime in primes:
            if candidate % prime == 0:
                break
            elif prime > sqrt(candidate):
                return candidate
        else:
            return candidate
        candidate += 1


for prime in stream_primes(10001):
    print(prime)
| 22.68 | 45 | 0.560847 | 0 | 0 | 207 | 0.365079 | 0 | 0 | 0 | 0 | 0 | 0 |
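The script above prints every prime up to the 10001st, while Project Euler problem 7 only needs the last one. A short, hedged variation, assuming stream_primes from 7/prime.py above is importable or pasted into the same module:

from collections import deque

# Assumes stream_primes from 7/prime.py above is defined in scope.
# A deque with maxlen=1 drains the generator while keeping only the final prime.
answer = deque(stream_primes(10001), maxlen=1).pop()
print(answer)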
c71be407b214b6130f22496ab986a3ca003cfe56 | 777 | py | Python | app/utils.py | HealYouDown/flo-league | c729cad1daddfb89e997c101bd2da505b7137d98 | [
"MIT"
]
| null | null | null | app/utils.py | HealYouDown/flo-league | c729cad1daddfb89e997c101bd2da505b7137d98 | [
"MIT"
]
| 3 | 2021-05-03T19:05:11.000Z | 2021-06-12T09:43:02.000Z | app/utils.py | HealYouDown/flo-league | c729cad1daddfb89e997c101bd2da505b7137d98 | [
"MIT"
]
| null | null | null | import datetime
from app.models import Log
from flask_login import current_user
from app.extensions import db
# https://stackoverflow.com/questions/6558535/find-the-date-for-the-first-monday-after-a-given-date
def next_weekday(
    d: datetime.datetime = datetime.datetime.utcnow(),
    weekday: int = 0,
) -> datetime.datetime:
    days_ahead = weekday - d.weekday()
    if days_ahead <= 0:  # Target day already happened this week
        days_ahead += 7

    # Flatten the current time to just the date
    date = datetime.datetime(d.year, d.month, d.day)
    return date + datetime.timedelta(days_ahead)


def add_moderator_log(log_text: str) -> None:
    db.session.add(Log(
        moderator_id=current_user.id,
        message=log_text,
    ))
    db.session.commit()
| 28.777778 | 99 | 0.705277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.232947 |
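A hedged usage sketch for next_weekday from app/utils.py above, assuming the function is importable (or pasted into scope). Note that the d=datetime.datetime.utcnow() default is evaluated once, when the module is first imported, so long-running processes should pass d explicitly as done here; weekday=0 follows datetime's Monday-is-0 convention.

import datetime

# Assumes next_weekday from app/utils.py above is available in scope.
wednesday = datetime.datetime(2021, 5, 5, 15, 30)  # a Wednesday afternoon
next_monday = next_weekday(wednesday, weekday=0)   # weekday=0 -> Monday
print(next_monday)                                 # 2021-05-10 00:00:00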
c71c00b730b4e3cf508cdefb7968765436ad7ce3 | 68,625 | py | Python | benchmarks/SimResults/combinations_spec_mylocality/oldstuff/cmp_soplexmcfcalculixgcc/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | benchmarks/SimResults/combinations_spec_mylocality/oldstuff/cmp_soplexmcfcalculixgcc/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | benchmarks/SimResults/combinations_spec_mylocality/oldstuff/cmp_soplexmcfcalculixgcc/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.181181,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.344996,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.977935,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.486054,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.841669,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.482721,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.81044,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.330514,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.28395,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.184753,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0176198,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.195265,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.130309,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.380018,
'Execution Unit/Register Files/Runtime Dynamic': 0.147929,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.521478,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.08927,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.79801,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00272158,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00272158,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0023766,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000923356,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00187191,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00969166,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0258763,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.12527,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.372767,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.425473,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.959077,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.090727,
'L2/Runtime Dynamic': 0.0127692,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.08122,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.38167,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0920133,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0920133,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.51749,
'Load Store Unit/Runtime Dynamic': 1.92746,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.226889,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.453778,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0805237,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0817258,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.061585,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.697703,
'Memory Management Unit/Runtime Dynamic': 0.143311,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.1203,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.644561,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0326103,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.237087,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.914258,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.75489,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.11996,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.29691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.64733,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.234954,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.378972,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.191292,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.805218,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.169475,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.2954,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.122295,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00985502,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.116195,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0728839,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.23849,
'Execution Unit/Register Files/Runtime Dynamic': 0.0827389,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.274787,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.565173,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.15542,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00133282,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00133282,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118494,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000471861,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00104698,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00489756,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0119197,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0700652,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.45674,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.197355,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.237973,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.89155,
'Instruction Fetch Unit/Runtime Dynamic': 0.522211,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0504299,
'L2/Runtime Dynamic': 0.0069462,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.70196,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.713329,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0473909,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0473909,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.92575,
'Load Store Unit/Runtime Dynamic': 0.994436,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.116858,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.233716,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0414733,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0421754,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.277104,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0325171,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.504457,
'Memory Management Unit/Runtime Dynamic': 0.0746925,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.2571,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.321701,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0145155,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.111753,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.44797,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.20167,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0065108,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.207803,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0335685,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.102536,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.165386,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0834813,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.351403,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.112125,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.10223,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00634181,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0043008,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0336025,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0318071,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0399443,
'Execution Unit/Register Files/Runtime Dynamic': 0.0361079,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0724192,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.179703,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.18039,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000995662,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000393137,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000456911,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0037065,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0103022,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0305769,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.94496,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0958958,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.103853,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.25787,
'Instruction Fetch Unit/Runtime Dynamic': 0.244335,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0538499,
'L2/Runtime Dynamic': 0.0148173,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.02873,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.40237,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0256105,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0256104,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.14967,
'Load Store Unit/Runtime Dynamic': 0.554282,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.063151,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.126302,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0224125,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0232096,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.12093,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0157552,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.31554,
'Memory Management Unit/Runtime Dynamic': 0.0389648,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4686,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0166828,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00482915,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0520126,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0735245,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.10632,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00682822,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.208052,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0364806,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.106185,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.171272,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0864526,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.36391,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.115853,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.11398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00689197,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00445387,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0347798,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0329391,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0416718,
'Execution Unit/Register Files/Runtime Dynamic': 0.037393,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0749788,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.202833,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.21756,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000625326,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000625326,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000550159,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000215984,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000473173,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00227399,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00579905,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0316652,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.01418,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0689457,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.107549,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.33045,
'Instruction Fetch Unit/Runtime Dynamic': 0.216233,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0418086,
'L2/Runtime Dynamic': 0.00989266,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.36015,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.554162,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0363327,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0363327,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.53172,
'Load Store Unit/Runtime Dynamic': 0.769675,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0895903,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.17918,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0317959,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0324228,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.125234,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0113054,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.335963,
'Memory Management Unit/Runtime Dynamic': 0.0437282,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.9434,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0181291,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0050114,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0551057,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0782462,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.33534,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.868411224021876,
'Runtime Dynamic': 3.868411224021876,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.371973,
'Runtime Dynamic': 0.183113,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 75.1614,
'Peak Power': 108.274,
'Runtime Dynamic': 16.5813,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 74.7894,
'Total Cores/Runtime Dynamic': 16.3982,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.371973,
'Total L3s/Runtime Dynamic': 0.183113,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.082057 | 124 | 0.682157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46,943 | 0.684051 |
c71c6e80583baf2cb3846a4c3d378463d41f4b27 | 9,582 | py | Python | packages/gtmcore/gtmcore/environment/conda.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
]
| 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmcore/gtmcore/environment/conda.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
]
| 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmcore/gtmcore/environment/conda.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
]
| 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | from typing import List, Dict
import json
from gtmcore.http import ConcurrentRequestManager, ConcurrentRequest
from gtmcore.environment.packagemanager import PackageManager, PackageResult, PackageMetadata
from gtmcore.container import container_for_context
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class CondaPackageManagerBase(PackageManager):
"""Class to implement the conda package manager
"""
def __init__(self):
# String to be set in child classes indicating which python version you are checking. Typically should be either
# python 3.6* or python 2.7*
self.python_depends_str = None
# String of the name of the conda environment (e.g. py36 or py27, as created via container build)
self.python_env = None
# Note, currently we hard code channel config. Future changes to support the user specifying channels
# will modify this behavior
self.channel_priority = ['conda-forge', 'anaconda']
self.request_mgr = ConcurrentRequestManager()
def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
"""Method to list all available versions of a package based on the package name
Args:
package_name: Name of the package to query
labbook: Subject LabBook
username: username of current user
Returns:
list(str): Version strings
"""
# Check for package in channels, picking out version by priority
request_list = list()
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{package_name}",
headers={'Accept': 'application/json'}))
responses = self.request_mgr.resolve_many(request_list)
versions = None
for response in responses:
if response.status_code != 200:
continue
versions = response.json.get('versions')
break
if not versions:
raise ValueError(f"Package {package_name} not found in channels {' ,'.join(self.channel_priority)}.")
versions.reverse()
return versions
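    # Illustrative usage sketch for list_versions(); the manager, LabBook and
    # username below are placeholders, and the call performs live requests
    # against api.anaconda.org, so the exact result depends on the channels:
    #     versions = mgr.list_versions("numpy", labbook, "alice")
    #     versions[0]  # reversed from the API order, so typically the newest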
def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
"""Method to get a list of all packages that are currently installed
Note, this will return results for the computer/container in which it is executed. To get the properties of
a LabBook container, a docker exec command would be needed from the Gigantum application container.
return format is a list of dicts with the format (name: <package name>, version: <version string>)
Returns:
list
"""
project_container = container_for_context(username, labbook=labbook)
result = project_container.run_container("conda list --no-pip --json", wait_for_output=True)
if result:
data = json.loads(result)
if data:
return [{"name": x['name'], 'version': x['version']} for x in data]
else:
return []
def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
-> List[PackageResult]:
"""Method to validate a list of packages, and if needed fill in any missing versions
Should check both the provided package name and version. If the version is omitted, it should be generated
from the latest version.
Args:
package_list(list): A list of dictionaries of packages to validate
labbook(str): The labbook instance
username(str): The username for the logged in user
Returns:
namedtuple: namedtuple indicating if the package and version are valid
"""
result = list()
# Check for package in channels, picking out version by priority
request_list = list()
for pkg in package_list:
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg['package']}",
headers={'Accept': 'application/json'}))
responses = self.request_mgr.resolve_many(request_list)
# Repack into groups by package
responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))
for package, responses in zip(package_list, responses_per_package):
versions = None
latest_version = None
for response in responses:
if response.status_code != 200:
continue
versions = response.json.get('versions')
latest_version = response.json.get('latest_version')
break
if not versions:
# Package is not found
result.append(PackageResult(package=package['package'], version=package.get('version'), error=True))
continue
if package.get('version'):
# Package has been set, so validate it
if package.get('version') in versions:
# Both package name and version are valid
result.append(PackageResult(package=package['package'], version=package.get('version'),
error=False))
else:
# The package version is not in the list, so invalid
result.append(PackageResult(package=package['package'], version=package.get('version'), error=True))
else:
# You need to look up the latest version since not included
result.append(PackageResult(package=package['package'], version=str(latest_version),
error=False))
return result
def get_packages_metadata(self, package_list: List[str], labbook: LabBook, username: str) -> List[PackageMetadata]:
"""Method to get package metadata
Args:
package_list: List of package names
labbook(str): The labbook instance
username(str): The username for the logged in user
Returns:
list
"""
def _extract_metadata(data):
"""Extraction method to pull out the docs URL and description"""
latest_val = data.get('latest_version')
description_val = data.get('summary').strip()
docs_val = data.get('doc_url')
if not docs_val:
docs_val = data.get('html_url')
return latest_val, description_val, docs_val
# Check for package in channels, picking out version by priority
request_list = list()
for pkg in package_list:
for channel in self.channel_priority:
request_list.append(ConcurrentRequest(f"https://api.anaconda.org/package/{channel}/{pkg}",
headers={'Accept': 'application/json'},
extraction_function=_extract_metadata))
responses = self.request_mgr.resolve_many(request_list)
# Repack into groups by package
responses_per_package = list(zip(*(iter(responses),) * len(self.channel_priority)))
result = list()
for package, responses in zip(package_list, responses_per_package):
data = None
for response in responses:
if response.status_code == 200:
data = response.extracted_json
break
if data:
latest_version, description, docs_url = data
result.append(PackageMetadata(package_manager="conda", package=package, latest_version=latest_version,
description=description, docs_url=docs_url))
else:
result.append(PackageMetadata(package_manager="conda", package=package, latest_version=None,
description=None, docs_url=None))
return result
def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
"""Method to generate a docker snippet to install 1 or more packages
        Note: Because conda can be so slow to solve environments with conda-forge included, always single-line it.
Args:
packages(list(dict)): A list of package names and versions to install
            single_line(bool): If true, collapse the install commands into a single line
Returns:
list
"""
package_strings = [f"{x['name']}={x['version']}" for x in packages]
if single_line:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
else:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
class Conda3PackageManager(CondaPackageManagerBase):
"""Class to implement the conda3 package manager
"""
def __init__(self):
super().__init__()
self.python_depends_str = 'python 3.6*'
self.python_env = 'py36'
class Conda2PackageManager(CondaPackageManagerBase):
"""Class to implement the conda2 package manager
"""
def __init__(self):
super().__init__()
self.python_depends_str = 'python 2.7*'
self.python_env = 'py27'
| 40.774468 | 120 | 0.611668 | 9,210 | 0.961177 | 0 | 0 | 0 | 0 | 0 | 0 | 3,749 | 0.391254 |
c71da90915f08f68f935060eea6dba44dc3beaac | 1,147 | py | Python | netchos/io/io_mpl_to_px.py | brainets/netchos | ccfcd2ec85894adffbd20fbc67410dbdacfe6812 | [
"BSD-3-Clause"
]
| 11 | 2021-04-20T19:45:23.000Z | 2021-11-17T15:18:33.000Z | netchos/io/io_mpl_to_px.py | brainets/netchos | ccfcd2ec85894adffbd20fbc67410dbdacfe6812 | [
"BSD-3-Clause"
]
| 3 | 2021-04-26T09:01:42.000Z | 2021-06-30T12:09:15.000Z | netchos/io/io_mpl_to_px.py | brainets/netchos | ccfcd2ec85894adffbd20fbc67410dbdacfe6812 | [
"BSD-3-Clause"
]
| 2 | 2021-05-06T20:28:46.000Z | 2021-05-24T10:36:44.000Z | """Conversion of Matplotlib / Seaborn inputs to plotly."""
import os.path as op
from pkg_resources import resource_filename
import json
def mpl_to_px_inputs(inputs, plt_types=None):
"""Convert typical matplotlib inputs to plotly to simplify API.
Parameters
----------
inputs : dict
Dictionary of inputs
plt_types : string or list or None
        Sub-select some plotting types (e.g. heatmap, line etc.). If None, all
types are used
Returns
-------
outputs : dict
Dictionary of converted inputs
"""
# load reference table
file = op.join(op.dirname(__file__), "io_mpl_to_px.json")
with open(file, 'r') as f:
table = json.load(f)
# go through the desired plotting types for conversion
if plt_types is None:
plt_types = list(table.keys())
if isinstance(plt_types, str):
plt_types = [plt_types]
ref = {}
for plt_type in plt_types:
ref.update(table[plt_type])
# convert inputs
outputs = {}
for k, v in inputs.items():
if k in ref.keys():
k = ref[k]
outputs[k] = v
return outputs
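# Illustrative sketch: keys present in the bundled io_mpl_to_px.json table are
# renamed to their plotly equivalents and unknown keys pass through unchanged.
# The mapping shown here is an assumption for illustration only:
#     mpl_to_px_inputs({'cmap': 'viridis', 'zorder': 2}, plt_types='heatmap')
#     # -> {'color_continuous_scale': 'viridis', 'zorder': 2}, assuming the
#     #    table maps 'cmap' to 'color_continuous_scale'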
| 25.488889 | 77 | 0.62075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.474281 |
c71dc157e40f86937d395921d62896697e8b4c70 | 186 | py | Python | fizzbuzz_for_02.py | toastyxen/FizzBuzz | 094270e3882e743a80c5d32b3903c2483d37755f | [
"MIT"
]
| null | null | null | fizzbuzz_for_02.py | toastyxen/FizzBuzz | 094270e3882e743a80c5d32b3903c2483d37755f | [
"MIT"
]
| null | null | null | fizzbuzz_for_02.py | toastyxen/FizzBuzz | 094270e3882e743a80c5d32b3903c2483d37755f | [
"MIT"
]
| null | null | null | """Fizzbuzz for loop variant 3"""
for x in range(1, 101):
OUTPUT = ""
if x % 3 == 0:
OUTPUT += "Fizz"
if x % 5 == 0:
OUTPUT += "Buzz"
print(OUTPUT or x)
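# Sample of the expected output: 1, 2, Fizz, 4, Buzz, Fizz, 7, 8, Fizz, Buzz,
# 11, Fizz, 13, 14, FizzBuzz, ... (plain numbers are printed because
# `OUTPUT or x` falls back to x when OUTPUT is an empty string).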
| 18.6 | 33 | 0.473118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.252688 |
c71ef3a9007aa0aebc08a606ded35bff47c69406 | 242 | py | Python | cnn/struct/layer/parse_tensor_module.py | hslee1539/GIS_GANs | 6901c830b924e59fd06247247db3f925bab26583 | [
"MIT"
]
| null | null | null | cnn/struct/layer/parse_tensor_module.py | hslee1539/GIS_GANs | 6901c830b924e59fd06247247db3f925bab26583 | [
"MIT"
]
| null | null | null | cnn/struct/layer/parse_tensor_module.py | hslee1539/GIS_GANs | 6901c830b924e59fd06247247db3f925bab26583 | [
"MIT"
]
| null | null | null | from tensor.main_module import Tensor
import numpy as np
def getTensor(value):
if type(value) is np.ndarray:
return Tensor.numpy2Tensor(value)
elif type(value) is Tensor:
return value
else:
raise Exception | 24.2 | 41 | 0.68595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c71f19c3cf33a6be263067d8b8a273844fc916bd | 3,337 | py | Python | openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
]
| null | null | null | openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
]
| null | null | null | openstack_dashboard/dashboards/admin/volume_types/qos_specs/forms.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
]
| null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
KEY_NAME_REGEX = re.compile(r"^[a-zA-Z0-9-_:. /]+$", re.UNICODE)
KEY_ERROR_MESSAGES = {
'invalid': _("The key must match the following the regex: "
"'^[a-zA-Z0-9-_:. /]'")}
class CreateKeyValuePair(forms.SelfHandlingForm):
# this if for creating a spec key-value pair for an existing QOS Spec
key = forms.RegexField(max_length=255, label=_("Key"),
regex=KEY_NAME_REGEX,
error_messages=KEY_ERROR_MESSAGES)
value = forms.CharField(max_length=255, label=_("Value"))
def handle(self, request, data):
qos_spec_id = self.initial['qos_spec_id']
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get(request, qos_spec_id)
# now add new key-value pair to list of specs
specs.specs[data['key']] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Created spec "%s".') % data['key']
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to create spec."),
redirect=redirect)
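# Illustrative sketch of the flow above with a hypothetical QOS spec entry:
# data = {'key': 'read_iops_sec', 'value': '1000'} fetches the current specs,
# adds specs.specs['read_iops_sec'] = '1000', and writes the dict back through
# api.cinder.qos_spec_set_keys(request, qos_spec_id, specs.specs).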
class EditKeyValuePair(forms.SelfHandlingForm):
value = forms.CharField(max_length=255, label=_("Value"))
# update the backend with the new qos spec value
def handle(self, request, data):
key = self.initial['key']
qos_spec_id = self.initial['qos_spec_id']
# build up new 'specs' object with all previous values plus new value
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get_keys(request,
qos_spec_id,
raw=True)
specs.specs[key] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Saved spec "%s".') % key
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to edit spec."),
redirect=redirect)
| 39.72619 | 77 | 0.585556 | 2,367 | 0.70932 | 0 | 0 | 0 | 0 | 0 | 0 | 1,169 | 0.350315 |
c71fc189fa6f73122afbe242bbfd89bd9a8a50ea | 9,050 | py | Python | data_structure/const_tree.py | alipay/StructuredLM_RTDT | 6edf2acf8747e17015523d78b6c580431a4f7b5c | [
"Apache-2.0"
]
| 42 | 2021-06-01T07:07:12.000Z | 2022-03-18T02:38:53.000Z | data_structure/const_tree.py | alipay/StructuredLM_RTDT | 6edf2acf8747e17015523d78b6c580431a4f7b5c | [
"Apache-2.0"
]
| 1 | 2021-12-15T03:50:24.000Z | 2021-12-15T08:46:56.000Z | data_structure/const_tree.py | alipay/StructuredLM_RTDT | 6edf2acf8747e17015523d78b6c580431a4f7b5c | [
"Apache-2.0"
]
| 7 | 2021-06-02T02:28:01.000Z | 2022-01-14T06:59:29.000Z | # coding=utf-8
# Copyright (c) 2021 Ant Group
import sys
LABEL_SEP = '@'
INDENT_STRING1 = '│ '
INDENT_STRING2 = '├──'
EMPTY_TOKEN = '___EMPTY___'
def print_tree(const_tree, indent=0, out=sys.stdout):
for i in range(indent - 1):
out.write(INDENT_STRING1)
if indent > 0:
out.write(INDENT_STRING2)
out.write(const_tree.tag)
if not isinstance(const_tree.children[0], ConstTree):
out.write(f' {const_tree.children[0].string}\n')
else:
out.write('\n')
for child in const_tree.children:
print_tree(child, indent + 1, out)
def _make_tree(string, make_leaf_fn, make_internal_fn):
tokens = string.replace('(', ' ( ').replace(')', ' ) ').split()
index, stack = 0, []
lexicons = []
root = None
while index < len(tokens):
token = tokens[index]
index += 1
if token == ')':
if not stack:
raise ConstTreeParserError('redundant ")" at token ' + str(index))
node = stack.pop()
if not stack:
root = node
else:
stack[-1].children.append(node)
elif token == '(':
tag = tokens[index]
index += 1
stack.append(make_internal_fn(tag))
else:
if not stack:
                raise ConstTreeParserError('unexpected token at position ' + str(index))
new_token = []
while token != ')':
                if token == '(':
raise Exception('bracket error')
new_token.append(token)
token = tokens[index]
index += 1
# is lexicon
leaf_node = make_leaf_fn('_'.join(new_token))
lexicons.append(leaf_node)
postag_node = stack.pop()
postag_node.children.append(leaf_node)
if not stack:
root = postag_node
else:
stack[-1].children.append(postag_node)
if not root or stack:
raise ConstTreeParserError('missing ")".')
return root, lexicons
class ConstTreeParserError(Exception):
pass
class Lexicon:
__slots__ = ('string', 'span', 'parent')
def __init__(self, string, span=None):
self.string = string
self.span = span
def __str__(self):
return f'<Lexicon {self.string}>'
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.string == other.string
def __hash__(self):
return hash(self.string) + 2
@property
def tag(self):
return self.string
def to_string(self, quote_lexicon):
if quote_lexicon:
return f'"{self.string}"'
return self.string
class ConstTree:
__slots__ = ('children', 'tag', 'span', 'index', 'parent', 'attrs')
ROOT_LABEL = 'ROOT'
def __init__(self, tag, children=None, span=None):
self.tag = tag
self.children = children if children is not None else []
self.span = span
self.index = None
def __str__(self):
child_string = ' + '.join(child.tag for child in self.children)
return f'{self.span} {self.tag} => {child_string}'
def __repr__(self):
return str(self)
def __getitem__(self, index):
if isinstance(index, int):
return self.children[index]
if isinstance(index, str):
for child in self.children:
if isinstance(child, ConstTree) and child.tag == index.upper():
return child
raise KeyError
def to_string(self, quote_lexicon=False):
child_string = ' '.join(child.to_string(quote_lexicon) for child in self.children)
return f'({self.tag} {child_string})'
@staticmethod
def from_string(string):
""" Construct ConstTree from parenthesis representation.
:param string: string of parenthesis representation
:return: ConstTree root and all leaf Lexicons
"""
tree, lexicons = _make_tree(string, Lexicon, ConstTree)
for index, lexicon in enumerate(lexicons):
lexicon.span = index, index + 1
tree.populate_spans_internal()
return tree, lexicons
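    # Illustrative sketch (the bracketed sentence is a made-up example):
    #     tree, lexicons = ConstTree.from_string(
    #         '(S (NP (DT the) (NN cat)) (VP (VBZ sleeps)))')
    #     [lex.string for lex in lexicons]  # ['the', 'cat', 'sleeps']
    #     tree.span                         # (0, 3)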
def traverse_postorder(self):
for child in self.children:
if isinstance(child, ConstTree):
yield from child.traverse_postorder()
yield self
def traverse_postorder_with_lexicons(self):
for child in self.children:
if isinstance(child, ConstTree):
yield from child.traverse_postorder_with_lexicons()
else:
yield child
yield self
def generate_preterminals(self):
for child in self.children:
if isinstance(child, ConstTree):
yield from child.generate_preterminals()
for child in self.children:
if isinstance(child, Lexicon):
yield self
def generate_lexicons(self):
for child in self.children:
if isinstance(child, ConstTree):
yield from child.generate_lexicons()
for child in self.children:
if isinstance(child, Lexicon):
yield child
def is_binary_tree(self):
if isinstance(self.children[0], Lexicon):
return True
        return len(self.children) <= 2 and all(child.is_binary_tree() for child in self.children)
def condensed_unary_chain(self, include_preterminal=True, remove_root=None):
if self.tag == remove_root:
assert len(self.children) == 1
return self.children[0].condensed_unary_chain(include_preterminal=include_preterminal)
if len(self.children) > 1:
return ConstTree(self.tag,
children=list(child.condensed_unary_chain()
for child in self.children),
span=self.span)
if isinstance(self.children[0], Lexicon):
return ConstTree((self.tag if include_preterminal else EMPTY_TOKEN),
children=list(self.children),
span=self.span)
assert isinstance(self.children[0], ConstTree)
node = self
new_tag = self.tag
while len(node.children) == 1 and isinstance(node.children[0], ConstTree):
node = node.children[0]
if include_preterminal or isinstance(node.children[0], ConstTree):
new_tag += LABEL_SEP + node.tag
if len(node.children) == 1:
children = list(node.children)
else:
children = list(child.condensed_unary_chain() for child in node.children)
return ConstTree(new_tag, children=children, span=self.span)
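    # Illustrative sketch: a purely unary chain such as '(S (NP (NN dog)))' is
    # collapsed into a single node tagged 'S@NP@NN' (labels joined with
    # LABEL_SEP); expanded_unary_chain() below reverses the operation.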
def expanded_unary_chain(self, add_root=None):
if isinstance(self.children[0], Lexicon):
children = list(self.children)
else:
children = list(child.expanded_unary_chain() for child in self.children)
tags = self.tag.split(LABEL_SEP)
for tag in reversed(tags):
children = [ConstTree(tag, children=children, span=self.span)]
root = children[0]
if add_root:
root = ConstTree(add_root, children=[root])
return root
def calculate_span(self):
self.span = self.children[0].span[0], self.children[-1].span[1]
def populate_spans_internal(self):
for child in self.children:
if isinstance(child, ConstTree):
child.populate_spans_internal()
self.calculate_span()
def add_postorder_index(self):
for index, node in enumerate(self.traverse_postorder()):
node.index = index
def add_parents(self, parent=None):
self.parent = parent
for child in self.children:
if isinstance(child, ConstTree):
child.add_parents(self)
def is_ancestor_of(self, other):
other = other.parent
while other is not None and other is not self:
other = other.parent
return other is not None
def generate_path_to_root(self, include_self=False):
node = self
if not include_self:
node = self.parent
while node is not None:
yield node
node = node.parent
def lowest_common_ancestor(self, other):
path = list(other.generate_path_to_root())
for node in self.generate_path_to_root():
try:
return path[path.index(node)]
except ValueError:
pass
def remove_nodes(self, filter):
_children = []
for c in self.children:
if isinstance(c, ConstTree):
if filter(c):
pass
else:
filtered_node = c.remove_nodes(filter)
_children.append(filtered_node)
else:
_children.append(c)
return ConstTree(self.tag, _children)
| 30.782313 | 98 | 0.575912 | 6,953 | 0.767439 | 1,207 | 0.133223 | 517 | 0.057064 | 0 | 0 | 611 | 0.067439 |
c71fcfdd300a9f0f56bf5188a7e7a694d05f3faa | 4,098 | py | Python | tests/test_minimize.py | The-Ludwig/iminuit | 8eef7b711846d6c8db9fe1fc883f6fa0977eb514 | [
"MIT"
]
| null | null | null | tests/test_minimize.py | The-Ludwig/iminuit | 8eef7b711846d6c8db9fe1fc883f6fa0977eb514 | [
"MIT"
]
| null | null | null | tests/test_minimize.py | The-Ludwig/iminuit | 8eef7b711846d6c8db9fe1fc883f6fa0977eb514 | [
"MIT"
]
| null | null | null | import pytest
from iminuit import minimize
import numpy as np
from numpy.testing import assert_allclose, assert_equal
opt = pytest.importorskip("scipy.optimize")
def func(x, *args):
c = args[0] if args else 1
return c + x[0] ** 2 + (x[1] - 1) ** 2 + (x[2] - 2) ** 2
def grad(x, *args):
return 2 * (x - (0, 1, 2))
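# Sanity check of the analytic gradient: func is c + x[0]**2 + (x[1] - 1)**2
# + (x[2] - 2)**2, so d(func)/dx = 2 * (x - (0, 1, 2)), which is exactly what
# grad() returns.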
def test_simple():
result = minimize(func, (1, 1, 1))
assert_allclose(result.x, (0, 1, 2), atol=1e-8)
assert_allclose(result.fun, 1)
assert result.nfev > 0
assert result.njev == 0
def test_gradient():
result = minimize(func, (1, 1, 1), jac=grad)
assert_allclose(result.x, (0, 1, 2), atol=1e-8)
assert_allclose(result.fun, 1)
assert result.nfev > 0
assert result.njev > 0
def test_args():
result = minimize(func, np.ones(3), args=(5,))
assert_allclose(result.x, (0, 1, 2), atol=1e-8)
assert_allclose(result.fun, 5)
assert result.nfev > 0
assert result.njev == 0
def test_callback():
trace = []
result = minimize(func, np.ones(3), callback=lambda x: trace.append(x.copy()))
assert_allclose(result.x, (0, 1, 2), atol=1e-8)
assert_allclose(result.fun, 1)
assert result.nfev == len(trace)
assert_allclose(trace[0], np.ones(3), atol=1e-2)
assert_allclose(trace[-1], result.x, atol=1e-2)
def test_tol():
ref = np.ones(2)
def rosen(par):
x, y = par
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
r1 = minimize(rosen, (0, 0), tol=1)
r2 = minimize(rosen, (0, 0), tol=1e-6)
assert max(np.abs(r2.x - ref)) < max(np.abs(r1.x - ref))
def test_disp(capsys):
minimize(lambda x: x ** 2, 0)
assert capsys.readouterr()[0] == ""
minimize(lambda x: x ** 2, 0, options={"disp": True})
assert capsys.readouterr()[0] != ""
def test_hessinv():
r = minimize(func, (1, 1, 1))
href = np.zeros((3, 3))
for i in range(3):
href[i, i] = 0.5
assert_allclose(r.hess_inv, href, atol=1e-8)
def test_unsupported():
with pytest.raises(ValueError):
minimize(func, (1, 1, 1), constraints=[])
with pytest.raises(ValueError):
minimize(func, (1, 1, 1), jac=True)
def test_call_limit():
ref = minimize(func, (1, 1, 1))
with pytest.warns(UserWarning):
r1 = minimize(func, (1, 1, 1), options={"maxiter": 1})
assert r1.nfev < ref.nfev
assert not r1.success
assert "Call limit" in r1.message
with pytest.warns(DeprecationWarning):
r2 = minimize(func, (1, 1, 1), options={"maxfev": 1})
assert not r2.success
assert r2.nfev == r1.nfev
r3 = minimize(func, (1, 1, 1), options={"maxfun": 1})
assert not r3.success
assert r3.nfev == r1.nfev
def test_eps():
ref = minimize(func, (1, 1, 1))
r = minimize(func, (1, 1, 1), options={"eps": 1e-10})
assert np.any(ref.x != r.x)
assert_allclose(r.x, ref.x, atol=1e-9)
def test_bad_function():
class Fcn:
n = 0
def __call__(self, x):
self.n += 1
return x ** 2 + 1e-4 * (self.n % 3)
r = minimize(Fcn(), [1], options={"maxfun": 100000000})
assert not r.success
assert "Estimated distance to minimum too large" in r.message
def test_bounds():
r1 = minimize(func, (1.5, 1.7, 1.5), bounds=opt.Bounds((1, 1.5, 1), (2, 2, 2)))
assert r1.success
assert_allclose(r1.x, (1, 1.5, 2), atol=1e-2)
r2 = minimize(func, (1.5, 1.7, 1.5), bounds=((1, 2), (1.5, 2), (1, 2)))
assert r2.success
assert_equal(r1.x, r2.x)
def test_method_warn():
with pytest.raises(ValueError):
minimize(func, (1.5, 1.7, 1.5), method="foo")
def test_hess_warn():
with pytest.warns(UserWarning):
minimize(func, (1.5, 1.7, 1.5), hess=True)
def test_unreliable_uncertainties():
r = minimize(func, (1.5, 1.7, 1.5), options={"stra": 0})
assert (
r.message
== "Optimization terminated successfully, but uncertainties are unrealiable."
)
def test_simplex():
r = minimize(func, (1.5, 1.7, 1.5), method="simplex", tol=1e-4)
assert r.success
assert_allclose(r.x, (0, 1, 2), atol=2e-3)
| 26.269231 | 85 | 0.59346 | 128 | 0.031235 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.051489 |
c72190831a83ec1b623a951d123f7148309fad86 | 2,468 | py | Python | murtanto/parsing.py | amandatv20/botfb | 2be3ce0265fd86f48f24d2b496d36fd346e49d29 | [
"MIT"
]
| 1 | 2021-03-24T13:54:33.000Z | 2021-03-24T13:54:33.000Z | murtanto/parsing.py | amandatv20/botfb | 2be3ce0265fd86f48f24d2b496d36fd346e49d29 | [
"MIT"
]
| 2 | 2020-06-15T08:10:55.000Z | 2020-06-16T15:03:19.000Z | murtanto/parsing.py | amandatv20/botfb | 2be3ce0265fd86f48f24d2b496d36fd346e49d29 | [
"MIT"
]
| null | null | null | # coded by: salism3
# 23 - 05 - 2020 23:18 (Malam Takbir -- Takbir Night, the eve of Eid)
from bs4 import BeautifulSoup as parser
from . import sorting
import re
def to_bs4(html):
return parser(html, "html.parser")
def refsrc(html):
return True if re.search(r'http.+\Wrefsrc', html) else False
def parsing_href(html, href, one = False, bs4_class = False):
data = to_bs4(html)
if one:
data = data.find("a", href = lambda x: x and href in x)
if not bs4_class and data != None:
data = sorting.to_mbasic(data["href"])
else:
data = data.find_all("a", href = lambda x: x and href in x)
if not bs4_class:
data = [sorting.to_mbasic(x["href"]) for x in data]
return data
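# Illustrative sketch with a hypothetical HTML fragment; bs4_class=True returns
# the raw tag instead of passing the href through sorting.to_mbasic:
#     html = '<a href="/requests/?confirm=123">Confirm</a>'
#     parsing_href(html, "?confirm=", one=True, bs4_class=True)["href"]
#     # -> '/requests/?confirm=123'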
def parsing_href_regex(html, pattern, one = False, bs4_class = False):
data = to_bs4(html)
if one:
data = data.find("a", href = lambda x: x and re.search(pattern, x))
if not bs4_class and data != None:
data = sorting.to_mbasic(data["href"])
else:
data = data.find_all("a", href = lambda x: x and re.search(pattern, x))
if not bs4_class:
data = [sorting.to_mbasic(x["href"]) for x in data]
return data
def getMyName(html):
data = to_bs4(html).find("title").text
return data
def getName(html):
data = to_bs4(html).find("title").text
return data
def getMyId(html):
data = to_bs4(html).find("a", href = lambda x:"/allactivity" in x)["href"]
data = re.search(r"/\d+/?", data).group().replace("/", "")
return data
def getHiddenInput(html, post_action):
rv = {}
data = to_bs4(html).find("form", action = lambda x: post_action in x)
data = data.find_all("input", {"type":"hidden", "name":True, "value":True})
for x in data:
rv[x["name"]] = x["value"]
return rv
def friendRequestParser(html):
confirm = parsing_href(html, "?confirm=")
reject = parsing_href(html, "?delete=")
rv = list(zip(confirm, reject))
next = parsing_href(html, "?ppk=", one = True)
return {"items":rv, "next":next}
def listFriendParser(html):
data = parsing_href(html, "fref=fr_tab", bs4_class = True)
nama = [x.text for x in data]
id_ = [re.search(r"\w[\w.]+", x["href"].replace("/", "").replace("profile.php?id=", "")).group() for x in data]
img = [x["src"] for x in to_bs4(html).find_all("img", alt = lambda x: x and "profile picture" in x)]
if "/allactivity?" in html:
del img[0]
next = parsing_href(html, "unit_cursor=", one = True)
return {"items":list(zip(nama, id_, img)), "next":next, "html":html} | 31.641026 | 113 | 0.636548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.161264 |
c721ab40af9f4d2f1e869b104c622361e1311025 | 878 | py | Python | test/test_watchdog_status.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
]
| null | null | null | test/test_watchdog_status.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
]
| null | null | null | test/test_watchdog_status.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
]
| null | null | null | # coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.watchdog_status import WatchdogStatus # noqa: E501
from swagger_client.rest import ApiException
class TestWatchdogStatus(unittest.TestCase):
"""WatchdogStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWatchdogStatus(self):
"""Test WatchdogStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.watchdog_status.WatchdogStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.95 | 86 | 0.702733 | 407 | 0.463554 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.505695 |
c721d7a43c6300b41e4a0357169d5ebc646135d1 | 235 | py | Python | setup.py | joesan/housing-classification-example | 93f921cf01c79ab63732ef302ab52d2c9ffedee1 | [
"FTL"
]
| null | null | null | setup.py | joesan/housing-classification-example | 93f921cf01c79ab63732ef302ab52d2c9ffedee1 | [
"FTL"
]
| null | null | null | setup.py | joesan/housing-classification-example | 93f921cf01c79ab63732ef302ab52d2c9ffedee1 | [
"FTL"
]
| null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Python codebase for the housing classification ML problem',
author='Joesan',
license='',
)
| 21.363636 | 76 | 0.685106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.344681 |
c7226ff1219f925df17003fe42d233729469035d | 4,187 | py | Python | tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
]
| 45 | 2022-03-05T06:54:34.000Z | 2022-03-30T02:15:42.000Z | tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
]
| 1 | 2022-03-25T14:04:39.000Z | 2022-03-31T04:48:38.000Z | tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py | wangruohui/mmediting | 6577d307caf9edfb34c6e46547994e6314fffc37 | [
"Apache-2.0"
]
| 1 | 2022-03-10T01:00:24.000Z | 2022-03-10T01:00:24.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.edvr_net import (EDVRNet,
PCDAlignment,
TSAFusion)
def test_pcd_alignment():
"""Test PCDAlignment."""
# cpu
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
pcd_alignment = pcd_alignment
input_list = [v for v in input_list]
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
# gpu
if torch.cuda.is_available():
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
pcd_alignment = pcd_alignment.cuda()
input_list = [v.cuda() for v in input_list]
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
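# Note on test_pcd_alignment above: input_list is a three-level feature pyramid
# built from the finest map down to the coarsest (8x8, 4x4, 2x2 with
# mid_channels=4), and the aligned output keeps the finest resolution, hence
# the (1, 4, 8, 8) shape assertions.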
def test_tsa_fusion():
"""Test TSAFusion."""
# cpu
tsa_fusion = TSAFusion(mid_channels=4, num_frames=5, center_frame_idx=2)
input_tensor = torch.rand(1, 5, 4, 8, 8)
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
# gpu
if torch.cuda.is_available():
tsa_fusion = tsa_fusion.cuda()
input_tensor = input_tensor.cuda()
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
def test_edvrnet():
"""Test EDVRNet."""
# cpu
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True)
input_tensor = torch.rand(1, 5, 3, 8, 8)
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3)
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
# gpu
if torch.cuda.is_available():
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True).cuda()
input_tensor = torch.rand(1, 5, 3, 8, 8).cuda()
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False).cuda()
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3).cuda()
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
| 28.482993 | 76 | 0.578696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.088608 |
c72294488588ee770a6039927fb6209367d51df5 | 225 | py | Python | mat2py/core/datastoreio.py | mat2py/mat2py | 2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8 | [
"MIT"
]
| null | null | null | mat2py/core/datastoreio.py | mat2py/mat2py | 2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8 | [
"MIT"
]
| 37 | 2021-12-23T03:22:20.000Z | 2022-02-16T15:40:47.000Z | mat2py/core/datastoreio.py | mat2py/mat2py | 2776fbe9ca4ad2aaa3eac6aa79d17747a9ec24a8 | [
"MIT"
]
| 2 | 2022-01-23T07:59:10.000Z | 2022-02-03T09:15:54.000Z | # type: ignore
__all__ = [
"readDatastoreImage",
"datastore",
]
def readDatastoreImage(*args):
raise NotImplementedError("readDatastoreImage")
def datastore(*args):
raise NotImplementedError("datastore")
| 15 | 51 | 0.711111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.337778 |
c7235d9e02846d039085054a4375d4bc687a9231 | 12,229 | py | Python | enjoliver-api/tests/test_generate_groups.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
]
| 11 | 2017-11-06T08:42:55.000Z | 2021-01-08T11:01:02.000Z | enjoliver-api/tests/test_generate_groups.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
]
| 7 | 2017-12-28T12:05:50.000Z | 2021-04-02T15:04:46.000Z | enjoliver-api/tests/test_generate_groups.py | netturpin/enjoliver | 9700470939da40ff84304af6e8c7210a5fd693a4 | [
"MIT"
]
| 4 | 2017-11-08T10:03:31.000Z | 2018-06-03T17:59:43.000Z | import os
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from enjoliver import generator
class GenerateGroupTestCase(TestCase):
api_uri = None
test_matchbox_path = None
test_resources_path = None
tests_path = None
@classmethod
def setUpClass(cls):
cls.tests_path = mkdtemp(dir='/tmp')
cls.test_matchbox_path = os.path.join(cls.tests_path, 'test_matchbox')
cls.test_resources_path = os.path.join(cls.tests_path, 'test_resources')
os.mkdir(cls.test_matchbox_path)
os.mkdir(cls.test_resources_path)
os.mkdir(os.path.join(cls.test_matchbox_path, 'groups'))
cls.api_uri = "http://127.0.0.1:5000"
@classmethod
def tearDownClass(cls):
rmtree(cls.tests_path)
class TestGenerateGroups(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
matchbox_path=cls.test_matchbox_path
)
cls.gen.profiles_path = cls.test_resources_path
def test_instantiate_generate_group_with_incorrect_parameters(self):
with self.assertRaises(TypeError):
generator.GenerateGroup()
def test_instantiate_generate_group_with_non_existing_matchbox_path(self):
with self.assertRaises(OSError):
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path='/foo/bar'
)
def test_instantiate_generate_group(self):
sandbox = mkdtemp(dir='/tmp')
os.mkdir(os.path.join(sandbox, 'groups'))
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path=sandbox
)
rmtree(sandbox)
def test_00_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': '',
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []}
self.gen._metadata()
self.assertEqual(expect['api_uri'], self.gen._target_data["metadata"]["api_uri"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy'
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
matchbox_path=self.test_matchbox_path
)
result = new.generate()
self.assertEqual(expect["profile"], result["profile"])
self.assertEqual(expect["id"], result["id"])
self.assertEqual(expect["name"], result["name"])
self.assertEqual(expect["metadata"]["api_uri"], result["metadata"]["api_uri"])
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
self.assertFalse(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"one": "selector"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorLower(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorUpper(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
matchbox_path=cls.test_matchbox_path
)
def test_00_ip_address(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri, _id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
new.dump()
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsExtraMetadata(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
metadata={"etcd_initial_cluster": "static0=http://192.168.1.1:2379",
"api_seed": "http://192.168.1.2:5000"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': 'static0=http://192.168.1.1:2379',
'api_uri': "%s" % self.gen.api_uri,
'api_seed': 'http://192.168.1.2:5000',
'ssh_authorized_keys': []}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]["ssh_authorized_keys"] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
self.assertTrue(new.dump())
for i in range(10):
self.assertFalse(new.dump())
new.api_uri = "http://google.com"
self.assertTrue(new.dump())
self.assertFalse(new.dump())
| 35.446377 | 93 | 0.568485 | 12,086 | 0.988306 | 0 | 0 | 2,436 | 0.199199 | 0 | 0 | 2,738 | 0.223894 |
c72423d0c9647d3f45e1ae401dca8a26496518f2 | 265 | py | Python | HackerRank/Calendar Module/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
]
| 9 | 2020-07-02T06:06:17.000Z | 2022-02-26T11:08:09.000Z | HackerRank/Calendar Module/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
]
| 1 | 2021-11-04T17:26:36.000Z | 2021-11-04T17:26:36.000Z | HackerRank/Calendar Module/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
]
| 8 | 2021-01-31T10:31:12.000Z | 2022-03-13T09:15:55.000Z | # Enter your code here. Read input from STDIN. Print output to STDOUT
import calendar
mm,dd,yyyy = map(int,input().split())
day = ["MONDAY","TUESDAY","WEDNESDAY","THURSDAY","FRIDAY","SATURDAY","SUNDAY"]
val = calendar.weekday(yyyy, mm, dd)
print(day[val])
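# Sample run: for the input "08 05 2015", calendar.weekday(2015, 8, 5) returns
# 2, so the script prints "WEDNESDAY".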
| 22.083333 | 78 | 0.698113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.501887 |
c7245a8913ae3a1c31f00b1392df9f4dd3d991e9 | 7,560 | py | Python | scale/trigger/models.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
]
| null | null | null | scale/trigger/models.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
]
| null | null | null | scale/trigger/models.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
]
| null | null | null | """Defines the models for trigger rules and events"""
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import models, transaction
from django.utils.timezone import now
class TriggerEventManager(models.Manager):
"""Provides additional methods for handling trigger events
"""
def create_trigger_event(self, trigger_type, rule, description, occurred):
"""Creates a new trigger event and returns the event model. The given rule model, if not None, must have already
been saved in the database (it must have an ID). The returned trigger event model will be saved in the database.
:param trigger_type: The type of the trigger that occurred
:type trigger_type: str
:param rule: The rule that triggered the event, possibly None
:type rule: :class:`trigger.models.TriggerRule`
:param description: The JSON description of the event as a dict
:type description: dict
:param occurred: When the event occurred
:type occurred: :class:`datetime.datetime`
:returns: The new trigger event
:rtype: :class:`trigger.models.TriggerEvent`
"""
if trigger_type is None:
raise Exception('Trigger event must have a type')
if description is None:
raise Exception('Trigger event must have a JSON description')
if occurred is None:
raise Exception('Trigger event must have a timestamp')
event = TriggerEvent()
event.type = trigger_type
event.rule = rule
event.description = description
event.occurred = occurred
event.save()
return event
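# Illustrative sketch of the manager call above; the event type and description
# are hypothetical values:
#     event = TriggerEvent.objects.create_trigger_event(
#         'PARSE', None, {'file_name': 'example.h5'}, now())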
class TriggerEvent(models.Model):
"""Represents an event where a trigger occurred
:keyword type: The type of the trigger that occurred
:type type: :class:`django.db.models.CharField`
:keyword rule: The rule that triggered this event, possibly None (some events are not triggered by rules)
:type rule: :class:`django.db.models.ForeignKey`
:keyword description: JSON description of the event. This will contain fields specific to the type of the trigger
that occurred.
:type description: :class:`django.contrib.postgres.fields.JSONField`
:keyword occurred: When the event occurred
:type occurred: :class:`django.db.models.DateTimeField`
"""
type = models.CharField(db_index=True, max_length=50)
rule = models.ForeignKey('trigger.TriggerRule', blank=True, null=True, on_delete=models.PROTECT)
description = django.contrib.postgres.fields.JSONField(default=dict)
occurred = models.DateTimeField(db_index=True)
objects = TriggerEventManager()
class Meta(object):
"""meta information for the db"""
db_table = 'trigger_event'
class TriggerRuleManager(models.Manager):
"""Provides additional methods for handling trigger rules
"""
@transaction.atomic
def archive_trigger_rule(self, trigger_rule_id):
"""Archives the trigger rule (will no longer be active) with the given ID
:param trigger_rule_id: The ID of the trigger rule to archive
:type trigger_rule_id: int
"""
rule = TriggerRule.objects.select_for_update().get(pk=trigger_rule_id)
rule.is_active = False
rule.archived = now()
rule.save()
def create_trigger_rule(self, trigger_type, configuration, name='', is_active=True):
"""Creates a new trigger rule and returns the rule model. The returned trigger rule model will be saved in the
database.
:param trigger_type: The type of this trigger rule
:type trigger_type: str
:param configuration: The rule configuration
:type configuration: :class:`trigger.configuration.TriggerRuleConfiguration`
:param name: An optional name for the trigger
:type name: str
:param is_active: Whether or not the trigger should be active
:type is_active: bool
:returns: The new trigger rule
:rtype: :class:`trigger.models.TriggerRule`
:raises trigger.configuration.exceptions.InvalidTriggerRule: If the configuration is invalid
"""
if not trigger_type:
raise Exception('Trigger rule must have a type')
if not configuration:
raise Exception('Trigger rule must have a configuration')
configuration.validate()
rule = TriggerRule()
rule.type = trigger_type
rule.name = name
rule.is_active = is_active
rule.configuration = configuration.get_dict()
rule.save()
return rule
def get_by_natural_key(self, name):
"""Django method to retrieve a trigger rule for the given natural key. NOTE: All trigger rule names are NOT
unique. This is implemented to allow the loading of defined system trigger rules which do have unique names.
:param name: The name of the trigger rule
:type name: str
:returns: The trigger rule defined by the natural key
        :rtype: :class:`trigger.models.TriggerRule`
"""
return self.get(name=name)
class TriggerRule(models.Model):
"""Represents a rule that, when triggered, creates a trigger event
:keyword type: The type of the trigger for the rule
:type type: :class:`django.db.models.CharField`
:keyword name: The identifying name of the trigger rule used by clients for queries
:type name: :class:`django.db.models.CharField`
:keyword configuration: JSON configuration for the rule. This will contain fields specific to the type of the
trigger.
:type configuration: :class:`django.contrib.postgres.fields.JSONField`
:keyword is_active: Whether the rule is still active (false once rule is archived)
:type is_active: :class:`django.db.models.BooleanField`
:keyword created: When the rule was created
:type created: :class:`django.db.models.DateTimeField`
:keyword archived: When the rule was archived (no longer active)
:type archived: :class:`django.db.models.DateTimeField`
:keyword last_modified: When the rule was last modified
:type last_modified: :class:`django.db.models.DateTimeField`
"""
type = models.CharField(max_length=50, db_index=True)
name = models.CharField(blank=True, max_length=50)
configuration = django.contrib.postgres.fields.JSONField(default=dict)
is_active = models.BooleanField(default=True, db_index=True)
created = models.DateTimeField(auto_now_add=True)
archived = models.DateTimeField(blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True)
objects = TriggerRuleManager()
def get_configuration(self):
"""Returns the configuration for this trigger rule
:returns: The configuration for this trigger rule
:rtype: :class:`trigger.configuration.trigger_rule.TriggerRuleConfiguration`
:raises :class:`trigger.configuration.exceptions.InvalidTriggerType`: If the trigger type is invalid
"""
from trigger.handler import get_trigger_rule_handler
handler = get_trigger_rule_handler(self.type)
return handler.create_configuration(self.configuration)
def natural_key(self):
"""Django method to define the natural key for a trigger rule as the name
:returns: A tuple representing the natural key
:rtype: tuple(str,)
"""
return (self.name,)
class Meta(object):
"""meta information for the db"""
db_table = 'trigger_rule'
| 38.769231 | 120 | 0.693783 | 7,335 | 0.970238 | 0 | 0 | 433 | 0.057275 | 0 | 0 | 4,776 | 0.631746 |
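# Usage sketch for the trigger models above, assuming a configured Django project with
# these models migrated. "MyRuleConfiguration" and the 'PARSE' trigger type are hypothetical
# placeholders standing in for a concrete TriggerRuleConfiguration subclass and a real type.
from django.utils.timezone import now
# Create and persist a rule; create_trigger_rule() validates the configuration before saving.
rule = TriggerRule.objects.create_trigger_rule(
    trigger_type='PARSE',
    configuration=MyRuleConfiguration({'version': '1.0'}),
    name='my-parse-rule',
    is_active=True,
)
# Record an event against that rule, mirroring the fields the event manager sets above.
event = TriggerEvent()
event.type = 'PARSE'
event.rule = rule
event.description = {'file_name': 'example.h5'}
event.occurred = now()
event.save()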
c724bce6559444b809161c07169a0eaf827f8a70 | 1,125 | py | Python | leetcode/0506_relative_ranks.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | leetcode/0506_relative_ranks.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | leetcode/0506_relative_ranks.py | chaosWsF/Python-Practice | ff617675b6bcd125933024bb4c246b63a272314d | [
"BSD-2-Clause"
]
| null | null | null | """
Given scores of N athletes, find their relative ranks and the people with the top
three highest scores, who will be awarded medals: "Gold Medal", "Silver Medal" and
"Bronze Medal".
Example 1:
Input: [5, 4, 3, 2, 1]
Output: ["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"]
Explanation: The first three athletes got the top three highest scores, so they
got "Gold Medal", "Silver Medal" and "Bronze Medal". For the left two athletes,
you just need to output their relative ranks according to their scores.
Note:
N is a positive integer and won't exceed 10,000.
All the scores of athletes are guaranteed to be unique.
"""
class Solution:
def findRelativeRanks(self, nums):
scores_rank = sorted(nums, reverse=True)
d = {}
for i, score in enumerate(scores_rank):
if i == 0:
d[score] = 'Gold Medal'
elif i == 1:
d[score] = 'Silver Medal'
elif i == 2:
d[score] = 'Bronze Medal'
else:
d[score] = str(i + 1)
return [d[x] for x in nums]
| 32.142857 | 84 | 0.593778 | 456 | 0.405333 | 0 | 0 | 0 | 0 | 0 | 0 | 705 | 0.626667 |
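# Quick checks for the solution above: the top three scores map to medals and the rest to
# their 1-based rank as strings.
assert Solution().findRelativeRanks([5, 4, 3, 2, 1]) == [
    "Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"]
assert Solution().findRelativeRanks([10, 3, 8, 9, 4]) == [
    "Gold Medal", "5", "Bronze Medal", "Silver Medal", "4"]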
c724c503b44eb473d695fa13f0446956650e0c2b | 987 | py | Python | barriers/models/history/assessments/economic_impact.py | felix781/market-access-python-frontend | 3b0e49feb4fdf0224816326938a46002aa4a2b1c | [
"MIT"
]
| 1 | 2021-12-15T04:14:03.000Z | 2021-12-15T04:14:03.000Z | barriers/models/history/assessments/economic_impact.py | felix781/market-access-python-frontend | 3b0e49feb4fdf0224816326938a46002aa4a2b1c | [
"MIT"
]
| 19 | 2019-12-11T11:32:47.000Z | 2022-03-29T15:40:57.000Z | barriers/models/history/assessments/economic_impact.py | felix781/market-access-python-frontend | 3b0e49feb4fdf0224816326938a46002aa4a2b1c | [
"MIT"
]
| 2 | 2021-02-09T09:38:45.000Z | 2021-03-29T19:07:09.000Z | from ..base import BaseHistoryItem, GenericHistoryItem
from ..utils import PolymorphicBase
class ArchivedHistoryItem(BaseHistoryItem):
field = "archived"
field_name = "Valuation assessment: Archived"
def get_value(self, value):
if value is True:
return "Archived"
elif value is False:
return "Unarchived"
class ExplanationHistoryItem(BaseHistoryItem):
field = "explanation"
field_name = "Valuation assessment: Explanation"
class ImpactHistoryItem(BaseHistoryItem):
field = "impact"
field_name = "Valuation assessment: Impact"
def get_value(self, value):
if value:
return value.get("name")
class EconomicImpactAssessmentHistoryItem(PolymorphicBase):
model = "economic_impact_assessment"
key = "field"
subclasses = (
ArchivedHistoryItem,
ExplanationHistoryItem,
ImpactHistoryItem,
)
default_subclass = GenericHistoryItem
class_lookup = {}
| 24.675 | 59 | 0.68997 | 884 | 0.895643 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.193516 |
c72537aa56e0fec5c2e19ae544ffe17dd652b46b | 727 | py | Python | link_prob_show.py | Rheinwalt/spatial-effects-networks | 7b77a22b45341b024a57e1759b7b61cd91d90849 | [
"MIT"
]
| 3 | 2018-12-21T20:19:18.000Z | 2021-01-02T12:58:56.000Z | link_prob_show.py | rick-foo/spatial-effects-networks | 7b77a22b45341b024a57e1759b7b61cd91d90849 | [
"MIT"
]
| null | null | null | link_prob_show.py | rick-foo/spatial-effects-networks | 7b77a22b45341b024a57e1759b7b61cd91d90849 | [
"MIT"
]
| 2 | 2020-09-03T14:18:37.000Z | 2021-10-01T18:06:42.000Z | import sys
import numpy as np
from sern import *
ids, lon, lat = np.loadtxt('nodes', unpack = True)
links = np.loadtxt('links', dtype = 'int')
A, b = AdjacencyMatrix(ids, links)
lon, lat = lon[b], lat[b]
n = A.shape[0]
# LinkProbability expects A as triu
A = A[np.triu_indices(n, 1)]
# play around with the scale, maybe you don't need log binning?
D, x = IntegerDistances(lat, lon, scale = 50)
p = LinkProbability(A, D)
from matplotlib import pyplot as pl
pl.plot(p, 'bo')
pl.ylabel('Link probability given distance')
pl.xlabel('Bin number')
pl.savefig('link_prob_bin.png')
pl.close('all')
pl.semilogx(x, p, 'bo')
pl.ylabel('Link probability given distance')
pl.xlabel('Distance [km]')
pl.savefig('link_prob_distance.png')
| 25.964286 | 63 | 0.707015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.365887 |
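# Standalone numpy sketch of the quantity plotted above: the link probability per distance
# bin, estimated by averaging upper-triangular adjacency entries within each bin. The node
# coordinates and the distance-decaying random adjacency here are synthetic placeholders
# rather than the 'nodes'/'links' files read by the script.
import numpy as np
rng = np.random.default_rng(0)
n = 200
xy = rng.uniform(0, 100, size=(n, 2))                            # synthetic coordinates
d = np.linalg.norm(xy[:, None, :] - xy[None, :, :], axis=-1)     # pairwise distances
dist = d[np.triu_indices(n, 1)]                                  # triu, as LinkProbability expects
adj = (rng.random(dist.shape) < np.exp(-dist / 20)).astype(int)  # synthetic links
bins = np.digitize(dist, np.linspace(0, dist.max(), 25))
prob = np.array([adj[bins == b].mean() for b in np.unique(bins)])
print(prob)  # one link-probability estimate per distance bin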
c7268aa939534725180b033986da1a690622e70b | 3,899 | py | Python | controller/components/app.py | isabella232/flight-lab | bd666b1d2bcec6f928a2e8da9f13fd5dae21319f | [
"Apache-2.0"
]
| 15 | 2018-10-18T07:50:46.000Z | 2021-10-21T03:40:55.000Z | controller/components/app.py | google/flight-lab | bd666b1d2bcec6f928a2e8da9f13fd5dae21319f | [
"Apache-2.0"
]
| 9 | 2018-09-17T23:00:02.000Z | 2019-01-22T21:08:04.000Z | controller/components/app.py | isabella232/flight-lab | bd666b1d2bcec6f928a2e8da9f13fd5dae21319f | [
"Apache-2.0"
]
| 12 | 2019-01-07T12:43:37.000Z | 2021-10-21T03:40:44.000Z | # Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for components related to running apps."""
import subprocess
import threading
from components import base
from protos import controller_pb2
from utils import app
class AppComponent(base.Component):
"""Component to run command-line based app on any platform.
This component can start app, restart app upon crash, and stop app.
Events:
"status_changed": when status of the app is changed.
Args:
app_component: instance of this class.
"""
def __init__(self, proto, *args, **kwargs):
"""Initializes the component.
Args:
proto: flightlab.App proto defining app details and options.
"""
super(AppComponent, self).__init__(proto, *args, **kwargs)
self._app = app.Application(
name=self.name,
bin_path=self.settings.executable_path,
arguments=(list(self.settings.arguments)
if self.settings.arguments else []),
working_dir=self.settings.working_dir,
restart_on_crash=(self.settings.restart_on_crash
if self.settings.restart_on_crash else False),
env=(self.settings.env if self.settings.env else None))
self._app.on('started', self._on_app_started)
self._app.on('stopped', self._on_app_stopped)
self._monitor = threading.Timer(1, self._check_status)
self._monitor.start()
def close(self):
if self._monitor:
self._monitor.cancel()
self._monitor = None
self._app.stop()
def _check_status(self):
if self._app.has_running_instance():
component_status = controller_pb2.Component.ON
app_status = controller_pb2.App.RUNNING
else:
component_status = controller_pb2.Component.OFF
app_status = controller_pb2.App.NOT_RUNNING
if (self.proto.status != component_status or
self.settings.status != app_status):
self.proto.status = component_status
self.settings.status = app_status
self.emit('status_changed', self)
def _start(self):
self.logger.info('[App - {0}] Starting...'.format(self.name))
self._app.start()
def _stop(self):
self.logger.info('[App - {0}] Stopping...'.format(self.name))
self._app.stop()
def _restart(self):
self._stop()
self._start()
def _on_app_started(self, app):
self.logger.info('[App - {0}] Started.'.format(self.name))
self.settings.status = controller_pb2.App.RUNNING
self.proto.status = controller_pb2.Component.ON
self.emit('status_changed', self)
def _on_app_stopped(self, app):
self.logger.info('[App - {0}] Stopped.'.format(self.name))
self.settings.status = controller_pb2.App.NOT_RUNNING
self.proto.status = controller_pb2.Component.OFF
self.emit('status_changed', self)
class CommandLineComponent(base.Component):
"""Component to run command-line based apps on any platform."""
def _start(self):
for cmd in self.settings.when_on:
self.logger.info('[{0}] Running: {1}'.format(self.name, cmd))
ret = subprocess.call(cmd)
self.logger.info('[{0}] Done (return code={1})'.format(self.name, ret))
def _stop(self):
for cmd in self.settings.when_off:
self.logger.info('[{0}] Running: {1}'.format(self.name, cmd))
ret = subprocess.call(cmd)
self.logger.info('[{0}] Done (return code={1})'.format(self.name, ret)) | 33.612069 | 77 | 0.691459 | 3,136 | 0.804309 | 0 | 0 | 0 | 0 | 0 | 0 | 1,324 | 0.339574 |
c727467c9c5f9cbcf49804ff4103bf27f2140c3f | 1,504 | py | Python | botorch/acquisition/__init__.py | jmren168/botorch | 6c067185f56d3a244c4093393b8a97388fb1c0b3 | [
"MIT"
]
| 1 | 2020-03-29T20:06:45.000Z | 2020-03-29T20:06:45.000Z | botorch/acquisition/__init__.py | jmren168/botorch | 6c067185f56d3a244c4093393b8a97388fb1c0b3 | [
"MIT"
]
| null | null | null | botorch/acquisition/__init__.py | jmren168/botorch | 6c067185f56d3a244c4093393b8a97388fb1c0b3 | [
"MIT"
]
| 1 | 2020-03-29T20:06:48.000Z | 2020-03-29T20:06:48.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .acquisition import AcquisitionFunction
from .analytic import (
AnalyticAcquisitionFunction,
ConstrainedExpectedImprovement,
ExpectedImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
UpperConfidenceBound,
)
from .fixed_feature import FixedFeatureAcquisitionFunction
from .monte_carlo import (
MCAcquisitionFunction,
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from .objective import (
ConstrainedMCObjective,
GenericMCObjective,
IdentityMCObjective,
LinearMCObjective,
MCAcquisitionObjective,
ScalarizedObjective,
)
from .utils import get_acquisition_function
__all__ = [
"AcquisitionFunction",
"AnalyticAcquisitionFunction",
"ConstrainedExpectedImprovement",
"ExpectedImprovement",
"FixedFeatureAcquisitionFunction",
"NoisyExpectedImprovement",
"PosteriorMean",
"ProbabilityOfImprovement",
"UpperConfidenceBound",
"qExpectedImprovement",
"qNoisyExpectedImprovement",
"qProbabilityOfImprovement",
"qSimpleRegret",
"qUpperConfidenceBound",
"ConstrainedMCObjective",
"GenericMCObjective",
"IdentityMCObjective",
"LinearMCObjective",
"MCAcquisitionFunction",
"MCAcquisitionObjective",
"ScalarizedObjective",
"get_acquisition_function",
]
| 25.491525 | 70 | 0.757979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.40492 |
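# A short usage sketch for the names re-exported above, assuming torch and botorch are
# installed and that SingleTaskGP is available from botorch.models; shapes follow the usual
# `batch x q x d` convention for analytic acquisition functions.
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import ExpectedImprovement
train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
ei = ExpectedImprovement(model, best_f=train_Y.max())
values = ei(torch.rand(3, 1, 2, dtype=torch.double))  # one EI value per t-batch element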
c72c87715b18d844a4d1e6b4c82ec44a40f2bde2 | 2,810 | py | Python | examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
]
| 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
]
| 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
]
| 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | """Randomize the minitaur_gym_alternating_leg_env when reset() is called.
The randomization includes the swing_offset and extension_offset of all legs (mimicking
bent legs), the desired_pitch from user input, the battery voltage and the motor damping.
"""
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import tensorflow.compat.v1 as tf
from pybullet_envs.minitaur.envs import env_randomizer_base
# Absolute range.
NUM_LEGS = 4
BATTERY_VOLTAGE_RANGE = (14.8, 16.8)
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01)
class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
"""A randomizer that changes the minitaur_gym_alternating_leg_env."""
def __init__(self,
perturb_swing_bound=0.1,
perturb_extension_bound=0.1,
perturb_desired_pitch_bound=0.01):
super(MinitaurAlternatingLegsEnvRandomizer, self).__init__()
self.perturb_swing_bound = perturb_swing_bound
self.perturb_extension_bound = perturb_extension_bound
self.perturb_desired_pitch_bound = perturb_desired_pitch_bound
def randomize_env(self, env):
perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound,
high=self.perturb_swing_bound,
size=NUM_LEGS)
env.set_swing_offset(perturb_magnitude)
tf.logging.info("swing_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound,
high=self.perturb_extension_bound,
size=NUM_LEGS)
env.set_extension_offset(perturb_magnitude)
tf.logging.info("extension_offset: {}".format(perturb_magnitude))
perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound,
high=self.perturb_desired_pitch_bound)
env.set_desired_pitch(perturb_magnitude)
tf.logging.info("desired_pitch: {}".format(perturb_magnitude))
randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0],
BATTERY_VOLTAGE_RANGE[1])
env.minitaur.SetBatteryVoltage(randomized_battery_voltage)
tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage))
randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
MOTOR_VISCOUS_DAMPING_RANGE[1])
env.minitaur.SetMotorViscousDamping(randomized_motor_damping)
tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
| 45.322581 | 86 | 0.70605 | 2,102 | 0.748043 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.149822 |
c72ca1c8b4319d09d601fa708b5ddc14cb8e0859 | 14,704 | py | Python | pygsti/modelmembers/states/tensorprodstate.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
]
| 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | pygsti/modelmembers/states/tensorprodstate.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
]
| 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | pygsti/modelmembers/states/tensorprodstate.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
]
| 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | """
The TensorProductState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import numpy as _np
from pygsti.modelmembers.states.state import State as _State
from pygsti.modelmembers import modelmember as _modelmember, term as _term
from pygsti.baseobjs import statespace as _statespace
from pygsti.tools import listtools as _lt
from pygsti.tools import matrixtools as _mt
class TensorProductState(_State):
"""
A state vector that is a tensor-product of other state vectors.
Parameters
----------
factors : list of States
a list of the component states to take the tensor product of.
state_space : StateSpace, optional
The state space for this operation.
"""
def __init__(self, factors, state_space):
assert(len(factors) > 0), "Must have at least one factor!"
self.factors = factors # do *not* copy - needs to reference common objects
evotype = self.factors[0]._evotype
rep = evotype.create_tensorproduct_state_rep([f._rep for f in factors], state_space)
_State.__init__(self, rep, evotype)
self.init_gpindices() # initialize our gpindices based on sub-members
self._update_rep() # initializes rep data
#Note: no to_memoized_dict needed, as ModelMember version does all we need.
@classmethod
def _from_memoized_dict(cls, mm_dict, serial_memo):
state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space'])
factors = [serial_memo[i] for i in mm_dict['submembers']]
return cls(factors, state_space)
def submembers(self):
"""
Get the ModelMember-derived objects contained in this one.
Returns
-------
list
"""
return self.factors # factor POVM object
def _update_rep(self):
self._rep.reps_have_changed()
@property
def parameter_labels(self):
"""
An array of labels (usually strings) describing this model member's parameters.
"""
vl = _np.empty(self.num_params, dtype=object)
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
vl[factor_local_inds] = factor_state.parameter_labels
return vl
def to_dense(self, on_space='minimal', scratch=None):
"""
Return this state vector as a (dense) numpy array.
The memory in `scratch` maybe used when it is not-None.
Parameters
----------
on_space : {'minimal', 'Hilbert', 'HilbertSchmidt'}
The space that the returned dense operation acts upon. For unitary matrices and bra/ket vectors,
use `'Hilbert'`. For superoperator matrices and super-bra/super-ket vectors use `'HilbertSchmidt'`.
`'minimal'` means that `'Hilbert'` is used if possible given this operator's evolution type, and
otherwise `'HilbertSchmidt'` is used.
scratch : numpy.ndarray, optional
scratch space available for use.
Returns
-------
numpy.ndarray
"""
return self._rep.to_dense(on_space)
def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False):
"""
Get the `order`-th order Taylor-expansion terms of this state vector.
This function either constructs or returns a cached list of the terms at
the given order. Each term is "rank-1", meaning that it is a state
preparation followed by or POVM effect preceded by actions on a
density matrix `rho` of the form:
`rho -> A rho B`
The coefficients of these terms are typically polynomials of the
State's parameters, where the polynomial's variable indices index the
*global* parameters of the State's parent (usually a :class:`Model`)
, not the State's local parameter array (i.e. that returned from
`to_vector`).
Parameters
----------
order : int
The order of terms to get.
max_polynomial_vars : int, optional
maximum number of variables the created polynomials can have.
return_coeff_polys : bool
Whether a parallel list of locally-indexed (using variable indices
corresponding to *this* object's parameters rather than its parent's)
polynomial coefficients should be returned as well.
Returns
-------
terms : list
A list of :class:`RankOneTerm` objects.
coefficients : list
Only present when `return_coeff_polys == True`.
A list of *compact* polynomial objects, meaning that each element
is a `(vtape,ctape)` 2-tuple formed by concatenating together the
output of :method:`Polynomial.compact`.
"""
terms = []
fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor
# assumes density matrix evolution
total_nQ = sum(fnq) # total number of qubits
for p in _lt.partition_into(order, len(self.factors)):
factor_lists = [self.factors[i].taylor_order_terms(pi, max_polynomial_vars) for i, pi in enumerate(p)]
# When possible, create COLLAPSED factor_lists so each factor has just a single
# (State) pre & post op, which can be formed into the new terms'
# TensorProdState ops.
# - DON'T collapse stabilizer states & clifford ops - can't for POVMs
collapsible = False # bool(self._evotype =="svterm") # need to use reps for collapsing now... TODO?
if collapsible:
factor_lists = [[t.collapse_vec() for t in fterms] for fterms in factor_lists]
for factors in _itertools.product(*factor_lists):
# create a term with a TensorProdState - Note we always create
# "prep"-mode vectors, since even when self._prep_or_effect == "effect" these
# vectors are created with factor (prep- or effect-type) States not factor POVMs
# we workaround this by still allowing such "prep"-mode
# TensorProdStates to be represented as effects (i.e. in torep('effect'...) works)
coeff = _functools.reduce(lambda x, y: x.mult(y), [f.coeff for f in factors])
pre_rep = self._evotype.create_tensorproduct_state_rep(
[f.pre_state for f in factors if (f.pre_state is not None)], self.state_space)
post_rep = self._evotype.create_tensorproduct_state_rep(
[f.post_state for f in factors if (f.post_state is not None)], self.state_space)
term = _term.RankOnePolynomialPrepTerm.create_from(coeff, pre_rep, post_rep,
self._evotype, self.state_space)
if not collapsible: # then may need to add more ops. Assume factor ops are clifford gates
# Embed each factors ops according to their target qubit(s) and just daisy chain them
ss = _statespace.QubitSpace(total_nQ); curQ = 0
for f, nq in zip(factors, fnq):
targetLabels = tuple(range(curQ, curQ + nq)); curQ += nq
term._rep.pre_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.pre_ops]) # embed and add ops
term._rep.post_ops.extend([self._evotype.create_embedded_rep(ss, targetLabels, op)
for op in f.post_ops]) # embed and add ops
terms.append(term)
if return_coeff_polys:
def _decompose_indices(x):
return tuple(_modelmember._decompose_gpindices(
self.gpindices, _np.array(x, _np.int64)))
poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices
tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
if len(tapes) > 0:
vtape = _np.concatenate([t[0] for t in tapes])
ctape = _np.concatenate([t[1] for t in tapes])
else:
vtape = _np.empty(0, _np.int64)
ctape = _np.empty(0, complex)
coeffs_as_compact_polys = (vtape, ctape)
#self.local_term_poly_coeffs[order] = coeffs_as_compact_polys #FUTURE?
return terms, coeffs_as_compact_polys
else:
return terms # Cache terms in FUTURE?
@property
def num_params(self):
"""
Get the number of independent parameters which specify this state vector.
Returns
-------
int
the number of independent parameters.
"""
return len(self.gpindices_as_array())
def to_vector(self):
"""
Get the state vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
v = _np.empty(self.num_params, 'd')
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
v[factor_local_inds] = factor_state.to_vector()
return v
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the state vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of state vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this state vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
for factor_state, factor_local_inds in zip(self.factors, self._submember_rpindices):
factor_state.from_vector(v[factor_local_inds], close, dirty_value)
#Update representation, which may be a dense matrix or
# just fast-kron arrays or a stabilizer state.
self._update_rep() # TODO - how does this apply to state reps??
def deriv_wrt_params(self, wrt_filter=None):
"""
The element-wise derivative this state vector.
Construct a matrix whose columns are the derivatives of the state vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per state vector parameter.
An empty 2D array in the StaticState case (num_params == 0).
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
(None means to use all the this operation's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
typ = self.factors[0].to_dense(on_space='minimal').dtype if len(self.factors) > 0 else 'd'
#HACK to deal with fact that output of to_dense is really what is differentiated
# but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs.
dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors]
dim = int(_np.product(dims))
derivMx = _np.zeros((dim, self.num_params), typ)
#Product rule to compute jacobian
# loop over the spamvec/povm we differentiate wrt:
for i, (fct, fct_local_inds, fct_dim) in enumerate(zip(self.factors, self._submember_rpindices, dims)):
vec = fct
if vec.num_params == 0: continue # no contribution
deriv = vec.deriv_wrt_params(None) # TODO: use filter?? / make relative to this gate...
deriv.shape = (fct_dim, vec.num_params)
if i > 0: # factors before ith
pre = self.factors[0].to_dense(on_space='minimal')
for vecA in self.factors[1:i]:
pre = _np.kron(pre, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(pre[:, None], deriv) # add a dummy 1-dim to 'pre' and do kron properly...
if i + 1 < len(self.factors): # factors after ith
post = self.factors[i + 1].to_dense(on_space='minimal')
for vecA in self.factors[i + 2:]:
post = _np.kron(post, vecA.to_dense(on_space='minimal'))
deriv = _np.kron(deriv, post[:, None]) # add a dummy 1-dim to 'post' and do kron properly...
assert(fct_local_inds is not None), \
"Error: gpindices has not been initialized for factor %d - cannot compute derivative!" % i
derivMx[:, fct_local_inds] += deriv
derivMx.shape = (dim, self.num_params) # necessary?
if wrt_filter is None:
return derivMx
else:
return _np.take(derivMx, wrt_filter, axis=1)
def has_nonzero_hessian(self):
"""
Whether this state vector has a non-zero Hessian with respect to its parameters.
Returns
-------
bool
"""
return False
def __str__(self):
s = "Tensor product %s vector with length %d\n" % (self._prep_or_effect, self.dim)
#ar = self.to_dense()
#s += _mt.mx_to_string(ar, width=4, prec=2)
# factors are just other States
s += " x ".join([_mt.mx_to_string(fct.to_dense(on_space='minimal'), width=4, prec=2) for fct in self.factors])
return s
| 42.994152 | 118 | 0.609698 | 13,589 | 0.92417 | 0 | 0 | 934 | 0.06352 | 0 | 0 | 7,913 | 0.538153 |
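# Worked numpy sketch (outside of pyGSTi) of two ideas used by TensorProductState above: the
# dense vector of a tensor-product state is the Kronecker product of its factor vectors, and
# the Jacobian follows the product rule in deriv_wrt_params -- a factor's own Jacobian is
# kron'd with the dense vectors of the factors before/after it. The 2-element factors are
# arbitrary placeholders.
import numpy as np
f0 = np.array([0.9, 0.1])             # factor 0 (held fixed here)
f1 = np.array([0.2, 0.8])             # factor 1, assumed to own 2 parameters
dense = np.kron(f0, f1)               # dense tensor-product state vector, length 4
d_f1 = np.eye(2)                      # toy Jacobian of factor 1 w.r.t. its parameters
d_dense = np.kron(f0[:, None], d_f1)  # product rule: prepend factor 0 via kron
assert d_dense.shape == (4, 2)        # (dimension, num_params of factor 1)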
c72d167470fc1e484c9ed6ee92db56b541a26d0c | 3,216 | py | Python | edivorce/apps/core/views/graphql.py | gerritvdm/eDivorce | e3c0a4037a7141769250b96df6cc4eb4ea5ef3af | [
"Apache-2.0"
]
| 6 | 2017-03-24T18:20:33.000Z | 2021-01-29T03:25:07.000Z | edivorce/apps/core/views/graphql.py | gerritvdm/eDivorce | e3c0a4037a7141769250b96df6cc4eb4ea5ef3af | [
"Apache-2.0"
]
| 13 | 2018-10-12T17:20:37.000Z | 2021-11-05T23:13:21.000Z | edivorce/apps/core/views/graphql.py | gerritvdm/eDivorce | e3c0a4037a7141769250b96df6cc4eb4ea5ef3af | [
"Apache-2.0"
]
| 11 | 2017-03-15T12:36:39.000Z | 2021-03-05T14:35:59.000Z | import graphene
import graphene_django
from django.http import HttpResponseForbidden
from graphene_django.views import GraphQLView
from graphql import GraphQLError
from edivorce.apps.core.models import Document
class PrivateGraphQLView(GraphQLView):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return HttpResponseForbidden()
return super().dispatch(request, *args, **kwargs)
class DocumentType(graphene_django.DjangoObjectType):
file_url = graphene.String(source='get_file_url')
content_type = graphene.String(source='get_content_type')
class Meta:
model = Document
exclude = ('id', 'file')
class Query(graphene.ObjectType):
documents = graphene.List(DocumentType, doc_type=graphene.String(required=True), party_code=graphene.Int(required=True))
def resolve_documents(self, info, **kwargs):
if info.context.user.is_anonymous:
raise GraphQLError('Unauthorized')
q = Document.objects.filter(bceid_user=info.context.user, **kwargs)
for doc in q:
if not doc.file_exists():
q.delete()
return Document.objects.none()
return q
class DocumentInput(graphene.InputObjectType):
filename = graphene.String(required=True)
size = graphene.Int(required=True)
width = graphene.Int()
height = graphene.Int()
rotation = graphene.Int()
class DocumentMetaDataInput(graphene.InputObjectType):
files = graphene.List(DocumentInput, required=True)
doc_type = graphene.String(required=True)
party_code = graphene.Int(required=True)
class UpdateMetadata(graphene.Mutation):
class Arguments:
input = DocumentMetaDataInput(required=True)
documents = graphene.List(DocumentType)
def mutate(self, info, **kwargs):
input_ = kwargs['input']
documents = Document.objects.filter(bceid_user=info.context.user, doc_type=input_['doc_type'], party_code=input_['party_code'])
unique_files = [dict(s) for s in set(frozenset(d.items()) for d in input_['files'])]
if documents.count() != len(input_['files']) or documents.count() != len(unique_files):
raise GraphQLError("Invalid input: there must be the same number of files")
for i, file in enumerate(input_['files']):
try:
doc = documents.get(filename=file['filename'], size=file['size'])
doc.sort_order = i + 1
doc.width = file.get('width', doc.width)
doc.height = file.get('height', doc.height)
doc.rotation = file.get('rotation', doc.rotation)
if doc.rotation not in [0, 90, 180, 270]:
raise GraphQLError(f"Invalid rotation {doc.rotation}, must be 0, 90, 180, 270")
doc.save()
except Document.DoesNotExist:
raise GraphQLError(f"Couldn't find document '{file['filename']}' with size '{file['size']}'")
return UpdateMetadata(documents=documents.all())
class Mutations(graphene.ObjectType):
update_metadata = UpdateMetadata.Field()
graphql_schema = graphene.Schema(query=Query, mutation=Mutations)
| 36.545455 | 135 | 0.668221 | 2,915 | 0.906405 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.103856 |
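# Self-contained graphene sketch of the guard pattern used by resolve_documents above:
# resolvers read the requesting user from info.context and raise GraphQLError for anonymous
# callers. FakeUser/FakeContext are stand-ins for Django's request object, and the `context`
# keyword follows the graphene Schema.execute API.
import graphene
from graphql import GraphQLError
class FakeUser(object):
    is_anonymous = False
class FakeContext(object):
    user = FakeUser()
class DemoQuery(graphene.ObjectType):
    greeting = graphene.String()
    def resolve_greeting(self, info):
        if info.context.user.is_anonymous:
            raise GraphQLError('Unauthorized')
        return 'hello'
demo_schema = graphene.Schema(query=DemoQuery)
result = demo_schema.execute('{ greeting }', context=FakeContext())
print(result.data)  # {'greeting': 'hello'}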
c72e729bd791fda04d1f1bf87cc60496068da071 | 5,862 | py | Python | amazing/maze.py | danieloconell/maze-solver | f60e476d827d59bfa17cd2148787332707846882 | [
"MIT"
]
| null | null | null | amazing/maze.py | danieloconell/maze-solver | f60e476d827d59bfa17cd2148787332707846882 | [
"MIT"
]
| 2 | 2021-06-08T19:35:19.000Z | 2021-09-08T00:44:59.000Z | amazing/maze.py | danieloconell/amazing | f60e476d827d59bfa17cd2148787332707846882 | [
"MIT"
]
| null | null | null | from .exceptions import MazeNotSolved, AlgorithmNotFound
from .dijkstra import Dijkstra
from .astar import Astar
from functools import wraps
import warnings
from daedalus import Maze as _maze
from PIL import Image
warnings.simplefilter("once", UserWarning)
class Maze:
"""
Create a maze and solve it.
Available algorithms:
dijkstra
astar (WIP)
Steps:
1. Create maze using the daedalus library.
2. Convert maze to graph.
3. Solve maze with algorithm.
"""
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
RED = (255, 0, 0)
def __init__(self, width, height, algorithm="dijkstra"):
"""Set algorithm to be used when solving.
Args:
algorithm (str) to be used when solving maze
width (int) of maze in pixels
height (int) of maze in pixels
"""
self.algorithm = algorithm
if not width % 2 or not height % 2:
warnings.warn(
"Using even width or height, use even numbers for optimal images"
)
self._create_maze(width, height)
self._create_graph()
self.width = width
self.height = height
def _create_maze(self, width, height):
"""Make maze to be solved and add border to maze.
Args:
width (int) of maze
height (int) of maze
"""
# create maze
self.maze = _maze(width, height)
self.maze.create_perfect()
# define maze variables
self.entrance = self.maze.entrance
self.exit = self.maze.exit
# add index to maze
self.maze = {
row_i: {item_i: item for item_i, item in enumerate(row)}
for row_i, row in enumerate(self.maze)
}
def _create_graph(self):
"""Remove unnecessary states from maze and convert maze to graph to be
solved."""
self.graph = {}
# convert to graph
for column in self.maze.keys():
for row in self.maze[column].keys():
item = self.maze[column][row]
if item != 1:
neighbours = []
try:
if self.maze[column][row - 1] != 1:
neighbours.append(["left", (column, row - 1)])
except KeyError:
None
try:
if self.maze[column][row + 1] != 1:
neighbours.append(["right", (column, row + 1)])
except KeyError:
None
try:
if self.maze[column - 1][row] != 1:
neighbours.append(["above", (column - 1, row)])
except KeyError:
None
try:
if self.maze[column + 1][row] != 1:
neighbours.append(["below", (column + 1, row)])
except KeyError:
None
self.graph[(column, row)] = {x[:][1]: 1 for x in neighbours}
# TODO: remove unnecessary states
def _maze_maker(file_name):
def real_decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
data = []
for row_i, row in enumerate(list(self.maze)):
for item_i, item in enumerate(self.maze[row].values()):
func(self, data, item, row_i=row_i, item_i=item_i)
# save maze
image = Image.new("RGB", (self.width, self.height))
image.putdata(data)
image.save(file_name)
return wrapper
return real_decorator
@_maze_maker("maze.png")
def save(self, data, item, row_i=None, item_i=None):
"""Save maze locally as an image."""
        # walls (truthy cells) are drawn black, passages white
        if item:
            data.append(self.BLACK)
        else:
            data.append(self.WHITE)
def solve(self):
""" Solve maze using specified algorithm.
Returns:
shortest path as a queue from start to finish of maze
"""
if self.algorithm == "astar":
algorithm = Astar()
elif self.algorithm == "dijkstra":
algorithm = Dijkstra()
else:
raise AlgorithmNotFound(
f"Invalid algorithm: {self.algorithm}. See help({type(self).__name__}) for available algorithms."
)
# add nodes to graph
for node in self.graph:
algorithm.add_node(node, self.graph[node])
# pydaedalus stores y then x value which need to be reversed
self.entrance = tuple(reversed(self.entrance))
self.exit = tuple(reversed(self.exit))
self.path = algorithm.shortest_path(self.entrance, self.exit)
@_maze_maker("solution.png")
def save_solution(self, data, item, row_i=None, item_i=None):
"""Save maze image and the shortest path."""
if not hasattr(self, "path"):
raise MazeNotSolved(
f"Maze must be solved to save solution. Run {type(self).__name__}.solve() first."
)
if (row_i, item_i) in self.path:
data.append(self.RED)
        elif item:
            data.append(self.BLACK)
        else:
            data.append(self.WHITE)
def __str__(self):
"""Just cause it looks nice."""
string = []
for row in self.maze:
string.append(["█" if item else " " for item in self.maze[row].values()])
return "\n".join(["".join(line) for line in string])
def __repr__(self):
"""Easier on the eyes."""
return f"Maze(algorithm='{self.algorithm}', width={self.width}, height={self.height})"
| 32.932584 | 114 | 0.525589 | 5,601 | 0.95515 | 0 | 0 | 1,257 | 0.214359 | 0 | 0 | 1,608 | 0.274216 |
c72eaa2b73efe739c3a50690c7c96660b59023bd | 4,215 | py | Python | config.py | FarbodFarhangfar/midi_player_python | 924cd164b7867d294c761a70d06ab330fa1b8373 | [
"MIT"
]
| null | null | null | config.py | FarbodFarhangfar/midi_player_python | 924cd164b7867d294c761a70d06ab330fa1b8373 | [
"MIT"
]
| null | null | null | config.py | FarbodFarhangfar/midi_player_python | 924cd164b7867d294c761a70d06ab330fa1b8373 | [
"MIT"
]
| null | null | null | import os
def get_note_dic():
_note_dic = {'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3, 'E': 4, 'F': 5, 'F#': 6,
'Gb': 6, 'G': 7,
'G#': 8, 'Ab': 8, 'A': 9, 'A#': 10, 'Bb': 10, 'B': 11}
return _note_dic
def get_value_list():
values = {"16": 16, "8": 8, "4": 4, "2": 2, "1": 1, "0.5": 0.5, "1/2": 0.5, "0.25": 0.25, "1/4": 0.25,
"0.125": 0.125, "1/8": 0.125, "0.0625": 0.0625, "1/16": 0.0625,
"0.03125": 0.03125, "1/32": 0.03125}
return values
def instruments(inst):
instruments_dict = {
# Piano
'Acoustic Grand Piano': '1', 'Bright Acoustic Piano': '2', 'Electric Grand Piano': '3',
'Honky-tonk Piano': '4',
'Electric Piano 1': '5', 'Electric Piano 2': '6', 'Harpsichord': '7', 'Clavi': '8',
# Chromatic Percussion
'Celesta': '9',
'Glockenspiel': '10', 'Music Box': '11', 'Vibraphone': '12', 'Marimba': '13', 'Xylophone': '14',
'Tubular Bells': '15', 'Dulcimer': '16',
# Organ
'Drawbar Organ': '17', 'Percussive Organ': '18',
'Rock Organ': '19',
'Church Organ': '20', 'Reed Organ': '21', 'Accordion': '22', 'Harmonica': '23',
'Tango Accordion': '24',
# Guitar
'Acoustic Guitar (nylon)': '25', 'Acoustic Guitar (steel)': '26',
'Electric Guitar (jazz)': '27',
'Electric Guitar (clean)': '28', 'Electric Guitar (muted)': '29', 'Overdriven Guitar': '30',
'Distortion Guitar': '31', 'Guitar Harmonics': '32',
# Bass
'Acoustic Bass': '33',
'Electric Bass (finger)': '34',
'Electric Bass (pick)': '35', 'Fretless Bass': '36', 'Slap Bass 1': '37', 'Slap Bass 2': '38',
'Synth Bass 1': '39', 'Synth Bass 2': '40',
# Strings
'Violin': '41', 'Viola': '42', 'Cello': '43',
'Contrabass': '44',
'Tremolo Strings': '45', 'Pizzicato Strings': '46', 'Orchestral Harp': '47', 'Timpani': '48',
# Ensemble
'String Ensemble 1': '49', 'String Ensemble 2': '50', 'Synth Strings 1': '51',
'Synth Strings 2': '52',
'Choir Aahs': '53', 'Voice Oohs': '54', 'Synth Choir': '55', 'Orchestra Hit': '56',
# Brass
'Trumpet': '57',
'Trombone': '58', 'Tuba': '59', 'Muted Trumpet': '60', 'French Horn': '61',
'Brass Section': '62',
'Synth Brass 1': '63', 'Synth Brass 2': '64',
# Reed
'Soprano Sax': '65', 'Alto Sax': '66',
'Tenor Sax': '67',
'Baritone Sax': '68', 'Oboe': '69', 'English Horn': '70', 'Bassoon': '71', 'Clarinet': '72',
# Pipe
'Piccolo': '73',
'Flute': '74', 'Recorder': '75', 'Pan Flute': '76', 'Blown bottle': '77', 'Shakuhachi': '78',
'Whistle': '79',
'Ocarina': '80',
# Synth Lead
'Lead 1 (square)': '81', 'Lead 2 (sawtooth)': '82', 'Lead 3 (calliope)': '83',
'Lead 4 (chiff)': '84', 'Lead 5 (charang)': '85', 'Lead 6 (voice)': '86',
'Lead 7 (fifths)': '87',
'Lead 8 (bass + lead)': '88',
# Synth Pad
'Pad 1 (new age)': '89', 'Pad 2 (warm)': '90',
'Pad 3 (polysynth)': '91',
'Pad 4 (choir)': '92', 'Pad 5 (bowed)': '93', 'Pad 6 (metallic)': '94', 'Pad 7 (halo)': '95',
'Pad 8 (sweep)': '96',
# Synth Effects
'FX 1 (rain)': '97', 'FX 2 (soundtrack)': '98', 'FX 3 (crystal)': '99',
'FX 4 (atmosphere)': '100', 'FX 5 (brightness)': '101', 'FX 6 (goblins)': '102',
'FX 7 (echoes)': '103',
'FX 8 (sci-fi)': '104',
# Ethnic
'Sitar': '105', 'Banjo': '106', 'Shamisen': '107', 'Koto': '108',
'Kalimba': '109',
'Bagpipe': '110', 'Fiddle': '111', 'Shanai': '112',
# Percussive
'Tinkle Bell': '113', 'Agogo': '114',
'Steel Drums': '115',
'Woodblock': '116', 'Taiko Drum': '117', 'Melodic Tom': '118', 'Synth Drum': '119',
'Reverse Cymbal': '120',
# Sound effects
'Guitar Fret Noise': '121', 'Breath Noise': '122', 'Seashore': '123', 'Bird Tweet': '124',
'Telephone Ring': '125',
'Helicopter': '126', 'Applause': '127'}
return instruments_dict
| 38.669725 | 106 | 0.474496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,622 | 0.622064 |
c72f4c5b309a87813b09f64b422ca7519b3e740b | 2,182 | py | Python | roles/openshift_health_checker/library/ocutil.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
]
| 164 | 2015-07-29T17:35:04.000Z | 2021-12-16T16:38:04.000Z | roles/openshift_health_checker/library/ocutil.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
]
| 3,634 | 2015-06-09T13:49:15.000Z | 2022-03-23T20:55:44.000Z | roles/openshift_health_checker/library/ocutil.py | shgriffi/openshift-ansible | 6313f519307cf50055589c3876d8bec398bbc4d4 | [
"Apache-2.0"
]
| 250 | 2015-06-08T19:53:11.000Z | 2022-03-01T04:51:23.000Z | #!/usr/bin/python
"""Interface to OpenShift oc command"""
import os
import shlex
import shutil
import subprocess
from ansible.module_utils.basic import AnsibleModule
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
"""Find and return oc binary file"""
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
def main():
"""Module that executes commands on a remote OpenShift cluster"""
module = AnsibleModule(
argument_spec=dict(
namespace=dict(type="str", required=False),
config_file=dict(type="str", required=True),
cmd=dict(type="str", required=True),
extra_args=dict(type="list", default=[]),
),
)
cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
if module.params["namespace"]:
cmd += ['-n', module.params["namespace"]]
cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]
failed = True
try:
cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
failed = False
except subprocess.CalledProcessError as exc:
cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
except OSError as exc:
# we get this when 'oc' is not there
cmd_result = str(exc)
module.exit_json(
changed=False,
failed=failed,
result=cmd_result,
)
if __name__ == '__main__':
main()
| 29.486486 | 91 | 0.636114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.263978 |