ext (string, 9 classes) | sha (string, length 40) | content (string, 3–1.04M chars)
---|---|---|
py | 1a4d6d88a774b9ca67d63d15799ff8c1c2fef85b | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Import ArcPy site-package and os modules
#
import arcpy
import os
import sys, traceback
import time
from datetime import datetime
from datetime import date
## ======================================================
## Read in parameters
## ======================================================
featClass = arcpy.GetParameterAsText(0)
## ======================================================
## Read geoname file and insert feature
## ======================================================
try:
arcpy.AddMessage("Checking input feature class " + featClass + "...")
hasError = 0
# ======================================================
# Get shape type and check if point
# ======================================================
desc = arcpy.Describe(featClass)
arcpy.AddMessage("Geometry Check: Make sure input feature class geometry is point...")
if desc.shapeType.upper() != "POINT":
arcpy.AddError("Error: Input feature class does not have geometry type of point")
hasError = hasError + 1
# ======================================================
# Get list of fields and check if required fields exists
# ======================================================
fields = arcpy.ListFields(featClass)
arcpy.AddMessage("Field Check: Make sure input feature class has correct geonames fields...")
geonameFields = ["RC", "UFI", "UNI", "LAT", "LONG", "DMS_LAT", "DMS_LONG", "MGRS", "JOG", "FC", \
"DSG", "PC", "CC1", "ADM1", "POP", "ELEV", "CC2", "NT", "LC", "SHORT_FORM", \
"GENERIC", "SORT_NAME_RO", "FULL_NAME_RO", "FULL_NAME_ND_RO", "SORT_NAME_RG", \
"FULL_NAME_RG", "FULL_NAME_ND_RG", "NOTE", "MODIFY_DATE", "COUNTRYCODE1", \
"COUNTRYNAME1", "ADM1CODE", "ADM1NAMEALL", "ADM1NAME", "ADM1CLASSALL", \
"ADM1CLASS", "PLACENAME", "DSGNAME", "USER_FLD", \
"DISPLAY", "NAME_RANK", "NAME_LINK", "TRANSL_CD", "NM_MODIFY_DATE", \
"POINT_X", "POINT_Y", "F_EFCTV_DT", "F_TERM_DT"]
numMissing = 0
for geonameField in geonameFields:
found = 0
for field in fields:
if geonameField.upper() == field.name.upper():
found = 1
break
if found == 0:
numMissing = numMissing + 1
arcpy.AddError("Error: Input feature class is missing field: " + geonameField)
if numMissing > 0:
hasError = hasError + 1
# ======================================================
# Check if input has any features
# ======================================================
if sys.version_info[0] > 2:
numCount = int(arcpy.GetCount_management(featClass).getOutput(0))
else:
numCount = long(arcpy.GetCount_management(featClass).getOutput(0))
arcpy.AddMessage("Feature Count Check: Make sure input feature class does not have any features...")
if numCount > 0:
arcpy.AddError("Error: Input feature class has " + str(numCount) + " features.")
hasError = hasError + 1
# ======================================================
# Check if input coordinate system is WGS1984
# ======================================================
SR = desc.spatialReference
arcpy.AddMessage("Spatial Reference Check: Make sure input feature class is 'GCS_WGS_1984'...")
if SR.name.upper() != "GCS_WGS_1984":
arcpy.AddError("Error: Spatial Reference is " + SR.name)
hasError = hasError + 1
if hasError > 0:
result = "FALSE"
else:
result = "TRUE"
# Set Output parameter (required so that script
# tool output can be connected to other model tools)
arcpy.SetParameter(1, result)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
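# --- Illustrative sketch (not part of the original tool) ---
# Under the same assumptions as above (arcpy imported, featClass and
# geonameFields defined), the required-field check could also be written
# with a set difference instead of the nested loops:
#
#   existing = {f.name.upper() for f in arcpy.ListFields(featClass)}
#   missing = [g for g in geonameFields if g.upper() not in existing]
#   for g in missing:
#       arcpy.AddError("Error: Input feature class is missing field: " + g)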
|
py | 1a4d6dc32db6c5de12dda31910696ee748fce8fa | from typing import List
class Solution:
def decode(self, encoded: List[int], first: int) -> List[int]:
# Prefix-XOR decode: arr[i + 1] = arr[i] ^ encoded[i] (the ":=" form needs Python 3.8+).
return [first] + [first := first ^ i for i in encoded]
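# Illustrative usage (hypothetical values): Solution().decode([1, 2, 3], first=1) returns [1, 0, 2, 1].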
|
py | 1a4d709be8d2ecf9ca8bcb5304d46db8d4d6618f | print('Is the sentence a palindrome?')
frase = str(input('Type a sentence without punctuation or accents: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
print('The reverse of {} is {}'.format(junto, inverso))
if junto == inverso:
print('The sentence IS a PALINDROME')
else:
print('The sentence is NOT a PALINDROME')
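# Illustrative note: the reversal loop above is equivalent to slicing, so the
# palindrome test could also be written as: junto == junto[::-1]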
|
py | 1a4d716825eb76db8da4220528de0132a17e0c4e | # Generated by Django 3.0.7 on 2020-06-26 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_featured'),
]
operations = [
migrations.AddField(
model_name='author',
name='profile_picture',
field=models.ImageField(blank=True, upload_to=''),
),
]
|
py | 1a4d719dfff2de86d93c0e5f8e4d14c15f6966e3 | # -*- coding: utf-8 -*-
"""This is a generated class and is not intended for modification!
TODO: Point to Github contribution instructions
"""
from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.api.model.sms.nc.lookup.Network import Network
from infobip.api.model.sms.Error import Error
from infobip.api.model.sms.Status import Status
class NCResponseDetails(DefaultObject):
@property
@serializable(name="ported", type=bool)
def ported(self):
return self.get_field_value("ported")
@ported.setter
def ported(self, ported):
self.set_field_value("ported", ported)
def set_ported(self, ported):
self.ported = ported
return self
@property
@serializable(name="roaming", type=bool)
def roaming(self):
return self.get_field_value("roaming")
@roaming.setter
def roaming(self, roaming):
self.set_field_value("roaming", roaming)
def set_roaming(self, roaming):
self.roaming = roaming
return self
@property
@serializable(name="mccMnc", type=unicode)
def mcc_mnc(self):
return self.get_field_value("mcc_mnc")
@mcc_mnc.setter
def mcc_mnc(self, mcc_mnc):
self.set_field_value("mcc_mnc", mcc_mnc)
def set_mcc_mnc(self, mcc_mnc):
self.mcc_mnc = mcc_mnc
return self
@property
@serializable(name="roamingNetwork", type=Network)
def roaming_network(self):
return self.get_field_value("roaming_network")
@roaming_network.setter
def roaming_network(self, roaming_network):
self.set_field_value("roaming_network", roaming_network)
def set_roaming_network(self, roaming_network):
self.roaming_network = roaming_network
return self
@property
@serializable(name="portedNetwork", type=Network)
def ported_network(self):
return self.get_field_value("ported_network")
@ported_network.setter
def ported_network(self, ported_network):
self.set_field_value("ported_network", ported_network)
def set_ported_network(self, ported_network):
self.ported_network = ported_network
return self
@property
@serializable(name="to", type=unicode)
def to(self):
return self.get_field_value("to")
@to.setter
def to(self, to):
self.set_field_value("to", to)
def set_to(self, to):
self.to = to
return self
@property
@serializable(name="imsi", type=unicode)
def imsi(self):
return self.get_field_value("imsi")
@imsi.setter
def imsi(self, imsi):
self.set_field_value("imsi", imsi)
def set_imsi(self, imsi):
self.imsi = imsi
return self
@property
@serializable(name="servingMSC", type=unicode)
def serving_m_s_c(self):
return self.get_field_value("serving_m_s_c")
@serving_m_s_c.setter
def serving_m_s_c(self, serving_m_s_c):
self.set_field_value("serving_m_s_c", serving_m_s_c)
def set_serving_m_s_c(self, serving_m_s_c):
self.serving_m_s_c = serving_m_s_c
return self
@property
@serializable(name="error", type=Error)
def error(self):
return self.get_field_value("error")
@error.setter
def error(self, error):
self.set_field_value("error", error)
def set_error(self, error):
self.error = error
return self
@property
@serializable(name="originalNetwork", type=Network)
def original_network(self):
return self.get_field_value("original_network")
@original_network.setter
def original_network(self, original_network):
self.set_field_value("original_network", original_network)
def set_original_network(self, original_network):
self.original_network = original_network
return self
@property
@serializable(name="status", type=Status)
def status(self):
return self.get_field_value("status")
@status.setter
def status(self, status):
self.set_field_value("status", status)
def set_status(self, status):
self.status = status
return self
|
py | 1a4d71d2f9118dc666f79551649a96142da8a5f2 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainmenu.ui',
# licensing of 'mainmenu.ui' applies.
#
# Created: Mon Jun 24 09:49:11 2019
# by: pyside2-uic running on PySide2 5.12.4
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainMenu(object):
def setupUi(self, MainMenu):
MainMenu.setObjectName("MainMenu")
MainMenu.resize(800, 600)
self.gridLayout = QtWidgets.QGridLayout(MainMenu)
self.gridLayout.setObjectName("gridLayout")
self.cancelButton = QtWidgets.QPushButton(MainMenu)
font = QtGui.QFont()
font.setPointSize(16)
self.cancelButton.setFont(font)
self.cancelButton.setObjectName("cancelButton")
self.gridLayout.addWidget(self.cancelButton, 5, 3, 1, 1)
self.selectButton = QtWidgets.QPushButton(MainMenu)
font = QtGui.QFont()
font.setPointSize(16)
self.selectButton.setFont(font)
self.selectButton.setObjectName("selectButton")
self.gridLayout.addWidget(self.selectButton, 5, 1, 1, 1)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 5, 2, 1, 1)
self.titleLable = QtWidgets.QLabel(MainMenu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.titleLable.sizePolicy().hasHeightForWidth())
self.titleLable.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(20)
self.titleLable.setFont(font)
self.titleLable.setTextFormat(QtCore.Qt.AutoText)
self.titleLable.setAlignment(QtCore.Qt.AlignCenter)
self.titleLable.setObjectName("titleLable")
self.gridLayout.addWidget(self.titleLable, 1, 1, 1, 3)
spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem1, 6, 1, 1, 3)
spacerItem2 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem2, 2, 1, 1, 3)
spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem3, 0, 1, 1, 3)
spacerItem4 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem4, 4, 1, 1, 3)
spacerItem5 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem5, 1, 0, 5, 1)
spacerItem6 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem6, 1, 4, 5, 1)
self.taskList = QtWidgets.QListView(MainMenu)
font = QtGui.QFont()
font.setPointSize(12)
self.taskList.setFont(font)
self.taskList.setObjectName("taskList")
self.gridLayout.addWidget(self.taskList, 3, 1, 1, 3)
self.retranslateUi(MainMenu)
QtCore.QMetaObject.connectSlotsByName(MainMenu)
def retranslateUi(self, MainMenu):
MainMenu.setWindowTitle(QtWidgets.QApplication.translate("MainMenu", "BehaviorTaskMasterMenu", None, -1))
self.cancelButton.setText(QtWidgets.QApplication.translate("MainMenu", "Cancel", None, -1))
self.selectButton.setText(QtWidgets.QApplication.translate("MainMenu", "Select", None, -1))
self.titleLable.setText(QtWidgets.QApplication.translate("MainMenu", "Task Selection", None, -1))
|
py | 1a4d72a4da1f53c1e1e4d6ca40390010398ff404 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
import re
import threading
from newrelic.packages import requests
from newrelic.core.internal_metrics import internal_count_metric
try:
import queue
except ImportError:
import Queue as queue
_logger = logging.getLogger(__name__)
VALID_CHARS_RE = re.compile(r'[0-9a-zA-Z_ ./-]')
class CommonUtilization(object):
METADATA_URL = ''
HEADERS = None
EXPECTED_KEYS = ()
VENDOR_NAME = ''
TIMEOUT = 0.4
@classmethod
def record_error(cls, resource, data):
# As per spec
internal_count_metric(
'Supportability/utilization/%s/error' % cls.VENDOR_NAME, 1)
_logger.warning('Invalid %r data (%r): %r',
cls.VENDOR_NAME, resource, data)
@classmethod
def _fetch(cls, q):
# Create own requests session and disable all environment variables,
# so that we can bypass any proxy set via env var for this request.
session = requests.Session()
session.trust_env = False
try:
resp = session.get(
cls.METADATA_URL,
timeout=cls.TIMEOUT,
headers=cls.HEADERS)
resp.raise_for_status()
except Exception as e:
resp = None
_logger.debug('Unable to fetch %s data from %r: %r',
cls.VENDOR_NAME, cls.METADATA_URL, e)
q.put(resp)
@classmethod
def fetch(cls):
q = queue.Queue()
t = threading.Thread(
target=cls._fetch,
name="UtilizationDetect/{}".format(cls.VENDOR_NAME),
args=(q,),
)
t.daemon = True
t.start()
try:
return q.get(timeout=cls.TIMEOUT + 0.1)
except queue.Empty:
_logger.debug('Timeout waiting to fetch %s data from %r',
cls.VENDOR_NAME, cls.METADATA_URL)
@classmethod
def get_values(cls, response):
if response is None:
return
try:
j = response.json()
except ValueError:
_logger.debug('Invalid %s data (%r): %r',
cls.VENDOR_NAME, cls.METADATA_URL, response.text)
return
return j
@classmethod
def valid_chars(cls, data):
if data is None:
return False
for c in data:
if not VALID_CHARS_RE.match(c) and ord(c) < 0x80:
return False
return True
@classmethod
def valid_length(cls, data):
if data is None:
return False
b = data.encode('utf-8')
valid = len(b) <= 255
if valid:
return True
return False
@classmethod
def normalize(cls, key, data):
if data is None:
return
try:
stripped = data.strip()
if (stripped and cls.valid_length(stripped) and
cls.valid_chars(stripped)):
return stripped
except:
pass
@classmethod
def sanitize(cls, values):
if values is None:
return
out = {}
for key in cls.EXPECTED_KEYS:
metadata = values.get(key, None)
if not metadata:
cls.record_error(key, metadata)
return
normalized = cls.normalize(key, metadata)
if not normalized:
cls.record_error(key, metadata)
return
out[key] = normalized
return out
@classmethod
def detect(cls):
response = cls.fetch()
values = cls.get_values(response)
return cls.sanitize(values)
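# Illustrative sketch of the vendor-subclass pattern used below: each vendor
# only overrides class attributes (and, when needed, fetch/normalize). The
# vendor name and URL here are hypothetical, not a real provider.
#
# class ExampleUtilization(CommonUtilization):
#     METADATA_URL = 'http://169.254.169.254/example-metadata'
#     EXPECTED_KEYS = ('instanceId',)
#     VENDOR_NAME = 'example'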
class AWSUtilization(CommonUtilization):
EXPECTED_KEYS = ('availabilityZone', 'instanceId', 'instanceType')
METADATA_URL = '%s/2016-09-02/dynamic/instance-identity/document' % (
'http://169.254.169.254'
)
VENDOR_NAME = 'aws'
class AzureUtilization(CommonUtilization):
METADATA_URL = ('http://169.254.169.254'
'/metadata/instance/compute?api-version=2017-03-01')
EXPECTED_KEYS = ('location', 'name', 'vmId', 'vmSize')
HEADERS = {'Metadata': 'true'}
VENDOR_NAME = 'azure'
class GCPUtilization(CommonUtilization):
EXPECTED_KEYS = ('id', 'machineType', 'name', 'zone')
HEADERS = {'Metadata-Flavor': 'Google'}
METADATA_URL = 'http://%s/computeMetadata/v1/instance/?recursive=true' % (
'metadata.google.internal')
VENDOR_NAME = 'gcp'
@classmethod
def normalize(cls, key, data):
if data is None:
return
if key in ('machineType', 'zone'):
formatted = data.strip().split('/')[-1]
elif key == 'id':
formatted = str(data)
else:
formatted = data
return super(GCPUtilization, cls).normalize(key, formatted)
class PCFUtilization(CommonUtilization):
EXPECTED_KEYS = ('cf_instance_guid', 'cf_instance_ip', 'memory_limit')
VENDOR_NAME = 'pcf'
@staticmethod
def fetch():
cf_instance_guid = os.environ.get('CF_INSTANCE_GUID')
cf_instance_ip = os.environ.get('CF_INSTANCE_IP')
memory_limit = os.environ.get('MEMORY_LIMIT')
pcf_vars = (cf_instance_guid, cf_instance_ip, memory_limit)
if all(pcf_vars):
return pcf_vars
@classmethod
def get_values(cls, response):
if response is None or len(response) != 3:
return
values = {}
for k, v in zip(cls.EXPECTED_KEYS, response):
if hasattr(v, 'decode'):
v = v.decode('utf-8')
values[k] = v
return values
class DockerUtilization(CommonUtilization):
VENDOR_NAME = 'docker'
EXPECTED_KEYS = ('id',)
METADATA_FILE = '/proc/self/cgroup'
DOCKER_RE = re.compile(r'([0-9a-f]{64,})')
@classmethod
def fetch(cls):
try:
with open(cls.METADATA_FILE, 'rb') as f:
for line in f:
stripped = line.decode('utf-8').strip()
cgroup = stripped.split(':')
if len(cgroup) != 3:
continue
subsystems = cgroup[1].split(',')
if 'cpu' in subsystems:
return cgroup[2]
except:
# There are all sorts of exceptions that can occur here
# (i.e. permissions, non-existent file, etc)
pass
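# Note (illustrative): /proc/self/cgroup lines have the form
# "<hierarchy-id>:<subsystems>:<path>"; when 'cpu' is listed among the
# subsystems, the path is returned and its last component is treated as the
# container id by get_values() below.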
@classmethod
def get_values(cls, contents):
if contents is None:
return
value = contents.split('/')[-1]
match = cls.DOCKER_RE.search(value)
if match:
value = match.group(0)
return {'id': value}
@classmethod
def valid_chars(cls, data):
if data is None:
return False
hex_digits = set(string.hexdigits)
valid = all((c in hex_digits for c in data))
if valid:
return True
return False
@classmethod
def valid_length(cls, data):
if data is None:
return False
# Must be exactly 64 characters
valid = len(data) == 64
if valid:
return True
return False
class KubernetesUtilization(CommonUtilization):
EXPECTED_KEYS = ('kubernetes_service_host', )
VENDOR_NAME = 'kubernetes'
@staticmethod
def fetch():
kubernetes_service_host = os.environ.get('KUBERNETES_SERVICE_HOST')
if kubernetes_service_host:
return kubernetes_service_host
@classmethod
def get_values(cls, v):
if v is None:
return
if hasattr(v, 'decode'):
v = v.decode('utf-8')
return {'kubernetes_service_host': v}
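# Illustrative usage sketch (uses the classes defined above in this module):
#   data = AWSUtilization.detect()
#   # 'data' is a dict of sanitized metadata keyed by EXPECTED_KEYS, or None on failure.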
|
py | 1a4d73857ab609a68cd92153069bf078cf909bf3 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import coverage
import json
from mock import DEFAULT
from mock import MagicMock
import os
from os import path, sys
import platform
import shutil
import subprocess
import tempfile
import unittest
# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.
TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
V8_JSON = {
"path": ["."],
"binary": "d7",
"flags": ["--flag"],
"main": "run.js",
"run_count": 1,
"results_regexp": "^%s: (.+)$",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
]
}
V8_NESTED_SUITES_JSON = {
"path": ["."],
"flags": ["--flag"],
"run_count": 1,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"binary": "d7",
"main": "run.js",
"resources": ["file1.js", "file2.js"],
"run_count": 2,
"results_regexp": "^Richards: (.+)$"},
{"name": "Sub",
"path": ["sub"],
"tests": [
{"name": "Leaf",
"path": ["leaf"],
"run_count_x64": 3,
"units": "ms",
"main": "run.js",
"results_regexp": "^Simple: (.+) ms.$"},
]
},
{"name": "DeltaBlue",
"path": ["delta_blue"],
"main": "run.js",
"flags": ["--flag2"],
"results_regexp": "^DeltaBlue: (.+)$"},
{"name": "ShouldntRun",
"path": ["."],
"archs": ["arm"],
"main": "run.js"},
]
}
V8_GENERIC_JSON = {
"path": ["."],
"binary": "cc",
"flags": ["--flag"],
"generic": True,
"run_count": 1,
"units": "ms",
}
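# Note (illustrative): in these configs "results_regexp" uses "%s" as a
# placeholder for the test name, e.g. "^%s: (.+)$" is expanded to
# "^Richards: (.+)$" before matching the benchmark output.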
Output = namedtuple("Output", "stdout, stderr, timed_out")
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.base = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(cls.base)
cls._cov = coverage.coverage(
include=([os.path.join(cls.base, "run_perf.py")]))
cls._cov.start()
import run_perf
from testrunner.local import commands
global commands
global run_perf
@classmethod
def tearDownClass(cls):
cls._cov.stop()
print ""
print cls._cov.report()
def setUp(self):
self.maxDiff = None
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
def _WriteTestInput(self, json_content):
self._test_input = path.join(TEST_WORKSPACE, "test.json")
with open(self._test_input, "w") as f:
f.write(json.dumps(json_content))
def _MockCommand(self, *args, **kwargs):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
stderr=None,
timed_out=kwargs.get("timed_out", False))
for arg in args[1]]
def execute(*args, **kwargs):
return test_outputs.pop()
commands.Execute = MagicMock(side_effect=execute)
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
os.chdir = MagicMock(side_effect=chdir)
subprocess.check_call = MagicMock()
platform.system = MagicMock(return_value='Linux')
def _CallMain(self, *args):
self._test_output = path.join(TEST_WORKSPACE, "results.json")
all_args=[
"--json-test-results",
self._test_output,
self._test_input,
]
all_args += args
return run_perf.Main(all_args)
def _LoadResults(self, file_name=None):
with open(file_name or self._test_output) as f:
return json.load(f)
def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
{"units": units,
"graphs": [suite, trace["name"]],
"results": trace["results"],
"stddev": trace["stddev"]} for trace in traces],
self._LoadResults(file_name)["traces"])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
def _VerifyMock(self, binary, *args, **kwargs):
arg = [path.join(path.dirname(self.base), binary)]
arg += args
commands.Execute.assert_called_with(
arg, timeout=kwargs.get("timeout", 60))
def _VerifyMockMultiple(self, *args, **kwargs):
expected = []
for arg in args:
a = [path.join(path.dirname(self.base), arg[0])]
a += arg[1:]
expected.append(((a,), {"timeout": kwargs.get("timeout", 60)}))
self.assertEquals(expected, commands.Execute.call_args_list)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testOneRunWithTestFlags(self):
test_input = dict(V8_JSON)
test_input["test_flags"] = ["2", "test_name"]
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
"--", "2", "test_name")
def testTwoRuns_Units_SuiteName(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
test_input["name"] = "v8"
test_input["units"] = "ms"
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("v8", "ms", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testTwoRuns_SubRegexp(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
del test_input["results_regexp"]
test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testNestedSuite(self):
self._WriteTestInput(V8_NESTED_SUITES_JSON)
self._MockCommand(["delta_blue", "sub/leaf", "richards"],
["DeltaBlue: 200\n",
"Simple: 1 ms.\n",
"Simple: 2 ms.\n",
"Simple: 3 ms.\n",
"Richards: 100\n",
"Richards: 50\n"])
self.assertEquals(0, self._CallMain())
self.assertEquals([
{"units": "score",
"graphs": ["test", "Richards"],
"results": ["50.0", "100.0"],
"stddev": ""},
{"units": "ms",
"graphs": ["test", "Sub", "Leaf"],
"results": ["3.0", "2.0", "1.0"],
"stddev": ""},
{"units": "score",
"graphs": ["test", "DeltaBlue"],
"results": ["200.0"],
"stddev": ""},
], self._LoadResults()["traces"])
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
"DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": "106"},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testTwoRunsStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
test_input["run_count"] = 2
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
"DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
"Richards: 2\nRichards-stddev: 0.5\n"
"DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["2.0", "3.0"], "stddev": "0.7"},
{"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
])
self._VerifyErrors(
["Test Richards should only run once since a stddev is provided "
"by the test.",
"Test DeltaBlue should only run once since a stddev is provided "
"by the test.",
"Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
"DeltaBlue."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testBuildbot(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testBuildbotWithTotal(self):
test_input = dict(V8_JSON)
test_input["total"] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
{"name": "Total", "results": ["3626.49109719"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testBuildbotWithTotalAndErrors(self):
test_input = dict(V8_JSON)
test_input["total"] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["x\nRichards: bla\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(1, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" "
"returned a non-numeric for test Richards.",
"Not all traces have the same number of results."])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testOneRunGeneric(self):
test_input = dict(V8_GENERIC_JSON)
self._WriteTestInput(test_input)
self._MockCommand(["."], [
"RESULT Infra: Constant1= 11 count\n"
"RESULT Infra: Constant2= [10,5,10,15] count\n"
"RESULT Infra: Constant3= {12,1.2} count\n"
"RESULT Infra: Constant4= [10,5,error,15] count\n"])
self.assertEquals(1, self._CallMain())
self.assertEquals([
{"units": "count",
"graphs": ["test", "Infra", "Constant1"],
"results": ["11.0"],
"stddev": ""},
{"units": "count",
"graphs": ["test", "Infra", "Constant2"],
"results": ["10.0", "5.0", "10.0", "15.0"],
"stddev": ""},
{"units": "count",
"graphs": ["test", "Infra", "Constant3"],
"results": ["12.0"],
"stddev": "1.2"},
{"units": "count",
"graphs": ["test", "Infra", "Constant4"],
"results": [],
"stddev": ""},
], self._LoadResults()["traces"])
self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input["timeout"] = 70
self._WriteTestInput(test_input)
self._MockCommand(["."], [""], timed_out=True)
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
])
self._VerifyErrors([
"Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
"Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.",
])
self._VerifyMock(
path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
# Simple test that mocks out the android platform. Testing the platform would
# require lots of complicated mocks for the android tools.
def testAndroid(self):
self._WriteTestInput(V8_JSON)
# FIXME(machenbach): This is not test-local!
platform = run_perf.AndroidPlatform
platform.PreExecution = MagicMock(return_value=None)
platform.PostExecution = MagicMock(return_value=None)
platform.PreTests = MagicMock(return_value=None)
platform.Run = MagicMock(
return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)
self.assertEquals(
0, self._CallMain("--android-build-tools", "/some/dir",
"--arch", "arm"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
def testTwoRuns_Trybot(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
self._WriteTestInput(test_input)
self._MockCommand([".", ".", ".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 200\nDeltaBlue: 20\n",
"Richards: 50\nDeltaBlue: 200\n",
"Richards: 100\nDeltaBlue: 20\n"])
test_output_no_patch = path.join(TEST_WORKSPACE, "results_no_patch.json")
self.assertEquals(0, self._CallMain(
"--outdir-no-patch", "out-no-patch",
"--json-test-results-no-patch", test_output_no_patch,
))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
])
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
], test_output_no_patch)
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
)
def testWrongBinaryWithProf(self):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"),
"--flag", "--prof", "run.js")
def testUnzip(self):
def Gen():
for i in [1, 2, 3]:
yield i, i + 1
l, r = run_perf.Unzip(Gen())
self.assertEquals([1, 2, 3], list(l()))
self.assertEquals([2, 3, 4], list(r()))
|
py | 1a4d73dacfb39d5783f24b4c1808eca1d7dac06f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" Implements routines for converting FPGA interchange capnp files to models.
The models are implemented in python in fpga_interchange.logical_netlist and
fpga_interchange.physical_netlist.
LogicalNetlistBuilder - Internal helper class for constructing logical netlist
format. Recommended use is to first construct the logical
netlist using classes from logical_netlist module
and calling Interchange.output_logical_netlist.
output_logical_netlist - Implements conversion of classes from logical_netlist
module to FPGA interchange logical netlist format.
This function requires the LogicalNetlist schema to be loaded;
it is recommended to use the Interchange class to load schemas
from the interchange schema directory and then invoke
Interchange.output_logical_netlist.
PhysicalNetlistBuilder - Internal helper class for constructing physical
netlist format.
output_physical_netlist - Implements conversion of classes from the physical_netlist
module to FPGA interchange physical netlist format.
This function requires the PhysicalNetlist schema to be loaded;
it is recommended to use the Interchange class to load schemas
from the interchange schema directory and then invoke
Interchange.output_interchange.
Interchange - Class that handles loading capnp schemas.
"""
import capnp
import capnp.lib.capnp
capnp.remove_import_hook()
import enum
import gzip
import os.path
from .logical_netlist import check_logical_netlist, LogicalNetlist, Cell, \
CellInstance, Library, Direction
from .physical_netlist import PhysicalNetlist, PhysicalCellType, \
PhysicalNetType, PhysicalBelPin, PhysicalSitePin, PhysicalSitePip, \
PhysicalPip, PhysicalNet, Placement
from .device_resources import DeviceResources
# Flag indicating use of Packed Cap'n Proto Serialization
IS_PACKED = False
class CompressionFormat(enum.Enum):
UNCOMPRESSED = 0
GZIP = 1
# Flag indicating that files are gziped on output
DEFAULT_COMPRESSION_TYPE = CompressionFormat.GZIP
# Set traversal limit to maximum to effectively disable.
NO_TRAVERSAL_LIMIT = 2**63 - 1
NESTING_LIMIT = 1024
# Level 6 is much faster than level 9, but still has a reasonable compression
# level.
#
# From man page:
# The default compression level is -6 (that is, biased towards high
# compression at expense of speed).
#
DEFAULT_COMPRESSION = 6
def read_capnp_file(capnp_schema,
f_in,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
""" Read file to a capnp object.
compression_format - CompressionFormat
Is the input GZIP'd or uncompressed?
is_packed - bool
Is the capnp file packed or unpacked in its encoding?
"""
if compression_format == CompressionFormat.GZIP:
f_comp = gzip.GzipFile(fileobj=f_in, mode='rb')
if is_packed:
return capnp_schema.from_bytes_packed(
f_comp.read(),
traversal_limit_in_words=NO_TRAVERSAL_LIMIT,
nesting_limit=NESTING_LIMIT)
else:
return capnp_schema.from_bytes(
f_comp.read(),
traversal_limit_in_words=NO_TRAVERSAL_LIMIT,
nesting_limit=NESTING_LIMIT)
else:
assert compression_format == CompressionFormat.UNCOMPRESSED
if is_packed:
return capnp_schema.read_packed(
f_in, traversal_limit_in_words=NO_TRAVERSAL_LIMIT)
else:
return capnp_schema.read(
f_in, traversal_limit_in_words=NO_TRAVERSAL_LIMIT)
def write_capnp_file(capnp_obj,
f_out,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
""" Write capnp object to file.
compression_format - CompressionFormat
Is the output GZIP'd or uncompressed?
is_packed - bool
Should the output capnp file be packed or unpacked in its encoding?
"""
if compression_format == CompressionFormat.GZIP:
with gzip.GzipFile(
fileobj=f_out, mode='wb',
compresslevel=DEFAULT_COMPRESSION) as f:
if is_packed:
f.write(capnp_obj.to_bytes_packed())
else:
f.write(capnp_obj.to_bytes())
else:
assert compression_format == CompressionFormat.UNCOMPRESSED
if is_packed:
capnp_obj.write_packed(f_out)
else:
capnp_obj.write(f_out)
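# Illustrative round-trip sketch (schema and object names are hypothetical):
#
#   with open('netlist.bin.gz', 'wb') as f_out:
#       write_capnp_file(some_capnp_obj, f_out)
#   with open('netlist.bin.gz', 'rb') as f_in:
#       some_capnp_obj = read_capnp_file(some_schema.Netlist, f_in)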
class LogicalNetlistBuilder():
""" Builder class for LogicalNetlist capnp format.
The total number of cells, ports, cell instances should be known prior to
calling the constructor for LogicalNetlistBuilder.
logical_netlist_schema - Loaded logical netlist schema.
name (str) - Name of logical netlist.
cell_count (int) - Total number of cells in all libraries for this file.
port_count (int) - Total number of cell ports in all cells in all
libraries for this file.
cell_instance_count (int) - Total number of cell instances in all cells
in all libraries for this file.
property_map (dict) - Root level property map for the netlist.
indexed_strings (list of str, optional) - If provided, this string list
is used to store strings, instead of LogicalNetlist.strList.
This is useful when embedding LogicalNetlist in other schemas.
"""
def __init__(self,
logical_netlist_schema,
name,
cell_count,
port_count,
cell_instance_count,
property_map,
indexed_strings=None):
self.logical_netlist_schema = logical_netlist_schema
self.logical_netlist = self.logical_netlist_schema.Netlist.new_message(
)
self.logical_netlist.name = name
if indexed_strings is None:
self.own_string_list = True
self.string_map = {}
self.string_list = []
else:
# An external string list is being provided. Use that list (and
# update it), and initialize the string_map with that initial list.
self.own_string_list = False
self.string_list = indexed_strings
self.string_map = {}
for idx, s in enumerate(self.string_list):
self.string_map[s] = idx
self.cell_idx = 0
self.cell_count = cell_count
self.cell_decls = self.logical_netlist.init("cellDecls", cell_count)
self.cells = self.logical_netlist.init("cellList", cell_count)
self.port_idx = 0
self.port_count = port_count
self.logical_netlist.init("portList", port_count)
self.ports = self.logical_netlist.portList
self.cell_instance_idx = 0
self.cell_instance_count = cell_instance_count
self.logical_netlist.init("instList", cell_instance_count)
self.cell_instances = self.logical_netlist.instList
self.create_property_map(self.logical_netlist.propMap, property_map)
def next_cell(self):
""" Return next logical_netlist.Cell pycapnp object and it's index. """
assert self.cell_idx < self.cell_count
cell_decl = self.cell_decls[self.cell_idx]
cell = self.cells[self.cell_idx]
cell_idx = self.cell_idx
cell.index = cell_idx
self.cell_idx += 1
return cell_idx, cell, cell_decl
def get_cell(self, cell_idx):
""" Get logical_netlist.Cell pycapnp object at given index. """
return self.logical_netlist.cellList[cell_idx]
def next_port(self):
""" Return next logical_netlist.Port pycapnp object and it's index. """
assert self.port_idx < self.port_count
port = self.ports[self.port_idx]
port_idx = self.port_idx
self.port_idx += 1
return port_idx, port
def next_cell_instance(self):
""" Return next logical_netlist.CellInstance pycapnp object and it's index. """
assert self.cell_instance_idx < self.cell_instance_count
cell_instance = self.cell_instances[self.cell_instance_idx]
cell_instance_idx = self.cell_instance_idx
self.cell_instance_idx += 1
return cell_instance_idx, cell_instance
def string_id(self, s):
""" Intern string into file, and return its StringIdx. """
assert isinstance(s, str)
if s not in self.string_map:
self.string_map[s] = len(self.string_list)
self.string_list.append(s)
return self.string_map[s]
def finish_encode(self):
""" Completes the encoding of the logical netlist and returns root pycapnp object.
Invoke after all cells, ports and cell instances have been populated
with data.
Returns completed logical_netlist.Netlist pycapnp object.
"""
if self.own_string_list:
self.logical_netlist.init('strList', len(self.string_list))
for idx, s in enumerate(self.string_list):
self.logical_netlist.strList[idx] = s
return self.logical_netlist
def create_property_map(self, property_map, d):
""" Create a property_map from a python dictionary for this LogicalNetlist file.
property_map (logical_netlist.PropertyMap pycapnp object) - Pycapnp
object to write property map.
d (dict-like) - Dictionary to convert to property map.
Keys must be strings. Values can be strings, ints or
bools.
"""
entries = property_map.init('entries', len(d))
for entry, (k, v) in zip(entries, d.items()):
assert isinstance(k, str)
entry.key = self.string_id(k)
if isinstance(v, str):
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
entry.textValue = self.string_id(v)
elif isinstance(v, bool):
entry.boolValue = v
elif isinstance(v, int):
entry.intValue = v
else:
assert False, "Unknown type of value {}, type = {}".format(
repr(v), type(v))
def get_top_cell_instance(self):
""" Return the top cell instance from the LogicalNetlist. """
return self.logical_netlist.topInst
def output_logical_netlist(logical_netlist_schema,
libraries,
name,
top_instance_name,
top_instance,
view="netlist",
property_map={},
indexed_strings=None):
""" Convert logical_netlist.Library python classes to a FPGA interchange LogicalNetlist capnp.
logical_netlist_schema - logical_netlist schema.
libraries (dict) - Dict of str to logical_netlist.Library python classes.
top_level_cell (str) - Name of Cell to instance at top level
top_level_name (str) - Name of top level cell instance
view (str) - EDIF internal constant.
property_map - PropertyMap for top level cell instance
"""
# Sanity that the netlist libraries are complete and consistent, also
# output master cell list.
master_cell_list = check_logical_netlist(libraries)
# Make sure top level cell is in the master cell list.
assert top_instance is None or top_instance.cell_name in master_cell_list
# Count cell, port and cell instance counts to enable pre-allocation of
# capnp arrays.
cell_count = 0
port_count = 0
cell_instance_count = 0
for lib in libraries.values():
cell_count += len(lib.cells)
for cell in lib.cells.values():
port_count += len(cell.ports)
cell_instance_count += len(cell.cell_instances)
logical_netlist = LogicalNetlistBuilder(
logical_netlist_schema=logical_netlist_schema,
name=name,
cell_count=cell_count,
port_count=port_count,
cell_instance_count=cell_instance_count,
property_map=property_map,
indexed_strings=indexed_strings)
# Assign each python Cell objects in libraries to capnp
# logical_netlist.Cell objects, and record the cell index for use with
# cell instances later.
#
# Ports can also be converted now, do that too. Build a map of cell name
# and port name to port objects for use on constructing cell nets.
cell_name_to_idx = {}
ports = {}
for library, lib in libraries.items():
library_id = logical_netlist.string_id(library)
for cell in lib.cells.values():
assert cell.name not in cell_name_to_idx
cell_idx, cell_obj, cell_decl = logical_netlist.next_cell()
cell_decl.name = logical_netlist.string_id(cell.name)
cell_decl.view = logical_netlist.string_id(cell.view)
cell_decl.lib = library_id
cell_name_to_idx[cell.name] = cell_idx
logical_netlist.create_property_map(cell_decl.propMap,
cell.property_map)
cell_decl.init('ports', len(cell.ports))
for idx, (port_name, port) in enumerate(cell.ports.items()):
port_idx, port_obj = logical_netlist.next_port()
ports[cell.name, port_name] = (port_idx, port)
cell_decl.ports[idx] = port_idx
port_obj.dir = logical_netlist_schema.Netlist.Direction.__dict__[
port.direction.name.lower()]
logical_netlist.create_property_map(port_obj.propMap,
port.property_map)
if port.bus is not None:
port_obj.name = logical_netlist.string_id(port_name)
bus = port_obj.init('bus')
bus.busStart = port.bus.start
bus.busEnd = port.bus.end
else:
port_obj.name = logical_netlist.string_id(port_name)
port_obj.bit = None
# Now that each cell type has been assigned a cell index, add cell
# instances to cells as needed.
for lib in libraries.values():
for cell in lib.cells.values():
cell_obj = logical_netlist.get_cell(cell_name_to_idx[cell.name])
# Save mapping of cell instance name to cell instance index for
# cell net construction
cell_instances = {}
cell_obj.init('insts', len(cell.cell_instances))
for idx, (cell_instance_name,
cell_instance) in enumerate(cell.cell_instances.items()):
cell_instance_idx, cell_instance_obj = logical_netlist.next_cell_instance(
)
cell_instances[cell_instance_name] = cell_instance_idx
cell_instance_obj.name = logical_netlist.string_id(
cell_instance_name)
logical_netlist.create_property_map(cell_instance_obj.propMap,
cell_instance.property_map)
cell_instance_obj.view = logical_netlist.string_id(
cell_instance.view)
cell_instance_obj.cell = cell_name_to_idx[cell_instance.
cell_name]
cell_obj.insts[idx] = cell_instance_idx
cell_obj.init('nets', len(cell.nets))
for net_obj, (netname, net) in zip(cell_obj.nets,
cell.nets.items()):
net_obj.name = logical_netlist.string_id(netname)
logical_netlist.create_property_map(net_obj.propMap,
net.property_map)
net_obj.init('portInsts', len(net.ports))
for port_obj, port in zip(net_obj.portInsts, net.ports):
if port.instance_name is not None:
# If port.instance_name is not None, then this is a
# cell instance port connection.
instance_cell_name = cell.cell_instances[
port.instance_name].cell_name
port_obj.inst = cell_instances[port.instance_name]
port_obj.port, port_pyobj = ports[instance_cell_name,
port.name]
else:
# If port.instance_name is None, then this is a cell
# port connection
port_obj.extPort = None
port_obj.port, port_pyobj = ports[cell.name, port.name]
# Handle bussed port annotations
if port.idx is not None:
port_obj.busIdx.idx = port_pyobj.encode_index(port.idx)
else:
port_obj.busIdx.singleBit = None
if top_instance is not None:
top_level_cell_instance = logical_netlist.get_top_cell_instance()
# Convert the top level cell now that the libraries have been converted.
top_level_cell_instance.name = logical_netlist.string_id(
top_instance_name)
top_level_cell_instance.cell = cell_name_to_idx[top_instance.cell_name]
top_level_cell_instance.view = logical_netlist.string_id(
top_instance.view)
logical_netlist.create_property_map(top_level_cell_instance.propMap,
top_instance.property_map)
return logical_netlist.finish_encode()
class PhysicalNetlistBuilder():
""" Builder class for PhysicalNetlist capnp format.
physical_netlist_schema - Loaded physical netlist schema.
"""
def __init__(self, physical_netlist_schema):
self.physical_netlist_schema = physical_netlist_schema
def init_string_map(self):
self.string_map = {}
self.string_list = []
def string_id(self, s):
""" Intern string into file, and return its StringIdx. """
assert isinstance(s, str)
if s not in self.string_map:
self.string_map[s] = len(self.string_list)
self.string_list.append(s)
return self.string_map[s]
def encode(self, phys_netlist):
""" Completes the encoding of the physical netlist and returns root pycapnp object.
Invoke after all placements, physical cells and physical nets have
been added.
Returns completed physical_netlist.PhysNetlist pycapnp object.
"""
self.init_string_map()
physical_netlist = self.physical_netlist_schema.PhysNetlist.new_message(
)
physical_netlist.part = phys_netlist.part
physical_netlist.init('placements', len(phys_netlist.placements))
placements = physical_netlist.placements
for idx, placement in enumerate(phys_netlist.placements):
placement_obj = placements[idx]
placement_obj.cellName = self.string_id(placement.cell_name)
placement_obj.type = self.string_id(placement.cell_type)
placement_obj.site = self.string_id(placement.site)
placement_obj.bel = self.string_id(placement.bel)
placement_obj.isSiteFixed = True
placement_obj.isBelFixed = True
if placement.other_bels:
placement_obj.init('otherBels', len(placement.other_bels))
other_bels_obj = placement_obj.otherBels
for idx, s in enumerate(placement.other_bels):
other_bels_obj[idx] = self.string_id(s)
placement_obj.init('pinMap', len(placement.pins))
pin_map = placement_obj.pinMap
for idx, pin in enumerate(placement.pins):
pin_map[idx].cellPin = self.string_id(pin.cell_pin)
pin_map[idx].belPin = self.string_id(pin.bel_pin)
if pin.bel is None:
pin_map[idx].bel = placement_obj.bel
else:
pin_map[idx].bel = self.string_id(pin.bel)
pin_map[idx].isFixed = True
if pin.other_cell_type:
assert pin.other_cell_name is not None
pin.otherCell.multiCell = self.string_id(
pin.other_cell_name)
pin.otherCell.multiType = self.string_id(
pin.other_cell_type)
physical_netlist.init('physNets', len(phys_netlist.nets))
nets = physical_netlist.physNets
for idx, net in enumerate(phys_netlist.nets):
net_obj = nets[idx]
net_obj.name = self.string_id(net.name)
net_obj.init('sources', len(net.sources))
for root_obj, root in zip(net_obj.sources, net.sources):
root.output_interchange(root_obj, self.string_id)
net_obj.init('stubs', len(net.stubs))
for stub_obj, stub in zip(net_obj.stubs, net.stubs):
stub.output_interchange(stub_obj, self.string_id)
net_obj.type = self.physical_netlist_schema.PhysNetlist.NetType.__dict__[
net.type.name.lower()]
physical_netlist.init('physCells', len(phys_netlist.physical_cells))
physical_cells = physical_netlist.physCells
for idx, (cell_name,
cell_type) in enumerate(phys_netlist.physical_cells.items()):
physical_cell = physical_cells[idx]
physical_cell.cellName = self.string_id(cell_name)
physical_cell.physType = self.physical_netlist_schema.PhysNetlist.PhysCellType.__dict__[
cell_type.name.lower()]
physical_netlist.init('properties', len(phys_netlist.properties))
properties = physical_netlist.properties
for idx, (k, v) in enumerate(phys_netlist.properties.items()):
properties[idx].key = self.string_id(k)
properties[idx].value = self.string_id(v)
physical_netlist.init('siteInsts', len(phys_netlist.site_instances))
site_instances = physical_netlist.siteInsts
for idx, (k, v) in enumerate(phys_netlist.site_instances.items()):
site_instances[idx].site = self.string_id(k)
site_instances[idx].type = self.string_id(v)
physical_netlist.init('strList', len(self.string_list))
for idx, s in enumerate(self.string_list):
physical_netlist.strList[idx] = s
return physical_netlist
def output_physical_netlist(physical_netlist, physical_netlist_schema):
builder = PhysicalNetlistBuilder(physical_netlist_schema)
return builder.encode(physical_netlist)
def first_upper(s):
return s[0].upper() + s[1:]
def to_logical_netlist(netlist_capnp, strs=None):
# name @0 : Text;
# propMap @1 : PropertyMap;
# topInst @2 : CellInstance;
# strList @3 : List(Text);
# cellList @4 : List(Cell);
# portList @5 : List(Port);
# instList @6 : List(CellInstance);
if strs is None:
strs = [s for s in netlist_capnp.strList]
libraries = {}
def convert_property_map(prop_map):
out = {}
for prop in prop_map.entries:
key = strs[prop.key]
if prop.which() == 'textValue':
value = strs[prop.textValue]
elif prop.which() == 'intValue':
value = prop.intValue
else:
assert prop.which() == 'boolValue'
value = prop.boolValue
out[key] = value
return out
def convert_cell_instance(cell_instance_capnp):
prop_map = convert_property_map(cell_instance_capnp.propMap)
name = strs[cell_instance_capnp.name]
return name, CellInstance(
view=strs[cell_instance_capnp.view],
cell_name=strs[netlist_capnp.cellDecls[cell_instance_capnp.cell].
name],
property_map=prop_map,
capnp_name=cell_instance_capnp.cell)
for cell_capnp in netlist_capnp.cellList:
cell_decl = netlist_capnp.cellDecls[cell_capnp.index]
cell = Cell(
name=strs[cell_decl.name],
capnp_index=cell_capnp.index,
property_map=convert_property_map(cell_decl.propMap),
)
cell.view = strs[cell_decl.view]
for inst in cell_capnp.insts:
cell_instance_name, cell_instance = convert_cell_instance(
netlist_capnp.instList[inst])
cell.cell_instances[cell_instance_name] = cell_instance
for port_idx in cell_decl.ports:
port = netlist_capnp.portList[port_idx]
port_name = strs[port.name]
direction = Direction[first_upper(str(port.dir))]
prop_map = convert_property_map(port.propMap)
if port.which() == 'bit':
cell.add_port(
name=port_name, direction=direction, property_map=prop_map)
else:
assert port.which() == 'bus'
cell.add_bus_port(
name=port_name,
direction=direction,
property_map=prop_map,
start=port.bus.busStart,
end=port.bus.busEnd)
for net in cell_capnp.nets:
net_name = strs[net.name]
cell.add_net(
name=net_name,
property_map=convert_property_map(net.propMap),
)
for port_inst in net.portInsts:
port_capnp = netlist_capnp.portList[port_inst.port]
port_name = strs[port_capnp.name]
if port_inst.busIdx.which() == 'singleBit':
idx = None
else:
assert port_inst.busIdx.which() == 'idx'
assert port_capnp.which() == 'bus'
bus = port_capnp.bus
if bus.busStart <= bus.busEnd:
idx = port_inst.busIdx.idx + bus.busStart
else:
idx = bus.busStart - port_inst.busIdx.idx
if port_inst.which() == 'extPort':
cell.connect_net_to_cell_port(
net_name=net_name, port=port_name, idx=idx)
else:
assert port_inst.which() == 'inst'
instance_name = strs[netlist_capnp.instList[port_inst.
inst].name]
cell.connect_net_to_instance(
net_name=net_name,
instance_name=instance_name,
port=port_name,
idx=idx)
library = strs[cell_decl.lib]
if library not in libraries:
libraries[library] = Library(name=library)
libraries[library].add_cell(cell)
top_instance_name, top_instance = convert_cell_instance(
netlist_capnp.topInst)
return LogicalNetlist(
name=netlist_capnp.name,
property_map=convert_property_map(netlist_capnp.propMap),
top_instance_name=top_instance_name,
top_instance=top_instance,
libraries=libraries)
def to_physical_netlist(phys_netlist_capnp):
strs = [s for s in phys_netlist_capnp.strList]
properties = {}
for prop in phys_netlist_capnp.properties:
properties[strs[prop.key]] = strs[prop.value]
phys_netlist = PhysicalNetlist(phys_netlist_capnp.part, properties)
for site_instance in phys_netlist_capnp.siteInsts:
phys_netlist.add_site_instance(strs[site_instance.site],
strs[site_instance.type])
for physical_cell in phys_netlist_capnp.physCells:
phys_netlist.add_physical_cell(
strs[physical_cell.cellName], PhysicalCellType[first_upper(
str(physical_cell.physType))])
def convert_route_segment(route_segment_capnp):
which = route_segment_capnp.which()
if which == 'belPin':
bel_pin = route_segment_capnp.belPin
return PhysicalBelPin(
site=strs[bel_pin.site],
bel=strs[bel_pin.bel],
pin=strs[bel_pin.pin])
elif which == 'sitePin':
site_pin = route_segment_capnp.sitePin
return PhysicalSitePin(
site=strs[site_pin.site], pin=strs[site_pin.pin])
elif which == 'pip':
# TODO: Shouldn't be discarding the isFixed field
pip = route_segment_capnp.pip
site = strs[pip.site] if pip.which() == 'site' else None
return PhysicalPip(
tile=strs[pip.tile],
wire0=strs[pip.wire0],
wire1=strs[pip.wire1],
forward=pip.forward,
site=site)
else:
assert which == 'sitePIP'
# TODO: Shouldn't be discarding the isFixed, inverts, and isInverting
# fields
site_pip = route_segment_capnp.sitePIP
return PhysicalSitePip(
site=strs[site_pip.site],
bel=strs[site_pip.bel],
pin=strs[site_pip.pin],
is_inverting=site_pip.isInverting)
def convert_route_branch(route_branch_capnp):
obj = convert_route_segment(route_branch_capnp.routeSegment)
for branch in route_branch_capnp.branches:
obj.branches.append(convert_route_branch(branch))
return obj
def convert_net(net_capnp):
sources = []
for source_capnp in net_capnp.sources:
sources.append(convert_route_branch(source_capnp))
stubs = []
for stub_capnp in net_capnp.stubs:
stubs.append(convert_route_branch(stub_capnp))
return PhysicalNet(
name=strs[net_capnp.name],
type=PhysicalNetType[first_upper(str(net_capnp.type))],
sources=sources,
stubs=stubs)
null_net = convert_net(phys_netlist_capnp.nullNet)
assert len(null_net.sources) == 0
phys_netlist.set_null_net(null_net.stubs)
for physical_net in phys_netlist_capnp.physNets:
net = convert_net(physical_net)
phys_netlist.add_physical_net(
net_name=net.name,
sources=net.sources,
stubs=net.stubs,
net_type=net.type)
for placement_capnp in phys_netlist_capnp.placements:
# TODO: Shouldn't be discarding isBelFixed/isSiteFixed/altSiteType
placement = Placement(
cell_type=strs[placement_capnp.type],
cell_name=strs[placement_capnp.cellName],
site=strs[placement_capnp.site],
bel=strs[placement_capnp.bel],
)
for pin_map in placement_capnp.pinMap:
# TODO: Shouldn't be discarding isFixed
other_cell_name = None
other_cell_type = None
if pin_map.which() == 'otherCell':
other_cell = pin_map.otherCell
other_cell_name = strs[other_cell.multiCell]
other_cell_type = strs[other_cell.multiType]
placement.add_bel_pin_to_cell_pin(
bel=strs[pin_map.bel],
bel_pin=strs[pin_map.belPin],
cell_pin=strs[pin_map.cellPin],
other_cell_type=other_cell_type,
other_cell_name=other_cell_name)
for other_bel in placement_capnp.otherBels:
placement.other_bels.add(strs[other_bel])
phys_netlist.add_placement(placement)
return phys_netlist
class Interchange():
def __init__(self, schema_directory):
search_path = [os.path.dirname(os.path.dirname(capnp.__file__))]
if 'CONDA_PREFIX' in os.environ:
search_path.append(
os.path.join(os.environ['CONDA_PREFIX'], 'include'))
if 'CAPNP_PATH' in os.environ:
search_path.append(os.environ['CAPNP_PATH'])
for path in ['/usr/local/include', '/usr/include']:
if os.path.exists(path):
search_path.append(path)
self.references_schema = capnp.load(
os.path.join(schema_directory, 'References.capnp'),
imports=search_path)
self.logical_netlist_schema = capnp.load(
os.path.join(schema_directory, 'LogicalNetlist.capnp'),
imports=search_path)
self.physical_netlist_schema = capnp.load(
os.path.join(schema_directory, 'PhysicalNetlist.capnp'),
imports=search_path)
self.device_resources_schema = capnp.load(
os.path.join(schema_directory, 'DeviceResources.capnp'),
imports=search_path)
def output_logical_netlist(self, *args, **kwargs):
return output_logical_netlist(
logical_netlist_schema=self.logical_netlist_schema,
*args,
**kwargs)
def output_physical_netlist(self, *args, **kwargs):
return output_physical_netlist(
physical_netlist_schema=self.physical_netlist_schema,
*args,
**kwargs)
def read_logical_netlist_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.logical_netlist_schema.Netlist, f,
compression_format, is_packed)
def read_logical_netlist(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return to_logical_netlist(
read_capnp_file(self.logical_netlist_schema.Netlist, f,
compression_format, is_packed))
def read_physical_netlist(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return to_physical_netlist(
read_capnp_file(self.physical_netlist_schema.PhysNetlist, f,
compression_format, is_packed))
def read_physical_netlist_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.physical_netlist_schema.PhysNetlist, f,
compression_format, is_packed)
def read_device_resources_raw(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return read_capnp_file(self.device_resources_schema.Device, f,
compression_format, is_packed)
def read_device_resources(self,
f,
compression_format=DEFAULT_COMPRESSION_TYPE,
is_packed=IS_PACKED):
return DeviceResources(
read_capnp_file(self.device_resources_schema.Device, f,
compression_format, is_packed))
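# Hedged usage sketch (not part of the original module): the schema directory
# and netlist file names below are placeholders.
if __name__ == '__main__':
    interchange = Interchange('fpga-interchange-schema/interchange')
    with open('design.netlist', 'rb') as f:
        logical = interchange.read_logical_netlist(f)
    with open('design.phys', 'rb') as f:
        physical = interchange.read_physical_netlist(f)
    print(logical.name)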
|
py | 1a4d73f66a2bfb0c653c96cfa5065eb5c0f72623 | # Generated by Django 3.0.3 on 2020-08-31 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rcsystem', '0041_auto_20200831_2102'),
]
operations = [
migrations.AddField(
model_name='moviedatabase',
name='movie_index',
field=models.IntegerField(default=-1),
),
]
|
py | 1a4d744d57eab6852052c157c972655924cc1058 | from typing import List
import pandas
import pytest
import math
from muller.inheritance import scoring
@pytest.fixture
def scorer() -> scoring.Score:
return scoring.Score(0.03, 0.97, 0.05, weights = [1,1,1,1])
@pytest.fixture
def legacy_scorer() -> scoring.LegacyScore:
return scoring.LegacyScore(0.05, 0.03, 0.97)
@pytest.mark.parametrize(
"left,right,expected",
[ # nested, unnested, expected_score
([0, 0.2, 0.3, 0.4, 0.5], [0, 0.1, 0.2, 0.3, 0.4], 1),
([0.3, 0.3, 0.3, 0.3, 0.3], [0, 0.1, 0.2, 0.3, 0.4], 1),
([0.3, 0.3, 0.3, 0.3, 0.3], [0.3, 0.4, 0.5, 0.6, 0.7], -1),
([0, .1, .1, .2, .2, .3, .3], [0, .1, .1, .2, .2, .3, .3], math.nan),
([0, .1, .1, .1, .1, .1, .1], [0, 0, 0, 0, 0, .93, 0], math.nan),
([0, .1, .1, .1, .1, .1, .1], [0, 0, 0, 0, 0, .94, .94], math.nan),
([0, .1, .1, .1, .1, .16, .1], [0, 0, 0, 0, 0, 1.0, 0], math.nan),
# ([0, .1, .2, .3, .4, .16, .1], [0, 0, 0, 0, 0, 1.0, 0], -1),
# ([0, .2, .2, .2, .4, .2, .2], [0, 0, 0, 0, 0, 1.0, 0], -1),
([0, 0, 0, 0.403, 0.489, 0.057, 0.08], [0, 0, 0, 0, 0, 0.2675, 0.326], math.nan),
([0, 0, 0, 0.403, 0.489, 0.057, 0.08], [0, 0, 0, 0.2, 0.2, 0, 0], 1)
]
)
def test_greater_score(scorer, left, right, expected):
left_series = pandas.Series(left)
right_series = pandas.Series(right)
result = scorer.calculate_score_greater(left_series, right_series)
assert result == expected or (math.isnan(result) and math.isnan(expected))
@pytest.mark.parametrize("left,right,expected",
[
([0, .1, .1, .2, .2, .3, .3], [0, .1, .1, .2, .2, .3, .3], 0),
([0, 0, 0, 0.403, 0.489, 0.057, 0.08], [0, 0, 0, 0, 0, 0.2675, 0.326], 0),
([0, .1, .1, .1, .1, .1, .1], [0, 0, 0, 0, 0, .94, .94], 1),
([0, .1, .1, .1, .1, .1, .1], [0, .9, .9, 1.0, .5, .93, .9], 0),
([0, .1, .2, .3, .3, .2, .1], [0, .9, .9, 1.0, .5, .93, .9], 1),
([0, .1, .1, .3, .5, .5, .2], [0.2, 0.9, 0.85, 0.9, .95, 1.0, 0.9], 1),
([0, 0.5, 0.5, 0.403, 0.489, 0.05, 0.05], [0, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7], 0)
])
def test_calculate_above_fixed_score(scorer, left: List[float], right: List[float], expected: int):
# check if two series consistently sum to greater than 1.
left = pandas.Series(left)
right = pandas.Series(right)
result = scorer.calculate_score_above_fixed(left, right)
assert result == expected
@pytest.mark.parametrize(
"left,right,expected",
[ # nested, unnested, expected_score
([0, 0.2, 0.3, 0.4, 0.5], [0, 0.1, 0.2, 0.3, 0.4], 1),
([0.3, 0.3, 0.3, 0.3, 0.3], [0, 0.1, 0.2, 0.1, 0.2], 0),
([0.3, 0.2, 0.1, 0.0, 0.0], [0.3, 0.4, 0.5, 0.6, 0.7], -1),
([0, .1, .1, .2, .2, .3, .3], [0, .1, .1, .2, .2, .3, .3], 1),
([0, .1, .1, .2, .2, .3, .3], [0, .1, .1, .2, .2, .4, .3], 1),
([1, .9, .8, .7, .6, .5, .4], [0, .1, .2, .3, .4, .5, .6], -1),
([0, .1, .1, .1, .1, .1, .1], [0, 0, 0, 0, 0, .94, .94], 0),
([0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1.0, 0], 0)
]
)
def test_calculate_derivative_score(scorer, left, right, expected):
left_series = pandas.Series(left)
right_series = pandas.Series(right)
result = scorer.calculate_score_derivative(left_series, right_series)
assert result == expected
@pytest.mark.parametrize(
"left, right, expected",
[
([0.1, 0.1, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.2, 0.0, 0.0], -1),
([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],0),
([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.0, 0.0, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],0),
([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.0, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],1),
([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.1, 0.0, 0.0],0),
]
)
def test_calculate_area_score(scorer, left, right, expected):
l = pandas.Series(left)
r = pandas.Series(right)
result = scorer.calculate_score_area(l, r)
assert result == expected
|
py | 1a4d749efaee9c237e2670c3aafdccb8c6152a5e | # ------------------------------------------------------------------------------
# Functions to save and restore different data types.
# ------------------------------------------------------------------------------
import os
# PICKLE
import pickle
def pkl_dump(obj, name, path='obj'):
r"""Saves an object in pickle format."""
if '.p' not in name:
name = name + '.pkl'
path = os.path.join(path, name)
pickle.dump(obj, open(path, 'wb'))
def pkl_load(name, path='obj'):
r"""Restores an object from a pickle file."""
if '.p' not in name:
name = name + '.pkl'
path = os.path.join(path, name)
try:
obj = pickle.load(open(path, 'rb'))
except FileNotFoundError:
obj = None
return obj
# NUMPY
from numpy import save, load
def np_dump(obj, name, path='obj'):
r"""Saves an object in npy format."""
if '.npy' not in name:
name = name + '.npy'
path = os.path.join(path, name)
save(path, obj)
def np_load(name, path='obj'):
r"""Restores an object from a npy file."""
if '.npy' not in name:
name = name + '.npy'
path = os.path.join(path, name)
try:
obj = load(path)
except FileNotFoundError:
obj = None
return obj
# JSON
import json
def save_json(dict_obj, path, name):
r"""Saves a dictionary in json format."""
if '.json' not in name:
name += '.json'
with open(os.path.join(path, name), 'w') as json_file:
json.dump(dict_obj, json_file)
def load_json(path, name):
r"""Restores a dictionary from a json file."""
if '.json' not in name:
name += '.json'
with open(os.path.join(path, name), 'r') as json_file:
return json.load(json_file)
# NIFTY
import numpy as np
import SimpleITK as sitk
def nifty_dump(x, name, path):
r"""Save a tensor of numpy array in nifty format."""
if 'torch.Tensor' in str(type(x)):
x = x.detach().cpu().numpy()
if '.nii' not in name:
name = name + '.nii.gz'
# Remove channels dimension and rotate axis so depth first
if len(x.shape) == 4:
x = np.moveaxis(x[0], -1, 0)
assert len(x.shape) == 3
path = os.path.join(path, name)
sitk.WriteImage(sitk.GetImageFromArray(x), path)
# OTHERS
import functools
def join_path(list):
r"""From a list of chained directories, forms a path"""
return functools.reduce(os.path.join, list)
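# Minimal usage sketch (illustrative only; the directory and names below are
# placeholders): round-trip a dict through the JSON helpers and a list through
# the pickle helpers defined above.
if __name__ == '__main__':
    os.makedirs('obj', exist_ok=True)
    save_json({'epoch': 3, 'loss': 0.12}, path='obj', name='stats')
    print(load_json(path='obj', name='stats'))
    pkl_dump([1, 2, 3], 'numbers', path='obj')
    print(pkl_load('numbers', path='obj'))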
|
py | 1a4d76e0f38ed7e677f854cfc64d787c7bbb58a9 | from celery import shared_task
from api.models import Vote
@shared_task
def refresh_votes():
Vote.objects.all().delete()
return "Votes successfully reset"
|
py | 1a4d7759ffa248f90d564c874408b104475b00b6 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
from node import Node
class V2ray(Node):
uuid = ''
alterId = 0
network = ''
camouflageType = ''
camouflageHost = ''
camouflagePath = ''
camouflageTls = ''
def __init__(self, ip, port, remark, security, uuid, alterId, network, camouflageType, camouflageHost, camouflagePath, camouflageTls):
super(V2ray, self).__init__(ip, port, remark, security)
self.uuid = uuid
self.alterId = alterId
self.network = network
self.camouflageHost = camouflageHost
self.camouflagePath = camouflagePath
self.camouflageTls = camouflageTls
self.camouflageType = camouflageType
def formatConfig(self):
v2rayConf = {
"log" : {
"access" : "/var/log/v2ray/access.log",
"error":"/var/log/v2ray/error.log",
"logLevel": "none"
},
"inbounds": [
{
"port": 1080,
"listen": "127.0.0.1",
"protocol": "socks",
"settings": {
"udp": True
},
"tag": "in"
}
],
"outbounds": [
{
"settings" : {},
"protocol":"freedom",
"tag" : "direct"
},
{
"settings" : {},
"protocol":"blackhole",
"tag" : "blocked"
}
],
"routing": {
"strategy": "rules",
"settings": {
"domainStrategy": "AsIs",
"rules": [
{
"type" : "field",
"ip" : [
"geoip:cn",
"geoip:private"
],
"outboundTag": "direct"
},
{
"type":"field",
"inboundTag":["in"],
"outboundTag":"out"
}
]
}
}
}
if self.network == 'tcp' or self.network == 'auto':
            # tcp case
v2rayConf['outbounds'].append({
"protocol": "vmess",
"settings": {
"vnext": [{
"address": self.ip,
"port": int(self.port),
"users": [
{
"id": self.uuid,
"alterId" : self.alterId
}
]
}]
},
"streamSettings": {
"network": "tcp"
},
"tag": "out"
})
return v2rayConf
elif self.network == 'kcp':
            # kcp case
v2rayConf['outbounds'].append({
"protocol": "vmess",
"settings": {
"vnext": [{
"address": self.ip,
"port": int(self.port),
"users": [
{
"id": self.uuid,
"alterId": self.alterId
}
]
}]
},
"streamSettings" : {
"network": "kcp",
"kcpSettings": {
"mtu": 1350,
"tti": 50,
"uplinkCapacity": 12,
"downlinkCapacity": 100,
"congestion": False,
"readBufferSize": 2,
"writeBufferSize": 2,
"header": {
"type": self.camouflageType,
}
}
},
"tag": "out"
})
return v2rayConf
elif self.network == 'ws':
# ws
v2rayConf['outbounds'].append({
"protocol": "vmess",
"settings": {
"vnext": [{
"address": self.ip,
"port": int(self.port),
"users": [
{
"id": self.uuid,
"alterId": self.alterId
}
]
}]
},
"streamSettings": {
"network": "ws",
"security": self.camouflageTls,
"tlsSettings": {
"allowInsecure": True,
},
"wsSettings" : {
"path": self.camouflagePath,
"headers" : {
"Host": self.camouflageHost
}
}
},
"tag": "out"
})
return v2rayConf
else:
# h2
v2rayConf['outbounds'].append({
"protocol": "vmess",
"settings": {
"vnext": [{
"address": self.ip,
"port": int(self.port),
"users": [
{
"id": self.uuid,
"alterId": self.alterId
}
]
}]
},
"streamSettings": {
"network": "ws",
"security": self.camouflageTls,
"tlsSettings": {
"allowInsecure": True,
},
"httpSettings": {
"path": self.camouflagePath,
"host": [
self.camouflageHost
]
}
},
"tag": "out"
})
return v2rayConf
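# Hedged usage sketch (not part of the original class): the server address,
# UUID, and camouflage settings below are placeholders, not a real node.
if __name__ == '__main__':
    import json
    node = V2ray('203.0.113.10', 443, 'example-node', 'auto',
                 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', 0, 'ws',
                 'none', 'example.com', '/ray', 'tls')
    print(json.dumps(node.formatConfig(), indent=2))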
1 |
py | 1a4d7833b6b0d72e2912769bc67c546142a498a8 | #!/usr/bin/env python3
### Eval option 1: onnxruntime
import numpy as np
import onnxruntime as rt
sess = rt.InferenceSession("test.onnx")
input_name = sess.get_inputs()[0].name
img = np.ones((3, 3)).astype(np.float32)
pred_onx = sess.run(None, {input_name: img})[0]
print(pred_onx)
### Expected output:
"""
[[667. 667. 667.]
[667. 667. 667.]
[667. 667. 667.]]
""" |
py | 1a4d78c79fe9226b7cfe1d00fd7ca6dcfbf2d555 | '''
Created on Apr 4, 2022
@author: mballance
'''
from libarl import core
from rctgen.impl.ctor import Ctor
from rctgen.impl.model_info import ModelInfo
from rctgen.impl.type_info import TypeInfo
from rctgen.impl.exec_kind_e import ExecKindE
from rctgen.impl.exec_group import ExecGroup
from rctgen.impl.rt_ctxt import RtCtxt
from rctgen.impl.impl_base import ImplBase
class ComponentImpl(ImplBase):
"""Methods added to Component-decorated classes"""
@staticmethod
async def eval(self, action_t):
print("ComponentImpl.eval")
ctor = Ctor.inst()
ev = ctor.ctxt().mkModelEvaluator()
it = ev.eval(
self._modelinfo._lib_obj,
action_t._typeinfo._lib_obj)
print("Iterating...")
while it.next():
if it.type() == core.ModelEvalNodeT.Action:
action_field = it.action()
action = action_field.getFieldData()
await action._evalExecTarget(ExecKindE.Body)
# print("Action: %s" % str(action))
elif it.type() == core.ModelEvalNodeT.Parallel:
# Create a coroutine for each branch
# Wait for coroutines to complete
pass
elif it.type() == core.ModelEvalNodeT.Sequence:
# Iterate through each item and dispatch
pass
print("type: %s" % str(it.type()))
@staticmethod
def init(self, base, *args, **kwargs):
ctor = Ctor.inst()
typeinfo = type(self)._typeinfo
s = ctor.scope()
if s is not None:
if s.facade_obj is None:
# The field-based caller has created a frame for us
s.facade_obj = self
elif s.facade_obj is self:
s.inc_inh_depth()
else:
# Need to create a new scope
if s._type_mode:
raise Exception("Should hit in type mode")
s = ctor.push_scope(
self,
ctor.ctxt().buildModelComponent(
typeinfo.lib_obj,
type(self).__name__),
False)
pass
pass
else:
# Push a new scope, knowing that we're not in type mode
s = ctor.push_scope(
self,
ctor.ctxt().buildModelComponent(
typeinfo.lib_obj,
type(self).__name__),
False)
self._modelinfo = ModelInfo(self, "<>")
self._modelinfo._lib_obj = s._lib_scope
print("__init__")
# Populate the fields
for i,fc in enumerate(typeinfo._field_ctor_l):
print("Field: %s" % fc[0])
ctor.push_scope(None, s.lib_scope.getField(i), False)
field_facade = fc[1](fc[0])
setattr(self, fc[0], field_facade)
ctor.pop_scope()
# Invoke the user-visible constructor
        base(self, *args, **kwargs)
if s.dec_inh_depth() == 0:
if not ctor.is_type_mode():
# Run the init sequence
self._runInitSeq()
pass
@staticmethod
def _runInitSeq(self):
typeinfo : TypeInfo = type(self)._typeinfo
ctxt = RtCtxt.inst()
if ExecKindE.InitDown in typeinfo._exec_m.keys():
exec_g : ExecGroup = typeinfo._exec_m[ExecKindE.InitDown]
ctxt.push_exec_group(exec_g)
for e in exec_g.execs:
e.func(self)
ctxt.pop_exec_group()
# TODO: Recurse for any component-type fields
if ExecKindE.InitUp in typeinfo._exec_m.keys():
exec_g : ExecGroup = typeinfo._exec_m[ExecKindE.InitUp]
ctxt.push_exec_group(exec_g)
for e in exec_g.execs:
e.func(self)
ctxt.pop_exec_group()
pass
@staticmethod
def _createInst(cls, name):
ret = cls()
return ret
@classmethod
def addMethods(cls, T):
ImplBase.addMethods(T)
base_init = T.__init__
setattr(T, "__init__", lambda self, *args, **kwargs: cls.init(
self, base_init, *args, **kwargs))
setattr(T, "_runInitSeq", cls._runInitSeq)
setattr(T, "_createInst", cls._createInst)
setattr(T, "eval", cls.eval)
|
py | 1a4d791b85d8ad940a199a0a4446072242b3bd4f | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.layers import EventLayers
def test_EventLayersMixIn():
grid = RasterModelGrid((4, 4))
assert hasattr(grid, "event_layers")
assert grid.event_layers.number_of_layers == 0
assert grid.event_layers.number_of_stacks == 4
def test_setitem_with_scalar():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = 2.0
truth = np.array([[2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]])
assert_array_equal(layers["age"], truth)
def test_set_item_with_1d():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = [4.0, 7.0]
truth = np.array([[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]])
assert_array_equal(layers["age"], truth)
def test_set_item_with_2d():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
layers.add(2.0, age=4.0)
truth = np.array([[3.0, 3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0, 4.0]])
assert_array_equal(layers["age"], truth)
layers["age"] = [[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]]
truth = np.array([[4.0, 4.0, 4.0, 4.0, 4.0], [7.0, 7.0, 7.0, 7.0, 7.0]])
assert_array_equal(layers["age"], truth)
def test__str__():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
vals = str(layers)
assert vals.splitlines() == [
"number_of_layers: 1",
"number_of_stacks: 5",
"tracking: age",
]
def test__repr__():
layers = EventLayers(5)
layers.add(1.0, age=3.0)
vals = repr(layers)
assert vals == "EventLayers(5)"
def test_adding_untracked_layer():
layers = EventLayers(3)
layers.add(1.0, type=3.0, size="sand")
layers.add([0.0, 0.0, 1.0], type=3.0, size="sand")
with pytest.raises(ValueError):
layers.add([1.0], type=3.0, size="sand", spam="eggs")
|
py | 1a4d7aa1aa55a0a40706ae6b858695e0c108fbae | data = input()
words_to_search = ["Sand", "Water", "Fish", "Sun"]
count = 0
for word in words_to_search:
count += data.lower().count(word.lower())
print(count)
|
py | 1a4d7b10dacc07dba90b173adb9a433b23255942 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert references to JSON file."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import with_statement
import re
import os
import sys
import glob
import logging
import json
from nltk.stem.snowball import SnowballStemmer as Stemmer
logging.basicConfig(level=logging.INFO)
references = {}
readers_references = {}
author_references = {}
for input_dir in glob.glob(sys.argv[1]+'/[0-9]*'):
file_id = input_dir.split('/')[-1].split('.')[0]
logging.info("loading author-assigned references from {}".format(file_id))
author_references[file_id] = []
try:
with open(input_dir+"/"+file_id+".kwd", 'r', errors='replace') as f:
text = f.read()
text = text.replace(u"\uFFFD", "\n")
text = re.sub(r'\n+', '\n', text).strip()
lines = text.split("\n")
keyphrases = []
for line in lines:
words = line.strip().split()
if sys.argv[3] == "stem":
stems = [Stemmer('porter').stem(w.lower()) for w in words]
keyphrases.append(' '.join(stems))
else:
keyphrases.append(' '.join([w.lower() for w in words]))
author_references[file_id] = keyphrases
except IOError:
logging.info("No author-assigned references for {}".format(file_id))
readers_references[file_id] = []
for reader_file in glob.glob(input_dir+'/KEY/*.key'):
logging.info("loading reader-assigned references from {}".format(reader_file))
with open(reader_file, 'r', errors='replace') as f:
text = f.read()
text = text.replace(u"\uFFFD", "\n")
text = re.sub(r'\n+', '\n', text).strip()
lines = text.split("\n")
keyphrases = []
for line in lines:
words = line.strip().split()
if sys.argv[3] == "stem":
stems = [Stemmer('porter').stem(w.lower()) for w in words]
keyphrases.append(' '.join(stems))
else:
keyphrases.append(' '.join([w.lower() for w in words]))
for keyphrase in keyphrases:
readers_references[file_id].append(keyphrase)
if sys.argv[4] == "author":
for doc_id in author_references:
references[doc_id] = [[u] for u in set(author_references[doc_id])]
elif sys.argv[4] == "reader":
for doc_id in readers_references:
references[doc_id] = [[u] for u in set(readers_references[doc_id])]
else:
for doc_id in readers_references:
references[doc_id] = [[u] for u in set(readers_references[doc_id])| set(author_references[doc_id])]
with open(sys.argv[2], 'w') as o:
json.dump(references, o, sort_keys = True, indent = 4)
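# Example invocation (hedged; the script name and paths are placeholders):
# argv[1] is the dataset directory, argv[2] the output JSON file, argv[3]
# "stem" to stem keyphrases (anything else keeps surface forms), and argv[4]
# "author", "reader", or anything else to combine both sets:
#
#   python convert_references.py 500N-KPCrowd/train train.reader.stem.json stem reader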
|
py | 1a4d7b77840b474d09021fda8c72e3d882088636 | import os
class Config:
'''
General configuration parent class
'''
QUOTE_API_BASE_URL = 'http://quotes.stormconsultancy.co.uk/random.json'
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:sijuinigani@localhost/bloggz'
UPLOADS_DEFAULT_DEST ='app/static'
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:sijuinigani@localhost/bloggz'
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:sijuinigani@localhost/bloggz'
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
} |
py | 1a4d7ba51b6bdbcd783b061569afacf2d8071a0c | from sqlalchemy import Column, Integer
from database.main import Base, session
class CreatureDefaultsSchema(Base):
"""
This table holds the default values that a creature should have/give at a certain level.
The table's contents are as follows:
creature_level - the level of the creature which should have these attributes
armor - the default amount of armor points a creature of this level should have
min_gold_reward - the minimum amount of gold a creature of this level should give
max_gold_reward - the maximum amount of gold a creature of this level should give
xp_reward - the default amount of experience points a creature of this level should give
Example:
creature_level, armor, min_gold_reward, max_gold_reward, xp_reward
1, 50, 2, 5, 50
2, 65, 4, 6, 75
etc...
A creature of level 1 would drop between 2-5 gold and reward 50 XP on death
"""
__tablename__ = 'creature_defaults'
creature_level = Column(Integer, primary_key=True)
armor = Column(Integer)
min_gold_reward = Column(Integer)
max_gold_reward = Column(Integer)
xp_reward = Column(Integer)
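# Hedged helper sketch (not part of the original schema module): look up the
# default stats row for a given creature level via the imported session.
def get_creature_defaults(creature_level: int):
    """Return the CreatureDefaultsSchema row for the given level, or None."""
    return session.query(CreatureDefaultsSchema).get(creature_level)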
|
py | 1a4d7cac2df313f9c2bdc48956e92a53f1fa3644 | from setuptools import setup
import os
VERSION = "0.6"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="airtable-export",
description="Export Airtable data to files on disk",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Simon Willison",
url="https://github.com/simonw/airtable-export",
project_urls={
"Issues": "https://github.com/simonw/airtable-export/issues",
"CI": "https://github.com/simonw/airtable-export/actions",
"Changelog": "https://github.com/simonw/airtable-export/releases",
},
license="Apache License, Version 2.0",
version=VERSION,
packages=["airtable_export"],
entry_points="""
[console_scripts]
airtable-export=airtable_export.cli:cli
""",
install_requires=["click", "PyYAML", "httpx", "sqlite-utils"],
extras_require={"test": ["pytest", "pytest-mock"]},
tests_require=["airtable-export[test]"],
)
|
py | 1a4d7cf1896cbac8f0b3e69db71b33d0ca2e8a65 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'attribute_exists': 'bool',
'case_sensitive': 'bool',
'field': 'str',
'operator': 'str',
'type': 'str',
'value': 'str',
'whole_word': 'bool'
}
attribute_map = {
'attribute_exists': 'attribute_exists',
'case_sensitive': 'case_sensitive',
'field': 'field',
'operator': 'operator',
'type': 'type',
'value': 'value',
'whole_word': 'whole_word'
}
def __init__(self, attribute_exists=None, case_sensitive=None, field=None, operator=None, type=None, value=None, whole_word=None): # noqa: E501
"""SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem - a model defined in Swagger""" # noqa: E501
self._attribute_exists = None
self._case_sensitive = None
self._field = None
self._operator = None
self._type = None
self._value = None
self._whole_word = None
self.discriminator = None
if attribute_exists is not None:
self.attribute_exists = attribute_exists
if case_sensitive is not None:
self.case_sensitive = case_sensitive
if field is not None:
self.field = field
if operator is not None:
self.operator = operator
self.type = type
if value is not None:
self.value = value
if whole_word is not None:
self.whole_word = whole_word
@property
def attribute_exists(self):
"""Gets the attribute_exists of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true. # noqa: E501
:return: The attribute_exists of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: bool
"""
return self._attribute_exists
@attribute_exists.setter
def attribute_exists(self, attribute_exists):
"""Sets the attribute_exists of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true. # noqa: E501
:param attribute_exists: The attribute_exists of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: bool
"""
self._attribute_exists = attribute_exists
@property
def case_sensitive(self):
"""Gets the case_sensitive of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
If true, the value comparison will be case sensitive. Default is true. # noqa: E501
:return: The case_sensitive of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: bool
"""
return self._case_sensitive
@case_sensitive.setter
def case_sensitive(self, case_sensitive):
"""Sets the case_sensitive of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the value comparison will be case sensitive. Default is true. # noqa: E501
:param case_sensitive: The case_sensitive of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: bool
"""
self._case_sensitive = case_sensitive
@property
def field(self):
"""Gets the field of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\". # noqa: E501
:return: The field of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""Sets the field of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\". # noqa: E501
:param field: The field of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: str
"""
if field is not None and len(field) > 255:
raise ValueError("Invalid value for `field`, length must be less than or equal to `255`") # noqa: E501
if field is not None and len(field) < 0:
raise ValueError("Invalid value for `field`, length must be greater than or equal to `0`") # noqa: E501
self._field = field
@property
def operator(self):
"""Gets the operator of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
How to compare the specified attribute of each file to the specified value. # noqa: E501
:return: The operator of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
How to compare the specified attribute of each file to the specified value. # noqa: E501
:param operator: The operator of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: str
"""
self._operator = operator
@property
def type(self):
"""Gets the type of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
The type of this criterion, that is, which file attribute to match on. # noqa: E501
:return: The type of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The type of this criterion, that is, which file attribute to match on. # noqa: E501
:param type: The type of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["name", "path", "accessed_time", "accessed_before", "accessed_after", "birth_time", "birth_before", "birth_after", "changed_time", "changed_before", "changed_after", "size", "file_type", "posix_regex_name", "user_name", "user_id", "group_name", "group_id", "no_user", "no_group"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def value(self):
"""Gets the value of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
The value to compare the specified attribute of each file to. # noqa: E501
:return: The value of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The value to compare the specified attribute of each file to. # noqa: E501
:param value: The value of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: str
"""
self._value = value
@property
def whole_word(self):
"""Gets the whole_word of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
If true, the attribute must match the entire word. Default is true. # noqa: E501
:return: The whole_word of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:rtype: bool
"""
return self._whole_word
@whole_word.setter
def whole_word(self, whole_word):
"""Sets the whole_word of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the attribute must match the entire word. Default is true. # noqa: E501
:param whole_word: The whole_word of this SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem. # noqa: E501
:type: bool
"""
self._whole_word = whole_word
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a4d7f0211b7d635d21b257a47e07929c8c365e0 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .tfvc_branch_ref import TfvcBranchRef
class TfvcBranch(TfvcBranchRef):
"""TfvcBranch.
:param path: Path for the branch.
:type path: str
:param _links: A collection of REST reference links.
:type _links: :class:`ReferenceLinks <tfvc.v4_1.models.ReferenceLinks>`
:param created_date: Creation date of the branch.
:type created_date: datetime
:param description: Description of the branch.
:type description: str
:param is_deleted: Is the branch deleted?
:type is_deleted: bool
:param owner: Alias or display name of user
:type owner: :class:`IdentityRef <tfvc.v4_1.models.IdentityRef>`
:param url: URL to retrieve the item.
:type url: str
:param children: List of children for the branch.
:type children: list of :class:`TfvcBranch <tfvc.v4_1.models.TfvcBranch>`
:param mappings: List of branch mappings.
:type mappings: list of :class:`TfvcBranchMapping <tfvc.v4_1.models.TfvcBranchMapping>`
:param parent: Path of the branch's parent.
:type parent: :class:`TfvcShallowBranchRef <tfvc.v4_1.models.TfvcShallowBranchRef>`
:param related_branches: List of paths of the related branches.
:type related_branches: list of :class:`TfvcShallowBranchRef <tfvc.v4_1.models.TfvcShallowBranchRef>`
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'is_deleted': {'key': 'isDeleted', 'type': 'bool'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'url': {'key': 'url', 'type': 'str'},
'children': {'key': 'children', 'type': '[TfvcBranch]'},
'mappings': {'key': 'mappings', 'type': '[TfvcBranchMapping]'},
'parent': {'key': 'parent', 'type': 'TfvcShallowBranchRef'},
'related_branches': {'key': 'relatedBranches', 'type': '[TfvcShallowBranchRef]'}
}
def __init__(self, path=None, _links=None, created_date=None, description=None, is_deleted=None, owner=None, url=None, children=None, mappings=None, parent=None, related_branches=None):
super(TfvcBranch, self).__init__(path=path, _links=_links, created_date=created_date, description=description, is_deleted=is_deleted, owner=owner, url=url)
self.children = children
self.mappings = mappings
self.parent = parent
self.related_branches = related_branches
|
py | 1a4d7fe297f4a66875f8ee010524a5a9a9190a2f | import warnings
from collections import defaultdict
import copy
from coffea.nanoevents.schemas.base import BaseSchema, zip_forms
from coffea.nanoevents.util import quote
class PHYSLITESchema(BaseSchema):
"""PHYSLITE schema builder - work in progress.
This is a schema for the `ATLAS DAOD_PHYSLITE derivation
<https://gitlab.cern.ch/atlas/athena/-/blob/release/21.2.108.0/PhysicsAnalysis/DerivationFramework/DerivationFrameworkPhys/share/PHYSLITE.py>`_.
Closely following `schemas.nanoaod.NanoAODSchema`, it is mainly build from
naming patterns where the "Analysis" prefix has been removed, so the
collections will be named Electrons, Muons, instead of AnalysisElectrons,
AnalysisMunos, etc. The collection fields correspond to the "Aux" and
"AuxDyn" columns.
Collections are assigned mixin types according to the `mixins` mapping.
All collections are then zipped into one `base.NanoEvents` record and returned.
    Cross references are built from ElementLink columns. Global indices are
created dynamically, using an ``_eventindex`` field that is attached to
each collection.
"""
truth_collections = [
"TruthPhotons",
"TruthMuons",
"TruthNeutrinos",
"TruthTaus",
"TruthElectrons",
"TruthBoson",
"TruthBottom",
"TruthTop",
]
"""TRUTH3 collection names.
TruthParticle behavior is assigned to all of them and global index forms
for parent/children relations are created for all combinations.
"""
mixins = {
"Electrons": "Electron",
"Muons": "Muon",
"Jets": "Particle",
"TauJets": "Particle",
"CombinedMuonTrackParticles": "TrackParticle",
"ExtrapolatedMuonTrackParticles": "TrackParticle",
"GSFTrackParticles": "TrackParticle",
"InDetTrackParticles": "TrackParticle",
"MuonSpectrometerTrackParticles": "TrackParticle",
}
"""Default configuration for mixin types, based on the collection name.
The types are implemented in the `coffea.nanoevents.methods.physlite` module.
"""
for _k in truth_collections:
mixins[_k] = "TruthParticle"
def __init__(self, base_form):
super().__init__(base_form)
self._form["contents"] = self._build_collections(self._form["contents"])
def _build_collections(self, branch_forms):
zip_groups = defaultdict(list)
has_eventindex = defaultdict(bool)
for key, ak_form in branch_forms.items():
# Normal fields
key_fields = key.split("/")[-1].split(".")
top_key = key_fields[0]
sub_key = ".".join(key_fields[1:])
objname = top_key.replace("Analysis", "").replace("AuxDyn", "")
zip_groups[objname].append(((key, sub_key), ak_form))
# add eventindex form, based on the first single-jagged list column
if (
not has_eventindex[objname]
and "List" in ak_form["class"]
and "List" not in ak_form["content"]["class"]
):
zip_groups[objname].append(
((key, "_eventindex"), self._create_eventindex_form(ak_form, key))
)
has_eventindex[objname] = True
# zip the forms
contents = {}
for objname, keys_and_form in zip_groups.items():
try:
contents[objname] = zip_forms(
{sub_key: form for (key, sub_key), form in keys_and_form},
objname,
self.mixins.get(objname, None),
bypass=True,
)
content = contents[objname]["content"]
content["parameters"] = dict(
content.get("parameters", {}), collection_name=objname
)
except NotImplementedError:
warnings.warn(f"Can't zip collection {objname}")
return contents
@staticmethod
def _create_eventindex_form(base_form, key):
form = copy.deepcopy(base_form)
form["content"] = {
"class": "NumpyArray",
"parameters": {},
"form_key": quote(f"{key},!load,!eventindex,!content"),
"itemsize": 8,
"primitive": "int64",
}
return form
@property
def behavior(self):
"""Behaviors necessary to implement this schema"""
from coffea.nanoevents.methods import physlite
return physlite.behavior
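# Hedged usage sketch (not part of the original module): the ROOT file name is
# a placeholder; "CollectionTree" is the usual tree name in ATLAS DAOD
# derivations.
if __name__ == "__main__":
    from coffea.nanoevents import NanoEventsFactory
    events = NanoEventsFactory.from_root(
        "DAOD_PHYSLITE.example.root",
        treepath="CollectionTree",
        schemaclass=PHYSLITESchema,
    ).events()
    print(events.Electrons.fields)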
|
py | 1a4d80f896cbd819f772d429b26615ea6d6076ca |
from .core import start, register_plugin, set_bounds, add_initialization_hook
|
py | 1a4d816bb2babce2f7099777f151edcf6c3489fe | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 3 of the zero_out op."""
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_op_3
class ZeroOut3Test(tf.test.TestCase):
def test(self):
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result, [5, 0, 0, 0, 0])
def testAttr(self):
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
self.assertAllEqual(result, [0, 0, 0, 2, 0])
def testNegative(self):
with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
self.evaluate(zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1))
def testLarge(self):
with self.assertRaisesOpError("preserve_index out of range"):
self.evaluate(zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17))
if __name__ == "__main__":
tf.test.main()
|
py | 1a4d82c1abcf3cc079aeff8bcdd3c3d637f4aeba | from const import GAME_COUNT
from game import game, TACTIC_LIST, game_process
from itertools import combinations
if __name__ == "__main__":
t = TACTIC_LIST
s = {i:0 for i in t.keys()}
for i, j in combinations(t.keys(), r=2):
x, y = game_process(t[i], t[j], GAME_COUNT)
print(f'{i} vs {j}: +{x}, +{y}')
s[i] += x
s[j] += y
print("----------result----------")
print(sorted(s.items(), key=lambda x:(-x[1])))
|
py | 1a4d83d8759c8187d2dbacc6d527caa3b94061df | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 15:07:20 2017
@author: spxrxs
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import astropy.io.fits as fits
import matplotlib.cm as cm
import os
import matplotlib.ticker as ticker
from astropy.wcs import WCS
import matplotlib.colors
from reproject import reproject_interp
from astroscrappy import detect_cosmics
#getting list of every file in the directory
files = os.listdir('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80')
print(files)
#loading the data for plotting
data = np.loadtxt('sn2017ahndata.csv', delimiter = ',', dtype = object)
# opening the FITS file which all images are aligned to
hdu1 = fits.open('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80/' + 'coj0m403-kb98-20170302-0140-e91.fits.fz')[1]
# loops through every file in the folder
for i in range(len(files)):
#opening the file for plotting
hdu2 = fits.open('/home/user/spxrxs/SupernovaImaging/lcogtdata-20170802-80/' + files[i])[1]
thing, hdu2.data = detect_cosmics(hdu2.data, readnoise=20., gain=1.4, sigclip=5., sigfrac=.5, objlim=6.)
times = np.zeros(np.shape(data)[0])
mags = np.zeros(np.shape(data)[0])
dmags = np.zeros(np.shape(data)[0])
k = 0
for j in range(np.shape(data)[0]):
if hdu2.header['FILTER'] == data[j,1]:
if hdu2.header['MJD-OBS'] >= float(data[j,0]):
times[k] = float(data[j,0])
mags[k] = float(data[j,4])
dmags[k] = float(data[j,3])
k +=1
j +=1
times = times[:k]
mags = mags[:k]
dmags = dmags[:k]
array, footprint = reproject_interp(hdu2, hdu1.header)
plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), rowspan = 2, colspan = 3)
#ax1 = plt.subplot(2,1,1, projection = WCS(hdu1.header))
normalised = np.clip(array, np.nanpercentile(array, 50), np.nanpercentile(array, 99.5)) / np.nanpercentile(array, 40)
# normalised =array /np.nanpercentile(array, 25)
# sigma = np.sqrt(np.var(normalised))
# final_data = np.clip(normalised - np.nanpercentile(normalised, 25), 1,4)
ax1.imshow(np.log(normalised)[200:800,400:1200], norm =matplotlib.colors.Normalize() , cmap = cm.bone )
ax1.spines['right'].set_color('none')
ax1.spines['left'].set_color('none')
ax1.yaxis.set_major_locator(ticker.NullLocator())
ax1.xaxis.set_major_locator(ticker.NullLocator())
#ax1.coords.grid()
#ax1.coords['ra'].set_axislabel('Right Ascension')
#ax1.coords['dec'].set_axislabel('Declination')
#ax1.set_title(hdu2.header['FILTER']+ ' ' + str(hdu2.header['MJD-OBS']))
ax2 = plt.subplot2grid((3,3), (2,0), rowspan = 1, colspan = 3)
plt.errorbar(times -57790, mags, yerr = dmags, fmt = 'o', color = 'red')
plt.gca().invert_yaxis()
plt.ylim([21,12])
plt.xlim([0, 100])
plt.xlabel('Time (Days)')
plt.ylabel('Magnitude')
plt.tight_layout()
#plt.show()
i +=1
print(i)
    plt.savefig(hdu2.header['FILTER'] + str(hdu2.header['MJD-OBS']) + 'final' + '.png')
|
py | 1a4d8422a46e942cb21ef5611bae9e390d037924 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from ngraph.transformers.passes.passes import GraphPass
class VerifyPass(GraphPass):
def do_pass(self, computation_decl, **kwargs):
self.computation_decl = computation_decl
# Make sure they can print. Since this is python there are no compile time checks.
for exop in computation_decl.exop_block:
str(exop)
self.test_read_before_write()
def test_read_before_write(self):
written_tensors = set()
for exop in self.computation_decl.exop_block:
for arg in exop.args:
tensor = arg.read_view.tensor
if tensor.is_persistent is False:
if tensor not in written_tensors:
raise RuntimeError(
'tensor read before written: {} - {}'.format(exop.name, tensor))
for output_decl in exop.output_decls:
if output_decl.tensor_view_decl.tensor.is_persistent is False:
written_tensors.add(output_decl.tensor_view_decl.tensor)
|
py | 1a4d84a6ee23463e56984626e4a0a0fc09afa7ea | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import struct
from datetime import datetime
from Session import Session
from IoTDBConstants import *
from SessionDataSet import SessionDataSet
from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.transport import TSocket, TTransport
import csv
class Importer:
def __init__(self, session: Session):
self.session = session
def align_all_series(self, file, time_format='%Y-%m-%dT%H:%M:%S.%f%z', sg=None):
self.session.open(False)
try:
csvFile = open(file, "r")
reader = csv.reader(csvFile)
deviceID_lst = []
measurement_lst = []
for line in reader:
num_of_series = len(line) - 1
if reader.line_num == 1:
for item in line:
if item != 'Time':
deviceID_lst.append('.'.join(item.split('.')[:-1]))
measurement_lst.append(item.split('.')[-1])
else:
time = self.__time_to_timestamp(line[0], time_format)
for i in range(num_of_series):
if line[i + 1] not in ('', ' ', None, "null", "Null"):
if sg:
deviceID = sg + "." + deviceID_lst[i]
else:
deviceID = deviceID_lst[i]
self.session.insert_str_record(deviceID, time,
[measurement_lst[i]],
[line[i + 1]])
csvFile.close()
except Exception:
print("the csv format is incorrect")
self.session.close()
def align_by_device(self, file, time_format='%Y-%m-%dT%H:%M:%S.%f%z', sg=None):
self.session.open(False)
try:
csvFile = open(file, "r")
reader = csv.reader(csvFile)
measurement_lst = []
for line in reader:
num_of_series = len(line) - 2
if reader.line_num == 1:
for item in line:
if item != 'Time' and item != 'Device':
measurement_lst.append(item)
else:
time = self.__time_to_timestamp(line[0], time_format)
if sg:
deviceID = sg + "." + line[1]
else:
deviceID = line[1]
for i in range(num_of_series):
if line[i + 2] not in ('', ' ', None, "null", "Null"):
self.session.insert_str_record(deviceID, time,
[measurement_lst[i]],
[line[i + 2]])
csvFile.close()
except Exception:
print("the csv format is incorrect")
self.session.close()
@staticmethod
def __time_to_timestamp(str_time: str, time_format: str):
"""str_time: the string representation of date and time with timezone
at the end.
e.g. '2012-11-01T04:16:13-04:00'
        time_format: the time format written with format tokens, including
the time zone at the end
e.g. '%Y-%m-%dT%H:%M:%S%z'
"""
try:
return int(str_time)
except TypeError:
time = datetime.strptime(''.join(str_time.rsplit(':', 1)), time_format)
timestamp = int(datetime.timestamp(time))
return timestamp
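# Hedged usage sketch (not part of the original module): host, port,
# credentials, the CSV path, and the storage group below are placeholders.
if __name__ == "__main__":
    example_session = Session("127.0.0.1", 6667, "root", "root")
    importer = Importer(example_session)
    importer.align_all_series("timeseries.csv", sg="root.sg_demo")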
|
py | 1a4d86a1c85430aed8df6d9a6fedd2c0929840ed | import json
import re
from rasa_nlu.model import Interpreter
# Custom Components
class SemesterExtractor:
@staticmethod
def process(text):
words = text.split(" ")
        ordinal_values = {"first": 1, "second": 2, "third": 3, "fourth": 4, "fifth": 5, "sixth": 6, "seventh": 7, "eighth": 8}
semester = None
for word in words:
pattern = re.compile(r"\d+(st|nd|rd|th)")
if pattern.search(word):
semester = int(word[:-2])
for word in words:
word = word.lower()
pattern = re.compile(r"(first|second|third|fourth|fifth|sixth|seventh|eigth)")
if pattern.search(word):
semester = ordinal_values[word]
if semester != None:
data = [{'entity': 'semester', 'value': semester}]
return data
return semester
# End of Custom Components
class Data:
def __init__(self, text, data):
self.text = text
self.data = data
def __repr__(self):
return str(self.data)
def get_intent(self):
if self.text == "Get Started":
return "start"
else:
return self.data['intent']['name']
def get_confidence(self):
return self.data['intent']['confidence']
def get_entities(self):
semester_data = SemesterExtractor.process(self.text)
if semester_data != None:
self.data['entities'] += semester_data
return dict(map((lambda x : (x['entity'], x['value'])), self.data['entities']))
class Engine:
def __init__(self, models_path = "./models/vardhamanbot/nlu"):
self.interpreter = Interpreter.load(models_path)
def parse(self, message):
message = message.strip(" \n\t\r.")
return Data(message, self.interpreter.parse(message)) |
py | 1a4d873a57c6bbf9dfda043cfcd68360d5215db3 | from test import support
import time
import unittest
import locale
import sysconfig
import sys
import platform
try:
import threading
except ImportError:
threading = None
# Max year is only limited by the size of C int.
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_time(self):
time.time()
info = time.get_clock_info('time')
self.assertFalse(info.monotonic)
self.assertTrue(info.adjustable)
def test_clock(self):
time.clock()
info = time.get_clock_info('clock')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
time.clock_gettime(time.CLOCK_REALTIME)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
# Issue #10762: Guard against invalid/non-supported format string
        # so that Python doesn't crash (Windows crashes when the format string
        # input to [w]strftime is not kosher).
if sys.platform.startswith('win'):
with self.assertRaises(ValueError):
time.strftime('%f')
def _bounds_checking(self, func):
# Make sure that strftime() checks the bounds of the various parts
# of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with support.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
# If mktime fails, ctime will fail too. This may happen
# on some platforms.
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST"
# (non-DST timezone), and "EDT" instead of "AEDT" (DST timezone),
# on some operating systems (e.g. FreeBSD), which is wrong. See for
# example this bug:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
            self.assertIn(time.tzname[0], ('AEST', 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
# Issue #13309: passing extreme values to mktime() or localtime()
# borks the glibc's internal timezone data.
@unittest.skipUnless(platform.libc_ver()[0] != 'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
# It may not be possible to reliably make mktime return error
        # on all platforms. This will make sure that no other exception
# than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
def test_monotonic(self):
t1 = time.monotonic()
time.sleep(0.5)
t2 = time.monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
# Issue #20101: On some Windows machines, dt may be slightly low
self.assertTrue(0.45 <= dt <= 1.0, dt)
info = time.get_clock_info('monotonic')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
        # process_time() should not include time spent during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
        # use 20 ms because process_time() usually has a resolution of 15 ms
        # on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
def test_get_clock_info(self):
clocks = ['clock', 'perf_counter', 'process_time', 'time']
if hasattr(time, 'monotonic'):
clocks.append('monotonic')
for name in clocks:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
self.skipTest('could not set locale.LC_ALL to fr_FR')
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
# Issue 13305: For years < 1000, the value is not always
# padded to 4 digits across platforms. The C standard
# assumes year >= 1900, so it does not specify the number
# of digits.
if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
_format = '%04d'
else:
_format = '%d'
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8)
def test_4dyear(self):
# Check that we can return the zero padded value.
if self._format == '%04d':
self.test_year('%04d')
else:
def year4d(y):
return time.strftime('%4Y', (y,) + (0,) * 8)
self.test_year('%04d', func=year4d)
def skip_if_not_supported(y):
msg = "strftime() is limited to [1; 9999] with Visual Studio"
# Check that it doesn't crash for year > 9999
try:
time.strftime('%Y', (y,) + (0,) * 8)
except ValueError:
cond = False
else:
cond = True
return unittest.skipUnless(cond, msg)
@skip_if_not_supported(10000)
def test_large_year(self):
return super().test_large_year()
@skip_if_not_supported(0)
def test_negative(self):
return super().test_negative()
del skip_if_not_supported
class _Test4dYear:
_format = '%d'
def test_year(self, fmt=None, func=None):
fmt = fmt or self._format
func = func or self.yearstr
self.assertEqual(func(1), fmt % 1)
self.assertEqual(func(68), fmt % 68)
self.assertEqual(func(69), fmt % 69)
self.assertEqual(func(99), fmt % 99)
self.assertEqual(func(999), fmt % 999)
self.assertEqual(func(9999), fmt % 9999)
def test_large_year(self):
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
self.assertEqual(self.yearstr(TIME_MAXYEAR), str(TIME_MAXYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)
def test_negative(self):
self.assertEqual(self.yearstr(-1), self._format % -1)
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
self.assertEqual(self.yearstr(-123456789), str(-123456789))
self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
self.assertEqual(self.yearstr(TIME_MINYEAR + 1900), str(TIME_MINYEAR + 1900))
# Issue #13312: it may return wrong value for year < TIME_MINYEAR + 1900
# Skip the value test, but check that no error is raised
self.yearstr(TIME_MINYEAR)
# self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
pass
class TestPytime(unittest.TestCase):
def setUp(self):
self.invalid_values = (
-(2 ** 100), 2 ** 100,
-(2.0 ** 100.0), 2.0 ** 100.0,
)
@support.cpython_only
def test_time_t(self):
from _testcapi import pytime_object_to_time_t
for obj, time_t in (
(0, 0),
(-1, -1),
(-1.0, -1),
(-1.9, -1),
(1.0, 1),
(1.9, 1),
):
self.assertEqual(pytime_object_to_time_t(obj), time_t)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_time_t, invalid)
@support.cpython_only
def test_timeval(self):
from _testcapi import pytime_object_to_timeval
for obj, timeval in (
(0, (0, 0)),
(-1, (-1, 0)),
(-1.0, (-1, 0)),
(1e-6, (0, 1)),
(-1e-6, (-1, 999999)),
(-1.2, (-2, 800000)),
(1.1234560, (1, 123456)),
(1.1234569, (1, 123456)),
(-1.1234560, (-2, 876544)),
(-1.1234561, (-2, 876543)),
):
self.assertEqual(pytime_object_to_timeval(obj), timeval)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_timeval, invalid)
@support.cpython_only
def test_timespec(self):
from _testcapi import pytime_object_to_timespec
for obj, timespec in (
(0, (0, 0)),
(-1, (-1, 0)),
(-1.0, (-1, 0)),
(1e-9, (0, 1)),
(-1e-9, (-1, 999999999)),
(-1.2, (-2, 800000000)),
(1.1234567890, (1, 123456789)),
(1.1234567899, (1, 123456789)),
(-1.1234567890, (-2, 876543211)),
(-1.1234567891, (-2, 876543210)),
):
self.assertEqual(pytime_object_to_timespec(obj), timespec)
for invalid in self.invalid_values:
self.assertRaises(OverflowError, pytime_object_to_timespec, invalid)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_localtime_timezone(self):
# Get the localtime and examine it for the offset and zone.
lt = time.localtime()
self.assertTrue(hasattr(lt, "tm_gmtoff"))
self.assertTrue(hasattr(lt, "tm_zone"))
# See if the offset and zone are similar to the module
# attributes.
if lt.tm_gmtoff is None:
self.assertTrue(not hasattr(time, "timezone"))
else:
self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
if lt.tm_zone is None:
self.assertTrue(not hasattr(time, "tzname"))
else:
self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])
# Try and make UNIX times from the localtime and a 9-tuple
# created from the localtime. Test to see that the times are
# the same.
t = time.mktime(lt); t9 = time.mktime(lt[:9])
self.assertEqual(t, t9)
# Make localtimes from the UNIX times and compare them to
# the original localtime, thus making a round trip.
new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
self.assertEqual(new_lt, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt.tm_zone, lt.tm_zone)
self.assertEqual(new_lt9, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt9.tm_zone, lt.tm_zone)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_strptime_timezone(self):
t = time.strptime("UTC", "%Z")
self.assertEqual(t.tm_zone, 'UTC')
t = time.strptime("+0500", "%z")
self.assertEqual(t.tm_gmtoff, 5 * 3600)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_short_times(self):
import pickle
# Load a short time structure using pickle.
st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
lt = pickle.loads(st)
self.assertIs(lt.tm_gmtoff, None)
self.assertIs(lt.tm_zone, None)
if __name__ == "__main__":
unittest.main()
|
py | 1a4d87717e6860bb3b9db1f367eb68c56b45e133 | """
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
# decorator for automatic mutex lock/unlock
def lockMutex(self, *args, **kwds):
with self.lock:
return method(self, *args, **kwds)
return lockMutex
COMMANDS = {
'rol': 2,
'ror': 1,
'mvp': 4,
'mst': 3,
'rfs': 13,
'sco': 30,
'cco': 32,
'gco': 31,
'sap': 5,
'gap': 6,
'stap': 7,
'rsap': 8,
'sgp': 9,
'ggp': 10,
'stgp': 11,
'rsgp': 12,
'sio': 14,
'gio': 15,
'calc': 19,
'comp': 20,
'jc': 21,
'ja': 22,
'csub': 23,
'rsub': 24,
'wait': 27,
'stop': 28,
'calcx': 33,
'aap': 34,
'agp': 35,
'aco': 39,
'sac': 29,
'stop_application': 128,
'run_application': 129,
'step_application': 130,
'reset_application': 131,
'start_download': 132,
'stop_download': 133,
'get_application_status': 135,
'get_firmware_version': 136,
'restore_factory_settings': 137,
}
PARAMETERS = { # negative values indicate read-only parameters
'target_position': 0,
'actual_position': 1,
'target_speed': 2,
'actual_speed': 3,
'maximum_speed': 4,
'maximum_acceleration': 5,
'maximum_current': 6,
'standby_current': 7,
'target_pos_reached': 8,
'ref_switch_status': 9,
'right_limit_switch_status': 10,
'left_limit_switch_status': 11,
'right_limit_switch_disable': 12,
'left_limit_switch_disable': 13,
'minimum_speed': -130,
'acceleration': -135,
'ramp_mode': 138,
'microstep_resolution': 140,
'soft_stop_flag': 149,
'ramp_divisor': 153,
'pulse_divisor': 154,
'referencing_mode': 193,
'referencing_search_speed': 194,
'referencing_switch_speed': 195,
'distance_end_switches': 196,
'mixed_decay_threshold': 203,
'freewheeling': 204,
'stall_detection_threshold': 205,
'actual_load_value': 206,
'driver_error_flags': -208,
'encoder_position': 209,
'encoder_prescaler': 210,
'fullstep_threshold': 211,
'maximum_encoder_deviation': 212,
'power_down_delay': 214,
'absolute_encoder_value': -215,
}
GLOBAL_PARAMETERS = {
'eeprom_magic': 64,
'baud_rate': 65,
'serial_address': 66,
'ascii_mode': 67,
'eeprom_lock': 73,
'auto_start_mode': 77,
'tmcl_code_protection': 81,
'coordinate_storage': 84,
'tmcl_application_status': 128,
'download_mode': 129,
'tmcl_program_counter': 130,
'tick_timer': 132,
'random_number': -133,
}
OPERATORS = {
'add': 0,
'sub': 1,
'mul': 2,
'div': 3,
'mod': 4,
'and': 5,
'or': 6,
'xor': 7,
'not': 8,
'load': 9,
'swap': 10,
}
CONDITIONS = {
'ze': 0,
'nz': 1,
'eq': 2,
'ne': 3,
'gt': 4,
'ge': 5,
'lt': 6,
'le': 7,
'eto': 8,
'eal': 9,
'esd': 12,
}
STATUS = {
1: "Wrong checksum",
2: "Invalid command",
3: "Wrong type",
4: "Invalid value",
5: "Configuration EEPROM locked",
6: "Command not available",
}
class TMCMError(Exception):
def __init__(self, status):
self.status = status
msg = STATUS[status]
        Exception.__init__(self, msg)
class TMCM140(SerialDevice):
def __init__(self, port, baudrate=9600, module_addr=1):
"""
port: serial COM port (eg. COM3 or /dev/ttyACM0)
baudrate: 9600 by default
module_addr: 1 by default
"""
self.lock = RLock(debug=True)
self.port = port
assert isinstance(module_addr, int)
assert module_addr > 0
self.module_addr = module_addr
self.module_str = chr(module_addr+64)
self._waiting_for_reply = False
SerialDevice.__init__(self, port=self.port, baudrate=baudrate)
@threadsafe
def command(self, cmd, type, motor, value):
"""Send a command to the controller and return the reply.
If an error is returned from the controller then raise an exception.
"""
self._send_cmd(cmd, type, motor, value)
return self._get_reply()
def rotate(self, velocity):
"""Begin rotating motor.
velocity: -2047 to +2047
negative values turn left; positive values turn right.
"""
assert isinstance(velocity, int)
assert -2047 <= velocity <= 2047
if velocity < 0:
direction = 'l'
velocity = -velocity
else:
direction = 'r'
self.command('ro'+direction, 0, 0, velocity)
def stop(self):
"""Stop the motor.
Note: does not stop currently running programs.
"""
self.command('mst', 0, 0, 0)
def move(self, pos, relative=False, velocity=None):
"""Rotate until reaching *pos*.
pos: The target position
relative: If True, then *pos* is interpreted as relative to the current
position
velocity: Optionally set the target velocity before moving
"""
assert isinstance(pos, int)
assert -2**32 <= pos < 2**32
if velocity is not None:
assert isinstance(velocity, int)
assert 0 <= velocity < 2048
raise NotImplementedError()
type = 1 if relative else 0
self.command('mvp', type, 0, pos)
def get_param(self, param):
pnum = abs(PARAMETERS[param])
return self.command('gap', pnum, 0, 0)[4]
def __getitem__(self, param):
return self.get_param(param)
def set_param(self, param, value, **kwds):
"""Set a parameter value.
        If value is 'accum' then the parameter is set from the accumulator
register.
"""
pnum = PARAMETERS[param]
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
if kwds.get('force', False) is not True:
raise Exception("Refusing to set current > 100 (this can damage the motor). "
"To override, use force=True.")
if value == 'accum':
self.command('aap', pnum, 0, 0)
else:
self.command('sap', pnum, 0, value)
@threadsafe
def set_params(self, **kwds):
"""Set multiple parameters.
The driver is thread-locked until all parameters are set.
"""
for param, value in kwds.items():
self.set_param(param, value)
def __setitem__(self, param, value):
return self.set_param(param, value)
def get_global(self, param):
"""Return a global parameter or copy global to accumulator.
Use param='gpX' to refer to general-purpose variables.
"""
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = abs(GLOBAL_PARAMETERS[param])
bank = 0
return self.command('ggp', pnum, bank, 0)[4]
def set_global(self, param, value):
if param.startswith('gp'):
pnum = int(param[2:])
bank = 2
else:
pnum = GLOBAL_PARAMETERS[param]
bank = 0
if pnum < 0:
raise TypeError("Parameter %s is read-only." % param)
if value == 'accum':
self.command('agp', pnum, bank, 0)
else:
self.command('sgp', pnum, bank, value)
def stop_program(self):
"""Stop the currently running TMCL program.
"""
self.command('stop_application', 0, 0, 0)
def start_program(self, address=None):
"""Start running TMCL program code from the given address (in bytes?),
or from the current address if None.
"""
if address is None:
self.command('run_application', 0, 0, 0)
else:
self.command('run_application', 1, 0, address)
def start_download(self, address=0):
"""Begin loading TMCL commands into EEPROM .
"""
self.command('start_download', 0, 0, address)
def stop_download(self):
"""Finish loading TMCL commands into EEPROM.
"""
self.command('stop_download', 0, 0, 0)
def write_program(self, address=0):
return ProgramManager(self, address)
def program_status(self):
"""Return current program status:
0=stop, 1=run, 2=step, 3=reset
"""
return self.command('get_application_status', 0, 0, 0)[4]
def calc(self, op, value):
opnum = OPERATORS[op]
if opnum > 9:
raise TypeError("Operator %s invalid for calc" % op)
self.command('calc', opnum, 0, value)
def calcx(self, op):
opnum = OPERATORS[op]
self.command('calcx', opnum, 0, 0)
def comp(self, val):
self.command('comp', 0, 0, val)
def jump(self, *args):
"""Program jump to *addr* (instruction index).
Usage:
jump(address)
jump(cond, address)
Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
"""
if len(args) == 1:
assert isinstance(args[0], int)
self.command('ja', 0, 0, args[0])
else:
cnum = CONDITIONS[args[0]]
self.command('jc', cnum, 0, args[1])
def _send_cmd(self, cmd, type, motor, value):
"""Send a command to the controller.
"""
if self._waiting_for_reply:
raise Exception("Cannot send command; previous reply has not been "
"received yet.")
cmd_num = COMMANDS[cmd]
assert isinstance(type, int)
assert isinstance(motor, int)
# Try packing the value first as unsigned, then signed. (the overlapping
# integer ranges have identical bit representation, so there is no
# ambiguity)
try:
cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
except struct.error:
cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)
chksum = sum(bytearray(cmd)) % 256
out = cmd + struct.pack('B', chksum)
self.write(out)
self._waiting_for_reply = True
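    # Reply frame layout, per the unpacking in _get_reply() below: 9 bytes,
    # big-endian -- reply address, module address, status, command number,
    # 4-byte signed value, checksum. Status codes below 100 are treated as
    # errors and mapped through the STATUS table above.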
def _get_reply(self):
"""Read and parse a reply from the controller.
Raise an exception if an error was reported.
"""
if not self._waiting_for_reply:
raise Exception("No reply expected.")
try:
d = self.read(9)
finally:
self._waiting_for_reply = False
d2 = self.readAll()
if len(d2) > 0:
raise Exception("Error: extra data while reading reply.")
parts = struct.unpack('>BBBBiB', d)
reply_addr, module_addr, status, cmd_num, value, chksum = parts
if chksum != sum(bytearray(d[:-1])) % 256:
raise Exception("Invalid checksum reading from controller.")
if status < 100:
raise TMCMError(status)
return parts
class ProgramManager(object):
def __init__(self, mcm, start=0):
self.mcm = mcm
self.start = start
self.count = 0
def __enter__(self):
self.mcm.lock.acquire()
self.mcm.start_download(self.start)
return self
def __exit__(self, *args):
# insert an extra stop to ensure the program can't leak
# into previously written code.
self.mcm.command('stop', 0, 0, 0)
self.mcm.stop_download()
self.mcm.lock.release()
def __getattr__(self, name):
self.count += 1
return getattr(self.mcm, name)
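# Minimal usage sketch (not part of the original driver; added for
# illustration). Assumptions: a TMCM-140 module is attached at
# '/dev/ttyACM0'; the port name and the parameter values are placeholders.
if __name__ == '__main__':
    dev = TMCM140('/dev/ttyACM0', baudrate=9600, module_addr=1)
    dev['maximum_speed'] = 800        # __setitem__ delegates to set_param()
    dev.rotate(200)                   # positive velocity turns right
    time.sleep(1.0)
    dev.stop()
    print('encoder position:', dev['encoder_position'])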
|
py | 1a4d87b9392abdeecfb13e1e293e24cf1d3d64ba | # Python 3 - checks website availability
# verifies connectivity against a test URL that is always online and stores a log in a text file.
from urllib.request import Request, urlopen
from urllib.error import URLError
from datetime import datetime
import time
class Url(object):
def __init__(self, url, nome):
self.url = url
self.sucesso = 0
self.erro = 0
self.nome = nome
        self.teste = False # whether the last check passed
tempo = 120 # how often to check, in seconds.
url0 = Url('http://www.google.com', 'teste')
url1 = Url('http://uol.com.br', 'Site 1')
url2 = Url('http://baixaki.com.br', 'Site 2')
urls = [url0, url1, url2] # which URLs to test
while True:
for url in urls:
try:
response = urlopen(url.url)
if response.info():
url.teste = True
url.sucesso += 1
else:
url.teste = False
url.erro += 1
except URLError:
url.teste = False
url.erro += 1
#print(url.nome + ' - ' + url.teste)
        if url.nome == 'teste' and not url.teste: # if the connectivity check fails, break
            texto = '\nNo local internet connection.'
arq = open('log-status.txt', 'a')
arq.write(texto)
arq.close()
print(texto)
break
        elif url.nome != 'teste': # if it is not the test link, write the log entry
            texto = url.nome + ' - Successes: ' + str(url.sucesso) + \
                    ' - Errors: ' + str(url.erro) + ' - ' + str(datetime.now()) + '\n'
arq = open('log-status.txt', 'a')
arq.write(texto)
arq.close()
print(texto)
time.sleep(1)
time.sleep(tempo)
|
py | 1a4d87c38017310b8b2ebc496b16ae301534b6ab | from __future__ import absolute_import
from itertools import izip
def deassoc(x):
"""
Turns an array consisting of alternating key-value pairs into a
dictionary.
Osm2pgsql stores the tags for ways and relations in the planet_osm_ways and
planet_osm_rels tables in this format. Hstore would make more sense now,
but this encoding pre-dates the common availability of hstore.
Example:
>>> from raw_tiles.index.util import deassoc
>>> deassoc(['a', 1, 'b', 'B', 'c', 3.14])
{'a': 1, 'c': 3.14, 'b': 'B'}
"""
pairs = [iter(x)] * 2
return dict(izip(*pairs))
|
py | 1a4d87c6234f08ae11f563bbae89f3c9eb1b2f41 | # coding=utf-8
from __future__ import print_function, unicode_literals
import numpy as np
import pandas as pd
import json
import requests
import argparse
data = pd.read_csv('/home/purvar/Downloads/location/t_sup_complaint.csv',
names=np.arange(27))
# text = data.iloc[:, 11]
NER_URL = 'http://api.bosonnlp.com/ner/analysis' # BosonNLP
parser = argparse.ArgumentParser()
parser.add_argument('--index',
type=int,
default=100,
help='Please input an index')
FLAGS, unparsed = parser.parse_known_args()
s = [ data.iloc[FLAGS.index, 11] ] # the complaint text is in the 12th column
print(s)
# s = ['硚口区汉西三路香江家居对面常青国际小区,光头卤店铺24小时抽烟机噪音扰民,\
# 油烟扰民,区局已派第三方检查公司进行检测,投诉人等待测试结果的回复。多次来电,请重点处理。']
data = json.dumps(s)
headers = {'X-Token': 'LkwQR-rW.21981.qz7z9JKCk9A9'}
resp = requests.post(NER_URL, headers=headers, data=data.encode('utf-8'))
for item in resp.json():
for entity in item['entity']:
if entity[2] in ['location', 'org_name', 'company_name']:
print(''.join(item['word'][entity[0]:entity[1]]), entity[2])
# print(resp.text)
|
py | 1a4d88d5398f256ee90f2f49260551a21f04abd3 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Assemble function for converting a list of circuits into a qobj"""
import uuid
import copy
import logging
import warnings
from time import time
from typing import Union, List, Dict, Optional
from qiskit.circuit import QuantumCircuit, Qubit, Parameter
from qiskit.exceptions import QiskitError
from qiskit.pulse import LoConfig, Instruction
from qiskit.assembler.run_config import RunConfig
from qiskit.assembler import assemble_circuits, assemble_schedules
from qiskit.qobj import QobjHeader, Qobj
from qiskit.qobj.utils import MeasLevel, MeasReturnType
from qiskit.validation.jsonschema import SchemaValidationError
from qiskit.providers import BaseBackend
from qiskit.providers.backend import Backend
from qiskit.pulse.channels import PulseChannel
from qiskit.pulse import Schedule
logger = logging.getLogger(__name__)
def _log_assembly_time(start_time, end_time):
log_msg = "Total Assembly Time - %.5f (ms)" % ((end_time - start_time) * 1000)
logger.info(log_msg)
# TODO: parallelize over the experiments (serialize each separately, then add global header/config)
def assemble(experiments: Union[QuantumCircuit, List[QuantumCircuit], Schedule, List[Schedule]],
backend: Optional[Union[Backend, BaseBackend]] = None,
qobj_id: Optional[str] = None,
qobj_header: Optional[Union[QobjHeader, Dict]] = None,
shots: Optional[int] = None, memory: Optional[bool] = False,
max_credits: Optional[int] = None,
seed_simulator: Optional[int] = None,
qubit_lo_freq: Optional[List[int]] = None,
meas_lo_freq: Optional[List[int]] = None,
qubit_lo_range: Optional[List[int]] = None,
meas_lo_range: Optional[List[int]] = None,
schedule_los: Optional[Union[List[Union[Dict[PulseChannel, float], LoConfig]],
Union[Dict[PulseChannel, float], LoConfig]]] = None,
meas_level: Union[int, MeasLevel] = MeasLevel.CLASSIFIED,
meas_return: Union[str, MeasReturnType] = MeasReturnType.AVERAGE,
meas_map: Optional[List[List[Qubit]]] = None,
memory_slot_size: int = 100,
rep_time: Optional[int] = None,
rep_delay: Optional[float] = None,
parameter_binds: Optional[List[Dict[Parameter, float]]] = None,
parametric_pulses: Optional[List[str]] = None,
init_qubits: bool = True,
**run_config: Dict) -> Qobj:
"""Assemble a list of circuits or pulse schedules into a ``Qobj``.
This function serializes the payloads, which could be either circuits or schedules,
to create ``Qobj`` "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments: Circuit(s) or pulse schedule(s) to execute
backend: If set, some runtime options are automatically grabbed from
``backend.configuration()`` and ``backend.defaults()``.
If any other option is explicitly set (e.g., ``rep_time``), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id: String identifier to annotate the ``Qobj``
qobj_header: User input that will be inserted in ``Qobj`` header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots: Number of repetitions of each circuit, for sampling. Default: 1024
or ``max_shots`` from the backend configuration, whichever is smaller
memory: If ``True``, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option.
max_credits: Maximum credits to spend on job. Default: 10
seed_simulator: Random seed to control sampling, for when backend is a simulator
qubit_lo_freq: List of default qubit LO frequencies in Hz. Will be overridden by
``schedule_los`` if set.
meas_lo_freq: List of default measurement LO frequencies in Hz. Will be overridden
by ``schedule_los`` if set.
qubit_lo_range: List of drive LO ranges each of form ``[range_min, range_max]`` in Hz.
Used to validate the supplied qubit frequencies.
meas_lo_range: List of measurement LO ranges each of form ``[range_min, range_max]`` in Hz.
Used to validate the supplied qubit frequencies.
schedule_los: Experiment LO configurations, frequencies are given in Hz.
meas_level: Set the appropriate level of the measurement output for pulse experiments.
meas_return: Level of measurement data for the backend to return.
For ``meas_level`` 0 and 1:
* ``single`` returns information from every shot.
* ``avg`` returns average measurement output (averaged over number of shots).
meas_map: List of lists, containing qubits that must be measured together.
memory_slot_size: Size of each memory slot if the output is Level 0.
rep_time (int): Time per program execution in seconds. Must be from the list provided
by the backend (``backend.configuration().rep_times``). Defaults to the first entry.
rep_delay (float): Delay between programs in seconds. Only supported on certain
backends (if ``backend.configuration().dynamic_reprate_enabled=True``). If supported,
``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied
by the backend (``backend.configuration().rep_delay_range``). Default is given by
``backend.configuration().default_rep_delay``.
parameter_binds: List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments; e.g., if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
parametric_pulses: A list of pulse shapes which are supported internally on the backend.
Example::
['gaussian', 'constant']
init_qubits: Whether to reset the qubits to the ground state for each shot.
Default: ``True``.
**run_config: Extra arguments used to configure the run (e.g., for Aer configurable
backends). Refer to the backend documentation for details on these
arguments.
Returns:
A ``Qobj`` that can be run on a backend. Depending on the type of input,
this will be either a ``QasmQobj`` or a ``PulseQobj``.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
start_time = time()
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config_common_dict = _parse_common_args(backend, qobj_id, qobj_header,
shots, memory, max_credits,
seed_simulator, init_qubits,
rep_delay, **run_config)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
run_config = _parse_circuit_args(parameter_binds, backend, meas_level,
meas_return, parametric_pulses,
**run_config_common_dict)
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(circuits=experiments,
run_config=run_config)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_circuits(circuits=bound_experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
elif all(isinstance(exp, (Schedule, Instruction)) for exp in experiments):
run_config = _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq,
qubit_lo_range, meas_lo_range,
schedule_los, meas_level, meas_return,
meas_map, memory_slot_size,
rep_time, parametric_pulses,
**run_config_common_dict)
end_time = time()
_log_assembly_time(start_time, end_time)
return assemble_schedules(schedules=experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
else:
raise QiskitError("bad input to assemble() function; "
"must be either circuits or schedules")
# TODO: rework to return a list of RunConfigs (one for each experiments), and a global one
def _parse_common_args(backend, qobj_id, qobj_header, shots,
memory, max_credits, seed_simulator,
init_qubits, rep_delay, **run_config):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError: if the memory arg is True and the backend does not support
memory. Also if shots exceeds max_shots for the configured backend. Also if
the type of shots is not int.
"""
# grab relevant info from backend if it exists
backend_config = None
if backend:
backend_config = backend.configuration()
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError("memory not supported by backend {}"
.format(backend_config.backend_name))
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, 'backend_name', None)
backend_version = getattr(backend_config, 'backend_version', None)
qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, 'max_shots', None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif not isinstance(shots, int):
raise QiskitError(
"Argument 'shots' should be of type 'int'")
elif max_shots and max_shots < shots:
raise QiskitError(
'Number of shots specified: %s exceeds max_shots property of the '
'backend: %s.' % (shots, max_shots))
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
if dynamic_reprate_enabled:
default_rep_delay = getattr(backend_config, "default_rep_delay", None)
rep_delay_range = getattr(backend_config, "rep_delay_range", None)
rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range)
else:
if rep_delay is not None:
rep_delay = None
warnings.warn(
"Dynamic rep rates not supported on this backend, cannot use rep_delay.",
RuntimeWarning,
)
# create run configuration and populate
run_config_dict = dict(shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
rep_delay=rep_delay,
**run_config)
return qobj_id, qobj_header, run_config_dict
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: If the given meas_level is not allowed for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead of 'rep_time'.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
def _parse_circuit_args(parameter_binds, backend, meas_level, meas_return,
parametric_pulses, **run_config):
"""Build a circuit RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
"""
parameter_binds = parameter_binds or []
# create run configuration and populate
run_config_dict = dict(parameter_binds=parameter_binds, **run_config)
if backend:
run_config_dict['parametric_pulses'] = getattr(backend.configuration(), 'parametric_pulses',
[])
if parametric_pulses:
run_config_dict['parametric_pulses'] = parametric_pulses
if meas_level:
run_config_dict['meas_level'] = meas_level
# only enable `meas_return` if `meas_level` isn't classified
if meas_level != MeasLevel.CLASSIFIED:
run_config_dict['meas_return'] = meas_return
run_config = RunConfig(
**{k: v
for k, v in run_config_dict.items() if v is not None})
return run_config
def _parse_rep_delay(rep_delay: float,
default_rep_delay: float,
rep_delay_range: List[float]) -> float:
"""Parse and set ``rep_delay`` parameter in runtime config.
Args:
rep_delay: Initial rep delay.
default_rep_delay: Backend default rep delay.
rep_delay_range: Backend list defining allowable range of rep delays.
Raises:
SchemaValidationError: If rep_delay is not in the backend rep_delay_range.
Returns:
float: Modified rep delay after parsing.
"""
if rep_delay is None:
rep_delay = default_rep_delay
if rep_delay is not None:
# check that rep_delay is in rep_delay_range
if rep_delay_range is not None and isinstance(rep_delay_range, list):
if len(rep_delay_range) != 2:
raise SchemaValidationError(
"Backend rep_delay_range {} must be a list with two entries.".format(
rep_delay_range
)
)
if not rep_delay_range[0] <= rep_delay <= rep_delay_range[1]:
raise SchemaValidationError(
"Supplied rep delay {} not in the supported "
"backend range {}".format(rep_delay, rep_delay_range)
)
rep_delay = rep_delay * 1e6 # convert sec to μs
return rep_delay
def _expand_parameters(circuits, run_config):
"""Verifies that there is a single common set of parameters shared between
all circuits and all parameter binds in the run_config. Returns an expanded
list of circuits (if parameterized) with all parameters bound, and a copy of
the run_config with parameter_binds cleared.
If neither the circuits nor the run_config specify parameters, the two are
returned unmodified.
Raises:
QiskitError: if run_config parameters are not compatible with circuit parameters
Returns:
Tuple(List[QuantumCircuit], RunConfig):
- List of input circuits expanded and with parameters bound
- RunConfig with parameter_binds removed
"""
parameter_binds = run_config.parameter_binds
if parameter_binds or \
any(circuit.parameters for circuit in circuits):
# Unroll params here in order to handle ParamVects
all_bind_parameters = [QuantumCircuit()._unroll_param_dict(bind).keys()
for bind in parameter_binds]
all_circuit_parameters = [circuit.parameters for circuit in circuits]
# Collect set of all unique parameters across all circuits and binds
unique_parameters = {param
for param_list in all_bind_parameters + all_circuit_parameters
for param in param_list}
# Check that all parameters are common to all circuits and binds
if not all_bind_parameters \
or not all_circuit_parameters \
or any(unique_parameters != bind_params for bind_params in all_bind_parameters) \
or any(unique_parameters != parameters for parameters in all_circuit_parameters):
raise QiskitError(
('Mismatch between run_config.parameter_binds and all circuit parameters. ' +
'Parameter binds: {} ' +
'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters))
circuits = [circuit.bind_parameters(binds)
for circuit in circuits
for binds in parameter_binds]
# All parameters have been expanded and bound, so remove from run_config
run_config = copy.deepcopy(run_config)
run_config.parameter_binds = []
return circuits, run_config
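# Illustrative usage sketch (not part of qiskit-terra; added for clarity).
# Assumptions: a local qiskit installation with the BasicAer simulators
# available; the circuit and shot count below are arbitrary examples.
if __name__ == "__main__":
    from qiskit import QuantumCircuit, BasicAer
    example_circuit = QuantumCircuit(2, 2)
    example_circuit.h(0)
    example_circuit.cx(0, 1)
    example_circuit.measure([0, 1], [0, 1])
    example_qobj = assemble(example_circuit,
                            backend=BasicAer.get_backend("qasm_simulator"),
                            shots=2048)
    print(example_qobj.config.shots)  # expected to print 2048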
|
py | 1a4d892378837d335fcf55e1424e6aedfa25cbf9 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.common import utils
from rally.plugins.openstack.cleanup import base
from rally.plugins.openstack.cleanup import manager
from tests.unit import test
BASE = "rally.plugins.openstack.cleanup.manager"
class SeekAndDestroyTestCase(test.TestCase):
def setUp(self):
super(SeekAndDestroyTestCase, self).setUp()
# clear out the client cache
manager.SeekAndDestroy.cache = {}
def test__get_cached_client(self):
api_versions = {"cinder": {"version": "1", "service_type": "volume"}}
destroyer = manager.SeekAndDestroy(None, None, None,
api_versions=api_versions)
cred = mock.Mock()
user = {"credential": cred}
clients = destroyer._get_cached_client(user)
self.assertIs(cred.clients.return_value, clients)
cred.clients.assert_called_once_with(api_info=api_versions)
self.assertIsNone(destroyer._get_cached_client(None))
@mock.patch("%s.LOG" % BASE)
def test__delete_single_resource(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
_interval=0.01)
mock_resource.delete.side_effect = [Exception, Exception, True]
mock_resource.is_deleted.side_effect = [False, False, True]
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_resource.delete.call_count)
mock_resource.is_deleted.assert_has_calls([mock.call()] * 3)
self.assertEqual(3, mock_resource.is_deleted.call_count)
# NOTE(boris-42): No logs and no exceptions means no bugs!
self.assertEqual(0, mock_log.call_count)
@mock.patch("%s.LOG" % BASE)
def test__delete_single_resource_timeout(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=1, _timeout=0.02,
_interval=0.025)
mock_resource.delete.return_value = True
mock_resource.is_deleted.side_effect = [False, False, True]
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_called_once_with()
mock_resource.is_deleted.assert_called_once_with()
self.assertEqual(1, mock_log.warning.call_count)
@mock.patch("%s.LOG" % BASE)
    def test__delete_single_resource_exception_in_is_deleted(self, mock_log):
mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
_interval=0)
mock_resource.delete.return_value = True
mock_resource.is_deleted.side_effect = [Exception] * 4
manager.SeekAndDestroy(None, None, None)._delete_single_resource(
mock_resource)
mock_resource.delete.assert_called_once_with()
self.assertEqual(4, mock_resource.is_deleted.call_count)
self.assertEqual(5, mock_log.warning.call_count)
self.assertEqual(4, mock_log.exception.call_count)
def _manager(self, list_side_effect, **kw):
mock_mgr = mock.MagicMock()
mock_mgr().list.side_effect = list_side_effect
mock_mgr.reset_mock()
for k, v in kw.items():
setattr(mock_mgr, k, v)
return mock_mgr
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_admin(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
_perform_for_admin_only=False)
admin = mock.MagicMock()
publish = manager.SeekAndDestroy(mock_mgr, admin, None)._publisher
queue = []
publish(queue)
mock__get_cached_client.assert_called_once_with(admin)
mock_mgr.assert_called_once_with(
admin=mock__get_cached_client.return_value)
self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_admin_only(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
_perform_for_admin_only=True)
admin = mock.MagicMock()
publish = manager.SeekAndDestroy(
mock_mgr, admin, ["u1", "u2"])._publisher
queue = []
publish(queue)
mock__get_cached_client.assert_called_once_with(admin)
mock_mgr.assert_called_once_with(
admin=mock__get_cached_client.return_value)
self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__publisher_user_resource(self, mock__get_cached_client):
mock_mgr = self._manager([Exception, Exception, [1, 2, 3],
Exception, Exception, [4, 5]],
_perform_for_admin_only=False,
_tenant_resource=True)
admin = mock.MagicMock()
users = [{"tenant_id": 1, "id": 1}, {"tenant_id": 2, "id": 2}]
publish = manager.SeekAndDestroy(mock_mgr, admin, users)._publisher
queue = []
publish(queue)
mock_client = mock__get_cached_client.return_value
mock_mgr.assert_has_calls([
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[0]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call().list(),
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[1]["tenant_id"]),
mock.call().list(),
mock.call().list()
])
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(users[0]),
mock.call(users[1])
])
expected_queue = [(admin, users[0], x) for x in range(1, 4)]
expected_queue += [(admin, users[1], x) for x in range(4, 6)]
self.assertEqual(expected_queue, queue)
@mock.patch("%s.LOG" % BASE)
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
def test__gen_publisher_tenant_resource(self, mock__get_cached_client,
mock_log):
mock_mgr = self._manager([Exception, [1, 2, 3],
Exception, Exception, Exception,
["this shouldn't be in results"]],
_perform_for_admin_only=False,
_tenant_resource=True)
users = [{"tenant_id": 1, "id": 1},
{"tenant_id": 1, "id": 2},
{"tenant_id": 2, "id": 3}]
publish = manager.SeekAndDestroy(
mock_mgr, None, users)._publisher
queue = []
publish(queue)
mock_client = mock__get_cached_client.return_value
mock_mgr.assert_has_calls([
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[0]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call(admin=mock_client, user=mock_client,
tenant_uuid=users[2]["tenant_id"]),
mock.call().list(),
mock.call().list(),
mock.call().list()
])
mock__get_cached_client.assert_has_calls([
mock.call(None),
mock.call(users[0]),
mock.call(users[2])
])
self.assertEqual(queue, [(None, users[0], x) for x in range(1, 4)])
        self.assertTrue(mock_log.warning.called)
        self.assertTrue(mock_log.exception.called)
@mock.patch("rally.common.utils.name_matches_object")
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
@mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
def test__consumer(self, mock__delete_single_resource,
mock__get_cached_client,
mock_name_matches_object):
mock_mgr = mock.MagicMock(__name__="Test")
resource_classes = [mock.Mock()]
task_id = "task_id"
mock_name_matches_object.return_value = True
consumer = manager.SeekAndDestroy(
mock_mgr, None, None,
resource_classes=resource_classes,
task_id=task_id)._consumer
admin = mock.MagicMock()
user1 = {"id": "a", "tenant_id": "uuid1"}
cache = {}
consumer(cache, (admin, user1, "res"))
mock_mgr.assert_called_once_with(
resource="res",
admin=mock__get_cached_client.return_value,
user=mock__get_cached_client.return_value,
tenant_uuid=user1["tenant_id"])
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(user1)
])
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
mock_mgr.reset_mock()
mock__get_cached_client.reset_mock()
mock__delete_single_resource.reset_mock()
mock_name_matches_object.reset_mock()
consumer(cache, (admin, None, "res2"))
mock_mgr.assert_called_once_with(
resource="res2",
admin=mock__get_cached_client.return_value,
user=mock__get_cached_client.return_value,
tenant_uuid=None)
mock__get_cached_client.assert_has_calls([
mock.call(admin),
mock.call(None)
])
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
@mock.patch("rally.common.utils.name_matches_object")
@mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
@mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
def test__consumer_with_noname_resource(self, mock__delete_single_resource,
mock__get_cached_client,
mock_name_matches_object):
mock_mgr = mock.MagicMock(__name__="Test")
mock_mgr.return_value.name.return_value = True
task_id = "task_id"
mock_name_matches_object.return_value = False
consumer = manager.SeekAndDestroy(mock_mgr, None, None,
task_id=task_id)._consumer
consumer(None, (None, None, "res"))
self.assertFalse(mock__delete_single_resource.called)
mock_mgr.return_value.name.return_value = base.NoName("foo")
consumer(None, (None, None, "res"))
mock__delete_single_resource.assert_called_once_with(
mock_mgr.return_value)
@mock.patch("%s.broker.run" % BASE)
def test_exterminate(self, mock_broker_run):
manager_cls = mock.MagicMock(_threads=5)
cleaner = manager.SeekAndDestroy(manager_cls, None, None)
cleaner._publisher = mock.Mock()
cleaner._consumer = mock.Mock()
cleaner.exterminate()
mock_broker_run.assert_called_once_with(cleaner._publisher,
cleaner._consumer,
consumers_count=5)
class ResourceManagerTestCase(test.TestCase):
def _get_res_mock(self, **kw):
_mock = mock.MagicMock()
for k, v in kw.items():
setattr(_mock, k, v)
return _mock
def _list_res_names_helper(self, names, admin_required, mock_iter):
self.assertEqual(set(names),
manager.list_resource_names(admin_required))
mock_iter.assert_called_once_with(base.ResourceManager)
mock_iter.reset_mock()
@mock.patch("%s.discover.itersubclasses" % BASE)
def test_list_resource_names(self, mock_itersubclasses):
mock_itersubclasses.return_value = [
self._get_res_mock(_service="fake", _resource="1",
_admin_required=True),
self._get_res_mock(_service="fake", _resource="2",
_admin_required=False),
self._get_res_mock(_service="other", _resource="2",
_admin_required=False)
]
self._list_res_names_helper(
["fake", "other", "fake.1", "fake.2", "other.2"],
None, mock_itersubclasses)
self._list_res_names_helper(
["fake", "fake.1"],
True, mock_itersubclasses)
self._list_res_names_helper(
["fake", "other", "fake.2", "other.2"],
False, mock_itersubclasses)
@mock.patch("%s.discover.itersubclasses" % BASE)
def test_find_resource_managers(self, mock_itersubclasses):
mock_itersubclasses.return_value = [
self._get_res_mock(_service="fake", _resource="1", _order=1,
_admin_required=True),
self._get_res_mock(_service="fake", _resource="2", _order=3,
_admin_required=False),
self._get_res_mock(_service="other", _resource="2", _order=2,
_admin_required=False)
]
self.assertEqual(mock_itersubclasses.return_value[0:2],
manager.find_resource_managers(names=["fake"]))
self.assertEqual(mock_itersubclasses.return_value[0:1],
manager.find_resource_managers(names=["fake.1"]))
self.assertEqual(
[mock_itersubclasses.return_value[0],
mock_itersubclasses.return_value[2],
mock_itersubclasses.return_value[1]],
manager.find_resource_managers(names=["fake", "other"]))
self.assertEqual(mock_itersubclasses.return_value[0:1],
manager.find_resource_managers(names=["fake"],
admin_required=True))
self.assertEqual(mock_itersubclasses.return_value[1:2],
manager.find_resource_managers(names=["fake"],
admin_required=False))
@mock.patch("rally.common.plugin.discover.itersubclasses")
@mock.patch("%s.SeekAndDestroy" % BASE)
@mock.patch("%s.find_resource_managers" % BASE,
return_value=[mock.MagicMock(), mock.MagicMock()])
def test_cleanup(self, mock_find_resource_managers, mock_seek_and_destroy,
mock_itersubclasses):
class A(utils.RandomNameGeneratorMixin):
pass
class B(object):
pass
mock_itersubclasses.return_value = [A, B]
manager.cleanup(names=["a", "b"], admin_required=True,
admin="admin", users=["user"],
superclass=A,
task_id="task_id")
mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
mock_seek_and_destroy.assert_has_calls([
mock.call(mock_find_resource_managers.return_value[0], "admin",
["user"], api_versions=None,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate(),
mock.call(mock_find_resource_managers.return_value[1], "admin",
["user"], api_versions=None,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate()
])
@mock.patch("rally.common.plugin.discover.itersubclasses")
@mock.patch("%s.SeekAndDestroy" % BASE)
@mock.patch("%s.find_resource_managers" % BASE,
return_value=[mock.MagicMock(), mock.MagicMock()])
def test_cleanup_with_api_versions(self,
mock_find_resource_managers,
mock_seek_and_destroy,
mock_itersubclasses):
class A(utils.RandomNameGeneratorMixin):
pass
class B(object):
pass
mock_itersubclasses.return_value = [A, B]
api_versions = {"cinder": {"version": "1", "service_type": "volume"}}
manager.cleanup(names=["a", "b"], admin_required=True,
admin="admin", users=["user"],
api_versions=api_versions,
superclass=utils.RandomNameGeneratorMixin,
task_id="task_id")
mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
mock_seek_and_destroy.assert_has_calls([
mock.call(mock_find_resource_managers.return_value[0], "admin",
["user"], api_versions=api_versions,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate(),
mock.call(mock_find_resource_managers.return_value[1], "admin",
["user"], api_versions=api_versions,
resource_classes=[A], task_id="task_id"),
mock.call().exterminate()
])
|
py | 1a4d894f58a076d80cd68867b5dc628769b2a9eb | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _Poly.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_Poly', [dirname(__file__)])
except ImportError:
import _Poly
return _Poly
if fp is not None:
try:
_mod = imp.load_module('_Poly', fp, pathname, description)
finally:
fp.close()
return _mod
_Poly = swig_import_helper()
del swig_import_helper
else:
import _Poly
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _Poly.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_Poly.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_Poly.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_Poly.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_Poly.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_Poly.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_Poly.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_Poly.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_Poly.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_Poly.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_Poly.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_Poly.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_Poly.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_Poly.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_Poly.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_Poly.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_Poly.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _Poly.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.Standard
import OCC.gp
import OCC.NCollection
import OCC.MMgt
import OCC.TColgp
import OCC.TCollection
import OCC.TColStd
import OCC.TShort
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
class poly(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def Catenate(*args):
"""
* Computes and stores the link from nodes to triangles and from triangles to neighbouring triangles. This tool is obsolete, replaced by Poly_CoherentTriangulation Algorithm to make minimal loops in a graph Join several triangulations to one new triangulation object. The new triangulation is just a mechanical sum of input triangulations, without node sharing. UV coordinates are dropped in the result.
:param lstTri:
:type lstTri: Poly_ListOfTriangulation &
:rtype: Handle_Poly_Triangulation
"""
return _Poly.poly_Catenate(*args)
Catenate = staticmethod(Catenate)
def Write(*args):
"""
* Writes the content of the triangulation <T> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param T:
:type T: Handle_Poly_Triangulation &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
* Writes the content of the 3D polygon <P> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param P:
:type P: Handle_Poly_Polygon3D &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
* Writes the content of the 2D polygon <P> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param P:
:type P: Handle_Poly_Polygon2D &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
"""
return _Poly.poly_Write(*args)
Write = staticmethod(Write)
def Dump(*args):
"""
* Dumps the triangulation. This is a call to the previous method with Compact set to False.
:param T:
:type T: Handle_Poly_Triangulation &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the 3D polygon. This is a call to the previous method with Compact set to False.
:param P:
:type P: Handle_Poly_Polygon3D &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the 2D polygon. This is a call to the previous method with Compact set to False.
:param P:
:type P: Handle_Poly_Polygon2D &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _Poly.poly_Dump(*args)
Dump = staticmethod(Dump)
def ReadTriangulationFromString(self, *args):
"""ReadTriangulationFromString(poly self, std::string src)"""
return _Poly.poly_ReadTriangulationFromString(self, *args)
def ReadPolygon3DFromString(self, *args):
"""ReadPolygon3DFromString(poly self, std::string src)"""
return _Poly.poly_ReadPolygon3DFromString(self, *args)
def ReadPolygon2DFromString(self, *args):
"""ReadPolygon2DFromString(poly self, std::string src)"""
return _Poly.poly_ReadPolygon2DFromString(self, *args)
def ComputeNormals(*args):
"""
* Compute node normals for face triangulation as mean normal of surrounding triangles
:param Tri:
:type Tri: Handle_Poly_Triangulation &
:rtype: void
"""
return _Poly.poly_ComputeNormals(*args)
ComputeNormals = staticmethod(ComputeNormals)
def PointOnTriangle(*args):
"""
* Computes parameters of the point P on triangle defined by points P1, P2, and P3, in 2d. The parameters U and V are defined so that P = P1 + U * (P2 - P1) + V * (P3 - P1), with U >= 0, V >= 0, U + V <= 1. If P is located outside of triangle, or triangle is degenerated, the returned parameters correspond to closest point, and returned value is square of the distance from original point to triangle (0 if point is inside).
:param P1:
:type P1: gp_XY
:param P2:
:type P2: gp_XY
:param P3:
:type P3: gp_XY
:param P:
:type P: gp_XY
:param UV:
:type UV: gp_XY
:rtype: float
"""
return _Poly.poly_PointOnTriangle(*args)
PointOnTriangle = staticmethod(PointOnTriangle)
def __init__(self):
"""__init__(Poly self) -> poly"""
_Poly.poly_swiginit(self,_Poly.new_poly())
__swig_destroy__ = _Poly.delete_poly
poly.ReadTriangulationFromString = new_instancemethod(_Poly.poly_ReadTriangulationFromString,None,poly)
poly.ReadPolygon3DFromString = new_instancemethod(_Poly.poly_ReadPolygon3DFromString,None,poly)
poly.ReadPolygon2DFromString = new_instancemethod(_Poly.poly_ReadPolygon2DFromString,None,poly)
poly_swigregister = _Poly.poly_swigregister
poly_swigregister(poly)
def poly_Catenate(*args):
"""
* Computes and stores the link from nodes to triangles and from triangles to neighbouring triangles. This tool is obsolete, replaced by Poly_CoherentTriangulation Algorithm to make minimal loops in a graph Join several triangulations to one new triangulation object. The new triangulation is just a mechanical sum of input triangulations, without node sharing. UV coordinates are dropped in the result.
:param lstTri:
:type lstTri: Poly_ListOfTriangulation &
:rtype: Handle_Poly_Triangulation
"""
return _Poly.poly_Catenate(*args)
def poly_Write(*args):
"""
* Writes the content of the triangulation <T> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param T:
:type T: Handle_Poly_Triangulation &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
* Writes the content of the 3D polygon <P> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param P:
:type P: Handle_Poly_Polygon3D &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
* Writes the content of the 2D polygon <P> on the stream <OS>. If <Compact> is true this is a 'save' format intended to be read back with the Read method. If compact is False it is a 'Dump' format intended to be informative.
:param P:
:type P: Handle_Poly_Polygon2D &
:param OS:
:type OS: Standard_OStream &
:param Compact: default value is Standard_True
:type Compact: bool
:rtype: void
"""
return _Poly.poly_Write(*args)
def poly_Dump(*args):
"""
* Dumps the triangulation. This is a call to the previous method with Compact set to False.
:param T:
:type T: Handle_Poly_Triangulation &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the 3D polygon. This is a call to the previous method with Compact set to False.
:param P:
:type P: Handle_Poly_Polygon3D &
:param OS:
:type OS: Standard_OStream &
:rtype: void
* Dumps the 2D polygon. This is a call to the previous method with Compact set to False.
:param P:
:type P: Handle_Poly_Polygon2D &
:param OS:
:type OS: Standard_OStream &
:rtype: void
"""
return _Poly.poly_Dump(*args)
def poly_ComputeNormals(*args):
"""
* Compute node normals for face triangulation as mean normal of surrounding triangles
:param Tri:
:type Tri: Handle_Poly_Triangulation &
:rtype: void
"""
return _Poly.poly_ComputeNormals(*args)
def poly_PointOnTriangle(*args):
"""
* Computes parameters of the point P on triangle defined by points P1, P2, and P3, in 2d. The parameters U and V are defined so that P = P1 + U * (P2 - P1) + V * (P3 - P1), with U >= 0, V >= 0, U + V <= 1. If P is located outside of triangle, or triangle is degenerated, the returned parameters correspond to closest point, and returned value is square of the distance from original point to triangle (0 if point is inside).
:param P1:
:type P1: gp_XY
:param P2:
:type P2: gp_XY
:param P3:
:type P3: gp_XY
:param P:
:type P: gp_XY
:param UV:
:type UV: gp_XY
:rtype: float
"""
return _Poly.poly_PointOnTriangle(*args)
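# A minimal usage sketch for poly_PointOnTriangle, assuming the pythonocc
# binding passes the in/out gp_XY argument UV by reference so that it is
# updated in place; the helper name below is hypothetical and only
# illustrates the call pattern described in the docstring above.
def _example_point_on_triangle():
    from OCC.gp import gp_XY
    p1, p2, p3 = gp_XY(0.0, 0.0), gp_XY(1.0, 0.0), gp_XY(0.0, 1.0)
    p = gp_XY(0.25, 0.25)
    uv = gp_XY(0.0, 0.0)
    # The return value is the squared distance from p to the triangle (0.0 when
    # p lies inside); afterwards uv holds U, V with p ~ p1 + U*(p2-p1) + V*(p3-p1).
    sq_dist = poly_PointOnTriangle(p1, p2, p3, p, uv)
    return sq_dist, uv.X(), uv.Y()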
class Poly_Array1OfTriangle(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Low:
:type Low: int
:param Up:
:type Up: int
:rtype: None
:param Item:
:type Item: Poly_Triangle &
:param Low:
:type Low: int
:param Up:
:type Up: int
:rtype: None
"""
_Poly.Poly_Array1OfTriangle_swiginit(self,_Poly.new_Poly_Array1OfTriangle(*args))
def Init(self, *args):
"""
:param V:
:type V: Poly_Triangle &
:rtype: None
"""
return _Poly.Poly_Array1OfTriangle_Init(self, *args)
def Destroy(self, *args):
"""
:rtype: None
"""
return _Poly.Poly_Array1OfTriangle_Destroy(self, *args)
def IsAllocated(self, *args):
"""
:rtype: bool
"""
return _Poly.Poly_Array1OfTriangle_IsAllocated(self, *args)
def Assign(self, *args):
"""
:param Other:
:type Other: Poly_Array1OfTriangle &
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_Array1OfTriangle_Assign(self, *args)
def Set(self, *args):
"""
:param Other:
:type Other: Poly_Array1OfTriangle &
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_Array1OfTriangle_Set(self, *args)
def Length(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_Array1OfTriangle_Length(self, *args)
def Lower(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_Array1OfTriangle_Lower(self, *args)
def Upper(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_Array1OfTriangle_Upper(self, *args)
def SetValue(self, *args):
"""
:param Index:
:type Index: int
:param Value:
:type Value: Poly_Triangle &
:rtype: None
"""
return _Poly.Poly_Array1OfTriangle_SetValue(self, *args)
def Value(self, *args):
"""
:param Index:
:type Index: int
:rtype: Poly_Triangle
"""
return _Poly.Poly_Array1OfTriangle_Value(self, *args)
def ChangeValue(self, *args):
"""
:param Index:
:type Index: int
:rtype: Poly_Triangle
"""
return _Poly.Poly_Array1OfTriangle_ChangeValue(self, *args)
__swig_destroy__ = _Poly.delete_Poly_Array1OfTriangle
Poly_Array1OfTriangle.Init = new_instancemethod(_Poly.Poly_Array1OfTriangle_Init,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Destroy = new_instancemethod(_Poly.Poly_Array1OfTriangle_Destroy,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.IsAllocated = new_instancemethod(_Poly.Poly_Array1OfTriangle_IsAllocated,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Assign = new_instancemethod(_Poly.Poly_Array1OfTriangle_Assign,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Set = new_instancemethod(_Poly.Poly_Array1OfTriangle_Set,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Length = new_instancemethod(_Poly.Poly_Array1OfTriangle_Length,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Lower = new_instancemethod(_Poly.Poly_Array1OfTriangle_Lower,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Upper = new_instancemethod(_Poly.Poly_Array1OfTriangle_Upper,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.SetValue = new_instancemethod(_Poly.Poly_Array1OfTriangle_SetValue,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.Value = new_instancemethod(_Poly.Poly_Array1OfTriangle_Value,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle.ChangeValue = new_instancemethod(_Poly.Poly_Array1OfTriangle_ChangeValue,None,Poly_Array1OfTriangle)
Poly_Array1OfTriangle_swigregister = _Poly.Poly_Array1OfTriangle_swigregister
Poly_Array1OfTriangle_swigregister(Poly_Array1OfTriangle)
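# A minimal sketch, relying only on the constructor signatures documented in
# this module (Poly_Array1OfTriangle above, Poly_Triangle and Poly_Triangulation
# further below): fill the 1-based triangle array for a single-triangle mesh.
# The helper name is hypothetical.
def _example_build_triangulation():
    from OCC.gp import gp_Pnt
    from OCC.TColgp import TColgp_Array1OfPnt
    nodes = TColgp_Array1OfPnt(1, 3)
    nodes.SetValue(1, gp_Pnt(0.0, 0.0, 0.0))
    nodes.SetValue(2, gp_Pnt(1.0, 0.0, 0.0))
    nodes.SetValue(3, gp_Pnt(0.0, 1.0, 0.0))
    triangles = Poly_Array1OfTriangle(1, 1)
    triangles.SetValue(1, Poly_Triangle(1, 2, 3))  # node indices are 1-based
    mesh = Poly_Triangulation(nodes, triangles)    # class defined later in this module, resolved at call time
    return mesh.NbNodes(), mesh.NbTriangles()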
class Poly_CoherentLink(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* /** * Empty constructor. */
:rtype: None
* /** * Constructor. Creates a Link that has no reference to 'opposite nodes'. * This constructor is useful to create temporary object that is not * inserted into any existing triangulation. */
:param iNode0:
:type iNode0: int
:param iNode1:
:type iNode1: int
:rtype: None
* /** * Constructor, takes a triangle and a side. A link is created always such * that myNode[0] < myNode[1]. Unlike the previous constructor, this one * assigns the 'opposite node' fields. This constructor is used when a * link is inserted into a Poly_CoherentTriangulation structure. * @param theTri * Triangle containing the link that is created * @param iSide * Can be 0, 1 or 2. Index of the node */
:param theTri:
:type theTri: Poly_CoherentTriangle &
:param iSide:
:type iSide: int
:rtype: None
"""
_Poly.Poly_CoherentLink_swiginit(self,_Poly.new_Poly_CoherentLink(*args))
def Node(self, *args):
"""
* /** * Return the node index in the current triangulation. * @param ind * 0 or 1 making distinction of the two nodes that constitute the Link. * Node(0) always returns a smaller number than Node(1). */
:param ind:
:type ind: int
:rtype: inline int
"""
return _Poly.Poly_CoherentLink_Node(self, *args)
def OppositeNode(self, *args):
"""
* /** * Return the opposite node (belonging to the left or right incident triangle) * index in the current triangulation. * @param ind * 0 or 1 making distinction of the two involved triangles: 0 on the left, * 1 on the right side of the Link. */
:param ind:
:type ind: int
:rtype: inline int
"""
return _Poly.Poly_CoherentLink_OppositeNode(self, *args)
def GetAttribute(self, *args):
"""
* /** * Query the attribute of the Link. */
:rtype: inline Standard_Address
"""
return _Poly.Poly_CoherentLink_GetAttribute(self, *args)
def SetAttribute(self, *args):
"""
* /** * Set the attribute of the Link. */
:param theAtt:
:type theAtt: Standard_Address
:rtype: inline void
"""
return _Poly.Poly_CoherentLink_SetAttribute(self, *args)
def IsEmpty(self, *args):
"""
* /** * Query the status of the link - if it is an invalid one. * An invalid link has Node members equal to -1. */
:rtype: inline bool
"""
return _Poly.Poly_CoherentLink_IsEmpty(self, *args)
def Nullify(self, *args):
"""
* /** * Invalidate this Link. */
:rtype: inline void
"""
return _Poly.Poly_CoherentLink_Nullify(self, *args)
__swig_destroy__ = _Poly.delete_Poly_CoherentLink
Poly_CoherentLink.Node = new_instancemethod(_Poly.Poly_CoherentLink_Node,None,Poly_CoherentLink)
Poly_CoherentLink.OppositeNode = new_instancemethod(_Poly.Poly_CoherentLink_OppositeNode,None,Poly_CoherentLink)
Poly_CoherentLink.GetAttribute = new_instancemethod(_Poly.Poly_CoherentLink_GetAttribute,None,Poly_CoherentLink)
Poly_CoherentLink.SetAttribute = new_instancemethod(_Poly.Poly_CoherentLink_SetAttribute,None,Poly_CoherentLink)
Poly_CoherentLink.IsEmpty = new_instancemethod(_Poly.Poly_CoherentLink_IsEmpty,None,Poly_CoherentLink)
Poly_CoherentLink.Nullify = new_instancemethod(_Poly.Poly_CoherentLink_Nullify,None,Poly_CoherentLink)
Poly_CoherentLink_swigregister = _Poly.Poly_CoherentLink_swigregister
Poly_CoherentLink_swigregister(Poly_CoherentLink)
class Poly_CoherentNode(OCC.gp.gp_XYZ):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* /** * Empty constructor. */
:rtype: None
* /** * Constructor. */
:param thePnt:
:type thePnt: gp_XYZ
:rtype: None
"""
_Poly.Poly_CoherentNode_swiginit(self,_Poly.new_Poly_CoherentNode(*args))
def SetUV(self, *args):
"""
* /** * Set the UV coordinates of the Node. */
:param theU:
:type theU: float
:param theV:
:type theV: float
:rtype: inline void
"""
return _Poly.Poly_CoherentNode_SetUV(self, *args)
def GetU(self, *args):
"""
* /** * Get U coordinate of the Node. */
:rtype: inline float
"""
return _Poly.Poly_CoherentNode_GetU(self, *args)
def GetV(self, *args):
"""
* /** * Get V coordinate of the Node. */
:rtype: inline float
"""
return _Poly.Poly_CoherentNode_GetV(self, *args)
def SetNormal(self, *args):
"""
* /** * Define the normal vector in the Node. */
:param theVector:
:type theVector: gp_XYZ
:rtype: None
"""
return _Poly.Poly_CoherentNode_SetNormal(self, *args)
def HasNormal(self, *args):
"""
* /** * Query if the Node contains a normal vector. */
:rtype: inline bool
"""
return _Poly.Poly_CoherentNode_HasNormal(self, *args)
def GetNormal(self, *args):
"""
* /** * Get the stored normal in the node. */
:rtype: inline gp_XYZ
"""
return _Poly.Poly_CoherentNode_GetNormal(self, *args)
def SetIndex(self, *args):
"""
* /** * Set the value of node Index. */
:param theIndex:
:type theIndex: int
:rtype: inline void
"""
return _Poly.Poly_CoherentNode_SetIndex(self, *args)
def GetIndex(self, *args):
"""
* /** * Get the value of node Index. */
:rtype: inline int
"""
return _Poly.Poly_CoherentNode_GetIndex(self, *args)
def IsFreeNode(self, *args):
"""
* /** * Check if this is a free node, i.e., a node without a single * incident triangle. */
:rtype: inline bool
"""
return _Poly.Poly_CoherentNode_IsFreeNode(self, *args)
def Clear(self, *args):
"""
* /** * Reset the Node to void. */
:param &:
:type &: Handle_NCollection_BaseAllocator
:rtype: None
"""
return _Poly.Poly_CoherentNode_Clear(self, *args)
def AddTriangle(self, *args):
"""
* /** * Connect a triangle to this Node. */
:param theTri:
:type theTri: Poly_CoherentTriangle &
:param theA:
:type theA: Handle_NCollection_BaseAllocator &
:rtype: None
"""
return _Poly.Poly_CoherentNode_AddTriangle(self, *args)
def RemoveTriangle(self, *args):
"""
* /** * Disconnect a triangle from this Node. */
:param theTri:
:type theTri: Poly_CoherentTriangle &
:param theA:
:type theA: Handle_NCollection_BaseAllocator &
:rtype: bool
"""
return _Poly.Poly_CoherentNode_RemoveTriangle(self, *args)
def TriangleIterator(self, *args):
"""
* /** * Create an iterator of incident triangles. */
:rtype: inline Poly_CoherentTriPtr::Iterator
"""
return _Poly.Poly_CoherentNode_TriangleIterator(self, *args)
def DumpToString(self):
"""DumpToString(Poly_CoherentNode self) -> std::string"""
return _Poly.Poly_CoherentNode_DumpToString(self)
__swig_destroy__ = _Poly.delete_Poly_CoherentNode
Poly_CoherentNode.SetUV = new_instancemethod(_Poly.Poly_CoherentNode_SetUV,None,Poly_CoherentNode)
Poly_CoherentNode.GetU = new_instancemethod(_Poly.Poly_CoherentNode_GetU,None,Poly_CoherentNode)
Poly_CoherentNode.GetV = new_instancemethod(_Poly.Poly_CoherentNode_GetV,None,Poly_CoherentNode)
Poly_CoherentNode.SetNormal = new_instancemethod(_Poly.Poly_CoherentNode_SetNormal,None,Poly_CoherentNode)
Poly_CoherentNode.HasNormal = new_instancemethod(_Poly.Poly_CoherentNode_HasNormal,None,Poly_CoherentNode)
Poly_CoherentNode.GetNormal = new_instancemethod(_Poly.Poly_CoherentNode_GetNormal,None,Poly_CoherentNode)
Poly_CoherentNode.SetIndex = new_instancemethod(_Poly.Poly_CoherentNode_SetIndex,None,Poly_CoherentNode)
Poly_CoherentNode.GetIndex = new_instancemethod(_Poly.Poly_CoherentNode_GetIndex,None,Poly_CoherentNode)
Poly_CoherentNode.IsFreeNode = new_instancemethod(_Poly.Poly_CoherentNode_IsFreeNode,None,Poly_CoherentNode)
Poly_CoherentNode.Clear = new_instancemethod(_Poly.Poly_CoherentNode_Clear,None,Poly_CoherentNode)
Poly_CoherentNode.AddTriangle = new_instancemethod(_Poly.Poly_CoherentNode_AddTriangle,None,Poly_CoherentNode)
Poly_CoherentNode.RemoveTriangle = new_instancemethod(_Poly.Poly_CoherentNode_RemoveTriangle,None,Poly_CoherentNode)
Poly_CoherentNode.TriangleIterator = new_instancemethod(_Poly.Poly_CoherentNode_TriangleIterator,None,Poly_CoherentNode)
Poly_CoherentNode.DumpToString = new_instancemethod(_Poly.Poly_CoherentNode_DumpToString,None,Poly_CoherentNode)
Poly_CoherentNode_swigregister = _Poly.Poly_CoherentNode_swigregister
Poly_CoherentNode_swigregister(Poly_CoherentNode)
class Poly_CoherentTriangle(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* /** * Empty constructor. */
:rtype: None
* /** * Constructor. */
:param iNode0:
:type iNode0: int
:param iNode1:
:type iNode1: int
:param iNode2:
:type iNode2: int
:rtype: None
"""
_Poly.Poly_CoherentTriangle_swiginit(self,_Poly.new_Poly_CoherentTriangle(*args))
def Node(self, *args):
"""
* /** * Query the node index in the position given by the parameter 'ind' */
:param ind:
:type ind: int
:rtype: inline int
"""
return _Poly.Poly_CoherentTriangle_Node(self, *args)
def IsEmpty(self, *args):
"""
* /** * Query if this is a valid triangle. */
:rtype: inline bool
"""
return _Poly.Poly_CoherentTriangle_IsEmpty(self, *args)
def SetConnection(self, *args):
"""
* /** * Create connection with another triangle theTri. * This method creates both connections: in this triangle and in theTri. You * do not need to call the same method on triangle theTr. * @param iConn * Can be 0, 1 or 2 - index of the node that is opposite to the connection * (shared link). * @param theTr * Triangle that is connected on the given link. * returns * True if successful, False if the connection is rejected * due to improper topology. */
:param iConn:
:type iConn: int
:param theTr:
:type theTr: Poly_CoherentTriangle &
:rtype: bool
* /** * Create connection with another triangle theTri. * This method creates both connections: in this triangle and in theTri. * This method is slower than the previous one, because it makes analysis * what sides of both triangles are connected. * @param theTri * Triangle that is connected. * returns * True if successful, False if the connection is rejected * due to improper topology. */
:param theTri:
:type theTri: Poly_CoherentTriangle &
:rtype: bool
"""
return _Poly.Poly_CoherentTriangle_SetConnection(self, *args)
def RemoveConnection(self, *args):
"""
* /** * Remove the connection with the given index. * @param iConn * Can be 0, 1 or 2 - index of the node that is opposite to the connection * (shared link). */
:param iConn:
:type iConn: int
:rtype: None
* /** * Remove the connection with the given Triangle. * returns * True if successful or False if the connection has not been found. */
:param theTri:
:type theTri: Poly_CoherentTriangle &
:rtype: bool
"""
return _Poly.Poly_CoherentTriangle_RemoveConnection(self, *args)
def NConnections(self, *args):
"""
* /** * Query the number of connected triangles. */
:rtype: inline int
"""
return _Poly.Poly_CoherentTriangle_NConnections(self, *args)
def GetConnectedNode(self, *args):
"""
* /** * Query the connected node on the given side. * Returns -1 if there is no connection on the specified side. */
:param iConn:
:type iConn: int
:rtype: inline int
"""
return _Poly.Poly_CoherentTriangle_GetConnectedNode(self, *args)
def GetConnectedTri(self, *args):
"""
* /** * Query the connected triangle on the given side. * Returns NULL if there is no connection on the specified side. */
:param iConn:
:type iConn: int
:rtype: inline Poly_CoherentTriangle *
"""
return _Poly.Poly_CoherentTriangle_GetConnectedTri(self, *args)
def GetLink(self, *args):
"""
* /** * Query the Link associate with the given side of the Triangle. * May return NULL if there are no links in the triangulation. */
:param iLink:
:type iLink: int
:rtype: inline Poly_CoherentLink *
"""
return _Poly.Poly_CoherentTriangle_GetLink(self, *args)
def FindConnection(self, *args):
"""
* /** * Returns the index of the connection with the given triangle, or -1 if not * found. */
:param &:
:type &: Poly_CoherentTriangle
:rtype: int
"""
return _Poly.Poly_CoherentTriangle_FindConnection(self, *args)
__swig_destroy__ = _Poly.delete_Poly_CoherentTriangle
Poly_CoherentTriangle.Node = new_instancemethod(_Poly.Poly_CoherentTriangle_Node,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.IsEmpty = new_instancemethod(_Poly.Poly_CoherentTriangle_IsEmpty,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.SetConnection = new_instancemethod(_Poly.Poly_CoherentTriangle_SetConnection,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.RemoveConnection = new_instancemethod(_Poly.Poly_CoherentTriangle_RemoveConnection,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.NConnections = new_instancemethod(_Poly.Poly_CoherentTriangle_NConnections,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.GetConnectedNode = new_instancemethod(_Poly.Poly_CoherentTriangle_GetConnectedNode,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.GetConnectedTri = new_instancemethod(_Poly.Poly_CoherentTriangle_GetConnectedTri,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.GetLink = new_instancemethod(_Poly.Poly_CoherentTriangle_GetLink,None,Poly_CoherentTriangle)
Poly_CoherentTriangle.FindConnection = new_instancemethod(_Poly.Poly_CoherentTriangle_FindConnection,None,Poly_CoherentTriangle)
Poly_CoherentTriangle_swigregister = _Poly.Poly_CoherentTriangle_swigregister
Poly_CoherentTriangle_swigregister(Poly_CoherentTriangle)
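# A sketch of the connectivity API documented above, assuming that two
# standalone Poly_CoherentTriangle objects sharing a side (nodes 1 and 2,
# traversed in opposite directions) can be connected directly; the helper
# name is hypothetical.
def _example_coherent_triangles():
    tri_a = Poly_CoherentTriangle(0, 1, 2)
    tri_b = Poly_CoherentTriangle(1, 3, 2)   # shares the side (1, 2) with tri_a
    connected = tri_a.SetConnection(tri_b)   # analyses shared sides on both triangles
    return connected, tri_a.NConnections(), tri_a.FindConnection(tri_b)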
class Poly_Connect(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs an algorithm to explore the adjacency data of nodes or triangles for the triangulation T.
:param T:
:type T: Handle_Poly_Triangulation &
:rtype: None
"""
_Poly.Poly_Connect_swiginit(self,_Poly.new_Poly_Connect(*args))
def Triangulation(self, *args):
"""
* Returns the triangulation analyzed by this tool.
:rtype: Handle_Poly_Triangulation
"""
return _Poly.Poly_Connect_Triangulation(self, *args)
def Triangle(self, *args):
"""
* Returns the index of a triangle containing the node at index N in the nodes table specific to the triangulation analyzed by this tool
:param N:
:type N: int
:rtype: int
"""
return _Poly.Poly_Connect_Triangle(self, *args)
def Triangles(self, *args):
"""
* Returns in t1, t2 and t3, the indices of the 3 triangles adjacent to the triangle at index T in the triangles table specific to the triangulation analyzed by this tool. Warning Null indices are returned when there are fewer than 3 adjacent triangles.
:param T:
:type T: int
:param t1:
:type t1: int &
:param t2:
:type t2: int &
:param t3:
:type t3: int &
:rtype: None
"""
return _Poly.Poly_Connect_Triangles(self, *args)
def Nodes(self, *args):
"""
* Returns, in n1, n2 and n3, the indices of the 3 nodes adjacent to the triangle referenced at index T in the triangles table specific to the triangulation analyzed by this tool. Warning Null indices are returned when there are fewer than 3 adjacent nodes.
:param T:
:type T: int
:param n1:
:type n1: int &
:param n2:
:type n2: int &
:param n3:
:type n3: int &
:rtype: None
"""
return _Poly.Poly_Connect_Nodes(self, *args)
def Initialize(self, *args):
"""
* Initializes an iterator to search for all the triangles containing the node referenced at index N in the nodes table, for the triangulation analyzed by this tool. The iterator is managed by the following functions: - More, which checks if there are still elements in the iterator - Next, which positions the iterator on the next element - Value, which returns the current element. The use of such an iterator provides direct access to the triangles around a particular node, i.e. it avoids iterating on all the component triangles of a triangulation. Example Poly_Connect C(Tr); for (C.Initialize(n1);C.More();C.Next()) { t = C.Value(); }
:param N:
:type N: int
:rtype: None
"""
return _Poly.Poly_Connect_Initialize(self, *args)
def More(self, *args):
"""
* Returns true if there is another element in the iterator defined with the function Initialize (i.e. if there is another triangle containing the given node).
:rtype: bool
"""
return _Poly.Poly_Connect_More(self, *args)
def Next(self, *args):
"""
* Advances the iterator defined with the function Initialize to access the next triangle. Note: There is no action if the iterator is empty (i.e. if the function More returns false).
:rtype: None
"""
return _Poly.Poly_Connect_Next(self, *args)
def Value(self, *args):
"""
* Returns the index of the current triangle to which the iterator, defined with the function Initialize, points. This is an index in the triangles table specific to the triangulation analyzed by this tool
:rtype: int
"""
return _Poly.Poly_Connect_Value(self, *args)
__swig_destroy__ = _Poly.delete_Poly_Connect
Poly_Connect.Triangulation = new_instancemethod(_Poly.Poly_Connect_Triangulation,None,Poly_Connect)
Poly_Connect.Triangle = new_instancemethod(_Poly.Poly_Connect_Triangle,None,Poly_Connect)
Poly_Connect.Triangles = new_instancemethod(_Poly.Poly_Connect_Triangles,None,Poly_Connect)
Poly_Connect.Nodes = new_instancemethod(_Poly.Poly_Connect_Nodes,None,Poly_Connect)
Poly_Connect.Initialize = new_instancemethod(_Poly.Poly_Connect_Initialize,None,Poly_Connect)
Poly_Connect.More = new_instancemethod(_Poly.Poly_Connect_More,None,Poly_Connect)
Poly_Connect.Next = new_instancemethod(_Poly.Poly_Connect_Next,None,Poly_Connect)
Poly_Connect.Value = new_instancemethod(_Poly.Poly_Connect_Value,None,Poly_Connect)
Poly_Connect_swigregister = _Poly.Poly_Connect_swigregister
Poly_Connect_swigregister(Poly_Connect)
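# A Python transcription of the iteration pattern shown in the Initialize
# docstring above ('Poly_Connect C(Tr); for (C.Initialize(n1); ...)'): collect
# the indices of all triangles incident to one node. The first argument is
# assumed to be a Handle_Poly_Triangulation; the helper name is hypothetical.
def _example_triangles_around_node(triangulation_handle, node_index):
    connect = Poly_Connect(triangulation_handle)
    incident = []
    connect.Initialize(node_index)
    while connect.More():
        incident.append(connect.Value())
        connect.Next()
    return incident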
class Poly_HArray1OfTriangle(OCC.MMgt.MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param Low:
:type Low: int
:param Up:
:type Up: int
:rtype: None
:param Low:
:type Low: int
:param Up:
:type Up: int
:param V:
:type V: Poly_Triangle &
:rtype: None
"""
_Poly.Poly_HArray1OfTriangle_swiginit(self,_Poly.new_Poly_HArray1OfTriangle(*args))
def Init(self, *args):
"""
:param V:
:type V: Poly_Triangle &
:rtype: None
"""
return _Poly.Poly_HArray1OfTriangle_Init(self, *args)
def Length(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_HArray1OfTriangle_Length(self, *args)
def Lower(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_HArray1OfTriangle_Lower(self, *args)
def Upper(self, *args):
"""
:rtype: int
"""
return _Poly.Poly_HArray1OfTriangle_Upper(self, *args)
def SetValue(self, *args):
"""
:param Index:
:type Index: int
:param Value:
:type Value: Poly_Triangle &
:rtype: None
"""
return _Poly.Poly_HArray1OfTriangle_SetValue(self, *args)
def Value(self, *args):
"""
:param Index:
:type Index: int
:rtype: Poly_Triangle
"""
return _Poly.Poly_HArray1OfTriangle_Value(self, *args)
def ChangeValue(self, *args):
"""
:param Index:
:type Index: int
:rtype: Poly_Triangle
"""
return _Poly.Poly_HArray1OfTriangle_ChangeValue(self, *args)
def Array1(self, *args):
"""
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_HArray1OfTriangle_Array1(self, *args)
def ChangeArray1(self, *args):
"""
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_HArray1OfTriangle_ChangeArray1(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_Poly_HArray1OfTriangle(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _Poly.delete_Poly_HArray1OfTriangle
Poly_HArray1OfTriangle.Init = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Init,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.Length = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Length,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.Lower = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Lower,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.Upper = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Upper,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.SetValue = new_instancemethod(_Poly.Poly_HArray1OfTriangle_SetValue,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.Value = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Value,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.ChangeValue = new_instancemethod(_Poly.Poly_HArray1OfTriangle_ChangeValue,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.Array1 = new_instancemethod(_Poly.Poly_HArray1OfTriangle_Array1,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle.ChangeArray1 = new_instancemethod(_Poly.Poly_HArray1OfTriangle_ChangeArray1,None,Poly_HArray1OfTriangle)
Poly_HArray1OfTriangle_swigregister = _Poly.Poly_HArray1OfTriangle_swigregister
Poly_HArray1OfTriangle_swigregister(Poly_HArray1OfTriangle)
class Handle_Poly_HArray1OfTriangle(OCC.MMgt.Handle_MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Poly.Handle_Poly_HArray1OfTriangle_swiginit(self,_Poly.new_Handle_Poly_HArray1OfTriangle(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Poly.Handle_Poly_HArray1OfTriangle_DownCast)
__swig_destroy__ = _Poly.delete_Handle_Poly_HArray1OfTriangle
Handle_Poly_HArray1OfTriangle.Nullify = new_instancemethod(_Poly.Handle_Poly_HArray1OfTriangle_Nullify,None,Handle_Poly_HArray1OfTriangle)
Handle_Poly_HArray1OfTriangle.IsNull = new_instancemethod(_Poly.Handle_Poly_HArray1OfTriangle_IsNull,None,Handle_Poly_HArray1OfTriangle)
Handle_Poly_HArray1OfTriangle.GetObject = new_instancemethod(_Poly.Handle_Poly_HArray1OfTriangle_GetObject,None,Handle_Poly_HArray1OfTriangle)
Handle_Poly_HArray1OfTriangle_swigregister = _Poly.Handle_Poly_HArray1OfTriangle_swigregister
Handle_Poly_HArray1OfTriangle_swigregister(Handle_Poly_HArray1OfTriangle)
def Handle_Poly_HArray1OfTriangle_DownCast(*args):
return _Poly.Handle_Poly_HArray1OfTriangle_DownCast(*args)
Handle_Poly_HArray1OfTriangle_DownCast = _Poly.Handle_Poly_HArray1OfTriangle_DownCast
class Poly_Polygon2D(OCC.MMgt.MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs a 2D polygon defined by the table of points, <Nodes>.
:param Nodes:
:type Nodes: TColgp_Array1OfPnt2d
:rtype: None
"""
_Poly.Poly_Polygon2D_swiginit(self,_Poly.new_Poly_Polygon2D(*args))
def Deflection(self, *args):
"""
* Returns the deflection of this polygon. Deflection is used in cases where the polygon is an approximate representation of a curve. Deflection represents the maximum distance permitted between any point on the curve and the corresponding point on the polygon. By default the deflection value is equal to 0. An algorithm using this 2D polygon with a deflection value equal to 0 considers that it is working with a true polygon and not with an approximate representation of a curve. The Deflection function is used to modify the deflection value of this polygon. The deflection value can be used by any algorithm working with 2D polygons. For example: - An algorithm may use a unique deflection value for all its polygons. In this case it is not necessary to use the Deflection function. - Or an algorithm may want to attach a different deflection to each polygon. In this case, the Deflection function is used to set a value on each polygon, and later to fetch the value.
:rtype: float
* Sets the deflection of this polygon to D
:param D:
:type D: float
:rtype: None
"""
return _Poly.Poly_Polygon2D_Deflection(self, *args)
def NbNodes(self, *args):
"""
* Returns the number of nodes in this polygon. Note: If the polygon is closed, the point of closure is repeated at the end of its table of nodes. Thus, on a closed triangle, the function NbNodes returns 4.
:rtype: int
"""
return _Poly.Poly_Polygon2D_NbNodes(self, *args)
def Nodes(self, *args):
"""
* Returns the table of nodes for this polygon.
:rtype: TColgp_Array1OfPnt2d
"""
return _Poly.Poly_Polygon2D_Nodes(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_Poly_Polygon2D(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _Poly.delete_Poly_Polygon2D
Poly_Polygon2D.Deflection = new_instancemethod(_Poly.Poly_Polygon2D_Deflection,None,Poly_Polygon2D)
Poly_Polygon2D.NbNodes = new_instancemethod(_Poly.Poly_Polygon2D_NbNodes,None,Poly_Polygon2D)
Poly_Polygon2D.Nodes = new_instancemethod(_Poly.Poly_Polygon2D_Nodes,None,Poly_Polygon2D)
Poly_Polygon2D_swigregister = _Poly.Poly_Polygon2D_swigregister
Poly_Polygon2D_swigregister(Poly_Polygon2D)
class Handle_Poly_Polygon2D(OCC.MMgt.Handle_MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Poly.Handle_Poly_Polygon2D_swiginit(self,_Poly.new_Handle_Poly_Polygon2D(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Poly.Handle_Poly_Polygon2D_DownCast)
__swig_destroy__ = _Poly.delete_Handle_Poly_Polygon2D
Handle_Poly_Polygon2D.Nullify = new_instancemethod(_Poly.Handle_Poly_Polygon2D_Nullify,None,Handle_Poly_Polygon2D)
Handle_Poly_Polygon2D.IsNull = new_instancemethod(_Poly.Handle_Poly_Polygon2D_IsNull,None,Handle_Poly_Polygon2D)
Handle_Poly_Polygon2D.GetObject = new_instancemethod(_Poly.Handle_Poly_Polygon2D_GetObject,None,Handle_Poly_Polygon2D)
Handle_Poly_Polygon2D_swigregister = _Poly.Handle_Poly_Polygon2D_swigregister
Handle_Poly_Polygon2D_swigregister(Handle_Poly_Polygon2D)
def Handle_Poly_Polygon2D_DownCast(*args):
return _Poly.Handle_Poly_Polygon2D_DownCast(*args)
Handle_Poly_Polygon2D_DownCast = _Poly.Handle_Poly_Polygon2D_DownCast
class Poly_Polygon3D(OCC.MMgt.MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs a 3D polygon defined by the table of points, Nodes.
:param Nodes:
:type Nodes: TColgp_Array1OfPnt
:rtype: None
* Constructs a 3D polygon defined by the table of points, Nodes, and the parallel table of parameters, Parameters, where each value of the table Parameters is the parameter of the corresponding point on the curve approximated by the constructed polygon. Warning Both the Nodes and Parameters tables must have the same bounds. This property is not checked at construction time.
:param Nodes:
:type Nodes: TColgp_Array1OfPnt
:param Parameters:
:type Parameters: TColStd_Array1OfReal &
:rtype: None
"""
_Poly.Poly_Polygon3D_swiginit(self,_Poly.new_Poly_Polygon3D(*args))
def Deflection(self, *args):
"""
* Returns the deflection of this polygon
:rtype: float
* Sets the deflection of this polygon to D. See more on deflection in Poly_Polygon2D
:param D:
:type D: float
:rtype: None
"""
return _Poly.Poly_Polygon3D_Deflection(self, *args)
def NbNodes(self, *args):
"""
* Returns the number of nodes in this polygon. Note: If the polygon is closed, the point of closure is repeated at the end of its table of nodes. Thus, on a closed triangle the function NbNodes returns 4.
:rtype: int
"""
return _Poly.Poly_Polygon3D_NbNodes(self, *args)
def Nodes(self, *args):
"""
* Returns the table of nodes for this polygon.
:rtype: TColgp_Array1OfPnt
"""
return _Poly.Poly_Polygon3D_Nodes(self, *args)
def HasParameters(self, *args):
"""
* Checks if parameters are associated with the nodes of this polygon; returns true if they are. Use the Parameters function to retrieve the table of parameter values.
:rtype: bool
"""
return _Poly.Poly_Polygon3D_HasParameters(self, *args)
def Parameters(self, *args):
"""
* Returns the table of the parameters associated with each node in this polygon. Warning: use the HasParameters function to check that parameters are associated with the nodes before calling this method.
:rtype: TColStd_Array1OfReal
"""
return _Poly.Poly_Polygon3D_Parameters(self, *args)
def ChangeParameters(self, *args):
"""
* Returns the table of the parameters associated with each node in this polygon. The ChangeParameters function returns the array as shared. Therefore if the table is selected by reference you can, by simply modifying it, directly modify the data structure of this polygon.
:rtype: TColStd_Array1OfReal
"""
return _Poly.Poly_Polygon3D_ChangeParameters(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_Poly_Polygon3D(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _Poly.delete_Poly_Polygon3D
Poly_Polygon3D.Deflection = new_instancemethod(_Poly.Poly_Polygon3D_Deflection,None,Poly_Polygon3D)
Poly_Polygon3D.NbNodes = new_instancemethod(_Poly.Poly_Polygon3D_NbNodes,None,Poly_Polygon3D)
Poly_Polygon3D.Nodes = new_instancemethod(_Poly.Poly_Polygon3D_Nodes,None,Poly_Polygon3D)
Poly_Polygon3D.HasParameters = new_instancemethod(_Poly.Poly_Polygon3D_HasParameters,None,Poly_Polygon3D)
Poly_Polygon3D.Parameters = new_instancemethod(_Poly.Poly_Polygon3D_Parameters,None,Poly_Polygon3D)
Poly_Polygon3D.ChangeParameters = new_instancemethod(_Poly.Poly_Polygon3D_ChangeParameters,None,Poly_Polygon3D)
Poly_Polygon3D_swigregister = _Poly.Poly_Polygon3D_swigregister
Poly_Polygon3D_swigregister(Poly_Polygon3D)
class Handle_Poly_Polygon3D(OCC.MMgt.Handle_MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Poly.Handle_Poly_Polygon3D_swiginit(self,_Poly.new_Handle_Poly_Polygon3D(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Poly.Handle_Poly_Polygon3D_DownCast)
__swig_destroy__ = _Poly.delete_Handle_Poly_Polygon3D
Handle_Poly_Polygon3D.Nullify = new_instancemethod(_Poly.Handle_Poly_Polygon3D_Nullify,None,Handle_Poly_Polygon3D)
Handle_Poly_Polygon3D.IsNull = new_instancemethod(_Poly.Handle_Poly_Polygon3D_IsNull,None,Handle_Poly_Polygon3D)
Handle_Poly_Polygon3D.GetObject = new_instancemethod(_Poly.Handle_Poly_Polygon3D_GetObject,None,Handle_Poly_Polygon3D)
Handle_Poly_Polygon3D_swigregister = _Poly.Handle_Poly_Polygon3D_swigregister
Handle_Poly_Polygon3D_swigregister(Handle_Poly_Polygon3D)
def Handle_Poly_Polygon3D_DownCast(*args):
return _Poly.Handle_Poly_Polygon3D_DownCast(*args)
Handle_Poly_Polygon3D_DownCast = _Poly.Handle_Poly_Polygon3D_DownCast
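# A minimal sketch of the second Poly_Polygon3D constructor documented above
# (a table of nodes plus a parallel table of curve parameters); only the
# signatures listed in the docstrings are assumed and the helper name is
# hypothetical.
def _example_polygon3d_with_parameters():
    from OCC.gp import gp_Pnt
    from OCC.TColgp import TColgp_Array1OfPnt
    from OCC.TColStd import TColStd_Array1OfReal
    nodes = TColgp_Array1OfPnt(1, 3)
    params = TColStd_Array1OfReal(1, 3)
    for i, (pnt, u) in enumerate([(gp_Pnt(0.0, 0.0, 0.0), 0.0),
                                  (gp_Pnt(1.0, 0.0, 0.0), 0.5),
                                  (gp_Pnt(1.0, 1.0, 0.0), 1.0)], start=1):
        nodes.SetValue(i, pnt)
        params.SetValue(i, u)
    polygon = Poly_Polygon3D(nodes, params)
    return polygon.NbNodes(), polygon.HasParameters()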
class Poly_PolygonOnTriangulation(OCC.MMgt.MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs a 3D polygon on the triangulation of a shape, defined by the table of nodes, <Nodes>.
:param Nodes:
:type Nodes: TColStd_Array1OfInteger &
:rtype: None
* Constructs a 3D polygon on the triangulation of a shape, defined by: - the table of nodes, Nodes, and the table of parameters, <Parameters>. where: - a node value is an index in the table of nodes specific to an existing triangulation of a shape - and a parameter value is the value of the parameter of the corresponding point on the curve approximated by the constructed polygon. Warning The tables Nodes and Parameters must be the same size. This property is not checked at construction time.
:param Nodes:
:type Nodes: TColStd_Array1OfInteger &
:param Parameters:
:type Parameters: TColStd_Array1OfReal &
:rtype: None
"""
_Poly.Poly_PolygonOnTriangulation_swiginit(self,_Poly.new_Poly_PolygonOnTriangulation(*args))
def Deflection(self, *args):
"""
* Returns the deflection of this polygon
:rtype: float
* Sets the deflection of this polygon to D. See more on deflection in Poly_Polygon2D.
:param D:
:type D: float
:rtype: None
"""
return _Poly.Poly_PolygonOnTriangulation_Deflection(self, *args)
def NbNodes(self, *args):
"""
* Returns the number of nodes for this polygon. Note: If the polygon is closed, the point of closure is repeated at the end of its table of nodes. Thus, on a closed triangle, the function NbNodes returns 4.
:rtype: int
"""
return _Poly.Poly_PolygonOnTriangulation_NbNodes(self, *args)
def Nodes(self, *args):
"""
* Returns the table of nodes for this polygon. A node value is an index in the table of nodes specific to an existing triangulation of a shape.
:rtype: TColStd_Array1OfInteger
"""
return _Poly.Poly_PolygonOnTriangulation_Nodes(self, *args)
def HasParameters(self, *args):
"""
* Returns true if parameters are associated with the nodes in this polygon.
:rtype: bool
"""
return _Poly.Poly_PolygonOnTriangulation_HasParameters(self, *args)
def Parameters(self, *args):
"""
* Returns the table of the parameters associated with each node in this polygon. Warning Use the function HasParameters to check if parameters are associated with the nodes in this polygon.
:rtype: Handle_TColStd_HArray1OfReal
"""
return _Poly.Poly_PolygonOnTriangulation_Parameters(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_Poly_PolygonOnTriangulation(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _Poly.delete_Poly_PolygonOnTriangulation
Poly_PolygonOnTriangulation.Deflection = new_instancemethod(_Poly.Poly_PolygonOnTriangulation_Deflection,None,Poly_PolygonOnTriangulation)
Poly_PolygonOnTriangulation.NbNodes = new_instancemethod(_Poly.Poly_PolygonOnTriangulation_NbNodes,None,Poly_PolygonOnTriangulation)
Poly_PolygonOnTriangulation.Nodes = new_instancemethod(_Poly.Poly_PolygonOnTriangulation_Nodes,None,Poly_PolygonOnTriangulation)
Poly_PolygonOnTriangulation.HasParameters = new_instancemethod(_Poly.Poly_PolygonOnTriangulation_HasParameters,None,Poly_PolygonOnTriangulation)
Poly_PolygonOnTriangulation.Parameters = new_instancemethod(_Poly.Poly_PolygonOnTriangulation_Parameters,None,Poly_PolygonOnTriangulation)
Poly_PolygonOnTriangulation_swigregister = _Poly.Poly_PolygonOnTriangulation_swigregister
Poly_PolygonOnTriangulation_swigregister(Poly_PolygonOnTriangulation)
class Handle_Poly_PolygonOnTriangulation(OCC.MMgt.Handle_MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Poly.Handle_Poly_PolygonOnTriangulation_swiginit(self,_Poly.new_Handle_Poly_PolygonOnTriangulation(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Poly.Handle_Poly_PolygonOnTriangulation_DownCast)
__swig_destroy__ = _Poly.delete_Handle_Poly_PolygonOnTriangulation
Handle_Poly_PolygonOnTriangulation.Nullify = new_instancemethod(_Poly.Handle_Poly_PolygonOnTriangulation_Nullify,None,Handle_Poly_PolygonOnTriangulation)
Handle_Poly_PolygonOnTriangulation.IsNull = new_instancemethod(_Poly.Handle_Poly_PolygonOnTriangulation_IsNull,None,Handle_Poly_PolygonOnTriangulation)
Handle_Poly_PolygonOnTriangulation.GetObject = new_instancemethod(_Poly.Handle_Poly_PolygonOnTriangulation_GetObject,None,Handle_Poly_PolygonOnTriangulation)
Handle_Poly_PolygonOnTriangulation_swigregister = _Poly.Handle_Poly_PolygonOnTriangulation_swigregister
Handle_Poly_PolygonOnTriangulation_swigregister(Handle_Poly_PolygonOnTriangulation)
def Handle_Poly_PolygonOnTriangulation_DownCast(*args):
return _Poly.Handle_Poly_PolygonOnTriangulation_DownCast(*args)
Handle_Poly_PolygonOnTriangulation_DownCast = _Poly.Handle_Poly_PolygonOnTriangulation_DownCast
class Poly_Triangle(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs a triangle and sets all indices to zero.
:rtype: None
* Constructs a triangle and sets its three indices to N1, N2 and N3 respectively, where these node values are indices in the table of nodes specific to an existing triangulation of a shape.
:param N1:
:type N1: int
:param N2:
:type N2: int
:param N3:
:type N3: int
:rtype: None
"""
_Poly.Poly_Triangle_swiginit(self,_Poly.new_Poly_Triangle(*args))
def Set(self, *args):
"""
* Sets the value of the three nodes of this triangle to N1, N2 and N3 respectively.
:param N1:
:type N1: int
:param N2:
:type N2: int
:param N3:
:type N3: int
:rtype: None
* Sets the value of the Indexth node of this triangle to Node. Raises OutOfRange if Index is not in 1,2,3
:param Index:
:type Index: int
:param Node:
:type Node: int
:rtype: None
"""
return _Poly.Poly_Triangle_Set(self, *args)
def Get(self, *args):
"""
* Returns the node indices of this triangle in N1, N2 and N3.
:param N1:
:type N1: int &
:param N2:
:type N2: int &
:param N3:
:type N3: int &
:rtype: None
"""
return _Poly.Poly_Triangle_Get(self, *args)
def Value(self, *args):
"""
* Get the node of given Index. Raises OutOfRange from Standard if Index is not in 1,2,3
:param Index:
:type Index: int
:rtype: int
"""
return _Poly.Poly_Triangle_Value(self, *args)
def ChangeValue(self, *args):
"""
* Get the node of given Index. Raises OutOfRange if Index is not in 1,2,3
:param Index:
:type Index: int
:rtype: int
"""
return _Poly.Poly_Triangle_ChangeValue(self, *args)
__swig_destroy__ = _Poly.delete_Poly_Triangle
Poly_Triangle.Set = new_instancemethod(_Poly.Poly_Triangle_Set,None,Poly_Triangle)
Poly_Triangle.Get = new_instancemethod(_Poly.Poly_Triangle_Get,None,Poly_Triangle)
Poly_Triangle.Value = new_instancemethod(_Poly.Poly_Triangle_Value,None,Poly_Triangle)
Poly_Triangle.ChangeValue = new_instancemethod(_Poly.Poly_Triangle_ChangeValue,None,Poly_Triangle)
Poly_Triangle_swigregister = _Poly.Poly_Triangle_swigregister
Poly_Triangle_swigregister(Poly_Triangle)
class Poly_Triangulation(OCC.MMgt.MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Constructs a triangulation from a set of triangles. The triangulation is initialized without a triangle or a node, but capable of containing nbNodes nodes, and nbTriangles triangles. Here the UVNodes flag indicates whether 2D nodes will be associated with 3D ones, (i.e. to enable a 2D representation).
:param nbNodes:
:type nbNodes: int
:param nbTriangles:
:type nbTriangles: int
:param UVNodes:
:type UVNodes: bool
:rtype: None
* Constructs a triangulation from a set of triangles. The triangulation is initialized with 3D points from Nodes and triangles from Triangles.
:param Nodes:
:type Nodes: TColgp_Array1OfPnt
:param Triangles:
:type Triangles: Poly_Array1OfTriangle &
:rtype: None
* Constructs a triangulation from a set of triangles. The triangulation is initialized with 3D points from Nodes, 2D points from UVNodes and triangles from Triangles, where coordinates of a 2D point from UVNodes are the (u, v) parameters of the corresponding 3D point from Nodes on the surface approximated by the constructed triangulation.
:param Nodes:
:type Nodes: TColgp_Array1OfPnt
:param UVNodes:
:type UVNodes: TColgp_Array1OfPnt2d
:param Triangles:
:type Triangles: Poly_Array1OfTriangle &
:rtype: None
"""
_Poly.Poly_Triangulation_swiginit(self,_Poly.new_Poly_Triangulation(*args))
def Deflection(self, *args):
"""
* Returns the deflection of this triangulation.
:rtype: float
* Sets the deflection of this triangulation to D. See more on deflection in Polygon2D
:param D:
:type D: float
:rtype: None
"""
return _Poly.Poly_Triangulation_Deflection(self, *args)
def RemoveUVNodes(self, *args):
"""
* Deallocates the UV nodes.
:rtype: None
"""
return _Poly.Poly_Triangulation_RemoveUVNodes(self, *args)
def NbNodes(self, *args):
"""
* Returns the number of nodes for this triangulation. Null if the nodes are not yet defined.
:rtype: int
"""
return _Poly.Poly_Triangulation_NbNodes(self, *args)
def NbTriangles(self, *args):
"""
* Returns the number of triangles for this triangulation. Null if the Triangles are not yet defined.
:rtype: int
"""
return _Poly.Poly_Triangulation_NbTriangles(self, *args)
def HasUVNodes(self, *args):
"""
* Returns true if 2D nodes are associated with 3D nodes for this triangulation.
:rtype: bool
"""
return _Poly.Poly_Triangulation_HasUVNodes(self, *args)
def Nodes(self, *args):
"""
* Returns the table of 3D nodes (3D points) for this triangulation.
:rtype: TColgp_Array1OfPnt
"""
return _Poly.Poly_Triangulation_Nodes(self, *args)
def ChangeNodes(self, *args):
"""
* Returns the table of 3D nodes (3D points) for this triangulation. The returned array is shared. Therefore if the table is selected by reference, you can, by simply modifying it, directly modify the data structure of this triangulation.
:rtype: TColgp_Array1OfPnt
"""
return _Poly.Poly_Triangulation_ChangeNodes(self, *args)
def UVNodes(self, *args):
"""
* Returns the table of 2D nodes (2D points) associated with each 3D node of this triangulation. The function HasUVNodes checks if 2D nodes are associated with the 3D nodes of this triangulation. Const reference on the 2d nodes values.
:rtype: TColgp_Array1OfPnt2d
"""
return _Poly.Poly_Triangulation_UVNodes(self, *args)
def ChangeUVNodes(self, *args):
"""
* Returns the table of 2D nodes (2D points) associated with each 3D node of this triangulation. Function ChangeUVNodes shares the returned array. Therefore if the table is selected by reference, you can, by simply modifying it, directly modify the data structure of this triangulation.
:rtype: TColgp_Array1OfPnt2d
"""
return _Poly.Poly_Triangulation_ChangeUVNodes(self, *args)
def Triangles(self, *args):
"""
* Returns the table of triangles for this triangulation.
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_Triangulation_Triangles(self, *args)
def ChangeTriangles(self, *args):
"""
        * Returns the table of triangles for this triangulation. Function ChangeTriangles shares the returned array. Therefore if the table is selected by reference, you can, by simply modifying it, directly modify the data structure of this triangulation.
:rtype: Poly_Array1OfTriangle
"""
return _Poly.Poly_Triangulation_ChangeTriangles(self, *args)
def SetNormals(self, *args):
"""
* Sets the table of node normals. raises exception if length of theNormals != 3*NbNodes
:param theNormals:
:type theNormals: Handle_TShort_HArray1OfShortReal &
:rtype: None
"""
return _Poly.Poly_Triangulation_SetNormals(self, *args)
def Normals(self, *args):
"""
:rtype: TShort_Array1OfShortReal
"""
return _Poly.Poly_Triangulation_Normals(self, *args)
def ChangeNormals(self, *args):
"""
:rtype: TShort_Array1OfShortReal
"""
return _Poly.Poly_Triangulation_ChangeNormals(self, *args)
def HasNormals(self, *args):
"""
:rtype: bool
"""
return _Poly.Poly_Triangulation_HasNormals(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_Poly_Triangulation(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _Poly.delete_Poly_Triangulation
Poly_Triangulation.Deflection = new_instancemethod(_Poly.Poly_Triangulation_Deflection,None,Poly_Triangulation)
Poly_Triangulation.RemoveUVNodes = new_instancemethod(_Poly.Poly_Triangulation_RemoveUVNodes,None,Poly_Triangulation)
Poly_Triangulation.NbNodes = new_instancemethod(_Poly.Poly_Triangulation_NbNodes,None,Poly_Triangulation)
Poly_Triangulation.NbTriangles = new_instancemethod(_Poly.Poly_Triangulation_NbTriangles,None,Poly_Triangulation)
Poly_Triangulation.HasUVNodes = new_instancemethod(_Poly.Poly_Triangulation_HasUVNodes,None,Poly_Triangulation)
Poly_Triangulation.Nodes = new_instancemethod(_Poly.Poly_Triangulation_Nodes,None,Poly_Triangulation)
Poly_Triangulation.ChangeNodes = new_instancemethod(_Poly.Poly_Triangulation_ChangeNodes,None,Poly_Triangulation)
Poly_Triangulation.UVNodes = new_instancemethod(_Poly.Poly_Triangulation_UVNodes,None,Poly_Triangulation)
Poly_Triangulation.ChangeUVNodes = new_instancemethod(_Poly.Poly_Triangulation_ChangeUVNodes,None,Poly_Triangulation)
Poly_Triangulation.Triangles = new_instancemethod(_Poly.Poly_Triangulation_Triangles,None,Poly_Triangulation)
Poly_Triangulation.ChangeTriangles = new_instancemethod(_Poly.Poly_Triangulation_ChangeTriangles,None,Poly_Triangulation)
Poly_Triangulation.SetNormals = new_instancemethod(_Poly.Poly_Triangulation_SetNormals,None,Poly_Triangulation)
Poly_Triangulation.Normals = new_instancemethod(_Poly.Poly_Triangulation_Normals,None,Poly_Triangulation)
Poly_Triangulation.ChangeNormals = new_instancemethod(_Poly.Poly_Triangulation_ChangeNormals,None,Poly_Triangulation)
Poly_Triangulation.HasNormals = new_instancemethod(_Poly.Poly_Triangulation_HasNormals,None,Poly_Triangulation)
Poly_Triangulation_swigregister = _Poly.Poly_Triangulation_swigregister
Poly_Triangulation_swigregister(Poly_Triangulation)
class Handle_Poly_Triangulation(OCC.MMgt.Handle_MMgt_TShared):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_Poly.Handle_Poly_Triangulation_swiginit(self,_Poly.new_Handle_Poly_Triangulation(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_Poly.Handle_Poly_Triangulation_DownCast)
__swig_destroy__ = _Poly.delete_Handle_Poly_Triangulation
Handle_Poly_Triangulation.Nullify = new_instancemethod(_Poly.Handle_Poly_Triangulation_Nullify,None,Handle_Poly_Triangulation)
Handle_Poly_Triangulation.IsNull = new_instancemethod(_Poly.Handle_Poly_Triangulation_IsNull,None,Handle_Poly_Triangulation)
Handle_Poly_Triangulation.GetObject = new_instancemethod(_Poly.Handle_Poly_Triangulation_GetObject,None,Handle_Poly_Triangulation)
Handle_Poly_Triangulation_swigregister = _Poly.Handle_Poly_Triangulation_swigregister
Handle_Poly_Triangulation_swigregister(Handle_Poly_Triangulation)
def Handle_Poly_Triangulation_DownCast(*args):
return _Poly.Handle_Poly_Triangulation_DownCast(*args)
Handle_Poly_Triangulation_DownCast = _Poly.Handle_Poly_Triangulation_DownCast
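# --- Hedged usage sketch (not part of the generated bindings above) ---
# A minimal example of building a one-triangle mesh with the classes wrapped in this
# module. It assumes the classic (pre-OCC.Core) pythonocc layout where gp_Pnt and
# TColgp_Array1OfPnt are importable from OCC.gp and OCC.TColgp, and that
# Poly_Array1OfTriangle is wrapped earlier in this module, as in stock pythonocc builds.
if __name__ == '__main__':
    from OCC.gp import gp_Pnt
    from OCC.TColgp import TColgp_Array1OfPnt

    # Three 3D nodes (1-based arrays, as in OCCT).
    nodes = TColgp_Array1OfPnt(1, 3)
    nodes.SetValue(1, gp_Pnt(0.0, 0.0, 0.0))
    nodes.SetValue(2, gp_Pnt(1.0, 0.0, 0.0))
    nodes.SetValue(3, gp_Pnt(0.0, 1.0, 0.0))

    # One triangle referencing the node indices above.
    triangles = Poly_Array1OfTriangle(1, 1)
    triangles.SetValue(1, Poly_Triangle(1, 2, 3))

    mesh = Poly_Triangulation(nodes, triangles)
    print(mesh.NbNodes(), mesh.NbTriangles())  # expected: 3 1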
|
py | 1a4d89976d36815dc34e878fad55d4977e84d0ed | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from dataactcore.models.domainModels import CountyCode
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs40_detached_award_financial_assistance_1'
def test_column_headers(database):
expected_subset = {"row_number", "place_of_performance_code"}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" PrimaryPlaceOfPerformanceCode last three digits must be a valid county code when format is XX**###. """
county_code = CountyCode(county_number="123", state_code="NY")
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY*****")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FO333")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY**123")
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="Ny**123")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, county_code])
assert errors == 0
def test_failure(database):
""" Test failure for PrimaryPlaceOfPerformanceCode last three digits must be a valid county code when
format is XX**###. """
county_code = CountyCode(county_number="123", state_code="NY")
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00**333")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00**33")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="Ny**124")
det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NA**123")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, county_code])
assert errors == 4
|
py | 1a4d8a0ac81d22bcfc0e44e10abc402d791c6d47 | #!/usr/bin/env python
import argparse
import numpy as np
import math
from numpy.linalg import inv
from numpy import linalg as LA
from os.path import basename, expanduser, isfile, join, splitext
import socket
from matplotlib import pyplot as plt
import time
from skimage import measure
import rospy
from sensor_msgs.msg import CompressedImage
from geometry_msgs.msg import PoseArray, Point, Pose, Quaternion
from duckietown_utils import d8_compressed_image_from_cv_image, logger, rgb_from_ros, yaml_load, get_duckiefleet_root
from duckietown_utils import get_base_name, load_camera_intrinsics, load_homography, load_map, rectify
class Avoider():
'''class to avoid detected obstacles'''
def __init__(self, robot_name=''):
# Robot name
self.robot_name = robot_name
# Parameter definitions
self.lWidthRobot = 140 # mm
self.lWidthLane = 250 # mm
# Control parameters
self.yAvoidanceMargin = 20 # mm
def avoid(self, obstacle_poses_on_track, d_current, theta):
print('AvoiderFct')
self.d_target = 0
self.d_current = d_current
self.theta = theta
emergency_stop = 0
if len(obstacle_poses_on_track.poses) == 1:
# self.d_robot = self.d_current
# self.theta = self.theta_current
x_obstacle = obstacle_poses_on_track.poses[0].position.x * 1000 # mm
y_obstacle = obstacle_poses_on_track.poses[0].position.y * 1000 # mm
r_obstacle = obstacle_poses_on_track.poses[0].position.z * 1000 # mm
# print('x_obstacle = ', x_obstacle)
# print('y_obstacle = ', y_obstacle)
# print('r_obstacle = ', r_obstacle)
global_pos_vec = self.coordinatetransform(x_obstacle, y_obstacle, self.theta, self.d_current)
# x_global = global_pos_vec[0]
y_global = global_pos_vec[1]
# print('y_global = ', y_global)
# print('abs(y_global) = ', abs(y_global))
# print('lanew = ', self.lWidthLane)
# print('robiwidth = ', self.lWidthRobot)
# print('margin =', self.yAvoidanceMargin)
# print('theta=', self.theta)
# print('d_current= ',self.d_current)
# Stop if there is no space
if (abs(y_global) + self.lWidthLane/2 - abs(r_obstacle)) < (self.lWidthRobot + self.yAvoidanceMargin):
print('Emergency Stop')
emergency_stop = 1
# React if possible
self.d_target = (y_global - (np.sign(y_global) * (self.lWidthRobot / 2 + self.yAvoidanceMargin + abs(r_obstacle))))/1000 # convert to m
# print('d_target = ', self.d_target)
elif len(obstacle_poses_on_track.poses) > 1:
print('Number of obstacles reaching avoid function too high')
emergency_stop = 1
targets = [self.d_target, emergency_stop]
return targets
def coordinatetransform(self, x_obstacle, y_obstacle, theta, d_current):
self.theta = theta
self.d_current = d_current
self.x_obstacle = x_obstacle
self.y_obstacle = y_obstacle
vector_local = [self.x_obstacle, self.y_obstacle]
rot_matrix = [[math.cos(self.theta), -math.sin(self.theta)],
[math.sin(self.theta), math.cos(self.theta)]]
vector_global = np.dot(rot_matrix, vector_local) + np.array([0, self.d_current])
x_global = vector_global[0]
y_global = vector_global[1]
return np.array([x_global, y_global])
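if __name__ == '__main__':
    # Hedged, stand-alone sketch of how Avoider.avoid() is driven; the obstacle pose
    # values below are made up for illustration (position.z is read as the obstacle
    # radius, matching how avoid() uses it), and real inputs come from the detector node.
    avoider = Avoider(robot_name='duckiebot')
    obstacle = Pose()
    obstacle.position = Point(0.30, 0.10, 0.04)  # x [m], y [m], radius [m]
    obstacles = PoseArray()
    obstacles.poses.append(obstacle)
    d_target, emergency_stop = avoider.avoid(obstacles, d_current=0.0, theta=0.0)
    # With these numbers there is enough lateral space, so emergency_stop stays 0 and
    # d_target is a small negative offset (about -0.03 m) steering away from the obstacle.
    print('d_target [m]:', d_target, 'emergency_stop:', emergency_stop)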
|
py | 1a4d8a8026a3849f4ae640ef2fdf834441093e13 | """This module contains the ``SeleniumMiddleware`` scrapy middleware"""
from importlib import import_module
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.http import HtmlResponse
from selenium.webdriver.support.ui import WebDriverWait
from .http import SeleniumRequest
class SeleniumMiddleware:
"""Scrapy middleware handling the requests using selenium"""
def __init__(self, driver_name, driver_executable_path, driver_arguments,
browser_executable_path):
"""Initialize the selenium webdriver
Parameters
----------
driver_name: str
The selenium ``WebDriver`` to use
driver_executable_path: str
The path of the executable binary of the driver
driver_arguments: list
A list of arguments to initialize the driver
browser_executable_path: str
The path of the executable binary of the browser
"""
webdriver_base_path = f'selenium.webdriver.{driver_name}'
driver_klass_module = import_module(f'{webdriver_base_path}.webdriver')
driver_klass = getattr(driver_klass_module, 'WebDriver')
driver_options_module = import_module(f'{webdriver_base_path}.options')
driver_options_klass = getattr(driver_options_module, 'Options')
driver_options = driver_options_klass()
if browser_executable_path:
driver_options.binary_location = browser_executable_path
for argument in driver_arguments:
driver_options.add_argument(argument)
driver_kwargs = {
'executable_path': driver_executable_path,
f'{driver_name}_options': driver_options
}
self.driver = driver_klass(**driver_kwargs)
@classmethod
def from_crawler(cls, crawler):
"""Initialize the middleware with the crawler settings"""
driver_name = crawler.settings.get('SELENIUM_DRIVER_NAME')
driver_executable_path = crawler.settings.get('SELENIUM_DRIVER_EXECUTABLE_PATH')
browser_executable_path = crawler.settings.get('SELENIUM_BROWSER_EXECUTABLE_PATH')
driver_arguments = crawler.settings.get('SELENIUM_DRIVER_ARGUMENTS')
if not driver_name or not driver_executable_path:
raise NotConfigured(
'SELENIUM_DRIVER_NAME and SELENIUM_DRIVER_EXECUTABLE_PATH must be set'
)
middleware = cls(
driver_name=driver_name,
driver_executable_path=driver_executable_path,
driver_arguments=driver_arguments,
browser_executable_path=browser_executable_path
)
crawler.signals.connect(middleware.spider_closed, signals.spider_closed)
return middleware
def process_request(self, request, spider):
"""Process a request using the selenium driver if applicable"""
if not isinstance(request, SeleniumRequest):
return None
self.driver.get(request.url)
for cookie_name, cookie_value in request.cookies.items():
self.driver.add_cookie(
{
'name': cookie_name,
'value': cookie_value
}
)
if request.wait_until:
WebDriverWait(self.driver, request.wait_time).until(
request.wait_until
)
if request.screenshot:
request.meta['screenshot'] = self.driver.get_screenshot_as_png()
body = str.encode(self.driver.page_source)
# Expose the driver via the "meta" attribute
request.meta.update({'driver': self.driver})
return HtmlResponse(
self.driver.current_url,
body=body,
encoding='utf-8',
request=request
)
def spider_closed(self):
"""Shutdown the driver when spider is closed"""
self.driver.quit()
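if __name__ == '__main__':
    # Hedged, minimal demo of the middleware outside a crawler: instantiate it directly
    # with a headless Firefox and drive one page load. In a real crawl it is enabled via
    # DOWNLOADER_MIDDLEWARES plus the SELENIUM_* settings read in from_crawler(). The
    # geckodriver path is an assumption, and the executable_path/<driver>_options keyword
    # style built above matches the selenium versions this module targets.
    middleware = SeleniumMiddleware(
        driver_name='firefox',
        driver_executable_path='/usr/local/bin/geckodriver',  # assumed location
        driver_arguments=['-headless'],
        browser_executable_path=None,
    )
    middleware.driver.get('https://example.com')
    print(middleware.driver.title)
    middleware.spider_closed()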
|
py | 1a4d8ae14145e7ba0c9a5750c919be6c6ef324dc | # -*- coding: utf-8 -*-
# Copyright (c) 2018, RP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TipoTecnologia(Document):
pass
|
py | 1a4d8b84d6a7cbcc5237c7d92ea8a57a6229f473 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys,argparse
from fnmatch import fnmatch
from openvino.tools.benchmark.utils.utils import show_available_devices
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
return ivalue
class print_help(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
show_available_devices()
sys.exit()
def parse_args():
parser = argparse.ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action=print_help, nargs='?', default=argparse.SUPPRESS,
help='Show this help message and exit.')
args.add_argument('-i', '--paths_to_input', action='append', nargs='+', type=str, required=False,
help='Optional. '
'Path to a folder with images and/or binaries or to specific image or binary file.')
args.add_argument('-m', '--path_to_model', type=str, required=True,
help='Required. Path to an .xml/.onnx/.prototxt file with a trained model or '
'to a .blob file with a trained compiled model.')
args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
'Default value is CPU. Use \'-d HETERO:<comma separated devices list>\' format to specify HETERO plugin. '
'Use \'-d MULTI:<comma separated devices list>\' format to specify MULTI plugin. '
'The application looks for a suitable plugin for the specified device.')
args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
help='Optional. Required for CPU custom layers. '
'Absolute path to a shared library with the kernels implementations.')
args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
help='Optional. Required for GPU custom kernels. Absolute path to an .xml file with the '
'kernels description.')
args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
help='Optional. Enable using sync/async API. Default value is async.')
args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
help='Optional. Number of iterations. '
'If not specified, the number of iterations is calculated depending on a device.')
args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
help='Optional. Number of infer requests. Default value is determined automatically for device.')
args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
help='Optional. ' +
'Batch size value. ' +
'If not specified, the batch size value is determined from Intermediate Representation')
args.add_argument('-stream_output', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
'Print progress as a plain text. '
'When specified, an interactive progress bar is replaced with a multi-line output.')
args.add_argument('-t', '--time', type=int, required=False, default=None,
help='Optional. Time in seconds to execute topology.')
args.add_argument('-progress', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. '
'Show progress bar (can affect performance measurement). Default values is \'False\'.')
args.add_argument('-shape', type=str, required=False, default='',
help='Optional. '
'Set shape for input. For example, "input1[1,3,224,224],input2[1,4]" or "[1,3,224,224]" in case of one input size.')
args.add_argument('-layout', type=str, required=False, default='',
help='Optional. '
'Prompts how network layouts should be treated by application. '
'For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.')
args.add_argument('-nstreams', '--number_streams', type=str, required=False, default=None,
help='Optional. Number of streams to use for inference on the CPU/GPU/MYRIAD '
'(for HETERO and MULTI device cases use format <device1>:<nstreams1>,<device2>:<nstreams2> '
'or just <nstreams>). '
'Default value is determined automatically for a device. Please note that although the automatic selection '
'usually provides a reasonable performance, it still may be non - optimal for some cases, especially for very small networks. '
'Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency '
'estimations the number of streams should be set to 1. '
'See samples README for more details.')
args.add_argument('-enforcebf16', '--enforce_bfloat16', type=str2bool, required=False, default=False, nargs='?', const=True, choices=[True, False],
help='Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform. '
'\'true\' - enable bfloat16 regardless of platform support. '
'\'false\' - disable bfloat16 regardless of platform support.')
args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
help='Number of threads to use for inference on the CPU, GNA '
'(including HETERO and MULTI cases).')
args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES', choices=['YES', 'NO', 'NUMA'],
help='Optional. Enable threads->cores (\'YES\' is default value), threads->(NUMA)nodes (\'NUMA\') or completely disable (\'NO\')'
'CPU threads pinning for CPU-involved inference.')
args.add_argument('-exec_graph_path', '--exec_graph_path', type=str, required=False,
help='Optional. Path to a file where to store executable graph information serialized.')
args.add_argument('-pc', '--perf_counts', type=str2bool, required=False, default=False, nargs='?', const=True,
help='Optional. Report performance counters.', )
args.add_argument('-report_type', '--report_type', type=str, required=False,
choices=['no_counters', 'average_counters', 'detailed_counters'],
help="Optional. Enable collecting statistics report. \"no_counters\" report contains "
"configuration options specified, resulting FPS and latency. \"average_counters\" "
"report extends \"no_counters\" report and additionally includes average PM "
"counters values for each layer from the network. \"detailed_counters\" report "
"extends \"average_counters\" report and additionally includes per-layer PM "
"counters and latency for each executed infer request.")
args.add_argument('-report_folder', '--report_folder', type=str, required=False, default='',
help="Optional. Path to a folder where statistics report is stored.")
args.add_argument('-dump_config', type=str, required=False, default='',
help="Optional. Path to JSON file to dump IE parameters, which were set by application.")
args.add_argument('-load_config', type=str, required=False, default='',
help="Optional. Path to JSON file to load custom IE parameters."
" Please note, command line parameters have higher priority then parameters from configuration file.")
args.add_argument('-qb', '--quantization_bits', type=int, required=False, default=None, choices=[8, 16],
help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
args.add_argument('-ip', '--input_precision', type=str, required=False, default='U8', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all input layers of the network.')
args.add_argument('-op', '--output_precision', type=str, required=False, default='FP32', choices=['U8', 'FP16', 'FP32'],
help='Optional. Specifies precision for all output layers of the network.')
args.add_argument('-iop', '--input_output_precision', type=str, required=False,
help='Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required. Overwrites precision from ip and op options for specified layers.')
parsed_args = parser.parse_args()
return parsed_args
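if __name__ == '__main__':
    # Hedged smoke test of the parser above. 'model.xml' is only a placeholder path;
    # real runs pass these flags on the command line instead of patching sys.argv.
    sys.argv = [sys.argv[0], '-m', 'model.xml', '-api', 'sync', '-niter', '10']
    print(parse_args())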
|
py | 1a4d8bf0587baecd1a6afc8ff07efa94b640e322 | """
This module provides fundamental solar physical constants.
"""
import io
from astropy.table import Table
from astropy.time import Time
from sunpy.sun import _constants as _con
__all__ = [
'get', 'find', 'print_all', 'spectral_classification', 'au', 'mass', 'equatorial_radius',
'volume', 'surface_area', 'average_density', 'equatorial_surface_gravity',
'effective_temperature', 'luminosity', 'mass_conversion_rate', 'escape_velocity', 'sfu',
'average_angular_size', 'sidereal_rotation_rate', 'first_carrington_rotation',
'mean_synodic_period'
]
constants = _con.physical_constants
def get(key):
"""
Retrieve a constant by key. This is just a short cut into a dictionary.
Parameters
----------
key : `str`
Key in dictionary in ``constants``.
Returns
-------
constant : `~astropy.constants.Constant`
See Also
--------
`~sunpy.sun.constants` :
Contains the description of ``constants``, which, as a dictionary literal object, does not
itself possess a docstring.
Examples
--------
>>> from sunpy.sun import constants
>>> constants.get('mass')
<<class 'astropy.constants.iau2015.IAU2015'> name='Solar mass' value=1.9884754153381438e+30 uncertainty=9.236140093538353e+25 unit='kg' reference='IAU 2015 Resolution B 3 + CODATA 2014'>
"""
ret = constants[key]
ret.__doc__ = ret.name
return ret
def find(sub=None):
"""
Return list of constants keys containing a given string.
Parameters
----------
sub : `str`, optional
Sub-string to search keys for. By default set to `None` and returns all keys.
Returns
-------
`None`, `list`
The matching keys.
See Also
--------
`~sunpy.sun.constants` :
Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.
"""
if sub is None:
result = list(constants.keys())
else:
result = [key for key in constants if sub.lower() in key.lower()]
result.sort()
return result
def print_all():
"""
Provides a table of the complete list of constants.
Returns
-------
`astropy.table.Table`
"""
data_rows = []
for key, this_constant in constants.items():
data_rows.append([
key, this_constant.name, this_constant.value, this_constant.uncertainty,
str(this_constant.unit), this_constant.reference
])
t = Table(rows=data_rows, names=('key', 'name', 'value', 'uncertainty', 'unit', 'Reference'))
return t
def _build_docstring():
"""Build docstring containing RST-formatted table of constants."""
lines = ['The following constants are available:\n']
rows = []
for key, const in constants.items():
rows.append([key, const.value, const._unit_string, const.name])
table = Table(rows=rows, names=('Name', 'Value', 'Unit', 'Description'))
table['Value'].info.format = '14.9g'
f = io.StringIO()
table.write(f, format='ascii.rst')
lines.append(f.getvalue())
return '\n'.join(lines)
# Add a table of constants to the docs
if __doc__ is not None:
__doc__ += _build_docstring()
# Spectral class is not included in physical constants since it is not a number
#: Spectral classification
spectral_classification = 'G2V'
au = astronomical_unit = get('mean distance')
# The following variables from _gets are brought out by making them
# accessible through a call such as sun.volume
mass = get('mass')
equatorial_radius = radius = get('radius')
volume = get('volume')
surface_area = get('surface area')
average_density = density = get('average density')
equatorial_surface_gravity = surface_gravity = get('surface gravity')
effective_temperature = get('effective temperature')
luminosity = get('luminosity')
mass_conversion_rate = get('mass conversion rate')
escape_velocity = get('escape velocity')
sfu = get('solar flux unit')
# Observable parameters
average_angular_size = get('average angular size')
sidereal_rotation_rate = get('sidereal rotation rate')
#: Time of the start of the first Carrington rotation
first_carrington_rotation = Time(get('first Carrington rotation (JD TT)'), format='jd', scale='tt')
mean_synodic_period = get('mean synodic period')
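if __name__ == '__main__':
    # Small usage sketch of the lookup helpers defined above.
    print(find('radius'))   # keys containing "radius"
    print(get('radius'))    # the corresponding astropy Constant (value, unit, uncertainty)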
|
py | 1a4d8c4fe37bf659c3144f76cc0680c974b85cb5 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Cases to be run for the import module"""
IMPLEMENTED_OPERATORS_TEST = [
'test_random_uniform',
'test_random_normal',
'test_add',
'test_sub',
'test_mul',
'test_div',
'test_neg',
'test_abs',
'test_sum',
'test_tanh',
'test_ceil',
'test_floor',
'test_concat',
'test_sigmoid',
'test_relu',
'test_constant_pad',
'test_edge_pad',
'test_reflect_pad',
'test_reduce_min',
'test_reduce_max',
'test_reduce_mean',
'test_reduce_prod',
'test_squeeze',
'test_unsqueeze',
'test_softmax_example',
'test_softmax_large_number',
'test_softmax_axis_2',
'test_transpose',
'test_globalmaxpool',
'test_globalaveragepool',
'test_slice_cpu',
'test_slice_neg',
'test_squeeze_',
'test_reciprocal',
'test_sqrt',
'test_pow',
'test_exp',
'test_argmax',
'test_argmin',
'test_min',
'test_logical_and',
'test_logical_xor',
'test_logical_not',
'test_logical_or',
'test_clip',
'test_softsign',
'test_reduce_l2',
'test_reduce_log_sum',
'test_reduce_log_sum_exp',
    'test_reduce_sum_square',
    # pytorch operator tests
'test_operator_exp',
'test_operator_maxpool',
'test_operator_params',
'test_operator_permute2'
]
BASIC_MODEL_TESTS = [
'test_AvgPool2D',
'test_BatchNorm',
    'test_ConstantPad2d',
'test_Conv2d',
'test_ELU',
'test_LeakyReLU',
'test_MaxPool',
'test_PReLU',
'test_ReLU',
'test_Sigmoid',
'test_Softmax',
'test_softmax_functional',
'test_softmax_lastdim',
'test_Tanh'
]
STANDARD_MODEL = [
'test_bvlc_alexnet',
'test_densenet121',
#'test_inception_v1',
#'test_inception_v2',
'test_resnet50',
#'test_shufflenet',
'test_squeezenet',
'test_zfnet512',
'test_vgg19'
]
|
py | 1a4d8c97937f1c8e26acf9c2215ef9467a7f76fe | import unittest
from find_max_indices import find_max_indices
from drop_first import drop_first
class TestFindMaxIndices(unittest.TestCase):
def test_find_max_indices(self):
Ms = [[[1, 2, 3], [9, 8, 7, 6], [4, 5]]]
expecteds = [(1, (0, 9))]
for M, expected in zip(Ms, expecteds):
self.assertEqual(expected, find_max_indices(M))
class TestDropFirst(unittest.TestCase):
def test_drop_first(self):
iterables = [[1, 2, 3]]
expecteds = [[2, 3]]
for iterable, expected in zip(iterables, expecteds):
self.assertEqual(expected, list(drop_first(iterable)))
if __name__ == "__main__":
unittest.main()
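# Hedged reference sketches, consistent with the expectations above, of what the imported
# helpers could look like (the real find_max_indices/drop_first live in their own modules
# and may be implemented differently):
#
#     def find_max_indices(M):
#         # For a ragged 2D list, return (row_index, (col_index, max_value)).
#         best = max(((i, j, v) for i, row in enumerate(M) for j, v in enumerate(row)),
#                    key=lambda t: t[2])
#         return best[0], (best[1], best[2])
#
#     def drop_first(iterable):
#         # Yield every element except the first one.
#         it = iter(iterable)
#         next(it, None)
#         return it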
|
py | 1a4d8cde6f4276906811116a71b0a0c5aa61b7f6 | from filebeat import BaseTest
from beat.beat import INTEGRATION_TESTS
import os
import unittest
import glob
import subprocess
from elasticsearch import Elasticsearch
import json
import logging
class Test(BaseTest):
def init(self):
self.elasticsearch_url = self.get_elasticsearch_url()
print("Using elasticsearch: {}".format(self.elasticsearch_url))
self.es = Elasticsearch([self.elasticsearch_url])
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
self.modules_path = os.path.abspath(self.working_dir +
"/../../../../module")
self.filebeat = os.path.abspath(self.working_dir +
"/../../../../filebeat.test")
self.index_name = "test-filebeat-modules"
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_modules(self):
self.init()
modules = os.getenv("TESTING_FILEBEAT_MODULES")
if modules:
modules = modules.split(",")
else:
modules = os.listdir(self.modules_path)
# generate a minimal configuration
cfgfile = os.path.join(self.working_dir, "filebeat.yml")
self.render_config_template(
template="filebeat_modules.yml.j2",
output=cfgfile,
index_name=self.index_name,
elasticsearch_url=self.elasticsearch_url)
for module in modules:
path = os.path.join(self.modules_path, module)
filesets = [name for name in os.listdir(path) if
os.path.isfile(os.path.join(path, name,
"manifest.yml"))]
for fileset in filesets:
test_files = glob.glob(os.path.join(self.modules_path, module,
fileset, "test", "*.log"))
for test_file in test_files:
self.run_on_file(
module=module,
fileset=fileset,
test_file=test_file,
cfgfile=cfgfile)
def run_on_file(self, module, fileset, test_file, cfgfile):
print("Testing {}/{} on {}".format(module, fileset, test_file))
try:
self.es.indices.delete(index=self.index_name)
        except Exception:
pass
cmd = [
self.filebeat, "-systemTest",
"-e", "-d", "*", "-once",
"-c", cfgfile,
"-modules={}".format(module),
"-M", "{module}.*.enabled=false".format(module=module),
"-M", "{module}.{fileset}.enabled=true".format(module=module, fileset=fileset),
"-M", "{module}.{fileset}.var.paths=[{test_file}]".format(
module=module, fileset=fileset, test_file=test_file),
"-M", "*.*.prospector.close_eof=true",
]
output = open(os.path.join(self.working_dir, "output.log"), "ab")
output.write(" ".join(cmd) + "\n")
subprocess.Popen(cmd,
stdin=None,
stdout=output,
stderr=subprocess.STDOUT,
bufsize=0).wait()
# Make sure index exists
self.wait_until(lambda: self.es.indices.exists(self.index_name))
self.es.indices.refresh(index=self.index_name)
res = self.es.search(index=self.index_name,
body={"query": {"match_all": {}}})
objects = [o["_source"] for o in res["hits"]["hits"]]
assert len(objects) > 0
for obj in objects:
assert obj["fileset"]["module"] == module, "expected fileset.module={} but got {}".format(
module, obj["fileset"]["module"])
if not (module == "mysql" and fileset == "slowlog"):
# TODO: There are errors parsing the test logs from these modules.
assert "error" not in obj, "not error expected but got: {}".format(obj)
if module != "auditd" and fileset != "log":
# There are dynamic fields in audit logs that are not documented.
self.assert_fields_are_documented(obj)
if os.path.exists(test_file + "-expected.json"):
with open(test_file + "-expected.json", "r") as f:
expected = json.load(f)
assert len(expected) == len(objects), "expected {} but got {}".format(len(expected), len(objects))
for ev in expected:
found = False
for obj in objects:
if ev["_source"][module] == obj[module]:
found = True
break
if not found:
raise Exception("The following expected object was" +
" not found: {}".format(obj))
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_prospector_pipeline_config(self):
"""
Tests that the pipeline configured in the prospector overwrites
the one from the output.
"""
self.init()
index_name = "filebeat-test-prospector"
try:
self.es.indices.delete(index=index_name)
        except Exception:
pass
self.wait_until(lambda: not self.es.indices.exists(index_name))
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
elasticsearch=dict(
host=self.elasticsearch_url,
pipeline="estest",
index=index_name),
pipeline="test",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'a') as file:
file.write("Hello World1\n")
# put pipeline
self.es.transport.perform_request("PUT", "/_ingest/pipeline/test",
body={
"processors": [{
"set": {
"field": "x-pipeline",
"value": "test-pipeline",
}
}]})
filebeat = self.start_beat()
# Wait until the event is in ES
self.wait_until(lambda: self.es.indices.exists(index_name))
def search_objects():
try:
self.es.indices.refresh(index=index_name)
res = self.es.search(index=index_name,
body={"query": {"match_all": {}}})
return [o["_source"] for o in res["hits"]["hits"]]
            except Exception:
return []
self.wait_until(lambda: len(search_objects()) > 0, max_timeout=20)
filebeat.check_kill_and_wait()
objects = search_objects()
assert len(objects) == 1
o = objects[0]
assert o["x-pipeline"] == "test-pipeline"
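# For reference, the command assembled in run_on_file() above is equivalent to a shell
# invocation along these lines (module/fileset/path are placeholders, not a real capture):
#
#   ./filebeat.test -systemTest -e -d '*' -once -c filebeat.yml \
#       -modules=nginx \
#       -M 'nginx.*.enabled=false' -M 'nginx.access.enabled=true' \
#       -M 'nginx.access.var.paths=[module/nginx/access/test/access.log]' \
#       -M '*.*.prospector.close_eof=true'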
|
py | 1a4d8d3749ce732b4821e4b87d972c0b4f5dbbf3 | # -*- coding: utf-8 -*-
import Utils.BlazeFuncs as BlazeFuncs, Utils.Globals as Globals
import time, os
def Ping(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
reply = BlazeFuncs.BlazePacket("0009","0002",packet.packetID,"1000")
reply.writeInt("STIM",int(time.time()))
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def PreAuth(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
#preAuth_Response = "86ebee0000873ca30107333030323934008e99330400130119041b1c0607090a82e0070b80e00381e00382e00383e0031485e00386e003901f8ee9ee0101008efba6038efba60501010510636f6e6e49646c6554696d656f75740004393073001664656661756c745265717565737454696d656f75740004383073000b70696e67506572696f6400043230730016766f69704865616473657455706461746552617465000531303030001a786c7370436f6e6e656374696f6e49646c6554696d656f757400043330300000a6ecf40111626174746c656669656c642d332d706300b69bb20000ba1cf0010a63656d5f65615f696400c29b24010100c2c8740103706300c34867010100c6fcf3038b7c3303c33840011e676f7370726f642d716f7330312e727370632d6c68722e65612e636f6d00c33c00009e9102cee840011072732d70726f642d6c68722d6266330000b2ec00000ab34c330501030704616d7300c33840011d676f7370726f642d716f7330312e6d33642d616d732e65612e636f6d00c33c00009e9102cee840011065612d70726f642d616d732d62663300000464667700c33840011d676f7370726f642d716f7330312e6d33642d6466772e65612e636f6d00c33c00009e9102cee84001116d69732d70726f642d6466772d62663300000469616400c33840011d676f7370726f642d716f7330312e6d33642d6961642e65612e636f6d00c33c00009e9102cee840011065612d70726f642d6961642d6266330000046c617800c33840011d676f7370726f642d716f7330312e6d33642d6c61782e65612e636f6d00c33c00009e9102cee840011065612d70726f642d6c61782d6266330000046c687200c33840011e676f7370726f642d716f7330312e727370632d6c68722e65612e636f6d00c33c00009e9102cee840011072732d70726f642d6c68722d6266330000046e727400c33840011d676f7370726f642d716f7330312e6d33642d6e72742e65612e636f6d00c33c00009e9102cee84001116933642d70726f642d6e72742d62663300000473796400c33840011d676f7370726f642d716f7330312e6d33642d7379642e65612e636f6d00c33c00009e9102cee840011065612d70726f642d7379642d6266330000cf6a640085a088d40800cb3ca3010733303032393400cf6972011e426c617a6520332e31352e30382e302028434c2320393434323635290a00"
#preAuth_Response = "86ebee0000873ca30107333030323934008e99330400120119041b1c0607090a0b80e00381e00382e00383e0031485e00386e003901f8ee9ee0101008efba6038efba60501010510636f6e6e49646c6554696d656f75740004393073001664656661756c745265717565737454696d656f75740004383073000b70696e67506572696f6400043230730016766f69704865616473657455706461746552617465000531303030001a786c7370436f6e6e656374696f6e49646c6554696d656f757400043330300000a6ecf40111626174746c656669656c642d332d706300b69bb20000ba1cf0010a63656d5f65615f696400c29b24010100c2c8740103706300c34867010100c6fcf3038b7c3303c33840010931302e302e302e3100c33c00009e9102cee84001047a6c6f0000b2ec00000ab34c330501030104616d7300c33840010931302e302e302e3100c33c00009e9102cee84001047a6c6f0000cf6a640085a088d40800cb3ca3010733303032393400cf6972011d5a4c4f7a6520332e31352e30382e302028434c23203833343535362900"
#preAuth_Response = "873ca30107333032313233008e993304001688e0030189e003198ae0031b041c0607090a82e007230f80e00382e00383e00384e00386e003901f87e0038efba6038efba6050101111e6173736f63696174696f6e4c697374536b6970496e697469616c5365740002310014626c617a65536572766572436c69656e7449640017474f532d426c617a655365727665722d4246342d50430012627974657661756c74486f73746e616d65001e627974657661756c742e67616d6573657276696365732e65612e636f6d000e627974657661756c74506f7274000634323231300010627974657661756c74536563757265000574727565001863617073537472696e6756616c69646174696f6e557269001c636c69656e742d737472696e67732e78626f786c6976652e636f6d0010636f6e6e49646c6554696d656f75740004393073001664656661756c745265717565737454696d656f7574000436307300136964656e74697479446973706c61795572690011636f6e736f6c65322f77656c636f6d6500146964656e7469747952656469726563745572690019687474703a2f2f3132372e302e302e312f73756363657373000f6e75636c657573436f6e6e656374001868747470733a2f2f6163636f756e74732e65612e636f6d000d6e75636c65757350726f7879001768747470733a2f2f676174657761792e65612e636f6d000b70696e67506572696f640004333073001a757365724d616e616765724d617843616368656455736572730002300016766f69704865616473657455706461746552617465000531303030000c78626c546f6b656e55726e00106163636f756e74732e65612e636f6d001a786c7370436f6e6e656374696f6e49646c6554696d656f757400043330300000973ca3010733303231323300a6ecf40111626174746c656669656c642d342d706300b69bb20000ba1cf0010a63656d5f65615f696400c29b24010100c2c8740103706300c6fcf3038b7c3303c33840012a716f732d70726f642d62696f2d6475622d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840011162696f2d70726f642d616d732d6266340000b2ec00000ab34c330501030604616d7300c33840012a716f732d70726f642d62696f2d6475622d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840011162696f2d70726f642d616d732d62663400000467727500c33840012a716f732d70726f642d6d33642d62727a2d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840010100000469616400c33840012a716f732d70726f642d62696f2d6961642d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840011162696f2d70726f642d6961642d6266340000046c617800c33840012a716f732d70726f642d62696f2d736a632d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840011162696f2d70726f642d6c61782d6266340000046e727400c33840012a716f732d70726f642d6d33642d6e72742d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee84001116933642d70726f642d6e72742d62663400000473796400c33840012a716f732d70726f642d62696f2d7379642d636f6d6d6f6e2d636f6d6d6f6e2e676f732e65612e636f6d00c33c00009e9102cee840011162696f2d70726f642d7379642d6266340000cf6a640085a088d408d29b650080ade20400cb3ca3010733303231323300cf69720120426c617a652031332e332e312e382e302028434c232031313438323639290a00"
reply = BlazeFuncs.BlazePacket("0009","0007",packet.packetID,"1000")
reply.writeBool("ANON", False)
reply.writeString("ASRC", "300294")
reply.append("8e99330400130119041b1c0607090a82e0070b80e00381e00382e00383e0031485e00386e003901f")
reply.writeSStruct("CONF") #CONF STRUCT 1
reply.writeMap("CONF")
reply.writeMapData("associationListSkipInitialSet", "1")
reply.writeMapData("blazeServerClientId", "GOS-BlazeServer-BF4-PC")
reply.writeMapData("bytevaultHostname", "bytevault.gameservices.ea.com")
reply.writeMapData("bytevaultPort", "42210")
reply.writeMapData("bytevaultSecure", "false")
reply.writeMapData("capsStringValidationUri", "client-strings.xboxlive.com")
reply.writeMapData("connIdleTimeout", "90s")
reply.writeMapData("defaultRequestTimeout", "60s")
reply.writeMapData("identityDisplayUri", "console2/welcome")
reply.writeMapData("identityRedirectUri", "http://127.0.0.1/success")
reply.writeMapData("nucleusConnect", "http://127.0.0.1")
reply.writeMapData("nucleusProxy", "http://127.0.0.1/")
reply.writeMapData("pingPeriod", "20s")
reply.writeMapData("userManagerMaxCachedUsers", "0")
reply.writeMapData("voipHeadsetUpdateRate", "1000")
reply.writeMapData("xblTokenUrn", "http://127.0.0.1")
reply.writeMapData("xlspConnectionIdleTimeout", "300")
reply.writeBuildMap()
reply.writeEUnion() #END MAP
reply.writeString("INST", "battlefield-4-pc")
reply.writeBool("MINR", False)
reply.writeString("NASP", "cem_ea_id")
reply.writeString("PLAT", "pc")
reply.writeSStruct("QOSS") #QOSS STRUCT
reply.writeSStruct("BWPS") #BWPS STRUCT
reply.writeString("PSA ", Globals.serverIP)
reply.writeInt("PSP ", 17502)
reply.writeString("SNA ", "rs-prod-lhr-bf4")
reply.writeEUnion() #END BWPS
reply.writeInt("LNP ", 10)
reply.append("b34c330501030704616d7300c33840011d676f7370726f642d716f7330312e6d33642d616d732e65612e636f6d00c33c00009e9102cee840011065612d70726f642d616d732d62663300000464667700c33840011d676f7370726f642d716f7330312e6d33642d6466772e65612e636f6d00c33c00009e9102cee84001116d69732d70726f642d6466772d62663300000469616400c33840011d676f7370726f642d716f7330312e6d33642d6961642e65612e636f6d00c33c00009e9102cee840011065612d70726f642d6961642d6266330000046c617800c33840011d676f7370726f642d716f7330312e6d33642d6c61782e65612e636f6d00c33c00009e9102cee840011065612d70726f642d6c61782d6266330000046c687200c33840011e676f7370726f642d716f7330312e727370632d6c68722e65612e636f6d00c33c00009e9102cee840011072732d70726f642d6c68722d6266330000046e727400c33840011d676f7370726f642d716f7330312e6d33642d6e72742e65612e636f6d00c33c00009e9102cee84001116933642d70726f642d6e72742d62663300000473796400c33840011d676f7370726f642d716f7330312e6d33642d7379642e65612e636f6d00c33c00009e9102cee840011065612d70726f642d7379642d6266330000");
reply.writeInt("SVID", 1337)
reply.writeEUnion() #END QOSS
reply.writeString("RSRC", "302123")
reply.writeString("SVER", "Blaze 13.15.08.0 (CL# 9442625)")
#reply.append(preAuth_Response)
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def PostAuth(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
reply = BlazeFuncs.BlazePacket("0009","0008",packet.packetID,"1000")
#reply.append("c33cc003864cb301010086d8780000bed8780000c2aa64010100c2fcb40000cb0cb40000d29a64000000d25b2503864cb3011b676f7374656c656d657472792e626c617a65332e65612e636f6d0086ebee0000929ce10101009648f400009a9b3401102d47414d452f434f4d4d2f4558504400b2f8c00093d5f2d60cb69bb20000bafbeb010100c2fcb400849c01ce4b390098ea01ce5cf3010b46577065574a3652727000ceb97901b1015eb9caf7d19cb3ddefcb93afaaff818cbbd8e18b9af6ed9bb6b1e8b0a986c6ceb1e2f4d0a9a6a78eb1baea84d3b3ec8d96a4e0c08183868c98b0e0c089e6c6989ab7c2c9e182eed897e2c2d1a3c7ad99b3e9cab1a3d685cd96f0c6b189c3a68d98b8eed091c3a68d96e5dcd59aa5818000cf08f4008b01cf4a6d010844656661756c7400cf6bad011374656c656d657472792d332d636f6d6d6f6e0000d298eb03864cb3010d31302e31302e37382e31353000c2fcb400a78c01ceb9790180013137343830323835302c31302e31302e37382e3135303a383939392c626174746c656669656c642d342d70632c31302c35302c35302c35302c35302c302c300000d72bf003d2dbf00001")
#reply.writeInt("UID ", self.GAMEOBJ.PersonaID)
#reply.writeEUnion() #END UROP
reply.writeSStruct("PSS ")
reply.writeString("ADRS", "")
reply.writeInt("AMAX", 0)
reply.writeInt("OMAX", 0)
reply.writeString("PJID", "")
reply.writeInt("PORT", 0)
reply.writeInt("RPRT", 0)
reply.writeInt("TIID", 0)
reply.writeEUnion() #END PSS
reply.writeSStruct("TELE")
reply.writeString("ADRS", "gostelemetry.blaze3.ea.com")
reply.writeBool("ANON", False)
reply.writeString("DISA", "")
reply.writeBool("EDCT", False)
reply.writeString("FILT", "-GAME/COMM/EXPD")
reply.writeInt("LOC ", 1701729619)
reply.writeBool("MINR", False)
reply.writeString("NOOK", "")
reply.writeInt("PORT", 9988)
reply.writeInt("SDLY", 15000)
reply.writeString("SESS", "2fj3StGgjcB")
reply.writeString("SKEY", "^–âëôÀ“¯ùú· ¤žïéöш³¶¦Ë¹åäÙ«¦®‹¸ãØÈ‰¦§ŒœºîàÑÃæáä衦†‹’°àÀƒ†Œ˜°âÜ‘³†Ž˜¶Ä±·ì‹±áèÑãÖÌÙ´åØ‘ë²¦‹¸ãØÄ¡£¦Íš¹îØ¡“†Ë²îªÍÒ€€")
reply.writeInt("SPCT", 75)
reply.writeString("STIM", "Default")
reply.writeString("SVNM", "telemetry-3-common")
reply.writeEUnion() #END TELE
reply.writeSStruct("TICK")
reply.writeString("ADRS", "10.10.78.150")
reply.writeInt("PORT", 8999)
reply.writeString("SKEY", str(self.GAMEOBJ.PersonaID)+",10.10.78.150:8999,battlefield-4-pc,10,50,50,50,50,0,0")
reply.writeEUnion() #END TICK
reply.writeSStruct("UROP")
reply.writeInt("TMOP", 0)
reply.writeInt("UID ", self.GAMEOBJ.PersonaID)
reply.writeEUnion() #END UROP
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def UserSettingsLoadAll(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
reply = BlazeFuncs.BlazePacket("0009","000c",packet.packetID,"1000")
cwd = os.getcwd()
path = cwd+"\\Users\\"+self.GAMEOBJ.Name+"\\"
f = open(path+'usersettings.txt', 'r')
data = f.readline()
reply.writeMap("SMAP")
reply.writeMapData("cust", str(data))
reply.writeBuildMap()
self.transport.getHandle().sendall(reply.build().decode('Hex'))
f.close()
def GetTelemetryServer(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
reply = BlazeFuncs.BlazePacket("0009","0005",packet.packetID,"1000")
'''
reply.writeString("ADRS", Globals.serverIP)
reply.writeBool("ANON", False)
reply.writeString("DISA", "")
reply.writeInt("LOC ", 1701729619)
reply.writeString("NOOK", "US,CA,MX")
reply.writeInt("PORT", 9988)
reply.writeInt("SDLY", 15000)
reply.writeString("SESS", "2fj3StGgjcB")
reply.writeString("SKEY", "daywwdh")
reply.writeInt("SPCT", 30)
reply.writeString("STIM", "Default")
'''
reply.append("864cb3011b676f7374656c656d657472792e626c617a65332e65612e636f6d0086ebee0000929ce10101009648f400009a9b3401102d47414d452f434f4d4d2f4558504400b2f8c00093d5f2d60cb69bb20000bafbeb010100c2fcb400849c01ce4b390098ea01ce5cf3010b46577065574a3652727000ceb979019e015ea8e28687a0ca81a2b087a7eb8bf3e4b3beb38a9af6ed9bb6b1e8b0e1c2848c98b0e0c08183868c98e1ec88a3f3a69899ace08dfba2ac98baf4d895b396ad99b6e4dad0e982ee9896b1e8d48183e78d9ab2e8d4e1d2ccdbaad394808000cf08f4008b01cf4a6d010844656661756c7400cf6bad011374656c656d657472792d332d636f6d6d6f6e00")
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def FetchClientConfig(self,data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
cfid = packet.getVar("CFID")
reply = BlazeFuncs.BlazePacket("0009","0001",packet.packetID,"1000")
if cfid == "GOSAchievements":
print "GOSAchievements"
reply.writeMap("CONF")
reply.writeMapData("Achievements", "ACH32_00,ACH33_00,ACH34_00,ACH35_00,ACH36_00,ACH37_00,ACH38_00,ACH39_00,ACH40_00,XPACH01_00,XPACH02_00,XPACH03_00,XPACH04_00,XPACH05_00,XP2ACH01_00,XP2ACH04_00,XP2ACH03_00,XP2ACH05_00,XP2ACH02_00,XP3ACH01_00,XP3ACH05_00,XP3ACH03_00,XP3ACH04_00,XP3ACH02_00,XP4ACH01_00,XP4ACH02_00,XP4ACH03_00,XP4ACH04_00,XP4ACH05_00,XP5ACH01_00,XP5ACH02_00,XP5ACH03_00,XP5ach04_00,XP5ach05_00")
reply.writeMapData("WinCodes", "r01_00,r05_00,r04_00,r03_00,r02_00,r10_00,r08_00,r07_00,r06_00,r09_00,r11_00,r12_00,r13_00,r14_00,r15_00,r16_00,r17_00,r18_00,r19_00,r20_00,r21_00,r22_00,r23_00,r24_00,r25_00,r26_00,r27_00,r28_00,r29_00,r30_00,r31_00,r32_00,r33_00,r35_00,r36_00,r37_00,r34_00,r38_00,r39_00,r40_00,r41_00,r42_00,r43_00,r44_00,r45_00,xp2rgm_00,xp2rntdmcq_00,xp2rtdmc_00,xp3rts_00,xp3rdom_00,xp3rnts_00,xp3rngm_00,xp4rndom_00,xp4rscav_00,xp4rnscv_00,xp4ramb1_00,xp4ramb2_00,xp5r502_00,xp5r501_00,xp5ras_00,xp5asw_00")
reply.writeBuildMap()
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def UserSettingsSave(self, data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
data = packet.getVar("DATA")
cwd = os.getcwd()
path = cwd+"\\Users\\"+self.GAMEOBJ.Name+"\\"
f = open(path+'usersettings.txt', 'w')
f.write(data)
f.close()
reply = BlazeFuncs.BlazePacket("0009","000b",packet.packetID,"1000")
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def SetUserMode(self, data_e):
packet = BlazeFuncs.BlazeDecoder(data_e)
mode = packet.getVar("MODE")
reply = BlazeFuncs.BlazePacket("0009","001c",packet.packetID,"1000")
self.transport.getHandle().sendall(reply.build().decode('Hex'))
def ReciveComponent(self,func,data_e):
func = func.upper()
if func == "0001":
print("[UTIL] FetchClientConfig")
FetchClientConfig(self,data_e)
elif func == "0002":
Ping(self,data_e)
elif func == "0003":
print("[UTIL] SendClientData")
elif func == "0004":
print("[UTIL] LocalizeStrings")
elif func == "0005":
print("[UTIL] GetTelemetryServer")
GetTelemetryServer(self,data_e)
elif func == "0006":
print("[UTIL] GetTickerServer")
elif func == "0007":
print("[UTIL] PreAuth")
PreAuth(self,data_e)
elif func == "0008":
print("[UTIL] PostAuth")
PostAuth(self,data_e)
elif func == "000A":
print("[UTIL] UserSettingsLoad")
elif func == "000B":
print("[UTIL] UserSettingsSave")
UserSettingsSave(self, data_e)
elif func == "000C":
print("[UTIL] UserSettingsLoadAll")
UserSettingsLoadAll(self,data_e)
elif func == "000E":
print("[UTIL] DeleteUserSettings")
elif func == "0014":
print("[UTIL] FilterForProfanity")
elif func == "0015":
print("[UTIL] FetchQOSConfig")
elif func == "0016":
print("[UTIL] SetClientMetrics")
elif func == "0017":
print("[UTIL] SetConnectionState")
elif func == "0018":
print("[UTIL] GetPassConfig")
elif func == "0019":
print("[UTIL] GetUserOptions")
elif func == "001A":
print("[UTIL] SetUserOptions")
elif func == "001B":
print("[UTIL] SuspendUserPing")
elif func == "001C":
print("[UTIL] SetUserMode")
SetUserMode(self,data_e)
else:
print("[UTIL] ERROR! UNKNOWN FUNC "+func)
|
py | 1a4d8dbe2885ebfe549addb97a1f67a8548a4f12 | from __future__ import annotations
import itertools
from typing import (
TYPE_CHECKING,
cast,
)
import numpy as np
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas._typing import Dtype
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_dtype,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
)
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_compressed_ids,
get_group_index,
get_group_index_sorter,
)
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : MultiIndex
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, index: MultiIndex, level=-1, constructor=None):
if constructor is None:
constructor = DataFrame
self.constructor = constructor
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = get_group_index_sorter(comp_index, ngroups)
indexer = ensure_platform_int(indexer)
return indexer, to_sort
@cache_readonly
def sorted_labels(self):
indexer, to_sort = self._indexer_and_to_sort
return [line.take(indexer) for line in to_sort]
def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
indexer, _ = self._indexer_and_to_sort
sorted_values = algos.take_nd(values, indexer, axis=0)
return sorted_values
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
# error: Argument 1 to "zeros" has incompatible type "number"; expected
# "Union[int, Sequence[int]]"
mask = np.zeros(np.prod(self.full_shape), dtype=bool) # type: ignore[arg-type]
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self, values, value_columns, fill_value):
if values.ndim == 1:
values = values[:, np.newaxis]
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
values, _ = self.get_new_values(values, fill_value)
columns = self.get_new_columns(value_columns)
index = self.new_index
return self.constructor(values, index=index, columns=columns)
def get_new_values(self, values, fill_value=None):
if values.ndim == 1:
values = values[:, np.newaxis]
sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
# TODO: Under what circumstances can we rely on sorted_values
# matching values? When that holds, we can slice instead
# of take (in particular for EAs)
new_values = (
sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values.dtype):
sorted_values = sorted_values.view("i8")
new_values = new_values.view("i8")
elif is_bool_dtype(values.dtype):
sorted_values = sorted_values.astype("object")
new_values = new_values.astype("object")
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
libreshape.unstack(
sorted_values,
mask.view("u1"),
stride,
length,
width,
new_values,
new_mask.view("u1"),
)
# reconstruct dtype if needed
if needs_i8_conversion(values.dtype):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self, value_columns):
if value_columns is None:
if self.lift == 0:
return self.removed_level._rename(name=self.removed_name)
lev = self.removed_level.insert(0, item=self.removed_level._na_value)
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.lift
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(value_columns, MultiIndex):
new_levels = value_columns.levels + (self.removed_level_full,)
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
new_levels = [value_columns, self.removed_level_full]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
@cache_readonly
def new_index(self):
# Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
level, level_codes = self.new_index_levels[0], result_codes[0]
if (level_codes == -1).any():
level = level.insert(len(level), level._na_value)
return level.take(level_codes).rename(self.new_index_names[0])
return MultiIndex(
levels=self.new_index_levels,
codes=result_codes,
names=self.new_index_names,
verify_integrity=False,
)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
# GH 19966 Make sure if MultiIndexed index has tuple name, they will be
# recognised as a whole
if clocs in index.names:
clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)
if not rlocs:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name="__placeholder__")
else:
dummy_index = MultiIndex(
levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ["__placeholder__"],
verify_integrity=False,
)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val, fill_value=fill_value)
clocs = [v if v < val else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
# Prioritize integer interpretation (GH #21677):
if not is_integer(level) and not level == "__placeholder__":
level = obj.index._get_level_number(level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
elif not isinstance(obj.index, MultiIndex):
# GH 36113
# Give nicer error messages when unstack a Series whose
# Index is not a MultiIndex.
raise ValueError(
f"index must be a MultiIndex to unstack, {type(obj.index)} was passed"
)
else:
if is_1d_only_ea_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor_expanddim
)
return unstacker.get_result(
obj._values, value_columns=None, fill_value=fill_value
)
def _unstack_frame(obj, level, fill_value=None):
if not obj._can_fast_transpose:
unstacker = _Unstacker(obj.index, level=level)
mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)
return obj._constructor(mgr)
else:
unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor)
return unstacker.get_result(
obj._values, value_columns=obj.columns, fill_value=fill_value
)
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Defer to the logic in ExtensionBlock._unstack
df = series.to_frame()
result = df.unstack(level=level, fill_value=fill_value)
return result.droplevel(level=0, axis=1)
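# Illustrative sketch (not from the pandas source): unstacking a Series backed by
# an extension array keeps the extension dtype in every resulting column, e.g.
#
#     s = pd.Series([1, 2, 3, 4], dtype="Int64",
#                   index=pd.MultiIndex.from_product([["a", "b"], ["x", "y"]]))
#     s.unstack()   # 2x2 DataFrame; columns "x" and "y" both keep dtype Int64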
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
if not frame.empty and frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
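# Illustrative sketch (not from the pandas source): stacking a plain frame moves
# the columns into a new innermost index level, e.g.
#
#     df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
#     df.stack()   # Series indexed by (x, a), (x, b), (y, a), (y, b)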
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError(
"level should contain all level names or all level "
"numbers, not a mixture of the two."
)
return result
def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:
"""Creates a MultiIndex from the first N-1 levels of this MultiIndex."""
if len(columns.levels) <= 2:
return columns.levels[0]._rename(name=columns.names[0])
levs = [
[lev[c] if c >= 0 else None for c in codes]
for lev, codes in zip(columns.levels[:-1], columns.codes[:-1])
]
# Remove duplicate tuples in the MultiIndex.
tuples = zip(*levs)
unique_tuples = (key for key, _ in itertools.groupby(tuples))
new_levs = zip(*unique_tuples)
# The dtype of each level must be explicitly set to avoid inferring the wrong type.
# See GH-36991.
return MultiIndex.from_arrays(
[
# Not all indices can accept None values.
Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev
for new_lev, lev in zip(new_levs, columns.levels)
],
names=columns.names[:-1],
)
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel.
If `level_num` matches a column name return the name from
position `level_num`, otherwise return `level_num`.
"""
if level_num in columns.names:
return columns.names[level_num]
return level_num
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns._is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
new_columns = _stack_multi_column_index(this.columns)
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_nan = level_vals.insert(len(level_vals), None)
level_vals_used = np.take(level_vals_nan, level_codes)
levsize = len(level_codes)
drop_cols = []
for key in new_columns:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals_nan.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_homogeneous_type and is_extension_array_dtype(
frame.dtypes.iloc[0]
):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.items()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
old_codes, old_levels = factorize_from_iterable(this.index)
new_levels = [old_levels]
new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how="all")
return result
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na: bool = False,
columns=None,
sparse: bool = False,
drop_first: bool = False,
dtype: Dtype | None = None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
dtypes_to_encode = ["object", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: list[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
def _get_dummies_1d(
data,
prefix,
prefix_sep="_",
dummy_na: bool = False,
sparse: bool = False,
drop_first: bool = False,
dtype: Dtype | None = None,
) -> DataFrame:
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
# error: Argument 1 to "dtype" has incompatible type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"; expected "Type[Any]"
dtype = np.dtype(dtype) # type: ignore[arg-type]
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data) -> DataFrame:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels])
index: Index | None
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
fill_value: bool | float | int
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices: list[list] = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(
np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype,
)
sparse_series.append(Series(data=sarr, index=index, name=col))
out = concat(sparse_series, axis=1, copy=False)
# TODO: overload concat with Literal for axis
out = cast(DataFrame, out)
return out
else:
# take on axis=1 + transpose to ensure ndarray layout is column-major
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=1).T
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def _reorder_for_extension_array_stack(
arr: ExtensionArray, n_rows: int, n_columns: int
) -> ExtensionArray:
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
|
py | 1a4d8fc742fbc89fd26ce670ade72b75e94577b4 | # Copyright (C) 2015-2022 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import Optional
from .input_message_content import InputMessageContent
@dataclass(frozen=True)
class InputContactMessageContent(InputMessageContent):
"""\
Represents InputContactMessageContent object:
https://core.telegram.org/bots/api#inputcontactmessagecontent
"""
    phone_number: str
    first_name: str
    last_name: Optional[str] = None
    vcard: Optional[str] = None
|
py | 1a4d916e18a493230b73ffddb1bfc7846406efef | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
_VARIABLE_OPS = {
"Assign",
"AssignAdd",
"AssignSub",
"Queue",
"ScatterAdd",
"ScatterSub",
"ScatterUpdate",
"TruncatedNormal",
"Variable",
}
def _is_variable_op(op):
"""Returns true if 'op' refers to a Variable node."""
return op in _VARIABLE_OPS
def set_cpu0(device_string):
"""Creates a new device string based on `device_string' but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device.device_type = "CPU"
parsed_device.device_index = 0
return parsed_device.to_string()
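# Usage sketch (device string is illustrative): set_cpu0 rewrites only the device
# part of the spec, e.g.
#
#     set_cpu0("/job:worker/replica:0/task:0/device:GPU:1")
#     # -> "/job:worker/replica:0/task:0/device:CPU:0"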
def must_run_on_cpu(node, pin_variables_on_cpu=False):
"""Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
    pin_variables_on_cpu: If True, this function will return True if node_def
      represents a variable-related op (such ops are pinned to CPU).
Returns:
True if the given node must run on CPU, otherwise False.
"""
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, graph_pb2.NodeDef)
node_def = node
# If the op is a variable-related op, should we pin it on CPU?
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
# Constant operations producing a string or int32 must run on CPU.
if node_def.op == "Const":
# Get the value of the 'dtype' attr
dtype = node_def.attr["dtype"].type
if dtype == dtypes.string or dtype == dtypes.int32:
return True
if node_def.op == "DynamicStitch":
dtype = node_def.attr["T"].type
if dtype == dtypes.int32:
# DynamicStitch on GPU only works for int32 values.
return True
if node_def.op in ["Cast"]:
dtype = node_def.attr["SrcT"].type
if dtype == dtypes.int32:
# Cast on GPU does not works for int32 values.
return True
return False
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
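# e.g. _node_name("^init") -> "init" and _node_name("Mul:0") -> "Mul"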
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
Args:
graph_def: A graph_pb2.GraphDef proto.
dest_nodes: A list of strings specifying the destination node names.
Returns:
The GraphDef of the sub-graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
edges = {} # Keyed by the dest node name.
name_to_node_map = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
node_seq = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = _node_name(node.name)
name_to_node_map[n] = node
edges[n] = [_node_name(x) for x in node.input]
node_seq[n] = seq
seq += 1
for d in dest_nodes:
assert d in name_to_node_map, "%s is not in graph" % d
nodes_to_keep = set()
# Breadth first search to find all the nodes that we should keep.
next_to_visit = dest_nodes[:]
while next_to_visit:
n = next_to_visit[0]
del next_to_visit[0]
if n in nodes_to_keep:
# Already visited this node.
continue
nodes_to_keep.add(n)
next_to_visit += edges[n]
nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
out.node.extend([copy.deepcopy(name_to_node_map[n])])
return out
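# Usage sketch (the node name "softmax" is a placeholder for a real output node):
#
#     pruned_graph_def = extract_sub_graph(graph_def, ["softmax"])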
def tensor_shape_from_node_def_name(graph, input_name):
"""Convenience function to get a shape from a NodeDef's input string."""
# To get a tensor, the name must be in the form <input>:<port>, for example
# 'Mul:0'. The GraphDef input strings don't always have the port specified
# though, so if there isn't a colon we need to add a default ':0' to the end.
if ":" not in input_name:
canonical_name = input_name + ":0"
else:
canonical_name = input_name
tensor = graph.get_tensor_by_name(canonical_name)
shape = tensor.get_shape()
return shape
def convert_variables_to_constants(sess, input_graph_def, output_node_names):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
Returns:
GraphDef containing a simplified version of the original.
"""
found_variables = {}
variable_names = []
variable_dict_names = []
for node in input_graph_def.node:
if node.op == "Assign":
variable_name = node.input[0]
variable_dict_names.append(variable_name)
variable_names.append(variable_name + ":0")
if variable_names:
returned_variables = sess.run(variable_names)
else:
returned_variables = []
found_variables = dict(zip(variable_dict_names, returned_variables))
logging.info("Frozen %d variables." % len(returned_variables))
# This graph only includes the nodes needed to evaluate the output nodes, and
# removes unneeded nodes like those involved in saving and assignment.
inference_graph = extract_sub_graph(input_graph_def, output_node_names)
output_graph_def = graph_pb2.GraphDef()
how_many_converted = 0
for input_node in inference_graph.node:
output_node = graph_pb2.NodeDef()
if input_node.name in found_variables:
output_node.op = "Const"
output_node.name = input_node.name
dtype = input_node.attr["dtype"]
data = found_variables[input_node.name]
output_node.attr["dtype"].CopyFrom(dtype)
output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(data,
dtype=dtype.type,
shape=data.shape)))
how_many_converted += 1
else:
output_node.CopyFrom(input_node)
output_graph_def.node.extend([output_node])
print("Converted %d variables to const ops." % how_many_converted)
return output_graph_def
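# Usage sketch (assumes an active session with initialized variables and an
# output node actually named "output"; adjust the names to the real graph):
#
#     with tf.Session() as sess:
#         sess.run(tf.initialize_all_variables())
#         frozen_graph_def = convert_variables_to_constants(
#             sess, sess.graph_def, ["output"])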
|
py | 1a4d918d62281f72b7b02429ada5eb7601dc605b | # Copyright (c) 2016, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSHelperFn, AWSObject, AWSProperty, Tags
from .validators import boolean, integer, positive_integer
class SourceAuth(AWSProperty):
props = {
'Resource': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'OAUTH'
]
auth_types = self.properties.get('Type')
if auth_types not in valid_types:
raise ValueError('SourceAuth Type: must be one of %s' %
','.join(valid_types))
class Artifacts(AWSProperty):
props = {
'EncryptionDisabled': (boolean, False),
'Location': (basestring, False),
'Name': (basestring, False),
'NamespaceType': (basestring, False),
'OverrideArtifactName': (boolean, False),
'Packaging': (basestring, False),
'Path': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'CODEPIPELINE',
'NO_ARTIFACTS',
'S3',
]
artifact_type = self.properties.get('Type')
if artifact_type not in valid_types:
raise ValueError('Artifacts Type: must be one of %s' %
','.join(valid_types))
if artifact_type == 'S3':
for required_property in ['Name', 'Location']:
if not self.properties.get(required_property):
raise ValueError(
'Artifacts Type S3: requires %s to be set' %
required_property
)
class EnvironmentVariable(AWSProperty):
props = {
'Name': (basestring, True),
'Type': (basestring, False),
'Value': (basestring, True),
}
def validate(self):
if 'Type' in self.properties:
valid_types = [
'PARAMETER_STORE',
'PLAINTEXT',
]
env_type = self.properties.get('Type')
if env_type not in valid_types:
raise ValueError(
'EnvironmentVariable Type: must be one of %s' %
','.join(valid_types))
class Environment(AWSProperty):
props = {
'ComputeType': (basestring, True),
'EnvironmentVariables': ((list, [EnvironmentVariable]), False),
'Image': (basestring, True),
'PrivilegedMode': (boolean, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'LINUX_CONTAINER',
'WINDOWS_CONTAINER',
]
env_type = self.properties.get('Type')
if env_type not in valid_types:
raise ValueError('Environment Type: must be one of %s' %
','.join(valid_types))
class ProjectCache(AWSProperty):
props = {
'Location': (basestring, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'NO_CACHE',
'S3',
]
cache_type = self.properties.get('Type')
if cache_type not in valid_types:
raise ValueError('ProjectCache Type: must be one of %s' %
','.join(valid_types))
class Source(AWSProperty):
props = {
'Auth': (SourceAuth, False),
'BuildSpec': (basestring, False),
'GitCloneDepth': (positive_integer, False),
'InsecureSsl': (boolean, False),
'Location': (basestring, False),
'ReportBuildStatus': (boolean, False),
'Type': (basestring, True),
}
def validate(self):
valid_types = [
'BITBUCKET',
'CODECOMMIT',
'CODEPIPELINE',
'GITHUB',
'GITHUB_ENTERPRISE',
'NO_SOURCE',
'S3',
]
location_agnostic_types = [
'CODEPIPELINE',
'NO_SOURCE',
]
source_type = self.properties.get('Type')
# Don't do additional checks if source_type can't
# be determined (for example, being a Ref).
if isinstance(source_type, AWSHelperFn):
return
if source_type not in valid_types:
raise ValueError('Source Type: must be one of %s' %
','.join(valid_types))
location = self.properties.get('Location')
if source_type not in location_agnostic_types and not location:
raise ValueError(
'Source Location: must be defined when type is %s' %
source_type
)
auth = self.properties.get('Auth')
        if auth is not None and source_type != 'GITHUB':
raise ValueError("SourceAuth: must only be defined when using "
"'GITHUB' Source Type.")
class VpcConfig(AWSProperty):
props = {
'SecurityGroupIds': ([basestring], True),
'Subnets': ([basestring], True),
'VpcId': (basestring, True),
}
class ProjectTriggers(AWSProperty):
props = {
'Webhook': (boolean, False),
}
def validate_status(status):
""" Validate status
:param status: The Status of CloudWatchLogs or S3Logs
:return: The provided value if valid
"""
valid_statuses = [
'ENABLED',
'DISABLED'
]
if status not in valid_statuses:
raise ValueError('Status: must be one of %s' %
','.join(valid_statuses))
return status
class CloudWatchLogs(AWSProperty):
props = {
"Status": (validate_status, True),
"GroupName": (basestring, False),
"StreamName": (basestring, False)
}
class S3Logs(AWSProperty):
props = {
"Status": (validate_status, True),
"Location": (basestring, False)
}
class LogsConfig(AWSProperty):
props = {
'CloudWatchLogs': (CloudWatchLogs, False),
'S3Logs': (S3Logs, False)
}
class Project(AWSObject):
resource_type = "AWS::CodeBuild::Project"
props = {
'Artifacts': (Artifacts, True),
'BadgeEnabled': (boolean, False),
'Cache': (ProjectCache, False),
'Description': (basestring, False),
'EncryptionKey': (basestring, False),
'Environment': (Environment, True),
"LogsConfig": (LogsConfig, False),
'Name': (basestring, True),
'SecondaryArtifacts': ([Artifacts], False),
'SecondarySources': ([Source], False),
'ServiceRole': (basestring, True),
'Source': (Source, True),
'Tags': (Tags, False),
'TimeoutInMinutes': (integer, False),
'Triggers': (ProjectTriggers, False),
'VpcConfig': (VpcConfig, False),
}
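# Usage sketch (role ARN, image and repository URL are placeholders):
#
#     project = Project(
#         "MyBuild",
#         Name="my-build",
#         ServiceRole="arn:aws:iam::123456789012:role/codebuild-service-role",
#         Artifacts=Artifacts(Type="NO_ARTIFACTS"),
#         Environment=Environment(
#             ComputeType="BUILD_GENERAL1_SMALL",
#             Image="aws/codebuild/standard:2.0",
#             Type="LINUX_CONTAINER",
#         ),
#         Source=Source(Type="GITHUB", Location="https://github.com/org/repo"),
#     )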
|
py | 1a4d942d33ff0fb6c39f2b92c1eb335dec9b4103 | import sys
sys.path.append('.')
from util.game import Game
from util.func import Case
from util.card import Card, CardList, CardSuit
from util.player import Player
from typing import List, Optional, Tuple
import random
class LevelUp(Game):
### Constants
PLAYERNUM: int = 4
CARDPOOL: List[Card] = [Card(i) for i in range(54)] * 2
BASESCORE: int = 80
LEVELSCORE: int = 40
def __init__(self):
self.players: List[Optional[Player]] = [None] * LevelUp.PLAYERNUM
self.discard_buffer: Optional[CardList] = None
self.curPlayerIndex: Optional[int] = None
self.rankLevel: Tuple[int, int] = (1, 1)
self.dealerIndex: Optional[int] = None
self.rankMain: int = 1
        # E.g. (Spade, 0, 1) means the main suit is Spade, declared by player 0
        # with a single spade; the last element is 1 for a single, 2 for a pair.
        self.suitMain: Optional[Tuple[CardSuit, int, int]] = None
self.score: int = 0
self.state: str = 'END'
for i in range(len(self.players)):
self.players[i] = Player()
def inform(self, information):
case = Case(self.state)
if case('END'):
case = Case(information)
if case('START'):
self.state = 'DISPATCH'
return (True, self._dispatch(),
{
'suit': self.suitMain,
'rank': self.rankMain,
'level': self.rankLevel
}
)
if case('DISPATCH'):
case = Case(information)
if case('FINISH'):
self.state = 'DISCARD'
if self.dealerIndex is None:
self.dealerIndex = self.suitMain[1]
self.curPlayerIndex = self.dealerIndex
self.players[self.curPlayerIndex].cardInHand += self.discard_buffer
self.discard_buffer = CardList()
for player in self.players:
player.cardFront = CardList()
return (True, None, None)
return (False, None, None)
def _dispatch(self) -> List[int]:
newCardPool: List[Card] = random.sample(LevelUp.CARDPOOL, len(LevelUp.CARDPOOL))
dispatch = [newCardPool[0:25], newCardPool[25:50], newCardPool[50:75], newCardPool[75:100], newCardPool[100:]]
for id, player in enumerate(self.players):
player.cardInHand = CardList(dispatch[id])
self.discard_buffer = CardList(dispatch[-1])
return [[card.ID for card in cards] for cards in dispatch]
def isSuitable(self, cards: List[int], playerID: int, suit: Optional[CardSuit] = None):
#suit: 0 NT, 1 Spade, 2 Heart, 3 Club, 4 Diamond
if suit is None:
return [self.isSuitable(cards, playerID, s) for s in [
CardSuit.Joker, CardSuit.Spade, CardSuit.Heart, CardSuit.Club, CardSuit.Diamond
]]
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if self.suitMain is None:
if suit == CardSuit.Joker:
return cards.count(52) == 2 or cards.count(53) == 2
else:
return cardnum in cards
elif self.suitMain[1] == playerID:
if self.suitMain[2] == 2: return False
if suit != self.suitMain[0]: return False
return cards.count(cardnum) == 2
else:
if suit == CardSuit.Joker:
if self.suitMain[0] == CardSuit.Joker:
return cards.count(53) == 2
else:
return cards.count(53) == 2 or cards.count(52) == 2
if self.suitMain[2] == 2: return False
return cards.count(cardnum) == 2
def suitRequest(self, playerID: int, suit: CardSuit):
cards = self.players[playerID].cardInHand.tolist()
if not self.isSuitable(cards, playerID, suit):
return False
for player in self.players:
player.cardFront = CardList()
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if suit == CardSuit.Joker:
if cards.count(52) == 2:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(52), Card(52)])
else:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(53), Card(53)])
else:
if self.suitMain is None:
self.suitMain = (suit, playerID, 1)
self.players[playerID].cardFront += Card(cardnum)
else:
self.suitMain = (suit, playerID, 2)
self.players[playerID].cardFront += CardList([Card(cardnum), Card(cardnum)])
front = [player.cardFront.tolist() for player in self.players]
return [front, self.suitMain]
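# Card-ID convention implied by the suit checks above: IDs 0-53 per deck (two
# decks in CARDPOOL), 52/53 being the jokers, and ranks offset by 0 (Diamond),
# 13 (Club), 26 (Heart) and 39 (Spade), so 39 + rankMain is the spade of the
# current main rank.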
|
py | 1a4d94e0bc66450cc63ef04da76228e286187ea2 | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
""" Read data from the magnetometer and print it out, ASAP! """
import board
import adafruit_lsm303dlh_mag
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_lsm303dlh_mag.LSM303DLH_Mag(i2c)
while True:
mag_x, mag_y, mag_z = sensor.magnetic
print("{0:10.3f} {1:10.3f} {2:10.3f}".format(mag_x, mag_y, mag_z))
|
py | 1a4d95e597de30775fd763bbd48e097634f1dace | '''
File: pathtracker.py
Path tracking simulation with Stanley steering control and PID speed control.
author: Atsushi Sakai (@Atsushi_twi)
Ref:
- [Stanley: The robot that won the DARPA grand challenge](http://isl.ecst.csuchico.edu/DOCS/darpa2005/DARPA%202005%20Stanley.pdf)
- [Autonomous Automobile Path Tracking](https://www.ri.cmu.edu/pub_files/2009/2/Automatic_Steering_Methods_for_Autonomous_Automobile_Path_Tracking.pdf)
'''
import numpy as np
import math
class StanleyController(object):
MIN_THROTTLE = 0.0
MAX_THROTTLE = 1.0
def __init__(self, cfg):
self.cfg = cfg
self.k = 0.5 # control gain
self.Kp = cfg.KP # speed proportional gain
        self.Kd = cfg.KD # speed differential gain
self.Kta = 0.5 # accel to throttle ratio
self.maxaccel = cfg.MAX_ACCEL
self.L = 30 # [m] Wheel base of vehicle
self.x = 0.
self.y = 0.
self.camx = 105.
self.camy = 400.
self.yaw = -math.pi # Current yaw (birdseye frame)
self.v = 0.
self.throttle = 0. # current throttle setting
self.img_count = 0
def constant_speed_control(self,v_target,v_current,throttle):
"""
Proportional control for the speed.
        :param v_target: target speed (float)
        :param v_current: current speed (float)
        :param throttle: previous throttle setting (float)
        :return: updated throttle setting (float)
"""
v_correction = self.Kp * (v_target - v_current)
current_accel = v_current - self.v
accel_delta = v_correction - current_accel
if accel_delta > self.maxaccel:
accel_delta = self.maxaccel
if accel_delta < -self.maxaccel:
accel_delta = -self.maxaccel
throttle = throttle + (accel_delta * self.Kta)
if throttle < self.MIN_THROTTLE:
throttle = self.MIN_THROTTLE
if throttle > self.MAX_THROTTLE:
throttle = self.MAX_THROTTLE
return throttle
def stanley_control(self, cx, cy, cyaw, v, last_target_idx):
"""
Stanley steering control.
:param state: (State object)
:param cx: ([float])
:param cy: ([float])
:param cyaw: ([float])
:param last_target_idx: (int)
:return: (float, int)
"""
current_target_idx, error_front_axle = self.calc_target_index(cx, cy)
if last_target_idx >= current_target_idx:
current_target_idx = last_target_idx
# theta_e corrects the heading error
theta_e = cyaw[current_target_idx] - self.yaw #self.normalize_angle(cyaw[current_target_idx] - self.yaw)
# theta_d corrects the cross track error
# theta_d = np.arctan2(self.k * error_front_axle, v)
# Steering control
delta = theta_e # + theta_d
return delta, current_target_idx
def normalize_angle(self,angle):
"""
Normalize an angle to [-pi, pi].
:param angle: (float)
:return: (float) Angle in radian in [-pi, pi]
"""
while angle > np.pi:
angle -= 2.0 * np.pi
while angle < -np.pi:
angle += 2.0 * np.pi
return angle
def calc_target_index(self, cx, cy):
"""
Compute index in the trajectory list of the target.
:param state: (State object)
:param cx: [float]
:param cy: [float]
:return: (int, float)
"""
# Calc front axle position
fx = self.camx + self.L * np.cos(self.yaw)
fy = self.camy + self.L * np.sin(self.yaw)
# Search nearest point index
dx = [fx - icx for icx in cx]
dy = [fy - icy for icy in cy]
d = np.hypot(dx, dy)
target_idx = np.argmin(d)
# Project RMS error onto front axle vector
front_axle_vec = [-np.cos(self.yaw + np.pi / 2),
-np.sin(self.yaw + np.pi / 2)]
error_front_axle = np.dot([dx[target_idx], dy[target_idx]], front_axle_vec)
return target_idx, error_front_axle
def run(self,img_count,x,y,yaw,velturn,velfwd,rax,ray,ryaw,speedprofile,runstate):
if img_count > self.img_count:
self.x = x
self.y = y
self.camx = 105
self.camy = 400
self.img_count = img_count
else:
dx = (x - self.x) * 100
dy = (y - self.y) * 100
self.camy = self.camy - (np.cos(yaw)*dy - np.sin(yaw)*dx) # rotate velocity by yaw angle to the camera frame
self.camx = self.camx + (np.sin(yaw)*dy + np.cos(yaw)*dx)
self.x = x
self.y = y
print(f'reuse situation {self.camx},{self.camy}')
v = np.abs(np.hypot(velfwd, velturn))
self.yaw = -np.pi/2 #np.arctan2(velfwd, velturn) - (np.pi / 2.)
if runstate == 'running':
target_idx, _ = self.calc_target_index(rax, ray)
target_speed = speedprofile[target_idx]
delta, target_idx = self.stanley_control(rax, ray, ryaw, v, target_idx)
# yaw_correction = delta - (np.arctan2(velfwd, velturn) + np.pi)
else: # if the car is not in a running state keep it stopped
target_speed = 0.0
delta = 0.0
# yaw_correction = 0.0
steering_angle = self.yaw + delta + np.pi
throttle = self.constant_speed_control(target_speed, v, self.throttle)
print(delta,steering_angle, v, target_speed, throttle, runstate)
self.throttle = throttle # for next time around
self.v = v
return self.camx,self.camy,delta,steering_angle,throttle
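# Reference: the full Stanley law is delta = theta_e + arctan(k * e_fa / v),
# heading error plus a cross-track correction; stanley_control above currently
# applies only the heading-error term (the theta_d term is commented out).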
|
py | 1a4d96106593ec9c021fb3bdbdeb9742c1c9d15e | import os
import json
from tabulate import tabulate
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.layers import Dense, Dropout, Input, LSTM
from keras.constraints import maxnorm
from keras.callbacks import EarlyStopping, ModelCheckpoint
from stochnet.classes.TimeSeriesDataset import TimeSeriesDataset
from stochnet.classes.NeuralNetworks import StochNeuralNetwork
from stochnet.classes.TopLayers import MultivariateNormalCholeskyOutputLayer, MixtureOutputLayer
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
current = os.getcwd()
working_path = os.path.dirname(current)
basename = os.path.abspath(working_path)
dataset_address = os.path.join(basename, 'dataset/SIR_dataset_upgraded_2.npy')
test_dataset_address = os.path.join(basename, 'dataset/SIR_dataset_upgraded_3.npy')
data_labels = {'Timestamps': 0, 'Susceptible': 1, 'Infected': 2, 'Removed': 3}
dataset = TimeSeriesDataset(dataset_address, labels=data_labels)
test_dataset = TimeSeriesDataset(test_dataset_address, labels=data_labels)
nb_past_timesteps = 5
dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, percentage_of_test_data=0)
test_dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, percentage_of_test_data=0)
X_train = dataset.X_train
X_test = test_dataset.X_train
Y_train = dataset.y_train
Y_test = test_dataset.y_train
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
"""
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
"""
input_tensor = Input(shape=(5, 3))
hidden1 = LSTM({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}),
recurrent_constraint=maxnorm({{uniform(1, 3)}}))(input_tensor)
dropout1 = Dropout({{uniform(0.2, 0.7)}})(hidden1)
NN_body = Dense({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout1)
dropout2 = Dropout({{uniform(0.2, 0.7)}})(NN_body)
NN_body = Dense({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout2)
number_of_components = 2
components = []
for j in range(number_of_components):
components.append(MultivariateNormalCholeskyOutputLayer(3))
TopModel_obj = MixtureOutputLayer(components)
NN = StochNeuralNetwork(input_tensor, NN_body, TopModel_obj)
callbacks = []
callbacks.append(EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='min'))
result = NN.fit(X_train, Y_train,
batch_size={{choice([512, 1024, 2048, 3072, 4096])}},
epochs={{choice([10, 15, 20, 40])}},
verbose=2,
callbacks=callbacks,
validation_data=(X_test, Y_test))
parameters = space
val_loss = min(result.history['val_loss'])
parameters["val_loss"] = val_loss
print('Validation loss: {0}'.format(val_loss))
if 'results' not in globals():
global results
results = []
results.append(parameters)
print(tabulate(results, headers="keys", tablefmt="fancy_grid", floatfmt=".8f"))
with open('/home/lpalmier/workspace/output/SIR/SIR_model_tuning_MNC_01.json', 'w') as f:
f.write(json.dumps(results))
return {'loss': val_loss, 'status': STATUS_OK, 'model': NN.model}
if __name__ == '__main__':
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=20,
trials=Trials())
X_train, Y_train, X_test, Y_test = data()
    print("Evaluation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
|
py | 1a4d96666ee1cb22df9e586777ea3e8a6617c15e | """
scada.py
"""
from minicps.devices import SCADAServer
from utils import SCADA_PROTOCOL, STATE
from utils import SCADA_PERIOD_SEC
from utils import IP
from utils import CO_0_2a, CO_1_2a, CO_2_2a, CO_3_2a
from utils import HR_0_2a
import time
RTU2A_ADDR = IP['rtu2a'] + ':502'
RTU2B_ADDR = IP['rtu2b'] + ':502'
SCADA_ADDR = IP['scada'] + ':502'
class SCADAServer(SCADAServer):
def pre_loop(self, sleep=0.5):
"""scada pre loop.
- sleep
"""
time.sleep(sleep)
def main_loop(self):
"""scada main loop.
For each RTU in the network
- Read the pump status
"""
while(True):
#co_00_2a = self.receive(CO_0_2a, RTU2A_ADDR)
co_00_2a = self.receive(CO_0_2a, SCADA_ADDR)
# NOTE: used for testing first challenge
#print('DEBUG scada from rtu2a: CO 0-0 2a: {}'.format(co_00_2a))
# NOTE: used for testing second challenge
# NOTE: comment out
# hr_03_2a = self.receive(HR_0_2a, RTU2B_ADDR, count=3)
# print('DEBUG scada from rtu2b: HR 0-2 2a: {}'.format(hr_03_2a))
# print("DEBUG: scada main loop")
time.sleep(SCADA_PERIOD_SEC)
if __name__ == "__main__":
scada = SCADAServer(
name='scada',
state=STATE,
protocol=SCADA_PROTOCOL)
|
py | 1a4d9680c1e0c1d96d3a7eff77637af4d4db0c20 | """Support for deCONZ binary sensors."""
from pydeconz.sensor import Presence, Vibration
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import ATTR_DARK, ATTR_ON, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry, DeconzEntityHandler
ATTR_ORIENTATION = "orientation"
ATTR_TILTANGLE = "tiltangle"
ATTR_VIBRATIONSTRENGTH = "vibrationstrength"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up deCONZ platforms."""
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ binary sensor."""
gateway = get_gateway_from_config_entry(hass, config_entry)
entity_handler = DeconzEntityHandler(gateway)
@callback
def async_add_sensor(sensors, new=True):
"""Add binary sensor from deCONZ."""
entities = []
for sensor in sensors:
if new and sensor.BINARY:
new_sensor = DeconzBinarySensor(sensor, gateway)
entity_handler.add_entity(new_sensor)
entities.append(new_sensor)
async_add_entities(entities, True)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SENSOR), async_add_sensor
)
)
async_add_sensor(gateway.api.sensors.values())
class DeconzBinarySensor(DeconzDevice, BinarySensorDevice):
"""Representation of a deCONZ binary sensor."""
@callback
def async_update_callback(self, force_update=False):
"""Update the sensor's state."""
changed = set(self._device.changed_keys)
keys = {"on", "reachable", "state"}
if force_update or any(key in changed for key in keys):
self.async_schedule_update_ha_state()
@property
def is_on(self):
"""Return true if sensor is on."""
return self._device.is_tripped
@property
def device_class(self):
"""Return the class of the sensor."""
return self._device.SENSOR_CLASS
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._device.SENSOR_ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr = {}
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if self._device.type in Presence.ZHATYPE and self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
elif self._device.type in Vibration.ZHATYPE:
attr[ATTR_ORIENTATION] = self._device.orientation
attr[ATTR_TILTANGLE] = self._device.tiltangle
attr[ATTR_VIBRATIONSTRENGTH] = self._device.vibrationstrength
return attr
|
py | 1a4d9928d7a778935c2a41d9b12c296e0ad11675 | #!/usr/bin/env python
"""settings.py
Udacity conference server-side Python App Engine app user settings
$Id$
created/forked from conference.py by wesc on 2014 may 24
"""
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '1009053430959-tdqqi86iai9gdqlods5m7mpoo1it0b0q.apps.googleusercontent.com'
ANDROID_CLIENT_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
|
py | 1a4d9a51c71d754c041de47df28d60e91277ec47 | import subprocess
import logging
import sys
from contextlib import contextmanager
@contextmanager
def maybe_open1(out):
if isinstance(out, str):
with open(out, "ab") as f:
yield f
else:
yield out
@contextmanager
def maybe_open2(stdout, stderr):
with maybe_open1(stdout) as fout:
if isinstance(stderr, str):
if stderr == stdout:
yield fout, fout
else:
with open(stderr, "ab") as ferr:
yield fout, ferr
else:
yield fout, stderr
class Make:
    def __init__(self, root_dir, args=None, stdout=None, stderr=None, verbose=False):
        self._root_dir = root_dir
        # avoid a mutable default argument; copy so the caller's list is never shared
        self._args = ["make"] + (list(args) if args else [])
if not verbose:
self._args += ["-s", "--no-print-directory"]
self._proc_stdout = stdout
self._proc_stderr = stderr
def check_call(self, args):
args = self._args + args
logging.debug(f"Execute {args} in {self._root_dir}, stdout={self._proc_stdout}, stderr={self._proc_stderr}")
with maybe_open2(self._proc_stdout, self._proc_stderr) as (stdout, stderr):
subprocess.check_call(args,
cwd=self._root_dir,
stdout=stdout,
stderr=stderr
)
def check_output(self, args):
args = self._args + args
logging.debug(f"Execute {args} in {self._root_dir} ...")
with maybe_open1(self._proc_stderr) as stderr:
output = subprocess.check_output(args,
cwd=self._root_dir,
stderr=stderr
)
logging.debug(f"Output of {args} command: {output}")
return output
def get_output_lines(self, args):
out = self.check_output(args)
return [l.strip() for l in out.decode("utf-8").split("\n")]
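# Usage sketch (hypothetical project path, log file and make targets -- none of
# these are defined by this module): quiet invocation with output captured to a log.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    make = Make("/path/to/project", stdout="build.log", stderr="build.log")
    make.check_call(["all"])  # runs: make -s --no-print-directory all
    lines = make.get_output_lines(["list-targets"])  # assumes the Makefile defines this target
    print(lines)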
|
py | 1a4d9b1712540c72f46246b14a64ccbcef1f8511 | import numpy as np
def calc_jacobian(frames: list, transformations: dict, jsize: int) -> np.ndarray:
    """
    Args:
        frames (list): frames to compute the Jacobian for
        transformations (dict): transformations from forward kinematics
        jsize (int): size of the joint space (number of actuated joints)
    Returns:
        Jacobian (np.ndarray of shape (6, jsize)): geometric Jacobian
    """
target_position = list(transformations.values())[-1].pos
J = np.zeros((6, jsize))
n = 0
for frame in frames:
if frame.joint.dtype == "revolute":
n += 1
w = np.dot(transformations[frame.link.name].h_mat[:3, :3], frame.joint.axis)
v = np.cross(w, target_position - transformations[frame.link.name].pos)
J[:, n - 1] = np.hstack((v, w))
elif frame.joint.dtype == "prismatic":
n += 1
w = np.zeros(3)
v = np.dot(transformations[frame.link.name].h_mat[:3, :3], frame.joint.axis)
J[:, n - 1] = np.hstack((v, w))
return J
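# Minimal usage sketch: a single revolute joint about the z-axis, built with
# ad-hoc SimpleNamespace stand-ins (the real project presumably supplies Frame
# and Transform objects exposing joint.dtype, joint.axis, link.name, h_mat, pos).
# Relies on insertion-ordered dicts so the last entry is the end-effector pose.
if __name__ == "__main__":
    from types import SimpleNamespace

    joint = SimpleNamespace(dtype="revolute", axis=np.array([0.0, 0.0, 1.0]))
    frame = SimpleNamespace(joint=joint, link=SimpleNamespace(name="link1"))
    transformations = {
        "link1": SimpleNamespace(h_mat=np.eye(4), pos=np.zeros(3)),
        "end_effector": SimpleNamespace(h_mat=np.eye(4), pos=np.array([1.0, 0.0, 0.0])),
    }
    J = calc_jacobian([frame], transformations, jsize=1)
    print(J)  # column 0 is [0, 1, 0, 0, 0, 1]: v = w x p = [0, 1, 0], w = [0, 0, 1]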
|
py | 1a4d9bf30e955f4a79a528998995b6ba400e3b75 | import os
os.add_dll_directory(r'C:\Program Files\VideoLAN\VLC')
import pafy
import vlc
import time
def play_music1():
url = "https://youtu.be/St-H0-xc-sc?list=LL"
video = pafy.new(url)
best = video.getbest()
playurl = best.url
Instance = vlc.Instance("--vout=dummy")
player = Instance.media_player_new()
Media = Instance.media_new(playurl)
Media.get_mrl()
player.set_media(Media)
player.audio_set_volume(50)
player.play()
    while True:
        # keep the script alive while VLC plays asynchronously; sleeping avoids
        # the busy-wait of a bare `pass` loop that pins a CPU core
        time.sleep(1)
|
py | 1a4d9f7d39b40685c71b0307ff8e71da100a7998 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from PIL import Image
import cv2
import numpy as np
from ..adapters import Adapter
from ..representation import ImageProcessingPrediction, SuperResolutionPrediction, ContainerPrediction
from ..config import ConfigValidator, BoolField, StringField, DictField, NormalizationArgsField
from ..preprocessor import Normalize
class ImageProcessingAdapter(Adapter):
__provider__ = 'image_processing'
prediction_types = (ImageProcessingPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'reverse_channels': BoolField(
optional=True, default=False, description="Allow switching output image channels e.g. RGB to BGR"
),
'mean': NormalizationArgsField(
optional=True, default=0,
description='The value which should be added to prediction pixels for scaling to range [0, 255]'
'(usually it is the same mean value which subtracted in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_MEANS
),
'std': NormalizationArgsField(
optional=True, default=255,
description='The value on which prediction pixels should be multiplied for scaling to range '
'[0, 255] (usually it is the same scale (std) used in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_STDS,
allow_zeros=False
),
'target_out': StringField(optional=True, description='Target super resolution model output'),
"cast_to_uint8": BoolField(
optional=True, default=True, description="Cast prediction values to integer within [0, 255] range"
)
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT
)
def configure(self):
self.reverse_channels = self.get_value_from_config('reverse_channels')
self.mean = self.get_value_from_config('mean')
self.std = self.get_value_from_config('std')
self.target_out = self.get_value_from_config('target_out')
self.cast_to_uint8 = self.get_value_from_config('cast_to_uint8')
self.output_verified = False
def select_output_blob(self, outputs):
self.output_verified = True
if not self.target_out:
super().select_output_blob(outputs)
self.target_out = self.output_blob
return
self.target_out = self.check_output_name(self.target_out, outputs)
return
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, out_img in zip(identifiers, raw_outputs[self.target_out]):
out_img = self._basic_postprocess(out_img)
result.append(ImageProcessingPrediction(identifier, out_img))
return result
def _basic_postprocess(self, img):
img = img.transpose((1, 2, 0)) if img.shape[-1] > 4 else img
img *= self.std
img += self.mean
if self.cast_to_uint8:
img = np.clip(img, 0., 255.)
img = img.astype(np.uint8)
if self.reverse_channels:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img, 'RGB') if Image is not None else img
img = np.array(img).astype(np.uint8)
return img
class SuperResolutionAdapter(ImageProcessingAdapter):
__provider__ = 'super_resolution'
prediction_types = (SuperResolutionPrediction, )
def process(self, raw, identifiers=None, frame_meta=None):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, img_sr in zip(identifiers, raw_outputs[self.target_out]):
img_sr = self._basic_postprocess(img_sr)
result.append(SuperResolutionPrediction(identifier, img_sr))
return result
class MultiSuperResolutionAdapter(Adapter):
__provider__ = 'multi_super_resolution'
prediction_types = (SuperResolutionPrediction, )
@property
def additional_output_mapping(self):
return getattr(self, '_additional_output_mapping', None)
@additional_output_mapping.setter
def additional_output_mapping(self, value):
self._additional_output_mapping = value
if hasattr(self, '_per_target_adapters'):
for adapter in self._per_target_adapters.values():
adapter.additional_output_mapping = value
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'reverse_channels': BoolField(
optional=True, default=False, description="Allow switching output image channels e.g. RGB to BGR"
),
'mean': NormalizationArgsField(
optional=True, default=0,
description='The value which should be added to prediction pixels for scaling to range [0, 255]'
'(usually it is the same mean value which subtracted in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_MEANS
),
'std': NormalizationArgsField(
optional=True, default=255,
description='The value on which prediction pixels should be multiplied for scaling to range '
'[0, 255] (usually it is the same scale (std) used in preprocessing step))',
precomputed_args=Normalize.PRECOMPUTED_STDS,
allow_zeros=False
),
"cast_to_uint8": BoolField(
optional=True, default=True, description="Cast prediction values to integer within [0, 255] range"
),
'target_mapping': DictField(allow_empty=False, key_type=str, value_type=str)
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT
)
def configure(self):
self.target_mapping = self.get_value_from_config('target_mapping')
common_adapter_config = deepcopy(self.launcher_config)
self._per_target_adapters = {}
for key, output_name in self.target_mapping.items():
adapter_config = deepcopy(common_adapter_config)
adapter_config['target_out'] = output_name
self._per_target_adapters[key] = SuperResolutionAdapter(
adapter_config,
additional_output_mapping=self.additional_output_mapping
)
def process(self, raw, identifiers=None, frame_meta=None):
predictions = [{}] * len(identifiers)
for key, adapter in self._per_target_adapters.items():
result = adapter.process(raw, identifiers, frame_meta)
for batch_id, output_res in enumerate(result):
predictions[batch_id][key] = output_res
results = [ContainerPrediction(prediction_mapping) for prediction_mapping in predictions]
return results
class SuperResolutionYUV(Adapter):
__provider__ = 'super_resolution_yuv'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'y_output': StringField(),
'u_output': StringField(),
'v_output': StringField(),
'target_color': StringField(optional=True, choices=['bgr', 'rgb'], default='bgr')
})
return parameters
def configure(self):
self.y_output = self.get_value_from_config('y_output')
self.u_output = self.get_value_from_config('u_output')
self.v_output = self.get_value_from_config('v_output')
self.color = cv2.COLOR_YUV2BGR if self.get_value_from_config('target_color') == 'bgr' else cv2.COLOR_YUV2RGB
def get_image(self, y, u, v):
is_hwc = u.shape[-1] == 1
if not is_hwc:
y = np.transpose(y, (1, 2, 0))
u = np.transpose(u, (1, 2, 0))
v = np.transpose(v, (1, 2, 0))
h, w, __ = u.shape
u = u.reshape(h, w, 1)
v = v.reshape(h, w, 1)
u = cv2.resize(u, None, fx=2, fy=2)
v = cv2.resize(v, None, fx=2, fy=2)
y = y.reshape(2 * h, 2 * w, 1)
u = u.reshape(2 * h, 2 * w, 1)
v = v.reshape(2 * h, 2 * w, 1)
yuv = np.concatenate([y, u, v], axis=2)
image = cv2.cvtColor(yuv, self.color)
return image
def process(self, raw, identifiers=None, frame_meta=None):
outs = self._extract_predictions(raw, frame_meta)
results = []
for identifier, yres, ures, vres in zip(
identifiers, outs[self.y_output], outs[self.u_output], outs[self.v_output]
):
sr_img = self.get_image(yres, ures, vres)
results.append(SuperResolutionPrediction(identifier, sr_img))
return results
class TrimapAdapter(ImageProcessingAdapter):
__provider__ = 'trimap'
prediction_types = (ImageProcessingPrediction, )
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.output_verified:
self.select_output_blob(raw_outputs)
for identifier, out_img, out_meta in zip(identifiers, raw_outputs[self.target_out], frame_meta):
tmap = np.expand_dims(out_meta['tmap'], axis=0)
C, _, W = out_img.shape
if C > 1 and W == 1:
out_img = np.transpose(out_img, [2, 0, 1])
out_img[tmap == 2] = 1
out_img[tmap == 0] = 0
out_img = self._basic_postprocess(out_img)
result.append(ImageProcessingPrediction(identifier, out_img))
return result
|
py | 1a4da06669fccb0b0d0780c40baac77e4b6b0892 | from corelib import *
import MotifParser as MP
from pkg_resources import resource_filename
from fastafrombed import *
from motif_clustering import *
import json #make the string to list
import os
import math
#Update: make the motif detecting part available in other genome assemblies (not limited to hg19 and mm9).
class Motif_Scan:
    '''This module has two parts: sequence preparation, and the motif scan performed by mis.'''
def __init__(self, genepeaks, peaksoutofDistance, options, selected, nontarget):
self.genepeaks = genepeaks
self.distance = options.distance
self.curdir = os.getcwd()
self.outdir = options.output
self.name = options.name
self.motifnumber = options.motifnumber
self.nontarget = nontarget
self.peaksoutofDistance = peaksoutofDistance
if options.genome and options.genome.startswith('hg'):
self.Species = 'Homo sapiens'
elif options.genome and options.genome.startswith('mm'):
self.Species = 'Mus musculus'
else:
self.Species = "NA"
self.genefiles = []
self.selected = selected
self.associate_peaks = []
for i in self.selected:
if i == '"upregulate"':
self.genefiles.append(os.path.join(self.outdir,self.name + '_uptarget.txt'))
self.associate_peaks.append(self.name + '_uptarget_associate_peaks.bed')
if i == '"downregulate"':
self.genefiles.append(os.path.join(self.outdir,self.name + '_downtarget.txt'))
self.associate_peaks.append(self.name + '_downtarget_associate_peaks.bed')
self.genomesequence = options.genomesequence
############################
#part1 Sequence preparation
############################
def get_gene_list(self):
files = self.genefiles
self.genelists = []#uptarget and downtarget gene lists
self.motifgenelists = []#Only the significant target genes associated peaks join to do the motif analysis.
for f in files:
genelist = [] #selected target genes
with open (f) as inf:
infile = inf.readlines()
totalnumber = len(infile)
if totalnumber <= 500:
top = totalnumber
elif totalnumber > 500 and totalnumber <= 1000:
top = 250 + 0.5*totalnumber
else:
top = 0.5 * totalnumber
for line in infile:
if line.startswith('Chroms'):
pass
else:
line = line.strip()
line = line.split('\t')
gene = [line[0],line[3],line[1],line[2],line[5],line[6]]#chrom,refseqID, start, end, strands, symbol
genelist.append(tuple(gene))
motifgenelist = genelist[:int(top)]
self.motifgenelists.append(motifgenelist)
self.genelists.append(genelist)
def getpeaks(self):
'''get all peaks within the certain distance around this gene'''
Info("pick out the peaks %sbp around the selected genes"%str(self.distance))
score = lambda t: math.exp(-0.5-4*t)
peakdist = []
for i in range(len(self.genelists)):
dist = {}
associatepeakf = open(self.associate_peaks[i],'w')
associatepeakf.write('chrom\tpStart\tpEnd\tRefseq\tSymbol\tDistance\tScore\n')
for gene in self.genelists[i]:
if gene[4] == '+':
TSS = gene[2]
else:
TSS = gene[3]
try:
peaks = self.genepeaks[gene]
for peak in peaks:
d = (int(peak[1]) + int(peak[2]))/2 - int(TSS)
if d <= self.distance:
regscore = score(abs(float(d))/float(self.distance))
associatepeakf.write(str(peak[0]) + '\t' + str(peak[1]) + '\t' + str(peak[2]) + '\t' + gene[1] + '\t' + gene[5] + '\t' + str(d) + '\t' + str(regscore) + '\n')
if gene in self.motifgenelists[i]:
key = tuple(peak)
dist[key] = [gene,d]
else:
continue
except KeyError:
pass
associatepeakf.close()
run_cmd('mv %s %s'%(self.associate_peaks[i], self.outdir))
peakdist.append(dist)
Info("Finished: Find target gene associated peaks in %s"%self.outdir)
self.peaklists = []
if len(peakdist) == 2:
up = peakdist[0]
down = peakdist[1]
uppeaks = up.keys()
downpeaks = down.keys()
for peak in uppeaks:
if peak in downpeaks:
updist = up[peak][-1]
downdist = down[peak][-1]
if updist > downdist:
del up[peak]
elif downdist > updist:
del down[peak]
else:
del up[peak]
del down[peak]
else:
continue
self.peaklists.append(up.keys())
self.peaklists.append(down.keys())
elif len(peakdist) == 1:
self.peaklists.append(peakdist[0].keys())
self.peaklists.append(self.peaksoutofDistance)
def getsequence(self):
        '''extend the peak summit into 3 regions, left(-300,-101), middle(-100,99), right(100,299), and get
        the fasta sequence of each region with fastaFromBED -- bedtools needs to be installed'''
if self.Species == 'NA':
Info("Motif database is not provided for your species, Skip the motif part ;)")
Info("For motif analysis, set the -g with hg19, mm9, hg or mm an rerun your job ;)")
Info("BETA Finished, Find your result in %s"%self.outdir)
sys.exit(1)
flags = []
for flag in self.selected:
#'upregulate' or 'downregulate'
if flag == '"upregulate"':
flag = 'up'
flags.append(flag)
if flag == '"downregulate"':
flag = 'down'
flags.append(flag)
flags.append('non')
self.fastas = []
i = 0
for peaks in self.peaklists:#different gene types(up and down)
flag = flags[i]
sequences = []
temp = open('select_peak.bed','w')
for peak in peaks:#each gene has a lot of peaks within 100k
peak_center = (int(peak[1]) + int(peak[2]))/2 #peak center equals to peak summit if the input peak file is the peak summit file
temp.write('%s\t%s\t%s\n'%(peak[0],str(peak_center),str(peak_center + 1)))
temp.close()
location = 'middle'
commandline = "awk -v OFS='\t' '{print $1, $2-100, $3+99}' %s > %s"%('select_peak.bed', flag + '_' + location + '.bed')
run_cmd(commandline)
sequences.append(flag + '_' + location + '.bed')
location = 'left'
commandline = "awk -v OFS='\t' '{print $1, $2-300, $2-101}' %s > %s"%('select_peak.bed', flag + '_' + location + '.bed')
run_cmd(commandline)
sequences.append(flag + '_' + location + '.bed')
location = 'right'
commandline = "awk -v OFS='\t' '{print $1, $3+100, $3+299}' %s > %s"%('select_peak.bed', flag + '_' + location + '.bed')
run_cmd(commandline)
sequences.append(flag + '_' + location + '.bed')
Info("get three regions of every peak around the %s genes"%flag)
            print(sequences)
for sequence in sequences:
outputfasta = sequence.replace('.bed','.fa')
self.fastas.append(outputfasta)
runfastafrombed(self.genomesequence, sequence, outputfasta)
run_cmd('rm %s'%sequence)
Info("get the fasta format sequence data of the three regions of %s"%flag)
i += 1
run_cmd('rm select_peak.bed')
######################################
#part2 Motif Scan and score caculate
######################################
def run_mis(self):
        '''The mis algorithm is adapted from MOODS (Motif Occurrence Detection Suite) and rewritten in C to
        improve efficiency; the make command must be run to build mis before it can be used'''
Info("run mis to do the known motif scan with cistrome motif database")
#misdir = resource_filename('BETA','mis')
#os.chdir(misdir)
#Usage: ./mis <in.seq> <in.db> <p-value> <motif-id> <output-prefix>
self.motifscore = []
for seq in self.fastas:
inseq = os.path.join(self.curdir,seq)
db = resource_filename('BETA','references/cistrome.db')
p_value = 0.001
motif_ID = 'all'
prefix = seq.replace('.fa','')
misout = os.path.join(self.curdir,prefix)
commandline = 'misp %s %s %s %s %s'%(inseq,db,p_value,motif_ID,misout)#run mis
run_cmd(commandline)
run_cmd('rm %s'%inseq)
scoref = prefix + '_all'
self.motifscore.append(scoref)
def statistical_test(self):
        #we are interested in the motifs enriched in the up-regulated genes,
        #the down-regulated genes, and the up versus down comparison
#curdir = os.getcwd()
Info("T statistic test performed here to do the significance testing of every motif")
groups = []
pairfns = []
pairs = []
for group in self.selected:
if group == '"upregulate"':
upgroup = [f for f in self.motifscore if f.startswith('up')]
groups.append(upgroup)
if group == '"downregulate"':
downgroup = [f for f in self.motifscore if f.startswith('down')]
groups.append(downgroup)
nongroup = [f for f in self.motifscore if f.startswith('non')]
groups.append(nongroup)
if len(groups) == 3:
#if this factor has both active and repressive funcation, we will scan the motif of up vs down.
pairfn = "upvsdown_motif.txt"
pairfns.append(pairfn)
pairf = open(pairfn,'w')
pairs.append(pairf)
            pairf.write('MotifID\tSpecies\tSymbol\tDNA BindDom\tPSSM\tTscore\tPvalue\n')
else:
pairfn = "upvsdown_motif.txt"
pairf = ''
middlescores = []
a = MP.MotifParser()
motifdb = resource_filename('BETA','references/cistrome.xml')
a.Parser(motifdb)
motifinfo = a.motifs
self.FNs = []#the motif output file
upnonf = ''
downnonf = ''
upnon = "up_non_motif.txt"
downnon = "down_non_motif.txt"
for group in groups:
motifscore = {}
if group[0].startswith('up'):
fn = "up_motif.txt"
outf = open(fn,'w')
upnonf = open(upnon,'w')
upnonf.write('MotifID\tSpecies\tSymbol\tDNA BindDom\tPSSM\tTscore\tPvalue\n')
if group[0].startswith('down'):
fn = "down_motif.txt"
outf = open(fn,'w')
downnonf = open(downnon,'w')
downnonf.write('MotifID\tSpecies\tSymbol\tDNA BindDom\tPSSM\tTscore\tPvalue\n')
if group[0].startswith('non'):
fn = "non_motif.txt"
outf = open(fn,'w')
outf.write('MotifID\tSpecies\tSymbol\tDNA BindDom\tPSSM\tTscore\tPvalue\n')
group.sort() #have the order like: left,middle,right
if len(group) != 3:
Info('MISP step wronging!')
sys.exit(1)
for f in group:
f = os.path.join(self.curdir, f)
with open(f) as scoref:
lines = scoref.readlines()
for line in lines:
if line.startswith('#'):
continue
elif line.startswith('@'):
#@ factor:EN0055
index = lines.index(line)
line = line.strip()
line = line.split('\t')
key = line[0].split(':')[1]
scoreline = lines[index + 2]
#score line looks like 0,0.20(102),0.23(29),0,0,3.34(-395),
scoreline = scoreline.strip()
scoreline = scoreline.split(',')[:-1]
value = []
for score in scoreline:
score = score.split('(')[0]
value.append(score)
try:
motifscore[key].append(value)
except:
motifscore[key] = [value]
else:
continue
run_cmd('rm %s'%f)
motifs = motifscore.keys()
mscore = []
for motif in motifs:
species = motifinfo[motif]['species']
if len(species) == 1 and species[0] != self.Species:
continue
else:
species = ', '.join(species) #list to string
scores = motifscore[motif]
leftscore = 'left <- c('
middlescore = 'middle <- c('
rightscore = 'right <- c('
string = [leftscore, middlescore,rightscore]
rtestf = open('temptest.r','w')
rtestf.write('options(warn=-1)\n')
for i in range(3):
for s in scores[i]:
string[i] += str(s)
string[i] += ','
string[i] = string[i].rstrip(',')
string[i] += ')'
rtestf.write(string[i] + '\n')
mscore.append(string[1])
rtestf.write('pvalue=1\n')
rtestf.write('summaryml = t.test(middle,left,alternative="greater")\n')
rtestf.write('mlpvalue = summaryml$p.value\n')
rtestf.write('mltscore = summaryml$statistic\n')
rtestf.write('summarymr= t.test(middle,right,alternative="greater")\n')
rtestf.write('mrpvalue= summarymr$p.value\n')
rtestf.write('mrtscore = summarymr$statistic\n')
rtestf.write('pvalue = max(mlpvalue, mrpvalue)\n')
rtestf.write('tscore = min(mltscore, mrtscore)\n')
rtestf.write('print(pvalue)\n')
rtestf.write('print(tscore)\n')
rtestf.close()
cmd = 'Rscript temptest.r'
#testinfo = os.popen(cmd).read().strip().split('\n')
if not os.popen(cmd).read():
pvalue = 'NaN'
tscore = 'NaN'
else:
testinfo = os.popen(cmd).read().strip().split('\n')
pvalue = testinfo[0].split()[1]
tscore = testinfo[-1].split()[-1]
symbol = ', '.join(motifinfo[motif]['symbol'])
#TFs = motifinfo[motif]['symbol']
DBD = ', '.join(motifinfo[motif]['dbd'])
description = ', '.join(motifinfo[motif]['description'])
synonym = ', '.join(motifinfo[motif]['synonym'])
pssm = str(motifinfo[motif]['pssm'])
if pvalue == 'NaN':
pvalue = 1000
if tscore == 'NaN':
tscore = 0
#print motif, species, symbol, DBD, description, synonym, pssm, tscore, pvalue
outf.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n'%(motif, species, symbol, DBD, pssm, str("%.2f"%float(tscore)), str("%.2e"%float(pvalue))))
outf.close()
FN = fn.split('.')[0].upper() + 'S.' + fn.split('.')[1] #the sorted motif file
run_cmd('sort -t "\t" -k7,7g %s > %s'%(fn,FN))
self.FNs.append(FN)
run_cmd('rm %s'%fn)
middlescores.append(mscore)
run_cmd('rm temptest.r')
pairfns = [upnon, downnon, pairfn]
pairs = [upnonf, downnonf, pairf]
k = 0
for f in pairs:
if f == '':
pass
else:
if f == upnonf:
middlescore1 = middlescores[0]
middlescore2 = middlescores[-1]
if f == downnonf:
middlescore1 = middlescores[-2]
middlescore2 = middlescores[-1]
if f == pairf:
middlescore1 = middlescores[0]
middlescore2 = middlescores[1]
for i in range(len(middlescores[0])):
motif = motifs[i]
species = motifinfo[motif]['species']
if len(species) == 1 and species[0] != self.Species:
pass
else:
species = ', '.join(species) #list to string
pairtest = open('pairtest.r','w')
pairtest.write('options(warn=-1)\n')
middlescore = middlescore1[i].replace('middle','middle1')
pairtest.write(middlescore + '\n')
middlescore = middlescore2[i].replace('middle','middle2')
pairtest.write(middlescore + '\n')
pairtest.write('summary = t.test(middle1,middle2)\n')
pairtest.write('pvalue = summary$p.value\n')
pairtest.write('tscore= summary$statistic\n')
pairtest.write('print(pvalue)\n')
pairtest.write('print(tscore)')
pairtest.close()
cmd = 'Rscript pairtest.r'
testinfo = os.popen(cmd).read().strip().split('\n')
if not os.popen(cmd).read():
pvalue = 'NaN'
tscore = 'NaN'
else:
testinfo = os.popen(cmd).read().strip().split('\n')
pvalue = testinfo[0].split()[1]
tscore = testinfo[-1].split()[-1]
if pvalue == 'NaN':
pvalue = 1000
if tscore == 'NaN':
tscore = 0
description = ', '.join(motifinfo[motifs[i]]['description'])
symbol = ', '.join(motifinfo[motifs[i]]['symbol'])
DBD = ', '.join(motifinfo[motifs[i]]['dbd'])
synonym = ', '.join(motifinfo[motifs[i]]['synonym'])
pssm = motifinfo[motifs[i]]['pssm']
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n'%(motifs[i], species, symbol, DBD, pssm, str("%.2f"%float(tscore)), str("%.2e"%float(pvalue))))
f.close()
fn = pairfns[k]
PAIRFN = fn.split('.')[0].upper() + 'S.' + fn.split('.')[1]
run_cmd('sort -t "\t" -k7,7g %s > %s'%(fn, PAIRFN))
self.FNs.append(PAIRFN)
run_cmd('rm %s'%fn)
run_cmd('rm pairtest.r')
k += 1
def out2html(self):
#write the top 10 motif into the html format
Info('motif result will be in html format')
FNs = self.FNs
template = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n\
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n\
<head>\n\
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n\
<title>BETA Motif Part</title>\n \
<link rel="stylesheet" type="text/css" href="styles.css" />\n\
</head>\n\
<body>\n\
<div class="section" id="page"> <!-- Defining the #page section with the section tag -->\n\
<div class="header"> <!-- Defining the header section of the page with the appropriate tag -->\n\
<h1>BETA: Motif Analysis</h1>\n\
<h3>Motif Scan on the TF Target Genes</h3>\n\
</div>\n\
<div class="section" id="articles"> <!-- A new section with the articles -->\n\
<div class="line"></div> <!-- Dividing line -->\n\
<div class="article" id="article1"> <!-- The new article tag. The id is supplied so it can be scrolled into view. -->\n\
<h2>Part1: Up Target Genes</h2>\n\
<div class="line"></div>\n\
<div class="articleBody clear">\n\
<table style="text-align: left; width: 100%%" cellpadding="2" cellspacing="2" border="1" style="border: 1px solid #000;" >\n\
%s\n\
%s\n\
</table>\n\
</div>\n\
</div>\n\
<div class="line"></div>\n\
<div class="article" id="article2">\n\
<h2>Part2: Down Target Genes</h2>\n\
<div class="line"></div>\n\
<div class="articleBody clear">\n\
<table style="text-align: left; width: 100%%" cellpadding="2" cellspacing="2" border="1" style="border: 1px solid #000;" >\n\
%s\n\
%s\n\
</table>\n\
</div>\n\
</div>\n\
<div class="line"></div>\n\
<div class="article" id="article3">\n\
<h2>Part3: UP vs DOWN Motif Scan</h2>\n\
<div class="line"></div>\n\
<div class="articleBody clear">\n\
<table style="text-align: left; width: 100%%" cellpadding="2" cellspacing="2" border="1" style="border: 1px solid #000;" >\n\
%s\n\
%s\n\
</table>\n\
</div>\n\
</div>\n\
<div class="line"></div> <!-- Dividing line -->\n\
<div class="article" id="article1"> <!-- The new article tag. The id is supplied so it can be scrolled into view. -->\n\
<h2>Part4: Up VS Non Target Motif</h2>\n\
<div class="line"></div>\n\
<div class="articleBody clear">\n\
<table style="text-align: left; width: 100%%" cellpadding="2" cellspacing="2" border="1" style="border: 1px solid #000;" >\n\
%s\n\
%s\n\
</table>\n\
</div>\n\
</div>\n\
<div class="line"></div> <!-- Dividing line -->\n\
<div class="article" id="article1"> <!-- The new article tag. The id is supplied so it can be scrolled into view. -->\n\
<h2>Part5: Down VS Non Target Motif</h2>\n\
<div class="line"></div>\n\
<div class="articleBody clear">\n\
<table style="text-align: left; width: 100%%" cellpadding="2" cellspacing="2" border="1" style="border: 1px solid #000;" >\n\
%s\n\
%s\n\
</table>\n\
</div>\n\
</div>\n\
</div>\n\
<div class="footer"> <!-- Marking the footer section -->\n\
<div class="line"></div>\n\
<p>BETA: Binding and Expression Target Analysis</p> <!-- Change the copyright notice -->\n\
<a href="#" class="up">Go UP</a>\n\
</div>\n\
</div> <!-- Closing the #page section -->\n\
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js"></script>\n\
<script type="text/javascript" src="jquery.scrollTo-1.4.2/jquery.scrollTo-min.js"></script>\n\
<script type="text/javascript" src="script.js"></script>\n\
</body>\n\
</html>'
outhtml = open('betamotif.html','w')
resultdir = os.path.join(self.outdir,'motifresult')
if os.path.isdir(resultdir):
run_cmd('rm -r %s'%resultdir)
run_cmd('mkdir %s'%resultdir)
else:
run_cmd('mkdir %s'%resultdir)
imgdir = os.path.join(resultdir,'img')
if os.path.isdir(imgdir):
run_cmd('rm -r %s'%imgdir)
run_cmd('mkdir %s'%imgdir)
else:
run_cmd('mkdir %s'%imgdir)
tabletitle = '\n\
<tr>\n\
<th>\n\
<font size="4" color="yellow">Symbol</font>\n\
</th>\n\
<th>\n\
<font size="4" color="yellow">DNA BindDom</font>\n\
</th>\n\
<th>\n\
<font size="4" color="yellow">Species</font>\n\
</th>\n\
<th>\n\
<font size="4" color="yellow">Pvalue (T Test)</font>\n\
</th>\n\
<th>\n\
<font size="4" color="yellow">T Score</font>\n\
</th>\n\
<th>\n\
<font size="4" color="yellow">Logo</font>\n\
</th>\n\
</tr>'
nontable = '<p></p>'
subtemplate = '\n\
<tr>\n\
<td>\n\
%s\n\
</td>\n\
<td>\n\
%s\n\
</td>\n\
<td>\n\
<font size="4" >%s</font>\n\
</td>\n\
<td>\n\
<font size="4" >%s</font>\n\
</td>\n\
<td>\n\
<font size="4" >%s</font>\n\
</td>\n\
<td>\n\
<img width="500" height="220" border="0" align="left" alt="some_text" src=%s>\n\
</td>\n\
</tr>'
cluster = '\n <p><font size="4">%s</font></p>\n'
if 'UP_MOTIFS.txt' in FNs:
motif1 = 'UP_MOTIFS.txt'#upmotif
motif2 = 'UP_NON_MOTIFS.txt'#upvsnonmotif
mistable1 = 'upmis.txt'#up motif mis table
mistable2 = 'upnonmis.txt'#upvsnon mis table
motifs = [motif1,motif2]
mistables = [mistable1,mistable2]
subups = []
for q in range(2):
motif = motifs[q]
mistable = mistables[q]
subup = ''
output = runmotifcluster_side1(self.outdir, motif, mistable, self.motifnumber)
output.sort(key=lambda x:float(x[3]))
for motif_class in output:
factors = motif_class[0].split('; ')
factorinput = ''
for factor in factors:
factorinput += cluster%factor
dbdinput = ''
dbds = motif_class[1].split('; ')
for dbd in dbds:
dbdinput += cluster%dbd
species = motif_class[2]
pvalue = motif_class[3]
tscore = motif_class[4]
img = motif_class[-1]
run_cmd('cp %s %s'%(img, imgdir))
imgID = os.path.split(img)[1]
img = os.path.join('img',imgID)
temp = subtemplate%(factorinput,dbdinput,species,pvalue,tscore,img)
subup += temp
subups.append(subup)
tabletitleup = tabletitle
run_cmd('mv %s %s'%('UP_MOTIFS.txt', resultdir))
run_cmd('mv %s %s'%('UP_NON_MOTIFS.txt', resultdir))
run_cmd('mv %s %s'%(self.name + '_uptarget.txt', self.outdir))
else:
subups = [nontable,nontable]
tabletitleup = '<font size="5" color="yellow">This Factor has no Active Function to its target genes, Skipped this part Motif Scan</font>'
subcompare = nontable
tabletitlecompare = '<font size="5" color="yellow">This Factor has no Active Function to its target genes, Cannot do the comparision Motif Scan</font>'
if 'DOWN_MOTIFS.txt' in FNs:
motif1 = 'DOWN_MOTIFS.txt'#upmotif
motif2 = 'DOWN_NON_MOTIFS.txt'#upvsnonmotif
mistable1 = 'downmis.txt'#up motif mis table
mistable2 = 'downnonmis.txt'#upvsnon mis table
motifs = [motif1,motif2]
mistables = [mistable1,mistable2]
subdowns = []
for q in range(2):
subdown = ''
motif = motifs[q]
mistable = mistables[q]
output = runmotifcluster_side1(self.outdir, motif, mistable, self.motifnumber)
output.sort(key=lambda x:float(x[3]))
for motif_class in output:
factors = motif_class[0].split('; ')
factorinput = ''
for factor in factors:
factorinput += cluster%factor
dbdinput = ''
dbds = motif_class[1].split('; ')
for dbd in dbds:
dbdinput += cluster%dbd
species = motif_class[2]
pvalue = motif_class[3]
tscore = motif_class[4]
img = motif_class[-1]
run_cmd('cp %s %s'%(img, imgdir))
imgID = os.path.split(img)[1]
img = os.path.join('img',imgID)
temp = subtemplate%(factorinput,dbdinput,species,pvalue,tscore,img)
subdown += temp
subdowns.append(subdown)
tabletitledown = tabletitle
run_cmd('mv %s %s'%('DOWN_MOTIFS.txt', resultdir))
run_cmd('mv %s %s'%('DOWN_NON_MOTIFS.txt', resultdir))
run_cmd('mv %s %s'%(self.name + '_downtarget.txt', self.outdir))
else:
subdowns = [nontable,nontable]
tabletitledown = '<font size="5" color="yellow">This Factor has no Repressive Function to its target genes, Skipped this part Motif Scan</font>'
subcompare = nontable
tabletitlecompare = '<font size="5" color="yellow">This Factor has no Repressive Function to its target genes, Cannot do the comparision Motif Scan</font>'
if 'UPVSDOWN_MOTIFS.txt' in FNs:
motif = 'UPVSDOWN_MOTIFS.txt'
mistable1 = 'upvsdownmis1.txt'
            mistable2 = 'upvsdownmis2.txt'
output = runmotifcluster_side2(self.outdir, motif, mistable1, mistable2, self.motifnumber)
output.sort(key=lambda x:float(x[3]))
subcompare = ''
for motif_class in output:
factors = motif_class[0].split('; ')
factorinput = ''
for factor in factors:
factorinput += cluster%factor
dbdinput = ''
dbds = motif_class[1].split('; ')
for dbd in dbds:
dbdinput += cluster%dbd
species = motif_class[2]
pvalue = motif_class[3]
tscore = motif_class[4]
img = motif_class[-1]
run_cmd('cp %s %s'%(img, imgdir))
imgID = os.path.split(img)[1]
img = os.path.join('img',imgID)
temp = subtemplate%(factorinput,dbdinput,species,pvalue,tscore,img)
subcompare += temp
tabletitledown = tabletitle
run_cmd('mv %s %s'%(self.name + '_downtarget.txt', self.outdir))
tabletitlecompare = tabletitle
run_cmd('mv %s %s'%('UPVSDOWN_MOTIFS.txt', 'DIFFERENTIAL_MOTIF_UP_DOWN.txt'))
run_cmd('mv %s %s'%('DIFFERENTIAL_MOTIF_UP_DOWN.txt', resultdir))
b = template%(tabletitleup,subups[0],tabletitledown,subdowns[0],tabletitlecompare,subcompare,tabletitleup,subups[1],tabletitledown,subdowns[1])
outhtml.write(b)
outhtml.close()
jsfile = resource_filename('BETA','templates/script.js')
cssfile = resource_filename('BETA','templates/styles.css')
run_cmd('rm %s'%'NON_MOTIFS.txt')
run_cmd('cp %s %s'%(jsfile,resultdir))
run_cmd('cp %s %s'%(cssfile,resultdir))
motiflogos = os.path.join(self.outdir,'motiflogos')
run_cmd('rm -rf %s'%motiflogos)
run_cmd('mv betamotif.html %s'%resultdir)
Info("Done: find motif result in beatmotif.html file")
Info("Done: find all BETA result in %s"%self.outdir)
|
py | 1a4da117967346d93c94567c1523151572360fa8 | from DataUploader.PgsqlDataUploader import PgsqlDataUploader
def clean():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.run_script("sql/AdventureWorks_postgres_drop.sql")
uploader.clean_up()
def prepare():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.run_script("sql/AdventureWorks_postgres_create_NoRels.sql")
uploader.clean_up()
def upload():
input_file = 'csvfiles.json'
uploader = PgsqlDataUploader(input_file)
uploader.upload_from_csv()
uploader.clean_up()
def execute(i):
switcher = {0: clean, 1: prepare, 2: upload}
func = switcher.get(i, lambda: 'Invalid')
return func()
if __name__ == '__main__':
"""
    Small application to perform some basic operations on Postgres databases
- Clean, Drop, Upload data from scripts
- Data in CSV format
- Make use of psycopg2
Author: Andres Osorio
Date: 27/06/2021
Company: Phystech SAS
Client: DS4A Course
"""
execute(2)
print("All done")
|
py | 1a4da12ec2a4a20e3dec3fe1436e9fd37abbd9d9 |
import sys
from rlpyt.utils.launching.affinity import affinity_from_code
from rlpyt.samplers.parallel.gpu.sampler import GpuSampler
from rlpyt.samplers.parallel.gpu.collectors import GpuWaitResetCollector
from rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo
from rlpyt.algos.pg.a2c import A2C
from rlpyt.agents.pg.atari import AtariLstmAgent
from rlpyt.runners.minibatch_rl import MinibatchRl
from rlpyt.utils.logging.context import logger_context
from rlpyt.utils.launching.variant import load_variant, update_config
from rlpyt.experiments.configs.atari.pg.atari_lstm_a2c import configs
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
affinity = affinity_from_code(slot_affinity_code)
config = configs[config_key]
variant = load_variant(log_dir)
config = update_config(config, variant)
sampler = GpuSampler(
EnvCls=AtariEnv,
env_kwargs=config["env"],
CollectorCls=GpuWaitResetCollector,
TrajInfoCls=AtariTrajInfo,
**config["sampler"]
)
algo = A2C(optim_kwargs=config["optim"], **config["algo"])
agent = AtariLstmAgent(model_kwargs=config["model"], **config["agent"])
runner = MinibatchRl(
algo=algo,
agent=agent,
sampler=sampler,
affinity=affinity,
**config["runner"]
)
name = config["env"]["game"]
with logger_context(log_dir, run_ID, name, config):
runner.train()
if __name__ == "__main__":
build_and_train(*sys.argv[1:])
|
py | 1a4da13d4eb92e4b3b318f9a98cfc35a00968bdc | """
WSGI config for worldCountries project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'worldCountries.settings')
application = get_wsgi_application()
|
py | 1a4da142cf7aa5bd642e9c44a348473c67ab69eb | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
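# Example (assumed ASVspoof2019-style protocol row, whitespace separated):
#   LA_0079 LA_T_1138215 - - bonafide
# With the indexing above (row[1] = trial name, row[-1] = label), this yields
# data_buffer['LA_T_1138215'] = 1, while rows ending in 'spoof' map to 0.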
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
self.m_resampler = torchaudio.transforms.Resample(
prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
|
py | 1a4da236aa9221d51d4d9408cc9214ad10fa898d | from hw_asr.augmentations.spectrogram_augmentations.SpecAug import SpecAug
__all__ = [
"SpecAug",
]
|
py | 1a4da2680714d63815e341b9f93956763cc664cc | from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.PostListView.as_view(), name="post_list"),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r"^post/(?P<pk>\d+)$", views.PostDetailView.as_view(), name="post_detail"),
url(r"^post/new/$", views.CreatePostView.as_view(), name="post_new"),
url(r"^post/(?P<pk>\d+)/edit/$",
views.PostUpdateView.as_view(), name="post_edit"),
url(r"^post/(?P<pk>\d+)/publish/$",
views.post_publish, name="post_publish"),
url(r'^post/(?P<pk>\d+)/remove/$',
views.PostDeleteView.as_view(), name='post_remove'),
url(r'^drafts/$', views.DraftListView.as_view(), name='post_draft_list'),
url(r'^post/(?P<pk>\d+)/comment/$',
views.add_comment_to_post, name='add_comment_to_post'),
url(r'^comment/(?P<pk>\d+)/approve/$',
views.comment_approve, name='comment_approve'),
url(r'^comment/(?P<pk>\d+)/remove/$',
views.comment_remove, name='comment_remove'),
]
|
py | 1a4da2d3ea4bd0047ff3625b316901f36320e773 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "sana-khan-34437.botics.co"
site_params = {
"name": "Sana Khan",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
py | 1a4da488847e1a4c6c0f4c5026bf162cbef4eb7b | # Import Flask
from flask import Flask, jsonify
# Dependencies and Setup
import numpy as np
import datetime as dt
# Python SQL Toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.pool import StaticPool
import dateutil.parser as dparser
# Database Setup by creating engine to the db path
engine = create_engine("sqlite:///Resources/hawaii.sqlite", echo=False)
# Reflect hawaii database into Base
Base = automap_base()
# Reflect all the tables in hawaii db
Base.prepare(engine, reflect=True)
# Create instances each Table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Setting-up Flask
# Initialize Flask app
app = Flask(__name__)
# set-up all the routes
@app.route("/api/v1.0/precipitation")
def percipation():
# Create our session (thread) from Python to the DB
session = Session(engine)
date = session.query(Measurement.date).order_by(Measurement.date.desc())[0][0]
latest_date = dt.datetime.strptime(date, "%Y-%m-%d").date()
latest_12 = latest_date - dt.timedelta(days=365)
percipitation_data = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).filter(Measurement.date >= latest_12 ).all()
session.close()
return dict(percipitation_data)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (thread) from Python to the DB
session = Session(engine)
stations = session.query(Station.id, Station.station).distinct().all()
session.close()
results = []
for row in stations:
station = {}
station["id"] = row[0]
station["station"] = row[1]
results.append(station)
return jsonify(results)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (thread) from Python to the DB
session = Session(engine)
active_stations = session.query(Measurement.id, Station.id, Measurement.station).\
filter(Station.station == Measurement.station).\
group_by(Measurement.station).\
order_by(Measurement.id.desc()).all()
most_active_station = active_stations[0][1]
recent_date = session.query(Measurement.date).filter(Station.station == Measurement.station).filter(Station.id == most_active_station).order_by(Measurement.date.desc())[0][0]
recent_date = dt.datetime.strptime(recent_date, "%Y-%m-%d").date()
recent_year = recent_date - dt.timedelta(days=365)
recent_year_temp = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= recent_year).order_by(Measurement.date.desc()).all()
session.close()
return dict(recent_year_temp)
@app.route("/api/v1.0/<start_date>")
def start_date(start_date):
session = Session(engine)
result = session.query(Measurement.date, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.tobs).filter(Measurement.date >= start_date).first()
session.close()
aggre = {}
aggre["Date"]= result[0]
aggre["Min"] = result[1]
aggre["Max"] = result[2]
aggre["Average"] = result[3]
return aggre
@app.route("/api/v1.0/<start_date>/<end_date>")
def range_date(start_date, end_date):
session = Session(engine)
start_date = dt.datetime.strptime(start_date, "%Y-%m-%d").date()
    end_date = dt.datetime.strptime(end_date, "%Y-%m-%d").date()
    result = session.query(Measurement.date, func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date)[0]
session.close()
aggre = {}
aggre["Date"]= result[0]
aggre["Min"] = result[1]
aggre["Max"] = result[2]
aggre["Average"] = result[3]
return aggre
@app.route("/api/v1.0/questions")
def questions():
return """<html>
<center>
<img src="/static/silly.png", alt="There you go!!!", width="700",height="680" />
</center>
</html>"""
# set-up Home routes
@app.route("/")
def welcomepage():
return """<html>
<h1>Welcome to Hawaii Climate Analysis!!!</h1>
<aside> By Shilpa...</aside>
<a href = "http://climate.geography.hawaii.edu/interactivemap.html" target = "_blank" ><img src="/static/hawaii_climate.png",width="718" height="135", alt="Hawaii Climate Analysis"/></a>
<section><h2><b>Analysis</b><img src="/static/Hawaii_surfing2.png",width="300",height="115", style="float:right", alt="Surf's up!!!"></h2>
<p><i>Below are the analysis performed on the Hawaii Climate data: </i></p>
<article>
    <dt><li><b>Precipitation Data</b></li></dt>
    <dd><a href="/api/v1.0/precipitation" target = "_blank">Precipitation (last 12 months)</a></dd>
    <dd> <mark>Returns 'Date' & 'Precipitation' for the last 12-month period</mark></dd>
</article>
<dl><dt><li><b>Stations Data</b></li></dt>
<dd><a href="/api/v1.0/stations" target = "_blank">Most active Stations</a></dd>
<dd><mark>Returns List of Station 'id's' & 'station names' in Hawaii </mark></dd>
</dl>
    <dl><dt><li><b>Temperature Observations (TOBS)</b></li></dt>
    <dd><a href="/api/v1.0/tobs" target = "_blank">Temperature observations for the last 12 months</a></dd>
    <dd><mark>Returns 'Date' & 'Temperature' of the most active station over the last 12-month period </mark></dd>
</dl>
<dl><dt><li><b>MIN, MAX & AVERAGE Temperatures</b></li></dt>
<dd><a href="/api/v1.0/2016-8-23" target = "_blank">Temperature Aggregations starting 2016-8-23</a></dd>
<dd><a href="/api/v1.0/2017-6-23/2017-8-15" target = "_blank">Temperature Aggregations from 2016-8-23 to 2017-1-15</a></dd>
<dd><mark>Returns 'Min', 'Max' & 'Average' for the given date or range of dates</mark></dd>
</dl>
</section>
<section><h2><b>Question & Concerns</b></h2>
<dl>
<dd><h3><a href="/api/v1.0/questions" target = "_blank">Have Questions?? Contact-us here</a></h3></dd>
</dl>
</section>
</html>"""
if __name__ == '__main__':
app.run(debug=True)
|
py | 1a4da4c14384212ccbf2aff2ffb163872a03a728 | from typing import NamedTuple
class APIVersion(NamedTuple):
major: int
minor: int
@classmethod
def from_string(cls, inp: str) -> 'APIVersion':
parts = inp.split('.')
if len(parts) != 2:
raise ValueError(inp)
intparts = [int(p) for p in parts]
return cls(major=intparts[0], minor=intparts[1])
def __str__(self):
return f'{self.major}.{self.minor}'
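# Usage sketch: round-trip between the string form and the named tuple.
if __name__ == "__main__":
    v = APIVersion.from_string("2.7")
    assert v == APIVersion(major=2, minor=7)
    assert str(v) == "2.7"
    try:
        APIVersion.from_string("2.7.1")  # malformed input: wrong number of parts
    except ValueError:
        pass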
|
py | 1a4da506476b52d9539dcf60a06e14d5786bf43f | import math
def get_odd(n):
while True:
if n%2:
return n
n//=2
def solve():
n=int(input())
c='Ashishgup'
o='FastestFinger'
while True:
if n<=1:
print(o)
break
if (n%2) or n==2:
print(c)
break
if not n&n-1:
print(o)
break
n//=get_odd(n)
c,o=o,c
if __name__ == '__main__':
t=int(input())
for _ in range(t):
solve()
|
py | 1a4da5e148774b8ff72d5d5d09d4dfe16431f2de | # -*- coding=utf-8 -*-
import os
from setuptools import setup, find_packages
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
LONGDOC = '''
jionlp
================================================================================
Provides algorithm API interfaces for the company's algorithm and business teams.
Installation:
The code requires Python 3.
- Semi-automatic installation:
$ git clone http://git.bbdops.com/BBD-AI-Lab/BBD-Tools-Documentation.git
$ cd BBD-Tools-Documentation
$ pip install .
- Reference it via: import bbd_tools as bbd
'''
__name__ = 'jionlp'
__author__ = "cuiguoer"
__copyright__ = "Copyright 2020, dongrixinyu"
__credits__ = []
__license__ = "Apache License 2.0"
__maintainer__ = "dongrixinyu"
__email__ = "[email protected]"
__url__ = 'https://github.com/dongrixinyu/jionlp'
__description__ = 'Simple, Keras-powered multilingual NLP framework,' \
' allows you to build your models in 5 minutes for named entity recognition (NER),' \
' part-of-speech tagging (PoS) and text classification tasks. ' \
'Includes BERT, GPT-2 and word2vec embedding.'
with open(os.path.join(DIR_PATH, 'requirements.txt'),
'r', encoding='utf-8') as f:
requirements = f.readlines()
setup(name=__name__,
version='0.1.0',
url=__url__,
author=__author__,
author_email=__email__,
description=__description__,
long_description=LONGDOC,
license=__license__,
py_modules=[],
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
entry_points={
'console_scripts': [
# 'scheduler_start = algorithm_platform.scheduler.server: start',
]
},
test_suite='nose.collector',
tests_require=['nose'])
|
py | 1a4da69809384a20d10fcf0199ce0787d0062010 | """
This file offers the methods to automatically retrieve the graph Rhizobium leguminosarum viciae 3841.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RhizobiumLeguminosarumViciae3841(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Rhizobium leguminosarum viciae 3841 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Rhizobium leguminosarum viciae 3841 graph.
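Example
---------------------
A minimal usage sketch (parameter values as documented above; this assumes
the package is installed and the STRING data can be downloaded or is
already cached):
    graph = RhizobiumLeguminosarumViciae3841(
        directed=False,
        preprocess=True,
        version="links.v11.5",
    )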
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RhizobiumLeguminosarumViciae3841",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
py | 1a4da6d9f5e96dd1ed24b1b384c9281487316ab3 | # -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2019/1/15 12:17'
import math, torch
import torch.nn as nn
import torch.nn.functional as F
class ViewLayer(nn.Module):
def __init__(self, dim=-1):
super(ViewLayer, self).__init__()
self.dim = dim
def forward(self, x):
# print('view-layer -> ', x.size())
x = x.view(x.size(0), self.dim)
return x
class AdaAvgPool(nn.Module):
def __init__(self, size=0):
self.size = size
super(AdaAvgPool, self).__init__()
def forward(self, x):
# print('avg-layer -> ', x.size())
if self.size == -1:
return x
if self.size == 0:
h, w = x.size(2), x.size(3)
assert h == w
elif self.size >= 1:
h, w = self.size, self.size
else:
raise NotImplementedError('check the avg kernel size !')
return F.avg_pool2d(x, kernel_size=(h, w))
class Activate(nn.Module):
def __init__(self, method='relu'):
super(Activate, self).__init__()
if method == 'relu':
self.method = nn.ReLU(inplace=True)
elif method == 'sigmoid':
self.method = nn.Sigmoid()
elif method == 'leaky_relu':
self.method = nn.LeakyReLU(negative_slope=0.02)
else:
raise NotImplementedError('--->%s' % method)
def forward(self, x):
return self.method(x)
class SweetBlock(nn.Module):
def __init__(self, depth, inter=1, downexp=2, downsize=False):
super(SweetBlock, self).__init__()
self.downsize = downsize
self.bn1 = nn.BatchNorm2d(depth)
self.conv1 = nn.Conv2d(depth, depth * inter, 3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(depth * inter)
self.deconv2 = nn.ConvTranspose2d(depth * inter, depth, 3, stride=2, padding=1, output_padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
if downsize:
self.down1 = nn.Sequential(
nn.BatchNorm2d(depth),
nn.ReLU(inplace=True),
nn.Conv2d(depth, depth * downexp, 3, stride=1, padding=1, bias=False),
nn.AvgPool2d(2)
)
self.down2 = nn.Sequential(
nn.BatchNorm2d(depth),
nn.ReLU(inplace=True),
nn.Conv2d(depth, depth * downexp, 3, stride=1, padding=1, bias=False),
nn.AvgPool2d(2),
# nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
)
def forward(self, x):
if isinstance(x, (list, tuple)):
assert len(x) == 3, 'len of x is: %s ...' % len(x)
x1, x2, pred = x # (big, small, pred)
else:
x1, x2, pred = x, None, None
res1 = self.conv1(self.relu(self.bn1(x1)))
res2 = self.deconv2(self.relu(self.bn2(res1)))
res1 = res1 + x2
res2 = res2 + x1
if self.downsize:
res2 = self.down2(res2)
res1 = self.down1(res1)
# utils.print_size([res2, res1])
return res2, res1, pred
class TransBlock(nn.Module):
def __init__(self, indepth, outdepth):
super(TransBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(indepth)
self.conv1 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(indepth)
self.conv2 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1, x2, pred = x
else:
x1, x2, pred = x, None, None
x1 = self.conv1(F.relu(self.bn1(x1)))
x1 = F.avg_pool2d(x1, 2)
x2 = self.conv2(F.relu(self.bn2(x2)))
x2 = F.avg_pool2d(x2, 2)
return x1, x2
class SumaryBlock(nn.Module):
def __init__(self, depth, classify=1, avgpool=True, active='relu', nclass=1000):
super(SumaryBlock, self).__init__()
self.classify = classify
if self.classify >= 1:
self.classifier1 = nn.Sequential(
nn.BatchNorm2d(depth),
Activate(active),
AdaAvgPool(),
ViewLayer(),
nn.Linear(depth, nclass)
)
if self.classify >= 2:
self.classifier2 = nn.Sequential(
nn.BatchNorm2d(depth),
Activate(active),
AdaAvgPool(),
ViewLayer(),
nn.Linear(depth, nclass)
)
def forward(self, x):
if isinstance(x, (list, tuple)):
x1, x2, pred = x
else:
x1, x2, pred = x, None, None
if self.classify == 1:
x1 = self.classifier1(x1)
pred.extend([x1])
elif self.classify == 2:
x1 = self.classifier1(x1)
x2 = self.classifier2(x2)
pred.extend([x2, x1])
else:
raise NotImplementedError
return pred
class RockBlock(nn.Module):
def __init__(self, outdepth, branch=2, dataset='cifar'):
super(RockBlock, self).__init__()
self.branch = branch
if dataset == 'cifar':
self.branch1 = nn.Sequential(
nn.Conv2d(3, outdepth, kernel_size=3, stride=1, padding=1, bias=False),
# nn.BatchNorm2d(depth),
# nn.ReLU(inplace=True),
)
if branch >= 2:
self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
elif dataset == 'imagenet':
self.branch1 = nn.Sequential(
nn.Conv2d(3, outdepth, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(outdepth),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
pred = []
if self.branch == 1:
x = self.branch1(x)
return x, None, pred
elif self.branch == 2:
x = self.branch1(x)
x2 = self.branch2(x)
return x, x2, pred
else:
raise ValueError('check branch must be in [1, 2, 3]!')
class SweetNet(nn.Module):
def __init__(self, branch=2, depth=64, layers=(2, 3, 3, 3), expand=(1, 2, 4, 8), downexp=2, downlast=False,
inter=(1, 1, 1, 1), classify=1, active='relu', nclass=1000):
super(SweetNet, self).__init__()
self.layers = layers
self.layer0 = RockBlock(depth, branch, dataset='imagenet')
self.layer1 = self._make_sweet_layer(SweetBlock, layers[0], depth * expand[0], inter[0], downexp, down=True)
self.layer2 = self._make_sweet_layer(SweetBlock, layers[1], depth * expand[1], inter[1], downexp, down=True)
self.layer3 = self._make_sweet_layer(SweetBlock, layers[2], depth * expand[2], inter[2], downexp, down=True)
self.layer4 = self._make_sweet_layer(SweetBlock, layers[3], depth * expand[3], inter[3], downexp, down=downlast)
if downlast:
indepth = depth * expand[3] * downexp
else:
indepth = depth * expand[3]
self.classifier = SumaryBlock(indepth, classify, avgpool=True, active=active, nclass=nclass)
def _make_sweet_layer(self, block, nums, depth, inter=1, downexp=2, down=True):
layers = []
for i in range(nums - 1):
layers.append(block(depth, inter, downexp, downsize=False))
layers.append(block(depth, inter, downexp, downsize=down))
return nn.Sequential(*layers)
def _make_trans_layer(self, block, indepth, outdepth):
return block(indepth, outdepth)
def forward(self, x):
x = self.layer0(x)
# utils.print_size(x)
x = self.layer1(x)
# utils.print_size(x)
x = self.layer2(x)
# utils.print_size(x)
x = self.layer3(x)
# utils.print_size(x)
x = self.layer4(x)
# utils.print_size(x)
x = self.classifier(x)
return x
class CifarSweetNet(nn.Module):
def __init__(self, branch=2, depth=16, layers=(2, 3, 3), expand=(1, 2, 4), downexp=2, downlast=False,
inter=(1, 1, 1), classify=1, active='relu', nclass=10):
super(CifarSweetNet, self).__init__()
self.layers = layers
self.layer0 = RockBlock(depth, branch, dataset='cifar')
self.layer1 = self._make_sweet_layer(SweetBlock, layers[0], depth * expand[0], inter[0], downexp, down=True)
self.layer2 = self._make_sweet_layer(SweetBlock, layers[1], depth * expand[1], inter[1], downexp, down=True)
self.layer3 = self._make_sweet_layer(SweetBlock, layers[2], depth * expand[2], inter[2], downexp, down=downlast)
if downlast:
indepth = depth * expand[2] * downexp
else:
indepth = depth * expand[2]
self.classifier = SumaryBlock(indepth, classify, avgpool=True, active=active, nclass=nclass)
def _make_sweet_layer(self, block, nums, depth, inter=1, downexp=2, down=True):
layers = []
for i in range(nums - 1):
layers.append(block(depth, inter, downexp, downsize=False))
layers.append(block(depth, inter, downexp, downsize=down))
return nn.Sequential(*layers)
def _make_trans_layer(self, block, indepth, outdepth):
return block(indepth, outdepth)
def forward(self, x):
x = self.layer0(x)
# utils.print_size(x)
x = self.layer1(x)
# utils.print_size(x)
x = self.layer2(x)
# utils.print_size(x)
x = self.layer3(x)
# utils.print_size(x)
x = self.classifier(x)
return x
if __name__ == '__main__':
import xtils
torch.manual_seed(9528)
criterion = nn.CrossEntropyLoss()
# model = SweetNet(branch=2, depth=64, layers=(2, 5, 3, 2), expand=(1, 2, 4, 8), downexp=2, downlast=True,
# inter=(1, 1, 1, 1), classify=2, active='relu', nclass=1000)
# print('\n', model, '\n')
# x = torch.randn(4, 3, 256, 256)
# # utils.tensorboard_add_model(model, x)
# utils.calculate_params_scale(model, format='million')
# utils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'linear'))
# y = model(x)
# print(sum(model.layers), len(y), ':', [(yy.shape, yy.max(1)) for yy in y if yy is not None])
arch_kwargs = {}
model = CifarSweetNet(branch=2, depth=16, layers=(2, 2, 2), expand=(1, 2, 4), downexp=2, downlast=False,
inter=(1, 1, 1), classify=1, active='relu', nclass=10)
print('\n', model, '\n')
x = torch.randn(4, 3, 32, 32)
# utils.tensorboard_add_model(model, x)
xtils.calculate_params_scale(model, format='million')
xtils.calculate_layers_num(model, layers=('conv2d', 'deconv2d', 'linear'))
y = model(x)
# loss = [criterion(o, torch.randint(0, 10, o.size()).long()) for o in y]
# optimizer = torch.optim.Adam(params=model.parameters(), lr=0.1)
# optimizer.zero_grad()
# sum(loss).backward()
# optimizer.step()
print(sum(model.layers), len(y), ':', [(yy.shape, yy.max(1)) for yy in y if yy is not None])
|
py | 1a4da7a5842ff5242587ac28406437cdee11396e | import time
import sqlalchemy_1_3 as tsa
from sqlalchemy_1_3 import create_engine
from sqlalchemy_1_3 import event
from sqlalchemy_1_3 import exc
from sqlalchemy_1_3 import Integer
from sqlalchemy_1_3 import MetaData
from sqlalchemy_1_3 import pool
from sqlalchemy_1_3 import select
from sqlalchemy_1_3 import String
from sqlalchemy_1_3 import testing
from sqlalchemy_1_3 import util
from sqlalchemy_1_3.engine import url
from sqlalchemy_1_3.testing import assert_raises
from sqlalchemy_1_3.testing import assert_raises_message
from sqlalchemy_1_3.testing import assert_raises_message_context_ok
from sqlalchemy_1_3.testing import engines
from sqlalchemy_1_3.testing import eq_
from sqlalchemy_1_3.testing import expect_warnings
from sqlalchemy_1_3.testing import fixtures
from sqlalchemy_1_3.testing import is_false
from sqlalchemy_1_3.testing import is_true
from sqlalchemy_1_3.testing import mock
from sqlalchemy_1_3.testing import ne_
from sqlalchemy_1_3.testing.engines import testing_engine
from sqlalchemy_1_3.testing.mock import call
from sqlalchemy_1_3.testing.mock import Mock
from sqlalchemy_1_3.testing.mock import patch
from sqlalchemy_1_3.testing.schema import Column
from sqlalchemy_1_3.testing.schema import Table
from sqlalchemy_1_3.testing.util import gc_collect
class MockError(Exception):
pass
class MockDisconnect(MockError):
pass
class MockExitIsh(BaseException):
pass
def mock_connection():
def mock_cursor():
def execute(*args, **kwargs):
if conn.explode == "execute":
raise MockDisconnect("Lost the DB connection on execute")
elif conn.explode == "interrupt":
conn.explode = "explode_no_disconnect"
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode == "interrupt_dont_break":
conn.explode = None
raise MockExitIsh("Keyboard / greenlet / etc interruption")
elif conn.explode in (
"execute_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif conn.explode in (
"rollback",
"rollback_no_disconnect",
"explode_no_disconnect",
):
raise MockError(
"something broke on execute but we didn't lose the "
"connection"
)
elif args and "SELECT" in args[0]:
cursor.description = [("foo", None, None, None, None, None)]
else:
return
def close():
cursor.fetchall = cursor.fetchone = Mock(
side_effect=MockError("cursor closed")
)
cursor = Mock(
execute=Mock(side_effect=execute), close=Mock(side_effect=close)
)
return cursor
def cursor():
while True:
yield mock_cursor()
def rollback():
if conn.explode == "rollback":
raise MockDisconnect("Lost the DB connection on rollback")
if conn.explode == "rollback_no_disconnect":
raise MockError(
"something broke on rollback but we didn't lose the "
"connection"
)
else:
return
conn = Mock(
rollback=Mock(side_effect=rollback), cursor=Mock(side_effect=cursor())
)
return conn
def MockDBAPI():
connections = []
stopped = [False]
def connect():
while True:
if stopped[0]:
raise MockDisconnect("database is stopped")
conn = mock_connection()
connections.append(conn)
yield conn
def shutdown(explode="execute", stop=False):
stopped[0] = stop
for c in connections:
c.explode = explode
def restart():
stopped[0] = False
connections[:] = []
def dispose():
stopped[0] = False
for c in connections:
c.explode = None
connections[:] = []
return Mock(
connect=Mock(side_effect=connect()),
shutdown=Mock(side_effect=shutdown),
dispose=Mock(side_effect=dispose),
restart=Mock(side_effect=restart),
paramstyle="named",
connections=connections,
Error=MockError,
)
class PrePingMockTest(fixtures.TestBase):
def setup(self):
self.dbapi = MockDBAPI()
def _pool_fixture(self, pre_ping):
dialect = url.make_url(
"postgresql://foo:bar@localhost/test"
).get_dialect()()
dialect.dbapi = self.dbapi
_pool = pool.QueuePool(
creator=lambda: self.dbapi.connect("foo.db"),
pre_ping=pre_ping,
dialect=dialect,
)
dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
return _pool
def teardown(self):
self.dbapi.dispose()
def test_connect_across_restart(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
stale_connection = conn.connection
conn.close()
self.dbapi.shutdown("execute")
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
cursor.execute("hi")
stale_cursor = stale_connection.cursor()
assert_raises(MockDisconnect, stale_cursor.execute, "hi")
def test_raise_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
assert_raises_message_context_ok(
MockDisconnect, "database is stopped", pool.connect
)
def test_waits_til_exec_wo_ping_db_is_stopped(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
def test_waits_til_exec_wo_ping_db_is_restarted(self):
pool = self._pool_fixture(pre_ping=False)
conn = pool.connect()
conn.close()
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
cursor = conn.cursor()
assert_raises_message(
MockDisconnect,
"Lost the DB connection on execute",
cursor.execute,
"foo",
)
@testing.requires.predictable_gc
def test_pre_ping_weakref_finalizer(self):
pool = self._pool_fixture(pre_ping=True)
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.cursor(), call.rollback()])
self.dbapi.shutdown("execute", stop=True)
self.dbapi.restart()
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.cursor(), call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(
old_dbapi_conn.mock_calls,
[call.cursor(), call.rollback(), call.cursor(), call.close()],
)
class MockReconnectTest(fixtures.TestBase):
def setup(self):
self.dbapi = MockDBAPI()
self.db = testing_engine(
"postgresql://foo:bar@localhost/test",
options=dict(module=self.dbapi, _initialize=False),
)
self.mock_connect = call(
host="localhost", password="bar", user="foo", database="test"
)
# monkeypatch disconnect checker
self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
def teardown(self):
self.dbapi.dispose()
def test_reconnect(self):
"""test that an 'is_disconnect' condition will invalidate the
connection, and additionally dispose the previous connection
pool and recreate."""
# make a connection
conn = self.db.connect()
# connection works
conn.execute(select([1]))
# create a second connection within the pool, which we'll ensure
# also goes away
conn2 = self.db.connect()
conn2.close()
# two connections opened total now
assert len(self.dbapi.connections) == 2
# set it to fail
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
# assert was invalidated
assert not conn.closed
assert conn.invalidated
# close shouldn't break
conn.close()
# ensure one connection closed...
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
conn = self.db.connect()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
conn.execute(select([1]))
conn.close()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
def test_invalidate_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.StatementError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select([1]),
)
assert trans.is_active
assert_raises_message(
tsa.exc.InvalidRequestError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select([1]))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidate_dont_call_finalizer(self):
conn = self.db.connect()
finalizer = mock.Mock()
conn.connection._connection_record.finalize_callback.append(finalizer)
conn.invalidate()
assert conn.invalidated
eq_(finalizer.call_count, 0)
def test_conn_reusable(self):
conn = self.db.connect()
conn.execute(select([1]))
eq_(self.dbapi.connect.mock_calls, [self.mock_connect])
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
# test reconnects
conn.execute(select([1]))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidated_close(self):
conn = self.db.connect()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
conn.close()
assert conn.closed
assert conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_noreconnect_execute_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("execute_no_disconnect")
# raises error
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on execute but we didn't lose the connection",
conn.execute,
select([1]),
)
assert conn.closed
assert not conn.invalidated
def test_noreconnect_rollback_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback_no_disconnect")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on rollback but we didn't "
"lose the connection",
conn.execute,
select([1]),
)
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_reconnect_on_reentrant(self):
conn = self.db.connect()
conn.execute(select([1]))
assert len(self.dbapi.connections) == 1
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select([1]),
)
assert not conn.closed
assert conn.invalidated
def test_reconnect_on_reentrant_plus_closewresult(self):
conn = self.db.connect(close_with_result=True)
self.dbapi.shutdown("rollback")
# raises error
with expect_warnings(
"An exception has occurred during handling .*"
"something broke on execute but we didn't lose the connection",
py2konly=True,
):
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.execute,
select([1]),
)
assert conn.closed
assert conn.invalidated
assert_raises_message(
tsa.exc.StatementError,
"This Connection is closed",
conn.execute,
select([1]),
)
def test_check_disconnect_no_cursor(self):
conn = self.db.connect()
result = conn.execute(select([1]))
result.cursor.close()
conn.close()
assert_raises_message(
tsa.exc.DBAPIError, "cursor closed", list, result
)
def test_dialect_initialize_once(self):
from sqlalchemy_1_3.engine.url import URL
from sqlalchemy_1_3.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
engine = create_engine(MyURL("foo://"), module=dbapi)
engine.connect()
# note that the dispose() call replaces the old pool with a new one;
# this is to test that even though a single pool is using
# dispatch.exec_once(), by replacing the pool with a new one, the event
# would normally fire again unless once=True is set on the original
# listen as well.
engine.dispose()
engine.connect()
eq_(Dialect.initialize.call_count, 1)
def test_dialect_initialize_retry_if_exception(self):
from sqlalchemy_1_3.engine.url import URL
from sqlalchemy_1_3.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
# note that the first_connect hook is only invoked when the pool
# makes a new DBAPI connection, and not when it checks out an existing
# connection. So there is a dependency here that if the initializer
# raises an exception, the pool-level connection attempt is also
# failed, meaning no DBAPI connection is pooled. If the first_connect
# exception raise did not prevent the connection from being pooled,
# there could be the case where the pool could return that connection
# on a subsequent attempt without initialization having proceeded.
Dialect.initialize.side_effect = TypeError
engine = create_engine(MyURL("foo://"), module=dbapi)
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 1)
is_true(engine.pool._pool.empty())
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 2)
is_true(engine.pool._pool.empty())
engine.dispose()
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 3)
is_true(engine.pool._pool.empty())
Dialect.initialize.side_effect = None
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
engine.dispose()
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
def test_invalidate_conn_w_contextmanager_interrupt(self):
# test [ticket:3803]
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(MockExitIsh, go)
assert conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select([1]))
assert not conn.invalidated
def test_invalidate_conn_interrupt_nodisconnect_workaround(self):
# test [ticket:3803] workaround for no disconnect on keyboard interrupt
@event.listens_for(self.db, "handle_error")
def cancel_disconnect(ctx):
ctx.is_disconnect = False
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt_dont_break")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(MockExitIsh, go)
assert not conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select([1]))
assert not conn.invalidated
def test_invalidate_conn_w_contextmanager_disconnect(self):
# test [ticket:3803] change maintains old behavior
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("execute")
def go():
with conn.begin():
conn.execute(select([1]))
assert_raises(exc.DBAPIError, go) # wraps a MockDisconnect
assert conn.invalidated
ne_(pool._invalidate_time, 0) # pool is invalidated
conn.execute(select([1]))
assert not conn.invalidated
class CursorErrTest(fixtures.TestBase):
# this isn't really a "reconnect" test, it's more of
# a generic "recovery". maybe this test suite should have been
# named "test_error_recovery".
def _fixture(self, explode_on_exec, initialize):
class DBAPIError(Exception):
pass
def MockDBAPI():
def cursor():
while True:
if explode_on_exec:
yield Mock(
description=[],
close=Mock(side_effect=DBAPIError("explode")),
execute=Mock(side_effect=DBAPIError("explode")),
)
else:
yield Mock(
description=[],
close=Mock(side_effect=Exception("explode")),
)
def connect():
while True:
yield Mock(
spec=["cursor", "commit", "rollback", "close"],
cursor=Mock(side_effect=cursor()),
)
return Mock(
Error=DBAPIError,
paramstyle="qmark",
connect=Mock(side_effect=connect()),
)
dbapi = MockDBAPI()
from sqlalchemy_1_3.engine import default
url = Mock(
get_dialect=lambda: default.DefaultDialect,
_get_entrypoint=lambda: default.DefaultDialect,
_instantiate_plugins=lambda kwargs: (),
translate_connect_args=lambda: {},
query={},
)
eng = testing_engine(
url, options=dict(module=dbapi, _initialize=initialize)
)
eng.pool.logger = Mock()
return eng
def test_cursor_explode(self):
db = self._fixture(False, False)
conn = db.connect()
result = conn.execute("select foo")
result.close()
conn.close()
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def test_cursor_shutdown_in_initialize(self):
db = self._fixture(True, True)
assert_raises_message_context_ok(
exc.SAWarning, "Exception attempting to detect", db.connect
)
eq_(
db.pool.logger.error.mock_calls,
[call("Error closing cursor", exc_info=True)],
)
def _assert_invalidated(fn, *args):
try:
fn(*args)
assert False
except tsa.exc.DBAPIError as e:
if not e.connection_invalidated:
raise
class RealReconnectTest(fixtures.TestBase):
__backend__ = True
__requires__ = "graceful_disconnects", "ad_hoc_engines"
def setup(self):
self.engine = engines.reconnecting_engine()
def teardown(self):
self.engine.dispose()
def test_reconnect(self):
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
# one more time
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
conn.close()
def test_multiple_invalidate(self):
c1 = self.engine.connect()
c2 = self.engine.connect()
eq_(c1.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select([1]))
p2 = self.engine.pool
_assert_invalidated(c2.execute, select([1]))
# pool isn't replaced
assert self.engine.pool is p2
def test_branched_invalidate_branch_to_parent(self):
c1 = self.engine.connect()
with patch.object(self.engine.pool, "logger") as logger:
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select([1]))
assert c1.invalidated
assert c1_branch.invalidated
c1_branch._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
assert "Invalidate connection" in logger.mock_calls[0][1][0]
def test_branched_invalidate_parent_to_branch(self):
c1 = self.engine.connect()
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1.execute, select([1]))
assert c1.invalidated
assert c1_branch.invalidated
c1._revalidate_connection()
assert not c1.invalidated
assert not c1_branch.invalidated
def test_branch_invalidate_state(self):
c1 = self.engine.connect()
c1_branch = c1.connect()
eq_(c1_branch.execute(select([1])).scalar(), 1)
self.engine.test_shutdown()
_assert_invalidated(c1_branch.execute, select([1]))
assert not c1_branch.closed
assert not c1_branch._connection_is_valid
def test_ensure_is_disconnect_gets_connection(self):
def is_disconnect(e, conn, cursor):
# connection is still present
assert conn.connection is not None
# the error usually occurs on connection.cursor(),
# though with MySQLdb we get a non-working cursor.
# assert cursor is None
self.engine.dialect.is_disconnect = is_disconnect
conn = self.engine.connect()
self.engine.test_shutdown()
with expect_warnings(
"An exception has occurred during handling .*", py2konly=True
):
assert_raises(tsa.exc.DBAPIError, conn.execute, select([1]))
def test_rollback_on_invalid_plain(self):
conn = self.engine.connect()
trans = conn.begin()
conn.invalidate()
trans.rollback()
@testing.requires.two_phase_transactions
def test_rollback_on_invalid_twophase(self):
conn = self.engine.connect()
trans = conn.begin_twophase()
conn.invalidate()
trans.rollback()
@testing.requires.savepoints
def test_rollback_on_invalid_savepoint(self):
conn = self.engine.connect()
conn.begin()
trans2 = conn.begin_nested()
conn.invalidate()
trans2.rollback()
def test_invalidate_twice(self):
conn = self.engine.connect()
conn.invalidate()
conn.invalidate()
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
# raises a DBAPIError, not an AttributeError
assert_raises(exc.DBAPIError, engine.connect)
@testing.skip_if(
[lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle"
)
def test_explode_in_initializer_disconnect(self):
engine = engines.testing_engine()
def broken_initialize(connection):
connection.execute("select fake_stuff from _fake_table")
engine.dialect.initialize = broken_initialize
def is_disconnect(e, conn, cursor):
return True
engine.dialect.is_disconnect = is_disconnect
# invalidate() also doesn't screw up
assert_raises(exc.DBAPIError, engine.connect)
def test_null_pool(self):
engine = engines.reconnecting_engine(
options=dict(poolclass=pool.NullPool)
)
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
def test_close(self):
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
conn.close()
conn = self.engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
def test_with_transaction(self):
conn = self.engine.connect()
trans = conn.begin()
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.closed
self.engine.test_shutdown()
_assert_invalidated(conn.execute, select([1]))
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.StatementError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select([1]),
)
assert trans.is_active
assert_raises_message(
tsa.exc.InvalidRequestError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert trans.is_active
trans.rollback()
assert not trans.is_active
assert conn.invalidated
eq_(conn.execute(select([1])).scalar(), 1)
assert not conn.invalidated
class RecycleTest(fixtures.TestBase):
__backend__ = True
def test_basic(self):
engine = engines.reconnecting_engine()
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
# set the pool recycle down to 1.
# we aren't doing this inline with the
# engine create since cx_oracle takes way
# too long to create the 1st connection and we don't
# want to build a huge delay into this test.
engine.pool._recycle = 1
# kill the DB connection
engine.test_shutdown()
# wait until past the recycle period
time.sleep(2)
# can connect, no exception
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
class PrePingRealTest(fixtures.TestBase):
__backend__ = True
def test_pre_ping_db_is_restarted(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
stale_connection = conn.connection.connection
conn.close()
engine.test_shutdown()
engine.test_restart()
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
def exercise_stale_connection():
curs = stale_connection.cursor()
curs.execute("select 1")
assert_raises(engine.dialect.dbapi.Error, exercise_stale_connection)
def test_pre_ping_db_stays_shutdown(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select([1])).scalar(), 1)
conn.close()
engine.test_shutdown(stop=True)
assert_raises(exc.DBAPIError, engine.connect)
class InvalidateDuringResultTest(fixtures.TestBase):
__backend__ = True
def setup(self):
self.engine = engines.reconnecting_engine()
self.meta = MetaData(self.engine)
table = Table(
"sometable",
self.meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
self.meta.create_all()
table.insert().execute(
[{"id": i, "name": "row %d" % i} for i in range(1, 100)]
)
def teardown(self):
self.meta.drop_all()
self.engine.dispose()
@testing.crashes(
"oracle",
"cx_oracle 6 doesn't allow a close like this due to open cursors",
)
@testing.fails_if(
["+mysqlconnector", "+mysqldb", "+cymysql", "+pymysql", "+pg8000"],
"Buffers the result set and doesn't check for connection close",
)
def test_invalidate_on_results(self):
conn = self.engine.connect()
result = conn.execute("select * from sometable")
for x in range(20):
result.fetchone()
self.engine.test_shutdown()
_assert_invalidated(result.fetchone)
assert conn.invalidated
|
py | 1a4da87cc03ced70a2ba4704b8d055df2b65e0f5 | # pylint: disable=C,R,W
from datetime import datetime, timedelta
import inspect
import logging
import os
import re
import time
import traceback
from urllib import parse
from flask import (
flash, g, Markup, redirect, render_template, request, Response, url_for,
)
from flask_appbuilder import expose, SimpleFormView
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access, has_access_api
from flask_appbuilder.security.views import AuthDBView
from flask_login import login_user
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
import pandas as pd
import simplejson as json
import sqlalchemy as sqla
from sqlalchemy import and_, create_engine, MetaData, or_, update
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import IntegrityError
from unidecode import unidecode
from werkzeug.routing import BaseConverter
from werkzeug.utils import secure_filename
from superset import (
app, appbuilder, cache, dashboard_import_export_util, db, results_backend,
security_manager, sql_lab, utils, viz, csrf)
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import AnnotationDatasource, SqlaTable
from superset.exceptions import SupersetException
from superset.forms import CsvToDatabaseForm
from superset.jinja_context import get_template_processor
from superset.legacy import cast_form_data, update_time_range
import superset.models.core as models
from superset.models.sql_lab import Query
from superset.models.user_attributes import UserAttribute
from superset.sql_parse import SupersetQuery
from superset.utils import (
merge_extra_filters, merge_request_params, QueryStatus,
)
from .base import (
api, BaseSupersetView,
check_ownership,
CsvResponse, DeleteMixin,
generate_download_headers, get_error_msg,
json_error_response, SupersetFilter, SupersetModelView, YamlExportMixin,
)
from .utils import bootstrap_user_data
config = app.config
stats_logger = config.get('STATS_LOGGER')
log_this = models.Log.log_this
DAR = models.DatasourceAccessRequest
ALL_DATASOURCE_ACCESS_ERR = __(
'This endpoint requires the `all_datasource_access` permission')
DATASOURCE_MISSING_ERR = __('The datasource seems to have been deleted')
ACCESS_REQUEST_MISSING_ERR = __(
'The access requests seem to have been deleted')
USER_MISSING_ERR = __('The user seems to have been deleted')
FORM_DATA_KEY_BLACKLIST = []
if not config.get('ENABLE_JAVASCRIPT_CONTROLS'):
FORM_DATA_KEY_BLACKLIST = [
'js_tooltip',
'js_onclick_href',
'js_data_mutator',
]
def get_database_access_error_msg(database_name):
return __('This view requires the database %(name)s or '
'`all_datasource_access` permission', name=database_name)
def json_success(json_msg, status=200):
return Response(json_msg, status=status, mimetype='application/json')
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners
def check_dbp_user(user, is_shared):
if app.config['ENABLE_CUSTOM_ROLE_RESOURCE_SHOW'] and not is_shared and user:
for role in user.roles:
if role.name.lower().find(app.config['CUSTOM_ROLE_NAME_KEYWORD'].lower()) >= 0:
return True
return False
class SliceFilter(SupersetFilter):
def apply(self, query, func): # noqa
if security_manager.all_datasource_access():
return query
perms = self.get_view_menus('datasource_access')
# TODO(bogdan): add `schema_access` support here
#if len(perms) > 0 :
if check_dbp_user(g.user, app.config['ENABLE_CHART_SHARE_IN_CUSTOM_ROLE']):
slice_ids = self.get_current_user_slice_ids()
return query.filter(self.model.perm.in_(perms)).filter(self.model.id.in_(slice_ids))
else:
return query.filter(self.model.perm.in_(perms))
#else:
# return query.filter(self.model.id.in_(slice_ids))
class DashboardFilter(SupersetFilter):
"""List dashboards for which users have access to at least one slice or are owners"""
def apply(self, query, func): # noqa
if security_manager.all_datasource_access():
return query
Slice = models.Slice # noqa
Dash = models.Dashboard # noqa
User = security_manager.user_model
# TODO(bogdan): add `schema_access` support here
datasource_perms = self.get_view_menus('datasource_access')
slice_ids_qry = None
if check_dbp_user(g.user, app.config['ENABLE_DASHBOARD_SHARE_IN_CUSTOM_ROLE']):
slice_ids = self.get_current_user_slice_ids()
slice_ids_qry = (
db.session
.query(Slice.id)
.filter(Slice.perm.in_(datasource_perms)).filter(Slice.id.in_(slice_ids))
)
else:
slice_ids_qry = (
db.session
.query(Slice.id)
.filter(Slice.perm.in_(datasource_perms))
)
owner_ids_qry = (
db.session
.query(Dash.id)
.join(Dash.owners)
.filter(User.id == User.get_user_id())
)
query = query.filter(
or_(Dash.id.in_(
db.session.query(Dash.id)
.distinct()
.join(Dash.slices)
.filter(Slice.id.in_(slice_ids_qry)),
), Dash.id.in_(owner_ids_qry)),
)
return query
class DatabaseView(SupersetModelView, DeleteMixin, YamlExportMixin): # noqa
datamodel = SQLAInterface(models.Database)
list_title = _('List Databases')
show_title = _('Show Database')
add_title = _('Add Database')
edit_title = _('Edit Database')
list_columns = [
'database_name', 'backend', 'allow_run_sync', 'allow_run_async',
'allow_dml', 'allow_csv_upload', 'creator', 'modified']
order_columns = [
'database_name', 'allow_run_sync', 'allow_run_async', 'allow_dml',
'modified', 'allow_csv_upload',
]
add_columns = [
'database_name', 'sqlalchemy_uri', 'cache_timeout', 'expose_in_sqllab',
'allow_run_sync', 'allow_run_async', 'allow_csv_upload',
'allow_ctas', 'allow_dml', 'force_ctas_schema', 'impersonate_user',
'allow_multi_schema_metadata_fetch', 'extra',
]
search_exclude_columns = (
'password', 'tables', 'created_by', 'changed_by', 'queries',
'saved_queries')
edit_columns = add_columns
show_columns = [
'tables',
'cache_timeout',
'extra',
'database_name',
'sqlalchemy_uri',
'perm',
'created_by',
'created_on',
'changed_by',
'changed_on',
]
add_template = 'superset/models/database/add.html'
edit_template = 'superset/models/database/edit.html'
base_order = ('changed_on', 'desc')
description_columns = {
'sqlalchemy_uri': utils.markdown(
'Refer to the '
'[SqlAlchemy docs]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#'
'database-urls) '
'for more information on how to structure your URI.', True),
'expose_in_sqllab': _('Expose this DB in SQL Lab'),
'allow_run_sync': _(
'Allow users to run synchronous queries, this is the default '
'and should work well for queries that can be executed '
'within a web request scope (<~1 minute)'),
'allow_run_async': _(
'Allow users to run queries, against an async backend. '
'This assumes that you have a Celery worker setup as well '
'as a results backend.'),
'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),
'allow_dml': _(
'Allow users to run non-SELECT statements '
'(UPDATE, DELETE, CREATE, ...) '
'in SQL Lab'),
'force_ctas_schema': _(
'When allowing CREATE TABLE AS option in SQL Lab, '
'this option forces the table to be created in this schema'),
'extra': utils.markdown(
'JSON string containing extra configuration elements.<br/>'
'1. The ``engine_params`` object gets unpacked into the '
'[sqlalchemy.create_engine]'
'(http://docs.sqlalchemy.org/en/latest/core/engines.html#'
'sqlalchemy.create_engine) call, while the ``metadata_params`` '
'gets unpacked into the [sqlalchemy.MetaData]'
'(http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'
'#sqlalchemy.schema.MetaData) call.<br/>'
'2. The ``metadata_cache_timeout`` is a cache timeout setting '
'in seconds for metadata fetch of this database. Specify it as '
'**"metadata_cache_timeout": {"schema_cache_timeout": 600}**. '
'If unset, cache will not be enabled for the functionality. '
'A timeout of 0 indicates that the cache never expires.<br/>'
'3. The ``schemas_allowed_for_csv_upload`` is a comma separated list '
'of schemas that CSVs are allowed to upload to. '
'Specify it as **"schemas_allowed": ["public", "csv_upload"]**. '
'If database flavor does not support schema or any schema is allowed '
'to be accessed, just leave the list empty', True),
'impersonate_user': _(
'If Presto, all the queries in SQL Lab are going to be executed as the '
'currently logged on user who must have permission to run them.<br/>'
'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '
'service account, but impersonate the currently logged on user '
'via hive.server2.proxy.user property.'),
'allow_multi_schema_metadata_fetch': _(
'Allow SQL Lab to fetch a list of all tables and all views across '
'all database schemas. For large data warehouse with thousands of '
'tables, this can be expensive and put strain on the system.'),
'cache_timeout': _(
'Duration (in seconds) of the caching timeout for charts of this database. '
'A timeout of 0 indicates that the cache never expires. '
'Note this defaults to the global timeout if undefined.'),
'allow_csv_upload': _(
'If selected, please set the schemas allowed for csv upload in Extra.'),
}
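# A hypothetical `extra` value illustrating the keys described above; the
# values are placeholders taken from the descriptions, not recommendations:
#   {
#       "engine_params": {"pool_size": 5},
#       "metadata_params": {},
#       "metadata_cache_timeout": {"schema_cache_timeout": 600},
#       "schemas_allowed": ["public", "csv_upload"]
#   }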
label_columns = {
'expose_in_sqllab': _('Expose in SQL Lab'),
'allow_ctas': _('Allow CREATE TABLE AS'),
'allow_dml': _('Allow DML'),
'force_ctas_schema': _('CTAS Schema'),
'database_name': _('Database'),
'creator': _('Creator'),
'changed_on_': _('Last Changed'),
'sqlalchemy_uri': _('SQLAlchemy URI'),
'cache_timeout': _('Chart Cache Timeout'),
'extra': _('Extra'),
'allow_run_sync': _('Allow Run Sync'),
'allow_run_async': _('Allow Run Async'),
'impersonate_user': _('Impersonate the logged on user'),
'allow_csv_upload': _('Allow Csv Upload'),
'modified': _('Modified'),
'allow_multi_schema_metadata_fetch': _('Allow Multi Schema Metadata Fetch'),
'backend': _('Backend'),
}
def pre_add(self, db):
self.check_extra(db)
db.set_sqlalchemy_uri(db.sqlalchemy_uri)
security_manager.merge_perm('database_access', db.perm)
# adding a new database we always want to force refresh schema list
for schema in db.all_schema_names(force_refresh=True):
security_manager.merge_perm(
'schema_access', security_manager.get_schema_perm(db, schema))
def pre_update(self, db):
self.pre_add(db)
def pre_delete(self, obj):
if obj.tables:
raise SupersetException(Markup(
'Cannot delete a database that has tables attached. '
"Here's the list of associated tables: " +
', '.join('{}'.format(o) for o in obj.tables)))
def _delete(self, pk):
DeleteMixin._delete(self, pk)
def check_extra(self, db):
# this will check whether json.loads(extra) can succeed
try:
extra = db.get_extra()
except Exception as e:
raise Exception('Extra field cannot be decoded by JSON. {}'.format(str(e)))
# this will check whether 'metadata_params' is configured correctly
metadata_signature = inspect.signature(MetaData)
for key in extra.get('metadata_params', {}):
if key not in metadata_signature.parameters:
raise Exception('The metadata_params in Extra field '
'is not configured correctly. The key '
'{} is invalid.'.format(key))
appbuilder.add_link(
'Import Dashboards',
label=__('Import Dashboards'),
href='/superset/import_dashboards',
icon='fa-cloud-upload',
category='Manage',
category_label=__('Manage'),
category_icon='fa-wrench')
appbuilder.add_view(
DatabaseView,
'Databases',
label=__('Databases'),
icon='fa-database',
category='Sources',
category_label=__('Sources'),
category_icon='fa-database')
class DatabaseAsync(DatabaseView):
list_columns = [
'id', 'database_name',
'expose_in_sqllab', 'allow_ctas', 'force_ctas_schema',
'allow_run_async', 'allow_run_sync', 'allow_dml',
'allow_multi_schema_metadata_fetch', 'allow_csv_upload',
'allows_subquery',
]
appbuilder.add_view_no_menu(DatabaseAsync)
class CsvToDatabaseView(SimpleFormView):
form = CsvToDatabaseForm
form_template = 'superset/form_view/csv_to_database_view/edit.html'
form_title = _('CSV to Database configuration')
add_columns = ['database', 'schema', 'table_name']
def form_get(self, form):
form.sep.data = ','
form.header.data = 0
form.mangle_dupe_cols.data = True
form.skipinitialspace.data = False
form.skip_blank_lines.data = True
form.infer_datetime_format.data = True
form.decimal.data = '.'
form.if_exists.data = 'fail'
def form_post(self, form):
database = form.con.data
schema_name = form.schema.data or ''
if not self.is_schema_allowed(database, schema_name):
message = _('Database "{0}" Schema "{1}" is not allowed for csv uploads. '
'Please contact Superset Admin'.format(database.database_name,
schema_name))
flash(message, 'danger')
return redirect('/csvtodatabaseview/form')
csv_file = form.csv_file.data
form.csv_file.data.filename = secure_filename(form.csv_file.data.filename)
csv_filename = form.csv_file.data.filename
path = os.path.join(config['UPLOAD_FOLDER'], csv_filename)
try:
utils.ensure_path_exists(config['UPLOAD_FOLDER'])
csv_file.save(path)
if csv_filename.lower().endswith("csv"):
table = SqlaTable(table_name=form.name.data)
table.database = form.data.get('con')
table.database_id = table.database.id
table.database.db_engine_spec.create_table_from_csv(form, table)
elif csv_filename.lower().endswith("xls") or csv_filename.lower().endswith("xlsx"):
table = SqlaTable(table_name=form.name.data)
table.database = form.data.get('con')
table.database_id = table.database.id
table.database.db_engine_spec.create_table_from_excel(form, path)
except Exception as e:
try:
os.remove(path)
except OSError:
pass
message = 'Table name {} already exists. Please pick another'.format(
form.name.data) if isinstance(e, IntegrityError) else e
flash(
message,
'danger')
return redirect('/csvtodatabaseview/form')
os.remove(path)
# Go back to welcome page / splash screen
db_name = table.database.database_name
message = _('CSV file "{0}" uploaded to table "{1}" in '
'database "{2}"'.format(csv_filename,
form.name.data,
db_name))
flash(message, 'info')
return redirect('/tablemodelview/list/')
def is_schema_allowed(self, database, schema):
if not database.allow_csv_upload:
return False
schemas = database.get_schema_access_for_csv_upload()
if schemas:
return schema in schemas
return (security_manager.database_access(database) or
security_manager.all_datasource_access())
appbuilder.add_view_no_menu(CsvToDatabaseView)
class DatabaseTablesAsync(DatabaseView):
list_columns = ['id', 'all_table_names', 'all_schema_names']
appbuilder.add_view_no_menu(DatabaseTablesAsync)
if config.get('ENABLE_ACCESS_REQUEST'):
class AccessRequestsModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(DAR)
list_columns = [
'username', 'user_roles', 'datasource_link',
'roles_with_datasource', 'created_on']
order_columns = ['created_on']
base_order = ('changed_on', 'desc')
label_columns = {
'username': _('User'),
'user_roles': _('User Roles'),
'database': _('Database URL'),
'datasource_link': _('Datasource'),
'roles_with_datasource': _('Roles to grant'),
'created_on': _('Created On'),
}
appbuilder.add_view(
AccessRequestsModelView,
'Access requests',
label=__('Access requests'),
category='Security',
category_label=__('Security'),
icon='fa-table')
class SliceModelView(SupersetModelView, DeleteMixin): # noqa
route_base = '/chart'
datamodel = SQLAInterface(models.Slice)
list_title = _('List Charts')
show_title = _('Show Chart')
add_title = _('Add Chart')
edit_title = _('Edit Chart')
can_add = False
label_columns = {
'datasource_link': _('Datasource'),
}
search_columns = (
'slice_name', 'description', 'viz_type', 'datasource_name', 'owners',
)
list_columns = [
'slice_link', 'viz_type', 'datasource_link', 'creator', 'modified']
order_columns = ['viz_type', 'datasource_link', 'modified']
edit_columns = [
'slice_name', 'description', 'viz_type', 'owners', 'dashboards',
'params', 'cache_timeout']
base_order = ('changed_on', 'desc')
description_columns = {
'description': Markup(
'The content here can be displayed as widget headers in the '
'dashboard view. Supports '
'<a href="https://daringfireball.net/projects/markdown/"">'
'markdown</a>'),
'params': _(
'These parameters are generated dynamically when clicking '
'the save or overwrite button in the explore view. This JSON '
'object is exposed here for reference and for power users who may '
'want to alter specific parameters.',
),
'cache_timeout': _(
'Duration (in seconds) of the caching timeout for this chart. '
'Note this defaults to the datasource/table timeout if undefined.'),
}
base_filters = [['id', SliceFilter, lambda: []]]
label_columns = {
'cache_timeout': _('Cache Timeout'),
'creator': _('Creator'),
'dashboards': _('Dashboards'),
'datasource_link': _('Datasource'),
'description': _('Description'),
'modified': _('Last Modified'),
'owners': _('Owners'),
'params': _('Parameters'),
'slice_link': _('Chart'),
'slice_name': _('Name'),
'table': _('Table'),
'viz_type': _('Visualization Type'),
}
def pre_add(self, obj):
utils.validate_json(obj.params)
def pre_update(self, obj):
utils.validate_json(obj.params)
check_ownership(obj)
def pre_delete(self, obj):
check_ownership(obj)
@expose('/add', methods=['GET', 'POST'])
@has_access
def add(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
{'value': str(d.id) + '__' + d.type, 'label': repr(d)}
for d in datasources
]
return self.render_template(
'superset/add_slice.html',
bootstrap_data=json.dumps({
'datasources': sorted(datasources, key=lambda d: d['label']),
}),
)
appbuilder.add_view(
SliceModelView,
'Charts',
label=__('Charts'),
icon='fa-bar-chart',
category='',
category_icon='')
class SliceAsync(SliceModelView): # noqa
route_base = '/sliceasync'
list_columns = [
'id', 'slice_link', 'viz_type', 'slice_name',
'creator', 'modified', 'icons']
label_columns = {
'icons': ' ',
'slice_link': _('Chart'),
}
appbuilder.add_view_no_menu(SliceAsync)
class SliceAddView(SliceModelView): # noqa
route_base = '/sliceaddview'
list_columns = [
'id', 'slice_name', 'slice_url', 'edit_url', 'viz_type', 'params',
'description', 'description_markeddown', 'datasource_id', 'datasource_type',
'datasource_name_text', 'datasource_link',
'owners', 'modified', 'changed_on']
appbuilder.add_view_no_menu(SliceAddView)
class DashboardModelView(SupersetModelView, DeleteMixin): # noqa
route_base = '/dashboard'
datamodel = SQLAInterface(models.Dashboard)
list_title = _('List Dashboards')
show_title = _('Show Dashboard')
add_title = _('Add Dashboard')
edit_title = _('Edit Dashboard')
list_columns = ['dashboard_link', 'creator', 'modified']
order_columns = ['modified']
edit_columns = [
'dashboard_title', 'slug', 'owners', 'position_json', 'css',
'json_metadata']
show_columns = edit_columns + ['table_names', 'slices']
search_columns = ('dashboard_title', 'slug', 'owners')
add_columns = edit_columns
base_order = ('changed_on', 'desc')
description_columns = {
'position_json': _(
'This json object describes the positioning of the widgets in '
'the dashboard. It is dynamically generated when adjusting '
'the widgets size and positions by using drag & drop in '
'the dashboard view'),
'css': _(
'The css for individual dashboards can be altered here, or '
'in the dashboard view where changes are immediately '
'visible'),
'slug': _('To get a readable URL for your dashboard'),
'json_metadata': _(
'This JSON object is generated dynamically when clicking '
'the save or overwrite button in the dashboard view. It '
'is exposed here for reference and for power users who may '
'want to alter specific parameters.'),
'owners': _('Owners is a list of users who can alter the dashboard.'),
}
base_filters = [['slice', DashboardFilter, lambda: []]]
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'slug': _('Slug'),
'slices': _('Charts'),
'owners': _('Owners'),
'creator': _('Creator'),
'modified': _('Modified'),
'position_json': _('Position JSON'),
'css': _('CSS'),
'json_metadata': _('JSON Metadata'),
'table_names': _('Underlying Tables'),
}
def pre_add(self, obj):
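        # Normalize the slug, keep the current user among the owners, validate
        # the JSON blobs, and propagate dashboard ownership to every contained
        # slice.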
obj.slug = obj.slug.strip() or None
if obj.slug:
obj.slug = obj.slug.replace(' ', '-')
obj.slug = re.sub(r'[^\w\-]+', '', obj.slug)
if g.user not in obj.owners:
obj.owners.append(g.user)
utils.validate_json(obj.json_metadata)
utils.validate_json(obj.position_json)
owners = [o for o in obj.owners]
for slc in obj.slices:
slc.owners = list(set(owners) | set(slc.owners))
def pre_update(self, obj):
check_ownership(obj)
self.pre_add(obj)
def pre_delete(self, obj):
check_ownership(obj)
@action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')
def mulexport(self, items):
if not isinstance(items, list):
items = [items]
ids = ''.join('&id={}'.format(d.id) for d in items)
return redirect(
'/dashboard/export_dashboards_form?{}'.format(ids[1:]))
@expose('/export_dashboards_form')
def download_dashboards(self):
if request.args.get('action') == 'go':
ids = request.args.getlist('id')
return Response(
models.Dashboard.export_dashboards(ids),
headers=generate_download_headers('json'),
mimetype='application/text')
return self.render_template(
'superset/export_dashboards.html',
dashboards_url='/dashboard/list',
)
appbuilder.add_view(
DashboardModelView,
'Dashboards',
label=__('Dashboards'),
icon='fa-dashboard',
category='',
category_icon='')
class DashboardModelViewAsync(DashboardModelView): # noqa
route_base = '/dashboardasync'
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
label_columns = {
'dashboard_link': _('Dashboard'),
'dashboard_title': _('Title'),
'creator': _('Creator'),
'modified': _('Modified'),
}
appbuilder.add_view_no_menu(DashboardModelViewAsync)
class DashboardAddView(DashboardModelView): # noqa
route_base = '/dashboardaddview'
list_columns = [
'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',
'changed_on', 'url', 'changed_by_name',
]
show_columns = list(set(DashboardModelView.edit_columns + list_columns))
appbuilder.add_view_no_menu(DashboardAddView)
class LogModelView(SupersetModelView):
datamodel = SQLAInterface(models.Log)
list_title = _('List Log')
show_title = _('Show Log')
add_title = _('Add Log')
edit_title = _('Edit Log')
list_columns = ('user', 'action', 'local_dttm')
edit_columns = ('user', 'action', 'dttm', 'json')
base_order = ('dttm', 'desc')
label_columns = {
'user': _('User'),
'action': _('Action'),
'local_dttm': _('Time'),
'json': _('JSON'),
}
appbuilder.add_view(
LogModelView,
'Action Log',
label=__('Action Log'),
category='Security',
category_label=__('Security'),
icon='fa-list-ol')
@app.route('/health')
def health():
return 'OK'
@app.route('/healthcheck')
def healthcheck():
return 'OK'
@app.route('/ping')
def ping():
return 'OK'
@csrf.exempt
@app.route('/add_user_from_dbp', methods=['POST'])
def add_user_from_dbp():
raw_user_info = request.data
user_info = json.loads(raw_user_info, encoding='utf-8')
try:
username = user_info.get('username', None)
first_name = user_info.get('first_name', None)
last_name = user_info.get('last_name', None)
email = user_info.get('email', None)
password = user_info.get('password', None)
user_role = user_info.get('role', config.get('CUSTOM_ROLE_NAME_KEYWORD'))
if not username and not email:
return json_error_response(
'username and email are missing.')
user = security_manager.find_user(username, email)
if user:
return json_error_response(
                'User with name (%s) or email (%s) already exists.' % (username, email))
role = security_manager.find_role(user_role)
if not role:
return json_error_response(
                'Role with name (%s) does not exist.' % (user_role,))
        user = security_manager.add_user(
            username=username, first_name=first_name, last_name=last_name,
            email=email, role=role, password=password)
resp = json_success(json.dumps(
{'user_id': user.id}, default=utils.json_int_dttm_ser,
ignore_nan=True), status=200)
return resp
except Exception:
        return json_error_response(
            'Error in call add_user_from_dbp. '
            'The error message returned was:\n{}'.format(
                traceback.format_exc()))
class KV(BaseSupersetView):
"""Used for storing and retrieving key value pairs"""
@log_this
@expose('/store/', methods=['POST'])
def store(self):
try:
value = request.form.get('data')
obj = models.KeyValue(value=value)
db.session.add(obj)
db.session.commit()
except Exception as e:
return json_error_response(e)
return Response(
json.dumps({'id': obj.id}),
status=200)
@log_this
@expose('/<key_id>/', methods=['GET'])
def get_value(self, key_id):
kv = None
try:
kv = db.session.query(models.KeyValue).filter_by(id=key_id).one()
except Exception as e:
return json_error_response(e)
return Response(kv.value, status=200)
appbuilder.add_view_no_menu(KV)
class R(BaseSupersetView):
"""used for short urls"""
@log_this
@expose('/<url_id>')
def index(self, url_id):
url = db.session.query(models.Url).filter_by(id=url_id).first()
if url:
return redirect('/' + url.url)
else:
flash('URL to nowhere...', 'danger')
return redirect('/')
@log_this
@expose('/shortner/', methods=['POST', 'GET'])
def shortner(self):
url = request.form.get('data')
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return Response(
'{scheme}://{request.headers[Host]}/r/{obj.id}'.format(
scheme=request.scheme, request=request, obj=obj),
mimetype='text/plain')
@expose('/msg/')
def msg(self):
"""Redirects to specified url while flash a message"""
flash(Markup(request.args.get('msg')), 'info')
return redirect(request.args.get('url'))
appbuilder.add_view_no_menu(R)
class Superset(BaseSupersetView):
"""The base views for Superset!"""
@has_access_api
@expose('/datasources/')
def datasources(self):
datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [o.short_data for o in datasources]
datasources = sorted(datasources, key=lambda o: o['name'])
return self.json_response(datasources)
@has_access_api
@expose('/override_role_permissions/', methods=['POST'])
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data['role_name']
databases = data['database']
db_ds_names = set()
for dbs in databases:
for schema in dbs['schema']:
for ds_name in schema['datasources']:
fullname = utils.get_datasource_full_name(
dbs['name'], ds_name, schema=schema['name'])
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm,
permission_name='datasource_access')
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response({
'granted': granted_perms,
'requested': list(db_ds_names),
}, status=201)
@log_this
@has_access
@expose('/request_access/')
def request_access(self):
datasources = set()
dashboard_id = request.args.get('dashboard_id')
if dashboard_id:
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(dashboard_id))
.one()
)
datasources |= dash.datasources
datasource_id = request.args.get('datasource_id')
datasource_type = request.args.get('datasource_type')
if datasource_id:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class)
.filter_by(id=int(datasource_id))
.one()
)
datasources.add(datasource)
has_access = all(
(
datasource and security_manager.datasource_access(datasource)
for datasource in datasources
))
if has_access:
return redirect('/superset/dashboard/{}'.format(dashboard_id))
if request.args.get('action') == 'go':
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id,
datasource_type=datasource.type)
db.session.add(access_request)
db.session.commit()
flash(__('Access was requested'), 'info')
return redirect('/')
return self.render_template(
'superset/request_access.html',
datasources=datasources,
datasource_names=', '.join([o.name for o in datasources]),
)
@log_this
@has_access
@expose('/approve')
def approve(self):
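        # Grants access either by assigning role_to_grant to the requester or
        # by extending role_to_extend with the datasource permission, then
        # cleans up access requests that are already fulfilled.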
def clean_fulfilled_requests(session):
for r in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
r.datasource_type, r.datasource_id, session)
user = security_manager.get_user_by_id(r.created_by_fk)
if not datasource or \
security_manager.datasource_access(datasource, user):
                    # the datasource no longer exists, or access was already granted
session.delete(r)
session.commit()
datasource_type = request.args.get('datasource_type')
datasource_id = request.args.get('datasource_id')
created_by_username = request.args.get('created_by')
role_to_grant = request.args.get('role_to_grant')
role_to_extend = request.args.get('role_to_extend')
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'alert')
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, 'alert')
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id)
.all()
)
if not requests:
flash(ACCESS_REQUEST_MISSING_ERR, 'alert')
return json_error_response(ACCESS_REQUEST_MISSING_ERR)
# check if you can approve
if security_manager.all_datasource_access() or g.user.id == datasource.owner_id:
            # approval can be done by an admin or by the datasource owner
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
'%(user)s was granted the role %(role)s that gives access '
'to the %(datasource)s',
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_granted.txt', app.config)
flash(msg, 'info')
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
                    'datasource_access', datasource.perm)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __('Role %(r)s was extended to provide the access to '
'the datasource %(ds)s', r=role_to_extend,
ds=datasource.full_name)
utils.notify_user_about_perm_udate(
g.user, requested_by, role, datasource,
'email/role_extended.txt', app.config)
flash(msg, 'info')
clean_fulfilled_requests(session)
else:
flash(__('You have no permission to approve this request'),
'danger')
return redirect('/accessrequestsmodelview/list/')
for r in requests:
session.delete(r)
session.commit()
return redirect('/accessrequestsmodelview/list/')
def get_form_data(self, slice_id=None, use_slice_data=False):
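        # Precedence, lowest to highest: saved slice params / short-url ('r')
        # form_data, then the POST body 'form_data', then the request args.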
form_data = {}
post_data = request.form.get('form_data')
request_args_data = request.args.get('form_data')
# Supporting POST
if post_data:
form_data.update(json.loads(post_data))
# request params can overwrite post body
if request_args_data:
form_data.update(json.loads(request_args_data))
url_id = request.args.get('r')
if url_id:
saved_url = db.session.query(models.Url).filter_by(id=url_id).first()
if saved_url:
url_str = parse.unquote_plus(
saved_url.url.split('?')[1][10:], encoding='utf-8', errors=None)
url_form_data = json.loads(url_str)
                # allow form_data in the request to override the saved url
url_form_data.update(form_data)
form_data = url_form_data
if request.args.get('viz_type'):
# Converting old URLs
form_data = cast_form_data(form_data)
form_data = {
k: v
for k, v in form_data.items()
if k not in FORM_DATA_KEY_BLACKLIST
}
# When a slice_id is present, load from DB and override
# the form_data from the DB with the other form_data provided
slice_id = form_data.get('slice_id') or slice_id
slc = None
# Check if form data only contains slice_id
contains_only_slc_id = not any(key != 'slice_id' for key in form_data)
# Include the slice_form_data if request from explore or slice calls
# or if form_data only contains slice_id
if slice_id and (use_slice_data or contains_only_slc_id):
slc = db.session.query(models.Slice).filter_by(id=slice_id).first()
slice_form_data = slc.form_data.copy()
            # allow form_data in the request to override the slice's form_data
slice_form_data.update(form_data)
form_data = slice_form_data
update_time_range(form_data)
return form_data, slc
def get_viz(
self,
slice_id=None,
form_data=None,
datasource_type=None,
datasource_id=None,
force=False,
):
if slice_id:
slc = (
db.session.query(models.Slice)
.filter_by(id=slice_id)
.one()
)
return slc.get_viz()
else:
viz_type = form_data.get('viz_type', 'table')
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
viz_obj = viz.viz_types[viz_type](
datasource,
form_data=form_data,
force=force,
)
return viz_obj
@has_access
@expose('/slice/<slice_id>/')
def slice(self, slice_id):
form_data, slc = self.get_form_data(slice_id, use_slice_data=True)
endpoint = '/superset/explore/?form_data={}'.format(
parse.quote(json.dumps(form_data)),
)
if request.args.get('standalone') == 'true':
endpoint += '&standalone=true'
return redirect(endpoint)
def get_query_string_response(self, viz_obj):
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as e:
logging.exception(e)
return json_error_response(e)
if query_obj and query_obj['prequeries']:
query_obj['prequeries'].append(query)
query = ';\n\n'.join(query_obj['prequeries'])
if query:
query += ';'
else:
query = 'No query.'
return self.json_response({
'query': query,
'language': viz_obj.datasource.query_language,
})
def get_raw_results(self, viz_obj):
return self.json_response({
'data': viz_obj.get_df().to_dict('records'),
})
def get_samples(self, viz_obj):
return self.json_response({
'data': viz_obj.get_samples(),
})
def generate_json(
self, datasource_type, datasource_id, form_data,
csv=False, query=False, force=False, results=False,
samples=False,
):
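        # Depending on the flags, this returns a CSV download, the rendered
        # SQL, raw result records, sample rows, or the full viz payload.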
try:
viz_obj = self.get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
if not security_manager.datasource_access(viz_obj.datasource, g.user):
return json_error_response(
security_manager.get_datasource_access_error_msg(viz_obj.datasource),
status=404,
link=security_manager.get_datasource_access_link(viz_obj.datasource))
if csv:
return CsvResponse(
viz_obj.get_csv(),
status=200,
headers=generate_download_headers('csv'),
mimetype='application/csv')
if query:
return self.get_query_string_response(viz_obj)
if results:
return self.get_raw_results(viz_obj)
if samples:
return self.get_samples(viz_obj)
try:
payload = viz_obj.get_payload()
except SupersetException as se:
logging.exception(se)
return json_error_response(utils.error_msg_from_exception(se),
status=se.status)
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if (
payload.get('status') == QueryStatus.FAILED or
payload.get('error') is not None
):
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/slice_json/<slice_id>')
def slice_json(self, slice_id):
try:
form_data, slc = self.get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
except Exception as e:
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data)
@log_this
@has_access_api
@expose('/annotation_json/<layer_id>')
def annotation_json(self, layer_id):
form_data = self.get_form_data()[0]
form_data['layer_id'] = layer_id
form_data['filters'] = [{'col': 'layer_id',
'op': '==',
'val': layer_id}]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types['table'](
datasource,
form_data=form_data,
force=False,
)
try:
payload = viz_obj.get_payload()
except Exception as e:
logging.exception(e)
return json_error_response(utils.error_msg_from_exception(e))
status = 200
if payload.get('status') == QueryStatus.FAILED:
status = 400
return json_success(viz_obj.json_dumps(payload), status=status)
@log_this
@has_access_api
@expose('/explore_json/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore_json/', methods=['GET', 'POST'])
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get('csv') == 'true'
query = request.args.get('query') == 'true'
results = request.args.get('results') == 'true'
samples = request.args.get('samples') == 'true'
force = request.args.get('force') == 'true'
try:
form_data = self.get_form_data()[0]
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
except Exception as e:
logging.exception(e)
return json_error_response(
utils.error_msg_from_exception(e),
stacktrace=traceback.format_exc())
return self.generate_json(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
csv=csv,
query=query,
results=results,
force=force,
samples=samples,
)
@log_this
@has_access
@expose('/import_dashboards', methods=['GET', 'POST'])
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get('file')
if request.method == 'POST' and f:
dashboard_import_export_util.import_dashboards(db.session, f.stream)
return redirect('/dashboard/list/')
return self.render_template('superset/import_dashboards.html')
@log_this
@has_access
@expose('/explorev2/<datasource_type>/<datasource_id>/')
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args))
@staticmethod
def datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directory, now they should come as part of the form_data,
This function allows supporting both without duplicating code"""
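        # Illustrative: form_data = {'datasource': '3__table'} (hypothetical id)
        # yields datasource_id=3 and datasource_type='table'.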
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
# The case where the datasource has been deleted
datasource_id = None if datasource_id == 'None' else datasource_id
if not datasource_id:
raise Exception(
'The datasource associated with this chart no longer exists')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
@log_this
@has_access
@expose('/explore/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])
@expose('/explore/', methods=['GET', 'POST'])
def explore(self, datasource_type=None, datasource_id=None):
user_id = g.user.get_id() if g.user else None
form_data, slc = self.get_form_data(use_slice_data=True)
datasource_id, datasource_type = self.datasource_info(
datasource_id, datasource_type, form_data)
error_redirect = '/chart/list/'
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
flash(DATASOURCE_MISSING_ERR, 'danger')
return redirect(error_redirect)
if config.get('ENABLE_ACCESS_REQUEST') and (
not security_manager.datasource_access(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
'datasource_type={datasource_type}&'
'datasource_id={datasource_id}&'
''.format(**locals()))
viz_type = form_data.get('viz_type')
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# slc perms
slice_add_perm = security_manager.can_access('can_add', 'SliceModelView')
slice_overwrite_perm = is_owner(slc, g.user)
slice_download_perm = security_manager.can_access(
'can_download', 'SliceModelView')
form_data['datasource'] = str(datasource_id) + '__' + datasource_type
# On explore, merge legacy and extra filters into the form data
utils.convert_legacy_filters_into_adhoc(form_data)
merge_extra_filters(form_data)
# merge request url params
if request.method == 'GET':
merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get('action')
if action == 'overwrite' and not slice_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') + _('chart'),
status=400)
if action == 'saveas' and not slice_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('chart'),
status=400)
if action in ('saveas', 'overwrite'):
return self.save_or_overwrite_slice(
request.args,
slc, slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource_id,
datasource_type,
datasource.name)
standalone = request.args.get('standalone') == 'true'
bootstrap_data = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': slice_overwrite_perm,
'datasource': datasource.data,
'form_data': form_data,
'datasource_id': datasource_id,
'datasource_type': datasource_type,
'slice': slc.data if slc else None,
'standalone': standalone,
'user_id': user_id,
'user_name': g.user.username,
'forced_height': request.args.get('height'),
'common': self.common_bootsrap_payload(),
}
table_name = datasource.table_name \
if datasource_type == 'table' \
else datasource.datasource_name
if slc:
title = slc.slice_name
else:
title = _('Explore - %(table)s', table=table_name)
return self.render_template(
'superset/basic.html',
bootstrap_data=json.dumps(bootstrap_data),
entry='explore',
title=title,
standalone_mode=standalone)
@api
@has_access_api
@expose('/filter/<datasource_type>/<datasource_id>/<column>/')
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
if not security_manager.datasource_access(datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(datasource))
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload)
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data, _ = self.get_form_data()
        if action == 'saveas':
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
        if action == 'saveas' and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
'Slice [{}] was added to dashboard [{}]'.format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
'Dashboard [{}] just got created and slice [{}] was added '
'to it'.format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response))
def save_slice(self, slc):
session = db.session()
msg = _('Chart [{}] has been saved').format(slc.slice_name)
session.add(slc)
session.commit()
flash(msg, 'info')
def overwrite_slice(self, slc):
session = db.session()
session.merge(slc)
session.commit()
msg = _('Chart [{}] has been overwritten').format(slc.slice_name)
flash(msg, 'info')
@api
@has_access_api
@expose('/checkbox/<model_view>/<id_>/<attr>/<value>', methods=['GET'])
def checkbox(self, model_view, id_, attr, value):
"""endpoint for checking/unchecking any boolean in a sqla model"""
modelview_to_model = {
'{}ColumnInlineView'.format(name.capitalize()): source.column_class
for name, source in ConnectorRegistry.sources.items()
}
model = modelview_to_model[model_view]
col = db.session.query(model).filter_by(id=id_).first()
checked = value == 'true'
if col:
setattr(col, attr, checked)
if checked:
metrics = col.get_metrics().values()
col.datasource.add_missing_metrics(metrics)
db.session.commit()
return json_success('OK')
@api
@has_access_api
@expose('/schemas/<db_id>/')
@expose('/schemas/<db_id>/<force_refresh>/')
def schemas(self, db_id, force_refresh='true'):
db_id = int(db_id)
force_refresh = force_refresh.lower() == 'true'
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
schemas = database.all_schema_names(force_refresh=force_refresh)
schemas = security_manager.schemas_accessible_by_user(database, schemas)
return Response(
json.dumps({'schemas': schemas}),
mimetype='application/json')
@api
@has_access_api
@expose('/tables/<db_id>/<schema>/<substr>/')
def tables(self, db_id, schema, substr):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
schema = utils.js_string_to_python(schema)
substr = utils.js_string_to_python(substr)
database = db.session.query(models.Database).filter_by(id=db_id).one()
table_names = security_manager.accessible_by_user(
database, database.all_table_names(schema), schema)
view_names = security_manager.accessible_by_user(
database, database.all_view_names(schema), schema)
if substr:
table_names = [tn for tn in table_names if substr in tn]
view_names = [vn for vn in view_names if substr in vn]
max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
total_items = len(table_names) + len(view_names)
max_tables = len(table_names)
max_views = len(view_names)
if total_items and substr:
max_tables = max_items * len(table_names) // total_items
max_views = max_items * len(view_names) // total_items
table_options = [{'value': tn, 'label': tn}
for tn in table_names[:max_tables]]
table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
for vn in view_names[:max_views]])
payload = {
'tableLength': len(table_names) + len(view_names),
'options': table_options,
}
return json_success(json.dumps(payload))
@api
@has_access_api
@expose('/copy_dash/<dashboard_id>/', methods=['GET', 'POST'])
def copy_dash(self, dashboard_id):
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form.get('data'))
dash = models.Dashboard()
original_dash = (
session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data['dashboard_title']
if data['duplicate_slices']:
# Duplicating slices as well, mapping old ids to new ones
old_to_new_sliceids = {}
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_sliceids['{}'.format(slc.id)] = \
'{}'.format(new_slice.id)
# update chartId of layout entities
# in v2_dash positions json data, chartId should be integer,
# while in older version slice_id is string type
for value in data['positions'].values():
if (
isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')
):
old_id = '{}'.format(value.get('meta').get('chartId'))
new_id = int(old_to_new_sliceids[old_id])
value['meta']['chartId'] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
self._set_dash_metadata(dash, data)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@expose('/save_dash/<dashboard_id>/', methods=['GET', 'POST'])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return 'SUCCESS'
@staticmethod
def _set_dash_metadata(dashboard, data):
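        # Rebuild dashboard.slices from the chartIds found in the position
        # data, sync slice names, and refresh position_json, css, title and
        # the json_metadata blob (expanded slices, default filters, etc.).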
positions = data['positions']
# find slices in the position data
slice_ids = []
slice_id_to_name = {}
for value in positions.values():
if (
isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')
):
slice_id = value.get('meta').get('chartId')
slice_ids.append(slice_id)
slice_id_to_name[slice_id] = value.get('meta').get('sliceName')
session = db.session()
Slice = models.Slice # noqa
current_slices = session.query(Slice).filter(
Slice.id.in_(slice_ids)).all()
dashboard.slices = current_slices
# update slice names. this assumes user has permissions to update the slice
for slc in dashboard.slices:
new_name = slice_id_to_name[slc.id]
if slc.slice_name != new_name:
slc.slice_name = new_name
session.merge(slc)
session.flush()
# remove leading and trailing white spaces in the dumped json
dashboard.position_json = json.dumps(
positions, indent=None, separators=(',', ':'), sort_keys=True)
md = dashboard.params_dict
dashboard.css = data.get('css')
dashboard.dashboard_title = data['dashboard_title']
if 'filter_immune_slices' not in md:
md['filter_immune_slices'] = []
if 'timed_refresh_immune_slices' not in md:
md['timed_refresh_immune_slices'] = []
if 'filter_immune_slice_fields' not in md:
md['filter_immune_slice_fields'] = {}
md['expanded_slices'] = data['expanded_slices']
default_filters_data = json.loads(data.get('default_filters', '{}'))
applicable_filters = \
{key: v for key, v in default_filters_data.items()
if int(key) in slice_ids}
md['default_filters'] = json.dumps(applicable_filters)
dashboard.json_metadata = json.dumps(md)
@api
@has_access_api
@expose('/add_slices/<dashboard_id>/', methods=['POST'])
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
session = db.session()
Slice = models.Slice # noqa
dash = (
session.query(models.Dashboard).filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(
Slice.id.in_(data['slice_ids']))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return 'SLICES ADDED'
@api
@has_access_api
@expose('/testconn', methods=['POST', 'GET'])
def testconn(self):
"""Tests a sqla connection"""
try:
username = g.user.username if g.user is not None else None
uri = request.json.get('uri')
db_name = request.json.get('name')
impersonate_user = request.json.get('impersonate_user')
database = None
if db_name:
database = (
db.session
.query(models.Database)
.filter_by(database_name=db_name)
.first()
)
if database and uri == database.safe_sqlalchemy_uri():
# the password-masked uri was passed
# use the URI associated with this database
uri = database.sqlalchemy_uri_decrypted
configuration = {}
if database and uri:
url = make_url(uri)
db_engine = models.Database.get_db_engine_spec_for_backend(
url.get_backend_name())
db_engine.patch()
masked_url = database.get_password_masked_url_from_uri(uri)
logging.info('Superset.testconn(). Masked URL: {0}'.format(masked_url))
configuration.update(
db_engine.get_configuration_for_impersonation(uri,
impersonate_user,
username),
)
engine_params = (
request.json
.get('extras', {})
.get('engine_params', {}))
            connect_args = engine_params.get('connect_args', {})
            if configuration:
                connect_args['configuration'] = configuration
                engine_params['connect_args'] = connect_args
engine = create_engine(uri, **engine_params)
engine.connect()
return json_success(json.dumps(engine.table_names(), indent=4))
except Exception as e:
logging.exception(e)
return json_error_response((
'Connection failed!\n\n'
'The error message returned was:\n{}').format(e))
@api
@has_access_api
@expose('/recent_activity/<user_id>/', methods=['GET'])
def recent_activity(self, user_id):
"""Recent activity (actions) for a given user"""
M = models # noqa
if request.args.get('limit'):
limit = int(request.args.get('limit'))
else:
limit = 1000
qry = (
db.session.query(M.Log, M.Dashboard, M.Slice)
.outerjoin(
M.Dashboard,
M.Dashboard.id == M.Log.dashboard_id,
)
.outerjoin(
M.Slice,
M.Slice.id == M.Log.slice_id,
)
.filter(
sqla.and_(
~M.Log.action.in_(('queries', 'shortner', 'sql_json')),
M.Log.user_id == user_id,
),
)
.order_by(M.Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
if log.Dashboard:
item_url = log.Dashboard.url
item_title = log.Dashboard.dashboard_title
elif log.Slice:
item_url = log.Slice.slice_url
item_title = log.Slice.slice_name
payload.append({
'action': log.Log.action,
'item_url': item_url,
'item_title': item_title,
'time': log.Log.dttm,
})
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/csrf_token/', methods=['GET'])
def csrf_token(self):
return Response(
self.render_template('superset/csrf_token.json'),
mimetype='text/json',
)
@api
@has_access_api
@expose('/fave_dashboards_by_username/<username>/', methods=['GET'])
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@expose('/fave_dashboards/<user_id>/', methods=['GET'])
def fave_dashboards(self, user_id):
qry = (
db.session.query(
models.Dashboard,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'Dashboard',
models.Dashboard.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Dashboard.id,
'dashboard': o.Dashboard.dashboard_link(),
'title': o.Dashboard.dashboard_title,
'url': o.Dashboard.url,
'dttm': o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_dashboards/<user_id>/', methods=['GET'])
def created_dashboards(self, user_id):
Dash = models.Dashboard # noqa
qry = (
db.session.query(
Dash,
)
.filter(
sqla.or_(
Dash.created_by_fk == user_id,
Dash.changed_by_fk == user_id,
),
)
.order_by(
Dash.changed_on.desc(),
)
)
payload = [{
'id': o.id,
'dashboard': o.dashboard_link(),
'title': o.dashboard_title,
'url': o.url,
'dttm': o.changed_on,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/user_slices', methods=['GET'])
@expose('/user_slices/<user_id>/', methods=['GET'])
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
FavStar = models.FavStar # noqa
qry = (
db.session.query(Slice,
FavStar.dttm).join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
isouter=True).filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
),
)
.order_by(Slice.slice_name.asc())
)
payload = [{
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'data': o.Slice.form_data,
'dttm': o.dttm if o.dttm else o.Slice.changed_on,
'viz_type': o.Slice.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/created_slices', methods=['GET'])
@expose('/created_slices/<user_id>/', methods=['GET'])
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/fave_slices', methods=['GET'])
@expose('/fave_slices/<user_id>/', methods=['GET'])
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(
models.Slice,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'dttm': o.dttm,
'viz_type': o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@expose('/warm_up_cache/', methods=['GET'])
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
        Note that for slices, a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')
if not slice_id and not (table_name and db_name):
return json_error_response(__(
'Malformed request. slice_id or table_name and db_name '
'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
'Chart %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
                    models.Database.database_name == db_name,
                    SqlaTable.table_name == table_name)
).first()
if not table:
return json_error_response(__(
"Table %(t)s wasn't found in the database %(d)s",
                    t=table_name, d=db_name), status=404)
slices = session.query(models.Slice).filter_by(
datasource_id=table.id,
datasource_type=table.type).all()
for slc in slices:
try:
obj = slc.get_viz(force=True)
obj.get_json()
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
[{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
@expose('/favstar/<class_name>/<obj_id>/<action>/')
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count}))
@has_access
@expose('/dashboard/<dashboard_id>/')
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one()
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
'dashboard_id={dash.id}&'.format(**locals()))
dash_edit_perm = True
if check_dbp_user(g.user, app.config['ENABLE_DASHBOARD_SHARE_IN_CUSTOM_ROLE']):
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset') and g.user.id == dash.created_by_fk
else:
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset')
dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
superset_can_explore = security_manager.can_access('can_explore', 'Superset')
slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')
standalone_mode = request.args.get('standalone') == 'true'
edit_mode = request.args.get('edit') == 'true'
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(
dashboard_id=dash.id,
dashboard_version='v2',
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode)
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
'superset_can_explore': superset_can_explore,
'slice_can_edit': slice_can_edit,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'user_name': g.user.username,
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': edit_mode,
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
)
@api
@log_this
@expose('/log/', methods=['POST'])
def log(self):
return Response(status=200)
@has_access
@expose('/sync_druid/', methods=['POST'])
@log_this
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
@has_access
@expose('/sqllab_viz/', methods=['POST'])
@log_this
def sqllab_viz(self):
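        # Materialize a SQL Lab query as a SqlaTable datasource: store the
        # stripped SQL, build TableColumn entries from the posted column
        # config, and attach a default count(*) metric.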
SqlaTable = ConnectorRegistry.sources['table']
data = json.loads(request.form.get('data'))
table_name = data.get('datasourceName')
table = (
db.session.query(SqlaTable)
.filter_by(table_name=table_name)
.first()
)
if not table:
table = SqlaTable(table_name=table_name)
table.database_id = data.get('dbId')
table.schema = data.get('schema')
table.template_params = data.get('templateParams')
table.is_sqllab_view = True
q = SupersetQuery(data.get('sql'))
table.sql = q.stripped()
db.session.add(table)
cols = []
for config in data.get('columns'):
column_name = config.get('name')
SqlaTable = ConnectorRegistry.sources['table']
TableColumn = SqlaTable.column_class
SqlMetric = SqlaTable.metric_class
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config.get('is_date', False),
type=config.get('type', False),
)
cols.append(col)
table.columns = cols
table.metrics = [
SqlMetric(metric_name='count', expression='count(*)'),
]
db.session.commit()
return self.json_response(json.dumps({
'table_id': table.id,
}))
@has_access
@expose('/table/<database_id>/<table_name>/<schema>/')
@log_this
def table(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload_columns = []
indexes = []
primary_key = []
foreign_keys = []
try:
columns = mydb.get_columns(table_name, schema)
indexes = mydb.get_indexes(table_name, schema)
primary_key = mydb.get_pk_constraint(table_name, schema)
foreign_keys = mydb.get_foreign_keys(table_name, schema)
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
keys = []
if primary_key and primary_key.get('constrained_columns'):
primary_key['column_names'] = primary_key.pop('constrained_columns')
primary_key['type'] = 'pk'
keys += [primary_key]
for fk in foreign_keys:
fk['column_names'] = fk.pop('constrained_columns')
fk['type'] = 'fk'
keys += foreign_keys
for idx in indexes:
idx['type'] = 'index'
keys += indexes
for col in columns:
dtype = ''
try:
dtype = '{}'.format(col['type'])
except Exception:
# sqla.types.JSON __str__ has a bug, so using __class__.
dtype = col['type'].__class__.__name__
payload_columns.append({
'name': col['name'],
'type': dtype.split('(')[0] if '(' in dtype else dtype,
'longType': dtype,
'keys': [
k for k in keys
if col['name'] in k.get('column_names')
],
})
tbl = {
'name': table_name,
'columns': payload_columns,
'selectStar': mydb.select_star(
table_name, schema=schema, show_cols=True, indent=True,
cols=columns, latest_partition=True),
'primaryKey': primary_key,
'foreignKeys': foreign_keys,
'indexes': keys,
}
return json_success(json.dumps(tbl))
@has_access
@expose('/extra_table_metadata/<database_id>/<table_name>/<schema>/')
@log_this
def extra_table_metadata(self, database_id, table_name, schema):
schema = utils.js_string_to_python(schema)
mydb = db.session.query(models.Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(
mydb, table_name, schema)
return json_success(json.dumps(payload))
@has_access
@expose('/select_star/<database_id>/<table_name>')
@expose('/select_star/<database_id>/<table_name>/<schema>')
@log_this
def select_star(self, database_id, table_name, schema=None):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
return json_success(
mydb.select_star(
table_name,
schema,
latest_partition=True,
show_cols=True,
),
)
@expose('/theme/')
def theme(self):
return self.render_template('superset/theme.html')
@has_access_api
@expose('/cached_key/<key>/')
@log_this
def cached_key(self, key):
"""Returns a key from the cache"""
resp = cache.get(key)
if resp:
return resp
return 'nope'
@has_access_api
@expose('/cache_key_exist/<key>/')
@log_this
def cache_key_exist(self, key):
"""Returns if a key from cache exist"""
key_exist = True if cache.get(key) else False
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status)
@has_access_api
@expose('/results/<key>/')
@log_this
def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = utils.now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
'sqllab.query.results_backend_read',
utils.now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(security_manager.get_table_access_error_msg(
'{}'.format(rejected_tables)), status=403)
payload = utils.zlib_decompress_to_string(blob)
display_limit = app.config.get('DISPLAY_MAX_ROW', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(
payload_json,
default=utils.json_iso_dttm_ser,
ignore_nan=True,
),
)
@has_access_api
@expose('/stop_query/', methods=['POST'])
@log_this
def stop_query(self):
client_id = request.form.get('client_id')
try:
query = (
db.session.query(Query)
.filter_by(client_id=client_id).one()
)
query.status = utils.QueryStatus.STOPPED
db.session.commit()
except Exception:
pass
return self.json_response('OK')
@has_access_api
@expose('/sql_json/', methods=['POST', 'GET'])
@log_this
def sql_json(self):
"""Runs arbitrary sql and returns and json"""
async_ = request.form.get('runAsync') == 'true'
sql = request.form.get('sql')
database_id = request.form.get('database_id')
schema = request.form.get('schema') or None
template_params = json.loads(
request.form.get('templateParams') or '{}')
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).first()
if not mydb:
            return json_error_response(
                'Database with id {} is missing.'.format(database_id))
rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)
if rejected_tables:
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables),
link=security_manager.get_table_access_link(rejected_tables),
status=403)
session.commit()
select_as_cta = request.form.get('select_as_cta') == 'true'
tmp_table_name = request.form.get('tmp_table_name')
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = '{}.{}'.format(
mydb.force_ctas_schema,
tmp_table_name,
)
client_id = request.form.get('client_id') or utils.shortid()[:10]
query = Query(
database_id=int(database_id),
limit=mydb.db_engine_spec.get_limit_from_sql(sql),
sql=sql,
schema=schema,
select_as_cta=request.form.get('select_as_cta') == 'true',
start_time=utils.now_as_float(),
tab_name=request.form.get('tab'),
status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,
sql_editor_id=request.form.get('sql_editor_id'),
tmp_table_name=tmp_table_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
if not query_id:
raise Exception(_('Query record was not created as expected.'))
logging.info('Triggering query_id: {}'.format(query_id))
try:
template_processor = get_template_processor(
database=query.database, query=query)
rendered_query = template_processor.process_template(
query.sql,
**template_params)
except Exception as e:
return json_error_response(
'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))
# Async request.
if async_:
logging.info('Running query on a Celery worker')
            # Fire and forget: ignore the celery future object, otherwise
            # waiting on it could make the request time out.
try:
sql_lab.get_sql_results.delay(
query_id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username if g.user else None,
start_time=utils.now_as_float())
except Exception as e:
logging.exception(e)
msg = (
'Failed to start remote query on a worker. '
'Tell your administrator to verify the availability of '
'the message queue.'
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response('{}'.format(msg))
resp = json_success(json.dumps(
{'query': query.to_dict()}, default=utils.json_int_dttm_ser,
ignore_nan=True), status=202)
session.commit()
return resp
# Sync request.
try:
timeout = config.get('SQLLAB_TIMEOUT')
timeout_msg = (
'The query exceeded the {timeout} seconds '
'timeout.').format(**locals())
with utils.timeout(seconds=timeout,
error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query_id,
rendered_query,
return_results=True,
user_name=g.user.username if g.user else None)
payload = json.dumps(
data,
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logging.exception(e)
return json_error_response('{}'.format(e))
if data.get('status') == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload)
@has_access
@expose('/csv/<client_id>')
@log_this
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = (
'attachment; filename={}.csv'.format(unidecode(query.name)))
logging.info('Ready to return response')
return response
@has_access
@expose('/fetch_datasource_metadata')
@log_this
def fetch_datasource_metadata(self):
datasource_id, datasource_type = (
request.args.get('datasourceKey').split('__'))
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
# Check permission for datasource
if not security_manager.datasource_access(datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(datasource),
link=security_manager.get_datasource_access_link(datasource))
return json_success(json.dumps(datasource.data))
@expose('/queries/<last_updated_ms>')
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [
utils.QueryStatus.PENDING,
utils.QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
if queries_to_timeout:
update(Query).where(
and_(
Query.user_id == g.user.get_id(),
                    Query.client_id.in_(queries_to_timeout),
),
).values(state=utils.QueryStatus.TIMED_OUT)
for client_id in queries_to_timeout:
            dict_queries[client_id]['state'] = utils.QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@expose('/search_queries')
@log_this
def search_queries(self):
"""Search for queries."""
query = db.session.query(Query)
search_user_id = request.args.get('user_id')
database_id = request.args.get('database_id')
search_text = request.args.get('search_text')
status = request.args.get('status')
# From and To time stamp should be Epoch timestamp in seconds
from_time = request.args.get('from')
to_time = request.args.get('to')
if search_user_id:
            # Filter on user id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query \
.filter(Query.sql.like('%{}%'.format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)
sql_queries = (
query.order_by(Query.start_time.asc())
.limit(query_limit)
.all()
)
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype='application/json')
@app.errorhandler(500)
def show_traceback(self):
return render_template(
'superset/traceback.html',
error_msg=get_error_msg(),
), 500
@expose('/welcome')
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session
.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/profile/<username>/')
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=_("%(user)s's profile", user=username),
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
@has_access
@expose('/sqllab')
def sqllab(self):
"""SQL Editor"""
d = {
'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='sqllab',
bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),
)
@api
@has_access_api
@expose('/slice_query/<slice_id>/')
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = self.get_viz(slice_id)
if not security_manager.datasource_access(viz_obj.datasource):
return json_error_response(
security_manager.get_datasource_access_error_msg(viz_obj.datasource),
status=401,
link=security_manager.get_datasource_access_link(viz_obj.datasource))
return self.get_query_string_response(viz_obj)
@api
@has_access_api
@expose('/schema_access_for_csv_upload')
def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get('db_id'):
return json_error_response(
'No database is allowed for your csv upload')
db_id = int(request.args.get('db_id'))
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False)
return self.json_response(schemas_allowed_processed)
except Exception:
return json_error_response((
'Failed to fetch schemas allowed for csv upload in this database! '
'Please contact Superset Admin!\n\n'
'The error message returned was:\n{}').format(traceback.format_exc()))
appbuilder.add_view_no_menu(Superset)
class CssTemplateModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
list_title = _('List Css Template')
show_title = _('Show Css Template')
add_title = _('Add Css Template')
edit_title = _('Edit Css Template')
list_columns = ['template_name']
edit_columns = ['template_name', 'css']
add_columns = edit_columns
label_columns = {
'template_name': _('Template Name'),
}
class CssTemplateAsyncModelView(CssTemplateModelView):
list_columns = ['template_name', 'css']
appbuilder.add_separator('Sources')
appbuilder.add_view(
CssTemplateModelView,
'CSS Templates',
label=__('CSS Templates'),
icon='fa-css3',
category='Manage',
category_label=__('Manage'),
category_icon='')
appbuilder.add_view_no_menu(CssTemplateAsyncModelView)
appbuilder.add_link(
'SQL Editor',
label=_('SQL Editor'),
href='/superset/sqllab',
category_icon='fa-flask',
icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Query Search',
label=_('Query Search'),
href='/superset/sqllab#search',
icon='fa-search',
category_icon='fa-flask',
category='SQL Lab',
category_label=__('SQL Lab'),
)
appbuilder.add_link(
'Upload a CSV',
label=__('Upload a CSV'),
href='/csvtodatabaseview/form',
icon='fa-upload',
category='Sources',
category_label=__('Sources'),
category_icon='fa-wrench')
appbuilder.add_separator('Sources')
@app.after_request
def apply_caching(response):
"""Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items():
response.headers[k] = v
return response
# ---------------------------------------------------------------------
# Redirecting URL from previous names
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
@app.route('/<regex("panoramix\/.*"):url>')
def panoramix(url): # noqa
return redirect(request.full_path.replace('panoramix', 'superset'))
@app.route('/<regex("caravel\/.*"):url>')
def caravel(url): # noqa
return redirect(request.full_path.replace('caravel', 'superset'))
# ---------------------------------------------------------------------
class SupersetCasAuthDBView(AuthDBView):
login_template = 'appbuilder/general/security/login_cas.html'
@expose('/hna_iam_authorize', methods=['GET'])
def cas_authorized(self):
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
        return redirect(app.config['IAM_LOGIN_VALID_URL'] + "?service=" + app.config['SUPERSET_CAS_CALL_URL'] + "&params=")
def add_role_if_missing(self, sm, user_id, role_name):
found_role = sm.find_role(role_name)
session = sm.get_session
user = session.query(sm.user_model).get(user_id)
if found_role and found_role not in user.roles:
user.roles += [found_role]
session.commit()
@expose('/callback', methods=['GET'])
def cas_callback(self):
if 'ticket' not in request.args:
flash("Invalid ticket param in callback")
return redirect(self.appbuilder.get_url_for_login)
ticket = request.args.get('ticket')
validateUrl = "%s?service=%s&ticket=%s&format=json" % (app.config['IAM_VALID_URL'], app.config['SUPERSET_CAS_CALL_URL'], ticket)
import requests
res = requests.get(validateUrl)
        if res.status_code != 200:
            flash("IAM ticket validation request failed in callback")
return redirect(self.appbuilder.get_url_for_login)
user_info = res.content.decode()
        user_info_json = json.loads(user_info)
if 'authenticationSuccess' in user_info_json['serviceResponse']:
sucessRes = user_info_json['serviceResponse']['authenticationSuccess']
username = sucessRes.get('user')
email = sucessRes['attributes'].get('email')
sm = self.appbuilder.sm
user = sm.find_user(username)
role = sm.find_role(app.config['CUSTOM_ROLE_NAME_KEYWORD'])
if user is None and username:
user = sm.add_user(
username=username,
first_name=username,
last_name='',
email=email,
role=role
)
msg = ("Welcome to Superset, {}".format(username))
flash(msg, 'info')
user = sm.auth_user_remote_user(username)
self.add_role_if_missing(sm, user.id, app.config['CUSTOM_ROLE_NAME_KEYWORD'])
login_user(user)
return redirect(self.redirect_url())
else:
flash("Error :%s " % user_info_json['serviceResponse']['authenticationFailure']['description'])
return redirect(self.appbuilder.get_url_for_login)
class VisitorRegModelView(SupersetModelView, DeleteMixin):
    datamodel = SQLAInterface(models.VisitorReg)
    list_title = _('List Visitors')
    show_title = _('Show Visitor')
    add_title = _('Add Visitor')
    edit_title = _('Edit Visitor')
list_columns = [
'jbh_uid', 'name', 'phone', 'group_prop',
'registry_type', 'first_vistor_time', 'first_receptor', 'communication_times', 'agree', 'status'
]
add_columns = [
'jbh_uid', 'name', 'phone', 'group_prop',
'registry_type', 'illustration', 'first_vistor_time', 'first_receptor', 'communication_times', 'agree', 'status'
]
label_columns = {
'jbh_uid': _('聚宝汇UID'),
'name': _('姓名'),
'phone': _('电话'),
'group_prop': _('集团属性'),
'registry_type': _('登记类型'),
'first_vistor_time': _('首次来访时间'),
'first_receptor': _('首次接待人员'),
'communication_times': _('沟通次数'),
'agree': _('客户是否同意'),
'status': _('状态'),
'illustration': _('客户诉求'),
}
appbuilder.add_view(
    VisitorRegModelView,
    'Visitor Registration',
label=__('访客登记'),
icon='fa-registered',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
appbuilder.add_separator('Disposal process')
appbuilder.add_link(
'Investor Communication',
label=__('投资人沟通'),
href='/csvtodatabaseview/form',
icon='fa-odnoklassniki',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
appbuilder.add_separator('Disposal process')
appbuilder.add_link(
'Cash Plan',
label=__('兑付计划'),
href='/csvtodatabaseview/form',
icon='fa-odnoklassniki',
category='Disposal process',
category_label=__('处置流程'),
category_icon='fa-hand-lizard-o')
|
py | 1a4da947be1d84f9ffff13b9d4f102b755114bd9 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, \
    get_object_or_404  # redirect sends the user to another URL, in this case person_list
# get_object_or_404 fetches the object and returns a 404 if it cannot be found
from .models import Pessoa
from .forms import PersonForm
@login_required
def person_list(request):
    persons = Pessoa.objects.all()  # like a SELECT * FROM Pessoa, i.e. fetches every person
return render(request, 'person.html', {"galeres": persons})
@login_required
def person_new(request):
form = PersonForm(request.POST or None,
                      request.FILES or None)  # request.FILES carries any uploaded media files
if form.is_valid():
form.save()
        return redirect('person_list')  # after saving, redirect back to the person list
return render(request, 'person_form.html', {'form': form})
@login_required
def person_update(request, id):
    pessoa = get_object_or_404(Pessoa, pk=id)  # pk looks the person up by its id (the primary key in the database); Pessoa lives in models.py
    form = PersonForm(request.POST or None, request.FILES or None, instance=pessoa)  # PersonForm is defined in forms.py
if form.is_valid():
form.save()
return redirect('person_list')
return render(request, 'person_form.html', {'form': form})
@login_required
def person_delete(request, id):
pessoa = get_object_or_404(Pessoa, pk=id)
    form = PersonForm(request.POST or None, request.FILES or None, instance=pessoa)  # the form is only used to show the person's data
    # on the confirmation page; if that is not needed, the page could simply keep the delete button
    if request.method == 'POST':  # True when the request uses HTTP POST, False otherwise
pessoa.delete()
return redirect('person_list')
return render(request, 'person_delete_confirm.html', {'form': form})
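# A minimal urls.py sketch for wiring these views up. The URL names
# ('person_list', etc.) come from the redirects above; the patterns and
# converter names are assumptions, so the sketch is kept as a comment here
# and belongs in the app's urls.py, not in views.py.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.person_list, name='person_list'),
#       path('new/', views.person_new, name='person_new'),
#       path('<int:id>/update/', views.person_update, name='person_update'),
#       path('<int:id>/delete/', views.person_delete, name='person_delete'),
#   ]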
|
py | 1a4da9faeaa430c1f4f15fb3e869d5601be2d482 |
import os
import sys
import yaml
import re
##
def LoadConfig(confFile: str) -> dict:
"""
Load some YAML, or return None
"""
if isinstance( confFile, list):
return SearchConfigs( confFile )
try:
with open( confFile, 'r') as stream:
return yaml.load(stream, Loader=yaml.SafeLoader);
except OSError as e:
# TODO: Log location with trace or debug level?
return None
except Exception as e:
print( "Failed to load config with error: {}".format(e))
return None
def SearchConfigs(searchPaths: list) -> dict:
"""
    Given an array of search locations, try to load each file; the first file that loads successfully is returned.
"""
config = None;
if isinstance(searchPaths, list):
for path in searchPaths:
if config is None:
config = LoadConfig( path )
if config is not None:
return config;
if config is None:
print( "Failed to load config, searched: ['%s']" % "', '".join(searchPaths), file=sys.stderr)
else:
return LoadConfig(searchPaths)
def MergeConfigs(searchPaths: list) -> dict:
"""
    Given an array of search locations, files are "merged" first to last, if they exist.
Any keys defined in last config will overwrite values
in the first config, including blank and False values.
If a key exists in the first config that isn't overwritten by a later config, it will be included in final configuration.
    - Useful for ['default.yaml', 'app.PROD.yaml'] etc.
"""
config = None;
if isinstance( searchPaths, list):
for path in searchPaths:
if config is None:
config = LoadConfig( path )
else:
overlayConf = LoadConfig( path )
if overlayConf is not None:
config.update( overlayConf )
    if config is None:
        print( "Failed to merge any config, searched: ['%s']" % "', '".join(searchPaths), file=sys.stderr)
    return config
def AutoLoad(scriptName: str) -> dict:
"""
Attempt to autoload yaml config based on typical search paths/conventions
"""
scriptName = os.path.basename(scriptName)
scriptName = re.sub(r"\.py$","", scriptName)
defaultPaths = [
'./default.yaml',
'./etc/' + scriptName + '.yaml',
os.path.expanduser('~/etc/' + scriptName + '.yaml'),
os.path.expanduser('~/.' + scriptName + '.yaml'),
'/etc/' + scriptName + '.yaml'
]
return MergeConfigs(defaultPaths)
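# Minimal usage sketch for the helpers above; the YAML file names are
# hypothetical. SearchConfigs returns the first file that loads, while
# MergeConfigs overlays later files onto earlier ones, so keys in
# 'app.PROD.yaml' override the same keys in 'default.yaml'.
if __name__ == "__main__":
    first_found = SearchConfigs(['./missing.yaml', './etc/example.yaml'])
    merged = MergeConfigs(['./default.yaml', './app.PROD.yaml'])
    auto = AutoLoad(sys.argv[0])  # searches ./etc, ~/etc, /etc based on the script name
    print(first_found, merged, auto)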
|
py | 1a4daa67a7b6a847fe4f95a9e4780b2a33b65690 | __author__ = 'rramchandani'
from .operation import Operations
class VimBase:
def __init__(self, vmomi_object=None, service_instance=None, credentials=None):
self.vmomi_object = vmomi_object
self.service_instance = service_instance
self.credentials = dict() if credentials is None else credentials
class Details:
def __init__(self, vobj):
self._vobj = vobj
@property
def runtime(self):
"""
:return: *vim.vm.RuntimeInfo*, the runtime details of the current machine.
| (vim.vm.RuntimeInfo) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| device = (vim.vm.DeviceRuntimeInfo) [
| (vim.vm.DeviceRuntimeInfo) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| runtimeState = (vim.vm.DeviceRuntimeInfo.VirtualEthernetCardRuntimeState) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| vmDirectPathGen2Active = false,
| vmDirectPathGen2InactiveReasonVm = (str) [],
| vmDirectPathGen2InactiveReasonOther = (str) [
| 'vmNptIncompatibleNetwork'
| ],
| vmDirectPathGen2InactiveReasonExtended = <unset>,
| reservationStatus = <unset>
| },
| key = 4000
| }
| ],
| host = 'vim.HostSystem:host-14',
| connectionState = 'connected',
| **powerState** = 'poweredOn',
| faultToleranceState = 'notConfigured',
| dasVmProtection = <unset>,
| toolsInstallerMounted = false,
| suspendTime = <unset>,
| bootTime = 2017-02-17T14:39:35.245193Z,
| suspendInterval = 0L,
| question = <unset>,
| memoryOverhead = <unset>,
| maxCpuUsage = 9196,
| maxMemoryUsage = 4096,
| numMksConnections = 0,
| recordReplayState = 'inactive',
| cleanPowerOff = <unset>,
| needSecondaryReason = <unset>,
| onlineStandby = false,
| minRequiredEVCModeKey = <unset>,
| consolidationNeeded = false,
| offlineFeatureRequirement = (vim.vm.FeatureRequirement) [
| (vim.vm.FeatureRequirement) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| key = 'cpuid.lm',
| featureName = 'cpuid.lm',
| value = 'Bool:Min:1'
| }
| ],
| featureRequirement = (vim.vm.FeatureRequirement) [
| (vim.vm.FeatureRequirement) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| key = 'cpuid.SSE3',
| featureName = 'cpuid.SSE3',
| value = 'Bool:Min:1'
| },...
| ],
| featureMask = (vim.host.FeatureMask) [],
| vFlashCacheAllocation = 0L,
| paused = false,
| snapshotInBackground = false,
| quiescedForkParent = <unset>
| }
"""
return self._vobj.summary.runtime
@property
def guest(self):
"""
:return: *vim.vm.Summary.GuestSummary*, the guest details of the current machine.
| (vim.vm.Summary.GuestSummary) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| guestId = 'windows7_64Guest',
| **guestFullName** = 'Microsoft Windows 7 (64-bit)',
| toolsStatus = 'toolsOk',
| toolsVersionStatus = 'guestToolsCurrent',
| toolsVersionStatus2 = 'guestToolsCurrent',
| toolsRunningStatus = 'guestToolsRunning',
| **hostName** = 'W7x64',
| **ipAddress** = '10.112.19.116'
| }
"""
return self._vobj.summary.guest
@property
def config(self):
"""
:return: *vim.vm.Summary.ConfigSummary*, the config details of the current machine.
| (vim.vm.Summary.ConfigSummary) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| **name** = 'Jenkins',
| **template** = false,
| vmPathName = '[datastore1] Jenkins/Jenkins.vmx',
| memorySizeMB = 4096,
| cpuReservation = 0,
| memoryReservation = 0,
| numCpu = 4,
| numEthernetCards = 1,
| numVirtualDisks = 1,
| uuid = '420c6ef6-eef0-03ff-20f2-5d2479b2afdc',
| instanceUuid = '500ce065-5f5b-17fa-fa8f-d7033e548ecb',
| guestId = 'windows7_64Guest',
| guestFullName = 'Microsoft Windows 7 (64-bit)',
| annotation = '',
| product = <unset>,
| installBootRequired = false,
| ftInfo = <unset>,
| managedBy = <unset>
| }
"""
return self._vobj.summary.config
@property
def storage(self):
"""
:return: *vim.vm.Summary.StorageSummary*, the storage details of the current machine.
| (vim.vm.Summary.StorageSummary) {
| dynamicType = <unset>,
| dynamicProperty = (vmodl.DynamicProperty) [],
| committed = 38614424818L,
| uncommitted = 239075873L,
| unshared = 34120663040L,
| timestamp = 2017-05-18T09:16:22.357187Z
| }
"""
return self._vobj.summary.storage
def __repr__(self):
return "<{0}_Details: runtime, guest, config, storage>".format(self.config.name)
class VirtualMachine(object):
def __init__(self, vmomi_obj, service_instance, **kwargs):
self.vim = VimBase()
self.vim.vmomi_object = vmomi_obj
self.vim.service_instance = service_instance
self._operations = None
self._dirty = False
self.details = Details(vmomi_obj)
self.timeout = kwargs.get('timeout', None)
def set_credentials(self, username, password, credentials_type, default=False):
"""
Adds the credentials for the Guest operations on this virtual machine.
:param username: (str) Guest username. ex: domain_name\\user_name
:param password: (str) Guest password.
:param credentials_type: (any immutable) A dictionary key which helps to different credentials for the system.
:param default: (bool) If specified then for all the guest operation these credentials will be used by default.
Unless with the operation one specifies the *credential_type* with it.
| **Examples**
| >> vm.set_credentials(username="myDomain\\domainUser", password="secret", credentials_type="user", default=True)
| >> vm.set_credentials(username="myDomain\\domainAdmin", password="secret", credentials_type="admin")
"""
self._dirty = True
self.vim.credentials[credentials_type] = {'username': username, 'password': password, 'default': default}
@property
def operations(self):
"""
Set of operations supported over Virtual machine.
"""
if not self._operations or self._dirty:
self._operations = Operations(self.vim, timeout=self.timeout)
self._dirty = False
return self._operations
def __repr__(self):
return "<VirtualMachine: {0}>".format(self.vim.vmomi_object.name)
|
py | 1a4daaebb2b982ef6adad0572f889c8d872dd6b6 | # Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from vitrageclient.common import yaml_utils
from vitrageclient import exceptions as exc
class Template(object):
url = 'v1/template/'
def __init__(self, api):
self.api = api
def list(self):
"""Get templates list"""
return self.api.get(self.url).json()
def versions(self):
"""Get templates versions"""
return self.api.get(self.url + 'versions').json()
def show(self, _id):
"""Show template content"""
url = self.url + _id
return self.api.get(url).json()
def add(self, path=None, template_type=None,
params=None, template_str=None, overwrite=False):
"""Add a new template
:param path: (optional) The template file path or templates dir path
:param template_type: (optional) The template type, in case it is not
written inside the template metadata section
:param params: (optional) Actual values for the template parameters
:param template_str: (optional) A string representation of the template
:param overwrite: (optional) overwrite the template if exists
yaml
Either path or template_str must exist (but not both)
:return:
"""
files_content = \
self._load_template(path=path, template_str=template_str)
api_params = dict(templates=files_content,
template_type=template_type,
params=params, overwrite=overwrite)
return self.api.put(self.url, json=api_params).json()
def delete(self, ids):
"""Delete existing"""
params = dict(id=ids)
return self.api.delete(self.url, json=params).json()
def validate(self, path=None, template_type=None,
params=None, template_str=None):
"""Template validation
Make sure that the template file is correct in terms of syntax
and content.
It is possible to pass a specific file path in order to validate one
template, or directory path for validation of several templates (the
directory must contain only templates)
:param path: (optional) The template file path or templates dir path
:param template_type: (optional) The template type, in case it is not
written inside the template metadata section
:param params: (optional) Actual values for the template parameters
:param template_str: (optional) A string representation of the template
yaml
Either path or template_str must exist (but not both)
:return:
"""
files_content = \
self._load_template(path=path, template_str=template_str)
api_params = dict(templates=files_content,
template_type=template_type,
params=params)
return self.api.post(self.url, json=api_params).json()
@classmethod
def _load_yaml_files(cls, path):
if os.path.isdir(path):
files_content = []
for file_name in os.listdir(path):
file_path = '%s/%s' % (path, file_name)
if os.path.isfile(file_path):
template = cls._load_yaml_file(file_path)
files_content.append((file_path, template))
else:
files_content = [(path, cls._load_yaml_file(path))]
return files_content
@classmethod
def _load_yaml_file(cls, path):
with open(path, 'r') as stream:
return cls._load_yaml(stream)
@classmethod
def _load_yaml(cls, yaml_content):
try:
return yaml_utils.load(yaml_content)
except ValueError as e:
message = 'Could not load template: %s. Reason: %s' \
% (yaml_content, e)
raise exc.CommandError(message)
@classmethod
def _load_template(cls, path, template_str):
if path:
files_content = cls._load_yaml_files(path)
elif template_str:
files_content = [(None, cls._load_yaml(template_str))]
else:
raise exc.CommandError(
'Add template API must be called with either \'path\' or '
'\'template_str\'')
return files_content
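# Illustrative sketch only: 'api' is assumed to be the low-level HTTP client
# this class is normally constructed with inside vitrageclient; the template
# path and parameter values are placeholders.
def _example_usage(api):
    templates = Template(api)
    # Validate first (either 'path' or 'template_str' must be supplied)...
    validation = templates.validate(path='/etc/vitrage/templates/my_template.yaml',
                                    params={'template_name': 'demo'})
    # ...then add it, overwriting any previously loaded version.
    added = templates.add(path='/etc/vitrage/templates/my_template.yaml',
                          params={'template_name': 'demo'}, overwrite=True)
    return validation, added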
|
py | 1a4dab14c2a56a2cd577374917f55310d85695bf | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: target_log.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from assets_inventory_sdk.model.easy_command import action_log_pb2 as assets__inventory__sdk_dot_model_dot_easy__command_dot_action__log__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='target_log.proto',
package='easy_command',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_command'),
serialized_pb=_b('\n\x10target_log.proto\x12\x0c\x65\x61sy_command\x1a\x38\x61ssets_inventory_sdk/model/easy_command/action_log.proto\"\xe6\x01\n\tTargetLog\x12\x10\n\x08targetId\x18\x01 \x01(\t\x12\x12\n\ntargetName\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x11\n\tsysStatus\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\x05\x12\x0b\n\x03msg\x18\x06 \x01(\t\x12+\n\nactionsLog\x18\x07 \x03(\x0b\x32\x17.easy_command.ActionLog\x12\x10\n\x08usedTime\x18\x08 \x01(\x05\x12\x11\n\tstartTime\x18\t \x01(\t\x12\x12\n\nupdateTime\x18\n \x01(\t\x12\x0f\n\x07\x65ndTime\x18\x0b \x01(\tBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_commandb\x06proto3')
,
dependencies=[assets__inventory__sdk_dot_model_dot_easy__command_dot_action__log__pb2.DESCRIPTOR,])
_TARGETLOG = _descriptor.Descriptor(
name='TargetLog',
full_name='easy_command.TargetLog',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targetId', full_name='easy_command.TargetLog.targetId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetName', full_name='easy_command.TargetLog.targetName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='easy_command.TargetLog.status', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sysStatus', full_name='easy_command.TargetLog.sysStatus', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='easy_command.TargetLog.code', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='easy_command.TargetLog.msg', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actionsLog', full_name='easy_command.TargetLog.actionsLog', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='usedTime', full_name='easy_command.TargetLog.usedTime', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='easy_command.TargetLog.startTime', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='easy_command.TargetLog.updateTime', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='easy_command.TargetLog.endTime', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=323,
)
_TARGETLOG.fields_by_name['actionsLog'].message_type = assets__inventory__sdk_dot_model_dot_easy__command_dot_action__log__pb2._ACTIONLOG
DESCRIPTOR.message_types_by_name['TargetLog'] = _TARGETLOG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TargetLog = _reflection.GeneratedProtocolMessageType('TargetLog', (_message.Message,), {
'DESCRIPTOR' : _TARGETLOG,
'__module__' : 'target_log_pb2'
# @@protoc_insertion_point(class_scope:easy_command.TargetLog)
})
_sym_db.RegisterMessage(TargetLog)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 1a4dad13a741e76fb9e1bf195aaf6536c501ceb9 | """Support for Z-Wave fans."""
import math
from openpeerpower.components.fan import DOMAIN, SUPPORT_SET_SPEED, FanEntity
from openpeerpower.core import callback
from openpeerpower.helpers.dispatcher import async_dispatcher_connect
from openpeerpower.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import ZWaveDeviceEntity
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
SPEED_RANGE = (1, 99) # off is not included
async def async_setup_entry(opp, config_entry, async_add_entities):
"""Set up Z-Wave Fan from Config Entry."""
@callback
def async_add_fan(fan):
"""Add Z-Wave Fan."""
async_add_entities([fan])
async_dispatcher_connect(opp, "zwave_new_fan", async_add_fan)
def get_device(values, **kwargs):
"""Create Z-Wave entity device."""
return ZwaveFan(values)
class ZwaveFan(ZWaveDeviceEntity, FanEntity):
"""Representation of a Z-Wave fan."""
def __init__(self, values):
"""Initialize the Z-Wave fan device."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self.update_properties()
def update_properties(self):
"""Handle data changes for node values."""
self._state = self.values.primary.data
def set_percentage(self, percentage):
"""Set the speed percentage of the fan."""
if percentage is None:
# Value 255 tells device to return to previous value
zwave_speed = 255
elif percentage == 0:
zwave_speed = 0
else:
zwave_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
self.node.set_dimmer(self.values.primary.value_id, zwave_speed)
def turn_on(self, speed=None, percentage=None, preset_mode=None, **kwargs):
"""Turn the device on."""
self.set_percentage(percentage)
def turn_off(self, **kwargs):
"""Turn the device off."""
self.node.set_dimmer(self.values.primary.value_id, 0)
@property
def percentage(self):
"""Return the current speed percentage."""
return ranged_value_to_percentage(SPEED_RANGE, self._state)
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return int_states_in_range(SPEED_RANGE)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORTED_FEATURES
|
py | 1a4dad68c7c6aa85d33c6988177bc2a278e7abc9 | # coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
VERSION = "0.0.0"
PLUGIN_VERSION = "0.0.0"
class InstallPluginCommand(install):
def run(self):
install.run(self)
try:
check_call(['pulumi', 'plugin', 'install', 'resource', 'example', PLUGIN_VERSION])
except OSError as error:
if error.errno == errno.ENOENT:
print(f"""
There was an error installing the example resource provider plugin.
It looks like `pulumi` is not installed on your system.
Please visit https://pulumi.com/ to install the Pulumi CLI.
You may try manually installing the plugin by running
`pulumi plugin install resource example {PLUGIN_VERSION}`
""")
else:
raise
def readme():
try:
with open('README.md', encoding='utf-8') as f:
return f.read()
except FileNotFoundError:
return "example Pulumi Package - Development Version"
setup(name='pulumi_example',
version=VERSION,
long_description=readme(),
long_description_content_type='text/markdown',
cmdclass={
'install': InstallPluginCommand,
},
packages=find_packages(),
package_data={
'pulumi_example': [
'py.typed',
'pulumi-plugin.json',
]
},
install_requires=[
'parver>=0.2.1',
'pulumi',
'semver>=2.8.1'
],
zip_safe=False)
|
py | 1a4daee26071cf7bfff3459a1dfd5c78250656d0 | import copy
import random
import time
from functools import partial
from sklearn.utils import shuffle
import numpy as np
from sklearn import linear_model
from dnl import Sampling_Methods, Solver
from dnl.PredictPlustOptimizeUtils import compute_C_k
from dnl.Solver import get_optimization_objective
from dnl.Utils import TransitionPoint, get_mini_batches
from operator import attrgetter
import multiprocessing as mp
CONV_CONST = 10E-6
MEDIAN_LOSS = 'MEDIAN'
MEAN_LOSS = 'MEAN LOSS'
class PredictPlusOptModel:
def __init__(self, alphas=None, const=None, opt_params=None, loss=MEDIAN_LOSS, max_step_size_magnitude=1,
min_step_size_magnitude=-1,
step_size_divider=10, sampling_method=Sampling_Methods.DIVIDE_AND_CONQUER,
is_parallel=True, learning_rate=0.1, mini_batch_size=32, beta=0, epoch_limit=3, run_time_limit=100000,
verbose=False, is_Val = True):
"""
:param alphas: model parameters
:param const: model constant
:param capacities: capacity of the optimization problem
:param max_step_size_magnitude: sample space upper bound
:param min_step_size_magnitude: sample space lower step size
:param step_size_divider:
:param sampling_method:
"""
self.alphas = alphas
self.const = const
self.opt_params = opt_params
self.is_val = is_Val
self.step_size_divider = step_size_divider
self.is_parallel = is_parallel
if mini_batch_size == -1:
self.learning_rate = 1
else:
self.learning_rate = learning_rate
self.mini_batch_size = mini_batch_size
self.epoch_limit = epoch_limit
self.run_time_limit = run_time_limit
self.training_obj_value = []
self.test_regrets = []
self.val_regrets = []
self.epochs = []
self.sub_epochs = []
self.run_time = []
self.max_step_size_magnitude = max_step_size_magnitude
self.min_step_size_magnitude = min_step_size_magnitude
self.sampling_method = sampling_method
self.test_MSE = 0
self.loss = loss
self.number_of_epochs = 0
self.test_regret = 0
self.training_MSE = 0
self.test_run_time = 0
self.beta = beta
self.verbose = verbose
def init_params_lin_regression(self, X,Y):
"""
initialize the model with linear regression
:param train_set:
:return:
"""
params = initialize_parameters(X,Y)
self.__setattr__('alphas', params.get('alphas'))
self.__setattr__('const', params.get('const'))
self.__setattr__('capacities', params.get('capacities'))
def coordinate_descent(self, train_X, train_Y, train_weights, val_X, val_Y, val_weights, print_test=False, test_X=None, test_Y=None,
test_weights=None, core_number=7):
"""
Uses coordinate descent to optimize parameters
        :param train_X: training set features
        :param train_Y: training set output
        :param train_weights: training set weights
:return: profit: average profit of the training set
"""
# self_decided_features = [4, 5, 6, 7]
# self_decided_features = range(8)
# self_decided_features = [4]
is_break = False
self_decided_features = list(range(len(self.alphas)))
prev_profit = -10
model_params = {'alphas': self.alphas,
'const': self.const}
profit = np.median(get_optimization_objective(X=train_X, Y=train_Y, weights=train_weights,
opt_params=self.opt_params, model_params=model_params))
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights))
val_regret = np.median(self.get_regret(val_X, val_Y, val_weights))
self.test_regrets.append(test_regret)
self.training_obj_value.append(profit)
self.run_time.append(0)
self.epochs.append(0)
self.sub_epochs.append(0)
self.val_regrets.append(val_regret)
start_time = time.time()
print("original objective value: " + str(profit))
EPOCH = 0
direction = np.zeros(len(self_decided_features))
momentum = np.zeros(len(self_decided_features))
sampler = Sampling_Methods.Sampler(max_step_size_magnitude=self.max_step_size_magnitude,
min_step_size_magnitude=self.min_step_size_magnitude,
step_size_divider=self.step_size_divider,
sampling_method=self.sampling_method,
opt_params=self.opt_params)
if self.is_parallel:
mypool = mp.Pool(processes=min(8, core_number))
else:
mypool = None
print("------------------------")
train_X_og = train_X
train_Y_og = train_Y
train_weights_og = train_weights
if self.mini_batch_size == -1:
mini_batch_size = len(train_Y)
else:
mini_batch_size = self.mini_batch_size
mini_batches_X, mini_batches_Y, mini_batches_weights = get_mini_batches(X=train_X, Y=train_Y,
weights=train_weights,
size=mini_batch_size)
sub_epoch = 0
while (EPOCH < self.epoch_limit) and self.run_time[-1] < self.run_time_limit and not converge(profit, prev_profit, CONV_CONST, self.mini_batch_size):
mini_batches_X, mini_batches_Y, mini_batches_weights = shuffle(mini_batches_X, mini_batches_Y,
mini_batches_weights)
for mini_batch_X, mini_batch_Y, mini_batch_weights in zip(mini_batches_X, mini_batches_Y,
mini_batches_weights):
train_X = mini_batch_X
train_Y = mini_batch_Y
train_weights = mini_batch_weights
profit = np.median(get_optimization_objective(X=train_X, Y=train_Y, weights=train_weights,
opt_params=self.opt_params, model_params=model_params))
# cut for minibatch
prev_profit = profit
print("-----------------------")
# use for raandom
# for k in random.sample(range(len(self.alphas)), len(self.alphas) - 1):
# for k in range(len(self.alphas)):
random.seed(time.time())
random.shuffle(self_decided_features)
for k in self_decided_features:
model_params = {'alphas': self.alphas,
'const': self.const}
current_alpha = self.alphas[k, 0]
best_transition_points_set = set()
if self.is_parallel:
map_func = partial(get_and_clean_transition_points, sampler=sampler, model_params=model_params,
k=k,
opt_params=self.opt_params,
current_alpha=current_alpha)
iter = [[benchmark_X, benchmark_Y, benchmark_weights] for
benchmark_X, benchmark_Y, benchmark_weights in
zip(train_X, train_Y, train_weights)]
best_transition_points_set = mypool.starmap(map_func, iter)
best_transition_points_set = set().union(*best_transition_points_set)
benchmark_best_transition_point = find_the_best_transition_point_benchmarks(train_X, train_Y,
k=k,
model_params=model_params,
train_weights=train_weights,
opt_params=self.opt_params,
transition_point_list=list(
best_transition_points_set),
prev_profit=profit,
pool=mypool)
else:
for benchmark_X, benchmark_Y, benchmark_weights in zip(train_X, train_Y, train_weights):
best_transition_point, __, predicted_regrets, regrets, plot_x = sampler.get_transition_points(
model_params=model_params, k=k,
train_X=benchmark_X,
train_Y=benchmark_Y,
train_weights=benchmark_weights)
best_transition_point_set_benchmark = clean_transition_points(
transition_points=best_transition_point[-1],
benchmark_X=benchmark_X,
benchmark_Y=benchmark_Y, weights=benchmark_weights,
model_params=model_params, opt_params=self.opt_params,
current_alpha=current_alpha)
best_transition_points_set = best_transition_points_set.union(
best_transition_point_set_benchmark)
                            # To reduce training time, move this step into the sampling method so we don't iterate through the transition point list twice
benchmark_best_transition_point = find_the_best_transition_point_benchmarks(train_X, train_Y,
k=k,
model_params=model_params,
train_weights=train_weights,
opt_params=self.opt_params,
transition_point_list=list(
best_transition_points_set),
prev_profit=profit)
gradient = benchmark_best_transition_point.x - self.alphas[k, 0]
# print((
# benchmark_best_transition_point.x - self.alphas[k, 0]))
# print('dir', direction[k])
# if abs(gradient) > 0:
# gradient = gradient / abs(gradient)
direction[k] = -self.beta * momentum[k] - (1 - self.beta) * gradient
# print(momentum, gradient, direction)
# print('mom: ', momentum[k], 'dir: ', direction[k])
self.alphas[k, 0] = self.alphas[k, 0] - direction[k] * self.learning_rate
momentum[k] = direction[k] * 1
profit = benchmark_best_transition_point.true_profit
#record data for each parameter update if its full batch
if self.mini_batch_size == -1:
if self.is_val:
# print('val')
val_regret = np.median(self.get_regret(val_X, val_Y, val_weights, pool=mypool))
self.val_regrets.append(val_regret)
test_run_time = time.time()
if print_test:
# print('test')
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights, pool=mypool))
self.test_regrets.append(test_regret)
train_regret = np.median(self.get_regret(train_X, train_Y, train_weights, pool=mypool))
self.training_obj_value.append(train_regret)
if self.verbose:
print('updating parameter', k, 'test regret', test_regret)
print("Updating Parameter: " + str(k) + " profit: " + str(profit))
self.test_run_time = self.test_run_time + time.time() - test_run_time
sub_epoch = sub_epoch + 1
self.sub_epochs.append(sub_epoch)
self.epochs.append(EPOCH)
self.run_time.append((time.time() - start_time - self.test_run_time))
print("EPOCH:", EPOCH, "sub epoch:", sub_epoch, "objective value:", profit, 'val regret',
self.val_regrets[-1], 'test regret', self.test_regrets[-1], flush=True)
if not self.mini_batch_size == -1:
# Record data after each batch for mini batches
if self.is_val:
# print('val')
val_regret = np.median(self.get_regret(val_X,val_Y,val_weights,pool=mypool))
self.val_regrets.append(val_regret)
test_run_time = time.time()
if (print_test):
# print('test')
test_regret = np.median(self.get_regret(test_X, test_Y, test_weights,pool=mypool))
self.test_regrets.append(test_regret)
train_regret = np.median(self.get_regret(train_X, train_Y, train_weights,pool=mypool))
self.training_obj_value.append(train_regret)
if self.verbose:
print('updating parameter', k, 'test regret', test_regret)
print("Updating Parameter: " + str(k) + " profit: " + str(profit))
self.test_run_time = self.test_run_time + time.time() - test_run_time
sub_epoch = sub_epoch + 1
self.sub_epochs.append(sub_epoch)
self.epochs.append(EPOCH)
self.run_time.append((time.time() - start_time - self.test_run_time))
print("EPOCH:", EPOCH, "sub epoch:", sub_epoch, "objective value:", profit, 'val regret', self.val_regrets[-1],'test regret', self.test_regrets[-1])
if self.run_time[-1] > self.run_time_limit:
is_break = True
break
if is_break:
break
EPOCH = EPOCH + 1
self.number_of_epochs = EPOCH
print("EPOCH:", EPOCH, "objective value:", profit, 'val regret', self.val_regrets[-1], 'test regret', self.test_regrets[-1])
print('Training finished ')
print("-----------------------")
if self.is_parallel:
mypool.close()
return profit
def get_regret(self, X, Y, weights=None, pool=None):
model_params = {'alphas': self.alphas,
'const': self.const}
if pool is None:
# print('X shape', X[0].shape)
#
# print('y shape', len(Y))
average_objective_value_with_predicted_items = get_optimization_objective(X=X, Y=Y, weights=weights,
opt_params=self.opt_params,
model_params=model_params
)
optimal_average_objective_value = Solver.get_optimal_average_objective_value(X=X, Y=Y, weights=weights,
opt_params=self.opt_params,
)
# print('regret predicted item value set',average_objective_value_with_predicted_items,'regret with real item value',optimal_average_objective_value)
# print('pred obj', np.sum(average_objective_value_with_predicted_items))
# print('true obj', np.sum(optimal_average_objective_value))
# print(optimal_average_objective_value - average_objective_value_with_predicted_items)
# print('true obj', np.sum(optimal_average_objective_value))
regret = np.median(optimal_average_objective_value - average_objective_value_with_predicted_items)
# print('regret', regret)
# print(regret)
else:
map_func = partial(get_regret_worker, model_params=model_params, opt_params=self.opt_params)
iter = zip(X, Y, weights)
# [[x, y] for x, y in zip([4, 1, 0], [5, 1, 1])]
objective_values = pool.starmap(map_func, iter)
objective_values_predicted_items, optimal_objective_values = zip(*objective_values)
# print('optimal_average_objective_value', objective_values_predicted_items)
# print('average_objective_value_with_predicted_items', optimal_objective_values)
print(np.mean(np.concatenate(optimal_objective_values)))
regret = np.median(np.concatenate(optimal_objective_values) - np.concatenate(objective_values_predicted_items))
# print('true obj',np.sum(np.concatenate(optimal_objective_values)))
self.test_regret = regret
return regret
def get_MSE(self, X, Y):
predicted_values = compute_C_k(X.T, self.alphas, self.const, isSampling=False)
MSE = np.mean((Y - predicted_values) ** 2)
self.test_MSE = MSE
return MSE
def print(self):
first_line = ['Method', 'Max Step Size Order', 'Min Step Size Order', 'Run Time Limit', 'Epoch Limit',
'Mini Batch Size', 'Learning rate', 'Parallelism', 'Test MSE']
second_line = [self.sampling_method, self.max_step_size_magnitude, self.min_step_size_magnitude,
self.run_time_limit, self.epoch_limit, self.mini_batch_size, self.learning_rate,
self.is_parallel, self.test_MSE]
third_line = ['epochs', 'sub epochs', 'run time', 'training objective', 'test regret', 'val regret']
rest = np.array(
[self.epochs, self.sub_epochs, self.run_time, self.training_obj_value, self.test_regrets, self.val_regrets]).T.tolist()
print = []
print.append(first_line)
print.append(second_line)
print.append(third_line)
print.extend(rest)
return print
def get_file_name(self, file_type='.csv'):
file_name = str(self.sampling_method) + '-' + str(self.max_step_size_magnitude) + str(
self.min_step_size_magnitude) + file_type
return file_name
def get_and_clean_transition_points(benchmark_X, benchmark_Y, benchmark_weights, sampler, model_params, k, opt_params,
current_alpha):
best_transition_point, __, predicted_regrets, regrets, plot_x = sampler.get_transition_points(
model_params=model_params, k=k,
train_X=benchmark_X,
train_Y=benchmark_Y,
train_weights=benchmark_weights)
best_transition_point_set_benchmark = clean_transition_points(
transition_points=best_transition_point[-1],
benchmark_X=benchmark_X,
benchmark_Y=benchmark_Y, weights=benchmark_weights,
model_params=model_params, opt_params=opt_params,
current_alpha=current_alpha)
return best_transition_point_set_benchmark
def find_the_best_transition_point_benchmarks(train_X, train_Y, model_params, transition_point_list,
opt_params,
train_weights, prev_profit, k, pool=None):
alphas = model_params.get('alphas')
best_average_profit = prev_profit
best_transition_point = TransitionPoint(alphas[k, 0], true_profit=prev_profit)
if not (len(transition_point_list) == 1 and alphas[k, 0] == transition_point_list[0]):
if pool is not None:
map_func = partial(find_the_best_transition_point_benchmarks_worker, train_X=train_X, train_Y=train_Y,
train_weights=train_weights, model_params=model_params, opt_params=opt_params, k=k)
results = pool.map(map_func, transition_point_list)
results.append(best_transition_point)
# print('x', [transition_point.x for transition_point in results], ' objective_value' ,
# [transition_point.true_profit for transition_point in results])
best_transition_point = max(results, key=attrgetter('true_profit'))
else:
for transition_point_x in transition_point_list:
transition_point = find_the_best_transition_point_benchmarks_worker(transition_point_x, train_X=train_X,
train_Y=train_Y,
train_weights=train_weights,
model_params=model_params,
opt_params=opt_params, k=k)
if transition_point.true_profit > best_average_profit:
best_average_profit = transition_point.true_profit
best_transition_point = copy.deepcopy(transition_point)
return best_transition_point
def find_the_best_transition_point_benchmarks_worker(transition_point_x, train_X, train_Y, train_weights, model_params,
opt_params, k):
alphas = model_params.get('alphas')
alphas[k, 0] = transition_point_x
model_params['alphas'] = alphas
average_profit = np.median(get_optimization_objective(X=train_X, Y=train_Y,
weights=train_weights, opt_params=opt_params,
model_params=model_params))
# print('k: ' + str(k) + ' transition_point: ' + str(transition_point_x) + ' profit: ' + str(average_profit))
return TransitionPoint(transition_point_x, true_profit=average_profit)
def get_regret_worker(X, Y, weights, model_params, opt_params ):
# print('im in worker')
# print('X shape', X.shape)
# print('y shape', Y.shape)
# print('weights shape', weights.shape)
average_objective_value_with_predicted_items = get_optimization_objective(X=[X], Y=[Y], weights=[weights],
opt_params=opt_params,
model_params=model_params
)
# print('finished average_objective_value_with_predicted_items')
optimal_average_objective_value = Solver.get_optimal_average_objective_value(X=[X], Y=[Y], weights=[weights],
opt_params=opt_params,
)
# print('finished working')
return average_objective_value_with_predicted_items, optimal_average_objective_value
def converge(profit, prev_profit, conv_const, flag):
"""
    Determine whether coordinate descent has converged. Convergence is only checked
    for full-batch runs (flag <= 0); mini-batch runs always return False here.
    :param profit: current objective value
    :param prev_profit: previous objective value
    :param conv_const: convergence threshold
    :return: is_converged : boolean
"""
if flag > 0:
return False
else:
print('prev profit', prev_profit, 'profit' , profit)
print('ratio', abs((profit - prev_profit) / profit))
if abs((profit - prev_profit) / profit) < conv_const:
is_converged = True
else:
is_converged = False
return is_converged
def initialize_parameters(X,Y):
"""
initialize the parameters of the predict-opt model, AKA first stage
:param train_set: dictionary containing X, and Y
:return: params: dictionary, has initialized parameters of the model.
"""
model = linear_model.Ridge().fit(X, Y)
coef = model.coef_
const = model.intercept_
params = {'alphas': coef.T,
'const': const}
return params
def clean_transition_points(transition_points, benchmark_X, benchmark_Y, weights, opt_params, model_params,
current_alpha):
cleaner_transition_points = set()
base_profit = np.median(Solver.get_optimization_objective(X=[benchmark_X], Y=[benchmark_Y], weights=weights,
model_params=model_params, opt_params=opt_params))
for transition_point in transition_points:
if transition_point.true_profit > base_profit:
cleaner_transition_points.add(transition_point.x)
if not cleaner_transition_points:
cleaner_transition_points.add(float(current_alpha))
return cleaner_transition_points
|
py | 1a4daf9fc0d999d15602ac8785978ec66c81500a | '''
Copyright Vulcan Inc. 2018-2020
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
'''
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="gcsutils",
version="1.1.5",
description="Utility functions for Google Cloud Storage",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['gcsutils'],
install_requires=['google-cloud-storage==1.16.1'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
url='https://github.com/CoralMapping/proc_gcs_utils'
)
|
py | 1a4db0dd572a8738f2ef4152f86fd89e2090fa48 | from django.contrib import admin
# Register your models here.
from profiles_api import models
admin.site.register(models.UserProfile)
admin.site.register(models.ProfileFeedItem)
|
py | 1a4db1108dcd6d241bb5a5bdfd218020d8923598 | # postgresql/psycopg2.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows
are not immediately pre-fetched and buffered after statement execution, but
are instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the :paramref:`.Connection.execution_options.stream_results`
execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
.. seealso::
:ref:`psycopg2_disable_native_unicode`
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect.
.. seealso::
:ref:`psycopg2_isolation_level`
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
using psycopg2's ``set_client_encoding()`` method.
.. seealso::
:ref:`psycopg2_unicode`
* ``executemany_mode``, ``executemany_batch_page_size``,
``executemany_values_page_size``: Allows use of psycopg2
  extensions for optimizing "executemany"-style queries. See the referenced
section below for details.
.. seealso::
:ref:`psycopg2_executemany_mode`
* ``use_batch_mode``: this is the previous setting used to affect "executemany"
mode and is now deprecated.
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
.. seealso::
`PQconnectdbParams \
<http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------
The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections. This behavior can be
further tailored using a particular set of environment variables which are
prefixed with ``PG_...``, which are consumed by ``libpq`` to take the place of
any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
.. versionadded:: 1.3.2 support for parameter-less connections with psycopg2.
.. seealso::
`Environment Variables\
<https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
PostgreSQL documentation on how to use ``PG_...``
environment variables for connections.
.. _psycopg2_execution_options:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* ``isolation_level`` - Set the transaction isolation level for the lifespan
of a :class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* ``stream_results`` - Enable or disable usage of psycopg2 server side
cursors - this feature makes use of "named" cursors in combination with
special result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
* ``max_row_buffer`` - when using ``stream_results``, an integer value that
specifies the maximum number of rows to buffer at a time. This is
interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
buffer will grow to ultimately store 1000 rows at a time.
.. versionadded:: 1.0.6
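For illustration, a minimal sketch combining the options described above on a
single execution (``some_table`` and ``process`` are placeholder names)::

    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True, max_row_buffer=100
        ).execute(some_table.select())
        for row in result:
            process(row)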
.. _psycopg2_executemany_mode:
Psycopg2 Fast Execution Helpers
-------------------------------
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<http://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by multiple orders of magnitude.
SQLAlchemy allows this extension to be used for all ``executemany()`` style
calls invoked by an :class:`.Engine` when used with :ref:`multiple parameter
sets <execute_multiple>`, which includes the use of this feature both by the
Core as well as by the ORM for inserts of objects with non-autogenerated
primary key values, by adding the ``executemany_mode`` flag to
:func:`.create_engine`::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='batch')
.. versionchanged:: 1.3.7 - the ``use_batch_mode`` flag has been superseded
by a new parameter ``executemany_mode`` which provides support both for
psycopg2's ``execute_batch`` helper as well as the ``execute_values``
helper.
Possible options for ``executemany_mode`` include:
* ``None`` - By default, psycopg2's extensions are not used, and the usual
``cursor.executemany()`` method is used when invoking batches of statements.
* ``'batch'`` - Uses ``psycopg2.extras.execute_batch`` so that multiple copies
of a SQL query, each one corresponding to a parameter set passed to
``executemany()``, are joined into a single SQL string separated by a
semicolon. This is the same behavior as was provided by the
``use_batch_mode=True`` flag.
* ``'values'`` - For Core :func:`.insert` constructs only (including those
emitted by the ORM automatically), the ``psycopg2.extras.execute_values``
extension is used so that multiple parameter sets are grouped into a single
INSERT statement and joined together with multiple VALUES expressions. This
method requires that the string text of the VALUES clause inside the
INSERT statement is manipulated, so is only supported with a compiled
:func:`.insert` construct where the format is predictable. For all other
constructs, including plain textual INSERT statements not rendered by the
SQLAlchemy expression language compiler, the
``psycopg2.extras.execute_batch`` method is used. It is therefore important
to note that **"values" mode implies that "batch" mode is also used for
all statements for which "values" mode does not apply**.
For both strategies, the ``executemany_batch_page_size`` and
``executemany_values_page_size`` arguments control how many parameter sets
should be represented in each execution. Because "values" mode implies a
fallback down to "batch" mode for non-INSERT statements, there are two
independent page size arguments. For each, the default value of ``None`` means
to use psycopg2's defaults, which at the time of this writing are quite low at
100. For the ``execute_values`` method, a number as high as 10000 may prove
to be performant, whereas for ``execute_batch``, as the number represents
full statements repeated, a number closer to the default of 100 is likely
more appropriate::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values',
executemany_values_page_size=10000, executemany_batch_page_size=500)
.. seealso::
:ref:`execute_multiple` - General information on using the
:class:`.Connection` object to execute statements in such a way as to make
use of the DBAPI ``.executemany()`` method.
.. versionchanged:: 1.3.7 - Added support for
``psycopg2.extras.execute_values``. The ``use_batch_mode`` flag is
superseded by the ``executemany_mode`` flag.
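As a short sketch, an "executemany" execution is invoked simply by passing a
list of parameter dictionaries to :meth:`.Connection.execute`; the ``my_table``
name below is a placeholder::

    with engine.connect() as conn:
        conn.execute(
            my_table.insert(),
            [{"x": 1, "y": 2}, {"x": 3, "y": 4}, {"x": 5, "y": 6}]
        )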
.. _psycopg2_unicode:
Unicode with Psycopg2
----------------------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf8``, as a more useful default::
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's
:meth:`psycopg2:connection.set_client_encoding` method
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
# set_client_encoding() setting;
# works for *all* PostgreSQL versions
engine = create_engine("postgresql://user:pass@host/dbname",
client_encoding='utf8')
This overrides the encoding specified in the PostgreSQL client configuration.
When using the parameter in this way, the psycopg2 driver emits
``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
in all PostgreSQL versions.
Note that the ``client_encoding`` setting as passed to :func:`.create_engine`
is **not the same** as the more recently added ``client_encoding`` parameter
now supported by libpq directly. This is enabled when ``client_encoding``
is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
using the :paramref:`.create_engine.connect_args` parameter::
engine = create_engine(
"postgresql://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'})
# using the query string is equivalent
engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
The above parameter was only added to libpq as of version 9.1 of PostgreSQL,
so using the previous method is better for cross-version support.
.. _psycopg2_disable_native_unicode:
Disabling Native Unicode
^^^^^^^^^^^^^^^^^^^^^^^^
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as most DBAPIs now support unicode fully.
Bound Parameter Styles
----------------------
The default parameter style for the psycopg2 dialect is "pyformat", where
SQL is rendered using ``%(paramname)s`` style. This format has the limitation
that it does not accommodate the unusual case of parameter names that
actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
generates bound parameter names based on the name of a column, the presence
of these characters in a column name can lead to problems.
There are two solutions to the issue of a :class:`.schema.Column` that contains
one of these characters in its name. One is to specify the
:paramref:`.schema.Column.key` for columns that have such names::
measurement = Table('measurement', metadata,
Column('Size (meters)', Integer, key='size_meters')
)
Above, an INSERT statement such as ``measurement.insert()`` will use
``size_meters`` as the parameter name, and a SQL expression such as
``measurement.c.size_meters > 10`` will derive the bound parameter name
from the ``size_meters`` key as well.
.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key`
as the source of naming when anonymous bound parameters are created
in SQL expressions; previously, this behavior only applied to
:meth:`.Table.insert` and :meth:`.Table.update` parameter names.
The other solution is to use a positional format; psycopg2 allows use of the
"format" paramstyle, which can be passed to
:paramref:`.create_engine.paramstyle`::
engine = create_engine(
'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
With the above engine, instead of a statement like::
INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
{'Size (meters)': 1}
we instead see::
INSERT INTO measurement ("Size (meters)") VALUES (%s)
(1, )
Where above, the dictionary style is converted into a tuple with positional
style.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by
:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
options make use of psycopg2's ``set_isolation_level()`` connection method,
rather than emitting a PostgreSQL directive; this is because psycopg2's
API-level setting is always emitted at the start of each transaction in any
case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
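For illustration, a level may be set engine-wide or per-connection; the
connection URL below is a placeholder::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ")

    # or for an individual connection
    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")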
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger. When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. seealso::
`Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using
``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine
that the ``HSTORE`` extension is present.
This function is **skipped** if the version of psycopg2 installed is
less than version 2.4.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**. The dictionaries are converted by this extension into a
textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2``
which may be more performant.
""" # noqa
from __future__ import absolute_import
import decimal
import logging
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .hstore import HSTORE
from .json import JSON
from .json import JSONB
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...engine import result as _result
from ...util import collections_abc
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if util.py2k and self._expect_unicode is True:
# for py2k, if the enum type needs unicode data (which is set up as
# part of the Enum() constructor based on values passed as py2k
# unicode objects) we have to use our own converters since
# psycopg2's don't work, a rare exception to the "modern DBAPIs
# support unicode everywhere" theme of deprecating
# convert_unicode=True. Use the special "force_nocheck" directive
# which forces unicode conversion to happen on the Python side
# without an isinstance() check. in py3k psycopg2 does the right
# thing automatically.
self._expect_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
class _PGJSONB(JSONB):
def result_processor(self, dialect, coltype):
if dialect._has_native_jsonb:
return None
else:
return super(_PGJSONB, self).result_processor(dialect, coltype)
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_server_side_cursor(self):
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
def get_result_proxy(self):
self._log_notices(self.cursor)
if self._is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
# check also that notices is an iterable, after it's already
# established that we will be iterating through it. This is to get
# around test suites such as SQLAlchemy's using a Mock object for
# cursor
if not cursor.connection.notices or not isinstance(
cursor.connection.notices, collections_abc.Iterable
):
return
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
pass
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
pass
EXECUTEMANY_DEFAULT = util.symbol("executemany_default")
EXECUTEMANY_BATCH = util.symbol("executemany_batch")
EXECUTEMANY_VALUES = util.symbol("executemany_values")
class PGDialect_psycopg2(PGDialect):
driver = "psycopg2"
if util.py2k:
supports_unicode_statements = False
supports_server_side_cursors = True
default_paramstyle = "pyformat"
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
FEATURE_VERSION_MAP = dict(
native_json=(2, 5),
native_jsonb=(2, 5, 4),
sane_multi_rowcount=(2, 0, 9),
array_oid=(2, 4, 3),
hstore_adapter=(2, 4),
)
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
engine_config_types = PGDialect.engine_config_types.union(
[("use_native_unicode", util.asbool)]
)
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
},
)
@util.deprecated_params(
use_batch_mode=(
"1.3.7",
"The psycopg2 use_batch_mode flag is superseded by "
"executemany_mode='batch'",
)
)
def __init__(
self,
server_side_cursors=False,
use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
use_native_uuid=True,
executemany_mode=None,
executemany_batch_page_size=None,
executemany_values_page_size=None,
use_batch_mode=None,
**kwargs
):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.use_native_uuid = use_native_uuid
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
# Parse executemany_mode argument, allowing it to be only one of the
# symbol names
self.executemany_mode = util.symbol.parse_user_argument(
executemany_mode,
{
EXECUTEMANY_DEFAULT: [None],
EXECUTEMANY_BATCH: ["batch"],
EXECUTEMANY_VALUES: ["values"],
},
"executemany_mode",
)
if use_batch_mode:
self.executemany_mode = EXECUTEMANY_BATCH
self.executemany_batch_page_size = executemany_batch_page_size
self.executemany_values_page_size = executemany_values_page_size
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x) for x in m.group(1, 2, 3) if x is not None
)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = (
self.use_native_hstore
and self._hstore_oids(connection.connection) is not None
)
self._has_native_json = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"]
)
self._has_native_jsonb = (
self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"]
)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
self.supports_sane_multi_rowcount = (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["sane_multi_rowcount"]
and self.executemany_mode is EXECUTEMANY_DEFAULT
)
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
"AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
"READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
"READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
"REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
"SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace("_", " ")]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
extras = self._psycopg2_extras()
extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
def on_connect(conn):
extras.register_uuid(None, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
kw = {"oid": oid}
if util.py2k:
kw["unicode"] = True
if (
self.psycopg2_version
>= self.FEATURE_VERSION_MAP["array_oid"]
):
kw["array_oid"] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
if self._has_native_json:
extras.register_default_json(
conn, loads=self._json_deserializer
)
if self._has_native_jsonb:
extras.register_default_jsonb(
conn, loads=self._json_deserializer
)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
def do_executemany(self, cursor, statement, parameters, context=None):
if self.executemany_mode is EXECUTEMANY_DEFAULT:
cursor.executemany(statement, parameters)
return
if (
self.executemany_mode is EXECUTEMANY_VALUES
and context
and context.isinsert
and context.compiled.insert_single_values_expr
):
executemany_values = (
"(%s)" % context.compiled.insert_single_values_expr
)
# guard for statement that was altered via event hook or similar
if executemany_values not in statement:
executemany_values = None
else:
executemany_values = None
if executemany_values:
# Currently, SQLAlchemy does not pass "RETURNING" statements
# into executemany(), since no DBAPI has ever supported that
# until the introduction of psycopg2's executemany_values, so
# we are not yet using the fetch=True flag.
statement = statement.replace(executemany_values, "%s")
if self.executemany_values_page_size:
kwargs = {"page_size": self.executemany_values_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_values(
cursor,
statement,
parameters,
template=executemany_values,
**kwargs
)
else:
if self.executemany_batch_page_size:
kwargs = {"page_size": self.executemany_batch_page_size}
else:
kwargs = {}
self._psycopg2_extras().execute_batch(
cursor, statement, parameters, **kwargs
)
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
if opts:
if "port" in opts:
opts["port"] = int(opts["port"])
opts.update(url.query)
# send individual dbname, user, password, host, port
# parameters to psycopg2.connect()
return ([], opts)
elif url.query:
# any other connection arguments, pass directly
opts.update(url.query)
return ([], opts)
else:
# no connection arguments whatsoever; psycopg2.connect()
# requires that "dsn" be present as a blank string.
return ([""], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
if getattr(connection, "closed", False):
return True
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
                # psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Operation timed out",
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
|
py | 1a4db13a884ebf07c73dbfd765771ecf6fae5bc0 | from __future__ import absolute_import
import os
import posixpath
import pysvn
from cobra.core.constants import README_MARKUPS
from cobra.core.markup import rest2html, can_markup, is_markdown, is_rst, is_plain
from cobra.core.markdown import markdown
def get_readme(repository, path='', revision=None):
# 1 - md
# 2 - rst
# 3 - txt
readme_suffix_names = README_MARKUPS
readme_name = ''
if repository.root.endswith(posixpath.sep):
root = repository.root[:-1]
else:
root = repository.root
root_path = '%s%s' % (root, path)
if revision is None:
revision = repository.get_latest_revision()
c = repository.get_svn_client()
r = pysvn.Revision(pysvn.opt_revision_kind.number, revision)
ls = c.list(root_path, recurse=False, peg_revision=r, revision=r)
ls = map(lambda y: dict(y.items()), map(lambda x: x[0], ls))
for item in ls:
if pysvn.node_kind.file == item['kind']:
# sometimes double slashes appear in the returned path
node_path = item['repos_path']
            # Special case: the repository root we use is not the real SVN root
            # (we may lack permission to access the real root), so the real-root
            # prefix must be stripped from repos_path.
if repository.prefix:
repo_prefix = repository.prefix
if repo_prefix.endswith(posixpath.sep):
repo_prefix = repo_prefix[:-1]
node_path = node_path.replace(repo_prefix, '/', 1)
if node_path.startswith('//'):
node_path = node_path[1:]
head, tail = os.path.split(node_path)
file_name, file_suffix = os.path.splitext(tail)
if file_name.lower() == 'readme' and (file_suffix in readme_suffix_names):
readme_name = node_path
break
            if file_name.lower() == 'readme' and (file_suffix.lower() == '.txt' or file_suffix == ''):
readme_name = node_path
if readme_name:
content = c.cat('%s%s' % (root, readme_name), revision=r, peg_revision=r)
try:
content = content.decode('utf-8')
except UnicodeDecodeError:
content = content.decode('gbk')
if readme_name.startswith('/'):
readme_name = readme_name[1:]
if is_markdown(readme_name):
content = markdown(content)
elif is_rst(readme_name):
content = rest2html(content)
else:
content = '<pre class="plain-readme">' + content + '</pre>'
readme = {
'name': readme_name,
'content': content
}
return readme
else:
return |
py | 1a4db20d55bbe6a080e3d0ae3d5896a38f717027 | # coding: utf-8
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
A script to calculate and plot optical spectra from ab initio calculations.
"""
import os
from glob import glob
import sys
import logging
import warnings
import argparse
from collections import OrderedDict
import matplotlib as mpl
mpl.use('Agg')
from pymatgen.io.vasp import Vasprun
from pymatgen.util.string import latexify
from sumo.io import questaal
from sumo.plotting.optics_plotter import SOpticsPlotter
from sumo.electronic_structure.optics import (broaden_eps,
calculate_dielectric_properties,
write_files)
__author__ = "Alex Ganose"
__version__ = "1.0"
__maintainer__ = "Alex Ganose"
__email__ = "[email protected]"
__date__ = "Jan 10, 2018"
def optplot(modes=('absorption',), filenames=None, codes='vasp',
prefix=None, directory=None,
gaussian=None, band_gaps=None, labels=None, average=True, height=6,
width=6, xmin=0, xmax=None, ymin=0, ymax=1e5, colours=None,
style=None, no_base_style=None,
image_format='pdf', dpi=400, plt=None, fonts=None):
"""A script to plot optical absorption spectra from VASP calculations.
Args:
modes (:obj:`list` or :obj:`tuple`):
Ordered list of :obj:`str` determining properties to plot.
Accepted options are 'absorption' (default), 'eps', 'eps-real',
'eps-im', 'n', 'n-real', 'n-im', 'loss' (equivalent to n-im).
filenames (:obj:`str` or :obj:`list`, optional): Path to data file.
For VASP this is a *vasprun.xml* file (can be gzipped); for
Questaal the *opt.ext* file from *lmf* or *eps_BSE.out* from
*bethesalpeter* may be used.
Alternatively, a list of paths can be
provided, in which case the absorption spectra for each will be
plotted concurrently.
codes (:obj:`str` or :obj:`list`, optional): Original
calculator. Accepted values are 'vasp' and 'questaal'. Items should
correspond to filenames.
prefix (:obj:`str`, optional): Prefix for file names.
directory (:obj:`str`, optional): The directory in which to save files.
gaussian (:obj:`float`): Standard deviation for gaussian broadening.
band_gaps (:obj:`float`, :obj:`str` or :obj:`list`, optional): The band
gap as a :obj:`float`, plotted as a dashed line. If plotting
multiple spectra then a :obj:`list` of band gaps can be provided.
Band gaps can be provided as a floating-point number or as a path
to a *vasprun.xml* file. To skip over a line, set its bandgap to
zero or a negative number to place it outside the visible range.
labels (:obj:`str` or :obj:`list`): A label to identify the spectra.
If plotting multiple spectra then a :obj:`list` of labels can
be provided.
average (:obj:`bool`, optional): Average the dielectric response across
all lattice directions. Defaults to ``True``.
height (:obj:`float`, optional): The height of the plot.
width (:obj:`float`, optional): The width of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
ymin (:obj:`float`, optional): The minimum absorption intensity on the
y-axis.
ymax (:obj:`float`, optional): The maximum absorption intensity on the
y-axis.
colours (:obj:`list`, optional): A :obj:`list` of colours to use in the
plot. The colours can be specified as a hex code, set of rgb
values, or any other format supported by matplotlib.
style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style
specifications, to be composed on top of Sumo base style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base style.
This can make alternative styles behave more predictably.
image_format (:obj:`str`, optional): The image file format. Can be any
format supported by matplotlib, including: png, jpg, pdf, and svg.
Defaults to pdf.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
Returns:
A matplotlib pyplot object.
"""
# Don't write files if this is being done to manipulate existing plt
save_files = False if plt else True
##### BUILD LIST OF FILES AUTOMATICALLY IF NECESSARY #####
if codes == 'vasp':
if not filenames:
if os.path.exists('vasprun.xml'):
filenames = ['vasprun.xml']
elif os.path.exists('vasprun.xml.gz'):
filenames = ['vasprun.xml.gz']
else:
logging.error('ERROR: No vasprun.xml found!')
sys.exit()
elif codes == 'questaal':
if not filenames:
if len(glob('opt.*')) > 0:
filenames = glob('opt.*')
if len(filenames) == 1:
logging.info("Found optics file: " + filenames[0])
else:
logging.info("Found optics files: " + ", ".join(filenames))
if isinstance(filenames, str):
filenames = [filenames]
if isinstance(codes, str):
codes = [codes] * len(filenames)
elif len(codes) == 1:
codes = list(codes) * len(filenames)
#### ITERATE OVER FILES READING DIELECTRIC DATA ####
dielectrics = []
auto_labels = []
auto_band_gaps = []
for i, (filename, code) in enumerate(zip(filenames, codes)):
if code == 'vasp':
vr = Vasprun(filename)
dielectrics.append(vr.dielectric)
auto_labels.append(
latexify(vr.final_structure.composition.reduced_formula).
replace('$_', '$_\mathregular'))
if isinstance(band_gaps, list) and not band_gaps:
# band_gaps = [], auto band gap requested
auto_band_gaps.append(
vr.get_band_structure().get_band_gap()['energy'])
else:
auto_band_gaps.append(None)
elif code == 'questaal':
if not save_files:
out_filename = None
elif len(filenames) == 1:
out_filename = 'dielectric.dat'
else:
out_filename = 'dielectric_{0}.dat'.format(i + 1)
dielectrics.append(
questaal.dielectric_from_file(filename, out_filename))
auto_band_gaps.append(None)
auto_labels.append(filename.split('.')[-1])
if isinstance(band_gaps, list) and not band_gaps:
logging.info('Bandgap requested but not supported for Questaal'
' file {}: skipping...'.format(filename))
else:
raise Exception('Code selection "{}" not recognised'.format(code))
if not labels and len(filenames) > 1:
labels = auto_labels
#### PROCESS DIELECTRIC DATA: BROADENING AND DERIVED PROPERTIES ####
if gaussian:
dielectrics = [broaden_eps(d, gaussian)
for d in dielectrics]
# initialize spectrum data ready to append from each dataset
abs_data = OrderedDict()
for mode in modes:
abs_data.update({mode: []})
# for each calculation, get all required properties and append to data
for d in dielectrics:
for mode, spectrum in calculate_dielectric_properties(
d, set(modes), average=average).items():
abs_data[mode].append(spectrum)
if isinstance(band_gaps, list) and not band_gaps:
# empty list therefore use bandgaps collected from vasprun files
band_gaps = auto_band_gaps
elif isinstance(band_gaps, list):
# list containing filenames and/or values: mutate the list in-place
for i, item in enumerate(band_gaps):
if item is None:
pass
elif _floatable(item):
band_gaps[i] = float(item)
elif 'vasprun' in item:
band_gaps[i] = (
Vasprun(item).get_band_structure().get_band_gap()['energy']
)
else:
raise ValueError('Format not recognised for auto bandgap: '
'{}.'.format(item))
plotter = SOpticsPlotter(abs_data, band_gap=band_gaps, label=labels)
plt = plotter.get_plot(width=width, height=height, xmin=xmin,
xmax=xmax, ymin=ymin, ymax=ymax,
colours=colours, dpi=dpi, plt=plt, fonts=fonts,
style=style, no_base_style=no_base_style)
if save_files:
basename = 'absorption'
if prefix:
basename = '{}_{}'.format(prefix, basename)
image_filename = '{}.{}'.format(basename, image_format)
if directory:
image_filename = os.path.join(directory, image_filename)
plt.savefig(image_filename, format=image_format, dpi=dpi)
for mode, data in abs_data.items():
basename = 'absorption' if mode == 'abs' else mode
write_files(data, basename=basename,
prefix=prefix, directory=directory)
else:
return plt
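# Rough usage sketch (file name is hypothetical): plot absorption and loss from
# a VASP calculation with gaussian broadening of 0.1 and the band gap read
# automatically from the vasprun file (band_gaps=[] requests auto detection):
#
#     optplot(modes=('absorption', 'loss'), filenames=['vasprun.xml.gz'],
#             codes='vasp', gaussian=0.1, band_gaps=[])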
def _floatable(item):
"""Check if an item can be intepreted with float()"""
try:
float(item)
return True
except ValueError:
return False
def _get_parser():
parser = argparse.ArgumentParser(description="""
optplot is a script to produce optical absorption spectra diagrams""",
epilog="""
Author: {}
Version: {}
Last updated: {}""".format(__author__, __version__, __date__))
parser.add_argument('mode', type=str, nargs='*', default='absorption',
metavar='M',
choices={'absorption', 'loss', 'eps_real', 'eps_imag',
'n_real', 'n_imag'},
help='Optical properties to plot. Multiple choices '
                             'will be displayed as subplots. Accepted values:'
' "absorption" (optical absorption over distance)'
', "loss" (energy-loss function -Im(1/eps)), '
'"eps_real" and "eps_imag" (real and imaginary '
'parts of the dielectric function), '
                             '"n_real" (real part of complex refractive index), '
'"n_imag" (imaginary part of RI, also known as '
'the extinction coefficient kappa.)')
parser.add_argument('-f', '--filenames', metavar='F',
help='path to one or more vasprun.xml files',
default=None, nargs='+')
parser.add_argument('-p', '--prefix', metavar='P',
help='prefix for the files generated')
parser.add_argument('-d', '--directory', metavar='D',
help='output directory for files')
parser.add_argument('-c', '--code', metavar='C', default='vasp', nargs='+',
help=('Original calculator. Accepted values are '
'"vasp" and "questaal".'))
parser.add_argument('-g', '--gaussian', type=float, metavar='G',
help='standard deviation of gaussian broadening')
parser.add_argument('-b', '--bandgaps', nargs='*', metavar='E',
help=('indicate the fundamental band gap (options: '
'nothing, vasprun.xml file, or float). A '
'sequence of files and values may be provided, '
'corresponding to the optical data files. '
'To skip a line, set a value outside the plot '
'range (e.g. -1).'))
parser.add_argument('-l', '--labels', nargs='+', metavar='L',
                        help='labels for the absorption spectra')
parser.add_argument('-a', '--anisotropic', action='store_false',
                        help='separate spectra into x, y, and z directions')
parser.add_argument('--height', type=float, default=None,
help='height of the graph')
parser.add_argument('--width', type=float, default=None,
help='width of the graph')
parser.add_argument('--xmin', type=float, default=0.,
help='minimum energy on the x-axis')
parser.add_argument('--xmax', type=float, default=None,
help='maximum energy on the x-axis')
parser.add_argument('--ymin', type=str, default=['auto'], nargs='+',
help='minimum intensity on the y-axis; may specify '
'multiple values if plotting more than one axis. '
'Use "auto" or "_" for automatic value.')
parser.add_argument('--ymax', type=str, default=['auto'], nargs='+',
                        help='maximum intensity on the y-axis; may specify '
'multiple values if plotting more than one axis. '
'Use "auto" or "_" for automatic value.')
parser.add_argument('--style', type=str, nargs='+', default=None,
help='matplotlib style specifications')
parser.add_argument('--no-base-style', action='store_true',
dest='no_base_style',
help='prevent use of sumo base style')
parser.add_argument('--format', type=str, default='pdf',
dest='image_format', metavar='FORMAT',
help='image file format (options: pdf, svg, jpg, png)')
parser.add_argument('--dpi', type=int, default=400,
help='pixel density for image file')
parser.add_argument('--font', default=None, help='font to use')
return parser
def main():
args = _get_parser().parse_args()
logging.basicConfig(filename='sumo-optplot.log', level=logging.INFO,
filemode='w', format='%(message)s')
console = logging.StreamHandler()
logging.info(" ".join(sys.argv[:]))
logging.getLogger('').addHandler(console)
warnings.filterwarnings("ignore", category=UserWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=UnicodeWarning,
module="matplotlib")
warnings.filterwarnings("ignore", category=UserWarning,
module="pymatgen")
# Wrap mode into list if necessary
if not isinstance(args.mode, list):
args.mode = [args.mode]
# Replace text placeholders with preferred Python representation: None
ymin = [None if (x.lower() in ('auto', '_')) else float(x)
for x in args.ymin]
ymax = [None if (x.lower() in ('auto', '_')) else float(x)
for x in args.ymax]
# Settings should be list corresponding to n_plots, or value for all
ymin = ymin[0] if len(ymin) == 1 else ymin
ymax = ymax[0] if len(ymax) == 1 else ymax
optplot(modes=args.mode, filenames=args.filenames, codes=args.code,
prefix=args.prefix, directory=args.directory,
gaussian=args.gaussian, band_gaps=args.bandgaps,
labels=args.labels, average=args.anisotropic, height=args.height,
width=args.width, xmin=args.xmin, xmax=args.xmax, ymin=ymin,
ymax=ymax, colours=None, image_format=args.image_format,
dpi=args.dpi, style=args.style, no_base_style=args.no_base_style,
fonts=args.font)
if __name__ == "__main__":
main()
|
py | 1a4db29f4a14cfef5460a84c53bcce6dd72c9dd4 | # Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.n1kv import n1kv_client
LOG = logging.getLogger(__name__)
_resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
'vmnetwork': ['name', 'networkSegmentId',
'networkSegment', 'portProfile',
'portProfileId', 'tenantId',
'portId', 'macAddress',
'ipAddress', 'subnetId'],
'subnet': ['addressRangeStart', 'addressRangeEnd',
'ipAddressSubnet', 'description', 'gateway',
'dhcp', 'dnsServersList', 'networkAddress',
'netSegmentName', 'id', 'tenantId']}
class TestClient(n1kv_client.Client):
def __init__(self, **kwargs):
self.broken = False
self.inject_params = False
self.total_profiles = 2
super(TestClient, self).__init__()
def _get_total_profiles(self):
return self.total_profiles
def _do_request(self, method, action, body=None, headers=None):
if self.broken:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
if self.inject_params and body:
body['invalidKey'] = 'catchMeIfYouCan'
if method == 'POST':
return _validate_resource(action, body)
elif method == 'GET':
if 'virtual-port-profile' in action:
return _policy_profile_generator(
self._get_total_profiles())
else:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
class TestClientInvalidRequest(TestClient):
def __init__(self, **kwargs):
super(TestClientInvalidRequest, self).__init__()
self.inject_params = True
class TestClientInvalidResponse(TestClient):
def __init__(self, **kwargs):
super(TestClientInvalidResponse, self).__init__()
self.broken = True
def _validate_resource(action, body=None):
if body:
body_set = set(body.keys())
else:
return
if 'vm-network' in action and 'port' not in action:
vmnetwork_set = set(_resource_metadata['vmnetwork'])
if body_set - vmnetwork_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'port' in action:
port_set = set(_resource_metadata['port'])
if body_set - port_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'subnet' in action:
subnet_set = set(_resource_metadata['subnet'])
if body_set - subnet_set:
raise c_exc.VSMError(reason='Invalid Request')
else:
return
def _policy_profile_generator(total_profiles):
"""
Generate policy profile response and return a dictionary.
:param total_profiles: integer representing total number of profiles to
return
"""
profiles = {}
for num in range(1, total_profiles + 1):
name = "pp-%s" % num
profile_id = "00000000-0000-0000-0000-00000000000%s" % num
profiles[name] = {"properties": {"name": name, "id": profile_id}}
return profiles
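# For reference, _policy_profile_generator(2) above returns:
#
#     {"pp-1": {"properties": {"name": "pp-1",
#                              "id": "00000000-0000-0000-0000-000000000001"}},
#      "pp-2": {"properties": {"name": "pp-2",
#                              "id": "00000000-0000-0000-0000-000000000002"}}}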
def _policy_profile_generator_xml(total_profiles):
"""
Generate policy profile response in XML format.
:param total_profiles: integer representing total number of profiles to
return
"""
xml = ["""<?xml version="1.0" encoding="utf-8"?>
<set name="virtual_port_profile_set">"""]
template = (
'<instance name="%(num)d"'
' url="/api/n1k/virtual-port-profile/%(num)s">'
'<properties>'
'<id>00000000-0000-0000-0000-00000000000%(num)s</id>'
'<name>pp-%(num)s</name>'
'</properties>'
'</instance>'
)
xml.extend(template % {'num': n} for n in range(1, total_profiles + 1))
xml.append("</set>")
return ''.join(xml)
|
py | 1a4db50299b0d773800eb8d286bba8aaf5478b8e | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import json
import os
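# Each file in `blastdir` is expected to be a BLAST+ JSON report named
# <accession>.json; the 'BlastOutput2' key accessed below corresponds to the
# single-file JSON output format (the `-outfmt 15` detail is an assumption;
# the exact BLAST invocation is not part of this script).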
def main(blastdir):
print('Populating organism selftargeting spacer objects')
for fn in glob.glob(blastdir + '/*.json'):
# get loci intervals of organism
accession = os.path.splitext(os.path.split(fn)[1])[0]
q_org = Organism.objects.filter(accession=accession)
if not q_org.exists():
print('Organism with accession {} is not in db but blast '
'report exists'.format(accession))
continue
org = q_org[0]
interval_loci = [
(entry['genomic_start'], entry['genomic_end'])
for entry in org.locus_set.all().values('genomic_start',
'genomic_end')
]
with open(fn, 'r') as f:
try:
blastrec = json.loads(f.read())
except Exception as e:
print('Error on accession {}\n{}'.format(accession, e))
continue
for res in blastrec['BlastOutput2']:
query = res['report']['results']['bl2seq'][0]
spacerid = query['query_title']
for hit in query['hits']:
for hsps in hit['hsps']:
start_h, end_h = hsps['hit_from'], hsps['hit_to']
in_locus = any([start_h > start and end_h < end
for start, end in interval_loci])
if in_locus:
continue
q_spacer = Spacer.objects.filter(id=int(spacerid))
if not q_spacer.exists():
print('Spacer with sequence {} for organism {} '
'not found in db'.format(hsps['qseq'],
org.accession))
continue
spacer = q_spacer[0]
evalue = float(hsps['evalue'])
oselftarget, _ = OrganismSelfSpacer.objects.get_or_create(
organism=org,
spacer=spacer,
evalue=evalue,
genomic_start=start_h,
genomic_end=end_h
)
if __name__ == '__main__':
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "phageAPI.settings")
django.setup()
from restapi.models import Organism, OrganismSelfSpacer, Spacer
main('gbfiles/blastoutput')
|
py | 1a4db596e1c5da08150ca1e3f019d12ba2628ac8 | """
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransactionData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'description': (str,), # noqa: E501
'amount': (float,), # noqa: E501
'date': (date,), # noqa: E501
'account_id': (str,), # noqa: E501
'transaction_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'description': 'description', # noqa: E501
'amount': 'amount', # noqa: E501
'date': 'date', # noqa: E501
'account_id': 'account_id', # noqa: E501
'transaction_id': 'transaction_id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, description, amount, date, account_id, transaction_id, *args, **kwargs): # noqa: E501
"""TransactionData - a model defined in OpenAPI
Args:
description (str): The description of the transaction.
amount (float): The amount of the transaction.
date (date): The date of the transaction, in [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format (\"yyyy-mm-dd\").
account_id (str): A unique identifier for the end user's account.
transaction_id (str): A unique identifier for the transaction.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.description = description
self.amount = amount
self.date = date
self.account_id = account_id
self.transaction_id = transaction_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
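# Minimal construction sketch (all field values below are invented):
#
#     txn = TransactionData(
#         description="SparkFun",
#         amount=89.4,
#         date=date(2023, 12, 1),
#         account_id="account-id",
#         transaction_id="transaction-id",
#     )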
|
py | 1a4db5b4527cec4a74ae338aabbc8f2cf932f0b9 | """This module implements row model of MUFG bank CSV."""
from __future__ import annotations
from abc import ABC
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Optional
from zaimcsvconverter.file_csv_convert import FileCsvConvert
from zaimcsvconverter.inputcsvformats import InputRow, InputRowFactory, InputStoreRow, InputStoreRowData
from zaimcsvconverter.utility import Utility
@dataclass
class MufgRowData(InputStoreRowData):
"""This class implements data class for wrapping list of MUFG bunk CSV row model."""
# Reason: This implement depends on design of CSV. pylint: disable=too-many-instance-attributes
class Summary(Enum):
CARD = "カ−ド"
CARD_CONVENIENCE_STORE_ATM = "カ−ドC1"
class CashFlowKind(Enum):
"""This class implements constant of cash flow kind in MUFG CSV."""
INCOME = "入金"
PAYMENT = "支払い"
TRANSFER_INCOME = "振替入金"
TRANSFER_PAYMENT = "振替支払い"
_date: str
summary: str
_summary_content: str
_payed_amount: str
_deposit_amount: str
balance: str
note: str
is_uncapitalized: str
_cash_flow_kind: str
@property
def date(self) -> datetime:
return datetime.strptime(self._date, "%Y/%m/%d")
@property
def store_name(self) -> str:
return self._summary_content
@property
def payed_amount(self) -> Optional[int]:
return Utility.convert_string_to_int_or_none(self._payed_amount)
@property
def deposit_amount(self) -> Optional[int]:
return Utility.convert_string_to_int_or_none(self._deposit_amount)
@property
def cash_flow_kind(self) -> MufgRowData.CashFlowKind:
return self.CashFlowKind(self._cash_flow_kind)
@property
def validate(self) -> bool:
self.stock_error(lambda: self.date, f"Invalid date. Date = {self._date}")
# This comment prevents pylint duplicate-code.
self.stock_error(lambda: self.payed_amount, f"Invalid payed amount. Payed amount = {self._payed_amount}")
self.stock_error(
lambda: self.deposit_amount, f"Invalid deposit amount. Deposit amount = {self._deposit_amount}"
)
self.stock_error(
lambda: self.cash_flow_kind,
'The value of "Cash flow kind" has not been defined in this code. '
f"Cash flow kind = {self._cash_flow_kind}",
)
return super().validate
class MufgRow(InputRow):
"""This class implements row model of MUFG bank CSV."""
def __init__(self, input_row_data: MufgRowData, *args, **kwargs):
super().__init__(input_row_data, *args, **kwargs)
self.cash_flow_kind: MufgRowData.CashFlowKind = input_row_data.cash_flow_kind
self._summary: str = input_row_data.summary
@property
def is_income(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.INCOME
@property
def is_payment(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.PAYMENT
@property
def is_transfer_income(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.TRANSFER_INCOME
@property
def is_transfer_payment(self) -> bool:
return self.cash_flow_kind == MufgRowData.CashFlowKind.TRANSFER_PAYMENT
@property
def is_by_card(self) -> bool:
return (
self._summary == MufgRowData.Summary.CARD.value
or self._summary == MufgRowData.Summary.CARD_CONVENIENCE_STORE_ATM.value
)
@property
def is_income_from_other_own_account(self) -> bool:
return self.is_income and self.is_by_card
class MufgIncomeRow(MufgRow, ABC):
"""This class implements income row model of MUFG bank CSV."""
def __init__(self, row_data: MufgRowData, *args, **kwargs):
super().__init__(row_data, *args, **kwargs)
self._deposit_amount: Optional[int] = row_data.deposit_amount
@property
def deposit_amount(self) -> int:
if self._deposit_amount is None:
raise ValueError("Deposit amount on income row is not allowed empty.")
return self._deposit_amount
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.deposit_amount,
f"Deposit amount in income row is required. Deposit amount = {self._deposit_amount}",
)
return super().validate
class MufgPaymentRow(MufgRow, ABC):
"""This class implements payment row model of MUFG bank CSV."""
def __init__(self, row_data: MufgRowData, *args, **kwargs):
super().__init__(row_data, *args, **kwargs)
self._payed_amount: Optional[int] = row_data.payed_amount
@property
def payed_amount(self) -> int:
if self._payed_amount is None:
raise ValueError("Payed amount on payment row is not allowed empty.")
return self._payed_amount
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.payed_amount, f"Payed amount in payment row is required. Payed amount = {self._payed_amount}"
)
return super().validate
class MufgIncomeFromSelfRow(MufgIncomeRow):
"""This class implements income from self row model of MUFG bank CSV."""
class MufgPaymentToSelfRow(MufgPaymentRow):
"""This class implements payment from self row model of MUFG bank CSV."""
# pylint: disable=too-many-instance-attributes
class MufgStoreRow(MufgRow, InputStoreRow, ABC):
"""This class implements row model of MUFG bank CSV."""
def __init__(self, input_row_data: MufgRowData):
super().__init__(input_row_data, FileCsvConvert.MUFG.value)
@property
def is_transfer_income_from_other_own_account(self) -> bool:
"""This method returns whether this row is transfer income from other own account or not."""
return self.is_transfer_income and self.store.transfer_target is not None
@property
def is_transfer_payment_to_other_own_account(self) -> bool:
"""This method returns whether this row is transfer payment to other own account or not."""
return self.is_transfer_payment and self.store.transfer_target is not None
# pylint: disable=too-many-ancestors
class MufgIncomeFromOthersRow(MufgStoreRow, MufgIncomeRow):
"""This class implements row model of MUFG bank CSV."""
# pylint: disable=too-many-ancestors
class MufgPaymentToSomeoneRow(MufgStoreRow, MufgPaymentRow):
"""
This class implements payment row model of MUFG bank CSV.
The payment may be to others or to one of the user's own accounts.
"""
class MufgRowFactory(InputRowFactory[MufgRowData, MufgRow]):
"""This class implements factory to create MUFG CSV row instance."""
def create(self, input_row_data: MufgRowData) -> MufgRow:
if input_row_data.is_empty_store_name and input_row_data.cash_flow_kind == MufgRowData.CashFlowKind.INCOME:
return MufgIncomeFromSelfRow(input_row_data)
if input_row_data.is_empty_store_name and input_row_data.cash_flow_kind == MufgRowData.CashFlowKind.PAYMENT:
return MufgPaymentToSelfRow(input_row_data)
if input_row_data.cash_flow_kind in (
MufgRowData.CashFlowKind.PAYMENT,
MufgRowData.CashFlowKind.TRANSFER_PAYMENT,
):
return MufgPaymentToSomeoneRow(input_row_data)
if input_row_data.cash_flow_kind in (MufgRowData.CashFlowKind.INCOME, MufgRowData.CashFlowKind.TRANSFER_INCOME):
return MufgIncomeFromOthersRow(input_row_data)
raise ValueError(
f"Cash flow kind is not supported. Cash flow kind = {input_row_data.cash_flow_kind}"
) # pragma: no cover
# Reason: This raise is a safeguard for future development; the process should never be able to reach it.
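# A minimal, hedged usage sketch of the factory dispatch above. It assumes
# MufgRowData takes exactly the nine dataclass fields declared in this module,
# in declaration order, and that the parent data class adds no positional
# fields of its own; adjust if that assumption does not hold.
#
#     factory = MufgRowFactory()
#     row_data = MufgRowData(
#         "2018/11/28", "カ−ド", "", "10000", "", "3000000", "", "", "支払い"
#     )
#     row = factory.create(row_data)
#     # -> MufgPaymentToSelfRow, because the store name is empty and the
#     #    cash flow kind is PAYMENT ("支払い").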
|
py | 1a4db5da70638cf8109008f1bb13f8bf2a2ca4e8 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base evaluator."""
import abc
import dataclasses
from typing import List, Optional, Union
import numpy as np
@dataclasses.dataclass
class EvaluatorOutput:
"""The output of an evaluator."""
# An evaluator does not necessarily generate all fields below. For example,
# some evaluators, like Kendall's Tau, return a scalar and an image metric, while
# TwoWayCycleConsistency only generates a scalar metric.
scalar: Optional[Union[float, List[float]]] = None
image: Optional[Union[np.ndarray, List[np.ndarray]]] = None
video: Optional[Union[np.ndarray, List[np.ndarray]]] = None
@staticmethod
def _assert_same_attrs(list_out):
"""Ensures a list of this class instance have the same attributes."""
def _not_none(o):
return [getattr(o, a) is not None for a in ["scalar", "image", "video"]]
expected = _not_none(list_out[0])
for o in list_out[1:]:
actual = _not_none(o)
assert np.array_equal(expected, actual)
@staticmethod
def merge(list_out):
"""Merge a list of this class instance into one."""
# We need to make sure that all elements of the list have the same
# non-empty (i.e. != None) attributes.
EvaluatorOutput._assert_same_attrs(list_out)
# At this point, we're confident that we only need to check the
# attributes of the first member of the list to guarantee the same
# availability for *all* other members of the list.
scalars = None
if list_out[0].scalar is not None:
scalars = [o.scalar for o in list_out]
images = None
if list_out[0].image is not None:
images = [o.image for o in list_out]
videos = None
if list_out[0].video is not None:
videos = [o.video for o in list_out]
return EvaluatorOutput(scalars, images, videos)
def log(self, logger, global_step, name, prefix):
"""Log the attributes to tensorboard."""
if self.scalar is not None:
if isinstance(self.scalar, list):
self.scalar = np.mean(self.scalar)
logger.log_scalar(self.scalar, global_step, name, prefix)
if self.image is not None:
if isinstance(self.image, list):
for i, image in enumerate(self.image):
logger.log_image(image, global_step, name + f"_{i}", prefix)
else:
logger.log_image(self.image, global_step, name, prefix)
if self.video is not None:
if isinstance(self.video, list):
for i, video in enumerate(self.video):
logger.log_video(video, global_step, name + f"_{i}", prefix)
else:
logger.log_video(self.video, global_step, name, prefix)
logger.flush()
class Evaluator(abc.ABC):
"""Base class for evaluating a self-supervised model on downstream tasks.
Subclasses must implement the `evaluate` method.
"""
def __init__(self, inter_class):
self.inter_class = inter_class
@abc.abstractmethod
def evaluate(self, outs):
"""Evaluate the downstream task in embedding space.
Args:
outs: A list of outputs generated by the model on the downstream dataset.
:meta public:
"""
pass
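# A small, self-contained sketch of combining per-batch results with
# EvaluatorOutput.merge; it only uses the classes defined above and is guarded
# so it never runs on import.
if __name__ == "__main__":
    batch_outputs = [
        EvaluatorOutput(scalar=0.71),
        EvaluatorOutput(scalar=0.64),
    ]
    merged = EvaluatorOutput.merge(batch_outputs)
    print(merged.scalar)           # [0.71, 0.64]
    print(np.mean(merged.scalar))  # 0.675, the value log() would report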
|
py | 1a4db713738bfc5c64b39889f0400e27b12586e8 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires version 2 of the PyQt4 API, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
elif rcParams['backend'] == 'Qt4Agg':
QT_RC_MAJOR_VERSION = 4
else:
# A different backend was specified, but we still got here because a Qt
# related file was imported. This is allowed, so let's try to guess
# what we should be using.
if "PyQt4" in sys.modules or "PySide" in sys.modules:
# PyQt4 or PySide is actually used.
QT_RC_MAJOR_VERSION = 4
else:
# This is a fallback: PyQt5
QT_RC_MAJOR_VERSION = 5
QT_API = None
# check if any binding is already imported; if so, silently ignore the
# rcParams/ENV settings and use whatever is already imported.
if 'PySide' in sys.modules:
# user has imported PySide before importing mpl
QT_API = QT_API_PYSIDE
if 'PyQt4' in sys.modules:
# user has imported PyQt4 before importing mpl
# this case also handles the PyQt4v2 case as once sip is imported
# the API versions can not be changed so do not try
QT_API = QT_API_PYQT
if 'PyQt5' in sys.modules:
# the user has imported PyQt5 before importing mpl
QT_API = QT_API_PYQT5
if (QT_API_ENV is not None) and QT_API is None:
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
elif rcParams['backend'] == 'Qt4Agg':
QT_API = rcParams['backend.qt4']
else:
# A non-Qt backend was specified, no version of the Qt
# bindings is imported, but we still got here because a Qt
# related file was imported. This is allowed, so fall back to Qt5
# using whichever binding the rcParams ask for.
QT_API = rcParams['backend.qt5']
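# For example, with the Qt4Agg backend one could force the PySide binding
# either via the ETS environment variable before importing matplotlib:
#
#     QT_API=pyside python my_script.py
#
# or via the rcParam read above (e.g. "backend.qt4 : PySide" in matplotlibrc);
# both routes are resolved by the selection logic above.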
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API == QT_API_PYQT5:
try:
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
except ImportError:
# fell through, tried PyQt5, failed fall back to PyQt4
QT_API = rcParams['backend.qt4']
QT_RC_MAJOR_VERSION = 4
# This needs to be a plain `if` (not `elif`) so we can re-test the value of
# QT_API, which may have been changed in the above if block.
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
try:
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
except NameError:
# QtCore did not get imported, fall back to pyside
QT_API = QT_API_PYSIDE
if QT_API == QT_API_PYSIDE: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
Here I've opted to simply copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
def is_pyqt5():
return QT_API == QT_API_PYQT5
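# A brief, hedged usage note: backend code is expected to import the selected
# binding through this shim rather than importing PyQt4/PyQt5/PySide directly,
# for example (the module path matplotlib.backends.qt_compat is assumed here):
#
#     from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
#
#     if is_pyqt5():
#         ...  # Qt5-only code path
#     else:
#         ...  # Qt4/PySide code path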
|