ext | sha | content
---|---|---|
py | b415412a6772e9274c7f7d4c0b719af3d93b165f | #!/usr/bin/env python
import sys
def runtests(args=None):
import pytest
if not args:
args = []
if not any(a for a in args[1:] if not a.startswith('-')):
args.append('tests')
sys.exit(pytest.main(args))
if __name__ == '__main__':
runtests(sys.argv)
|
py | b415430511ab8e2be5a650d5097d16fbac83cbfe | # Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from nose.tools import *
from tests import prepare_database, with_database
from acoustic.data.stats import (
find_current_stats,
)
@with_database
def test_find_current_stats(conn):
prepare_database(conn, """
INSERT INTO stats (name, value, date) VALUES
('account.all', 3, '2011-04-25'),
('account.all', 3, '2011-04-26'),
('account.all', 4, '2011-04-27'),
('track.all', 13, '2011-04-25'),
('track.all', 13, '2011-04-26'),
('track.all', 14, '2011-04-27');
""")
stats = find_current_stats(conn)
assert_equals(4, stats['account.all'])
assert_equals(14, stats['track.all'])
|
py | b415430d4792f596d0fae199cfffdb89dcc76e61 |
from datetime import datetime, timedelta
from typing import Any, Dict, List, Mapping, Optional, Text, Type, Union
from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now
from analytics.lib.counts import COUNT_STATS, \
CountStat, do_drop_all_analytics_tables
from analytics.lib.fixtures import generate_time_series_data
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, FillState, RealmCount, UserCount
from zerver.lib.timestamp import floor_to_day
from zerver.models import Client, Realm, RealmAuditLog, UserProfile
class Command(BaseCommand):
help = """Populates analytics tables with randomly generated data."""
DAYS_OF_DATA = 100
random_seed = 26
def create_user(self, email: Text,
full_name: Text,
is_staff: bool,
date_joined: datetime,
realm: Realm) -> UserProfile:
user = UserProfile.objects.create(
email=email, full_name=full_name, is_staff=is_staff,
realm=realm, short_name=full_name, pointer=-1, last_pointer_updater='none',
api_key='42', date_joined=date_joined)
RealmAuditLog.objects.create(
realm=realm, modified_user=user, event_type='user_created',
event_time=user.date_joined)
return user
def generate_fixture_data(self, stat, business_hours_base, non_business_hours_base,
growth, autocorrelation, spikiness, holiday_rate=0,
partial_sum=False):
# type: (CountStat, float, float, float, float, float, float, bool) -> List[int]
self.random_seed += 1
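        # Note (added for clarity): bumping the seed on every call keeps each generated
        # series reproducible while still differing between stats and subgroups.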
return generate_time_series_data(
days=self.DAYS_OF_DATA, business_hours_base=business_hours_base,
non_business_hours_base=non_business_hours_base, growth=growth,
autocorrelation=autocorrelation, spikiness=spikiness, holiday_rate=holiday_rate,
frequency=stat.frequency, partial_sum=partial_sum, random_seed=self.random_seed)
def handle(self, *args: Any, **options: Any) -> None:
do_drop_all_analytics_tables()
# I believe this also deletes any objects with this realm as a foreign key
Realm.objects.filter(string_id='analytics').delete()
installation_time = timezone_now() - timedelta(days=self.DAYS_OF_DATA)
last_end_time = floor_to_day(timezone_now())
realm = Realm.objects.create(
string_id='analytics', name='Analytics', date_created=installation_time)
shylock = self.create_user('[email protected]', 'Shylock', True, installation_time, realm)
def insert_fixture_data(stat: CountStat,
fixture_data: Mapping[Optional[str], List[int]],
table: Type[BaseCount]) -> None:
end_times = time_range(last_end_time, last_end_time, stat.frequency,
len(list(fixture_data.values())[0]))
if table == RealmCount:
id_args = {'realm': realm}
if table == UserCount:
id_args = {'realm': realm, 'user': shylock}
for subgroup, values in fixture_data.items():
table.objects.bulk_create([
table(property=stat.property, subgroup=subgroup, end_time=end_time,
value=value, **id_args)
for end_time, value in zip(end_times, values) if value != 0])
stat = COUNT_STATS['realm_active_humans::day']
realm_data = {
None: self.generate_fixture_data(stat, .1, .03, 3, .5, 3, partial_sum=True),
} # type: Mapping[Optional[str], List[int]]
insert_fixture_data(stat, realm_data, RealmCount)
FillState.objects.create(property=stat.property, end_time=last_end_time,
state=FillState.DONE)
stat = COUNT_STATS['messages_sent:is_bot:hour']
user_data = {'false': self.generate_fixture_data(
stat, 2, 1, 1.5, .6, 8, holiday_rate=.1)} # type: Mapping[Optional[str], List[int]]
insert_fixture_data(stat, user_data, UserCount)
realm_data = {'false': self.generate_fixture_data(stat, 35, 15, 6, .6, 4),
'true': self.generate_fixture_data(stat, 15, 15, 3, .4, 2)}
insert_fixture_data(stat, realm_data, RealmCount)
FillState.objects.create(property=stat.property, end_time=last_end_time,
state=FillState.DONE)
stat = COUNT_STATS['messages_sent:message_type:day']
user_data = {
'public_stream': self.generate_fixture_data(stat, 1.5, 1, 3, .6, 8),
'private_message': self.generate_fixture_data(stat, .5, .3, 1, .6, 8),
'huddle_message': self.generate_fixture_data(stat, .2, .2, 2, .6, 8)}
insert_fixture_data(stat, user_data, UserCount)
realm_data = {
'public_stream': self.generate_fixture_data(stat, 30, 8, 5, .6, 4),
'private_stream': self.generate_fixture_data(stat, 7, 7, 5, .6, 4),
'private_message': self.generate_fixture_data(stat, 13, 5, 5, .6, 4),
'huddle_message': self.generate_fixture_data(stat, 6, 3, 3, .6, 4)}
insert_fixture_data(stat, realm_data, RealmCount)
FillState.objects.create(property=stat.property, end_time=last_end_time,
state=FillState.DONE)
website, created = Client.objects.get_or_create(name='website')
old_desktop, created = Client.objects.get_or_create(name='desktop app Linux 0.3.7')
android, created = Client.objects.get_or_create(name='ZulipAndroid')
iOS, created = Client.objects.get_or_create(name='ZulipiOS')
react_native, created = Client.objects.get_or_create(name='ZulipMobile')
API, created = Client.objects.get_or_create(name='API: Python')
zephyr_mirror, created = Client.objects.get_or_create(name='zephyr_mirror')
unused, created = Client.objects.get_or_create(name='unused')
long_webhook, created = Client.objects.get_or_create(name='ZulipLooooooooooongNameWebhook')
stat = COUNT_STATS['messages_sent:client:day']
user_data = {
website.id: self.generate_fixture_data(stat, 2, 1, 1.5, .6, 8),
zephyr_mirror.id: self.generate_fixture_data(stat, 0, .3, 1.5, .6, 8)}
insert_fixture_data(stat, user_data, UserCount)
realm_data = {
website.id: self.generate_fixture_data(stat, 30, 20, 5, .6, 3),
old_desktop.id: self.generate_fixture_data(stat, 5, 3, 8, .6, 3),
android.id: self.generate_fixture_data(stat, 5, 5, 2, .6, 3),
iOS.id: self.generate_fixture_data(stat, 5, 5, 2, .6, 3),
react_native.id: self.generate_fixture_data(stat, 5, 5, 10, .6, 3),
API.id: self.generate_fixture_data(stat, 5, 5, 5, .6, 3),
zephyr_mirror.id: self.generate_fixture_data(stat, 1, 1, 3, .6, 3),
unused.id: self.generate_fixture_data(stat, 0, 0, 0, 0, 0),
long_webhook.id: self.generate_fixture_data(stat, 5, 5, 2, .6, 3)}
insert_fixture_data(stat, realm_data, RealmCount)
FillState.objects.create(property=stat.property, end_time=last_end_time,
state=FillState.DONE)
# TODO: messages_sent_to_stream:is_bot
|
py | b415455f149cee3d9bd537bbb04faa097dad2812 | # Generated by Django 3.2.9 on 2022-01-06 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Feedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('content', models.TextField()),
('file', models.FileField(blank=True, null=True, upload_to='feedback/media')),
],
options={
'verbose_name': 'Feedback',
},
),
]
|
py | b41545b164f1033ffc2278f26099bd8f75894fc4 | #!/usr/bin/env python
from policy_sentry.shared.database import connect_db
from policy_sentry.querying.conditions import get_condition_keys_for_service
import json
if __name__ == '__main__':
db_session = connect_db('bundled')
output = get_condition_keys_for_service(db_session, "cloud9")
print(json.dumps(output, indent=4))
"""
Output:
[
'cloud9:EnvironmentId',
'cloud9:EnvironmentName',
'cloud9:InstanceType',
'cloud9:Permissions',
'cloud9:SubnetId',
'cloud9:UserArn'
]
"""
|
py | b41546dfcb045014330235c5b2abd3e51d6ae70e | # fault : use <- in line 11 (inner loop) as we need to move forward even though
# the house is in the middle of two heaters
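# Illustrative walk-through (added, not part of the original solution):
#   houses=[1, 4], heaters=[2, 3] -> house 1 stays on heater 2 (distance 1),
#   house 4 advances to heater 3 (distance 1), so the answer is radius 1.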
class Solution:
def findRadius(self, houses: 'List[int]', heaters: 'List[int]') -> 'int':
houses.sort()
heaters.sort()
ptr_house = 0
ptr_heater = 0
max_dist = 0
while(ptr_house < len(houses)):
while(ptr_heater < len(heaters)-1 and abs(houses[ptr_house]-heaters[ptr_heater+1]) <= abs(houses[ptr_house] - heaters[ptr_heater])):
ptr_heater += 1
max_dist = max(max_dist, abs(houses[ptr_house]-heaters[ptr_heater]))
ptr_house += 1
return max_dist
# test
if __name__ == '__main__':
print(Solution().findRadius([1, 2, 3, 4], [1, 2, 3, 4])) |
py | b41549cf80b4c0c9abe2d107e2261fd27df7dff1 | from __future__ import absolute_import
from .lstm import LSTM
|
py | b4154a216bdff575920df9b3e598d68d1335f776 | import os
import json as Json
import pickle
import zlib
import binascii
import sys
from cryptography.fernet import Fernet
from BlockChain.BlockChain import BlockChain
# 1. INDEX UNIFICATION
from storage.AVLMode import avlMode as avl
from storage.BMode import BMode as b
from storage.BPlusMode import BPlusMode as bplus
from storage.HashMode.storage import HashMode as _hash
from storage.IsamMode import ISAMMode as isam
from storage.DictMode import DictMode as _dict
from storage.JsonMode import jsonMode as json
from os import path
structs = [avl, b, bplus, _hash, isam, _dict]
databases = []
listBlockChain = []  # BlockChain instances for tables with safe mode enabled
def dropAll():
avl.dropAll()
bplus.dropAll()
_dict.dropAll()
json.dropAll()
return 0
def addDatabase(name, mode, code, mod):
database = {"mod": None, "mode": "", "name": "", "code": "", "tables": []}
database["mod"] = mod
database["name"] = name
database["mode"] = mode
database["code"] = code
databases.append(database)
persistence(databases)
def createDatabase(name, mode = 'avl', code = 'ASCII'):
try:
chargePersistence()
if code == 'UTF8' or code == 'ASCII' or code == 'ISO-8859-1':
if mode == 'avl':
addDatabase(name, mode, code, avl)
return avl.createDatabase(name)
elif mode == 'bplus':
addDatabase(name, mode, code, bplus)
return bplus.createDatabase(name)
elif mode == 'b':
addDatabase(name, mode, code, b)
return b.createDatabase(name)
elif mode == 'hash':
addDatabase(name, mode, code, _hash)
return _hash.createDatabase(name)
elif mode == 'isam':
addDatabase(name, mode, code, isam)
return isam.createDatabase(name)
elif mode == 'dict':
addDatabase(name, mode, code, _dict)
return _dict.createDatabase(name)
elif mode == 'json':
addDatabase(name, mode, code, json)
return json.createDatabase(name)
else:
return 3
else:
return 4
except:
return 1
def showDatabases():
chargePersistence()
msg = "BASES DE DATOS\n"
# dbs = []
for db in databases:
if '_' not in db['name']:
msg += f"\t{db['mode']}: {db['name']}\n"
return msg
def alterDatabase(databaseOld, databaseNew):
for item in structs:
value = item.alterDatabase(databaseOld, databaseNew)
if value != 2:
for i in databases:
if databaseOld == i["name"]:
i["name"] = databaseNew
# persistence()
return value
return 2
def dropDatabase(nameDB):
for item in structs:
value = item.dropDatabase(nameDB)
if value != 2:
for i in databases:
if nameDB == i["name"]:
databases.remove(i)
# persistence()
return value
return 2
def createTable(database, table, nCols):
for item in structs:
value = item.createTable(database, table, nCols)
if value != 2:
for i in databases:
if database == i["name"]:
t = {"name": table, "nCols": nCols, "tuples": [], "safeMode": False,
"fk": None, "iu": None, "io": None}
i["tables"].append(t)
persistence(databases)
return value
return 2
def showTables(database):
chargePersistence()
tables = []
for item in databases:
value = item["mod"].showTables(database)
if value:
tables.append(value)
break
return tables
def extractTable(database, table):
chargePersistence()
alterDatabaseDecompress(database)
for item in structs:
value = item.extractTable(database, table)
if value is not None:
if value != []:
return value
return None
def extractRangeTable(database, table, columnNumber, lower, upper):
for item in structs:
value = item.extractRangeTable(database, table, columnNumber, lower, upper)
if value and value != 1:
return value
return []
def alterAddPK(database, table, columns):
for item in structs:
value = item.alterAddPK(database, table, columns)
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
t["pk"] = columns
persistence(databases)
return value
return 2
def alterDropPK(database, table):
for item in structs:
value = item.alterDropPK(database, table)
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
t["pk"] = []
return value
return 2
def alterTable(database, old, new):
for item in structs:
value = item.alterTable(database, old, new)
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if old == t["name"]:
t["name"] = new
return value
return 2
def alterDropColumn(database, table, columnNumber):
for item in structs:
value = item.alterDropColumn(database, table, columnNumber)
if value != 2:
return value
return 2
def alterAddColumn(database, table, default):
for item in structs:
value = item.alterAddColumn(database, table, default)
if value != 2:
return value
return 2
def dropTable(database, table):
for item in structs:
value = item.dropTable(database, table)
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
i["tables"].remove(t)
# persistence()
return value
return 2
def insert(database, table, register):
for item in structs:
codificacion = codificationValidation(getCodificationMode(database),register)
if codificacion == True:
            value = item.insert(database, table, register)  # NOTE: the register must pass the encoding validation above before being inserted
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
tupla = {"register": register}
t["tuples"].append(tupla)
#START BlockChain
i = 0
while i<len(listBlockChain):
if(listBlockChain[i].getName() == (str(database)+"_"+str(table))):
listBlockChain[i].addNodeNoSecure(register)
listBlockChain[i].generateJsonSafeMode()
i += 1
#END BlockChain
persistence(databases)
return value
else:
return 1
return 2
def extractRow(database, table, columns):
chargePersistence()
for item in structs:
value = item.extractRow(database, table, columns)
if value:
return value
return []
def loadCSV(fileCSV, db, table):
for item in structs:
value = item.loadCSV(fileCSV, db, table)
if value != [] and value[0] != 2:
# persistence()
return value
return value
def update(database, table, register, columns):
for item in structs:
value = item.update(database, table, register, columns)
# START BlockChain
i = 0
while i<len(listBlockChain):
if listBlockChain[i].getName() == (str(database)+"_"+str(table)):
j = 0
tuplesBlockChain = listBlockChain[i].getListValues()
                tuples = extractTable(database, table)
while j < len(tuplesBlockChain):
k = 0
newValue = ""
while k < len(tuples):
if tuples[k] not in tuplesBlockChain:
newValue = tuples[k]
k += 1
if tuplesBlockChain[j] not in tuples:#.getValue()
listBlockChain[i].alterValueNode(newValue, j)
listBlockChain[i].generateJsonSafeMode()
j += 1
break
i += 1
# END BlockChain
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
for tup in t["tuples"]:
if tup["register"][0] == columns[0]:
index = 0
for key in register:
index = key
tup["register"][index] = register[index]
persistence(databases)
return value
return 2
def delete(database, table, columns):
pass
# for item in structs:
# value = item.delete(database, table, columns)
# if value != 2:
# for i in databases:
# if database == i["name"]:
# for t in i["tables"]:
# if table == t["name"]:
# for tup in t["tuples"]:
# index = 0
# for key in columns:
# index = key
# tup["register"][index] = register[1]
# return value
# return 2
def truncate(database, table):
for item in structs:
value = item.truncate(database, table)
if value != 2:
for i in databases:
if database == i["name"]:
for t in i["tables"]:
if table == t["name"]:
t["tuples"] = []
# persistence()
return value
return 2
# 2. STORAGE MODE MANAGER
def alterDatabaseMode(database, mode):
try:
changueMode(databases)
for db in databases:
if db["name"] == database:
dbCopy = db.copy()
databases.remove(db)
dbCopy["mod"].dropDatabase(dbCopy["name"])
createDatabase(dbCopy["name"], mode, dbCopy["code"])
for table in dbCopy["tables"]:
createTable(dbCopy["name"], table["name"], table["nCols"])
for reg in table["tuples"]:
insert(dbCopy["name"], table["name"], reg["register"])
persistence(databases)
return 0
except:
return 1
# 3. INDEX MANAGEMENT
def alterTableAddFK(database, table, indexName, columns, tableRef, columnsRef):
pass
def alterTableDropFK(database, table, indexName):
pass
# 4. ENCODING MANAGEMENT
def alterDatabaseEncoding(database,encoding):
if encoding =="ASCII" or encoding =="ISO-8859-1" or encoding =="UTF8":
pass
else:
return 3
try:
i=0
for db in databases:
if db["name"] == database:
for table in db["tables"]:
for tupla in table["tuples"]:
for register in tupla["register"]:
if isinstance(register, str) :
codificacion = codificationValidation(encoding,register)
if codificacion == True:
pass
else:
return 1
break
i+=1
if i==len(databases):
return 2
else:
return 0
except:
return 1
def codificationValidation(codification,stringlist): ##Cristian
if codification=="ASCII":
try:
for i in stringlist:
                if isinstance(i, str):  # check whether this element is a string
i.encode('ascii')
else:
pass
return True
except:
return False
elif codification=="ISO-8859-1":
try:
for i in stringlist:
                if isinstance(i, str):  # check whether this element is a string
i.encode('latin-1')
else:
pass
return True
except:
return False
elif codification=="UTF8":
try:
for i in stringlist:
                if isinstance(i, str):  # check whether this element is a string
i.encode('utf-8')
else:
pass
return True
except:
return False
else:
        return 3  # unknown encoding name
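# Hedged examples of the validation above (illustrative only):
#   codificationValidation("ASCII", ["abc", 1])   -> True   (non-strings are skipped)
#   codificationValidation("ASCII", ["año"])      -> False  ('ñ' is not ASCII)
#   codificationValidation("ISO-8859-1", ["año"]) -> True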
def getCodificationMode(database):
for i in databases:
if database == i["name"]:
if i["code"] == "ASCII":
return "ASCII"
elif i["code"] == "ISO-8859-1":
return "ISO-8859-1"
elif i["code"] == "UTF8":
return "UTF8"
return 2
# 6. DATA COMPRESSION
def alterDatabaseCompress(database, level):
if level not in range(-1, 6):
return 4
try:
for db in databases:
if db["name"] == database:
for table in db["tables"]:
changueMode(databases)
tableCopy = table.copy()
table["tuples"] = []
db["mod"].truncate(db["name"], table["name"])
for tupla in tableCopy["tuples"]:
newRegister = []
for register in tupla["register"]:
if type(register) == str:
text = bytes(register, db["code"])
register = zlib.compress(text, level)
newRegister.append(register)
insert(db['name'], table["name"], newRegister)
return 0
except:
return 1
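# Illustrative round trip of the per-register compression used above (assumes a
# database whose "code" is "ISO-8859-1" and a level accepted by the range check):
#   packed = zlib.compress("año".encode("ISO-8859-1"), 5)
#   assert zlib.decompress(packed).decode("ISO-8859-1") == "año"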
def alterDatabaseDecompress(database):
try:
isCompressed = False
for db in databases:
if db["name"] == database:
for table in db["tables"]:
changueMode(databases)
tableCopy = table.copy()
table["tuples"] = []
db["mod"].truncate(db["name"], table["name"])
for tupla in tableCopy["tuples"]:
newRegister = []
for register in tupla["register"]:
if type(register) == bytes:
text = zlib.decompress(register)
register = text.decode(db["code"])
isCompressed = True
newRegister.append(register)
insert(db['name'], table["name"], newRegister)
if not isCompressed:
return 3
return 0
except:
return 1
def alterTableCompress(database, table, level):
if level not in range(-1, 6):
return 4
try:
for db in databases:
if db["name"] == database:
for t in db["tables"]:
changueMode(databases)
if t["name"] == table:
tableCopy = t.copy()
t["tuples"] = []
db["mod"].truncate(db["name"], t["name"])
for tupla in tableCopy["tuples"]:
newRegister = []
for register in tupla["register"]:
if type(register) == str:
text = bytes(register, db["code"])
register = zlib.compress(text, level)
newRegister.append(register)
insert(db['name'], t["name"], newRegister)
return 0
else:
return 3
else:
return 2
except:
return 1
def alterTableDecompress(database, table, level):
try:
isCompressed = False
        for db in databases:
            if db["name"] == database:
                for t in db["tables"]:
                    if t["name"] == table:
                        tableCopy = t.copy()
                        t["tuples"] = []
                        db["mod"].truncate(db["name"], t["name"])
                        for tupla in tableCopy["tuples"]:
                            newRegister = []
                            for register in tupla["register"]:
                                if type(register) == bytes:
                                    text = zlib.decompress(register)
                                    register = text.decode(db["code"])
                                    isCompressed = True
                                newRegister.append(register)
                            insert(db['name'], t["name"], newRegister)
                    else:
                        return 3
            else:
                return 2
if not isCompressed:
return 3
return 0
except:
return 1
# 7. SECURITY
"""
@description
    Encrypts data.
@param
    backup: the data to encrypt.
    password: the key used to encrypt the data.
@return
    The encrypted data.
"""
def encrypt(backup, password):
return Fernet(password).encrypt(backup.encode()).decode()
"""
@description
Descencripta información.
@param
cipherBackup: información que se desea descencriptar.
password: clave con la que se desencriptará la información.
@return
Información descencriptada.
"""
def decrypt(cipherBackup, password):
return Fernet(password).decrypt(cipherBackup.encode()).decode()
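# Hedged usage sketch for the two helpers above (the key below is an assumption,
# generated only for illustration):
#   key = Fernet.generate_key()
#   token = encrypt("backup contents", key)
#   assert decrypt(token, key) == "backup contents"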
def persistence(databases):
try:
if path.exists("DB"):
os.remove("DB")
archivo = open("DB", "wb")
for db in databases:
db["mod"] = db["mode"]
pickle.dump(databases, archivo)
archivo.close()
del(archivo)
except:
pass
def chargePersistence():
n = databases
if path.isfile("DB") and len(n) == 0 and path.getsize("DB") > 0:
archivo = open("DB" , "rb")
data = pickle.load(archivo)
changueMode(data, True)
archivo.close()
print("bases de datos cargadas")
def changueMode(database, isPersistence = False):
for i in database:
if i["mod"] == 'avl':
i["mod"] = avl
elif i["mod"] == 'b':
i["mod"] == b
elif i["mod"] == 'bplus':
i["mod"] = bplus
elif i["mod"] == 'hash':
i["mod"] = _hash
elif i["mod"] == 'isam':
i["mod"] = isam
elif i["mod"] == 'dict':
i["mod"] = _dict
elif i["mod"] == 'json':
i["mod"] = json
if isPersistence:
databases.append(i)
# generate the blockchain report graph
def generateGraphBlockChain(database, table):
i = 0
fileName = str(database)+"_"+str(table)+"BC"
while i < len(listBlockChain):
if listBlockChain[i].getName() == (str(database)+"_"+str(table)):
data = listBlockChain[i].generateGraph()
with open(fileName+".dot", "w") as file:
file.write(data)
os.system("dot -Tpng "+fileName+".dot"+" -o "+fileName+".png")
break
else:
print("No se encontro el Block Chain de la tabla indicada")
i += 1
### WORK BLOCKCHAIN ###
"""
@description
Activa el modo seguro para una tabla de una base de datos
@param
database: Nombre de la base de datos a utilizar
table: Nombre de la tabla a utilizar
@return
0: Operación exitosa
1: Error en la operación
2: database inexistente
3: table inexistente
4: Modo seguro inexistente
"""
def safeModeOn(database, table):
try:
for db in databases:
            # check whether the database exists
            if db.get("name") == database:
                for tb in db.get("tables"):
                    # check whether the table exists
                    if tb.get("name") == table:
                        # check whether safe mode is already enabled
                        if tb.get("safeMode"):
                            # safe mode already enabled
                            return 4
tb["safeMode"] = True
#_________________________________________________________
bc = BlockChain(str(database)+"_"+str(table))
for tp in tb.get("tuples"):
bc.addNode(tp.get("register"))
bc.generateJsonSafeMode()
listBlockChain.append(bc)
                        #_________________________________________________________
                        return 0
                # table does not exist
                return 3
            # database does not exist
        return 2
    except:
        # error during the operation
return 1
"""
@description
Desactiva el modo en la tabla especificada de la base de datos
@param
database: Nombre de la base de datos a utilizar
table: Nombre de la tabla a utilizar
@return
0: Operación exitosa
1: Error en la operación
2: database inexistente
3: table inexistente
4: modo seguro no inexistente
"""
def safeModeOff(database, table):
try:
for db in databases:
            # check whether the database exists
            if db.get("name") == database:
                for tb in db.get("tables"):
                    # check whether the table exists
                    if tb.get("name") == table:
                        # check whether safe mode is enabled
                        if tb.get("safeMode"):
                            tb["safeMode"] = False
                            os.remove('BlockChain\\'+str(database)+'_'+str(table)+'.json')
                            return 0
                        # safe mode not enabled
                        return 4
                # table does not exist
                return 3
            # database does not exist
        return 2
    except:
        # error during the operation
return 1
|
py | b4154a24c504e59e97d8b77e1500b12843488bc7 | #!/usr/bin/python
# Authors: Chris Tung
# Igncio Taboada
#
# General imports
from __future__ import division
import os
import gzip
import re
import argparse
# Numpy / Scipy
import numpy as np
import scipy.integrate
# Firesong code
from Evolution import RedshiftDistribution, StandardCandleSources, LuminosityDistance, LtoFlux
from Luminosity import LuminosityFunction, LuminosityPDF
#
# Check that the Firesong environmental variable is set
# This is needed for output and to read exposures, effective areas, etc
try:
firesongdir = os.environ['FIRESONG']
except:
print "Enviromental variable FIRESONG not set"
quit()
outputdir = firesongdir + "/Results/"
#
# Process command line options
#
parser = argparse.ArgumentParser()
parser.add_argument('-N', action='store', dest='AlertNumber',type=int,default= 1,
help='Number of neutrinos to generate')
parser.add_argument('-o', action='store', dest='filename',default= 'Firesong.out',
help='Output filename')
parser.add_argument('-d', action='store', dest='density', type=float, default = 1e-9,
help='Local neutrino source density [1/Mpc^3]')
parser.add_argument("--evolution", action="store",
dest="Evolution", default='HB2006SFR',
help="Source evolution options: HB2006SFR (default), NoEvolution")
parser.add_argument("--transient", action='store_true',
dest='Transient', default=False,
help='Simulate transient sources, NOT TESTED YET!')
parser.add_argument("--timescale", action='store', dest='timescale', type=float,
default=1000., help='time scale of transient sources, default is 1000sec.')
parser.add_argument("--zmax", action="store", type=float,
dest="zmax", default=10.,
help="Highest redshift to be simulated")
parser.add_argument("--fluxnorm", action="store", dest='fluxnorm', type=float, default=0.9e-8,
help="Astrophysical neutrino flux normalization A on E^2 dN/dE = A (E/100 TeV)^(-index+2) GeV/cm^2.s.sr")
parser.add_argument("--index", action="store", dest='index', type=float, default=2.13,
help="Astrophysical neutrino spectral index on E^2 dN/dE = A (E/100 TeV)^(-index+2) GeV/cm^2.s.sr")
parser.add_argument("--LF",action="store", dest="LF",default="SC",
help="Luminosity function, SC for standard candles, LG for lognormal, PL for powerlaw")
parser.add_argument("--sigma", action="store",
dest="sigma", type=float, default=1.0,
help="Width of a log normal Luminosity function in dex, default: 1.0")
parser.add_argument("--L", action="store",
dest="luminosity", type=float, default=0.0,
help="Set luminosity for each source, will reset fluxnorm option, unit erg/yr")
options = parser.parse_args()
if re.search('.gz$', options.filename):
output = gzip.open(outputdir+str(options.filename), 'wb')
else:
output = open(outputdir+str(options.filename),"w")
N_sample, candleflux = StandardCandleSources(options)
## Integrate[EdN/dE, {E, 10TeV, 10PeV}] * 4*Pi * dL1^2 * unit conversion
luminosity = candleflux * (1.e-5) * scipy.integrate.quad(lambda E: 2.**(-options.index+2)*(E/1.e5)**(-options.index+1), 1.e4, 1.e7)[0] * 4*np.pi * (LuminosityDistance(1.)*3.086e24)**2. *50526
## If luminosity of the sources is specified, re-calculate candleflux
if options.luminosity != 0.0:
candleflux = LtoFlux(options)
luminosity = options.luminosity
flux_z1 = LuminosityFunction(options, N_sample, candleflux)
print ("##############################################################################")
print ("##### FIRESONG initializing - Calculating Neutrino CDFs #####")
if options.LF == "SC":
print ("Standard candle sources")
if options.LF == "LG":
print ("Lognormal distributed sources")
if options.LF == "PL":
print ("PowerLaw distributed sources")
print ("Source evolution assumed: " + str(options.Evolution))
print ("Local density of neutrino sources: " + str(options.density) + "/Mpc^3")
print ("Total number of neutrinos sources in the Universe: " + str(N_sample))
print ("Desired neutrino diffuse flux: E^2 dN/dE = " + str(options.fluxnorm) + " (E/100 TeV)^(" + str(-(options.index-2.)) + ") GeV/cm^2.s.sr")
print ("Redshift range: 0 - " + str(options.zmax))
print ("Standard Candle Luminosity: {:.4e} erg/yr".format(luminosity))
print ("##### FIRESONG initialization done #####")
##################################################
# Simulation starts here
##################################################
output.write("# FIRESONG Output description\n")
output.write("# Desired neutrino diffuse flux:\n")
output.write("# E^2 dN_{diffuse}/dE = " + str(options.fluxnorm) + " (E/100 TeV)^(" + str(-(options.index-2.)) + ") [GeV/cm^2.s.sr]\n")
output.write("# Neutrino point source fluxes listed below are of \'A\' where the flux is:\n")
output.write("# E^2 dN_{PS}/dE = A * (E/100 TeV)^(" + str(-(options.index-2.)) + ") [GeV/cm^2.s.sr]\n")
output.write("# Standard Candle Luminosity: {:.4e} erg/yr \n".format(luminosity))
output.write("# Note that using 4 years, IceCube sensitivity in the northern hemisphere\n")
output.write("# is approximately 10^-9 in the units used for A\n")
output.write("# Dec(deg) Redshift A\n")
# Luminosity distace for z=1. Internally, fluxes are scaled to this distance.
dL1 = LuminosityDistance(1.)
# Generate a histogram to store redshifts. Starts at z = 0.0005 and increases in steps of 0.001
redshift_bins = np.arange(0.0005,options.zmax, options.zmax/10000.)
# Calculate the redshift z PDF for neutrino events
if options.Transient == False:
NeutrinoPDF_z = [RedshiftDistribution(z, options)*((1+z)/2.)**(-options.index+2)/(LuminosityDistance(z)**2.) for z in redshift_bins]
else:
NeutrinoPDF_z = [RedshiftDistribution(z, options)*((1+z)/2.)**(-options.index+3)/(LuminosityDistance(z)**2.) for z in redshift_bins]
NeutrinoCDF_z = np.cumsum(NeutrinoPDF_z)
NeutrinoCDF_z = NeutrinoCDF_z / NeutrinoCDF_z[-1]
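# (Added note: the cumulative sum above turns the redshift PDF into a discrete CDF;
#  in the loop below a uniform random number passed to np.searchsorted performs
#  inverse-transform sampling of the source redshift.)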
# Obtain the flux_z1 PDF for neutrino event
if options.LF != "SC":
f1_list, NeutrinoCDF_f = LuminosityPDF(options, candleflux)
for i in range(0,options.AlertNumber):
# Random variates from the above constructed PDFs
test = np.random.rand()
bin_index_z = np.searchsorted(NeutrinoCDF_z, test)
z = redshift_bins[bin_index_z]
if options.LF != 'SC':
test = np.random.rand()
bin_index_f = np.searchsorted(NeutrinoCDF_f, test)
flux_z1 = f1_list[bin_index_f]
# Random declination over the entire sky
sinDec = 2*np.random.rand() -1
declin = 180*np.arcsin(sinDec)/np.pi
dL = LuminosityDistance(z)
flux = flux_z1 * (dL1 / dL)**2 * ((1+z)/2.)**(-options.index+2)
if options.Transient == True:
flux = flux/(options.timescale)
output.write('{:.3f} {:.4f} {:.6e}\n'.format(declin, z, flux))
output.close()
|
py | b4154add59419e4256238f552d7f07f8547b74bf | import cv2
import numpy as np
import cv2
import utils
import os
from train_face import *
# print(eye_cascade)
# cv2.destroyAllWindows()
def most_common(lst):
return max(set(lst), key=lst.count)
def circle_list(circular, max_elmnts, element):
if len(circular) >= max_elmnts:
circular.pop(0)
circular.append(element)
else:
circular.append(element)
return circular
video_capture = cv2.VideoCapture(0)
video_capture.release
cv2.destroyAllWindows()
# while True:
# Capture frame-by-frame
name = 'hugo'
directory = './ids/'+name
if not os.path.exists(directory):
os.makedirs(directory)
for i in range(100):
ret, frame = video_capture.read()
img, eyes, faces = utils.face_recognition_train(frame, i, name)
cv2.imshow('img',img)
if cv2.waitKey(1) == 27:
break # esc to quit
no_faces = True
id_person = []
stream = video_capture
detected_frames = 0
X_pca, pca, y = train_pca()
while no_faces == True:
ret, frame = stream.read()
img, faces, coor = utils.face_recognition_2(frame)
if len(faces) != 0:
detected_frames += 1
if len(id_person) > 0:
retrieved_id, dist = test_id(faces, X_pca, pca, y, img, coor, most_common(id_person))
if dist < 8:
circle_list(id_person, 15, retrieved_id)
else:
retrieved_id, dist = test_id(faces, X_pca, pca, y, img, coor, '')
if dist < 8:
circle_list(id_person, 15, retrieved_id)
else:
detected_frames = 0
id_person = []
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows() |
py | b4154b4ea8919c4b52e54d8099fc4221428d3ada | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: WebRequest
Description : Network Requests Class
Author : J_hao
date: 2017/7/31
-------------------------------------------------
Change Activity:
2017/7/31:
-------------------------------------------------
"""
__author__ = 'J_hao'
from requests.models import Response
from lxml import etree
import requests
import random
import time
from handler.logHandler import LogHandler
requests.packages.urllib3.disable_warnings()
class WebRequest(object):
name = "web_request"
def __init__(self, *args, **kwargs):
self.log = LogHandler(self.name, file=False)
self.response = Response()
@property
def user_agent(self):
"""
return an User-Agent at random
:return:
"""
ua_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
]
return random.choice(ua_list)
@property
def header(self):
"""
basic header
:return:
"""
return {'User-Agent': self.user_agent,
'Accept': '*/*',
'Connection': 'keep-alive',
'Accept-Language': 'zh-CN,zh;q=0.8'}
def get(self, url, header=None, retry_time=3, retry_interval=5, timeout=5, *args, **kwargs):
"""
get method
:param url: target url
:param header: headers
:param retry_time: retry time
:param retry_interval: retry interval
:param timeout: network timeout
:return:
"""
headers = self.header
if header and isinstance(header, dict):
headers.update(header)
while True:
try:
self.response = requests.get(url, headers=headers, timeout=timeout, *args, **kwargs)
return self
except Exception as e:
self.log.error("requests: %s error: %s" % (url, str(e)))
retry_time -= 1
if retry_time <= 0:
resp = Response()
resp.status_code = 200
return self
self.log.info("retry %s second after" % retry_interval)
time.sleep(retry_interval)
@property
def tree(self):
return etree.HTML(self.response.content)
@property
def text(self):
return self.response.text
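# Hedged usage sketch (illustrative, not part of the original module):
#   req = WebRequest()
#   page = req.get("http://example.com")   # retries internally on failure
#   html_root = page.tree                   # lxml element tree of the response body
#   raw_text = page.text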
|
py | b4154be8dd3b4e91e0ebc7a9c93e28c6ce5fbd13 | # Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import textwrap
import numpy as np
from phonopy.units import VaspToTHz
from phonopy.harmonic.derivative_dynmat import DerivativeOfDynamicalMatrix
from phonopy.harmonic.force_constants import similarity_transformation
from phonopy.phonon.degeneracy import degenerate_sets
def get_group_velocity(q, # q-point
dynamical_matrix,
q_length=None, # finite distance in q
symmetry=None,
frequency_factor_to_THz=VaspToTHz,
log_level=0):
"""
If frequencies and eigenvectors are supplied they are used
instead of calculating them at q-point (but not at q+dq and q-dq).
reciprocal lattice has to be given as
[[a_x, b_x, c_x],
[a_y, b_y, c_y],
[a_z, b_z, c_z]]
"""
gv = GroupVelocity(dynamical_matrix,
q_length=q_length,
symmetry=symmetry,
frequency_factor_to_THz=frequency_factor_to_THz,
log_level=log_level)
gv.set_q_points([q])
return gv.get_group_velocity()[0]
def delta_dynamical_matrix(q,
delta_q,
dynmat):
dynmat.set_dynamical_matrix(q - delta_q)
dm1 = dynmat.get_dynamical_matrix()
dynmat.set_dynamical_matrix(q + delta_q)
dm2 = dynmat.get_dynamical_matrix()
return dm2 - dm1
class GroupVelocity(object):
"""
d omega ----
------- = \ / omega
d q \/q
Gradient of omega in reciprocal space.
d D(q)
<e(q,nu)|------|e(q,nu)>
d q
"""
def __init__(self,
dynamical_matrix,
q_length=None,
symmetry=None,
frequency_factor_to_THz=VaspToTHz,
cutoff_frequency=1e-4,
log_level=0):
"""
q_points is a list of sets of q-point and q-direction:
[[q-point, q-direction], [q-point, q-direction], ...]
q_length is used such as D(q + q_length) - D(q - q_length).
"""
self._dynmat = dynamical_matrix
primitive = dynamical_matrix.get_primitive()
self._reciprocal_lattice_inv = primitive.get_cell()
self._reciprocal_lattice = np.linalg.inv(self._reciprocal_lattice_inv)
self._q_length = q_length
if self._dynmat.is_nac() and self._dynmat.get_nac_method() == 'gonze':
if self._q_length is None:
self._q_length = 1e-5
if log_level:
print("Group velocity calculation:")
text = ("Analytical derivative of dynamical matrix is not "
"implemented for NAC by Gonze et al. Instead "
"numerical derivative of it is used with dq=1e-5 "
"for group velocity calculation.")
print(textwrap.fill(text,
initial_indent=" ",
subsequent_indent=" ",
width=70))
if self._q_length is None:
self._ddm = DerivativeOfDynamicalMatrix(dynamical_matrix)
else:
self._ddm = None
self._symmetry = symmetry
self._factor = frequency_factor_to_THz
self._cutoff_frequency = cutoff_frequency
self._directions = np.array([[1, 2, 3],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]], dtype='double')
self._directions[0] /= np.linalg.norm(self._directions[0])
self._q_points = None
self._group_velocity = None
self._perturbation = None
def set_q_points(self, q_points, perturbation=None):
self._q_points = q_points
self._perturbation = perturbation
if perturbation is None:
self._directions[0] = np.array([1, 2, 3])
else:
self._directions[0] = np.dot(
self._reciprocal_lattice, perturbation)
self._directions[0] /= np.linalg.norm(self._directions[0])
self._set_group_velocity()
def set_q_length(self, q_length):
self._q_length = q_length
def get_q_length(self):
return self._q_length
def get_group_velocity(self):
return self._group_velocity
def _set_group_velocity(self):
gv = [self._set_group_velocity_at_q(q) for q in self._q_points]
self._group_velocity = np.array(gv)
def _set_group_velocity_at_q(self, q):
self._dynmat.set_dynamical_matrix(q)
dm = self._dynmat.get_dynamical_matrix()
eigvals, eigvecs = np.linalg.eigh(dm)
eigvals = eigvals.real
freqs = np.sqrt(abs(eigvals)) * np.sign(eigvals) * self._factor
gv = np.zeros((len(freqs), 3), dtype='double')
deg_sets = degenerate_sets(freqs)
ddms = self._get_dD(np.array(q))
pos = 0
for deg in deg_sets:
gv[pos:pos+len(deg)] = self._perturb_D(ddms, eigvecs[:, deg])
pos += len(deg)
for i, f in enumerate(freqs):
if f > self._cutoff_frequency:
gv[i, :] *= self._factor ** 2 / f / 2
else:
gv[i, :] = 0
if self._perturbation is None:
return self._symmetrize_group_velocity(gv, q)
else:
return gv
def _symmetrize_group_velocity(self, gv, q):
rotations = []
for r in self._symmetry.get_reciprocal_operations():
q_in_BZ = q - np.rint(q)
diff = q_in_BZ - np.dot(r, q_in_BZ)
if (np.abs(diff) < self._symmetry.get_symmetry_tolerance()).all():
rotations.append(r)
gv_sym = np.zeros_like(gv)
for r in rotations:
r_cart = similarity_transformation(self._reciprocal_lattice, r)
gv_sym += np.dot(r_cart, gv.T).T
return gv_sym / len(rotations)
def _get_dD(self, q):
if self._q_length is None:
return self._get_dD_analytical(q)
else:
return self._get_dD_FD(q)
def _get_dD_FD(self, q): # finite difference
ddm = []
for dqc in self._directions * self._q_length:
dq = np.dot(self._reciprocal_lattice_inv, dqc)
ddm.append(delta_dynamical_matrix(q, dq, self._dynmat) /
self._q_length / 2)
return np.array(ddm)
def _get_dD_analytical(self, q):
self._ddm.run(q)
ddm = self._ddm.get_derivative_of_dynamical_matrix()
dtype = "c%d" % (np.dtype('double').itemsize * 2)
ddm_dirs = np.zeros((len(self._directions),) + ddm.shape[1:],
dtype=dtype)
for i, dq in enumerate(self._directions):
for j in range(3):
ddm_dirs[i] += dq[j] * ddm[j]
return ddm_dirs
def _perturb_D(self, ddms, eigsets):
eigvals, eigvecs = np.linalg.eigh(
np.dot(eigsets.T.conj(), np.dot(ddms[0], eigsets)))
gv = []
rot_eigsets = np.dot(eigsets, eigvecs)
for ddm in ddms[1:]:
gv.append(
np.diag(np.dot(rot_eigsets.T.conj(),
np.dot(ddm, rot_eigsets))).real)
return np.transpose(gv)
|
py | b4154cb05ccc54e09a76ee9a932e0031e40b1461 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import sys
import argparse
import configparser
from cfgm_common.exceptions import RefsExistError
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
class EncapsulationProvision(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self._vnc_lib = VncApiAdmin(
self._args.use_admin_api,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
api_server_use_ssl=self._args.api_server_use_ssl)
global_vrouter_fq_name = ['default-global-system-config',
'default-global-vrouter-config']
if self._args.oper == "add":
encap_obj = EncapsulationPrioritiesType(
encapsulation=self._args.encap_priority.split(","))
conf_obj = GlobalVrouterConfig(encapsulation_priorities=encap_obj,
vxlan_network_identifier_mode=self._args.vxlan_vn_id_mode,
fq_name=global_vrouter_fq_name)
try:
result = self._vnc_lib.global_vrouter_config_create(conf_obj)
print('Created.UUID is %s' % result)
except RefsExistError:
print ("GlobalVrouterConfig Exists Already!")
result = self._vnc_lib.global_vrouter_config_update(conf_obj)
print('Updated.%s' % result)
return
elif self._args.oper != "add":
encap_obj = EncapsulationPrioritiesType(encapsulation=[])
conf_obj = GlobalVrouterConfig(encapsulation_priorities=encap_obj,
fq_name=global_vrouter_fq_name)
result = self._vnc_lib.global_vrouter_config_update(conf_obj)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_encap.py
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--encap_priority "MPLSoUDP,MPLSoGRE,VXLAN"
--vxlan_vn_id_mode "automatic"
--oper <add | delete>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': 'add',
'encap_priority': 'MPLSoUDP,MPLSoGRE,VXLAN',
'vxlan_vn_id_mode' : 'automatic'
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'admin'
}
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--encap_priority", help="List of Encapsulation priority", required=True)
parser.add_argument(
"--vxlan_vn_id_mode", help="Virtual Network id type to be used")
parser.add_argument(
"--oper", default='add',help="Provision operation to be done(add or delete)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenant name for keystone admin user")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--api_server_ip", help="IP address of api server")
group.add_argument("--use_admin_api",
default=False,
help = "Connect to local api-server on admin port",
action="store_true")
self._args = parser.parse_args(remaining_argv)
if not self._args.encap_priority:
parser.error('encap_priority is required')
# end _parse_args
# end class EncapsulationProvision
def main(args_str=None):
EncapsulationProvision(args_str)
# end main
if __name__ == "__main__":
main()
|
py | b4154ccbf2ee92931f237c2ecf3520547a3cb08c | import os
import sys
import time
import redis
import logging
import schedule
import ConfigParser
import paho.mqtt.client as mqtt
from CloudantDB import CloudantDB
from InformationFetcher import InformationFetcher
from Template import TemplateMatcher
from Persistor import Persistor
from Room import Room
from MqttRulez import MqttRulez
from Pinger import Pinger
from Climate import Climate
from RoomController import RoomController
from AlarmClock import AlarmClock
from HS100 import HS100
from Mpd import Mpd
from TwitterPusher import TwitterPusher
from Tank import Tank
from Telegram import Telegram
from SmarterCoffee import SmartCoffee
from Newscatcher import Newscatcher
from Chromecast import Chromecast
from Influx import Influx
from Adafruit import Adafruit
from Exercise import Exercise
temp = TemplateMatcher()
info = InformationFetcher()
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler('/tmp/sensomatic.log')
formatter = logging.Formatter('%(asctime)s %(name)s %(lineno)d %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
homeDir = os.path.expanduser("~/.sensomatic")
configFileName = homeDir + '/config.ini'
config = ConfigParser.ConfigParser()
def _readConfig():
update = False
if not os.path.isdir(homeDir):
logger.info("Creating homeDir")
os.makedirs(homeDir)
if os.path.isfile(configFileName):
config.read(configFileName)
else:
logger.info("Config file not found")
update = True
if not config.has_section('MAIN'):
logger.info("Adding MAIN part")
update = True
config.add_section("MAIN")
if not config.has_section('REDIS'):
logger.info("Adding Redis part")
update = True
config.add_section("REDIS")
if not config.has_option("REDIS", "ServerAddress"):
logger.info("No Server Address")
update = True
config.set("REDIS", "ServerAddress", "<ServerAddress>")
if not config.has_option("REDIS", "ServerPort"):
logger.info("No Server Port")
update = True
config.set("REDIS", "ServerPort", "6379")
if not config.has_section('MQTT'):
logger.info("Adding MQTT part")
update = True
config.add_section("MQTT")
if not config.has_option("MQTT", "ServerAddress"):
logger.info("No Server Address")
update = True
config.set("MQTT", "ServerAddress", "<ServerAddress>")
if not config.has_option("MQTT", "ServerPort"):
logger.info("No Server Port")
update = True
config.set("MQTT", "ServerPort", "1883")
if update:
with open(configFileName, 'w') as f:
config.write(f)
sys.exit(1)
def hourAnnounce():
logger.info("Announce hour")
for room in Room.ANNOUNCE_ROOMS:
if info.isSomeoneInTheRoom(room):
_mqclient.publish("%s/ttsout" % room, temp.getHourlyTime())
def checkWaschingMachine():
logger.info("Check washing machine")
if _redis.exists("WashingmachineReady"):
logger.info("Washing machine ready")
timestamp = float(_redis.get("WashingmachineReady"))
for room in Room.ANNOUNCE_ROOMS:
if info.isSomeoneInTheRoom(room):
_mqclient.publish("%s/ttsout" % room, temp.getWashingMachineReady(timestamp))
else:
logger.info("Wasching machine inactive")
def goSleep():
logger.info("Go to sleep")
_mqclient.publish("livingroom/ttsout", temp.getTimeToGoToBed())
def checkBath():
logger.info("Checking bath")
if _redis.exists("PlayRadioInBath") and not info.isSomeoneInTheRoom(Room.BATH_ROOM):
Mpd().getServerbyName("Bath").stop()
_mqclient.publish("bathroom/light/rgb", "0,0,0")
_redis.delete("PlayRadioInBath")
def checkCo2(room):
logger.info("Check co2")
for room in Room.ANNOUNCE_ROOMS:
if info.isSomeoneInTheRoom(room):
if info.getRoomCo2Level(room) is not None and info.getRoomCo2Level(room) > 2300:
logger.info("CO2 to high:" + str(info.getRoomCo2Level(room)))
_mqclient.publish("%s/ttsout" % room, temp.getCo2ToHigh(room))
def radiationCheck():
logger.info("Radiation check")
avr = info.getRadiationAverage()
here = info.getRadiationForOneStation()
if here > 0.15:
_mqclient.publish("telegram", temp.getRadiationToHigh(here))
for room in Room.ANNOUNCE_ROOMS:
_mqclient.publish("%s/ttsout" % room, temp.getRadiationToHigh(here))
if here > avr:
_mqclient.publish("telegram", temp.getRadiationHigherThenAverage(here, avr))
for room in Room.ANNOUNCE_ROOMS:
if info.isSomeoneInTheRoom(room):
_mqclient.publish("%s/ttsout" % room, temp.getRadiationHigherThenAverage(here, avr))
def particulateMatterCheck():
logger.info("ParticularMatterCheck")
p1, p2 = info.getParticulateMatter()
if p1 > 23.0 or p2 > 23.0:
_mqclient.publish("telegram", temp.getParticulateMatterHigherThenAverage(p1, p2))
for room in Room.ANNOUNCE_ROOMS:
if info.isSomeoneInTheRoom(room):
_mqclient.publish("%s/ttsout" % room, temp.getParticulateMatterHigherThenAverage(p1, p2))
def bathShowerUpdate():
logger.info("Checking Bath and Shower conditions")
if info.getBathOrShower() is not None:
_mqclient.publish("bathroom/ttsout", temp.getBathShowerUpdate())
else:
logger.info("No one showers")
def stopme():
sys.exit(0)
def _on_connect(client, userdata, rc, msg):
logger.info("Connected MQTT Main with result code %s" % rc)
#self._mqclient.subscribe("#")
def _on_message(client, userdata, msg):
logger.info("Mq Received on channel %s -> %s" % (msg.topic, msg.payload))
#self._workingQueue.put((msg.topic, msg.payload))
def _on_disconnect(client, userdata, msg):
logger.error("Disconnect MQTTRulez")
if __name__ == '__main__':
_readConfig()
_redis = redis.StrictRedis(host=config.get("REDIS", "ServerAddress"), port=config.get("REDIS", "ServerPort"), db=0)
_mqclient = mqtt.Client("Main", clean_session=True)
_mqclient.connect(config.get("MQTT", "ServerAddress"), config.get("MQTT", "ServerPort"), 60)
_mqclient.on_connect = _on_connect
_mqclient.on_message = _on_message
_mqclient.on_disconnect = _on_disconnect
_mqclient.loop_start()
_wait_time = 5
logger.info("Start Persistor")
persistor = Persistor()
persistor.start()
time.sleep(_wait_time)
logger.info("Start Influx")
influx = Influx()
influx.start()
time.sleep(_wait_time)
logger.info("Start Telegram bot")
telegram = Telegram()
telegram.start()
time.sleep(_wait_time)
logger.info("Start MqttRulez")
rulez = MqttRulez()
rulez.start()
time.sleep(_wait_time)
logger.info("Start Pinger")
pinger = Pinger()
pinger.start()
time.sleep(_wait_time)
logger.info("Start Cloudant")
cloudantdb = CloudantDB()
cloudantdb.start()
time.sleep(_wait_time)
logger.info("Start Chromecast")
chromecast = Chromecast()
chromecast.start()
time.sleep(_wait_time)
logger.info("Start Adafruit")
adafruit = Adafruit()
adafruit.start()
time.sleep(_wait_time)
logger.info("Start Climate Control")
climate = Climate()
climate.start()
time.sleep(_wait_time)
logger.info("Start Room Control")
lightControl = RoomController()
lightControl.start()
time.sleep(_wait_time)
logger.info("Start Alarmclock")
alarmclock = AlarmClock()
alarmclock.start()
time.sleep(_wait_time)
logger.info("Start Washing Machine")
washingmachine = HS100("washingmachine", "bathroom/washingmachine/")
washingmachine.start()
time.sleep(_wait_time)
logger.info("Start TwitterPusher")
twitterpusher = TwitterPusher()
twitterpusher.start()
time.sleep(_wait_time)
logger.info("Start Tank")
tank = Tank()
tank.start()
time.sleep(_wait_time)
logger.info("Start Coffee machine")
coffee = SmartCoffee()
coffee.start()
time.sleep(_wait_time)
logger.info("Start Newscatcher")
newscatcher = Newscatcher()
newscatcher.start()
time.sleep(_wait_time)
logger.info("Start Exercise")
exercise = Exercise()
exercise.start()
time.sleep(_wait_time)
#https://github.com/dbader/schedule
schedule.every(23).minutes.do(checkWaschingMachine)
schedule.every( 1).minutes.do(checkBath)
schedule.every(30).minutes.do(bathShowerUpdate)
schedule.every().hour.at("00:00").do(hourAnnounce)
schedule.every().hour.at("00:42").do(radiationCheck)
schedule.every().hour.at("00:23").do(particulateMatterCheck)
schedule.every().day.at("03:23").do(stopme)
schedule.every(15).minutes.do(checkCo2, Room.ANSI_ROOM)
schedule.every().sunday.at("22:42").do(goSleep)
schedule.every().monday.at("22:42").do(goSleep)
schedule.every().tuesday.at("22:42").do(goSleep)
schedule.every().wednesday.at("22:42").do(goSleep)
schedule.every().thursday.at("22:42").do(goSleep)
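    # Note (added for clarity): schedule only fires jobs inside run_pending(), so the
    # one-second sleep below sets the effective resolution of this scheduling loop.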
while True:
schedule.run_pending()
time.sleep(1)
|
py | b4154f37483f4860683fee37b894866a3f8423a0 | import logging
from math import fmod
import django
from django.conf import settings
from django.db import models
from zconnect.models import ModelBase
from zconnect.util import exceptions
from zconnect.zc_timeseries.util.tsaggregations import (
AGGREGATION_CHOICES, GRAPH_CHOICES, aggregation_implementations)
logger = logging.getLogger(__name__)
# Sentinel that just indicates that the data should be aggregated into one
# point. This prevents 2 queries being done
AGGREGATE_TO_ONE_VALUE = object()
class SensorType(ModelBase):
"""A type of sensor
Attributes:
aggregation_type (str): Default aggregation to perform for this sensor
type - eg 'avg', 'sum'
descriptive_name (str): Longer description of sensor
graph_type (str): What kind of graph this should be shown as in the app
(bar or graph)
product (Product): which product this sensor is associated with
sensor_name (str): name of sensor
unit (str): Unit of measurement (eg, "Watts")
"""
# The canonical name for this sensor
sensor_name = models.CharField(max_length=50, blank=True)
# A human readable sensor name, could be displayed under graphs etc.
descriptive_name = models.CharField(max_length=50, blank=True)
unit = models.CharField(max_length=30)
graph_type = models.CharField(max_length=20, choices=GRAPH_CHOICES, default="ts_graph")
aggregation_type = models.CharField(max_length=20, choices=AGGREGATION_CHOICES, default="sum")
# products can't be deleted until all devices are deleted as well. Once we
# can delete it, all sensor types are a bit pointless to keep, so delete
# them instead.
product = models.ForeignKey("zconnect.Product", models.CASCADE, related_name="sensors", blank=False)
class Meta:
unique_together = ["sensor_name", "product"]
class DeviceSensor(ModelBase):
"""A sensor associated with a device
Attributes:
device (Device): associated device
resolution (float): how often this is sampled, in seconds
sensor_type (SensorType): type of sensor
"""
resolution = models.FloatField(default=120.0)
# If device goes, just delete this. device should never be deleted really
# though
device = models.ForeignKey(settings.ZCONNECT_DEVICE_MODEL, models.CASCADE, related_name="sensors", blank=False)
# Can't leave the sensor type null
sensor_type = models.ForeignKey(SensorType, models.PROTECT, blank=False)
class Meta:
# NOTE
# This seems to make sense but it would break in the case that a device
# has multiple of the same sensor.
unique_together = ("device", "sensor_type")
def get_latest_ts_data(self):
"""Get latest ts data on this sensor for this device
The latest_ts_data_optimised on AbstractDevice should be used instead of
directly calling this
"""
from .timeseriesdata import TimeSeriesData
try:
data = TimeSeriesData.objects.filter(
sensor=self,
).latest("ts")
except TimeSeriesData.DoesNotExist:
# If the device hasn't made any timeseries data yet.
return {}
return data
def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):
"""Implementation of aggregating data. See other functions for meanings
of arguments.
Raises:
TimeSeriesData.DoesNotExist: If there is no data in the given period
"""
from .timeseriesdata import TimeSeriesData
# Multiple of resolution
# We extract just the values_list here because doing it in a
# separate statement results in django querying the database
# twice...
raw = TimeSeriesData.objects.filter(
ts__gte=data_start,
ts__lt=data_end,
sensor=self,
).values_list("value", "ts")
if not raw:
# This should raise above but for some reason it doesn't when using
# values_list
raise TimeSeriesData.DoesNotExist
# How many samples we would expect if there was no missing data
expected_samples = (data_end - data_start).total_seconds()/self.resolution
if resolution is AGGREGATE_TO_ONE_VALUE:
aggregation_factor = expected_samples
else:
# Already checked that this divides nicely
# NOTE
# should aggregation_factor ALWAYS be expected_samples?
aggregation_factor = int(resolution//self.resolution)
logger.debug("%s objects to aggregate", len(raw))
aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]
logger.debug("Aggregating '%s' with %s, factor %s",
aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE,
aggregation_factor)
data = aggregation_engine(
raw,
aggregation_type,
aggregation_factor,
expected_samples,
data_start,
data_end,
self,
)
return data
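    # Worked example (illustrative numbers): with self.resolution = 120 s, a
    # requested resolution of 600 s and a one-hour window, expected_samples is
    # 3600 / 120 = 30 and aggregation_factor is 600 // 120 = 5, i.e. every
    # five raw readings are rolled up into one aggregated point.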
def optimised_data_fetch(self, data_start, data_end, resolution):
"""Get data from given time block and possibly average it
See Device.optimised_data_fetch for args
This function assumes all the input data is already validated.
"""
if resolution < self.resolution or fmod(resolution, self.resolution):
raise django.db.DataError("Resolution should be a multiple of {} (was {})".format(
self.resolution, resolution))
from .timeseriesdata import TimeSeriesData
# XXX
# equals for floats? If resolution is not a whole number this won't work
if resolution == self.resolution:
# No aggregation, just get the data
# It's already sorted by time in the database
data = TimeSeriesData.objects.filter(
sensor=self,
ts__gte=data_start,
ts__lt=data_end,
)
else:
data = self._get_aggregated_data(
data_start,
data_end,
resolution,
self.sensor_type.aggregation_type,
)
return data
def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):
"""Create a ts archive between the start and data_end dates
This does it like ``[data_start, data_end)`` - including start, not end
If delete is True, also delete the old ts data.
Args:
data_start (datetime): start of archive
            data_end (datetime): end of archive
Keyword args:
delete (bool, optional): delete old ts data if True
aggregation_type (str, optional): If this is passed then it will use
that aggregation type rather than the 'default' on the sensor
type. This has to be one of
zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will
raise an error. Note that some of these choices may be
meaningless for certain data types (eg, sum of temperatures over
a month is a bit useless)
Returns:
TimeSeriesDataArchive: archive of data between data_start and data_end
Raises:
TimeSeriesData.DoesNotExist: If there is no data between data_start and
data_end
"""
from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive
if not aggregation_type:
aggregation_type = self.sensor_type.aggregation_type
elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):
raise exceptions.IncorrectAggregationError("'{}' is not a valid aggregation".format(aggregation_type))
data = self._get_aggregated_data(
data_start,
data_end,
AGGREGATE_TO_ONE_VALUE,
aggregation_type,
)
logger.debug("to archive: %s", data)
archived = TimeSeriesDataArchive(
start=data_start,
end=data_end,
value=data[0].value,
sensor=self,
aggregation_type=aggregation_type,
)
archived.save()
logger.debug("archived %s to %s with %s: %s", archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)
if delete:
TimeSeriesData.objects.filter(
sensor=self,
ts__gte=data_start,
ts__lt=data_end,
).delete()
return archived
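# Illustrative usage sketch (not part of the original module): roll the last
# day of raw readings for one DeviceSensor into a single archive row using the
# sensor type's default aggregation. Assumes Django's timezone utilities are
# configured; pass aggregation_type/delete as described in archive_between.
def _example_archive_last_day(device_sensor):
    from datetime import timedelta
    from django.utils import timezone
    end = timezone.now()
    start = end - timedelta(days=1)
    return device_sensor.archive_between(start, end)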
|
py | b4154fc79903e396c0bfac32fd23d795b0905a88 | import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_cadex_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767n'
@pytest.fixture
def invalid_cadex_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767m'
@pytest.fixture
def current_block_hash():
return '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
@pytest.fixture
def mn_list():
from masternode import Masternode
masternodelist_full = {
u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u' ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572 82842 1474152618 71111 52.90.74.124:27271',
u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u' ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732 1590425 1474155175 71122 [2604:a880:800:a1::9b:0]:27271',
u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u' ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704 824622 1474152571 71110 178.62.203.249:27271',
}
mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
return mnlist
@pytest.fixture
def mn_status_good():
# valid masternode status enabled & running
status = {
"vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
"service": "[2604:a880:800:a1::9b:0]:27271",
"pubkey": "yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L",
"status": "Masternode successfully started"
}
return status
@pytest.fixture
def mn_status_bad():
# valid masternode but not running/waiting
status = {
"vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
"service": "[::]:0",
"status": "Node just started, not yet activated"
}
return status
# ========================================================================
def test_valid_cadex_address():
from cadexlib import is_valid_cadex_address
main = valid_cadex_address()
test = valid_cadex_address('testnet')
assert is_valid_cadex_address(main) is True
assert is_valid_cadex_address(main, 'mainnet') is True
assert is_valid_cadex_address(main, 'testnet') is False
assert is_valid_cadex_address(test) is False
assert is_valid_cadex_address(test, 'mainnet') is False
assert is_valid_cadex_address(test, 'testnet') is True
def test_invalid_cadex_address():
from cadexlib import is_valid_cadex_address
main = invalid_cadex_address()
test = invalid_cadex_address('testnet')
assert is_valid_cadex_address(main) is False
assert is_valid_cadex_address(main, 'mainnet') is False
assert is_valid_cadex_address(main, 'testnet') is False
assert is_valid_cadex_address(test) is False
assert is_valid_cadex_address(test, 'mainnet') is False
assert is_valid_cadex_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
from cadexlib import elect_mn
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
from cadexlib import parse_masternode_status_vin
status = mn_status_good()
vin = parse_masternode_status_vin(status['vin'])
assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
status = mn_status_bad()
vin = parse_masternode_status_vin(status['vin'])
assert vin is None
def test_hash_function():
import cadexlib
sb_data_hex = '7b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d'
sb_hash = '7ae8b02730113382ea75cbb1eecc497c3aa1fdd9e76e875e38617e07fb2cb21a'
hex_hash = "%x" % cadexlib.hashit(sb_data_hex)
assert hex_hash == sb_hash
def test_blocks_to_seconds():
import cadexlib
from decimal import Decimal
precision = Decimal('0.001')
assert Decimal(cadexlib.blocks_to_seconds(0)) == Decimal(0.0)
assert Decimal(cadexlib.blocks_to_seconds(2)).quantize(precision) \
== Decimal(314.4).quantize(precision)
assert int(cadexlib.blocks_to_seconds(16616)) == 2612035
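    # Worked arithmetic behind the expected values above (assumption: these
    # tests imply a 157.2 s block interval): 2 * 157.2 = 314.4 s and
    # 16616 * 157.2 = 2612035.2 s, which truncates to 2612035.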
|
py | b4154fd6077d7a3164c38ffd8edb1a7f83bb1e10 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 0); |
py | b415512330b2a91f90c9065eb7b0867fa9bc86ea | #!/usr/bin/env python
import logging
import os
import sys
import argparse
import aws_encryption_sdk
import boto3
import botocore.exceptions
__version__ = '1.0.0'
logger = logging.getLogger()
class ArgsParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
kwargs.setdefault(
'description',
'Decrypts files encrypted with kms_encrypt')
argparse.ArgumentParser.__init__(self, *args, **kwargs)
self.formatter_class = argparse.ArgumentDefaultsHelpFormatter
self.epilog = 'You need to create KMS keys before using this. By default it tries to use "alias/ec2" key'
self.options = None
self.add_argument('-a', '--alias', dest='key_alias', help='KMS key alias', default='alias/ec2')
self.add_argument('-p', '--profile', dest='profile', help='AWS profile to use')
self.add_argument('-r', '--region', dest='region', default='us-west-2', help='AWS region to connect')
self.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Be verbose')
self.add_argument('in_file', help='Name of the encrypted input file',)
self.add_argument('out_file', help='Name of the output file', nargs='?')
def error(self, message):
sys.stderr.write('Error: %s\n\n' % message)
self.print_help()
sys.exit(2)
def parse_args(self, *args, **kwargs):
options = argparse.ArgumentParser.parse_args(self, *args, **kwargs)
options.log_format = '%(filename)s:%(lineno)s[%(process)d]: %(levelname)s %(message)s'
options.name = os.path.basename(__file__)
if not options.out_file and \
options.in_file.endswith('.enc'):
options.out_file = options.in_file[:-4]
elif not options.out_file:
self.error('Please specify output file')
self.options = options
return options
class KmsDecrypt(object):
def __init__(self, _session):
self.session = _session
def alias_exists(self, _alias):
aliases = self.session.client('kms').list_aliases()
return any([k for k in aliases['Aliases'] if k['AliasName'] == _alias])
def build_kms_master_key_provider(self, alias):
if not self.alias_exists(alias):
raise SystemExit('FATAL: alias %s does not exist in %s' % (
alias,
self.session.region_name,
))
arn_template = 'arn:aws:kms:{region}:{account_id}:{alias}'
kms_master_key_provider = aws_encryption_sdk.KMSMasterKeyProvider()
account_id = self.session.client('sts').get_caller_identity()['Account']
kms_master_key_provider.add_master_key(arn_template.format(
region=self.session.region_name,
account_id=account_id,
alias=alias
))
return kms_master_key_provider
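    # Worked example of the ARN template above (hypothetical account id):
    # region "us-west-2", account "123456789012" and alias "alias/ec2" yield
    # "arn:aws:kms:us-west-2:123456789012:alias/ec2".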
def decrypt_file(self, key_alias, input_filename, output_filename):
key_provider = self.build_kms_master_key_provider(key_alias)
with open(input_filename, 'rb') as infile, \
open(output_filename, 'wb') as outfile, \
aws_encryption_sdk.stream(
mode='d',
source=infile,
key_provider=key_provider
) as decryptor:
for chunk in decryptor:
outfile.write(chunk)
def main(args=sys.argv[1:]):
my_parser = ArgsParser()
options = my_parser.parse_args(args)
for m in ['botocore', 'boto3', 'aws_encryption_sdk']:
        if not options.verbose:
            logging.getLogger(m).setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=options.log_format)
try:
session = boto3.session.Session()
k = KmsDecrypt(session)
k.decrypt_file(
options.key_alias,
options.in_file,
options.out_file
)
except botocore.exceptions.ClientError as e:
raise SystemExit(e)
if __name__ == '__main__':
main()
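# Example invocation (illustrative; the script name is assumed, and the input
# is expected to come from the matching kms_encrypt tool with an existing
# "alias/ec2" KMS key):
#   python kms_decrypt.py --profile myprofile --region us-west-2 secrets.txt.enc
# With no explicit output file, the ".enc" suffix is stripped and the
# plaintext is written to "secrets.txt".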
|
py | b415512667734e0125870fbefbc7edd892127cd7 | import numpy as np
def forward(A, B, observ):
"""Hidden Markov models need to solve 3 fundamental problems:
    1. For a given HMM M and an observation sequence O, what is the likelihood P(O|M);
    2. For a given HMM M and an observation sequence O, what is the best hidden state sequence Q (think of O as words and Q as syntactic categories);
    3. For an observation sequence O and the set of hidden states, learn the HMM parameters A and B.
    The forward algorithm solves the first of these three problems: given an observation sequence, compute its likelihood efficiently.
    The main subtlety is aligning the time index t with the observations: observations are zero-indexed (the first observation is observ[0], the second is observ[1], and so on), so the inner update must use B[s][observ[t-1]] rather than B[s][observ[t]].
    Note that for long sequences the probabilities stored in the matrix can become small enough to underflow."""
T = len(observ)
N = len(B)
fwd = np.zeros((N+2, T+1))
# initialize the first column
for s in xrange(1, N+1):
fwd[s, 1] = A[0, s] * B[s][observ[0]]
for t in xrange(2, T+1):
for s in xrange(1, N+1):
fwd[s, t] = sum(fwd[s_p, t-1] * A[s_p, s] * B[s][observ[t-1]]
for s_p in xrange(1, N+1))
# print 's: {}'.format(s)
# print 't: {}'.format(t)
# for s_p in xrange(1, N+1):
# print 'fwd[{}, {}]: {}'.format(s_p, t-1, fwd[s_p, t-1])
# print 'A[{}, {}]: {}'.format(s_p, s, A[s_p, s])
# print 'B[{}, {}]: {}'.format(s, observ[t-1], B[s][observ[t-1]])
# fwd[s, t] += fwd[s_p, t-1] * A[s_p, s] * B[s][observ[t-1]]
# print 'fwd[{}, {}]: {}'.format(s, t, fwd[s, t])
fwd[N+1, T] = sum(fwd[s, T] * A[s, -1] for s in xrange(1, N+1))
print fwd
return fwd[-1, T]
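# Illustrative sketch (not part of the original script): a log-space variant of
# the forward pass that sidesteps the underflow noted in the docstring above.
# It assumes the same A (transition matrix with start/end states) and B
# (state -> observation-probability dict) layout as forward() and returns
# log P(O|M) instead of P(O|M).
def forward_log(A, B, observ):
    T = len(observ)
    N = len(B)
    log_fwd = np.full((N + 2, T + 1), -np.inf)
    # Replace exact zeros before taking logs so numpy does not warn; the
    # corresponding transitions stay effectively impossible.
    logA = np.log(np.where(A > 0, A, np.finfo(float).tiny))
    for s in range(1, N + 1):
        log_fwd[s, 1] = logA[0, s] + np.log(B[s][observ[0]])
    for t in range(2, T + 1):
        for s in range(1, N + 1):
            terms = [log_fwd[s_p, t - 1] + logA[s_p, s] + np.log(B[s][observ[t - 1]])
                     for s_p in range(1, N + 1)]
            log_fwd[s, t] = np.logaddexp.reduce(terms)
    return np.logaddexp.reduce([log_fwd[s, T] + logA[s, -1] for s in range(1, N + 1)])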
if __name__ == '__main__':
# transitional probability matrix
# A = np.matrix(((0, 0.8, 0.2, 0),
# (0, 0.6, 0.3, 0.1),
# (0, 0.4, 0.5, 0.1),
# (0, 0, 0, 0)))
A = np.matrix(((0, 0.2, 0.8, 0),
(0, 0.5, 0.4, 0.1),
(0, 0.3, 0.6, 0.1),
(0, 0, 0, 0)))
# hidden layer: state-to-observation probability
# B = {1 : {1:0.2, 2:0.4, 3:0.4},
# 2 : {1:0.5, 2:0.4, 3:0.1}}
B = {2 : {1:0.2, 2:0.4, 3:0.4},
1 : {1:0.5, 2:0.4, 3:0.1}}
OBS1 = (3, 3, 1, 1, 2, 2, 3, 1, 3)
OBS2 = (3, 3, 1, 1, 2, 3, 3, 1, 2)
RESULT1 = forward(A, B, OBS1)
RESULT2 = forward(A, B, OBS2)
# OBS3 = (3, 1, 3)
# RESULT3 = forward(A, B, OBS3)
# OBS4 = (3,)
# RESULT4 = forward(A, B, OBS4)
print RESULT1
print RESULT2
def print_observation(obs):
print 'the observation {} is more likely to happen.'.format(obs)
if RESULT1 > RESULT2:
print_observation(OBS1)
else:
print_observation(OBS2)
# print RESULT3
# print RESULT4
|
py | b4155380bdf4e2074ae3f909978dabff45b1c3c0 | #!/usr/bin/env python
# This helper script takes a major.minor.patch semantic version string
# ("2.1.1") and reformats it into the dot-less zero-padded version like
# "20101". This is necessary to turn external release designations into
# integer SpComp2 version numbers.
from __future__ import print_function
import sys
if len(sys.argv) != 2 :
print('Need 1 argument: version number with dots')
exit(1)
vers = sys.argv[1]
parts = vers.split('.')
print ('{:d}{:02d}{:02d}'.format(int(parts[0]), int(parts[1]), int(parts[2])))
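# Worked examples (illustrative): "2.1.1" -> "20101", "10.12.3" -> "101203".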
|
py | b41554c3c9fcc6795f68edc7da96aad16270fe5d | """
Set test to run here
"""
#import test1
import scale_test1,shm_test1,preset_scal_test
import scal_test
import skvz_battery
import tmvp_merge_idx_test
import skvz_paper_tests
import perf_test
import test_new_functionality
#test_list = [tmvp_merge_idx_test.main]
#test_list = [skvz_battery.main]
#test_list = [skvz_paper_tests.main]
#test_list = [scal_test.main]
#test_list = [test1.main]
#test_list = [scale_test1.main]#,
#scal_test.main]#,
#preset_scal_test.main,
#shm_test1.main]
#test_list = [perf_test.main]
test_list = [test_new_functionality.main]  # assumes test_new_functionality exposes main() like the other test modules
def runTests():
for test in test_list:
test()
def addTest(test):
global test_list
test_list.append(test) |
py | b41554de6e276503346c3f23080db622ca93cd81 | # -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
setup(
name='django-thaidate',
version='1.0.2',
author=u'Jon Combe',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
install_requires=[],
url='https://github.com/joncombe/django-thaidate',
license='BSD licence, see LICENCE file',
description='Replacement for the "date" Django template tag to show Thai years',
long_description='Replacement for the "date" Django template tag to show Thai years',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe=False,
)
|
py | b415552ce892b8fffbc6b4da529fdf1d4f5cb871 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.object_store.v1 import account
CONTAINER_NAME = "mycontainer"
ACCOUNT_EXAMPLE = {
'content-length': '0',
'accept-ranges': 'bytes',
'date': 'Sat, 05 Jul 2014 19:17:40 GMT',
'x-account-bytes-used': '12345',
'x-account-container-count': '678',
'content-type': 'text/plain; charset=utf-8',
'x-account-object-count': '98765',
'x-timestamp': '1453413555.88937'
}
class TestAccount(base.TestCase):
def setUp(self):
super(TestAccount, self).setUp()
self.endpoint = self.cloud.object_store.get_endpoint() + '/'
def test_basic(self):
sot = account.Account(**ACCOUNT_EXAMPLE)
self.assertIsNone(sot.resources_key)
self.assertIsNone(sot.id)
self.assertEqual('/', sot.base_path)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_head)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_delete)
self.assertFalse(sot.allow_list)
self.assertFalse(sot.allow_create)
def test_make_it(self):
sot = account.Account(**ACCOUNT_EXAMPLE)
self.assertIsNone(sot.id)
self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-bytes-used']),
sot.account_bytes_used)
self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-container-count']),
sot.account_container_count)
self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-object-count']),
sot.account_object_count)
self.assertEqual(ACCOUNT_EXAMPLE['x-timestamp'], sot.timestamp)
def test_set_temp_url_key(self):
sot = account.Account()
key = 'super-secure-key'
self.register_uris([
dict(method='POST', uri=self.endpoint,
status_code=204,
validate=dict(
headers={
'x-account-meta-temp-url-key': key})),
dict(method='HEAD', uri=self.endpoint,
headers={
'x-account-meta-temp-url-key': key}),
])
sot.set_temp_url_key(self.cloud.object_store, key)
self.assert_calls()
def test_set_account_temp_url_key_second(self):
sot = account.Account()
key = 'super-secure-key'
self.register_uris([
dict(method='POST', uri=self.endpoint,
status_code=204,
validate=dict(
headers={
'x-account-meta-temp-url-key-2': key})),
dict(method='HEAD', uri=self.endpoint,
headers={
'x-account-meta-temp-url-key-2': key}),
])
sot.set_temp_url_key(self.cloud.object_store, key, secondary=True)
self.assert_calls()
|
py | b4155605190b1fb760d72ac047ca08ba4ce33ea4 | import pytest
import warnings
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from .. import SparseDesignMatrix, SparseDesignMatrixCollection, DesignMatrix, DesignMatrixCollection
from ... import LightkurveWarning
from ..designmatrix import create_sparse_spline_matrix, create_spline_matrix
from scipy import sparse
def test_designmatrix_basics():
"""Can we create a design matrix from a dataframe?"""
size, name = 10, 'testmatrix'
df = pd.DataFrame({'vector1': np.ones(size),
'vector2': np.arange(size),
'vector3': np.arange(size)**2})
X = sparse.csr_matrix(np.asarray(df))
dm = SparseDesignMatrix(X, name=name, columns=['vector1', 'vector2', 'vector3'])
assert dm.columns == ['vector1', 'vector2', 'vector3']
assert dm.name == name
assert dm.shape == (size, 3)
dm.plot()
dm.plot_priors()
assert dm.append_constant().shape == (size, 4) # expect one column more
assert dm.pca(nterms=2).shape == (size, 2) # expect one column less
assert dm.split([5]).shape == (size, 6) # expect double columns
dm.__repr__()
dm = SparseDesignMatrix(X, name=name, columns=['vector1', 'vector2', 'vector3'])
dm.append_constant(inplace=True)
assert dm.shape == (size, 4) # expect one column more
dm = SparseDesignMatrix(X, name=name, columns=['vector1', 'vector2', 'vector3'])
dm.split([5], inplace=True)
assert dm.shape == (size, 6) # expect double columns
def test_split():
"""Can we split a design matrix correctly?"""
X = sparse.csr_matrix(np.vstack([np.linspace(0, 9, 10), np.linspace(100, 109, 10)]).T)
dm = SparseDesignMatrix(X, columns=['a', 'b'])
# Do we retrieve the correct shape?
assert dm.shape == (10, 2)
assert dm.split(2).shape == (10, 4)
assert dm.split([2,8]).shape == (10, 6)
# Are the new areas padded with zeros?
assert (dm.split([2,8]).values[2:, 0:2] == 0).all()
assert (dm.split([2,8]).values[:8, 4:] == 0).all()
# Are all the column names unique?
assert len(set(dm.split(4).columns)) == 4
def test_standardize():
"""Verifies DesignMatrix.standardize()"""
# A column with zero standard deviation remains unchanged
X = sparse.csr_matrix(np.vstack([np.ones(10)]).T)
dm = SparseDesignMatrix(X, columns=['const'])
assert (dm.standardize()['const'] == dm['const']).all()
# Normally-distributed columns will become Normal(0, 1)
X = sparse.csr_matrix(np.vstack([ np.random.normal(loc=5, scale=3, size=100)]).T)
dm = SparseDesignMatrix(X, columns=['normal'])
assert np.round(np.mean(dm.standardize()['normal']), 3) == 0
assert np.round(np.std(dm.standardize()['normal']), 1) == 1
dm.standardize(inplace=True)
def test_pca():
"""Verifies DesignMatrix.pca()"""
size = 10
dm = DesignMatrix({'a':np.random.normal(10, 20, size),
'b':np.random.normal(40, 10, size),
'c':np.random.normal(60, 5, size)}).to_sparse()
for nterms in [1, 2, 3]:
assert dm.pca(nterms=nterms).shape == (size, nterms)
def test_collection_basics():
"""Can we create a design matrix collection?"""
size = 5
dm1 = DesignMatrix(np.ones((size, 1)), columns=['col1'], name='matrix1').to_sparse()
dm2 = DesignMatrix(np.zeros((size, 2)), columns=['col2', 'col3'], name='matrix2').to_sparse()
dmc = SparseDesignMatrixCollection([dm1, dm2])
assert_array_equal(dmc['matrix1'].values, dm1.values)
assert_array_equal(dmc['matrix2'].values, dm2.values)
assert_array_equal(dmc.values, np.hstack((dm1.values, dm2.values)))
dmc.plot()
dmc.__repr__()
dmc = dm1.collect(dm2)
assert_array_equal(dmc['matrix1'].values, dm1.values)
assert_array_equal(dmc['matrix2'].values, dm2.values)
assert_array_equal(dmc.values, np.hstack((dm1.values, dm2.values)))
"""Can we create a design matrix collection when one is sparse?"""
size = 5
dm1 = DesignMatrix(np.ones((size, 1)), columns=['col1'], name='matrix1')
dm2 = DesignMatrix(np.zeros((size, 2)), columns=['col2', 'col3'], name='matrix2').to_sparse()
with warnings.catch_warnings():
warnings.simplefilter("always")
with pytest.warns(LightkurveWarning, match='Sparse matrices will be converted to dense matrices.'):
dmc = DesignMatrixCollection([dm1, dm2])
assert not np.any([sparse.issparse(d.X) for d in dmc])
with warnings.catch_warnings():
warnings.simplefilter("always")
with pytest.warns(LightkurveWarning, match='Dense matrices will be converted to sparse matrices.'):
dmc = SparseDesignMatrixCollection([dm1, dm2])
assert np.all([sparse.issparse(d.X) for d in dmc])
dmc.plot()
dmc.__repr__()
assert isinstance(dmc.to_designmatrix(), SparseDesignMatrix)
def test_designmatrix_rank():
"""Does DesignMatrix issue a low-rank warning when justified?"""
warnings.simplefilter("always")
# Good rank
dm = DesignMatrix({'a': [1, 2, 3]}).to_sparse()
assert dm.rank == 1
dm.validate(rank=True) # Should not raise a warning
# Bad rank
with pytest.warns(LightkurveWarning, match='rank'):
dm = DesignMatrix({'a': [1, 2, 3], 'b': [1, 1, 1], 'c': [1, 1, 1],
'd': [1, 1, 1], 'e': [3, 4, 5]})
dm = dm.to_sparse()
assert dm.rank == 2
with pytest.warns(LightkurveWarning, match='rank'):
dm.validate(rank=True)
def test_splines():
"""Do splines work as expected?"""
# Dense and sparse splines should produce the same answer.
x = np.linspace(0, 1, 100)
spline_dense = create_spline_matrix(x, knots=[0.1, 0.3, 0.6, 0.9], degree=2)
spline_sparse = create_sparse_spline_matrix(x, knots=[0.1, 0.3, 0.6, 0.9], degree=2)
assert np.allclose(spline_dense.values, spline_sparse.values)
assert isinstance(spline_dense, DesignMatrix)
assert isinstance(spline_sparse, SparseDesignMatrix)
|
py | b4155629d5c359a9e43a9110d7b842ea704a94fa | # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
# mf6/utils/createpackages.py
from .. import mfpackage
from ..data.mfdatautil import ListTemplateGenerator
class ModflowGwfmaw(mfpackage.MFPackage):
"""
ModflowGwfmaw defines a maw package within a gwf6 model.
Parameters
----------
model : MFModel
Model that this package is a part of. Package is automatically
added to model when it is initialized.
loading_package : bool
Do not set this parameter. It is intended for debugging and internal
processing purposes only.
auxiliary : [string]
* auxiliary (string) defines an array of one or more auxiliary variable
names. There is no limit on the number of auxiliary variables that
can be provided on this line; however, lists of information provided
in subsequent blocks must have a column of data for each auxiliary
variable name defined here. The number of auxiliary variables
detected on this line determines the value for naux. Comments cannot
be provided anywhere on this line as they will be interpreted as
auxiliary variable names. Auxiliary variables may not be used by the
package, but they will be available for use by other parts of the
program. The program will terminate with an error if auxiliary
variables are specified on more than one line in the options block.
boundnames : boolean
* boundnames (boolean) keyword to indicate that boundary names may be
provided with the list of multi-aquifer well cells.
print_input : boolean
* print_input (boolean) keyword to indicate that the list of multi-
aquifer well information will be written to the listing file
immediately after it is read.
print_head : boolean
* print_head (boolean) keyword to indicate that the list of multi-
aquifer well heads will be printed to the listing file for every
stress period in which "HEAD PRINT" is specified in Output Control.
If there is no Output Control option and PRINT_HEAD is specified,
then heads are printed for the last time step of each stress period.
print_flows : boolean
* print_flows (boolean) keyword to indicate that the list of multi-
aquifer well flow rates will be printed to the listing file for every
stress period time step in which "BUDGET PRINT" is specified in
Output Control. If there is no Output Control option and
"PRINT_FLOWS" is specified, then flow rates are printed for the last
time step of each stress period.
save_flows : boolean
* save_flows (boolean) keyword to indicate that multi-aquifer well flow
terms will be written to the file specified with "BUDGET FILEOUT" in
Output Control.
stage_filerecord : [headfile]
* headfile (string) name of the binary output file to write stage
information.
budget_filerecord : [budgetfile]
* budgetfile (string) name of the binary output file to write budget
information.
no_well_storage : boolean
* no_well_storage (boolean) keyword that deactivates inclusion of well
storage contributions to the multi-aquifer well package continuity
equation.
flowing_wells : boolean
* flowing_wells (boolean) keyword that activates the flowing wells
option for the multi-aquifer well package.
shutdown_theta : double
* shutdown_theta (double) value that defines the weight applied to
discharge rate for wells that limit the water level in a discharging
well (defined using the HEAD_LIMIT keyword in the stress period
data). SHUTDOWN_THETA is used to control discharge rate oscillations
when the flow rate from the aquifer is less than the specified flow
rate from the aquifer to the well. Values range between 0.0 and 1.0,
and larger values increase the weight (decrease under-relaxation)
applied to the well discharge rate. The HEAD_LIMIT option has been
included to facilitate backward compatibility with previous versions
of MODFLOW but use of the RATE_SCALING option instead of the
HEAD_LIMIT option is recommended. By default, SHUTDOWN_THETA is 0.7.
shutdown_kappa : double
* shutdown_kappa (double) value that defines the weight applied to
discharge rate for wells that limit the water level in a discharging
well (defined using the HEAD_LIMIT keyword in the stress period
data). SHUTDOWN_KAPPA is used to control discharge rate oscillations
when the flow rate from the aquifer is less than the specified flow
rate from the aquifer to the well. Values range between 0.0 and 1.0,
and larger values increase the weight applied to the well discharge
rate. The HEAD_LIMIT option has been included to facilitate backward
compatibility with previous versions of MODFLOW but use of the
RATE_SCALING option instead of the HEAD_LIMIT option is recommended.
By default, SHUTDOWN_KAPPA is 0.0001.
timeseries : {varname:data} or timeseries data
* Contains data for the ts package. Data can be stored in a dictionary
containing data for the ts package with variable names as keys and
package data as values. Data just for the timeseries variable is also
acceptable. See ts package documentation for more information.
observations : {varname:data} or continuous data
* Contains data for the obs package. Data can be stored in a dictionary
containing data for the obs package with variable names as keys and
package data as values. Data just for the observations variable is
also acceptable. See obs package documentation for more information.
mover : boolean
* mover (boolean) keyword to indicate that this instance of the MAW
Package can be used with the Water Mover (MVR) Package. When the
MOVER option is specified, additional memory is allocated within the
package to store the available, provided, and received water.
nmawwells : integer
* nmawwells (integer) integer value specifying the number of multi-
aquifer wells that will be simulated for all stress periods.
packagedata : [wellno, radius, bottom, strt, condeqn, ngwfnodes, aux,
boundname]
* wellno (integer) integer value that defines the well number
associated with the specified PACKAGEDATA data on the line. WELLNO
must be greater than zero and less than or equal to NMAWWELLS. Multi-
aquifer well information must be specified for every multi-aquifer
well or the program will terminate with an error. The program will
also terminate with an error if information for a multi-aquifer well
is specified more than once. This argument is an index variable,
which means that it should be treated as zero-based when working with
FloPy and Python. Flopy will automatically subtract one when loading
index variables and add one when writing index variables.
* radius (double) radius for the multi-aquifer well.
* bottom (double) bottom elevation of the multi-aquifer well. The well
bottom is reset to the cell bottom in the lowermost GWF cell
connection in cases where the specified well bottom is above the
bottom of this GWF cell.
* strt (double) starting head for the multi-aquifer well.
* condeqn (string) character string that defines the conductance
equation that is used to calculate the saturated conductance for the
multi-aquifer well. Possible multi-aquifer well CONDEQN strings
include: SPECIFIED--character keyword to indicate the multi-aquifer
well saturated conductance will be specified. THIEM--character
keyword to indicate the multi-aquifer well saturated conductance will
be calculated using the Thiem equation, which considers the cell top
and bottom, aquifer hydraulic conductivity, and effective cell and
well radius. SKIN--character keyword to indicate that the multi-
aquifer well saturated conductance will be calculated using the cell
top and bottom, aquifer and screen hydraulic conductivity, and well
and skin radius. CUMULATIVE--character keyword to indicate that the
multi-aquifer well saturated conductance will be calculated using a
combination of the Thiem and SKIN equations. MEAN--character keyword
to indicate the multi-aquifer well saturated conductance will be
calculated using the aquifer and screen top and bottom, aquifer and
screen hydraulic conductivity, and well and skin radius.
* ngwfnodes (integer) integer value that defines the number of GWF
nodes connected to this (WELLNO) multi-aquifer well. NGWFNODES must
be greater than zero.
* aux (double) represents the values of the auxiliary variables for
each multi-aquifer well. The values of auxiliary variables must be
present for each multi-aquifer well. The values must be specified in
the order of the auxiliary variables specified in the OPTIONS block.
If the package supports time series and the Options block includes a
TIMESERIESFILE entry (see the "Time-Variable Input" section), values
can be obtained from a time series by entering the time-series name
in place of a numeric value.
* boundname (string) name of the multi-aquifer well cell. BOUNDNAME is
an ASCII character variable that can contain as many as 40
characters. If BOUNDNAME contains spaces in it, then the entire name
must be enclosed within single quotes.
connectiondata : [wellno, icon, cellid, scrn_top, scrn_bot, hk_skin,
radius_skin]
* wellno (integer) integer value that defines the well number
associated with the specified CONNECTIONDATA data on the line. WELLNO
must be greater than zero and less than or equal to NMAWWELLS. Multi-
aquifer well connection information must be specified for every
multi-aquifer well connection to the GWF model (NGWFNODES) or the
program will terminate with an error. The program will also terminate
with an error if connection information for a multi-aquifer well
connection to the GWF model is specified more than once. This
argument is an index variable, which means that it should be treated
as zero-based when working with FloPy and Python. Flopy will
automatically subtract one when loading index variables and add one
when writing index variables.
* icon (integer) integer value that defines the GWF connection number
for this multi-aquifer well connection entry. ICONN must be greater
than zero and less than or equal to NGWFNODES for multi-aquifer well
WELLNO. This argument is an index variable, which means that it
should be treated as zero-based when working with FloPy and Python.
Flopy will automatically subtract one when loading index variables
and add one when writing index variables.
* cellid ((integer, ...)) is the cell identifier, and depends on the
type of grid that is used for the simulation. For a structured grid
that uses the DIS input file, CELLID is the layer, row, and column.
For a grid that uses the DISV input file, CELLID is the layer and
CELL2D number. If the model uses the unstructured discretization
(DISU) input file, CELLID is the node number for the cell. One or
more screened intervals can be connected to the same CELLID if
CONDEQN for a well is MEAN. The program will terminate with an error
if MAW wells using SPECIFIED, THIEM, SKIN, or CUMULATIVE conductance
equations have more than one connection to the same CELLID. This
argument is an index variable, which means that it should be treated
as zero-based when working with FloPy and Python. Flopy will
automatically subtract one when loading index variables and add one
when writing index variables.
* scrn_top (double) value that defines the top elevation of the screen
for the multi-aquifer well connection. If the specified SCRN_TOP is
greater than the top of the GWF cell it is set equal to the top of
the cell. SCRN_TOP can be any value if CONDEQN is SPECIFIED, THIEM,
SKIN, or COMPOSITE and SCRN_TOP is set to the top of the cell.
* scrn_bot (double) value that defines the bottom elevation of the
screen for the multi-aquifer well connection. If the specified
SCRN_BOT is less than the bottom of the GWF cell it is set equal to
the bottom of the cell. SCRN_BOT can be any value if CONDEQN is
SPECIFIED, THIEM, SKIN, or COMPOSITE and SCRN_BOT is set to the
bottom of the cell.
* hk_skin (double) value that defines the skin (filter pack) hydraulic
conductivity (if CONDEQN for the multi-aquifer well is SKIN,
CUMULATIVE, or MEAN) or conductance (if CONDEQN for the multi-aquifer
well is SPECIFIED) for each GWF node connected to the multi-aquifer
well (NGWFNODES). HK_SKIN can be any value if CONDEQN is THIEM.
* radius_skin (double) real value that defines the skin radius (filter
pack radius) for the multi-aquifer well. RADIUS_SKIN can be any value
if CONDEQN is SPECIFIED or THIEM. Otherwise, RADIUS_SKIN must be
greater than RADIUS for the multi-aquifer well.
perioddata : [wellno, mawsetting]
* wellno (integer) integer value that defines the well number
associated with the specified PERIOD data on the line. WELLNO must be
greater than zero and less than or equal to NMAWWELLS. This argument
is an index variable, which means that it should be treated as zero-
based when working with FloPy and Python. Flopy will automatically
subtract one when loading index variables and add one when writing
index variables.
* mawsetting (keystring) line of information that is parsed into a
keyword and values. Keyword values that can be used to start the
MAWSETTING string include: STATUS, FLOWING_WELL, RATE, WELL_HEAD,
HEAD_LIMIT, SHUT_OFF, RATE_SCALING, and AUXILIARY.
status : [string]
* status (string) keyword option to define well status. STATUS
can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is
ACTIVE.
flowing_wellrecord : [fwelev, fwcond, fwrlen]
* fwelev (double) elevation used to determine whether or not
the well is flowing.
* fwcond (double) conductance used to calculate the discharge
of a free flowing well. Flow occurs when the head in the well
is above the well top elevation (FWELEV).
* fwrlen (double) length used to reduce the conductance of the
flowing well. When the head in the well drops below the well
top plus the reduction length, then the conductance is
reduced. This reduction length can be used to improve the
stability of simulations with flowing wells so that there is
not an abrupt change in flowing well rates.
rate : [double]
* rate (double) is the volumetric pumping rate for the multi-
aquifer well. A positive value indicates recharge and a
negative value indicates discharge (pumping). RATE only
applies to active (IBOUND :math:`>` 0) multi-aquifer wells.
If the Options block includes a TIMESERIESFILE entry (see the
"Time-Variable Input" section), values can be obtained from a
time series by entering the time-series name in place of a
numeric value. By default, the RATE for each multi-aquifer
well is zero.
well_head : [double]
* well_head (double) is the head in the multi-aquifer well.
WELL_HEAD is only applied to constant head (STATUS is
CONSTANT) and inactive (STATUS is INACTIVE) multi-aquifer
wells. If the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be
obtained from a time series by entering the time-series name
in place of a numeric value.
head_limit : [string]
* head_limit (string) is the limiting water level (head) in the
well, which is the minimum of the well RATE or the well
inflow rate from the aquifer. HEAD_LIMIT can be applied to
extraction wells (RATE :math:`<` 0) or injection wells (RATE
                  :math:`>` 0). HEAD_LIMIT can be deactivated by specifying
                  the text string 'OFF'. The HEAD_LIMIT option is based on
                  the HEAD_LIMIT functionality available in the MNW2
                  (Konikow and others, 2009) package for MODFLOW-2005. The
                  HEAD_LIMIT option has been included to facilitate backward
                  compatibility with previous versions of MODFLOW but use of
                  the RATE_SCALING option instead of the HEAD_LIMIT option
                  is recommended. By default, HEAD_LIMIT is 'OFF'.
shutoffrecord : [minrate, maxrate]
* minrate (double) is the minimum rate that a well must exceed
to shutoff a well during a stress period. The well will shut
down during a time step if the flow rate to the well from the
aquifer is less than MINRATE. If a well is shut down during a
time step, reactivation of the well cannot occur until the
next time step to reduce oscillations. MINRATE must be less
than maxrate.
* maxrate (double) is the maximum rate that a well must exceed
to reactivate a well during a stress period. The well will
reactivate during a timestep if the well was shutdown during
the previous time step and the flow rate to the well from the
aquifer exceeds maxrate. Reactivation of the well cannot
occur until the next time step if a well is shutdown to
reduce oscillations. maxrate must be greater than MINRATE.
rate_scalingrecord : [pump_elevation, scaling_length]
* pump_elevation (double) is the elevation of the multi-aquifer
well pump (PUMP_ELEVATION). PUMP_ELEVATION should not be less
than the bottom elevation (BOTTOM) of the multi-aquifer well.
* scaling_length (double) height above the pump elevation
(SCALING_LENGTH). If the simulated well head is below this
elevation (pump elevation plus the scaling length), then the
pumping rate is reduced.
auxiliaryrecord : [auxname, auxval]
* auxname (string) name for the auxiliary variable to be
assigned AUXVAL. AUXNAME must match one of the auxiliary
variable names defined in the OPTIONS block. If AUXNAME does
not match one of the auxiliary variable names defined in the
OPTIONS block the data are ignored.
* auxval (double) value for the auxiliary variable. If the
Options block includes a TIMESERIESFILE entry (see the "Time-
Variable Input" section), values can be obtained from a time
series by entering the time-series name in place of a numeric
value.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
auxiliary = ListTemplateGenerator(('gwf6', 'maw', 'options',
'auxiliary'))
stage_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options',
'stage_filerecord'))
budget_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options',
'budget_filerecord'))
ts_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options',
'ts_filerecord'))
obs_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options',
'obs_filerecord'))
packagedata = ListTemplateGenerator(('gwf6', 'maw', 'packagedata',
'packagedata'))
connectiondata = ListTemplateGenerator(('gwf6', 'maw',
'connectiondata',
'connectiondata'))
perioddata = ListTemplateGenerator(('gwf6', 'maw', 'period',
'perioddata'))
package_abbr = "gwfmaw"
_package_type = "maw"
dfn_file_name = "gwf-maw.dfn"
dfn = [["block options", "name auxiliary", "type string",
"shape (naux)", "reader urword", "optional true"],
["block options", "name boundnames", "type keyword", "shape",
"reader urword", "optional true"],
["block options", "name print_input", "type keyword",
"reader urword", "optional true"],
["block options", "name print_head", "type keyword",
"reader urword", "optional true"],
["block options", "name print_flows", "type keyword",
"reader urword", "optional true"],
["block options", "name save_flows", "type keyword",
"reader urword", "optional true"],
["block options", "name stage_filerecord",
"type record head fileout headfile", "shape", "reader urword",
"tagged true", "optional true"],
["block options", "name head", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name headfile", "type string",
"preserve_case true", "shape", "in_record true", "reader urword",
"tagged false", "optional false"],
["block options", "name budget_filerecord",
"type record budget fileout budgetfile", "shape", "reader urword",
"tagged true", "optional true"],
["block options", "name budget", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name fileout", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name budgetfile", "type string",
"preserve_case true", "shape", "in_record true", "reader urword",
"tagged false", "optional false"],
["block options", "name no_well_storage", "type keyword",
"reader urword", "optional true"],
["block options", "name flowing_wells", "type keyword",
"reader urword", "optional true"],
["block options", "name shutdown_theta", "type double precision",
"reader urword", "optional true"],
["block options", "name shutdown_kappa", "type double precision",
"reader urword", "optional true"],
["block options", "name ts_filerecord",
"type record ts6 filein ts6_filename", "shape", "reader urword",
"tagged true", "optional true", "construct_package ts",
"construct_data timeseries", "parameter_name timeseries"],
["block options", "name ts6", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name filein", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name ts6_filename", "type string",
"preserve_case true", "in_record true", "reader urword",
"optional false", "tagged false"],
["block options", "name obs_filerecord",
"type record obs6 filein obs6_filename", "shape", "reader urword",
"tagged true", "optional true", "construct_package obs",
"construct_data continuous", "parameter_name observations"],
["block options", "name obs6", "type keyword", "shape",
"in_record true", "reader urword", "tagged true",
"optional false"],
["block options", "name obs6_filename", "type string",
"preserve_case true", "in_record true", "tagged false",
"reader urword", "optional false"],
["block options", "name mover", "type keyword", "tagged true",
"reader urword", "optional true"],
["block dimensions", "name nmawwells", "type integer",
"reader urword", "optional false"],
["block packagedata", "name packagedata",
"type recarray wellno radius bottom strt condeqn ngwfnodes aux "
"boundname",
"shape (nmawwells)", "reader urword"],
["block packagedata", "name wellno", "type integer", "shape",
"tagged false", "in_record true", "reader urword",
"numeric_index true"],
["block packagedata", "name radius", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block packagedata", "name bottom", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block packagedata", "name strt", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block packagedata", "name condeqn", "type string", "shape",
"tagged false", "in_record true", "reader urword"],
["block packagedata", "name ngwfnodes", "type integer", "shape",
"tagged false", "in_record true", "reader urword"],
["block packagedata", "name aux", "type double precision",
"in_record true", "tagged false", "shape (naux)", "reader urword",
"time_series true", "optional true"],
["block packagedata", "name boundname", "type string", "shape",
"tagged false", "in_record true", "reader urword",
"optional true"],
["block connectiondata", "name connectiondata",
"type recarray wellno icon cellid scrn_top scrn_bot hk_skin "
"radius_skin",
"reader urword"],
["block connectiondata", "name wellno", "type integer", "shape",
"tagged false", "in_record true", "reader urword",
"numeric_index true"],
["block connectiondata", "name icon", "type integer", "shape",
"tagged false", "in_record true", "reader urword",
"numeric_index true"],
["block connectiondata", "name cellid", "type integer",
"shape (ncelldim)", "tagged false", "in_record true",
"reader urword"],
["block connectiondata", "name scrn_top",
"type double precision", "shape", "tagged false",
"in_record true", "reader urword"],
["block connectiondata", "name scrn_bot",
"type double precision", "shape", "tagged false",
"in_record true", "reader urword"],
["block connectiondata", "name hk_skin", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block connectiondata", "name radius_skin",
"type double precision", "shape", "tagged false",
"in_record true", "reader urword"],
["block period", "name iper", "type integer",
"block_variable True", "in_record true", "tagged false", "shape",
"valid", "reader urword", "optional false"],
["block period", "name perioddata",
"type recarray wellno mawsetting", "shape", "reader urword"],
["block period", "name wellno", "type integer", "shape",
"tagged false", "in_record true", "reader urword",
"numeric_index true"],
["block period", "name mawsetting",
"type keystring status flowing_wellrecord rate well_head "
"head_limit shutoffrecord rate_scalingrecord auxiliaryrecord",
"shape", "tagged false", "in_record true", "reader urword"],
["block period", "name status", "type string", "shape",
"tagged true", "in_record true", "reader urword"],
["block period", "name flowing_wellrecord",
"type record flowing_well fwelev fwcond fwrlen", "shape",
"tagged", "in_record true", "reader urword"],
["block period", "name flowing_well", "type keyword", "shape",
"in_record true", "reader urword"],
["block period", "name fwelev", "type double precision", "shape",
"tagged false", "in_record true", "reader urword"],
["block period", "name fwcond", "type double precision", "shape",
"tagged false", "in_record true", "reader urword"],
["block period", "name fwrlen", "type double precision", "shape",
"tagged false", "in_record true", "reader urword"],
["block period", "name rate", "type double precision", "shape",
"tagged true", "in_record true", "reader urword",
"time_series true"],
["block period", "name well_head", "type double precision",
"shape", "tagged true", "in_record true", "reader urword",
"time_series true"],
["block period", "name head_limit", "type string", "shape",
"tagged true", "in_record true", "reader urword"],
["block period", "name shutoffrecord",
"type record shut_off minrate maxrate", "shape", "tagged",
"in_record true", "reader urword"],
["block period", "name shut_off", "type keyword", "shape",
"in_record true", "reader urword"],
["block period", "name minrate", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block period", "name maxrate", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block period", "name rate_scalingrecord",
"type record rate_scaling pump_elevation scaling_length", "shape",
"tagged", "in_record true", "reader urword"],
["block period", "name rate_scaling", "type keyword", "shape",
"in_record true", "reader urword"],
["block period", "name pump_elevation", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block period", "name scaling_length", "type double precision",
"shape", "tagged false", "in_record true", "reader urword"],
["block period", "name auxiliaryrecord",
"type record auxiliary auxname auxval", "shape", "tagged",
"in_record true", "reader urword"],
["block period", "name auxiliary", "type keyword", "shape",
"in_record true", "reader urword"],
["block period", "name auxname", "type string", "shape",
"tagged false", "in_record true", "reader urword"],
["block period", "name auxval", "type double precision", "shape",
"tagged false", "in_record true", "reader urword",
"time_series true"]]
def __init__(self, model, loading_package=False, auxiliary=None,
boundnames=None, print_input=None, print_head=None,
print_flows=None, save_flows=None, stage_filerecord=None,
budget_filerecord=None, no_well_storage=None,
flowing_wells=None, shutdown_theta=None, shutdown_kappa=None,
timeseries=None, observations=None, mover=None,
nmawwells=None, packagedata=None, connectiondata=None,
perioddata=None, filename=None, pname=None, parent_file=None):
super(ModflowGwfmaw, self).__init__(model, "maw", filename, pname,
loading_package, parent_file)
# set up variables
self.auxiliary = self.build_mfdata("auxiliary", auxiliary)
self.boundnames = self.build_mfdata("boundnames", boundnames)
self.print_input = self.build_mfdata("print_input", print_input)
self.print_head = self.build_mfdata("print_head", print_head)
self.print_flows = self.build_mfdata("print_flows", print_flows)
self.save_flows = self.build_mfdata("save_flows", save_flows)
self.stage_filerecord = self.build_mfdata("stage_filerecord",
stage_filerecord)
self.budget_filerecord = self.build_mfdata("budget_filerecord",
budget_filerecord)
self.no_well_storage = self.build_mfdata("no_well_storage",
no_well_storage)
self.flowing_wells = self.build_mfdata("flowing_wells", flowing_wells)
self.shutdown_theta = self.build_mfdata("shutdown_theta",
shutdown_theta)
self.shutdown_kappa = self.build_mfdata("shutdown_kappa",
shutdown_kappa)
self._ts_filerecord = self.build_mfdata("ts_filerecord",
None)
self._ts_package = self.build_child_package("ts", timeseries,
"timeseries",
self._ts_filerecord)
self._obs_filerecord = self.build_mfdata("obs_filerecord",
None)
self._obs_package = self.build_child_package("obs", observations,
"continuous",
self._obs_filerecord)
self.mover = self.build_mfdata("mover", mover)
self.nmawwells = self.build_mfdata("nmawwells", nmawwells)
self.packagedata = self.build_mfdata("packagedata", packagedata)
self.connectiondata = self.build_mfdata("connectiondata",
connectiondata)
self.perioddata = self.build_mfdata("perioddata", perioddata)
self._init_complete = True
|
py | b415564f8a8884181c9539d350e97676405a6ab4 | import json
import re
from moto.core.responses import BaseResponse
from moto.events import events_backends
class EventsHandler(BaseResponse):
@property
def events_backend(self):
"""
Events Backend
:return: Events Backend object
:rtype: moto.events.models.EventsBackend
"""
return events_backends[self.region]
def _generate_rule_dict(self, rule):
return {
"Name": rule.name,
"Arn": rule.arn,
"EventPattern": rule.event_pattern,
"State": rule.state,
"Description": rule.description,
"ScheduleExpression": rule.schedule_exp,
"RoleArn": rule.role_arn,
}
@property
def request_params(self):
if not hasattr(self, "_json_body"):
try:
self._json_body = json.loads(self.body)
except ValueError:
self._json_body = {}
return self._json_body
def _get_param(self, param, if_none=None):
return self.request_params.get(param, if_none)
def error(self, type_, message="", status=400):
headers = self.response_headers
headers["status"] = status
return json.dumps({"__type": type_, "message": message}), headers
def delete_rule(self):
name = self._get_param("Name")
if not name:
return self.error("ValidationException", "Parameter Name is required.")
self.events_backend.delete_rule(name)
return "", self.response_headers
def describe_rule(self):
name = self._get_param("Name")
if not name:
return self.error("ValidationException", "Parameter Name is required.")
rule = self.events_backend.describe_rule(name)
if not rule:
return self.error("ResourceNotFoundException", "Rule test does not exist.")
rule_dict = self._generate_rule_dict(rule)
return json.dumps(rule_dict), self.response_headers
def disable_rule(self):
name = self._get_param("Name")
if not name:
return self.error("ValidationException", "Parameter Name is required.")
if not self.events_backend.disable_rule(name):
return self.error(
"ResourceNotFoundException", "Rule " + name + " does not exist."
)
return "", self.response_headers
def enable_rule(self):
name = self._get_param("Name")
if not name:
return self.error("ValidationException", "Parameter Name is required.")
if not self.events_backend.enable_rule(name):
return self.error(
"ResourceNotFoundException", "Rule " + name + " does not exist."
)
return "", self.response_headers
def generate_presigned_url(self):
pass
def list_rule_names_by_target(self):
target_arn = self._get_param("TargetArn")
next_token = self._get_param("NextToken")
limit = self._get_param("Limit")
if not target_arn:
return self.error("ValidationException", "Parameter TargetArn is required.")
rule_names = self.events_backend.list_rule_names_by_target(
target_arn, next_token, limit
)
return json.dumps(rule_names), self.response_headers
def list_rules(self):
prefix = self._get_param("NamePrefix")
next_token = self._get_param("NextToken")
limit = self._get_param("Limit")
rules = self.events_backend.list_rules(prefix, next_token, limit)
rules_obj = {"Rules": []}
for rule in rules["Rules"]:
rules_obj["Rules"].append(self._generate_rule_dict(rule))
if rules.get("NextToken"):
rules_obj["NextToken"] = rules["NextToken"]
return json.dumps(rules_obj), self.response_headers
def list_targets_by_rule(self):
rule_name = self._get_param("Rule")
next_token = self._get_param("NextToken")
limit = self._get_param("Limit")
if not rule_name:
return self.error("ValidationException", "Parameter Rule is required.")
try:
targets = self.events_backend.list_targets_by_rule(
rule_name, next_token, limit
)
except KeyError:
return self.error(
"ResourceNotFoundException", "Rule " + rule_name + " does not exist."
)
return json.dumps(targets), self.response_headers
def put_events(self):
events = self._get_param("Entries")
failed_entries = self.events_backend.put_events(events)
if failed_entries:
return json.dumps(
{"FailedEntryCount": len(failed_entries), "Entries": failed_entries}
)
return "", self.response_headers
def put_rule(self):
name = self._get_param("Name")
event_pattern = self._get_param("EventPattern")
sched_exp = self._get_param("ScheduleExpression")
state = self._get_param("State")
desc = self._get_param("Description")
role_arn = self._get_param("RoleArn")
if not name:
return self.error("ValidationException", "Parameter Name is required.")
if event_pattern:
try:
json.loads(event_pattern)
except ValueError:
# Not quite as informative as the real error, but it'll work
# for now.
return self.error(
"InvalidEventPatternException", "Event pattern is not valid."
)
if sched_exp:
if not (
re.match("^cron\(.*\)", sched_exp)
or re.match(
"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)", sched_exp
)
):
return self.error(
"ValidationException", "Parameter ScheduleExpression is not valid."
)
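        # Illustrative values (for orientation only): "rate(5 minutes)" and
        # "cron(0 12 * * ? *)" pass the checks above, while "rate(5)" does not.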
rule_arn = self.events_backend.put_rule(
name,
ScheduleExpression=sched_exp,
EventPattern=event_pattern,
State=state,
Description=desc,
RoleArn=role_arn,
)
return json.dumps({"RuleArn": rule_arn}), self.response_headers
def put_targets(self):
rule_name = self._get_param("Rule")
targets = self._get_param("Targets")
if not rule_name:
return self.error("ValidationException", "Parameter Rule is required.")
if not targets:
return self.error("ValidationException", "Parameter Targets is required.")
if not self.events_backend.put_targets(rule_name, targets):
return self.error(
"ResourceNotFoundException", "Rule " + rule_name + " does not exist."
)
return "", self.response_headers
def remove_targets(self):
rule_name = self._get_param("Rule")
ids = self._get_param("Ids")
if not rule_name:
return self.error("ValidationException", "Parameter Rule is required.")
if not ids:
return self.error("ValidationException", "Parameter Ids is required.")
if not self.events_backend.remove_targets(rule_name, ids):
return self.error(
"ResourceNotFoundException", "Rule " + rule_name + " does not exist."
)
return "", self.response_headers
def test_event_pattern(self):
pass
def put_permission(self):
event_bus_name = self._get_param("EventBusName")
action = self._get_param("Action")
principal = self._get_param("Principal")
statement_id = self._get_param("StatementId")
self.events_backend.put_permission(
event_bus_name, action, principal, statement_id
)
return ""
def remove_permission(self):
event_bus_name = self._get_param("EventBusName")
statement_id = self._get_param("StatementId")
self.events_backend.remove_permission(event_bus_name, statement_id)
return ""
def describe_event_bus(self):
name = self._get_param("Name")
event_bus = self.events_backend.describe_event_bus(name)
response = {"Name": event_bus.name, "Arn": event_bus.arn}
if event_bus.policy:
response["Policy"] = event_bus.policy
return json.dumps(response), self.response_headers
def create_event_bus(self):
name = self._get_param("Name")
event_source_name = self._get_param("EventSourceName")
event_bus = self.events_backend.create_event_bus(name, event_source_name)
return json.dumps({"EventBusArn": event_bus.arn}), self.response_headers
def list_event_buses(self):
name_prefix = self._get_param("NamePrefix")
# ToDo: add 'NextToken' & 'Limit' parameters
response = []
for event_bus in self.events_backend.list_event_buses(name_prefix):
event_bus_response = {"Name": event_bus.name, "Arn": event_bus.arn}
if event_bus.policy:
event_bus_response["Policy"] = event_bus.policy
response.append(event_bus_response)
return json.dumps({"EventBuses": response}), self.response_headers
def delete_event_bus(self):
name = self._get_param("Name")
self.events_backend.delete_event_bus(name)
return "", self.response_headers
|
py | b415568acc20a3232ccff0e81dca3566a558a3b9 | """all things PeerAssets protocol."""
from enum import Enum
from operator import itemgetter
from typing import List, Optional, Generator, cast, Callable
from pypeerassets.kutil import Kutil
from pypeerassets.paproto_pb2 import DeckSpawn as deckspawnproto
from pypeerassets.paproto_pb2 import CardTransfer as cardtransferproto
from pypeerassets.exceptions import (
InvalidCardIssue,
OverSizeOPReturn,
RecieverAmountMismatch,
)
from pypeerassets.card_parsers import parsers
from pypeerassets.networks import net_query
class IssueMode(Enum):
NONE = 0x00
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L19
# No issuance allowed.
CUSTOM = 0x01
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L20
# Custom issue mode, verified by client aware of this.
ONCE = 0x02
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L21
# A single card_issue transaction allowed.
MULTI = 0x04
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L22
# Multiple card_issue transactions allowed.
MONO = 0x08
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L23
# All card transaction amounts are equal to 1.
UNFLUSHABLE = 0x10
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L24
# The UNFLUSHABLE issue mode invalidates any card transfer transaction except for the card issue transaction.
# Meaning that only the issuing entity is able to change the balance of a specific address.
    # To correctly calculate the balance of a PeerAssets address a client should only consider the card transfer
# transactions originating from the deck owner.
    SUBSCRIPTION = 0x34 # SUBSCRIPTION (0x34 = 0x20 | 0x04 | 0x10)
# https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L26
# The SUBSCRIPTION issue mode marks an address holding tokens as subscribed for a limited timeframe. This timeframe is
# defined by the balance of the account and the time at which the first cards of this token are received.
# To check validity of a subscription one should take the timestamp of the first received cards and add the address' balance to it in hours.
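    # Illustrative sketch (added for clarity, not part of the specification): under
    # the reading above a client could estimate a subscription expiry roughly as
    #
    #   expiry = first_card_timestamp + address_balance * 3600  # seconds; 1 token == 1 hour
    #
    # where first_card_timestamp and address_balance are placeholder names for
    # values the client tracks itself.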
SINGLET = 0x0a # SINGLET is a combination of ONCE and MONO (2 | 8)
    # Singlet deck, one MONO card issuance allowed
class Deck:
def __init__(self, name: str,
number_of_decimals: int,
issue_mode: int,
network: str,
production: bool,
version: int,
asset_specific_data: bytes=None,
issuer: str="",
issue_time: int=None,
id: str=None,
tx_confirmations: int=None) -> None:
'''
        Initialize deck object, load from dictionary Deck(**dict) or initialize
with kwargs Deck("deck", 3, "ONCE")
'''
self.version = version # protocol version
self.name = name # deck name
self.issue_mode = issue_mode # deck issue mode
self.number_of_decimals = number_of_decimals
self.asset_specific_data = asset_specific_data # optional metadata for the deck
self.id = id
self.issuer = issuer
self.issue_time = issue_time
self.tx_confirmations = tx_confirmations
self.network = network
self.production = production
@property
def p2th_address(self) -> Optional[str]:
'''P2TH address of this deck'''
if self.id:
return Kutil(network=self.network,
privkey=bytearray.fromhex(self.id)).address
else:
return None
@property
def p2th_wif(self) -> Optional[str]:
'''P2TH privkey in WIF format'''
if self.id:
return Kutil(network=self.network,
privkey=bytearray.fromhex(self.id)).wif
else:
return None
@property
def metainfo_to_protobuf(self) -> bytes:
'''encode deck into protobuf'''
deck = deckspawnproto()
deck.version = self.version
deck.name = self.name
deck.number_of_decimals = self.number_of_decimals
deck.issue_mode = self.issue_mode
if self.asset_specific_data:
if not isinstance(self.asset_specific_data, bytes):
deck.asset_specific_data = self.asset_specific_data.encode()
else:
deck.asset_specific_data = self.asset_specific_data
if deck.ByteSize() > net_query(self.network).op_return_max_bytes:
raise OverSizeOPReturn('''
Metainfo size exceeds maximum of {max} bytes supported by this network.'''
.format(max=net_query(self.network)
.op_return_max_bytes))
return deck.SerializeToString()
@property
def metainfo_to_dict(self) -> dict:
'''encode deck into dictionary'''
r = {
"version": self.version,
"name": self.name,
"number_of_decimals": self.number_of_decimals,
"issue_mode": self.issue_mode
}
if self.asset_specific_data:
r.update({'asset_specific_data': self.asset_specific_data})
return r
def to_json(self) -> dict:
'''export the Deck object to json-ready format'''
d = self.__dict__
d['p2th_wif'] = self.p2th_wif
return d
@classmethod
def from_json(cls, json: dict):
'''load the Deck object from json'''
try:
del json['p2th_wif']
except KeyError:
pass
return cls(**json)
def __str__(self) -> str:
r = []
for key in self.__dict__:
r.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
return ', '.join(r)
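# Illustrative construction (added for clarity; the argument values are made up and
# the short network name is an assumption, not taken from this module):
#
#   deck = Deck(name="my_deck", number_of_decimals=2, issue_mode=IssueMode.ONCE.value,
#               network="tppc", production=False, version=1)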
class CardBundle:
'''On the low level, cards come in bundles.
A single transaction can contain dozens of cards.
    CardBundle is an object which is a precursor to CardTransfer'''
def __init__(self,
deck: Deck,
sender: str,
txid: str,
blockhash: str,
blocknum: int,
blockseq: int,
timestamp: int,
tx_confirmations: int,
vouts: list=[],
) -> None:
self.deck = deck
self.txid = txid
self.sender = sender
self.vouts = vouts
if blockhash:
self.blockhash = blockhash
self.blockseq = blockseq
self.timestamp = timestamp
self.blocknum = blocknum
self.tx_confirmations = tx_confirmations
else:
self.blockhash = ""
self.blockseq = 0
self.blocknum = 0
self.timestamp = 0
self.tx_confirmations = 0
def to_json(self) -> dict:
'''export the CardBundle object to json-ready format'''
return self.__dict__
class CardTransfer:
def __init__(self, deck: Deck,
receiver: list=[],
amount: List[int]=[],
version: int=1,
blockhash: str=None,
txid: str=None,
sender: str=None,
asset_specific_data: bytes=None,
number_of_decimals: int=None,
blockseq: int=None,
cardseq: int=None,
blocknum: int=None,
timestamp: int=None,
tx_confirmations: int=None,
type: str=None) -> None:
'''CardTransfer object, used when parsing card_transfers from the blockchain
or when sending out new card_transfer.
It can be initialized by passing the **kwargs and it will do the parsing,
or it can be initialized with passed arguments.
* deck - instance of Deck object
* receiver - list of receivers
* amount - list of amounts to be sent, must be integer
* version - protocol version, default 1
* txid - transaction ID of CardTransfer
* sender - transaction sender
* blockhash - block ID where the tx was first included
* blockseq - order in which tx was serialized into block
* timestamp - unix timestamp of the block where it was first included
* tx_confirmations - number of confirmations of the transaction
* asset_specific_data - extra metadata
* number_of_decimals - number of decimals for amount, inherited from Deck object
: type: card type [CardIssue, CardTransfer, CardBurn]'''
if not len(receiver) == len(amount):
raise RecieverAmountMismatch
self.version = version
self.network = deck.network
self.deck_id = deck.id
self.deck_p2th = deck.p2th_address
self.txid = txid
self.sender = sender
self.asset_specific_data = asset_specific_data
if not number_of_decimals:
self.number_of_decimals = deck.number_of_decimals
else:
self.number_of_decimals = number_of_decimals
self.receiver = receiver
self.amount = amount
if blockhash:
self.blockhash = blockhash
self.blockseq = blockseq
self.timestamp = timestamp
self.blocknum = blocknum
self.cardseq = cardseq
self.tx_confirmations = tx_confirmations
else:
self.blockhash = ""
self.blockseq = 0
self.blocknum = 0
self.timestamp = 0
self.cardseq = 0
self.tx_confirmations = 0
if self.sender == deck.issuer:
# if deck issuer is issuing cards to the deck issuing address,
# card is burn and issue at the same time - which is invalid!
if deck.issuer in self.receiver:
raise InvalidCardIssue
else:
# card was sent from deck issuer to any random address,
# card type is CardIssue
self.type = "CardIssue"
# card was sent back to issuing address
# card type is CardBurn
elif self.receiver[0] == deck.issuer and not self.sender == deck.issuer:
self.type = "CardBurn"
# issuer is anyone else,
# card type is CardTransfer
else:
self.type = "CardTransfer"
if type:
self.type = type
@property
def metainfo_to_protobuf(self) -> bytes:
'''encode card_transfer info to protobuf'''
card = cardtransferproto()
card.version = self.version
card.amount.extend(self.amount)
card.number_of_decimals = self.number_of_decimals
if self.asset_specific_data:
if not isinstance(self.asset_specific_data, bytes):
card.asset_specific_data = self.asset_specific_data.encode()
else:
card.asset_specific_data = self.asset_specific_data
if card.ByteSize() > net_query(self.network).op_return_max_bytes:
raise OverSizeOPReturn('''
Metainfo size exceeds maximum of {max} bytes supported by this network.'''
.format(max=net_query(self.network)
.op_return_max_bytes))
return card.SerializeToString()
@property
def metainfo_to_dict(self) -> dict:
'''encode card into dictionary'''
r = {
"version": self.version,
"amount": self.amount,
"number_of_decimals": self.number_of_decimals
}
if self.asset_specific_data:
r.update({'asset_specific_data': self.asset_specific_data})
return r
def to_json(self) -> dict:
'''export the CardTransfer object to json-ready format'''
return self.__dict__
@classmethod
def from_json(cls, json: dict):
'''load the Deck object from json'''
return cls(**json)
def __str__(self) -> str:
r = []
for key in self.__dict__:
r.append("{key}='{value}'".format(key=key, value=self.to_json()[key]))
return ', '.join(r)
def validate_card_issue_modes(issue_mode: int, cards: list) -> list:
"""validate cards against deck_issue modes"""
supported_mask = 63 # sum of all issue_mode values
if not bool(issue_mode & supported_mask):
return [] # return empty list
for i in [1 << x for x in range(len(IssueMode))]:
if bool(i & issue_mode):
try:
parser_fn = cast(
Callable[[list], Optional[list]],
parsers[IssueMode(i).name]
)
except ValueError:
continue
parsed_cards = parser_fn(cards)
if not parsed_cards:
return []
cards = parsed_cards
return cards
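# Illustrative usage (added for clarity; `deck` and `card_list` are whatever Deck and
# card objects the caller already holds): a combined mode such as SINGLET
# (0x0a = ONCE | MONO) is decomposed bit by bit above, so the cards pass first through
# the ONCE parser (0x02) and then the MONO parser (0x08); if either parser rejects
# everything, an empty list is returned.
#
#   valid_cards = validate_card_issue_modes(deck.issue_mode, card_list)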
class DeckState:
def __init__(self, cards: Generator) -> None:
self.cards = cards
self.total = 0
self.burned = 0
self.balances = cast(dict, {})
self.processed_issues = set()
self.processed_transfers = set()
self.processed_burns = set()
self.calc_state()
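        # checksum is True when the total issued amount equals the sum of all balances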
self.checksum = not bool(self.total - sum(self.balances.values()))
def _process(self, card: dict, ctype: str) -> bool:
sender = card["sender"]
receiver = card["receiver"][0]
amount = card["amount"][0]
if ctype != 'CardIssue':
balance_check = sender in self.balances and self.balances[sender] >= amount
if balance_check:
self.balances[sender] -= amount
if 'CardBurn' not in ctype:
self._append_balance(amount, receiver)
return True
return False
if 'CardIssue' in ctype:
self._append_balance(amount, receiver)
return True
return False
def _append_balance(self, amount: int, receiver: str) -> None:
try:
self.balances[receiver] += amount
except KeyError:
self.balances[receiver] = amount
def _sort_cards(self, cards: Generator) -> list:
'''sort cards by blocknum and blockseq'''
return sorted([card.__dict__ for card in cards],
key=itemgetter('blocknum', 'blockseq', 'cardseq'))
def calc_state(self) -> None:
for card in self._sort_cards(self.cards):
# txid + blockseq + cardseq, as unique ID
cid = str(card["txid"] + str(card["blockseq"]) + str(card["cardseq"]))
ctype = card["type"]
amount = card["amount"][0]
if ctype == 'CardIssue' and cid not in self.processed_issues:
validate = self._process(card, ctype)
self.total += amount * validate # This will set amount to 0 if validate is False
self.processed_issues |= {cid}
if ctype == 'CardTransfer' and cid not in self.processed_transfers:
self._process(card, ctype)
self.processed_transfers |= {cid}
if ctype == 'CardBurn' and cid not in self.processed_burns:
validate = self._process(card, ctype)
self.total -= amount * validate
self.burned += amount * validate
self.processed_burns |= {cid}
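# Illustrative usage (added for clarity; `cards` is assumed to be a generator of
# CardTransfer objects belonging to a single deck):
#
#   state = DeckState(cards)
#   state.balances   # maps receiving addresses to integer card amounts
#   state.checksum   # True when the issued total matches the sum of balances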
|
py | b415577fe554691b758d5473bc5811cbd95cad0a | from ctypes import *
import os
import platform
if __name__ == '__main__':
# Specify whether you want to use the 32 or 64 bit library
# 32 bits = smClient.dll
# 64 bits = smClient64.dll
dll = 'smClient64.dll'
# Specification of library load folder
# If we are in a 64-bit system and want to load a dll
# that is in the system32 folder from a 32-bit application it will load
# the path: C:\\Windows\\SysNative\\
system32 = os.path.join(os.environ['SystemRoot'], 'SysNative' if
platform.architecture()[0] == '32bit' else 'System32')
dllPath = os.path.join(system32, dll)
# Library load
mydll = cdll.LoadLibrary(dllPath) # cdecl type DLL load
# mydll = windll.LoadLibrary(dllPath) # DLL load of type stdcall
###################################### Work with integer memory ###################################
########## Open integer memory
# to start a memory we need the name of the memory and the type
# Returns a value of 0 (zero) if opened correctly
memory = b'Memory0' # Memory name
type = int(1) # 1 indicates that an integer memory will open
## stdcall - this is for 32 bits type __stdcall library
# openMemory = getattr(mydll, 'openMemory@8')
# open = openMemory(memory, type)
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
open = mydll.openMemory(memory, type)
print(open)
######### Write in an integer memory
# Parameters are:
# - the name of the memory,
# - in which position you want to write,
# - value to write
##stdcall - this is for 32 bits type __stdcall library
# setInt = getattr(mydll, 'setInt@12')
# setInt(memory, int(0), int(56))
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
mydll.setInt(memory, int(0), int(56))
print("the integer value has been written")
############ Read from integer memory
# Parameters are:
# - the name of the memory
# - position to be read
# - Return de value saved in the memory
##stdcall - this is for 32 bits type __stdcall library
# getInt = getattr(mydll, 'getInt@8')
# read = getInt(memory, int(1))
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
read = mydll.getInt(memory, int(1))
print(read)
################################## Work with float memory ###################################
########## Open float memory
memory2 = b'Memory1' # Memory Name
type2 = int(2) # 2 indicates that a floating type memory will open
## stdcall - this is for 32 bits type __stdcall library
# openMemory = getattr(mydll, 'openMemory@8')
# open2 = openMemory(memory, type)
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
open2 = mydll.openMemory(memory2, type2)
print(open2)
########## Write in float memory
# Parameters are:
# - the name of the memory
# - position to be written
# - value to write
## This is for 32 bits type __stdcall library
# setFloat = getattr(mydll, 'setFloat@12')
# setFloat(memory2, int(3), c_float(0.1245))
## this is for 32 bits type __cdecl library or for 64Bits library
mydll.setFloat(memory2, int(3), c_float(0.1245))
print("floating value was written")
########### Read in floating memory
read2 = c_float # Stores the return value of the function
# Parameters are:
# - the name of the memory
# - position to be read
# - Return de value saved in the memory
##stdcall - this is for 32 bits type __stdcall library
# getFloat = getattr(mydll, 'getFloat@8')
# getFloat.restype = c_float
# read2 = getFloat(memory, int(1))
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
mydll.getFloat.restype = c_float
read2 = mydll.getFloat(memory2, int(3))
print(read2)
#################################### Work with double type memory ######################################
########## Open Double Memory
memory3 = b'Memory2' # Memory Name
type3 = int(3) # 3 indicates that a Double type memory will open
## stdcall - this is for 32 bits type __stdcall library
# openMemory = getattr(mydll, 'openMemory@8')
# open3 = openMemory(memory, type)
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
open3 = mydll.openMemory(memory3, type3)
print(open3)
########## Write in a memory of type Double
# Parameters are:
# - the name of the memory
# - position to be written
# - value to write
## This is for 32 bits type __stdcall library
# setDouble = getattr(mydll, 'setDouble@16')
# setDouble(memory3, int(0), c_double(0.789454))
## this is for 32 bits type __cdecl library or for 64Bits library
mydll.setDouble(memory3, int(0), c_double(0.789454))
print("double value was written")
############ Read from a memory of type Double
read3 = c_double # Stores the return value of the function
# Parameters are:
# - name of the memory
# - position to be read
##stdcall - this is for 32 bits type __stdcall library
# getDouble = getattr(mydll, 'getDouble@8')
# getDouble.restype = c_double
# read3 = getDouble(memory3, int(2))
##cdecl - this is for 32 bits type __cdecl library or for 64Bits library
mydll.getDouble.restype = c_double
read3 = mydll.getDouble(memory3, int(2))
print(read3)
#################################### Work with string memory ######################################
########## Open Double Memory
memory4 = b'Memory3' # Memory Name
type4 = int(4) # 4 indicates that a String type memory will open
## stdcall - this is for 32 bits type __stdcall library
# openMemory = getattr(mydll, 'openMemory@8')
# open4 = openMemory(memory, type)
##cdecl - this is for 32 bits type __cdecl library or 64Bits library
open4 = mydll.openMemory(memory4, type4)
print(open4)
########## Write in a memory of type String
message = b'python'
# Parameters are:
# - the name of the memory
# - position to be written
# - string to write
## This is for 32 bits type __stdcall library
# setString = getattr(mydll, 'setString@12')
# setString(memory4, int(0), message)
## this is for 32 bits type __cdecl library or 64Bits library
mydll.setString(memory4, int(0), message)
print("string value was written")
########## Read from a string memory
string_buffers = [create_string_buffer(13)] # create a 13 byte buffer
pointer = (c_char_p)(*map(addressof, string_buffers)) # Create a pointer
# Parameters are:
# - name of the memory
# - position to be read
# - Variable in which the memory value will be stored
## stdcall - this is for 32 bits type __stdcall library
# getString = getattr(mydll, 'getString@12')
# read4 = getString(memory4, int(1), pointer)
## cdecl - this is for 32 bits type __cdecl library or 64Bits library
mydll.getString(memory4, int(1), pointer)
# get the value from the Pointer
res = string_buffers
results = [s.value for s in string_buffers]
word = results[0]
print(word)
######################################## Free open shared memories #################################
## stdcall - this is for 32 bits type __stdcall library
# freeViews = getattr(mydll, 'freeViews@0')
# freeViews()
## cdecl - this is for 32 bits type __cdecl library or 64Bits library
mydll.freeViews()
#################################### Administration functions ######################################
## The following functions are used if you want to manage both creation, management and deletion
## of shared memories, and DO NOT use the shared memory management panel
############## Shared memory creation
# Parameters are:
# - name of the memory
# - Amount of values to be stored
# - type of variables that will be stored in memory
# 1 = Integer
# 2 = Float
# 3 = Double
# 4 = String (Char *) 13 characters by default
# RETURN:
# 0 = If it runs correctly
# 1 = if there was an error
# Examples
'''
return1 = mydll.createMemory(b'Memory0', 8, 1) # Create a memory called "Memory0" that contains 8 spaces and
# is of type Integer
return2 = mydll.createMemory(b'Memory1', 4, 2) # Create a memory called "Memory1" that contains 4 spaces and
#is of type Float
    return3 = mydll.createMemory(b'Memory2', 5, 3) # Create a memory called "Memory2" that contains 5 spaces and
#is of type Double
mydll.freeMemories() # Free all shared memories created
'''
|
py | b41557b52867aff94deb99310aaa6d7631daabca | def argmin(x):
""" find the index of the minumum element
Parameters
----------
x: array-like
Returns
-------
    The index of the minimum number
Examples
--------
>>> argmin([10, 0, 20, 30])
1
"""
n = len(x)
return min(range(n), key=lambda i: x[i])
|
py | b4155805476adeae26be6b7ee39178492a772c91 | import math
import csv
from itertools import islice
from segment import Segment
import codecs
class CValue(object):
def __init__(self, input_file, output_file):
"""
        Initialization method
        :param input_file: path to the input corpus file
        :param output_file: path to the output CSV file
"""
self.input_file = input_file
self.output_file = output_file
self.corpus = self.corpus_input()
self.candidate_term_count = 0
self.candidate_terms_list = self.terms_extraction()
self.terms_export()
def terms_extraction(self):
"""
        Core term-extraction method
        :return: the set of candidate terms with their C-values computed
"""
candidate_terms = Segment.segment(self.corpus)
self.candidate_term_count = len(candidate_terms)
candidate_terms_list = {}
for term in candidate_terms:
if term not in candidate_terms_list.keys():
candidate_terms_list[term] = {"frequency": 1}
else:
candidate_terms_list[term]["frequency"] += 1
candidate_term_keys = candidate_terms_list.keys()
for i in candidate_term_keys:
for j in candidate_term_keys:
if i != j and i in j:
if "nested" not in candidate_terms_list[i]:
candidate_terms_list[i]["nested"] = {}
candidate_terms_list[i]["nested"][j] = candidate_terms_list[j]["frequency"]
for term in candidate_terms_list:
if "nested" in candidate_terms_list[term]:
nested_terms = candidate_terms_list[term]["nested"]
nested_size = len(nested_terms)
nested_frequency = 0
for nested_item in nested_terms:
nested_frequency += nested_terms[nested_item]
candidate_terms_list[term]["cvalue"] = self.c_value_algorithm(length=len(term),
frequency=candidate_terms_list[term][
"frequency"],
nested_size=nested_size,
nested_frequency=nested_frequency)
else:
candidate_terms_list[term]["cvalue"] = self.c_value_algorithm(length=len(term),
frequency=candidate_terms_list[term][
"frequency"])
return candidate_terms_list
def c_value_algorithm(self, length, frequency, nested_size=None, nested_frequency=None):
"""
        C-value algorithm implementation
        :param length: length of the candidate term
        :param frequency: frequency of the candidate term
        :param nested_size: number of longer candidate terms that nest this term
        :param nested_frequency: total number of times this term appears nested
        :return: the C-value score
"""
if nested_size is None:
cvalue = math.log2(length) * frequency
return cvalue
else:
cvalue = math.log2(length) * (frequency - (1 / nested_size) * nested_frequency)
return cvalue
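    # Worked example (illustrative, added for clarity): a candidate term of length 4
    # seen 5 times and nested inside 2 longer candidates a total of 3 times scores
    #   C-value = log2(4) * (5 - (1 / 2) * 3) = 2 * 3.5 = 7.0
    # whereas the same term with no nesting would score log2(4) * 5 = 10.0.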
def corpus_input(self):
"""
        Load the corpus data and convert it to a single string
        :return: the corpus data as a string
"""
corpus = ""
if self.input_file.endswith(".csv"):
csv.field_size_limit(500 * 1024 * 1024)
csv_reader = csv.reader(codecs.open(self.input_file, "r", "utf-8"))
'''
for item in islice(csv_reader, 1, None):
s = ""
for i in item:
s += " {} ".format(str(i))
corpus += s
print(corpus)
'''
column = [row[9] for row in csv_reader]
# print(column)
# print(type(csv_reader), type(column))
corpus = " "+" ".join(column[1:])
# print(corpus)
elif self.input_file.endswith(".txt"):
with open(self.input_file, "r") as f:
corpus = f.read()
else:
raise TypeError
return corpus
def terms_export(self):
"""
        Export the candidate term set to a file
:return: None
"""
candidate_terms = []
for candidate_term in self.candidate_terms_list:
candidate_term_frequency = self.candidate_terms_list[candidate_term]["frequency"]
candidate_term_cvalue = self.candidate_terms_list[candidate_term]["cvalue"]
if "nested" in self.candidate_terms_list[candidate_term]:
candidate_term_nested = str(self.candidate_terms_list[candidate_term]["nested"])
else:
candidate_term_nested = None
candidate_terms.append([candidate_term, candidate_term_frequency, candidate_term_cvalue, candidate_term_nested])
with open(self.output_file, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(["术语", "词频", "C-value", "nested"])
for item in candidate_terms:
writer.writerow(item)
|
py | b41558cb59dfbace11b8e76b897034dfb170e213 | """
Instructions:
pip install numpy
pip install opencv-python
pip install pytesseract (linux 3 apt-get install commands)
on windows install an executable
Here is the link to their github:
https://github.com/UB-Mannheim/tesseract/wiki
Helper functions to quickly process a photo with text and transform it
into text so we can read its input / output
"""
import cv2
import sys
import pytesseract
import numpy as np
def read_image(image_path):
image = cv2.imread(image_path)
print("reading the image", image_path)
return image
def gray_scale_image(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.GaussianBlur(gray_image, (3,3), 0)
return gray_image
def threshold_image(image, mode):
if mode == "binary":
(t, t_image) = cv2.threshold(image, 135, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
else:
(t, t_image) = cv2.threshold(image, 50, 255, cv2.THRESH_BINARY_INV)
return t_image
def morph_opening(image, mode):
if mode == "binary":
return image
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
o_image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel, iterations=1)
return o_image
def process_image(image):
# https://tesseract-ocr.github.io/tessdoc/ImproveQuality#page-segmentation-method
configuration = "--psm 6"
contents = pytesseract.image_to_string(image, lang='eng', config=configuration)
return contents
def find_contours(image, mode):
if mode == "binary":
return image
new_image = np.copy(image)
contours, hierarchy = cv2.findContours(new_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("found the following number of contours", len(contours))
contoured_image = cv2.drawContours(new_image, contours, -1, (0, 255, 0), 3)
return contoured_image
def main():
# User will input the image path
if len(sys.argv) < 3:
print("Please enter an image path and mode of operation")
return
# read the image into an array of pixels
mode = sys.argv[1]
image_path = sys.argv[2]
image = read_image(image_path)
pytesseract.pytesseract.tesseract_cmd = r'D:\\Program Files (x86)\\Tesseract\\tesseract.exe'
# preprocess the image
if mode == "binary":
print("image is already in binary colors")
else:
print("image is in rgb colors")
gray_image = gray_scale_image(image)
# (unique, counts) = np.unique(gray_image, return_counts=True)
# frequencies = np.asarray((unique, counts)).T
# print(frequencies)
t_image = threshold_image(gray_image, mode)
c_image = find_contours(t_image, mode)
o_image = morph_opening(c_image, mode)
# display the images
cv2.imshow('Original Image', image)
cv2.imshow('Gray Image', gray_image)
cv2.imshow('Binary Image', t_image)
cv2.imshow('Contoured Image', c_image)
cv2.imshow('Opened Image', o_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#transform the image into text
imageContents = (process_image(o_image)[:-1]).strip()
print("Output Text - ", imageContents)
if __name__ == "__main__":
main() |
py | b4155a0e9ed5dc433cdfd06f9fdf2df691950133 | from azure.storage.blob import BlockBlobService
import sys
import os
data_dir = '/home/strikermx/data_dir/model_' + sys.argv[2]
block_blob_service = BlockBlobService(account_name='natds201801cvmarpu', account_key='melH7xjBqGc0yCtz4eL+v8rfR+Lx/cbTqlZ7Jz+adMNpTEIDdAU0L0nd2yUaMkimqU0gM0XixAwk8CRhuKoduw==')
from azure.storage.blob import PublicAccess
block_blob_service.create_container('ds-cvm-hdi-arpu-upward-2018-01-12t03-00-38-195z', public_access=PublicAccess.Container)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print(sys.argv[1] )
generator = block_blob_service.list_blobs('ds-cvm-hdi-arpu-upward-2018-01-12t03-00-38-195z')
for blob in generator:
if sys.argv[1] +'/part' in blob.name:
print(blob.name)
block_blob_service.get_blob_to_path('ds-cvm-hdi-arpu-upward-2018-01-12t03-00-38-195z', blob.name ,'/home/strikermx/data_dir/model_'+sys.argv[2]+'/training_'+sys.argv[2]+'.dat')
|
py | b4155a3f3f548cced13fc6f33f5b630f193742f9 | _origin='My girlfriend complained to me that using Scrapy to collect data was too much trouble. So I learned Python in my spare time. I first learned the basic syntax of python, then I developed a simple data collection framework myself, named simplified-scrapy. This means that it is much simpler than scrapy, but it is not weak. With the data collection framework, but no data extraction part, I continued to develop the SimplifiedDoc library. So we have the current package.'
print (_origin)
|
py | b4155b0c55c5395c951449c81231ab4fefd9be7c | # Gets a list of sync files from the Canvas API, and compares to files on-disk at a given location (determined by app settings)
# It relies on the fact that the Canvas Data Sync endpoint provides a list of filenames which are guaranteed to be unique and persistent
# See: https://portal.inshosteddata.com/docs/api
# Outputs:
# - Dictionary object with keys:
# - download: list of file object to download
# - delete: List of local files to delete (as they are no longer required)
# - schemaVersion: Version of the schema reported by the Canvas Data API. This should be retrieved for type/schema mapping activites.
# Brodie Hicks, 2021.
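# Illustrative shape of the returned dictionary (the per-file fields inside "download"
# come from the Canvas Data sync endpoint and are assumptions here, not verified):
#
#   {
#     "schemaVersion": "4.2.1",
#     "download": [{"filename": "...", "url": "...", "table": "..."}, ...],
#     "delete": ["<STORAGE_BASE_PATH>/some_old_file.gz", ...]
#   }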
import os # for Environ
import logging
from CanvasApi import CanvasDataApi
from azure.storage.blob.aio import ContainerClient
from azure.identity.aio import DefaultAzureCredential
async def main(payload: dict) -> dict:
dataApi = CanvasDataApi.CanvasDataApi(os.environ["CANVAS_API_KEY"], os.environ["CANVAS_API_SECRET"])
async with dataApi:
# Get synchronisation list from data API
syncList = await dataApi.get_sync_list()
logging.info(f"Canvas Data Reported {len(syncList['files'])} files")
logging.info(syncList)
# We create a map of file names -> file definitions to improve lookup performance below (approaches O(1))
# The key is used for case-insensitive matching with local files as per below.
syncFiles = { o['filename'].casefold(): o for o in syncList['files'] }
# Get a list of files already downloaded.
credential = DefaultAzureCredential()
async with credential:
searchPrefix = f"{os.environ['STORAGE_BASE_PATH']}/"
containerClient = ContainerClient.from_container_url(f"{os.environ['STORAGE_CONTAINER_URL']}", credential=credential)
async with containerClient:
existingFileNames = {
b.name.rpartition('/')[-1].casefold(): b.name # The key here is for sane / case-insensitive comparison with syncFiles above.
async for b in containerClient.list_blobs(name_starts_with=searchPrefix)
if b.size > 0 and not b.name.startswith(f"{os.environ['STORAGE_BASE_PATH']}/Schema_") # Skip schema files stored on disk, as well as 0-size directories.
}
logging.info(f"Found {len(existingFileNames)} files already downloaded")
return {
# This is downloaded to disk and used to generate ADF tabular translators in subsequent activities - see DownloadSchemaVersion func
"schemaVersion": syncList['schemaVersion'],
# Files in the API sync list, but not on our storage. We use 'v' to return an object with URL, table name, etc. metadata
"download": [v for k, v in syncFiles.items() if k not in existingFileNames],
        # Files in our storage, but not in the sync list - these have been superseded and should be removed.
# 'v' in this instance returns the actual (unmodified) file path.
"delete": [v for k,v in existingFileNames.items() if k not in syncFiles]
}
|
py | b4155c5c39aea9b0687afe08025435b7229ac1b3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: OGR Python samples
# Purpose: Create OGR VRT from source datasource
# Author: Frank Warmerdam, [email protected]
#
###############################################################################
# Copyright (c) 2009, Frank Warmerdam <[email protected]>
# Copyright (c) 2009-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import ogr, gdal
#############################################################################
def GeomType2Name( type ):
flat_type = ogr.GT_Flatten(type)
dic = { ogr.wkbUnknown : ('wkbUnknown', '25D'),
ogr.wkbPoint : ('wkbPoint', '25D'),
ogr.wkbLineString : ('wkbLineString', '25D'),
ogr.wkbPolygon : ('wkbPolygon', '25D'),
ogr.wkbMultiPoint : ('wkbMultiPoint', '25D'),
ogr.wkbMultiLineString : ('wkbMultiLineString', '25D'),
ogr.wkbMultiPolygon : ('wkbMultiPolygon', '25D'),
ogr.wkbGeometryCollection : ('wkbGeometryCollection', '25D'),
ogr.wkbNone : ('wkbNone', ''),
ogr.wkbLinearRing : ('wkbLinearRing', ''),
ogr.wkbCircularString : ('wkbCircularString', 'Z'),
ogr.wkbCompoundCurve : ('wkbCompoundCurve', 'Z'),
ogr.wkbCurvePolygon : ('wkbCurvePolygon', 'Z'),
ogr.wkbMultiCurve : ('wkbMultiCurve', 'Z'),
ogr.wkbMultiSurface : ('wkbMultiSurface', 'Z'),
ogr.wkbCurve : ('wkbCurve', 'Z'),
ogr.wkbSurface : ('wkbSurface', 'Z') }
ret = dic[flat_type][0]
if flat_type != type:
if ogr.GT_HasM(type):
if ogr.GT_HasZ(type):
ret += "ZM"
else:
ret += "M"
else:
ret += dic[flat_type][1]
return ret
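# Illustrative results (added for clarity): GeomType2Name(ogr.wkbPoint) returns
# 'wkbPoint', GeomType2Name(ogr.wkbPoint25D) returns 'wkbPoint25D', and a measured
# type such as ogr.wkbPointM returns 'wkbPointM'.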
#############################################################################
def Esc(x):
return gdal.EscapeString( x, gdal.CPLES_XML )
#############################################################################
def Usage():
    print('Usage: ogr2vrt.py [-relative] [-schema] [-feature_count] [-extent]')
    print('                  [-oo NAME=VALUE] in_datasource out_vrtfile [layers]')
print('')
sys.exit(1)
#############################################################################
# Argument processing.
infile = None
outfile = None
layer_list = []
relative = "0"
schema=0
feature_count=0
extent=0
openoptions = []
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-relative':
relative = "1"
elif arg == '-schema':
schema = 1
elif arg == '-feature_count':
feature_count = 1
elif arg == '-extent':
extent = 1
elif arg == '-oo':
i += 1
openoptions.append(argv[i])
elif arg[0] == '-':
Usage()
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
layer_list.append( arg )
i = i + 1
if outfile is None:
Usage()
if schema and feature_count:
sys.stderr.write('Ignoring -feature_count when used with -schema.\n')
feature_count = 0
if schema and extent:
sys.stderr.write('Ignoring -extent when used with -schema.\n')
extent = 0
#############################################################################
# Open the datasource to read.
src_ds = gdal.OpenEx( infile, gdal.OF_VECTOR, open_options = openoptions )
if schema:
infile = '@dummy@'
if len(layer_list) == 0:
for lyr_idx in range(src_ds.GetLayerCount()):
layer_list.append( src_ds.GetLayer(lyr_idx).GetLayerDefn().GetName() )
#############################################################################
# Start the VRT file.
vrt = '<OGRVRTDataSource>\n'
#############################################################################
# Metadata
mdd_list = src_ds.GetMetadataDomainList()
if mdd_list is not None:
for domain in mdd_list:
if domain == '':
vrt += ' <Metadata>\n'
elif len(domain) > 4 and domain[0:4] == 'xml:':
vrt += ' <Metadata domain="%s" format="xml">\n' % Esc(domain)
else:
vrt += ' <Metadata domain="%s">\n' % Esc(domain)
if len(domain) > 4 and domain[0:4] == 'xml:':
vrt += src_ds.GetMetadata_List(domain)[0]
else:
md = src_ds.GetMetadata(domain)
for key in md:
vrt += ' <MDI key="%s">%s</MDI>\n' % (Esc(key), Esc(md[key]))
vrt += ' </Metadata>\n'
#############################################################################
# Process each source layer.
for name in layer_list:
layer = src_ds.GetLayerByName(name)
layerdef = layer.GetLayerDefn()
vrt += ' <OGRVRTLayer name="%s">\n' % Esc(name)
mdd_list = layer.GetMetadataDomainList()
if mdd_list is not None:
for domain in mdd_list:
if domain == '':
vrt += ' <Metadata>\n'
elif len(domain) > 4 and domain[0:4] == 'xml:':
vrt += ' <Metadata domain="%s" format="xml">\n' % Esc(domain)
else:
vrt += ' <Metadata domain="%s">\n' % Esc(domain)
if len(domain) > 4 and domain[0:4] == 'xml:':
vrt += layer.GetMetadata_List(domain)[0]
else:
md = layer.GetMetadata(domain)
for key in md:
vrt += ' <MDI key="%s">%s</MDI>\n' % (Esc(key), Esc(md[key]))
vrt += ' </Metadata>\n'
vrt += ' <SrcDataSource relativeToVRT="%s" shared="%d">%s</SrcDataSource>\n' \
% (relative,not schema,Esc(infile))
if len(openoptions) > 0:
vrt += ' <OpenOptions>\n'
for option in openoptions:
(key, value) = option.split('=')
vrt += ' <OOI key="%s">%s</OOI>\n' % (Esc(key), Esc(value))
vrt += ' </OpenOptions>\n'
if schema:
vrt += ' <SrcLayer>@dummy@</SrcLayer>\n'
else:
vrt += ' <SrcLayer>%s</SrcLayer>\n' % Esc(name)
# Historic format for mono-geometry layers
if layerdef.GetGeomFieldCount() == 0:
vrt += ' <GeometryType>wkbNone</GeometryType>\n'
elif layerdef.GetGeomFieldCount() == 1 and \
layerdef.GetGeomFieldDefn(0).IsNullable():
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(layerdef.GetGeomType())
srs = layer.GetSpatialRef()
if srs is not None:
vrt += ' <LayerSRS>%s</LayerSRS>\n' \
% (Esc(srs.ExportToWkt()))
if extent:
(xmin, xmax, ymin, ymax) = layer.GetExtent()
vrt += ' <ExtentXMin>%.15g</ExtentXMin>\n' % xmin
vrt += ' <ExtentYMin>%.15g</ExtentYMin>\n' % ymin
vrt += ' <ExtentXMax>%.15g</ExtentXMax>\n' % xmax
vrt += ' <ExtentYMax>%.15g</ExtentYMax>\n' % ymax
# New format for multi-geometry field support
else:
for fld_index in range(layerdef.GetGeomFieldCount()):
src_fd = layerdef.GetGeomFieldDefn( fld_index )
vrt += ' <GeometryField name="%s"' % src_fd.GetName()
if src_fd.IsNullable() == 0:
vrt += ' nullable="false"'
vrt += '>\n'
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(src_fd.GetType())
srs = src_fd.GetSpatialRef()
if srs is not None:
vrt += ' <SRS>%s</SRS>\n' \
% (Esc(srs.ExportToWkt()))
if extent:
(xmin, xmax, ymin, ymax) = layer.GetExtent(geom_field = fld_index)
vrt += ' <ExtentXMin>%.15g</ExtentXMin>\n' % xmin
vrt += ' <ExtentYMin>%.15g</ExtentYMin>\n' % ymin
vrt += ' <ExtentXMax>%.15g</ExtentXMax>\n' % xmax
vrt += ' <ExtentYMax>%.15g</ExtentYMax>\n' % ymax
vrt += ' </GeometryField>\n'
# Process all the fields.
for fld_index in range(layerdef.GetFieldCount()):
src_fd = layerdef.GetFieldDefn( fld_index )
if src_fd.GetType() == ogr.OFTInteger:
type = 'Integer'
elif src_fd.GetType() == ogr.OFTInteger64:
type = 'Integer64'
elif src_fd.GetType() == ogr.OFTString:
type = 'String'
elif src_fd.GetType() == ogr.OFTReal:
type = 'Real'
elif src_fd.GetType() == ogr.OFTStringList:
type = 'StringList'
elif src_fd.GetType() == ogr.OFTIntegerList:
type = 'IntegerList'
elif src_fd.GetType() == ogr.OFTInteger64List:
type = 'Integer64List'
elif src_fd.GetType() == ogr.OFTRealList:
type = 'RealList'
elif src_fd.GetType() == ogr.OFTBinary:
type = 'Binary'
elif src_fd.GetType() == ogr.OFTDate:
type = 'Date'
elif src_fd.GetType() == ogr.OFTTime:
type = 'Time'
elif src_fd.GetType() == ogr.OFTDateTime:
type = 'DateTime'
else:
type = 'String'
vrt += ' <Field name="%s" type="%s"' \
% (Esc(src_fd.GetName()), type)
if src_fd.GetSubType() != ogr.OFSTNone:
vrt += ' subtype="%s"' % ogr.GetFieldSubTypeName(src_fd.GetSubType())
if not schema:
vrt += ' src="%s"' % Esc(src_fd.GetName())
if src_fd.GetWidth() > 0:
vrt += ' width="%d"' % src_fd.GetWidth()
if src_fd.GetPrecision() > 0:
vrt += ' precision="%d"' % src_fd.GetPrecision()
if src_fd.IsNullable() == 0:
vrt += ' nullable="false"'
vrt += '/>\n'
if feature_count:
vrt += ' <FeatureCount>%d</FeatureCount>\n' % layer.GetFeatureCount()
vrt += ' </OGRVRTLayer>\n'
vrt += '</OGRVRTDataSource>\n'
#############################################################################
# Write vrt
open(outfile,'w').write(vrt)
|
py | b4155d74a07d8b9c0c0bfa8ea6ad101c14371eb3 | # --------------------------------------------------------------------------- #
# BALLOONTIP wxPython IMPLEMENTATION
# Python Code By:
#
# Andrea Gavana, @ 29 May 2005
# Latest Revision: 16 Jul 2012, 15.00 GMT
#
#
# TODO List/Caveats
#
# 1. With wx.ListBox (And Probably Other Controls), The BalloonTip Sometimes
# Flashes (It Is Created And Suddenly Destroyed). I Don't Know What Is
# Happening. Probably I Don't Handle Correctly The wx.EVT_ENTER_WINDOW
# wx.EVT_LEAVE_WINDOW?
#
# 2. wx.RadioBox Seems Not To Receive The wx.EVT_ENTER_WINDOW Event
#
# 3. wx.SpinCtrl (And Probably Other Controls), When Put In A Sizer, Does Not
# Return The Correct Size/Position. Probably Is Something I Am Missing.
#
# 4. Other Issues?
#
#
# FIXED Problems
#
# 1. Now BalloonTip Control Works Also For TaskBarIcon (Thanks To Everyone
#    For The Suggestions I Read In The wxPython Mailing List)
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# [email protected]
# [email protected]
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# Tags: phoenix-port, unittest, documented
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
:class:`~wx.lib.agw.balloontip.BalloonTip` is a class that allows you to display tooltips in a balloon style
window.
Description
===========
:class:`BalloonTip` is a class that allows you to display tooltips in a balloon style
window (actually a frame), similarly to the windows XP balloon help. There is
also an arrow that points to the center of the control designed as a "target"
for the :class:`BalloonTip`.
What it can do:
- Set the balloon shape as a rectangle or a rounded rectangle;
- Set an icon to the top-left of the :class:`BalloonTip` frame;
- Set a title at the top of the :class:`BalloonTip` frame;
- Automatic "best" placement of :class:`BalloonTip` frame depending on the target
control/window position;
- Runtime customization of title/tip fonts and foreground colours;
- Runtime change of :class:`BalloonTip` frame shape;
- Set the balloon background colour;
- Possibility to set the delay after which the :class:`BalloonTip` is displayed;
- Possibility to set the delay after which the :class:`BalloonTip` is destroyed;
- Three different behaviors for the :class:`BalloonTip` window (regardless of the
  destruction delay time set):
a) Destroy by leave: the :class:`BalloonTip` is destroyed when the mouse leaves the
target control/window;
b) Destroy by click: the :class:`BalloonTip` is destroyed when you click on any area
of the target control/window;
c) Destroy by button: the :class:`BalloonTip` is destroyed when you click on the
top-right close button;
- Possibility to enable/disable globally the :class:`BalloonTip` on you application;
- Set the :class:`BalloonTip` also for the taskbar icon.
Usage
=====
Usage example::
import wx
import wx.lib.agw.balloontip as BT
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "BalloonTip Demo")
panel = wx.Panel(self)
# Let's suppose that in your application you have a wx.TextCtrl defined as:
mytextctrl = wx.TextCtrl(panel, -1, "I am a textctrl", pos=(100, 100))
# You can define your BalloonTip as follows:
tipballoon = BT.BalloonTip(topicon=None, toptitle="textctrl",
message="this is a textctrl",
shape=BT.BT_ROUNDED,
tipstyle=BT.BT_LEAVE)
# Set the BalloonTip target
tipballoon.SetTarget(mytextctrl)
# Set the BalloonTip background colour
tipballoon.SetBalloonColour(wx.WHITE)
# Set the font for the balloon title
tipballoon.SetTitleFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
# Set the colour for the balloon title
tipballoon.SetTitleColour(wx.BLACK)
# Leave the message font as default
tipballoon.SetMessageFont()
# Set the message (tip) foreground colour
tipballoon.SetMessageColour(wx.LIGHT_GREY)
# Set the start delay for the BalloonTip
tipballoon.SetStartDelay(1000)
# Set the time after which the BalloonTip is destroyed
tipballoon.SetEndDelay(3000)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Window Styles
=============
This class supports the following window styles:
================ =========== ==================================================
Window Styles Hex Value Description
================ =========== ==================================================
``BT_ROUNDED`` 0x1 :class:`BalloonTip` will have a rounded rectangular shape.
``BT_RECTANGLE`` 0x2 :class:`BalloonTip` will have a rectangular shape.
``BT_LEAVE`` 0x3 :class:`BalloonTip` will be destroyed when the user moves the mouse outside the target window.
``BT_CLICK`` 0x4 :class:`BalloonTip` will be destroyed when the user click on :class:`BalloonTip`.
``BT_BUTTON`` 0x5 :class:`BalloonTip` will be destroyed when the user click on the close button.
================ =========== ==================================================
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
BalloonTip is distributed under the wxPython license.
Latest revision: Andrea Gavana @ 16 Jul 2012, 15.00 GMT
Version 0.3
"""
import wx
import time
import wx.adv
from wx.lib.buttons import GenButton
# Define The Values For The BalloonTip Frame Shape
BT_ROUNDED = 1
""" :class:`BalloonTip` will have a rounded rectangular shape. """
BT_RECTANGLE = 2
""" :class:`BalloonTip` will have a rectangular shape. """
# Define The Value For The BalloonTip Destruction Behavior
BT_LEAVE = 3
""" :class:`BalloonTip` will be destroyed when the user moves the mouse outside the target window. """
BT_CLICK = 4
""" :class:`BalloonTip` will be destroyed when the user click on :class:`BalloonTip`. """
BT_BUTTON = 5
""" :class:`BalloonTip` will be destroyed when the user click on the close button. """
# ---------------------------------------------------------------
# Class BalloonFrame
# ---------------------------------------------------------------
# This Class Is Called By The Main BalloonTip Class, And It Is
# Responsible For The Frame Creation/Positioning On Screen
# Depending On Target Control/Window, The Frame Can Position
# Itself To NW (Default), NE, SW, SE. The Switch On Positioning
# Is Done By Calculating The Absolute Position Of The Target
# Control/Window Plus/Minus The BalloonTip Size. The Pointing
# Arrow Is Positioned Accordingly.
# ---------------------------------------------------------------
class BalloonFrame(wx.Frame):
"""
This class is called by the main :class:`BalloonTip` class, and it is
responsible for the frame creation/positioning on screen
depending on target control/window, the frame can position
itself to NW (default), NE, SW, SE. The switch on positioning
is done by calculating the absolute position of the target
control/window plus/minus the balloontip size. The pointing
arrow is positioned accordingly.
"""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, classparent=None):
"""
Default class constructor.
Used internally. Do not call directly this class in your application!
"""
wx.Frame.__init__(self, None, -1, "BalloonTip", pos, size,
style=wx.FRAME_SHAPED |
wx.SIMPLE_BORDER |
wx.FRAME_NO_TASKBAR |
wx.STAY_ON_TOP)
self._parent = classparent
self._toptitle = self._parent._toptitle
self._topicon = self._parent._topicon
self._message = self._parent._message
self._shape = self._parent._shape
self._tipstyle = self._parent._tipstyle
self._ballooncolour = self._parent._ballooncolour
self._balloonmsgcolour = self._parent._balloonmsgcolour
self._balloonmsgfont = self._parent._balloonmsgfont
if self._toptitle != "":
self._balloontitlecolour = self._parent._balloontitlecolour
self._balloontitlefont = self._parent._balloontitlefont
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
self.panel = panel
subsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
subsizer.Add((0,20), 0, wx.EXPAND)
if self._topicon is not None:
stb = wx.StaticBitmap(panel, -1, self._topicon)
hsizer.Add(stb, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)
self._balloonbmp = stb
if self._toptitle != "":
stt = wx.StaticText(panel, -1, self._toptitle)
stt.SetFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
if self._topicon is None:
hsizer.Add((10,0), 0, wx.EXPAND)
hsizer.Add(stt, 1, wx.EXPAND | wx.TOP, 10)
self._balloontitle = stt
self._balloontitle.SetForegroundColour(self._balloontitlecolour)
self._balloontitle.SetFont(self._balloontitlefont)
if self._tipstyle == BT_BUTTON:
self._closebutton = GenButton(panel, -1, "X", style=wx.NO_BORDER)
self._closebutton.SetMinSize((16,16))
self._closebutton.SetFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
self._closebutton.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterButton)
self._closebutton.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveButton)
self._closebutton.SetUseFocusIndicator(False)
if self._toptitle != "":
hsizer.Add(self._closebutton, 0, wx.TOP | wx.RIGHT, 5)
else:
hsizer.Add((10,0), 1, wx.EXPAND)
hsizer.Add(self._closebutton, 0, wx.ALIGN_RIGHT | wx.TOP
| wx.RIGHT, 5)
if self._topicon is not None or self._toptitle != "" \
or self._tipstyle == BT_BUTTON:
subsizer.Add(hsizer, 0, wx.EXPAND | wx.BOTTOM, 5)
self._firstline = line = wx.StaticLine(panel, -1, style=wx.LI_HORIZONTAL)
if self._topicon is not None or self._toptitle != "" \
or self._tipstyle == BT_BUTTON:
subsizer.Add(self._firstline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT
| wx.BOTTOM, 10)
else:
subsizer.Add(self._firstline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT
| wx.BOTTOM | wx.TOP, 10)
mainstt = wx.StaticText(panel, -1, self._message)
self._balloonmsg = mainstt
self._balloonmsg.SetForegroundColour(self._balloonmsgcolour)
self._balloonmsg.SetFont(self._balloonmsgfont)
subsizer.Add(self._balloonmsg, 1, wx.EXPAND | wx.LEFT | wx.RIGHT |
wx.BOTTOM, 10)
self._secondline = wx.StaticLine(panel, -1, style=wx.LI_HORIZONTAL)
subsizer.Add(self._secondline, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
subsizer.Add((0,0),1)
panel.SetSizer(subsizer)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizerAndFit(sizer)
sizer.Layout()
if self._tipstyle == BT_CLICK:
if self._toptitle != "":
self._balloontitle.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
if self._topicon is not None:
self._balloonbmp.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
self._balloonmsg.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
self.panel.Bind(wx.EVT_LEFT_DOWN, self.OnClose)
elif self._tipstyle == BT_BUTTON:
self._closebutton.Bind(wx.EVT_BUTTON, self.OnClose)
self.panel.SetBackgroundColour(self._ballooncolour)
if wx.Platform == "__WXGTK__":
self.Bind(wx.EVT_WINDOW_CREATE, self.SetBalloonShape)
else:
self.SetBalloonShape()
self.Show(True)
def SetBalloonShape(self, event=None):
"""
Sets the balloon shape.
:param `event`: on wxGTK, a :class:`wx.WindowCreateEvent` event to process.
"""
size = self.GetSize()
pos = self.GetPosition()
dc = wx.MemoryDC(wx.Bitmap(1,1))
textlabel = self._balloonmsg.GetLabel()
textfont = self._balloonmsg.GetFont()
textextent = dc.GetFullTextExtent(textlabel, textfont)
boxheight = size.y - textextent[1]*len(textlabel.split("\n"))
boxwidth = size.x
position = wx.GetMousePosition()
xpos = position[0]
ypos = position[1]
if xpos > 20 and ypos > 20:
# This Is NW Positioning
positioning = "NW"
xpos = position[0] - boxwidth + 20
ypos = position[1] - boxheight - 20
elif xpos <= 20 and ypos <= 20:
# This Is SE Positioning
positioning = "SE"
xpos = position[0] - 20
ypos = position[1]
elif xpos > 20 and ypos <= 20:
# This Is SW Positioning
positioning = "SW"
xpos = position[0] - boxwidth + 20
ypos = position[1]
else:
# This Is NE Positioning
positioning = "NE"
xpos = position[0]
ypos = position[1] - boxheight + 20
bmp = wx.Bitmap(size.x,size.y)
dc = wx.BufferedDC(None, bmp)
dc.SetBackground(wx.BLACK_BRUSH)
dc.Clear()
dc.SetPen(wx.Pen(wx.BLACK, 1, wx.PENSTYLE_TRANSPARENT))
if self._shape == BT_ROUNDED:
dc.DrawRoundedRectangle(0, 20, boxwidth, boxheight-20, 12)
elif self._shape == BT_RECTANGLE:
dc.DrawRectangle(0, 20, boxwidth, boxheight-20)
if positioning == "NW":
dc.DrawPolygon(((boxwidth-40, boxheight), (boxwidth-20, boxheight+20),
(boxwidth-20, boxheight)))
elif positioning == "SE":
dc.DrawPolygon(((20, 20), (20, 0), (40, 20)))
elif positioning == "SW":
dc.DrawPolygon(((boxwidth-40, 20), (boxwidth-20, 0), (boxwidth-20, 20)))
else:
dc.DrawPolygon(((20, boxheight), (20, boxheight+20), (40, boxheight)))
r = wx.Region(bmp, wx.BLACK)
self.hasShape = self.SetShape(r)
if self._tipstyle == BT_BUTTON:
colour = self.panel.GetBackgroundColour()
self._closebutton.SetBackgroundColour(colour)
self.SetPosition((xpos, ypos))
def OnEnterButton(self, event):
"""
Handles the ``wx.EVT_ENTER_WINDOW`` for the :class:`BalloonTip` button.
When the :class:`BalloonTip` is created with the `tipstyle` = ``BT_BUTTON``, this event
        provides a light 3D effect when the mouse enters the button area.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
button = event.GetEventObject()
colour = button.GetBackgroundColour()
red = colour.Red()
green = colour.Green()
blue = colour.Blue()
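        # Bump any channel that is below 30 so the uniform 30-unit darkening below never goes negative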
if red < 30:
red = red + 30
if green < 30:
green = green + 30
if blue < 30:
blue = blue + 30
colour = wx.Colour(red-30, green-30, blue-30)
button.SetBackgroundColour(colour)
button.SetForegroundColour(wx.WHITE)
button.Refresh()
event.Skip()
def OnLeaveButton(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` for the :class:`BalloonTip` button.
When the :class:`BalloonTip` is created with the `tipstyle` = ``BT_BUTTON``, this event
        restores the button's default appearance when the mouse leaves the button area.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
button = event.GetEventObject()
colour = self.panel.GetBackgroundColour()
button.SetBackgroundColour(colour)
button.SetForegroundColour(wx.BLACK)
button.Refresh()
event.Skip()
def OnClose(self, event):
"""
Handles the ``wx.EVT_CLOSE`` event for :class:`BalloonTip`.
:param `event`: a :class:`CloseEvent` event to be processed.
"""
if isinstance(self._parent._widget, wx.adv.TaskBarIcon):
self._parent.taskbarcreation = 0
self._parent.taskbartime.Stop()
del self._parent.taskbartime
del self._parent.BalloonFrame
self.Destroy()
# ---------------------------------------------------------------
# Class BalloonTip
# ---------------------------------------------------------------
# This Is The Main BalloonTip Implementation
# ---------------------------------------------------------------
class BalloonTip(object):
"""
:class:`BalloonTip` is a class that allows you to display tooltips in a balloon style
window.
This is the main class implementation.
"""
def __init__(self, topicon=None, toptitle="",
message="", shape=BT_ROUNDED, tipstyle=BT_LEAVE):
"""
Default class constructor.
:param `topicon`: an icon that will be displayed on the top-left part of the
:class:`BalloonTip` frame. If set to ``None``, no icon will be displayed;
:type `topicon`: :class:`wx.Bitmap` or ``None``
:param string `toptitle`: a title that will be displayed on the top part of the
:class:`BalloonTip` frame. If set to an empty string, no title will be displayed;
:param string `message`: the tip message that will be displayed. It can not be set to
an empty string;
:param integer `shape`: the :class:`BalloonTip` shape. It can be one of the following:
======================= ========= ====================================
Shape Flag Hex Value Description
======================= ========= ====================================
``BT_ROUNDED`` 0x1 :class:`BalloonTip` will have a rounded rectangular shape.
``BT_RECTANGLE`` 0x2 :class:`BalloonTip` will have a rectangular shape.
======================= ========= ====================================
:param integer `tipstyle`: the :class:`BalloonTip` destruction behavior. It can be one of:
======================= ========= ====================================
Tip Flag Hex Value Description
======================= ========= ====================================
``BT_LEAVE`` 0x3 :class:`BalloonTip` will be destroyed when the user moves the mouse outside the target window.
        ``BT_CLICK``            0x4       :class:`BalloonTip` will be destroyed when the user clicks on the :class:`BalloonTip`.
        ``BT_BUTTON``           0x5       :class:`BalloonTip` will be destroyed when the user clicks on the close button.
======================= ========= ====================================
:raise: `Exception` in the following cases:
- The `message` parameter is an empty string;
- The `shape` parameter has an invalid value (i.e., it's not one of ``BT_ROUNDED``, ``BT_RECTANGLE``);
- The `tipstyle` parameter has an invalid value (i.e., it's not one of ``BT_LEAVE``, ``BT_CLICK``, ``BT_BUTTON``).
"""
self._shape = shape
self._topicon = topicon
self._toptitle = toptitle
self._message = message
self._tipstyle = tipstyle
app = wx.GetApp()
self._runningapp = app
self._runningapp.__tooltipenabled__ = True
if self._message == "":
raise Exception("\nERROR: You Should At Least Set The Message For The BalloonTip")
if self._shape not in [BT_ROUNDED, BT_RECTANGLE]:
raise Exception('\nERROR: BalloonTip Shape Should Be One Of "BT_ROUNDED", "BT_RECTANGLE"')
if self._tipstyle not in [BT_LEAVE, BT_CLICK, BT_BUTTON]:
raise Exception('\nERROR: BalloonTip TipStyle Should Be One Of "BT_LEAVE", '\
'"BT_CLICK", "BT_BUTTON"')
self.SetStartDelay()
self.SetEndDelay()
self.SetBalloonColour()
if toptitle != "":
self.SetTitleFont()
self.SetTitleColour()
if topicon is not None:
self.SetBalloonIcon(topicon)
self.SetMessageFont()
self.SetMessageColour()
def SetTarget(self, widget):
"""
Sets the target control/window for the :class:`BalloonTip`.
:param `widget`: any subclass of :class:`wx.Window`.
"""
self._widget = widget
if isinstance(widget, wx.adv.TaskBarIcon):
self._widget.Bind(wx.adv.EVT_TASKBAR_MOVE, self.OnTaskBarMove)
self._widget.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self.taskbarcreation = 0
else:
self._widget.Bind(wx.EVT_ENTER_WINDOW, self.OnWidgetEnter)
self._widget.Bind(wx.EVT_LEAVE_WINDOW, self.OnWidgetLeave)
self._widget.Bind(wx.EVT_MOTION, self.OnWidgetMotion)
self._widget.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
def GetTarget(self):
"""
Returns the target window for the :class:`BalloonTip`.
:return: An instance of :class:`wx.Window`.
        :raise: `Exception` if the :meth:`~BalloonTip.SetTarget` method has not previously been called.
"""
if not hasattr(self, "_widget"):
raise Exception("\nERROR: BalloonTip Target Has Not Been Set")
return self._widget
def SetStartDelay(self, delay=1):
"""
Sets the delay time after which the :class:`BalloonTip` is created.
:param integer `delay`: the number of milliseconds after which :class:`BalloonTip` is created.
        :raise: `Exception` if `delay` is less than ``1`` millisecond.
"""
if delay < 1:
raise Exception("\nERROR: Delay Time For BalloonTip Creation Should Be Greater Than 1 ms")
self._startdelaytime = float(delay)
def GetStartDelay(self):
"""
Returns the delay time after which the :class:`BalloonTip` is created.
:return: the delay time, in milliseconds.
"""
return self._startdelaytime
def SetEndDelay(self, delay=1e6):
"""
Sets the delay time after which the BalloonTip is destroyed.
:param integer `delay`: the number of milliseconds after which :class:`BalloonTip` is destroyed.
        :raise: `Exception` if `delay` is less than ``1`` millisecond.
"""
if delay < 1:
raise Exception("\nERROR: Delay Time For BalloonTip Destruction Should Be Greater Than 1 ms")
self._enddelaytime = float(delay)
def GetEndDelay(self):
"""
Returns the delay time after which the :class:`BalloonTip` is destroyed.
:return: the delay time, in milliseconds.
"""
return self._enddelaytime
def OnWidgetEnter(self, event):
"""
Handles the ``wx.EVT_ENTER_WINDOW`` for the target control/window and
starts the :class:`BalloonTip` timer for creation.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
if hasattr(self, "BalloonFrame"):
if self.BalloonFrame:
return
if not self._runningapp.__tooltipenabled__:
return
self.showtime = wx.Timer(self._widget)
self._widget.Bind(wx.EVT_TIMER, self.NotifyTimer, self.showtime)
self.showtime.Start(self._startdelaytime)
event.Skip()
def OnWidgetLeave(self, event):
"""
Handles the ``wx.EVT_LEAVE_WINDOW`` for the target control/window.
:param `event`: a :class:`MouseEvent` event to be processed.
:note: If the BalloonTip `tipstyle` is set to ``BT_LEAVE``, the :class:`BalloonTip` is destroyed.
"""
if hasattr(self, "showtime"):
if self.showtime:
self.showtime.Stop()
del self.showtime
if hasattr(self, "BalloonFrame"):
if self.BalloonFrame:
if self._tipstyle == BT_LEAVE:
endtime = time.time()
if endtime - self.starttime > 0.1:
try:
self.BalloonFrame.Destroy()
except:
pass
else:
event.Skip()
else:
event.Skip()
else:
event.Skip()
def OnTaskBarMove(self, event):
"""
Handles the mouse motion inside the taskbar icon area.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
if not hasattr(self, "BalloonFrame"):
if self.taskbarcreation == 0:
self.mousepos = wx.GetMousePosition()
self.currentmousepos = self.mousepos
self.taskbartime = wx.Timer(self._widget)
self._widget.Bind(wx.EVT_TIMER, self.TaskBarTimer, self.taskbartime)
self.taskbartime.Start(100)
self.showtime = wx.Timer(self._widget)
self._widget.Bind(wx.EVT_TIMER, self.NotifyTimer, self.showtime)
self.showtime.Start(self._startdelaytime)
if self.taskbarcreation == 0:
self.taskbarcreation = 1
return
event.Skip()
def OnWidgetMotion(self, event):
"""
        Handles the mouse motion inside the target.
        This prevents the annoying behavior of the :class:`BalloonTip` popping up while the
        user is doing something else inside the window. The :class:`BalloonTip` window is
displayed only when the mouse does *not* move for the start delay time.
:param `event`: a :class:`MouseEvent` event to be processed.
"""
if hasattr(self, "BalloonFrame"):
if self.BalloonFrame:
return
if hasattr(self, "showtime"):
if self.showtime:
self.showtime.Start(self._startdelaytime)
event.Skip()
def NotifyTimer(self, event):
"""
The creation timer has expired. Creates the :class:`BalloonTip` frame.
:param `event`: a :class:`wx.TimerEvent` to be processed.
"""
self.BalloonFrame = BalloonFrame(self._widget, classparent=self)
self.BalloonFrame.Show(True)
self.starttime = time.time()
if hasattr(self, "showtime"):
self.showtime.Stop()
del self.showtime
self.destroytime = wx.Timer(self._widget)
        self._widget.Bind(wx.EVT_TIMER, self.DestroyTimer, self.destroytime)
self.destroytime.Start(self._enddelaytime)
def TaskBarTimer(self, event):
"""
        This timer periodically checks the mouse position.
        If the current mouse position is sufficiently far from the coordinates
        it had when it entered the taskbar icon area, and the :class:`BalloonTip` style is
``BT_LEAVE``, the :class:`BalloonTip` frame is destroyed.
:param `event`: a :class:`wx.TimerEvent` to be processed.
"""
self.currentmousepos = wx.GetMousePosition()
mousepos = self.mousepos
if abs(self.currentmousepos[0] - mousepos[0]) > 30 or \
abs(self.currentmousepos[1] - mousepos[1]) > 30:
if hasattr(self, "BalloonFrame"):
if self._tipstyle == BT_LEAVE:
try:
self.BalloonFrame.Destroy()
self.taskbartime.Stop()
del self.taskbartime
del self.BalloonFrame
self.taskbarcreation = 0
except:
pass
def DestroyTimer(self, event):
"""
The destruction timer has expired. Destroys the :class:`BalloonTip` frame.
:param `event`: a :class:`wx.TimerEvent` to be processed.
"""
self.destroytime.Stop()
del self.destroytime
try:
self.BalloonFrame.Destroy()
except:
pass
def SetBalloonShape(self, shape=BT_ROUNDED):
"""
Sets the :class:`BalloonTip` frame shape.
:param integer `shape`: should be one of ``BT_ROUNDED`` or ``BT_RECTANGLE``.
:raise: `Exception` if the `shape` parameter is not a valid value
(i.e., it's not one of ``BT_ROUNDED``, ``BT_RECTANGLE``);
"""
if shape not in [BT_ROUNDED, BT_RECTANGLE]:
raise Exception('\nERROR: BalloonTip Shape Should Be One Of "BT_ROUNDED", "BT_RECTANGLE"')
self._shape = shape
def GetBalloonShape(self):
"""
Returns the :class:`BalloonTip` frame shape.
:return: An integer, one of ``BT_ROUNDED``, ``BT_RECTANGLE``.
"""
return self._shape
def SetBalloonIcon(self, icon):
"""
Sets the :class:`BalloonTip` top-left icon.
:param `icon`: an instance of :class:`wx.Bitmap`.
:raise: `Exception` if the `icon` bitmap is not a valid :class:`wx.Bitmap`.
"""
if icon.IsOk():
self._topicon = icon
else:
raise Exception("\nERROR: Invalid Image Passed To BalloonTip")
def GetBalloonIcon(self):
"""
Returns the :class:`BalloonTip` top-left icon.
:return: An instance of :class:`wx.Bitmap`.
"""
return self._topicon
def SetBalloonTitle(self, title=""):
"""
Sets the :class:`BalloonTip` top title.
:param string `title`: a string to use as a :class:`BalloonTip` title.
"""
self._toptitle = title
def GetBalloonTitle(self):
"""
Returns the :class:`BalloonTip` top title.
:return: A string containing the top title.
"""
return self._toptitle
def SetBalloonMessage(self, message):
"""
Sets the :class:`BalloonTip` tip message.
:param string `message`: a string identifying the main message body of :class:`BalloonTip`.
:raise: `Exception` if the message is an empty string.
:note: The :class:`BalloonTip` message should never be empty.
"""
if len(message.strip()) < 1:
raise Exception("\nERROR: BalloonTip Message Can Not Be Empty")
self._message = message
def GetBalloonMessage(self):
"""
Returns the :class:`BalloonTip` tip message.
:return: A string containing the main message.
"""
return self._message
def SetBalloonTipStyle(self, tipstyle=BT_LEAVE):
"""
Sets the :class:`BalloonTip` `tipstyle` parameter.
:param integer `tipstyle`: one of the following bit set:
============== ========== =====================================
Tip Style Hex Value Description
============== ========== =====================================
``BT_LEAVE`` 0x3 :class:`BalloonTip` will be destroyed when the user moves the mouse outside the target window.
        ``BT_CLICK``   0x4        :class:`BalloonTip` will be destroyed when the user clicks on the :class:`BalloonTip`.
        ``BT_BUTTON``  0x5        :class:`BalloonTip` will be destroyed when the user clicks on the close button.
============== ========== =====================================
:raise: `Exception` if the `tipstyle` parameter has an invalid value
(i.e., it's not one of ``BT_LEAVE``, ``BT_CLICK``, ``BT_BUTTON``).
"""
if tipstyle not in [BT_LEAVE, BT_CLICK, BT_BUTTON]:
raise Exception('\nERROR: BalloonTip TipStyle Should Be One Of "BT_LEAVE", '\
'"BT_CLICK", "BT_BUTTON"')
self._tipstyle = tipstyle
def GetBalloonTipStyle(self):
"""
Returns the :class:`BalloonTip` `tipstyle` parameter.
:return: An integer representing the style.
:see: :meth:`~BalloonTip.SetBalloonTipStyle`
"""
return self._tipstyle
def SetBalloonColour(self, colour=None):
"""
Sets the :class:`BalloonTip` background colour.
:param `colour`: a valid :class:`wx.Colour` instance.
"""
if colour is None:
colour = wx.Colour(255, 250, 205)
self._ballooncolour = colour
def GetBalloonColour(self):
"""
Returns the :class:`BalloonTip` background colour.
:return: An instance of :class:`wx.Colour`.
"""
return self._ballooncolour
def SetTitleFont(self, font=None):
"""
Sets the font for the top title.
:param `font`: a valid :class:`wx.Font` instance.
"""
if font is None:
font = wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
self._balloontitlefont = font
def GetTitleFont(self):
"""
Returns the font for the top title.
:return: An instance of :class:`wx.Font`.
"""
return self._balloontitlefont
def SetMessageFont(self, font=None):
"""
Sets the font for the tip message.
:param `font`: a valid :class:`wx.Font` instance.
"""
if font is None:
font = wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False)
self._balloonmsgfont = font
def GetMessageFont(self):
"""
Returns the font for the tip message.
:return: An instance of :class:`wx.Font`.
"""
return self._balloonmsgfont
def SetTitleColour(self, colour=None):
"""
Sets the colour for the top title.
:param `colour`: a valid :class:`wx.Colour` instance.
"""
if colour is None:
colour = wx.BLACK
self._balloontitlecolour = colour
def GetTitleColour(self):
"""
Returns the colour for the top title.
:return: An instance of :class:`wx.Colour`.
"""
return self._balloontitlecolour
def SetMessageColour(self, colour=None):
"""
Sets the colour for the tip message.
:param `colour`: a valid :class:`wx.Colour` instance.
"""
if colour is None:
colour = wx.BLACK
self._balloonmsgcolour = colour
def GetMessageColour(self):
"""
Returns the colour for the tip message.
:return: An instance of :class:`wx.Colour`.
"""
return self._balloonmsgcolour
def OnDestroy(self, event):
"""
Handles the target destruction, specifically handling the ``wx.EVT_WINDOW_DESTROY``
event.
:param `event`: a :class:`wx.WindowDestroyEvent` event to be processed.
"""
if hasattr(self, "BalloonFrame"):
if self.BalloonFrame:
try:
if isinstance(self._widget, wx.adv.TaskBarIcon):
self._widget.Unbind(wx.adv.EVT_TASKBAR_MOVE)
self.taskbartime.Stop()
del self.taskbartime
else:
self._widget.Unbind(wx.EVT_MOTION)
self._widget.Unbind(wx.EVT_LEAVE_WINDOW)
self._widget.Unbind(wx.EVT_ENTER_WINDOW)
self.BalloonFrame.Destroy()
except:
pass
del self.BalloonFrame
def EnableTip(self, enable=True):
"""
        Globally enables or disables the :class:`BalloonTip`.
:param bool `enable`: ``True`` to enable :class:`BalloonTip`, ``False`` otherwise.
"""
self._runningapp.__tooltipenabled__ = enable
if __name__ == '__main__':
import wx
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "BalloonTip Demo")
panel = wx.Panel(self)
# Let's suppose that in your application you have a wx.TextCtrl defined as:
mytextctrl = wx.TextCtrl(panel, -1, "I am a textctrl", pos=(100, 100))
# You can define your BalloonTip as follows:
tipballoon = BalloonTip(topicon=None, toptitle="textctrl",
message="this is a textctrl",
shape=BT_ROUNDED,
tipstyle=BT_LEAVE)
# Set the BalloonTip target
tipballoon.SetTarget(mytextctrl)
# Set the BalloonTip background colour
tipballoon.SetBalloonColour(wx.WHITE)
# Set the font for the balloon title
tipballoon.SetTitleFont(wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
# Set the colour for the balloon title
tipballoon.SetTitleColour(wx.BLACK)
# Leave the message font as default
tipballoon.SetMessageFont()
# Set the message (tip) foreground colour
tipballoon.SetMessageColour(wx.LIGHT_GREY)
# Set the start delay for the BalloonTip
tipballoon.SetStartDelay(1000)
# Set the time after which the BalloonTip is destroyed
tipballoon.SetEndDelay(3000)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
|
py | b4155e3d3dc855980ce66bda60ad419984e970b3 | from __future__ import annotations
import ipaddress
import json
import logging
import struct
import sys
import time
import tkinter
import zlib
from dataclasses import astuple
from pathlib import Path
from tkinter import messagebox, ttk
from typing import Optional, Tuple
import dns
import dns.resolver
from idlelib.tooltip import Hovertip
from twisted.internet import reactor, task, tksupport
from modules.Client import ClientInstance
from modules.Common import (CarInfo, Credidentials, DataQueue, NetData,
NetworkQueue, PitStop)
from modules.DriverInputs import DriverInputs
from modules.Server import ServerInstance
from modules.Strategy import StrategyUI
from modules.Telemetry import Telemetry, TelemetryRT, TelemetryUI
from modules.TyreGraph import PrevLapsGraph, TyreGraph
from modules.TyreSets import TyreSets, TyresSetData
from modules.Users import UserUI
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format="%(asctime)s.%(msecs)03d | %(name)s | %(message)s",
datefmt="%H:%M:%S")
_VERSION_ = "1.5.8d"
class ConnectionPage(ttk.Frame):
def __init__(self, app: App, root):
ttk.Frame.__init__(self, master=root)
self.main_app = app
self.connection_path = "./Config/connection.json"
self.is_connected = None
self.connection_msg = ""
self.credis = None
self.is_connected_loop = task.LoopingCall(self.check_connection)
self.credidentials = None
key_check = ("saved_ip", "tcp_port", "udp_port", "username",
"driverID")
logging.info(f"Loading {self.connection_path}")
if Path(self.connection_path).is_file():
fp = open(self.connection_path, "r")
try:
self.credidentials = json.load(fp)
if (type(self.credidentials) is not dict or
tuple(self.credidentials.keys()) != key_check):
logging.info(f"Invalid connection.json file")
self.credidentials = None
except json.JSONDecodeError as msg:
self.credidentials = None
logging.info(f"JSON Error: {msg}")
fp.close()
else:
logging.info(f"{self.connection_path} not found")
self.credidentials = None
self.as_server = False
self.f_connection_info = tkinter.Frame(
self, bd=2, relief=tkinter.RIDGE)
self.f_connection_info.grid()
self.l_ip = tkinter.Label(self.f_connection_info, text="Address",
anchor=tkinter.E, width=10)
self.l_ip.grid(row=0, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "Address of the server host ip or domain", 10)
self.l_tcp_port = tkinter.Label(self.f_connection_info,
text="TCP port", anchor=tkinter.E,
width=10)
self.l_tcp_port.grid(row=1, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "TCP port of the host server (1024 - 10 000),"
" can be the same UDP", 10)
self.l_udp_port = tkinter.Label(self.f_connection_info,
text="UDP port", anchor=tkinter.E,
width=10)
self.l_udp_port.grid(row=2, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "UDP port of the host server (1024 - 10 000),"
" can be the same as TCP", 10)
self.l_username = tkinter.Label(self.f_connection_info,
text="Username",
anchor=tkinter.E, width=10)
self.l_username.grid(row=3, column=0, padx=5, pady=2)
Hovertip(self.l_username, "Your name in ACC", 10)
self.l_driverID = tkinter.Label(self.f_connection_info,
text="Driver ID",
anchor=tkinter.E, width=10)
self.l_driverID.grid(row=4, column=0, padx=5, pady=2)
Hovertip(self.l_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
if self.credidentials is None:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=[])
else:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=self.credidentials["saved_ip"])
self.cb_ip.grid(row=0, column=1, padx=5, pady=2)
self.e_tcp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_tcp_port.grid(row=1, column=1, padx=5, pady=2)
self.e_udp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_udp_port.grid(row=2, column=1, padx=5, pady=2)
self.e_username = tkinter.Entry(self.f_connection_info, width=30)
self.e_username.grid(row=3, column=1, padx=5, pady=2)
Hovertip(self.e_username, "Your name in ACC", 10)
self.e_driverID = tkinter.Entry(self.f_connection_info, width=30)
self.e_driverID.grid(row=4, column=1, padx=5, pady=2)
Hovertip(self.e_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
self.b_connect = tkinter.Button(self, text="Connect",
command=self.connect)
self.b_connect.grid(row=1, padx=10, pady=5)
if self.credidentials is not None:
self.e_tcp_port.insert(tkinter.END, self.credidentials["tcp_port"])
self.e_udp_port.insert(tkinter.END, self.credidentials["udp_port"])
self.e_username.insert(tkinter.END, self.credidentials["username"])
self.e_driverID.insert(tkinter.END, self.credidentials["driverID"])
else:
self.e_tcp_port.insert(tkinter.END, "4269")
self.e_udp_port.insert(tkinter.END, "4270")
logging.info("Displaying connection window")
def set_as_server(self) -> None:
self.cb_ip.set("127.0.0.1")
self.cb_ip["state"] = "disabled"
self.as_server = True
def set_as_client(self) -> None:
self.cb_ip.set("")
self.cb_ip["state"] = "normal"
self.as_server = False
def connect(self) -> None:
logging.info("Connect button pressed")
self.b_connect.config(state="disabled")
error_message = ""
ip = None
try:
ip = ipaddress.ip_address(self.cb_ip.get()).compressed
except ValueError:
logging.info("Querrying dns server...")
try:
results = dns.resolver.resolve(self.cb_ip.get())
for result in results:
logging.info(f"Found ip: {result.address}")
logging.info(f"Picking first dns answer: {results[0].address}")
ip = results[0].address
except dns.resolver.NXDOMAIN:
error_message += "Invalide IP address or Domain name\n"
except dns.resolver.NoAnswer:
error_message += ("DNS didn't replied to the request"
f" for {self.cb_ip.get()}")
except dns.resolver.NoNameservers:
error_message += "No DNS server available"
except dns.resolver.YXDOMAIN:
error_message += ("The query name is too long after "
"DNAME substitution")
if self.e_tcp_port.get().isnumeric():
self.e_tcp_port.config(background="White")
else:
self.e_tcp_port.config(background="Red")
error_message += "Invalide TCP port\n"
if self.e_udp_port.get().isnumeric():
self.e_udp_port.config(background="White")
else:
self.e_udp_port.config(background="Red")
error_message += "Invalide UDP port\n"
if self.e_username.get() != "":
self.e_username.config(background="White")
else:
self.e_username.config(background="Red")
error_message += "Invalide username\n"
driverID = self.e_driverID.get()
if driverID != "" and driverID.isnumeric() and 0 < int(driverID) <= 5:
self.e_driverID.config(background="White")
else:
self.e_driverID.config(background="Red")
            if driverID.isnumeric() and not 1 <= int(driverID) <= 5:
                error_message += ("Are you sure you are the driver N° "
                                  f"{driverID} in your team?")
            else:
                error_message += "Invalid driver ID\n"
if error_message == "":
logging.info("No error in the credidentials")
self.credits = Credidentials(
ip=ip,
tcp_port=int(self.e_tcp_port.get()),
udp_port=int(self.e_udp_port.get()),
username=self.e_username.get(),
driverID=int(self.e_driverID.get())
)
if self.as_server:
self.main_app.as_server(self.credits)
else:
self.main_app.connect_to_server(self.credits)
self.is_connected_loop.start(0.1)
logging.info("Waiting for connection confirmation")
else:
logging.info(f"Error: {error_message}")
messagebox.showerror("Error", error_message)
self.b_connect.config(state="normal")
def check_connection(self) -> None:
if self.is_connected is None:
return
if self.is_connected:
logging.info("Connected")
self.save_credidentials(self.credits)
else:
logging.info("Connection failed")
messagebox.showerror("Error", self.connection_msg)
self.b_connect.config(state="normal")
self.is_connected = None
self.is_connected_loop.stop()
def connected(self, succes: bool, error: str) -> None:
self.is_connected = succes
self.connection_msg = error
def save_credidentials(self, credits: Credidentials) -> None:
logging.info("Saving credidentials")
if self.credidentials is None:
saved_ip = [self.cb_ip.get()]
elif credits.ip not in self.credidentials["saved_ip"]:
saved_ip = [self.cb_ip.get(), *self.credidentials["saved_ip"]]
if len(saved_ip) > 5:
self.credidentials["saved_ip"].pop()
else:
saved_ip = self.credidentials["saved_ip"]
with open(self.connection_path, "w") as fp:
connection = {
"saved_ip": saved_ip,
"tcp_port": credits.tcp_port,
"udp_port": credits.udp_port,
"username": credits.username,
"driverID": credits.driverID,
}
json.dump(connection, fp, indent=4)
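    # Shape of the resulting ./Config/connection.json (values below are illustrative):
    # {
    #     "saved_ip": ["127.0.0.1"],
    #     "tcp_port": 4269,
    #     "udp_port": 4270,
    #     "username": "Driver Name",
    #     "driverID": 1
    # }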
class App(tkinter.Tk):
def __init__(self) -> None:
tkinter.Tk.__init__(self)
tksupport.install(self)
self.geometry("830x580+0+0")
try:
with open("./Config/gui.json", "r") as fp:
self.gui_config = json.load(fp)
except FileNotFoundError:
print("APP: './Config/gui.json' not found.")
return
self.font = (self.gui_config["font"], self.gui_config["font_size"])
app_style = ttk.Style(self)
app_style.configure('.',
font=self.font,
background=self.gui_config["background_colour"],
foreground=self.gui_config["foreground_colour"])
app_style.configure('TNotebook.Tab', foreground="#000000")
app_style.configure('TButton', foreground="#000000")
app_style.configure('TCombobox', foreground="#000000")
app_style.configure("ActiveDriver.TLabel",
background=self.gui_config["active_driver_colour"])
app_style.configure("Users.TFrame", background="#000000")
app_style.configure("TelemetryGrid.TFrame", background="#000000")
app_style.configure("PressureInfo.TFrame", background="#000000")
app_style.configure("TEntry", foreground="#000000")
self.title(f"PyAccEngineer {_VERSION_}")
self.config(bg="Grey")
self.protocol("WM_DELETE_WINDOW", self.on_close)
# Networking
self.is_connected = False
self.client: Optional[ClientInstance] = None
self.server: Optional[ServerInstance] = None
self.net_queue = DataQueue([], [])
self.menu_bar = tkinter.Menu(self)
self.menu_bar.add_command(label="Connect",
command=self.show_connection_page,
font=self.font)
self.menu_bar.add_command(label="As Server",
command=lambda: self.show_connection_page(
True), font=self.font)
self.menu_bar.add_command(label="Disconnect",
command=self.disconnect, state="disabled",
font=self.font)
self.config(menu=self.menu_bar)
self.main_canvas = tkinter.Canvas(self)
self.main_frame = ttk.Frame(self)
self.hsb = ttk.Scrollbar(self)
self.vsb = ttk.Scrollbar(self)
self.main_canvas.config(xscrollcommand=self.hsb.set,
yscrollcommand=self.vsb.set,
highlightthickness=0)
self.hsb.config(orient=tkinter.HORIZONTAL,
command=self.main_canvas.xview)
self.vsb.config(orient=tkinter.VERTICAL,
command=self.main_canvas.yview)
self.hsb.pack(fill=tkinter.X, side=tkinter.BOTTOM,
expand=tkinter.FALSE)
self.vsb.pack(fill=tkinter.Y, side=tkinter.RIGHT,
expand=tkinter.FALSE)
self.main_canvas.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.main_canvas.create_window(0, 0, window=self.main_frame,
anchor=tkinter.NW)
self.user_ui = UserUI(self.main_frame)
self.user_ui.grid(row=1, column=0)
self.tab_control = ttk.Notebook(self.main_frame)
self.tab_control.grid(row=0, column=0, pady=3)
self.f_connection_ui = ttk.Frame(self.tab_control)
self.f_connection_ui.pack(fill=tkinter.BOTH, expand=1)
self.connection_page = ConnectionPage(self, self.f_connection_ui)
self.connection_page.place(anchor=tkinter.CENTER,
in_=self.f_connection_ui,
relx=.5, rely=.5)
# Center StrategyUI in the notebook frame
f_strategy_ui = ttk.Frame(self.tab_control)
f_strategy_ui.pack(fill=tkinter.BOTH, expand=1)
self.strategy_ui = StrategyUI(f_strategy_ui, self.gui_config)
self.strategy_ui.place(anchor=tkinter.CENTER, in_=f_strategy_ui,
relx=.5, rely=.5)
self.telemetry_ui = TelemetryUI(self.tab_control)
self.telemetry_ui.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.driver_inputs = DriverInputs(self.tab_control)
self.driver_inputs.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.tyre_graph = TyreGraph(self.tab_control, self.gui_config)
self.tyre_graph.pack(fill=tkinter.BOTH, expand=1)
self.prev_lap_graph = PrevLapsGraph(self.tab_control, self.gui_config)
self.prev_lap_graph.pack(fill=tkinter.BOTH, expand=1)
self.tyre_sets = TyreSets(self.tab_control, self.gui_config)
self.tyre_sets.pack(fill=tkinter.BOTH, expand=1)
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.add(f_strategy_ui, text="Strategy")
self.tab_control.add(self.telemetry_ui, text="Telemetry")
self.tab_control.add(self.driver_inputs, text="Driver Inputs")
self.tab_control.add(self.tyre_graph, text="Pressures")
self.tab_control.add(self.prev_lap_graph, text="Previous Laps")
self.tab_control.add(self.tyre_sets, text="Tyre sets")
self.tab_control.hide(0)
self.last_time = time.time()
self.rt_last_time = time.time()
self.rt_min_delta = self.gui_config["driver_input_speed"]
self.min_delta = 0.5
self.last_telemetry = time.time()
self.telemetry_timeout = 2
logging.info("Main UI created.")
self.client_loopCall = task.LoopingCall(self.client_loop)
self.client_loopCall.start(0.01)
self.eval('tk::PlaceWindow . center')
self.updateScrollRegion()
def updateScrollRegion(self):
self.main_canvas.update_idletasks()
self.main_canvas.config(scrollregion=self.main_frame.bbox())
def client_loop(self) -> None:
selected_tab_name = self.tab_control.tab(self.tab_control.select(),
"text")
if selected_tab_name == "Driver Inputs":
if not self.driver_inputs.is_animating:
self.driver_inputs.start_animation()
else:
if self.driver_inputs.is_animating:
self.driver_inputs.stop_animation()
if selected_tab_name == "Pressures":
if not self.tyre_graph.is_animating:
self.tyre_graph.start_animation()
else:
if self.tyre_graph.is_animating:
self.tyre_graph.stop_animation()
for element in self.net_queue.q_out:
if element.data_type == NetworkQueue.ConnectionReply:
logging.info("Received Connection reply for server")
succes = bool(element.data[0])
msg_lenght = element.data[1]
msg = element.data[2:2 + msg_lenght]
self.connection_page.connected(succes, msg)
self.mb_connected(succes)
self.is_connected = succes
if not succes:
self.client.close()
elif element.data_type == NetworkQueue.ServerData:
server_data = CarInfo.from_bytes(element.data)
is_first_update = self.strategy_ui.server_data is None
self.strategy_ui.server_data = server_data
if is_first_update:
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Strategy:
logging.info("Received: Strategy")
self.strategy_ui.b_set_strat.config(state="disabled")
asm_data = self.strategy_ui.asm.read_shared_memory()
pit_stop = PitStop.from_bytes(element.data)
self.strategy_ui.save_strategy(pit_stop)
if asm_data is not None:
self.strategy_ui.apply_strategy(pit_stop)
elif element.data_type == NetworkQueue.StategyHistory:
self.strategy_ui.clear_strategy_history()
strategy_count = element.data[0]
byte_index = 1
for _ in range(strategy_count):
strat = PitStop.from_bytes(element.data[byte_index:])
self.strategy_ui.save_strategy(strat)
byte_index += PitStop.byte_size
elif element.data_type == NetworkQueue.StrategyDone:
logging.info("Received: Strategy Done")
self.strategy_ui.b_set_strat.config(state="normal")
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Telemetry:
telemetry, err = Telemetry.from_bytes(element.data)
if (telemetry is None):
messagebox.showerror("Unexpected error", err)
self.on_close()
return
self.telemetry_ui.update_values(telemetry)
self.tyre_graph.update_data(telemetry)
self.strategy_ui.updade_telemetry_data(telemetry)
self.driver_inputs.update_lap(telemetry.lap)
if not self.strategy_ui.is_driver_active:
self.strategy_ui.is_driver_active = True
self.user_ui.set_active(telemetry.driver)
self.last_telemetry = time.time()
elif element.data_type == NetworkQueue.TelemetryRT:
telemetry = TelemetryRT.from_bytes(element.data)
self.driver_inputs.update_values(telemetry)
elif element.data_type == NetworkQueue.UpdateUsers:
logging.info("Received user update")
user_update = element.data
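                # Payload layout (inferred from the parsing below):
                # [nb_users: 1 byte] then per user [name_len: 1 byte][name: utf-8 bytes][driverID: 1 byte]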
nb_users = user_update[0]
self.user_ui.reset()
self.strategy_ui.reset_drivers()
index = 1
for _ in range(nb_users):
lenght = user_update[index]
index += 1
name = user_update[index:index+lenght].decode("utf-8")
index += lenght
driverID = user_update[index]
index += 1
self.user_ui.add_user(name, driverID)
self.strategy_ui.add_driver(name, driverID)
elif element.data_type == NetworkQueue.TyreSets:
data = zlib.decompress(element.data)
tyres_data = []
nb_of_set = data[0]
byte_index = 1
for _ in range(nb_of_set):
tyre_info = TyresSetData.from_bytes(
data[byte_index:byte_index+TyresSetData.byte_size])
tyres_data.append(tyre_info)
byte_index += TyresSetData.byte_size
self.tyre_sets.update_tyre_set_data(tyres_data)
self.net_queue.q_out.clear()
if not self.is_connected:
return
if not self.strategy_ui.is_connected:
self.strategy_ui.is_connected = True
if self.telemetry_ui.driver_swap or self.user_ui.active_user is None:
if self.telemetry_ui.current_driver is not None:
self.user_ui.set_active(self.telemetry_ui.current_driver)
self.telemetry_ui.driver_swap = False
self.strategy_ui.set_driver(self.telemetry_ui.current_driver)
rt_delta_time = time.time() - self.rt_last_time
delta_time = time.time() - self.last_time
if (self.strategy_ui.is_driver_active and
time.time() > self.last_telemetry + self.telemetry_timeout):
logging.info("Telemetry timeout, not received "
f"telemetry for {self.telemetry_timeout}s")
self.strategy_ui.is_driver_active = False
self.user_ui.remove_active()
self.telemetry_ui.current_driver = None
asm_data = self.strategy_ui.asm.read_shared_memory()
if asm_data is not None:
if self.rt_min_delta < rt_delta_time:
self.rt_last_time = time.time()
telemetry_rt = TelemetryRT(
asm_data.Physics.gas,
asm_data.Physics.brake,
asm_data.Physics.steer_angle,
asm_data.Physics.gear,
asm_data.Physics.speed_kmh
)
self.net_queue.q_in.append(NetData(NetworkQueue.TelemetryRT,
telemetry_rt.to_bytes()))
if self.min_delta < delta_time:
self.last_time = time.time()
infos = CarInfo(
*astuple(asm_data.Graphics.mfd_tyre_pressure),
asm_data.Graphics.mfd_fuel_to_add,
asm_data.Static.max_fuel,
asm_data.Graphics.mfd_tyre_set)
self.net_queue.q_in.append(NetData(NetworkQueue.CarInfoData,
infos.to_bytes()))
# Telemetry
name = asm_data.Static.player_name.split("\x00")[0]
surname = asm_data.Static.player_surname.split("\x00")[0]
driver = f"{name} {surname}"
telemetry_data = Telemetry(
driver,
asm_data.Graphics.completed_lap,
asm_data.Physics.fuel,
asm_data.Graphics.fuel_per_lap,
asm_data.Graphics.fuel_estimated_laps,
asm_data.Physics.pad_life,
asm_data.Physics.disc_life,
asm_data.Graphics.current_time,
asm_data.Graphics.best_time,
asm_data.Graphics.last_time,
asm_data.Graphics.is_in_pit,
asm_data.Graphics.is_in_pit_lane,
asm_data.Graphics.session_type,
asm_data.Graphics.driver_stint_time_left,
asm_data.Physics.wheel_pressure,
asm_data.Physics.tyre_core_temp,
asm_data.Physics.brake_temp,
asm_data.Graphics.rain_tyres,
asm_data.Graphics.session_time_left,
asm_data.Graphics.track_grip_status,
asm_data.Physics.front_brake_compound,
asm_data.Physics.rear_brake_compound,
asm_data.Physics.car_damage,
asm_data.Graphics.rain_intensity,
asm_data.Physics.suspension_damage,
asm_data.Graphics.current_sector_index,
asm_data.Graphics.last_sector_time,
asm_data.Graphics.is_valid_lap,
asm_data.Physics.air_temp,
asm_data.Physics.road_temp,
asm_data.Graphics.wind_speed,
asm_data.Graphics.driver_stint_total_time_left,
asm_data.Graphics.current_tyre_set,
)
self.net_queue.q_in.append(NetData(NetworkQueue.Telemetry,
telemetry_data.to_bytes()))
if self.strategy_ui.strategy is not None:
logging.info("Sending strategy")
strategy = self.strategy_ui.strategy
self.strategy_ui.strategy = None
self.net_queue.q_in.append(NetData(NetworkQueue.StrategySet,
strategy.to_bytes()))
if self.strategy_ui.strategy_ok:
logging.info("Send strategy Done")
self.net_queue.q_in.append(NetData(NetworkQueue.StrategyDone))
self.strategy_ui.strategy_ok = False
if self.tyre_sets.updated:
data = b""
data += struct.pack("!B", len(self.tyre_sets.tyres_data))
for tyre_set in self.tyre_sets.tyres_data:
data += tyre_set.to_bytes()
data_compressed = zlib.compress(data)
print(f"{len(data)} vs {len(data_compressed)}")
self.net_queue.q_in.append(NetData(NetworkQueue.TyreSets,
data_compressed))
self.tyre_sets.updated = False
logging.info("Sending tyre set data")
def show_connection_page(self, as_server: bool = False) -> None:
logging.info("Show connection page")
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.select(0)
if as_server:
self.connection_page.set_as_server()
else:
self.connection_page.set_as_client()
def connect_to_server(self, credits: Credidentials) -> None:
logging.info("Creating a ClientInstance connecting"
f" to {credits.ip}:{credits.tcp_port}")
self.client = ClientInstance(credits, self.net_queue)
    def as_server(self, credis: Credidentials) -> None:
logging.info("Creating a ServerInstance")
self.server = ServerInstance(credis.tcp_port, credis.udp_port)
self.connect_to_server(credis)
def mb_connected(self, state: bool) -> None:
if state:
self.menu_bar.entryconfig("Disconnect", state="active")
self.menu_bar.entryconfig("Connect", state="disabled")
self.menu_bar.entryconfig("As Server", state="disabled")
self.tab_control.hide(0)
else:
self.menu_bar.entryconfig("Disconnect", state="disabled")
self.menu_bar.entryconfig("Connect", state="active")
self.menu_bar.entryconfig("As Server", state="active")
def disconnect(self) -> None:
logging.info("Disconnecting")
self.stop_networking()
self.mb_connected(False)
self.strategy_ui.reset()
self.user_ui.reset()
self.tyre_graph.reset()
def stop_networking(self) -> None:
if self.is_connected:
self.client.close()
self.is_connected = False
logging.info("Client stopped.")
if self.server is not None:
self.server.close()
self.server = None
logging.info("Server stopped.")
def on_close(self) -> None:
logging.info("Closing the app")
self.strategy_ui.close()
self.tyre_graph.close()
self.prev_lap_graph.close()
self.tyre_sets.close()
self.disconnect()
self.client_loopCall.stop()
tksupport.uninstall()
reactor.stop()
self.destroy()
logging.info("App closed")
def create_gui() -> None:
App()
def main():
reactor.callLater(0, create_gui)
reactor.run()
if __name__ == "__main__":
main()
|
py | b4155e41ac5bb823ed3cf7e1907435e327ddc46c | from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.custom_field_value_payload_request_body import (
CustomFieldValuePayloadRequestBody,
)
T = TypeVar("T", bound="CustomFieldEntryPayloadRequestBody")
@attr.s(auto_attribs=True)
class CustomFieldEntryPayloadRequestBody:
"""
Example:
{'custom_field_id': '01FCNDV6P870EA6S7TK1DSYDG0', 'values': [{'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_link':
'https://google.com/', 'value_numeric': '123.456', 'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0',
'value_text': 'This is my text field, I hope you like it'}, {'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_link':
'https://google.com/', 'value_numeric': '123.456', 'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0',
'value_text': 'This is my text field, I hope you like it'}]}
Attributes:
custom_field_id (str): ID of the custom field this entry is linked against Example: 01FCNDV6P870EA6S7TK1DSYDG0.
values (List[CustomFieldValuePayloadRequestBody]): List of values to associate with this entry Example: [{'id':
'01FCNDV6P870EA6S7TK1DSYDG0', 'value_link': 'https://google.com/', 'value_numeric': '123.456',
'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_text': 'This is my text field, I hope you like it'},
{'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_link': 'https://google.com/', 'value_numeric': '123.456',
'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_text': 'This is my text field, I hope you like it'},
{'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_link': 'https://google.com/', 'value_numeric': '123.456',
'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_text': 'This is my text field, I hope you like it'},
{'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_link': 'https://google.com/', 'value_numeric': '123.456',
'value_option_id': '01FCNDV6P870EA6S7TK1DSYDG0', 'value_text': 'This is my text field, I hope you like it'}].
"""
custom_field_id: str
values: List[CustomFieldValuePayloadRequestBody]
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
custom_field_id = self.custom_field_id
values = []
for values_item_data in self.values:
values_item = values_item_data.to_dict()
values.append(values_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"custom_field_id": custom_field_id,
"values": values,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
custom_field_id = d.pop("custom_field_id")
values = []
_values = d.pop("values")
for values_item_data in _values:
values_item = CustomFieldValuePayloadRequestBody.from_dict(values_item_data)
values.append(values_item)
custom_field_entry_payload_request_body = cls(
custom_field_id=custom_field_id,
values=values,
)
custom_field_entry_payload_request_body.additional_properties = d
return custom_field_entry_payload_request_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
py | b4155ed705bbe584337b0707f7680f28afc7eb82 | import os
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
class AutoRec(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
# training_step defines the train loop. It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log("train_loss", loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer |
py | b4155f41ea2967a5f01e70e0209df3ea4d647ddd | import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
import launch
################### user configure parameters for ros2 start ###################
xfer_format = 0 # 0-Pointcloud2(PointXYZRTL), 1-customized pointcloud format
multi_topic = 0 # 0-All LiDARs share the same topic, 1-One LiDAR one topic
data_src = 1 # 0-lidar,1-hub
publish_freq = 10.0  # frequency of publish: 1.0, 2.0, 5.0, 10.0, etc.
output_type = 0
frame_id = 'livox_frame'
lvx_file_path = '/home/livox/livox_test.lvx'
cmdline_bd_code = 'livox0000000001'
cur_path = os.path.split(os.path.realpath(__file__))[0] + '/'
cur_config_path = cur_path + '../config'
rviz_config_path = os.path.join(cur_config_path, 'livox_hub.rviz')
user_config_path = os.path.join(cur_config_path, 'livox_hub_config.json')
################### user configure parameters for ros2 end #####################
livox_ros2_params = [
{"xfer_format": xfer_format},
{"multi_topic": multi_topic},
{"data_src": data_src},
{"publish_freq": publish_freq},
{"output_data_type": output_type},
{"frame_id": frame_id},
{"lvx_file_path": lvx_file_path},
{"user_config_path": user_config_path},
{"cmdline_input_bd_code": cmdline_bd_code}
]
def generate_launch_description():
livox_driver = Node(
package='livox_ros2_driver',
executable='livox_ros2_driver_node',
name='livox_lidar_publisher',
output='screen',
parameters=livox_ros2_params
)
return LaunchDescription([
livox_driver
])
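# Launch sketch (illustrative; the launch-file name below is an assumption, substitute the
# name this script is installed under in the livox_ros2_driver package):
#   ros2 launch livox_ros2_driver livox_hub_launch.py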
|
py | b41560ce59cd9f9736cc36c29509958d14f2c11e | import torch
def linear(a, b, x, min_x, max_x):
"""
b ___________
/|
/ |
a _______/ |
| |
min_x max_x
"""
return a + min(max((x - min_x) / (max_x - min_x), 0), 1) * (b - a)
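# Worked example (illustrative): linear(0.0, 1.0, x, 10, 20) ramps from 0 at x <= 10 up to 1 at
# x >= 20, e.g. linear(0.0, 1.0, 15, 10, 20) == 0.5 and linear(0.0, 1.0, 25, 10, 20) == 1.0.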
def batchify(data, device):
return (d.unsqueeze(0).to(device) for d in data)
def _make_seq_first(*args):
# N, G, S, ... -> S, G, N, ...
if len(args) == 1:
arg, = args
return arg.permute(2, 1, 0, *range(3, arg.dim())) if arg is not None else None
return (*(arg.permute(2, 1, 0, *range(3, arg.dim())) if arg is not None else None for arg in args),)
def _make_batch_first(*args):
# S, G, N, ... -> N, G, S, ...
if len(args) == 1:
arg, = args
return arg.permute(2, 1, 0, *range(3, arg.dim())) if arg is not None else None
return (*(arg.permute(2, 1, 0, *range(3, arg.dim())) if arg is not None else None for arg in args),)
def _pack_group_batch(*args):
# S, G, N, ... -> S, G * N, ...
if len(args) == 1:
arg, = args
return arg.reshape(arg.size(0), arg.size(1) * arg.size(2), *arg.shape[3:]) if arg is not None else None
return (*(arg.reshape(arg.size(0), arg.size(1) * arg.size(2), *arg.shape[3:]) if arg is not None else None for arg in args),)
def _unpack_group_batch(N, *args):
# S, G * N, ... -> S, G, N, ...
if len(args) == 1:
arg, = args
return arg.reshape(arg.size(0), -1, N, *arg.shape[2:]) if arg is not None else None
return (*(arg.reshape(arg.size(0), -1, N, *arg.shape[2:]) if arg is not None else None for arg in args),)
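# Shape walk-through (illustrative, not from the original source): with (N, G, S) = (2, 3, 5),
# _make_seq_first maps a (2, 3, 5) tensor to (5, 3, 2); _pack_group_batch then merges the group
# and batch axes, (5, 3, 2) -> (5, 6); _unpack_group_batch(2, ...) restores (5, 3, 2).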
|
py | b415618120c06da408cd656b761d89333b901598 | import torch
class HammingLoss(object):
def __init__(self):
self._num_samples = 0
self._num_labels = 0
self._wrong_pred = 0
def update(self, predicted, target):
if not self._num_labels:
self._num_labels = target.size(1)
assert(target.size(1) == predicted.size(1) == self._num_labels)
assert(target.size(0) == predicted.size(0))
self._num_samples += target.size(0)
cur_wrong_pred = (target.byte() ^ predicted.byte()).sum().item()
self._wrong_pred += cur_wrong_pred
return cur_wrong_pred/(self._num_labels * target.size(0)) # loss for current batch
@property
def loss(self):
return (self._wrong_pred/(self._num_labels*self._num_samples))
@loss.setter
def loss(self, val):
raise NotImplementedError('Modifying hamming loss value is not supported.')
@property
def inverseloss(self):
return (1 - (self._wrong_pred/(self._num_labels*self._num_samples)))
@inverseloss.setter
def inverseloss(self, val):
raise NotImplementedError('Modifying inverse hamming loss value is not supported.')
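# Usage sketch (illustrative, not part of the original module; assumes binary 0/1 label tensors):
#
#   metric = HammingLoss()
#   predicted = torch.tensor([[1, 0, 1]])
#   target = torch.tensor([[1, 1, 1]])
#   metric.update(predicted, target)   # batch loss: 1 wrong label / 3 labels = 0.333...
#   metric.loss                        # running loss over all batches seen so far, here 0.333...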
|
py | b4156341ee5cf00c05fe067f9a578eaf74e4f81e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = "0.1.54"
__author__ = "Ran Aroussi"
from .ticker import Ticker
from .tickers import Tickers
from .multi import download
def pdr_override():
"""
make pandas datareader optional
otherwise can be called via fix_yahoo_finance.download(...)
"""
try:
import pandas_datareader
pandas_datareader.data.get_data_yahoo = download
pandas_datareader.data.get_data_yahoo_actions = download
pandas_datareader.data.DataReader = download
except Exception:
pass
__all__ = ['download', 'Ticker', 'Tickers', 'pdr_override']
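# Usage sketch for pdr_override (illustrative; assumes pandas_datareader is installed):
#
#   import yfinance as yf
#   yf.pdr_override()
#   from pandas_datareader import data as pdr
#   df = pdr.get_data_yahoo("SPY", start="2020-01-01", end="2020-06-30")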
|
py | b4156407080b99d59b2262e9c742db1f67f4cdb7 | import requests
import json
headers = {'Content-Type': 'application/json; charset=utf-8','apikey' :'SECRET'}
for j in range(0,10000,1000):
url = 'https://api.neople.co.kr/cy/ranking/ratingpoint?&offset='+str(j)+'&limit=1000&apikey='
r = requests.get(url=url,headers = headers)
data = json.loads(r.text)
    with open('C:/Users/KTH/Desktop/GitHub/userID.csv', 'a') as f:
        for i in range(0, 1000):
            try:
                nickname = str(data["rows"][i]["nickname"])
                playerId = str(data["rows"][i]["playerId"])
                ratingPoint = str(data["rows"][i]["ratingPoint"])
                print(nickname + ',' + playerId + ',' + ratingPoint)
                f.write(nickname + ',' + playerId + ',' + ratingPoint + '\n')
            except Exception:
                # Skip entries missing from this page of the API response
                continue
|
py | b4156441b215487d799b6dd96192de016723b688 | """TCAM
"""
import numpy as np
from dataclasses import dataclass
from sklearn.base import TransformerMixin, BaseEstimator
from .._base import m_prod, tensor_mtranspose, _default_transform, _t_pinv_fdiag
from .._base import MatrixTensorProduct, NumpynDArray
from ..decompositions import svdm
from .._misc import _assert_order_and_mdim
from .._ml_helpers import MeanDeviationForm
_float_types = [np.typeDict[c] for c in 'efdg'] + [float]
_int_types = [np.typeDict[c] for c in 'bhip'] + [int]
def _pinv_diag(diag_tensor):
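    # Pseudo-inverse of the diagonal entries: values with s**2 <= 1e-6 are treated as zero
    # (the +1e20 shift keeps the division finite and the mask zeroes them out); every other
    # entry is replaced by its reciprocal.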
sinv = diag_tensor.copy()
sinv += ((diag_tensor ** 2) <= 1e-6) * 1e+20
sinv = (((diag_tensor ** 2) > 1e-6) * (1 / sinv))
return sinv
@dataclass
class TensorSVDResults:
u: np.ndarray
s: np.ndarray
v: np.ndarray
def astuple(self):
return self.u.copy(), self.s.copy(), self.v.copy()
# noinspection PyPep8Naming
class TCAM(TransformerMixin, BaseEstimator):
"""tsvdm based tensor component analysis (TCAM).
Linear dimensionality reduction using tensor Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the tSVDM (using :mod:`mprod.MeanDeviationForm` ) .
It uses the :mod:`mprod.decompositions.svdm` function as basis for the ``TSVDMII`` algorithm from Kilmer et. al.
(https://doi.org/10.1073/pnas.2015851118) then offers a CP like transformations of the data accordingly.
See https://arxiv.org/abs/2111.14159 for theoretical results and case studies, and the :ref:`Tutorials <TCAM>`
for elaborated examples
Parameters
----------
n_components : int, float, default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(m_samples, p_features) * n_reps - 1
If ``0 < n_components < 1`` , select the number of components such that the
amount of variance that needs to be explained is greater than the percentage specified
by n_components. In case ``n_components >= 1`` is an integer then the estimated number
of components will be::
n_components_ == min(n_components, min(m_samples, p_features) * n_reps - 1)
Attributes
----------
n_components_ : int
The estimated number of components. When n_components is set
to a number between 0 and 1. this number is estimated from input data.
Otherwise it equals the parameter n_components,
or `min(m_samples, p_features) * n_reps -1` if n_components is None.
explained_variance_ratio_ : ndarray of shape (`n_components_`,)
The amount of variance explained by each of the selected components.
mode2_loadings : ndarray (float) of shape (`n_components_`, `n_features` )
        A matrix representing the contribution (coefficient) of each feature in the original
features space (2'nd mode of the tensor) to each of the TCAM factors.
Methods
-------
fit:
Compute the TCAM transformation for a given dataset
transform:
Transform a given dataset using a fitted TCAM
fit_transform:
Fit a TCAM to a dataset then return its TCAM transformation
inverse_transform:
Given points in the reduced TCAM space, compute the points pre-image in the original features space.
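    Examples
    --------
    A minimal usage sketch (illustrative only; the output shape assumes the usual
    scikit-learn convention of ``fit_transform`` returning ``(m_samples, n_components_)``):
    >>> import numpy as np
    >>> from mprod.dimensionality_reduction import TCAM
    >>> X = np.random.randn(10, 20, 4)  # m_samples x p_features x n_reps
    >>> tca = TCAM(n_components=3)
    >>> scores = tca.fit_transform(X)   # one row of factor scores per sample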
"""
def __init__(self, fun_m: MatrixTensorProduct = None,
inv_m: MatrixTensorProduct = None,
n_components=None):
assert (type(n_components) in _int_types and (n_components >= 1)) or \
((type(n_components) in _float_types) and (0 < n_components <= 1)) \
or (n_components is None), f"`n_components` must be positive integer or a float between 0 and 1" \
f" or `None`, got {n_components} of type {type(n_components)}"
assert (fun_m is None) == (inv_m is None), "Only one of fun_m,inv_m is None. " \
"Both must be defined (or both None)"
self.n_components = n_components
self.fun_m = fun_m
self.inv_m = inv_m
self._mdf = MeanDeviationForm()
def _mprod(self, a, b) -> NumpynDArray:
return m_prod(a, b, self.fun_m, self.inv_m)
def _fit(self, X: np.ndarray):
max_rank = self._n * min(self._m, self._p) - 1
self._hat_svdm = TensorSVDResults(*svdm(X, self.fun_m, self.inv_m, hats=True))
# get factors order
diagonals = self._hat_svdm.s.transpose().copy()
self._factors_order = np.unravel_index(np.argsort(- (diagonals ** 2), axis=None), diagonals.shape)
self._sorted_singular_vals = diagonals[self._factors_order]
self._total_variation = (self._sorted_singular_vals ** 2).sum()
self.explained_variance_ratio_ = ((self._sorted_singular_vals ** 2) / self._total_variation)
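        # Worked illustration (not from the original source): with sorted singular values
        # [3, 2, 1] the total variation is 9 + 4 + 1 = 14, so the explained-variance
        # ratios are [9/14, 4/14, 1/14] ~= [0.643, 0.286, 0.071].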
# populate n_components if not given
if self.n_components is None:
self.n_components_ = max_rank
elif type(self.n_components) in _int_types and self.n_components > 0:
self.n_components_ = min(max_rank, self.n_components)
elif type(self.n_components) in _float_types and self.n_components == 1.:
self.n_components_ = max_rank
elif 0 < self.n_components < 1 and type(self.n_components) in _float_types:
var_cumsum = (self._sorted_singular_vals ** 2).cumsum() # w in the paper
w_idx = np.arange(0, var_cumsum.size, dtype=int) # w index
self.n_components_ = min(max_rank,
w_idx[(var_cumsum / self._total_variation) > self.n_components].min() + 1)
else:
raise ValueError("Unexpected edge case for the value of `n_components`")
self.n_components_ = max(1, self.n_components_)
self._n_factors_order = tuple([self._factors_order[0][:self.n_components_].copy(),
self._factors_order[1][:self.n_components_].copy()])
self.explained_variance_ratio_ = self.explained_variance_ratio_[:self.n_components_]
self._rrho = np.array([0 for _ in range(self._n)])
for nn, rr in zip(*self._n_factors_order):
self._rrho[nn] = max(self._rrho[nn], rr + 1)
# self._rrho += 1
# populate truncations
# _tau = self._sorted_singular_vals[self.n_components_ + 1]
# self._rrho = (diagonals > _tau).sum(axis=1)
self._truncated_hat_svdm = TensorSVDResults(*self._hat_svdm.astuple())
self._truncated_hat_svdm.u = self._truncated_hat_svdm.u[:, :self._rrho.max(), :]
self._truncated_hat_svdm.s = self._truncated_hat_svdm.s[:self._rrho.max(), :]
self._truncated_hat_svdm.v = self._truncated_hat_svdm.v[:, :self._rrho.max(), :]
for i, rho_i in enumerate(self._rrho):
self._truncated_hat_svdm.u[:, rho_i:, i] = 0
self._truncated_hat_svdm.s[rho_i:, i] = 0
self._truncated_hat_svdm.v[:, rho_i:, i] = 0
self._truncated_svdm = TensorSVDResults(self.inv_m(self._truncated_hat_svdm.u),
self.inv_m(self._truncated_hat_svdm.s.transpose()).transpose(),
self.inv_m(self._truncated_hat_svdm.v))
self._truncS_pinv = self._truncated_svdm.s.copy()
self._truncS_pinv[(self._truncS_pinv ** 2) <= 1e-6] = 0
self._truncS_pinv[(self._truncS_pinv ** 2) > 1e-6] = 1 / self._truncS_pinv[(self._truncS_pinv ** 2) > 1e-6]
return self
# noinspection PyUnusedLocal
def fit(self, X, y=None, **fit_params):
"""Fit the model with X.
Parameters
----------
X : array-like of shape (m_samples, p_features, n_modes)
Training data, where m_samples is the number of samples,
p_features is the number of features and n_modes is the
number of modes (timepoints/locations etc...)
y : Ignored
Ignored.
Returns
-------
self : object
Returns the instance itself.
Examples
--------
>>> from mprod.dimensionality_reduction import TCAM
>>> import numpy as np
>>> X = np.random.randn(10,20,4)
>>> tca = TCAM()
>>> mdf = tca.fit(X)
"""
assert len(X.shape) == 3, "X must be a 3'rd order tensor"
self._m, self._p, self._n = X.shape
if self.fun_m is None:
self.fun_m, self.inv_m = _default_transform(self._n)
_X = self._mdf.fit_transform(X)
return self._fit(_X)
def _mode0_reduce(self, tU):
return np.concatenate(
[self._sorted_singular_vals[e] * tU[:, [fj], [fi]] for e, (fi, fj) in
enumerate(zip(*self._n_factors_order))],
axis=1)
def _mode1_reduce(self, tV):
return np.concatenate(
[self._sorted_singular_vals[e] * tV[:, [fj], [fi]] for e, (fi, fj) in
enumerate(zip(*self._n_factors_order))],
axis=1)
def _mode0_projector(self, X):
trunc_U, trunc_S, trunc_V = self._truncated_hat_svdm.astuple()
# trunc_Spinv = _t_pinv_fdiag(trunc_S, self.fun_m, self.inv_m)
# XV = self._mprod(X, trunc_V)
# XVS = self._mprod(XV, trunc_Spinv)
# XVS_hat = self.fun_m(XVS)
XV_hat = np.matmul(self.fun_m(X).transpose(2, 0, 1), trunc_V.transpose(2, 0, 1))
XVS_hat = XV_hat * _pinv_diag(trunc_S).transpose().reshape(self._n, 1, self._rrho.max())
XVS_hat = XVS_hat.transpose(1, 2, 0)
Y = XVS_hat[:, self._n_factors_order[1], self._n_factors_order[0]].copy()
# X_transformed_0 = self._mprod(X, self._truncated_svdm.v)
# X_transformed_0 = self._mprod(X_transformed_0, self._truncS_pinv)
# X_transformed = self.fun_m(X_transformed_0)
return Y
# def _mode1_projector(self, X):
# truncU_mtranspose = tensor_mtranspose(self._truncated_svdm.u, self.fun_m, self.inv_m)
# X_transformed_0 = self._mprod(truncU_mtranspose, X)
# X_transformed_0 = tensor_mtranspose(self._mprod(self._truncS_pinv, X_transformed_0), self.fun_m, self.inv_m)
# X_transformed = self.fun_m(X_transformed_0)
# return self._mode1_reduce(X_transformed)
def transform(self, X):
"""Apply mode-1 dimensionality reduction to X.
X is projected on the first mode-1 tensor components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (m_samples, p_features, n_modes)
Training data, where m_samples is the number of samples,
p_features is the number of features and n_modes is the
number of modes (timepoints/locations etc...)
Returns
-------
X_new : array-like of shape (m_samples, `n_components_`)
            Projection of X onto the first mode-1 tensor components, where m_samples
            is the number of samples and n_components is the number of components.
"""
_assert_order_and_mdim(X, 'X', 3, [(1, self._p), (2, self._n)])
return self._mode0_projector(self._mdf.transform(X))
@property
def mode2_loadings(self):
""" The weights driving the variation in each of the obtained factors with respect to
each feature
"""
return self._truncated_hat_svdm.v[:,self._n_factors_order[1], self._n_factors_order[0]].copy()
def fit_transform(self, X: np.ndarray, y=None, **fit_params):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like of shape (m_samples, p_features, n_modes)
Training data, where m_samples is the number of samples,
p_features is the number of features and n_modes is the
number of modes (timepoints/locations etc...)
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (m_samples, `n_components_`)
Transformed values.
"""
self.fit(X)
return self.transform(X)
# noinspection PyPep8Naming
def inverse_transform(self, Y: NumpynDArray):
"""
Inverts TCAM scores back to the original features space
Parameters
----------
Y: np.ndarray
2d array with shape (k, `n_components_`)
Returns
-------
Y_inv: NumpynDArray
3rd order tensor that is the inverse transform of Y to the original features space
"""
trunc_U, trunc_S, trunc_V = self._truncated_hat_svdm.astuple()
# Suppose YY = X * V * pinv(S)
# and the matrix Y is an ordering of YYs columns according to the factors order
YY_hat = np.zeros((Y.shape[0], self._rrho.max(), self._n))
YY_hat[:, self._n_factors_order[1], self._n_factors_order[0]] = Y.copy()
YYS_hat = YY_hat.transpose(2, 0, 1) * trunc_S.transpose().reshape(self._n, 1, self._rrho.max())
X_hat = np.matmul(YYS_hat, trunc_V.transpose(2, 1, 0)).transpose(1, 2, 0)
XX = self.inv_m(X_hat)
# Note that
# YY*S*V' = X * V * pinv(S) * S * V'
# = X * V * (JJ) * V'
# = X * (V * JJ) * V'
# = X * (VV) * V'
# = X * (JJ) \approx X
#
# where JJ is "almost" the identity tensor
# #################################### OLD CODE #################################################
# YY_hat = np.zeros((trunc_U.shape[0], trunc_U.shape[1], trunc_U.shape[-1])) #
# YY_hat[:, self._n_factors_order[1], self._n_factors_order[0]] = Y.copy() #
# YY = self.inv_m(YY_hat) # get YY from YY_hat #
# YYs = self._mprod(YY, trunc_S) # YY*S #
# Yinv = self._mprod(YYs, tensor_mtranspose(trunc_V, self.fun_m, self.inv_m)) # YY*S*V' #
# # return self._mdf.inverse_transform(Yinv) #
# ###############################################################################################
return self._mdf.inverse_transform(XX)
|
py | b4156547938e54dbc668bf71930211b8cd7fdc83 | __version__ = '1.8.15' |
py | b415661c994385d95179a08eec6e34ad5436f2df | """
Module for parsing to/from Excel
"""
# ---------------------------------------------------------------------
# ExcelFile class
from datetime import datetime, date, time, MINYEAR
import os
import abc
import numpy as np
from pandas.types.common import (is_integer, is_float,
is_bool, is_list_like)
from pandas.core.frame import DataFrame
from pandas.io.parsers import TextParser
from pandas.io.common import (_is_url, _urlopen, _validate_header_arg,
EmptyDataError, get_filepath_or_buffer)
from pandas.tseries.period import Period
from pandas import json
from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass,
string_types)
from pandas.core import config
from pandas.formats.printing import pprint_thing
import pandas.compat as compat
import pandas.compat.openpyxl_compat as openpyxl_compat
from warnings import warn
from distutils.version import LooseVersion
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.%s.writer" % ext,
engine_name, validator=str)
_writer_extensions.append(ext)
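# Hedged sketch of how a third-party engine might hook into ``register_writer`` above
# (the class name and engine string are made up for illustration, not part of pandas):
#
#   class _MyWriter(ExcelWriter):
#       engine = 'mywriter'
#       supported_extensions = ('.xlsx',)
#       def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
#           ...
#       def save(self):
#           ...
#   register_writer(_MyWriter)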
def get_writer(engine_name):
if engine_name == 'openpyxl':
try:
import openpyxl
# with version-less openpyxl engine
# make sure we make the intelligent choice for the user
if LooseVersion(openpyxl.__version__) < '2.0.0':
return _writers['openpyxl1']
elif LooseVersion(openpyxl.__version__) < '2.2.0':
return _writers['openpyxl20']
else:
return _writers['openpyxl22']
except ImportError:
# fall through to normal exception handling below
pass
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
def read_excel(io, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, names=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None,
convert_float=True, has_index_names=None, converters=None,
engine=None, squeeze=False, **kwds):
"""
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
parse_cols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
has_index_names : boolean, default None
DEPRECATED: for version 0.17+ index names will be automatically
inferred based on index_col. To read Excel output from 0.16.2 and
prior that had saved index names, use True.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname
argument for more information on when a Dict of Dataframes is returned.
"""
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io._parse_excel(
sheetname=sheetname, header=header, skiprows=skiprows, names=names,
index_col=index_col, parse_cols=parse_cols, parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values, thousands=thousands,
convert_float=convert_float, has_index_names=has_index_names,
skip_footer=skip_footer, converters=converters,
squeeze=squeeze, **kwds)
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError("pandas requires xlrd >= 0.9.0 for excel "
"support, current version " + xlrd.__VERSION__)
self.io = io
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
# If io is a url, want to keep the data as bytes so can't pass
# to get_filepath_or_buffer()
if _is_url(io):
io = _urlopen(io)
# Deal with S3 urls, path objects, etc. Will convert them to
# buffer or path string
io, _, _ = get_filepath_or_buffer(io)
if engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
elif isinstance(io, compat.string_types):
self.book = xlrd.open_workbook(io)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
names=None, index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None,
convert_float=True, has_index_names=None,
converters=None, squeeze=False, **kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
return self._parse_excel(sheetname=sheetname, header=header,
skiprows=skiprows, names=names,
index_col=index_col,
has_index_names=has_index_names,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values,
thousands=thousands,
skip_footer=skip_footer,
convert_float=convert_float,
converters=converters,
squeeze=squeeze,
**kwds)
def _should_parse(self, i, parse_cols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(parse_cols, int):
return i <= parse_cols
elif isinstance(parse_cols, compat.string_types):
return i in _range2cols(parse_cols)
else:
return i in parse_cols
def _parse_excel(self, sheetname=0, header=0, skiprows=None, names=None,
skip_footer=0, index_col=None, has_index_names=None,
parse_cols=None, parse_dates=False, date_parser=None,
na_values=None, thousands=None, convert_float=True,
verbose=False, squeeze=False, **kwds):
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
skip_footer = skipfooter
_validate_header_arg(header)
if has_index_names is not None:
warn("\nThe has_index_names argument is deprecated; index names "
"will be automatically inferred based on index_col.\n"
"This argmument is still necessary if reading Excel output "
"from 0.16.2 or prior with index names.", FutureWarning,
stacklevel=3)
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
if parse_dates:
raise NotImplementedError("parse_dates keyword of read_excel "
"is not implemented")
if date_parser is not None:
raise NotImplementedError("date_parser keyword of read_excel "
"is not implemented")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
try:
cell_contents = \
xldate.xldate_as_datetime(cell_contents,
epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31)) or
(epoch1904 and year == (1904, 1, 1))):
cell_contents = time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
try:
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
except xldate.XLDateTooLarge:
return cell_contents
if dt[0] < MINYEAR:
cell_contents = time(*dt[3:])
else:
cell_contents = datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
# handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
output[asheetname] = DataFrame()
continue
if is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if is_list_like(header):
header_names = []
control_row = [True for x in data[0]]
for row in header:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header(
data[row], control_row)
header_name, data[row] = _pop_header_name(
data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if is_list_like(index_col):
# forward fill values for MultiIndex index
if not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
if is_list_like(header) and len(header) > 1:
has_index_names = True
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skipfooter=skip_footer,
squeeze=squeeze,
**kwds)
output[asheetname] = parser.read()
if names is not None:
output[asheetname].columns = names
if not squeeze or isinstance(output[asheetname], DataFrame):
output[asheetname].columns = output[
asheetname].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _trim_excel_header(row):
# trim header row so auto-index inference works
# xlrd uses '' , openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _fill_mi_header(row, control_row):
"""Forward fills blank entries in row, but only inside the same parent index
Used for creating headers in Multiindex.
Parameters
----------
row : list
List of items in a single row.
    control_row : list of boolean
        Helps to determine if a particular column is in the same parent index as the
        previous value. Used to stop propagation of empty cells between
        different indexes.
Returns
----------
Returns changed row and control_row
"""
last = row[0]
for i in range(1, len(row)):
if not control_row[i]:
last = row[i]
if row[i] == '' or row[i] is None:
row[i] = last
else:
control_row[i] = False
last = row[i]
return row, control_row
# fill blank if index_col not None
def _pop_header_name(row, index_col):
""" (header, new_data) for header rows in MultiIndex parsing"""
none_fill = lambda x: None if x == '' else x
if index_col is None:
# no index col specified, trim data for inference path
return none_fill(row[0]), row[1:]
else:
# pop out header name and fill w/ blank
i = index_col if not is_list_like(index_col) else max(index_col)
return none_fill(row[i]), row[:i] + [''] + row[i + 1:]
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
elif is_list_like(val):
val = str(val)
return val
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing DataFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
Notes
-----
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None:
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.get_option('io.excel.%s.writer' % ext)
except KeyError:
error = ValueError("No engine for filetype: '%s'" % ext)
raise error
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
"""
        Write given formatted cells into an Excel sheet
        Parameters
        ----------
        cells : generator
            cell of formatted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow: upper left cell row to dump data frame
startcol: upper left cell column to dump data frame
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '%s': '%s'") %
(pprint_thing(cls.engine), pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
class _Openpyxl1Writer(ExcelWriter):
engine = 'openpyxl1'
supported_extensions = ('.xlsx', '.xlsm')
openpyxl_majorver = 1
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
'time. Use {0}.x.y.'
.format(self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs)
# Create workbook object with default optimized_write=True.
self.book = Workbook()
# Openpyxl 1.6.1 adds a dummy sheet. We remove it.
if self.book.worksheets:
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
if (isinstance(cell.val, compat.string_types) and
xcell.data_type_for_value(cell.val) != xcell.TYPE_STRING):
xcell.set_value_explicit(cell.val)
else:
xcell.value = _conv_value(cell.val)
style = None
if cell.style:
style = self._convert_to_style(cell.style)
for field in style.__fields__:
xcell.style.__setattr__(field,
style.__getattribute__(field))
if isinstance(cell.val, datetime):
xcell.style.number_format.format_code = self.datetime_format
elif isinstance(cell.val, date):
xcell.style.number_format.format_code = self.date_format
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
register_writer(_Openpyxl1Writer)
class _OpenpyxlWriter(_Openpyxl1Writer):
engine = 'openpyxl'
register_writer(_OpenpyxlWriter)
class _Openpyxl20Writer(_Openpyxl1Writer):
"""
Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl20'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style_kwargs = {}
# Apply format codes before cell.style to allow override
if isinstance(cell.val, datetime):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format': {'format_code': self.datetime_format}}))
elif isinstance(cell.val, date):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format': {'format_code': self.date_format}}))
if cell.style:
style_kwargs.update(self._convert_to_style_kwargs(cell.style))
if style_kwargs:
xcell.style = xcell.style.copy(**style_kwargs)
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
xcell.style = xcell.style.copy(**style_kwargs)
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
try:
# >= 2.0.0 < 2.1.0
from openpyxl.styles import NumberFormat
return NumberFormat(**number_format_dict)
except:
# >= 2.1.0
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
register_writer(_Openpyxl20Writer)
class _Openpyxl22Writer(_Openpyxl20Writer):
"""
Note: Support for OpenPyxl v2.2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl22'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value = _conv_value(cell.val)
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
register_writer(_Openpyxl22Writer)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (line_sep).join(it)
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (field_sep).join(it)
return out
else:
item = "%s" % item
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
cell.val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
def _convert_to_style(self, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# If there is no formatting we don't create a format object.
if num_format_str is None and style_dict is None:
return None
# Create a XlsxWriter format object.
xl_format = self.book.add_format()
if num_format_str is not None:
xl_format.set_num_format(num_format_str)
if style_dict is None:
return xl_format
# Map the cell font to XlsxWriter font properties.
if style_dict.get('font'):
font = style_dict['font']
if font.get('bold'):
xl_format.set_bold()
# Map the alignment to XlsxWriter alignment properties.
alignment = style_dict.get('alignment')
if alignment:
if (alignment.get('horizontal') and
alignment['horizontal'] == 'center'):
xl_format.set_align('center')
if (alignment.get('vertical') and
alignment['vertical'] == 'top'):
xl_format.set_align('top')
# Map the cell borders to XlsxWriter border properties.
if style_dict.get('borders'):
xl_format.set_border()
return xl_format
register_writer(_XlsxWriter)
|
py | b415664202f7d60abdf65ad28fa35f257d7cc5cd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Scripts to run the all-sky diffuse analysis
"""
from __future__ import absolute_import, division, print_function
from fermipy.utils import load_yaml
from fermipy.jobs.link import Link
from fermipy.jobs.chain import Chain
from fermipy.diffuse import defaults as diffuse_defaults
from fermipy.diffuse.name_policy import NameFactory
from fermipy.diffuse.job_library import SumRings_SG, Vstack_SG, GatherSrcmaps_SG
from fermipy.diffuse.gt_srcmap_partial import SrcmapsDiffuse_SG
from fermipy.diffuse.gt_merge_srcmaps import MergeSrcmaps_SG
from fermipy.diffuse.gt_srcmaps_catalog import SrcmapsCatalog_SG
from fermipy.diffuse.gt_split_and_bin import SplitAndBinChain
from fermipy.diffuse.gt_assemble_model import AssembleModelChain
NAME_FACTORY = NameFactory()
class DiffuseCompChain(Chain):
"""Chain to build srcmaps for diffuse components
This chain consists of:
sum-rings : SumRings_SG
Merge GALProp gas maps by type and ring
srcmaps-diffuse : SrcmapsDiffuse_SG
Compute diffuse component source maps in parallel
vstack-diffuse : Vstack_SG
Combine diffuse component source maps
"""
appname = 'fermipy-diffuse-comp-chain'
linkname_default = 'diffuse-comp'
usage = '%s [options]' % (appname)
description = 'Run diffuse component analysis'
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
library=diffuse_defaults.diffuse['library'],
make_xml=diffuse_defaults.diffuse['make_xml'],
outdir=(None, 'Output directory', str),
dry_run=diffuse_defaults.diffuse['dry_run'])
__doc__ += Link.construct_docstring(default_options)
def __init__(self, **kwargs):
"""C'tor
"""
super(DiffuseCompChain, self).__init__(**kwargs)
self.comp_dict = None
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
data = args.get('data')
comp = args.get('comp')
library = args.get('library')
dry_run = args.get('dry_run', False)
self._set_link('sum-rings', SumRings_SG,
library=library,
outdir=args['outdir'],
dry_run=dry_run)
self._set_link('srcmaps-diffuse', SrcmapsDiffuse_SG,
comp=comp, data=data,
library=library,
make_xml=args['make_xml'],
dry_run=dry_run)
self._set_link('vstack-diffuse', Vstack_SG,
comp=comp, data=data,
library=library,
dry_run=dry_run)
class CatalogCompChain(Chain):
"""Small class to build srcmaps for catalog components
This chain consists of:
srcmaps-catalog : SrcmapsCatalog_SG
Build source maps for all catalog sources in parallel
gather-srcmaps : GatherSrcmaps_SG
        Gather source maps into combined files
merge-srcmaps : MergeSrcmaps_SG
Compute source maps for merged sources
"""
appname = 'fermipy-catalog-comp-chain'
linkname_default = 'catalog-comp'
usage = '%s [options]' % (appname)
description = 'Run catalog component analysis'
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
library=diffuse_defaults.diffuse['library'],
nsrc=(500, 'Number of sources per job', int),
make_xml=(False, "Make XML files for diffuse components", bool),
dry_run=diffuse_defaults.diffuse['dry_run'])
__doc__ += Link.construct_docstring(default_options)
def __init__(self, **kwargs):
"""C'tor
"""
super(CatalogCompChain, self).__init__(**kwargs)
self.comp_dict = None
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
data = args.get('data')
comp = args.get('comp')
library = args.get('library')
dry_run = args.get('dry_run', False)
self._set_link('srcmaps-catalog', SrcmapsCatalog_SG,
comp=comp, data=data,
library=library,
nsrc=args.get('nsrc', 500),
dry_run=dry_run)
self._set_link('gather-srcmaps', GatherSrcmaps_SG,
comp=comp, data=data,
library=library,
dry_run=dry_run)
self._set_link('merge-srcmaps', MergeSrcmaps_SG,
comp=comp, data=data,
library=library,
dry_run=dry_run)
class DiffuseAnalysisChain(Chain):
"""Chain to define diffuse all-sky analysis
This chain consists of:
prepare : `SplitAndBinChain`
Bin the data and make the exposure maps
diffuse-comp : `DiffuseCompChain`
Make source maps for diffuse components
catalog-comp : `CatalogCompChain`
Make source maps for catalog components
assemble-model : `AssembleModelChain`
Assemble the models for fitting
"""
appname = 'fermipy-diffuse-analysis'
linkname_default = 'diffuse'
usage = '%s [options]' % (appname)
description = 'Run diffuse analysis chain'
default_options = dict(config=diffuse_defaults.diffuse['config'],
dry_run=diffuse_defaults.diffuse['dry_run'])
__doc__ += Link.construct_docstring(default_options)
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
config_yaml = args['config']
config_dict = load_yaml(config_yaml)
dry_run = args.get('dry_run', False)
data = config_dict.get('data')
comp = config_dict.get('comp')
library = config_dict.get('library')
models = config_dict.get('models')
scratch = config_dict.get('scratch')
self._set_link('prepare', SplitAndBinChain,
comp=comp, data=data,
ft1file=config_dict.get('ft1file'),
hpx_order_ccube=config_dict.get('hpx_order_ccube'),
hpx_order_expcube=config_dict.get('hpx_order_expcube'),
scratch=scratch,
dry_run=dry_run)
self._set_link('diffuse-comp', DiffuseCompChain,
comp=comp, data=data,
library=library,
make_xml=config_dict.get('make_diffuse_comp_xml', False),
outdir=config_dict.get('merged_gasmap_dir', 'merged_gasmap'),
dry_run=dry_run)
self._set_link('catalog-comp', CatalogCompChain,
comp=comp, data=data,
library=library,
make_xml=config_dict.get('make_catalog_comp_xml', False),
nsrc=config_dict.get('catalog_nsrc', 500),
dry_run=dry_run)
self._set_link('assemble-model', AssembleModelChain,
comp=comp, data=data,
library=library,
models=models,
hpx_order=config_dict.get('hpx_order_fitting'),
dry_run=dry_run)
def register_classes():
"""Register these classes with the `LinkFactory` """
DiffuseCompChain.register_class()
CatalogCompChain.register_class()
DiffuseAnalysisChain.register_class()
|
py | b415672b7f7b2e8c3c1aefcb26205bef184536e8 | from django.conf import settings
from django.core.paginator import InvalidPage, Paginator
from django.http import Http404
from django.shortcuts import render
from ..product.utils import products_with_details
from ..product.utils.availability import products_with_availability
from .forms import SearchForm
def paginate_results(results, get_data, paginate_by=settings.PAGINATE_BY):
paginator = Paginator(results, paginate_by)
page_number = get_data.get("page", 1)
try:
page = paginator.page(page_number)
except InvalidPage:
raise Http404("No such page!")
return page
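# Illustrative use (hedged): given any queryset/list of results and the GET querydict,
#   page = paginate_results(results, request.GET, paginate_by=25)
# returns a ``django.core.paginator.Page`` holding that slice of the results,
# raising Http404 for an out-of-range ``?page=`` value.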
def evaluate_search_query(form, request):
results = products_with_details(request.user) & form.search()
return products_with_availability(
results,
discounts=request.discounts,
country=request.country,
local_currency=request.currency,
taxes=request.taxes,
)
def search(request):
if not settings.ENABLE_SEARCH:
raise Http404("No such page!")
form = SearchForm(data=request.GET or None)
if form.is_valid():
query = form.cleaned_data.get("q", "")
results = evaluate_search_query(form, request)
else:
query, results = "", []
page = paginate_results(list(results), request.GET)
ctx = {"query": query, "results": page, "query_string": "?q=%s" % query}
return render(request, "search/results.html", ctx)
|
py | b415687a1c0588809e346fe4ef1737a10fc7d41a | from __future__ import absolute_import
from . import caffe_pb2 as pb
import numpy as np
def pair_process(item,strict_one=True):
if hasattr(item,'__iter__'):
for i in item:
if i!=item[0]:
if strict_one:
raise ValueError("number in item {} must be the same".format(item))
else:
print("IMPORTANT WARNING: number in item {} must be the same".format(item))
return item[0]
return item
def pair_reduce(item):
if hasattr(item,'__iter__'):
for i in item:
if i!=item[0]:
return item
return [item[0]]
return [item]
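# Worked examples (illustrative, not from the original source):
#   pair_process((3, 3))   -> 3           # equal entries collapse to a scalar
#   pair_process(5)        -> 5           # non-iterables pass through unchanged
#   pair_process((3, 2))   -> ValueError  # unless strict_one=False
#   pair_reduce((3, 3))    -> [3]
#   pair_reduce((3, 2))    -> (3, 2)      # unequal entries are returned as-is
#   pair_reduce(4)         -> [4]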
class Layer_param():
def __init__(self,name='',type='',top=(),bottom=()):
self.param=pb.LayerParameter()
self.name=self.param.name=name
self.type=self.param.type=type
self.top=self.param.top
self.top.extend(top)
self.bottom=self.param.bottom
self.bottom.extend(bottom)
def fc_param(self, num_output, weight_filler='xavier', bias_filler='constant',has_bias=True):
if self.type != 'InnerProduct':
raise TypeError('the layer type must be InnerProduct if you want to set fc param')
fc_param = pb.InnerProductParameter()
fc_param.num_output = num_output
fc_param.weight_filler.type = weight_filler
fc_param.bias_term = has_bias
if has_bias:
fc_param.bias_filler.type = bias_filler
self.param.inner_product_param.CopyFrom(fc_param)
def conv_param(self, num_output, kernel_size, stride=(1), pad=(0,),
weight_filler_type='xavier', bias_filler_type='constant',
bias_term=True, dilation=None,groups=None):
"""
add a conv_param layer if you spec the layer type "Convolution"
Args:
num_output: a int
kernel_size: int list
stride: a int list
weight_filler_type: the weight filer type
bias_filler_type: the bias filler type
Returns:
"""
if self.type not in ['Convolution','Deconvolution']:
raise TypeError('the layer type must be Convolution or Deconvolution if you want to set conv param')
conv_param=pb.ConvolutionParameter()
conv_param.num_output=num_output
conv_param.kernel_size.extend(pair_reduce(kernel_size))
conv_param.stride.extend(pair_reduce(stride))
conv_param.pad.extend(pair_reduce(pad))
conv_param.bias_term=bias_term
conv_param.weight_filler.type=weight_filler_type
if bias_term:
conv_param.bias_filler.type = bias_filler_type
if dilation:
conv_param.dilation.extend(pair_reduce(dilation))
if groups:
conv_param.group=groups
if groups != 1:
conv_param.engine = 1
self.param.convolution_param.CopyFrom(conv_param)
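# Example call (a hedged sketch, not from the original code): a 3x3, stride-2,
# pad-1 convolution with 64 output channels and no bias could be described as
#   layer = Layer_param(name='conv1', type='Convolution', top=('conv1',), bottom=('data',))
#   layer.conv_param(64, kernel_size=[3, 3], stride=[2, 2], pad=[1, 1], bias_term=False)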
def norm_param(self, eps):
"""
add a conv_param layer if you spec the layer type "Convolution"
Args:
num_output: a int
kernel_size: int list
stride: a int list
weight_filler_type: the weight filer type
bias_filler_type: the bias filler type
Returns:
"""
l2norm_param = pb.NormalizeParameter()
l2norm_param.across_spatial = False
l2norm_param.channel_shared = False
l2norm_param.eps = eps
self.param.norm_param.CopyFrom(l2norm_param)
def permute_param(self, order):
"""
add a conv_param layer if you spec the layer type "Convolution"
Args:
num_output: a int
kernel_size: int list
stride: a int list
weight_filler_type: the weight filer type
bias_filler_type: the bias filler type
Returns:
"""
permute_param = pb.PermuteParameter()
permute_param.order.extend(*order)
self.param.permute_param.CopyFrom(permute_param)
def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, ceil_mode = True):
pool_param=pb.PoolingParameter()
pool_param.pool=pool_param.PoolMethod.Value(type)
pool_param.kernel_size=pair_process(kernel_size)
pool_param.stride=pair_process(stride)
pool_param.ceil_mode=ceil_mode
if pad:
if isinstance(pad,tuple):
pool_param.pad_h = pad[0]
pool_param.pad_w = pad[1]
else:
pool_param.pad=pad
self.param.pooling_param.CopyFrom(pool_param)
def batch_norm_param(self,use_global_stats=0,moving_average_fraction=None,eps=None):
bn_param=pb.BatchNormParameter()
bn_param.use_global_stats=use_global_stats
if moving_average_fraction:
bn_param.moving_average_fraction=moving_average_fraction
if eps:
bn_param.eps = eps
self.param.batch_norm_param.CopyFrom(bn_param)
# layer
# {
# name: "upsample_layer"
# type: "Upsample"
# bottom: "some_input_feature_map"
# bottom: "some_input_pool_index"
# top: "some_output"
# upsample_param {
# upsample_h: 224
# upsample_w: 224
# }
# }
def upsample_param(self,size=None, scale_factor=None):
upsample_param=pb.UpsampleParameter()
if scale_factor:
if isinstance(scale_factor,int):
upsample_param.scale = scale_factor
else:
upsample_param.scale_h = scale_factor[0]
upsample_param.scale_w = scale_factor[1]
if size:
if isinstance(size,int):
upsample_param.upsample_h = size
else:
upsample_param.upsample_h = size[0] * scale_factor
upsample_param.upsample_w = size[1] * scale_factor
self.param.upsample_param.CopyFrom(upsample_param)
def add_data(self,*args):
"""Args are data numpy array
"""
del self.param.blobs[:]
for data in args:
new_blob = self.param.blobs.add()
for dim in data.shape:
new_blob.shape.dim.append(dim)
new_blob.data.extend(data.flatten().astype(float))
def set_params_by_dict(self,dic):
pass
def copy_from(self,layer_param):
pass
def set_enum(param,key,value):
setattr(param,key,param.Value(value))
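# Minimal end-to-end usage sketch (an assumption about typical usage, not part of
# the original module): describe a fully connected layer and attach weight blobs.
#   fc = Layer_param(name='fc1', type='InnerProduct', top=('fc1',), bottom=('pool5',))
#   fc.fc_param(num_output=10)
#   fc.add_data(np.random.rand(10, 512), np.zeros(10))
#   print(fc.param)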
|
py | b4156925bdaeb06829ed10afc2a0f842c553cb42 | def readme_sklearn_api():
from xgboost_ray import RayXGBClassifier, RayParams
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
seed = 42
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.25, random_state=42)
clf = RayXGBClassifier(
n_jobs=4, # In XGBoost-Ray, n_jobs sets the number of actors
random_state=seed)
# scikit-learn API will automatically convert the data
# to RayDMatrix format as needed
clf.fit(X_train, y_train)
pred_ray = clf.predict(X_test)
print(pred_ray)
pred_proba_ray = clf.predict_proba(X_test)
print(pred_proba_ray)
# It is also possible to pass a RayParams object
# to fit/predict/predict_proba methods - will override
# n_jobs set during initialization
clf.fit(X_train, y_train, ray_params=RayParams(num_actors=2))
pred_ray = clf.predict(X_test, ray_params=RayParams(num_actors=2))
print(pred_ray)
if __name__ == "__main__":
import ray
ray.init(num_cpus=5)
print("Readme: scikit-learn API example")
readme_sklearn_api()
|
py | b415697d66ee83804376a615d4ca5dded0331d47 | """Auto-generated file, do not edit by hand. MG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MG = PhoneMetadata(id='MG', country_code=261, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[23]\\d{8}', possible_number_pattern='\\d{7,9}', possible_length=(9,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='20(?:2\\d{2}|4[47]\\d|5[3467]\\d|6[279]\\d|7(?:2[29]|[35]\\d)|8[268]\\d|9[245]\\d)\\d{4}', example_number='202123456', possible_length=(9,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='3[2-49]\\d{7}', possible_number_pattern='\\d{9}', example_number='321234567', possible_length=(9,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='22\\d{7}', possible_number_pattern='\\d{9}', example_number='221234567', possible_length=(9,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([23]\\d)(\\d{2})(\\d{3})(\\d{2})', format='\\1 \\2 \\3 \\4', national_prefix_formatting_rule='0\\1')])
|
py | b41569d9de0e09334f8eb3f9f70435389e3af058 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <[email protected]>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
__all__ = ("IBusException", )
class IBusException(Exception):
pass
|
py | b4156a697f0ee3d3f6fbb53973123bde3e9487f2 | # -*- coding: utf-8 -*-
version = '2.1.2'
release = False
#--------------------------------------------------------------------------#
import sys
if (sys.version_info < (3, )):
from commands import getstatusoutput
else:
from subprocess import getstatusoutput # lint:ok
import datetime
import os
import glob
class CommandError(Exception):
pass
def execute_command(commandstring):
status, output = getstatusoutput(commandstring)
if status != 0:
m = 'Command "{0}" exited with status {1}'
msg = m.format(commandstring, status)
raise CommandError(msg)
return output
def parse_version_from_package():
try:
pkginfo = os.path.join(glob.glob('*.egg-info')[0],
'PKG-INFO')
except:
pkginfo = ''
version_string = ''
if os.path.exists(pkginfo):
for line in open(pkginfo):
if line.find('Version: ') == 0:
version_string = line.strip().split('Version: ')[1].strip()
if not version_string:
version_string = '%s-dev' % version
else:
version_string = version
return version_string
def get_version():
try:
globalid = execute_command("hg identify -i").strip('+')
c = "hg log -r %s --template '{date|isodatesec}'" % globalid
commitdate = execute_command(c)
# convert date to UTC unix timestamp, using the date command because
# python date libraries do not stabilise till about 2.6
dateCommand = 'date -d"%s" --utc +%%s' % commitdate
timestamp = int(execute_command(dateCommand))
# finally we have something we can use!
dt = datetime.datetime.utcfromtimestamp(timestamp)
datestring = dt.strftime('%Y%m%d%H%M%S')
if release:
version_string = version
else:
version_string = "%s.dev%s" % (version, datestring)
except (CommandError, ValueError, TypeError):
# --=mpj17=-- Usually because we are building out a source-egg,
# rather than from a Hg source-directory.
version_string = parse_version_from_package()
return version_string
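# Examples of the strings this can return (the timestamp below is purely
# illustrative):
#   release = True               -> '2.1.2'
#   release = False, hg present  -> '2.1.2.dev20210314101500'
#   fallback (no hg, no PKG-INFO Version) -> '2.1.2-dev'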
if __name__ == '__main__':
import sys
sys.stdout.write('{0}\n'.format(get_version()))
|
py | b4156a79cec7d5e9f50811ac9a2db53e523837b8 | ##################################################################################
# Fast-SCNN: Fast Semantic Segmentation Network
# Paper-Link: https://arxiv.org/pdf/1902.04502.pdf
##################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
__all__ = ["FastSCNNG3"]
class _ConvBNReLU(nn.Module):
"""Conv-BN-ReLU"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, **kwargs):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DSConv(nn.Module):
"""Depthwise Separable Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DSConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DWConv(nn.Module):
"""Depthwise Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DWConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
"""LinearBottleneck used in MobileNetV2"""
def __init__(self, in_channels, out_channels, t=6, kernel_size=3, stride=1, padding=1):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
if stride == 2:
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, kernel_size, stride, padding),
_DWConv(in_channels * t, in_channels * t, 3, 1, 1),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
else:
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, kernel_size, stride, padding),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
out = x + out
return out
class PyramidPooling(nn.Module):
"""Pyramid pooling module"""
def __init__(self, in_channels, out_channels, **kwargs):
super(PyramidPooling, self).__init__()
inter_channels = int(in_channels / 4)
self.conv1 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv2 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv3 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv4 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.out = _ConvBNReLU(in_channels * 2, out_channels, 1)
def pool(self, x, size):
avgpool = nn.AdaptiveAvgPool2d(size)
return avgpool(x)
def upsample(self, x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
def forward(self, x):
size = x.size()[2:]
feat1 = self.upsample(self.conv1(self.pool(x, 1)), size)
feat2 = self.upsample(self.conv2(self.pool(x, 2)), size)
feat3 = self.upsample(self.conv3(self.pool(x, 3)), size)
feat4 = self.upsample(self.conv4(self.pool(x, 6)), size)
x = torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
x = self.out(x)
return x
class LearningToDownsample(nn.Module):
"""Learning to downsample module"""
def __init__(self, dw_channels1=32, dw_channels2=48, out_channels=64, **kwargs):
super(LearningToDownsample, self).__init__()
self.conv = _ConvBNReLU(3, dw_channels1, 2, 2, 0)
self.conv1 = _DSConv(dw_channels1, dw_channels1, 3, 1, 1)
self.dsconv1 = _DSConv(dw_channels1, dw_channels2, 2, 2, 0)
self.dsconv1_1 = _DSConv(dw_channels2, dw_channels2, 3, 1, 1)
self.dsconv2 = _DSConv(dw_channels2, out_channels, 2, 2, 0)
self.dsconv2_1 = _DSConv(out_channels, out_channels, 3, 1, 1)
def forward(self, x):
x = self.conv(x)
x = self.conv1(x)
x = self.dsconv1(x)
x = self.dsconv1_1(x)
x = self.dsconv2(x)
x = self.dsconv2_1(x)
return x
class GlobalFeatureExtractor(nn.Module):
"""Global feature extractor module"""
def __init__(self, in_channels=64, block_channels=(64, 96, 128),
out_channels=128, t=6, num_blocks=(3, 3, 3), **kwargs):
super(GlobalFeatureExtractor, self).__init__()
self.bottleneck1 = self._make_layer(LinearBottleneck, in_channels, block_channels[0], num_blocks[0], t, 2, 2, 0)
self.bottleneck2 = self._make_layer(LinearBottleneck, block_channels[0], block_channels[1], num_blocks[1], t, 2,
2, 0)
self.bottleneck3 = self._make_layer(LinearBottleneck, block_channels[1], block_channels[2], num_blocks[2], t, 3,
1, 1)
self.ppm = PyramidPooling(block_channels[2], out_channels)
def _make_layer(self, block, inplanes, planes, blocks, t=6, kernel_size=3, stride=1, padding=1):
layers = []
layers.append(block(inplanes, planes, t, kernel_size, stride, padding))
for i in range(1, blocks):
layers.append(block(planes, planes, t, 3, 1, 1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = self.ppm(x)
return x
class FeatureFusionModule(nn.Module):
"""Feature fusion module"""
def __init__(self, highter_in_channels, lower_in_channels, out_channels, scale_factor=4, **kwargs):
super(FeatureFusionModule, self).__init__()
self.scale_factor = scale_factor
self.dwconv = _DWConv(lower_in_channels, out_channels)
self.conv_lower_res = nn.Sequential(
nn.Conv2d(out_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.conv_higher_res = nn.Sequential(
nn.Conv2d(highter_in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.relu = nn.ReLU(True)
def forward(self, higher_res_feature, lower_res_feature):
_, _, h, w = higher_res_feature.size()
lower_res_feature = F.interpolate(lower_res_feature, size=(h, w), mode='bilinear', align_corners=True)
lower_res_feature = self.dwconv(lower_res_feature)
lower_res_feature = self.conv_lower_res(lower_res_feature)
higher_res_feature = self.conv_higher_res(higher_res_feature)
out = higher_res_feature + lower_res_feature
return self.relu(out)
class Classifer(nn.Module):
"""Classifer"""
def __init__(self, dw_channels, num_classes):
super(Classifer, self).__init__()
self.dsconv1 = _DSConv(dw_channels, dw_channels)
self.dsconv2 = _DSConv(dw_channels, dw_channels)
self.conv = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(dw_channels, num_classes, 1)
)
def forward(self, x):
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.conv(x)
return x
# This network is essentially the same as the ContextNet-style variant; the difference is that the head shallownet becomes a shared part, and a PPM is added to the deepnet.
class FastSCNNG3(nn.Module):
def __init__(self, classes, aux=False, **kwargs):
super(FastSCNNG3, self).__init__()
self.aux = aux
self.learning_to_downsample = LearningToDownsample(32, 48, 64) # similar to ContextNet's Shallow_net
self.global_feature_extractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6,
[3, 3, 3]) # similar to ContextNet's deepnet, with a PPM added
self.feature_fusion = FeatureFusionModule(64, 128, 128) # same as ContextNet
self.classifier = Classifer(128, classes) # same as ContextNet
if self.aux:
self.auxlayer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(32, classes, 1)
)
def forward(self, x):
size = x.size()[2:]
higher_res_features = self.learning_to_downsample(x)
x = self.global_feature_extractor(higher_res_features)
x = self.feature_fusion(higher_res_features, x)
x = self.classifier(x)
outputs = []
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(higher_res_features)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return x
# return tuple(outputs)
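# Feature-map sizes for a 3x512x1024 input (consistent with the summary below):
# learning_to_downsample -> 64 x 64 x 128 (1/8 resolution),
# global_feature_extractor -> 128 x 16 x 32 (1/32), feature_fusion -> 128 x 64 x 128,
# classifier -> classes x 64 x 128, then bilinear upsampling back to 512 x 1024.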
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FastSCNNG3(classes=19).to(device)
summary(model, (3, 512, 1024))
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
x = torch.randn(2, 3, 512, 1024).to(device)
from fvcore.nn.jit_handles import batchnorm_flop_jit
from fvcore.nn.jit_handles import generic_activation_jit
supported_ops = {
"aten::batch_norm": batchnorm_flop_jit,
}
flop_dict, _ = flop_count(model, (x,), supported_ops)
flops_count, params_count = get_model_complexity_info(model, (3, 512, 1024),
as_strings=False,
print_per_layer_stat=True)
input = x
macs, params = profile(model, inputs=(input,))
print(flop_dict)
print(flops_count, params_count)
print(macs, params)
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/FastSCNNG3.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 256, 512] 384
BatchNorm2d-2 [-1, 32, 256, 512] 64
ReLU-3 [-1, 32, 256, 512] 0
_ConvBNReLU-4 [-1, 32, 256, 512] 0
Conv2d-5 [-1, 32, 256, 512] 288
BatchNorm2d-6 [-1, 32, 256, 512] 64
ReLU-7 [-1, 32, 256, 512] 0
Conv2d-8 [-1, 32, 256, 512] 1,024
BatchNorm2d-9 [-1, 32, 256, 512] 64
ReLU-10 [-1, 32, 256, 512] 0
_DSConv-11 [-1, 32, 256, 512] 0
Conv2d-12 [-1, 32, 128, 256] 128
BatchNorm2d-13 [-1, 32, 128, 256] 64
ReLU-14 [-1, 32, 128, 256] 0
Conv2d-15 [-1, 48, 128, 256] 1,536
BatchNorm2d-16 [-1, 48, 128, 256] 96
ReLU-17 [-1, 48, 128, 256] 0
_DSConv-18 [-1, 48, 128, 256] 0
Conv2d-19 [-1, 48, 128, 256] 432
BatchNorm2d-20 [-1, 48, 128, 256] 96
ReLU-21 [-1, 48, 128, 256] 0
Conv2d-22 [-1, 48, 128, 256] 2,304
BatchNorm2d-23 [-1, 48, 128, 256] 96
ReLU-24 [-1, 48, 128, 256] 0
_DSConv-25 [-1, 48, 128, 256] 0
Conv2d-26 [-1, 48, 64, 128] 192
BatchNorm2d-27 [-1, 48, 64, 128] 96
ReLU-28 [-1, 48, 64, 128] 0
Conv2d-29 [-1, 64, 64, 128] 3,072
BatchNorm2d-30 [-1, 64, 64, 128] 128
ReLU-31 [-1, 64, 64, 128] 0
_DSConv-32 [-1, 64, 64, 128] 0
Conv2d-33 [-1, 64, 64, 128] 576
BatchNorm2d-34 [-1, 64, 64, 128] 128
ReLU-35 [-1, 64, 64, 128] 0
Conv2d-36 [-1, 64, 64, 128] 4,096
BatchNorm2d-37 [-1, 64, 64, 128] 128
ReLU-38 [-1, 64, 64, 128] 0
_DSConv-39 [-1, 64, 64, 128] 0
LearningToDownsample-40 [-1, 64, 64, 128] 0
Conv2d-41 [-1, 384, 64, 128] 24,576
BatchNorm2d-42 [-1, 384, 64, 128] 768
ReLU-43 [-1, 384, 64, 128] 0
_ConvBNReLU-44 [-1, 384, 64, 128] 0
Conv2d-45 [-1, 384, 32, 64] 1,536
BatchNorm2d-46 [-1, 384, 32, 64] 768
ReLU-47 [-1, 384, 32, 64] 0
_DWConv-48 [-1, 384, 32, 64] 0
Conv2d-49 [-1, 384, 32, 64] 3,456
BatchNorm2d-50 [-1, 384, 32, 64] 768
ReLU-51 [-1, 384, 32, 64] 0
_DWConv-52 [-1, 384, 32, 64] 0
Conv2d-53 [-1, 64, 32, 64] 24,576
BatchNorm2d-54 [-1, 64, 32, 64] 128
LinearBottleneck-55 [-1, 64, 32, 64] 0
Conv2d-56 [-1, 384, 32, 64] 24,576
BatchNorm2d-57 [-1, 384, 32, 64] 768
ReLU-58 [-1, 384, 32, 64] 0
_ConvBNReLU-59 [-1, 384, 32, 64] 0
Conv2d-60 [-1, 384, 32, 64] 3,456
BatchNorm2d-61 [-1, 384, 32, 64] 768
ReLU-62 [-1, 384, 32, 64] 0
_DWConv-63 [-1, 384, 32, 64] 0
Conv2d-64 [-1, 64, 32, 64] 24,576
BatchNorm2d-65 [-1, 64, 32, 64] 128
LinearBottleneck-66 [-1, 64, 32, 64] 0
Conv2d-67 [-1, 384, 32, 64] 24,576
BatchNorm2d-68 [-1, 384, 32, 64] 768
ReLU-69 [-1, 384, 32, 64] 0
_ConvBNReLU-70 [-1, 384, 32, 64] 0
Conv2d-71 [-1, 384, 32, 64] 3,456
BatchNorm2d-72 [-1, 384, 32, 64] 768
ReLU-73 [-1, 384, 32, 64] 0
_DWConv-74 [-1, 384, 32, 64] 0
Conv2d-75 [-1, 64, 32, 64] 24,576
BatchNorm2d-76 [-1, 64, 32, 64] 128
LinearBottleneck-77 [-1, 64, 32, 64] 0
Conv2d-78 [-1, 384, 32, 64] 24,576
BatchNorm2d-79 [-1, 384, 32, 64] 768
ReLU-80 [-1, 384, 32, 64] 0
_ConvBNReLU-81 [-1, 384, 32, 64] 0
Conv2d-82 [-1, 384, 16, 32] 1,536
BatchNorm2d-83 [-1, 384, 16, 32] 768
ReLU-84 [-1, 384, 16, 32] 0
_DWConv-85 [-1, 384, 16, 32] 0
Conv2d-86 [-1, 384, 16, 32] 3,456
BatchNorm2d-87 [-1, 384, 16, 32] 768
ReLU-88 [-1, 384, 16, 32] 0
_DWConv-89 [-1, 384, 16, 32] 0
Conv2d-90 [-1, 96, 16, 32] 36,864
BatchNorm2d-91 [-1, 96, 16, 32] 192
LinearBottleneck-92 [-1, 96, 16, 32] 0
Conv2d-93 [-1, 576, 16, 32] 55,296
BatchNorm2d-94 [-1, 576, 16, 32] 1,152
ReLU-95 [-1, 576, 16, 32] 0
_ConvBNReLU-96 [-1, 576, 16, 32] 0
Conv2d-97 [-1, 576, 16, 32] 5,184
BatchNorm2d-98 [-1, 576, 16, 32] 1,152
ReLU-99 [-1, 576, 16, 32] 0
_DWConv-100 [-1, 576, 16, 32] 0
Conv2d-101 [-1, 96, 16, 32] 55,296
BatchNorm2d-102 [-1, 96, 16, 32] 192
LinearBottleneck-103 [-1, 96, 16, 32] 0
Conv2d-104 [-1, 576, 16, 32] 55,296
BatchNorm2d-105 [-1, 576, 16, 32] 1,152
ReLU-106 [-1, 576, 16, 32] 0
_ConvBNReLU-107 [-1, 576, 16, 32] 0
Conv2d-108 [-1, 576, 16, 32] 5,184
BatchNorm2d-109 [-1, 576, 16, 32] 1,152
ReLU-110 [-1, 576, 16, 32] 0
_DWConv-111 [-1, 576, 16, 32] 0
Conv2d-112 [-1, 96, 16, 32] 55,296
BatchNorm2d-113 [-1, 96, 16, 32] 192
LinearBottleneck-114 [-1, 96, 16, 32] 0
Conv2d-115 [-1, 576, 16, 32] 55,296
BatchNorm2d-116 [-1, 576, 16, 32] 1,152
ReLU-117 [-1, 576, 16, 32] 0
_ConvBNReLU-118 [-1, 576, 16, 32] 0
Conv2d-119 [-1, 576, 16, 32] 5,184
BatchNorm2d-120 [-1, 576, 16, 32] 1,152
ReLU-121 [-1, 576, 16, 32] 0
_DWConv-122 [-1, 576, 16, 32] 0
Conv2d-123 [-1, 128, 16, 32] 73,728
BatchNorm2d-124 [-1, 128, 16, 32] 256
LinearBottleneck-125 [-1, 128, 16, 32] 0
Conv2d-126 [-1, 768, 16, 32] 98,304
BatchNorm2d-127 [-1, 768, 16, 32] 1,536
ReLU-128 [-1, 768, 16, 32] 0
_ConvBNReLU-129 [-1, 768, 16, 32] 0
Conv2d-130 [-1, 768, 16, 32] 6,912
BatchNorm2d-131 [-1, 768, 16, 32] 1,536
ReLU-132 [-1, 768, 16, 32] 0
_DWConv-133 [-1, 768, 16, 32] 0
Conv2d-134 [-1, 128, 16, 32] 98,304
BatchNorm2d-135 [-1, 128, 16, 32] 256
LinearBottleneck-136 [-1, 128, 16, 32] 0
Conv2d-137 [-1, 768, 16, 32] 98,304
BatchNorm2d-138 [-1, 768, 16, 32] 1,536
ReLU-139 [-1, 768, 16, 32] 0
_ConvBNReLU-140 [-1, 768, 16, 32] 0
Conv2d-141 [-1, 768, 16, 32] 6,912
BatchNorm2d-142 [-1, 768, 16, 32] 1,536
ReLU-143 [-1, 768, 16, 32] 0
_DWConv-144 [-1, 768, 16, 32] 0
Conv2d-145 [-1, 128, 16, 32] 98,304
BatchNorm2d-146 [-1, 128, 16, 32] 256
LinearBottleneck-147 [-1, 128, 16, 32] 0
Conv2d-148 [-1, 32, 1, 1] 4,096
BatchNorm2d-149 [-1, 32, 1, 1] 64
ReLU-150 [-1, 32, 1, 1] 0
_ConvBNReLU-151 [-1, 32, 1, 1] 0
Conv2d-152 [-1, 32, 2, 2] 4,096
BatchNorm2d-153 [-1, 32, 2, 2] 64
ReLU-154 [-1, 32, 2, 2] 0
_ConvBNReLU-155 [-1, 32, 2, 2] 0
Conv2d-156 [-1, 32, 3, 3] 4,096
BatchNorm2d-157 [-1, 32, 3, 3] 64
ReLU-158 [-1, 32, 3, 3] 0
_ConvBNReLU-159 [-1, 32, 3, 3] 0
Conv2d-160 [-1, 32, 6, 6] 4,096
BatchNorm2d-161 [-1, 32, 6, 6] 64
ReLU-162 [-1, 32, 6, 6] 0
_ConvBNReLU-163 [-1, 32, 6, 6] 0
Conv2d-164 [-1, 128, 16, 32] 32,768
BatchNorm2d-165 [-1, 128, 16, 32] 256
ReLU-166 [-1, 128, 16, 32] 0
_ConvBNReLU-167 [-1, 128, 16, 32] 0
PyramidPooling-168 [-1, 128, 16, 32] 0
GlobalFeatureExtractor-169 [-1, 128, 16, 32] 0
Conv2d-170 [-1, 128, 64, 128] 1,152
BatchNorm2d-171 [-1, 128, 64, 128] 256
ReLU-172 [-1, 128, 64, 128] 0
_DWConv-173 [-1, 128, 64, 128] 0
Conv2d-174 [-1, 128, 64, 128] 16,512
BatchNorm2d-175 [-1, 128, 64, 128] 256
Conv2d-176 [-1, 128, 64, 128] 8,320
BatchNorm2d-177 [-1, 128, 64, 128] 256
ReLU-178 [-1, 128, 64, 128] 0
FeatureFusionModule-179 [-1, 128, 64, 128] 0
Conv2d-180 [-1, 128, 64, 128] 1,152
BatchNorm2d-181 [-1, 128, 64, 128] 256
ReLU-182 [-1, 128, 64, 128] 0
Conv2d-183 [-1, 128, 64, 128] 16,384
BatchNorm2d-184 [-1, 128, 64, 128] 256
ReLU-185 [-1, 128, 64, 128] 0
_DSConv-186 [-1, 128, 64, 128] 0
Conv2d-187 [-1, 128, 64, 128] 1,152
BatchNorm2d-188 [-1, 128, 64, 128] 256
ReLU-189 [-1, 128, 64, 128] 0
Conv2d-190 [-1, 128, 64, 128] 16,384
BatchNorm2d-191 [-1, 128, 64, 128] 256
ReLU-192 [-1, 128, 64, 128] 0
_DSConv-193 [-1, 128, 64, 128] 0
Dropout-194 [-1, 128, 64, 128] 0
Conv2d-195 [-1, 19, 64, 128] 2,451
Classifer-196 [-1, 19, 64, 128] 0
================================================================
Total params: 1,151,075
Trainable params: 1,151,075
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 6.00
Forward/backward pass size (MB): 1165.30
Params size (MB): 4.39
Estimated Total Size (MB): 1175.69
----------------------------------------------------------------
Skipped operation aten::relu_ 42 time(s)
Skipped operation aten::add 7 time(s)
Skipped operation aten::adaptive_avg_pool2d 4 time(s)
Skipped operation aten::upsample_bilinear2d 6 time(s)
Skipped operation aten::dropout 1 time(s)
FastSCNNG3(
2.034 GMac, 100.000% MACs,
(learning_to_downsample): LearningToDownsample(
0.492 GMac, 24.207% MACs,
(conv): _ConvBNReLU(
0.063 GMac, 3.093% MACs,
(conv): Sequential(
0.063 GMac, 3.093% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 3, 32, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.008 GMac, 0.412% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.206% MACs, inplace=True)
)
)
(conv1): _DSConv(
0.197 GMac, 9.691% MACs,
(conv): Sequential(
0.197 GMac, 9.691% MACs,
(0): Conv2d(0.038 GMac, 1.856% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
(1): BatchNorm2d(0.008 GMac, 0.412% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.206% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 6.598% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.008 GMac, 0.412% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.004 GMac, 0.206% MACs, inplace=True)
)
)
(dsconv1): _DSConv(
0.062 GMac, 3.067% MACs,
(conv): Sequential(
0.062 GMac, 3.067% MACs,
(0): Conv2d(0.004 GMac, 0.206% MACs, 32, 32, kernel_size=(2, 2), stride=(2, 2), groups=32, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
(3): Conv2d(0.05 GMac, 2.474% MACs, 32, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.003 GMac, 0.155% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.002 GMac, 0.077% MACs, inplace=True)
)
)
(dsconv1_1): _DSConv(
0.099 GMac, 4.871% MACs,
(conv): Sequential(
0.099 GMac, 4.871% MACs,
(0): Conv2d(0.014 GMac, 0.696% MACs, 48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=48, bias=False)
(1): BatchNorm2d(0.003 GMac, 0.155% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.002 GMac, 0.077% MACs, inplace=True)
(3): Conv2d(0.075 GMac, 3.711% MACs, 48, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.003 GMac, 0.155% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.002 GMac, 0.077% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.029 GMac, 1.450% MACs,
(conv): Sequential(
0.029 GMac, 1.450% MACs,
(0): Conv2d(0.002 GMac, 0.077% MACs, 48, 48, kernel_size=(2, 2), stride=(2, 2), groups=48, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.039% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
(3): Conv2d(0.025 GMac, 1.237% MACs, 48, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.052% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.026% MACs, inplace=True)
)
)
(dsconv2_1): _DSConv(
0.041 GMac, 2.036% MACs,
(conv): Sequential(
0.041 GMac, 2.036% MACs,
(0): Conv2d(0.005 GMac, 0.232% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.052% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.026% MACs, inplace=True)
(3): Conv2d(0.034 GMac, 1.649% MACs, 64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.052% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.026% MACs, inplace=True)
)
)
)
(global_feature_extractor): GlobalFeatureExtractor(
1.001 GMac, 49.188% MACs,
(bottleneck1): Sequential(
0.502 GMac, 24.664% MACs,
(0): LinearBottleneck(
0.276 GMac, 13.582% MACs,
(block): Sequential(
0.276 GMac, 13.582% MACs,
(0): _ConvBNReLU(
0.211 GMac, 10.361% MACs,
(conv): Sequential(
0.211 GMac, 10.361% MACs,
(0): Conv2d(0.201 GMac, 9.897% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.006 GMac, 0.309% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.003 GMac, 0.155% MACs, inplace=True)
)
)
(1): _DWConv(
0.006 GMac, 0.271% MACs,
(conv): Sequential(
0.006 GMac, 0.271% MACs,
(0): Conv2d(0.003 GMac, 0.155% MACs, 384, 384, kernel_size=(2, 2), stride=(2, 2), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(2): _DWConv(
0.009 GMac, 0.464% MACs,
(conv): Sequential(
0.009 GMac, 0.464% MACs,
(0): Conv2d(0.007 GMac, 0.348% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(3): Conv2d(0.05 GMac, 2.474% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.113 GMac, 5.541% MACs,
(block): Sequential(
0.113 GMac, 5.541% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.590% MACs,
(conv): Sequential(
0.053 GMac, 2.590% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.464% MACs,
(conv): Sequential(
0.009 GMac, 0.464% MACs,
(0): Conv2d(0.007 GMac, 0.348% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.474% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.113 GMac, 5.541% MACs,
(block): Sequential(
0.113 GMac, 5.541% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.590% MACs,
(conv): Sequential(
0.053 GMac, 2.590% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.464% MACs,
(conv): Sequential(
0.009 GMac, 0.464% MACs,
(0): Conv2d(0.007 GMac, 0.348% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.474% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck2): Sequential(
0.198 GMac, 9.718% MACs,
(0): LinearBottleneck(
0.075 GMac, 3.706% MACs,
(block): Sequential(
0.075 GMac, 3.706% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.590% MACs,
(conv): Sequential(
0.053 GMac, 2.590% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.077% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.039% MACs, inplace=True)
)
)
(1): _DWConv(
0.001 GMac, 0.068% MACs,
(conv): Sequential(
0.001 GMac, 0.068% MACs,
(0): Conv2d(0.001 GMac, 0.039% MACs, 384, 384, kernel_size=(2, 2), stride=(2, 2), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.019% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(2): _DWConv(
0.002 GMac, 0.116% MACs,
(conv): Sequential(
0.002 GMac, 0.116% MACs,
(0): Conv2d(0.002 GMac, 0.087% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.019% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(3): Conv2d(0.019 GMac, 0.928% MACs, 384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.061 GMac, 3.006% MACs,
(block): Sequential(
0.061 GMac, 3.006% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.435% MACs,
(conv): Sequential(
0.029 GMac, 1.435% MACs,
(0): Conv2d(0.028 GMac, 1.392% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.174% MACs,
(conv): Sequential(
0.004 GMac, 0.174% MACs,
(0): Conv2d(0.003 GMac, 0.130% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 1.392% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.061 GMac, 3.006% MACs,
(block): Sequential(
0.061 GMac, 3.006% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.435% MACs,
(conv): Sequential(
0.029 GMac, 1.435% MACs,
(0): Conv2d(0.028 GMac, 1.392% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.174% MACs,
(conv): Sequential(
0.004 GMac, 0.174% MACs,
(0): Conv2d(0.003 GMac, 0.130% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 1.392% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck3): Sequential(
0.284 GMac, 13.961% MACs,
(0): LinearBottleneck(
0.071 GMac, 3.471% MACs,
(block): Sequential(
0.071 GMac, 3.471% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.435% MACs,
(conv): Sequential(
0.029 GMac, 1.435% MACs,
(0): Conv2d(0.028 GMac, 1.392% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.174% MACs,
(conv): Sequential(
0.004 GMac, 0.174% MACs,
(0): Conv2d(0.003 GMac, 0.130% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.029% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.038 GMac, 1.856% MACs, 576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.107 GMac, 5.245% MACs,
(block): Sequential(
0.107 GMac, 5.245% MACs,
(0): _ConvBNReLU(
0.052 GMac, 2.532% MACs,
(conv): Sequential(
0.052 GMac, 2.532% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.039% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.232% MACs,
(conv): Sequential(
0.005 GMac, 0.232% MACs,
(0): Conv2d(0.004 GMac, 0.174% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.039% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.474% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.107 GMac, 5.245% MACs,
(block): Sequential(
0.107 GMac, 5.245% MACs,
(0): _ConvBNReLU(
0.052 GMac, 2.532% MACs,
(conv): Sequential(
0.052 GMac, 2.532% MACs,
(0): Conv2d(0.05 GMac, 2.474% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.039% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.232% MACs,
(conv): Sequential(
0.005 GMac, 0.232% MACs,
(0): Conv2d(0.004 GMac, 0.174% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.039% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.474% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(ppm): PyramidPooling(
0.017 GMac, 0.845% MACs,
(conv1): _ConvBNReLU(
0.0 GMac, 0.000% MACs,
(conv): Sequential(
0.0 GMac, 0.000% MACs,
(0): Conv2d(0.0 GMac, 0.000% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv2): _ConvBNReLU(
0.0 GMac, 0.001% MACs,
(conv): Sequential(
0.0 GMac, 0.001% MACs,
(0): Conv2d(0.0 GMac, 0.001% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv3): _ConvBNReLU(
0.0 GMac, 0.002% MACs,
(conv): Sequential(
0.0 GMac, 0.002% MACs,
(0): Conv2d(0.0 GMac, 0.002% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv4): _ConvBNReLU(
0.0 GMac, 0.007% MACs,
(conv): Sequential(
0.0 GMac, 0.007% MACs,
(0): Conv2d(0.0 GMac, 0.007% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(out): _ConvBNReLU(
0.017 GMac, 0.834% MACs,
(conv): Sequential(
0.017 GMac, 0.834% MACs,
(0): Conv2d(0.017 GMac, 0.825% MACs, 256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
)
)
)
(feature_fusion): FeatureFusionModule(
0.221 GMac, 10.876% MACs,
(dwconv): _DWConv(
0.013 GMac, 0.619% MACs,
(conv): Sequential(
0.013 GMac, 0.619% MACs,
(0): Conv2d(0.009 GMac, 0.464% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
)
)
(conv_lower_res): Sequential(
0.137 GMac, 6.752% MACs,
(0): Conv2d(0.135 GMac, 6.649% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(conv_higher_res): Sequential(
0.07 GMac, 3.454% MACs,
(0): Conv2d(0.068 GMac, 3.350% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(relu): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
)
(classifier): Classifer(
0.32 GMac, 15.729% MACs,
(dsconv1): _DSConv(
0.15 GMac, 7.371% MACs,
(conv): Sequential(
0.15 GMac, 7.371% MACs,
(0): Conv2d(0.009 GMac, 0.464% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 6.598% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.15 GMac, 7.371% MACs,
(conv): Sequential(
0.15 GMac, 7.371% MACs,
(0): Conv2d(0.009 GMac, 0.464% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 6.598% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.103% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.052% MACs, inplace=True)
)
)
(conv): Sequential(
0.02 GMac, 0.987% MACs,
(0): Dropout(0.0 GMac, 0.000% MACs, p=0.1, inplace=False)
(1): Conv2d(0.02 GMac, 0.987% MACs, 128, 19, kernel_size=(1, 1), stride=(1, 1))
)
)
)
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._ConvBNReLU'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DSConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LearningToDownsample'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DWConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LinearBottleneck'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.PyramidPooling'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GlobalFeatureExtractor'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FeatureFusionModule'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.dropout.Dropout'>.
[WARN] Cannot find rule for <class '__main__.Classifer'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FastSCNNG3'>. Treat it as zero Macs and zero Params.
defaultdict(<class 'float'>, {'conv': 3.822534656, 'batchnorm': 0.326775296})
2034275008.0 1151075
3990427904.0 1151075.0
Process finished with exit code 0
conv3*3 cin cout conv3 cout cout
conv+ds
ds+ds
'''
|
py | b4156b2639faed9ce6afefd5ffde7503bdbd2402 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Define some commonly used string constants
# Storage types
MYSQL = "mysql"
ES = "es"
HDFS = "hdfs"
CLICKHOUSE = "clickhouse"
TSPIDER = "tspider"
DRUID = "druid"
HERMES = "hermes"
TSDB = "tsdb"
TREDIS = "tredis"
QUEUE = "queue"
IGNITE = "ignite"
TCAPLUS = "tcaplus"
TDW = "tdw"
POSTGRESQL = "postgresql"
TPG = "tpg"
TDBANK = "tdbank"
QUEUE_PULSAR = "queue_pulsar"
ICEBERG = "iceberg"
KAFKA = "kafka"
PULSAR = "pulsar"
PRESTO = "presto"
TDBANK_TDW = "tdbank_tdw"
NODES = "nodes"
# CC configuration
BK_CLOUD_ID = "bk_cloud_id"
NODE_TYPE = "node_type"
SCOPE = "scope"
HOST_SCOPE = "host_scope"
MODULE_SCOPE = "module_scope"
# Common constants for platform modules
DATAHUB = "datahub"
DATAMANAGE = "datamanage"
DATAQUERY = "dataquery"
JOBNAVI = "jobnavi"
META = "meta"
ID = "id"
RESULT_TABLE_ID = "result_table_id"
RESULT_TABLE_NAME = "result_table_name"
RESULT_TABLE_NAME_ALIAS = "result_table_name_alias"
CLUSTER_NAME = "cluster_name"
CLUSTER_TYPE = "cluster_type"
CLUSTER_TYPE_JAVA = "clusterType"
CLUSTER_GROUP = "cluster_group"
CLUSTER_DOMAIN = "cluster_domain"
CLUSTER_PORT = "cluster_port"
ZK_DOMAIN = "zk_domain"
ZK_PORT = "zk_port"
CLUSTER_KAFKA_DEFAULT_PORT = 9092
CLUSTER_ZK_DEFAULT_PORT = 2181
APP_CODE = "app_code"
BK_BIZ_ID = "bk_biz_id"
BK_BIZ_NAME = "bk_biz_name"
BK_BIZ_ID_BKDATA = 591
BK_BIZ_NAME_BKDATA = "bkdata"
BK_BKDATA_NAME = "tgdp"
BK_USERNAME = "bk_username"
BK_APP_CODE = "bk_app_code"
BK_APP_SECRET = "bk_app_secret"
BKDATA_AUTHENTICATION_METHOD = "bkdata_authentication_method"
REFRESH_TOKEN = "refresh_token"
ACCESS_TOKEN = "access_token"
ENV_NAME = "env_name"
GRANT_TYPE = "grant_type"
STORAGES = "storages"
STORAGE_CONFIG = "storage_config"
STORAGE_KEYS = "storage_keys"
# KV storage configuration
STORAGE_VALUES = "storage_values"
STORAGE_SEPARATOR = "storage_separator"
STORAGE_KEY_SEPARATOR = "storage_key_separator"
STORAGE_KEY_PREFIX = "storage_key_prefix"
SCENARIO_TYPE = "scenario_type"
PRIMARY_KEYS = "primary_keys"
STORAGE_TYPE = "storage_type"
DATA_TYPE = "data_type"
TYPE = "type"
PERIOD = "period"
PARQUET = "parquet"
BKDATA = "bkdata"
DEFAULT = "default"
JSON = "json"
# Connection info
CONNECTION_INFO = "connection_info"
CONNECTION = "connection"
CONNECTION_URL = "connectionUrl"
CONNECTION_USER = "connectionUser"
CONNECTION_PASSWORD = "connectionPassword"
# Physical table
PHYSICAL_TABLE_NAME = "physical_table_name"
# Expiration configuration
EXPIRES = "expires"
EXPIRE = "expire"
EXPIRE_DAYS = "expire_days"
MIN_EXPIRE = "min_expire"
MAX_EXPIRE = "max_expire"
LIST_EXPIRE = "list_expire"
# Task configuration
DELTA_DAY = "delta_day"
MERGE_DAYS = "merge_days"
TIMEOUT = "timeout"
TASK_TYPE = "task_type"
TASK_ID = "task_id"
STORAGE_CLUSTER = "storage_cluster"
CHANNEL_CLUSTER = "channel_cluster"
# Tag-related configuration
RESULT_TABLE = "result_table"
CLUSTER_ROLE = "cluster_role"
GEOG_AREA = "geog_area"
GEOG_AREAS = "geog_areas"
GEOG_AREA_ALIAS = "geog_area_alias"
GEOG_AREA_CODE = "geog_area_code"
MAINLAND = "mainland"
INLAND = "inland"
OVERSEAS = "overseas"
TAGS = "tags"
# Common storage configuration constants
CAPACITY = "capacity"
DATABASE = "database"
TIME = "time"
DATA = "data"
NAME = "name"
PRIORITY = "priority"
VERSION = "version"
BELONGS_TO = "belongs_to"
DESCRIPTION = "description"
ZOOKEEPER_CONNECT = "zookeeper.connect"
STATUS = "status"
LOCATION = "location"
INFO = "info"
INDEX = "index"
INDEX_FIELDS = "index_fields"
INDICES = "indices"
MAPPINGS = "mappings"
SETTINGS = "settings"
SAMPLE = "sample"
FIELDS = "fields"
FIELD_NAME = "field_name"
FIELD_TYPE = "field_type"
REPORT_MODE = "report_mode"
ACTIONS = "actions"
ACTION = "action"
ENABLE = "enable"
DISABLE = "disable"
OK = "OK"
ALIAS = "alias"
ADD = "add"
REMOVE = "remove"
ANALYZED_FIELDS = "analyzed_fields"
DATE_FIELDS = "date_fields"
DOC_VALUES_FIELDS = "doc_values_fields"
JSON_FIELDS = "json_fields"
DOC_VALUES = "doc_values"
PROPERTIES = "properties"
ENABLE_REPLICA = "enable_replica"
HAS_REPLICA = "has_replica"
INCLUDE_IN_ALL = "include_in_all"
ROUTING = "routing"
ALLOCATION = "allocation"
INCLUDE = "include"
TAG = "tag"
NUMBER_OF_REPLICAS = "number_of_replicas"
FALSE = "false"
TRUE = "true"
TABLE = "table"
COLUMN = "column"
COLUMNS = "columns"
VALUE = "value"
PARTITIONS = "partitions"
TODAY_DIRS = "today_dirs"
LATEST_FILES = "latest_files"
MODIFY_TIME = "modify_time"
HDFS_URL = "hdfs_url"
DELETE_PATHS = "delete_paths"
PATH = "path"
LENGTH = "length"
ADD_SQL = "add_sql"
EXIST_SQL = "exist_sql"
DROP_SQL = "drop_sql"
WITH_DATA = "with_data"
WITH_HIVE_META = "with_hive_meta"
RT_FIELDS = "rt_fields"
MYSQL_FIELDS = "mysql_fields"
CHECK_RESULT = "check_result"
CHECK_DIFF = "check_diff"
APPEND_FIELDS = "append_fields"
DELETE_FIELDS = "delete_fields"
BAD_FIELDS = "bad_fields"
INDEXED_FIELDS = "indexed_fields"
SCHEMA = "schema"
DB_NAME = "db_name"
TABLE_NAME = "table_name"
EXTRA = "extra"
PLATFORM = "platform"
IS_MANAGED = "is_managed"
MANAGE = "manage"
STORAGE_CHANNEL_ID = "storage_channel_id"
STORAGE_CHANNEL = "storage_channel"
GENERATE_TYPE = "generate_type"
ACTIVE = "active"
PREVIOUS_CLUSTER_NAME = "previous_cluster_name"
CREATED_BY = "created_by"
CREATED_AT = "created_at"
UPDATED_BY = "updated_by"
UPDATED_AT = "updated_at"
SERIES = "series"
LIMIT = "limit"
START_DATE = "start_date"
END_DATE = "end_date"
START_TIME = "start_time"
END_TIME = "end_time"
START = "start"
END = "end"
PROJECT_ID = "project_id"
COMPONENT = "component"
BID = "bid"
CODE = "code"
MESSAGE = "message"
APP = "app"
DNS_PORT = "dns_port"
GCS_USER = "gcs_user"
GCS = "gcs"
DNS = "dns"
JOBID = "jobid"
EXPIRATION_TIME = "expiration_time"
SQL = "sql"
SYS_TYPE = "sys_type"
OPERATE_TYPE = "operate_type"
RELATED = "related"
PAGE = "page"
PAGE_SIZE = "page_size"
RELATED_FILTER = "related_filter"
ATTR_NAME = "attr_name"
ATTR_VALUE = "attr_value"
TDW_RELATED = "tdw_related"
COLS_INFO = "cols_info"
PARTS_INFO = "parts_info"
TDW_USERNAME = "tdw_username"
TDW_PASSWORD = "tdw_password"
RECEIVER = "receiver"
NOTIFY_WAY = "notify_way"
FIELD_ALIAS = "field_alias"
IS_DIMENSION = "is_dimension"
FIELD_INDEX = "field_index"
PHYSICAL_FIELD = "physical_field"
PHYSICAL_FIELD_TYPE = "physical_field_type"
IS_TIME = "is_time"
ORDER = "order"
COMPONENT_TYPE = "component_type"
SRC_CLUSTER_ID = "src_cluster_id"
DEST_CLUSTER_ID = "dest_cluster_id"
DEST = "dest"
SERVICE_TYPE = "service_type"
RESOURCE_TYPE = "resource_type"
UPDATE_TYPE = "update_type"
TB_NAME = "tb_name"
REPORT_DATE = "report_date"
DATA_SIZE = "data_size"
SIZE = "size"
REPORT_TIME = "report_time"
TABLE_SIZE_MB = "table_size_mb"
TABLE_RECORD_NUMS = "table_record_nums"
TABLES = "tables"
SIZE_MB = "size_mb"
ROW_NUMS = "row_nums"
RELATED_TAGS = "related_tags"
SEGMENTS = "segments"
DATASOURCE = "datasource"
INTERVAL = "interval"
DEFAULT_INTERVAL = 60000
MINTIME = "minTime"
COUNT = "count"
TASK = "task"
ES_CONF = "es_conf"
RT_CONF = "rt_conf"
ES_FIELDS = "es_fields"
AVRO = "avro"
OPERATOR_NAME = "operator_name"
STREAM_TO = "stream_to"
STREAM_TO_ID = "stream_to_id"
METADATA = "metadata"
OPERATION = "operation"
STORAGE_ADDRESS = "storage_address"
IP = "ip"
PORT = "port"
INNER = "inner"
OUTER = "outer"
RESULT = "result"
TCP_TGW = "tcp_tgw"
HTTP_TGW = "http_tgw"
INNER_CLUSTER = "inner_cluster"
ORDER_BY = "order_by"
PARTITION_BY = "partition_by"
SAMPLE_BY = "sample_by"
PLUGIN_NAME = "plugin_name"
PLUGIN_VERSION = "plugin_version"
PLUGIN_TEMPLTATES_NAME = "plugin_templates_name"
PLUGIN_TEMPLTATES_VERSION = "plugin_templates_version"
RT_ID = "rt.id"
RTID = "rtId"
ZOOKEEPER_ADDR = "zookeeper.addr"
BOOTSTRAP_SERVERS = "bootstrap.servers"
PULSAR_CHANNEL_TOKEN = "pulsar_channel_token"
BROKER_SERVICE_URL = "brokerServiceUrl"
EMPTY_STRING = ""
CHANNEL_ID = "channel_id"
CHANNEL = "channel"
STORAGE_PARTITIONS = "storage_partitions"
ROLE_USERS = "role_users"
NAMESPACE = "namespace"
MAINTAINER = "maintainer"
FLOW_ID = "flow_id"
TENANT = "tenant"
USER_IDS = "user_ids"
DATA_ENCODING = "data_encoding"
MSG_SYSTEM = "msg_system"
PUBLIC = "public"
BIZ_ID = "biz_id"
DATA_SET = "data_set"
ROLE_ID = "role_id"
PARTITION = "partition"
SERVER_ID = "server_id"
CLUSTER_INDEX = "cluster_index"
RAW_DATA_ID = "raw_data_id"
PARTITION_SPEC = "partition_spec"
RECORD_NUM = "recordNum"
RECORD = "record"
CONDITION_EXPRESSION = "conditionExpression"
ASSIGN_EXPRESSION = "assignExpression"
TIME_FIELD = "timeField"
CAMEL_ASYNC_COMPACT = "asyncCompact"
ASYNC_COMPACT = "async_compact"
UPDATE_PROPERTIES = "updateProperties"
ADD_FIELDS = "addFields"
REMOVE_FIELDS = "removeFields"
METHOD = "method"
FIELD = "field"
HOUR = "HOUR"
ARGS = "args"
ICEBERG_FIELDS = "iceberg_fields"
COMPACT_START = "compactStart"
COMPACT_END = "compactEnd"
DELETE_PARTITION = "deletePartition"
RENAME_FIELDS = "renameFields"
EXPIREDAYS = "expireDays"
NEWTABLENAME = "newTableName"
TABLENAME = "tableName"
DATABASENAME = "databaseName"
CONF_KEY = "conf_key"
CONF_VALUE = "conf_value"
USED = "Used"
TOTAL = "Total"
PERCENT_USED = "PercentUsed"
BEANS = "beans"
# Status
UNKNOWN = "UNKNOWN"
SUCCESS = "SUCCESS"
FAILED = "FAILED"
PENDING = "PENDING"
WAITING = "WAITING"
RUNNING = "RUNNING"
CONNECTOR_RUNNING = "running"
# Time-related
DTEVENTTIME = "dtEventTime"
DTEVENTTIMESTAMP = "dtEventTimeStamp"
THEDATE = "thedate"
LOCALTIME = "localTime"
TIMESTAMP = "timestamp"
OFFSET = "offset"
ET = "____et" # 默认iceberg分区字段,内部字段,对用户不可见
PARTITION_TIME = "__time" # clickhouse的分区字段
# processing type
BATCH = "batch"
STREAM = "stream"
CLEAN = "clean"
TRANSFORM = "transform"
MODEL = "model"
MODE = "mode"
STREAM_MODEL = "stream_model"
BATCH_MODEL = "batch_model"
STORAGE = "storage"
VIEW = "view"
QUERYSET = "queryset"
SNAPSHOT = "snapshot"
PROCESSING_TYPE = "processing_type"
# Data types
LONG = "long"
INT = "int"
STRING = "string"
BIGINT = "bigint"
TEXT = "text"
BOOLEAN = "boolean"
TINYINT = "tinyint"
FLOAT = "float"
DOUBLE = "double"
LONGTEXT = "longtext"
SHORT = "short"
DATETIME = "datetime"
BIGDECIMAL = "bigdecimal"
DECIMAL = "decimal"
INTEGER = "integer"
VARCHAR = "varchar"
INT_32 = "int32"
BOOL = "bool"
INT_64 = "int64"
REAL = "real"
DATE = "date"
OBJECT = "object"
KEYWORD = "keyword"
LIST = "list"
RANGE = "range"
LABEL = "label"
PLAT_NAME = "plat_name"
ROUTE = "route"
RAW_DATA_NAME = "raw_data_name"
TOPIC_NAME = "topic_name"
DATASET = "dataset"
BIZID = "bizid"
FILTER_NAME_AND = "filter_name_and"
FILTER_NAME_OR = "filter_name_or"
STREAM_FILTERS = "stream_filters"
DATA_SCENARIO = "data_scenario"
RAW_DATA_ALIAS = "raw_data_alias"
SENSITIVITY = "sensitivity"
CONDITION = "condition"
SPECIFICATION = "specification"
LOG = "log"
TLOG = "tlog"
# Accounts, addresses, domain names, etc.
LOCALHOST = "localhost"
ROOT = "root"
MAPLELEAF = "mapleleaf"
HOSTS = "hosts"
MODULES = "modules"
HOST = "host"
USER = "user"
PASSWORD = "password"
DB = "db"
USER_BACKEND = "user_backend"
PASSWORD_BACKEND = "password_backend"
SASL_PASS = "sasl.pass"
JDBC = "jdbc"
HTTP = "http"
TDWHDFS = "tdwhdfs"
TUBE = "tube"
# REQUESTS
JSON_HEADERS = {"Content-Type": "application/json"}
# task config
TASKS_MAX = "tasks.max"
GROUP_ID = "group.id"
DATA_ID = "data_id"
GROUP = "group"
AUTO_OFFSET_RESET = "auto_offset_reset"
TOPICS_DIR = "topics.dir"
TOPICS = "topics"
TOPIC = "topic"
CONFIG = "config"
# queue authorization statuses
GRANTED = "granted"
REVOKING = "revoking"
REVOKED = "revoked"
EXPIRED = "expired"
PRODUCER = "producer"
USERNAME = "username"
GROUPID = "groupId"
GROUPID_LOWERCASE = "groupid"
CLUSTERINFO = "clusterInfo"
NOAUTH = "noAuth"
CLUSTER = "cluster"
QUEUE_USER = "queue_user"
QUEUE_PASSWORD = "queue_password"
DATA_TOKEN = "data_token"
TOKEN = "token"
QUEUE_DB = "queue_db"
DATABUS = "databus"
ACCESS = "access"
IDS = "ids"
RPC_PORT = "rpc_port"
SERVICERPC_PORT = "servicerpc_port"
HDFS_CLUSTER_NAME = "hdfs_cluster_name"
HDFS_DEFAULT_PARAMS = "hdfs_default_params"
FS_DEFAULTFS = "fs.defaultFS"
DFS_REPLICATION = "dfs.replication"
DFS_NAMESERVICES = "dfs.nameservices"
DFS_HA_NAMENODES = "dfs.ha.namenodes"
DFS_CLIENT_FAILOVER_PROXY_PROVIDER = "dfs.client.failover.proxy.provider"
DFS_NAMENODE_RPC_ADDRESS = "dfs.namenode.rpc-address"
DFS_NAMENODE_SERVICERPC_ADDRESS = "dfs.namenode.servicerpc-address"
DFS_NAMENODE_HTTP_ADDRESS = "dfs.namenode.http-address"
TOPIC_DIR = "topic_dir"
HDFS_CONF_DIR = "hdfs_conf_dir"
HDFS_CONF = "hdfs_conf"
LOG_DIR = "log_dir"
FLUSH_SIZE = "flush.size"
PARQUET_FLUSH_SIZE = "flush_size"
DEFAULT_FLUSH_SIZE = 1000000
RAW = "raw"
CHANNEL_NAME = "channel_name"
CHANNEL_TYPE = "channel_type"
CHANNEL_CLUSTER_NAME = "channel_cluster_name"
OP = "op"
ADMIN = "admin"
APP_ID = "app_id"
ZONE_ID = "zone_id"
SET_ID = "set_id"
INCREMENTING = "incrementing"
TABLE_WHITE_LIST = "tableWhitelist"
INCREMENTING_COLUMN_NAME = "incrementingColumnName"
POLL_INTERVAL_MS = "pollIntervalMs"
TABLE_POLL_INTERVAL_MS = "tablePollIntervalMs"
KAFKA_METRIC_CONFIG = "kafkaMetricConfig"
ITERARION_IDX = "_iterarion_idx"
TEST_NAMESPACE = "test_namespace"
MONITOR_PRODUCER_BOOTSTRAP_SERVERS = "monitor.producer.bootstrap.servers"
MONITOR_PRODUCER_TOPIC = "monitor.producer.topic"
PRODUCER_TOPIC = "producer.topic"
MONITOR_MODULE_NAME = "monitor.module.name"
MONITOR_COMPONENT_NAME = "monitor.component.name"
MIGRATION = "migration"
SOURCE = "source"
PULLER_CLUSTER_NAME = "puller_cluster_name"
CONNECTORS = "connectors"
WORKERS_NUM = "workers_num"
TIMEZONE_ID = "timezone_id"
DATA_DIR = "data_dir"
FIELD_NAMES = "field_names"
# Data lake gray-release (canary) settings
HDFS_MAINTAIN_SKIP_RTS = "hdfs.maintain.skip.rts"
ICEBERG_TRANSFORM_RTS = "iceberg.transform.rts"
REGISTER_TYPE = "register_type"
AUTH_TYPE = "authtype"
APP_USER = "app_user"
FILENAME = "filename"
CONTENT = "content"
TRANS_ID = "trans_id"
GET_TABLE_ATTACHS = "get_table_attachs"
GET_TABLE_STRUCT = "get_table_struct"
STRUCT = "struct"
TABLE_STRUCT_CONTENT = "table_struct_content"
TABLE_LIST = "table_list"
ATTACHMENT_ID = "attachment_id"
AUTO_APPROVE = "auto_approve"
AUTO_EXEC_TRANS = "auto_exec_trans"
ATTACH_ID = "attach_id"
MEMO = "memo"
COLS = "cols"
MSG = "msg"
TCAPLUS_FIELDS = "tcaplus_fields"
XML = "xml"
YES = "yes"
CHECK = "check"
RET = "ret"
KEY_FIELDS = "key_fields"
VALUE_FIELDS = "value_fields"
KEY_FIELD = "KeyField"
VALUE_FIELD = "ValueField"
KEY_NAME = "Name"
KEY_TYPE = "Type"
FILES = "files"
HDFS_DATA_TYPE = "hdfs.data_type"
ICEBERG_BIZ_IDS = "iceberg.biz.ids"
HIVE_METASTORE_PORT = "HIVE_METASTORE_PORT"
HIVE_METASTORE_HOSTS = "HIVE_METASTORE_HOSTS"
HIVE_METASTORE_URIS = "hive.metastore.uris"
RESULT_TABLE_INFO = "result_table_info"
RESULT_TABLE_IDS = "result_table_ids"
QUERY_RT_BATCH_SIZE = 50
TIME_ZONE = "time_zone"
PREFER_STORAGE = "prefer_storage"
QUALIFIED_NAME = "qualified_name"
DEPTH = "depth"
EXTRA_RETRIEVE = "extra_retrieve"
DIRECTION = "direction"
# clickhouse-related
REPLICATED_TABLE = "replicated_table"
DISTRIBUTED_TABLE = "distributed_table"
DATETIME_TYPE = "DateTime"
CONSISTENCY = "consistency"
SCHEMAS = "schemas"
CLICKHOUSE_FIELDS = "clickhouse_fields"
DISTINCT_PARTITIONS = "distinct_partitions"
TOTAL_PARTITIONS = "total_partitions"
TOP_PARTITIONS = "top_partitions"
EXCEPTION = "exception"
MAX = "max"
MIN = "min"
SUM = "sum"
AVERAGE = "average"
USED_SIZE = "used_size"
MAX_SIZE = "max_size"
STORAGE_USAGE = "storage_usage"
QUERY_ID = "query_id"
QUERY = "query"
ELAPSED = "elapsed"
PROCESSLIST = "processlist"
TOTAL_SPACE = "total_space"
USED_SPACE = "used_space"
USED_MAX = "usage_max"
USED_MIN = "usage_min"
USED_SUM = "used_sum"
TOTAL_SUM = "total_sum"
TOTAL_USAGE = "total_usage"
LOCAL = "local"
CK_DEFAULT_CONNECT_TIMEOUT_SEC = 60
CK_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC = 30
# DT-related
CHANNEL_CLUSTER_INDEX = "channel_cluster_index"
TRANSFERRING_ID = "transferring_id"
TRANSFERRING_ALIAS = "transferring_alias"
TRANSFERRING_TYPE = "transferring_type"
INPUTS = "inputs"
OUTPUTS = "outputs"
DATA_SET_TYPE = "data_set_type"
DATA_SET_ID = "data_set_id"
STORAGE_CLUSTER_CONFIG_ID = "storage_cluster_config_id"
CHANNEL_CLUSTER_CONFIG_ID = "channel_cluster_config_id"
SHIPPER = "shipper"
PULLER = "puller"
# transport-related
KEY_FIELDS_JAVA = "keyFields"
KEY_SEPARATOR_JAVA = "keySeparator"
IGNITE_CACHE_JAVA = "igniteCache"
IGNITE_MAX_RECORDS_JAVA = "igniteMaxRecords"
IGNITE_CLUSTER_JAVA = "igniteCluster"
IGNITE_HOST_JAVA = "igniteHost"
IGNITE_PASS_JAVA = "ignitePass"
IGNITE_PORT_JAVA = "ignitePort"
IGNITE_USER_JAVA = "igniteUser"
USE_THIN_CLIENT_JAVA = "useThinClient"
HDFS_CUSTOM_PROPERTY_JAVA = "hdfsCustomProperty"
THIN_CLIENT_THRESHOLD = "thin.client.threshold"
FLUSH_SIZE_JAVA = "flushSize"
SINK_CONFIG_JAVA = "sinkConfig"
SOURCE_CONFIG_JAVA = "sourceConfig"
MSG_TYPE_JAVA = "msgType"
SOURCE_RT_ID = "source_rt_id"
SOURCE_TYPE = "source_type"
SINK_RT_ID = "sink_rt_id"
SINK_TYPE = "sink_type"
CONCURRENCY = "concurrency"
CONNECTOR = "connector"
HDFS_PROPERTY_JAVA = "hdfsProperty"
DATA_DIR_JAVA = "dataDir"
ICEBERG_DATABASE_JAVA = "icebergDatabase"
ICEBERG_TABLE_JAVA = "icebergTable"
GEOG_AREA_JAVA = "geogArea"
DATA_TYPE_JAVA = "dataType"
RT_ID_JAVA = "rtId"
CLICKHOUSE_RT_ID_JAVA = "clickhouseRtId"
CLICKHOUSE_PROPERTIES_JAVA = "clickhouseProperties"
DB_NAME_JAVA = "dbName"
REPLICATED_TABLE_JAVA = "replicatedTable"
CLICKHOUSE_COLUMN_ORDER_JAVA = "clickhouseColumnOrder"
TOPIC_NAME_JAVA = "topicName"
PARALLELISM = "parallelism"
ARCHIVE = "archive"
SCHEMA_TYPE_JAVA = "schemaType"
CONFIGS = "configs"
SOURCE_RT_ID_JAVA = "sourceRtId"
SINK_RT_ID_JAVA = "sinkRtId"
LAST_READ_LINE_NUMBER = "last_read_line_number"
LAST_READ_LINE_NUMBER_JAVA = "lastReadLineNumber"
LAST_READ_FILE_NAME = "last_read_file_name"
LAST_READ_FILE_NAME_JAVA = "lastReadFileName"
PROCESSED_ROWS = "processed_rows"
PROCESSED_ROWS_JAVA = "processedRows"
FINISHED = "finished"
CREATE_TIME = "create_rime"
CREATE_TIME_JAVA = "createTime"
UPDATE_TIME = "update_time"
UPDATE_TIME_JAVA = "updateTime"
FINISH_TIME = "finish_time"
FINISH_TIME_JAVA = "finishTime"
TIME_TAKEN = "time_taken"
TIME_TAKEN_JAVA = "timeTaken"
STAGE_STATUS = "stage_status"
STAGE_TYPE = "stage_type"
EXECUTE = "execute"
STAGE_SEQ = "stage_seq"
STAGES = "stages"
ACCESS_CONF_INFO = "access_conf_info"
RESOURCE = "resource"
SCOPE_CONFIG = "scope_config"
PATHS = "paths"
SYSTEM = "system"
LINUX = "linux"
FOR_DATALAB = "for_datalab"
START_SHIPPER_TASK = "start_shipper_task"
TID = "tid"
INAME = "iname"
INTERFACE_NAME = "interface_name"
ASCII = "ascii"
IS_MIX_SCHEMA = "is_mix_schema"
TASKS = "tasks"
MAX_RECORDS = "max_records"
KEY_SEPARATOR = "key_separator"
SORTED_KEYS = "sorted_keys"
QUEUE_PULSAE = "queue_pulsar"
DIR_DOMAIN = "dir_domain"
DIR_LIST = "dir_list"
SERVERPORT = "ServerPort"
SERVERIP = "ServerIP"
SNAPSHOTS = "snapshots"
TIMESTAMP_MS = "timestamp_ms"
SASL_USERNAME = "sasl_username"
SECURITY_PROTOCOL = "security_protocol"
SASL_PASSWD = "sasl_passwd"
SASL_MECHANISMS = "sasl_mechanisms"
SASL__USER = "sasl.user"
SASL__PASS = "sasl.pass"
USE__SASL = "use.sasl"
RAW_DATA = "raw_data"
SCENARIO_NAME = "scenario_name"
PROCESSING_ID = "processing_id"
DATA_PROCESSING = "data_processing"
DIMENSION_TYPE = "dimension_type"
DIMENSION_NAME = "dimension_name"
TABLE_INFO = "table_info"
MINMAX = "minmax"
GRANULARITY = "granularity"
EXPRESSION = "expression"
INDEX_TYPE = "index_type"
FREE_DISK = "free_disk"
ZK_ADDRESS = "zk_address"
WEIGHTS = "weights"
FACTOR = "factor"
HTTP_PORT = "http_port"
PREASSIGNED_DATA_ID = "preassigned_data_id"
IGNORE_FILE_END_WITH = "ignore_file_end_with"
ENCODING = "encoding"
SOURCE_HDFS_PROPERTY_JAVA = "sourceHdfsProperty"
SOURCE_DATA_DIR_JAVA = "sourceDataDir"
SOURCE_ICEBERG_DATABASE_JAVA = "sourceIcebergDatabase"
SOURCE_ICEBERG_TABLE_JAVA = "sourceIcebergTable"
SOURCE_GEOG_AREA_JAVA = "sourceGeogArea"
SOURCE_DATA_TYPE_JAVA = "sourceDataType"
SINK_HDFS_PROPERTY_JAVA = "sinkHdfsProperty"
SINK_DATA_DIR_JAVA = "sinkDataDir"
SINK_ICEBERG_DATABASE_JAVA = "sinkIcebergDatabase"
SINK_ICEBERG_TABLE_JAVA = "sinkIcebergTable"
SINK_GEOG_AREA_JAVA = "sinkGeogArea"
SINK_DATA_TYPE_JAVA = "sinkDataType"
SOURCE_TYPE_JAVA = "sourceType"
SINK_TYPE_JAVA = "sinkType"
RECOVER_CLASS_NAME_JAVA = "recoverClassName"
PERCENTAGE = "percentage"
ODM = "odm"
TGLOG = "tglog"
OFFLINEFILE = "offlinefile"
HDFS_PATH = "hdfs_path"
HDFS_PORT = "hdfs_port"
HDFS_HOST = "hdfs_host"
DATALAB = "datalab"
RAW_DATA_IDS = "raw_data_ids"
FILE = "file"
UPLOAD_UUID = "upload_uuid"
TASK_NAME = "task_name"
CHUNK_ID = "chunk_id"
CHUNK_SIZE = "chunk_size"
MD5 = "md5"
LATEST = "latest"
EARLIEST = "earliest"
CONF = "conf"
OUTPUT_FORMAT = "output_format"
DEPLOY_PLAN_ID = "deploy_plan_id"
OP_TYPE = "op_type"
PROCESS_INFOS = "process_infos"
SETUP_PATH = "setup_path"
PROC_NAME = "proc_name"
PID_PATH = "pid_path"
ACCOUNT = "account"
IP_LIST = "ip_list"
DATA_SOURCE = "data_source"
BIZ = "biz"
BACKUPS = "backups"
CYCLE_TIME = "cycle_time"
CANCLE = "cancle"
MINUTE = "minute"
TDM_CLUSTER_ID = "tdm_cluster_id"
TDM_CLUSTER_ALIAS = "tdm_cluster_alias"
TDM = "tdm"
STORE_SIZE = "store.size"
DOCS_COUNT = "docs.count"
TOP = "top"
FILE_NAME = "file_name"
# Scenario names
STORAGE_SCENARIO_NAME = "storage_scenario_name"
STORAGE_SCENARIO_ALIAS = "storage_scenario_alias"
FORMAL_NAME_FILED = "formal_name"
STORAGE_SCENARIO_PRIORITY = "storage_scenario_priority"
CLUSTER_TYPE_PRIORITY = "cluster_type_priority"
STORAGE_CLUSTER_CONFIG = "storage_cluster_config"
BEGIN_TIME = "begin_time"
LOGS_ZH = "logs_zh"
LOGS_EN = "logs_en"
TDBANK_FIELDS = "tdbank_fields"
ALTER_ID = "alter_id"
METRIC_ID = "metric_id"
REPORT = "report"
RESOURCECENTER = "resourceCenter"
CREATE_CLUSTER = "create_cluster"
UPDATE_CLUSTER = "update_cluster"
GET_CLUSTER = "get_cluster"
SUB = "sub"
U_RT_ID = "rt_id"
RT_ID_STORAGE = "rt_id_storage"
GTE = "gte"
LTE = "lte"
PARTITION_DATE = "partition_date"
PARTITION_NAME = "partition_name"
LZ_SERVERS_INFO = "lz_servers_info"
SERVER_TYPE = "server_type"
APP_GROUP_NAME = "app_group_name"
BG_ID = "bg_id"
SERVERS_INFO = "servers_info"
DIM_FIELDS = "dim_fields"
EXCEPT_FIELDS = "except_fields"
TDW_APP_GROUP = "tdw_app_group"
SOURCE_SERVER = "source_server"
TARGET_FILTER = "target_filter"
TARGET_TYPE = "target_type"
TAG_FILTER = "tag_filter"
MATCH_POLICY = "match_policy"
ERRORS = "errors"
|
py | b4156e33a855c054562643dd37a7c89323e34573 | """Contains treeNode and Binary search tree"""
class TreeNode():
"""class tree node creates a node with parent and left and right child"""
def __init__(self, value = None, parent = None):
"""initializer"""
self.left = None
self.right = None
self.value = value
self.parent = parent
def left_child(self):
"""returns left child"""
return self.left
def right_child(self):
"""returns right child"""
return self.right
def set_left(self, node):
"""sets left child and parent"""
self.left = node
if node is not None:
node.parent = self
return node
def set_right(self, node):
"""sets right child and parent"""
self.right = node
if node is not None:
node.parent = self
return node
def get_parent(self):
"""returns parent"""
return self.parent
def set_value(self, value):
"""sets node value"""
self.value = value
def get_value(self):
"""returns node value"""
return self.value
def level(self):
"""returns what level the node is on"""
result = 0
temp = self
while temp.parent is not None:
temp = temp.parent
result += 1
return result
def add(self, item):
"""figures out wether item should be left or right of root and adds it"""
if item < self.value:
if self.left_child() is None:
self.set_left(TreeNode(item))
else:
self.left_child().add(item)
else:
if self.right_child() is None:
self.set_right(TreeNode(item))
else:
self.right_child().add(item)
def find(self, item):
"""finds target node"""
if self.value == item:
return self.value
elif item < self.value:
if self.left is not None:
return self.left.find(item)
else:
raise ValueError()
else:
if self.right is not None:
return self.right.find(item)
else:
raise ValueError()
def InOrder(self, results):
"""returns nodes inorder"""
if self.left is not None:
self.left.InOrder(results)
results.append(self.value)
if self.right is not None:
self.right.InOrder(results)
def PreOrder(self, results):
"""returns node in preorder"""
results.append(self.value)
if self.left is not None:
self.left.PreOrder(results)
if self.right is not None:
self.right.PreOrder(results)
def PostOrder(self, results):
"""returns nodes in postorder"""
if self.left is not None:
self.left.PostOrder(results)
if self.right is not None:
self.right.PostOrder(results)
results.append(self.value)
def __str__(self):
"""returns value as string"""
return str(self.value)
class BST():
"""class bst creates a binary search tree"""
def __init__(self):
"""initializer"""
self.root = None
def is_empty(self):
"""returns bool val if BST is empty or not"""
        return self.root is None
def size(self):
"""returns the size of BST"""
return self.__size(self.root)
def __size(self, node):
"""recursively finds size of BST"""
if node is None:
return 0
result = 1
result += self.__size(node.left)
result += self.__size(node.right)
return result
def height(self):
"""returns height of BST"""
return self.__height(self.root, 1)
def __height(self, node, current):
"""recursively fins height of BST"""
left_val = current
right_val = current
if node.left_child() is not None:
left_val = self.__height(node.left, current + 1)
if node.right_child() is not None:
right_val = self.__height(node.right, current + 1)
return max(left_val, right_val)
def add(self, item):
"""adds item to BST"""
if self.root is None:
self.root = TreeNode(item)
else:
self.root.add(item)
return self.root
def remove(self, item):
"""returns removed item from BST"""
return self.__remove(self.root, item)
def __remove(self, node, item):
"""recursively removes item from bst and remakes tree"""
#! Go through remove function together
if node is None:
return node
if item < node.get_value():
node.set_left(self.__remove(node.left_child(), item))
elif item > node.get_value():
node.set_right(self.__remove(node.right_child(), item))
else:
if node.left_child() is None:
temp = node.right_child()
if temp is not None:
temp.parent = node.parent
node = None
return temp
elif node.right_child() is None:
temp = node.left_child()
if temp is not None:
temp.parent = node.parent
node = None
return temp
else:
minNode = self.__minValueNode(node.right_child())
node.set_value(minNode.get_value())
node.set_right(self.__remove(node.right_child(), minNode.get_value()))
return node
def __minValueNode(self, node):
"""helper function for remove"""
current = node
while current.left_child() is not None:
current = current.left_child()
return current
def find(self, item):
"""finds and returns item"""
if self.root is not None:
return self.root.find(item)
raise ValueError()
def inorder(self):
"""returns a list of treenodes in order"""
result = []
if self.root is not None:
self.root.InOrder(result)
return result
def preorder(self):
"""returns a list of treenodes in preorder"""
result = []
if self.root is not None:
self.root.PreOrder(result)
return result
def postorder(self):
"""returns a list of treenodes in postorder"""
result = []
if self.root is not None:
self.root.PostOrder(result)
return result
def rebalance(self):
"""rebalances tree"""
ordered_lyst = self.inorder()
self.root = None
self.__rebalance(ordered_lyst, 0, len(ordered_lyst)-1)
def __rebalance(self, orderedList, low, high):
"""recursively rebalances treenode"""
if low <= high:
mid = (high - low) // 2 + low
self.add(orderedList[mid])
self.__rebalance(orderedList, low, mid - 1)
self.__rebalance(orderedList, mid + 1, high)
    def __str__(self):
        """returns an indented, level-labelled string representation of the tree"""
        if self.root is None:
            return ""
        return self.__recurStr(self.root)
    def __recurStr(self, node):
        """recursively builds the printable representation of the subtree rooted at node"""
        result = " " * node.level() + str(node.level()) + str(node.get_value()) + "\n"
        if node.left_child() is not None:
            result += self.__recurStr(node.left_child())
        if node.right_child() is not None:
            result += self.__recurStr(node.right_child())
        return result
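# Usage sketch: a minimal demo of the TreeNode/BST classes defined above; it
# assumes nothing beyond this file, and the noted values hold for this exact
# insertion order.
if __name__ == "__main__":
    tree = BST()
    for value in [8, 3, 10, 1, 6, 14, 4, 7, 13]:
        tree.add(value)
    print(tree.inorder())   # sorted: [1, 3, 4, 6, 7, 8, 10, 13, 14]
    print(tree.size())      # 9
    tree.remove(3)          # 3 has two children, so it is replaced by its in-order successor 4
    print(tree.inorder())   # [1, 4, 6, 7, 8, 10, 13, 14]
    tree.rebalance()        # rebuild from the sorted values so the tree is roughly balanced
    print(tree.height())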
|
py | b4156e590e1132212aa858c53df341f67c428ede | '''
Created on Jul 27, 2015
@author: hsorby
'''
import os.path
from PySide import QtGui, QtCore
from mapclientplugins.monodomain2dstep.ui_mono2dwidget import Ui_Mono2DWidget
from mapclientplugins.monodomain2dstep.mono2dmodel import Mono2DModel
class Mono2DWidget(QtGui.QWidget):
'''
    Widget that visualises and compares the converged and experimental Monodomain2D simulations.
'''
def __init__(self, parent=None):
'''
Constructor
'''
super(Mono2DWidget, self).__init__(parent)
self._ui = Ui_Mono2DWidget()
self._ui.setupUi(self)
self._ui.doubleSpinBoxStepSize.setVisible(False)
self._ui.labelStepSize.setVisible(False)
# The number of decimals defines the minimum positive
# number we can set.
self._ui.doubleSpinBoxStepSize.setDecimals(8)
self._ui.doubleSpinBoxStepSize.setMinimum(0.00000001)
self._model_converged = Mono2DModel('converged')
self._model_experimental = Mono2DModel('experimental')
self._ui.widgetSceneviewerConverged.setContext(self._model_converged.getContext())
self._ui.widgetSceneviewerExperimental.setContext(self._model_experimental.getContext())
self._callback = None
self._timer = QtCore.QTimer()
self._makeConnections()
def clear(self):
self._model_converged.clear()
self._model_experimental.clear()
def initialise(self, data_location):
self._model_converged.initialise()
self._model_experimental.initialise()
self._ui.pushButtonPlayStop.setText('Play')
self._setSliderValues()
self._ui.labelTime.setText('{:10.4f}'.format(self._model_experimental.getMinTime()))
self._ui.doubleSpinBoxStepSize.setValue(self._model_experimental.getStepSize())
dis = self._model_experimental.getDis()
self._ui.spinBoxXDiscretisation.setValue(dis[0])
self._ui.spinBoxYDiscretisation.setValue(dis[1])
self._timer.setInterval(0) #*self._model_experimental.getTimeStep())
self._model_converged.setLocation(os.path.join(data_location, 'Monodomain2D/converged'))
self._model_experimental.setLocation(os.path.join(data_location, 'Monodomain2D/experimental'))
self._model_converged.setSimulationRoot(data_location)
self._model_experimental.setSimulationRoot(data_location)
# self._model_converged.setIronPath(os.path.join(data_location, 'bin'))
# self._model_experimental.setIronPath(os.path.join(data_location, 'bin'))
# self._model_converged.simulate(0.1, [7, 7])
self._model_converged.loadSimulation()
self._model_converged.createVisualisation()
self._initialiseConvergedSceneviewer()
# self._model_experimental.simulate(0.1, [dis[0], dis[1]])
# self._model_experimental.createVisualisation()
def _tweakView(self):
p = self._ui.widgetSceneviewerConverged.getViewParameters()
print(p)
def _makeConnections(self):
self._ui.pushButtonContinue.clicked.connect(self._continueClicked)
self._ui.pushButtonSimulate.clicked.connect(self._simulateClicked)
self._ui.pushButtonPlayStop.clicked.connect(self._playStopClicked)
self._timer.timeout.connect(self._timerTimedOut)
self._ui.horizontalSliderTime.valueChanged.connect(self._timeChanged)
self._ui.widgetSceneviewerConverged.graphicsInitialized.connect(self._graphicsInitialized)
self._ui.widgetSceneviewerExperimental.graphicsInitialized.connect(self._graphicsInitialized)
def _graphicsInitialized(self):
sender = self.sender()
if sender is self._ui.widgetSceneviewerConverged:
self._initialiseConvergedSceneviewer()
elif sender is self._ui.widgetSceneviewerExperimental:
self._initialiseExperimentalSceneviewer()
def _initialiseExperimentalSceneviewer(self):
sceneviewer = self._ui.widgetSceneviewerExperimental.getSceneviewer()
if sceneviewer is not None:
scene = self._model_experimental.getRegion().getScene()
self._resetScene(sceneviewer, scene)
def _initialiseConvergedSceneviewer(self):
sceneviewer = self._ui.widgetSceneviewerConverged.getSceneviewer()
if sceneviewer is not None:
scene = self._model_converged.getRegion().getScene()
self._resetScene(sceneviewer, scene)
def _resetScene(self, sceneviewer, scene):
sceneviewer.setScene(scene)
sceneviewer.viewAll()
sceneviewer.setPerturbLinesFlag(True)
# We need to tweak the view slightly so that we
# can see the lines of the elements.
_, v = sceneviewer.getEyePosition()
v[1] += 0.01
sceneviewer.setEyePosition(v)
def _continueClicked(self):
self._callback()
def registerCallback(self, callback):
self._callback = callback
def _simulateClicked(self):
x_dis = self._ui.spinBoxXDiscretisation.value()
y_dis = self._ui.spinBoxYDiscretisation.value()
step_size = self._ui.doubleSpinBoxStepSize.value()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self._model_experimental.initialise()
self._model_experimental.clearVisualisation()
self._initialiseExperimentalSceneviewer()
self._model_experimental.simulate(step_size, [x_dis, y_dis])
self._model_experimental.loadSimulation()
self._model_experimental.createVisualisation()
self._ui.widgetSceneviewerExperimental.viewAll()
QtGui.QApplication.restoreOverrideCursor()
def _setSliderValues(self):
step_size = self._model_experimental.getStepSize()
slider_range = (self._model_experimental.getMaxTime() - self._model_experimental.getMinTime())/step_size
self._ui.horizontalSliderTime.setRange(0, slider_range)
self._ui.horizontalSliderTime.setValue(0)
def _setTime(self, value):
"""
The value here is the slider value and not the actual desired time
we need to convert before using it.
"""
step_size = self._model_experimental.getStepSize()
time = self._model_experimental.getMinTime() + value * step_size
self._ui.labelTime.setText('{:10.4f}'.format(time))
self._model_converged.setTime(time)
self._model_experimental.setTime(time)
def _timeChanged(self, value):
"""
Deals with events from the user manually changing the slider
to a new value.
"""
self._setTime(value)
def _timerTimedOut(self):
"""
Deals with timeout events triggered by the timer. i.e. the
Play button is active.
"""
value = self._ui.horizontalSliderTime.value()
max_value = self._ui.horizontalSliderTime.maximum()
value += 10
if max_value < value:
value = 0
self._ui.horizontalSliderTime.setValue(value)
self._setTime(value)
def _playStopClicked(self):
button_text = self.sender().text()
if button_text == 'Play':
self._timer.start()
self._ui.pushButtonPlayStop.setText('Stop')
self._ui.horizontalSliderTime.setEnabled(False)
else:
self._timer.stop()
self._ui.pushButtonPlayStop.setText('Play')
self._ui.horizontalSliderTime.setEnabled(True)
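# Worked example of the slider <-> time mapping in _setSliderValues()/_setTime(),
# using illustrative numbers only: with getMinTime() == 0.0, getMaxTime() == 10.0
# and getStepSize() == 0.1, the slider range becomes (10.0 - 0.0) / 0.1 = 100 steps,
# and a slider value of 25 maps back to time = 0.0 + 25 * 0.1 = 2.5, which is the
# value shown in the time label.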
|
py | b4156e78e3d7f969271c6890bf7238f6f7c6f3cf | # coding: utf-8
"""
NRF NFDiscovery Service
NRF NFDiscovery Service. © 2020, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved. # noqa: E501
The version of the OpenAPI document: 1.1.0.alpha-4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class Ipv4AddressRange(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'start': 'str',
'end': 'str'
}
attribute_map = {
'start': 'start',
'end': 'end'
}
def __init__(self, start=None, end=None, local_vars_configuration=None): # noqa: E501
"""Ipv4AddressRange - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._start = None
self._end = None
self.discriminator = None
if start is not None:
self.start = start
if end is not None:
self.end = end
@property
def start(self):
"""Gets the start of this Ipv4AddressRange. # noqa: E501
:return: The start of this Ipv4AddressRange. # noqa: E501
:rtype: str
"""
return self._start
@start.setter
def start(self, start):
"""Sets the start of this Ipv4AddressRange.
:param start: The start of this Ipv4AddressRange. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
start is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', start)): # noqa: E501
raise ValueError(r"Invalid value for `start`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`") # noqa: E501
self._start = start
@property
def end(self):
"""Gets the end of this Ipv4AddressRange. # noqa: E501
:return: The end of this Ipv4AddressRange. # noqa: E501
:rtype: str
"""
return self._end
@end.setter
def end(self, end):
"""Sets the end of this Ipv4AddressRange.
:param end: The end of this Ipv4AddressRange. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
end is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', end)): # noqa: E501
raise ValueError(r"Invalid value for `end`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`") # noqa: E501
self._end = end
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Ipv4AddressRange):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Ipv4AddressRange):
return True
return self.to_dict() != other.to_dict()
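# Usage sketch for the generated model above; it only exercises this class and
# assumes the generated openapi_client package is importable (it already must be,
# since Configuration is imported at the top of this module).
if __name__ == "__main__":
    addr_range = Ipv4AddressRange(start="10.0.0.1", end="10.0.0.254")
    print(addr_range.to_dict())  # {'start': '10.0.0.1', 'end': '10.0.0.254'}
    print(addr_range == Ipv4AddressRange(start="10.0.0.1", end="10.0.0.254"))  # True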
|
py | b4156e88882e297a7da8b9630191cfbbd0f65de0 | from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.contrib.auth.decorators import login_required
import os.path
from django.http import Http404
# Create your views here.
@login_required
def proxy(request, static_path):
print('static path: ' + static_path)
template_path = 'proxy/' + static_path
print('template_path: ' + template_path)
print(os.listdir('proxy/templates/proxy'))
if os.path.isfile('proxy/templates/' + template_path):
return render(request, template_path)
else:
raise Http404("no static site matches the given query.")
|
py | b4156fec2021c9057665df4464a58a1faa836723 | import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def find_module(self, fullname, path=None):
"""
Return self when fullname starts with root_name and the
target module is one vendored through this importer.
"""
root, base, target = fullname.partition(self.root_name + '.')
if root:
return
if not any(map(target.startswith, self.vendored_names)):
return
return self
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
# mysterious hack:
# Remove the reference to the extant package/module
# on later Python versions to cause relative imports
# in the vendor package to resolve the same modules
# as those going through this importer.
if sys.version_info > (3, 3):
del sys.modules[extant]
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = 'packaging', 'pyparsing', 'six', 'appdirs'
VendorImporter(__name__, names).install()
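# Resolution sketch: once install() has run, an import such as
#     from <root_package>.extern import six        # <root_package> is a placeholder
# is answered by this importer, which first tries '<root_package>._vendor.six'
# (the vendor_pkg prefix yielded by search_path) and only falls back to the
# top-level 'six' distribution if the vendored copy is missing.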
|
py | b4156ffe0b6573939a71624202e7ab3d162af467 | # Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, re
import functools
from . import mlog
from . import mparser
from . import coredata
from . import mesonlib
forbidden_option_names = coredata.get_builtin_options()
forbidden_prefixes = {'c_',
'cpp_',
'd_',
'rust_',
'fortran_',
'objc_',
'objcpp_',
'vala_',
'csharp_',
'swift_',
'b_',
'backend_',
}
def is_invalid_name(name):
if name in forbidden_option_names:
return True
pref = name.split('_')[0] + '_'
if pref in forbidden_prefixes:
return True
return False
class OptionException(mesonlib.MesonException):
pass
def permitted_kwargs(permitted):
"""Function that validates kwargs for options."""
def _wraps(func):
@functools.wraps(func)
def _inner(name, description, kwargs):
bad = [a for a in kwargs.keys() if a not in permitted]
if bad:
raise OptionException('Invalid kwargs for option "{}": "{}"'.format(
name, ' '.join(bad)))
return func(name, description, kwargs)
return _inner
return _wraps
optname_regex = re.compile('[^a-zA-Z0-9_-]')
@permitted_kwargs({'value', 'yield'})
def StringParser(name, description, kwargs):
return coredata.UserStringOption(name,
description,
kwargs.get('value', ''),
kwargs.get('choices', []),
kwargs.get('yield', coredata.default_yielding))
@permitted_kwargs({'value', 'yield'})
def BooleanParser(name, description, kwargs):
return coredata.UserBooleanOption(name, description,
kwargs.get('value', True),
kwargs.get('yield', coredata.default_yielding))
@permitted_kwargs({'value', 'yield', 'choices'})
def ComboParser(name, description, kwargs):
if 'choices' not in kwargs:
raise OptionException('Combo option missing "choices" keyword.')
choices = kwargs['choices']
if not isinstance(choices, list):
raise OptionException('Combo choices must be an array.')
for i in choices:
if not isinstance(i, str):
raise OptionException('Combo choice elements must be strings.')
return coredata.UserComboOption(name,
description,
choices,
kwargs.get('value', choices[0]),
kwargs.get('yield', coredata.default_yielding),)
@permitted_kwargs({'value', 'min', 'max', 'yield'})
def IntegerParser(name, description, kwargs):
if 'value' not in kwargs:
raise OptionException('Integer option must contain value argument.')
return coredata.UserIntegerOption(name,
description,
kwargs.get('min', None),
kwargs.get('max', None),
kwargs['value'],
kwargs.get('yield', coredata.default_yielding))
@permitted_kwargs({'value', 'yield', 'choices'})
def string_array_parser(name, description, kwargs):
if 'choices' in kwargs:
choices = kwargs['choices']
if not isinstance(choices, list):
raise OptionException('Array choices must be an array.')
for i in choices:
if not isinstance(i, str):
raise OptionException('Array choice elements must be strings.')
value = kwargs.get('value', choices)
else:
choices = None
value = kwargs.get('value', [])
if not isinstance(value, list):
raise OptionException('Array choices must be passed as an array.')
return coredata.UserArrayOption(name,
description,
value,
choices=choices,
yielding=kwargs.get('yield', coredata.default_yielding))
option_types = {'string': StringParser,
'boolean': BooleanParser,
'combo': ComboParser,
'integer': IntegerParser,
'array': string_array_parser,
}
class OptionInterpreter:
def __init__(self, subproject, command_line_options):
self.options = {}
self.subproject = subproject
self.sbprefix = subproject + ':'
self.cmd_line_options = {}
for o in command_line_options:
if self.subproject != '': # Strip the beginning.
# Ignore options that aren't for this subproject
if not o.startswith(self.sbprefix):
continue
try:
(key, value) = o.split('=', 1)
except ValueError:
raise OptionException('Option {!r} must have a value separated by equals sign.'.format(o))
# Ignore subproject options if not fetching subproject options
if self.subproject == '' and ':' in key:
continue
self.cmd_line_options[key] = value
def get_bad_options(self):
subproj_len = len(self.subproject)
if subproj_len > 0:
subproj_len += 1
retval = []
# The options need to be sorted (e.g. here) to get consistent
# error messages (on all platforms) which is required by some test
# cases that check (also) the order of these options.
for option in sorted(self.cmd_line_options):
if option in list(self.options) + forbidden_option_names:
continue
if any(option[subproj_len:].startswith(p) for p in forbidden_prefixes):
continue
retval += [option]
return retval
def check_for_bad_options(self):
bad = self.get_bad_options()
if bad:
sub = 'In subproject {}: '.format(self.subproject) if self.subproject else ''
mlog.warning(
'{}Unknown command line options: "{}"\n'
'This will become a hard error in a future Meson release.'.format(sub, ', '.join(bad)))
def process(self, option_file):
try:
with open(option_file, 'r', encoding='utf8') as f:
ast = mparser.Parser(f.read(), '').parse()
except mesonlib.MesonException as me:
me.file = option_file
raise me
if not isinstance(ast, mparser.CodeBlockNode):
e = OptionException('Option file is malformed.')
e.lineno = ast.lineno()
raise e
for cur in ast.lines:
try:
self.evaluate_statement(cur)
except Exception as e:
e.lineno = cur.lineno
e.colno = cur.colno
e.file = os.path.join('meson_options.txt')
raise e
self.check_for_bad_options()
def reduce_single(self, arg):
if isinstance(arg, str):
return arg
elif isinstance(arg, (mparser.StringNode, mparser.BooleanNode,
mparser.NumberNode)):
return arg.value
elif isinstance(arg, mparser.ArrayNode):
return [self.reduce_single(curarg) for curarg in arg.args.arguments]
else:
raise OptionException('Arguments may only be string, int, bool, or array of those.')
def reduce_arguments(self, args):
assert(isinstance(args, mparser.ArgumentNode))
if args.incorrect_order():
raise OptionException('All keyword arguments must be after positional arguments.')
reduced_pos = [self.reduce_single(arg) for arg in args.arguments]
reduced_kw = {}
for key in args.kwargs.keys():
if not isinstance(key, str):
raise OptionException('Keyword argument name is not a string.')
a = args.kwargs[key]
reduced_kw[key] = self.reduce_single(a)
return reduced_pos, reduced_kw
def evaluate_statement(self, node):
if not isinstance(node, mparser.FunctionNode):
raise OptionException('Option file may only contain option definitions')
func_name = node.func_name
if func_name != 'option':
raise OptionException('Only calls to option() are allowed in option files.')
(posargs, kwargs) = self.reduce_arguments(node.args)
if 'type' not in kwargs:
raise OptionException('Option call missing mandatory "type" keyword argument')
opt_type = kwargs.pop('type')
if opt_type not in option_types:
raise OptionException('Unknown type %s.' % opt_type)
if len(posargs) != 1:
raise OptionException('Option() must have one (and only one) positional argument')
opt_name = posargs[0]
if not isinstance(opt_name, str):
raise OptionException('Positional argument must be a string.')
if optname_regex.search(opt_name) is not None:
raise OptionException('Option names can only contain letters, numbers or dashes.')
if is_invalid_name(opt_name):
raise OptionException('Option name %s is reserved.' % opt_name)
if self.subproject != '':
opt_name = self.subproject + ':' + opt_name
opt = option_types[opt_type](opt_name, kwargs.pop('description', ''), kwargs)
if opt.description == '':
opt.description = opt_name
if opt_name in self.cmd_line_options:
opt.set_value(self.cmd_line_options[opt_name])
self.options[opt_name] = opt
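# Example meson_options.txt contents that this interpreter accepts (the option
# names are illustrative only); every statement must be a call to option() carrying
# a mandatory 'type' keyword argument:
#
#     option('with_tests', type : 'boolean', value : true)
#     option('log_level', type : 'combo', choices : ['debug', 'info', 'error'], value : 'info')
#     option('workers', type : 'integer', min : 1, max : 64, value : 4)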
|
py | b4157064849bdfabfbe05693a4688dea90c9b679 | # 累積和(gcd)
from fractions import gcd
N = int(input())
A = list(map(int, input().split()))
lcg = A[:]
rcg = A[:]
for i in range(1, N):
lcg[i] = gcd(lcg[i], lcg[i - 1])
for i in range(N - 2, -1, -1):
rcg[i] = gcd(rcg[i], rcg[i + 1])
t = [0] * N
t[0] = rcg[1]
for i in range(1, N - 1):
t[i] = gcd(lcg[i - 1], rcg[i + 1])
t[N - 1] = lcg[N - 2]
print(max(t))
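# Worked example for A = [12, 18, 24]:
#   prefix gcds lcg = [12, 6, 6], suffix gcds rcg = [6, 6, 24]
#   drop A[0]: rcg[1] = gcd(18, 24) = 6
#   drop A[1]: gcd(lcg[0], rcg[2]) = gcd(12, 24) = 12
#   drop A[2]: lcg[1] = gcd(12, 18) = 6
# so the maximum gcd after removing one element is 12.
# Note: fractions.gcd was removed in Python 3.9; math.gcd is the drop-in replacement.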
|
py | b415716c1869dadadfe0e1795a85e44d861dc5e9 | # An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
#from gym.spaces import prng
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
Note: A value of 0 always need to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
#random_array = prng.np_random.rand(self.num_discrete_space)
np_random = np.random.RandomState()
random_array = np_random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <=
self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
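# Usage sketch mirroring the game-controller example in the docstring above;
# it assumes only this module's imports (numpy and the old gym API).
if __name__ == "__main__":
    controller = MultiDiscrete([[0, 4], [0, 1], [0, 1]])
    action = controller.sample()                 # e.g. [3, 0, 1]
    print(action, controller.contains(action))   # a sampled action is always contained
    print(controller.shape)                      # 3 discrete sub-spaces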
|
py | b41572dd116bab80c6392a4db34172ba60dececa | # Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for implementing a translation random vector for non-gaussian random
vectors whose components are governed by analytic probability distributions.
"""
import copy
import numpy as np
from scipy.stats import multivariate_normal, norm
from scipy import integrate, interpolate
from SROMPy.target.RandomVector import RandomVector
class AnalyticRandomVector(RandomVector):
"""
Class for implementing a translation random vector for non-gaussian random
vectors whose components are governed by analytic probability distributions
and have known correlation.
:param random_variables: list of SROMPy target random variable objects
defining each component of the random vector.
:type random_variables: list of SROMPy random variable objects
:param correlation_matrix: specifies correlation between vector components.
:type correlation_matrix: np array, size: dim x dim
random_variables list must have length equal to the random vector dimension.
Each SROMPy random variable object in the list must be properly
initialized and have compute_moments and compute_CDF functions implemented.
"""
def __init__(self, random_variables, correlation_matrix):
"""
Create analytic random vector with components that follow
standard probability distributions. Initialize using a list of
random variable objects that define each dimension as well as a
correlation matrix specifying the correlation structure between
components
inputs:
random_variables - list of random variable objects with length
equal to the desired dimension of the analytic
random vector being created. Must have
compute_moments and compute_CDF functions
implemented.
correlation_matrix - numpy array with size (dimension x dimension)
with correlation between each component. Must be
symmetric, square matrix.
"""
# TODO - error checking to make sure random variables are properly
# initialized / constructed / necessary functions / member variables
# like _min / _max
# Error checking on correlation matrix:
self.verify_correlation_matrix(correlation_matrix)
self._corr = copy.deepcopy(correlation_matrix)
# Size of correlation matrix must match # random variable components:
if self._corr.shape[0] != len(random_variables):
raise ValueError("Dimension mismatch btwn corr mat & random vars")
# Parent class (RandomVector) constructor, sets self._dim
super(AnalyticRandomVector, self).__init__(len(random_variables))
self._gaussian_corr = None
self._unscaled_correlation = None
# Get min/max values for each component.
self._components = copy.deepcopy(random_variables)
self.mins = np.zeros(self._dim)
self.maxs = np.zeros(self._dim)
for i in range(self._dim):
self.mins[i] = self._components[i].mins[0]
self.maxs[i] = self._components[i].maxs[0]
# Generate Gaussian correlation matrix for sampling translation RV:
self.generate_gaussian_correlation()
# Generate unscaled correlation that is matched by SROM during opt.
self.generate_unscaled_correlation()
@staticmethod
def verify_correlation_matrix(corr_matrix):
"""
Do error checking on the provided correlation matrix, e.g., is it
square? is it symmetric?
"""
corr_matrix = np.array(corr_matrix) # Make sure it's an numpy array.
if len(corr_matrix.shape) == 1:
raise ValueError("Correlation matrix must be a 2D array!")
if corr_matrix.shape[0] != corr_matrix.shape[1]:
raise ValueError("Correlation matrix must be square!")
# Slick check for symmetry:
if not np.allclose(corr_matrix, corr_matrix.T, 1e-6):
raise ValueError("Correlation matrix must be symmetric!")
# Make sure all entries are positive:
if np.any(corr_matrix < 0):
raise ValueError("Correlation matrix entries must be positive!")
def compute_moments(self, max_):
"""
Calculate random vector moments up to order max_moment based
on samples. Moments from 1,...,max_order
"""
# Get moments up to max_ for each component of the vector.
moments = np.zeros((max_, self._dim))
for i in range(self._dim):
moments[:, i] = self._components[i].compute_moments(max_).flatten()
return moments
def compute_cdf(self, x_grid):
"""
Evaluates the precomputed/stored CDFs at the specified x_grid values
and returns. x_grid can be a 1D array in which case the CDFs for each
dimension are evaluated at the same points, or it can be a
(num_grid_pts x dim) array, specifying different points for each
dimension - each dimension can have a different range of values but
must have the same # of grid pts across it. Returns a (num_grid_pts x
dim) array of corresponding CDF values at the grid points
"""
        # NOTE - should deep copy x_grid since we're modifying it?
# 1D random variable case
if len(x_grid.shape) == 1:
x_grid = x_grid.reshape((len(x_grid), 1))
(num_pts, dim) = x_grid.shape
# If only one grid was provided for multiple dims, repeat to generalize.
if (dim == 1) and (self._dim > 1):
x_grid = np.repeat(x_grid, self._dim, axis=1)
cdf_values = np.zeros((num_pts, self._dim))
# Evaluate CDF interpolants on grid.
for d, grid in enumerate(x_grid.T):
# Make sure grid values lie within max/min along each dimension.
grid[np.where(grid < self.mins[d])] = self.mins[d]
grid[np.where(grid > self.maxs[d])] = self.maxs[d]
cdf_values[:, d] = self._components[d].compute_cdf(grid)
return cdf_values
def compute_correlation_matrix(self):
"""
Returns the correlation matrix
"""
return self._unscaled_correlation
def draw_random_sample(self, sample_size):
"""
Implements the translation model to generate general random vectors with
non-gaussian components. Nonlinear transformation of a std gaussian
vector according to method in S.R. Arwade 2005 paper.
random component sample: theta = inv_cdf(std_normal_cdf(normal_vec))
\Theta = F^{-1}(\Phi(G))
"""
samples = np.zeros((sample_size, self._dim))
cholesky = np.linalg.cholesky(self._gaussian_corr)
# Is there a way to optimize this sampling loop?
for i in range(sample_size):
# Draw standard std normal random vector with given correlation.
            normal_vector = cholesky.dot(norm.rvs(size=self._dim))
# Evaluate std normal CDF at the random vector.
norm_cdf = norm.cdf(normal_vector)
# Transform by inverse CDF of random vector's components.
for j in range(self._dim):
samples[i][j] = \
self._components[j].compute_inv_cdf(norm_cdf[j])[0]
return samples
def integrand_helper(self, u, v, k, j, rho_kj):
"""
Helper function for numerical integration in the
generate_gaussian_correlation() function. Implements the integrand of
equation 6 of J.M. Emery 2015 paper that needs to be integrated w/
scipy
Passing in values of the k^th and j^th component of the random variable
- u and v - and the specified correlation between them rho_kj.
"""
normal_pdf_kj = multivariate_normal.pdf([u, v],
cov=[[1, rho_kj], [rho_kj, 1]])
# f_k(x) = InvCDF_k ( Gaussian_CDF( x ) ).
f_k = self._components[k].compute_inv_cdf(norm.cdf(u))
f_j = self._components[j].compute_inv_cdf(norm.cdf(v))
integrand = f_k*f_j*normal_pdf_kj
return integrand
def get_correlation_entry(self, k, j, rho_kj):
"""
Get the correlation between this random vector's k & j components from
the correlation btwn the Gaussian random vector's k & j components.
Helper function for generate_gaussian_correlation
Need to integrate product of k/j component's inv cdf & a standard
2D normal pdf with correlation rho_kj. This is equation 6 in J.M. Emery
et al 2015.
"""
# Integrate using scipy.
k_lims = [-4, 4]
j_lims = [-4, 4]
# Get product of moments & std deviations for equation 6.
mu_k_mu_j = (self._components[k].compute_moments(1)[0] *
self._components[j].compute_moments(1)[0])
std_k_std_j = (self._components[k].get_variance() *
self._components[j].get_variance())**0.5
# Try adjusting tolerance to speed this up:
# 1.49e-8 is default for both.
opts = {'epsabs': 1.e-5, 'epsrel': 1e-5}
e_integral = integrate.nquad(self.integrand_helper, [k_lims, j_lims],
args=(k, j, rho_kj), opts=opts)
eta_kj = (e_integral - mu_k_mu_j)/std_k_std_j
return eta_kj[0]
def generate_gaussian_correlation(self):
"""
Generates the Gaussian correlation matrix that will achieve the
covariance matrix specified for this random vector when using a
translation random vector sampling approach. See J.M. Emery 2015 paper
pages 922,923 on this procedure.
        Helper function - no inputs, operates on the self._corr correlation
        matrix and generates self._gaussian_corr
"""
self._gaussian_corr = np.ones(self._corr.shape)
# Want to build interpolant from eta correlation values to rho
# correlation values.
num_points = 12
# -1 made matrix singular
rho_kj_grid = np.linspace(-0.99, 0.99, num_points)
eta_jk_grid = np.zeros(num_points)
for k in range(self._dim):
for j in range(k+1, self._dim):
print "Determining correlation entry ", k, " ", j
# Compute grid of eta/rho pts:
for i, rho_kj in enumerate(rho_kj_grid):
eta_jk_grid[i] = self.get_correlation_entry(k, j, rho_kj)
# Build interpolant to find rho value for specified eta.
rho_interp = interpolate.interp1d(eta_jk_grid, rho_kj_grid)
# Use symmetry to save time:
self._gaussian_corr[k][j] = rho_interp(self._corr[k][j])
self._gaussian_corr[j][k] = rho_interp(self._corr[k][j])
def generate_unscaled_correlation(self):
"""
Generates the unscaled correlation matrix that is matched by the SROM
        during optimization. No inputs / outputs. Internally produces
        self._unscaled_correlation from self._corr.
>> C_ij = E[ X_i X_j]
"""
self._unscaled_correlation = copy.deepcopy(self._corr)
for i in range(self._dim):
for j in range(self._dim):
mu_i_mu_j = (self._components[i].compute_moments(1)[0] *
self._components[j].compute_moments(1)[0])
std_i_std_j = (self._components[i].get_variance() *
self._components[j].get_variance())**0.5
self._unscaled_correlation[i][j] *= std_i_std_j
self._unscaled_correlation[i][j] += mu_i_mu_j
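# Standalone sketch of the translation model used by draw_random_sample(),
# written against plain scipy marginals (exponential here) instead of SROMPy
# component objects so it can run on its own; the correlation value is illustrative.
#   Theta_k = F_k^{-1}( Phi( G_k ) ),   G ~ N(0, C_gaussian)
def _translation_model_sketch(sample_size=1000, rho=0.7, seed=0):
    from scipy.stats import expon
    rng = np.random.RandomState(seed)
    gaussian_corr = np.array([[1.0, rho], [rho, 1.0]])
    chol = np.linalg.cholesky(gaussian_corr)
    # rows of z are iid N(0, I); z.dot(chol.T) has covariance chol @ chol.T = gaussian_corr
    gaussian = rng.standard_normal((sample_size, 2)).dot(chol.T)
    uniform = norm.cdf(gaussian)       # Phi(G), uniform marginals on (0, 1)
    return expon.ppf(uniform)          # F^{-1}(Phi(G)), correlated exponential marginals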
|
py | b41575156eeeb6e3c9a40b13152b2632a10c5a89 | import unittest
from simple_graph import Graph
class TestGraph(unittest.TestCase):
def test_vertices_edges(self):
G = Graph()
self.assertEqual(G.vertices, [])
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.vertices, [0, 1, 2])
self.assertEqual(G.edges, [(0, 1), (0, 2), (1, 2)])
def test_init(self):
G = Graph({'V': [1]})
self.assertEqual(G.vertices, [1])
def test_edge_weight(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.total_edge_weight(1), 2)
self.assertEqual(G.total_edge_weight(), 6)
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.total_edge_weight(), 30)
self.assertEqual(G.total_edge_weight(1), 10)
G = Graph(undirected=False)
G.add_edge(1, 2)
self.assertEqual(G.total_edge_weight(1), 0)
self.assertEqual(G.total_edge_weight(), 1)
self.assertEqual(G.total_edge_weight(2), 1)
self.assertEqual(G.total_edge_weight(1, 'out'), 1)
self.assertEqual(G.total_edge_weight(1, 'all'), 1)
def test_add_weight(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.edge_weight(1, 2), 1)
G.add_edge_weight(1, 2, 1)
self.assertEqual(G.edge_weight(1, 2), 2)
self.assertEqual(G.vertex_weight(1), 1)
G.add_vertex_weight(1, 1)
self.assertEqual(G.vertex_weight(1), 2)
def test_to_dict(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.to_dict(),
{'V': [(1, {}), (2, {}), (0, {})],
'E': [(1, 1, {'weight': 6}),
(1, 2, {'weight': 2}),
(0, 1, {'weight': 2}),
(0, 2, {'weight': 2}),
(0, 0, {'weight': 6}),
(2, 2, {'weight': 6})]})
def test_edges(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(G.edges, [(1, 1), (1, 2), (0, 1), (0, 2), (0, 0), (2, 2)])
def test_vertices(self):
G = Graph({1: {1: {'weight': 6}, 2: {'weight': 2}, 0: {'weight': 2}}, 2: {1: {'weight': 2}, 2: {'weight': 6}, 0: {'weight': 2}}, 0: {1: {'weight': 2}, 2: {'weight': 2}, 0: {'weight': 6}}})
self.assertEqual(set(G.vertices), {1, 2, 0})
G = Graph(undirected=False)
G.add_edge(1, 2)
self.assertEqual(set(G.vertices), {1, 2})
def test_add_vertex(self):
G = Graph({'E':[[0, 1], [1, 2], [0, 2]]})
G.add_vertex(3)
self.assertEqual(G.find_isolated_vertices(), [3])
def test_remove_vertex(self):
G = Graph(undirected=False)
G.add_edge(1, 2)
G.remove_vertex(1)
self.assertEqual(set(G.vertices), {2})
G.remove_edge(1, 2)
G = Graph({'V': ['1', '2', '0', '4', '3', '7', '6', '5', '11', '10', '8', '15', '14', '9', '12', '13'], 'E': [('1', '2'), ('1', '4'), ('1', '7'), ('2', '0'), ('2', '4'), ('2', '6'), ('0', '3'), ('0', '5'), ('7', '5'), ('7', '6'), ('5', '11'), ('4', '10'), ('8', '15'), ('8', '14'), ('8', '9'), ('14', '9'), ('9', '12'), ('10', '14'), ('10', '13'), ('11', '10'), ('6', '11'), ('3', '7')]})
G.remove_vertex('1')
self.assertNotIn('1', G.vertices)
self.assertNotIn(('1', '2'), G.edges)
self.assertNotIn(('1', '4'), G.edges)
self.assertNotIn(('1', '7'), G.edges)
G.remove_vertex('4')
self.assertNotIn('4', G.vertices)
self.assertNotIn(('2', '4'), G.edges)
self.assertNotIn(('4', '10'), G.edges)
G = Graph({'E':{ "a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
}})
G.remove_vertex('a')
G.remove_vertex('c')
self.assertEqual(set(G.vertices), {'d', 'b', 'e', 'f'})
self.assertEqual(G.edges, [])
def test_remove_edge(self):
G = Graph({'E': [(1, 2)]})
G.remove_edge(1, 2)
G.remove_edge(2, 1)
def test_neighbors(self):
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(set(G.neighbors(1)), {0, 2})
def test_add_edge(self):
G = Graph()
self.assertEqual(G.has_edge(1, 2), False)
G = Graph({0: [1, 2], 1: [2]})
self.assertEqual(G.has_edge(2, 3), False)
G.add_edge(2, 3)
self.assertEqual(G.has_edge(2, 3), True)
self.assertEqual(G.total_edge_weight(), 8)
G.add_edge(2, 3)
self.assertEqual(G.total_edge_weight(), 8)
G = Graph()
G.add_edge('a', 'z')
G.add_edge('x', 'y')
self.assertEqual(G.has_edge('a', 'z'), True)
self.assertEqual(G.has_edge('x', 'y'), True)
def test_isolate(self):
G = Graph({
"a" : ["c"],
"b" : ["c", "e"],
"c" : ["a", "b", "d", "e"],
"d" : ["c"],
"e" : ["c", "b"],
"f" : []
})
self.assertEqual(G.find_isolated_vertices(), ['f'])
G = Graph({1: [2, 3], 2: [3]}, undirected = False)
self.assertEqual(G.find_isolated_vertices(), [])
def test_find_path(self):
G = Graph({
"a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
})
self.assertEqual(G.find_path('a', 'b'), ['a', 'd', 'c', 'b'])
self.assertEqual(G.find_path('a', 'f'), None)
self.assertEqual(G.find_path('c', 'c'), ['c'])
def test_find_all_paths(self):
G = Graph({
"a" : ["d", "f"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["d"]
})
self.assertEqual(G.find_all_paths('a', 'b'), [['a', 'd', 'c', 'b'], ['a', 'f', 'd', 'c', 'b']])
self.assertEqual(G.find_all_paths('a', 'f'), [['a', 'd', 'f'], ['a', 'f']])
self.assertEqual(G.find_all_paths('c', 'c'), [['c']])
def test_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.degree('a'), 1)
self.assertEqual(G.degree('c'), 5)
self.assertEqual(G.degree('d'), 2)
self.assertEqual(G.degree('f'), 0)
def test_max_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.max_degree(), 5)
def test_min_degree(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.min_degree(), 0)
def test_degrees(self):
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(G.degrees(), [5, 2, 1, 1, 1, 0])
def test_density(self):
G = Graph({
"a" : ["d","f"],
"b" : ["c","b"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(float(f"{G.density():.4f}"), 0.3889)
G = Graph(
{'V': ['a', 'd', 'b', 'c', 'e', 'f'], 'E': [('a', 'd'), ('b', 'c'), ('c', 'c'), ('c', 'e'), ('d', 'c')]})
self.assertEqual(float(f"{G.density():.4f}"), 0.2778)
complete_graph = {
"a" : ["b","c"],
"b" : ["a","c"],
"c" : ["a","b"]
}
G = Graph(complete_graph)
self.assertEqual(float(f"{G.density():.4f}"), 1.0)
isolated_graph = {
"a" : [],
"b" : [],
"c" : []
}
G = Graph(isolated_graph)
self.assertEqual(float(f"{G.density():.4f}"), 0.0)
def test_is_connected(self):
G = Graph({
"a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
})
self.assertEqual(G.is_connected(), False)
G = Graph({ "a" : ["d","f"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(G.is_connected(), True)
G = Graph({ "a" : ["d","f"],
"b" : ["c","b"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : ["a"]
})
self.assertEqual(G.is_connected(), True)
def test_diameter(self):
G = Graph({
"a" : ["c"],
"b" : ["c","e","f"],
"c" : ["a","b","d","e"],
"d" : ["c"],
"e" : ["b","c","f"],
"f" : ["b","e"]
})
self.assertEqual(G.diameter(), 3)
def test_edge_betweenness(self):
G = Graph({'s': {'u':{'weight': 10}, 'x':{'weight': 5}},
'u': {'v':{'weight': 1}, 'x':{'weight': 2}},
'v': {'y':{'weight': 4}},
'x':{'u':{'weight': 3},'v':{'weight': 9},'y':{'weight': 2}},
'y':{'s':{'weight': 7},'v':{'weight': 6}}}, undirected=False)
self.assertDictEqual(G.edge_betweenness(), {('s', 'u'): 0.0,
('s', 'x'): 0.4,
('u', 'v'): 0.15000000000000002,
('u', 'x'): 0.15000000000000002,
('v', 'y'): 0.2,
('x', 'u'): 0.30000000000000004,
('x', 'v'): 0.0,
('x', 'y'): 0.25,
('y', 's'): 0.4,
('y', 'v'): 0.05})
def test_connected_components(self):
G = Graph({'E':[(1, 2), (2, 3), (4, 5)] })
self.assertEqual(G.connected_components, [[1, 2, 3], [4, 5]])
def test_max_cliques(self):
G = Graph({'E': [(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (3, 4), (4, 5)]})
self.assertEqual(G.max_cliques, [[1, 4, 2, 3], [1, 4, 5]])
if __name__ == '__main__':
unittest.main()
|
py | b41575d7d7094a2f0cac0f20e4e5ae0c1f18a159 | from operators.create_table import CreateTableOperator
from operators.data_quality import DataQualityOperator
from operators.load_dimension import LoadDimensionOperator
from operators.load_fact import LoadFactOperator
from operators.stage_redshift import StageToRedshiftOperator
__all__ = [
'CreateTableOperator',
'StageToRedshiftOperator',
'LoadFactOperator',
'LoadDimensionOperator',
'DataQualityOperator',
]
|
py | b415761d82f2712574da153831d2be167cfd2be9 | # Omid55
# Date: 16 Oct 2018
# Author: Omid Askarisichani
# Email: [email protected]
# General utility module.
from __future__ import division, print_function, absolute_import, unicode_literals
from itertools import permutations
import os
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import pickle as pk
import dill
import networkx as nx
import seaborn as sns
import shelve
# import enforce
from numpy.linalg import norm
from scipy.stats import pearsonr
from scipy.spatial.distance import cosine
from typing import Dict
from typing import List
from typing import Tuple
from typing import Text
from statsmodels.tsa.stattools import grangercausalitytests
# @enforce.runtime_validation
def print_dict_pretty(input_dict: Dict) -> None:
"""Prints the input dictionary line by line and key sorted.
Args:
input_dict: Dictionary to be printed.
Returns:
None
Raises:
None
"""
sorted_keys = sorted(input_dict.keys())
for key in sorted_keys:
print('{}: {}'.format(key, input_dict[key]))
# @enforce.runtime_validation
def check_required_columns(
data: pd.DataFrame, columns: List[Text]) -> None:
"""Checks whether input dataframe includes all required columns.
Args:
input_dict: Dataframe to be checked.
columns: List of names for columns to be checked in dataframe.
Returns:
None
Raises:
ValueError: If input data does not include any of required columns.
"""
missing_columns = list(set(columns) - set(data.columns))
if missing_columns:
raise ValueError('Missing required columns: {}.'.format(
', '.join(map(str, missing_columns))))
# @enforce.runtime_validation
def graph_equals(
g1: nx.DiGraph,
g2: nx.DiGraph,
weight_column_name: Text = 'weight') -> bool:
"""Checks if two graphs are equal.
If weight_column_name is None, then it does not check weight values.
Args:
g1: First graph to be compared.
g2: Second graph to be compared.
weight_column_name: The name of weight column.
Returns:
Boolean whether g1 equals g2 or not.
Raises:
None.
"""
if g1.nodes() != g2.nodes():
return False
if g1.edges() != g2.edges():
return False
if weight_column_name:
for edge in g1.edges():
w1 = g1.get_edge_data(edge[0], edge[1])[weight_column_name]
w2 = g2.get_edge_data(edge[0], edge[1])[weight_column_name]
if w1 != w2:
return False
return True
# @enforce.runtime_validation
def assert_graph_equals(
g1: nx.DiGraph,
g2: nx.DiGraph,
weight_column_name: Text = 'weight') -> None:
"""Checks if two graphs are equal.
If weight_column_name is None, then it does not check weight values.
Args:
g1: First graph to be compared.
g2: Second graph to be compared.
weight_column_name: The name of weight column.
Returns:
Boolean whether g1 equals g2 or not.
Raises:
AssertionError: If the two graphs are not equal. It also prints a
message why they do not match for easier debugging purposes.
"""
if g1.nodes() != g2.nodes():
raise AssertionError(
'Two graphs do not have the same nodes: {} != {}'.format(
g1.nodes(), g2.nodes()))
if g1.edges() != g2.edges():
raise AssertionError(
'Two graphs do not have the same edges: {} != {}'.format(
g1.edges(), g2.edges()))
if weight_column_name:
for edge in g1.edges():
w1 = g1.get_edge_data(edge[0], edge[1])[weight_column_name]
w2 = g2.get_edge_data(edge[0], edge[1])[weight_column_name]
if w1 != w2:
raise AssertionError(
'Two graphs do not have the same weight at {}: {} != {}'
.format(edge, w1, w2))
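# Illustrative usage sketch (hypothetical; not part of the original module).
# Builds two small weighted digraphs and compares them with the helpers above.
def _example_graph_comparison():
    g1 = nx.DiGraph()
    g1.add_edge('a', 'b', weight=0.5)
    g2 = nx.DiGraph()
    g2.add_edge('a', 'b', weight=0.5)
    # Same nodes, edges, and weights, so this prints True.
    print(graph_equals(g1, g2))
    # Once the weights differ, assert_graph_equals raises an AssertionError
    # with a message explaining the mismatch.
    g2['a']['b']['weight'] = 0.9
    try:
        assert_graph_equals(g1, g2)
    except AssertionError as error:
        print(error)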
# @enforce.runtime_validation
def sub_adjacency_matrix(
adj_matrix: np.ndarray,
rows: List[int]) -> np.ndarray:
"""Computes a desired subset of given adjacency matrix.
Args:
adj_matrix: Given adjacency matrix.
rows: List of desired rows and same columns for being in the subgraph.
Returns:
Adjacency matrix only including the desired rows and columns.
Raises:
None.
"""
return adj_matrix[np.ix_(rows, rows)]
# @enforce.runtime_validation
def swap_nodes_in_matrix(
matrix: np.ndarray,
node1: int,
node2: int,
inplace: bool = False) -> np.ndarray:
"""Swaps two nodes in a matrix and return the resulting matrix.
Args:
matrix: Input matrix to be swapped.
node1: First node to be swapped with second one.
node2: Second node to be swapped with first one.
Returns:
Matrix with swapped nodes.
Raises:
None.
"""
if not inplace:
modified_matrix = np.copy(matrix)
else:
modified_matrix = matrix
modified_matrix[:, [node1, node2]] = modified_matrix[:, [node2, node1]]
modified_matrix[[node1, node2], :] = modified_matrix[[node2, node1], :]
return modified_matrix
# @enforce.runtime_validation
def make_matrix_row_stochastic(
matrix: np.ndarray,
eps: float = 0) -> np.ndarray:
"""Makes the matrix row-stochastic (sum of each row is 1)
Args:
matrix: Input matrix.
Returns:
Matrix which its rows sum up to 1.
Raises:
None.
"""
matrix = np.array(matrix) # To make sure it is numpy array and not matrix.
matrix += eps
if 0 in np.sum(matrix, axis=1):
matrix += 0.01
return np.nan_to_num(matrix.T / np.sum(matrix, axis=1)).T
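# Illustrative usage sketch (hypothetical; not part of the original module).
# Turns an arbitrary non-negative matrix into a row-stochastic one.
def _example_make_matrix_row_stochastic():
    matrix = np.array([[1, 3], [2, 2]])
    stochastic = make_matrix_row_stochastic(matrix)
    # Each row now sums to 1: [[0.25, 0.75], [0.5, 0.5]].
    print(stochastic, np.sum(stochastic, axis=1))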
# @enforce.runtime_validation
def save_figure(
fig_object: matplotlib.figure.Figure,
file_path: Text) -> None:
"""Fully saves the figure in pdf and pkl format for later modification.
This function saves the figure in a pkl and pdf such that later can
be loaded and easily be modified.
To have the figure object, one can add the following line of the code
to the beginning of their code:
fig_object = plt.figure()
Args:
fig_object: Figure object (computed by "plt.figure()")
        file_path: File path as a string, without the file extension.
Returns:
None.
Raises:
None.
"""
# Saves as pdf.
fig_object.savefig(file_path + '.pdf', dpi=fig_object.dpi)
# Also saves as pickle.
with open(file_path + '.pkl', 'wb') as handle:
pk.dump(fig_object, handle, protocol=pk.HIGHEST_PROTOCOL)
# @enforce.runtime_validation
def load_figure(file_path: Text) -> matplotlib.figure.Figure:
"""Fully loads the saved figure to be able to be modified.
It can be easily showed by:
fig_object.show()
Args:
        file_path: File path as a string, without the file extension.
Returns:
Figure object.
Raises:
None.
"""
with open(file_path + '.pkl', 'rb') as handle:
fig_object = pk.load(handle)
return fig_object
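# Illustrative usage sketch (hypothetical; not part of the original module).
# Round-trips a figure through save_figure/load_figure; '/tmp/example_figure'
# is just a placeholder path.
def _example_save_and_load_figure():
    fig_object = plt.figure()
    plt.plot([0, 1, 2], [0, 1, 4])
    save_figure(fig_object, '/tmp/example_figure')  # writes .pdf and .pkl
    restored = load_figure('/tmp/example_figure')
    restored.show()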
# @enforce.runtime_validation
def save_all_variables_of_current_session(
locals_: dict,
file_path: Text,
verbose: bool = False) -> None:
"""Saves all defined variables in the current session to be used later.
It works similar to save_all in MATLAB. It is super useful when one is
trying to save everything in a notebook for later runs of a subset of cells
of the notebook.
Args:
locals_: Just call this as the first parameter ALWAYS: locals()
        file_path: File path as a string (with extension).
verbose: Whether to print the name of variables it is saving.
Returns:
None.
Raises:
None.
"""
my_shelf = shelve.open(file_path, 'n')
# for key in dir():
for key, value in locals_.items():
if (not key.startswith('__') and
not key.startswith('_') and
key not in ['self', 'exit', 'Out', 'quit', 'imread'] and
str(type(value)) not in [
"<class 'module'>", "<class 'method'>"]):
try:
if verbose:
print('key: ', key)
my_shelf[key] = value
except TypeError:
print('Just this variable was not saved: {0}'.format(key))
my_shelf.close()
# @enforce.runtime_validation
def load_all_variables_of_saved_session(
globals_: dict,
file_path: Text) -> None:
"""Loads all defined variables from a saved session into current session.
It should be used after running "save_all_variables_of_current_session".
Args:
globals_: Just call this as the first parameter ALWAYS: globals()
        file_path: File path as a string (with extension).
Returns:
None.
Raises:
None.
"""
my_shelf = shelve.open(file_path)
for key in my_shelf:
try:
globals_[key] = my_shelf[key]
except AttributeError:
print('Just this variable was not loaded: ', key)
my_shelf.close()
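# Illustrative usage sketch (hypothetical; not part of the original module).
# In a notebook these are typically called with locals()/globals() directly;
# 'session.out' is a placeholder file name.
def _example_session_checkpoint():
    results = [1, 2, 3]  # picked up through locals() below
    save_all_variables_of_current_session(locals(), 'session.out')
    load_all_variables_of_saved_session(globals(), 'session.out')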
def swap_two_elements_in_matrix(
matrix: np.ndarray,
x1: int,
y1: int,
x2: int,
y2: int,
inplace: bool = True) -> np.ndarray:
"""Swaps the content of two given elements from the matrix.
    Args:
        matrix: Input matrix.
        x1: Row index of the first element.
        y1: Column index of the first element.
        x2: Row index of the second element.
        y2: Column index of the second element.
        inplace: Whether to modify the given matrix in place.
    Returns:
        Matrix with the contents of the two elements swapped.
    Raises:
        ValueError: If any of the coordinates falls outside the matrix dimensions.
"""
n, m = matrix.shape
if ((x1 < 0 or x1 >= n) or
(x2 < 0 or x2 >= n) or
(y1 < 0 or y1 >= m) or
(y2 < 0 or y2 >= m)):
raise ValueError(
'Given coordinates do not fall into matrix dimensions.'
' Matrix size: ({}, {}), Coordinates: ({}, {}), ({}, {}).'.format(
n, m, x1, y1, x2, y2))
if not inplace:
modified_matrix = matrix.copy()
else:
modified_matrix = matrix
first_element_content = modified_matrix[x1, y1]
modified_matrix[x1, y1] = modified_matrix[x2, y2]
modified_matrix[x2, y2] = first_element_content
return modified_matrix
# @enforce.runtime_validation
def dgraph2adjacency(dgraph: nx.DiGraph) -> np.ndarray:
"""Gets the dense adjancency matrix from the graph.
Args:
dgraph: Directed graph to compute its adjancency matrix.
Returns:
Adjacency matrix of the given dgraph in dense format (np.array(n * n)).
Raises:
None.
"""
return np.array(nx.adjacency_matrix(dgraph).todense())
# @enforce.runtime_validation
def adjacency2digraph(
adj_matrix: np.ndarray,
similar_this_dgraph: nx.DiGraph = None) -> nx.DiGraph:
"""Converts the adjacency matrix to directed graph.
If similar_this_graph is given, then the final directed graph has the same
node labeling as the given graph has.
Using dgraph2adjacency and then adjacency2digraph for the same dgraph is
very practical. Example:
adj = dgraph2adjacency(dgraph)
# Then modify adj as wish
new_dgraph = adjacency2digraph(adj, dgraph)
# Now new_dgraph has the same node labels as dgraph has before.
Args:
        adj_matrix: Square adjacency matrix.
Returns:
Directed graph with the adj_matrix and same node names as given dgraph.
Raises:
ValueError: If adj_matrix was not squared.
"""
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise ValueError('Adjacency matrix is not squared.')
if similar_this_dgraph:
node_mapping = {
i: list(similar_this_dgraph.nodes())[i]
for i in range(similar_this_dgraph.number_of_nodes())}
return _adjacency2digraph_with_given_mapping(
adj_matrix=adj_matrix, node_mapping=node_mapping)
return _adjacency2digraph_with_given_mapping(adj_matrix=adj_matrix)
# @enforce.runtime_validation
def _adjacency2digraph_with_given_mapping(
adj_matrix: np.ndarray,
node_mapping: Dict = None) -> nx.DiGraph:
"""Converts the adjacency matrix to directed graph.
Args:
        adj_matrix: Square adjacency matrix.
node_mapping: Dictionary for every node and their current and new name.
Returns:
Directed graph with the adj_matrix and same node names as given dgraph.
Raises:
ValueError: If adj_matrix was not squared.
"""
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise ValueError('Adjacency matrix is not squared.')
    new_dgraph = nx.from_numpy_matrix(adj_matrix, create_using=nx.DiGraph())
    if node_mapping:
        return nx.relabel_nodes(new_dgraph, mapping=node_mapping)
    return new_dgraph
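# Illustrative usage sketch (hypothetical; not part of the original module).
# Round-trips a digraph through its adjacency matrix, as described in the
# adjacency2digraph docstring, while keeping the original node labels.
def _example_adjacency_round_trip():
    dgraph = nx.DiGraph()
    dgraph.add_edge('a', 'b', weight=1.0)
    dgraph.add_edge('b', 'c', weight=2.0)
    adj = dgraph2adjacency(dgraph)
    adj[0, 2] = 0.5  # modify the adjacency matrix as desired (adds an a -> c edge)
    new_dgraph = adjacency2digraph(adj, similar_this_dgraph=dgraph)
    print(new_dgraph.edges(data=True))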
# @enforce.runtime_validation
def save_it(obj: object, file_path: Text, verbose: bool = False) -> None:
"""Saves the input object in the given file path.
Args:
        obj: Object to be saved.
        file_path: File path as a string (with extension).
verbose: Whether to print information about saving successfully or not.
Returns:
None.
Raises:
None.
"""
try:
with open(file_path, 'wb') as handle:
pk.dump(obj, handle, protocol=pk.HIGHEST_PROTOCOL)
if verbose:
print('{} is successfully saved.'.format(file_path))
except Exception as e:
if verbose:
print('Pickling was failed:')
print(e)
print('Now, trying dill...')
try:
try:
os.remove(file_path)
except:
pass
file_path += '.dill'
with open(file_path, 'wb') as handle:
dill.dump(obj, handle)
if verbose:
print('{} is successfully saved.'.format(file_path))
except Exception as e:
try:
os.remove(file_path)
except:
pass
print('Sorry. Pickle and Dill both failed. Here is the exception:')
print(type(e))
print(e.args)
print(e)
# @enforce.runtime_validation
def load_it(file_path: Text, verbose: bool = False) -> object:
"""Loads from the given file path a saved object.
Args:
        file_path: File path as a string (with extension).
verbose: Whether to print info about loading successfully or not.
Returns:
The loaded object.
Raises:
None.
"""
obj = None
with open(file_path, 'rb') as handle:
if file_path.endswith('.dill'):
obj = dill.load(handle)
else:
obj = pk.load(handle)
if verbose:
print('{} is successfully loaded.'.format(file_path))
return obj
# @enforce.runtime_validation
def plot_box_plot_for_transitions(
matrix: np.ndarray,
balanced_ones: np.ndarray,
with_labels: bool = True,
fname: Text = '',
ftitle: Text = '') -> None:
"""Plots a boxplot for transitoins of a set of balanced/unbalanced states.
Args:
matrix: A stochastic transition matrix.
balanced_ones: Array of boolean of which state is balanced or not.
fname: File name which if is given, this function saves the figure as.
ftitle: Figure title if given.
Returns:
None.
Raises:
ValueError: When the length of matrix and balanced_ones does not match.
"""
if len(matrix) != len(balanced_ones):
raise ValueError(
'Matrix and balanced states should have the same length: '
'len(matrix): {}, len(balanced_ones): {}.'.format(
len(matrix), len(balanced_ones)))
# Computes the transitions.
probs1 = np.sum(matrix[balanced_ones, :][:, balanced_ones], axis=1)
probs2 = np.sum(matrix[~balanced_ones, :][:, balanced_ones], axis=1)
probs3 = np.sum(matrix[~balanced_ones, :][:, ~balanced_ones], axis=1)
probs4 = np.sum(matrix[balanced_ones, :][:, ~balanced_ones], axis=1)
# Draws the boxplot.
labels = None
if with_labels:
labels = (
['balanced -> balanced',
'unbalanced -> balanced',
'unbalanced -> unbalanced',
'balanced -> unbalanced'])
f = plt.figure()
bp = plt.boxplot(
[np.array(probs1),
np.array(probs2),
np.array(probs3),
np.array(probs4)],
labels=labels,
vert=False,
showfliers=False)
# Default values:
# whis=1.5
if ftitle:
plt.title(ftitle)
# Makes the linewidth larger.
for box in bp['boxes']:
# change outline color
box.set(linewidth=2)
# Changes the color and linewidth of the whiskers.
for whisker in bp['whiskers']:
whisker.set(linewidth=2)
# Changes the color and linewidth of the caps.
for cap in bp['caps']:
cap.set(linewidth=2)
# Changes color and linewidth of the medians.
for median in bp['medians']:
median.set(linewidth=2)
# If filename is given then saves the file.
if fname:
f.savefig(fname+'.pdf', bbox_inches='tight')
def draw_from_empirical_distribution(
data_points: np.ndarray,
nbins: int = 10) -> float:
"""Draws a sample from the empricial distribution of the given data points.
Args:
data_points: Array of one dimensional data points.
nbins: Number of bins to consider for empirical distribution.
Returns:
A drawn sample from the same emprical distribution of data points.
Raises:
ValueError: When nbins is not positive.
Also when the number of data_points is less than nbins.
"""
if nbins <= 0:
raise ValueError('Number of bins should be positive. '
'It was {}.'.format(nbins))
if len(data_points) < nbins:
raise ValueError('Number of data points should be more than '
'number of bins. '
'#data points = {}, #bins = {}.'.format(
len(data_points), nbins))
    if len(data_points) == 0:
raise ValueError('Data points is empty.')
bin_volume, bin_edges = np.histogram(data_points, bins=nbins)
probability = bin_volume / np.sum(bin_volume)
selected_bin_index = np.random.choice(range(nbins), 1, p=probability)
drawn_sample = np.random.uniform(
low=bin_edges[selected_bin_index],
high=bin_edges[selected_bin_index + 1],
size=1)[0]
return drawn_sample
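# Illustrative usage sketch (hypothetical; not part of the original module).
# Draws one synthetic sample that follows the empirical distribution of the data.
def _example_draw_from_empirical_distribution():
    data_points = np.random.normal(loc=0.0, scale=1.0, size=1000)
    sample = draw_from_empirical_distribution(data_points, nbins=20)
    print(sample)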
def shuffle_matrix_in_given_order(matrix: np.ndarray,
order: np.ndarray) -> np.array:
"""Shuffles a square matrix in a given order of rows and columns.
Args:
matrix: Given matrix to be shuffled.
order: New order of rows and columns.
Returns:
Matrix in given order of rows and columns.
Raises:
ValueError: If matrix is not square or the number of elements in
order does not equal to number of rows in the matrix.
"""
if matrix.shape[0] != matrix.shape[1]:
raise ValueError('Matrix was not square. Matrix shape: {}'.format(
matrix.shape))
if len(order) != matrix.shape[0]:
raise ValueError('The number of elements in the order does not match'
' the number of rows in the matrix.'
' Matrix rows: {} != length of order: {}'.format(
matrix.shape[0], len(order)))
return matrix[order, :][:, order]
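# Illustrative usage sketch (hypothetical; not part of the original module).
# Reorders the rows and columns of a small matrix with a given permutation.
def _example_shuffle_matrix_in_given_order():
    matrix = np.arange(9).reshape(3, 3)
    order = np.array([2, 0, 1])
    print(shuffle_matrix_in_given_order(matrix, order))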
def replicate_matrices_in_train_dataset_with_reordering(
X_train: List[Dict],
y_train: List[Dict],
matrix_string_name = 'influence_matrix') -> Tuple[List[Dict], List[Dict]]:
"""Replicates matrices in the training dataset to have all orders of nodes.
Args:
X_train: Training features with vectors and matrices.
y_train: Training matrix labels (groundtruth).
matrix_string_name: The string name of groundtruth matrix.
    Returns:
Replicated X_train and y_train with the same order with m! * n samples
where X_train has n samples and matrices have m columns.
Raises:
ValueError: If the length of X_train and y_train do not match.
"""
if len(X_train) != len(y_train):
raise ValueError('Length of features and labels do not match. '
'X_train len: {} != y_train len: {}'.format(
len(X_train), len(y_train)))
n = y_train[0][matrix_string_name].shape[1]
replicated_X_train = []
replicated_y_train = []
for index in range(len(X_train)):
for order in permutations(np.arange(n)):
rep_X_train_dt = {}
rep_y_train_dt = {}
for element_type, element in X_train[index].items():
if len(element.shape) == 1:
rep_X_train_dt[element_type] = element[list(order)]
elif element.shape[0] == element.shape[1]: # if it was a network:
rep_X_train_dt[element_type] = (
shuffle_matrix_in_given_order(element, order))
else: # if it was a matrix of embeddings:
rep_X_train_dt[element_type] = (
element[order, :])
rep_y_train_dt[matrix_string_name] = (
shuffle_matrix_in_given_order(
y_train[index][matrix_string_name], order))
replicated_X_train.append(rep_X_train_dt)
replicated_y_train.append(rep_y_train_dt)
return replicated_X_train, replicated_y_train
def matrix_estimation_error(
true_matrix: np.ndarray,
pred_matrix: np.ndarray,
type_str: Text = 'normalized_frob_norm') -> float:
"""Computes the error (loss) in matrix estimation problem.
Different types of loss are supported as follows,
normalized_frob_norm: (Frobenius) norm2(X - \widetilde{X}) / norm2(X).
mse: How far each element of matrices on average MSE are from each others.
neg_corr: Negative correlation of vectorized matrices if stat significant.
cosine_dist: Cosine distance of vectorized matrices from each other.
l1: L1-norm distance in each row (since they are row-stochastic).
kl: KL divergence in every row of the row-stochastic matrix.
Args:
true_matrix: The groundtruth matrix.
pred_matrix: The predicted matrix.
type_str: The type of error to be computed between the two matrices.
Returns:
The error (loss) in float.
Raises:
ValueError: If the two matrices do not have the same dimensions. Also,
if an invalid type_str was given.
"""
true_matrix = np.array(true_matrix)
pred_matrix = np.array(pred_matrix)
n, m = true_matrix.shape
if true_matrix.shape != pred_matrix.shape:
raise ValueError('The shape of two matrices do not match.'
' true: {} and predicted: {}.'.format(
true_matrix.shape, pred_matrix.shape))
if type_str == 'normalized_frob_norm':
frob_norm_of_difference = norm(true_matrix - pred_matrix)
normalized_frob_norm_of_difference = frob_norm_of_difference / norm(
true_matrix)
return normalized_frob_norm_of_difference
elif type_str == 'mse':
return (np.square(true_matrix - pred_matrix)).mean(axis=None)
elif type_str == 'neg_corr':
# (r, p) = spearmanr(
# np.array(true_matrix.flatten()),
# np.array(pred_matrix.flatten()))
(r, p) = pearsonr(
np.array(true_matrix.flatten()),
np.array(pred_matrix.flatten()))
if p > 0.05:
r = 0
return - r
elif type_str == 'cosine_dist':
err = cosine(
np.array(true_matrix.flatten()), np.array(pred_matrix.flatten()))
return err
# L1-norm distance in each row (since they are row-stochastic).
elif type_str == 'l1':
return np.sum(abs(true_matrix - pred_matrix)) / n
# Which is the same as:
# return np.mean(
# [np.linalg.norm(true_matrix[i, :] - pred_matrix[i, :], 1)
# for i in range(n)])
# Distribution-based error metrics:
elif type_str == 'kl':
# The definition of KL divergence uses the following conventions
# (see Cover and Thomas, Elements of Information Theory):
        # 0 * log(0 / 0) = 0, 0 * log(0 / q) = 0, p * log(p / 0) = infinity.
err = 0
for i in range(n):
for j in range(m):
err += sp.special.kl_div(true_matrix[i, j], pred_matrix[i, j])
# if true_matrix[i, j] > 0:
# if pred_matrix[i, j] == 0:
# err += 1000 # instead of becoming nan. << CHECK HERE >>
# else:
# err += true_matrix[i, j] * (
# np.log2(true_matrix[i, j]) - np.log2(pred_matrix[i, j]))
err /= n
return err
# elif type_str == 'cross_entropy':
# eps = 0.01
# err = 0
# for i in range(n):
# if any(pred_matrix[i, :] == 0):
# pred_matrix[i, :] += eps
# for j in range(m):
# err -= true_matrix[i, j] * np.log2(pred_matrix[i, j])
# err /= n
# return err
else:
raise ValueError('Wrong type_str was given, which was: {}'.format(
type_str))
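# Illustrative usage sketch (hypothetical; not part of the original module).
# Compares a true row-stochastic matrix against a perturbed estimate under a
# few of the supported error types.
def _example_matrix_estimation_error():
    true_matrix = np.array([[0.7, 0.3], [0.4, 0.6]])
    pred_matrix = np.array([[0.6, 0.4], [0.5, 0.5]])
    for error_type in ['normalized_frob_norm', 'mse', 'l1', 'kl']:
        print(error_type,
              matrix_estimation_error(true_matrix, pred_matrix, error_type))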
def most_influential_on_others(
influence_matrix: np.ndarray,
remove_self_influence: bool = True) -> List[int]:
"""Gets the index of the most influential individual using influence matrix.
Influence on everyone is computed by summation of each column in an
    influence matrix. If remove_self_influence is True, then only the influence
    that one person has on others, as reported by those others, is taken into
    account (the diagonal is filled with 0s).
    Args:
        influence_matrix: Row-stochastic influence matrix.
        remove_self_influence: Whether to zero out self-influence (the diagonal)
            before computing the column sums.
Returns:
The list of indices of the most influential person(s).
Raises:
None.
"""
matrix = np.array(influence_matrix)
if remove_self_influence:
np.fill_diagonal(matrix, 0) # Only the influence on others.
how_influential_one_is = np.sum(matrix, axis=0)
# return np.argmax(how_influential_one_is) # Works only for the first one.
return np.where(
how_influential_one_is == np.max(how_influential_one_is))[0].tolist()
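# Illustrative usage sketch (hypothetical; not part of the original module).
# Person 2 receives the largest total influence from the other group members.
def _example_most_influential_on_others():
    influence_matrix = np.array([
        [0.5, 0.1, 0.4],
        [0.2, 0.3, 0.5],
        [0.1, 0.2, 0.7]])
    print(most_influential_on_others(influence_matrix))  # prints [2]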
def compute_relationship(
v1: np.ndarray,
v2: np.ndarray,
v1_label: Text = 'v1',
v2_label: Text = 'v2',
maxlag: int = 4,
fname: Text = '',
verbose: bool = True) -> dict:
"""Computes the relationship between two vectors.
Granger causality tests whether the time series in the 2nd column Granger
causes the time series in the 1st column. In here it means, if v2 Granger
causes v1 or not.
Args:
v1: First array of numbers.
v2: Second array of numbers.
v1_label: The string label for v1.
v2_label: The string label for v2.
maxlag: Maximum lag in the Granger causality test.
fname: File name. If empty string, it does not save it.
        verbose: Whether the function should print the full report.
Returns:
Dictionary of correlation p-value, r-value and causality report.
Raises:
        ValueError: If there are insufficient observations for the given maximum lag.
"""
# Correlation test.
rval, pval = pearsonr(v1, v2)
if verbose:
significant = ''
if pval < 0.05:
significant = 'yay!!!!'
print('r-val: {}\np-val: {} \t{}'.format(rval, pval, significant))
# Scatter plot.
f = plt.figure()
sns.scatterplot(v2, v1)
# plt.plot((min(v1), max(v2)), (max(v1), min(v2)), 'r')
plt.plot(np.linspace(min(v2), max(v2)), np.linspace(min(v1), max(v1)), 'r')
plt.xlabel(v2_label)
plt.ylabel(v1_label)
plt.show()
if fname:
f.savefig('{}.png'.format(fname), bbox_inches='tight')
f.savefig('{}.pdf'.format(fname), bbox_inches='tight')
# Causality test.
causality_res = grangercausalitytests(
np.column_stack((v1, v2)),
maxlag=maxlag,
verbose=verbose)
return {'rval': rval, 'pval': pval, 'causality': causality_res}
# @enforce.runtime_validation
def _get_eigen_decomposition_of_markov_transition(
transition_matrix: np.ndarray,
aperiodic_irreducible_eps: float = 0.0001) -> Tuple:
"""Gets the eigen value and vectors from transition matrix.
A Markov chain is irreducible if we can go from any state to any state.
This entails all transition probabilities > 0.
A Markov chain is aperiodic if all states are accessible from all other
states. This entails all transition probabilities > 0.
Args:
transition_matrix: Square Markov transition matrix.
aperiodic_irreducible_eps: To make the matrix aperiodic/irreducible.
Returns:
Dictionary of eigen val/vec of irreducible and aperiodic markov chain.
Raises:
ValueError: If the matrix was not squared.
"""
if transition_matrix.shape[0] != transition_matrix.shape[1]:
raise ValueError('Transition matrix is not squared.')
matrix = transition_matrix.copy()
matrix = np.nan_to_num(matrix)
matrix += aperiodic_irreducible_eps
aperiodic_irreducible_transition_matrix = (
matrix.T / np.sum(matrix, axis=1)).T
eigen_values, eigen_vectors = np.linalg.eig(
aperiodic_irreducible_transition_matrix.T)
return eigen_values, eigen_vectors
# @enforce.runtime_validation
def get_stationary_distribution(
transition_matrix: np.ndarray,
aperiodic_irreducible_eps: float = 0.0001) -> np.ndarray:
"""Gets the stationary distribution of given transition matrix.
A Markov chain is irreducible if we can go from any state to any state.
This entails all transition probabilities > 0.
A Markov chain is aperiodic if all states are accessible from all other
states. This entails all transition probabilities > 0.
Args:
transition_matrix: Square Markov transition matrix.
aperiodic_irreducible_eps: To make the matrix aperiodic/irreducible.
Returns:
Array of size one dimension of matrix.
Raises:
ValueError: If the matrix was not squared.
"""
eigen_values, eigen_vectors = (
_get_eigen_decomposition_of_markov_transition(
transition_matrix=transition_matrix,
aperiodic_irreducible_eps=aperiodic_irreducible_eps))
index = np.where(eigen_values > 0.99)[0][0]
stationary_distribution = [item.real for item in eigen_vectors[:, index]]
stationary_distribution /= np.sum(stationary_distribution)
return stationary_distribution
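# Illustrative usage sketch (hypothetical; not part of the original module).
# The stationary distribution of this two-state chain is roughly [1/3, 2/3].
def _example_get_stationary_distribution():
    transition_matrix = np.array([[0.8, 0.2],
                                  [0.1, 0.9]])
    print(get_stationary_distribution(transition_matrix))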
def get_relative_reflected_appraisal_matrix(
influence_matrix: np.ndarray) -> np.ndarray:
"""Gets relative reflected appraisal matrix.
    The relative reflected appraisal matrix is the influence matrix with a zeroed
    diagonal, re-normalized to be row stochastic. It shows how much one is
    influenced by others, disregarding oneself. In the derivation, we call this matrix C.
Args:
influence_matrix: row stochastic positive influence matrix.
Returns:
The relative reflected appraisal matrix.
Raises:
None.
"""
influence_matrix = np.array(influence_matrix)
matrix = influence_matrix.copy()
np.fill_diagonal(matrix, 0)
matrix = np.nan_to_num(matrix.T / np.sum(matrix, axis=1)).T
return matrix
def get_average_influence_on_others(
influence_matrix: np.ndarray, index: int) -> float:
"""Gets average noramlized influence on others.
\frac{1}{n-1}\sum\limits_{j \neq i} influence_matrix_{ji}
Args:
influence_matrix: row-stochastic positive influence matrix.
index: The index of individual to return their influence on others.
Returns:
        The average influence of the given individual on all others.
Raises:
ValueError: If index was outside of [0, n-1].
"""
n, _ = influence_matrix.shape
if index < 0 or index >= n:
raise ValueError(
'Index was outside of boundary [0, {}]. It was {}'.format(
n-1, index))
influence_matrix = np.array(influence_matrix)
return np.mean(
influence_matrix[[i for i in range(n)
if i != index], index])
|
py | b415770009ef10277ff6fc2d1ffa2886a9803fb3 | import requests
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.utils.module_loading import import_string
from wagtail.documents import get_document_model
from wagtail.images import get_image_model
ImportedImage = get_image_model()
ImportedDocument = get_document_model()
"""StreamField blocks"""
def build_block_quote_block(tag):
block_dict = {
"type": "block_quote",
"value": {"quote": tag.text.strip(), "attribution": tag.cite},
}
return block_dict
def build_form_block(tag):
block_dict = {"type": "raw_html", "value": str(tag)}
return block_dict
def build_heading_block(tag):
block_dict = {
"type": "heading",
"value": {"importance": tag.name, "text": tag.text},
}
return block_dict
def build_iframe_block(tag):
block_dict = {
"type": "raw_html",
"value": '<div class="core-custom"><div class="responsive-iframe">{}</div></div>'.format(
str(tag)
),
}
return block_dict
def build_image_block(tag):
def get_image_id(src):
return 1
block_dict = {"type": "image", "value": get_image_id(tag.src)}
return block_dict
def build_table_block(tag):
block_dict = {"type": "raw_html", "value": str(tag)}
return block_dict
def conf_html_tags_to_blocks():
return getattr(
settings,
"WAGTAIL_WORDPRESS_IMPORTER_CONVERT_HTML_TAGS_TO_BLOCKS",
{
"h1": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
# "h2": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
# "h3": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
# "h4": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
# "h5": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
# "h6": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
"table": "wagtail_wordpress_import.block_builder_defaults.build_table_block",
"iframe": "wagtail_wordpress_import.block_builder_defaults.build_iframe_block",
"form": "wagtail_wordpress_import.block_builder_defaults.build_form_block",
"img": "wagtail_wordpress_import.block_builder_defaults.build_image_block",
"blockquote": "wagtail_wordpress_import.block_builder_defaults.build_block_quote_block",
},
)
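# Illustrative sketch (hypothetical; not part of the original module): the
# tag-to-block mapping above can be overridden from the importing project's
# Django settings, e.g. in settings.py:
#
#     WAGTAIL_WORDPRESS_IMPORTER_CONVERT_HTML_TAGS_TO_BLOCKS = {
#         "h2": "wagtail_wordpress_import.block_builder_defaults.build_heading_block",
#         "img": "wagtail_wordpress_import.block_builder_defaults.build_image_block",
#     }
#
# Each dotted path must point at a builder function with the same signature as
# the builders defined in this module.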
"""Fall back StreamField block"""
def conf_fallback_block():
return getattr(
settings,
"WAGTAIL_WORDPRESS_IMPORTER_FALLBACK_BLOCK",
"wagtail_wordpress_import.block_builder_defaults.build_richtext_block_content",
)
def build_richtext_block_content(html, blocks):
"""
    image_linker is called to link up and retrieve the remote images
    document_linker is called to link up and retrieve the remote documents
filters are called to replace inline shortcodes
"""
html = image_linker(html)
html = document_linker(html)
for inline_shortcode_handler in getattr(
settings, "WAGTAIL_WORDPRESS_IMPORTER_INLINE_SHORTCODE_HANDLERS", []
):
function = import_string(inline_shortcode_handler).construct_html_tag
html = function(html)
blocks.append({"type": "rich_text", "value": html})
html = ""
return html
"""Rich Text Functions"""
def conf_valid_image_content_types():
return getattr(
settings,
"WAGTAIL_WORDPRESS_IMPORTER_VALID_IMAGE_CONTENT_TYPES",
[
"image/gif",
"image/jpeg",
"image/png",
"image/webp",
"text/html",
],
)
def conf_valid_document_file_types():
return getattr(
settings,
"",
[
"pdf",
"ppt",
"docx",
],
)
def conf_valid_document_content_types():
return getattr(
settings,
"",
[
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
],
)
def image_linker(html):
"""
params
======
html: html from a single rich_text block
returns
=======
string: the html with img tags modified
BS4 performs a find and replace on all img tags found in the HTML.
If the image can be retrieved from the remote site and saved into a Wagtail ImageModel
the soup is modified.
"""
soup = BeautifulSoup(html, "html.parser")
images = soup.find_all("img")
for image in images:
if image.attrs and image.attrs.get("src"):
image_src = get_absolute_src(
image.attrs["src"],
getattr(settings, "WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN"),
)
saved_image = get_or_save_image(image_src)
if saved_image:
image_embed = soup.new_tag("embed")
image_embed.attrs["embedtype"] = "image"
image_embed.attrs["id"] = saved_image.id
image_embed.attrs["alt"] = get_image_alt(image)
image_embed.attrs["format"] = get_alignment_class(image)
image.replace_with(image_embed)
else:
print(f"IMAGE HAS NO SRC: {image}")
return str(soup)
def get_image_alt(img_tag):
return img_tag.attrs["alt"] if "alt" in img_tag.attrs else None
def get_image_file_name(src):
return src.split("/")[-1] if src else None # need the last part
def get_document_file_name(src):
return src.split("/")[-1] if src else None # need the last part
def image_exists(name):
try:
return ImportedImage.objects.get(title=name)
except ImportedImage.DoesNotExist:
pass
def document_exists(name):
try:
return ImportedDocument.objects.get(title=name)
except ImportedDocument.DoesNotExist:
pass
def conf_get_requests_settings():
return getattr(
settings,
"WAGTAIL_WORDPRESS_IMPORTER_REQUESTS_SETTINGS",
{
"headers": {"User-Agent": "WagtailWordpressImporter"},
"timeout": 5,
"stream": False,
},
)
def get_or_save_image(src):
image_file_name = get_image_file_name(src)
existing_image = image_exists(image_file_name)
if not existing_image:
response, valid, type = fetch_url(src)
if valid and (type in conf_valid_image_content_types()):
temp_image = NamedTemporaryFile(delete=True)
temp_image.name = image_file_name
temp_image.write(response.content)
temp_image.flush()
retrieved_image = ImportedImage(
file=File(file=temp_image), title=image_file_name
)
retrieved_image.save()
temp_image.close()
return retrieved_image
else:
print(f"RECEIVED INVALID IMAGE RESPONSE: {src}")
return existing_image
def fetch_url(src, r=None, status=False, content_type=None):
"""general purpose url fetcher with ability to pass in own config"""
try:
response = requests.get(src, **conf_get_requests_settings())
status = True if response.status_code == 200 else False
content_type = (
response.headers["content-type"].lower()
if response.headers.get("content-type")
else ""
)
return response, status, content_type
except Exception as e:
raise requests.ConnectionError(e)
def get_absolute_src(src, domain_prefix=None):
src = src.lstrip("/")
if not src.startswith("http") and domain_prefix:
return domain_prefix + "/" + src
return src
def get_alignment_class(image):
alignment = "fullwidth"
if "class" in image.attrs:
if "align-left" in image.attrs["class"]:
alignment = "left"
elif "align-right" in image.attrs["class"]:
alignment = "right"
return alignment
def document_linker(html):
"""
params
======
html: html from a single rich_text block
returns
=======
string: the html with anchor links modified
    BS4 performs a find and replace on all anchor tags found in the HTML.
    If the linked document can be retrieved from the remote site and saved into a
    Wagtail Document model the soup is modified.
"""
soup = BeautifulSoup(html, "html.parser")
anchors = soup.find_all("a")
for anchor in anchors:
if anchor.attrs and anchor.attrs.get("href"):
anchor_href = get_absolute_src(
anchor.attrs["href"],
getattr(settings, "WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN"),
)
anchor_inner_content = anchor.text
saved_document = get_or_save_document(anchor_href)
if saved_document:
document_embed = soup.new_tag("a")
document_embed.attrs["linktype"] = "document"
document_embed.attrs["id"] = saved_document.id
document_embed.string = anchor_inner_content
anchor.replace_with(document_embed)
else:
print(f"DOCUMENT HAS NO HREF: {anchor}")
return str(soup)
def get_or_save_document(href):
file_type = href.split(".")[-1]
if file_type in conf_valid_document_file_types():
document_file_name = get_document_file_name(href)
existing_document = document_exists(document_file_name)
if not existing_document:
response, valid, type = fetch_url(href)
if valid and (type in conf_valid_document_content_types()):
temp_document = NamedTemporaryFile(delete=True)
temp_document.name = document_file_name
temp_document.write(response.content)
temp_document.flush()
retrieved_document = ImportedDocument(
file=File(file=temp_document), title=document_file_name
)
retrieved_document.save()
temp_document.close()
return retrieved_document
else:
print(f"RECEIVED INVALID DOCUMENT RESPONSE: {href}")
return existing_document
|
py | b4157751dcf12e8cd9b5181f84d9fab4f028cb87 | from datetime import date, timedelta
EASTERS = {
2016: (3, 27),
2017: (4, 16),
2018: (4, 1),
2019: (4, 21),
2020: (4, 12),
2021: (4, 4),
2022: (4, 17),
2023: (4, 9),
2024: (3, 31),
2025: (4, 20),
2026: (4, 5),
    2027: (3, 28),
2028: (4, 16),
2029: (4, 1),
2030: (4, 21),
2031: (4, 13),
2032: (3, 28),
2033: (4, 17),
2034: (4, 9),
2035: (3, 25),
}
FIRST_ADVENTS = {
2016: (11, 27),
2017: (12, 3),
2018: (12, 2),
2019: (12, 1),
2020: (11, 29),
2021: (11, 28),
2022: (11, 27),
2023: (12, 3),
2024: (12, 1),
2025: (11, 30),
2026: (11, 29),
2027: (11, 28),
2028: (12, 3),
2029: (12, 2),
2030: (12, 1),
2031: (11, 30),
2032: (11, 28),
2033: (11, 27),
2034: (12, 3),
2035: (12, 2),
}
def easter_day(shift):
def inner(year):
easter = date(year, *EASTERS[year])
the_date = easter + timedelta(shift)
return the_date.month, the_date.day
return inner
def second_wednesday_before_the_first_advent(year):
first_advent = date(year, *FIRST_ADVENTS[year])
# First advent is always on Sunday
wednesday = first_advent - timedelta(11)
return wednesday.month, wednesday.day
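# Illustrative usage sketch (hypothetical; not part of the original module).
# Resolves two movable holidays for 2020: Karfreitag (two days before Easter)
# and Buß- und Bettag (the Wednesday eleven days before the first Advent).
def _example_movable_holidays_2020():
    print(easter_day(-2)(2020))                             # -> (4, 10)
    print(second_wednesday_before_the_first_advent(2020))   # -> (11, 18)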
# Assuming that there are no gaps in EASTERS
MAXIMUM_KNOWN_YEAR = max(EASTERS.keys())
REGIONS = {
'BW': 'Baden-Württemberg',
'BY': 'Freistaat Bayern',
'BY-AU': 'Freistaat Bayern: Augsburg',
'BY-MU': 'Freistaat Bayern: München',
'BE': 'Berlin',
'BB': 'Brandenburg',
'HB': 'Freie Hansestadt Bremen',
'HH': 'Hamburg',
'HE': 'Hessen',
'MV': 'Mecklenburg-Vorpommern',
'NI': 'Niedersachsen',
'NW': 'Nordrhein-Westfalen',
'RP': 'Rheinland-Pfalz',
'SL': 'Saarland',
'SN': 'Sachsen',
'ST': 'Sachsen-Anhalt',
'SH': 'Schleswig-Holstein',
'TH': 'Thüringen',
}
HOLIDAYS = {
'Neujahrstag': ((1, 1), None),
'Heilige Drei Könige': ((1, 6), {'BW', 'BY', 'BY-AU', 'BY-MU', 'ST'}),
'Karfreitag': (easter_day(-2), None),
'Ostermontag': (easter_day(1), None),
'Tag der Arbeit': ((5, 1), None),
'Christi Himmelfahrt': (easter_day(39), None),
'Pfingstmontag': (easter_day(50), None),
'Fronleichnam': (easter_day(60), {'BW', 'BY', 'BY-AU', 'BY-MU', 'HE', 'NW', 'RP', 'SL'}),
'Friedensfest': ((8, 8), {'BY-AU'}),
'Mariä Himmelfahrt': ((8, 15), {'BY-AU', 'BY-MU', 'SL'}),
'Tag der Deutschen Einheit': ((10, 3), None),
    'Reformationstag': ((10, 31), {'BB', 'MV', 'SN', 'ST', 'TH'}),
'Allerheiligen': ((11, 1), {'BW', 'BY', 'BY-AU', 'BY-MU', 'NW', 'RP', 'SL'}),
'Buß- und Bettag': (second_wednesday_before_the_first_advent, {'SN'}),
'Weihnachtstag': ((12, 25), None),
'Zweiter Weihnachtsfeiertag': ((12, 26), None),
}
GLOBAL_EXCEPTIONS = {
'Reformationstag': [2017],
}
|
py | b41577fe7491ba34d5f933a9af3c6247af70e44d | import csv
import logging
import os
from pathlib import Path
from typing import Optional
MANIFEST = "manifest.csv"
GOOGLE_DRIVE_URL_TEMPLATE = "https://drive.google.com/uc?id={}"
def download_dataset(path: Optional[Path] = None, sample: bool = True):
"""
A convenient function to download the dataset to specified path
Args:
path (Path): The path to save the files to [default is to use Path.cwd()/.data]
sample (bool): Download the sample or the full dataset [default: True; download sample only]
"""
try:
import gdown
except ImportError:
logging.debug("It is intentional to not make `gdown` a dependency.")
raise ImportError("You need `gdown` to download the full dataset.")
with open(MANIFEST) as f:
reader = csv.reader(f)
        cache_dir = Path(path or os.getenv("CACHE_DIR", Path.cwd() / ".data"))
if not cache_dir.exists():
logging.info("%s doesn't exist, it will be created.", cache_dir)
os.makedirs(cache_dir)
logging.info("Downloading files to %s", cache_dir)
for filename, google_drive_id in reader:
output_file = cache_dir / filename
if not output_file.exists():
gdown.download(
GOOGLE_DRIVE_URL_TEMPLATE.format(google_drive_id),
str(output_file),
)
else:
logging.info("%s already exist", output_file)
|
py | b41579903d485e27a5402593811e6e5a5b1da5af | """
Classes for capturing the kernel structure
Created on Jul 1, 2019
@author: Hans Giesen ([email protected])
Copyright 2019 Xilinx, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#######################################################################################################################
import glob, logging, re, time, yaml
from xml.etree import ElementTree
from collections import defaultdict, Counter
log = logging.getLogger(__name__)
#######################################################################################################################
class KernelStructure(object):
"""Class capturing the kernel structure
Parameters
----------
output_dir : str, optional
Output directory of build. All kernel information is extracted from the reports in this directory.
Attributes
----------
functions : dict
Data structure describing each function in a kernel
top_function : str
Top-level function of kernel
func_units : dict
Data structure describing the resource consumption of each functional unit
registers : dict
Data structure describing the resource consumption of each register
_function_map : dict
Map translating abbreviated function names into the full names
    memories : dict
Dictionary with all memories and their dimensions
"""
def __init__(self, output_dir = None):
"""Extract information about all functions, loops, and their latencies from HLS reports."""
if output_dir:
self._read_full_names(output_dir)
self._read_latencies(output_dir)
self._assign_loop_bounds(output_dir)
self._assign_calls(output_dir)
self._decompose_latency()
self._read_top_function(output_dir)
self._read_memories(output_dir)
self._map_mem_aliases(output_dir)
self._read_mem_deps(output_dir)
self._assign_resources(output_dir)
def _read_full_names(self, output_dir):
"""Make a map in self._function_map that contains the full name for each abbreviated function name.
Parameters
----------
output_dir : str
Output directory of build
"""
log = glob.glob(output_dir + '/*/proj/sol1/sol1.log')[0]
self._function_map = {}
with open(log, "r") as input_file:
for line in input_file:
match = re.search(r'WARNING: \[XFORM 203-631\] Renaming function \'(\S+)\' to \'(\S+)\'', line)
if match:
src, dst = match.groups()
self._function_map[dst] = src
def _get_full_name(self, name):
"""Return the full name of a function name that may be abbreviated.
Parameters
----------
name : str
Abbreviated function name
Returns
-------
str
Full function name
"""
return self._function_map.get(name, name)
def _read_latencies(self, output_dir):
"""Extract the latency of all functions and loops from the HLS reports.
This function extracts the latencies of all functions and loops from the HLS reports, and puts them in a list
(self.functions).
Parameters
----------
output_dir : str
Output directory of build
"""
reports = glob.glob(output_dir + '/*/proj/sol1/syn/report/*_csynth.xml')
self.functions = {}
for report in reports:
tree = ElementTree.parse(report)
name = self._get_full_name(tree.find('UserAssignments/TopModelName').text)
latency = int(tree.find('PerformanceEstimates/SummaryOfOverallLatency/Worst-caseLatency').text)
function = defaultdict(list, {"name": name, "latency": latency})
node = tree.find('PerformanceEstimates/SummaryOfLoopLatency')
loops = []
if node != None:
for loop_node in node:
loops.append(self._read_latencies_inner(loop_node))
function["loops"] = loops
self.functions[name] = function
def _read_latencies_inner(self, loop_node):
"""Extract the latencies of one loop and its inner loops from an HLS report.
Parameters
----------
loop_node : ElementTree.Element
Node in XML file with loop currently being parsed
Returns
-------
dict
Latency information collected about the loop and subloops
"""
node = loop_node.find('TripCount/range/max')
if node == None:
node = loop_node.find('TripCount')
iterations = int(node.text)
loop = defaultdict(list, {"name": loop_node.tag, "iterations": iterations, "loops": []})
node = loop_node.find('IterationLatency/range/max')
if node == None:
node = loop_node.find('IterationLatency')
if node != None:
loop['iter_latency'] = int(node.text)
# The reported latency is sometimes too high due to a bug. Therefore, we compute it ourselves.
loop['latency'] = iterations * loop['iter_latency']
else:
node = loop_node.find('PipelineDepth')
if node == None:
node = loop_node.find('PipelineDepth/range/max')
loop['pipeline_depth'] = int(node.text)
node = loop_node.find('PipelineII')
if node == None:
node = loop_node.find('PipelineII/range/max')
loop['pipeline_ii'] = int(node.text)
loop['latency'] = (iterations - 1) * loop['pipeline_ii'] + loop['pipeline_depth'] - 1
for child in loop_node:
if child.tag not in ['IterationLatency', 'TripCount', 'Latency', 'PipelineII', 'PipelineDepth']:
loop['loops'].append(self._read_latencies_inner(child))
return loop
def _assign_loop_bounds(self, output_dir):
"""Assign a tuple with the first and last clock cycle to each loop in self.functions.
Parameters
----------
output_dir : str
Output directory of build
"""
reports = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/*.verbose.sched.rpt')
for report in reports:
function, bounds = self._read_loop_bounds(report)
self._assign_bounds(function, bounds)
def _read_loop_bounds(self, report):
"""Create a list with the first and last cycle of each loop based on the static schedule.
Parameters
----------
report : str
Path to static schedule report
Returns
-------
str
Name of the function associated with the report
list of tuples with 2 ints
Each tuple consists of the first and last cycle of a loop
Notes
-----
This function is based on the assumption that each loop is solely composed of a number of forward transitions,
followed by a single transition back in time. This cannot be guaranteed in general, but I believe that this
assumption is valid in the Vivado HLS schedules to enable clean schedule diagrams in the GUI.
In hindsight, the loop bounds could have been extracted more reliably based on the calls to
_ssdm_op_SpecLoopName in the code.
"""
with open(report, "r") as input_file:
text = input_file.read()
function = self._get_full_name(re.search(r'== Vivado HLS Report for \'(.+)\'', text).group(1))
result = re.search(r"\* FSM state transitions: \n(.*)\n\n\* FSM state operations", text, re.DOTALL)
edges = result.group(1)
bounds = []
for edge in edges.split('\n'):
src, dsts = re.match(r"(\d+) --> (.*)$", edge).groups()
for dst in dsts.split(' '):
if dst != "" and int(dst) <= int(src):
bounds.append((int(dst), int(src)))
return function, bounds
def _assign_bounds(self, function, bounds):
"""Take a list with loop bounds and assign them to each function.
Parameters
----------
function : str
Function to which the bounds must be assigned
bounds : list of tuples with 2 ints
Each tuple consists of the first and last cycle of a loop
Notes
-----
We assume here that the order of the functions and loops in the reports is identical to the order in the state
description. This is the case for my testcases, but it may not always be the case.
"""
bounds.sort(key = lambda bound: (bound[0], -bound[1]))
self._assign_bounds_core(self.functions[function], bounds)
if len(bounds) != 0:
raise RuntimeError("More loops were discovered in the schedule than suggested by the function hierarchy.\n"
"Most likely this means that one of the assumptions on which the parser relies is not\n"
"satisfied for all designs.")
def _assign_bounds_core(self, scope, bounds):
"""Assign loop bounds to a loop and its inner loops.
Parameters
----------
scope : dict
Data structure representing a function or loop
bounds : list of tuples with 2 ints
Each tuple consists of the first and last cycle of a loop
Returns
-------
int
Highest upper bound that has been encountered in this loop or inner loops
Notes
-----
Note that I have encountered a case in which the cycles of the inner loop are not entirely contained within the
cycles of the outer loop. In that particular case, the outer loop ended in the same cycle as the inner loop
started. This is also fixed here.
"""
upper_bounds = [0]
for loop in scope['loops']:
lower, upper = bounds.pop(0)
upper_bounds.append(upper)
max_upper_bound = self._assign_bounds_core(loop, bounds)
loop['bounds'] = (lower, max(upper, max_upper_bound))
return max(upper_bounds)
def _assign_calls(self, output_dir):
"""Locate all calls in the schedule and assign them to the functions and loops in self.functions.
Parameters
----------
output_dir : str
Output directory of build
"""
reports = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/*.verbose.sched.rpt')
for report in reports:
function = self._get_full_name(re.search('.*/(.*?)\.verbose\.sched\.rpt', report).group(1))
with open(report, "r") as input_file:
for line in input_file:
result = re.search(r'^ST_(\d+) : Operation \d+ \[1/2\] .+ ---> ".+ = call .* @(\S+)\(', line)
if result != None:
cycle, callee = result.groups()
self._assign_call(self._get_full_name(callee), int(cycle), self.functions[function])
def _assign_call(self, function, cycle, scope):
"""Assign one call to the correct loop based on the cycle in which it is executed.
Parameters
----------
function : str
Function that is called
cycle : int
Clock cycle in which function is called
scope : dict
Data structure describing function or loop
Returns
-------
bool
True if function call has been processed and False otherwise
"""
for loop in scope["loops"]:
if self._assign_call(function, cycle, loop):
return True
bounds = scope.get("bounds")
if bounds == None or cycle >= bounds[0] and cycle <= bounds[1]:
scope["calls"].append(function)
return True
else:
return False
def _decompose_latency(self):
"""Decompose latency into portions associated with functions or loops.
Compute the contribution to the latency of each function or loop after factoring out the contribution of subloops
or functions that are called. The result is in the latency fields of self.functions.
"""
for function in self.functions.values():
self._decompose_latency_core(function)
def _decompose_latency_core(self, scope):
"""Decompose the latency of a single function or loop.
Parameters
----------
scope : dict
Data structure representing a function or a loop
Returns
-------
float
Latency of current function or loop
"""
if "latency_const_1" not in scope:
latency = 0
for loop in scope["loops"]:
latency += self._decompose_latency_core(loop)
for function in scope["calls"]:
name = function.replace('.', '_')
latency += self._decompose_latency_core(self.functions[name])
if 'iter_latency' in scope:
scope["latency_const_1"] = scope['iter_latency'] - latency
scope["latency_const_2"] = 0
else:
scope["latency_const_1"] = 0
scope["latency_const_2"] = scope["latency"] - latency
if scope["latency_const_1"] < 0 or scope["latency_const_2"] < 0:
raise RuntimeError("The latencies do not add up.")
return scope["latency"]
def _read_top_function(self, output_dir):
"""Read the name of the top-level function and assign it to self.top_function.
        Parameters
        ----------
        output_dir : str
Output directory of build
"""
reports = glob.glob(output_dir + '/*/proj/sol1/syn/report/csynth.xml')
tree = ElementTree.parse(reports[0])
self.top_function = tree.find('UserAssignments/TopModelName').text
def _read_memories(self, output_dir):
"""Read all the memories from the HLS reports and put them in self.memories.
        Parameters
        ----------
        output_dir : str
Output directory of build
"""
self.memories = {}
self._read_global_memories(output_dir)
self._read_local_memories(output_dir)
def _read_global_memories(self, output_dir):
"""Read all global memories from the a.o.3.ll file and put them in self.memories.
        Parameters
        ----------
        output_dir : str
Output directory of build
"""
filename = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/a.o.3.ll')[0]
pattern = re.compile(r'@(\S+) = .*global \[(\d+) x (\S+)\]')
with open(filename, 'r') as input_file:
for line in input_file:
match = re.match(pattern, line)
if match:
name = match.group(1)
length = int(match.group(2))
data_type = match.group(3)
width = 32 if data_type == 'float' else int(data_type[1:])
self.memories[name] = (length, width)
def _read_local_memories(self, output_dir):
"""Read all local memories from the HLS reports and put them in self.memories.
        Parameters
        ----------
        output_dir : str
Output directory of build
"""
reports = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/*.verbose.sched.rpt')
function_pattern = re.compile(r'.*/(.*?)\.verbose\.sched\.rpt')
mem_pattern = re.compile(r'%(\S+) = alloca \[(\d+) x (\S+)\]')
for report in reports:
function = self._get_full_name(re.search(function_pattern, report).group(1))
with open(report, 'r') as input_file:
for line in input_file:
match = re.search(mem_pattern, line)
if match:
name = match.group(1)
length = int(match.group(2))
data_type = match.group(3)
width = 32 if data_type == 'float' else int(data_type[1:])
self.memories[function + '|' + name] = (length, width)
def _map_mem_aliases(self, output_dir):
"""Determine all aliases for the memories and put them in `self.mem_aliases`.
Parameters
----------
output_dir : str
Output directory of build
"""
filename = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/a.o.3.ll')[0]
function_pattern = re.compile(r'define .* @(\S+)\((.*)\) .*{')
call_pattern = re.compile(r' = call .* @(\S+)\((.*)\)')
functions = {}
with open(filename, 'r') as input_file:
for line in input_file:
match = re.match(function_pattern, line)
if match:
name, params = match.groups()
function = {}
functions[self._get_full_name(name)] = function
function["params"] = params
calls = []
continue
match = re.search(call_pattern, line)
if match:
name, args = match.groups()
calls.append((name, args))
continue
if line == '}\n':
function["calls"] = calls
function = None
self.mem_aliases = {}
pattern = re.compile(r'\[.*\]\* %(\S+)$')
for func_name in self.functions:
function = functions[func_name]
for param in function["params"].split(','):
match = re.search(pattern, param)
if match:
name = match.group(1)
mems = self.trace_to_mems(functions, func_name, name)
if len(mems) > 0:
self.mem_aliases[func_name + '|' + name] = mems
for mem in self.memories:
if '|' not in mem:
for function in self.functions:
name = function + '|' + mem
if name not in self.mem_aliases:
self.mem_aliases[name] = [mem]
def trace_to_mems(self, functions, function, mem):
"""Trace a memory reference to memories.
Sometimes, a memory that is referenced in a function is propagated to that function from a parent function. This
function traces such references back to the memories.
Parameters
----------
functions : dict
Data structure describing function parameters, calls and address variable to memory mapping
function : str
Function with memory reference
mem : str
Variable referring to a memory
Returns
-------
list of str
Memories
"""
if function + '|' + mem in self.memories:
return [function + '|' + mem]
if mem in self.memories:
return [mem]
pattern = re.compile(r'[%@](\S+)$')
pos = 0
for param in functions[function]["params"].split(','):
name = re.search(pattern, param).group(1)
if mem == name:
break
pos += 1
mems = []
for caller, info in functions.items():
for callee, params in info["calls"]:
if callee == function:
param = params.split(',')[pos]
mem = re.search(pattern, param).group(1)
mems.extend(self.trace_to_mems(functions, caller, mem))
return mems
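# A minimal sketch of how trace_to_mems resolves a reference, using made-up names:
# if 'top|buf' is a key of self.memories, trace_to_mems(functions, 'top', 'buf') returns
# ['top|buf'] immediately; if 'child' only receives 'buf' as a pointer parameter,
# trace_to_mems(functions, 'child', 'buf') looks up every caller of 'child', takes the
# argument passed in that position, and recurses until a key of self.memories is reached.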
def _read_mem_deps(self, output_dir):
"""Read memory dependencies of loops from the static schedule and put them in `self.functions`.
Parameters
----------
output_dir : str
Output directory of build
"""
getelemptr_pattern = re.compile(r'%(\S+) = getelementptr .*\[.+\]\* [%@](\S+),')
load_pattern = re.compile(r'ST_(\d+) .* \[1/2\] .* = load \S+ %(\S+),')
store_pattern = re.compile(r'ST_(\d+) .* "store \S+ \S+, \S+ %(\S+),')
reports = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/*.verbose.sched.rpt')
for report in reports:
function = self._get_full_name(re.search(r'.*/(.*?)\.verbose\.sched\.rpt', report).group(1))
addr_map = {}
with open(report, 'r') as input_file:
for line in input_file:
match = re.search(getelemptr_pattern, line)
if match:
addr_var, mem = match.groups()
addr_map[addr_var] = mem
with open(report, 'r') as input_file:
for line in input_file:
match = re.search(load_pattern, line)
if not match:
match = re.search(store_pattern, line)
if match:
cycle = int(match.group(1))
addr_var = match.group(2)
mem = addr_map[addr_var]
mems = self.mem_aliases.get(function + '|' + mem)
if mems:
for loop in self.functions[function]['loops']:
self._assign_access(loop, cycle, mems)
def _assign_access(self, loop, cycle, mems):
"""Assign memory accesses to a loop based on the clock cycle.
Parameters
----------
loop : dict
Data structure representing the loop
cycle : int
Clock cycle in which the memory is accessed.
mems : list of str
Memories that are accessed
Returns
-------
bool
`True` if the access was assigned to this loop or one of its inner loops, and `False` otherwise.
"""
done = False
bounds = loop['bounds']
if cycle >= bounds[0] and cycle <= bounds[1]:
for inner_loop in loop['loops']:
if self._assign_access(inner_loop, cycle, mems):
done = True
if not done:
for mem in mems:
loop.setdefault('accesses', Counter())[mem] += 1
done = True
return done
def _assign_resources(self, output_dir):
"""Assign resource consumption to each function and loop.
Parameters
----------
output_dir : str
Output directory
Notes
-----
Shared resources are assigned to each function or loop using them.
"""
self.func_units = {}
self.registers = {}
self.multiplexers = {}
reports = glob.glob(output_dir + '/*/proj/sol1/.autopilot/db/*.verbose.bind.rpt')
for report in reports:
function = self._get_full_name(re.search(r'.*/(.*?)\.verbose\.bind\.rpt', report).group(1))
with open(report, 'r') as input_file:
text = input_file.read()
# The model is described in XML format, but special characters are not properly encoded, so the XML parser cannot
# handle it.
comp_to_cycle_map = defaultdict(list)
op_to_cycle_map = defaultdict(list)
for comp, content in re.findall(r'\n<comp.*?name="(\S+)">(.*?)\n</comp>', text, re.DOTALL):
match = re.search(r'\n<opset="(.+) "/>', content)
if match:
for operation in match.group(1).split():
match = re.match(r'(\S+)/(\d+)', operation)
if match:
op = match.group(1)
cycle = int(match.group(2))
comp_to_cycle_map[comp].append(cycle)
op_to_cycle_map[op].append(cycle)
self.func_units[function] = {}
func_units = re.search(r'\n\* Functional unit list:\n(.*)\n\nMemories:', text, re.DOTALL).group(1)
for line in func_units.split('\n'):
columns = [column.strip() for column in line.split('|')]
operation = columns[1]
func_unit = columns[2]
if func_unit == 'Functional Unit':
try:
lut_column = columns.index('LUT')
except ValueError:
lut_column = None
try:
reg_column = columns.index('FF')
except ValueError:
reg_column = None
try:
dsp_column = columns.index('DSP48E')
except ValueError:
dsp_column = None
if func_unit in ['Functional Unit', '', len(func_unit) * '-'] or operation == 'call':
continue
lut_cnt = int(columns[lut_column]) if lut_column else 0
reg_cnt = int(columns[reg_column]) if reg_column else 0
dsp_cnt = int(columns[dsp_column]) if dsp_column else 0
for cycle in comp_to_cycle_map[func_unit]:
self._assign_resources_core(func_unit, cycle, self.functions[function], "func_unit")
self.func_units[function][func_unit] = (dsp_cnt, lut_cnt, reg_cnt)
self.registers[function] = {}
regs = re.search(r'\n\* Register list:\n(.*)\n\n\* Multiplexer \(MUX\) list:', text, re.DOTALL).group(1)
for line in regs.split('\n'):
columns = line.split('|')
if len(columns) == 1:
continue
reg = columns[1].strip()
if reg in ['', 'Total']:
continue
match = re.match(r'(\S+)_reg_\d+', reg)
if match:
operator = match.group(1)
else:
operator = reg
reg_cnt = int(columns[2])
for cycle in op_to_cycle_map[operator]:
self._assign_resources_core(reg, cycle, self.functions[function], "register")
self.registers[function].setdefault(reg, 0)
self.registers[function][reg] += reg_cnt
self.multiplexers[function] = {}
muxes = re.search(r'\n\* Multiplexer \(MUX\) list: \n(.*)\n\n\n\n\* Summary:', text, re.DOTALL).group(1)
for line in muxes.split('\n'):
columns = line.split('|')
mux = columns[1].strip()
if mux in ['Comp', 'Total', len(mux) * '-']:
continue
# It appears that the Delay column is always missing. Occasionally, the LUT column is missing too.
if len(columns) < 8:
continue
mux_cnt = int(columns[7])
cycles = comp_to_cycle_map[mux]
if len(cycles) == 0:
operation = re.match(r'(\S+)_reg_\d+', mux).group(1)
cycles = op_to_cycle_map[operation]
for cycle in cycles:
self._assign_resources_core(mux, cycle, self.functions[function], "multiplexer")
self.multiplexers[function].setdefault(mux, 0)
self.multiplexers[function][mux] += mux_cnt
def _assign_resources_core(self, func_unit, cycle, scope, resource_type):
"""Assign functional units to the correct function or loop based on the cycle in which they are used.
Parameters
----------
func_unit : str
Name of functional unit
cycle : int
Clock cycle in which the resources are used
scope : dict
Data structure representing a function or loop
"""
for loop in scope["loops"]:
if self._assign_resources_core(func_unit, cycle, loop, resource_type):
return True
bounds = scope.get("bounds")
if bounds is None or bounds[0] <= cycle <= bounds[1]:
scope.setdefault(resource_type + "s", set()).add(func_unit)
return True
else:
return False
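# Illustrative example (hypothetical numbers): if a loop has bounds (5, 12) and a DSP unit
# is bound in cycle 7, the recursion above descends into that loop and the unit ends up in
# loop["func_units"]; a unit bound in cycle 2 falls outside every loop's bounds and is
# therefore attached to the enclosing function's own "func_units" set instead.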
def store(self, filename):
"""Store the kernel structure in a YAML file.
Parameters
----------
filename : str
Destination YAML file
"""
data = {'functions': self.functions, 'top_function': self.top_function, 'func_units': self.func_units,
'registers': self.registers, 'multiplexers': self.multiplexers, 'mem_aliases': self.mem_aliases,
'memories': self.memories}
with open(filename, 'w') as output_file:
yaml.dump(data, output_file)
def load(self, filename):
"""Load the kernel structure from a YAML file.
Parameters
----------
filename : str
Source YAML file
"""
with open(filename, 'r') as input_file:
data = yaml.load(input_file, Loader=yaml.Loader)
self.functions = data['functions']
self.top_function = data['top_function']
self.func_units = data['func_units']
self.registers = data['registers']
self.multiplexers = data['multiplexers']
self.mem_aliases = data['mem_aliases']
self.memories = data['memories']
def match_pragmas(self, pragmas):
"""Ensure that pragmas match with elements of the kernel.
Parameters
----------
pragmas : dict
Data structure describing the pragmas
"""
for pragma in pragmas:
if pragma['type'] == "unroll":
if not any(self._find_loop(loop, pragma['loop']) for loop in self.functions[pragma['function']]['loops']):
raise RuntimeError('Unroll pragma applied to unknown loop.')
elif pragma['type'] == "partition":
if not self.mem_aliases.get(pragma['function'] + '|' + pragma['variable']):
raise RuntimeError('Array_partition pragma applied to unknown memory.')
elif pragma['type'] == "inline":
if pragma['function'] not in self.functions:
raise RuntimeError('Inline pragma applied to unknown function.')
def _find_loop(self, loop, loop_name):
"""Return True if the current loop or any of its inner loops have the provided name.
Parameters
----------
loop : dict
Data structure describing a loop
loop_name : str
Loop that we are searching for
Returns
-------
bool
`True` if the loop was found, and `False` otherwise.
"""
for inner_loop in loop['loops']:
if self._find_loop(inner_loop, loop_name):
return True
return loop['name'] == loop_name
|
py | b4157a11d20b879c7b2ecab5229f364a8f3518fb | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmarket.endpoint import endpoint_data
class ActivateLicenseRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Market', '2015-11-01', 'ActivateLicense','yunmarket')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Identification(self):
return self.get_query_params().get('Identification')
def set_Identification(self,Identification):
self.add_query_param('Identification',Identification)
def get_LicenseCode(self):
return self.get_query_params().get('LicenseCode')
def set_LicenseCode(self,LicenseCode):
self.add_query_param('LicenseCode',LicenseCode) |
py | b4157a4c9118c8c7bcae1f12f5ab28bee6189c30 | import warnings
warnings.filterwarnings("ignore")
from cachetools import TTLCache, cached
# import cfgrib
from collections import OrderedDict
import dask
from dask.distributed import Client, LocalCluster
import gzip
import math
import netCDF4
import numpy as np
import os
import re
import s3fs
from typing import List, Literal, Optional
import urllib.parse
import xarray as xr
import xdrlib
import zarr as zr
cache = TTLCache(maxsize=8192, ttl=36000)
s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"region_name": "us-east-1"})
open_bracket = "{"
close_bracket = "}"
indent = " "
regex_triplet = "\[(?P<start>\d+)\:(?P<step>\d+)\:(?P<stop>-?\d+)\]"
regex_doublet = "\[(?P<start>\d+)\:(?P<stop>-?\d+)\]"
regex_singlet = "\[(?P<start>\d+)\]"
cluster = LocalCluster(n_workers=4)
client = Client(cluster)
@cached(cache)
def get_opendap_type(xr_type) -> str:
if xr_type in (str, np.dtype("S1"), np.dtype("S2"), np.dtype("S4"), np.dtype("S8"), np.dtype("S16"), np.dtype("S32"), np.dtype("S64"), list, np.array, np.ndarray):
return "String"
elif xr_type in (np.int8,):
# raise ValueError("8-bit signed char not supported in DAP2")
return "Byte" # Try it anyway
elif xr_type in (np.uint8,):
return "Byte"
elif xr_type in (np.int16,):
return "Int16"
elif xr_type in (np.uint16,):
return "UInt16"
elif xr_type in (int, np.int32, np.int64): # Note Int64 -> Int32 conversions below
return "Int32"
elif xr_type in (np.uint32,):
return "UInt32"
elif xr_type in (np.float16,):
raise ValueError("16-bit float not supported in DAP2")
elif xr_type in (np.float32,):
return "Float32"
elif xr_type in (float, np.float64):
return "Float64"
raise ValueError(f"Cannot interpret type {xr_type} as DAP2 type")
@cached(cache)
def get_xdr_type(packer: xdrlib.Packer, xr_type):
if xr_type in (str, np.dtype("S1"), np.dtype("S2"), np.dtype("S4"), np.dtype("S8"), np.dtype("S16"), np.dtype("S32"), np.dtype("S64")):
return packer.pack_string
elif xr_type in (np.int8,):
# raise ValueError("8-bit signed char not supported in DAP2")
return packer.pack_int # Try it anyway
elif xr_type in (np.uint8,):
return packer.pack_int
elif xr_type in (int, np.int16, np.int32):
return packer.pack_int
elif xr_type in (np.uint16, np.uint32):
return packer.pack_uint
elif xr_type in (np.float16,):
raise ValueError("16-bit float not supported in DAP2")
elif xr_type in (np.float32,):
return packer.pack_float
elif xr_type in (float, np.float64):
return packer.pack_double
raise ValueError(f"Cannot pack type {xr_type}")
@cached(cache)
def load_dataset(path: str) -> xr.Dataset:
if path.endswith(".nc"):
ds = load_nc(path)
elif path.endswith(".nc.gz"):
ds = load_nc_gz(path)
elif path.endswith(".zarr"):
ds = load_zarr(path)
# elif path.lower().endswith((".grb", ".grib", ".grb2", ".grib2")):
# ds = load_grb(path)
else:
raise ValueError(f"File type for {path} not recognized.")
return ds
def load_nc(path: str) -> xr.Dataset:
with s3.open(path, "rb") as fp:
ncds = netCDF4.Dataset("data", memory=fp.read())
xrds = xr.backends.NetCDF4DataStore(ncds)
ds = xr.open_dataset(xrds, decode_cf=False)
return ds
def load_nc_gz(path: str) -> xr.Dataset:
with s3.open(path, "rb") as fp:
with gzip.open(fp) as gp:
ncds = netCDF4.Dataset("data", memory=gp.read())
xrds = xr.backends.NetCDF4DataStore(ncds)
ds = xr.open_dataset(xrds, decode_cf=False)
return ds
def load_zarr(path: str) -> xr.Dataset:
store = s3fs.S3Map(root=path, s3=s3, check=False)
cache = zr.LRUStoreCache(store=store, max_size=2**28)
ds = xr.open_zarr(cache, decode_cf=False)
return ds
# def load_grb(path: str) -> xr.Dataset:
# with s3.open(path, "rb") as fp:
# ds = cfgrib.open_datasets(fp.read())
# ds_map = []
# total_coords = {}
# for d in ds:
# v_map = {}
# for v in d.data_vars:
# level = d[v].attrs.get("GRIB_typeOfLevel")
# step = d[v].attrs.get("GRIB_stepType")
# name = v
# if level:
# name += f"_{level}"
# if step:
# name += f"_{step}"
# v_map.update({v: name})
# for c in d.coords:
# if c in total_coords.keys():
# if (d[c].values == total_coords[c]).all():
# continue
# else:
# if c + "1" in total_coords.keys():
# name = c + "2"
# else:
# name = c + "1"
# v_map.update({c: name})
# else:
# total_coords[c] = d[c].values
# d_map = v_map.copy()
# for x in v_map.keys():
# if x not in d.dims:
# del d_map[x]
# new_d = d.rename(v_map)
# new_d = new_d.rename_dims(d_map)
# new_d = new_d.expand_dims([x for x in new_d.coords if x not in new_d.dims])
# ds_map.append(new_d)
# dx = xr.merge(ds_map)
# return dx
def parse_args(query_args: str) -> Optional[dict]:
if query_args == "":
return None
cleaned = query_args.replace("=", "")
decoded = urllib.parse.unquote(cleaned)
string_to_list = decoded.split(",")
parsed_args = OrderedDict([])
for a in string_to_list:
if "[" in a: # anom[0:1:10][0:1:0][0:1:100][0:2:20]
variable, bracket, values = a.partition("[")
if "." in variable:
before, after = variable.split(".")
if before == after:
variable = before # anom.anom -> anom
parsed_args[variable] = bracket + values
else: # time
if "." in a:
before, after = a.split(".")
if before == after:
a = before # time.time -> time
parsed_args[a] = "[0:1:-1]"
return parsed_args # {anom: [0:1:10][0:1:0][0:1:100][0:2:20], zlev: [0:1:0]}
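# Illustration of the expected behaviour (made-up variable names):
#   parse_args("sst%5B0:1:5%5D%5B0:1:0%5D,time")
# would yield OrderedDict([('sst', '[0:1:5][0:1:0]'), ('time', '[0:1:-1]')]),
# i.e. hyperslab selections are kept verbatim and a bare variable name defaults to the full range.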
def create_das(ds: xr.Dataset, parsed_args: dict) -> str:
variable_attrs = OrderedDict([])
global_attrs = {"NC_GLOBAL": OrderedDict([])}
if parsed_args is None:
for k in ds.variables:
variable_attrs[k] = OrderedDict([(k, v) for k,v in ds[k].attrs.items()])
# Iterate over a copy so deleting NaN attributes does not mutate the dict mid-iteration
for attrkey, attrval in list(variable_attrs[k].items()):
try:
if math.isnan(attrval):
del variable_attrs[k][attrkey]
except TypeError:
pass
else:
for k in parsed_args.keys():
variable_attrs[k] = OrderedDict([(k, v) for k,v in ds[k].attrs.items()])
# Iterate over a copy so deleting NaN attributes does not mutate the dict mid-iteration
for attrkey, attrval in list(variable_attrs[k].items()):
try:
if math.isnan(attrval):
del variable_attrs[k][attrkey]
except TypeError:
pass
for k, v in ds.attrs.items():
global_attrs["NC_GLOBAL"][k] = v
master_dict = OrderedDict([])
master_dict.update(variable_attrs)
master_dict.update(global_attrs)
das = "Attributes {\n"
for k, v in master_dict.items():
das += f"{indent}{k} {open_bracket}\n"
for attrkey, attrval in v.items():
if k == "NC_GLOBAL":
dtype = get_opendap_type(type(ds.attrs[attrkey]))
if dtype != "String":
das += f"{indent}{indent}{dtype} {attrkey} {attrval};\n"
else:
das += f"{indent}{indent}{dtype} {attrkey} \"{attrval}\";\n"
else:
dtype = get_opendap_type(type(ds[k].attrs[attrkey]))
if dtype != "String":
das += f"{indent}{indent}{dtype} {attrkey} {attrval};\n"
else:
das += f"{indent}{indent}{dtype} {attrkey} \"{attrval}\";\n"
das += f"{indent}{close_bracket}\n"
das += f"{close_bracket}"
return das
def create_dds(ds: xr.Dataset, parsed_args: dict, name: str) -> str:
variable_ranges = OrderedDict([])
if parsed_args is None:
for k in ds.variables:
dimensions = ds[k].dims
variable_ranges[k] = OrderedDict([])
for d in dimensions:
variable_ranges[k][d] = ds[d].shape[0]
else:
for k, v in parsed_args.items():
dimensions = ds[k].dims
values = ["[" + x for x in v.split("[") if x]
variable_ranges[k] = OrderedDict([])
for i, d in enumerate(dimensions):
try:
d_range = values[i]
except IndexError:
d_range = "[0:1:-1]" # dimension not specified -> assume all
try: # Most common [start:step:stop] pattern
d_range_dict = re.search(regex_triplet, d_range).groupdict()
except AttributeError:
try: # [start:stop] pattern with implied step=1
d_range_dict = re.search(regex_doublet, d_range).groupdict()
d_range_dict.update({"step": 1})
except AttributeError: # [exact] pattern
d_range_dict = re.search(regex_singlet, d_range).groupdict()
d_range_dict.update({"step": 1, "stop": int(d_range_dict["start"])})
if int(d_range_dict["stop"]) == -1:
d_range_dict["stop"] = ds[d].shape[0]
else:
d_range_dict["stop"] = int(d_range_dict["stop"]) + 1
d_size = int(np.ceil((int(d_range_dict["stop"]) - int(d_range_dict["start"]) / int(d_range_dict["step"]))))
variable_ranges[k][d] = d_size
dds = f"Dataset {open_bracket}\n"
for variable, dim_size_dict in variable_ranges.items():
if len(dim_size_dict) == 1 and variable == list(dim_size_dict.keys())[0]:
# coordinate array
for dim_name, dim_size in dim_size_dict.items():
dtype = get_opendap_type(ds[dim_name].dtype)
dds += f"{indent}{dtype} {dim_name}[{dim_name} = {dim_size}];\n"
else:
# variable grid
vtype = get_opendap_type(ds[variable].dtype)
dds += f"{indent}Grid {open_bracket}\n{indent} ARRAY:\n"
dds += f"{indent}{indent}{vtype} {variable}"
for dim_name, dim_size in dim_size_dict.items():
dds += f"[{dim_name} = {dim_size}]"
dds += f";\n{indent} MAPS:\n"
for dim_name, dim_size in dim_size_dict.items():
dtype = get_opendap_type(ds[dim_name].dtype)
dds += f"{indent}{indent}{dtype} {dim_name}[{dim_name} = {dim_size}];\n"
dds += f"{indent}{close_bracket} {variable};\n"
dds += f"{close_bracket} {name};"
return dds
def create_dods(ds: xr.Dataset, parsed_args: dict, name: str) -> bytes:
packer = xdrlib.Packer()
dds = create_dds(ds, parsed_args, name) + "\n\nData:\n"
variable_ranges = OrderedDict([])
if parsed_args is None:
for k in ds.variables:
dimensions = ds[k].dims
variable_ranges[k] = OrderedDict([])
for d in dimensions:
variable_ranges[k][d] = {"start": 0, "step": 1, "stop": ds[d].shape[0]}
else:
for k, v in parsed_args.items():
dimensions = ds[k].dims
values = ["[" + x for x in v.split("[") if x]
variable_ranges[k] = OrderedDict([])
for i, d in enumerate(dimensions):
try:
d_range = values[i]
except IndexError:
d_range = "[0:1:-1]" # dimension not specified -> assume all
try: # Most common [start:step:stop] pattern
d_range_dict = re.search(regex_triplet, d_range).groupdict()
except AttributeError:
try: # [start:stop] pattern with implied step=1
d_range_dict = re.search(regex_doublet, d_range).groupdict()
d_range_dict.update({"step": 1})
except AttributeError: # [exact] pattern
d_range_dict = re.search(regex_singlet, d_range).groupdict()
d_range_dict.update({"step": 1, "stop": int(d_range_dict["start"])})
if int(d_range_dict["stop"]) == -1:
d_range_dict["stop"] = ds[d].shape[0]
else:
d_range_dict["stop"] = int(d_range_dict["stop"]) + 1
variable_ranges[k][d] = d_range_dict
for variable, data_dict in variable_ranges.items():
if len(data_dict) == 1 and variable == list(data_dict.keys())[0]:
for dim_name, dim_range_dict in data_dict.items():
size = int(np.ceil((int(dim_range_dict["stop"]) - int(dim_range_dict["start"]) / int(dim_range_dict["step"]))))
d_values = ds[dim_name][int(dim_range_dict["start"]):int(dim_range_dict["stop"]):int(dim_range_dict["step"])].values[:]
packer.pack_uint(size)
if d_values.dtype == np.int64:
d_values = d_values.astype('int32')
packer.pack_array(d_values, get_xdr_type(packer, np.int32))
else:
packer.pack_array(d_values, get_xdr_type(packer, ds[dim_name].dtype))
else:
cdr = [(int(data_dict[d]["start"]), int(data_dict[d]["stop"]), int(data_dict[d]["step"])) for d in data_dict.keys()]
if len(cdr) == 4:
variable_array = ds[variable][
cdr[0][0]:cdr[0][1]:cdr[0][2],
cdr[1][0]:cdr[1][1]:cdr[1][2],
cdr[2][0]:cdr[2][1]:cdr[2][2],
cdr[3][0]:cdr[3][1]:cdr[3][2]
].compute()
elif len(cdr) == 3:
variable_array = ds[variable][
cdr[0][0]:cdr[0][1]:cdr[0][2],
cdr[1][0]:cdr[1][1]:cdr[1][2],
cdr[2][0]:cdr[2][1]:cdr[2][2]
].compute()
elif len(cdr) == 2:
variable_array = ds[variable][
cdr[0][0]:cdr[0][1]:cdr[0][2],
cdr[1][0]:cdr[1][1]:cdr[1][2]
].compute()
elif len(cdr) == 1:
variable_array = ds[variable][
cdr[0][0]:cdr[0][1]:cdr[0][2]
].compute()
elif len(cdr) == 0:
variable_array = ds[variable].compute()
else:
raise IndexError(f"Too many dimensions. There are {len(cdr)} dimensions, but only up to 4 are supported")
variable_array_flat = np.array(variable_array).ravel()
packer.pack_uint(variable_array_flat.shape[0])
if variable_array_flat.dtype == np.int64:
variable_array_flat = variable_array_flat.astype('int32')
print(variable_array_flat.dtype)
packer.pack_array(variable_array_flat, get_xdr_type(packer, np.int32))
else:
packer.pack_array(variable_array_flat, get_xdr_type(packer, ds[variable].dtype))
for dim_name, dim_range_dict in data_dict.items():
size = int(np.ceil((int(dim_range_dict["stop"]) - int(dim_range_dict["start"]) / int(dim_range_dict["step"]))))
d_values = ds[dim_name][int(dim_range_dict["start"]):int(dim_range_dict["stop"]):int(dim_range_dict["step"])].values[:]
packer.pack_uint(size)
if d_values.dtype == np.int64:
d_values = d_values.astype('int32')
packer.pack_array(d_values, get_xdr_type(packer, np.int32))
else:
packer.pack_array(d_values, get_xdr_type(packer, ds[dim_name].dtype))
dods = dds.encode() + packer.get_buffer()
return dods
def create_subset(ds: xr.Dataset, name: str, format: Literal["csv", "nc"], vars: Optional[List[str]]) -> str:
path = os.path.join("app/tmp", name)
if not vars:
new_ds = ds
else:
new_ds = ds[vars]
if format == "nc":
new_ds.to_netcdf(path, mode="w")
elif format == "csv":
new_ds.to_dataframe().to_csv(path, mode="w")
return path
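# A rough usage sketch tying the helpers together (bucket name and query are hypothetical):
#   ds = load_dataset("my-bucket/sst/analysis.zarr")           # cached per path
#   constraints = parse_args("sst%5B0:1:10%5D%5B0:1:0%5D")     # None means "everything"
#   das = create_das(ds, constraints)                          # .das response body
#   dds = create_dds(ds, constraints, "analysis")              # .dds response body
#   dods = create_dods(ds, constraints, "analysis")            # binary .dods response
# The surrounding web framework (not shown in this module) is expected to route the
# .das/.dds/.dods suffixes of an OPeNDAP URL to these functions.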
|
py | b4157a53b80db4ca47c56919635d41ec7eaaacf7 | import base64
import uuid
import os
from openpyxl.chart import LineChart, Reference
from openpyxl.chart.label import DataLabelList
from openpyxl.styles import PatternFill, Border, Side, Alignment, Font
from openpyxl.drawing.image import Image
from openpyxl import Workbook
import openpyxl.utils.cell as format_cell
########################################################################################################################
# PROCEDURES
# Step 1: Validate the report data
# Step 2: Generate excel file
# Step 3: Encode the excel file bytes to Base64
########################################################################################################################
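# A minimal usage sketch (the report dict below is hypothetical; it only hints at the keys
# this exporter reads, e.g. report['reporting_period']['names'/'units'/'means'/...]):
#   encoded = export(report,
#                    name='Chiller Plant',
#                    reporting_start_datetime_local='2020-01-01T00:00:00',
#                    reporting_end_datetime_local='2020-01-31T23:59:59',
#                    period_type='daily')
#   # `encoded` is the Base64 string of the generated .xlsx, or None if nothing could be exported.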
def export(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
####################################################################################################################
# Step 1: Validate the report data
####################################################################################################################
if report is None:
return None
print(report)
####################################################################################################################
# Step 2: Generate excel file from the report data
####################################################################################################################
filename = generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
####################################################################################################################
# Step 3: Encode the excel file to Base64
####################################################################################################################
try:
with open(filename, 'rb') as binary_file:
binary_file_data = binary_file.read()
except IOError:
# If the generated file cannot be read back there is nothing to encode
return None
# Base64 encode the bytes
base64_encoded_data = base64.b64encode(binary_file_data)
# get the Base64 encoded data using human-readable characters.
base64_message = base64_encoded_data.decode('utf-8')
# delete the file from server
try:
os.remove(filename)
except NotImplementedError as ex:
pass
return base64_message
def generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
wb = Workbook()
ws = wb.active
ws.title = "EquipmentStatistics"
# Row height
ws.row_dimensions[1].height = 102
for i in range(2, 2000 + 1):
ws.row_dimensions[i].height = 42
# Col width
ws.column_dimensions['A'].width = 1.5
ws.column_dimensions['B'].width = 25.0
for i in range(ord('C'), ord('L')):
ws.column_dimensions[chr(i)].width = 15.0
# Font
name_font = Font(name='Constantia', size=15, bold=True)
title_font = Font(name='宋体', size=15, bold=True)
table_fill = PatternFill(fill_type='solid', fgColor='1F497D')
f_border = Border(left=Side(border_style='medium', color='00000000'),
right=Side(border_style='medium', color='00000000'),
bottom=Side(border_style='medium', color='00000000'),
top=Side(border_style='medium', color='00000000')
)
b_border = Border(
bottom=Side(border_style='medium', color='00000000'),
)
b_c_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
c_c_alignment = Alignment(vertical='center',
horizontal='center',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
b_r_alignment = Alignment(vertical='bottom',
horizontal='right',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
# Img
img = Image("excelexporters/myems.png")
ws.add_image(img, 'B1')
# Title
ws['B3'].font = name_font
ws['B3'].alignment = b_r_alignment
ws['B3'] = 'Name:'
ws['C3'].border = b_border
ws['C3'].alignment = b_c_alignment
ws['C3'].font = name_font
ws['C3'] = name
ws['D3'].font = name_font
ws['D3'].alignment = b_r_alignment
ws['D3'] = 'Period:'
ws['E3'].border = b_border
ws['E3'].alignment = b_c_alignment
ws['E3'].font = name_font
ws['E3'] = period_type
ws['F3'].font = name_font
ws['F3'].alignment = b_r_alignment
ws['F3'] = 'Date:'
ws['G3'].border = b_border
ws['G3'].alignment = b_c_alignment
ws['G3'].font = name_font
ws['G3'] = reporting_start_datetime_local + "__" + reporting_end_datetime_local
ws.merge_cells("G3:H3")
if "reporting_period" not in report.keys() or \
"names" not in report['reporting_period'].keys() or len(report['reporting_period']['names']) == 0:
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
####################################################################################################################
# First: statistical analysis
# 6: title
# 7: table title
# 8~ca_len table_data
####################################################################################################################
reporting_period_data = report['reporting_period']
if "names" not in reporting_period_data.keys() or \
reporting_period_data['names'] is None or \
len(reporting_period_data['names']) == 0:
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
ws['B6'].font = title_font
ws['B6'] = name + ' 统计分析'
category = reporting_period_data['names']
# table_title
ws['B7'].fill = table_fill
ws['B7'].font = title_font
ws['B7'].alignment = c_c_alignment
ws['B7'] = '报告期'
ws['B7'].border = f_border
ws['C7'].font = title_font
ws['C7'].alignment = c_c_alignment
ws['C7'] = '算术平均数'
ws['C7'].border = f_border
ws['D7'].font = title_font
ws['D7'].alignment = c_c_alignment
ws['D7'] = '中位数'
ws['D7'].border = f_border
ws['E7'].font = title_font
ws['E7'].alignment = c_c_alignment
ws['E7'] = '最小值'
ws['E7'].border = f_border
ws['F7'].font = title_font
ws['F7'].alignment = c_c_alignment
ws['F7'] = '最大值'
ws['F7'].border = f_border
ws['G7'].font = title_font
ws['G7'].alignment = c_c_alignment
ws['G7'] = '样本标准差'
ws['G7'].border = f_border
ws['H7'].font = title_font
ws['H7'].alignment = c_c_alignment
ws['H7'] = '样本方差'
ws['H7'].border = f_border
# table_data
for i, value in enumerate(category):
row = i * 2 + 8
ws['B' + str(row)].font = name_font
ws['B' + str(row)].alignment = c_c_alignment
ws['B' + str(row)] = reporting_period_data['names'][i] + " (" + reporting_period_data['units'][i] + " )"
ws['B' + str(row)].border = f_border
ws['B' + str(row + 1)].font = name_font
ws['B' + str(row + 1)].alignment = c_c_alignment
ws['B' + str(row + 1)] = "环比"
ws['B' + str(row + 1)].border = f_border
ws['C' + str(row)].font = name_font
ws['C' + str(row)].alignment = c_c_alignment
ws['C' + str(row)] = reporting_period_data['means'][i] \
if reporting_period_data['means'][i] is not None else ''
ws['C' + str(row)].border = f_border
ws['C' + str(row)].number_format = '0.00'
ws['C' + str(row + 1)].font = name_font
ws['C' + str(row + 1)].alignment = c_c_alignment
ws['C' + str(row + 1)] = str(round(reporting_period_data['means_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['means_increment_rate'][i] is not None else '0.00%'
ws['C' + str(row + 1)].border = f_border
ws['D' + str(row)].font = name_font
ws['D' + str(row)].alignment = c_c_alignment
ws['D' + str(row)] = reporting_period_data['medians'][i] \
if reporting_period_data['medians'][i] is not None else ''
ws['D' + str(row)].border = f_border
ws['D' + str(row)].number_format = '0.00'
ws['D' + str(row + 1)].font = name_font
ws['D' + str(row + 1)].alignment = c_c_alignment
ws['D' + str(row + 1)] = str(round(reporting_period_data['medians_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['medians_increment_rate'][i] is not None else '0.00%'
ws['D' + str(row + 1)].border = f_border
ws['E' + str(row)].font = name_font
ws['E' + str(row)].alignment = c_c_alignment
ws['E' + str(row)] = reporting_period_data['minimums'][i] \
if reporting_period_data['minimums'][i] is not None else ''
ws['E' + str(row)].border = f_border
ws['E' + str(row)].number_format = '0.00'
ws['E' + str(row + 1)].font = name_font
ws['E' + str(row + 1)].alignment = c_c_alignment
ws['E' + str(row + 1)] = str(round(reporting_period_data['minimums_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['minimums_increment_rate'][i] is not None else '0.00%'
ws['E' + str(row + 1)].border = f_border
ws['F' + str(row)].font = name_font
ws['F' + str(row)].alignment = c_c_alignment
ws['F' + str(row)] = reporting_period_data['maximums'][i] \
if reporting_period_data['maximums'][i] is not None else ''
ws['F' + str(row)].border = f_border
ws['F' + str(row)].number_format = '0.00'
ws['F' + str(row + 1)].font = name_font
ws['F' + str(row + 1)].alignment = c_c_alignment
ws['F' + str(row + 1)] = str(round(reporting_period_data['maximums_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['maximums_increment_rate'][i] is not None else '0.00%'
ws['F' + str(row + 1)].border = f_border
ws['G' + str(row)].font = name_font
ws['G' + str(row)].alignment = c_c_alignment
ws['G' + str(row)] = reporting_period_data['stdevs'][i] \
if reporting_period_data['stdevs'][i] is not None else ''
ws['G' + str(row)].border = f_border
ws['G' + str(row)].number_format = '0.00'
ws['G' + str(row + 1)].font = name_font
ws['G' + str(row + 1)].alignment = c_c_alignment
ws['G' + str(row + 1)] = str(round(reporting_period_data['stdevs_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['stdevs_increment_rate'][i] is not None else '0.00%'
ws['G' + str(row + 1)].border = f_border
ws['H' + str(row)].font = name_font
ws['H' + str(row)].alignment = c_c_alignment
ws['H' + str(row)] = reporting_period_data['variances'][i] \
if reporting_period_data['variances'][i] is not None else ''
ws['H' + str(row)].border = f_border
ws['H' + str(row)].number_format = '0.00'
ws['H' + str(row + 1)].font = name_font
ws['H' + str(row + 1)].alignment = c_c_alignment
ws['H' + str(row + 1)] = str(round(reporting_period_data['variances_increment_rate'][i] * 100, 2)) + "%" \
if reporting_period_data['variances_increment_rate'][i] is not None else '0.00%'
ws['H' + str(row + 1)].border = f_border
####################################################################################################################
# Second: detailed data
# a+1~ analysis_end_row_number+1+line_charts_row_number+: line
# detailed_start_row_number~ : the detailed data table
####################################################################################################################
has_timestamps_flag = True
if "timestamps" not in reporting_period_data.keys() or \
reporting_period_data['timestamps'] is None or \
len(reporting_period_data['timestamps']) == 0:
has_timestamps_flag = False
if has_timestamps_flag:
timestamps = reporting_period_data['timestamps'][0]
values = reporting_period_data['values']
names = reporting_period_data['names']
ca_len = len(names)
real_timestamps_len = timestamps_data_not_equal_0(report['parameters']['timestamps'])
time_len = len(timestamps)
# the detailed title
line_charts_row_number = 6 * ca_len + 1
analysis_end_row_number = 7 + 2 * ca_len
detailed_start_row_number = analysis_end_row_number + line_charts_row_number + 1 + real_timestamps_len * 7
ws['B' + str(detailed_start_row_number)].font = title_font
ws['B' + str(detailed_start_row_number)] = name + ' 详细数据'
# the detailed table_title
ws['B' + str(detailed_start_row_number + 1)].fill = table_fill
ws['B' + str(detailed_start_row_number + 1)].font = name_font
ws['B' + str(detailed_start_row_number + 1)].alignment = c_c_alignment
ws['B' + str(detailed_start_row_number + 1)] = "时间"
ws['B' + str(detailed_start_row_number + 1)].border = f_border
for i in range(0, ca_len):
col = chr(ord('C') + i)
ws[col + str(detailed_start_row_number + 1)].font = name_font
ws[col + str(detailed_start_row_number + 1)].alignment = c_c_alignment
ws[col + str(detailed_start_row_number + 1)] = names[i] + " - (" + reporting_period_data['units'][i] + ")"
ws[col + str(detailed_start_row_number + 1)].border = f_border
# the detailed table_date
for i in range(0, time_len):
rows = i + detailed_start_row_number + 2
ws['B' + str(rows)].font = name_font
ws['B' + str(rows)].alignment = c_c_alignment
ws['B' + str(rows)] = timestamps[i]
ws['B' + str(rows)].border = f_border
for index in range(0, ca_len):
col = chr(ord('C') + index)
ws[col + str(rows)].font = name_font
ws[col + str(rows)].alignment = c_c_alignment
ws[col + str(rows)] = values[index][i]
ws[col + str(rows)].number_format = '0.00'
ws[col + str(rows)].border = f_border
# Subtotal
row_subtotals = detailed_start_row_number + 2 + time_len
ws['B' + str(row_subtotals)].font = name_font
ws['B' + str(row_subtotals)].alignment = c_c_alignment
ws['B' + str(row_subtotals)] = "小计"
ws['B' + str(row_subtotals)].border = f_border
for i in range(0, ca_len):
col = chr(ord('C') + i)
ws[col + str(row_subtotals)].font = name_font
ws[col + str(row_subtotals)].alignment = c_c_alignment
ws[col + str(row_subtotals)] = reporting_period_data['subtotals'][i]
ws[col + str(row_subtotals)].border = f_border
ws[col + str(row_subtotals)].number_format = '0.00'
####################################################################################################################
# third: LineChart
# LineChart requires data from the detailed data table in the Excel file
# so print the detailed data table first and then print LineChart
####################################################################################################################
for i in range(0, ca_len):
line = LineChart()
line.title = "报告期消耗" + " - " + names[i] + "(" + reporting_period_data['units'][i] + ")"
line.style = 10
line.height = 8.40
line.width = 24
line.x_axis.majorTickMark = 'in'
line.y_axis.majorTickMark = 'in'
line.dLbls = DataLabelList()
line.dLbls.dLblPos = 't'
line.dLbls.showVal = True
times = Reference(ws, min_col=2, min_row=detailed_start_row_number + 2,
max_row=detailed_start_row_number + 2 + time_len)
line_data = Reference(ws, min_col=3 + i, min_row=detailed_start_row_number + 1,
max_row=detailed_start_row_number + 1 + time_len)
line.add_data(line_data, titles_from_data=True)
line.set_categories(times)
ser = line.series[0]
ser.marker.symbol = "diamond"
ser.marker.size = 5
ws.add_chart(line, 'B' + str(analysis_end_row_number + 2 + 6 * i))
####################################################################################################################
current_sheet_parameters_row_number = analysis_end_row_number + 2 + 6 * ca_len
has_parameters_names_and_timestamps_and_values_data = True
if 'parameters' not in report.keys() or \
report['parameters'] is None or \
'names' not in report['parameters'].keys() or \
report['parameters']['names'] is None or \
len(report['parameters']['names']) == 0 or \
'timestamps' not in report['parameters'].keys() or \
report['parameters']['timestamps'] is None or \
len(report['parameters']['timestamps']) == 0 or \
'values' not in report['parameters'].keys() or \
report['parameters']['values'] is None or \
len(report['parameters']['values']) == 0 or \
timestamps_data_all_equal_0(report['parameters']['timestamps']):
has_parameters_names_and_timestamps_and_values_data = False
if has_parameters_names_and_timestamps_and_values_data:
################################################################################################################
# new worksheet
################################################################################################################
parameters_data = report['parameters']
parameters_names_len = len(parameters_data['names'])
file_name = __file__.split('/')[-1].replace(".py", "")
parameters_ws = wb.create_sheet(file_name + 'Parameters')
parameters_timestamps_data_max_len = \
get_parameters_timestamps_lists_max_len(list(parameters_data['timestamps']))
# Row height
parameters_ws.row_dimensions[1].height = 102
for i in range(2, 7 + 1):
parameters_ws.row_dimensions[i].height = 42
for i in range(8, parameters_timestamps_data_max_len + 10):
parameters_ws.row_dimensions[i].height = 60
# Col width
parameters_ws.column_dimensions['A'].width = 1.5
parameters_ws.column_dimensions['B'].width = 25.0
for i in range(3, 12 + parameters_names_len * 3):
parameters_ws.column_dimensions[format_cell.get_column_letter(i)].width = 15.0
# Img
img = Image("excelexporters/myems.png")
img.width = img.width * 0.85
img.height = img.height * 0.85
parameters_ws.add_image(img, 'B1')
# Title
parameters_ws.row_dimensions[3].height = 60
parameters_ws['B3'].font = name_font
parameters_ws['B3'].alignment = b_r_alignment
parameters_ws['B3'] = 'Name:'
parameters_ws['C3'].border = b_border
parameters_ws['C3'].alignment = b_c_alignment
parameters_ws['C3'].font = name_font
parameters_ws['C3'] = name
parameters_ws['D3'].font = name_font
parameters_ws['D3'].alignment = b_r_alignment
parameters_ws['D3'] = 'Period:'
parameters_ws['E3'].border = b_border
parameters_ws['E3'].alignment = b_c_alignment
parameters_ws['E3'].font = name_font
parameters_ws['E3'] = period_type
parameters_ws['F3'].font = name_font
parameters_ws['F3'].alignment = b_r_alignment
parameters_ws['F3'] = 'Date:'
parameters_ws['G3'].border = b_border
parameters_ws['G3'].alignment = b_c_alignment
parameters_ws['G3'].font = name_font
parameters_ws['G3'] = reporting_start_datetime_local + "__" + reporting_end_datetime_local
parameters_ws.merge_cells("G3:H3")
parameters_ws_current_row_number = 6
parameters_ws['B' + str(parameters_ws_current_row_number)].font = title_font
parameters_ws['B' + str(parameters_ws_current_row_number)] = name + ' Parameters'
parameters_ws_current_row_number += 1
parameters_table_start_row_number = parameters_ws_current_row_number
parameters_ws.row_dimensions[parameters_ws_current_row_number].height = 80
parameters_ws_current_row_number += 1
table_current_col_number = 2
for i in range(0, parameters_names_len):
if len(parameters_data['timestamps'][i]) == 0:
continue
col = format_cell.get_column_letter(table_current_col_number)
parameters_ws[col + str(parameters_ws_current_row_number - 1)].fill = table_fill
parameters_ws[col + str(parameters_ws_current_row_number - 1)].border = f_border
col = format_cell.get_column_letter(table_current_col_number + 1)
parameters_ws[col + str(parameters_ws_current_row_number - 1)].fill = table_fill
parameters_ws[col + str(parameters_ws_current_row_number - 1)].border = f_border
parameters_ws[col + str(parameters_ws_current_row_number - 1)].font = name_font
parameters_ws[col + str(parameters_ws_current_row_number - 1)].alignment = c_c_alignment
parameters_ws[col + str(parameters_ws_current_row_number - 1)] = parameters_data['names'][i]
table_current_row_number = parameters_ws_current_row_number
for j, value in enumerate(list(parameters_data['timestamps'][i])):
col = format_cell.get_column_letter(table_current_col_number)
parameters_ws[col + str(table_current_row_number)].border = f_border
parameters_ws[col + str(table_current_row_number)].font = title_font
parameters_ws[col + str(table_current_row_number)].alignment = c_c_alignment
parameters_ws[col + str(table_current_row_number)] = value
col = format_cell.get_column_letter(table_current_col_number + 1)
parameters_ws[col + str(table_current_row_number)].border = f_border
parameters_ws[col + str(table_current_row_number)].font = title_font
parameters_ws[col + str(table_current_row_number)].alignment = c_c_alignment
parameters_ws[col + str(table_current_row_number)] = round(parameters_data['values'][i][j], 2)
table_current_row_number += 1
table_current_col_number = table_current_col_number + 3
################################################################################################################
# parameters chart and parameters table
################################################################################################################
ws['B' + str(current_sheet_parameters_row_number)].font = title_font
ws['B' + str(current_sheet_parameters_row_number)] = name + ' Parameters'
current_sheet_parameters_row_number += 1
chart_start_row_number = current_sheet_parameters_row_number
col_index = 0
for i in range(0, parameters_names_len):
if len(parameters_data['timestamps'][i]) == 0:
continue
line = LineChart()
data_col = 3 + col_index * 3
labels_col = 2 + col_index * 3
col_index += 1
line.title = 'Parameters - ' + \
parameters_ws.cell(row=parameters_table_start_row_number, column=data_col).value
labels = Reference(parameters_ws, min_col=labels_col, min_row=parameters_table_start_row_number + 1,
max_row=(len(parameters_data['timestamps'][i]) + parameters_table_start_row_number))
line_data = Reference(parameters_ws, min_col=data_col, min_row=parameters_table_start_row_number,
max_row=(len(parameters_data['timestamps'][i]) + parameters_table_start_row_number))
line.add_data(line_data, titles_from_data=True)
line.set_categories(labels)
line_data = line.series[0]
line_data.marker.symbol = "circle"
line_data.smooth = True
line.x_axis.crosses = 'min'
line.height = 8.25
line.width = 24
line.dLbls = DataLabelList()
line.dLbls.dLblPos = 't'
line.dLbls.showVal = False
line.dLbls.showPercent = False
chart_col = 'B'
chart_cell = chart_col + str(chart_start_row_number)
chart_start_row_number += 6
ws.add_chart(line, chart_cell)
current_sheet_parameters_row_number = chart_start_row_number
current_sheet_parameters_row_number += 1
####################################################################################################################
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
def timestamps_data_all_equal_0(lists):
for i, value in enumerate(list(lists)):
if len(value) > 0:
return False
return True
def get_parameters_timestamps_lists_max_len(parameters_timestamps_lists):
max_len = 0
for i, value in enumerate(list(parameters_timestamps_lists)):
if len(value) > max_len:
max_len = len(value)
return max_len
def timestamps_data_not_equal_0(lists):
number = 0
for i, value in enumerate(list(lists)):
if len(value) > 0:
number += 1
return number
|
py | b4157aba009703f06eae00e16679165c983b3b11 | from __future__ import print_function
import torch
import torch.nn as nn
import torch_mlu
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
import sys
import os
import copy
import random
import time
import unittest
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir+"/../../")
from common_utils import testinfo, TestCase
import logging
logging.basicConfig(level=logging.DEBUG)
torch.set_grad_enabled(False)
class TestAddModel(nn.Module):
def __init__(self, alpha = 1.0):
super(TestAddModel, self).__init__()
self.alpha = alpha
def forward(self, x, y):
z = torch.add(x, y, alpha = self.alpha)
return z
class TestAddScalarModel1(nn.Module):
def __init__(self, scalar = 1.0, alpha = 1.0):
super(TestAddScalarModel1, self).__init__()
self.scalar = scalar
self.alpha = alpha
def forward(self, x):
y = torch.add(x, self.scalar, alpha = self.alpha)
return y
class TestAddScalarModel2(nn.Module):
def __init__(self, scalar = 1.0):
super(TestAddScalarModel2, self).__init__()
self.scalar = scalar
def forward(self, x):
y = self.scalar + x
return y
class TestAddOp(TestCase):
def test_add(self):
# Test add.Tensor
alpha = random.random()
model = TestAddModel(alpha)
input_x = torch.rand(3, 5, 6)
input_y = torch.rand(3, 5, 6)
traced_model = torch.jit.trace(model, (input_x, input_y), check_trace=False)
input_x_mlu = input_x.to('mlu')
input_y_mlu = input_y.to('mlu')
# Test for fp32 & fp16
out_cpu = model(input_x, input_y)
out_mlu = traced_model(input_x_mlu, input_y_mlu)
out_mlu_fp16 = traced_model(input_x_mlu.half(), input_y_mlu.half())
out_mlu_fp32 = traced_model(input_x_mlu.float(), input_y_mlu.half())
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.03, use_MSE = True)
self.assertTensorsEqual(out_cpu, out_mlu_fp16.cpu(), 0.03, use_MSE = True)
self.assertTensorsEqual(out_cpu, out_mlu_fp32.cpu(), 0.03, use_MSE = True)
def test_tensor_add_scalar(self):
# Test Tensor + Scalar
scalar = random.random()
alpha = random.random()
model = TestAddScalarModel1(scalar, alpha)
input_x = torch.randn(1,3,4,4)
traced_model = torch.jit.trace(model, input_x, check_trace=False)
input_x_mlu = input_x.to('mlu')
# Test for fp32 & fp16
out_cpu = model(input_x)
out_mlu = traced_model(input_x_mlu)
out_mlu_fp16 = traced_model(input_x_mlu.half())
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.03, use_MSE = True)
self.assertTensorsEqual(out_cpu, out_mlu_fp16.cpu(), 0.03, use_MSE = True)
def test_scalar_add_tensor(self):
# Test Scalar + Tensor
scalar = random.random()
model = TestAddScalarModel2(scalar)
input_x = torch.randn(1,3,4,4)
traced_model = torch.jit.trace(model, input_x, check_trace=False)
input_x_mlu = input_x.to('mlu')
# Test for fp32 & fp16
out_cpu = model(input_x)
out_mlu = traced_model(input_x_mlu)
out_mlu_fp16 = traced_model(input_x_mlu.half())
self.assertTensorsEqual(out_cpu, out_mlu.cpu(), 0.03, use_MSE = True)
self.assertTensorsEqual(out_cpu, out_mlu_fp16.cpu(), 0.03, use_MSE = True)
if __name__ == '__main__':
unittest.main()
|
py | b4157bacd28f5c887f54c4cc5f8178a9a1f67945 | """
Generalized least squares with AR errors
Six GLSAR examples using artificial data
"""
# .. Note: these examples are mainly for cross-checking results. They are still being written, and GLSAR is still under development.
import numpy as np
import numpy.testing as npt
from scipy import signal
import statsmodels.api as sm
from statsmodels.regression.linear_model import GLSAR, yule_walker
examples_all = list(range(10)) + ['test_copy']
examples = examples_all # [5]
if 0 in examples:
print('\n Example 0')
X = np.arange(1, 8)
X = sm.add_constant(X, prepend=False)
Y = np.array((1, 3, 4, 5, 8, 10, 9))
rho = 2
model = GLSAR(Y, X, 2)
for i in range(6):
results = model.fit()
print('AR coefficients:', model.rho)
rho, sigma = yule_walker(results.resid, order=model.order)
model = GLSAR(Y, X, rho)
par0 = results.params
print('params fit', par0)
model0if = GLSAR(Y, X, 2)
res = model0if.iterative_fit(6)
print('iterativefit beta', res.params)
results.tvalues # TODO: is this correct? It does equal params / bse
# but it differs from the AR example (which is wrong)
print(results.t_test([0, 1])) # are sd and t correct? vs
print(results.f_test(np.eye(2)))
rhotrue = np.array([0.5, 0.2])
nlags = np.size(rhotrue)
beta = np.array([0.1, 2])
noiseratio = 0.5
nsample = 2000
x = np.arange(nsample)
X1 = sm.add_constant(x, prepend=False)
wnoise = noiseratio * np.random.randn(nsample + nlags)
# .. noise = noise[1:] + rhotrue*noise[:-1] # wrong, this is not AR
# .. see the draft of the univariate ARMA functions
# generate AR(p)
if np.size(rhotrue) == 1:
# replace with scipy.signal.lfilter, keep for testing
arnoise = np.zeros(nsample + 1)
for i in range(1, nsample + 1):
arnoise[i] = rhotrue * arnoise[i - 1] + wnoise[i]
noise = arnoise[1:]
an = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise[1:])
print('simulate AR(1) difference', np.max(np.abs(noise - an)))
else:
noise = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise)[nlags:]
# generate the GLS model with AR noise
y1 = np.dot(X1, beta) + noise
if 1 in examples:
print('\nExample 1: iterative_fit and repeated calls')
mod1 = GLSAR(y1, X1, 1)
res = mod1.iterative_fit()
print(res.params)
print(mod1.rho)
mod1 = GLSAR(y1, X1, 2)
for i in range(5):
res1 = mod1.iterative_fit(2)
print(mod1.rho)
print(res1.params)
if 2 in examples:
print('\nExample 2: iterative fitting of first model')
print('with AR(0)', par0)
parold = par0
mod0 = GLSAR(Y, X, 1)
for i in range(5):
res0 = mod0.iterative_fit(1)
print('rho', mod0.rho)
parnew = res0.params
print('params', parnew,)
print('params change in iteration', parnew - parold)
parold = parnew
# generate pure AR(p) process
Y = noise
# example without regressors; the result now directly has the same estimated rho as yule_walker
if 3 in examples:
print('\nExample 3: pure AR(2), GLSAR versus Yule_Walker')
model3 = GLSAR(Y, rho=2)
for i in range(5):
results = model3.fit()
print("AR coefficients:", model3.rho, results.params)
rho, sigma = yule_walker(results.resid, order=model3.order)
model3 = GLSAR(Y, rho=rho)
if 'test_copy' in examples:
xx = X.copy()
rhoyw, sigmayw = yule_walker(xx[:, 0], order=2)
print(rhoyw, sigmayw)
print((xx == X).all()) # test that the array is unchanged (fixed)
yy = Y.copy()
rhoyw, sigmayw = yule_walker(yy, order=2)
print(rhoyw, sigmayw)
print((yy == Y).all()) # test that the array is unchanged (fixed)
if 4 in examples:
print('\nExample 4: demeaned pure AR(2), GLSAR versus Yule_Walker')
Ydemeaned = Y - Y.mean()
model4 = GLSAR(Ydemeaned, rho=2)
for i in range(5):
results = model4.fit()
print("AR coefficients:", model3.rho, results.params)
rho, sigma = yule_walker(results.resid, order=model4.order)
model4 = GLSAR(Ydemeaned, rho=rho)
if 5 in examples:
print('\nExample 5: pure AR(2), GLSAR iterative_fit versus Yule_Walker')
model3a = GLSAR(Y, rho=1)
res3a = model3a.iterative_fit(5)
print(res3a.params)
print(model3a.rho)
rhoyw, sigmayw = yule_walker(Y, order=1)
print(rhoyw, sigmayw)
npt.assert_array_almost_equal(model3a.rho, rhoyw, 15)
for i in range(6):
model3b = GLSAR(Y, rho=0.1)
print(i, model3b.iterative_fit(i).params, model3b.rho)
model3b = GLSAR(Y, rho=0.1)
for i in range(6):
print(i, model3b.iterative_fit(2).params, model3b.rho)
print(np.array(res.history['params']))
print(np.array(res.history['rho']))
|
py | b4157c8b9706f7bfa56bbf87b326fd7331917189 |
# Path to images directory
IMAGES_PATH = "/home/ximenes/PycharmProjects/minichallenge_keras/dataset/data/"
# Paths for HDF5 files for train, validation and test
TRAIN_HDF5 = "/home/ximenes/PycharmProjects/minichallenge_keras/dataset/hdf5/train.hdf5"
VAL_HDF5 = "/home/ximenes/PycharmProjects/minichallenge_keras/dataset/hdf5/val.hdf5"
TEST_HDF5 = "/home/ximenes/PycharmProjects/minichallenge_keras/dataset/hdf5/test.hdf5"
MODEL_PATH = "/home/ximenes/PycharmProjects/minichallenge_keras/output/minichallenge.model"
CSVPATH = "/home/ximenes/PycharmProjects/minichallenge_keras/dataset_images_minitest.csv"
DATASET_MEAN = "/home/ximenes/PycharmProjects/minichallenge_keras/output/minichallenge.json"
OUTPUT_PATH = "/home/ximenes/PycharmProjects/minichallenge_keras/output/"
WEIGHTS_PATH = "/home/ximenes/PycharmProjects/minichallenge_keras/dnn/alexnet_weights.h5"
IMAGE_SIZE = 128
NUM_CLASSES = 3
BATH_SIZE = 50
|
py | b4157cfb2fc9229dec05a2b21eba1a8b76df062d | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
|
py | b4157d0cd643d23d50a8dd7dd0b0916bc9f5af50 | import os
import shutil
import subprocess
from ajenti.api import *
from ajenti.plugins.services.api import ServiceMultiplexor
from ajenti.plugins.vh.api import WebserverComponent, SanityCheck, Restartable
from nginx_templates import *
@plugin
class NginxConfigTest (SanityCheck):
def init(self):
self.type = _('NGINX config test')
def check(self):
p = subprocess.Popen(['nginx', '-t'], stderr=subprocess.PIPE)
o, self.message = p.communicate()
return p.returncode == 0
@plugin
class NginxServiceTest (SanityCheck):
def init(self):
self.type = _('NGINX service')
def check(self):
return ServiceMultiplexor.get().get_one('nginx').running
@plugin
class NginxWebserver (WebserverComponent):
def init(self):
self.config_root = '/etc/nginx'
self.config_file = '/etc/nginx/nginx.conf'
self.config_file_mime = '/etc/nginx/mime.conf'
self.config_file_fastcgi = '/etc/nginx/fcgi.conf'
self.config_file_proxy = '/etc/nginx/proxy.conf'
self.config_vhost_root = '/etc/nginx/conf.d'
self.config_custom_root = '/etc/nginx.custom.d'
self.config_modules_root = '/etc/nginx.modules.d'
self.lib_path = '/var/lib/nginx'
def __generate_website_location(self, ws, location):
params = location.backend.params
if location.backend.type == 'static':
content = TEMPLATE_LOCATION_CONTENT_STATIC % {
'autoindex': 'autoindex on;' if params.get('autoindex') else '',
}
if location.backend.type == 'proxy':
content = TEMPLATE_LOCATION_CONTENT_PROXY % {
'url': params.get('url', 'http://127.0.0.1/'),
}
if location.backend.type == 'fcgi':
content = TEMPLATE_LOCATION_CONTENT_FCGI % {
'url': params.get('url', '127.0.0.1:9000'),
}
if location.backend.type == 'php-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'php5.6-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP56_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php5.6-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php5.6-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'php7.0-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP70_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php7.0-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php7.0-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'php7.1-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP71_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php7.1-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php7.1-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'php7.2-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP72_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php7.2-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php7.2-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'php7.3-fcgi':
content = TEMPLATE_LOCATION_CONTENT_PHP73_FCGI % {
'listen': location.backend.params.get('listen', 'unix:/var/run/ajenti-v-php7.3-fcgi-' + location.backend.id + '.sock') or 'unix:/var/run/ajenti-v-php7.3-fcgi-'+ location.backend.id + '.sock',
}
if location.backend.type == 'python-wsgi':
content = TEMPLATE_LOCATION_CONTENT_PYTHON_WSGI % {
'id': location.backend.id,
}
if location.backend.type == 'ruby-unicorn':
content = TEMPLATE_LOCATION_CONTENT_RUBY_UNICORN % {
'id': location.backend.id,
}
if location.backend.type == 'ruby-puma':
content = TEMPLATE_LOCATION_CONTENT_RUBY_PUMA % {
'id': location.backend.id,
}
if location.backend.type == 'nodejs':
content = TEMPLATE_LOCATION_CONTENT_NODEJS % {
'port': location.backend.params.get('port', 8000) or 8000,
}
if location.custom_conf_override:
content = ''
path_spec = ''
if location.path:
if location.path_append_pattern:
path_spec = 'root %s;' % location.path
else:
path_spec = 'alias %s;' % location.path
return TEMPLATE_LOCATION % {
'pattern': location.pattern,
'custom_conf': location.custom_conf,
'path': path_spec,
'match': {
'exact': '',
'regex': '~',
'force-regex': '^~',
}[location.match],
'content': content,
}
def __generate_website_config(self, website):
params = {
'slug': website.slug,
'server_name': (
'server_name %s;' % (' '.join(domain.domain for domain in website.domains))
) if website.domains else '',
'ports': (
'\n'.join(
'listen %s:%s%s%s%s%s;' % (
x.host, x.port,
' ssl' if x.ssl else '',
' spdy' if x.spdy else '',
' http2' if x.http2 else '',
' default_server' if x.default else '',
)
for x in website.ports
)
),
'ssl_cert': 'ssl_certificate %s;' % website.ssl_cert_path if website.ssl_cert_path else '',
'ssl_key': 'ssl_certificate_key %s;' % website.ssl_key_path if website.ssl_key_path else '',
'ssl_protocols': 'ssl_protocols %s;' % website.ssl_protocols if website.ssl_protocols else '',
'ssl_prefer_server_ciphers': 'ssl_prefer_server_ciphers %s;' % website.ssl_prefer_server_ciphers if website.ssl_prefer_server_ciphers else '',
'ssl_dhparam': 'ssl_dhparam %s;' % website.ssl_diffie_hellman_group if website.ssl_diffie_hellman_group else '',
'ssl_ciphers': 'ssl_ciphers %s;' % website.ssl_ciphers if website.ssl_ciphers else '',
'ssl_session_timeout': 'ssl_session_timeout %s;' % website.ssl_session_timeout if website.ssl_session_timeout else '',
'ssl_session_cache': 'ssl_session_cache %s;' % website.ssl_session_cache if website.ssl_session_cache else '',
'ssl_stapling': 'ssl_stapling %s;' % website.ssl_stapling if website.ssl_stapling else '',
'ssl_stapling_verify': 'ssl_stapling_verify %s;' % website.ssl_stapling_verify if website.ssl_stapling_verify else '',
'ssl_header': 'add_header %s;' % website.ssl_header if website.ssl_header else '',
'maintenance': TEMPLATE_MAINTENANCE if website.maintenance_mode else '',
'root': website.root,
'custom_conf': website.custom_conf,
'custom_conf_toplevel': website.custom_conf_toplevel,
'locations': (
'\n'.join(self.__generate_website_location(website, location) for location in website.locations)
) if not website.maintenance_mode else '',
}
return TEMPLATE_WEBSITE % params
def create_configuration(self, config):
shutil.rmtree(self.config_root)
        os.mkdir(self.config_root, 0o755)
        os.mkdir(self.config_vhost_root, 0o755)
        if not os.path.exists(self.config_custom_root):
            os.mkdir(self.config_custom_root, 0o755)
        if not os.path.exists(self.config_modules_root):
            os.mkdir(self.config_modules_root, 0o755)
open(self.config_file, 'w').write(TEMPLATE_CONFIG_FILE)
open(self.config_file_mime, 'w').write(TEMPLATE_CONFIG_MIME)
open(self.config_file_fastcgi, 'w').write(TEMPLATE_CONFIG_FCGI)
open(self.config_file_proxy, 'w').write(TEMPLATE_CONFIG_PROXY)
for website in config.websites:
if website.enabled:
open(os.path.join(self.config_vhost_root, website.slug + '.conf'), 'w')\
.write(self.__generate_website_config(website))
subprocess.call([
'chown', 'www-data:www-data', '-R', self.lib_path,
])
def apply_configuration(self):
NGINXRestartable.get().schedule()
def get_checks(self):
return [NginxConfigTest.new(), NginxServiceTest.new()]
@plugin
class NGINXRestartable (Restartable):
def restart(self):
s = ServiceMultiplexor.get().get_one('nginx')
if not s.running:
s.start()
else:
s.command('reload')
|
py | b4157e3f741dfe2ef485d25ba417474c66e1ec6c | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test QPE """
import unittest
import warnings
from test.aqua import QiskitAquaTestCase
import numpy as np
from ddt import ddt, data, unpack
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.operators import MatrixOperator, WeightedPauliOperator
from qiskit.aqua.operators.legacy import op_converter
from qiskit.aqua.utils import decimal_to_binary
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.algorithms import QPE
from qiskit.circuit.library import QFT
from qiskit.aqua.components.initial_states import Custom
from qiskit.aqua.components.iqfts import Standard
# pylint: disable=invalid-name
@ddt
class TestQPE(QiskitAquaTestCase):
"""QPE tests."""
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
_I = np.array([[1, 0], [0, 1]])
H1 = X + Y + Z + _I
PAULI_DICT = {
'paulis': [
{"coeff": {"imag": 0.0, "real": -1.052373245772859}, "label": "II"},
{"coeff": {"imag": 0.0, "real": 0.39793742484318045}, "label": "IZ"},
{"coeff": {"imag": 0.0, "real": -0.39793742484318045}, "label": "ZI"},
{"coeff": {"imag": 0.0, "real": -0.01128010425623538}, "label": "ZZ"},
{"coeff": {"imag": 0.0, "real": 0.18093119978423156}, "label": "XX"}
]
}
PAULI_DICT_ZZ = {
'paulis': [
{"coeff": {"imag": 0.0, "real": 1.0}, "label": "ZZ"}
]
}
def setUp(self):
super().setUp()
qubit_op_simple = MatrixOperator(matrix=TestQPE.H1)
qubit_op_simple = op_converter.to_weighted_pauli_operator(qubit_op_simple)
qubit_op_h2_with_2_qubit_reduction = \
WeightedPauliOperator.from_dict(TestQPE.PAULI_DICT)
qubit_op_zz = WeightedPauliOperator.from_dict(TestQPE.PAULI_DICT_ZZ)
self._dict = {
'QUBIT_OP_SIMPLE': qubit_op_simple.to_opflow(),
'QUBIT_OP_ZZ': qubit_op_zz.to_opflow(),
'QUBIT_OP_H2_WITH_2_QUBIT_REDUCTION': qubit_op_h2_with_2_qubit_reduction.to_opflow()
}
def test_deprecated_qft(self):
"""Test the QPE algorithm on the deprecated QFT component."""
qubit_op = self._dict['QUBIT_OP_SIMPLE']
exact_eigensolver = NumPyMinimumEigensolver(qubit_op)
results = exact_eigensolver.run()
ref_eigenval = results.eigenvalue
ref_eigenvec = results.eigenstate
state_in = Custom(qubit_op.num_qubits, state_vector=ref_eigenvec)
warnings.filterwarnings('ignore', category=DeprecationWarning)
iqft = Standard(5)
qpe = QPE(qubit_op, state_in, iqft, num_time_slices=1, num_ancillae=5,
expansion_mode='suzuki', expansion_order=2,
shallow_circuit_concat=True)
backend = BasicAer.get_backend('qasm_simulator')
quantum_instance = QuantumInstance(backend, shots=100, seed_transpiler=1, seed_simulator=1)
# run qpe
result = qpe.run(quantum_instance)
warnings.filterwarnings('always', category=DeprecationWarning)
self.assertAlmostEqual(result.eigenvalue.real, ref_eigenval.real, delta=2e-2)
@data(
('QUBIT_OP_SIMPLE', 'qasm_simulator', 1, 5),
('QUBIT_OP_ZZ', 'statevector_simulator', 1, 1),
('QUBIT_OP_H2_WITH_2_QUBIT_REDUCTION', 'statevector_simulator', 1, 6),
)
@unpack
def test_qpe(self, qubit_op, simulator, num_time_slices, n_ancillae):
"""Test the QPE algorithm."""
self.log.debug('Testing QPE')
qubit_op = self._dict[qubit_op]
exact_eigensolver = NumPyMinimumEigensolver(qubit_op)
results = exact_eigensolver.run()
ref_eigenval = results.eigenvalue
ref_eigenvec = results.eigenstate
self.log.debug('The exact eigenvalue is: %s', ref_eigenval)
self.log.debug('The corresponding eigenvector: %s', ref_eigenvec)
state_in = Custom(qubit_op.num_qubits, state_vector=ref_eigenvec)
iqft = QFT(n_ancillae).inverse()
qpe = QPE(qubit_op, state_in, iqft, num_time_slices, n_ancillae,
expansion_mode='suzuki', expansion_order=2,
shallow_circuit_concat=True)
backend = BasicAer.get_backend(simulator)
quantum_instance = QuantumInstance(backend, shots=100, seed_transpiler=1, seed_simulator=1)
# run qpe
result = qpe.run(quantum_instance)
# report result
self.log.debug('top result str label: %s', result.top_measurement_label)
self.log.debug('top result in decimal: %s', result.top_measurement_decimal)
self.log.debug('stretch: %s', result.stretch)
self.log.debug('translation: %s', result.translation)
self.log.debug('final eigenvalue from QPE: %s', result.eigenvalue)
self.log.debug('reference eigenvalue: %s', ref_eigenval)
self.log.debug('ref eigenvalue (transformed): %s',
(ref_eigenval + result.translation) * result.stretch)
self.log.debug('reference binary str label: %s', decimal_to_binary(
(ref_eigenval.real + result.translation) * result.stretch,
max_num_digits=n_ancillae + 3,
fractional_part_only=True
))
self.assertAlmostEqual(result.eigenvalue.real, ref_eigenval.real, delta=2e-2)
if __name__ == '__main__':
unittest.main()
|
py | b4157f37eaaedf9a3e4b3846ccdcc3e6ab5da8cd | #
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a separate Command Line Window for each car
# and call this script with the appropriate car MAC address.
#
# Author: [email protected]
#
SCRIPT_TITLE="Send the car on several missions, one after the other"
# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
# Setup our car
car = Overdrive(12) # init overdrive object
# define what track locations correspond to which friendly location names eg 33 = shops
police = 33
fire = 34
hospital = 39
corner = 17
# ask the user for the first mission objective
mission_objective1 = input("Enter first mission objective (police/fire/hospital):")
mission_objective2 = input("Enter second mission objective (police/fire/hospital):")
mission_objective3 = input("Enter third mission objective (police/fire/hospital):")
# setup what track piece matches what label
if mission_objective1 == "police":
track_piece1 = police
elif mission_objective1 == "fire":
track_piece1 = fire
elif mission_objective1 == "hospital":
track_piece1 = hospital
if mission_objective2 == "police":
track_piece2 = police
elif mission_objective2 == "fire":
track_piece2 = fire
elif mission_objective2 == "hospital":
track_piece2 = hospital
if mission_objective3 == "police":
track_piece3 = police
elif mission_objective3 == "fire":
track_piece3 = fire
elif mission_objective3 == "hospital":
track_piece3 = hospital
# ask the user to choose the car's speed
speed = int(input("Enter car speed (0-1000):"))
# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(speed, 600)
# flag to signify when the mission has been completed
objective_complete = 0
# move car until mission parameters have been met
while objective_complete == 0:
# look for mission objective until found
if car.doMission(mission_objective1, track_piece1):
# stop the car
car.stopCarFast()
print("\n **************************")
print("FIRST OBJECTIVE COMPLETED! We have arrived at "+mission_objective1)
objective_complete = 1
else:
# not found yet, keep looking
time.sleep(0.2)
# start second objective
time.sleep(4)
print("Now starting second objective: "+mission_objective2)
time.sleep(2)
car.brakeLightsOff()
car.changeSpeed(speed, 600)
objective_complete = 0
while objective_complete == 0:
# look for mission objective until found
if car.doMission(mission_objective2, track_piece2):
# stop the car
car.stopCarFast()
print("\n **************************")
print("SECOND OBJECTIVE COMPLETED! We have arrived at "+mission_objective2)
objective_complete = 1
else:
# not found yet, keep looking
time.sleep(0.2)
# start third objective
time.sleep(4)
print("Now starting third objective: "+mission_objective3)
time.sleep(2)
car.brakeLightsOff()
car.changeSpeed(speed, 600)
objective_complete = 0
while objective_complete == 0:
# look for mission objective until found
if car.doMission(mission_objective3, track_piece3):
# stop the car
car.stopCarFast()
print("\n **************************")
print("THIRD OBJECTIVE COMPLETED! We have arrived at "+mission_objective3)
objective_complete = 1
else:
# not found yet, keep looking
time.sleep(0.2)
print("\n ***********************")
print("**************************")
print("All objectives found")
print("MISSION COMPLETE!")
#quit();
car.quit() |
bzl | b4157f6aef66ad17db0289e8715a70886c8ef918 | # Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a proto_lang_toolchain rule class with custom proto compiler.
There are two physical rule classes for proto_lang_toolchain and we want both of them
to have a name string of "proto_lang_toolchain".
"""
load(":common/proto/proto_lang_toolchain.bzl", "make_proto_lang_toolchain")
proto_lang_toolchain = make_proto_lang_toolchain(custom_proto_compiler = True)
|
py | b41580259ad3079a0486f6441ed8c9b31fc128c6 | # Databricks notebook source
HLRIOWYITKFDP
GOQFOEKSZNF
BQRYZYCEYRHRVDKCQSN
BELVHHTEWWKFYTNTWJIIYUQTBHUMOCJNDBBIPBOVCDIKTUPVXZRIUC
AUVGECGGHDZPJPMFEZWDFYYDXYGEMHXRHYXXGEMXTCZOPGPGSRCIQNPHCUONPPCBOWTFOZEYCXCQKKUNDSXSBAKSMWIPUKICUWX
HDCWKJXOZHPPXWBBPLIGLXMBATYPTDTCAACKEEWURDREVIIUPRJXDFNDLSHBZEBMWQOMYFWARMGERQAXVLFREGTYUXPABORSDUP
XPSNALKIEEH
TNRJVKVUADXUMYRVMHWANRYEQXHWTJQWRWKSYUM
JZXPNGKLOBUHKSQBTCTPEDKMXFIBBGGHRJQHBBORPGAUUQJRVXCIPMMFYYLRYN
KGQOIYGOLOQKPGZJQOZBYIDIZHPVDGNQIBWMZKLFVEICEQCZJBCOJNRCFYZBKW
XUCXWMRZSJZGGPFDQVRHQYDXFQAKRUAMZMPYIXPFUWMHCMC
HXYLXLGHGJHSABRRKKPNEFJQTIUKHUWMRZSWZBPACLASFINSC
# COMMAND ----------
MGRAAFOYIJMFRVFOSRGMGFXXEKYADNRPHTYWJOWZMVBJ
PWDILGWYEWDFNEZFZBSMBFRSQHNLFXXJUYMSTDBXBZOLDBSROW
VJZKPBXNXVNNTANWQWAUITCXBBBVPROZOINGKOJBTSWCDOPYBLDTEKAQGMWCUARJGWQY
ZPFVDMMLPYPQAMSJLQQWEDSYPZHXSYKENJIJMLMRAAFISKLL
ROYFOFXVCMBAZZIRVCWXHAWKILJJYAWWISQPHOVCWIGSYJ
# COMMAND ----------
YEGVKOKXNRAKWSMIJGQICYIXPZDXALZLGNOTGYHVESTP
# COMMAND ----------
EUIJSXZYUPDQQFSWCACJADRNZGSJIYRAJ
# COMMAND ----------
UGFQNBEQJETM
PUPRVDQIOHSKMQPCGUNVESHCJHXEIFWUQSSWSEQKNNTNTRKRZMGONRPFCVLHTPHBXYLRHZFAIGHWOLLWFDZNMEUGIWAKGTAVBKZFUAQLEGNUKNDZBMSOQSLCDALHWSQO
IPFRYPASTQSOMGKIAEUMKUMOCUVDHIVXZUOXHYOUQNZOLJSMRJDCMJTPLRHWDOKLBBXNBCTLUSFYRRHZDCASUGABWYSQ
UQAVLZHFFQGREDQGYLLDKMRWGIKJHXTGBIAVZDZSXLFBNERWVEKHOMZAGGXWWNAGGYGIESTGFCNWGZKXZWICBDCWXYQDABJSDCOEN
QWQQEHTLBUKHKBMGSNSJIAIMEXKQBVECIGTODUHRROXAIMVKIQXBBFICPJAVMYVPZVBLSMDBYTFHNAMXNITSIMHFQNBIPYAOLR
GHUYEXMAQAHQFFYPWBUBRHJVKXAFDGVHXBYXPZLLTKQHWXIHIDAPURJUFJRDIIDEMMXOZSSWHLGQRTRFWHJMMDZECZRBCF
G
# COMMAND ----------
HLYXINLAZVEFIXCTTQNFUVRS
# COMMAND ----------
TTXHRRLOCWDLVNKZRCVYWBLCAOTMQCDWHXEUCNSBCOKEM
UYQEGQGRHRAEDNYXMPSRZETETIVYAN
RSINMZPJMBPZSJMEAEZLKHAKSHDWUFVBFAXM
UIDJIHTYSNFGCQEHGBAETBNXDTHDOQXKNHCBPT
KRUNMFOIWPIPZUMRGXYSXJPRPRQBXANWXYYZZVN
# COMMAND ----------
KXOYFKLPJZVZENIQOONHWZLDRJ |
py | b41580c3f61b0a4ae2827e30ce3abe6e140dabd9 | from flask import Flask, render_template, url_for, jsonify
from fhir_parser import FHIR
import datetime, time
app = Flask(__name__)
@app.route('/')
def index():
fhir = FHIR()
patients = fhir.get_all_patients()
return render_template('patients.html', patients=patients)
@app.route('/patient/<string:id>')
def patient(id):
fhir = FHIR()
patient = fhir.get_patient(id)
observations = fhir.get_patient_observations(id)
return render_template('patientinfo.html', observations=observations, patient=patient)
@app.route('/statistics/<string:id>')
def getstatistics(id):
fhir = FHIR()
patient = fhir.get_patient(id)
observations = fhir.get_patient_observations(id)
bp_dict = {
"date": [],
"dbp": [],
"sbp": []
}
hr_dict = {
"date": [],
"hr": []
}
rr_dict = {
"date": [],
"rr": []
}
bw_dict = {
"date": [],
"bw": []
}
bh_dict = {
"date": [],
"bh": []
}
bmi_dict = {
"date": [],
"bmi": []
}
bmip_dict = {
"date": [],
"bmip": []
}
ps_dict = {
"date": [],
"ps": []
}
temp_bp_dates = []
bp_observations = []
temp_hr_dates = []
hr_observations = []
temp_rr_dates = []
rr_observations = []
temp_bw_dates = []
bw_observations = []
temp_bh_dates = []
bh_observations = []
temp_bmi_dates = []
bmi_observations = []
temp_bmip_dates = []
bmip_observations = []
temp_ps_dates = []
ps_observations = []
for observation in observations:
if observation.components[0].display == "Blood Pressure":
bp_observations.append(observation)
temp_bp_dates.append(observation.effective_datetime)
if observation.components[0].display == "Heart rate":
hr_observations.append(observation)
temp_hr_dates.append(observation.effective_datetime)
if observation.components[0].display == "Respiratory rate":
rr_observations.append(observation)
temp_rr_dates.append(observation.effective_datetime)
if observation.components[0].display == "Body Weight":
bw_observations.append(observation)
temp_bw_dates.append(observation.effective_datetime)
if observation.components[0].display == "Body Height":
bh_observations.append(observation)
temp_bh_dates.append(observation.effective_datetime)
if observation.components[0].display == "Body Mass Index":
bmi_observations.append(observation)
temp_bmi_dates.append(observation.effective_datetime)
if observation.components[0].display == "Body mass index (BMI) [Percentile] Per age and gender":
bmip_observations.append(observation)
temp_bmip_dates.append(observation.effective_datetime)
if observation.components[0].display == "Pain severity - 0-10 verbal numeric rating [Score] - Reported":
ps_observations.append(observation)
temp_ps_dates.append(observation.effective_datetime)
temp_hr_dates.sort()
temp_bp_dates.sort()
temp_rr_dates.sort()
temp_bw_dates.sort()
temp_bh_dates.sort()
temp_bmi_dates.sort()
temp_bmip_dates.sort()
temp_ps_dates.sort()
for i in range(0, len(temp_bp_dates)):
for observation in bp_observations:
if temp_bp_dates[i] == observation.effective_datetime:
bp_dict["date"].append(int(time.mktime(temp_bp_dates[i].timetuple())) * 1000)
bp_dict["dbp"].append(observation.components[1].value)
bp_dict["sbp"].append(observation.components[2].value)
break
for i in range(0, len(temp_hr_dates)):
for observation in hr_observations:
if temp_hr_dates[i] == observation.effective_datetime:
hr_dict["date"].append(int(time.mktime(temp_hr_dates[i].timetuple())) * 1000)
hr_dict["hr"].append(observation.components[0].value)
break
for i in range(0, len(temp_rr_dates)):
for observation in rr_observations:
if temp_rr_dates[i] == observation.effective_datetime:
rr_dict["date"].append(int(time.mktime(temp_rr_dates[i].timetuple())) * 1000)
rr_dict["rr"].append(observation.components[0].value)
break
for i in range(0, len(temp_bw_dates)):
for observation in bw_observations:
if temp_bw_dates[i] == observation.effective_datetime:
bw_dict["date"].append(int(time.mktime(temp_bw_dates[i].timetuple())) * 1000)
bw_dict["bw"].append(observation.components[0].value)
break
for i in range(0, len(temp_bh_dates)):
for observation in bh_observations:
if temp_bh_dates[i] == observation.effective_datetime:
bh_dict["date"].append(int(time.mktime(temp_bh_dates[i].timetuple())) * 1000)
bh_dict["bh"].append(observation.components[0].value)
break
for i in range(0, len(temp_bmi_dates)):
for observation in bmi_observations:
if temp_bmi_dates[i] == observation.effective_datetime:
bmi_dict["date"].append(int(time.mktime(temp_bmi_dates[i].timetuple())) * 1000)
bmi_dict["bmi"].append(observation.components[0].value)
break
for i in range(0, len(temp_bmip_dates)):
for observation in bmip_observations:
if temp_bmip_dates[i] == observation.effective_datetime:
bmip_dict["date"].append(int(time.mktime(temp_bmip_dates[i].timetuple())) * 1000)
bmip_dict["bmip"].append(observation.components[0].value)
break
for i in range(0, len(temp_ps_dates)):
for observation in ps_observations:
if temp_ps_dates[i] == observation.effective_datetime:
ps_dict["date"].append(int(time.mktime(temp_ps_dates[i].timetuple())) * 1000)
ps_dict["ps"].append(observation.components[0].value)
break
return render_template('statistics.html', patient=patient, bp_dict=bp_dict, hr_dict=hr_dict, rr_dict=rr_dict, bw_dict=bw_dict, bh_dict=bh_dict, bmi_dict=bmi_dict, bmip_dict=bmip_dict, ps_dict=ps_dict)
@app.route('/averageage')
def getaverageage():
fhir = FHIR()
patients = fhir.get_all_patients()
ages = []
for patient in patients:
ages.append(patient.age())
return str(sum(ages)/len(ages))
@app.route('/observationstats')
def observationstats():
fhir = FHIR()
patients = fhir.get_all_patients()
observations = []
for patient in patients:
observations.extend(fhir.get_patient_observations(patient.uuid))
total_obsv = len(observations)
observation_types = [observation.type for observation in observations]
most_frequent_observation_type = max(set(observation_types), key=observation_types.count)
observation_components = []
for observation in observations:
observation_components.extend(observation.components)
total_obsv_components = len(observation_components)
observation_component_types = [observation_component.display for observation_component in observation_components]
    most_frequent_observation_component_type = max(set(observation_component_types), key=observation_component_types.count)
obsvstats_dictionary = {
"totalObsv": total_obsv,
"mfObservationType": most_frequent_observation_type,
"totalObsvComp": total_obsv_components,
"mfObsvCompType": most_frequent_observation_component_type
}
response = jsonify(obsvstats_dictionary)
return response
if __name__ == "__main__":
app.run(debug=True) |
py | b41580ca6fdfe7ea3ee9344d16372feeea08017c | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
import torch
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
DenoisingDataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.data.language_pair_dataset import collate
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_denoise_dataset(dataset, src_dict, args, tgt_dict=None):
mask_idx = src_dict.add_symbol('<mask>')
mask_whole_words = get_whole_word_mask(args, src_dict)
dataset = DenoisingDataset(
dataset, dataset.sizes, src_dict, mask_idx,
mask_whole_words, shuffle=False,
seed=args.seed, args=args, tgt_dict=tgt_dict,
)
logger.info(
">>> denoise Split, Loaded {0} samples of denoising_dataset".format(len(dataset))
)
return dataset
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
args=None,
xlmr_task = None,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 4,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = max(1, upsample_primary)
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if xlmr_task is not None and xlmr_task != 'vanilla' and 'mbart_ft' not in xlmr_task:
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
logger.info('Now add BOS to src dataset for XLM-R mtBERT.')
elif xlmr_task is not None and (xlmr_task == 'vanilla' or 'mbart_ft' in xlmr_task):
logger.info('>>> No need to prepend BOS for vanilla or mBART model.')
else:
pass
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
args=args,
)
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=True, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
default=-1, metadata={"help": "the amount of upsample primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
langs: Optional[str] = field(
default=None,
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.args = cfg
self.cfg = cfg.task
self.xlmr_task = cfg.dataset.xlmr_task
if all([x not in cfg.dataset.xlmr_task for x in ['vanilla', 'mbart']]):
self.src_dict.add_symbol('<mask>')
if getattr(cfg.model, 'share_all_embeddings', False):
self.tgt_dict.add_symbol('<mask>')
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
cfg_all = cfg
cfg = cfg.task
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg_all, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
xlmr_task = self.xlmr_task,
args=self.args,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg):
model = super().build_model(cfg)
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args), cfg=self.args,
)
return model
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False, model_ref=None,
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model, use_for_BT=False):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if use_for_BT:
return hyps, refs
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
py | b41581ef5eaa25eb64796290e7b6888aaad8cef7 | """
This is the main file that runs the OpenEEW code package
"""
from threading import Thread
from params import params
from src import data_holders, receive_traces, save_temp, dump_aws_ibm
__author__ = "Vaclav Kuna"
__copyright__ = ""
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Vaclav Kuna"
__email__ = "[email protected]"
__status__ = ""
def main():
"""Does everything"""
    # Create the shared data holders (raw traces and a to-do queue).
traces = data_holders.Traces()
todo = data_holders.ToDo()
# We create and start our traces update worker
stream = receive_traces.DataReceiver(df_holder=traces, params=params)
receive_data_process = Thread(target=stream.run)
receive_data_process.start()
# We create and start temporary save
compute = save_temp.SaveTemp(traces=traces, todo=todo, params=params)
tmp_process = Thread(target=compute.run)
tmp_process.start()
# We create and start dumper to AWS and IBM
dump = dump_aws_ibm.Dump(todo=todo, params=params)
dump_process = Thread(target=dump.run)
dump_process.start()
# We join our Threads, i.e. we wait for them to finish before continuing
receive_data_process.join()
tmp_process.join()
dump_process.join()
if __name__ == "__main__":
main()
|
py | b415821abe74bdc1509a6a4071eae8382da18f93 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import django_celery_beat
from celery.task import periodic_task, task
from django.utils import timezone
from apps.gsekit.job.models import Job
from apps.gsekit.periodic_tasks.utils import calculate_countdown
from apps.gsekit.process.handlers.process import ProcessHandler
from common.log import logger
@task(ignore_result=True)
def sync_biz_process_task(bk_biz_id):
ProcessHandler(bk_biz_id=bk_biz_id).sync_biz_process()
@periodic_task(run_every=django_celery_beat.tzcrontab.TzAwareCrontab(minute="*/10", tz=timezone.get_current_timezone()))
def sync_process(bk_biz_id=None):
if bk_biz_id:
bk_biz_id_list = [bk_biz_id]
else:
bk_biz_id_list = [job["bk_biz_id"] for job in Job.objects.values("bk_biz_id").order_by().distinct()]
count = len(bk_biz_id_list)
for index, biz_id in enumerate(bk_biz_id_list):
logger.info(f"[sync_process] start, bk_biz_id={biz_id}")
countdown = calculate_countdown(count, index)
sync_biz_process_task.apply_async((biz_id,), countdown=countdown)
        # TODO: Because the GSE interface has noticeable latency, the periodic task that syncs
        # process status is paused here; re-enable it once GSE is optimized.
# ProcessHandler(bk_biz_id=biz_id).sync_proc_status_to_db()
logger.info(f"[sync_process] bk_biz_id={biz_id} will be run after {countdown} seconds.")
|
py | b41583710403fb821233cce8bfb6bb8d552ed577 | class BaseParam(object):
@classmethod
def _default_values(cls):
data = {key: cls.__dict__[key] for key in cls.__dict__.keys() if not key.startswith('_')}
return data
def __str__(self):
inKeys = set([key for key in self.__dict__.keys() if not key.startswith('__')])
clzKeys = set([key for key in self.__class__.__dict__.keys() if not key.startswith('__')])
keys = inKeys.union(clzKeys)
out = ''
for key in keys:
if key in self.__dict__:
out += '%s:%s\n' % (key, self.__dict__[key])
else:
out += '%s:%s\n' % (key, self.__class__.__dict__[key])
return out |
py | b4158444f45d64d0622fbec1cabf3bf1dcb245e6 | from __future__ import absolute_import
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shazam.settings')
from django.conf import settings
app = Celery('shazam')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks()  # Note 3
app.conf.update(
BROKER_URL = 'amqp://localhost',
) |
py | b41584fa96e7523e5726b598a1683bb60e15a285 | # The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import ephem
from geopy.distance import great_circle
def distance_m_between_satellites(sat1, sat2, epoch_str, date_str):
"""
Computes the straight distance between two satellites in meters.
:param sat1: The first satellite
:param sat2: The other satellite
:param epoch_str: Epoch time of the observer (string)
:param date_str: The time instant when the distance should be measured (string)
:return: The distance between the satellites in meters
"""
# Create an observer somewhere on the planet
observer = ephem.Observer()
observer.epoch = epoch_str
observer.date = date_str
observer.lat = 0
observer.lon = 0
observer.elevation = 0
# Calculate the relative location of the satellites to this observer
sat1.compute(observer)
sat2.compute(observer)
    # Calculate the angle between the two satellites as seen by the observer (made possible by the .compute() calls above)
angle_radians = float(repr(ephem.separation(sat1, sat2)))
# Now we have a triangle with three knows:
# (1) a = sat1.range (distance observer to satellite 1)
# (2) b = sat2.range (distance observer to satellite 2)
# (3) C = angle (the angle at the observer point within the triangle)
#
# Using the formula:
# c^2 = a^2 + b^2 - 2 * a * b * cos(C)
#
# This gives us side c, the distance between the two satellites
return math.sqrt(sat1.range ** 2 + sat2.range ** 2 - (2 * sat1.range * sat2.range * math.cos(angle_radians)))
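# Minimal sanity sketch of the law-of-cosines step above (illustrative only: the
# ranges are made-up numbers, not real satellite data, and nothing here runs on
# import). With C = 0 the satellites lie on the same ray, so c = |a - b|;
# with C = pi they are on opposite sides of the observer, so c = a + b.
def _sketch_law_of_cosines_check():
    range_a = 1000000.0   # hypothetical observer -> satellite 1 distance (m)
    range_b = 1200000.0   # hypothetical observer -> satellite 2 distance (m)
    for angle_c, expected in [(0.0, abs(range_a - range_b)),
                              (math.pi, range_a + range_b)]:
        c = math.sqrt(range_a ** 2 + range_b ** 2
                      - 2 * range_a * range_b * math.cos(angle_c))
        assert abs(c - expected) < 1e-3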
def distance_m_ground_station_to_satellite(ground_station, satellite, epoch_str, date_str):
"""
Computes the straight distance between a ground station and a satellite in meters
:param ground_station: The ground station
:param satellite: The satellite
:param epoch_str: Epoch time of the observer (ground station) (string)
:param date_str: The time instant when the distance should be measured (string)
:return: The distance between the ground station and the satellite in meters
"""
# Create an observer on the planet where the ground station is
observer = ephem.Observer()
observer.epoch = epoch_str
observer.date = date_str
observer.lat = str(ground_station["latitude_degrees_str"]) # Very important: string argument is in degrees.
observer.lon = str(ground_station["longitude_degrees_str"]) # DO NOT pass a float as it is interpreted as radians
observer.elevation = ground_station["elevation_m_float"]
# Compute distance from satellite to observer
satellite.compute(observer)
# Return distance
return satellite.range
def geodesic_distance_m_between_ground_stations(ground_station_1, ground_station_2):
"""
Calculate the geodesic distance between two ground stations.
:param ground_station_1: First ground station
:param ground_station_2: Another ground station
:return: Geodesic distance in meters
"""
# WGS72 value; taken from https://geographiclib.sourceforge.io/html/NET/NETGeographicLib_8h_source.html
earth_radius_km = 6378.135 # 6378135.0 meters
return great_circle(
(float(ground_station_1["latitude_degrees_str"]), float(ground_station_1["longitude_degrees_str"])),
(float(ground_station_2["latitude_degrees_str"]), float(ground_station_2["longitude_degrees_str"])),
radius=earth_radius_km
).m
def straight_distance_m_between_ground_stations(ground_station_1, ground_station_2):
"""
Calculate the straight distance between two ground stations (goes through the Earth)
:param ground_station_1: First ground station
:param ground_station_2: Another ground station
:return: Straight distance in meters (goes through the Earth)
"""
# WGS72 value; taken from https://geographiclib.sourceforge.io/html/NET/NETGeographicLib_8h_source.html
earth_radius_m = 6378135.0
# First get the angle between the two ground stations from the Earth's core
fraction_of_earth_circumference = geodesic_distance_m_between_ground_stations(
ground_station_1,
ground_station_2
) / (earth_radius_m * 2.0 * math.pi)
angle_radians = fraction_of_earth_circumference * 2 * math.pi
    # Now treat the Earth's cross-section as a circle: the radius is the hypotenuse of a right
    # triangle whose angle at the centre is half of angle_radians, so half the chord is
    # R * sin(angle / 2). Multiply by two to get the straight-line distance through the Earth.
polygon_side_m = 2 * math.sin(angle_radians / 2.0) * earth_radius_m
return polygon_side_m
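# Illustrative check of the chord geometry above, using two hypothetical,
# antipodal ground stations (note the module's convention of passing latitude
# and longitude as strings, in degrees). For antipodal points the geodesic is
# half the circumference (pi * R) and the chord is the diameter (2 * R).
# Defined only as a sketch; it is never called on import.
def _sketch_antipodal_distances():
    earth_radius_m = 6378135.0
    gs_a = {"latitude_degrees_str": "0.0", "longitude_degrees_str": "0.0",
            "elevation_m_float": 0.0}
    gs_b = {"latitude_degrees_str": "0.0", "longitude_degrees_str": "180.0",
            "elevation_m_float": 0.0}
    geodesic_m = geodesic_distance_m_between_ground_stations(gs_a, gs_b)
    straight_m = straight_distance_m_between_ground_stations(gs_a, gs_b)
    assert abs(geodesic_m - math.pi * earth_radius_m) < 1.0
    assert abs(straight_m - 2.0 * earth_radius_m) < 1.0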
def create_basic_ground_station_for_satellite_shadow(satellite, epoch_str, date_str):
"""
Calculate the (latitude, longitude) of the satellite shadow on the Earth and creates a ground station there.
:param satellite: Satellite
:param epoch_str: Epoch (string)
:param date_str: Time moment (string)
:return: Basic ground station
"""
satellite.compute(date_str, epoch=epoch_str)
return {
"gid": -1,
"name": "Shadow of " + satellite.name,
"latitude_degrees_str": str(math.degrees(satellite.sublat)),
"longitude_degrees_str": str(math.degrees(satellite.sublong)),
"elevation_m_float": 0,
}
def geodetic2cartesian(lat_degrees, lon_degrees, ele_m):
"""
Compute geodetic coordinates (latitude, longitude, elevation) to Cartesian coordinates.
:param lat_degrees: Latitude in degrees (float)
:param lon_degrees: Longitude in degrees (float)
:param ele_m: Elevation in meters
:return: Cartesian coordinate as 3-tuple of (x, y, z)
"""
#
# Adapted from: https://github.com/andykee/pygeodesy/blob/master/pygeodesy/transform.py
#
# WGS72 value,
# Source: https://geographiclib.sourceforge.io/html/NET/NETGeographicLib_8h_source.html
a = 6378135.0
# Ellipsoid flattening factor; WGS72 value
# Taken from https://geographiclib.sourceforge.io/html/NET/NETGeographicLib_8h_source.html
f = 1.0 / 298.26
# First numerical eccentricity of ellipsoid
e = math.sqrt(2.0 * f - f * f)
lat = lat_degrees * (math.pi / 180.0)
lon = lon_degrees * (math.pi / 180.0)
# Radius of curvature in the prime vertical of the surface of the geodetic ellipsoid
v = a / math.sqrt(1.0 - e * e * math.sin(lat) * math.sin(lat))
x = (v + ele_m) * math.cos(lat) * math.cos(lon)
y = (v + ele_m) * math.cos(lat) * math.sin(lon)
z = (v * (1.0 - e * e) + ele_m) * math.sin(lat)
return x, y, z
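# Quick illustrative check of the conversion above (assumed toy input; not run
# on import): the point at latitude 0, longitude 0 and zero elevation should
# land on the positive x-axis at exactly one WGS72 equatorial radius.
def _sketch_geodetic2cartesian_check():
    x, y, z = geodetic2cartesian(0.0, 0.0, 0.0)
    assert abs(x - 6378135.0) < 1e-3
    assert abs(y) < 1e-3
    assert abs(z) < 1e-3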
|
py | b415852756424ce738d49500a37d82f6501972da | # Copyright 2018 Digital Domain 3.0
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from qt_py_convert._modules.from_imports.process import process
|
py | b415863e47eadf1536b72b52a3309308e587f28d | class Vector(object):
def __init__(self, **kwargs):
self._roll = kwargs.get('roll', 0)
self._pitch = kwargs.get('pitch', 0)
self._yaw = kwargs.get('yaw', 0)
self._vertical_movement = kwargs.get('vertical_movement', 0)
self.max_roll = 100
self.max_pitch = 100
self.max_yaw = 100
self.max_vertical_movement = 100
self.roll_damper = 0.75
@staticmethod
def round(value):
return int(round(value, 0))
def reset(self):
for k, _ in self.emit().items():
setattr(self, f'_{k}', 0)
def set_roll(self, roll):
self._roll = roll
def set_pitch(self, pitch):
self._pitch = pitch
def set_yaw(self, yaw):
self._yaw = yaw
def set_vertical_movement(self, vertical_movement):
self._vertical_movement = vertical_movement
def emit(self):
values = {
'roll': self.round((self._roll * self.max_roll) * self.roll_damper),
'pitch': self.round(self._pitch * self.max_pitch),
'yaw': self.round(self._yaw * self.max_yaw),
'vertical_movement': self.round(self._vertical_movement * self.max_vertical_movement)
}
return values
def compare(self, vector):
if not isinstance(vector, Vector):
return False
vector_values = vector.emit()
for k, v in self.emit().items():
if v != vector_values[k]:
return False
return True
def copy(self):
return Vector(**self.emit())
def set(self, key, value):
if key in self.emit():
setattr(self, f'_{key}', value)
def get(self, key):
try:
return self.emit().get(key)
except KeyError:
return 0
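if __name__ == '__main__':
    # Minimal usage sketch. The -1..1 input range for roll/pitch/yaw is an
    # assumption; the class does not document its expected input scale.
    v = Vector(roll=0.5, pitch=-0.25)
    print(v.emit())             # roll scaled by max_roll and damped by roll_damper
    snapshot = v.copy()
    v.set('yaw', 1.0)
    print(v.compare(snapshot))  # False, because yaw now differs
    v.reset()
    print(v.emit())             # everything back to zero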
|
py | b41587af64ab30e33ac24d49a4402fb162d00c34 | """
sentry.models.release
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import re
from django.db import models, IntegrityError, transaction
from django.utils import timezone
from jsonfield import JSONField
from sentry.app import locks
from sentry.db.models import (
BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.retries import TimedRetryPolicy
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
class ReleaseProject(Model):
__core__ = False
project = FlexibleForeignKey('sentry.Project')
release = FlexibleForeignKey('sentry.Release')
new_groups = BoundedPositiveIntegerField(null=True, default=0)
class Meta:
app_label = 'sentry'
db_table = 'sentry_release_project'
unique_together = (('project', 'release'),)
class Release(Model):
"""
A release is generally created when a new version is pushed into a
production state.
"""
__core__ = False
organization = FlexibleForeignKey('sentry.Organization')
projects = models.ManyToManyField('sentry.Project', related_name='releases',
through=ReleaseProject)
project_id = BoundedPositiveIntegerField(null=True)
version = models.CharField(max_length=64)
# ref might be the branch name being released
ref = models.CharField(max_length=64, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = JSONField(default={})
new_groups = BoundedPositiveIntegerField(default=0)
# generally the release manager, or the person initiating the process
owner = FlexibleForeignKey('sentry.User', null=True, blank=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_release'
unique_together = (('project_id', 'version'),)
__repr__ = sane_repr('project_id', 'version')
@classmethod
def get_cache_key(cls, project_id, version):
# TODO(jess): update this to use organization id when adding
# unique on Release for organization, version
return 'release:2:%s:%s' % (project_id, md5_text(version).hexdigest())
@classmethod
def get(cls, project, version):
cache_key = cls.get_cache_key(project.id, version)
release = cache.get(cache_key)
if release is None:
try:
release = cls.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except cls.DoesNotExist:
release = -1
cache.set(cache_key, release, 300)
if release == -1:
return
return release
@classmethod
def get_lock_key(cls, organization_id, version):
return 'release:%s:%s' % (organization_id, md5_text(version).hexdigest())
@classmethod
def get_or_create(cls, project, version, date_added):
cache_key = cls.get_cache_key(project.id, version)
release = cache.get(cache_key)
if release in (None, -1):
# TODO(dcramer): if the cache result is -1 we could attempt a
# default create here instead of default get
project_version = ('%s-%s' % (project.slug, version))[:64]
releases = list(cls.objects.filter(
organization_id=project.organization_id,
version__in=[version, project_version],
projects=project
))
if len(releases) == 1:
release = releases[0]
elif len(releases) > 1:
release = [r for r in releases if r.version == project_version][0]
else:
release = cls.objects.filter(
organization_id=project.organization_id,
version=version
).first()
if not release:
lock_key = cls.get_lock_key(project.organization_id, version)
lock = locks.get(lock_key, duration=5)
with TimedRetryPolicy(10)(lock.acquire):
try:
release = cls.objects.get(
organization_id=project.organization_id,
version=version
)
except cls.DoesNotExist:
release = cls.objects.create(
organization_id=project.organization_id,
version=version,
date_added=date_added
)
release.add_project(project)
# TODO(dcramer): upon creating a new release, check if it should be
# the new "latest release" for this project
cache.set(cache_key, release, 3600)
return release
@classmethod
def merge(cls, to_release, from_releases):
# The following models reference release:
# ReleaseCommit.release
# ReleaseEnvironment.release_id
# ReleaseProject.release
# GroupRelease.release_id
# GroupResolution.release
# Group.first_release
# ReleaseFile.release
from sentry.models import (
ReleaseCommit, ReleaseEnvironment, ReleaseFile, ReleaseProject,
Group, GroupRelease, GroupResolution
)
model_list = (
ReleaseCommit, ReleaseEnvironment, ReleaseFile, ReleaseProject,
GroupRelease, GroupResolution
)
for release in from_releases:
for model in model_list:
if hasattr(model, 'release'):
update_kwargs = {'release': to_release}
else:
update_kwargs = {'release_id': to_release.id}
try:
with transaction.atomic():
model.objects.filter(
release_id=release.id
).update(**update_kwargs)
except IntegrityError:
for item in model.objects.filter(release_id=release.id):
try:
with transaction.atomic():
model.objects.filter(
id=item.id
).update(**update_kwargs)
except IntegrityError:
item.delete()
Group.objects.filter(
first_release=release
).update(first_release=to_release)
release.delete()
@property
def short_version(self):
if _sha1_re.match(self.version):
return self.version[:12]
return self.version
def add_project(self, project):
try:
with transaction.atomic():
ReleaseProject.objects.create(project=project, release=self)
except IntegrityError:
pass
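# --- Hedged usage sketch (added illustration, not part of the original module) ---
# A minimal sketch of how the helpers above are typically combined, assuming a
# configured Django/Sentry environment with an existing Project; the slug and
# version strings below are hypothetical.
def _release_usage_example():
    from sentry.models import Project
    project = Project.objects.get(slug='my-project')
    # get_or_create() consults the cache first, then falls back to a locked
    # create so that concurrent workers do not race on the same version.
    release = Release.get_or_create(project, version='1.0.0',
                                    date_added=timezone.now())
    # get() is the cheap cached lookup; it returns None (and caches a sentinel
    # for five minutes) when the release does not exist.
    cached = Release.get(project, version='1.0.0')
    return release, cached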
|
py | b415894a3b41a8474ffbe2f50a504a4af5b732f1 | # -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
from tsfresh.feature_extraction import extract_features
from tsfresh.utilities.dataframe_functions import restrict_input_to_index
class FeatureAugmenter(BaseEstimator, TransformerMixin):
"""
Sklearn-compatible estimator, for calculating and adding many features calculated from a given time series
    to the data. It is basically a wrapper around :func:`~tsfresh.feature_extraction.extract_features`.
The features include basic ones like min, max or median, and advanced features like fourier
transformations or statistical tests. For a list of all possible features, see the module
:mod:`~tsfresh.feature_extraction.feature_calculators`. The column name of each added feature contains the name
of the function of that module, which was used for the calculation.
For this estimator, two datasets play a crucial role:
1. the time series container with the timeseries data. This container (for the format see :ref:`data-formats-label`)
contains the data which is used for calculating the
features. It must be groupable by ids which are used to identify which feature should be attached to which row
in the second dataframe:
    2. the input data, to which the features will be added.
Imagine the following situation: You want to classify 10 different financial shares and you have their development
in the last year as a time series. You would then start by creating features from the metainformation of the
shares, e.g. how long they were on the market etc. and filling up a table - the features of one stock in one row.
>>> df = pandas.DataFrame()
>>> # Fill in the information of the stocks
>>> df["started_since_days"] = 0 # add a feature
You can then extract all the features from the time development of the shares, by using this estimator:
>>> time_series = read_in_timeseries() # get the development of the shares
>>> from tsfresh.transformers import FeatureAugmenter
>>> augmenter = FeatureAugmenter()
>>> augmenter.set_timeseries_container(time_series)
>>> df_with_time_series_features = augmenter.transform(df)
The settings for the feature calculation can be controlled with the settings object. If you pass ``None``, the default
settings are used. Please refer to :class:`~tsfresh.feature_extraction.settings.FeatureExtractionSettings` for
more information.
This estimator does not select the relevant features, but calculates and adds all of them to the DataFrame. See the
:class:`~tsfresh.transformers.relevant_feature_augmenter.RelevantFeatureAugmenter` for calculating and selecting
features.
For a description what the parameters column_id, column_sort, column_kind and column_value mean, please see
:mod:`~tsfresh.feature_extraction.extraction`.
"""
def __init__(self, settings=None, column_id=None, column_sort=None,
column_kind=None, column_value=None, timeseries_container=None):
"""
Create a new FeatureAugmenter instance.
:param settings: The extraction settings to use. Leave empty to use the default ones.
:type settings: tsfresh.feature_extraction.settings.FeatureExtractionSettings
:param column_id: The column with the id. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_id: basestring
:param column_sort: The column with the sort data. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_sort: basestring
:param column_kind: The column with the kind data. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_kind: basestring
:param column_value: The column with the values. See :mod:`~tsfresh.feature_extraction.extraction`.
:type column_value: basestring
"""
self.settings = settings
self.column_id = column_id
self.column_sort = column_sort
self.column_kind = column_kind
self.column_value = column_value
self.timeseries_container = timeseries_container
def set_timeseries_container(self, timeseries_container):
"""
Set the timeseries, with which the features will be calculated. For a format of the time series container,
please refer to :mod:`~tsfresh.feature_extraction.extraction`. The timeseries must contain the same indices
as the later DataFrame, to which the features will be added (the one you will pass to :func:`~transform`). You
can call this function as often as you like, to change the timeseries later (e.g. if you want to extract for
different ids).
:param timeseries_container: The timeseries as a pandas.DataFrame or a dict. See
:mod:`~tsfresh.feature_extraction.extraction` for the format.
:type timeseries_container: pandas.DataFrame or dict
:return: None
:rtype: None
"""
self.timeseries_container = timeseries_container
def fit(self, X=None, y=None):
"""
The fit function is not needed for this estimator. It just does nothing and is here for compatibility reasons.
:param X: Unneeded.
:type X: Any
:param y: Unneeded.
:type y: Any
:return: The estimator instance itself
:rtype: FeatureAugmenter
"""
return self
def transform(self, X):
"""
        Add the features calculated from the timeseries_container to the corresponding rows in the input
pandas.DataFrame X.
        To save some computing time, you should only include those time series in the container that you
need. You can set the timeseries container with the method :func:`set_timeseries_container`.
:param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the
dataframe with the timeseries itself.
:type X: pandas.DataFrame
:return: The input DataFrame, but with added features.
:rtype: pandas.DataFrame
"""
if self.timeseries_container is None:
raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.")
# Extract only features for the IDs in X.index
timeseries_container_X = restrict_input_to_index(self.timeseries_container, self.column_id, X.index)
extracted_features = extract_features(timeseries_container_X,
feature_extraction_settings=self.settings,
column_id=self.column_id, column_sort=self.column_sort,
column_kind=self.column_kind, column_value=self.column_value)
X = pd.merge(X, extracted_features, left_index=True, right_index=True, how="left")
return X
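# --- Hedged usage sketch (added illustration, not part of the original module) ---
# A minimal, self-contained example of the flow described above. The column
# names ("id", "time", "kind", "value") and the toy data are made up for
# illustration; any long-format container accepted by extract_features works.
if __name__ == "__main__":
    X = pd.DataFrame(index=[1, 2])
    ts = pd.DataFrame({
        "id": [1, 1, 1, 2, 2, 2],
        "time": [0, 1, 2, 0, 1, 2],
        "kind": ["price"] * 6,
        "value": [1.0, 2.0, 3.0, 5.0, 5.0, 5.0],
    })
    augmenter = FeatureAugmenter(column_id="id", column_sort="time",
                                 column_kind="kind", column_value="value")
    augmenter.set_timeseries_container(ts)
    print(augmenter.transform(X).shape)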
|
py | b415897c09db72fb6ad0141e4b80d2ad81b6add2 | import numpy as np
def seaoverland(data, iterations=1, copy=False):
"""
Python implementation of G. Girardi's seaoverland.f90.
author: E.Jansen
Extends grids defined only at sea onto the land such that bilinear
interpolation of the grid is also possible close to the coastline. The
procedure determines the value of an undefined grid point by averaging the
defined neighbour points among the 8 closest neighbours. Every iteration
therefore extends the grid with one additional row of points towards the
coastline.
With copy set to True a copy of the data is returned leaving the original
untouched. Otherwise the data is modified in place and returned.
Parameters
----------
data : numpy.ma.masked_array
Grid (2 dimensions) to be extrapolated.
iterations : int, optional, default: 1
Number of iterations (i.e. extent of extrapolation).
copy : boolean, optional, default: False
Create a copy of data instead of modifying it in place.
"""
if copy:
data = np.ma.copy(data)
if not isinstance(data, np.ma.masked_array) or not data.mask.any():
return data
for _ in range(iterations):
shifted = []
ni, nj = data.shape
for i in range(-1, 2):
for j in range(-1, 2):
if i != 0 or j != 0:
# Shift the grid by i horizontally and j vertically and
# append it to the array. Shifted grids are 2 units smaller
                    # in both dimensions to accommodate the shift.
shifted.append(data[1 + i:ni - 1 + i, 1 + j:nj - 1 + j])
# Calculate the mean value of the shifted grids to obtain the
# approximated values. Only non-masked entries are taken into account.
approx = np.ma.mean(shifted, axis=0)
# Create a view without the outer points (so it is the same size as the
# shifted grids), then copy the approximated values for the cells that
# are masked.
view = data[1:-1, 1:-1]
np.copyto(view, approx, where=(view.mask & ~approx.mask))
# Combine the two masks, unmasking values that were masked in view but
# have been successfully approximated.
view.mask &= approx.mask
return data |
py | b415898203fb3d84d8ad7c6a631e0c63c9434b1a | from __future__ import division
from mmtbx.conformation_dependent_library import generate_protein_threes
from scitbx.matrix import rotate_point_around_axis
from mmtbx.validation import ramalyze
import math
from cStringIO import StringIO
from mmtbx.validation.ramalyze import res_types
from scitbx.math import dihedral_angle
# from scitbx.matrix import _dihedral_angle # python implementation, but on flex arrays
import boost.python
ext = boost.python.import_ext("mmtbx_validation_ramachandran_ext")
from mmtbx_validation_ramachandran_ext import rama_eval
def get_phi_psi_atoms(hierarchy, omega=False):
phi_psi_atoms = []
for three in generate_protein_threes(
hierarchy=hierarchy,
geometry=None,
cdl_class=True):
psatoms = three.get_phi_psi_atoms()
if psatoms is not None:
phi_atoms, psi_atoms = psatoms
else:
phi_atoms, psi_atoms = None, None
rama_key = three.get_ramalyze_key()
# print "rama_key", rama_key
if omega:
phi_psi_atoms.append(([phi_atoms, psi_atoms],rama_key, three.get_omega_value()))
else:
phi_psi_atoms.append(([phi_atoms, psi_atoms],rama_key))
return phi_psi_atoms
def list_omega_outliers(hierarchy, log):
pso_atoms = get_phi_psi_atoms(hierarchy, omega=True)
print >> log, "Omega outliers:"
for psatoms, rama_key, omega in pso_atoms:
if omega is not None and abs(abs(omega)-180) > 30:
print >> log, " ", psatoms[0][0].id_str(), omega
def list_omega(hierarchy, log):
pso_atoms = get_phi_psi_atoms(hierarchy, omega=True)
print >> log, "Omega angles:"
for psatoms, rama_key, omega in pso_atoms:
print >> log, " ", psatoms[0][0].id_str(), omega
def n_bad_omegas(hierarchy):
result = 0
pso_atoms = get_phi_psi_atoms(hierarchy, omega=True)
for psatoms, rama_key, omega in pso_atoms:
if omega is not None and abs(abs(omega)-180) > 30:
result += 1
return result
def py_dihedral_angle2(sites, deg=True):
"""
  Should not be used anywhere. The algorithm may be faster than the one
  currently available in C++; needs further investigation.
- experimental, while aware of analogous c++ implementation;
- custom duplication of basic linalg functions is intentional;
- no tests since not used in production and may be removed in future.
"""
def dot_product(a,b):
return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]
def cross_product(a,b):
return (a[1] * b[2] - b[1] * a[2],
a[2] * b[0] - b[2] * a[0],
a[0] * b[1] - b[0] * a[1])
""" sites = [(vec3),(vec3),(vec3),(vec3)]
# supposed to be fast dihedral calculation, taken from here:
# http://stackoverflow.com/a/34245697
# Pure python
Praxeolitic formula
1 sqrt, 1 cross product
2.5 times slower than dihedral_angle in cpp
"""
p0 = sites[0]
p1 = sites[1]
p2 = sites[2]
p3 = sites[3]
b0 = (p0[0]-p1[0], p0[1]-p1[1], p0[2]-p1[2])
b1 = (p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2])
b2 = (p3[0]-p2[0], p3[1]-p2[1], p3[2]-p2[2])
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
# b1 /= np.linalg.norm(b1)
b1_norm = math.sqrt(b1[0]*b1[0]+b1[1]*b1[1]+b1[2]*b1[2])
b1 = (b1[0]/b1_norm, b1[1]/b1_norm, b1[2]/b1_norm)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
b0_dp_b1 = dot_product(b0, b1)
b2_dp_b1 = dot_product(b2, b1)
v = (b0[0]-b0_dp_b1*b1[0],
b0[1]-b0_dp_b1*b1[1],
b0[2]-b0_dp_b1*b1[2])
w = (b2[0]-b2_dp_b1*b1[0],
b2[1]-b2_dp_b1*b1[1],
b2[2]-b2_dp_b1*b1[2])
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = dot_product(v, w)
b1_cross_v = cross_product(b1, v)
y = dot_product(b1_cross_v, w)
return math.degrees(math.atan2(y, x))
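# Hedged worked example (added illustration, not part of the original module):
# for four coplanar points the torsion is 0 degrees when the outer points lie
# on the same side of the central bond (cis) and 180 degrees when they lie on
# opposite sides (trans). The coordinates below are made up for illustration.
def _py_dihedral_angle2_example():
  cis = [(1, 0, 0), (0, 0, 0), (0, 1, 0), (1, 1, 0)]
  trans = [(1, 0, 0), (0, 0, 0), (0, 1, 0), (-1, 1, 0)]
  assert abs(py_dihedral_angle2(cis)) < 1e-9
  assert abs(abs(py_dihedral_angle2(trans)) - 180.0) < 1e-9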
def get_dihedral_angle(atoms, round_coords=False):
# round here is to emulate rounding when dumping to pdb, to get more
  # consistent results for rama outliers inside the program and when
  # calculating from the resulting pdb file.
if atoms is None:
return None
sites = []
if round_coords:
for x in atoms:
sites.append((round(x.xyz[0], 3), round(x.xyz[1], 3), round(x.xyz[2], 3)))
else:
sites = [x.xyz for x in atoms]
return dihedral_angle(
sites = sites,
deg=True)
def rama_score_evaluate(resType, value):
return ramalyze.ramalyze.evalScore(resType, value)
def pair_info(phi_psi_pair):
return phi_psi_pair[0][2].id_str()
def list_rama_outliers_h(hierarchy, r=None):
if r is None:
r = rama_eval()
phi_psi_atoms = get_phi_psi_atoms(hierarchy)
outp = list_rama_outliers(phi_psi_atoms, r)
return outp
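# Hedged usage sketch (added illustration, not part of the original module):
# assumes a PDB file readable through iotbx.pdb; the file name is hypothetical.
def _list_rama_outliers_example():
  import iotbx.pdb
  hierarchy = iotbx.pdb.input(file_name="model.pdb").construct_hierarchy()
  print list_rama_outliers_h(hierarchy)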
def pair_selection(phi_psi_pair, margin=1):
resnum = phi_psi_pair[0][2].parent().parent().resseq_as_int()
return "(chain %s and resid %s:%s)" % (phi_psi_pair[0][2].parent().parent().parent().id,
resnum-margin, resnum+margin)
def rama_score_selection(hierarchy, r=None, score="outlier",margin=1):
assert score in ["outlier", "allowed"]
test = ramalyze.RAMALYZE_OUTLIER
if score == "allowed":
test = ramalyze.RAMALYZE_ALLOWED
if r is None:
r = rama_eval()
out_sel = []
phi_psi_atoms = get_phi_psi_atoms(hierarchy)
for phi_psi_pair, rama_key in phi_psi_atoms:
rama_score = get_rama_score(phi_psi_pair, r, rama_key)
if rama_evaluate(phi_psi_pair, r, rama_key) == test:
out_sel.append(pair_selection(phi_psi_pair, margin))
out_sel_txt = " or ".join(out_sel)
return out_sel_txt
def list_rama_outliers(phi_psi_atoms, r):
result = ""
# out_sel = []
for phi_psi_pair, rama_key in phi_psi_atoms:
rama_score = get_rama_score(phi_psi_pair, r, rama_key)
if rama_evaluate(phi_psi_pair, r, rama_key) == ramalyze.RAMALYZE_OUTLIER:
result += " !!! OUTLIER %s, score=%f\n" % (pair_info(phi_psi_pair), rama_score)
# print "%s, %s, %s" % (pair_info(phi_psi_pair), get_rama_score(phi_psi_pair, r, rama_key), ramalyze.res_types[rama_key])
# out_sel.append(pair_selection(phi_psi_pair))
# print_rama_stats(phi_psi_atoms, r)
# out_sel.txt = " or ".join(out_sel)
# print out_sel
return result
def get_rama_score(phi_psi_pair, r, rama_key, round_coords=False):
# phi_psi_angles = get_pair_angles(phi_psi_pair, round_coords=round_coords)
phi_psi_angles = get_pair_angles(phi_psi_pair, round_coords=False)
if phi_psi_angles[0] is None or phi_psi_angles[1] is None:
return None
rama_score = r.get_score(rama_key, phi_psi_angles[0], phi_psi_angles[1])
if round_coords:
return rama_score*0.98
return rama_score
def rama_evaluate(phi_psi_pair, r, rama_key, round_coords=False):
score = get_rama_score(phi_psi_pair, r, rama_key, round_coords=round_coords)
if score is None:
return None
# print " score, rama_key", score, rama_key
return r.evaluate_score(rama_key, score)
def get_pair_angles(phi_psi_pair, round_coords=False):
phi_psi_angles = [0,0]
phi_psi_angles[0] = get_dihedral_angle(phi_psi_pair[0], round_coords=round_coords)
phi_psi_angles[1] = get_dihedral_angle(phi_psi_pair[1], round_coords=round_coords)
return phi_psi_angles
def print_rama_stats(phi_psi_atoms, r):
result = StringIO()
for phi_psi_pair, rama_key in phi_psi_atoms:
for i, atoms in enumerate(phi_psi_pair):
for a in atoms:
print >> result, a.id_str()
rama_score = get_rama_score(phi_psi_pair, r, rama_key)
print >> result, "rama score:", get_pair_angles(phi_psi_pair), rama_score,
print >> result, rama_score_evaluate(rama_key, rama_score), rama_key
print >> result, "="*20
print >> result, "*"*80
r = result.getvalue()
return r
def get_rmsd(fixed_points, moving_points):
rmsd = 0
for fp, mp in zip(fixed_points, moving_points):
rmsd += fp.distance(mp)**2
return math.sqrt(rmsd)
def get_rmsd_xyz_fixed(fixed_points, moving_points):
rmsd = 0
for fp, mp in zip(fixed_points, moving_points):
rmsd += mp.distance(fp)**2
return math.sqrt(rmsd)
def rotate_atoms_around_bond(
moving_h, atom_axis_point_1, atom_axis_point_2, angle, degrees=True,
direction_forward=True):
# changes moving_h
# print "in rotation, iseqs:", atom_axis_point_1.i_seq, atom_axis_point_2.i_seq
#
# find xyz based on i_seqs
rotate_xyz1 = None
rotate_xyz2 = None
if not direction_forward:
angle = -angle
atoms = moving_h.atoms()
for a in atoms:
if a.i_seq == atom_axis_point_1.i_seq:
rotate_xyz1 = a.xyz
elif a.i_seq == atom_axis_point_2.i_seq:
rotate_xyz2 = a.xyz
# rotate stuff
for a in atoms:
if ((direction_forward and a.i_seq > atom_axis_point_1.i_seq) or
(not direction_forward and a.i_seq < atom_axis_point_2.i_seq)):
new_xyz = rotate_point_around_axis(
axis_point_1=rotate_xyz1,
axis_point_2=rotate_xyz2,
point=a.xyz,
angle=angle,
deg=degrees)
# let's round them
# print "actually setting coordinates:", a.i_seq, a.xyz, "->", new_xyz
# a.set_xyz((round(new_xyz[0], 3), round(new_xyz[1], 3), round(new_xyz[2], 3)))
a.set_xyz(new_xyz)
def find_nearest_non_outlier_region(phi_psi_pair, r, rama_key):
def spiral(N, M):
x,y = 0,0
dx, dy = 0, -1
for dumb in xrange(N*M):
if abs(x) == abs(y) and [dx,dy] != [1,0] or x>0 and y == 1-x:
dx, dy = -dy, dx # corner, change direction
if abs(x)>N/2 or abs(y)>M/2: # non-square
dx, dy = -dy, dx # change direction
x, y = -y+dx, x+dy # jump
yield x, y
x, y = x+dx, y+dy
# ==
phi_psi_angles = get_pair_angles(phi_psi_pair)
for dx,dy in spiral(360, 360):
angles = [phi_psi_angles[0]+dx, phi_psi_angles[1]+dy]
if (r.evaluate_angles(res_types[rama_key], angles[0], angles[1]) == \
ramalyze.RAMALYZE_FAVORED):
return angles
|
py | b4158aae72dd97d61f2f14281d25d27ea874cf0c | """xym URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from rest_framework_swagger.views import get_swagger_view
urlpatterns = [
path('admin/', admin.site.urls),
url('^api/', include('grade.urls'), ),
url('^api/auth/', include('flytrap.auth.account.token.urls')),
url('^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url('^docs', get_swagger_view('Xingyimen API documentation'), )
]
|
py | b4158dedf36792837a24b6eff2ac839afe2d981b | # Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xmlrpclib
import socket
import time
import sys
try:
from xml.parsers.expat import ExpatError
except ImportError:
ExpatError = None # Support for Jython 2.2(.x)
from robot import utils
from robot.errors import RemoteError
class Remote:
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
def __init__(self, uri='http://localhost:8270'):
if '://' not in uri:
uri = 'http://' + uri
self._client = XmlRpcRemoteClient(uri)
def get_keyword_names(self, attempts=5):
for i in range(attempts):
try:
return self._client.get_keyword_names()
except TypeError, err:
time.sleep(1)
        raise RuntimeError('Connecting to remote server failed: %s' % err)
def get_keyword_arguments(self, name):
try:
return self._client.get_keyword_arguments(name)
except TypeError:
return ['*args']
def get_keyword_documentation(self, name):
try:
return self._client.get_keyword_documentation(name)
except TypeError:
return ''
def run_keyword(self, name, args):
args = [ self._handle_argument(arg) for arg in args ]
result = RemoteResult(self._client.run_keyword(name, args))
sys.stdout.write(result.output)
if result.status != 'PASS':
raise RemoteError(result.error, result.traceback)
return result.return_
def _handle_argument(self, arg):
if isinstance(arg, (basestring, int, long, float)):
return arg
if isinstance(arg, (tuple, list)):
return [ self._handle_argument(item) for item in arg ]
if isinstance(arg, dict):
return dict([ (self._str(key), self._handle_argument(value))
for key, value in arg.items() ])
return self._str(arg)
def _str(self, item):
if item is None:
return ''
return utils.unic(item)
class RemoteResult:
def __init__(self, result):
try:
self.status = result['status']
self.output = result.get('output', '')
self.return_ = result.get('return', '')
self.error = result.get('error', '')
self.traceback = result.get('traceback', '')
except (KeyError, AttributeError):
raise RuntimeError('Invalid remote result dictionary: %s' % result)
class XmlRpcRemoteClient:
def __init__(self, uri):
self._server = xmlrpclib.ServerProxy(uri, encoding='UTF-8')
def get_keyword_names(self):
try:
return self._server.get_keyword_names()
except socket.error, (errno, err):
raise TypeError(err)
except xmlrpclib.Error, err:
raise TypeError(err)
def get_keyword_arguments(self, name):
try:
return self._server.get_keyword_arguments(name)
except xmlrpclib.Error:
raise TypeError
def get_keyword_documentation(self, name):
try:
return self._server.get_keyword_documentation(name)
except xmlrpclib.Error:
raise TypeError
def run_keyword(self, name, args):
try:
return self._server.run_keyword(name, args)
except xmlrpclib.Error, err:
raise RuntimeError(err.faultString)
except socket.error, (errno, err):
raise RuntimeError('Connection to remote server broken: %s' % err)
except ExpatError, err:
raise RuntimeError('Processing XML-RPC return value failed. '
'Most often this happens when the return value '
'contains characters that are not valid in XML. '
'Original error was: ExpatError: %s' % err)
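# --- Hedged usage sketch (added illustration, not part of the original module) ---
# Shows how the Remote library proxies keyword calls over XML-RPC. It assumes a
# remote server implementing the remote-library API is already listening on the
# default port; the keyword name below is hypothetical -- use whatever keywords
# the server actually exposes.
if __name__ == '__main__':
    remote = Remote('http://localhost:8270')
    for name in remote.get_keyword_names():
        print name, remote.get_keyword_arguments(name)
    print remote.run_keyword('Count Items In Directory', ['/tmp'])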
|
py | b4158e4f5e740da37bf79abc03af16bda674d011 | # Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db=None):
super(BackupMetadataAPI, self).__init__(db)
self.context = context
self._key_mgr = None
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info("Value with type=%s is not serializable",
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# NOTE(abishop): The backup manager is now responsible for
# ensuring a copy of the volume's encryption key ID is
# retained in case the volume is deleted. Yes, this means
# the backup's volume base metadata now stores the volume's
# original encryption key ID, which affects how things are
# handled when backups are restored. The backup manager
# handles this, too.
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields, excludes=None):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
:param metadata: master set of metadata
:param fields: list of fields we want to extract
:param excludes: fields to be excluded
:returns: filtered metadata
"""
if not fields:
return metadata
if not excludes:
excludes = []
subset = {}
for field in fields:
if field in metadata and field not in excludes:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
excludes = []
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
# NOTE(dosaboy): if the target volume looks like it was auto-created
# as part of this restore operation and we have a name to restore
# then apply the name to the target volume. However, if that target
# volume already existed and it has a name or we do not have a name to
# restore, then ignore this key. This is intended to be a less drastic
# solution than commit 7ee80f7.
key = 'display_name'
if key in fields and key in metadata:
target_vol = self.db.volume_get(self.context, volume_id)
name = target_vol.get(key, '')
if (not metadata.get(key) or name and
not name.startswith('restore_backup_')):
excludes.append(key)
excludes.append('display_description')
metadata = self._filter(metadata, fields, excludes=excludes)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description', 'encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for type in factory:
func = factory[type][0]
fields = factory[type][1]
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db=None):
super(BackupDriver, self).__init__(db)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation the volume_file parameter will be a proxy that executes all
        methods in native threads; the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation the volume_file parameter will be a proxy that executes all
        methods in native threads; the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def delete_backup(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export driver specific backup record information.
        If the backup backend needs additional driver-specific information to
        import the backup record back into the system, it must override this
        method and return it here as a dictionary so it can be serialized into a
string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
return {}
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
        If the backup backend needs additional driver-specific information to
        import the backup record back into the system, it must override this method
since it will be called with the extra information that was provided by
export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises InvalidBackup, NotImplementedError:
"""
return
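# --- Hedged sketch (added illustration, not part of the original module) ---
# A minimal outline of what a concrete driver must provide: the three abstract
# methods on BackupDriver. The class below is a do-nothing placeholder, not a
# real backend; real drivers stream volume_file to and from their storage.
class _NoOpBackupDriver(BackupDriver):
    def backup(self, backup, volume_file, backup_metadata=False):
        # A real driver would read the volume data from volume_file (a
        # native-thread proxy) and could persist self.get_metadata(...).
        return
    def restore(self, backup, volume_id, volume_file):
        # A real driver would write the saved data back into volume_file and
        # call self.put_metadata(volume_id, ...) if metadata was backed up.
        return
    def delete_backup(self, backup):
        return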
|